Hadoop 中的错误:Exception in thread "main" java.lang.ClassNotFoundException



我已经在 Oracle VM VirtualBox 上的 Ubuntu 14.04.3 LTS (GNU/Linux 3.19.0-25-generic x86_64) 中安装了 hadoop-2.7.1 (Apache Hadoop)。

我正在使用以下命令来编译和运行我的代码:

编译

hduser@dt-VirtualBox:~/Desktop/project/try1$ javac -classpath $HADOOP_HOME/share/hadoop/common/hadoop-common-2.7.1.jar:$HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-client-core-2.7.1.jar:$HADOOP_HOME/share/hadoop/common/lib/commons-cli-1.2.jar -d /home/hduser/Desktop/project/try1 *.java

然后制作类文件的 jar 文件并使用以下命令运行 jar 文件:

hduser@dt-VirtualBox:~/Desktop/project/try1$ hadoop jar table_one.jar DriverMap /trial/trial/ output_tryy1

请在下面找到错误

Exception in thread "main" java.lang.ClassNotFoundException: DriverMap
    at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
    at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
    at java.security.AccessController.doPrivileged(Native Method)
    at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
    at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
    at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
    at java.lang.Class.forName0(Native Method)
    at java.lang.Class.forName(Class.java:278)
    at org.apache.hadoop.util.RunJar.run(RunJar.java:214)
    at org.apache.hadoop.util.RunJar.main(RunJar.java:136)

这是我的 DriverMap.java 文件:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
public class DriverMap {
public class MyMapper extends Mapper<Text, Text, Text, Text> {
        String str, token = null;
        List<String> tokens = new ArrayList<String>();
        String productId, userId, score;
        private Text word = new Text();
        public void map(Text key, Text value, Context context) throws IOException, InterruptedException {     
        str = value.toString();
       Pattern p = Pattern.compile(".*productId.*$|.*userId.*$|.*score.*$");
            Matcher m = p.matcher(str);
            while(m.find())
            {
              token = m.group( 0 ); //group 0 is always the entire match

              tokens.add(token);
            }
        //System.out.println(tokens);
        String[] a = tokens.toString().split(":|\,|]");
        for(int j=0; j<a.length; j=j+6)
        {
            //System.out.println("a1 for " + j+ "  is : "+  a1[j]);
            productId = a[j+1];
            userId = a[j+3];
            score = a[j+5];
            word.set(productId + "|" +userId);
            context.write(word, new Text(score));
          /*System.out.println("productId is: "+ a[j+1]);
            System.out.println("userId is: "+ a[j+3]);
            System.out.println("score is: "+ a[j+5]);*/
        }
    }
}


public static void main(String[] args) throws Exception{
        // TODO Auto-generated method stub
     Configuration conf = new Configuration();
     Job job = new Job(conf, "recommendation");
     job.setOutputKeyClass(Text.class);
     job.setOutputValueClass(Text.class);
     job.setMapOutputKeyClass(Text.class);
     job.setMapOutputValueClass(Text.class);
     job.setMapperClass(MyMapper.class);
     //job.setReducerClass(Reduce.class);
     job.setNumReduceTasks(0);     //This turns off the reducer and our mapper        result will be outputed to the output file.
     job.setInputFormatClass(TextInputFormat.class);
     job.setOutputFormatClass(TextOutputFormat.class);
     job.setJarByClass(DriverMap.class);
     /*   FileInputFormat.addInputPath(job, new Path(args[0]));
     FileOutputFormat.setOutputPath(job, new Path(args[1]));
     */   
     TextInputFormat.setInputPaths(job, new Path(args[0]));
     TextOutputFormat.setOutputPath(job, new Path(args[1]));
     job.waitForCompletion(true);
      }
    }

运行 jar 文件时,如果 DriverMap 声明了包,你必须使用完全限定类名,形如 "PackageName.DriverMap",否则 Hadoop 找不到该类并抛出 ClassNotFoundException。

此外,如果您已经把类文件打包成了 jar,就不需要再单独编译源代码。

相关内容

  • 没有找到相关文章

最新更新