getCredentials method error when running the "Word Count" program in Eclipse after importing all the JAR files



error:

Exception in thread "main" java.lang.NoSuchMethodError: org.apache.hadoop.security.UserGroupInformation.getCredentials()Lorg/apache/hadoop/security/Credentials;
    at org.apache.hadoop.mapreduce.Job.&lt;init&gt;(Job.java:135)
    at org.apache.hadoop.mapreduce.Job.getInstance(Job.java:176)
    at org.apache.hadoop.mapreduce.Job.getInstance(Job.java:195)
    at WordCount.main(WordCount.java:20)

Hadoop version: 2.2.0
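A NoSuchMethodError like this usually means an older Hadoop jar (1.x, where UserGroupInformation.getCredentials() does not exist) is shadowing the 2.2.0 one on the Eclipse build path. As a quick diagnostic sketch (the class name ClasspathCheck is my own; run it with the same build path as WordCount), the following prints which jar the class is actually loaded from:

import org.apache.hadoop.security.UserGroupInformation;

public class ClasspathCheck {
    public static void main(String[] args) {
        // Prints the jar that UserGroupInformation was loaded from. If this is
        // not a hadoop-common 2.2.0 jar, an older jar is shadowing it.
        System.out.println(UserGroupInformation.class
                .getProtectionDomain().getCodeSource().getLocation());
    }
}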

WordCount.java

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
public class WordCount {
    public static void main(String[] args) throws Exception {
        if (args.length != 2) {
            System.out.println("usage: [input] [output]");
            System.exit(-1);
        }

        Job job = Job.getInstance(new Configuration(), "word count");
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        job.setMapperClass(WordMapper.class);
        job.setReducerClass(SumReducer.class);
        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        job.setJarByClass(WordCount.class);
        job.setJobName("WordCount");
        job.submit();
    }
}
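As an aside, a driver is often ended with a blocking call instead of submit(), so the JVM stays alive, reports progress, and exits with the job's status. A minimal sketch of the alternative last line (same Job object, behavior otherwise unchanged):

        // Block until the job finishes; 'true' prints progress to the console.
        System.exit(job.waitForCompletion(true) ? 0 : 1);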

WordMapper.java

import java.io.IOException;    
import java.util.StringTokenizer;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
public class WordMapper extends Mapper<Object, Text, Text, IntWritable> {
    private Text word = new Text();
    private final static IntWritable one = new IntWritable(1);

    @Override
    public void map(Object key, Text value, Context context)
            throws IOException, InterruptedException {
        // Break the line into words for processing
        StringTokenizer wordList = new StringTokenizer(value.toString());
        while (wordList.hasMoreTokens()) {
            word.set(wordList.nextToken());
            context.write(word, one);
        }
    }
}
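A side note: with TextInputFormat the map input key is the byte offset of each line, so the mapper's signature can be tightened from Object to LongWritable. A sketch of the changed lines only (behavior is identical):

import org.apache.hadoop.io.LongWritable;

public class WordMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    @Override
    public void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // ... body unchanged ...
    }
}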

SumReducer.java

import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class SumReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
    private IntWritable totalWordCount = new IntWritable();

    @Override
    public void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        // Sum the counts emitted by the mappers for this word
        int wordCount = 0;
        for (IntWritable value : values) {
            wordCount += value.get();
        }
        totalWordCount.set(wordCount);
        context.write(key, totalWordCount);
    }
}

Please let me know what I should do. The program uses the latest MapReduce API, and all the jars that ship with Hadoop 2.2.0 have been imported into Eclipse.

Thanks :)

Are you using the Eclipse plugin for Hadoop? If not, that is the issue. Without the plugin, Eclipse can run the WordCount class, but Hadoop won't find the jars it needs at runtime. Bundle all the jars, including WordCount, into a single jar and run it on the cluster, as sketched below.
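For illustration, a typical invocation of a bundled job jar (the jar name and HDFS paths here are placeholders, not from the original post):

hadoop jar wordcount.jar WordCount /user/hduser/input /user/hduser/output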

If you want to run it from Eclipse, you need the Eclipse plugin. If you don't have one, you can follow these instructions to build the plugin.
