How to fix NoSuchMethodError: org.apache.hadoop.mapred.InputSplit.write



I am working on a Hadoop project. I have a 1D string array named "words" and I

want to send it to the reducer, but I get this error:

Exception in thread "main" java.lang.NoSuchMethodError: org.apache.hadoop.mapred.InputSplit.write(Ljava/io/DataOutput;)V

What should I do? Can anyone help me?

Here is my mapper:

public abstract class Mapn implements Mapper<LongWritable, Text, Text, Text> {
    @SuppressWarnings("unchecked")
    public void map(LongWritable key, Text value, Context con)
            throws IOException, InterruptedException {
        String line = value.toString();
        String[] words = line.split(",");
        for (String word : words) {
            Text outputKey = new Text(word.toUpperCase().trim());
            con.write(outputKey, words);
        }
    }
}
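
For reference, the mapper above appears to mix the two MapReduce APIs: Context belongs to the new org.apache.hadoop.mapreduce package, while "implements Mapper" points at the old org.apache.hadoop.mapred interface, and con.write(outputKey, words) passes a String[] where a Text value is expected. Below is a minimal sketch of the same mapper written against the new API only, assuming the intent is to key each upper-cased word to the full comma-separated line (Text cannot carry a String[] directly):

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

// Sketch only: extends the new-API Mapper class instead of implementing
// the old mapred interface, and emits Text values rather than a String[].
public class Mapn extends Mapper<LongWritable, Text, Text, Text> {

    private final Text outputKey = new Text();
    private final Text outputValue = new Text();

    @Override
    public void map(LongWritable key, Text value, Context con)
            throws IOException, InterruptedException {
        String line = value.toString();
        String[] words = line.split(",");
        for (String word : words) {
            outputKey.set(word.toUpperCase().trim());
            // A Text value cannot hold an array, so the original
            // comma-separated line is emitted; the reducer can split it again.
            outputValue.set(line);
            con.write(outputKey, outputValue);
        }
    }
}

The runtime NoSuchMethodError itself, though, usually points at mismatched Hadoop jars on the classpath, which the answer below addresses.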

When I was learning the Hadoop MapReduce tool, besides the traditional WordCount program I wrote a program of my own and then exported a jar for it. I'm sharing that program here; it was built against the hadoop-1.2.1 jar dependencies. It converts numbers into their written-word form, and it processed 4 lakh (400,000) numbers without any errors.

So here is the program:

package com.whodesire.count;
import java.io.IOException;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
import com.whodesire.numstats.AmtInWords;
public class CountInWords {
    public static class NumberTokenizerMapper 
                    extends Mapper <Object, Text, LongWritable, Text> {
        private static final Text theOne = new Text("1");
        private LongWritable longWord = new LongWritable();
        public void map(Object key, Text value, Context context) {
            try{
                StringTokenizer itr = new StringTokenizer(value.toString());
                while (itr.hasMoreTokens()) {
                    longWord.set(Long.parseLong(itr.nextToken()));
                    context.write(longWord, theOne);
                }
            }catch(ClassCastException cce){
                System.out.println("ClassCastException raised...");
                System.exit(0);
            }catch(IOException | InterruptedException ioe){
                ioe.printStackTrace();
                System.out.println("IOException | InterruptedException raised...");
                System.exit(0);
            }
        }
    }
    public static class ModeReducerCumInWordsCounter 
            extends Reducer <LongWritable, Text, LongWritable, Text>{
        private Text result = new Text();
        //This is the user defined reducer function which is invoked for each unique key
        public void reduce(LongWritable key, Iterable<Text> values, 
                Context context) throws IOException, InterruptedException {
            /*** Putting the key, which is a LongWritable value, 
                        putting in AmtInWords constructor as String***/
            AmtInWords aiw = new AmtInWords(key.toString());
            result.set(aiw.getInWords());
            //Finally the word and counting is sent to Hadoop MR and thus to target
            context.write(key, result);
        }
    }
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        /****
         *** all random numbers generated inside input files has been
         *** generated using url https://andrew.hedges.name/experiments/random/
         ****/
        //Load the configuration files and add them to the conf object
        Configuration conf = new Configuration();       
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        Job job = new Job(conf, "CountInWords");
        //Specify the jar which contains the required classes for the job to run.
        job.setJarByClass(CountInWords.class);
        job.setMapperClass(NumberTokenizerMapper.class);
        job.setCombinerClass(ModeReducerCumInWordsCounter.class);
        job.setReducerClass(ModeReducerCumInWordsCounter.class);
        //Set the output key and the value class for the entire job
        job.setMapOutputKeyClass(LongWritable.class);
        job.setMapOutputValueClass(Text.class);
        //Set the Input (format and location) and similarly for the output also
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        //Setting the Results to Single Target File
        job.setNumReduceTasks(1);
        //Submit the job and wait for it to complete
        System.exit(job.waitForCompletion(true) ? 0 : 1);       
    }
}
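
Assuming the program is exported to a jar named CountInWords.jar and the input files already sit in HDFS (the jar name and paths here are hypothetical), the job would be launched along these lines:

hadoop jar CountInWords.jar com.whodesire.count.CountInWords /user/hduser/numbers /user/hduser/numbers-out

GenericOptionsParser strips any generic Hadoop options first, so the two remaining arguments become the input and output paths used in main().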

I suggest you review the Hadoop jars you have added, especially hadoop-core-x.x.x.jar, because judging from your error it looks like you haven't added some of the MapReduce jars to the project.
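
For example, if the project is built with Maven, the hadoop-core dependency for the 1.2.1 release used above would be declared like this (a sketch; adjust the version to whatever your cluster actually runs):

<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-core</artifactId>
    <version>1.2.1</version>
</dependency>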
