MapReduce ArrayList type mismatch

Hi, I have only been using Hadoop for a week and am still trying things out with it.

I have the following input, as CSV:

    PRAVEEN,400201399,Baby,026A1K,12/04/2010
    PRAVEEN,4002013410,TOY,02038L,1/04/2014
    PRAVEEN,2727272727272,abc,03383,03/14/2015
    PRAVEEN,2263637373,cde,7373737,12/24/2012

The map function should pick the second field of each CSV record as the key (i.e. 400201399, etc.) and the third and last fields as the value (e.g. TOY and 12/04/2010), and I want to put those values into an ArrayList rather than emit them as Text.

But I get the following error:

    Error: java.io.IOException: Type mismatch in value from map: expected org.apache.hadoop.io.Text, received java.util.ArrayList

The reduce function is also simple: I have to iterate over the list and produce the required result as the final value (in the reduce code below I only pick the date from the list).

Here is my code:

    package com.test.mapreduce;
    import java.io.IOException;
    import java.text.ParseException;
    import java.text.SimpleDateFormat;
    import java.util.ArrayList;
    import java.util.Date;
    import java.util.HashSet;
    import java.util.Iterator;
    import java.util.List;
    import java.util.Set;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.conf.Configured;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.io.ArrayWritable;
    import org.apache.hadoop.mapred.FileInputFormat;
    import org.apache.hadoop.mapred.FileOutputFormat;
    import org.apache.hadoop.mapred.JobClient;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.KeyValueTextInputFormat;
    import org.apache.hadoop.mapred.MapReduceBase;
    import org.apache.hadoop.mapred.Mapper;
    import org.apache.hadoop.mapred.OutputCollector;
    import org.apache.hadoop.mapred.Reducer;
    import org.apache.hadoop.mapred.Reporter;
    import org.apache.hadoop.mapred.TextInputFormat;
    import org.apache.hadoop.mapred.TextOutputFormat;
    import org.apache.hadoop.util.Tool;
    import org.apache.hadoop.util.ToolRunner;

    public class RetailCustomerProduct extends Configured implements Tool {

        public static class MapClass extends MapReduceBase
                implements Mapper<LongWritable, Text, Text, List<Text>> {

            private Text key1 = new Text();
            private List<Text> productList = new ArrayList<Text>();
            private Text value1 = new Text();
            private Text product = new Text();
            private int noofFields = 5;

            public void map(LongWritable key, Text value,
                            OutputCollector<Text, List<Text>> output,
                            Reporter reporter) throws IOException {
                String line = value.toString().replaceAll("\\s+", "");
                String[] split = line.split(",");

                if (split.length != noofFields) {
                    return;
                } else {
                    key1.set(split[1]);
                    value1.set(split[4].toString().trim());
                    product.set(split[2].toString().trim());
                    productList.add(value1);
                    productList.add(product);

                    System.out.println(split[4].toString().trim());
                    output.collect(key1, productList);
                }
            }
        }

        public static class Reduce extends MapReduceBase
                implements Reducer<Text, List<Text>, Text, Text> {

            public void reduce(Text key, Iterator<List<Text>> values,
                               OutputCollector<Text, Text> output,
                               Reporter reporter) throws IOException {
                SimpleDateFormat formatter = new SimpleDateFormat("MM/dd/yyyy");
                Date date = new Date();
                List<String> dateList = new ArrayList<String>();
                List<String> productList = new ArrayList<String>();
                for (Iterator<List<Text>> it = values; it.hasNext();) {
                    // add the values in the arrayList
                    dateList.add(((Text) it.next().get(0)).toString());
                    productList.add(((Text) it.next().get(1)).toString());
                }
                if (dateList.size() == 1) {
                    try {
                        date = formatter.parse(dateList.get(0).toString());
                    } catch (ParseException e) {
                        e.printStackTrace();
                    }
                } else {
                    String str = dateList.get(0).toString();
                    try {
                        date = formatter.parse(dateList.get(0).toString());
                    } catch (ParseException e1) {
                        e1.printStackTrace();
                    }
                    for (int i = 0; i < dateList.size(); ++i) {
                        try {
                            if ((formatter.parse(dateList.get(i).toString())).compareTo(date) > 0) {
                                date = formatter.parse(dateList.get(i).toString());
                                // getting the max date from the list
                            }
                        } catch (ParseException e) {
                            e.printStackTrace();
                        }
                    }
                }
                Text value = new Text(date.toString());
                output.collect(key, value);
            }
        }

        public int run(String[] args) throws Exception {
            Configuration conf = getConf();
            JobConf job = new JobConf(conf, RetailCustomerProduct.class);
            Path in = new Path(args[0]);
            Path out = new Path(args[1]);
            FileInputFormat.setInputPaths(job, in);
            FileOutputFormat.setOutputPath(job, out);
            job.setJobName("RetailCustomerProduct");
            job.setMapperClass(MapClass.class);
            job.setReducerClass(Reduce.class);
            job.setInputFormat(TextInputFormat.class);
            job.setOutputFormat(TextOutputFormat.class);
            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(Text.class);
            job.set("key.value.separator.in.input.line", ",");
            JobClient.runJob(job);
            return 0;
        }

        public static void main(String[] args) throws Exception {
            int res = ToolRunner.run(new Configuration(), new RetailCustomerProduct(), args);
            System.exit(res);
        }
    }

Is there a different ArrayList implementation for Hadoop?

My map function should take LongWritable as the KEY and Text as the VALUE, and should output Text as the KEY and an ArrayList as the VALUE.

My reduce function should take Text as the KEY and an ArrayList as the VALUE, and then output Text as the KEY and Text as the VALUE.
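
For context: Hadoop cannot serialize a plain java.util.ArrayList, because intermediate keys and values must implement Writable. The closest built-in collection type is org.apache.hadoop.io.ArrayWritable (already imported in the code above), which has to be subclassed with a no-arg constructor so the framework can create an instance during deserialization. A minimal sketch (the class name TextArrayWritable is my own, not a Hadoop class):

    import org.apache.hadoop.io.ArrayWritable;
    import org.apache.hadoop.io.Text;

    // A Writable "list of Text": ArrayWritable fixed to Text elements.
    // The no-arg constructor is required so Hadoop can build an empty
    // instance and then populate it via readFields().
    public class TextArrayWritable extends ArrayWritable {
        public TextArrayWritable() {
            super(Text.class);
        }

        public TextArrayWritable(Text[] values) {
            super(Text.class, values);
        }
    }

With that, the mapper would be typed Mapper<LongWritable, Text, Text, TextArrayWritable> and the reducer Reducer<Text, TextArrayWritable, Text, Text>, and each map() call would collect a fresh TextArrayWritable instead of reusing one shared list.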

So in the driver class, which classes have to be set? Currently it looks like this (see the sketch after this snippet):

    job.setInputFormat(TextInputFormat.class);
    job.setOutputFormat(TextOutputFormat.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Text.class);
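
When the map output value type differs from the job's final output value type (as it would with the TextArrayWritable sketch above), the old mapred JobConf lets you declare the two separately. A minimal sketch, assuming that hypothetical class:

    // Final (reducer) output types
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Text.class);
    // Intermediate (map) output types; TextArrayWritable is the
    // hypothetical wrapper sketched earlier, not a Hadoop class
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(TextArrayWritable.class);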

Can anyone show me the correct code?

I'm also new to Hadoop, but I think this line is the problem:

    job.setOutputValueClass(Text.class);

It sets the output value type to Text, not List<Text>. I haven't tried outputting a list; instead I build a tab-separated string from the values and output it as a Text instance:

    new Text(split[4].toString().trim() + "\t" + split[2].toString().trim());
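
Here is a minimal sketch of that approach, keeping the question's old mapred API and imports; the field positions and the "\t" separator come from the question, while the names and the null handling are my own:

    public static class MapClass extends MapReduceBase
            implements Mapper<LongWritable, Text, Text, Text> {

        private final Text outKey = new Text();
        private final Text outValue = new Text();

        public void map(LongWritable key, Text value,
                        OutputCollector<Text, Text> output,
                        Reporter reporter) throws IOException {
            String[] split = value.toString().trim().split(",");
            if (split.length != 5) {
                return; // skip malformed records
            }
            outKey.set(split[1]);                                   // e.g. 400201399
            outValue.set(split[4].trim() + "\t" + split[2].trim()); // date <TAB> product
            output.collect(outKey, outValue);
        }
    }

    public static class Reduce extends MapReduceBase
            implements Reducer<Text, Text, Text, Text> {

        public void reduce(Text key, Iterator<Text> values,
                           OutputCollector<Text, Text> output,
                           Reporter reporter) throws IOException {
            SimpleDateFormat formatter = new SimpleDateFormat("MM/dd/yyyy");
            Date maxDate = null;
            while (values.hasNext()) {
                // fields[0] = date, fields[1] = product
                String[] fields = values.next().toString().split("\t");
                try {
                    Date d = formatter.parse(fields[0]);
                    if (maxDate == null || d.compareTo(maxDate) > 0) {
                        maxDate = d; // keep the latest date seen for this key
                    }
                } catch (ParseException e) {
                    // ignore records whose date does not parse
                }
            }
            if (maxDate != null) {
                output.collect(key, new Text(maxDate.toString()));
            }
        }
    }

Since both map and reduce now emit Text values, the existing job.setOutputKeyClass(Text.class) / job.setOutputValueClass(Text.class) in the driver already match the declared types, and the type-mismatch IOException goes away.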
