Reducer not working in Hadoop MapReduce



Hi, my reducer is not printing the desired result; please take a look at the code.

This is my map function:

public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
    String str_line = value.toString();
    Detail_output1_column_array = str_line.split("\"+tabSpace);

    Outputkey = Detail_output1_column_array[2];
    System.out.println(Outputkey);
    context.write(new Text(Outputkey), NullWritable.get());
}

public static class ShopFile_Reducer extends Reducer<Text, Iterable<NullWritable>, NullWritable, Text> {

    public void reduce(Text Key, Iterable<NullWritable> Values, Context context) throws IOException, InterruptedException {
        Key = new Text(Key.toString());
        context.write(NullWritable.get(), new Text(Key));
    }
}

Suppose Detail_output1_column_array[2] contains 01 01 01 02 01 02 01 03 01 (one value per input line).

After the reducer, I need output like this: 01 02 03

But it is printing everything: 01 01 01 02 01 02 01 03 01

This is my driver code:

Configuration Shopconf = new Configuration();
Shopconf.setStrings("DTGroup_input", DTGroup_input);
Job Shop = new Job(Shopconf, "Export_Column_Mapping");
Shop.setJarByClass(ExportColumnMapping.class);
Shop.setJobName("ShopFile_Job");
Shop.setMapperClass(ShopFile_Mapper.class);
Shop.setReducerClass(ShopFile_Reducer.class);
Shop.setInputFormatClass(TextInputFormat.class);
Shop.setOutputFormatClass(TextOutputFormat.class);
Shop.setMapOutputKeyClass(Text.class);
Shop.setMapOutputValueClass(NullWritable.class);
Shop.setOutputKeyClass(Text.class);
Shop.setOutputValueClass(Text.class);
FileInputFormat.addInputPath(Shop, new Path(outputpath + "/Detailsfile/part*"));
FileOutputFormat.setOutputPath(Shop, new Path(outputpath + "/Shopfile"));
Shop.waitForCompletion(true);

In the mapper code, use Outputkey.set(Detail_output1_column_array[2]); instead of Outputkey = Detail_output1_column_array[2];
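
A sketch of how the mapper would look with that change, assuming Outputkey is declared as a reusable org.apache.hadoop.io.Text field (its declaration is not shown in the question, and the split regex here borrows the fix suggested in the next answer):

private final Text Outputkey = new Text();  // declared once, reused for every record

public void map(LongWritable key, Text value, Context context)
        throws IOException, InterruptedException {
    String str_line = value.toString();
    String[] Detail_output1_column_array = str_line.split("( |\t)");
    Outputkey.set(Detail_output1_column_array[2]);  // set() instead of reassignment
    context.write(Outputkey, NullWritable.get());
}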

You have an error in this line:

 Detail_output1_column_array = str_line.split("\"+tabSpace);

The string split does not appear to work correctly. If you want to split on tabs and spaces, you should change it to:

Detail_output1_column_array = str_line.split("( |\t)");
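
A quick self-contained check of that regex (the sample record is made up for illustration):

public class SplitCheck {
    public static void main(String[] args) {
        // "( |\t)" splits on a single space or a single tab character.
        String line = "a\tb\t01";               // hypothetical tab-separated record
        String[] cols = line.split("( |\t)");
        System.out.println(cols[2]);            // prints "01"
    }
}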

In your configuration, can you make the following change and check?

Instead of this line:

 Shop.setOutputKeyClass(Text.class);

add this line:

Shop.setOutputKeyClass(NullWritable.class);
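
In context, the output-class settings in the driver would then read as below; the reducer emits NullWritable keys and Text values, so the job's final output classes must match:

// Map output: (Text, NullWritable); final reduce output: (NullWritable, Text).
Shop.setMapOutputKeyClass(Text.class);
Shop.setMapOutputValueClass(NullWritable.class);
Shop.setOutputKeyClass(NullWritable.class);   // was Text.class
Shop.setOutputValueClass(Text.class);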

OK, so I don't know what exactly went wrong there. Your code works for me.

My data:

bash-4.1$ hdfs dfs -cat input/numbers
a   01
b   01
a   01
a   02
a   01
a   02
a   01
a   03
a   01

Output:

bash-4.1$ hdfs dfs -cat output/part-r-00000
01
02
03

Your code:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;

public class HdfsFiles {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://namenode:9000");
    Job job = Job.getInstance(conf);
    job.setJarByClass(HdfsFiles.class);
    job.setJobName("myjob");
    job.setMapperClass(ShopFile_Mapper.class);
    job.setReducerClass(ShopFile_Reducer.class);
    FileInputFormat.addInputPath(job, new Path("input"));
    FileOutputFormat.setOutputPath(job, new Path("output"));
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(NullWritable.class);
    job.setOutputKeyClass(NullWritable.class);
    job.setOutputValueClass(Text.class);
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }

  static class ShopFile_Mapper extends Mapper<LongWritable, Text, Text, NullWritable> {
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
      // Emit the second tab-separated column as the key; the shuffle groups duplicate keys.
      String str_line = value.toString();
      context.write(new Text(str_line.split("\t")[1]), NullWritable.get());
    }
  }

  // The value type parameter must be NullWritable (what the mapper emits) and reduce()
  // must take Iterable<NullWritable>. If the signature does not match the class's generic
  // parameters, reduce() never overrides the framework method, the default identity
  // reducer writes every record through, and you get the duplicated output from the question.
  static class ShopFile_Reducer extends Reducer<Text, NullWritable, NullWritable, Text> {
    @Override
    protected void reduce(Text key, Iterable<NullWritable> values, Context context) throws IOException, InterruptedException {
      // Called once per distinct key, so writing the key once removes the duplicates.
      context.write(NullWritable.get(), key);
    }
  }
}
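
As a side note, since the job only needs the distinct values, a combiner could drop duplicates on the map side before the shuffle. A minimal sketch (the Dedup_Combiner class below is not in the original post; it is a hypothetical addition):

// Hypothetical combiner: emits each key once per mapper, shrinking the shuffle.
// A combiner's output types must match the map output types (Text, NullWritable),
// which is why ShopFile_Reducer (which emits NullWritable, Text) cannot be reused here.
static class Dedup_Combiner extends Reducer<Text, NullWritable, Text, NullWritable> {
  @Override
  protected void reduce(Text key, Iterable<NullWritable> values, Context context)
      throws IOException, InterruptedException {
    context.write(key, NullWritable.get());
  }
}

It would be registered in the driver with job.setCombinerClass(Dedup_Combiner.class);.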
