I am new to the Map-Reduce programming paradigm, so my question may sound very silly to many people; I request everyone to bear with me.
I am trying to count the occurrences of a particular word in a file. I have written the following Java classes for this.
The input file has the following entries:
The tiger entered village in the night the the
Then ... the story continues...
I have deliberately put the word 'the' in many times, for the purposes of my program.
WordCountMapper.java
package com.demo.map_reduce.word_count.mapper;
import java.io.IOException;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
public class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable>
{
    @SuppressWarnings({ "rawtypes", "unchecked" })
    @Override
    protected void map(LongWritable key, Text value, org.apache.hadoop.mapreduce.Mapper.Context context) throws IOException, InterruptedException {
        if (null != value) {
            final String line = value.toString();
            if (StringUtils.containsIgnoreCase(line, "the")) {
                context.write(new Text("the"), new IntWritable(StringUtils.countMatches(line, "the")));
            }
        }
    }
}
WordCountReducer.java
package com.demo.map_reduce.word_count.reducer;
import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
public class WordCountReducer extends Reducer<Text, IntWritable, Text, IntWritable>
{
    @SuppressWarnings({ "rawtypes", "unchecked" })
    public void reduce(Text key, Iterable<IntWritable> values, org.apache.hadoop.mapreduce.Reducer.Context context)
            throws IOException, InterruptedException {
        int count = 0;
        for (final IntWritable nextValue : values) {
            count += nextValue.get();
        }
        context.write(key, new IntWritable(count));
    }
}
WordCounter.java
package com.demo.map_reduce.word_count;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import com.demo.map_reduce.word_count.mapper.WordCountMapper;
import com.demo.map_reduce.word_count.reducer.WordCountReducer;
public class WordCounter
{
    public static void main(String[] args) {
        final String inputDataPath = "/input/my_wordcount_1/input_data_file.txt";
        final String outputDataDir = "/output/my_wordcount_1";
        try {
            final Job job = Job.getInstance();
            job.setJobName("Simple word count");
            job.setJarByClass(WordCounter.class);
            job.setMapperClass(WordCountMapper.class);
            job.setReducerClass(WordCountReducer.class);
            job.setMapOutputKeyClass(Text.class);
            job.setMapOutputValueClass(IntWritable.class);
            FileInputFormat.addInputPath(job, new Path(inputDataPath));
            FileOutputFormat.setOutputPath(job, new Path(outputDataDir));
            job.waitForCompletion(true);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
When I run this program on Hadoop, I get the following output.
the 2
the 1
the 3
I expected the reducer to output
the 4
I am sure I am doing something wrong, or I may not have understood it completely. Can somebody help me out?
Thanks in advance.
-Niranjan
The problem is that your reduce method is not being called. Because you declared it with the raw org.apache.hadoop.mapreduce.Reducer.Context parameter, its signature matches neither the generic reduce(KEYIN, Iterable<VALUEIN>, Context) of your parameterized Reducer superclass nor its erasure, so it overloads rather than overrides reduce; Hadoop therefore runs the default identity reduce, which writes each map output straight through. That is why you see one line per map emission instead of a single total.
To make it work, just change the signature of your reduce function to
public void reduce(Text key, Iterable<IntWritable> values, Context context)
throws IOException, InterruptedException {
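For reference, here is a sketch of the full corrected reducer. Only the Context parameter type changes from your version, but note that the @Override annotation now compiles, because the method genuinely overrides Reducer.reduce:

package com.demo.map_reduce.word_count.reducer;

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class WordCountReducer extends Reducer<Text, IntWritable, Text, IntWritable>
{
    @Override
    public void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        // All counts that the mappers emitted for this key arrive together,
        // so one reduce call can sum them into a single total.
        int count = 0;
        for (final IntWritable nextValue : values) {
            count += nextValue.get();
        }
        context.write(key, new IntWritable(count));
    }
}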
The other problem is that you are not normalizing the key and you are not counting words; you are counting lines that contain the word 'the'. (Your contains check ignores case, but countMatches does not, and you emit one aggregate per line rather than one count per word.)
Change your map logic to the following:
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
    if (null != value) {
        final String line = value.toString();
        // Split on whitespace ("\\s+" -- note the escaped backslash) and emit
        // (word, 1) for every token, normalized to lower case.
        for (String word : line.split("\\s+")) {
            context.write(new Text(word.trim().toLowerCase()), new IntWritable(1));
        }
    }
}
and simplify the reduce logic to the following (again using the generic Context type, for the reason described above):
public void reduce(Text key, Iterable<IntWritable> values, Context context)
        throws IOException, InterruptedException {
    int count = 0;
    // The mapper already lower-cases and trims each token; this guard keeps
    // only the word you are interested in.
    if (key.toString().trim().toLowerCase().equals("the")) {
        for (final IntWritable nextValue : values) {
            count += nextValue.get();
        }
        context.write(key, new IntWritable(count));
    }
}
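If you want to sanity-check this without a cluster, a reduce-side unit test is enough to see the aggregation happen in one call. The sketch below assumes MRUnit (the Apache MapReduce test harness) and JUnit are on your test classpath; the test class name is made up for illustration. It feeds the reducer the three per-line counts from your broken run and asserts that they come out as one summed record:

import java.util.Arrays;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mrunit.mapreduce.ReduceDriver;
import org.junit.Test;

public class WordCountReducerTest {
    @Test
    public void sumsAllCountsForTheInOneCall() throws Exception {
        // 2 + 1 + 3 were the separate lines your broken job printed;
        // a working reducer should fold them into a single 6.
        ReduceDriver.newReduceDriver(new WordCountReducer())
            .withInput(new Text("the"), Arrays.asList(
                new IntWritable(2), new IntWritable(1), new IntWritable(3)))
            .withOutput(new Text("the"), new IntWritable(6))
            .runTest();
    }
}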