I have the following algorithm, which is supposed to sort data in alphabetical order:
public void setup(Context context) throws IOException, InterruptedException {
    conf = context.getConfiguration();
    caseSensitive = conf.getBoolean("amasort.case.sensitive", true);
}

@Override
public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
    String line = (caseSensitive) ? value.toString() : value.toString().toLowerCase();
    word.set(line + "_" + key.toString());
    context.write(word, one);
    System.out.println("key:" + key.toString() + ";value:" + value.toString());
}
}
public static class ForwardReducer
        extends Reducer<Text, NullWritable, Text, NullWritable> {
    private NullWritable result = NullWritable.get();

    public void reduce(Text key, Iterable<NullWritable> values, Context context)
            throws IOException, InterruptedException {
        String originalWord = key.toString();
        originalWord = originalWord.substring(0, originalWord.lastIndexOf("_"));
        key.set(originalWord);
        context.write(key, result);
    }
}
public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    GenericOptionsParser optionParser = new GenericOptionsParser(conf, args);
    String[] remainingArgs = optionParser.getRemainingArgs();
    Job job = Job.getInstance(conf, "word sort");
    job.setJarByClass(AmaSort.class);
    job.setMapperClass(LineMapper.class);
    // job.setCombinerClass(ForwardReducer.class);
    job.setReducerClass(ForwardReducer.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(NullWritable.class);
    FileInputFormat.addInputPath(job, new Path(remainingArgs[0]));
    FileOutputFormat.setOutputPath(job, new Path(remainingArgs[1]));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
}
I tried this algorithm on my dataset, whose lines look like (@xxxxxxx,0,tcp,xx,1,1,1,2,4,5,…), but in the output all lines starting with @ are gone, and the structure of the data lines (0,tcp,x1x1,1114,…) gets modified. I only want to sort my dataset on this specific character (@): all lines starting with @ should come first in the file, and the remaining lines should keep their original structure. Can someone help me fix this algorithm?
You can perform the sort with the modified code below. The important change is in the mapper: it now emits each line unchanged as the key, with a NullWritable value, so the framework's shuffle phase sorts the lines for you, and the reducer no longer needs the "_" + offset suffix handling that was most likely corrupting your records:
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
public class AmaSort {

    static Configuration conf = null;
    private static boolean caseSensitive;
    private static Text word = new Text();

    public static class LineMapper extends Mapper<Object, Text, Text, NullWritable> {

        @Override
        public void setup(Context context) throws IOException, InterruptedException {
            conf = context.getConfiguration();
            // Read the case-sensitivity switch from the job configuration (defaults to true).
            caseSensitive = conf.getBoolean("amasort.case.sensitive", true);
        }
        @Override
        public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
            String line = caseSensitive ? value.toString() : value.toString().toLowerCase();
            // Emit the entire line as the key. MapReduce sorts keys during the
            // shuffle phase, so the reducer receives the lines in sorted order.
            word.set(line);
            context.write(word, NullWritable.get());
        }
    }
    public static class ForwardReducer extends Reducer<Text, NullWritable, Text, NullWritable> {

        private NullWritable result = NullWritable.get();

        @Override
        public void reduce(Text key, Iterable<NullWritable> values, Context context)
                throws IOException, InterruptedException {
            // Keys arrive already sorted; write each line straight back out.
            context.write(key, result);
        }
    }
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        GenericOptionsParser optionParser = new GenericOptionsParser(conf, args);
        String[] remainingArgs = optionParser.getRemainingArgs();
        Job job = Job.getInstance(conf, "word sort");
        job.setJarByClass(AmaSort.class);
        job.setMapperClass(LineMapper.class);
        job.setReducerClass(ForwardReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(NullWritable.class);
        FileInputFormat.addInputPath(job, new Path(remainingArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(remainingArgs[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
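For reference, this is one way to run the job; the jar name and HDFS paths below are placeholders you would replace with your own. Because main feeds the arguments through GenericOptionsParser, you can flip the case-sensitivity flag with the generic -D option:

hadoop jar amasort.jar AmaSort -D amasort.case.sensitive=false /user/you/mydata /user/you/mydata-sorted

Note that the output is fully sorted only when the job runs with a single reducer (the default unless your configuration says otherwise); with several reducers, each output file is sorted on its own but the files are not merged into one global order.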