Converting a sequence file and getting the key/value pairs with a map and reduce task in Hadoop



I want to get all the key/value pairs from a sequence file through a Hadoop MapReduce application. I followed the post http://lintool.github.com/Cloud9/docs/content/staging-records.html to read the sequence file in the main class, and that works fine. Now I want to print all the key/value pairs to a plain text file in HDFS. How can I do that? My code is below.

            import java.io.File;
            import java.io.IOException;
            import java.util.*;
            import java.util.logging.Level;
            import java.util.logging.Logger;
            import org.apache.hadoop.fs.Path;
            import org.apache.hadoop.conf.*;
            import org.apache.hadoop.fs.FileSystem;
            import org.apache.hadoop.fs.FileUtil;
            import org.apache.hadoop.io.*;
            import org.apache.hadoop.mapreduce.*;
            import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
            import org.apache.hadoop.mapreduce.lib.input.SequenceFileAsBinaryInputFormat;
            import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
            import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
            import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
            public class WordCount
            {
                public static class Map extends Mapper<BytesWritable, BytesWritable, BytesWritable, BytesWritable>
                {
                    private final static IntWritable one = new IntWritable(1);
                    private Text word = new Text();
                    public void map(BytesWritable key, BytesWritable value, Context context) throws IOException, InterruptedException
                    {
                        System.out.println(key.toString());
                        System.out.println(value.toString());
                        context.write(key, value);
                    }
                }
                public static class Reduce extends Reducer
                {
                    public void reduce(Text key, Iterable<IntWritable> values, Context context)
                            throws IOException, InterruptedException
                    {
                        int sum = 0;
                        for (IntWritable val : values)
                        {
                            sum += val.get();
                        }
                        context.write(key, new IntWritable(sum));
                    }
                }
                public static void main(String[] args) throws Exception
                {
                    FileUtil.fullyDelete(new File(args[1]));
                    Configuration conf = new Configuration();
                    Job job = new Job(conf, "wordcount");
                    job.setOutputKeyClass(BytesWritable.class);
                    job.setOutputValueClass(BytesWritable.class);
                    job.setMapperClass(Map.class);
                    job.setReducerClass(Reduce.class);
                    job.setInputFormatClass(SequenceFileAsBinaryInputFormat.class);
                    job.setOutputFormatClass(TextOutputFormat.class);
                    FileInputFormat.addInputPath(job, new Path(args[0]));
                    FileOutputFormat.setOutputPath(job, new Path(args[1]));
                    job.setJarByClass(WordCount.class);
                    job.waitForCompletion(true);
                }
            }
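
As a point of reference, this kind of dump is usually written as a map-only job: an identity mapper plus TextOutputFormat already produces one "key<TAB>value" line per record in HDFS. Below is a minimal sketch under that assumption; the class names SequenceFileDump and DumpMapper are made up, and it assumes the sequence file stores Text keys and Text values (it uses SequenceFileInputFormat, which hands the deserialized objects to the mapper, rather than the binary variant). Adjust the generic types to whatever the file actually contains.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

public class SequenceFileDump {

    // Identity mapper: every record is written out unchanged, so
    // TextOutputFormat turns it into one "key<TAB>value" line.
    public static class DumpMapper extends Mapper<Text, Text, Text, Text> {
        @Override
        protected void map(Text key, Text value, Context context)
                throws IOException, InterruptedException {
            context.write(key, value);
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = new Job(conf, "sequence-file-dump");
        job.setJarByClass(SequenceFileDump.class);
        job.setMapperClass(DumpMapper.class);
        job.setNumReduceTasks(0);                      // map-only, no reducer needed
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        job.setInputFormatClass(SequenceFileInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}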

Use the following code to read all the key/value pairs. Change it to suit your needs.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.util.ReflectionUtils;

public class SequenceFileReader {
    public static void main(String args[]) throws Exception {
        System.out.println("Reading Sequence File");
        Configuration conf = new Configuration();
        conf.addResource(new Path("/home/mohammad/hadoop-0.20.203.0/conf/core-site.xml"));
        conf.addResource(new Path("/home/mohammad/hadoop-0.20.203.0/conf/hdfs-site.xml"));
        FileSystem fs = FileSystem.get(conf);
        Path path = new Path("/seq/file");
        SequenceFile.Reader reader = null;
        try {
            reader = new SequenceFile.Reader(fs, path, conf);
            // Instantiate key/value objects of whatever types the file declares
            Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
            Writable value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
            while (reader.next(key, value)) {
                System.out.println(key + "  <===>  " + value.toString());
            }
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            IOUtils.closeStream(reader);
        }
    }
}
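
The snippet above prints to stdout. If, as in the question, the pairs should end up in a plain text file on HDFS, the same reader loop can write through an FSDataOutputStream instead. A rough sketch follows; the class name SequenceFileToText is made up, and it assumes the key and value classes have sensible toString() representations.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.util.ReflectionUtils;

public class SequenceFileToText {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path input = new Path(args[0]);    // existing sequence file in HDFS
        Path output = new Path(args[1]);   // text file to create in HDFS

        SequenceFile.Reader reader = null;
        FSDataOutputStream out = null;
        try {
            reader = new SequenceFile.Reader(fs, input, conf);
            out = fs.create(output, true);   // overwrite if the file already exists
            Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
            Writable value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
            while (reader.next(key, value)) {
                // one "key<TAB>value" line per record
                out.writeBytes(key + "\t" + value + "\n");
            }
        } finally {
            IOUtils.closeStream(reader);
            IOUtils.closeStream(out);
        }
    }
}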

Please find the program below. It may be useful for getting an idea of how to convert a BytesWritable value back into text.
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.SequenceFile.Reader;
import org.apache.hadoop.io.Text;

public class SequenceFileRead {
    public static void main(String args[]) throws IOException {
        Configuration conf = new Configuration();
        Path path = new Path(args[0]);
        SequenceFile.Reader reader = null;
        try {
            reader = new SequenceFile.Reader(conf, Reader.file(path));
            Text key = new Text();
            BytesWritable value = new BytesWritable();
            while (reader.next(key, value)) {
                System.out.println(key);
                // getBytes() returns the backing array; only the first
                // getLength() bytes belong to this record
                byte[] bytes = value.getBytes();
                int size = value.getLength();
                byte[] b = new byte[size];
                InputStream is = new ByteArrayInputStream(bytes);
                is.read(b);
                System.out.println(new String(b));
            }
        } finally {
            IOUtils.closeStream(reader);
        }
    }
}
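
A side note on the conversion itself: the detour through ByteArrayInputStream is not required. Since getBytes() returns the backing array and getLength() gives the number of valid bytes, the value can be turned into a String or Text directly. A small sketch, assuming the stored bytes are UTF-8 text; the helper class name BytesWritableToText is made up.

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Text;

public class BytesWritableToText {
    // Convert a BytesWritable to Text, copying only the valid bytes
    // (the backing array may be padded beyond getLength()).
    // Assumes the bytes are UTF-8 encoded text.
    public static Text toText(BytesWritable value) {
        return new Text(new String(value.getBytes(), 0, value.getLength(),
                StandardCharsets.UTF_8));
    }
}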
