How do wrapper types work in Hadoop



I am not a Java expert, but I know the basics of Java, and whenever I come across Java code I try to understand it in depth. This may be a very silly doubt, but I would love to get it clear in my head.
I am posting in the Java community because my doubt is purely about Java.

For the last few months I have been working with Hadoop, and I found that Hadoop uses its own types, which are wrappers around Java's primitive types, to make sending data across the network more efficient through serialization and deserialization.
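If I understand it correctly, what makes a type a "Hadoop type" is that it knows how to write itself to a byte stream and to refill itself from one. This is only my own simplified sketch of that idea (the two method signatures match what I have seen in org.apache.hadoop.io.Writable; MyIntWritable is a toy class of mine, not Hadoop code):

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

// The contract: a type can serialize itself and re-populate itself from bytes.
interface Writable {
    void write(DataOutput out) throws IOException;     // object -> bytes
    void readFields(DataInput in) throws IOException;  // bytes -> same object, reused
}

// A toy wrapper around a Java int, in the spirit of IntWritable.
class MyIntWritable implements Writable {
    private int value;

    public void set(int value) { this.value = value; }
    public int get() { return value; }

    @Override
    public void write(DataOutput out) throws IOException {
        out.writeInt(value);   // exactly 4 bytes go onto the wire, no object header
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        value = in.readInt();  // the same instance is refilled, no new allocation
    }
}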

My confusion starts here. Suppose we have some data in HDFS that we want to process with the following Java code, which runs as Hadoop code:

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;

public class WordCountMapper
        extends Mapper<LongWritable, Text, Text, IntWritable> {

    @Override
    public void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        String line = value.toString();
        for (String word : line.split(" ")) {
            if (word.length() > 0) {
                context.write(new Text(word), new IntWritable(1));
            }
        }
    }
}

In this code, the Hadoop types are LongWritable, Text and IntWritable.
Let's take the Text type, which is a wrapper around Java's String type (please correct me if I am wrong).
My doubt is: when we pass these parameters into the map method in the code above, how do these parameters interact with the code in the imported package, i.e. org.apache.hadoop.io.Text?
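For example, my rough mental model is that Text can be used like a small wrapper around a String. This is just a toy snippet of mine, based only on the methods visible in the class pasted below:

import org.apache.hadoop.io.Text;

public class TextAsStringWrapper {
    public static void main(String[] args) {
        // The Text(String) constructor calls set(String), which stores UTF-8 bytes.
        Text t = new Text("hadoop");

        System.out.println(t.toString());   // decodes the bytes back to the String "hadoop"
        System.out.println(t.getLength());  // 6, the number of UTF-8 bytes
        System.out.println(t.find("oop"));  // 3, the byte offset where "oop" starts
    }
}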

Below is the code of the Text class:

package org.apache.hadoop.io;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.CharBuffer;
import java.nio.charset.CharacterCodingException;
import java.nio.charset.Charset;
import java.nio.charset.CharsetDecoder;
import java.nio.charset.CharsetEncoder;
import java.nio.charset.CodingErrorAction;
import java.nio.charset.MalformedInputException;
import java.text.CharacterIterator;
import java.text.StringCharacterIterator;
import java.util.Arrays;
import org.apache.avro.reflect.Stringable;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Stable;

@Stringable
@InterfaceAudience.Public
@InterfaceStability.Stable
public class Text
extends BinaryComparable
implements WritableComparable<BinaryComparable>
{
private static final Log LOG = LogFactory.getLog(Text.class);
private static ThreadLocal<CharsetEncoder> ENCODER_FACTORY = new ThreadLocal<CharsetEncoder>()
{
protected CharsetEncoder initialValue() {
return Charset.forName("UTF-8").newEncoder().onMalformedInput(CodingErrorAction.REPORT).onUnmappableCharacter(CodingErrorAction.REPORT);
}
};

private static ThreadLocal<CharsetDecoder> DECODER_FACTORY = new ThreadLocal<CharsetDecoder>()
{
protected CharsetDecoder initialValue() {
return Charset.forName("UTF-8").newDecoder().onMalformedInput(CodingErrorAction.REPORT).onUnmappableCharacter(CodingErrorAction.REPORT);
}
};

private static final byte[] EMPTY_BYTES = new byte[0];
private byte[] bytes;
private int length;
public Text()
{
bytes = EMPTY_BYTES;
}

public Text(String string)
{
set(string);
}
public Text(Text utf8)
{
set(utf8);
}

public Text(byte[] utf8)
{
set(utf8);
}


public byte[] getBytes()
{
return bytes;
}
public int getLength()
{
return length;
}




public int charAt(int position)
{
if (position > length) return -1;
if (position < 0) return -1;
ByteBuffer bb = (ByteBuffer)ByteBuffer.wrap(bytes).position(position);
return bytesToCodePoint(bb.slice());
}
public int find(String what) {
return find(what, 0);
}

public int find(String what, int start)
{
try
{
ByteBuffer src = ByteBuffer.wrap(bytes, 0, length);
ByteBuffer tgt = encode(what);
byte b = tgt.get();
src.position(start);
while (src.hasRemaining()) {
if (b == src.get()) {
src.mark();
tgt.mark();
boolean found = true;
int pos = src.position() - 1;
while (tgt.hasRemaining()) {
if (!src.hasRemaining()) { // ran out of source before matching the whole target
tgt.reset();
src.reset();
found = false;
break;
}
else if (tgt.get() != src.get()) { // mismatch at this position
tgt.reset();
src.reset();
found = false;
break;
}
}
if (found) return pos;
}
}
return -1;
}
catch (CharacterCodingException e) {
e.printStackTrace(); }
return -1;
}
public void set(String string)
{
try
{
ByteBuffer bb = encode(string, true);
bytes = bb.array();
length = bb.limit();
} catch (CharacterCodingException e) {
throw new RuntimeException("Should not have happened " + e.toString());
}
}

public void set(byte[] utf8)
{
set(utf8, 0, utf8.length);
}
public void set(Text other)
{
set(other.getBytes(), 0, other.getLength());
}



public void set(byte[] utf8, int start, int len)
{
setCapacity(len, false);
System.arraycopy(utf8, start, bytes, 0, len);
length = len;
}



public void append(byte[] utf8, int start, int len)
{
setCapacity(length + len, true);
System.arraycopy(utf8, start, bytes, length, len);
length += len;
}

public void clear()
{
length = 0;
}





private void setCapacity(int len, boolean keepData)
{
if ((bytes == null) || (bytes.length < len)) {
if ((bytes != null) && (keepData)) {
bytes = Arrays.copyOf(bytes, Math.max(len, length << 1));
} else {
bytes = new byte[len];
}
}
}

public String toString()
{
try
{
return decode(bytes, 0, length);
} catch (CharacterCodingException e) {
throw new RuntimeException("Should not have happened " + e.toString());
}
}
public void readFields(DataInput in)
throws IOException
{
int newLength = WritableUtils.readVInt(in);
setCapacity(newLength, false);
in.readFully(bytes, 0, newLength);
length = newLength;
}
public static void skip(DataInput in) throws IOException
{
int length = WritableUtils.readVInt(in);
WritableUtils.skipFully(in, length);
}


public void write(DataOutput out)
throws IOException
{
WritableUtils.writeVInt(out, length);
out.write(bytes, 0, length);
}
public boolean equals(Object o)
{
if ((o instanceof Text))
return super.equals(o);
return false;
}

Please help me understand: when we run the above Hadoop code, the data in HDFS flows through the parameters we declared in the map method.
Once the first record from HDFS reaches the Text parameter, how does it flow inside the org.apache.hadoop.io.Text class?
I mean, where does it start (I assume it starts from the set method in the class, since it takes the same kind of parameter as the map method mentioned above — am I right?)
Where in the code does it change from a plain String type into the Text type?

My second doubt is: once the data is stored in the Text type, who kicks off the serialization? I mean, who calls write(DataOutput out), and who calls readFields(DataInput in) once the data reaches its destination over the network?
How does this work, and where do I need to look?

I hope my question is clear.

As with any network or disk operation, everything is transferred as bytes. The Text class deserializes the bytes as UTF-8. The Writables determine how data is represented, and the Comparables determine how it is ordered.
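You can watch that byte-level round trip yourself outside of MapReduce. Here is only a minimal sketch (the class name and the in-memory streams are mine, purely for illustration); the framework performs the equivalent calls against real disk and network streams:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.Text;

public class TextRoundTrip {
    public static void main(String[] args) throws IOException {
        // Serialization side: new Text(String) calls set(String), which encodes UTF-8 bytes;
        // write(DataOutput) then emits a varint length followed by those bytes.
        Text original = new Text("hello");
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        original.write(new DataOutputStream(buffer));

        // Deserialization side: the receiver reuses a Text instance and refills it
        // from the incoming byte stream via readFields(DataInput).
        Text restored = new Text();
        restored.readFields(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));

        System.out.println(restored); // toString() decodes the UTF-8 bytes back to "hello"
    }
}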

The InputFormat set on the Job determines which Writables are handed to a map or reduce Task (see the driver sketch below).

An InputSplit determines how the raw byte stream is split up and read into those Writables.

One map Task is started for each InputSplit.
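To make that concrete, here is a minimal driver sketch (the class name, the command-line paths and the choice to rely on the default reducer are placeholder assumptions of mine): with TextInputFormat, every record handed to your WordCountMapper is a LongWritable byte offset plus a Text line, and the same InputFormat computes the InputSplits that decide how many map Tasks run.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class WordCountDriver {
    public static void main(String[] args) throws Exception {
        Job job = Job.getInstance();
        job.setJarByClass(WordCountDriver.class);

        // The InputFormat decides which Writables reach the mapper:
        // TextInputFormat produces (LongWritable offset, Text line) records
        // and computes the InputSplits, one map Task per split.
        job.setInputFormatClass(TextInputFormat.class);

        job.setMapperClass(WordCountMapper.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}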

See https://hadoop.apache.org/docs/stable/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html
