When I try to use my own key type in a MapReduce job, I get an exception: Hadoop apparently cannot find my key's default constructor, even though I have defined one. I found a related question (No such method exception Hadoop).
(Note: I am using Hadoop 2.2.0.)
The exception:
java.lang.Exception: java.lang.RuntimeException: java.lang.NoSuchMethodException: org.apache.hadoop.io.WritableComparable.<init>()
at org.apache.hadoop.mapred.LocalJobRunner$Job.run(LocalJobRunner.java:403)
Caused by: java.lang.RuntimeException: java.lang.NoSuchMethodException: org.apache.hadoop.io.WritableComparable.<init>()
at org.apache.hadoop.util.ReflectionUtils.newInstance(ReflectionUtils.java:131)
at org.apache.hadoop.io.WritableComparator.newKey(WritableComparator.java:115)
at org.apache.hadoop.io.WritableComparator.<init>(WritableComparator.java:101)
at org.apache.hadoop.io.WritableComparator.get(WritableComparator.java:55)
at org.apache.hadoop.mapred.JobConf.getOutputKeyComparator(JobConf.java:885)
at org.apache.hadoop.mapred.MapTask$MapOutputBuffer.init(MapTask.java:982)
at org.apache.hadoop.mapred.MapTask.createSortingCollector(MapTask.java:390)
at org.apache.hadoop.mapred.MapTask.access$100(MapTask.java:79)
at org.apache.hadoop.mapred.MapTask$NewOutputCollector.<init>(MapTask.java:674)
at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:746)
at org.apache.hadoop.mapred.MapTask.run(MapTask.java:339)
at org.apache.hadoop.mapred.LocalJobRunner$Job$MapTaskRunnable.run(LocalJobRunner.java:235)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:471)
at java.util.concurrent.FutureTask.run(FutureTask.java:262)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:744)
Caused by: java.lang.NoSuchMethodException: org.apache.hadoop.io.WritableComparable.<init>()
at java.lang.Class.getConstructor0(Class.java:2810)
at java.lang.Class.getDeclaredConstructor(Class.java:2053)
at org.apache.hadoop.util.ReflectionUtils.newInstance(ReflectionUtils.java:125)
... 16 more
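For context on why the trace looks this way: WritableComparator.newKey creates a key instance through ReflectionUtils.newInstance, which reflectively looks up a no-argument constructor on the configured key class. Note also that the exception names the WritableComparable interface itself rather than WritableUUID, which suggests the interface, not my concrete class, ended up registered as the key type. Below is a minimal sketch of the reflective lookup; it is a simplification for illustration, not Hadoop's actual code.

import java.lang.reflect.Constructor;

public class ReflectiveNewKeyDemo {

    // Simplified stand-in for Hadoop's ReflectionUtils.newInstance (the real
    // method also caches constructors and configures the new instance).
    static <T> T newInstance(Class<T> cls) {
        try {
            Constructor<T> ctor = cls.getDeclaredConstructor(); // requires a no-arg constructor
            ctor.setAccessible(true);
            return ctor.newInstance();
        } catch (Exception e) {
            // Hadoop wraps the NoSuchMethodException in a RuntimeException,
            // exactly as in the stack trace above.
            throw new RuntimeException(e);
        }
    }

    public static void main(String[] args) {
        // Succeeds: WritableUUID declares a public no-arg constructor.
        WritableUUID key = newInstance(WritableUUID.class);
        System.out.println(key);

        // Would fail: an interface like org.apache.hadoop.io.WritableComparable
        // has no constructors at all, so getDeclaredConstructor() throws
        // NoSuchMethodException for WritableComparable.<init>().
        // newInstance(org.apache.hadoop.io.WritableComparable.class);
    }
}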
The key class:
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.UUID;
public class WritableUUID
        implements Writable, Cloneable, WritableComparable<WritableUUID> {

    private UUID uuid;

    // No-arg constructor, required so Hadoop can instantiate keys reflectively.
    public WritableUUID() {
    }

    public WritableUUID(UUID uuid) {
        this.uuid = uuid;
    }

    public UUID getUuid() {
        return uuid;
    }

    @Override
    public int compareTo(WritableUUID o) {
        return uuid.compareTo(o.uuid);
    }

    @Override
    public void write(DataOutput dataOutput) throws IOException {
        // Serialize the UUID as two longs, least-significant bits first.
        dataOutput.writeLong(uuid.getLeastSignificantBits());
        dataOutput.writeLong(uuid.getMostSignificantBits());
    }

    @Override
    public void readFields(DataInput dataInput) throws IOException {
        // Read back in the same order written above.
        long lsb = dataInput.readLong();
        long msb = dataInput.readLong();
        this.uuid = new UUID(msb, lsb);
    }
}
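For completeness, the key type is normally registered on the job along the lines of the sketch below; the job name, value type, and class names here are illustrative, not taken from my actual code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;

public class JobSetupSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "uuid-keyed job"); // hypothetical job name

        job.setJarByClass(JobSetupSketch.class);
        // The concrete key class must be registered here; passing the
        // WritableComparable interface (or letting a library default to it)
        // produces the reflection failure shown above.
        job.setMapOutputKeyClass(WritableUUID.class);
        job.setMapOutputValueClass(Text.class); // illustrative value type
        // mapper/reducer/input/output settings omitted
    }
}

If a wrapper library sets these for you, it is worth checking that it registers the concrete class rather than the WritableComparable interface.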
Thanks for your help!
Update: I found the problem. It was not a Hadoop issue; it was some API confusion on my part with a proprietary library.