Differences
This shows you the differences between two versions of the page.
Both sides previous revision Previous revision Next revision | Previous revision Next revision Both sides next revision | ||
courses:mapreduce-tutorial:step-29 [2012/01/30 00:50] straka |
courses:mapreduce-tutorial:step-29 [2012/02/05 18:57] straka |
||
---|---|---|---|
Line 1: | Line 1: | ||
- | ====== MapReduce Tutorial : Custom | + | ====== MapReduce Tutorial : Custom |
- | Every custom format reading keys of type '' | + | ====== Custom sorting comparator ====== |
- | ===== FileAsPathInputFormat ===== | + | The keys are sorted before processed by a reducer, using a |
+ | [[http:// | ||
- | We start by creating '' | + | <code java> |
+ | public static class IntPair implements WritableComparable< | ||
+ | private int first = 0; | ||
+ | private int second = 0; | ||
- | When implementing a new input format, we must | + | public void set(int left, int right) { first = left; second = right; } |
- | | + | |
- | | + | |
- | Our '' | + | |
- | <code java> | + | |
- | public class FileAsPathInputFormat extends FileInputFormat< | + | |
- | // Helper class, which does the actual work -- produce the (path, offset-length) input pair. | + | |
- | public static class FileAsPathRecordReader extends RecordReader< | + | |
- | private Path file; | + | |
- | long start, length; | + | |
- | private Text key, value; | + | |
- | + | ||
- | | + | |
- | | + | |
- | file = split.getPath(); | + | |
- | start = split.getStart(); | + | |
- | length = split.getLength(); | + | |
- | key = null; | + | |
- | value = null; | + | |
- | | + | |
- | public boolean nextKeyValue() throws IOException { | + | |
- | if (key != null) return false; | + | |
- | + | ||
- | key = new Text(file.toString()); | + | |
- | value = new Text(String.format(" | + | |
- | + | ||
- | return true; | + | |
- | } | + | |
- | + | ||
- | public Text getCurrentKey() { return key; } | + | |
- | public Text getCurrentValue() { return value; } | + | |
- | public float getProgress() { return (key == null) ? 0 : 1; } | + | |
- | public synchronized void close() throws IOException {} | + | |
} | } | ||
- | | + | public void write(DataOutput out) throws IOException { |
- | // Use the helper class as a RecordReader in out file format. | + | out.writeInt(first); |
- | public RecordReader< | + | |
- | | + | } |
- | } | + | |
- | + | | |
- | | + | |
- | protected boolean isSplittable(JobContext context, Path filename) { | + | |
- | | + | |
- | return | + | |
} | } | ||
} | } | ||
</ | </ | ||
- | ===== WholeFileInputFormat ===== | + | If we would like in a Hadoop job to sort the '' |
- | + | ||
- | Next we create | + | |
<code java> | <code java> | ||
- | public class WholeFileInputFormat extends FileInputFormat<Text, BytesWritable> { | + | public |
- | | + | |
- | public static class WholeFileRecordReader extends RecordReader<Text, BytesWritable> { | + | public static class FirstOnlyComparator implements RawComparator<IntPair> { |
- | | + | |
- | | + | |
- | private Text key; | + | |
- | private BytesWritable value; | + | |
- | DataInputStream in; | + | } |
- | + | | |
- | public void initialize(InputSplit genericSplit, TaskAttemptContext context) throws IOException | + | |
- | | + | |
- | file = split.getPath(); | + | |
- | | + | |
- | | + | |
- | value = null; | + | |
- | + | ||
- | | + | |
- | | + | |
- | + | ||
- | CompressionCodecFactory compressionCodecs = new CompressionCodecFactory(context.getConfiguration()); | + | |
- | CompressionCodec codec = compressionCodecs.getCodec(file); | + | |
- | if (codec != null) | + | |
- | in = new DataInputStream(codec.createInputStream(in)); | + | |
} | } | ||
+ | } | ||
+ | } | ||
- | public boolean nextKeyValue() throws IOException { | + | ... |
- | if (key != null) return false; | + | |
- | byte[] data = new byte[length]; | + | job.setSortComparatorClass(IntPair.FirstOnlyComparator.class); |
- | in.readFully(data); | + | </ |
+ | Notice we used helper function '' | ||
- | key = new Text(file.toString()); | + | ====== Grouping comparator ====== |
- | value = new BytesWritable(data); | + | |
- | return true; | + | In a reducer, it is guaranteed that keys are processed in ascending order. Sometimes it would also be useful if the //values associated with one key// were processed in ascending order. |
- | } | + | |
- | public Text getCurrentKey() { return | + | That is possible only to some degree. The (key, value) |
- | public BytesWritable getCurrentValue() { return | + | |
- | public float getProgress() { return key == null ? 0 : 1; } | + | |
- | public synchronized void close() throws IOException { if (in != null) { in.close(); in = null; } } | + | |
- | } | + | |
- | + | ||
- | | + | |
- | public RecordReader< | + | |
- | return new WholeFileRecordReader(); | + | |
- | } | + | |
- | + | ||
- | | + | |
- | protected boolean isSplittable(JobContext context, Path filename) { | + | |
- | return false; | + | |
- | } | + | |
- | } | + | |
- | </code> | + | |
- | ===== Exercise: ParagraphTextInputFormat ===== | + | ---- |
- | Implement '' | + | < |
+ | <table style=" | ||
+ | < | ||
+ | <td style=" | ||
+ | <td style=" | ||
+ | <td style=" | ||
+ | </ | ||
+ | </ | ||
+ | </ | ||
- | The '' | ||
- | * if the offset of the split is 0, start reading at the beginning of the split. If the offset of the split is larger than 0, start reading from the offset and ignore the first paragraph found. ||
- | * read all paragraphs that start |