A WordCount Tutorial for Hadoop 0.20.1

Because the documentation of Hadoop 0.20.1 describes a tutorial program that uses out-of-date APIs, I decided to write the following tutorial for Hadoop 0.20.1. Note that in 0.20.1, org.apache.hadoop.mapred.* is deprecated and it is recommended to use org.apache.hadoop.mapreduce.* instead. This tutorial is based on the new API.
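
To make the difference concrete, here is a minimal sketch of the same pass-through mapper written against both APIs (the class names below are mine, for illustration only):

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapreduce.Mapper;

// Old API (org.apache.hadoop.mapred, deprecated in 0.20.1): Mapper is an
// interface, and results are emitted through an OutputCollector.
class OldApiIdentityMapper extends MapReduceBase
    implements org.apache.hadoop.mapred.Mapper<LongWritable, Text, LongWritable, Text> {
  public void map(LongWritable key, Text value,
      OutputCollector<LongWritable, Text> output, Reporter reporter)
      throws IOException {
    output.collect(key, value);
  }
}

// New API (org.apache.hadoop.mapreduce): Mapper is a class, and a single
// Context object replaces both OutputCollector and Reporter.
class NewApiIdentityMapper extends Mapper<LongWritable, Text, LongWritable, Text> {
  @Override
  protected void map(LongWritable key, Text value, Context context)
      throws IOException, InterruptedException {
    context.write(key, value);
  }
}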

For how to install and configure Hadoop, you might want to refer to my previous post. After Hadoop is installed, let us create a source code directory and put the following Java source file, WordCount.java, into it:


package org.sogou;

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class WordCount {
  /**
   * The mapper class of WordCount. It tokenizes each input line and emits
   * a (word, 1) pair for every token.
   */
  public static class TokenCounterMapper
      extends Mapper<Object, Text, Text, IntWritable> {

    private final static IntWritable one = new IntWritable(1);
    private Text word = new Text();

    @Override
    public void map(Object key, Text value, Context context)
        throws IOException, InterruptedException {
      StringTokenizer itr = new StringTokenizer(value.toString());
      while (itr.hasMoreTokens()) {
        word.set(itr.nextToken());
        context.write(word, one);
      }
    }
  }

  /**
   * The reducer class of WordCount. It sums up the counts collected for
   * each word.
   */
  public static class TokenCounterReducer
      extends Reducer<Text, IntWritable, Text, IntWritable> {

    @Override
    public void reduce(Text key, Iterable<IntWritable> values, Context context)
        throws IOException, InterruptedException {
      int sum = 0;
      for (IntWritable value : values) {
        sum += value.get();
      }
      context.write(key, new IntWritable(sum));
    }
  }

  /**
   * The main entry point: configures and submits the job.
   */
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
    if (otherArgs.length != 2) {
      System.err.println("Usage: wordcount <in> <out>");
      System.exit(2);
    }
    Job job = new Job(conf, "Example Hadoop 0.20.1 WordCount");
    job.setJarByClass(WordCount.class);
    job.setMapperClass(TokenCounterMapper.class);
    job.setReducerClass(TokenCounterReducer.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);
    FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
    FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}

Then, we compile this file and pack the result into a jar file:

mkdir classes
javac -classpath /Users/wyi/hadoop-0.20.1/hadoop-0.20.1-core.jar:/Users/wyi/hadoop-0.20.1/lib/commons-cli-1.2.jar -d classes WordCount.java && jar -cvf wordcount.jar -C classes/ .
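
If you want to double-check that the compiled classes made it into the jar, you can list its contents. Given the package declaration above, the listing should include entries such as org/sogou/WordCount.class and org/sogou/WordCount$TokenCounterMapper.class:

jar -tf wordcount.jar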

Finally, we run the jar file in Hadoop's standalone mode. Note that the output directory /Users/wyi/tmp/out must not exist beforehand; the job creates it and fails if it is already there:

echo "hello world bye world" > /Users/wyi/tmp/in/0.txt
echo "hello hadoop goodebye hadoop" > /Users/wyi/tmp/in/1.txt
hadoop jar wordcount.jar org.sogou.WordCount /Users/wyi/tmp/in /Users/wyi/tmp/out
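
If the job succeeds, the counts end up in /Users/wyi/tmp/out. With the new API the reducer writes to a file named part-r-00000 (the old API used part-00000), so for the two sample files above you should see the following, with each word and its count separated by a tab:

cat /Users/wyi/tmp/out/part-r-00000
bye	1
goodbye	1
hadoop	2
hello	2
world	2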