1. Create a Maven project
Add the following dependencies to the pom.xml file (a sample dependency section follows the list):
hadoop-client
junit
slf4j-nop
maven-compiler-plugin
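A minimal sketch of the corresponding pom.xml entries. The version numbers are placeholders, not taken from the original; use whatever matches your Hadoop installation and JDK.

<dependencies>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-client</artifactId>
        <version>3.1.3</version> <!-- example version; match your cluster -->
    </dependency>
    <dependency>
        <groupId>junit</groupId>
        <artifactId>junit</artifactId>
        <version>4.12</version>
        <scope>test</scope>
    </dependency>
    <dependency>
        <groupId>org.slf4j</groupId>
        <artifactId>slf4j-nop</artifactId>
        <version>1.7.30</version>
    </dependency>
</dependencies>
<build>
    <plugins>
        <plugin>
            <groupId>org.apache.maven.plugins</groupId>
            <artifactId>maven-compiler-plugin</artifactId>
            <version>3.8.1</version>
            <configuration>
                <source>1.8</source>
                <target>1.8</target>
            </configuration>
        </plugin>
    </plugins>
</build>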
2. In the project's resources directory, create a file named log4j.properties and fill it with the following content:
log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d %p [%c] - %m%n
log4j.appender.logfile=org.apache.log4j.FileAppender
log4j.appender.logfile.File=target/spring.log
log4j.appender.logfile.layout=org.apache.log4j.PatternLayout
log4j.appender.logfile.layout.ConversionPattern=%d %p [%c] - %m%n
3. Define a custom class that implements the Writable interface
import org.apache.hadoop.io.Writable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

public class FlowBean implements Writable {

    private long upFlow;   // upstream traffic
    private long downFlow; // downstream traffic
    private long sumFlow;  // total traffic

    // no-arg constructor, required for deserialization
    public FlowBean() {
    }

    public long getUpFlow() {
        return upFlow;
    }

    public void setUpFlow(long upFlow) {
        this.upFlow = upFlow;
    }

    public long getDownFlow() {
        return downFlow;
    }

    public void setDownFlow(long downFlow) {
        this.downFlow = downFlow;
    }

    public long getSumFlow() {
        return sumFlow;
    }

    public void setSumFlow() {
        this.sumFlow = this.downFlow + this.upFlow;
    }

    // serialization method
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeLong(upFlow);
        out.writeLong(downFlow);
        out.writeLong(sumFlow);
    }

    // Note: the deserialization method must read the fields in exactly the
    // same order in which the serialization method wrote them.
    // deserialization method
    @Override
    public void readFields(DataInput in) throws IOException {
        this.upFlow = in.readLong();
        this.downFlow = in.readLong();
        this.sumFlow = in.readLong();
    }

    @Override
    public String toString() {
        return upFlow + "\t" + downFlow + "\t" + sumFlow;
    }
}
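To check that write() and readFields() agree on the field order, a quick round-trip test can be added using the junit dependency listed above. This is only a sketch; the test class name is illustrative and not part of the job itself.

import org.junit.Test;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;

import static org.junit.Assert.assertEquals;

public class FlowBeanTest {

    @Test
    public void serializeAndDeserialize() throws Exception {
        FlowBean original = new FlowBean();
        original.setUpFlow(100);
        original.setDownFlow(200);
        original.setSumFlow();

        // serialize into an in-memory byte buffer
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        original.write(new DataOutputStream(bytes));

        // deserialize a new bean from the same bytes
        FlowBean copy = new FlowBean();
        copy.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));

        assertEquals(100, copy.getUpFlow());
        assertEquals(200, copy.getDownFlow());
        assertEquals(300, copy.getSumFlow());
    }
}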
4. The Mapper class
The generic parameters follow the Driver settings below: the input key/value are LongWritable/Text and the map output key/value are Text/FlowBean. The map() body is a minimal sketch that assumes tab-separated input records with the phone number in the second field and the upstream/downstream traffic in the last two numeric fields; adjust the parsing to your actual data layout.

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class FlowMapper extends Mapper<LongWritable, Text, Text, FlowBean> {

    private final Text outK = new Text();
    private final FlowBean outV = new FlowBean();

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // assumption: fields are tab-separated, the phone number is the second
        // field, and upFlow/downFlow are the last two numeric fields of the line
        String[] fields = value.toString().split("\t");
        String phone = fields[1];
        long upFlow = Long.parseLong(fields[fields.length - 3]);
        long downFlow = Long.parseLong(fields[fields.length - 2]);

        outK.set(phone);
        outV.setUpFlow(upFlow);
        outV.setDownFlow(downFlow);
        outV.setSumFlow();

        context.write(outK, outV);
    }
}
5. The Reducer class
The Reducer receives all FlowBean values for one key and accumulates them; this is a minimal reconstruction consistent with the Driver settings below.

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class FlowReducer extends Reducer<Text, FlowBean, Text, FlowBean> {

    private final FlowBean outV = new FlowBean();

    @Override
    protected void reduce(Text key, Iterable<FlowBean> values, Context context)
            throws IOException, InterruptedException {
        long totalUp = 0;
        long totalDown = 0;

        // accumulate the upstream and downstream traffic for this key
        for (FlowBean value : values) {
            totalUp += value.getUpFlow();
            totalDown += value.getDownFlow();
        }

        outV.setUpFlow(totalUp);
        outV.setDownFlow(totalDown);
        outV.setSumFlow();

        context.write(key, outV);
    }
}
6. The Driver class
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

public class FlowDriver {

    public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
        // 1. get the job
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);

        // 2. set the jar
        job.setJarByClass(FlowDriver.class);

        // 3. associate the Mapper and Reducer
        job.setMapperClass(FlowMapper.class);
        job.setReducerClass(FlowReducer.class);

        // 4. set the Mapper's output key/value types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(FlowBean.class);

        // 5. set the final output key/value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FlowBean.class);

        // 6. set the input and output paths
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        // 7. submit the job
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
Finally, package the project into a jar, upload it to the Hadoop directory on the Linux machine, and run it with:
hadoop jar <jar name> <fully qualified Driver class> <input path> <output path>
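For example, if the jar were named flow-1.0-SNAPSHOT.jar and FlowDriver lived in the package com.example.flow (both names are illustrative, as are the HDFS paths), the command would look like:

hadoop jar flow-1.0-SNAPSHOT.jar com.example.flow.FlowDriver /input/phone_data.txt /output/flowcount

Note that the output path must not already exist in HDFS, otherwise the job will fail at startup.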