
Hadoop Serialization: A Phone Traffic Example

Date: 2023-06-08

1. Create a Maven project

Add the following dependencies to the pom.xml file:

<dependencies>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-client</artifactId>
        <version>3.1.3</version>
    </dependency>

    <dependency>
        <groupId>junit</groupId>
        <artifactId>junit</artifactId>
        <version>4.13.2</version>
    </dependency>

    <dependency>
        <groupId>org.slf4j</groupId>
        <artifactId>slf4j-nop</artifactId>
        <version>1.7.35</version>
    </dependency>
</dependencies>

<build>
    <plugins>
        <plugin>
            <artifactId>maven-compiler-plugin</artifactId>
            <version>3.6.1</version>
            <configuration>
                <source>1.8</source>
                <target>1.8</target>
            </configuration>
        </plugin>
    </plugins>
</build>

2. In the project's resources directory, create a file named log4j.properties and fill it with the following content:

log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d %p [%c] - %m%n
log4j.appender.logfile=org.apache.log4j.FileAppender
log4j.appender.logfile.File=target/spring.log
log4j.appender.logfile.layout=org.apache.log4j.PatternLayout
log4j.appender.logfile.layout.ConversionPattern=%d %p [%c] - %m%n

3. Define a custom class that implements the Writable interface

import org.apache.hadoop.io.Writable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

public class FlowBean implements Writable {

    private long upFlow;   // upstream traffic
    private long downFlow; // downstream traffic
    private long sumFlow;  // total traffic

    // No-arg constructor, required by the framework for deserialization
    public FlowBean() {
    }

    public long getUpFlow() {
        return upFlow;
    }

    public void setUpFlow(long upFlow) {
        this.upFlow = upFlow;
    }

    public long getDownFlow() {
        return downFlow;
    }

    public void setDownFlow(long downFlow) {
        this.downFlow = downFlow;
    }

    public long getSumFlow() {
        return sumFlow;
    }

    public void setSumFlow() {
        this.sumFlow = this.downFlow + this.upFlow;
    }

    // Serialization method
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeLong(upFlow);
        out.writeLong(downFlow);
        out.writeLong(sumFlow);
    }

    // Note: the fields must be read back in exactly the same order
    // they were written in the serialization method.
    // Deserialization method
    @Override
    public void readFields(DataInput in) throws IOException {
        this.upFlow = in.readLong();
        this.downFlow = in.readLong();
        this.sumFlow = in.readLong();
    }

    @Override
    public String toString() {
        return upFlow + "\t" + downFlow + "\t" + sumFlow;
    }
}
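To see the write/readFields contract in action, here is a minimal round-trip sketch (the class name FlowBeanRoundTrip and the flow values are made up for illustration): it serializes a FlowBean to a byte stream the way the framework does during the shuffle, then reads it back into a fresh object.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class FlowBeanRoundTrip {
    public static void main(String[] args) throws IOException {
        // Hypothetical traffic values, for illustration only
        FlowBean original = new FlowBean();
        original.setUpFlow(1024);
        original.setDownFlow(2048);
        original.setSumFlow();

        // Serialize to a byte array
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        original.write(new DataOutputStream(baos));

        // Deserialize into a new object; fields come back in write order
        FlowBean copy = new FlowBean();
        copy.readFields(new DataInputStream(new ByteArrayInputStream(baos.toByteArray())));

        System.out.println(copy); // prints: 1024	2048	3072
    }
}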

4. Mapper class

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class FlowMapper extends Mapper<LongWritable, Text, Text, FlowBean> {

    // Output key type
    private final Text outK = new Text();
    // Output value type
    private final FlowBean outV = new FlowBean();

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // 1. Get one line and convert it to a String (the value, not the offset key)
        String line = value.toString();

        // 2. Split on tabs
        String[] split = line.split("\t");

        // 3. Pick out the fields we need
        String phone = split[1];
        String up = split[split.length - 3];
        String down = split[split.length - 2];

        // 4. Populate the output key and value
        outK.set(phone);
        outV.setUpFlow(Long.parseLong(up));
        outV.setDownFlow(Long.parseLong(down));
        outV.setSumFlow();

        // 5. Emit
        context.write(outK, outV);
    }
}
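For reference, the mapper assumes tab-separated input in the usual phone-traffic layout (record id, phone number, IP, an optional visited domain, upstream bytes, downstream bytes, status code); the domain column can be absent, which is why the traffic fields are indexed from the end of the line rather than the front. A few hypothetical input lines (phone numbers and byte counts invented for illustration):

1	13736230513	192.196.100.1	www.example.com	2481	24681	200
2	13736230513	192.196.100.2	www.example.com	1116	954	200
3	13846544121	192.196.100.3	264	0	200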

5. Reducer class

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class FlowReducer extends Reducer<Text, FlowBean, Text, FlowBean> {

    private final FlowBean outV = new FlowBean();

    @Override
    protected void reduce(Text key, Iterable<FlowBean> values, Context context)
            throws IOException, InterruptedException {
        long sumUp = 0;
        long sumDown = 0;

        // 1. Iterate over the values and accumulate the traffic for this phone number
        for (FlowBean value : values) {
            sumUp += value.getUpFlow();
            sumDown += value.getDownFlow();
        }

        // 2. Populate the output value
        outV.setUpFlow(sumUp);
        outV.setDownFlow(sumDown);
        outV.setSumFlow();

        // 3. Emit
        context.write(key, outV);
    }
}
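Given the hypothetical input lines above, the reducer aggregates per phone number, so the job output would look like (phone, upstream, downstream, total):

13736230513	3597	25635	29232
13846544121	264	0	264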

6. Driver class

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

public class FlowDriver {
    public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
        // 1. Get the job instance
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);

        // 2. Set the jar
        job.setJarByClass(FlowDriver.class);

        // 3. Wire up the Mapper and Reducer
        job.setMapperClass(FlowMapper.class);
        job.setReducerClass(FlowReducer.class);

        // 4. Set the Mapper's output key/value types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(FlowBean.class);

        // 5. Set the final output key/value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FlowBean.class);

        // 6. Set the input and output paths
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        // 7. Submit the job
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}

Finally, package the project into a jar, upload it to the Hadoop directory on your Linux machine, and run it with a command of the following form:

hadoop jar <jar name> <fully qualified Driver class> <input path> <output path>
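For example (the jar name, package, and HDFS paths below are placeholders; substitute your own):

hadoop jar flow-1.0-SNAPSHOT.jar com.example.flow.FlowDriver /input/phone_data.txt /output/flow

Note that the output directory must not already exist, or the job will fail at submission.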
 
