MapReduce Hands-On Project: Chicago Crime Data Analysis

// Job 1: group the crime records by police district (column 11) and count the crimes in each district.
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class Crime {

    private static class CMapper extends Mapper<LongWritable, Text, Text, Text> {

        private final Text dis = new Text();
        private final Text cnumber = new Text();

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String line = value.toString();
            String[] l = line.split(",");
            dis.set(l[11]);      // District column
            cnumber.set(l[1]);   // Case Number column
            context.write(dis, cnumber);
        }
    }

    public static class CReduce extends Reducer<Text, Text, Text, IntWritable> {

        @Override
        public void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            // The counter is local to each call, so every district starts from zero.
            int sum = 0;
            for (Text t : values) {
                sum = sum + 1;
            }
            context.write(key, new IntWritable(sum));
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "crime1");
        job.setJarByClass(Crime.class);
        job.setMapperClass(CMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setReducerClass(CReduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        job.waitForCompletion(true);
    }
}

Here the districts are sorted from smallest to largest. From the results, the district with the fewest crimes is district 1 and the district with the most crimes is district 934, so district 1 can be judged to have relatively good public safety.
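One way (not shown in the original article) to produce this ranking by crime count inside MapReduce is a small follow-up job that swaps key and value, so that the framework's key sort orders the districts by their counts. A minimal sketch, assuming job 1's output lines have the form district<TAB>count:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class SortByCount {

    public static class SwapMapper extends Mapper<LongWritable, Text, IntWritable, Text> {
        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // Input line from job 1: district \t count
            String[] l = value.toString().split("\t");
            context.write(new IntWritable(Integer.parseInt(l[1])), new Text(l[0]));
        }
    }

    public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration(), "sortByCount");
        job.setJarByClass(SortByCount.class);
        job.setMapperClass(SwapMapper.class);
        // No custom reducer: the identity reducer plus a single reduce task
        // leaves the records sorted by the IntWritable count in ascending order.
        job.setNumReduceTasks(1);
        job.setOutputKeyClass(IntWritable.class);
        job.setOutputValueClass(Text.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        job.waitForCompletion(true);
    }
}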

2. Here the records are grouped by police district and the number of arrests in each group is counted. The arrest count is the number of times the police successfully caught the offender after a crime occurred. A higher arrest count suggests that the police in that district have a higher arrest success rate and are more likely to catch offenders.

// This job groups the records by District and counts the number of arrests in each district.
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class Crime {

    private static class CMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

        private final Text dis = new Text();

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String line = value.toString();
            String[] l = line.split(",");
            dis.set(l[11]);        // District column
            String arr = l[8];     // Arrest column: "TRUE" or "FALSE"
            // Emit a 1 only for records where an arrest was made.
            if (!arr.equals("FALSE")) {
                context.write(dis, new IntWritable(1));
            }
        }
    }

    public static class CReduce extends Reducer<Text, IntWritable, Text, IntWritable> {

        @Override
        public void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            int count = 0;
            for (IntWritable val : values) {
                count = count + val.get();
            }
            context.write(key, new IntWritable(count));
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "crime1");
        job.setJarByClass(Crime.class);
        job.setMapperClass(CMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        job.setReducerClass(CReduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        job.waitForCompletion(true);
    }
}
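Because this reduce function is a plain sum of 1s, it is associative and commutative, so it could also be registered as a combiner to cut down shuffle traffic. The line below is not in the original job setup, only a possible addition:

        job.setCombinerClass(CReduce.class);  // optional: pre-aggregate the counts on the map side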

3. Merge the result files of the two analyses above into a single file; the merged file is named ave.

// This job does a reduce-side join of the two previous outputs:
// for each district, it pairs the total crime count with the arrest count.
import java.io.IOException;
import java.util.Vector;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class Crime {

    private static class CMapper extends Mapper<LongWritable, Text, Text, Text> {

        private FileSplit inputsplit;

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // Tag each record with a prefix that tells the reducer which
            // input file (crime counts or arrest counts) it came from.
            inputsplit = (FileSplit) context.getInputSplit();
            String filename = inputsplit.getPath().getName();
            if (filename.contains("cnumber")) {
                String[] split1 = value.toString().split("\t");
                Text dis = new Text(split1[0]);
                int cnum = Integer.parseInt(split1[1]);
                context.write(dis, new Text("cnumber" + cnum));
            }
            if (filename.contains("arr")) {
                String[] split2 = value.toString().split("\t");
                Text dis = new Text(split2[0]);
                int arr1 = Integer.parseInt(split2[1]);
                context.write(dis, new Text("arr" + arr1));
            }
        }
    }

    public static class CReduce extends Reducer<Text, Text, Text, Text> {

        @Override
        public void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            Vector<String> a = new Vector<String>();   // crime counts for this district
            Vector<String> b = new Vector<String>();   // arrest counts for this district
            for (Text value : values) {
                String line = value.toString();
                if (line.startsWith("cnumber")) {
                    a.add(line.substring("cnumber".length()));
                }
                if (line.startsWith("arr")) {
                    b.add(line.substring("arr".length()));
                }
            }
            // Emit one joined line per district: district \t crimeCount \t arrestCount
            for (String w1 : a) {
                for (String w2 : b) {
                    context.write(new Text(key + "\t" + w1), new Text(w2));
                }
            }
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "crime1");
        job.setJarByClass(Crime.class);
        job.setMapperClass(CMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setReducerClass(CReduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        job.waitForCompletion(true);
    }
}
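Note that the join mapper distinguishes the two inputs only by file name (contains("cnumber") versus contains("arr")), so before running this job the part-r-00000 outputs of the first two jobs need to be renamed or copied to files whose names contain cnumber and arr respectively, and both files must be placed under the single input directory passed as args[0].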

4. Compute the arrest rate for each district. The arrest rate reflects how efficiently each district solves its cases: a higher arrest rate means cases are closed faster, so public safety in that district is likely to be better.

An ave class is written here to hold the case count and the arrest count.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
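The listing above stops at the imports; what follows is a minimal sketch of how the rest of this step could look, assuming the ave file from step 3 (lines of the form district, crime count, arrest count separated by tabs) as input. The Ave value class and the member names below are illustrative assumptions, not the original author's code.

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class CrimeRate {

    // Illustrative value class holding the crime count and the arrest count of one district.
    public static class Ave implements Writable {
        private int cnum;   // number of crimes
        private int arr;    // number of arrests

        public Ave() {}
        public Ave(int cnum, int arr) { this.cnum = cnum; this.arr = arr; }

        public int getCnum() { return cnum; }
        public int getArr()  { return arr; }

        public void write(DataOutput out) throws IOException {
            out.writeInt(cnum);
            out.writeInt(arr);
        }

        public void readFields(DataInput in) throws IOException {
            cnum = in.readInt();
            arr = in.readInt();
        }
    }

    public static class RateMapper extends Mapper<LongWritable, Text, Text, Ave> {
        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // Each line of the ave file: district \t crimeCount \t arrestCount
            String[] l = value.toString().split("\t");
            context.write(new Text(l[0]),
                    new Ave(Integer.parseInt(l[1]), Integer.parseInt(l[2])));
        }
    }

    public static class RateReducer extends Reducer<Text, Ave, Text, DoubleWritable> {
        @Override
        public void reduce(Text key, Iterable<Ave> values, Context context)
                throws IOException, InterruptedException {
            for (Ave v : values) {
                // Arrest rate = arrests / crimes for the district.
                double rate = (double) v.getArr() / v.getCnum();
                context.write(key, new DoubleWritable(rate));
            }
        }
    }

    public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration(), "crimeRate");
        job.setJarByClass(CrimeRate.class);
        job.setMapperClass(RateMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Ave.class);
        job.setReducerClass(RateReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(DoubleWritable.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        job.waitForCompletion(true);
    }
}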
