千家信息网

hadoop中mapreduce如何自定义分区

发表于:2025-02-03 作者:千家信息网编辑
千家信息网最后更新 2025年02月03日,这篇文章主要为大家展示了"hadoop中mapreduce如何自定义分区",内容简而易懂,条理清晰,希望能够帮助大家解决疑惑,下面让小编带领大家一起研究并学习一下"hadoop中mapreduce
千家信息网最后更新 2025年02月03日hadoop中mapreduce如何自定义分区

这篇文章主要为大家展示了"hadoop中mapreduce如何自定义分区",内容简而易懂,条理清晰,希望能够帮助大家解决疑惑,下面让小编带领大家一起研究并学习一下"hadoop中mapreduce如何自定义分区"这篇文章吧。

package hello_hadoop;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * MapReduce driver computing the average grade per student, using a custom
 * {@link Partitioner} that routes a fixed set of names to reducer 0 and all
 * other names to reducer 1.
 *
 * <p>Input format: one {@code name\tgrade} pair per line.
 *
 * <p>NOTE(review): the class name "AutoParitionner" is a misspelling of
 * "AutoPartitioner"; it is kept as-is to preserve the public interface.
 */
public class AutoParitionner {

    public static void main(String[] args)
            throws IOException, ClassNotFoundException, InterruptedException {
        if (args.length != 2) {
            System.err.println("Usage: hadoop jar xxx.jar  ");
            System.exit(1);
        }
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "avg of grades");
        job.setJarByClass(AutoParitionner.class);
        job.setMapperClass(PartitionInputClass.class);
        job.setReducerClass(PartitionOutputClass.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(DoubleWritable.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(DoubleWritable.class);
        // Register the custom partitioner (declared below). Two reduce
        // tasks so that each partition produces its own output file.
        job.setPartitionerClass(MyPartitioner.class);
        job.setNumReduceTasks(2);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}

/**
 * Mapper: parses each {@code name\tgrade} line and emits
 * {@code (name, grade)}. Lines that are empty or do not split into exactly
 * two tab-separated fields are silently skipped.
 */
class PartitionInputClass extends Mapper<LongWritable, Text, Text, DoubleWritable> {

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        String line = value.toString();
        if (line.length() > 0) {
            String[] array = line.split("\t");
            if (array.length == 2) {
                String name = array[0];
                int grade = Integer.parseInt(array[1]);
                context.write(new Text(name), new DoubleWritable(grade));
            }
        }
    }
}

/**
 * Reducer: averages all grades observed for one student name.
 */
class PartitionOutputClass extends Reducer<Text, DoubleWritable, Text, DoubleWritable> {

    @Override
    protected void reduce(Text text, Iterable<DoubleWritable> iterable, Context context)
            throws IOException, InterruptedException {
        // Accumulate in double: the original used an int accumulator, which
        // truncated each DoubleWritable value and then performed integer
        // division, losing the fractional part of the average.
        double sum = 0;
        int cnt = 0;
        for (DoubleWritable iw : iterable) {
            sum += iw.get();
            cnt++;
        }
        context.write(text, new DoubleWritable(sum / cnt));
    }
}

/**
 * Custom partitioner. The type parameters {@code <Text, DoubleWritable>} are
 * the map output key and value types.
 */
class MyPartitioner extends Partitioner<Text, DoubleWritable> {

    @Override
    public int getPartition(Text text, DoubleWritable value, int numOfReduceTasks) {
        String name = text.toString();
        // Hard-coded routing: these four students go to reducer 0,
        // everyone else goes to reducer 1.
        if (name.equals("wd") || name.equals("wzf") || name.equals("xzh") || name.equals("zz")) {
            return 0;
        } else {
            return 1;
        }
    }
}

以上是"hadoop中mapreduce如何自定义分区"这篇文章的所有内容,感谢各位的阅读!相信大家都有了一定的了解,希望分享的内容对大家有所帮助,如果还想学习更多知识,欢迎关注行业资讯频道!

0