
How to merge small files into a SequenceFile in Hadoop and read it in a map task

Published: 2025-01-21

This article introduces how to merge small files into a SequenceFile in Hadoop and read the result back in a map task. Plenty of people run into this situation when working through real cases, so let's walk through how to handle it. Read carefully and you should come away with something useful!

First, SequenceMain walks a directory of small files on HDFS and writes each one into a single SequenceFile, using the file name as the key and the file content as the value:

package hgs.sequencefile;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

// Merge small files into a single SequenceFile
public class SequenceMain {
    public static void main(String[] args) throws IOException, URISyntaxException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(new URI("hdfs://192.168.6.129:9000"), conf);
        // List all the files under the /words directory
        FileStatus[] fstats = fs.listStatus(new Path("/words"));
        Text key = new Text();
        Text value = new Text();
        // Create a SequenceFile writer; merge.seq is the output file name
        @SuppressWarnings("deprecation")
        SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf,
                new Path("/sequence/merge.seq"), key.getClass(), value.getClass());
        // Loop over the files and write each one into the SequenceFile
        for (FileStatus fis : fstats) {
            FSDataInputStream finput = fs.open(fis.getPath());
            byte[] buffer = new byte[(int) fis.getLen()];
            IOUtils.readFully(finput, buffer, 0, buffer.length);
            // File name as key, file content as value
            key.set(fis.getPath().getName());
            value.set(buffer);
            writer.append(key, value);
            finput.close();
        }
        writer.close();
        fs.close();
    }
}
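Before wiring up the MapReduce job, it can help to spot-check the merged file. The following is a minimal verification sketch, not part of the original article: it assumes the same NameNode address and output path as above, reads merge.seq back with SequenceFile.Reader, and prints each stored file name with its content size.

package hgs.sequencefile;

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

// Hypothetical verification helper, not part of the original article
public class SequenceCheck {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(new URI("hdfs://192.168.6.129:9000"), conf);
        Text key = new Text();
        Text value = new Text();
        @SuppressWarnings("deprecation")
        SequenceFile.Reader reader =
                new SequenceFile.Reader(fs, new Path("/sequence/merge.seq"), conf);
        // next() fills key and value, returning false at end of file
        while (reader.next(key, value)) {
            System.out.println(key + " -> " + value.getLength() + " bytes");
        }
        reader.close();
        fs.close();
    }
}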
Next, an identity mapper passes each key/value pair through unchanged. SequenceFileAsTextInputFormat (configured in the driver below) delivers both key and value as Text, so the mapper's type parameters are Text throughout:

package hgs.sequencefile;

import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

// Identity mapper: emit each (file name, file content) pair as-is
public class SequnceMapper extends Mapper<Text, Text, Text, Text> {
    @Override
    protected void map(Text key, Text value, Context context)
            throws IOException, InterruptedException {
        context.write(key, value);
    }
}
Finally, the driver wires the job together and points it at the merged file:

package hgs.sequencefile;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileAsTextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class SequenceDriver {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "read_sequence_file");
        job.setJarByClass(hgs.sequencefile.SequenceDriver.class);

        job.setMapperClass(SequnceMapper.class);
        // No reducer is needed; the map output is written directly
        //job.setReducerClass(Reducer.class);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        // Set the InputFormat used to read the SequenceFile:
        // SequenceFileAsTextInputFormat presents keys and values as Text;
        // SequenceFileAsBinaryInputFormat would instead deliver raw BytesWritable
        job.setInputFormatClass(SequenceFileAsTextInputFormat.class);

        // Input and output are directories, not files
        FileInputFormat.setInputPaths(job, new Path("hdfs://192.168.6.129:9000/sequence"));
        FileOutputFormat.setOutputPath(job, new Path("hdfs://192.168.6.129:9000/seqresult"));

        if (!job.waitForCompletion(true))
            return;
    }
}
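Since merge.seq was written with Text keys and Text values, the to-Text conversion done by SequenceFileAsTextInputFormat is not strictly required. Below is a sketch of an alternative driver (the class name SequenceDriverDirect and the /seqresult2 output path are hypothetical, not from the original article) that reads the stored Text/Text pairs directly with the plain SequenceFileInputFormat:

package hgs.sequencefile;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

// Hypothetical variant of SequenceDriver, not part of the original article
public class SequenceDriverDirect {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "read_sequence_file_direct");
        job.setJarByClass(SequenceDriverDirect.class);
        job.setMapperClass(SequnceMapper.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        // Deliver the stored Text/Text pairs as-is, skipping the extra
        // to-Text conversion that SequenceFileAsTextInputFormat performs
        job.setInputFormatClass(SequenceFileInputFormat.class);
        FileInputFormat.setInputPaths(job, new Path("hdfs://192.168.6.129:9000/sequence"));
        FileOutputFormat.setOutputPath(job, new Path("hdfs://192.168.6.129:9000/seqresult2"));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}

Either input format works with the identity mapper above, because the job's map input types are Text/Text in both cases.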

"hadoop怎么合并sequcefie并在map中读取"的内容就介绍到这里了,感谢大家的阅读。如果想了解更多行业相关的知识可以关注网站,小编将为大家输出更多高质量的实用文章!
