HDFS File Operations
Verify the HDFS file commands on the distributed file system, as listed below.
hadoop fs [genericOptions]
    [-ls <path>]             // list the files under the target path
    [-lsr <path>]            // recursively list all directories and files under the target path (depth-first)
    [-du <path>]             // show the size, in bytes, of every file in the directory, or of the file itself if path is a file
    [-dus <path>]            // show the total size of the target in bytes (useful for checking the size of a directory)
    [-count [-q] <path>]     // print the number of directories and files under the path, and their total size, to stdout
    [-mv <src> <dst>]        // move files or directories to the target path; several sources may be moved at once, but only into a single target, which is the last argument
    [-cp <src> <dst>]        // copy files or directories to the target path; when copying several sources, the target must be a directory
    [-rm [-skipTrash] <path>]    // delete a file; this command cannot delete directories
    [-rmr [-skipTrash] <path>]   // delete a directory and everything under it
    [-expunge]               // empty the trash
    [-put <localsrc> ... <dst>]            // upload files from the local file system to HDFS
    [-copyFromLocal <localsrc> ... <dst>]  // same as -put
    [-moveFromLocal <localsrc> ... <dst>]  // same as -put, but the local files are removed after the upload
    [-get [-ignoreCrc] [-crc] <src> <localdst>]  // copy files to the local file system; checksum verification can be skipped, which is mainly useful for salvaging files that are already corrupted
    [-getmerge <src> <localdst> [addnl]]   // merge all files in the source directory, in sorted order, into the target file, separated by newlines
    [-cat <src>]             // print the file contents to stdout, like cat on Linux
    [-text <src>]            // print the source file in text form; useful for viewing TextRecordInputStream files (such as SequenceFile) or zip files
    [-copyToLocal [-ignoreCrc] [-crc] <src> <localdst>]  // same as -get
    [-moveToLocal [-crc] <src> <localdst>]
    [-mkdir <path>]          // create a directory
    [-setrep [-R] [-w] <rep> <path/file>]  // change the replication factor of a file; -R applies the change recursively to all files under a directory
    [-touchz <path>]         // create an empty file, like touch on Linux
    [-test -[ezd] <path>]    // test whether the path exists (-e), has zero length (-z), or is a directory (-d)
    [-stat [format] <path>]  // print information about the path in the given format
    [-tail [-f] <file>]      // print the last 1 KB of the file to stdout; as on Linux, -f keeps watching for newly appended content, which is very handy for log files
    [-chmod [-R] <MODE[,MODE]...| OCTALMODE> PATH...]  // change file permissions; only the file owner or the superuser may do this; -R applies recursively to all files in a directory
    [-chown [-R] [OWNER][:[GROUP]] PATH...]  // change the file owner; -R applies recursively; again, only the superuser may do this
    [-chgrp [-R] GROUP PATH...]  // change the group a file belongs to; -R applies recursively; only the superuser may do this
    [-help [cmd]]            // show the help message for a command
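For example, a typical round trip with these commands (the paths and file names are illustrative):

hadoop fs -mkdir /data
hadoop fs -put notes.txt /data
hadoop fs -ls /data
hadoop fs -cat /data/notes.txt
hadoop fs -rm /data/notes.txt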
HDFS Interface Programming
Call the HDFS file API to access files in the distributed file system, e.g., to create, modify, and delete them.
Reference code:
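A minimal sketch of such a program using the org.apache.hadoop.fs.FileSystem API (the NameNode URI hdfs://localhost:9000, the class name, and the file path are assumptions; adapt them to your cluster):

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HdfsFileDemo {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Connect to the NameNode; the URI is an assumption, not part of the original code
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:9000"), conf);

        Path file = new Path("/demo/hello.txt");

        // Create: write a new file (true = overwrite if it already exists)
        try (FSDataOutputStream out = fs.create(file, true)) {
            out.writeUTF("hello hdfs");
        }

        // Modify: HDFS files are write-once, so "modifying" here means
        // overwriting the file with new content
        try (FSDataOutputStream out = fs.create(file, true)) {
            out.writeUTF("modified content");
        }

        // Delete: the second argument requests recursive deletion
        // (only relevant for directories)
        fs.delete(file, false);

        fs.close();
    }
}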
MapReduce Parallel Program Development
3.1 Finding the Maximum Temperature of Each Year
The raw data is shown below; each record is a 10-character string in which the first eight characters are the date (yyyyMMdd) and the last two are the temperature:
2014010114
2014010216
2014010317
2014010410
2014010506
2012010609
2012010732
2012010812
2012010919
2012011023
2001010116
2001010212
2001010310
2001010411
2001010529
2013010619
2013010722
2013010812
2013010929
2013011023
2008010105
2008010216
2008010337
2008010414
2008010516
2007010619
2007010712
2007010812
2007010999
2007011023
2010010114
2010010216
2010010317
2010010410
2010010506
2015010649
2015010722
2015010812
2015010999
2015011023
Code:
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class Temperature {

    /**
     * The four generic types are:
     * KeyIn    the Mapper's input key: the byte offset of each line (0, 11, ...)
     * ValueIn  the Mapper's input value: the text of the line
     * KeyOut   the Mapper's output key: the "year" parsed from the line
     * ValueOut the Mapper's output value: the "temperature" parsed from the line
     */
    static class TempMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
        @Override
        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // Sample output: Before Mapper: 0, 2000010115
            System.out.print("Before Mapper: " + key + ", " + value);
            String line = value.toString();
            String year = line.substring(0, 4);
            int temperature = Integer.parseInt(line.substring(8));
            context.write(new Text(year), new IntWritable(temperature));
            // Sample output: After Mapper: 2000, 15
            System.out.println("======" + "After Mapper:" + new Text(year) + ", "
                    + new IntWritable(temperature));
        }
    }

    /**
     * The four generic types are:
     * KeyIn    the Reducer's input key: the "year"
     * ValueIn  the Reducer's input value: the "temperature"
     * KeyOut   the Reducer's output key: each distinct "year"
     * ValueOut the Reducer's output value: that year's maximum temperature
     */
    static class TempReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
        @Override
        public void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            int maxValue = Integer.MIN_VALUE;
            StringBuffer sb = new StringBuffer();
            // Take the maximum of the values
            for (IntWritable value : values) {
                maxValue = Math.max(maxValue, value.get());
                sb.append(value).append(", ");
            }
            // Sample output: Before Reduce: 2000, 15, 23, 99, 12, 22,
            System.out.print("Before Reduce: " + key + ", " + sb.toString());
            context.write(key, new IntWritable(maxValue));
            // Sample output: After Reduce: 2000, 99
            System.out.println("======" + "After Reduce: " + key + ", " + maxValue);
        }
    }

    public static void main(String[] args) throws Exception {
        // Input path
        String dst = "hdfs://localhost:9000/intput.txt";
        // Output path; it must not already exist (not even as an empty directory)
        String dstOut = "hdfs://localhost:9000/output";
        Configuration hadoopConfig = new Configuration();

        hadoopConfig.set("fs.hdfs.impl",
                org.apache.hadoop.hdfs.DistributedFileSystem.class.getName());
        hadoopConfig.set("fs.file.impl",
                org.apache.hadoop.fs.LocalFileSystem.class.getName());
        Job job = new Job(hadoopConfig);

        // If the program is packaged as a jar, the following line is needed
        // job.setJarByClass(Temperature.class);

        // Input and output paths for the job
        FileInputFormat.addInputPath(job, new Path(dst));
        FileOutputFormat.setOutputPath(job, new Path(dstOut));

        // Use the custom Mapper and Reducer as the two processing stages
        job.setMapperClass(TempMapper.class);
        job.setReducerClass(TempReducer.class);

        // Types of the final output key and value
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        // Run the job and wait for it to finish
        job.waitForCompletion(true);
        System.out.println("Finished");
    }
}
Package the program as a jar, upload it to the Hadoop platform, and run it.
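A typical run, assuming the jar is named Temperature.jar and the data file has already been uploaded as hdfs://localhost:9000/intput.txt (both names are assumptions; also uncomment job.setJarByClass(Temperature.class) before packaging):

hadoop jar Temperature.jar Temperature
hdfs dfs -cat /output/part-r-00000

With the sample data above, the expected per-year maxima are: 2001 29, 2007 99, 2008 37, 2010 17, 2012 32, 2013 29, 2014 17, 2015 99.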
3.2 Word Count
Create a quick-start project with Maven.
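For example, with the standard quick-start archetype (the groupId and artifactId match the pom.xml below):

mvn archetype:generate -DgroupId=cn.edu.bupt.wcy -DartifactId=wordcount -DarchetypeArtifactId=maven-archetype-quickstart -DinteractiveMode=false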
pom.xml
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>

  <groupId>cn.edu.bupt.wcy</groupId>
  <artifactId>wordcount</artifactId>
  <version>0.0.1-SNAPSHOT</version>
  <packaging>jar</packaging>

  <name>wordcount</name>
  <url>http://maven.apache.org</url>

  <properties>
    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
  </properties>

  <dependencies>
    <dependency>
      <groupId>junit</groupId>
      <artifactId>junit</artifactId>
      <version>3.8.1</version>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-common</artifactId>
      <version>2.7.1</version>
    </dependency>
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-hdfs</artifactId>
      <version>2.7.1</version>
    </dependency>
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-client</artifactId>
      <version>2.7.1</version>
    </dependency>
  </dependencies>
</project>
Three Java classes: the mapper, the reducer, and the runner (main class):
mapper:
package cn.edu.bupt.wcy.wordcount;

import java.io.IOException;

import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class WordCountMapper extends Mapper<LongWritable, Text, Text, LongWritable> {

    @Override
    protected void map(LongWritable key, Text value,
            Mapper<LongWritable, Text, Text, LongWritable>.Context context)
            throws IOException, InterruptedException {
        // Split the line on spaces and emit (word, 1) for each word
        String[] words = StringUtils.split(value.toString(), " ");
        for (String word : words) {
            context.write(new Text(word), new LongWritable(1));
        }
    }
}
reducer:
package cn.edu.bupt.wcy.wordcount;

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class WordCountReducer extends Reducer<Text, LongWritable, Text, LongWritable> {

    @Override
    protected void reduce(Text key, Iterable<LongWritable> values,
            Reducer<Text, LongWritable, Text, LongWritable>.Context context)
            throws IOException, InterruptedException {
        // Sum the counts for this word and emit (word, total)
        long sum = 0;
        for (LongWritable num : values) {
            sum += num.get();
        }
        context.write(key, new LongWritable(sum));
    }
}
runner:
package cn.edu.bupt.wcy.wordcount;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

public class WordCountRunner {

    public static void main(String[] args) throws IllegalArgumentException, IOException,
            ClassNotFoundException, InterruptedException {
        Configuration conf = new Configuration();
        Job job = new Job(conf);
        job.setJarByClass(WordCountRunner.class);
        job.setJobName("wordcount");
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);
        job.setMapperClass(WordCountMapper.class);
        job.setReducerClass(WordCountReducer.class);
        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);
        // With "hadoop jar WordCount.jar WordCountRunner <in> <out>", the input
        // and output paths arrive as args[0] and args[1]
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        job.waitForCompletion(true);
    }
}
Package the program into a jar and run it on the cluster. First create a directory on the cluster:
hdfs dfs -mkdir /input_wordcount
Then put a word file into it, for example one containing:
hello world
I like playing basketball
hello java ...
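Upload the word file, for example (the local file name words.txt is illustrative):

hdfs dfs -put words.txt /input_wordcount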
Run the job with:
hadoop jar WordCount.jar WordCountRunner /input_wordcount /output_wordcount
where WordCount.jar is the jar package and WordCountRunner is the main class.
After the run completes, check the output:
hdfs dfs -ls /output_wordcount
The result files have been generated; cat them to view the contents.
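For example (part-r-00000 is the default name of the first reduce task's output file):

hdfs dfs -cat /output_wordcount/part-r-00000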