1. Place the compiled Hadoop package in a chosen directory, then configure the HADOOP_HOME and PATH environment variables.
2. Create a Maven project; the pom file dependencies are as follows:
<dependencies>
    <dependency>
        <groupId>junit</groupId>
        <artifactId>junit</artifactId>
        <version>4.11</version>
        <scope>test</scope>
    </dependency>
    <dependency>
        <groupId>org.apache.logging.log4j</groupId>
        <artifactId>log4j-core</artifactId>
        <version>2.11.0</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-common</artifactId>
        <version>3.3.1</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-client</artifactId>
        <version>3.3.1</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-hdfs</artifactId>
        <version>3.3.1</version>
    </dependency>
</dependencies>
3. Create a log4j.properties file:
log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d %p [%c] - %m%n
log4j.appender.logfile=org.apache.log4j.FileAppender
log4j.appender.logfile.File=target/spring.log
log4j.appender.logfile.layout=org.apache.log4j.PatternLayout
log4j.appender.logfile.layout.ConversionPattern=%d %p [%c] - %m%n
4. Write a launcher class:
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HDFSClient {
    public static void main(String[] args) throws IOException, URISyntaxException, InterruptedException {
        Configuration conf = new Configuration();
        // Get the client object
        FileSystem fs = FileSystem.get(new URI("hdfs://002:9000"), conf, "user");
        // Create a directory
        fs.mkdirs(new Path("/loong/1231"));
        // Close the resource
        fs.close();
        System.out.println("over");
    }
}

II. HDFS API Operations

2.1 File Upload
@Test
public void testCopyFromLocal() throws IOException, URISyntaxException, InterruptedException {
    Configuration conf = new Configuration();
    // Get the client object
    FileSystem fs = FileSystem.get(new URI("hdfs://002:9000"), conf, "user");
    // Perform the upload
    fs.copyFromLocalFile(new Path("D:/WORK/tianlongbabu.txt"), new Path("/tianlongbabu.txt"));
    // Close the resource
    fs.close();
    System.out.println("over");
}

2.2 Testing Parameter Priority
Configuration parameters take effect with the following priority:

values set in code > xml files under the project's resources folder > xml files under the cluster's etc/hadoop directory > Hadoop's built-in defaults
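As a quick check, here is a minimal sketch (assuming a resources/hdfs-site.xml that sets dfs.replication to some other value; the value set in code should win):

@Test
public void testParamPriority() throws IOException, URISyntaxException, InterruptedException {
    Configuration conf = new Configuration();
    // Set in code: this overrides resources/hdfs-site.xml, the cluster-side
    // configuration, and the built-in default (dfs.replication = 3).
    conf.set("dfs.replication", "2");
    FileSystem fs = FileSystem.get(new URI("hdfs://002:9000"), conf, "user");
    fs.copyFromLocalFile(new Path("D:/WORK/tianlongbabu.txt"), new Path("/tianlongbabu.txt"));
    // The freshly written file should carry the replication factor set in code.
    System.out.println(fs.getFileStatus(new Path("/tianlongbabu.txt")).getReplication()); // expected: 2
    fs.close();
}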
2.3 File Download

fs.copyToLocalFile(new Path("/tianlongbabu.txt"), new Path("d:/tianlongbau.txt"));
If we modify the call slightly:
fs.copyToLocalFile(true, new Path("/tianlongbabu.txt"), new Path("d:/tianlongbau.txt"), true);

The first true (delSrc): whether to delete the source file on HDFS; setting it to true effectively turns the copy into a move (cut).
The second true (useRawLocalFileSystem): write through the raw local file system, skipping checksum generation, so no .crc file is downloaded and the local copy can be modified freely.
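To see the checksum side effect directly, here is a small sketch (the local file names are illustrative):

// With useRawLocalFileSystem = false, a hidden checksum file
// (d:/.with-crc.txt.crc) appears next to the download.
fs.copyToLocalFile(false, new Path("/tianlongbabu.txt"), new Path("d:/with-crc.txt"), false);
// With useRawLocalFileSystem = true, no .crc file is produced.
fs.copyToLocalFile(false, new Path("/tianlongbabu.txt"), new Path("d:/no-crc.txt"), true);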
2.4 File Deletion

The boolean parameter of delete mainly matters for directories: with true the directory is deleted recursively; with false an exception is thrown if the directory is not empty.
fs.delete(new Path("/loong"), true);
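For a single file the flag has no effect; a minimal sketch (assuming the file uploaded in 2.1 still exists):

// Deleting a plain file: the recursive flag is ignored here.
fs.delete(new Path("/tianlongbabu.txt"), false);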
2.5 File Rename

fs.rename(new Path("/loong/tianlongbabu.txt"), new Path("/loong/kuihuabaodian.txt"));

2.6 Viewing File Details
@Test
public void testListFiles() throws IOException, URISyntaxException, InterruptedException {
    Configuration conf = new Configuration();
    conf.set("dfs.client.use.datanode.hostname", "true");
    // Get the client object
    FileSystem fs = FileSystem.get(new URI("hdfs://LOONG002:9000"), conf, "root");
    RemoteIterator<LocatedFileStatus> listFiles = fs.listFiles(new Path("/"), true);
    while (listFiles.hasNext()) {
        LocatedFileStatus fileStatus = listFiles.next();
        // Print the file name, permissions, length, and block information
        System.out.println(fileStatus.getPath().getName());
        System.out.println(fileStatus.getPermission());
        System.out.println(fileStatus.getLen());
        BlockLocation[] blockLocations = fileStatus.getBlockLocations();
        for (BlockLocation blockLocation : blockLocations) {
            String[] hosts = blockLocation.getHosts();
            for (String host : hosts) {
                System.out.println(host);
            }
        }
        System.out.println("--------separator----------");
    }
    // Close the resource
    fs.close();
    System.out.println("over");
}

2.7 Determining Whether a Path Is a File or a Directory
@Test
public void testListStatus() throws IOException, URISyntaxException, InterruptedException {
    Configuration conf = new Configuration();
    conf.set("dfs.client.use.datanode.hostname", "true");
    // Get the client object
    FileSystem fs = FileSystem.get(new URI("hdfs://LOONG002:9000"), conf, "root");
    FileStatus[] listStatus = fs.listStatus(new Path("/"));
    for (FileStatus fileStatus : listStatus) {
        if (fileStatus.isFile()) {
            System.out.println("f:" + fileStatus.getPath().getName());
        } else {
            System.out.println("d:" + fileStatus.getPath().getName());
        }
    }
    // Close the resource
    fs.close();
    System.out.println("over");
}

III. HDFS I/O Stream Operations

3.1 HDFS File Upload
@Test
public void putFileToHDFS() throws IOException, URISyntaxException, InterruptedException {
    Configuration conf = new Configuration();
    conf.set("dfs.client.use.datanode.hostname", "true");
    // Get the client object
    FileSystem fs = FileSystem.get(new URI("hdfs://LOONG002:9000"), conf, "root");
    // Open the input stream from the local file
    FileInputStream fis = new FileInputStream(new File("d:/work/pen.txt"));
    // Create the output stream on HDFS
    FSDataOutputStream fos = fs.create(new Path("/pen.txt"));
    // Copy the streams
    IOUtils.copyBytes(fis, fos, conf);
    // Close the resources
    IOUtils.closeStream(fos);
    IOUtils.closeStream(fis);
    fs.close();
    System.out.println("over");
}

3.2 File Download
@Test
public void getFileFromHDFS() throws IOException, URISyntaxException, InterruptedException {
    Configuration conf = new Configuration();
    conf.set("dfs.client.use.datanode.hostname", "true");
    // Get the client object
    FileSystem fs = FileSystem.get(new URI("hdfs://LOONG002:9000"), conf, "root");
    // Open the input stream from HDFS
    FSDataInputStream fis = fs.open(new Path("/pen.txt"));
    // Create the output stream to the local file
    FileOutputStream fos = new FileOutputStream(new File("d:/work/pen.txt"));
    // Copy the streams
    IOUtils.copyBytes(fis, fos, conf);
    // Close the resources
    IOUtils.closeStream(fos);
    IOUtils.closeStream(fis);
    fs.close();
    System.out.println("over");
}

3.3 Seek-Based File Reads
Download the first block:
@Test
public void readFileSeek1() throws IOException, URISyntaxException, InterruptedException {
    Configuration conf = new Configuration();
    conf.set("dfs.client.use.datanode.hostname", "true");
    // Get the client object
    FileSystem fs = FileSystem.get(new URI("hdfs://LOONG002:9000"), conf, "root");
    // Open the input stream from HDFS
    FSDataInputStream fis = fs.open(new Path("/jdk-8u311-linux-x64.tar.gz"));
    // Create the output stream to the local file
    FileOutputStream fos = new FileOutputStream(new File("d:/jdk-8u311-linux-x64.tar.gz.part1"));
    // Copy exactly one 128 MB block: 1024 bytes x (1024 * 128) iterations
    byte[] buf = new byte[1024];
    for (int i = 0; i < 1024 * 128; i++) {
        // readFully blocks until the whole buffer is filled, so a short
        // read cannot write stale bytes into part1
        fis.readFully(buf);
        fos.write(buf);
    }
    // Close the resources
    IOUtils.closeStream(fos);
    IOUtils.closeStream(fis);
    fs.close();
    System.out.println("over");
}
Download the second block:
@Test
public void readFileSeek2() throws IOException, URISyntaxException, InterruptedException {
    Configuration conf = new Configuration();
    conf.set("dfs.client.use.datanode.hostname", "true");
    // Get the client object
    FileSystem fs = FileSystem.get(new URI("hdfs://LOONG002:9000"), conf, "root");
    // Open the input stream from HDFS
    FSDataInputStream fis = fs.open(new Path("/jdk-8u311-linux-x64.tar.gz"));
    // Set the read starting point: skip the first block (128 MB)
    fis.seek(1024 * 1024 * 128);
    // Create the output stream to the local file
    FileOutputStream fos = new FileOutputStream(new File("d:/jdk-8u311-linux-x64.tar.gz.part2"));
    // Copy the rest of the file
    IOUtils.copyBytes(fis, fos, conf);
    // Close the resources
    IOUtils.closeStream(fos);
    IOUtils.closeStream(fis);
    fs.close();
    System.out.println("over");
}
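To verify the two-part download, the parts can be concatenated locally; a sketch (assuming Java 9+ for transferTo, using the paths from the examples above):

// Stitch part1 and part2 back together into the original archive.
try (FileOutputStream out = new FileOutputStream("d:/jdk-8u311-linux-x64.tar.gz");
     FileInputStream in1 = new FileInputStream("d:/jdk-8u311-linux-x64.tar.gz.part1");
     FileInputStream in2 = new FileInputStream("d:/jdk-8u311-linux-x64.tar.gz.part2")) {
    in1.transferTo(out);
    in2.transferTo(out);
}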
IV. HDFS Read/Write Data Flow

1. Write Data Flow

1.2 Network Topology: Node Distance Calculation

When HDFS writes data, the NameNode selects the DataNode closest to the data being uploaded to receive it.
This requires computing the distance between nodes.
Node distance: the sum of the distances from each of the two nodes to their nearest common ancestor.
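As a worked example, with the usual /data-center/rack/node topology: two processes on the same node are at distance 0, two nodes in the same rack at distance 2, nodes in different racks at distance 4, and nodes in different data centers at distance 6. A minimal sketch of the calculation (the path strings are hypothetical):

public class NodeDistance {
    // Distance = hops from each node up to their nearest common ancestor.
    static int distance(String a, String b) {
        String[] pa = a.split("/");
        String[] pb = b.split("/");
        int common = 0;
        while (common < Math.min(pa.length, pb.length) && pa[common].equals(pb[common])) {
            common++;
        }
        return (pa.length - common) + (pb.length - common);
    }

    public static void main(String[] args) {
        System.out.println(distance("d1/r1/n0", "d1/r1/n0")); // same node: 0
        System.out.println(distance("d1/r1/n1", "d1/r1/n2")); // same rack: 2
        System.out.println(distance("d1/r2/n0", "d1/r3/n2")); // different racks: 4
        System.out.println(distance("d1/r2/n1", "d2/r4/n1")); // different data centers: 6
    }
}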
1.3 Rack Awareness (Replica Node Selection)
Replica node selection is illustrated in the figure (omitted here); for version 2.7.2, the default policy with a replication factor of three places one replica on a node in the local rack (the writer's own node, if it is inside the cluster), a second on a node in a different remote rack, and a third on a different node in that same remote rack.
2. HDFS Read Data Flow