public static void main(String[] args) throws Exception {
    // Connects to a remote HDFS namenode, lists the /test directory,
    // then creates /test/hadoop22.log and writes one line of text into it.
    String uri = "hdfs://120.92.4.44:9000/";
    Configuration config = new Configuration();
    // Identify as user "root" via the legacy hadoop.job.ugi property.
    // NOTE(review): a hard-coded password in source is a security risk —
    // move credentials to a secure store / site configuration.
    config.set("hadoop.job.ugi", "root,Password@123");
    // try-with-resources: the FileSystem (and the output stream below) are
    // closed even when an exception is thrown mid-operation. The original
    // leaked both when fs.create()/os.write() failed remotely.
    try (FileSystem fs = FileSystem.get(URI.create(uri), config)) {
        // List all files and directories under /test on HDFS.
        FileStatus[] statuses = fs.listStatus(new Path("/test"));
        for (FileStatus status : statuses) {
            System.out.println(status);
        }
        System.err.println("11");
        // Create a file under /test and write one line of text.
        try (FSDataOutputStream os = fs.create(new Path("/test/hadoop22.log"))) {
            // Name the charset explicitly: the no-arg getBytes() uses the
            // platform default and is not portable across client machines.
            os.write("Hello World!".getBytes("UTF-8"));
            os.flush();
        }
        // Print the contents of a specified file under /test on HDFS.
        /* InputStream is = fs.open(new Path("/test/aa.txt"));
        IOUtils.copyBytes(is, System.out, 1024, false);*/
    }
}
log4j:WARN No appenders could be found for logger (org.apache.hadoop.metrics2.lib.MutableMetricsFactory).
log4j:WARN Please initialize the log4j system properly.
log4j:WARN See http://logging.apache.org/log4j/1.2/faq.html#noconfig for more info.
11
FileStatus{path=hdfs://120.92.4.44:9000/test/hadoop22.log; isDirectory=false; length=0; replication=3; blocksize=134217728; modification_time=1462802160953; access_time=1462802160953; owner=Administrator; group=supergroup; permission=rw-r--r--; isSymlink=false}
Exception in thread "main" org.apache.hadoop.ipc.RemoteException(java.io.IOException): File /test/hadoop22.log could only be replicated to 0 nodes instead of minReplication (=1). There are 2 datanode(s) running and 2 node(s) are excluded in this operation.
at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.chooseTarget4NewBlock(BlockManager.java:1562)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:3245)
at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.addBlock(NameNodeRpcServer.java:663)
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.addBlock(ClientNamenodeProtocolServerSideTranslatorPB.java:482)
at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:619)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:962)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2040)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2036)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:415)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1656)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2034)
节点和进程启动都成功,在线等,感激不尽
// NOTE(review): this is a duplicate paste of the main() body above; the
// opening method signature is missing, so this copy is a truncated fragment
// and will not compile as-is — confirm whether it should be deleted.
String uri = "hdfs://120.92.4.44:9000/";
Configuration config = new Configuration();
// Legacy-style identity: authenticate as user "root" (password in source —
// security risk, same as the copy above).
config.set("hadoop.job.ugi", "root,Password@123");
FileSystem fs = FileSystem.get(URI.create(uri), config);
// List all files and directories under /test on HDFS.
FileStatus[] statuses = fs.listStatus(new Path("/test"));
for (FileStatus status : statuses) {
System.out.println(status);
}
System.err.println("11");
// Create a file under /test on HDFS and write one line of text.
// NOTE(review): os is never closed if write() throws — the logged
// RemoteException leaves this stream (and fs) open.
FSDataOutputStream os = fs.create(new Path("/test/hadoop22.log"));
os.write("Hello World!".getBytes());
os.flush();
os.close();
// Print the contents of a specified file under /test on HDFS.
/* InputStream is = fs.open(new Path("/test/aa.txt"));
IOUtils.copyBytes(is, System.out, 1024, false);*/
}
log4j:WARN No appenders could be found for logger (org.apache.hadoop.metrics2.lib.MutableMetricsFactory).
log4j:WARN Please initialize the log4j system properly.
log4j:WARN See http://logging.apache.org/log4j/1.2/faq.html#noconfig for more info.
11
FileStatus{path=hdfs://120.92.4.44:9000/test/hadoop22.log; isDirectory=false; length=0; replication=3; blocksize=134217728; modification_time=1462802160953; access_time=1462802160953; owner=Administrator; group=supergroup; permission=rw-r--r--; isSymlink=false}
Exception in thread "main" org.apache.hadoop.ipc.RemoteException(java.io.IOException): File /test/hadoop22.log could only be replicated to 0 nodes instead of minReplication (=1). There are 2 datanode(s) running and 2 node(s) are excluded in this operation.
at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.chooseTarget4NewBlock(BlockManager.java:1562)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:3245)
at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.addBlock(NameNodeRpcServer.java:663)
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.addBlock(ClientNamenodeProtocolServerSideTranslatorPB.java:482)
at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:619)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:962)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2040)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2036)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:415)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1656)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2034)
节点和进程启动都成功,在线等,感激不尽