1. Test environment:
1 NameNode + 1 secondary NameNode + 4 DataNodes
OS: Red Hat 4.1.2-46
CPU: 16 × Intel(R) Xeon(R) CPU E5620 @ 2.40GHz
MEM: 12 GB
NIC: 1000 Mb/s
2. Test preparation:
ulimit -n 655350 (takes effect for the current session only)
ulimit -u 65535 (takes effect for the current session only)
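These settings are lost when the session ends. A minimal sketch for making them persistent, assuming a PAM-based login (standard on Red Hat) and a hypothetical hadoop user running the tests, is to add the following to /etc/security/limits.conf:
hadoop soft nofile 655350
hadoop hard nofile 655350
hadoop soft nproc 65535
hadoop hard nproc 65535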
3. IOPS test:
3.1 Test code
import org.apache.hadoop.conf.*;
import org.apache.hadoop.fs.*;
import java.util.concurrent.*;

// One worker per thread. All open()/create() calls against the NameNode are
// issued in the constructor; run() does no I/O (fileSize is fixed at 0) and
// only signals completion.
class single_thread implements Runnable {
    private final CountDownLatch doneSignal;
    private int name;
    private int size;
    private Configuration conf;
    private FileSystem hdfs;
    private FileSystem local;
    private Path srcPath;
    private Path dstPath;
    private FSDataOutputStream out;
    private FSDataInputStream in;
    private long i;

    public single_thread(int na, String src, String dst, int numOfM,
                         CountDownLatch doneSignal_, int count) {
        this.doneSignal = doneSignal_;
        try {
            name = na;
            conf = new Configuration();
            hdfs = FileSystem.get(conf);
            local = FileSystem.getLocal(conf);
            // Read side: open and immediately close `count` existing files.
            // A path containing "dev" (e.g. /dev/zero) means this side is skipped.
            if (!src.contains("dev")) {
                for (int i1 = 0; i1 < count; i1++) {
                    srcPath = new Path(src + "zero" + name + "." + i1);
                    in = hdfs.open(srcPath);
                    in.close();
                }
            }
            // Write side: create and immediately close `count` empty files.
            if (!dst.contains("dev")) {
                for (int i1 = 0; i1 < count; i1++) {
                    dstPath = new Path(dst + "zero" + name + "." + i1);
                    out = hdfs.create(dstPath);
                    out.close();
                }
            }
            i = 0;
            size = numOfM;
        } catch (Exception e) {
            System.err.println("error:" + e.toString());
        }
    }

    public void run() {
        // No-op for the IOPS test (size is always 0 here).
        while (i < 1024 * size) {
            i++;
        }
        doneSignal.countDown();
    }
}

public class hdfs_iops {
    // args: <src> <dst> <I/O operations per thread> <number of threads>
    public static void main(String[] args) throws InterruptedException {
        int fileSize = 0; // metadata-only test: no data is read or written
        int count = Integer.parseInt(args[2]);
        int threadNum = Integer.parseInt(args[3]);
        CountDownLatch doneSignal = new CountDownLatch(threadNum);
        Thread t[] = new Thread[threadNum];
        // The constructors below perform all threadNum*count open/create
        // operations sequentially, so this loop is the measured interval.
        long startTime = System.nanoTime();
        for (int num = 0; num < threadNum; num++) {
            t[num] = new Thread(new single_thread(num, args[0], args[1],
                                                  fileSize, doneSignal, count));
        }
        long endTime = System.nanoTime();
        long runTime = (endTime - startTime) / count; // average per round of threadNum ops
        for (int num = 0; num < threadNum; num++) {
            t[num].start();
        }
        doneSignal.await();
        System.out.println("thread :" + threadNum + " count :" + count
                + " iops :" + threadNum / ((double) runTime / 1000000000.0));
    }
}
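A note on the measurement design: every open()/create() call against the NameNode is issued sequentially inside the single_thread constructors, and main() times only that construction loop; the worker threads do no I/O because fileSize is hard-coded to 0. The printed figure is therefore threadNum × count metadata operations divided by the total construction time, i.e. the sequential open/create rate of the NameNode rather than a truly concurrent one.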
3.2 Compilation
javac -cp hadoop-core-1.0.3.jar hdfs_iops.java
3.3 Execution:
Run on a node other than the NameNode.
Write:
Method 1: hadoop hdfs_iops /dev/zero hdfs://ns:9000/ 10 (I/O operations per thread) 100 (number of threads)
Method 2:
java -cp :/home/hadoop/hadoop-1.0.3/lib/asm-3.2.jar:/home/hadoop/hadoop-1.0.3/lib/aspectjrt-1.6.5.jar:/home/hadoop/hadoop-1.0.3/lib/aspectjtools-1.6.5.jar:/home/hadoop/hadoop-1.0.3/lib/commons-beanutils-1.7.0.jar:/home/hadoop/hadoop-1.0.3/lib/commons-beanutils-core-1.8.0.jar:/home/hadoop/hadoop-1.0.3/lib/commons-cli-1.2.jar:/home/hadoop/hadoop-1.0.3/lib/commons-codec-1.4.jar:/home/hadoop/hadoop-1.0.3/lib/commons-collections-3.2.1.jar:/home/hadoop/hadoop-1.0.3/lib/commons-configuration-1.6.jar:/home/hadoop/hadoop-1.0.3/lib/commons-daemon-1.0.1.jar:/home/hadoop/hadoop-1.0.3/lib/commons-digester-1.8.jar:/home/hadoop/hadoop-1.0.3/lib/commons-el-1.0.jar:/home/hadoop/hadoop-1.0.3/lib/commons-httpclient-3.0.1.jar:/home/hadoop/hadoop-1.0.3/lib/commons-io-2.1.jar:/home/hadoop/hadoop-1.0.3/lib/commons-lang-2.4.jar:/home/hadoop/hadoop-1.0.3/lib/commons-logging-1.1.1.jar:/home/hadoop/hadoop-1.0.3/lib/commons-logging-api-1.0.4.jar:/home/hadoop/hadoop-1.0.3/lib/commons-math-2.1.jar:/home/hadoop/hadoop-1.0.3/lib/commons-net-1.4.1.jar:/home/hadoop/hadoop-1.0.3/lib/core-3.1.1.jar:/home/hadoop/hadoop-1.0.3/lib/hadoop-capacity-scheduler-1.0.3.jar:/home/hadoop/hadoop-1.0.3/lib/hadoop-fairscheduler-1.0.3.jar:/home/hadoop/hadoop-1.0.3/lib/hadoop-thriftfs-1.0.3.jar:/home/hadoop/hadoop-1.0.3/lib/hsqldb-1.8.0.10.jar:/home/hadoop/hadoop-1.0.3/lib/jackson-core-asl-1.8.8.jar:/home/hadoop/hadoop-1.0.3/lib/jackson-mapper-asl-1.8.8.jar:/home/hadoop/hadoop-1.0.3/lib/jasper-compiler-5.5.12.jar:/home/hadoop/hadoop-1.0.3/lib/jasper-runtime-5.5.12.jar:/home/hadoop/hadoop-1.0.3/lib/jdeb-0.8.jar:/home/hadoop/hadoop-1.0.3/lib/jersey-core-1.8.jar:/home/hadoop/hadoop-1.0.3/lib/jersey-json-1.8.jar:/home/hadoop/hadoop-1.0.3/lib/jersey-server-1.8.jar:/home/hadoop/hadoop-1.0.3/lib/jets3t-0.6.1.jar:/home/hadoop/hadoop-1.0.3/lib/jetty-6.1.26.jar:/home/hadoop/hadoop-1.0.3/lib/jetty-util-6.1.26.jar:/home/hadoop/hadoop-1.0.3/lib/jsch-0.1.42.jar:/home/hadoop/hadoop-1.0.3/lib/junit-4.5.jar:/home/hadoop/hadoop-1.0.3/lib/kfs-0.2.2.jar:/home/hadoop/hadoop-1.0.3/lib/log4j-1.2.15.jar:/home/hadoop/hadoop-1.0.3/lib/mockito-all-1.8.5.jar:/home/hadoop/hadoop-1.0.3/lib/oro-2.0.8.jar:/home/hadoop/hadoop-1.0.3/lib/servlet-api-2.5-20081211.jar:/home/hadoop/hadoop-1.0.3/lib/slf4j-api-1.4.3.jar:/home/hadoop/hadoop-1.0.3/lib/slf4j-log4j12-1.4.3.jar:/home/hadoop/hadoop-1.0.3/lib/xmlenc-0.52.jar hdfs_iops /dev/zero hdfs://ns:9000/ 10 100
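Building the classpath by hand as above is error-prone. A shorter equivalent, as a sketch (assuming the bin/hadoop script of this 1.0.3 install is on the PATH and supports the classpath subcommand, and that hdfs_iops.class is in the current directory):
java -cp "$(hadoop classpath):." hdfs_iops /dev/zero hdfs://ns:9000/ 10 100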
Read:
Method 1:
hadoop hdfs_iops hdfs://ns:9000/ /dev/null 10 (I/O operations per thread) 100 (number of threads)
(The read test opens files named zero<thread>.<n> under the source path, so run the write test first to create them.)
Method 2: as in method 2 of the write test, with the source and destination arguments swapped as shown above.
IOPS test results:
4. Throughput test
Test code
import org.apache.hadoop.conf.*;
import org.apache.hadoop.fs.*;
import java.util.concurrent.*;

// One worker per thread: copies `size` MB from src to dst through a 4 KB
// buffer. A path containing "dev" (/dev/zero, /dev/null) is opened via the
// local file system; anything else is treated as an HDFS path.
class single_thread implements Runnable {
    private final CountDownLatch doneSignal;
    private int name;
    private int size;
    private Configuration conf;
    private FileSystem hdfs;
    private FileSystem local;
    private Path srcPath;
    private Path dstPath;
    private FSDataOutputStream out;
    private FSDataInputStream in;
    private byte buffer[];
    private long i;
    private int b;

    public single_thread(int na, String src, String dst, int numOfM,
                         CountDownLatch doneSignal_) {
        this.doneSignal = doneSignal_;
        try {
            name = na;
            conf = new Configuration();
            hdfs = FileSystem.get(conf);
            local = FileSystem.getLocal(conf);
            if (src.contains("dev")) {
                srcPath = new Path(src);
                in = local.open(srcPath);
            } else {
                srcPath = new Path(src + "pero" + name); // per-thread input file
                in = hdfs.open(srcPath);
            }
            if (dst.contains("dev")) {
                dstPath = new Path(dst);
                out = local.create(dstPath);
            } else {
                dstPath = new Path(dst + "pero" + name); // per-thread output file
                out = hdfs.create(dstPath);
            }
            buffer = new byte[4096];
            i = 0;
            size = numOfM;
        } catch (Exception e) {
            System.err.println("error:" + e.toString());
        }
    }

    public void run() {
        try {
            // 256 iterations x 4096 bytes = 1 MB, so `size` MB per thread.
            while (i < 256 * size) {
                b = in.read(buffer);
                if (b < 0) break; // stop at EOF instead of writing -1 bytes
                out.write(buffer, 0, b);
                i++;
            }
        } catch (Exception e) {
            System.err.println("error:" + e.toString());
        } finally {
            try {
                in.close();
                out.close();
            } catch (Exception e) {
                System.err.println("error:" + e.toString());
            }
            doneSignal.countDown();
        }
    }
}

public class hdfs_test {
    // args: <src> <dst> <file size per thread in MB> <number of threads>
    public static void main(String[] args) throws InterruptedException {
        int fileSize = Integer.parseInt(args[2]);
        int threadNum = Integer.parseInt(args[3]);
        double totalSize = fileSize * threadNum * 1024.0 * 1024.0; // bytes
        CountDownLatch doneSignal = new CountDownLatch(threadNum);
        Thread t[] = new Thread[threadNum];
        long startTime = System.nanoTime();
        for (int num = 0; num < threadNum; num++) {
            t[num] = new Thread(new single_thread(num, args[0], args[1],
                                                  fileSize, doneSignal));
            t[num].start();
        }
        doneSignal.await();
        long endTime = System.nanoTime();
        long runTime = endTime - startTime; // nanoseconds
        System.out.println("totalSize:" + fileSize * threadNum + "MB "
                + "totalTime:" + runTime / 1000000 + "ms");
        if (fileSize == 0)
            System.out.println("iops :" + threadNum / ((double) runTime / 1000000000.0));
        else
            // bytes * 1000 / ns = bytes per microsecond, roughly MB/s
            System.out.println("speed: " + totalSize * 1000.0 / (double) runTime
                    + " totalsize: " + totalSize + " runtime: " + runTime);
    }
}
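Usage mirrors the two code paths above: for a write test the source is /dev/zero and the destination an HDFS directory; for a read test the source is an HDFS directory containing files pero0, pero1, ... (e.g. produced by a prior write run) and the destination is /dev/null. Note that both test programs define a helper class named single_thread, so compile them in separate directories (or rename one) to keep the class files from clobbering each other.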
Compilation and execution follow the IOPS test above; the main class here is hdfs_test.
(The last two numeric arguments of the test program are the per-thread test file size in MB and the number of threads.)
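A worked example with hypothetical numbers: 100 threads each copying 100 MB gives totalSize = 100 × 100 × 1024 × 1024 ≈ 1.05 × 10^10 bytes; if the run takes 100 s (runTime = 10^11 ns), the program prints speed ≈ 1.05 × 10^10 × 1000 / 10^11 ≈ 104.9. Since bytes × 1000 / ns works out to bytes per microsecond, this is roughly 105 MB/s of aggregate throughput.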
Throughput test results (some results are incomplete, due to data scale and other constraints):