
HDFS Throughput and IOPS Testing


1. Test Environment:

1 NameNode + 1 Secondary NameNode + 4 DataNodes

OS: Red Hat 4.1.2-46

CPU: 16 × Intel(R) Xeon(R) E5620 @ 2.40GHz

MEM: 12 GB

NIC: 1000 Mb/s (Gigabit Ethernet)

2. Test Preparation:

ulimit -n 655350  (effective for the current session only)

ulimit -u 65535   (effective for the current session only)
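
To make these limits persist across sessions, the equivalent settings can go in /etc/security/limits.conf (a minimal sketch, assuming the tests run as user hadoop; adjust the user name and values as needed):

# /etc/security/limits.conf
hadoop  soft  nofile  655350
hadoop  hard  nofile  655350
hadoop  soft  nproc   65535
hadoop  hard  nproc   65535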

3. IOPS Test:

3.1 Test code

import org.apache.hadoop.conf.*;
import org.apache.hadoop.fs.*;
import java.util.concurrent.*;

class single_thread implements Runnable {
    private final CountDownLatch doneSignal;
    private int name;
    private int size;
    private Configuration conf;
    private FileSystem hdfs;
    private Path srcPath;
    private Path dstPath;
    private FSDataOutputStream out;
    private FSDataInputStream in;
    private long i;

    // All of the measured open()/create() calls happen here in the
    // constructor; main() times the loop that constructs these objects.
    public single_thread(int na, String src, String dst, int numOfM,
                         CountDownLatch doneSignal_, int count) {
        this.doneSignal = doneSignal_;
        try {
            name = na;
            conf = new Configuration();
            hdfs = FileSystem.get(conf);
            // Read test: open and immediately close `count` existing files.
            // A device path such as /dev/zero is skipped, so each run
            // exercises only one direction.
            if (!src.contains("dev")) {
                for (int i1 = 0; i1 < count; i1++) {
                    srcPath = new Path(src + "zero" + name + "." + i1);
                    in = hdfs.open(srcPath);
                    in.close();
                }
            }
            // Write test: create and immediately close `count` empty files.
            if (!dst.contains("dev")) {
                for (int i1 = 0; i1 < count; i1++) {
                    dstPath = new Path(dst + "zero" + name + "." + i1);
                    out = hdfs.create(dstPath);
                    out.close();
                }
            }
            i = 0;
            size = numOfM;
        } catch (Exception e) {
            System.err.println("error:" + e.toString());
        }
    }

    public void run() {
        // fileSize is fixed at 0 in the IOPS test, so this loop never runs;
        // the thread only signals completion.
        while (i < 1024 * size) {
            i++;
        }
        doneSignal.countDown();
    }
}

public class hdfs_iops {
    public static void main(String[] args) throws InterruptedException, ExecutionException {
        int fileSize = 0;                           // the IOPS test transfers no data
        int count = Integer.parseInt(args[2]);      // I/O operations per thread
        int threadNum = Integer.parseInt(args[3]);  // number of threads
        CountDownLatch doneSignal = new CountDownLatch(threadNum);

        Thread t[] = new Thread[threadNum];
        // The constructor loop below performs threadNum * count open() or
        // create() calls, so its elapsed time is the total operation time.
        long startTime = System.nanoTime();
        for (int num = 0; num < threadNum; num++) {
            t[num] = new Thread(new single_thread(num, args[0], args[1], fileSize, doneSignal, count));
        }
        long endTime = System.nanoTime();
        long runTime = (endTime - startTime) / count;

        for (int num = 0; num < threadNum; num++) {
            t[num].start();
        }
        doneSignal.await();
        System.out.println("threads: " + threadNum + "  count: " + count
                + "  iops: " + threadNum / ((double) runTime / 1000000000.0));
    }
}
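
A note on the reported figure: runTime is the total time of the constructor loop divided by count, so the printed value works out to (threadNum × count) operations divided by the total elapsed time. For example, if 100 threads each perform 10 creates and the loop takes 2 s in total, the program reports 100 × 10 / 2 = 500 IOPS.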

 

3.2 Compilation

javac -cp hadoop-core-1.0.3.jar hdfs_iops.java
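
When launching through the hadoop script (Method 1 below), the compiled classes must be visible on its classpath; one way, assuming the .class files sit in the current directory, is:

export HADOOP_CLASSPATH=.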

3.3 Execution:

Run on a machine other than the NameNode.

Write:

Method 1: hadoop hdfs_iops /dev/zero hdfs://ns:9000/ 10 100   (10 = I/O operations per thread, 100 = number of threads)

Method 2:

java -cp :/home/hadoop/hadoop-1.0.3/lib/asm-3.2.jar:/home/hadoop/hadoop-1.0.3/lib/aspectjrt-1.6.5.jar:/home/hadoop/hadoop-1.0.3/lib/aspectjtools-1.6.5.jar:/home/hadoop/hadoop-1.0.3/lib/commons-beanutils-1.7.0.jar:/home/hadoop/hadoop-1.0.3/lib/commons-beanutils-core-1.8.0.jar:/home/hadoop/hadoop-1.0.3/lib/commons-cli-1.2.jar:/home/hadoop/hadoop-1.0.3/lib/commons-codec-1.4.jar:/home/hadoop/hadoop-1.0.3/lib/commons-collections-3.2.1.jar:/home/hadoop/hadoop-1.0.3/lib/commons-configuration-1.6.jar:/home/hadoop/hadoop-1.0.3/lib/commons-daemon-1.0.1.jar:/home/hadoop/hadoop-1.0.3/lib/commons-digester-1.8.jar:/home/hadoop/hadoop-1.0.3/lib/commons-el-1.0.jar:/home/hadoop/hadoop-1.0.3/lib/commons-httpclient-3.0.1.jar:/home/hadoop/hadoop-1.0.3/lib/commons-io-2.1.jar:/home/hadoop/hadoop-1.0.3/lib/commons-lang-2.4.jar:/home/hadoop/hadoop-1.0.3/lib/commons-logging-1.1.1.jar:/home/hadoop/hadoop-1.0.3/lib/commons-logging-api-1.0.4.jar:/home/hadoop/hadoop-1.0.3/lib/commons-math-2.1.jar:/home/hadoop/hadoop-1.0.3/lib/commons-net-1.4.1.jar:/home/hadoop/hadoop-1.0.3/lib/core-3.1.1.jar:/home/hadoop/hadoop-1.0.3/lib/hadoop-capacity-scheduler-1.0.3.jar:/home/hadoop/hadoop-1.0.3/lib/hadoop-fairscheduler-1.0.3.jar:/home/hadoop/hadoop-1.0.3/lib/hadoop-thriftfs-1.0.3.jar:/home/hadoop/hadoop-1.0.3/lib/hsqldb-1.8.0.10.jar:/home/hadoop/hadoop-1.0.3/lib/jackson-core-asl-1.8.8.jar:/home/hadoop/hadoop-1.0.3/lib/jackson-mapper-asl-1.8.8.jar:/home/hadoop/hadoop-1.0.3/lib/jasper-compiler-5.5.12.jar:/home/hadoop/hadoop-1.0.3/lib/jasper-runtime-5.5.12.jar:/home/hadoop/hadoop-1.0.3/lib/jdeb-0.8.jar:/home/hadoop/hadoop-1.0.3/lib/jersey-core-1.8.jar:/home/hadoop/hadoop-1.0.3/lib/jersey-json-1.8.jar:/home/hadoop/hadoop-1.0.3/lib/jersey-server-1.8.jar:/home/hadoop/hadoop-1.0.3/lib/jets3t-0.6.1.jar:/home/hadoop/hadoop-1.0.3/lib/jetty-6.1.26.jar:/home/hadoop/hadoop-1.0.3/lib/jetty-util-6.1.26.jar:/home/hadoop/hadoop-1.0.3/lib/jsch-0.1.42.jar:/home/hadoop/hadoop-1.0.3/lib/junit-4.5.jar:/home/hadoop/hadoop-1.0.3/lib/kfs-0.2.2.jar:/home/hadoop/hadoop-1.0.3/lib/log4j-1.2.15.jar:/home/hadoop/hadoop-1.0.3/lib/mockito-all-1.8.5.jar:/home/hadoop/hadoop-1.0.3/lib/oro-2.0.8.jar:/home/hadoop/hadoop-1.0.3/lib/servlet-api-2.5-20081211.jar:/home/hadoop/hadoop-1.0.3/lib/slf4j-api-1.4.3.jar:/home/hadoop/hadoop-1.0.3/lib/slf4j-log4j12-1.4.3.jar:/home/hadoop/hadoop-1.0.3/lib/xmlenc-0.52.jar  hdfs_iops  /dev/zero hdfs://ns:9000/  10  100
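
On Java 6 or later, which accepts classpath wildcards, the jar list above can be shortened to a single entry; this sketch also adds hadoop-core-1.0.3.jar (absent from the list above) and the current directory for the compiled classes:

java -cp .:/home/hadoop/hadoop-1.0.3/hadoop-core-1.0.3.jar:'/home/hadoop/hadoop-1.0.3/lib/*' hdfs_iops /dev/zero hdfs://ns:9000/ 10 100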

 

Read:

Method 1:

hadoop hdfs_iops hdfs://ns:9000/ /dev/null 10 100   (10 = I/O operations per thread, 100 = number of threads)

(The files being opened, named zero<thread>.<n>, must already exist, so run the write test against the same path first.)

Method 2: same as Method 2 of the write test, with the source and destination arguments swapped.

 

IOPS test results:

 

4. Throughput Test

Test code:

import org.apache.hadoop.conf.*;
import org.apache.hadoop.fs.*;
import java.util.concurrent.*;

class single_thread implements Runnable {
    private final CountDownLatch doneSignal;
    private int name;
    private int size;
    private Configuration conf;
    private FileSystem hdfs;
    private FileSystem local;
    private Path srcPath;
    private Path dstPath;
    private FSDataOutputStream out;
    private FSDataInputStream in;
    private byte buffer[];
    private long i;
    private int b;

    public single_thread(int na, String src, String dst, int numOfM, CountDownLatch doneSignal_) {
        this.doneSignal = doneSignal_;
        try {
            name = na;
            conf = new Configuration();
            hdfs = FileSystem.get(conf);
            local = FileSystem.getLocal(conf);
            // Device paths (/dev/zero, /dev/null) go through the local
            // filesystem; anything else is treated as an HDFS path and
            // gets a per-thread file name, pero<thread>.
            if (src.contains("dev")) {
                srcPath = new Path(src);
                in = local.open(srcPath);
            } else {
                srcPath = new Path(src + "pero" + name);
                in = hdfs.open(srcPath);
            }
            if (dst.contains("dev")) {
                dstPath = new Path(dst);
                out = local.create(dstPath);
            } else {
                dstPath = new Path(dst + "pero" + name);
                out = hdfs.create(dstPath);
            }
            buffer = new byte[4096];
            i = 0;
            size = numOfM;
        } catch (Exception e) {
            System.err.println("error:" + e.toString());
        }
    }

    public void run() {
        try {
            // Copy 256 * size buffers of 4 KB, i.e. size MB per thread.
            while (i < 256 * size) {
                b = in.read(buffer);
                if (b < 0) break;   // guard against EOF on a short source
                out.write(buffer, 0, b);
                i++;
            }
        } catch (Exception e) {
            System.err.println("error:" + e.toString());
        } finally {
            try {
                in.close();
                out.close();
            } catch (Exception e) {
                System.err.println("error:" + e.toString());
            }
            doneSignal.countDown();
        }
    }
}

public class hdfs_test {
    public static void main(String[] args) throws InterruptedException, ExecutionException {
        int fileSize = Integer.parseInt(args[2]);   // file size per thread, in MB
        int threadNum = Integer.parseInt(args[3]);  // number of threads
        double totalSize = fileSize * threadNum * 1024.0 * 1024.0;  // total bytes
        CountDownLatch doneSignal = new CountDownLatch(threadNum);

        Thread t[] = new Thread[threadNum];
        long startTime = System.nanoTime();
        for (int num = 0; num < threadNum; num++) {
            t[num] = new Thread(new single_thread(num, args[0], args[1], fileSize, doneSignal));
            t[num].start();
        }
        doneSignal.await();
        long endTime = System.nanoTime();
        long runTime = endTime - startTime;
        System.out.println("totalSize:" + fileSize * threadNum + "MB   totalTime:" + runTime / 1000000 + "ms");
        if (fileSize == 0)
            System.out.println("iops :" + threadNum / ((double) runTime / 1000000000.0));
        else
            // bytes * 1000 / ns = 10^6 bytes per second, i.e. MB/s
            System.out.println("speed: " + totalSize * 1000.0 / (double) runTime
                    + "  totalsize: " + totalSize + "   runtime: " + runTime);
    }
}
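
The printed speed is thus in units of 10^6 bytes per second (roughly MB/s): totalSize is in bytes, runTime in nanoseconds, and totalSize × 1000 / runTime equals bytes/s divided by 10^6. For example, 10 threads writing 100 MB each give totalSize = 1,048,576,000 bytes; a 10 s run (10^10 ns) prints speed ≈ 104.9.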

 

Compilation and execution follow the IOPS test. (Note that both programs define a class named single_thread, so compile them in separate directories.)

(The last two numeric arguments to the test program are the file size per thread, in MB, and the number of threads, respectively.)
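
For example, the following run (class name hdfs_test as in the code above, classpath set up as in section 3.3) writes 100 MB from each of 10 threads, about 1 GB in total, into HDFS:

hadoop hdfs_test /dev/zero hdfs://ns:9000/ 100 10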

Throughput test results (some results are incomplete due to the data scale and other constraints):
