/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.facebook.infrastructure.importer; import com.facebook.infrastructure.concurrent.DebuggableThreadPoolExecutor; import com.facebook.infrastructure.concurrent.ThreadFactoryImpl; import com.facebook.infrastructure.db.ReadParameters; import com.facebook.infrastructure.db.Row; import com.facebook.infrastructure.db.RowMutation; import com.facebook.infrastructure.net.EndPoint; import com.facebook.infrastructure.net.Message; import com.facebook.infrastructure.net.MessagingService; import com.facebook.infrastructure.service.*; import com.facebook.infrastructure.utils.FBUtilities; import com.facebook.infrastructure.utils.LogUtil; import org.apache.thrift.protocol.TBinaryProtocol; import org.apache.thrift.transport.TSocket; import org.apache.thrift.transport.TTransport; import com.martiansoftware.jsap.*; import org.apache.log4j.Logger; import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Random; import java.util.concurrent.ExecutorService; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.TimeUnit; import java.net.SocketException; /** * Author : Avinash Lakshman ( alakshman@facebook.com) & Prashant Malik ( pmalik@facebook.com ) */ public class StressTest { private static Logger logger_ = Logger.getLogger(DataImporter.class); private static final String tablename_ = new String("Test"); public static EndPoint from_ = new EndPoint("172.24.24.209", 10001); public static EndPoint to_ = new EndPoint("hadoop071.sf2p.facebook.com", 7000); private static String server_ = new String("hadoop071.sf2p.facebook.com"); private static final String columnFamilyColumn_ = new String("ColumnList"); private static final String columnFamilySuperColumn_ = new String("SuperColumnList"); private static final String keyFix_ = new String("KeyName"); private static final String columnFix_ = new String("Column-"); private static final String superColumnFix_ = new String("SuperColumn-"); private Cassandra.Client peerstorageClient_ = null; TTransport transport_ = null; private int requestsPerSecond_ = 1000; private ExecutorService runner_ = null; class LoadManager implements Runnable { private RowMutation rm = null; LoadManager(RowMutation rmsg) { rm = rmsg; } public void run() { throw new UnsupportedOperationException("Message serialization"); /* TODO if( rm != null ) { Message message = new Message(from_ , StorageService.mutationStage_, StorageService.loadVerbHandler_, new Object[] {rm}); MessagingService.getMessagingInstance().sendOneWay(message, to_); } */ } } /* * This function will apply the given task . It is based on a requests per * second member variable which can be set to teh required ammount , it will * generate only those many requests and if thos emany requests have already * been entered then it will sleep . This function assumes that there is no * waiting in any other part of the code so the requests are being generated * instantaniously . */ public void applyLoad(RowMutation rm) throws IOException { try { throw new UnsupportedOperationException("Message serialization"); /* TODO long t = System.currentTimeMillis(); Message message = new Message(from_, StorageService.mutationStage_, StorageService.mutationVerbHandler_, new Object[]{ rm } ); MessagingService.getMessagingInstance().sendOneWay(message, to_); Thread.sleep(1, 1000000000/requestsPerSecond_); */ } catch (Exception e) { e.printStackTrace(); } } public void readLoad(ReadParameters readMessage) { IResponseResolver<Row> readResponseResolver = new ReadResponseResolver(); QuorumResponseHandler<Row> quorumResponseHandler = new QuorumResponseHandler<Row>( 1, readResponseResolver); throw new UnsupportedOperationException("Message serialization"); /* TODO Message message = new Message(from_, StorageService.readStage_, StorageService.readVerbHandler_, new Object[] { readMessage }); MessagingService.getMessagingInstance().sendOneWay(message, to_); */ /*IAsyncResult iar = MessagingService.getMessagingInstance().sendRR(message, to_); try { long t = System.currentTimeMillis(); iar.get(2000, TimeUnit.MILLISECONDS ); logger_.debug("Time taken for read..." + (System.currentTimeMillis() - t)); } catch (Exception ex) { ex.printStackTrace(); }*/ } public void randomReadColumn (int keys, int columns, int size, int tps) { Random random = new Random(); try { while(true) { int key = random.nextInt(keys) + 1; String stringKey = new Integer(key).toString(); stringKey = stringKey + keyFix_ ; int j = random.nextInt(columns) + 1; ReadParameters rm = new ReadParameters(tablename_, stringKey, columnFamilyColumn_ + ":" + columnFix_ + j); readLoad(rm); if ( requestsPerSecond_ > 1000) Thread.sleep(0, 1000000000/requestsPerSecond_); else Thread.sleep(1000/requestsPerSecond_); } } catch(Exception ex) { ex.printStackTrace(); } } public void randomWriteColumn(int keys, int columns, int size, int tps) { Random random = new Random(); byte[] bytes = new byte[size]; int ts = 1; try { while(true) { int key = random.nextInt(keys) + 1; String stringKey = new Integer(key).toString(); stringKey = stringKey + keyFix_ ; RowMutation rm = new RowMutation(tablename_, stringKey); int j = random.nextInt(columns) + 1; random.nextBytes(bytes); rm.add( columnFamilyColumn_ + ":" + columnFix_ + j, bytes, ts); if ( ts == Integer.MAX_VALUE) { ts = 0 ; } ts++; for(int k = 0 ; k < requestsPerSecond_/1000 +1 ; k++ ) { runner_.submit(new LoadManager(rm)); } try { if ( requestsPerSecond_ > 1000) Thread.sleep(1); else Thread.sleep(1000/requestsPerSecond_); } catch ( Exception ex) { } } } catch(Exception ex) { ex.printStackTrace(); } } public void randomReadSuperColumn(int keys, int superColumns, int columns, int size, int tps) { Random random = new Random(); try { while(true) { int key = random.nextInt(keys) + 1; String stringKey = new Integer(key).toString(); stringKey = stringKey + keyFix_ ; int i = random.nextInt(superColumns) + 1; int j = random.nextInt(columns) + 1; ReadParameters rm = new ReadParameters(tablename_, stringKey, columnFamilySuperColumn_ + ":" + superColumnFix_ + i + ":" + columnFix_ + j); readLoad(rm); } } catch(Exception ex) { ex.printStackTrace(); } } public void randomWriteSuperColumn(int keys, int superColumns,int columns, int size, int tps) { Random random = new Random(); byte[] bytes = new byte[size]; int ts = 1; try { while(true) { int key = random.nextInt(keys) + 1; String stringKey = new Integer(key).toString(); stringKey = stringKey + keyFix_ ; RowMutation rm = new RowMutation(tablename_, stringKey); int i = random.nextInt(superColumns) + 1; int j = random.nextInt(columns) + 1; random.nextBytes(bytes); rm.add( columnFamilySuperColumn_ + ":" + superColumnFix_ + i + ":" + columnFix_ + j, bytes, ts); if ( ts == Integer.MAX_VALUE ) { ts = 0 ; } ts++; for(int k = 0 ; k < requestsPerSecond_/1000 +1 ; k++ ) { runner_.submit(new LoadManager(rm)); } try { if ( requestsPerSecond_ > 1000) Thread.sleep(1); else Thread.sleep(1000/requestsPerSecond_); } catch ( Exception ex) { } } } catch(Exception ex) { ex.printStackTrace(); } } public void bulkWriteColumn(int keys, int columns, int size, int tps) { Random random = new Random(); byte[] bytes = new byte[size]; int ts = 1; long time = System.currentTimeMillis(); try { for(int key = 1; key <= keys ; key++) { String stringKey = new Integer(key).toString(); stringKey = stringKey + keyFix_ ; RowMutation rm = new RowMutation(tablename_, stringKey); for( int j = 1; j <= columns ; j++) { random.nextBytes(bytes); rm.add( columnFamilyColumn_ + ":" + columnFix_ + j, bytes, ts); } for(int k = 0 ; k < requestsPerSecond_/1000 +1 ; k++ ) { runner_.submit(new LoadManager(rm)); } try { if ( requestsPerSecond_ > 1000) Thread.sleep(1); else Thread.sleep(1000/requestsPerSecond_); } catch ( Exception ex) { } } } catch(Exception ex) { ex.printStackTrace(); } System.out.println(System.currentTimeMillis() - time); } public void bulkWriteSuperColumn(int keys, int superColumns, int columns, int size, int tps) { Random random = new Random(); byte[] bytes = new byte[size]; int ts = 1; try { for(int key = 1; key <= keys ; key++) { String stringKey = new Integer(key).toString(); stringKey = stringKey + keyFix_ ; RowMutation rm = new RowMutation(tablename_, stringKey); for( int i = 1; i <= superColumns ; i++) { for( int j = 1; j <= columns ; j++) { random.nextBytes(bytes); rm.add( columnFamilySuperColumn_ + ":" + superColumnFix_ + i + ":" + columnFix_ + j, bytes, ts); } } for(int k = 0 ; k < requestsPerSecond_/1000 +1 ; k++ ) { runner_.submit(new LoadManager(rm)); } try { if ( requestsPerSecond_ > 1000) Thread.sleep(1); else Thread.sleep(1000/requestsPerSecond_); } catch ( Exception ex) { } } } catch(Exception ex) { ex.printStackTrace(); } } // Stress the server using the thrift API public Cassandra.Client connect() throws SocketException { int port = 9160; TSocket socket = new TSocket(server_, port); if(transport_ != null) transport_.close(); transport_ = socket; TBinaryProtocol binaryProtocol = new TBinaryProtocol(transport_, false, false); Cassandra.Client peerstorageClient = new Cassandra.Client( binaryProtocol); try { transport_.open(); } catch(Exception e) { e.printStackTrace(); } return peerstorageClient; } public void apply(batch_mutation_t batchMutation) { try { if ( requestsPerSecond_ > 1000) Thread.sleep(0, 1000000000/requestsPerSecond_); else Thread.sleep(1000/requestsPerSecond_); peerstorageClient_.batch_insert(batchMutation); } catch (Exception e) { try { peerstorageClient_ = connect(); peerstorageClient_.batch_insert(batchMutation); } catch (Exception e1) { e1.printStackTrace(); } } } public void apply(batch_mutation_super_t batchMutation) { try { if ( requestsPerSecond_ > 1000) Thread.sleep(0, 1000000000/requestsPerSecond_); else Thread.sleep(1000/requestsPerSecond_); long t = System.currentTimeMillis(); peerstorageClient_.batch_insert_superColumn(batchMutation); logger_.debug("Time taken for thrift..." + (System.currentTimeMillis() - t)); } catch (Exception e) { try { peerstorageClient_ = connect(); peerstorageClient_.batch_insert_superColumn(batchMutation); } catch (Exception e1) { e1.printStackTrace(); } } } public void readLoadColumn(String tableName, String key, String cf) throws SocketException { try { column_t column = peerstorageClient_.get_column(tableName, key, cf); } catch(Exception ex) { peerstorageClient_ = connect(); ex.printStackTrace(); } } public void randomReadColumnThrift(int keys, int columns, int size, int tps) { Random random = new Random(); try { while(true) { int key = random.nextInt(keys) + 1; String stringKey = new Integer(key).toString(); stringKey = stringKey + keyFix_ ; int j = random.nextInt(columns) + 1; readLoadColumn(tablename_, stringKey, columnFamilyColumn_ + ":" + columnFix_ + j); if ( requestsPerSecond_ > 1000) Thread.sleep(0, 1000000000/requestsPerSecond_); else Thread.sleep(1000/requestsPerSecond_); } } catch(Exception ex) { ex.printStackTrace(); } } public void randomWriteColumnThrift(int keys, int columns, int size, int tps) { Random random = new Random(); byte[] bytes = new byte[size]; int ts = 1; try { while(true) { int key = random.nextInt(keys) + 1; String stringKey = new Integer(key).toString(); stringKey = stringKey + keyFix_ ; RowMutation rm = new RowMutation(tablename_, stringKey); int j = random.nextInt(columns) + 1; random.nextBytes(bytes); rm.add( columnFamilyColumn_ + ":" + columnFix_ + j, bytes, ts); if ( ts == Integer.MAX_VALUE) { ts = 0 ; } ts++; applyLoad(rm); } } catch(Exception ex) { ex.printStackTrace(); } } public void randomReadSuperColumnThrift(int keys, int superColumns, int columns, int size, int tps) { Random random = new Random(); try { while(true) { int key = random.nextInt(keys) + 1; String stringKey = new Integer(key).toString(); stringKey = stringKey + keyFix_ ; int i = random.nextInt(superColumns) + 1; int j = random.nextInt(columns) + 1; readLoadColumn(tablename_, stringKey, columnFamilySuperColumn_ + ":" + superColumnFix_ + i + ":" + columnFix_ + j); } } catch(Exception ex) { ex.printStackTrace(); } } public void randomWriteSuperColumnThrift(int keys, int superColumns,int columns, int size, int tps) { Random random = new Random(); byte[] bytes = new byte[size]; int ts = 1; try { while(true) { int key = random.nextInt(keys) + 1; String stringKey = new Integer(key).toString(); stringKey = stringKey + keyFix_ ; RowMutation rm = new RowMutation(tablename_, stringKey); int i = random.nextInt(superColumns) + 1; int j = random.nextInt(columns) + 1; random.nextBytes(bytes); rm.add( columnFamilySuperColumn_ + ":" + superColumnFix_ + i + ":" + columnFix_ + j, bytes, ts); if ( ts == Integer.MAX_VALUE ) { ts = 0 ; } ts++; applyLoad(rm); } } catch(Exception ex) { ex.printStackTrace(); } } public void bulkWriteColumnThrift(int keys, int columns, int size, int tps) { Random random = new Random(); byte[] bytes = new byte[size]; int ts = 1; long time = System.currentTimeMillis(); try { for(int key = 1; key <= keys ; key++) { String stringKey = new Integer(key).toString(); stringKey = stringKey + keyFix_ ; batch_mutation_t bt = new batch_mutation_t(); bt.key = stringKey; bt.table = tablename_; bt.cfmap = new HashMap<String,List<column_t>>(); ArrayList<column_t> column_arr = new ArrayList<column_t>(); for( int j = 1; j <= columns ; j++) { random.nextBytes(bytes); column_arr.add(new column_t(columnFix_ + j, bytes.toString(), ts)); } bt.cfmap.put(columnFamilyColumn_, column_arr); apply(bt); } } catch(Exception ex) { ex.printStackTrace(); } System.out.println(System.currentTimeMillis() - time); } public void bulkWriteSuperColumnThrift(int keys, int supercolumns, int columns, int size, int tps) { Random random = new Random(); byte[] bytes = new byte[size]; int ts = 1; long time = System.currentTimeMillis(); try { for(int key = 1; key <= keys ; key++) { String stringKey = new Integer(key).toString(); stringKey = stringKey + keyFix_ ; batch_mutation_super_t bt = new batch_mutation_super_t(); bt.key = stringKey; bt.table = tablename_; bt.cfmap = new HashMap<String,List<superColumn_t>>(); ArrayList<superColumn_t> superColumn_arr = new ArrayList<superColumn_t>(); for( int i = 1; i <= supercolumns; i++ ) { ArrayList<column_t> column_arr = new ArrayList<column_t>(); for( int j = 1; j <= columns ; j++) { random.nextBytes(bytes); column_arr.add(new column_t(columnFix_ + j, bytes.toString(), ts)); } superColumn_arr.add(new superColumn_t(superColumnFix_ + i, column_arr)); } bt.cfmap.put(columnFamilySuperColumn_, superColumn_arr); apply(bt); } } catch(Exception ex) { ex.printStackTrace(); } System.out.println(System.currentTimeMillis() - time); } public void testCommitLog() throws Throwable { Random random = new Random(System.currentTimeMillis()); byte[] bytes = new byte[4096]; random.nextBytes(bytes); byte[] bytes1 = new byte[64]; random.nextBytes(bytes1); peerstorageClient_ = connect(); int t = 0 ; while( true ) { int key = random.nextInt(); int threadId = random.nextInt(); int word = random.nextInt(); peerstorageClient_.insert("Mailbox", Integer.toString(key), "MailboxMailList0:" + Integer.toString(threadId), new String(bytes1), t++); peerstorageClient_.insert("Mailbox", Integer.toString(key), "MailboxThreadList0:" + Integer.toString(word) + ":" + Integer.toString(threadId), new String(bytes), t++); peerstorageClient_.insert("Mailbox", Integer.toString(key), "MailboxUserList0:"+ Integer.toString(word) + ":" + Integer.toString(threadId), new String(bytes), t++); } } JSAPResult ParseArguments(String[] args) { JSAPResult config = null; try { SimpleJSAP jsap = new SimpleJSAP( "StressTest", "Runs stress test for Cassandra", new Parameter[] { new FlaggedOption( "keys", JSAP.INTEGER_PARSER, "10000", JSAP.REQUIRED, 'k', JSAP.NO_LONGFLAG, "The number of keys from 1 to this number" ), new FlaggedOption( "columns", JSAP.INTEGER_PARSER, "1000", JSAP.REQUIRED, 'c', JSAP.NO_LONGFLAG, "The number of columns from 1 to this number" ), new FlaggedOption( "supercolumns", JSAP.INTEGER_PARSER, "0", JSAP.NOT_REQUIRED, 'u', JSAP.NO_LONGFLAG, "The number of super columns from 1 to this number" ), new FlaggedOption( "size", JSAP.INTEGER_PARSER, "1000", JSAP.REQUIRED, 's', JSAP.NO_LONGFLAG, "The Size in bytes of each column" ), new FlaggedOption( "tps", JSAP.INTEGER_PARSER, "1000", JSAP.REQUIRED, 't', JSAP.NO_LONGFLAG, "Requests per second" ), new FlaggedOption( "thrift", JSAP.INTEGER_PARSER, "0", JSAP.REQUIRED, 'h', JSAP.NO_LONGFLAG, "Use Thrift - 1 , use messaging - 0" ), new FlaggedOption( "mailboxstress", JSAP.INTEGER_PARSER, "0", JSAP.REQUIRED, 'M', JSAP.NO_LONGFLAG, "Run mailbox stress - 1 , hmm default - 0" ), new FlaggedOption( "commitLogTest", JSAP.INTEGER_PARSER, "0", JSAP.REQUIRED, 'C', JSAP.NO_LONGFLAG, "Run mailbox stress - 1 , hmm default - 0" ), new QualifiedSwitch( "randomize", JSAP.STRING_PARSER, JSAP.NO_DEFAULT, JSAP.NOT_REQUIRED, 'z', "randomize", "Random reads or writes" ).setList( true ).setListSeparator( ',' ), new QualifiedSwitch( "reads", JSAP.STRING_PARSER, JSAP.NO_DEFAULT, JSAP.NOT_REQUIRED, 'r', "reads", "Read data" ).setList( true ).setListSeparator( ',' ), new QualifiedSwitch( "writes", JSAP.STRING_PARSER, JSAP.NO_DEFAULT, JSAP.NOT_REQUIRED, 'w', "writes", "Write Data" ).setList( false ).setListSeparator( ',' ), new QualifiedSwitch( "bulkwrites", JSAP.STRING_PARSER, JSAP.NO_DEFAULT, JSAP.NOT_REQUIRED, 'b', "bulkwrites", "Bulk Write Data" ).setList( false ).setListSeparator( ',' ), new UnflaggedOption( "Server", JSAP.STRING_PARSER, "hadoop071.sf2p.facebook.com", JSAP.REQUIRED, JSAP.NOT_GREEDY, "One or more names of people you would like to greet." ) } ) ; config = jsap.parse(args); if ( jsap.messagePrinted() ) return null; String hostName = FBUtilities.getHostName(); from_ = new EndPoint(hostName,10001); MessagingService.getMessagingInstance().listen(from_, false); } catch ( Exception ex) { logger_.debug(LogUtil.throwableToString(ex)); } return config; } void run( JSAPResult config ) throws Throwable { requestsPerSecond_ = config.getInt("tps"); int numThreads = requestsPerSecond_/1000 + 1; if(config.getString("server") != null) { server_ = config.getString("server"); to_ = new EndPoint(config.getString("server"), 7000); } runner_ = new DebuggableThreadPoolExecutor( numThreads, numThreads, Integer.MAX_VALUE, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), new ThreadFactoryImpl("MEMTABLE-FLUSHER-POOL") ); if(config.getInt("mailboxstress") == 1) { // stressMailboxWrites(); return; } if(config.getInt("commitLogTest") == 1) { testCommitLog(); return; } if(config.getInt("thrift") == 0) { if(config.getInt("supercolumns") == 0) { if(config.getBoolean("reads")) { randomReadColumn(config.getInt("keys"), config.getInt("columns"), config.getInt("size"), config.getInt("tps")); return; } if(config.getBoolean("bulkwrites")) { bulkWriteColumn(config.getInt("keys"), config.getInt("columns"), config.getInt("size"), config.getInt("tps")); return; } if(config.getBoolean("writes")) { randomWriteColumn(config.getInt("keys"), config.getInt("columns"), config.getInt("size"), config.getInt("tps")); return; } } else { if(config.getBoolean("reads")) { randomReadSuperColumn(config.getInt("keys"), config.getInt("supercolumns"), config.getInt("columns"), config.getInt("size"), config.getInt("tps")); return; } if(config.getBoolean("bulkwrites")) { bulkWriteSuperColumn(config.getInt("keys"), config.getInt("supercolumns"), config.getInt("columns"), config.getInt("size"), config.getInt("tps")); return; } if(config.getBoolean("writes")) { randomWriteSuperColumn(config.getInt("keys"), config.getInt("supercolumns"), config.getInt("columns"), config.getInt("size"), config.getInt("tps")); return; } } } else { peerstorageClient_ = connect(); if(config.getInt("supercolumns") == 0) { if(config.getBoolean("reads")) { randomReadColumnThrift(config.getInt("keys"), config.getInt("columns"), config.getInt("size"), config.getInt("tps")); return; } if(config.getBoolean("bulkwrites")) { bulkWriteColumnThrift(config.getInt("keys"), config.getInt("columns"), config.getInt("size"), config.getInt("tps")); return; } if(config.getBoolean("writes")) { randomWriteColumnThrift(config.getInt("keys"), config.getInt("columns"), config.getInt("size"), config.getInt("tps")); return; } } else { if(config.getBoolean("reads")) { randomReadSuperColumnThrift(config.getInt("keys"), config.getInt("supercolumns"), config.getInt("columns"), config.getInt("size"), config.getInt("tps")); return; } if(config.getBoolean("bulkwrites")) { bulkWriteSuperColumnThrift(config.getInt("keys"), config.getInt("supercolumns"), config.getInt("columns"), config.getInt("size"), config.getInt("tps")); return; } if(config.getBoolean("writes")) { randomWriteSuperColumnThrift(config.getInt("keys"), config.getInt("supercolumns"), config.getInt("columns"), config.getInt("size"), config.getInt("tps")); return; } } } } /** * @param args */ public static void main(String[] args) throws Throwable { StressTest stressTest = new StressTest(); JSAPResult config = stressTest.ParseArguments( args ); if( config == null ) System.exit(-1); stressTest.run(config); } }