Java Examples for backtype.storm.topology.TopologyBuilder

The following Java examples illustrate the usage of backtype.storm.topology.TopologyBuilder. The source code samples are taken from different open source projects.
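
As a quick orientation, the snippet below is a minimal sketch of the pattern most of these examples follow: register spouts and bolts under component ids (optionally with a parallelism hint), wire them together with stream groupings, and finish with createTopology(). MySpout and MyBolt are hypothetical placeholders, not classes from the samples below.

import backtype.storm.generated.StormTopology;
import backtype.storm.topology.TopologyBuilder;

// Register a spout under the component id "spout" with a parallelism hint of 2
TopologyBuilder builder = new TopologyBuilder();
builder.setSpout("spout", new MySpout(), 2);
// Subscribe a bolt to the spout's output stream using shuffle grouping
builder.setBolt("bolt", new MyBolt(), 4).shuffleGrouping("spout");
// Build the StormTopology object that is submitted to a local or remote cluster
StormTopology topology = builder.createTopology();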

Example 1
Project: openbus-master  File: OnlineSiddhiTopology.java
public static void main(String[] args) {
    if (args.length != 1) {
        System.out.println("USAGE: <properties file>");
        return;
    }
    // Create a Properties object
    Properties propiedades = new Properties();
    try {
        propiedades.load(new FileInputStream(args[0]));
    } catch (IOException e1) {
        System.out.println("Error opening the PROPERTIES file");
        e1.printStackTrace();
    }
    IRichSpout spout = null;
    // Check the input type
    String tipo = propiedades.getProperty("INPUT_ORIGIN");
    if (tipo.equals("kafka")) {
        // Configure the KafkaSpout
        ZkHosts zooHosts = new ZkHosts(propiedades.getProperty("KAFKA_ZOOKEEPER_LIST"));
        SpoutConfig spoutConfig = new SpoutConfig(zooHosts, propiedades.getProperty("KAFKA_TOPIC"), "", "STORM-ID");
        // Boolean.parseBoolean(null) returns false, so a missing property defaults to false
        boolean fromBeginning = Boolean.parseBoolean(propiedades.getProperty("KAFKA_FROM_BEGINNING"));
        //spoutConfig.startOffsetTime=-1;
        spoutConfig.forceFromStart = fromBeginning;
        if (!fromBeginning) {
            spoutConfig.startOffsetTime = -1;
        }
        spout = new KafkaSpout(spoutConfig);
    }
    if (tipo.equals("disco")) {
        spout = new SimpleFileStringSpout(propiedades.getProperty("INPUT_FILE"), "linea");
    }
    Config conf = new Config();
    conf.put(StormElasticSearchConstants.ES_CLUSTER_NAME, propiedades.getProperty("ES_CLUSTER_NAME"));
    conf.put(StormElasticSearchConstants.ES_HOST, propiedades.getProperty("ES_HOST"));
    conf.put(StormElasticSearchConstants.ES_PORT, Integer.parseInt(propiedades.getProperty("ES_PORT")));
    // Build the topology
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("source", spout, 1);
    builder.setBolt("streamer", new Tuple2Stream(Integer.parseInt(propiedades.getProperty("METADATA_SINCRO_SECS")), propiedades.getProperty("GET_METADATA_SERVICE_URL"), propiedades.getProperty("METADATA_FILE_JSON")), 1).shuffleGrouping("source");
    builder.setBolt("SiddhiBolt", new SiddhiBolt(propiedades.getProperty("PUT_METRICA_SERVICE_URL"), propiedades.getProperty("DELETE_METRICA_SERVICE_URL"), propiedades.getProperty("ELASTICSEARCH_OUTPUT").toLowerCase().equals("true")), 1).shuffleGrouping("streamer");
    if (propiedades.getProperty("ELASTICSEARCH_OUTPUT").toLowerCase().equals("true")) {
        builder.setBolt("ESBolt", new ElasticSearchBolt(new DefaultTupleMapper()), 1).shuffleGrouping("SiddhiBolt");
    }
    if (propiedades.getProperty("ECHO_OUTPUT").toLowerCase().equals("true")) {
        builder.setBolt("echo", new EchoBolt(), 1).shuffleGrouping("SiddhiBolt", "echo");
    }
    // Deploy the topology
    try {
        if (propiedades.getProperty("STORM_CLUSTER").equals("local")) {
            // local
            LocalCluster cluster = new LocalCluster();
            conf.setMaxSpoutPending(Integer.parseInt(propiedades.getProperty("STORM_MAX_SPOUT_PENDING")));
            cluster.submitTopology(propiedades.getProperty("STORM_TOPOLOGY_NAME"), conf, builder.createTopology());
        } else {
            //Server cluster 
            conf.setNumWorkers(Integer.parseInt(propiedades.getProperty("STORM_NUM_WORKERS")));
            conf.setMaxSpoutPending(Integer.parseInt(propiedades.getProperty("STORM_MAX_SPOUT_PENDING")));
            StormSubmitter.submitTopology(propiedades.getProperty("STORM_TOPOLOGY_NAME"), conf, builder.createTopology());
        }
    } catch (NumberFormatException | AlreadyAliveException | InvalidTopologyException e) {
        e.printStackTrace();
    }
}
Example 2
Project: storm-spring-master  File: RichBolt.java
public void addToTopology(final TopologyBuilder builder) {
    BoltDeclarer boltDeclarer;
    if (parallelismHint == null) {
        boltDeclarer = builder.setBolt(componentId, stormBolt);
    } else {
        boltDeclarer = builder.setBolt(componentId, stormBolt, parallelismHint);
    }
    addBoltGroupingsToBolt(boltDeclarer);
    addConfigToComponent(boltDeclarer);
}
Example 3
Project: Storm08-starter-master  File: WindowTopology.java
//    public static int BOOK_PER_SEC = 100;
private static void slidingWC(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    if (args != null && args.length > 0) {
        int nodes = 10;
        int bookPerSec = Integer.parseInt(args[1]);
        int blocks = Integer.parseInt(args[2]);
        //            builder.setSpout("SenSpout", new SentenceSource(), nodes);
        builder.setSpout("SenSpout", new BookSource(bookPerSec, blocks), nodes);
        builder.setBolt("GrepBolt", new GrepBolt(), nodes * 2).localOrShuffleGrouping("SenSpout");
        builder.setBolt("GrepResultBolt", new GrepResultBolt(), nodes).fieldsGrouping("GrepBolt", new Fields("word"));
        Config conf = new Config();
        //            conf.setDebug(true);
        conf.setNumWorkers(nodes * 4);
        StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
    } else {
        int nodes = 1;
        builder.setSpout("SenSpout", new SentenceSource(), nodes);
        //            builder.setSpout("SenSpout", new BookSource(), nodes);
        builder.setBolt("GrepBolt", new GrepBolt(), nodes).localOrShuffleGrouping("SenSpout");
        builder.setBolt("GrepResultBolt", new GrepResultBolt(), 1).fieldsGrouping("GrepBolt", new Fields("word"));
        Config conf = new Config();
        conf.setDebug(true);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("WindowTopology", conf, builder.createTopology());
        Utils.sleep(100000);
        cluster.killTopology("WindowTopology");
        cluster.shutdown();
    }
}
Example 4
Project: storm-hack-master  File: WordCountTopology.java
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("word", new WordSpout(), 2);
    builder.setBolt("count", new ConsumerBolt(), 3).fieldsGrouping("word", new Fields("word"));
    Config conf = new Config();
    conf.setDebug(true);
    if (args != null && args.length > 0) {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
    } else {
        conf.setMaxTaskParallelism(3);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("word-count", conf, builder.createTopology());
        Thread.sleep(10000);
        cluster.shutdown();
    }
}
Example 5
Project: twitter-storm-demo-master  File: RankingTopology.java
public static StormTopology buildTopology(final List<String> urls) {
    final TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(LOGGING_SPOUT, new UrlSpout(urls), 1);
    builder.setBolt(DISPATCHER_BOLT, new DispatcherBolt(), 2).shuffleGrouping(LOGGING_SPOUT);
    builder.setBolt(COUNTING_BOLT, new CountingBolt(), 2).fieldsGrouping(DISPATCHER_BOLT, new Fields("url"));
    builder.setBolt(RANKING_LIST_BOLT, new RankingListBolt(), 1).globalGrouping(COUNTING_BOLT);
    return builder.createTopology();
}
Example 6
Project: C2-Github-commit-count-master  File: TopologyTest.java
@Override
public void run(ILocalCluster cluster) {
    MockedSources mockedSources = new MockedSources();
    mockedSources.addMockData("commit-feed-listener", new Values("12345 test@manning.com"));
    Config config = new Config();
    config.setDebug(true);
    CompleteTopologyParam topologyParam = new CompleteTopologyParam();
    topologyParam.setMockedSources(mockedSources);
    topologyParam.setStormConf(config);
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("commit-feed-listener", new CommitFeedListener());
    builder.setBolt("email-extractor", new EmailExtractor()).shuffleGrouping("commit-feed-listener");
    builder.setBolt("email-counter", new EmailCounter()).fieldsGrouping("email-extractor", new Fields("email"));
    StormTopology topology = builder.createTopology();
    Map result = Testing.completeTopology(cluster, topology, topologyParam);
    assertTrue(Testing.multiseteq(new Values(new Values("12345 test@manning.com")), Testing.readTuples(result, "commit-feed-listener")));
    assertTrue(Testing.multiseteq(new Values(new Values("test@manning.com")), Testing.readTuples(result, "email-extractor")));
    assertTrue(Testing.multiseteq(new Values(), Testing.readTuples(result, "email-counter")));
}
Example 7
Project: C3-Heatmap-master  File: StormTopologyBuilder.java
public static StormTopology buildWithSpout(String spoutId, BaseRichSpout spout) {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(spoutId, spout);
    builder.setBolt(GEOCODE_LOOKUP_ID, new GeocodeLookup()).shuffleGrouping(spoutId);
    builder.setBolt(HEATMAP_BUILDER_ID, new HeatMapBuilder()).addConfiguration(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, 3).globalGrouping(GEOCODE_LOOKUP_ID);
    builder.setBolt(PERSISTOR_ID, new Persistor()).shuffleGrouping(HEATMAP_BUILDER_ID);
    return builder.createTopology();
}
Example 8
Project: cep-master  File: TestTopology.java
private void setupKafkaSpoutAndSubmitTopology() throws InterruptedException {
    BrokerHosts brokerHosts = new ZkHosts("localhost:2000");
    SpoutConfig kafkaConfig = new SpoutConfig(brokerHosts, TOPIC_NAME, "", "storm");
    kafkaConfig.forceStartOffsetTime(readFromMode);
    kafkaConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("words", new KafkaSpout(kafkaConfig), 1);
    VerboseCollectorBolt bolt = new VerboseCollectorBolt(expectedNumMessages);
    builder.setBolt("print", bolt).shuffleGrouping("words");
    Config config = new Config();
    cluster.submitTopology("kafka-test", config, builder.createTopology());
}
Example 9
Project: eswaraj-master  File: TopologyMain.java
public static void main(String[] args) throws InterruptedException {
    //Topology definition
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("word-reader", new TestSpout());
    builder.setBolt("word-normalizer", new TestBolt()).shuffleGrouping("word-reader");
    builder.setBolt("word-counter", new PrintTestBolt(), 1).fieldsGrouping("word-normalizer", new Fields("word"));
    Logger logger = LoggerFactory.getLogger("test");
    logger.info("Test");
    //Configuration
    Config conf = new Config();
    conf.put("wordsFile", "/Users/ravi/Documents/github/others/examples-ch02-getting_started/src/main/resources/words.txt");
    conf.setDebug(false);
    //Topology run
    conf.put(Config.TOPOLOGY_MAX_SPOUT_PENDING, 1);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("Getting-Started-Toplogie", conf, builder.createTopology());
    logger.info("waiting for 10 seconds to finish");
    Thread.sleep(10000);
    logger.info("shutdown started");
    cluster.shutdown();
}
Example 10
Project: jstorm-master  File: TransactionalTopologyBuilder.java
public TopologyBuilder buildTopologyBuilder() {
    // Transaction is not compatible with jstorm batch mode(task.batch.tuple)
    // so we close batch mode via system property
    System.setProperty(ConfigExtension.TASK_BATCH_TUPLE, "false");
    String coordinator = _spoutId + "/coordinator";
    TopologyBuilder builder = new TopologyBuilder();
    SpoutDeclarer declarer = builder.setSpout(coordinator, new TransactionalSpoutCoordinator(_spout));
    for (Map conf : _spoutConfs) {
        declarer.addConfigurations(conf);
    }
    declarer.addConfiguration(Config.TOPOLOGY_TRANSACTIONAL_ID, _id);
    BoltDeclarer emitterDeclarer = builder.setBolt(_spoutId, new CoordinatedBolt(new TransactionalSpoutBatchExecutor(_spout), null, null), _spoutParallelism).allGrouping(coordinator, TransactionalSpoutCoordinator.TRANSACTION_BATCH_STREAM_ID).addConfiguration(Config.TOPOLOGY_TRANSACTIONAL_ID, _id);
    if (_spout instanceof ICommitterTransactionalSpout) {
        emitterDeclarer.allGrouping(coordinator, TransactionalSpoutCoordinator.TRANSACTION_COMMIT_STREAM_ID);
    }
    for (String id : _bolts.keySet()) {
        Component component = _bolts.get(id);
        Map<String, SourceArgs> coordinatedArgs = new HashMap<String, SourceArgs>();
        for (String c : componentBoltSubscriptions(component)) {
            coordinatedArgs.put(c, SourceArgs.all());
        }
        IdStreamSpec idSpec = null;
        if (component.committer) {
            idSpec = IdStreamSpec.makeDetectSpec(coordinator, TransactionalSpoutCoordinator.TRANSACTION_COMMIT_STREAM_ID);
        }
        BoltDeclarer input = builder.setBolt(id, new CoordinatedBolt(component.bolt, coordinatedArgs, idSpec), component.parallelism);
        for (Map conf : component.componentConfs) {
            input.addConfigurations(conf);
        }
        for (String c : componentBoltSubscriptions(component)) {
            input.directGrouping(c, Constants.COORDINATED_STREAM_ID);
        }
        for (InputDeclaration d : component.declarations) {
            d.declare(input);
        }
        if (component.committer) {
            input.allGrouping(coordinator, TransactionalSpoutCoordinator.TRANSACTION_COMMIT_STREAM_ID);
        }
    }
    return builder;
}
Example 11
Project: netty-storm-master  File: NettyTopology.java
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = createTopology();
    Config config = new Config();
    config.setDebug(true);
    //config.setMaxSpoutPending(1);
    try {
        StormRunner.runTopologyLocally(builder.createTopology(), "NettySpoutTest", config, 0);
    } catch (InterruptedException e) {
        System.out.println("\n\n Execution interrupted. \n\n");
    }
}
Example 12
Project: SE252-JAN2015-master  File: WordCountTopology.java
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("sentences", new RandomSentenceSpout(), 1);
    //		UpdatableBoltLocationEntry updatableEntry = new UpdatableBoltLocationEntry("1", "v1", "in.dream_lab.stream.storm.samples.wordcount.bolts.SplitSentence");
    //		IUpdatableBolt b1 = UpdateFactory.newUpdatableBolt(updatableEntry, "topoName", "split");
    //		builder.setBolt("split", b1, 1).shuffleGrouping("sentences");
    builder.setBolt("split", new SplitSentence(), 1).shuffleGrouping("sentences");
    builder.setBolt("count", new WordCount(), 1).fieldsGrouping("split", new Fields("word"));
    //System.out.println("SIGNAME IS " + b1.getSignalName());
    Config conf = new Config();
    conf.setDebug(true);
    if (args != null && args.length > 0) {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
    } else {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("test", conf, builder.createTopology());
        Utils.sleep(10000);
        cluster.killTopology("test");
        cluster.shutdown();
    }
}
Example 13
Project: SensorStorm-master  File: Main.java
public static void main(String[] args) throws Exception {
    Config conf = new Config();
    conf.put(Config.TOPOLOGY_DEBUG, false);
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new SensorStormSpout(conf, new BlockFetcher(), true, 1000), 1);
    builder.setBolt("average", new SensorStormBolt(conf, 1000, WindowBatcher.class, AverageOperation.class, "sensorId"), 2).customGrouping("spout", new SensorStormFieldGrouping("sensorId"));
    builder.setBolt("printspeed", new SensorStormBolt(conf, 1000, PrintParticleSpeedOperation.class, null), 2).customGrouping("average", new SensorStormShuffleGrouping());
    builder.setBolt("printparticle", new SensorStormBolt(conf, 1000, PrintOperation.class, null), 2).customGrouping("printspeed", new SensorStormShuffleGrouping());
    if ((args != null) && (args.length > 0)) {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
    } else {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("test", conf, builder.createTopology());
        Utils.sleep(10000);
        cluster.killTopology("test");
        cluster.shutdown();
    }
}
Example 14
Project: storm-esper-master  File: TestTopologyBuilder.java
public StormTopology build() {
    final TopologyBuilder builder = new TopologyBuilder();
    for (Map.Entry<String, IRichSpout> spoutEntry : spoutMap.entrySet()) {
        builder.setSpout(spoutEntry.getKey(), spoutEntry.getValue());
    }
    for (Map.Entry<String, IRichBolt> boltEntry : boltMap.entrySet()) {
        InputDeclarer declarer = builder.setBolt(boltEntry.getKey(), boltEntry.getValue());
        List<Connection> connectionsForTarget = connections.get(boltEntry.getKey());
        if (connectionsForTarget != null) {
            for (Connection connection : connectionsForTarget) {
                declarer = declarer.shuffleGrouping(connection.getSourceComponent(), connection.getSourceStream());
            }
        }
    }
    return builder.createTopology();
}
Example 15
Project: storm-hackathon-master  File: LinkLogger.java
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {
    TopologyBuilder builder = new TopologyBuilder();
    // Configuration
    Config config = new Config();
    SpoutConfig spoutConfig = new SpoutConfig(Common.getKafkaHosts(), "wikipedia_articles", "/kafkastorm", teamPrefix("wikipedia-state"));
    spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    // This tells the spout to start at the very beginning of the data stream
    // If you just want to resume where you left off, remove this line
    spoutConfig.forceStartOffsetTime(-2);
    builder.setSpout("articles", new KafkaSpout(spoutConfig));
    builder.setBolt("link-logger", new LinkExtractorBolt()).shuffleGrouping("articles");
    // Launch
    HackReduceStormSubmitter.submitTopology("wikipedia-logger", config, builder.createTopology());
}
Example 16
Project: storm-hbase-master  File: HBaseCountersTopology.java
/**
   * @param args
   */
public static void main(String[] args) {
    TopologyBuilder builder = new TopologyBuilder();
    // Add test spout
    builder.setSpout("spout", new TestSpout(), 1);
    // Build TupleTableConfig
    TupleTableConfig config = new TupleTableConfig("shorturl", "shortid");
    config.setBatch(false);
    /*
     * By default the HBaseCountersBolt will use the tuple output fields value
     * to set the CQ name. For example if the 'date' output field exists in the
     * tuple then its value (e.g. "YYYYMMDD") will be used to set the counters
     * CQ. However, the 'clicks' output field does not exist in the tuple so in
     * this case the counters CQ will be set to the given field name 'clicks'.
     */
    config.addColumn("data", "clicks");
    config.addColumn("daily", "date");
    // Add HBaseBolt
    builder.setBolt("hbase-counters", new HBaseCountersBolt(config), 1).shuffleGrouping("spout");
    Config stormConf = new Config();
    stormConf.setDebug(true);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("hbase-example", stormConf, builder.createTopology());
    Utils.sleep(10000);
    cluster.shutdown();
}
Example 17
Project: storm-kafka-0.8-plus-test-master  File: KafkaSpoutTestTopology.java
public StormTopology buildTopology() {
    SpoutConfig kafkaConfig = new SpoutConfig(brokerHosts, "storm-sentence", "", "storm");
    kafkaConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("words", new KafkaSpout(kafkaConfig), 10);
    builder.setBolt("print", new PrinterBolt()).shuffleGrouping("words");
    return builder.createTopology();
}
Example 18
Project: storm-kafka-starter-master  File: WordCountTopology.java
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new RandomSentenceSpout(), 5);
    builder.setBolt("split", new SplitSentence(), 8).shuffleGrouping("spout");
    builder.setBolt("count", new WordCount(), 12).fieldsGrouping("split", new Fields("word"));
    Config conf = new Config();
    conf.setDebug(true);
    if (args != null && args.length > 0) {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
    } else {
        conf.setMaxTaskParallelism(3);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("word-count", conf, builder.createTopology());
        Thread.sleep(10000);
        cluster.shutdown();
    }
}
Example 19
Project: storm-kestrel-master  File: TestTopology.java
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    KestrelThriftSpout spout = new KestrelThriftSpout("localhost", 2229, "test", new StringScheme());
    builder.setSpout("spout", spout).setDebug(true);
    builder.setBolt("bolt", new FailEveryOther()).shuffleGrouping("spout");
    LocalCluster cluster = new LocalCluster();
    Config conf = new Config();
    cluster.submitTopology("test", conf, builder.createTopology());
    Thread.sleep(600000);
}
Example 20
Project: storm-solr-master  File: EventsimTopology.java
public StormTopology build(StreamingApp app) throws Exception {
    SpringSpout eventsimSpout = new SpringSpout("eventsimSpout", spoutFields);
    SpringBolt collectionPerTimeFrameSolrBolt = new SpringBolt("collectionPerTimeFrameSolrBoltAction", app.tickRate("collectionPerTimeFrameSolrBoltAction"));
    // Send all docs for the same hash range to the same bolt instance,
    // which allows us to use a streaming approach to send docs to the leader
    int numShards = Integer.parseInt(String.valueOf(app.getStormConfig().get("spring.eventsimNumShards")));
    HashRangeGrouping hashRangeGrouping = new HashRangeGrouping(app.getStormConfig(), numShards);
    int tasksPerShard = hashRangeGrouping.getNumShards() * 2;
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("eventsimSpout", eventsimSpout, app.parallelism("eventsimSpout"));
    builder.setBolt("collectionPerTimeFrameSolrBolt", collectionPerTimeFrameSolrBolt, tasksPerShard).customGrouping("eventsimSpout", hashRangeGrouping);
    return builder.createTopology();
}
Example 21
Project: storm-starter-master  File: WordCountTopology.java
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new RandomSentenceSpout(), 5);
    builder.setBolt("split", new SplitSentence(), 8).shuffleGrouping("spout");
    builder.setBolt("count", new WordCount(), 12).fieldsGrouping("split", new Fields("word"));
    Config conf = new Config();
    conf.setDebug(true);
    if (args != null && args.length > 0) {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
    } else {
        conf.setMaxTaskParallelism(3);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("word-count", conf, builder.createTopology());
        Thread.sleep(10000);
        cluster.shutdown();
    }
}
Example 22
Project: storm-unshortening-master  File: UnshortenTopology.java
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new TwitterSpout(), 1);
    builder.setBolt("unshortenBolt", new UnshortenBolt(), 4).shuffleGrouping("spout");
    builder.setBolt("dbBolt", new CassandraBolt(), 2).shuffleGrouping("unshortenBolt");
    Config conf = new Config();
    conf.setDebug(false);
    //submit it to the cluster, or submit it locally
    if (args != null && args.length > 0) {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
    } else {
        conf.setMaxTaskParallelism(10);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("unshortening", conf, builder.createTopology());
        Thread.sleep(10000);
        cluster.shutdown();
    }
}
Example 23
Project: stormkafka-master  File: MyTopology.java
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new NewTwitterSpout(), 5);
    builder.setBolt("hashtag", new HashTagExtractionBolt(), 4).shuffleGrouping("spout");
    //    builder.setBolt("split", new SplitSentence(), 8).shuffleGrouping("spout");
    //    builder.setBolt("count", new WordCount(), 12).fieldsGrouping("split", new Fields("word"));
    Config conf = new Config();
    conf.setDebug(true);
    if (args != null && args.length > 0) {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
    } else {
        conf.setMaxTaskParallelism(3);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("my-topology", conf, builder.createTopology());
        Thread.sleep(10000);
        cluster.shutdown();
    }
}
Example 24
Project: strata2012-master  File: Topology.java
public static void main(String[] args) throws InterruptedException, AlreadyAliveException, InvalidTopologyException {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("tweets-collector", new ApiStreamingSpout(), 1);
    builder.setBolt("data-extractor", new TwitterDataExtractor()).shuffleGrouping("tweets-collector");
    builder.setBolt("tweets-saver", new TwitterHashtagsSaver()).shuffleGrouping("data-extractor");
    Config conf = new Config();
    int i = 0;
    conf.put("redisHost", args[i++]);
    conf.put("redisPort", new Integer(args[i++]));
    conf.put("track", args[i++]);
    conf.put("user", args[i++]);
    conf.put("password", args[i++]);
    i++;
    if (args.length <= i || args[i].equals("local")) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("twitter-hashtag-summarizer", conf, builder.createTopology());
    } else {
        StormSubmitter.submitTopology("twitter-hashtag-summarizer", conf, builder.createTopology());
    }
}
Example 25
Project: C6-Flash-sale-recommender-master  File: FlashSaleTopologyBuilder.java
public static StormTopology build() {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(CUSTOMER_RETRIEVAL_SPOUT, new CustomerRetrievalSpout()).setMaxSpoutPending(250);
    builder.setBolt(FIND_RECOMMENDED_SALES_FAST, new FindRecommendedSales(), 16).addConfiguration("timeout", 150).setNumTasks(16).shuffleGrouping(CUSTOMER_RETRIEVAL_SPOUT);
    builder.setBolt(FIND_RECOMMENDED_SALES_SLOW, new FindRecommendedSales(), 16).addConfiguration("timeout", 1500).setNumTasks(16).shuffleGrouping(FIND_RECOMMENDED_SALES_FAST, FindRecommendedSales.RETRY_STREAM).shuffleGrouping(FIND_RECOMMENDED_SALES_SLOW, FindRecommendedSales.RETRY_STREAM);
    builder.setBolt(LOOKUP_SALES_DETAILS, new LookupSalesDetails(), 16).setNumTasks(16).shuffleGrouping(FIND_RECOMMENDED_SALES_FAST, FindRecommendedSales.SUCCESS_STREAM).shuffleGrouping(FIND_RECOMMENDED_SALES_SLOW, FindRecommendedSales.SUCCESS_STREAM);
    builder.setBolt(SAVE_RECOMMENDED_SALES, new SaveRecommendedSales(), 4).setNumTasks(4).shuffleGrouping(LOOKUP_SALES_DETAILS);
    return builder.createTopology();
}
Example 26
Project: datastax-storm-cql3-demo-master  File: Main.java
public static void main(String[] args) throws Exception {
    Config config = new Config();
    HashMap<String, Object> clientConfig = new HashMap<String, Object>();
    String nodes = PropertyHelper.getProperty("contactPoints", "localhost");
    clientConfig.put("cassandra.nodes", nodes);
    clientConfig.put("cassandra.keyspace", "storm_demo_cql3");
    RiskSpout riskSprout = new RiskSpout();
    RiskNameAggregator riskAggregator = new RiskNameAggregator();
    RiskHierarchyAggregator riskHierarchyAggregator = new RiskHierarchyAggregator();
    // create a CassandraBolt that writes to the "stormcf" column
    // family and uses the Tuple field "word" as the row key
    CassandraCqlBolt cassandraBolt = new CassandraCqlBolt();
    // setup topology:
    // wordSpout ==> countBolt ==> cassandraBolt
    TopologyBuilder builder = new TopologyBuilder();
    //Start with SPOUT
    builder.setSpout(RISK_SPOUT, riskSprout, 1);
    //Send to 2 bolts
    builder.setBolt(HIERARCHY_BOLT, riskHierarchyAggregator, 1).fieldsGrouping(RISK_SPOUT, new Fields("risk_sensitivity"));
    builder.setBolt(AGGREGATE_BOLT, riskAggregator, 1).fieldsGrouping(RISK_SPOUT, new Fields("risk_sensitivity"));
    //Both bolts use the writer bolt
    builder.setBolt(CASSANDRA_BOLT, cassandraBolt, 1).allGrouping(AGGREGATE_BOLT).allGrouping(HIERARCHY_BOLT);
    if (args.length == 0) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("test", clientConfig, builder.createTopology());
        // Run locally for 60 seconds
        Thread.sleep(60 * 1000);
        cluster.killTopology("test");
        cluster.shutdown();
    } else {
        config.setNumWorkers(3);
        StormSubmitter.submitTopology(args[0], clientConfig, builder.createTopology());
    }
    Thread.sleep(5 * 1000);
    System.exit(0);
}
Example 27
Project: DEBS-2015-Realtime-Analytics-Patterns-master  File: WordCountTopology.java
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("line", new LineStreamer(), 5);
    builder.setBolt("split", new SplitSentence(), 8).shuffleGrouping("line");
    builder.setBolt("count", new WordCount(), 12).fieldsGrouping("split", new Fields("word"));
    Config conf = new Config();
    conf.setDebug(false);
    if (args != null && args.length > 0) {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
    } else {
        conf.setMaxTaskParallelism(3);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("word-count", conf, builder.createTopology());
        Thread.sleep(10000);
        cluster.shutdown();
    }
}
Example 28
Project: flink-perf-master  File: ThroughputHostsTracking.java
public static void main(String[] args) throws Exception {
    ParameterTool pt = ParameterTool.fromArgs(args);
    int par = pt.getInt("para");
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("source0", new Generator(pt), pt.getInt("sourceParallelism"));
    int i = 0;
    for (; i < pt.getInt("repartitions", 1) - 1; i++) {
        System.out.println("adding source" + i + " --> source" + (i + 1));
        builder.setBolt("source" + (i + 1), new RepartPassThroughBolt(pt), pt.getInt("sinkParallelism")).fieldsGrouping("source" + i, new Fields("id"));
    }
    System.out.println("adding final source" + i + " --> sink");
    builder.setBolt("sink", new Sink(pt), pt.getInt("sinkParallelism")).fieldsGrouping("source" + i, new Fields("id"));
    Config conf = new Config();
    conf.setDebug(false);
    if (!pt.has("local")) {
        conf.setNumWorkers(par);
        StormSubmitter.submitTopologyWithProgressBar("throughput-" + pt.get("name", "no_name"), conf, builder.createTopology());
    } else {
        conf.setMaxTaskParallelism(par);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("throughput", conf, builder.createTopology());
        Thread.sleep(300000);
        cluster.shutdown();
    }
}
Example 29
Project: flume2storm-master  File: IntegrationTest.java
@Override
public void run(final ILocalCluster cluster) throws Exception {
    // Building the test topology
    final TopologyBuilder builder = new TopologyBuilder();
    Set<F2SEventEmitter> eventEmitters = new HashSet<F2SEventEmitter>();
    eventEmitters.add(new BasicF2SEventEmitter());
    FlumeSpout<CP, SP> flumeSpout = new FlumeSpout<CP, SP>(eventEmitters, config.getConfiguration(SPOUT_CONFIG));
    builder.setSpout("FlumeSpout", flumeSpout, 2);
    final TestBolt psBolt = new TestBolt();
    builder.setBolt("TestBolt", psBolt, 2).shuffleGrouping("FlumeSpout");
    // Starting topology
    final Config conf = new Config();
    conf.setNumWorkers(4);
    conf.registerSerialization(F2SEvent.class, F2SEventSerializer.class);
    conf.setFallBackOnJavaSerialization(false);
    cluster.submitTopology(TEST_TOPOLOGY_NAME, conf, builder.createTopology());
    // Creating Flume Channel
    final PseudoTxnMemoryChannel channel = new PseudoTxnMemoryChannel();
    channel.configure(configToContext(config.getConfiguration(CHANNEL_CONFIG)));
    // Creating Flume sinks
    final StormSink<CP, ES> sink1 = new StormSink<CP, ES>();
    sink1.configure(configToContext(config.getConfiguration(SINK1_CONFIG)));
    sink1.setChannel(channel);
    final StormSink<CP, ES> sink2 = new StormSink<CP, ES>();
    sink2.configure(configToContext(config.getConfiguration(SINK2_CONFIG)));
    sink2.setChannel(channel);
    // Creating the Flume sink runner and processor
    LoadBalancingSinkProcessor sinkProcessor = new LoadBalancingSinkProcessor();
    sinkProcessor.setSinks(ImmutableList.of((Sink) sink1, (Sink) sink2));
    sinkProcessor.configure(configToContext(config.getConfiguration(SINK_PROCESSOR_CONFIG)));
    final SinkRunner sinkRunner = new SinkRunner(sinkProcessor);
    sinkRunner.start();
    // Thread to send the events once both Flume and Storm are ready
    Thread senderThread = new Thread(new Runnable() {

        @Override
        public void run() {
            try {
                // Wait until the topology is ready
                LOG.info("Waiting that receptors connect...");
                if (!TestUtils.waitFor(new TestCondition() {

                    @Override
                    public boolean evaluate() {
                        return sink1.getEventSernderStats().getNbClients() == 2 && sink2.getEventSernderStats().getNbClients() == 2;
                    }
                }, TEST_TIMEOUT)) {
                    Assert.fail("Receptors failed to connect to senders in time (" + TEST_TIMEOUT + " ms)");
                }
                LOG.info("Receptors connected... sending events");
                // Load balancing events between the 2 event senders
                for (F2SEvent event : eventsToSent) {
                    channel.put(EventBuilder.withBody(event.getBody(), event.getHeaders()));
                }
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }
    });
    senderThread.start();
    // Wait until all events have been received
    if (!TestUtils.waitFor(new TestCondition() {

        @Override
        public boolean evaluate() {
            LOG.debug("Received so far: " + MemoryStorage.getInstance().getReceivedEvents().size());
            return MemoryStorage.getInstance().getReceivedEvents().size() >= NB_EVENTS;
        }
    }, TEST_TIMEOUT)) {
        Assert.fail("Failed to receive all events in time (" + TEST_TIMEOUT + " ms)");
    }
    // Testing results:
    // Programming note: I used SortedSet and iterate over the 2 sets to
    // speed up the comparison
    assertThat(MemoryStorage.getInstance().getReceivedEvents().size()).isEqualTo(NB_EVENTS);
    Iterator<F2SEvent> it1 = eventsToSent.iterator();
    Iterator<F2SEvent> it2 = MemoryStorage.getInstance().getReceivedEvents().iterator();
    while (it1.hasNext() && it2.hasNext()) {
        assertThat(it1.next()).isEqualTo(it2.next());
    }
    // Testing Flume
    for (StormSink<?, ?> sink : new StormSink<?, ?>[] { sink1, sink2 }) {
        assertThat(sink.getEventSernderStats().getNbEventsFailed()).isEqualTo(0);
        assertThat(sink.getEventSernderStats().getNbEventsIn()).isEqualTo(HALF_NB_EVENTS);
        assertThat(sink.getEventSernderStats().getNbEventsOut()).isEqualTo(HALF_NB_EVENTS);
        assertThat(sink.getSinkCounter().getEventDrainAttemptCount()).isEqualTo(HALF_NB_EVENTS);
        assertThat(sink.getSinkCounter().getEventDrainSuccessCount()).isEqualTo(HALF_NB_EVENTS);
    }
    // Stopping topology
    LOG.info("Killing topology...");
    KillOptions killOptions = new KillOptions();
    killOptions.set_wait_secs(5);
    cluster.killTopologyWithOpts(TEST_TOPOLOGY_NAME, killOptions);
    TestUtils.waitFor(new TestCondition() {

        @Override
        public boolean evaluate() {
            try {
                return cluster.getClusterInfo().get_topologies().isEmpty();
            } catch (Exception e) {
                return false;
            }
        }
    }, TEST_TIMEOUT);
    // Stopping Flume components
    LOG.info("Stopping Flume components...");
    sinkRunner.stop();
    TestUtils.waitFor(new TestCondition() {

        @Override
        public boolean evaluate() {
            return sinkRunner.getLifecycleState() == LifecycleState.STOP;
        }
    }, TEST_TIMEOUT);
    LOG.info("Integration test done!");
}
Example 30
Project: galaxy-sdk-java-master  File: SimpleEMQTopology.java
private StormTopology buildTopology() {
    TopologyBuilder topologyBuilder = new TopologyBuilder();
    EMQSpout spout = new EMQSpout(getEMQConfig());
    LogBolt bolt = new LogBolt();
    topologyBuilder.setSpout("spout", spout, 4);
    topologyBuilder.setBolt("bolt", bolt).shuffleGrouping("spout");
    return topologyBuilder.createTopology();
}
Example 31
Project: jst-master  File: TransactionalTopologyBuilder.java
public TopologyBuilder buildTopologyBuilder() {
    // Transaction is not compatible with jstorm batch mode(task.batch.tuple)
    // so we close batch mode via system property
    System.setProperty(ConfigExtension.TASK_BATCH_TUPLE, "false");
    String coordinator = _spoutId + "/coordinator";
    TopologyBuilder builder = new TopologyBuilder();
    SpoutDeclarer declarer = builder.setSpout(coordinator, new TransactionalSpoutCoordinator(_spout));
    for (Map conf : _spoutConfs) {
        declarer.addConfigurations(conf);
    }
    declarer.addConfiguration(Config.TOPOLOGY_TRANSACTIONAL_ID, _id);
    BoltDeclarer emitterDeclarer = builder.setBolt(_spoutId, new CoordinatedBolt(new TransactionalSpoutBatchExecutor(_spout), null, null), _spoutParallelism).allGrouping(coordinator, TransactionalSpoutCoordinator.TRANSACTION_BATCH_STREAM_ID).addConfiguration(Config.TOPOLOGY_TRANSACTIONAL_ID, _id);
    if (_spout instanceof ICommitterTransactionalSpout) {
        emitterDeclarer.allGrouping(coordinator, TransactionalSpoutCoordinator.TRANSACTION_COMMIT_STREAM_ID);
    }
    for (String id : _bolts.keySet()) {
        Component component = _bolts.get(id);
        Map<String, SourceArgs> coordinatedArgs = new HashMap<>();
        for (String c : componentBoltSubscriptions(component)) {
            coordinatedArgs.put(c, SourceArgs.all());
        }
        IdStreamSpec idSpec = null;
        if (component.committer) {
            idSpec = IdStreamSpec.makeDetectSpec(coordinator, TransactionalSpoutCoordinator.TRANSACTION_COMMIT_STREAM_ID);
        }
        BoltDeclarer input = builder.setBolt(id, new CoordinatedBolt(component.bolt, coordinatedArgs, idSpec), component.parallelism);
        for (Map conf : component.componentConfs) {
            input.addConfigurations(conf);
        }
        for (String c : componentBoltSubscriptions(component)) {
            input.directGrouping(c, Constants.COORDINATED_STREAM_ID);
        }
        for (InputDeclaration d : component.declarations) {
            d.declare(input);
        }
        if (component.committer) {
            input.allGrouping(coordinator, TransactionalSpoutCoordinator.TRANSACTION_COMMIT_STREAM_ID);
        }
    }
    return builder;
}
Example 32
Project: mdrill-master  File: TransactionalTopologyBuilder.java
public TopologyBuilder buildTopologyBuilder() {
    String coordinator = _spoutId + "/coordinator";
    TopologyBuilder builder = new TopologyBuilder();
    SpoutDeclarer declarer = builder.setSpout(coordinator, new TransactionalSpoutCoordinator(_spout));
    for (Map conf : _spoutConfs) {
        declarer.addConfigurations(conf);
    }
    declarer.addConfiguration(Config.TOPOLOGY_TRANSACTIONAL_ID, _id);
    BoltDeclarer emitterDeclarer = builder.setBolt(_spoutId, new CoordinatedBolt(new TransactionalSpoutBatchExecutor(_spout), null, null), _spoutParallelism).allGrouping(coordinator, TransactionalSpoutCoordinator.TRANSACTION_BATCH_STREAM_ID).addConfiguration(Config.TOPOLOGY_TRANSACTIONAL_ID, _id);
    if (_spout instanceof ICommitterTransactionalSpout) {
        emitterDeclarer.allGrouping(coordinator, TransactionalSpoutCoordinator.TRANSACTION_COMMIT_STREAM_ID);
    }
    for (String id : _bolts.keySet()) {
        Component component = _bolts.get(id);
        Map<String, SourceArgs> coordinatedArgs = new HashMap<String, SourceArgs>();
        for (String c : componentBoltSubscriptions(component)) {
            coordinatedArgs.put(c, SourceArgs.all());
        }
        IdStreamSpec idSpec = null;
        if (component.committer) {
            idSpec = IdStreamSpec.makeDetectSpec(coordinator, TransactionalSpoutCoordinator.TRANSACTION_COMMIT_STREAM_ID);
        }
        BoltDeclarer input = builder.setBolt(id, new CoordinatedBolt(component.bolt, coordinatedArgs, idSpec), component.parallelism);
        for (Map conf : component.componentConfs) {
            input.addConfigurations(conf);
        }
        for (String c : componentBoltSubscriptions(component)) {
            input.directGrouping(c, Constants.COORDINATED_STREAM_ID);
        }
        for (InputDeclaration d : component.declarations) {
            d.declare(input);
        }
        if (component.committer) {
            input.allGrouping(coordinator, TransactionalSpoutCoordinator.TRANSACTION_COMMIT_STREAM_ID);
        }
    }
    return builder;
}
Example 33
Project: Metamorphosis-master  File: TestTopology.java
public static void main(String[] args) {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new MetaSpout(initMetaConfig(), new ConsumerConfig("storm-spout"), new StringScheme()), 10);
    builder.setBolt("bolt", new FailEveryOther()).shuffleGrouping("spout");
    Config conf = new Config();
    // Set the consume topic
    conf.put(MetaSpout.TOPIC, "neta-test");
    // Set the max buffer size in bytes to fetch messages.
    conf.put(MetaSpout.FETCH_MAX_SIZE, 1024 * 1024);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("test", conf, builder.createTopology());
}
Example 34
Project: real-time-statsS-pilot-master  File: ThroughputTest.java
//storm jar storm-benchmark-0.0.1-SNAPSHOT-standalone.jar storm.benchmark.ThroughputTest demo 100 8 8 8 10000
public static void main(String[] args) throws Exception {
    int size = Integer.parseInt(args[1]);
    int workers = Integer.parseInt(args[2]);
    int spout = Integer.parseInt(args[3]);
    int bolt = Integer.parseInt(args[4]);
    int maxPending = Integer.parseInt(args[5]);
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new GenSpout(size), spout);
    //        builder.setBolt("count", new CountBolt(), bolt)
    //                .fieldsGrouping("bolt", new Fields("id"));
    //        builder.setBolt("bolt", new IdentityBolt(), bolt)
    //                .shuffleGrouping("spout");
    builder.setBolt("bolt2", new AckBolt(), bolt).shuffleGrouping("spout");
    //        builder.setBolt("count2", new CountBolt(), bolt)
    //                .fieldsGrouping("bolt2", new Fields("id"));
    Config conf = new Config();
    conf.setNumWorkers(workers);
    //conf.setMaxSpoutPending(maxPending);
    conf.setNumAckers(0);
    conf.setStatsSampleRate(0.0001);
    //topology.executor.receive.buffer.size: 8192 #batched
    //topology.executor.send.buffer.size: 8192 #individual messages
    //topology.transfer.buffer.size: 1024 # batched
    //conf.put("topology.executor.send.buffer.size", 1024);
    //conf.put("topology.transfer.buffer.size", 8);
    //conf.put("topology.receiver.buffer.size", 8);
    //conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, "-Xdebug -Xrunjdwp:transport=dt_socket,address=1%ID%,server=y,suspend=n");
    StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
}
Example 35
Project: resa-master  File: ResaVLDTopFoxFileInputChangeLogo.java
public static void main(String args[]) throws InterruptedException, AlreadyAliveException, InvalidTopologyException, FileNotFoundException {
    if (args.length != 1) {
        System.out.println("Enter path to config file!");
        System.exit(0);
    }
    Config conf = readConfig(args[0]);
    String host = getString(conf, "redis.host");
    int port = getInt(conf, "redis.port");
    String sigQueue = getString(conf, "tVLDSignalQueue");
    TopologyBuilder builder = new ResaTopologyBuilder();
    //        TopologyBuilder builder = new TopologyBuilder();
    String spoutName = "tVLDSpout";
    String signalName = "tVLDSignal";
    String patchGenBolt = "tVLDPatchGen";
    String patchProcBolt = "tVLDPatchProc";
    String patchAggBolt = "tVLDPatchAgg";
    String patchDrawBolt = "tVLDPatchDraw";
    String redisFrameOut = "tVLDRedisFrameOut";
    builder.setSpout(spoutName, new tomFrameSpoutResizeFox(), getInt(conf, spoutName + ".parallelism")).setNumTasks(getInt(conf, spoutName + ".tasks"));
    builder.setSpout(signalName, new SignalSpoutFromRedis(host, port, sigQueue), 1);
    builder.setBolt(patchGenBolt, new PatchGenFox(), getInt(conf, patchGenBolt + ".parallelism")).shuffleGrouping(spoutName, SAMPLE_FRAME_STREAM).setNumTasks(getInt(conf, patchGenBolt + ".tasks"));
    builder.setBolt(patchProcBolt, new PatchProcessorFoxChangeLogo(), getInt(conf, patchProcBolt + ".parallelism")).allGrouping(signalName, SIGNAL_STREAM).allGrouping(patchProcBolt, LOGO_TEMPLATE_UPDATE_STREAM).shuffleGrouping(patchGenBolt, PATCH_FRAME_STREAM).setNumTasks(getInt(conf, patchProcBolt + ".tasks"));
    builder.setBolt(patchAggBolt, new PatchAggFox(), getInt(conf, patchAggBolt + ".parallelism")).fieldsGrouping(patchProcBolt, DETECTED_LOGO_STREAM, new Fields(FIELD_SAMPLE_ID)).setNumTasks(getInt(conf, patchAggBolt + ".tasks"));
    builder.setBolt(patchDrawBolt, new tDrawPatchDelta(), getInt(conf, patchDrawBolt + ".parallelism")).fieldsGrouping(patchAggBolt, PROCESSED_FRAME_STREAM, new Fields(FIELD_FRAME_ID)).fieldsGrouping(spoutName, RAW_FRAME_STREAM, new Fields(FIELD_FRAME_ID)).setNumTasks(getInt(conf, patchDrawBolt + ".tasks"));
    builder.setBolt(redisFrameOut, new RedisFrameOutput(), getInt(conf, redisFrameOut + ".parallelism")).globalGrouping(patchDrawBolt, STREAM_FRAME_DISPLAY).setNumTasks(getInt(conf, redisFrameOut + ".tasks"));
    StormTopology topology = builder.createTopology();
    int numberOfWorkers = getInt(conf, "tVLDNumOfWorkers");
    conf.setNumWorkers(numberOfWorkers);
    conf.setMaxSpoutPending(getInt(conf, "tVLDMaxPending"));
    conf.setStatsSampleRate(1.0);
    conf.registerSerialization(Serializable.Mat.class);
    List<String> templateFiles = getListOfStrings(conf, "originalTemplateFileNames");
    ResaConfig resaConfig = ResaConfig.create();
    resaConfig.putAll(conf);
    if (resa.util.ConfigUtil.getBoolean(conf, "tVLD.metric.resa", false)) {
        resaConfig.addDrsSupport();
        resaConfig.put(ResaConfig.REBALANCE_WAITING_SECS, 0);
        System.out.println("ResaMetricsCollector is registered");
    }
    if (resa.util.ConfigUtil.getBoolean(conf, "tVLD.metric.redis", false)) {
        resaConfig.registerMetricsConsumer(RedisMetricsCollector.class);
        System.out.println("RedisMetricsCollector is registered");
    }
    int sampleFrames = getInt(resaConfig, "sampleFrames");
    int W = ConfigUtil.getInt(resaConfig, "width", 640);
    int H = ConfigUtil.getInt(resaConfig, "height", 480);
    int maxPending = getInt(resaConfig, "topology.max.spout.pending");
    StormSubmitter.submitTopology("resaVLDTopFoxFileInChLG-s" + sampleFrames + "-" + W + "-" + H + "-L" + templateFiles.size() + "-p" + maxPending, resaConfig, topology);
}
Example 36
Project: SIF-master  File: RecommenderTopology.java
/**
     * @param args
     * @throws Exception
     */
public static void main(String[] args) throws Exception {
    if (args.length != 2) {
        throw new IllegalArgumentException("Need two arguments: topology name and config file path");
    }
    String topologyName = args[0];
    String configFilePath = args[1];
    FileInputStream fis = new FileInputStream(configFilePath);
    Properties configProps = new Properties();
    configProps.load(fis);
    //initialize config
    Config conf = new Config();
    conf.setDebug(true);
    for (Object key : configProps.keySet()) {
        String keySt = key.toString();
        String val = configProps.getProperty(keySt);
        conf.put(keySt, val);
    }
    //spout
    TopologyBuilder builder = new TopologyBuilder();
    int spoutThreads = ConfigUtility.getInt(configProps, "spout.threads", 1);
    RedisSpout spout = new RedisSpout();
    spout.withTupleFields(RecommenderBolt.USER_ID, RecommenderBolt.SESSION_ID, RecommenderBolt.ITEM_ID, RecommenderBolt.EVENT_ID, RecommenderBolt.TS_ID);
    builder.setSpout("recommenderRedisSpout", spout, spoutThreads);
    //bolt
    RecommenderBolt bolt = new RecommenderBolt();
    int boltThreads = ConfigUtility.getInt(configProps, "bolt.threads", 1);
    builder.setBolt("recommenderBolt", bolt, boltThreads).fieldsGrouping("recommenderRedisSpout", new Fields(RecommenderBolt.USER_ID));
    //submit topology
    int numWorkers = ConfigUtility.getInt(configProps, "num.workers", 1);
    int maxSpoutPending = ConfigUtility.getInt(configProps, "max.spout.pending", 1000);
    int maxTaskParallelism = ConfigUtility.getInt(configProps, "max.task.parallelism", 100);
    conf.setNumWorkers(numWorkers);
    conf.setMaxSpoutPending(maxSpoutPending);
    conf.setMaxTaskParallelism(maxTaskParallelism);
    StormSubmitter.submitTopology(topologyName, conf, builder.createTopology());
}
Example 37
Project: sifarish-master  File: RecommenderTopology.java
/**
     * @param args
     * @throws Exception
     */
public static void main(String[] args) throws Exception {
    if (args.length != 2) {
        throw new IllegalArgumentException("Need two arguments: topology name and config file path");
    }
    String topologyName = args[0];
    String configFilePath = args[1];
    FileInputStream fis = new FileInputStream(configFilePath);
    Properties configProps = new Properties();
    configProps.load(fis);
    //initialize config
    Config conf = new Config();
    conf.setDebug(true);
    for (Object key : configProps.keySet()) {
        String keySt = key.toString();
        String val = configProps.getProperty(keySt);
        conf.put(keySt, val);
    }
    //spout
    TopologyBuilder builder = new TopologyBuilder();
    int spoutThreads = ConfigUtility.getInt(configProps, "spout.threads", 1);
    RedisSpout spout = new RedisSpout();
    spout.withTupleFields(RecommenderBolt.USER_ID, RecommenderBolt.SESSION_ID, RecommenderBolt.ITEM_ID, RecommenderBolt.EVENT_ID, RecommenderBolt.TS_ID);
    builder.setSpout("recommenderRedisSpout", spout, spoutThreads);
    //bolt
    RecommenderBolt bolt = new RecommenderBolt();
    int boltThreads = ConfigUtility.getInt(configProps, "bolt.threads", 1);
    builder.setBolt("recommenderBolt", bolt, boltThreads).fieldsGrouping("recommenderRedisSpout", new Fields(RecommenderBolt.USER_ID));
    //submit topology
    int numWorkers = ConfigUtility.getInt(configProps, "num.workers", 1);
    int maxSpoutPending = ConfigUtility.getInt(configProps, "max.spout.pending", 1000);
    int maxTaskParallelism = ConfigUtility.getInt(configProps, "max.task.parallelism", 100);
    conf.setNumWorkers(numWorkers);
    conf.setMaxSpoutPending(maxSpoutPending);
    conf.setMaxTaskParallelism(maxTaskParallelism);
    StormSubmitter.submitTopology(topologyName, conf, builder.createTopology());
}
Example 38
Project: storm-contrib-master  File: RDBMSDumperTopology.java
public static void main(String[] args) throws SQLException {
    ArrayList<String> columnNames = new ArrayList<String>();
    ArrayList<String> columnTypes = new ArrayList<String>();
    String tableName = "testTable";
    // Note: if the RDBMS table does not need a primary key, set the variable 'primaryKey' to 'N/A';
    // otherwise set it to the name of the tuple field that is to be treated as the primary key
    String primaryKey = "N/A";
    String rdbmsUrl = "jdbc:mysql://localhost:3306/testDB";
    String rdbmsUserName = "root";
    String rdbmsPassword = "root";
    //add the column names and the respective types in the two arraylists
    columnNames.add("word");
    columnNames.add("number");
    //add the types
    columnTypes.add("varchar (100)");
    columnTypes.add("int");
    TopologyBuilder builder = new TopologyBuilder();
    //set the spout for the topology
    builder.setSpout("spout", new SampleSpout(), 10);
    //dump the stream data into rdbms table		
    RDBMSDumperBolt dumperBolt = new RDBMSDumperBolt(primaryKey, tableName, columnNames, columnTypes, rdbmsUrl, rdbmsUserName, rdbmsPassword);
    builder.setBolt("dumperBolt", dumperBolt, 1).shuffleGrouping("spout");
    Config conf = new Config();
    conf.setDebug(true);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("rdbms-workflow", conf, builder.createTopology());
    Utils.sleep(10000);
    cluster.shutdown();
}
Example 39
Project: storm-example-master  File: WordCountTopology.java
public static void main(String[] args) throws Exception {
    SentenceSpout spout = new SentenceSpout();
    SplitSentenceBolt splitBolt = new SplitSentenceBolt();
    WordCountBolt countBolt = new WordCountBolt();
    ReportBolt reportBolt = new ReportBolt();
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(SENTENCE_SPOUT_ID, spout, 2);
    // SentenceSpout --> SplitSentenceBolt
    builder.setBolt(SPLIT_BOLT_ID, splitBolt, 2).setNumTasks(4).shuffleGrouping(SENTENCE_SPOUT_ID);
    // SplitSentenceBolt --> WordCountBolt
    builder.setBolt(COUNT_BOLT_ID, countBolt, 4).fieldsGrouping(SPLIT_BOLT_ID, new Fields("word"));
    // WordCountBolt --> ReportBolt
    builder.setBolt(REPORT_BOLT_ID, reportBolt).globalGrouping(COUNT_BOLT_ID);
    Config config = new Config();
    config.setNumWorkers(2);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology(TOPOLOGY_NAME, config, builder.createTopology());
    waitForSeconds(10);
    cluster.killTopology(TOPOLOGY_NAME);
    cluster.shutdown();
}
Example 40
Project: stormapp-master  File: Topology.java
/**
     * @param args
     */
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    //Tweets from twitter spout
    //TODO: setup your twitter credentials
    TwitterSpout twitterSpout = new TwitterSpout();
    builder.setSpout("twitter", twitterSpout);
    //Initial filter
    builder.setBolt("filter", new TwitterFilterBolt(), 2).shuffleGrouping("twitter");
    //Tags publishing
    builder.setBolt("tags", new RedisTagsPublisherBolt("tags")).shuffleGrouping("filter");
    //Retweets
    builder.setBolt("retweets", new RedisRetweetBolt(3), 2).shuffleGrouping("filter");
    //Links
    builder.setBolt("linkFilter", new LinkFilterBolt(), 2).shuffleGrouping("filter");
    builder.setBolt("links", new RedisLinksPublisherBolt(), 4).shuffleGrouping("linkFilter");
    builder.setBolt("market", new RedisMarketBolt(), 1).shuffleGrouping("links");
    builder.setBolt("articles", new RedisGooseExtractor(), 5).shuffleGrouping("retweets");
    Config conf = new Config();
    conf.setDebug(false);
    if (args != null && args.length > 0) {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
    } else {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("twitter", conf, builder.createTopology());
    }
}
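Example 40 follows a submission switch that recurs in most of the examples below: a command-line argument selects a real cluster via StormSubmitter, and its absence selects an in-process LocalCluster that is killed and shut down after a short run. A minimal, generic sketch of the pattern (topology name and sleep time are placeholders):

    if (args != null && args.length > 0) {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
    } else {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("local-test", conf, builder.createTopology());
        Utils.sleep(10000);               // let the local topology run briefly
        cluster.killTopology("local-test");
        cluster.shutdown();
    }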
Example 41
Project: workberch-tolopogy-master  File: WorkberchIterStgyNode.java View source code
@Override
public BoltDeclarer addStrategy2Topology(final String guid, final TopologyBuilder tBuilder, final int paralellism) {
    final List<String> strategiesNames = new ArrayList<String>();
    for (final WorkberchIterStgy workberchIterStgy : childStrategies) {
        strategiesNames.add(workberchIterStgy.getBoltName());
        workberchIterStgy.addStrategy2Topology(guid, tBuilder, paralellism);
    }
    final BoltDeclarer boltReturned;
    if (cross) {
        if (optimized) {
            final WorkberchCartesianDummyBolt bolt = new WorkberchCartesianDummyBolt(guid, getOutputFields(), getFlowField());
            BoltDeclarer boltDeclarer = tBuilder.setBolt(CROSS_PREFIX + processorName, bolt, 1);
            boltReturned = boltDeclarer;
            final Iterator<String> iterStrategies = strategiesNames.iterator();
            while (iterStrategies.hasNext()) {
                final String strategyName = iterStrategies.next();
                boltDeclarer = StringUtils.endsWith(strategyName, getFlowField()) ? boltDeclarer.shuffleGrouping(strategyName) : boltDeclarer.allGrouping(strategyName);
            }
        } else {
            final WorkberchCartesianBolt bolt = new WorkberchCartesianBolt(guid, getOutputFields());
            BoltDeclarer boltDeclarer = tBuilder.setBolt(CROSS_PREFIX + processorName, bolt, 1);
            final Iterator<String> iterStrategies = strategiesNames.iterator();
            while (iterStrategies.hasNext()) {
                final String strategyName = iterStrategies.next();
                boltDeclarer = iterStrategies.hasNext() ? boltDeclarer.allGrouping(strategyName) : boltDeclarer.shuffleGrouping(strategyName);
            }
            final WorkberchOrderBolt orderBolt = new WorkberchOrderBolt(guid, getOutputFields(), false) {

                private static final long serialVersionUID = -1687335238822989302L;

                @Override
                public void executeOrdered(final WorkberchTuple input, final BasicOutputCollector collector, final boolean lastValues, final String uuid) {
                    emitTuple(new ArrayList<Object>(input.getValues().values()), collector, lastValues, uuid);
                }
            };
            boltReturned = tBuilder.setBolt(ORDER_PREFIX + processorName, orderBolt, 1).shuffleGrouping(CROSS_PREFIX + processorName);
        }
    } else {
        final WorkberchDotBolt bolt = new WorkberchDotBolt(guid, getOutputFields());
        final String startBolt = DOT_PREFIX + processorName;
        BoltDeclarer boltDeclarer = tBuilder.setBolt(startBolt, bolt, paralellism);
        for (final String strategyName : strategiesNames) {
            boltDeclarer = boltDeclarer.fieldsGrouping(strategyName, new Fields(WorkberchConstants.INDEX_FIELD));
        }
        boltReturned = boltDeclarer;
    }
    return boltReturned;
}
Example 42
Project: aeolus-master  File: MeasureOutputDataRate.java View source code
/**
	 * TODO
	 * 
	 * @throws IOException
	 * @throws InvalidTopologyException
	 * @throws AlreadyAliveException
	 * @throws TException
	 * @throws NotAliveException
	 * 
	 */
@SuppressWarnings("unchecked")
public static void main(String[] args) throws IOException, AlreadyAliveException, InvalidTopologyException, NotAliveException, TException {
    final String spoutId = "Spout";
    final String sinkId = "Sink";
    final String spoutStatisticsId = "SpoutStats";
    final String sinkStatisticsId = "BoltStats";
    final String topologyId = "microbenchmark-MeasureOutputDataRate";
    final String spoutStatsFile = "/tmp/aeolus-spout.stats";
    final String sinkStatsFile = "/tmp/aeolus-sink.stats";
    String aeolusConfigFile = null;
    AeolusConfig aeolusConfig = null;
    // true => submit; false => terminate
    boolean submitOrTerminate = true;
    int i = -1;
    while (++i < args.length) {
        if (args[i].equals("-h") || args[i].equals("--help")) {
            printHelp();
            return;
        } else if (args[i].equals("-c")) {
            ++i;
            if (i == args.length) {
                System.err.println("flag -c found but no <filename> was specified");
                return;
            }
            aeolusConfigFile = args[i];
        } else if (args[i].equals("--kill")) {
            submitOrTerminate = false;
        } else {
            System.err.println("unknown flag " + args[i]);
            return;
        }
    }
    // add $HOME/.storm to the system classloader, so that StormSubmitter can look for storm.yaml there
    String userHome = System.getProperties().getProperty("user.home");
    if (userHome != null) {
        try {
            Method method = URLClassLoader.class.getDeclaredMethod("addURL", new Class[] { URL.class });
            method.setAccessible(true);
            method.invoke(ClassLoader.getSystemClassLoader(), new Object[] { new File(userHome + File.separator + ".storm").toURI().toURL() });
        } catch (NoSuchMethodException e) {
            logger.debug("Could not add $HOME/.storm as system resource.", e);
        } catch (SecurityException e) {
            logger.debug("Could not add $HOME/.storm as system resource.", e);
        } catch (IllegalArgumentException e) {
            logger.debug("Could not add $HOME/.storm as system resource.", e);
        } catch (MalformedURLException e) {
            logger.debug("Could not add $HOME/.storm as system resource.", e);
        } catch (IllegalAccessException e) {
            logger.debug("Could not add $HOME/.storm as system resource.", e);
        } catch (InvocationTargetException e) {
            logger.debug("Could not add $HOME/.storm as system resource.", e);
        }
    }
    if (System.getProperty("storm.jar") == null) {
        System.setProperty("storm.jar", "target/monitoring-1.0-SNAPSHOT-microbenchmarks.jar");
    }
    if (aeolusConfigFile != null) {
        aeolusConfig = ConfigReader.readConfig(aeolusConfigFile);
    } else {
        try {
            // default configuration directory within maven project
            aeolusConfig = ConfigReader.readConfig("src/main/resources/");
            logger.trace("using {} from src/main/recources", ConfigReader.defaultConfigFile);
        } catch (FileNotFoundException e) {
            logger.debug("{} not found in src/main/recources/", ConfigReader.defaultConfigFile);
            try {
                aeolusConfig = ConfigReader.readConfig();
                logger.trace("using local {}", ConfigReader.defaultConfigFile);
            } catch (FileNotFoundException f) {
                logger.debug("{} not found in local working directory (.)", ConfigReader.defaultConfigFile);
            }
        }
    }
    Config stormConfig = new Config();
    stormConfig.putAll(Utils.readStormConfig());
    // Aeolus configuration overwrites Storm configuration
    if (aeolusConfig != null) {
        // add only if not null, otherwise StormSubmitter cannot add values from storm.yaml
        String nimbusHost = aeolusConfig.getNimbusHost();
        if (nimbusHost != null) {
            logger.trace("using nimbus.host from {}", ConfigReader.defaultConfigFile);
            stormConfig.put(Config.NIMBUS_HOST, nimbusHost);
        }
        Integer nimbusPort = aeolusConfig.getNimbusPort();
        if (nimbusPort != null) {
            logger.trace("using nimbus.port from {}", ConfigReader.defaultConfigFile);
            stormConfig.put(Config.NIMBUS_THRIFT_PORT, nimbusPort);
        }
    }
    // command line arguments overwrite everything
    stormConfig.putAll(Utils.readCommandLineOpts());
    Client client = NimbusClient.getConfiguredClient(stormConfig).getClient();
    if (submitOrTerminate) {
        final double dataRate = Double.parseDouble(System.getProperty("aeolus.microbenchmarks.dataRate"));
        final int batchSize = Integer.parseInt(System.getProperty("aeolus.microbenchmarks.batchSize"));
        final int interval = Integer.parseInt(System.getProperty("aeolus.microbenchmarks.reportingInterval"));
        TopologyBuilder builder = new TopologyBuilder();
        // spout
        IRichSpout spout = new ThroughputSpout(new FixedStreamRateDriverSpout(new SchemaSpout(), dataRate), interval);
        if (batchSize > 0) {
            HashMap<String, Integer> batchSizes = new HashMap<String, Integer>();
            batchSizes.put(Utils.DEFAULT_STREAM_ID, new Integer(batchSize));
            spout = new SpoutOutputBatcher(spout, batchSizes);
        }
        builder.setSpout(spoutId, spout);
        // sink
        IRichBolt sink = new ThroughputBolt(new ForwardBolt(), interval, true);
        if (batchSize > 0) {
            sink = new InputDebatcher(sink);
        }
        builder.setBolt(sinkId, sink).shuffleGrouping(spoutId);
        // statistics
        builder.setBolt(spoutStatisticsId, new FileFlushSinkBolt(spoutStatsFile)).shuffleGrouping(spoutId, MonitoringTopoloyBuilder.DEFAULT_THROUGHPUT_STREAM);
        builder.setBolt(sinkStatisticsId, new FileFlushSinkBolt(sinkStatsFile)).shuffleGrouping(sinkId, MonitoringTopoloyBuilder.DEFAULT_THROUGHPUT_STREAM);
        stormConfig.setNumWorkers(4);
        // stormConfig.setFallBackOnJavaSerialization(false);
        // stormConfig.setSkipMissingKryoRegistrations(false);
        // stormConfig.put(Config.STORM_THRIFT_TRANSPORT_PLUGIN,
        // "backtype.storm.security.auth.SimpleTransportPlugin");
        // stormConfig.put("storm.thrift.transport", "backtype.storm.security.auth.SimpleTransportPlugin");
        SpoutOutputBatcher.registerKryoClasses(stormConfig);
        StormSubmitter.submitTopology(topologyId, stormConfig, builder.createTopology());
        // LocalCluster c = new LocalCluster();
        // c.submitTopology(topologyId, stormConfig, builder.createTopology());
        String id = client.getClusterInfo().get_topologies().get(0).get_id();
        TopologyInfo info = client.getTopologyInfo(id);
        String spoutHost = null, sinkHost = null, spoutStatsHost = null, sinkStatsHost = null;
        for (ExecutorSummary executor : info.get_executors()) {
            String operatorId = executor.get_component_id();
            if (operatorId.equals(spoutId)) {
                spoutHost = executor.get_host();
                if (spoutHost.equals(sinkHost)) {
                    throw new RuntimeException("spout and sink deployed at same host");
                }
            } else if (operatorId.equals(sinkId)) {
                sinkHost = executor.get_host();
                if (sinkHost.equals(spoutHost)) {
                    throw new RuntimeException("spout and sink deployed at same host");
                }
            } else if (operatorId.equals(spoutStatisticsId)) {
                spoutStatsHost = executor.get_host();
            } else if (operatorId.equals(sinkStatisticsId)) {
                sinkStatsHost = executor.get_host();
            }
        }
        System.out.println("Aeolus.MeasureOutputDataRate.spoutHost=" + spoutHost);
        System.out.println("Aeolus.MeasureOutputDataRate.sinkHost=" + sinkHost);
        System.out.println("Aeolus.MeasureOutputDataRate.spoutStatsHost=" + spoutStatsHost);
        System.out.println("Aeolus.MeasureOutputDataRate.spoutStatsFile=" + spoutStatsFile);
        System.out.println("Aeolus.MeasureOutputDataRate.sinkStatsHost=" + sinkStatsHost);
        System.out.println("Aeolus.MeasureOutputDataRate.sinkStatsFile=" + sinkStatsFile);
    // c.killTopology(topologyId);
    // Utils.sleep(1000);
    // c.shutdown();
    } else {
        KillOptions killOptions = new KillOptions();
        killOptions.set_wait_secs(0);
        client.killTopologyWithOpts(topologyId, killOptions);
    }
}
Example 43
Project: alfresco-apache-storm-demo-master  File: ESCrawlTopology.java View source code
@Override
protected int run(String[] args) {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new ElasticSearchSpout());
    builder.setBolt("partitioner", new URLPartitionerBolt()).shuffleGrouping("spout");
    builder.setBolt("fetch", new FetcherBolt()).fieldsGrouping("partitioner", new Fields("key"));
    builder.setBolt("sitemap", new SiteMapParserBolt()).localOrShuffleGrouping("fetch");
    builder.setBolt("parse", new JSoupParserBolt()).localOrShuffleGrouping("sitemap");
    // consider that the process has been successful regardless of what
    // happens with the indexing
    builder.setBolt("switch", new StatusStreamBolt()).localOrShuffleGrouping("parse");
    builder.setBolt("indexer", new IndexerBolt()).localOrShuffleGrouping("parse");
    builder.setBolt("status", new StatusUpdaterBolt()).localOrShuffleGrouping("switch", Constants.StatusStreamName).localOrShuffleGrouping("fetch", Constants.StatusStreamName).localOrShuffleGrouping("sitemap", Constants.StatusStreamName).localOrShuffleGrouping("parse", Constants.StatusStreamName);
    conf.registerMetricsConsumer(MetricsConsumer.class);
    return submit("crawl", conf, builder);
}
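Worth highlighting in Example 43: a single BoltDeclarer chains several grouping calls, so one bolt can subscribe to the same named stream emitted by multiple upstream components, and localOrShuffleGrouping prefers tasks in the same worker process to avoid network transfer when possible. The shape of that wiring, with illustrative component ids and stream name:

    builder.setBolt("status", new StatusUpdaterBolt())
           .localOrShuffleGrouping("fetch", "status")    // status stream from the fetcher
           .localOrShuffleGrouping("parse", "status");   // status stream from the parser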
Example 44
Project: avenir-master  File: ReinforcementLearnerTopology.java View source code
/**
     * @param args
     * @throws Exception
     */
public static void main(String[] args) throws Exception {
    if (args.length != 2) {
        throw new IllegalArgumentException("Need two arguments: topology name and config file path");
    }
    String topologyName = args[0];
    String configFilePath = args[1];
    FileInputStream fis = new FileInputStream(configFilePath);
    Properties configProps = new Properties();
    configProps.load(fis);
    //initialize config
    Config conf = new Config();
    conf.setDebug(true);
    for (Object key : configProps.keySet()) {
        String keySt = key.toString();
        String val = configProps.getProperty(keySt);
        conf.put(keySt, val);
    }
    //spout
    TopologyBuilder builder = new TopologyBuilder();
    int spoutThreads = ConfigUtility.getInt(configProps, "spout.threads", 1);
    RedisSpout spout = new RedisSpout();
    spout.withTupleFields(ReinforcementLearnerBolt.EVENT_ID, ReinforcementLearnerBolt.ROUND_NUM);
    builder.setSpout("reinforcementLearnerRedisSpout", spout, spoutThreads);
    //bolt
    ReinforcementLearnerBolt bolt = new ReinforcementLearnerBolt();
    int boltThreads = ConfigUtility.getInt(configProps, "bolt.threads", 1);
    builder.setBolt("reinforcementLearnerRedisBolt", bolt, boltThreads).shuffleGrouping("reinforcementLearnerRedisSpout");
    //submit topology
    int numWorkers = ConfigUtility.getInt(configProps, "num.workers", 1);
    int maxSpoutPending = ConfigUtility.getInt(configProps, "max.spout.pending", 1000);
    int maxTaskParallelism = ConfigUtility.getInt(configProps, "max.task.parallelism", 100);
    conf.setNumWorkers(numWorkers);
    conf.setMaxSpoutPending(maxSpoutPending);
    conf.setMaxTaskParallelism(maxTaskParallelism);
    StormSubmitter.submitTopology(topologyName, conf, builder.createTopology());
}
Example 45
Project: chombo-master  File: RealtimeUtil.java View source code
/**
	 * @param topologyName
	 * @param conf
	 * @param builder
	 * @throws AlreadyAliveException
	 * @throws InvalidTopologyException
	 */
public static void submitStormTopology(String topologyName, Config conf, TopologyBuilder builder) throws AlreadyAliveException, InvalidTopologyException {
    int numWorkers = ConfigUtility.getInt(conf, "num.workers", 1);
    int maxSpoutPending = ConfigUtility.getInt(conf, "max.spout.pending", 1000);
    int maxTaskParallelism = ConfigUtility.getInt(conf, "max.task.parallelism", 100);
    conf.setNumWorkers(numWorkers);
    conf.setMaxSpoutPending(maxSpoutPending);
    conf.setMaxTaskParallelism(maxTaskParallelism);
    StormSubmitter.submitTopology(topologyName, conf, builder.createTopology());
}
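A hedged usage sketch for the helper above, assuming ConfigUtility.getInt reads the 'num.workers', 'max.spout.pending' and 'max.task.parallelism' keys from the Config and falls back to the given defaults; the spout and bolt classes are hypothetical stand-ins:

    Config conf = new Config();
    conf.put("num.workers", "2");   // picked up by ConfigUtility.getInt(...)
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new SomeSpout(), 1);                        // hypothetical spout
    builder.setBolt("bolt", new SomeBolt(), 1).shuffleGrouping("spout");  // hypothetical bolt
    RealtimeUtil.submitStormTopology("demo-topology", conf, builder);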
Example 46
Project: heron-master  File: TaskHookTopology.java View source code
public static void main(String[] args) throws Exception {
    if (args.length != 1) {
        throw new RuntimeException("Specify topology name");
    }
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("word", new AckingTestWordSpout(), 2);
    builder.setBolt("count", new CountBolt(), 2).shuffleGrouping("word");
    Config conf = new Config();
    conf.setDebug(true);
    // Put an arbitrarily large number here if you don't want to slow the topology down
    conf.setMaxSpoutPending(1000 * 1000 * 1000);
    // To enable acking, we need to setEnableAcking true
    conf.setNumAckers(1);
    conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, "-XX:+HeapDumpOnOutOfMemoryError");
    // Set the task hook
    List<String> taskHooks = new LinkedList<>();
    taskHooks.add("com.twitter.heron.examples.TaskHookTopology$TestTaskHook");
    com.twitter.heron.api.Config.setAutoTaskHooks(conf, taskHooks);
    conf.setNumWorkers(1);
    StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
}
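Example 46 touches the two settings that govern at-least-once processing, and they only work together: tuple tracking requires at least one acker task, and max spout pending throttles how many un-acked tuples each spout task may have in flight. Shown in isolation (values are illustrative):

    Config conf = new Config();
    conf.setNumAckers(1);            // at least one acker task enables tuple tracking
    conf.setMaxSpoutPending(10000);  // cap on un-acked tuples per spout task;
                                     // only takes effect while acking is enabled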
Example 47
Project: jpmml-storm-master  File: Main.java View source code
public static void main(String... args) throws Exception {
    if (args.length != 3) {
        System.err.println("Usage: java " + Main.class.getName() + " <PMML file> <Input CSV file> <Output CSV file>");
        System.exit(-1);
    }
    Evaluator evaluator = PMMLBoltUtil.createEvaluator(new File(args[0]));
    PMMLBolt pmmlBolt = new PMMLBolt(evaluator);
    List<FieldName> inputFields = new ArrayList<>();
    inputFields.addAll(evaluator.getActiveFields());
    CsvReaderSpout csvReader = new CsvReaderSpout(new File(args[1]), inputFields);
    List<FieldName> outputFields = new ArrayList<>();
    outputFields.addAll(evaluator.getTargetFields());
    outputFields.addAll(evaluator.getOutputFields());
    CsvWriterBolt csvWriter = new CsvWriterBolt(new File(args[2]), outputFields);
    TopologyBuilder topologyBuilder = new TopologyBuilder();
    topologyBuilder.setSpout("input", csvReader);
    topologyBuilder.setBolt("pmml", pmmlBolt).shuffleGrouping("input");
    topologyBuilder.setBolt("output", csvWriter).shuffleGrouping("pmml");
    Config config = new Config();
    config.setDebug(false);
    StormTopology topology = topologyBuilder.createTopology();
    LocalCluster localCluster = new LocalCluster();
    localCluster.submitTopology("example", config, topology);
    Utils.sleep(30L * 1000L);
    localCluster.killTopology("example");
    localCluster.shutdown();
}
Example 48
Project: rocketmq-storm-master  File: SimpleTopologyDemo.java View source code
private static TopologyBuilder buildTopology(Config config) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    int boltParallel = NumberUtils.toInt((String) config.get("topology.bolt.parallel"), 1);
    int spoutParallel = NumberUtils.toInt((String) config.get("topology.spout.parallel"), 1);
    BoltDeclarer writerBolt = builder.setBolt(BOLT_NAME, new RocketMqBolt(), boltParallel);
    SimpleMessageSpout defaultSpout = (SimpleMessageSpout) RocketMQSpoutFactory.getSpout(RocketMQSpouts.SIMPLE.getValue());
    RocketMQConfig mqConfig = (RocketMQConfig) config.get(ConfigUtils.CONFIG_ROCKETMQ);
    defaultSpout.setConfig(mqConfig);
    String id = (String) config.get(ConfigUtils.CONFIG_TOPIC);
    builder.setSpout(id, defaultSpout, spoutParallel);
    writerBolt.shuffleGrouping(id);
    return builder;
}
Example 49
Project: squall-master  File: StormJoinerBoltComponent.java View source code
private void initialize(StormEmitter firstEmitter, StormEmitter secondEmitter, ComponentProperties cp, List<String> allCompNames, Predicate joinPredicate, int hierarchyPosition, TopologyBuilder builder, TopologyKiller killer, Config conf) {
    _firstEmitterIndex = String.valueOf(allCompNames.indexOf(firstEmitter.getName()));
    _secondEmitterIndex = String.valueOf(allCompNames.indexOf(secondEmitter.getName()));
    _operatorChain = cp.getChainOperator();
    _aggBatchOutputMillis = cp.getBatchOutputMillis();
    _joinPredicate = joinPredicate;
}
Example 50
Project: Stock-Analysis-master  File: DumpToHBaseTopology.java View source code
/**
     * HBase Data Dump to Another HBase Table Topology
     * @param args
     * @throws Exception
     */
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    int topoWorkers = Constants.STORM_TOPOLOGY_WORKERS;
    int spoutTasks = Constants.STORM_SPOUT_TASKS;
    builder.setSpout("hbaseSpout", new HBaseSpout("20040101", "20040201"), spoutTasks);
    int boltTasks = Constants.STORM_BOLT_TASKS;
    builder.setBolt("dumpBolt", new DumpToHBaseBolt(), boltTasks).shuffleGrouping("hbaseSpout");
    Config conf = new Config();
    if (args != null && args.length > 0) {
        // run on storm cluster
        conf.setNumAckers(1);
        conf.setNumWorkers(topoWorkers);
        StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
    } else {
        // run on local cluster
        conf.setMaxTaskParallelism(3);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("test", conf, builder.createTopology());
        Utils.sleep(100000);
        cluster.killTopology("test");
        cluster.shutdown();
    }
}
Example 51
Project: Storm-Simple-Crawler-master  File: CrawlerTopology.java View source code
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    System.out.println("in main:");
    String topicName = "crawl";
    builder.setSpout("random-sentence", new RandomSentenceSpout());
    builder.setBolt("forwardToKafka", new ForwardToKafkaBolt("54.245.107.71:9092", "kafka.serializer.StringEncoder", topicName), 2).shuffleGrouping("random-sentence");
    BrokerHosts hosts = new ZkHosts("54.245.107.71:2181");
    SpoutConfig spoutConfig = new SpoutConfig(hosts, topicName, "/" + topicName, UUID.randomUUID().toString());
    spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);
    builder.setSpout(CRAWL_SPOUT_ID, kafkaSpout);
    // Kafka crawl spout --> URLDeduplicatorBolt
    builder.setBolt(URLDEDUP_BOLT_ID, new URLDeduplicatorBolt()).shuffleGrouping(CRAWL_SPOUT_ID);
    // // URLDedupliatorBolt --> URLPartitionerBolt
    // builder.setBolt(PARTITION_BOLT_ID, new URLPartitionerBolt())
    // .fieldsGrouping(URLDEDUP_BOLT_ID, new Fields("host"));
    // // URLPartitionerBolt --> SimpleFetcherBolt
    // builder.setBolt(FETCH_BOLT_ID, new SimpleFetcherBolt())
    // .shuffleGrouping(PARTITION_BOLT_ID);
    // // SimpleFetcherBolt --> ParserBolt
    // builder.setBolt(PARSE_BOLT_ID, new ParserBolt()).shuffleGrouping(
    // FETCH_BOLT_ID);
    Config config = new Config();
    if (args.length == 0) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(TOPOLOGY_NAME, config, builder.createTopology());
        Utils.sleep(50000);
        cluster.killTopology(TOPOLOGY_NAME);
        cluster.shutdown();
    } else {
        StormSubmitter.submitTopology(args[0], config, builder.createTopology());
    }
}
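In Example 51's SpoutConfig, the third argument is the ZooKeeper root path under which the Kafka spout commits its consumer offsets and the fourth is the id those offsets are stored under; the example's random UUID means every run starts fresh, whereas a fixed id would resume from the last committed offset. The wiring pattern, with placeholder addresses and names:

    BrokerHosts hosts = new ZkHosts("zkhost:2181");                    // placeholder address
    SpoutConfig cfg = new SpoutConfig(hosts, "crawl", "/crawl", "fixed-consumer-id");
    cfg.scheme = new SchemeAsMultiScheme(new StringScheme());          // decode bytes into a string field
    builder.setSpout("kafka-spout", new KafkaSpout(cfg));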
Example 52
Project: stormrider-master  File: StormTopologyImpl.java View source code
public void submitAnalyzeTopology(boolean isLocalMode, int numOfWorkers, long interval, boolean isReified, String storeConfigFile, String viewsConfigFile) {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("analyze", new AnalyzeSpout(interval, isReified, storeConfigFile, viewsConfigFile), TwitterConstants.PARALLELISM_HINT);
    builder.setBolt("deg-cen", new DegreeCentralityBolt(viewsConfigFile), TwitterConstants.PARALLELISM_HINT).shuffleGrouping("analyze");
    builder.setBolt("close-cen", new ClosenessCentralityBolt(viewsConfigFile), TwitterConstants.PARALLELISM_HINT).shuffleGrouping("analyze");
    builder.setBolt("bet-cen", new BetweennessCentralityBolt(viewsConfigFile), TwitterConstants.PARALLELISM_HINT).shuffleGrouping("analyze");
    submitTopology(StormRiderConstants.ANALYZE_TOPOLOGY_NAME + System.nanoTime(), isLocalMode, numOfWorkers, builder.createTopology());
}
Example 53
Project: StormSampleProject-master  File: SentimentAnalysisTopology.java View source code
private static StormTopology createTopology() {
    SpoutConfig kafkaConf = new SpoutConfig(new ZkHosts(Properties.getString("rts.storm.zkhosts")), KAFKA_TOPIC, "/kafka", "KafkaSpout");
    kafkaConf.scheme = new SchemeAsMultiScheme(new StringScheme());
    TopologyBuilder topology = new TopologyBuilder();
    topology.setSpout("kafka_spout", new KafkaSpout(kafkaConf), 4);
    topology.setBolt("twitter_filter", new TwitterFilterBolt(), 4).shuffleGrouping("kafka_spout");
    topology.setBolt("text_filter", new TextFilterBolt(), 4).shuffleGrouping("twitter_filter");
    topology.setBolt("stemming", new StemmingBolt(), 4).shuffleGrouping("text_filter");
    topology.setBolt("positive", new PositiveSentimentBolt(), 4).shuffleGrouping("stemming");
    topology.setBolt("negative", new NegativeSentimentBolt(), 4).shuffleGrouping("stemming");
    topology.setBolt("join", new JoinSentimentsBolt(), 4).fieldsGrouping("positive", new Fields("tweet_id")).fieldsGrouping("negative", new Fields("tweet_id"));
    topology.setBolt("score", new SentimentScoringBolt(), 4).shuffleGrouping("join");
    topology.setBolt("hdfs", new HDFSBolt(), 4).shuffleGrouping("score");
    topology.setBolt("nodejs", new NodeNotifierBolt(), 4).shuffleGrouping("score");
    return topology.createTopology();
}
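The join stage in Example 53 depends on its groupings: fieldsGrouping on "tweet_id" hashes by that field, so the positive and negative scores for one tweet always arrive at the same JoinSentimentsBolt task, which is what makes an in-memory join of the two streams possible. The general shape, with hypothetical ids and key:

    builder.setBolt("joiner", new MyJoinBolt(), 4)                   // hypothetical join bolt
           .fieldsGrouping("left-stream", new Fields("join_key"))
           .fieldsGrouping("right-stream", new Fields("join_key"));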
Example 54
Project: StormTweetsSentimentAnalysis-master  File: SentimentAnalysisTopology.java View source code
public static final void main(final String[] args) throws Exception {
    try {
        final Config config = new Config();
        config.setMessageTimeoutSecs(120);
        config.setDebug(false);
        final TopologyBuilder topologyBuilder = new TopologyBuilder();
        topologyBuilder.setSpout("twitterspout", new TwitterSpout());
        topologyBuilder.setBolt("statelocatorbolt", new StateLocatorBolt()).shuffleGrouping("twitterspout");
        //Create Bolt with the frequency of logging [in seconds].
        topologyBuilder.setBolt("sentimentcalculatorbolt", new SentimentCalculatorBolt(30)).fieldsGrouping("statelocatorbolt", new Fields("state"));
        //Submit it to the cluster, or submit it locally
        if (null != args && 0 < args.length) {
            config.setNumWorkers(3);
            StormSubmitter.submitTopology(args[0], config, topologyBuilder.createTopology());
        } else {
            config.setMaxTaskParallelism(10);
            final LocalCluster localCluster = new LocalCluster();
            localCluster.submitTopology(Constants.TOPOLOGY_NAME, config, topologyBuilder.createTopology());
            //Run this topology for 120 seconds so that we can complete processing of decent # of tweets.
            Utils.sleep(120 * 1000);
            LOGGER.info("Shutting down the cluster...");
            localCluster.killTopology(Constants.TOPOLOGY_NAME);
            localCluster.shutdown();
            Runtime.getRuntime().addShutdownHook(new Thread() {

                @Override
                public void run() {
                    LOGGER.info("Shutting down the cluster...");
                    localCluster.killTopology(Constants.TOPOLOGY_NAME);
                    localCluster.shutdown();
                }
            });
        }
    } catch (final AlreadyAliveException | InvalidTopologyException exception) {
        exception.printStackTrace();
    } catch (final Exception exception) {
        exception.printStackTrace();
    }
    LOGGER.info("\n\n\n\t\t*****Please clean your temp folder \"{}\" now!!!*****", System.getProperty("java.io.tmpdir"));
}
Example 55
Project: StormTweetsWordCount-master  File: WordCountTopology.java View source code
public static final void main(final String[] args) {
    try {
        final Config config = new Config();
        config.setMessageTimeoutSecs(120);
        config.setDebug(false);
        final TopologyBuilder topologyBuilder = new TopologyBuilder();
        topologyBuilder.setSpout("twitterspout", new TwitterSpout());
        //Create WordSplitBolt with minimum word length to be considered.
        //This is more to reduce the number of words to be processed i.e. for ignoring simple and most used words.
        topologyBuilder.setBolt("wordsplitbolt", new WordSplitBolt(4)).shuffleGrouping("twitterspout");
        //Create Bolt with the frequency of logging [in seconds] and count threshold of words.
        topologyBuilder.setBolt("wordcountbolt", new WordCountBolt(30, 9)).shuffleGrouping("wordsplitbolt");
        //Submit it to the cluster, or submit it locally
        if (null != args && 0 < args.length) {
            config.setNumWorkers(3);
            StormSubmitter.submitTopology(args[0], config, topologyBuilder.createTopology());
        } else {
            config.setMaxTaskParallelism(10);
            final LocalCluster localCluster = new LocalCluster();
            localCluster.submitTopology(Constants.TOPOLOGY_NAME, config, topologyBuilder.createTopology());
            //Run this topology for 120 seconds so that we can complete processing of decent # of tweets.
            Utils.sleep(120 * 1000);
            LOGGER.info("Shutting down the cluster...");
            localCluster.killTopology(Constants.TOPOLOGY_NAME);
            localCluster.shutdown();
        }
    } catch (final AlreadyAliveException | InvalidTopologyException exception) {
        exception.printStackTrace();
    } catch (final Exception exception) {
        exception.printStackTrace();
    }
    LOGGER.info("\n\n\n\t\t*****Please clean your temp folder \"{}\" now!!!*****", System.getProperty("java.io.tmpdir"));
}
Example 56
Project: visitante-master  File: VisitTopology.java View source code
/**
     * @param args
     * @throws Exception
     */
public static void main(String[] args) throws Exception {
    if (args.length != 2) {
        throw new IllegalArgumentException("Need two arguments: topology name and config file path");
    }
    String topologyName = args[0];
    String configFilePath = args[1];
    Config conf = RealtimeUtil.buildStormConfig(configFilePath);
    boolean debugOn = ConfigUtility.getBoolean(conf, "debug.on", false);
    System.out.println("config file:" + configFilePath + " debugOn:" + debugOn);
    conf.put(Config.TOPOLOGY_DEBUG, debugOn);
    //spout
    TopologyBuilder builder = new TopologyBuilder();
    int spoutThreads = ConfigUtility.getInt(conf, "spout.threads", 1);
    VisitDepthSpout spout = new VisitDepthSpout();
    spout.withTupleFields(VisitTopology.SESSION_ID, VisitTopology.VISIT_TIME, VisitTopology.VISIT_URL);
    builder.setSpout("visitDepthRedisSpout", spout, spoutThreads);
    //visit session bolt
    int visSessTickFreqInSec = ConfigUtility.getInt(conf, "visit.session.tick.freq.sec", 1);
    VisitSessionBolt viSessBolt = new VisitSessionBolt(visSessTickFreqInSec);
    viSessBolt.withTupleFields(VisitTopology.PAGE_ID, VisitTopology.PAGE_COUNT);
    int boltThreads = ConfigUtility.getInt(conf, "visit.session.bolt.threads", 1);
    builder.setBolt("visitSessionBolt", viSessBolt, boltThreads).fieldsGrouping("visitDepthRedisSpout", new Fields(VisitTopology.SESSION_ID));
    //visit depth bolt
    int visDepthTickFreqInSec = ConfigUtility.getInt(conf, "visit.depth.tick.freq.sec", 1);
    VisitDepthBolt viDepthBolt = new VisitDepthBolt(visDepthTickFreqInSec);
    boltThreads = ConfigUtility.getInt(conf, "visit.depth.bolt.threads", 1);
    builder.setBolt("visitDepthBolt", viDepthBolt, boltThreads).shuffleGrouping("visitSessionBolt");
    //submit
    RealtimeUtil.submitStormTopology(topologyName, conf, builder);
}
Example 57
Project: beymani-master  File: OutlierPredictor.java View source code
public static void main(String[] args) throws Exception {
    String topologyName = args[0];
    String configFilePath = args[1];
    FileInputStream fis = new FileInputStream(configFilePath);
    Properties configProps = new Properties();
    configProps.load(fis);
    //initialize config
    Config conf = new Config();
    conf.setDebug(true);
    for (Object key : configProps.keySet()) {
        String keySt = key.toString();
        String val = configProps.getProperty(keySt);
        conf.put(keySt, val);
    }
    //spout
    TopologyBuilder builder = new TopologyBuilder();
    int spoutThreads = Integer.parseInt(configProps.getProperty("predictor.spout.threads"));
    builder.setSpout("predictorSpout", new PredictorSpout(), spoutThreads);
    //detector bolt
    int boltThreads = Integer.parseInt(configProps.getProperty("predictor.bolt.threads"));
    builder.setBolt("predictor", new PredictorBolt(), boltThreads).fieldsGrouping("predictorSpout", new Fields("entityID"));
    //submit topology
    int numWorkers = Integer.parseInt(configProps.getProperty("num.workers"));
    conf.setNumWorkers(numWorkers);
    StormSubmitter.submitTopology(topologyName, conf, builder.createTopology());
}
Example 58
Project: dip-master  File: DataIngestionWindowBasedTopology.java View source code
private static StormTopology buildTopology(AppArgs appArgs) {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(KAFKA_SPOUT_ID, KafkaSpoutFactory.getKafkaSpout(appArgs), 2);
    builder.setBolt(FILTER_BOLT_ID, new TwitterRawJsonConvertorBolt()).shuffleGrouping(KAFKA_SPOUT_ID);
    builder.setBolt(HDFS_BOLT_ID, HdfsBoltBuilder.build(appArgs)).shuffleGrouping(FILTER_BOLT_ID);
    builder.setBolt(HBASE_BOLT_ID, HBaseBoltBuilder.build(appArgs, "hbaseConfig")).shuffleGrouping(FILTER_BOLT_ID);
    builder.setBolt("USERS_MAX_FOLLOWERS", new UsersWithMaxFollowers()).shuffleGrouping(FILTER_BOLT_ID);
    builder.setBolt("TOPN_USERS_MAX_FOLLOWERS", new TopNUsersWithMaxFollowers()).globalGrouping("USERS_MAX_FOLLOWERS");
    builder.setBolt("LOCATION_BY_TWEETS", new LocationByTweets()).shuffleGrouping(FILTER_BOLT_ID);
    builder.setBolt("TOPN_LOCATION_BY_TWEETS", new TopNLocationByTweets()).globalGrouping("LOCATION_BY_TWEETS");
    builder.setBolt("MYSQL_WRITER", new MySQLDataWriterBolt(), 2).fieldsGrouping("TOPN_USERS_MAX_FOLLOWERS", new Fields("tableName")).fieldsGrouping("TOPN_LOCATION_BY_TWEETS", new Fields("tableName"));
    return builder.createTopology();
}
Example 59
Project: hadoop-arch-book-master  File: MovingAvgLocalTopologyRunner.java View source code
/**
   * Return the object creating our moving average topology.
   */
private static StormTopology buildTopology() {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("stock-ticks-spout", new StockTicksSpout());
    builder.setBolt("hdfs-persister-bolt", createHdfsBolt()).shuffleGrouping("stock-ticks-spout");
    builder.setBolt("parse-ticks", new ParseTicksBolt()).shuffleGrouping("stock-ticks-spout");
    builder.setBolt("calc-moving-avg", new CalcMovingAvgBolt(), 2).fieldsGrouping("parse-ticks", new Fields("ticker"));
    return builder.createTopology();
}
Example 60
Project: Real-Time_Analytics_with_Apache_Storm__Udacity_Course-master  File: TweetTopology.java View source code
public static void main(String[] args) throws Exception {
    // create the topology
    TopologyBuilder builder = new TopologyBuilder();
    /*
     * In order to create the spout, you need to get twitter credentials
     * If you need to use Twitter firehose/Tweet stream for your idea,
     * create a set of credentials by following the instructions at
     *
     * https://dev.twitter.com/discussions/631
     *
     */
    // now create the tweet spout with the credentials
    TweetSpout tweetSpout = new TweetSpout("[Your customer key]", "[Your secret key]", "[Your access token]", "[Your access secret]");
    // attach the tweet spout to the topology - parallelism of 1
    builder.setSpout("tweet-spout", tweetSpout, 1);
    //*********************************************************************
    // Complete the Topology.
    // Part 1: // attach the parse tweet bolt, parallelism of 10 (what grouping is needed?)
    builder.setBolt("parse-tweet-bolt", new ParseTweetBolt(), 10).shuffleGrouping("tweet-spout");
    // Part 2: // attach the count bolt, parallelism of 15 (what grouping is needed?)
    builder.setBolt("count-bolt", new CountBolt(), 15).fieldsGrouping("parse-tweet-bolt", new Fields("tweet-word"));
    // Part 3: attach the report bolt, parallelism of 1 (what grouping is needed?)
    builder.setBolt("report-bolt", new ReportBolt(), 1).globalGrouping("count-bolt");
    // Submit and run the topology.
    //*********************************************************************
    // create the default config object
    Config conf = new Config();
    // set the config in debugging mode
    conf.setDebug(true);
    if (args != null && args.length > 0) {
        // run it in a live cluster
        // set the number of workers for running all spout and bolt tasks
        conf.setNumWorkers(3);
        // create the topology and submit with config
        StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
    } else {
        // run it in a simulated local cluster
        // set the number of threads to run - similar to setting number of workers in live cluster
        conf.setMaxTaskParallelism(3);
        // create the local cluster instance
        LocalCluster cluster = new LocalCluster();
        // submit the topology to the local cluster
        cluster.submitTopology("tweet-word-count", conf, builder.createTopology());
        // let the topology run for 30 seconds. note topologies never terminate!
        Utils.sleep(30000);
        // now kill the topology
        cluster.killTopology("tweet-word-count");
        // we are done, so shutdown the local cluster
        cluster.shutdown();
    }
}
Example 61
Project: recsys-online-master  File: Recsys.java View source code
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {
    logger.info("begin to running recsys.");
    BrokerHosts brokerHosts = new ZkHosts(Constants.kafka_zk_address);
    SpoutConfig spoutConfig = new SpoutConfig(brokerHosts, Constants.kafka_topic, Constants.kafka_zk_root, Constants.kafka_id);
    Config conf = new Config();
    Map<String, String> map = new HashMap<String, String>();
    map.put("metadata.broker.list", Constants.kakfa_broker_list);
    map.put("serializer.class", "kafka.serializer.StringEncoder");
    conf.put("kafka.broker.properties", map);
    //		conf.put("topic", "topic2");
    spoutConfig.scheme = new SchemeAsMultiScheme(new MessageScheme());
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new KafkaSpout(spoutConfig));
    builder.setBolt("bolt", new HBaseStoreBolt()).shuffleGrouping("spout");
    if (!islocal) {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopology(Constants.storm_topology_name, conf, builder.createTopology());
    } else {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(Constants.storm_topology_name, conf, builder.createTopology());
        Utils.sleep(100000);
        cluster.killTopology(Constants.storm_topology_name);
        cluster.shutdown();
    }
    logger.info("run recsys finish.");
}
Example 62
Project: storm-cassandra-master  File: CassandraBoltTest.java View source code
@Test
public void testBolt() throws Exception {
    TupleMapper<String, String, String> tupleMapper = new DefaultTupleMapper(KEYSPACE, "users", "VALUE");
    String configKey = "cassandra-config";
    CassandraBatchingBolt<String, String, String> bolt = new CassandraBatchingBolt<String, String, String>(configKey, tupleMapper);
    TopologyBuilder builder = new TopologyBuilder();
    builder.setBolt("TEST_BOLT", bolt);
    Fields fields = new Fields("VALUE");
    TopologyContext context = new MockTopologyContext(builder.createTopology(), fields);
    Config config = new Config();
    config.put(Config.TOPOLOGY_MAX_SPOUT_PENDING, 5000);
    Map<String, Object> clientConfig = new HashMap<String, Object>();
    clientConfig.put(StormCassandraConstants.CASSANDRA_HOST, "localhost:9160");
    clientConfig.put(StormCassandraConstants.CASSANDRA_KEYSPACE, Arrays.asList(new String[] { KEYSPACE }));
    config.put(configKey, clientConfig);
    bolt.prepare(config, context, null);
    System.out.println("Bolt Preparation Complete.");
    Values values = new Values(42);
    Tuple tuple = new TupleImpl(context, values, 5, "test");
    bolt.execute(tuple);
    // wait very briefly for the batch to complete
    Thread.sleep(250);
    AstyanaxContext<Keyspace> astyContext = newContext("localhost:9160", KEYSPACE);
    Keyspace ks = astyContext.getEntity();
    Column<String> result = ks.prepareQuery(new ColumnFamily<String, String>("users", StringSerializer.get(), StringSerializer.get())).getKey("42").getColumn("VALUE").execute().getResult();
    assertEquals("42", result.getStringValue());
}
Example 63
Project: Storm-ud381-master  File: TweetTopology.java View source code
public static void main(String[] args) throws Exception {
    // create the topology
    TopologyBuilder builder = new TopologyBuilder();
    /*
     * In order to create the spout, you need to get twitter credentials
     * If you need to use Twitter firehose/Tweet stream for your idea,
     * create a set of credentials by following the instructions at
     *
     * https://dev.twitter.com/discussions/631
     *
     */
    // now create the tweet spout with the credentials
    TweetSpout tweetSpout = new TweetSpout();
    // attach the tweet spout to the topology - parallelism of 1
    builder.setSpout("tweet-spout", tweetSpout, 1);
    //*********************************************************************
    // Complete the Topology.
    // Part 1: // attach the parse tweet bolt, parallelism of 10 (what grouping is needed?)
    // Part 2: // attach the count bolt, parallelism of 15 (what grouping is needed?)
    // Part 3: attach the report bolt, parallelism of 1 (what grouping is needed?)
    // Submit and run the topology.
    //*********************************************************************
    // create the default config object
    Config conf = new Config();
    // set the config in debugging mode
    conf.setDebug(true);
    if (args != null && args.length > 0) {
        // run it in a live cluster
        // set the number of workers for running all spout and bolt tasks
        conf.setNumWorkers(3);
        // create the topology and submit with config
        StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
    } else {
        // run it in a simulated local cluster
        // set the number of threads to run - similar to setting number of workers in live cluster
        conf.setMaxTaskParallelism(3);
        // create the local cluster instance
        LocalCluster cluster = new LocalCluster();
        // submit the topology to the local cluster
        cluster.submitTopology("tweet-word-count", conf, builder.createTopology());
        // let the topology run for 30 seconds. note topologies never terminate!
        Utils.sleep(30000);
        // now kill the topology
        cluster.killTopology("tweet-word-count");
        // we are done, so shutdown the local cluster
        cluster.shutdown();
    }
}
Example 64
Project: StormTweetsSentimentD3UKViz-master  File: SentimentAnalysisTopology.java View source code
public static final void main(final String[] args) throws Exception {
    final ApplicationContext applicationContext = new ClassPathXmlApplicationContext("applicationContext.xml");
    final JmsProvider jmsProvider = new SpringJmsProvider(applicationContext, "jmsConnectionFactory", "notificationQueue");
    final TopologyBuilder topologyBuilder = new TopologyBuilder();
    final JmsBolt jmsBolt = new JmsBolt();
    jmsBolt.setJmsProvider(jmsProvider);
    jmsBolt.setJmsMessageProducer((session, input) -> {
        final String json = "{\"stateCode\":\"" + input.getString(0) + "\", \"sentiment\":" + input.getInteger(1) + "}";
        return session.createTextMessage(json);
    });
    try {
        final Config config = new Config();
        config.setMessageTimeoutSecs(120);
        config.setDebug(true);
        topologyBuilder.setSpout("twitterspout", new TwitterSpout());
        topologyBuilder.setBolt("statelocatorbolt", new StateLocatorBolt()).shuffleGrouping("twitterspout");
        topologyBuilder.setBolt("sentimentcalculatorbolt", new SentimentCalculatorBolt()).fieldsGrouping("statelocatorbolt", new Fields("state"));
        topologyBuilder.setBolt("jmsBolt", jmsBolt).fieldsGrouping("sentimentcalculatorbolt", new Fields("stateCode"));
        //Submit it to the cluster, or submit it locally
        if (null != args && 0 < args.length) {
            config.setNumWorkers(3);
            StormSubmitter.submitTopology(args[0], config, topologyBuilder.createTopology());
        } else {
            config.setMaxTaskParallelism(10);
            final LocalCluster localCluster = new LocalCluster();
            localCluster.submitTopology(Constants.TOPOLOGY_NAME, config, topologyBuilder.createTopology());
            //Run this topology for 600 seconds so that we can complete processing of decent # of tweets.
            Utils.sleep(600 * 1000);
            LOGGER.info("Shutting down the cluster...");
            localCluster.killTopology(Constants.TOPOLOGY_NAME);
            localCluster.shutdown();
            Runtime.getRuntime().addShutdownHook(new Thread() {

                @Override
                public void run() {
                    LOGGER.info("Shutting down the cluster...");
                    localCluster.killTopology(Constants.TOPOLOGY_NAME);
                    localCluster.shutdown();
                }
            });
        }
    } catch (final Exception exception) {
        exception.printStackTrace();
    }
    LOGGER.info("\n\n\n\t\t*****Please clean your temp folder \"{}\" now!!!*****", System.getProperty("java.io.tmpdir"));
}
Example 65
Project: StormTweetsSentimentD3Viz-master  File: SentimentAnalysisTopology.java View source code
public static final void main(final String[] args) throws Exception {
    final ApplicationContext applicationContext = new ClassPathXmlApplicationContext("applicationContext.xml");
    final JmsProvider jmsProvider = new SpringJmsProvider(applicationContext, "jmsConnectionFactory", "notificationQueue");
    final TopologyBuilder topologyBuilder = new TopologyBuilder();
    final JmsBolt jmsBolt = new JmsBolt();
    jmsBolt.setJmsProvider(jmsProvider);
    jmsBolt.setJmsMessageProducer((session, input) -> {
        final String json = "{\"stateCode\":\"" + input.getString(0) + "\", \"sentiment\":" + input.getInteger(1) + "}";
        return session.createTextMessage(json);
    });
    try {
        final Config config = new Config();
        config.setMessageTimeoutSecs(120);
        config.setDebug(true);
        topologyBuilder.setSpout("twitterspout", new TwitterSpout());
        topologyBuilder.setBolt("statelocatorbolt", new StateLocatorBolt()).shuffleGrouping("twitterspout");
        topologyBuilder.setBolt("sentimentcalculatorbolt", new SentimentCalculatorBolt()).fieldsGrouping("statelocatorbolt", new Fields("state"));
        topologyBuilder.setBolt("jmsBolt", jmsBolt).fieldsGrouping("sentimentcalculatorbolt", new Fields("stateCode"));
        //Submit it to the cluster, or submit it locally
        if (null != args && 0 < args.length) {
            config.setNumWorkers(3);
            StormSubmitter.submitTopology(args[0], config, topologyBuilder.createTopology());
        } else {
            config.setMaxTaskParallelism(10);
            final LocalCluster localCluster = new LocalCluster();
            localCluster.submitTopology(Constants.TOPOLOGY_NAME, config, topologyBuilder.createTopology());
            //Run this topology for 600 seconds so that we can complete processing of decent # of tweets.
            Utils.sleep(600 * 1000);
            LOGGER.info("Shutting down the cluster...");
            localCluster.killTopology(Constants.TOPOLOGY_NAME);
            localCluster.shutdown();
            Runtime.getRuntime().addShutdownHook(new Thread() {

                @Override
                public void run() {
                    LOGGER.info("Shutting down the cluster...");
                    localCluster.killTopology(Constants.TOPOLOGY_NAME);
                    localCluster.shutdown();
                }
            });
        }
    } catch (final Exception exception) {
        exception.printStackTrace();
    }
    LOGGER.info("\n\n\n\t\t*****Please clean your temp folder \"{}\" now!!!*****", System.getProperty("java.io.tmpdir"));
}
Example 66
Project: streamreduce-core-master  File: JuggaloaderTopology.java View source code
public StormTopology createJuggaloaderTopology() {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("eventSpout", new EventSpout());
    builder.setSpout("commandSpout", new JuggaloaderCommandSpout());
    builder.setBolt("accountMetricsBolt", new AccountMetricsBolt()).shuffleGrouping("eventSpout", GroupingNameConstants.ACCOUNT_GROUPING_NAME);
    builder.setBolt("connectionMetricsBolt", new ConnectionMetricsBolt()).shuffleGrouping("eventSpout", GroupingNameConstants.CONNECTION_GROUPING_NAME);
    builder.setBolt("inventoryItemMetricsBolt", new InventoryItemMetricsBolt()).shuffleGrouping("eventSpout", GroupingNameConstants.INVENTORY_ITEM_GROUPING_NAME);
    builder.setBolt("userMetricsBolt", new UserMetricsBolt()).shuffleGrouping("eventSpout", GroupingNameConstants.USER_GROUPING_NAME);
    builder.setBolt("messageMetricsBolt", new SobaMessageMetricsBolt()).shuffleGrouping("eventSpout", GroupingNameConstants.MESSAGE_GROUPING_NAME);
    builder.setBolt("second", new JuggaloaderTimeBaseBolt(0)).fieldsGrouping("accountMetricsBolt", new Fields("metricAccount", "metricName")).fieldsGrouping("connectionMetricsBolt", new Fields("metricAccount", "metricName")).fieldsGrouping("inventoryItemMetricsBolt", new Fields("metricAccount", "metricName")).fieldsGrouping("userMetricsBolt", new Fields("metricAccount", "metricName")).fieldsGrouping("messageMetricsBolt", new Fields("metricAccount", "metricName")).fieldsGrouping("commandSpout", new Fields("metricAccount", "metricName"));
    builder.setBolt("minute", new JuggaloaderTimeBaseBolt(Constants.PERIOD_MINUTE)).fieldsGrouping("second", new Fields("metricAccount", "metricName"));
    builder.setBolt("hour", new JuggaloaderTimeBaseBolt(Constants.PERIOD_HOUR)).fieldsGrouping("minute", new Fields("metricAccount", "metricName"));
    builder.setBolt("day", new JuggaloaderTimeBaseBolt(Constants.PERIOD_DAY)).fieldsGrouping("hour", new Fields("metricAccount", "metricName"));
    builder.setBolt("week", new JuggaloaderTimeBaseBolt(Constants.PERIOD_WEEK)).fieldsGrouping("day", new Fields("metricAccount", "metricName"));
    builder.setBolt("month", new JuggaloaderTimeBaseBolt(Constants.PERIOD_MONTH)).fieldsGrouping("week", new Fields("metricAccount", "metricName"));
    builder.setBolt("persistence", new PersistMetricsBolt()).shuffleGrouping("minute").shuffleGrouping("hour").shuffleGrouping("day").shuffleGrouping("week").shuffleGrouping("month");
    // builder.setBolt("message", new JuggaloaderAnomalyGeneratorBolt()) // TODO - replace the next line with this one when SOBA-1521 is done
    builder.setBolt("message", new JuggaloaderMessageGeneratorBolt()).fieldsGrouping("second", new Fields("metricAccount", "metricName")).fieldsGrouping("minute", new Fields("metricAccount", "metricName")).fieldsGrouping("hour", new Fields("metricAccount", "metricName")).fieldsGrouping("day", new Fields("metricAccount", "metricName")).fieldsGrouping("week", new Fields("metricAccount", "metricName")).fieldsGrouping("month", new Fields("metricAccount", "metricName"));
    return builder.createTopology();
}
Example 67
Project: aws-big-data-blog-master  File: SampleTopology.java View source code
public static void main(String[] args) throws IllegalArgumentException, KeeperException, InterruptedException, AlreadyAliveException, InvalidTopologyException, IOException {
    String propertiesFile = null;
    String mode = null;
    if (args.length != 2) {
        printUsageAndExit();
    } else {
        propertiesFile = args[0];
        mode = args[1];
    }
    configure(propertiesFile);
    final KinesisSpoutConfig config = new KinesisSpoutConfig(streamName, zookeeperEndpoint).withZookeeperPrefix(zookeeperPrefix).withInitialPositionInStream(initialPositionInStream).withRegion(Regions.fromName(regionName));
    final KinesisSpout spout = new KinesisSpout(config, new CustomCredentialsProviderChain(), new ClientConfiguration());
    TopologyBuilder builder = new TopologyBuilder();
    LOG.info("Using Kinesis stream: " + config.getStreamName());
    // Using number of shards as the parallelism hint for the spout.
    builder.setSpout("Kinesis", spout, 2);
    builder.setBolt("Parse", new ParseReferrerBolt(), 6).shuffleGrouping("Kinesis");
    builder.setBolt("Count", new RollingCountBolt(5, 2, elasticCacheRedisEndpoint), 6).fieldsGrouping("Parse", new Fields("referrer"));
    //builder.setBolt("Count", new CountReferrerBolt(), 12).fieldsGrouping("Parse", new Fields("referrer"));
    Config topoConf = new Config();
    topoConf.setFallBackOnJavaSerialization(true);
    topoConf.setDebug(false);
    if (mode.equals("LocalMode")) {
        LOG.info("Starting sample storm topology in LocalMode ...");
        new LocalCluster().submitTopology("test_spout", topoConf, builder.createTopology());
    } else if (mode.equals("RemoteMode")) {
        topoConf.setNumWorkers(1);
        topoConf.setMaxSpoutPending(5000);
        LOG.info("Submitting sample topology " + topologyName + " to remote cluster.");
        StormSubmitter.submitTopology(topologyName, topoConf, builder.createTopology());
    } else {
        printUsageAndExit();
    }
}
Example 68
Project: flowbox-master  File: FlowboxFactory.java View source code
/**
   * @return A topology builder than can further be customized.
   */
public TopologyBuilder createFlowbox() {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(EVENT, eventsSpout, 1);
    builder.setSpout(FLOW_LOADER_STREAM, ruleSpout, 1);
    builder.setSpout("tick", new TickSpout(1000), 1);
    // kicks off a flow determining where to start
    builder.setBolt(INITIALIZER, new FlowInitializerBolt(), parallelismHint).shuffleGrouping(EVENT).allGrouping(FLOW_LOADER_STREAM, FLOW_LOADER_STREAM);
    declarebolt(builder, FILTER, new FilterBolt(), parallelismHint);
    declarebolt(builder, SELECT, new SelectorBolt(), parallelismHint);
    declarebolt(builder, PARTITION, new PartitionBolt(), parallelismHint);
    declarebolt(builder, STOP_GATE, new StopGateBolt(), parallelismHint);
    declarebolt(builder, AGGREGATE, new AggregatorBolt(), parallelismHint);
    declarebolt(builder, JOIN, new JoinBolt(), parallelismHint);
    declarebolt(builder, EACH, new EachBolt(), parallelismHint);
    declarebolt(builder, OUTPUT, outputBolt, parallelismHint);
    return builder;
}
Example 69
Project: Kafka-Storm-ElasticSearch-master  File: AuditActiveLoginsTopology.java View source code
public StormTopology buildTopology(Properties properties) {
    // Load properties for the storm topology
    String kafkaTopic = properties.getProperty("kafka.topic");
    SpoutConfig kafkaConfig = new SpoutConfig(kafkaBrokerHosts, kafkaTopic, "", "storm");
    kafkaConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    TopologyBuilder builder = new TopologyBuilder();
    // Specific audit logs analysis bolts
    AuditLoginsCounterBolt loginCounterbolt = new AuditLoginsCounterBolt();
    AuditParserBolt auditParserBolt = new AuditParserBolt();
    // Elastic search bolt
    TupleMapper tupleMapper = new DefaultTupleMapper();
    ElasticSearchBolt elasticSearchBolt = new ElasticSearchBolt(tupleMapper);
    // Topology scheme: KafkaSpout -> auditParserBolt -> loginCounterBolt -> elasticSearchBolt
    builder.setSpout("KafkaSpout", new KafkaSpout(kafkaConfig), 1);
    builder.setBolt("ParseBolt", auditParserBolt, 1).shuffleGrouping("KafkaSpout");
    builder.setBolt("CountBolt", loginCounterbolt, 1).shuffleGrouping("ParseBolt");
    builder.setBolt("ElasticSearchBolt", elasticSearchBolt, 1).fieldsGrouping("CountBolt", new Fields("id", "index", "type", "document"));
    return builder.createTopology();
}
Example 70
Project: kappaeg-master  File: CDRStormTopology.java View source code
public void setupLoggingBolts(TopologyBuilder bldr) {
    LoggingBolt cdrLoggingBolt = new LoggingBolt();
    bldr.setBolt("cdrLoggingBolt", cdrLoggingBolt, 4).shuffleGrouping("cdrKafkaSpout");
    LoggingBolt twitterLoggingBolt = new LoggingBolt();
    bldr.setBolt("twitterLoggingBolt", twitterLoggingBolt, 4).shuffleGrouping("twitterKafkaSpout");
}
Example 71
Project: Pulsar-master  File: StormExample.java View source code
public static void main(String[] args) throws PulsarClientException {
    ClientConfiguration clientConf = new ClientConfiguration();
    // String authPluginClassName = "com.yahoo.pulsar.client.impl.auth.MyAuthentication";
    // String authParams = "key1:val1,key2:val2";
    // clientConf.setAuthentication(authPluginClassName, authParams);
    String topic1 = "persistent://my-property/use/my-ns/my-topic1";
    String topic2 = "persistent://my-property/use/my-ns/my-topic2";
    String subscriptionName1 = "my-subscriber-name1";
    String subscriptionName2 = "my-subscriber-name2";
    // create spout
    PulsarSpoutConfiguration spoutConf = new PulsarSpoutConfiguration();
    spoutConf.setServiceUrl(serviceUrl);
    spoutConf.setTopic(topic1);
    spoutConf.setSubscriptionName(subscriptionName1);
    spoutConf.setMessageToValuesMapper(messageToValuesMapper);
    PulsarSpout spout = new PulsarSpout(spoutConf, clientConf);
    // create bolt
    PulsarBoltConfiguration boltConf = new PulsarBoltConfiguration();
    boltConf.setServiceUrl(serviceUrl);
    boltConf.setTopic(topic2);
    boltConf.setTupleToMessageMapper(tupleToMessageMapper);
    PulsarBolt bolt = new PulsarBolt(boltConf, clientConf);
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("testSpout", spout);
    builder.setBolt("testBolt", bolt).shuffleGrouping("testSpout");
    Config conf = new Config();
    conf.setNumWorkers(2);
    conf.setDebug(true);
    conf.registerMetricsConsumer(PulsarMetricsConsumer.class);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("test", conf, builder.createTopology());
    Utils.sleep(10000);
    PulsarClient pulsarClient = PulsarClient.create(serviceUrl, clientConf);
    // create a consumer on topic2 to receive messages from the bolt when the processing is done
    Consumer consumer = pulsarClient.subscribe(topic2, subscriptionName2);
    // create a producer on topic1 to send messages that will be received by the spout
    Producer producer = pulsarClient.createProducer(topic1);
    for (int i = 0; i < 10; i++) {
        String msg = "msg-" + i;
        producer.send(msg.getBytes());
        LOG.info("Message {} sent", msg);
    }
    Message msg = null;
    for (int i = 0; i < 10; i++) {
        msg = consumer.receive(1, TimeUnit.SECONDS);
        LOG.info("Message {} received", new String(msg.getData()));
    }
    cluster.killTopology("test");
    cluster.shutdown();
}
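One caveat: Consumer.receive(1, TimeUnit.SECONDS) returns null when the timeout elapses, so the receive loop above can throw a NullPointerException if processing is slow. A guarded variant:
    for (int i = 0; i < 10; i++) {
        Message received = consumer.receive(1, TimeUnit.SECONDS);
        if (received == null) {
            // timed out without a message; skip instead of dereferencing null
            continue;
        }
        LOG.info("Message {} received", new String(received.getData()));
    }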
Example 72
Project: storm-jms-master  File: ExampleJmsTopology.java View source code
@SuppressWarnings("serial")
public static void main(String[] args) throws Exception {
    // JMS Queue Provider
    JmsProvider jmsQueueProvider = new SpringJmsProvider("jms-activemq.xml", "jmsConnectionFactory", "notificationQueue");
    // JMS Topic provider
    JmsProvider jmsTopicProvider = new SpringJmsProvider("jms-activemq.xml", "jmsConnectionFactory", "notificationTopic");
    // JMS Producer
    JmsTupleProducer producer = new JsonTupleProducer();
    // JMS Queue Spout
    JmsSpout queueSpout = new JmsSpout();
    queueSpout.setJmsProvider(jmsQueueProvider);
    queueSpout.setJmsTupleProducer(producer);
    queueSpout.setJmsAcknowledgeMode(Session.CLIENT_ACKNOWLEDGE);
    // allow multiple instances
    queueSpout.setDistributed(true);
    TopologyBuilder builder = new TopologyBuilder();
    // spout with 5 parallel instances
    builder.setSpout(JMS_QUEUE_SPOUT, queueSpout, 5);
    // intermediate bolt, subscribes to jms spout, anchors on tuples, and auto-acks
    builder.setBolt(INTERMEDIATE_BOLT, new GenericBolt("INTERMEDIATE_BOLT", true, true, new Fields("json")), 3).shuffleGrouping(JMS_QUEUE_SPOUT);
    // bolt that subscribes to the intermediate bolt, and auto-acks
    // messages.
    builder.setBolt(FINAL_BOLT, new GenericBolt("FINAL_BOLT", true, true), 3).shuffleGrouping(INTERMEDIATE_BOLT);
    // bolt that subscribes to the intermediate bolt, and publishes to a JMS Topic		
    JmsBolt jmsBolt = new JmsBolt();
    jmsBolt.setJmsProvider(jmsTopicProvider);
    // anonymous message producer just calls toString() on the tuple to create a jms message
    jmsBolt.setJmsMessageProducer(new JmsMessageProducer() {

        @Override
        public Message toMessage(Session session, Tuple input) throws JMSException {
            System.out.println("Sending JMS Message:" + input.toString());
            TextMessage tm = session.createTextMessage(input.toString());
            return tm;
        }
    });
    builder.setBolt(JMS_TOPIC_BOLT, jmsBolt).shuffleGrouping(INTERMEDIATE_BOLT);
    // JMS Topic spout
    JmsSpout topicSpout = new JmsSpout();
    topicSpout.setJmsProvider(jmsTopicProvider);
    topicSpout.setJmsTupleProducer(producer);
    topicSpout.setJmsAcknowledgeMode(Session.CLIENT_ACKNOWLEDGE);
    topicSpout.setDistributed(false);
    builder.setSpout(JMS_TOPIC_SPOUT, topicSpout);
    builder.setBolt(ANOTHER_BOLT, new GenericBolt("ANOTHER_BOLT", true, true), 1).shuffleGrouping(JMS_TOPIC_SPOUT);
    Config conf = new Config();
    if (args.length > 0) {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
    } else {
        conf.setDebug(true);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("storm-jms-example", conf, builder.createTopology());
        Utils.sleep(60000);
        cluster.killTopology("storm-jms-example");
        cluster.shutdown();
    }
}
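Since the anonymous class above implements a single method, JmsMessageProducer is presumably a single-abstract-method interface; if so, a Java 8 lambda expresses the same producer (a sketch, assuming nothing else on the interface is abstract):
    // Same producer as the anonymous class above, as a lambda (Java 8+).
    jmsBolt.setJmsMessageProducer((session, input) ->
            session.createTextMessage(input.toString()));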
Example 73
Project: storm-s3-master  File: S3Topology.java View source code
public static void main(String[] args) throws Exception {
    Config config = new Config();
    config.put(PREFIX, "test");
    config.put(EXTENSION, ".txt");
    config.put(PATH, "foo");
    config.put(ROTATION_SIZE, 1.0F);
    config.put(ROTATION_UNIT, "MB");
    config.put(BUCKET_NAME, "test-bucket");
    config.put(CONTENT_TYPE, "text/plain");
    SentenceSpout spout = new SentenceSpout();
    S3Bolt bolt = new S3Bolt();
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(SENTENCE_SPOUT_ID, spout, 1);
    // SentenceSpout --> MyBolt
    builder.setBolt(BOLT_ID, bolt, 2).shuffleGrouping(SENTENCE_SPOUT_ID);
    if (args.length == 0) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(TOPOLOGY_NAME, config, builder.createTopology());
        waitForSeconds(120);
        cluster.killTopology(TOPOLOGY_NAME);
        cluster.shutdown();
        System.exit(0);
    } else if (args.length == 1) {
        StormSubmitter.submitTopology(args[0], config, builder.createTopology());
    } else {
        System.out.println("Usage: S3Topology [topology name]");
    }
}
Example 74
Project: strato-workshop-master  File: WordCountTopology.java View source code
public static void main(String[] args) throws Exception {
    if (args != null && args.length == 7) {
        try {
            boolean runOnCluster = args[0].equals("cluster");
            String fileName = args[1];
            String counterPath = args[2];
            if (!(new File(fileName)).exists()) {
                throw new FileNotFoundException();
            }
            int spoutParallelism = Integer.parseInt(args[3]);
            int counterParallelism = Integer.parseInt(args[4]);
            int sinkParallelism = Integer.parseInt(args[5]);
            int numberOfWorkers = Integer.parseInt(args[6]);
            TopologyBuilder builder = new TopologyBuilder();
            builder.setSpout("spout", new TextSpout(fileName, counterPath), spoutParallelism);
            builder.setBolt("count", new WordCount(counterPath), counterParallelism).fieldsGrouping("spout", new Fields("word"));
            builder.setBolt("sink", new Sink(), sinkParallelism).shuffleGrouping("count");
            Config conf = new Config();
            conf.setDebug(false);
            conf.setNumWorkers(numberOfWorkers);
            if (runOnCluster) {
                StormSubmitter.submitTopology("wordcount", conf, builder.createTopology());
            } else {
                // running locally for 50 seconds
                conf.setMaxTaskParallelism(3);
                LocalCluster cluster = new LocalCluster();
                cluster.submitTopology("word-count", conf, builder.createTopology());
                Thread.sleep(50000);
                cluster.shutdown();
            }
        } catch (NumberFormatException e) {
            printUsage();
        } catch (FileNotFoundException e) {
            printUsage();
        }
    } else {
        printUsage();
    }
}
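Both catch blocks fall through to the same usage message, so on Java 7+ they collapse into a single multi-catch:
    } catch (NumberFormatException | FileNotFoundException e) {
        // bad numeric argument or missing input file: same remedy either way
        printUsage();
    }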
Example 75
Project: streamflow-master  File: StandardTopologyCreator.java View source code
@Override
public StormTopology build(Topology topology, StreamflowConfig configuration, boolean isClusterMode) throws FrameworkException {
    TopologyBuilder builder = new TopologyBuilder();
    // Iterate over all of the nodes to add them to the topology
    for (TopologyComponent component : topology.getDeployedConfig().getComponents().values()) {
        if (component.getType().equalsIgnoreCase(Component.STORM_SPOUT_TYPE)) {
            SpoutDeclarer spoutDeclarer;
            // potential startup of a local DRPC server
            if (component.getMainClass().equals(DRPCSpout.class.getName())) {
                // Attempt to get the name of the DRPC function
                String drpcFunction = component.getProperties().get("drpc-function");
                if (StringUtils.isBlank(drpcFunction)) {
                    drpcFunction = topology.getId();
                }
                if (configuration.getSelectedCluster().getId().equals(Cluster.LOCAL)) {
                    // Local cluster deploys require manual startup of a DRPC server
                    LocalDRPC drpcServer = new LocalDRPC();
                    // Create the DRPC spout specifying the name of the DRPC function
                    DRPCSpout drpcSpout = new DRPCSpout(drpcFunction, drpcServer);
                    spoutDeclarer = builder.setSpout(component.getKey(), drpcSpout, component.getParallelism());
                } else {
                    // Create the DRPC spout using the DRPC server on the cluster
                    DRPCSpout drpcSpout = new DRPCSpout(drpcFunction);
                    spoutDeclarer = builder.setSpout(component.getKey(), drpcSpout, component.getParallelism());
                }
            } else {
                RichSpoutWrapper richSpoutWrapper = new RichSpoutWrapper(topology, component, isClusterMode, configuration);
                // Add the spout instance to the topology
                spoutDeclarer = builder.setSpout(component.getKey(), richSpoutWrapper, component.getParallelism());
            }
            // Add the properties for the specific component as component specific properties
            for (Map.Entry<String, String> componentProperty : component.getProperties().entrySet()) {
                spoutDeclarer.addConfiguration(componentProperty.getKey(), componentProperty.getValue());
            }
        } else if (component.getType().equalsIgnoreCase(Component.STORM_BOLT_TYPE)) {
            BoltDeclarer boltDeclarer;
            try {
                // Attempt to load the bolt as a RichBolt
                RichBoltWrapper richBoltWrapper = new RichBoltWrapper(topology, component, isClusterMode, configuration);
                // Add the spout instance to the topology
                boltDeclarer = builder.setBolt(component.getKey(), richBoltWrapper, component.getParallelism());
            } catch (FrameworkException ex) {
                BasicBoltWrapper basicBoltWrapper = new BasicBoltWrapper(topology, component, isClusterMode, configuration);
                boltDeclarer = builder.setBolt(component.getKey(), basicBoltWrapper, component.getParallelism());
            }
            // Add the properties for the specific component as component specific properties
            for (Map.Entry<String, String> componentProperty : component.getProperties().entrySet()) {
                boltDeclarer.addConfiguration(componentProperty.getKey(), componentProperty.getValue());
            }
            // Iterate over each of the edges to see if it is the target
            for (TopologyConnector connector : topology.getDeployedConfig().getConnectors().values()) {
                // The current edge is the target for the edge
                if (connector.getTargetComponentKey().equals(component.getKey())) {
                    String grouping = connector.getGrouping();
                    if (grouping.equalsIgnoreCase("Shuffle")) {
                        boltDeclarer.shuffleGrouping(connector.getSourceComponentKey(), connector.getSourceComponentInterface());
                    } else if (grouping.equalsIgnoreCase("Fields")) {
                        boltDeclarer.fieldsGrouping(connector.getSourceComponentKey(), connector.getSourceComponentInterface(), new Fields(connector.getGroupingRef()));
                    } else if (grouping.equalsIgnoreCase("All")) {
                        boltDeclarer.allGrouping(connector.getSourceComponentKey(), connector.getSourceComponentInterface());
                    } else if (grouping.equalsIgnoreCase("Global")) {
                        boltDeclarer.globalGrouping(connector.getSourceComponentKey(), connector.getSourceComponentInterface());
                    } else if (grouping.equalsIgnoreCase("None")) {
                        boltDeclarer.noneGrouping(connector.getSourceComponentKey(), connector.getSourceComponentInterface());
                    } else if (grouping.equalsIgnoreCase("Direct")) {
                        boltDeclarer.directGrouping(connector.getSourceComponentKey(), connector.getSourceComponentInterface());
                    }
                }
            }
        }
    }
    // Build the topology using the topology configured in the builder
    return builder.createTopology();
}
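The grouping dispatch above maps a string name onto a BoltDeclarer call; a switch on the normalized name (a sketch, not the project's code) expresses the same wiring more compactly:
    String src = connector.getSourceComponentKey();
    String stream = connector.getSourceComponentInterface();
    switch (grouping.toLowerCase()) {
        case "shuffle": boltDeclarer.shuffleGrouping(src, stream); break;
        case "fields":  boltDeclarer.fieldsGrouping(src, stream,
                            new Fields(connector.getGroupingRef())); break;
        case "all":     boltDeclarer.allGrouping(src, stream); break;
        case "global":  boltDeclarer.globalGrouping(src, stream); break;
        case "none":    boltDeclarer.noneGrouping(src, stream); break;
        case "direct":  boltDeclarer.directGrouping(src, stream); break;
        default:        /* unknown grouping: leave unwired */ break;
    }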
Example 76
Project: Twitalyse-master  File: FirstTwitalyseTopology.java View source code
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    PropertyLoader propLoader = new PropertyLoader();
    // get twitter credentials
    Properties twitterProps = propLoader.loadSystemProperty("twitterProps.properties");
    String consumerKey = twitterProps.getProperty("consumerKey");
    String consumerKeySecure = twitterProps.getProperty("consumerKeySecure");
    String token = twitterProps.getProperty("token");
    String tokenSecret = twitterProps.getProperty("tokenSecret");
    // get ignoredWords
    String ignoreWords = propLoader.loadSystemProperty("stopWords.properties").getProperty("ignoreWords");
    List<String> ignoreList = Arrays.asList(ignoreWords.split(";"));
    // get redis configuration
    Properties redisProps = propLoader.loadSystemProperty("redisProps.properties");
    String host = redisProps.getProperty("host");
    int port = Integer.valueOf(redisProps.getProperty("port"));
    Jedis jedis = new Jedis(host, port);
    jedis.getClient().setTimeout(9999);
    // Jedis keys:
    //   Name    | Type    | Description
    //   --------+---------+-----------------------------
    //   words   | HashMap | Counts all words.
    //   #stati  | K, V    | Counts all status updates.
    //   #words  | K, V    | Counts all words.
    // TwitterSpout
    TwitterStreamSpout twitterStreamSpout = new TwitterStreamSpout(consumerKey, consumerKeySecure, token, tokenSecret, host, port);
    // WordCount
    GetStatusTextBolt getTextBolt = new GetStatusTextBolt();
    SplitStatusTextBolt splitStatusTextBolt = new SplitStatusTextBolt(ignoreList, host, port);
    CountWordsBolt countWordsBolt = new CountWordsBolt(host, port);
    // Source Bolt
    GetStatusSourceBolt getStatusSourceBolt = new GetStatusSourceBolt();
    CountSourceBolt countSourceBolt = new CountSourceBolt(host, port);
    // Language Bolt
    GetLanguageBolt getLanguageBolt = new GetLanguageBolt();
    CountLanguageBolt countLanguageBolt = new CountLanguageBolt(host, port);
    // WordCount
    builder.setSpout("twitterStreamSpout", twitterStreamSpout, 1);
    builder.setBolt("getTextBolt", getTextBolt).shuffleGrouping("twitterStreamSpout");
    builder.setBolt("splitStatusTextBolt", splitStatusTextBolt).shuffleGrouping("getTextBolt");
    builder.setBolt("countWordsBolt", countWordsBolt).shuffleGrouping("splitStatusTextBolt");
    // Source Bolt
    builder.setBolt("getStatusSourceBolt", getStatusSourceBolt).shuffleGrouping("twitterStreamSpout");
    builder.setBolt("countSourceBolt", countSourceBolt).shuffleGrouping("getStatusSourceBolt");
    // Language Bolt
    builder.setBolt("getLanguageBolt", getLanguageBolt).shuffleGrouping("twitterStreamSpout");
    builder.setBolt("countLanguageBolt", countLanguageBolt).shuffleGrouping("getLanguageBolt");
    Config conf = new Config();
    conf.setDebug(false);
    if (args != null && args.length > 0) {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
    } else {
        conf.setMaxTaskParallelism(3);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("twitalyse", conf, builder.createTopology());
        Thread.sleep(10000);
        cluster.shutdown();
    }
    jedis.disconnect();
}
Example 77
Project: Web-Karma-master  File: TestBasicKarmaTopology.java View source code
@SuppressWarnings({ "rawtypes", "unchecked" })
@Override
public void run(ILocalCluster cluster) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("karma-json-spout", new BasicJSONTestSpout());
    Properties basicKarmaBoltProperties = new Properties();
    basicKarmaBoltProperties.setProperty("name", "Stormy");
    basicKarmaBoltProperties.setProperty("karma.input.type", "JSON");
    basicKarmaBoltProperties.setProperty("base.uri", "http://ex.com");
    String source = null;
    try {
        source = new File(this.getClass().getClassLoader().getResource("people-model.ttl").toURI()).getAbsolutePath().toString();
        basicKarmaBoltProperties.setProperty("model.file", source);
    } catch (URISyntaxException e) {
        LOG.error("Unable to load model", e);
    }
    builder.setBolt("karma-generate-json", new KarmaBolt(basicKarmaBoltProperties, null)).shuffleGrouping("karma-json-spout");
    Set<String> sources = new HashSet<>();
    sources.add(source);
    KarmaReducerBolt reducerBolt = new KarmaReducerBolt(sources);
    builder.setBolt("karma-reducer-json", reducerBolt).fieldsGrouping("karma-generate-json", new Fields("id"));
    String inputs = IOUtils.toString(getTestResource("input/people.json"));
    JSONArray array = new JSONArray(inputs);
    List<Values> values = new LinkedList<>();
    for (int i = 0; i < array.length(); i++) {
        JSONObject obj = array.getJSONObject(i);
        values.add(new Values("a.txt", obj.toString()));
    }
    MockedSources mockedSources = new MockedSources();
    mockedSources.addMockData("karma-json-spout", values.toArray(new Values[values.size()]));
    Config config = new Config();
    config.setDebug(true);
    StormTopology topology = builder.createTopology();
    CompleteTopologyParam completeTopologyParam = new CompleteTopologyParam();
    completeTopologyParam.setMockedSources(mockedSources);
    completeTopologyParam.setStormConf(config);
    Map results = Testing.completeTopology(cluster, topology, completeTopologyParam);
    ArrayList<String> karmaJsonSpoutResults = (ArrayList<String>) results.get("karma-json-spout");
    Assert.assertEquals(7, karmaJsonSpoutResults.size());
    ArrayList<String> karmaJsonReducerResults = (ArrayList<String>) results.get("karma-reducer-json");
    Assert.assertEquals(7, karmaJsonReducerResults.size());
    ArrayList<String> karmaBoltResults = (ArrayList<String>) results.get("karma-generate-json");
    Assert.assertEquals(7, karmaBoltResults.size());
}
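The unchecked casts on the results map can be avoided with Testing.readTuples, which pulls a component's emitted tuples out of the completeTopology result (a sketch using the same component id):
    // Cast-free alternative to (ArrayList<String>) results.get(...):
    List<List<Object>> spoutTuples = Testing.readTuples(results, "karma-json-spout");
    Assert.assertEquals(7, spoutTuples.size());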
Example 78
Project: cloudpelican-lsd-master  File: Main.java View source code
public static void main(String[] args) throws Exception {
    ArrayList<String> argList = new ArrayList<String>();
    for (String arg : args) {
        argList.add(arg);
    }
    // Config
    HashMap<String, String> argsMap = new HashMap<String, String>();
    for (String arg : argList) {
        String[] split = arg.split("=", 2);
        if (split.length == 2 && split[0].trim().length() > 0 && split[1].trim().length() > 0) {
            if (split[0].equals("-zookeeper")) {
                argsMap.put("zookeeper_nodes", split[1]);
            } else if (split[0].equals("-grep")) {
                argsMap.put("match_regex", split[1]);
            } else if (split[0].equals("-topic")) {
                argsMap.put("kafka_topic", split[1]);
            } else if (split[0].equals("-supervisor-host")) {
                argsMap.put("supervisor_host", split[1]);
            } else if (split[0].equals("-supervisor-username")) {
                argsMap.put("supervisor_username", split[1]);
            } else if (split[0].equals("-supervisor-password")) {
                argsMap.put("supervisor_password", split[1]);
            } else if (split[0].equals("-conf")) {
                argsMap.put("conf_path", split[1]);
            } else if (split[0].startsWith("-")) {
                // Default
                argsMap.put(split[0].substring(1), split[1]);
            }
        }
    }
    // Default settings
    if (!argsMap.containsKey("kafka_consumer_id")) {
        argsMap.put("kafka_consumer_id", "cloudpelican_lsd_consumer");
    }
    // Settings object
    Settings settings = new Settings();
    JsonObject settingsData = new JsonObject();
    // Add light settings to json
    for (Map.Entry<String, String> kv : argsMap.entrySet()) {
        settingsData.addProperty(kv.getKey(), kv.getValue());
    }
    // Debug & load
    LOG.info(settingsData.toString());
    settings.load(settingsData);
    // Topology
    TopologyBuilder builder = new TopologyBuilder();
    // Time
    TimeZone.setDefault(TimeZone.getTimeZone("Etc/UTC"));
    // Read from kafka
    BrokerHosts hosts = new ZkHosts(settings.get("zookeeper_nodes"));
    SpoutConfig spoutConfig = new SpoutConfig(hosts, settings.get("kafka_topic"), "/" + settings.get("kafka_topic"), settings.get("kafka_consumer_id"));
    spoutConfig.startOffsetTime = kafka.api.OffsetRequest.EarliestTime();
    spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);
    int kafkaPartitions = Integer.parseInt(settings.getOrDefault("kafka_partitions", "3"));
    builder.setSpout(KAFKA_SPOUT, kafkaSpout, kafkaPartitions);
    // Parse bolt
    // No local to prevent hotspots
    builder.setBolt(PARSE_BOLT, new ParseBolt(settings), GLOBAL_CONCURRENCY * 1).shuffleGrouping(KAFKA_SPOUT);
    // Match bolt
    // No local to prevent hotspots
    builder.setBolt(MATCH_BOLT, new MatchBolt(settings), GLOBAL_CONCURRENCY * 6).shuffleGrouping(PARSE_BOLT, "messages");
    // Error classifier bolt
    builder.setBolt(ERROR_CLASSIFIER_BOLT, new ErrorClassifierBolt(settings), GLOBAL_CONCURRENCY * 1).fieldsGrouping(MATCH_BOLT, new Fields("filter_id"));
    // Supervisor result writer bolt
    // For efficiency fields grouping would have been better but creates hotspots
    builder.setBolt(SUPERVISOR_RESULT_WRITER, new SupervisorResultWriterBolt(settings), GLOBAL_CONCURRENCY * 1).shuffleGrouping(MATCH_BOLT);
    // Supervisor stats writer bolt
    builder.setBolt(ROLLUP_STATS, new RollupStatsBolt(settings), concurrency(1, 2)).fieldsGrouping(MATCH_BOLT, "match_stats", new Fields("filter_id")).fieldsGrouping(ERROR_CLASSIFIER_BOLT, "error_stats", new Fields("filter_id"));
    builder.setBolt(SUPERVISOR_STATS_WRITER, new SupervisorStatsWriterBolt(settings), concurrency(1, 4)).fieldsGrouping(ROLLUP_STATS, "rollup_stats", new Fields("filter_id"));
    // Outlier detection bolts (sharded by filter ID)
    if (Boolean.parseBoolean(settings.getOrDefault("outlier_detection_enabled", "true"))) {
        builder.setBolt(OUTLIER_DETECTION, new OutlierDetectionBolt(settings), GLOBAL_CONCURRENCY * 2).fieldsGrouping(MATCH_BOLT, "dispatch_outlier_checks", new Fields("filter_id"));
        builder.setBolt(OUTLIER_COLLECTOR, new OutlierCollectorBolt(settings), concurrency(1, 10)).shuffleGrouping(OUTLIER_DETECTION, "outliers");
    }
    // Sink
    if (settings.get("sinks") != null) {
        String[] sinkIds = settings.get("sinks").split(",");
        for (String sinkId : sinkIds) {
            // Type
            String sinkType = settings.get("sinks." + sinkId + ".type");
            AbstractSinkBolt sinkBolt = null;
            // @todo Sink factory if we have multiple types
            if (sinkType == null) {
                throw new Exception("Sink '" + sinkId + "' invalid");
            } else if (sinkType.equalsIgnoreCase("bigquery")) {
                // Google BigQuery sink
                sinkBolt = new BigQuerySinkBolt(sinkId, settings);
            } else {
                throw new Exception("Sink type '" + sinkType + "' not supported");
            }
            // Add to topology
            if (sinkBolt != null) {
                String sinkName = "sink_" + sinkType + "_" + sinkId;
                LOG.info("Setting up sink '" + sinkName + "'");
                if (!sinkBolt.isValid()) {
                    LOG.error("Sink '" + sinkName + "' not valid");
                }
                // For efficiency fields grouping would have been better but creates hotspots
                builder.setBolt(sinkName, sinkBolt, GLOBAL_CONCURRENCY * 2).shuffleGrouping(MATCH_BOLT);
            }
        }
    }
    // Debug on for testing
    Config conf = new Config();
    conf.setDebug(false);
    // Default is 30 seconds, which might be too short under peak load spikes, or when we run the outlier detection
    conf.setMessageTimeoutSecs(120);
    String topologyName = settings.getOrDefault("topology_name", "cloudpelican_stormprocessor");
    if (argList.contains("-submit")) {
        conf.setNumWorkers(GLOBAL_CONCURRENCY);
        // ackers = workers means every VM has an acker reducing overhead
        conf.setNumAckers(GLOBAL_CONCURRENCY);
        conf.setMaxSpoutPending(GLOBAL_CONCURRENCY * Integer.parseInt(settings.getOrDefault("topology_max_spout_multiplier", "1000")) * kafkaPartitions);
        conf.setStatsSampleRate(Double.parseDouble(settings.getOrDefault("topology_stats_sample_rate", "0.05")));
        StormSubmitter.submitTopologyWithProgressBar(topologyName, conf, builder.createTopology());
    } else {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(topologyName, conf, builder.createTopology());
        // Keep running until interrupt
        Runtime.getRuntime().addShutdownHook(new Thread() {

            @Override
            public void run() {
                LOG.info("Shutting down");
                isRunning = false;
            }
        });
        while (isRunning) {
            Thread.sleep(100);
        }
        cluster.killTopology(topologyName);
        cluster.shutdown();
    }
}
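The concurrency(...) helper and GLOBAL_CONCURRENCY constant are defined elsewhere in the project; a plausible reconstruction (hypothetical, offered only to make the parallelism hints above readable) scales the global setting down by a divisor with a floor:
    // Hypothetical reconstruction, not the project's code: derive a bolt's
    // parallelism from the global concurrency, never dropping below `floor`.
    static int concurrency(int floor, int divisor) {
        return Math.max(floor, GLOBAL_CONCURRENCY / divisor);
    }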
Example 79
Project: Europeana-Cloud-master  File: ExtractBoltTest.java View source code
@Override
public void run(ILocalCluster cluster) throws IOException, AlreadyAliveException, InvalidTopologyException {
    AckTracker tracker = new AckTracker();
    FeederSpout spout = new FeederSpout(StormTaskTuple.getFields());
    spout.setAckFailDelegate(tracker);
    //build topology
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("testSpout", spout);
    builder.setBolt("extractBolt", new ExtractTextBolt(informStream, storeStream)).shuffleGrouping("testSpout");
    StormTopology topology = builder.createTopology();
    TrackedTopology tt = Testing.mkTrackedTopology(cluster, topology);
    //topology config
    Config config = new Config();
    config.setNumWorkers(1);
    //config.setDebug(true);      
    cluster.submitTopology("testTopology", config, tt.getTopology());
    //prepare test data                    
    List<StormTaskTuple> data = prepareInputData();
    for (StormTaskTuple tuple : data) {
        spout.feed(tuple.toStormTuple());
        //Waits until topology is idle and 'amt' more tuples have been emitted by spouts
        //topology, amt, timeout
        Testing.trackedWait(tt, 1, 60000);
    }
    assertEquals(data.size(), tracker.getNumAcks());
}
Example 80
Project: incubator-brooklyn-master  File: StormAbstractCloudLiveTest.java View source code
private StormTopology createTopology() throws AlreadyAliveException, InvalidTopologyException {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("word", new TestWordSpout(), 10);
    builder.setBolt("exclaim1", new ExclamationBolt(), 3).shuffleGrouping("word");
    builder.setBolt("exclaim2", new ExclamationBolt(), 2).shuffleGrouping("exclaim1");
    return builder.createTopology();
}
Example 81
Project: storm-hdfs-master  File: SequenceFileTopology.java View source code
public static void main(String[] args) throws Exception {
    Config config = new Config();
    config.setNumWorkers(1);
    SentenceSpout spout = new SentenceSpout();
    // sync the filesystem after every 1k tuples
    SyncPolicy syncPolicy = new CountSyncPolicy(1000);
    // rotate files when they reach 5MB
    FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, Units.MB);
    FileNameFormat fileNameFormat = new DefaultFileNameFormat().withPath("/source/").withExtension(".seq");
    // create sequence format instance.
    DefaultSequenceFormat format = new DefaultSequenceFormat("timestamp", "sentence");
    SequenceFileBolt bolt = new SequenceFileBolt().withFsUrl(args[0]).withFileNameFormat(fileNameFormat).withSequenceFormat(format).withRotationPolicy(rotationPolicy).withSyncPolicy(syncPolicy).withCompressionType(SequenceFile.CompressionType.RECORD).withCompressionCodec("deflate").addRotationAction(new MoveFileAction().toDestination("/dest/"));
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(SENTENCE_SPOUT_ID, spout, 1);
    // SentenceSpout --> MyBolt
    builder.setBolt(BOLT_ID, bolt, 4).shuffleGrouping(SENTENCE_SPOUT_ID);
    if (args.length == 1) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(TOPOLOGY_NAME, config, builder.createTopology());
        waitForSeconds(120);
        cluster.killTopology(TOPOLOGY_NAME);
        cluster.shutdown();
        System.exit(0);
    } else if (args.length == 2) {
        StormSubmitter.submitTopology(args[1], config, builder.createTopology());
    }
}
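One robustness note: the bolt configuration dereferences args[0] before any length check, so running with no arguments fails with an ArrayIndexOutOfBoundsException rather than a usage message. An illustrative guard at the top of main:
    // Illustrative guard (not in the original): fail fast with a usage
    // message when no HDFS URL is given.
    if (args.length < 1) {
        System.err.println("Usage: SequenceFileTopology <hdfs-url> [topology-name]");
        System.exit(1);
    }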
Example 82
Project: streaming-benchmarks-master  File: AdvertisingTopology.java View source code
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    Options opts = new Options();
    opts.addOption("conf", true, "Path to the config file.");
    CommandLineParser parser = new DefaultParser();
    CommandLine cmd = parser.parse(opts, args);
    String configPath = cmd.getOptionValue("conf");
    Map commonConfig = Utils.findAndReadConfigFile(configPath, true);
    String zkServerHosts = joinHosts((List<String>) commonConfig.get("zookeeper.servers"), Integer.toString((Integer) commonConfig.get("zookeeper.port")));
    String redisServerHost = (String) commonConfig.get("redis.host");
    String kafkaTopic = (String) commonConfig.get("kafka.topic");
    int kafkaPartitions = ((Number) commonConfig.get("kafka.partitions")).intValue();
    int workers = ((Number) commonConfig.get("storm.workers")).intValue();
    int ackers = ((Number) commonConfig.get("storm.ackers")).intValue();
    int cores = ((Number) commonConfig.get("process.cores")).intValue();
    int parallel = Math.max(1, cores / 7);
    ZkHosts hosts = new ZkHosts(zkServerHosts);
    SpoutConfig spoutConfig = new SpoutConfig(hosts, kafkaTopic, "/" + kafkaTopic, UUID.randomUUID().toString());
    spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);
    builder.setSpout("ads", kafkaSpout, kafkaPartitions);
    builder.setBolt("event_deserializer", new DeserializeBolt(), parallel).shuffleGrouping("ads");
    builder.setBolt("event_filter", new EventFilterBolt(), parallel).shuffleGrouping("event_deserializer");
    builder.setBolt("event_projection", new EventProjectionBolt(), parallel).shuffleGrouping("event_filter");
    builder.setBolt("redis_join", new RedisJoinBolt(redisServerHost), parallel).shuffleGrouping("event_projection");
    builder.setBolt("campaign_processor", new CampaignProcessor(redisServerHost), parallel * 2).fieldsGrouping("redis_join", new Fields("campaign_id"));
    Config conf = new Config();
    if (args != null && args.length > 0) {
        conf.setNumWorkers(workers);
        conf.setNumAckers(ackers);
        StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
    } else {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("test", conf, builder.createTopology());
        backtype.storm.utils.Utils.sleep(10000);
        cluster.killTopology("test");
        cluster.shutdown();
    }
}
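One detail worth flagging: the spout's consumer id is a fresh UUID.randomUUID() on every launch, so each run writes offsets under a new ZooKeeper path and never resumes from previously committed offsets. If resumption is wanted, a fixed id (name is a placeholder) does it:
    // A stable consumer id reuses the offsets committed under
    // "/<topic>/<id>" in ZooKeeper across restarts.
    SpoutConfig spoutConfig = new SpoutConfig(hosts, kafkaTopic,
            "/" + kafkaTopic, "ad-benchmark-consumer");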
Example 83
Project: Tstream-master  File: TridentTopologyBuilder.java View source code
public StormTopology buildTopology() {
    TopologyBuilder builder = new TopologyBuilder();
    Map<GlobalStreamId, String> batchIdsForSpouts = fleshOutStreamBatchIds(false);
    Map<GlobalStreamId, String> batchIdsForBolts = fleshOutStreamBatchIds(true);
    Map<String, List<String>> batchesToCommitIds = new HashMap<String, List<String>>();
    Map<String, List<ITridentSpout>> batchesToSpouts = new HashMap<String, List<ITridentSpout>>();
    for (String id : _spouts.keySet()) {
        TransactionalSpoutComponent c = _spouts.get(id);
        if (c.spout instanceof IRichSpout) {
            //TODO: wrap this to set the stream name
            builder.setSpout(id, (IRichSpout) c.spout, c.parallelism);
        } else {
            String batchGroup = c.batchGroupId;
            if (!batchesToCommitIds.containsKey(batchGroup)) {
                batchesToCommitIds.put(batchGroup, new ArrayList<String>());
            }
            batchesToCommitIds.get(batchGroup).add(c.commitStateId);
            if (!batchesToSpouts.containsKey(batchGroup)) {
                batchesToSpouts.put(batchGroup, new ArrayList<ITridentSpout>());
            }
            batchesToSpouts.get(batchGroup).add((ITridentSpout) c.spout);
            BoltDeclarer scd = builder.setBolt(spoutCoordinator(id), new TridentSpoutCoordinator(c.commitStateId, (ITridentSpout) c.spout)).globalGrouping(masterCoordinator(c.batchGroupId), MasterBatchCoordinator.BATCH_STREAM_ID).globalGrouping(masterCoordinator(c.batchGroupId), MasterBatchCoordinator.SUCCESS_STREAM_ID).globalGrouping(masterCoordinator(c.batchGroupId), MasterBatchCoordinator.COMMIT_STREAM_ID);
            for (Map m : c.componentConfs) {
                scd.addConfigurations(m);
            }
            Map<String, TridentBoltExecutor.CoordSpec> specs = new HashMap();
            specs.put(c.batchGroupId, new CoordSpec());
            BoltDeclarer bd = builder.setBolt(id, new TridentBoltExecutor(new TridentSpoutExecutor(c.commitStateId, c.streamName, ((ITridentSpout) c.spout)), batchIdsForSpouts, specs), c.parallelism);
            bd.allGrouping(spoutCoordinator(id), MasterBatchCoordinator.BATCH_STREAM_ID);
            bd.allGrouping(masterCoordinator(batchGroup), MasterBatchCoordinator.SUCCESS_STREAM_ID);
            if (c.spout instanceof ICommitterTridentSpout) {
                bd.allGrouping(masterCoordinator(batchGroup), MasterBatchCoordinator.COMMIT_STREAM_ID);
            }
            for (Map m : c.componentConfs) {
                bd.addConfigurations(m);
            }
        }
    }
    for (String id : _batchPerTupleSpouts.keySet()) {
        SpoutComponent c = _batchPerTupleSpouts.get(id);
        SpoutDeclarer d = builder.setSpout(id, new RichSpoutBatchTriggerer((IRichSpout) c.spout, c.streamName, c.batchGroupId), c.parallelism);
        for (Map conf : c.componentConfs) {
            d.addConfigurations(conf);
        }
    }
    for (String id : _bolts.keySet()) {
        Component c = _bolts.get(id);
        Map<String, CoordSpec> specs = new HashMap();
        for (GlobalStreamId s : getBoltSubscriptionStreams(id)) {
            String batch = batchIdsForBolts.get(s);
            if (!specs.containsKey(batch))
                specs.put(batch, new CoordSpec());
            CoordSpec spec = specs.get(batch);
            CoordType ct;
            if (_batchPerTupleSpouts.containsKey(s.get_componentId())) {
                ct = CoordType.single();
            } else {
                ct = CoordType.all();
            }
            spec.coords.put(s.get_componentId(), ct);
        }
        for (String b : c.committerBatches) {
            specs.get(b).commitStream = new GlobalStreamId(masterCoordinator(b), MasterBatchCoordinator.COMMIT_STREAM_ID);
        }
        BoltDeclarer d = builder.setBolt(id, new TridentBoltExecutor(c.bolt, batchIdsForBolts, specs), c.parallelism);
        for (Map conf : c.componentConfs) {
            d.addConfigurations(conf);
        }
        for (InputDeclaration inputDecl : c.declarations) {
            inputDecl.declare(d);
        }
        Map<String, Set<String>> batchToComponents = getBoltBatchToComponentSubscriptions(id);
        for (String b : batchToComponents.keySet()) {
            for (String comp : batchToComponents.get(b)) {
                d.directGrouping(comp, TridentBoltExecutor.COORD_STREAM(b));
            }
        }
        for (String b : c.committerBatches) {
            d.allGrouping(masterCoordinator(b), MasterBatchCoordinator.COMMIT_STREAM_ID);
        }
    }
    for (String batch : batchesToCommitIds.keySet()) {
        List<String> commitIds = batchesToCommitIds.get(batch);
        builder.setSpout(masterCoordinator(batch), new MasterBatchCoordinator(commitIds, batchesToSpouts.get(batch)));
    }
    return builder.createTopology();
}
Example 84
Project: distributed-processor-master  File: TridentTopologyBuilder.java View source code
public StormTopology buildTopology() {
    TopologyBuilder builder = new TopologyBuilder();
    Map<GlobalStreamId, String> batchIdsForSpouts = fleshOutStreamBatchIds(false);
    Map<GlobalStreamId, String> batchIdsForBolts = fleshOutStreamBatchIds(true);
    Map<String, List<String>> batchesToCommitIds = new HashMap<String, List<String>>();
    Map<String, List<ITridentSpout>> batchesToSpouts = new HashMap<String, List<ITridentSpout>>();
    for (String id : _spouts.keySet()) {
        TransactionalSpoutComponent c = _spouts.get(id);
        if (c.spout instanceof IRichSpout) {
            //TODO: wrap this to set the stream name
            builder.setSpout(id, (IRichSpout) c.spout, c.parallelism);
        } else {
            String batchGroup = c.batchGroupId;
            if (!batchesToCommitIds.containsKey(batchGroup)) {
                batchesToCommitIds.put(batchGroup, new ArrayList<String>());
            }
            batchesToCommitIds.get(batchGroup).add(c.commitStateId);
            if (!batchesToSpouts.containsKey(batchGroup)) {
                batchesToSpouts.put(batchGroup, new ArrayList<ITridentSpout>());
            }
            batchesToSpouts.get(batchGroup).add((ITridentSpout) c.spout);
            BoltDeclarer scd = builder.setBolt(spoutCoordinator(id), new TridentSpoutCoordinator(c.commitStateId, (ITridentSpout) c.spout)).globalGrouping(masterCoordinator(c.batchGroupId), MasterBatchCoordinator.BATCH_STREAM_ID).globalGrouping(masterCoordinator(c.batchGroupId), MasterBatchCoordinator.SUCCESS_STREAM_ID);
            for (Map m : c.componentConfs) {
                scd.addConfigurations(m);
            }
            Map<String, TridentBoltExecutor.CoordSpec> specs = new HashMap();
            specs.put(c.batchGroupId, new CoordSpec());
            BoltDeclarer bd = builder.setBolt(id, new TridentBoltExecutor(new TridentSpoutExecutor(c.commitStateId, c.streamName, ((ITridentSpout) c.spout)), batchIdsForSpouts, specs), c.parallelism);
            bd.allGrouping(spoutCoordinator(id), MasterBatchCoordinator.BATCH_STREAM_ID);
            bd.allGrouping(masterCoordinator(batchGroup), MasterBatchCoordinator.SUCCESS_STREAM_ID);
            if (c.spout instanceof ICommitterTridentSpout) {
                bd.allGrouping(masterCoordinator(batchGroup), MasterBatchCoordinator.COMMIT_STREAM_ID);
            }
            for (Map m : c.componentConfs) {
                bd.addConfigurations(m);
            }
        }
    }
    for (String id : _batchPerTupleSpouts.keySet()) {
        SpoutComponent c = _batchPerTupleSpouts.get(id);
        SpoutDeclarer d = builder.setSpout(id, new RichSpoutBatchTriggerer((IRichSpout) c.spout, c.streamName, c.batchGroupId), c.parallelism);
        for (Map conf : c.componentConfs) {
            d.addConfigurations(conf);
        }
    }
    for (String batch : batchesToCommitIds.keySet()) {
        List<String> commitIds = batchesToCommitIds.get(batch);
        builder.setSpout(masterCoordinator(batch), new MasterBatchCoordinator(commitIds, batchesToSpouts.get(batch)));
    }
    for (String id : _bolts.keySet()) {
        Component c = _bolts.get(id);
        Map<String, CoordSpec> specs = new HashMap();
        for (GlobalStreamId s : getBoltSubscriptionStreams(id)) {
            String batch = batchIdsForBolts.get(s);
            if (!specs.containsKey(batch))
                specs.put(batch, new CoordSpec());
            CoordSpec spec = specs.get(batch);
            CoordType ct;
            if (_batchPerTupleSpouts.containsKey(s.get_componentId())) {
                ct = CoordType.single();
            } else {
                ct = CoordType.all();
            }
            spec.coords.put(s.get_componentId(), ct);
        }
        for (String b : c.committerBatches) {
            specs.get(b).commitStream = new GlobalStreamId(masterCoordinator(b), MasterBatchCoordinator.COMMIT_STREAM_ID);
        }
        BoltDeclarer d = builder.setBolt(id, new TridentBoltExecutor(c.bolt, batchIdsForBolts, specs), c.parallelism);
        for (Map conf : c.componentConfs) {
            d.addConfigurations(conf);
        }
        for (InputDeclaration inputDecl : c.declarations) {
            inputDecl.declare(d);
        }
        Map<String, Set<String>> batchToComponents = getBoltBatchToComponentSubscriptions(id);
        for (String b : batchToComponents.keySet()) {
            for (String comp : batchToComponents.get(b)) {
                d.directGrouping(comp, TridentBoltExecutor.COORD_STREAM(b));
            }
        }
        for (String b : c.committerBatches) {
            d.allGrouping(masterCoordinator(b), MasterBatchCoordinator.COMMIT_STREAM_ID);
        }
    }
    return builder.createTopology();
}
Example 85
Project: flowmix-master  File: FlowmixBuilder.java View source code
/**
   * @return A topology builder that can be further customized.
   */
public TopologyBuilder create() {
    TopologyBuilder builder = new TopologyBuilder();
    if (eventsComponent instanceof IRichSpout)
        builder.setSpout(EVENT, (IRichSpout) eventsComponent, eventLoaderParallelism == -1 ? parallelismHint : eventLoaderParallelism);
    else if (eventsComponent instanceof IRichBolt)
        builder.setBolt(EVENT, (IRichBolt) eventsComponent, eventLoaderParallelism == -1 ? parallelismHint : eventLoaderParallelism);
    else
        throw new RuntimeException("The component for events is not valid. Must be IRichSpout or IRichBolt");
    if (flowLoaderSpout instanceof IRichSpout)
        builder.setSpout(FLOW_LOADER_STREAM, (IRichSpout) flowLoaderSpout, 1);
    else if (flowLoaderSpout instanceof IRichBolt)
        builder.setBolt(FLOW_LOADER_STREAM, (IRichBolt) flowLoaderSpout, 1);
    else
        throw new RuntimeException("The component for rules is not valid. Must be IRichSpout or IRichBolt");
    builder.setSpout("tick", new TickSpout(1000), 1);
    // kicks off a flow determining where to start
    builder.setBolt(INITIALIZER, new FlowInitializerBolt(), parallelismHint).localOrShuffleGrouping(EVENT).allGrouping(FLOW_LOADER_STREAM, FLOW_LOADER_STREAM);
    declarebolt(builder, FILTER, new FilterBolt(), parallelismHint, true);
    declarebolt(builder, SELECT, new SelectorBolt(), parallelismHint, true);
    declarebolt(builder, PARTITION, new PartitionBolt(), parallelismHint, true);
    declarebolt(builder, SWITCH, new SwitchBolt(), parallelismHint, true);
    declarebolt(builder, AGGREGATE, new AggregatorBolt(), parallelismHint, true);
    declarebolt(builder, JOIN, new JoinBolt(), parallelismHint, true);
    declarebolt(builder, EACH, new EachBolt(), parallelismHint, true);
    declarebolt(builder, SORT, new SortBolt(), parallelismHint, true);
    declarebolt(builder, SPLIT, new SplitBolt(), parallelismHint, true);
    declarebolt(builder, OUTPUT, outputBolt, parallelismHint, false);
    return builder;
}
Example 86
Project: kettle-storm-master  File: KettleStormUtils.java View source code
/**
   * Create a topology from a transformation.
   *
   * @param conf Storm configuration to use to configure connection information.
   * @param meta Transformation meta to build topology from.
   * @return Storm topology capable of executing the Kettle transformation.
   * @throws KettleException Error loading the transformation details or initializing the kettle environment
   * @throws IOException     Error generating the transformation XML from the meta.
   */
public StormTopology createTopology(Config conf, TransMeta meta) throws KettleException, IOException {
    initKettleEnvironment();
    TransConfiguration transConfig = new TransConfiguration(meta, new TransExecutionConfiguration());
    String transXml = transConfig.getXML();
    Trans trans = new Trans(meta);
    trans.prepareExecution(null);
    List<StepMetaDataCombi> steps = trans.getSteps();
    String topologyName = generateTopologyName(meta.getName());
    setTopologyName(conf, topologyName);
    TopologyBuilder builder = new TopologyBuilder();
    Set<String> leafSteps = collectLeafStepNames(trans);
    String controlBoltId = topologyName + "-control-bolt";
    BasicSignalNotifier notifier = new BasicSignalNotifier(controlBoltId);
    BoltDeclarer controlBoltDeclarer = builder.setBolt(controlBoltId, new KettleControlBolt(topologyName, notifier, leafSteps));
    for (StepMetaDataCombi step : steps) {
        step.step.init(step.meta, step.data);
        // The control bolt must receive all signal tuples from all leaf steps
        if (leafSteps.contains(step.step.getStepname())) {
            controlBoltDeclarer.allGrouping(step.step.getStepname(), "signal");
        }
        if (isSpout(step)) {
            builder.setSpout(step.step.getStepname(), new KettleStepSpout(step.step.getStepname(), transXml, step), step.step.getStepMeta().getCopies()).setMaxTaskParallelism(step.step.getStepMeta().getCopies());
        } else {
            BoltDeclarer bd = builder.setBolt(step.step.getStepname(), new KettleStepBolt(step.step.getStepname(), transXml, step), step.step.getStepMeta().getCopies()).setMaxTaskParallelism(step.step.getStepMeta().getCopies());
            for (StreamInterface info : step.stepMeta.getStepMetaInterface().getStepIOMeta().getInfoStreams()) {
                StepMetaDataCombi infoStep = findStep(trans, info.getStepname());
                bd.fieldsGrouping(info.getStepname(), getOutputFields(infoStep));
                bd.allGrouping(info.getStepname(), "signal");
            }
            for (RowSet input : step.step.getInputRowSets()) {
                StepMetaDataCombi inputStep = findStep(trans, input.getOriginStepName());
                bd.fieldsGrouping(input.getOriginStepName(), getOutputFields(inputStep));
                // All bolts must receive all signal tuples from all previous steps
                bd.allGrouping(input.getOriginStepName(), "signal");
            }
        }
    }
    return builder.createTopology();
}
Example 87
Project: Storm-master  File: TridentTopologyBuilder.java View source code
public StormTopology buildTopology() {
    TopologyBuilder builder = new TopologyBuilder();
    Map<GlobalStreamId, String> batchIdsForSpouts = fleshOutStreamBatchIds(false);
    Map<GlobalStreamId, String> batchIdsForBolts = fleshOutStreamBatchIds(true);
    Map<String, List<String>> batchesToCommitIds = new HashMap<String, List<String>>();
    Map<String, List<ITridentSpout>> batchesToSpouts = new HashMap<String, List<ITridentSpout>>();
    for (String id : _spouts.keySet()) {
        TransactionalSpoutComponent c = _spouts.get(id);
        if (c.spout instanceof IRichSpout) {
            //TODO: wrap this to set the stream name
            builder.setSpout(id, (IRichSpout) c.spout, c.parallelism);
        } else {
            String batchGroup = c.batchGroupId;
            if (!batchesToCommitIds.containsKey(batchGroup)) {
                batchesToCommitIds.put(batchGroup, new ArrayList<String>());
            }
            batchesToCommitIds.get(batchGroup).add(c.commitStateId);
            if (!batchesToSpouts.containsKey(batchGroup)) {
                batchesToSpouts.put(batchGroup, new ArrayList<ITridentSpout>());
            }
            batchesToSpouts.get(batchGroup).add((ITridentSpout) c.spout);
            BoltDeclarer scd = builder.setBolt(spoutCoordinator(id), new TridentSpoutCoordinator(c.commitStateId, (ITridentSpout) c.spout)).globalGrouping(masterCoordinator(c.batchGroupId), MasterBatchCoordinator.BATCH_STREAM_ID).globalGrouping(masterCoordinator(c.batchGroupId), MasterBatchCoordinator.SUCCESS_STREAM_ID);
            for (Map m : c.componentConfs) {
                scd.addConfigurations(m);
            }
            Map<String, TridentBoltExecutor.CoordSpec> specs = new HashMap();
            specs.put(c.batchGroupId, new CoordSpec());
            BoltDeclarer bd = builder.setBolt(id, new TridentBoltExecutor(new TridentSpoutExecutor(c.commitStateId, c.streamName, ((ITridentSpout) c.spout)), batchIdsForSpouts, specs), c.parallelism);
            bd.allGrouping(spoutCoordinator(id), MasterBatchCoordinator.BATCH_STREAM_ID);
            bd.allGrouping(masterCoordinator(batchGroup), MasterBatchCoordinator.SUCCESS_STREAM_ID);
            if (c.spout instanceof ICommitterTridentSpout) {
                bd.allGrouping(masterCoordinator(batchGroup), MasterBatchCoordinator.COMMIT_STREAM_ID);
            }
            for (Map m : c.componentConfs) {
                bd.addConfigurations(m);
            }
        }
    }
    for (String id : _batchPerTupleSpouts.keySet()) {
        SpoutComponent c = _batchPerTupleSpouts.get(id);
        SpoutDeclarer d = builder.setSpout(id, new RichSpoutBatchTriggerer((IRichSpout) c.spout, c.streamName, c.batchGroupId), c.parallelism);
        for (Map conf : c.componentConfs) {
            d.addConfigurations(conf);
        }
    }
    for (String batch : batchesToCommitIds.keySet()) {
        List<String> commitIds = batchesToCommitIds.get(batch);
        builder.setSpout(masterCoordinator(batch), new MasterBatchCoordinator(commitIds, batchesToSpouts.get(batch)));
    }
    for (String id : _bolts.keySet()) {
        Component c = _bolts.get(id);
        Map<String, CoordSpec> specs = new HashMap();
        for (GlobalStreamId s : getBoltSubscriptionStreams(id)) {
            String batch = batchIdsForBolts.get(s);
            if (!specs.containsKey(batch))
                specs.put(batch, new CoordSpec());
            CoordSpec spec = specs.get(batch);
            CoordType ct;
            if (_batchPerTupleSpouts.containsKey(s.get_componentId())) {
                ct = CoordType.single();
            } else {
                ct = CoordType.all();
            }
            spec.coords.put(s.get_componentId(), ct);
        }
        for (String b : c.committerBatches) {
            specs.get(b).commitStream = new GlobalStreamId(masterCoordinator(b), MasterBatchCoordinator.COMMIT_STREAM_ID);
        }
        BoltDeclarer d = builder.setBolt(id, new TridentBoltExecutor(c.bolt, batchIdsForBolts, specs), c.parallelism);
        for (Map conf : c.componentConfs) {
            d.addConfigurations(conf);
        }
        for (InputDeclaration inputDecl : c.declarations) {
            inputDecl.declare(d);
        }
        Map<String, Set<String>> batchToComponents = getBoltBatchToComponentSubscriptions(id);
        for (String b : batchToComponents.keySet()) {
            for (String comp : batchToComponents.get(b)) {
                d.directGrouping(comp, TridentBoltExecutor.COORD_STREAM(b));
            }
        }
        for (String b : c.committerBatches) {
            d.allGrouping(masterCoordinator(b), MasterBatchCoordinator.COMMIT_STREAM_ID);
        }
    }
    return builder.createTopology();
}
Example 88
Project: storm-perf-test-master  File: Main.java View source code
public void realMain(String[] args) throws Exception {
    Map clusterConf = Utils.readStormConfig();
    clusterConf.putAll(Utils.readCommandLineOpts());
    Nimbus.Client client = NimbusClient.getConfiguredClient(clusterConf).getClient();
    CmdLineParser parser = new CmdLineParser(this);
    parser.setUsageWidth(80);
    try {
        // parse the arguments.
        parser.parseArgument(args);
    } catch (CmdLineException e) {
        System.err.println(e.getMessage());
        _help = true;
    }
    if (_help) {
        parser.printUsage(System.err);
        System.err.println();
        return;
    }
    if (_numWorkers <= 0) {
        throw new IllegalArgumentException("Need at least one worker");
    }
    if (_name == null || _name.isEmpty()) {
        throw new IllegalArgumentException("name must be something");
    }
    if (!_ackEnabled) {
        _ackers = 0;
    }
    try {
        for (int topoNum = 0; topoNum < _numTopologies; topoNum++) {
            TopologyBuilder builder = new TopologyBuilder();
            LOG.info("Adding in " + _spoutParallel + " spouts");
            builder.setSpout("messageSpout", new SOLSpout(_messageSize, _ackEnabled), _spoutParallel);
            LOG.info("Adding in " + _boltParallel + " bolts");
            builder.setBolt("messageBolt1", new SOLBolt(), _boltParallel).shuffleGrouping("messageSpout");
            for (int levelNum = 2; levelNum <= _numLevels; levelNum++) {
                LOG.info("Adding in " + _boltParallel + " bolts at level " + levelNum);
                builder.setBolt("messageBolt" + levelNum, new SOLBolt(), _boltParallel).shuffleGrouping("messageBolt" + (levelNum - 1));
            }
            Config conf = new Config();
            conf.setDebug(_debug);
            conf.setNumWorkers(_numWorkers);
            conf.setNumAckers(_ackers);
            if (_maxSpoutPending > 0) {
                conf.setMaxSpoutPending(_maxSpoutPending);
            }
            StormSubmitter.submitTopology(_name + "_" + topoNum, conf, builder.createTopology());
        }
        metrics(client, _messageSize, _pollFreqSec, _testRunTimeSec);
    } finally {
        //Kill it right now!!!
        KillOptions killOpts = new KillOptions();
        killOpts.set_wait_secs(0);
        for (int topoNum = 0; topoNum < _numTopologies; topoNum++) {
            LOG.info("KILLING " + _name + "_" + topoNum);
            try {
                client.killTopologyWithOpts(_name + "_" + topoNum, killOpts);
            } catch (Exception e) {
                LOG.error("Error tying to kill " + _name + "_" + topoNum, e);
            }
        }
    }
}
Example 89
Project: storm-kafka-xlog-master  File: XlogKafkaSpoutTopology.java View source code
public StormTopology buildTopology(String topic) {
    SpoutConfig kafkaConfig = new SpoutConfig(brokerHosts, topic, "", "xlog_storm_" + topic);
    kafkaConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("KafkaSpout", new KafkaSpout(kafkaConfig));
    builder.setBolt("XlogBolt", new XlogBolt()).shuffleGrouping("KafkaSpout");
    return builder.createTopology();
}
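A minimal local driver for the builder above (illustrative; the topic and topology names are placeholders):
    // Smoke-test the topology in-process, then tear it down.
    StormTopology topology = buildTopology("xlog");
    Config conf = new Config();
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("xlog-local", conf, topology);
    Utils.sleep(10000);
    cluster.killTopology("xlog-local");
    cluster.shutdown();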
Example 90
Project: gennai-master  File: GungnirTopology.java View source code
private String phase3(DefaultDirectedGraph<Operator, StreamEdge> graph, List<Operator> operators, Map<String, Map<String, List<String>>> outputFields, Map<String, GroupFields> groupFields, boolean explain) throws GungnirTopologyException {
    context = new GungnirContext();
    context.setTopologyId(id);
    context.setTopologyName(name);
    context.setAccountId(owner.getId());
    context.setOutputFields(outputFields);
    context.setGroupFields(groupFields);
    ExecBolt bolt = null;
    bolts = Lists.newArrayList();
    boltsIndex = Maps.newHashMap();
    if (!explain) {
        builder = new TopologyBuilder();
        boltDeclarers = Lists.newArrayList();
    }
    StringBuilder sb = new StringBuilder();
    sb.append("Components:\n");
    for (Operator operator : operators) {
        if (graph.inDegreeOf(operator) == 0) {
            Integer boltIndex = null;
            if (operator instanceof SpoutOperator) {
                spout = new ExecSpout(context);
                sb.append(' ');
                sb.append(spout.getName());
                sb.append(" {");
            } else if (operator instanceof PartitionOperator) {
                PartitionOperator incomingOperator = (PartitionOperator) operator;
                boltIndex = boltsIndex.get(incomingOperator.getGrouping());
                if (boltIndex == null) {
                    boltIndex = bolts.size();
                    bolt = new ExecBolt(context);
                    bolt.setId(boltIndex + 1);
                    bolts.add(bolt);
                } else {
                    bolt = bolts.get(boltIndex);
                }
                sb.append(' ');
                sb.append(bolt.getName());
                sb.append(" {");
            }
            List<PartitionOperator> outgoingOperators = Lists.newArrayList();
            BreadthFirstIterator<Operator, StreamEdge> it = new BreadthFirstIterator<Operator, StreamEdge>(graph, operator);
            int parallelism = 0;
            Map<String, Metrics> metricsMap = Maps.newHashMap();
            while (it.hasNext()) {
                Operator operator2 = it.next();
                if (graph.outDegreeOf(operator2) == 0) {
                    if (operator2 instanceof PartitionOperator) {
                        outgoingOperators.add((PartitionOperator) operator2);
                    }
                } else {
                    Dispatcher dispatcher = edgesToDispatcher(graph, operator2);
                    operator2.setDispatcher(dispatcher);
                    operator2.registerMetrics(METRICS_DISPATCH_COUNT, new MultiCountMeter());
                    sb.append("\n  ");
                    sb.append(operator2.getName());
                    sb.append(' ');
                    sb.append(dispatcher);
                }
                if (operator2.getParallelism() > parallelism) {
                    parallelism = operator2.getParallelism();
                }
                collectMetrics(operator2, metricsMap);
            }
            sb.append("\n } parallelism=");
            sb.append(parallelism);
            sb.append('\n');
            if (operator instanceof SpoutOperator) {
                spout.setIncomingOperator((SpoutOperator) operator);
                spout.addOutgoingOperators(outgoingOperators);
                spout.registerMetrics(metricsMap);
                if (!explain) {
                    builder.setSpout(spout.getName(), spout, parallelism);
                }
            } else if (operator instanceof PartitionOperator) {
                PartitionOperator incomingOperator = (PartitionOperator) operator;
                bolt.addIncomingOperator(incomingOperator);
                bolt.addOutgoingOperators(outgoingOperators);
                bolt.registerMetrics(metricsMap);
                if (!boltsIndex.containsKey(incomingOperator.getGrouping())) {
                    if (!explain) {
                        boltDeclarers.add(builder.setBolt(bolt.getName(), bolt, parallelism));
                    }
                    boltsIndex.put(incomingOperator.getGrouping(), boltIndex);
                }
            }
        }
    }
    sb.deleteCharAt(sb.length() - 1);
    return sb.toString();
}
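Beneath the bookkeeping, phase3 does one thing per component root (an operator with no incoming edges): fuse every operator reachable from it into a single spout or bolt whose parallelism is the maximum requested by any fused operator. A heavily simplified sketch of that wiring, with the metrics, explain-mode output, and the bolt reuse keyed by grouping all elided (maxParallelismFrom is a hypothetical helper standing in for the breadth-first walk above):
// Simplified wiring sketch; ExecSpout/ExecBolt and the operator types
// come from the gennai project, maxParallelismFrom is hypothetical.
for (Operator op : operators) {
    if (graph.inDegreeOf(op) == 0) {                     // a component root
        int parallelism = maxParallelismFrom(graph, op);
        if (op instanceof SpoutOperator) {
            ExecSpout spout = new ExecSpout(context);
            builder.setSpout(spout.getName(), spout, parallelism);
        } else if (op instanceof PartitionOperator) {
            ExecBolt bolt = new ExecBolt(context);
            boltDeclarers.add(builder.setBolt(bolt.getName(), bolt, parallelism));
        }
    }
}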
Example 91
Project: opensoc-streaming-master  File: TopologyRunner.java View source code
public void initTopology(String args[], String subdir) throws Exception {
    Cli command_line = new Cli(args);
    command_line.parse();
    System.out.println("[OpenSOC] Starting topology deployment...");
    debug = command_line.isDebug();
    System.out.println("[OpenSOC] Debug mode set to: " + debug);
    local_mode = command_line.isLocal_mode();
    System.out.println("[OpenSOC] Local mode set to: " + local_mode);
    if (command_line.getPath() != null) {
        config_path = command_line.getPath();
        System.out.println("[OpenSOC] Setting config path to external config path: " + config_path);
    } else {
        config_path = default_config_path;
        System.out.println("[OpenSOC] Initializing from default internal config path: " + config_path);
    }
    String topology_conf_path = config_path + "/topologies/" + subdir + "/topology.conf";
    String environment_identifier_path = config_path + "/topologies/environment_identifier.conf";
    String topology_identifier_path = config_path + "/topologies/" + subdir + "/topology_identifier.conf";
    System.out.println("[OpenSOC] Looking for environment identifier: " + environment_identifier_path);
    System.out.println("[OpenSOC] Looking for topology identifier: " + topology_identifier_path);
    System.out.println("[OpenSOC] Looking for topology config: " + topology_conf_path);
    config = new PropertiesConfiguration(topology_conf_path);
    JSONObject environment_identifier = SettingsLoader.loadEnvironmentIdnetifier(environment_identifier_path);
    JSONObject topology_identifier = SettingsLoader.loadTopologyIdnetifier(topology_identifier_path);
    String topology_name = SettingsLoader.generateTopologyName(environment_identifier, topology_identifier);
    System.out.println("[OpenSOC] Initializing Topology: " + topology_name);
    builder = new TopologyBuilder();
    conf = new Config();
    conf.registerSerialization(JSONObject.class, MapSerializer.class);
    conf.setDebug(debug);
    System.out.println("[OpenSOC] Initializing Spout: " + topology_name);
    if (command_line.isGenerator_spout()) {
        String component_name = config.getString("spout.test.name", "DefaultTopologySpout");
        success = initializeTestingSpout(component_name);
        messageComponents.add(component_name);
        System.out.println("[OpenSOC] ------Component " + component_name + " initialized with the following settings:");
        SettingsLoader.printConfigOptions((PropertiesConfiguration) config, "spout.test");
    }
    if (!command_line.isGenerator_spout()) {
        String component_name = config.getString("spout.kafka.name", "DefaultTopologyKafkaSpout");
        success = initializeKafkaSpout(component_name);
        messageComponents.add(component_name);
        System.out.println("[OpenSOC] ------Component " + component_name + " initialized with the following settings:");
        SettingsLoader.printConfigOptions((PropertiesConfiguration) config, "spout.kafka");
    }
    if (config.getBoolean("bolt.parser.enabled", true)) {
        String component_name = config.getString("bolt.parser.name", "DefaultTopologyParserBot");
        success = initializeParsingBolt(topology_name, component_name);
        messageComponents.add(component_name);
        errorComponents.add(component_name);
        dataComponents.add(component_name);
        System.out.println("[OpenSOC] ------Component " + component_name + " initialized with the following settings:");
        SettingsLoader.printConfigOptions((PropertiesConfiguration) config, "bolt.parser");
    }
    if (config.getBoolean("bolt.enrichment.geo.enabled", false)) {
        String component_name = config.getString("bolt.enrichment.geo.name", "DefaultGeoEnrichmentBolt");
        success = initializeGeoEnrichment(topology_name, component_name);
        messageComponents.add(component_name);
        errorComponents.add(component_name);
        System.out.println("[OpenSOC] ------Component " + component_name + " initialized with the following settings:");
        SettingsLoader.printConfigOptions((PropertiesConfiguration) config, "bolt.enrichment.geo");
        SettingsLoader.printConfigOptions((PropertiesConfiguration) config, "mysql");
    }
    if (config.getBoolean("bolt.enrichment.host.enabled", false)) {
        String component_name = config.getString("bolt.enrichment.host.name", "DefaultHostEnrichmentBolt");
        success = initializeHostsEnrichment(topology_name, component_name, "OpenSOC_Configs/etc/whitelists/known_hosts.conf");
        messageComponents.add(component_name);
        errorComponents.add(component_name);
        System.out.println("[OpenSOC] ------Component " + component_name + " initialized with the following settings:");
        SettingsLoader.printConfigOptions((PropertiesConfiguration) config, "bolt.enrichment.host");
    }
    if (config.getBoolean("bolt.enrichment.whois.enabled", false)) {
        String component_name = config.getString("bolt.enrichment.whois.name", "DefaultWhoisEnrichmentBolt");
        success = initializeWhoisEnrichment(topology_name, component_name);
        messageComponents.add(component_name);
        errorComponents.add(component_name);
        System.out.println("[OpenSOC] ------Component " + component_name + " initialized with the following settings:");
        SettingsLoader.printConfigOptions((PropertiesConfiguration) config, "bolt.enrichment.whois");
    }
    if (config.getBoolean("bolt.enrichment.cif.enabled", false)) {
        String component_name = config.getString("bolt.enrichment.cif.name", "DefaultCIFEnrichmentBolt");
        success = initializeCIFEnrichment(topology_name, component_name);
        messageComponents.add(component_name);
        errorComponents.add(component_name);
        System.out.println("[OpenSOC] ------Component " + component_name + " initialized with the following settings:");
        SettingsLoader.printConfigOptions((PropertiesConfiguration) config, "bolt.enrichment.cif");
    }
    if (config.getBoolean("bolt.enrichment.threat.enabled", false)) {
        String component_name = config.getString("bolt.enrichment.threat.name", "DefaultThreatEnrichmentBolt");
        success = initializeThreatEnrichment(topology_name, component_name);
        messageComponents.add(component_name);
        errorComponents.add(component_name);
        System.out.println("[OpenSOC] ------Component " + component_name + " initialized with the following settings:");
        SettingsLoader.printConfigOptions((PropertiesConfiguration) config, "bolt.enrichment.threat");
    }
    if (config.getBoolean("bolt.alerts.enabled", false)) {
        String component_name = config.getString("bolt.alerts.name", "DefaultAlertsBolt");
        success = initializeAlerts(topology_name, component_name, config_path + "/topologies/" + subdir + "/alerts.xml", environment_identifier, topology_identifier);
        messageComponents.add(component_name);
        errorComponents.add(component_name);
        alertComponents.add(component_name);
        System.out.println("[OpenSOC] ------Component " + component_name + " initialized with the following settings:");
        SettingsLoader.printConfigOptions((PropertiesConfiguration) config, "bolt.alerts");
    }
    if (config.getBoolean("bolt.alerts.indexing.enabled") && config.getBoolean("bolt.alerts.enabled")) {
        String component_name = config.getString("bolt.alerts.indexing.name", "DefaultAlertsBolt");
        success = initializeAlertIndexing(component_name);
        terminalComponents.add(component_name);
        System.out.println("[OpenSOC] ------Component " + component_name + " initialized with the following settings:");
        SettingsLoader.printConfigOptions((PropertiesConfiguration) config, "bolt.alerts.indexing");
    }
    if (config.getBoolean("bolt.kafka.enabled", false)) {
        String component_name = config.getString("bolt.kafka.name", "DefaultKafkaBolt");
        success = initializeKafkaBolt(component_name);
        terminalComponents.add(component_name);
        System.out.println("[OpenSOC] ------Component " + component_name + " initialized with the following settings:");
        SettingsLoader.printConfigOptions((PropertiesConfiguration) config, "bolt.kafka");
    }
    if (config.getBoolean("bolt.indexing.enabled", true)) {
        String component_name = config.getString("bolt.indexing.name", "DefaultIndexingBolt");
        success = initializeIndexingBolt(component_name);
        errorComponents.add(component_name);
        terminalComponents.add(component_name);
        System.out.println("[OpenSOC] ------Component " + component_name + " initialized with the following settings:");
        SettingsLoader.printConfigOptions((PropertiesConfiguration) config, "bolt.indexing");
    }
    if (config.getBoolean("bolt.hdfs.enabled", false)) {
        String component_name = config.getString("bolt.hdfs.name", "DefaultHDFSBolt");
        success = initializeHDFSBolt(topology_name, component_name);
        terminalComponents.add(component_name);
        System.out.println("[OpenSOC] ------Component " + component_name + " initialized with the following settings:");
        SettingsLoader.printConfigOptions((PropertiesConfiguration) config, "bolt.hdfs");
    }
    if (config.getBoolean("bolt.error.indexing.enabled")) {
        String component_name = config.getString("bolt.error.indexing.name", "DefaultErrorIndexingBolt");
        success = initializeErrorIndexBolt(component_name);
        terminalComponents.add(component_name);
        System.out.println("[OpenSOC] ------Component " + component_name + " initialized with the following settings:");
        SettingsLoader.printConfigOptions((PropertiesConfiguration) config, "bolt.error");
    }
    if (config.containsKey("bolt.hbase.enabled") && config.getBoolean("bolt.hbase.enabled")) {
        String component_name = config.getString("bolt.hbase.name", "DefaultHbaseBolt");
        String shuffleType = config.getString("bolt.hbase.shuffle.type", "direct");
        success = initializeHbaseBolt(component_name, shuffleType);
        terminalComponents.add(component_name);
        System.out.println("[OpenSOC] ------Component " + component_name + " initialized with the following settings:");
        SettingsLoader.printConfigOptions((PropertiesConfiguration) config, "bolt.hbase");
    }
    System.out.println("[OpenSOC] Topology Summary: ");
    System.out.println("[OpenSOC] Message Stream: " + printComponentStream(messageComponents));
    System.out.println("[OpenSOC] Alerts Stream: " + printComponentStream(alertComponents));
    System.out.println("[OpenSOC] Error Stream: " + printComponentStream(errorComponents));
    System.out.println("[OpenSOC] Data Stream: " + printComponentStream(dataComponents));
    System.out.println("[OpenSOC] Terminal Components: " + printComponentStream(terminalComponents));
    if (local_mode) {
        conf.setNumWorkers(config.getInt("num.workers"));
        conf.setMaxTaskParallelism(1);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(topology_name, conf, builder.createTopology());
    } else {
        conf.setNumWorkers(config.getInt("num.workers"));
        conf.setNumAckers(config.getInt("num.ackers"));
        StormSubmitter.submitTopology(topology_name, conf, builder.createTopology());
    }
}
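Each optional bolt above follows the same recipe: test an enabled flag in the PropertiesConfiguration, read the component name with a default, initialize the bolt, and record it for the summary printout. Reduced to a single component, the recurring shape looks like this (the bolt.example.* keys, ExampleBolt, and previous_component are illustrative, not OpenSOC names):
// Sketch of the config-driven wiring pattern repeated throughout initTopology.
// All names and keys here are illustrative stand-ins.
if (config.getBoolean("bolt.example.enabled", false)) {
    String component_name = config.getString("bolt.example.name", "DefaultExampleBolt");
    builder.setBolt(component_name, new ExampleBolt(),
            config.getInt("bolt.example.parallelism.hint", 1))
            .shuffleGrouping(previous_component);
    terminalComponents.add(component_name);
}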
Example 92
Project: incubator-samoa-master  File: StormTopology.java View source code
public TopologyBuilder getStormBuilder() {
    return builder;
}
Example 93
Project: samoa-master  File: StormTopology.java View source code
public TopologyBuilder getStormBuilder() {
    return builder;
}
Example 94
Project: storm-trident-elasticsearch-master  File: IndexBatchBoltTest.java View source code
@Override
public StormTopology buildTopology() {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("batch", new StaticSpout()).setMaxTaskParallelism(1);
    builder.setBolt("index", newIndexBatchBolt()).shuffleGrouping("batch");
    return builder.createTopology();
}
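Note the per-component cap: calling setMaxTaskParallelism on the declarer limits only the "batch" spout, presumably so the static test data is emitted by exactly one task. A topology-wide cap goes on the Config instead (a brief sketch contrasting the two):
// Per-component cap (as in the test above) vs. topology-wide cap.
builder.setSpout("batch", new StaticSpout()).setMaxTaskParallelism(1); // caps this spout only
Config conf = new Config();
conf.setMaxTaskParallelism(1); // caps every component in the topology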
Example 95
Project: tfidf-topology-master  File: TermTopology.java View source code
public static StormTopology getTwitterTopology() {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("twitterSpout", new TwitterSpout(searchTerms, 1000), 1);
    builder.setBolt("publishBolt", new PublishURLBolt(), 2).shuffleGrouping("twitterSpout");
    return builder.createTopology();
}
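Like Example 89, this method only builds the topology. A minimal sketch of a local run, assuming the standard LocalCluster API and backtype.storm.utils.Utils (the topology name and run time are illustrative):
// Sketch: running the Twitter topology locally for one minute.
Config conf = new Config();
LocalCluster cluster = new LocalCluster();
cluster.submitTopology("tfidf-twitter", conf, getTwitterTopology());
Utils.sleep(60 * 1000);
cluster.killTopology("tfidf-twitter");
cluster.shutdown();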