Java Examples for org.apache.kafka.common.serialization.ByteArraySerializer

The following Java examples will help you understand the usage of org.apache.kafka.common.serialization.ByteArraySerializer. These source code samples are taken from different open source projects.
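
Before the project samples, here is a minimal, self-contained sketch (not taken from any of the projects below; the broker address, topic name, and class name are placeholders) showing the two common ways ByteArraySerializer is used: configured by class name on a KafkaProducer, and called directly as a pass-through that returns the byte array unchanged.

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.ByteArraySerializer;

public class ByteArraySerializerSketch {

    public static void main(String[] args) {
        // 1) Configure the serializer by class name, as most of the examples below do.
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
        try (KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(props)) {
            producer.send(new ProducerRecord<>("example-topic", "key".getBytes(), "value".getBytes()));
        }

        // 2) The serializer itself is a pass-through: serialize() returns the byte[] as-is.
        ByteArraySerializer serializer = new ByteArraySerializer();
        byte[] payload = serializer.serialize("example-topic", new byte[] { 1, 2, 3 });
        System.out.println(payload.length); // 3
        serializer.close();
    }
}

Several of the later examples pass a ByteArraySerializer instance directly to the KafkaProducer constructor instead of setting the key.serializer/value.serializer configuration entries; both approaches produce the same byte-array serialization.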

Example 1
Project: streamsx.messaging-master  File: KafkaConfigUtilities.java
public static Properties setDefaultSerializers(AttributeHelper keyAH, AttributeHelper messageAH, Properties props) {
    final Logger trace = Logger.getLogger(KafkaConfigUtilities.class.getCanonicalName());
    if (!props.containsKey("key.serializer")) { //$NON-NLS-1$
        if (keyAH.isString()) {
            props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer"); //$NON-NLS-1$ //$NON-NLS-2$
            trace.log(TraceLevel.INFO,
                "Adding unspecified property key.serializer=org.apache.kafka.common.serialization.StringSerializer"); //$NON-NLS-1$
        } else {
            props.put("key.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer"); //$NON-NLS-1$ //$NON-NLS-2$
            trace.log(TraceLevel.INFO,
                "Adding unspecified property key.serializer=org.apache.kafka.common.serialization.ByteArraySerializer"); //$NON-NLS-1$
        }
    }
    if (!props.containsKey("value.serializer")) { //$NON-NLS-1$
        if (messageAH.isString()) {
            props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer"); //$NON-NLS-1$ //$NON-NLS-2$
            trace.log(TraceLevel.INFO,
                "Adding unspecified property value.serializer=org.apache.kafka.common.serialization.StringSerializer"); //$NON-NLS-1$
        } else {
            props.put("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer"); //$NON-NLS-1$ //$NON-NLS-2$
            trace.log(TraceLevel.INFO,
                "Adding unspecified property value.serializer=org.apache.kafka.common.serialization.ByteArraySerializer"); //$NON-NLS-1$
        }
    }
    return props;
}
Example 2
Project: aesop-master  File: KafkaClient.java
void init() {
    this.config = ConfigFactory.parseFile(new File(kafkaConfig.getConfig()));
    Properties props = new Properties();
    props.put("zookeeper.connect", config.getString("zookeeper.connect"));
    props.put("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
    props.put("key.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
    props.put("zk.connectiontimeout.ms", config.getString("zk.connectiontimeout.ms"));
    props.put("bootstrap.servers", config.getString("bootstrap.servers"));
    this.client = new KafkaProducer(props);
}
Example 3
Project: Decision-master  File: HardwareEmulatorMain.java
private static Properties createKafkaProducerConfig(String brokerList) {
    Properties properties = new Properties();
    properties.put(org.apache.kafka.clients.producer.ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList);
    properties.put(org.apache.kafka.clients.producer.ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer");
    properties.put(org.apache.kafka.clients.producer.ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
    return properties;
}
Example 4
Project: SkyEye-master  File: KafkaProducerDemo.java
public static void main(String[] args) {
    Properties properties = new Properties();
    properties.put("bootstrap.servers", "192.168.88.140:9092,192.168.88.145:9092,192.168.88.148:9092");
    properties.put("retries", 0);
    // props.put("batch.size", 16384);
    properties.put("linger.ms", 0);
    properties.put("max.block.ms", 3000);
    properties.put("key.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
    properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    //        properties.put("partitioner.class", "com.goodix.kafka.MyPartition");
    KafkaProducer<byte[], String> producer = new KafkaProducer<byte[], String>(properties);
    ProducerRecord<byte[], String> record = new ProducerRecord<byte[], String>("app-log-test-1", "ni shi shei".getBytes(), "ni shi shei");
    producer.send(record, new Callback() {

        @Override
        public void onCompletion(RecordMetadata metadata, Exception exception) {
            if (null != exception) {
                LOGGER.error("error, {}", exception.getMessage());
            } else {
                LOGGER.info("The offset of the record we just sent is: " + metadata.offset());
                LOGGER.info("The partition of the record we just sent is: " + metadata.partition());
            }
        }
    });
    try {
        Thread.sleep(1000);
        producer.close();
    } catch (InterruptedException exception) {
        exception.printStackTrace();
    }
}
Example 5
Project: brave-master  File: KafkaSpanCollector.java
public static Builder builder(String bootstrapServers) {
    Properties props = new Properties();
    props.put("bootstrap.servers", bootstrapServers);
    props.put("key.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
    props.put("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
    return builder().kafkaProperties(props);
}
Example 6
Project: message-queue-client-framework-master  File: KafkaMessageNewReceiverTest.java
@Test
public void test() throws Exception {
    Properties sendProperties = new Properties();
    sendProperties.setProperty("bootstrap.servers", "localhost:" + port);
    sendProperties.setProperty("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
    sendProperties.setProperty("key.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
    @SuppressWarnings("unchecked") KafkaMessageNewSender<byte[], byte[]> sender = KafkaMessageNewSender.getOrCreateInstance(sendProperties);
    for (int i = 0; i < 10; i++) {
        sender.sendWithKey(topic, ("key" + i).getBytes(), ("value" + i).getBytes());
    }
    sender.shutDown();
    Properties receProperties = new Properties();
    receProperties.setProperty("bootstrap.servers", "localhost:" + port);
    receProperties.setProperty("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
    receProperties.setProperty("key.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
    receProperties.put("group.id", "KafkaMessageNewReceiverTest");
    KafkaMessageReceiver<byte[], byte[]> receiver = new KafkaMessageNewReceiver<byte[], byte[]>(receProperties);
    Assert.assertEquals(receiver.getPartitionCount(topic), 1);
    Assert.assertEquals(receiver.getEarliestOffset(topic, 0), 0);
    Assert.assertEquals(receiver.getLatestOffset(topic, 0), 10);
    List<byte[]> vals1 = receiver.receive(topic, 0, 0, 2);
    Assert.assertEquals(vals1.size(), 2);
    receiver.receive(topic, 0, 2, 5);
    try {
        receiver.receive(topic, 0, 2, 0);
    } catch (Exception e) {
    }
    receiver.receive(topic, 0, -1, 10);
    receiver.receive(topic, 0, -1, 11);
    Map<byte[], byte[]> maps1 = receiver.receiveWithKey(topic, 0, 1, 2);
    Assert.assertEquals(maps1.size(), 2);
    receiver.receiveWithKey(topic, 0, 3, 5);
    try {
        receiver.receiveWithKey(topic, 0, 3, 0);
    } catch (Exception e) {
    }
    receiver.receiveWithKey(topic, 0, -1, 10);
    receiver.receiveWithKey(topic, 0, -1, 11);
    receiver.shutDown();
    receiver.shutDown();
}
Example 7
Project: esper-master  File: TestKafkaInputCustomSubscriberAndProcessor.java
public void testInput() {
    Properties pluginProperties = SupportConstants.getInputPluginProps(TOPIC, ByteArrayDeserializer.class.getName(), null);
    pluginProperties.put(EsperIOKafkaConfig.INPUT_PROCESSOR_CONFIG, SupportInputCustomProcessor.class.getName());
    EPServiceProvider epService = SupportConstants.getEngineWKafkaInput(this.getClass().getSimpleName(), pluginProperties);
    epService.getEPAdministrator().getConfiguration().addEventType(SupportBean.class);
    EPStatement stmt = epService.getEPAdministrator().createEPL("select * from SupportBean");
    SupportListener listener = new SupportListener();
    stmt.addListener(listener);
    Properties producerProperties = SupportConstants.getProducerProps(org.apache.kafka.common.serialization.ByteArraySerializer.class.getName());
    KafkaProducer<String, byte[]> producer = new KafkaProducer<>(producerProperties);
    int randomNumber = (int) (Math.random() * 100000000);
    byte[] bytes = ByteBuffer.allocate(4).putInt(randomNumber).array();
    producer.send(new ProducerRecord<>(TOPIC, bytes));
    SupportAwaitUtil.awaitOrFail(10, TimeUnit.SECONDS, "failed to receive expected event", (Supplier<Object>) () -> {
        for (EventBean[] events : listener.getEvents()) {
            for (EventBean event : events) {
                SupportBean bean = (SupportBean) event.getUnderlying();
                if (bean.getIntProp() == randomNumber) {
                    return true;
                }
            }
        }
        return null;
    });
    producer.close();
    epService.destroy();
    assertTrue(SupportInputCustomProcessor.isClosed());
    assertNotNull(SupportInputCustomProcessor.getContext().getAdapter());
    assertNotNull(SupportInputCustomProcessor.getContext().getEngine());
    assertNotNull(SupportInputCustomProcessor.getContext().getProperties());
    assertNotNull(SupportInputCustomProcessor.getContext().getConsumer());
}
Example 8
Project: nifi-master  File: PublishKafka.java
protected PublisherPool createPublisherPool(final ProcessContext context) {
    final int maxMessageSize = context.getProperty(MAX_REQUEST_SIZE).asDataSize(DataUnit.B).intValue();
    final long maxAckWaitMillis = context.getProperty(ACK_WAIT_TIME).asTimePeriod(TimeUnit.MILLISECONDS).longValue();
    final Map<String, Object> kafkaProperties = new HashMap<>();
    KafkaProcessorUtils.buildCommonKafkaProperties(context, ProducerConfig.class, kafkaProperties);
    kafkaProperties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    kafkaProperties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    kafkaProperties.put("max.request.size", String.valueOf(maxMessageSize));
    return new PublisherPool(kafkaProperties, getLogger(), maxMessageSize, maxAckWaitMillis);
}
Example 9
Project: kafka-master  File: KafkaConfigBackingStore.java
// package private for testing
KafkaBasedLog<String, byte[]> setupAndCreateKafkaBasedLog(String topic, WorkerConfig config) {
    Map<String, Object> producerProps = new HashMap<>();
    producerProps.putAll(config.originals());
    producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    producerProps.put(ProducerConfig.RETRIES_CONFIG, Integer.MAX_VALUE);
    Map<String, Object> consumerProps = new HashMap<>();
    consumerProps.putAll(config.originals());
    consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
    return createKafkaBasedLog(topic, producerProps, consumerProps, new ConsumeCallback());
}
Example 10
Project: suro-master  File: KafkaSink.java
@Override
public void open() {
    producer = new KafkaProducer<>(props, new ByteArraySerializer(), new ByteArraySerializer());
    executor.submit(new Runnable() {

        @Override
        public void run() {
            while (true) {
                final MessageContainer message;
                try {
                    message = metadataWaitingQueue.poll(1, TimeUnit.SECONDS);
                } catch (InterruptedException e) {
                    continue;
                }
                if (message == null) {
                    continue;
                }
                // check poison msg for shutdown
                if (message == SHUTDOWN_POISON_MSG) {
                    break;
                }
                try {
                    if (!metadataFetchedTopicSet.contains(message.getRoutingKey())) {
                        producer.partitionsFor(message.getRoutingKey());
                        metadataFetchedTopicSet.add(message.getRoutingKey());
                    }
                    sendMessage(message);
                } catch (Throwable t) {
                    log.error("failed to get metadata: " + message.getRoutingKey(), t);
                    if (!metadataWaitingQueue.offer(message)) {
                        dropMessage(message.getRoutingKey(), "metadataWaitingQueueFull");
                    }
                }
            }
        }
    });
}
Example 11
Project: chaperone-master  File: KafkaAuditReporter.java
private Producer<String, byte[]> getKafkaProducer(String brokerList, String requiredAcks, HostMetadata host) {
    Properties properties = new Properties();
    // Old props list
    properties.put("metadata.broker.list", brokerList);
    properties.put("serializer.class", "kafka.serializer.DefaultEncoder");
    properties.put("key.serializer.class", "kafka.serializer.StringEncoder");
    properties.put("compression.type", "gzip");
    properties.put("request.required.acks", requiredAcks);
    // New props needed
    properties.put("bootstrap.servers", brokerList);
    properties.put("client.id", host.getTier() + "-" + host.getHost());
    return new KafkaProducer<>(properties, new StringSerializer(), new ByteArraySerializer());
}
Example 12
Project: core-ng-project-master  File: Kafka.java
protected Producer<String, byte[]> createProducer() {
    if (uri == null)
        throw new Error("uri must not be null");
    StopWatch watch = new StopWatch();
    try {
        Map<String, Object> config = Maps.newHashMap();
        config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, uri);
        // metadata update timeout
        config.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, Duration.ofSeconds(30).toMillis());
        Producer<String, byte[]> producer = new KafkaProducer<>(config, new StringSerializer(), new ByteArraySerializer());
        producerMetrics.setMetrics(producer.metrics());
        return producer;
    } finally {
        logger.info("create kafka producer, uri={}, name={}, elapsedTime={}", uri, name, watch.elapsedTime());
    }
}
Example 13
Project: examples-master  File: EventDeduplicationLambdaIntegrationTest.java
@Test
public void shouldRemoveDuplicatesFromTheInput() throws Exception {
    // e.g. "4ff3cb44-abcb-46e3-8f9a-afb7cc74fbb8"
    String firstId = UUID.randomUUID().toString();
    String secondId = UUID.randomUUID().toString();
    String thirdId = UUID.randomUUID().toString();
    List<String> inputValues = Arrays.asList(firstId, secondId, firstId, firstId, secondId, thirdId, thirdId, firstId, secondId);
    List<String> expectedValues = Arrays.asList(firstId, secondId, thirdId);
    //
    // Step 1: Configure and start the processor topology.
    //
    KStreamBuilder builder = new KStreamBuilder();
    Properties streamsConfiguration = new Properties();
    streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "deduplication-lambda-integration-test");
    streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
    streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.ByteArray().getClass().getName());
    streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    // The commit interval for flushing records to state stores and downstream must be lower than
    // this integration test's timeout (30 secs) to ensure we observe the expected processing results.
    streamsConfiguration.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, TimeUnit.SECONDS.toMillis(10));
    streamsConfiguration.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    // Use a temporary directory for storing state, which will be automatically removed after the test.
    streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getAbsolutePath());
    // How long we "remember" an event.  During this time, any incoming duplicates of the event
    // will be, well, dropped, thereby de-duplicating the input data.
    //
    // The actual value depends on your use case.  To reduce memory and disk usage, you could
    // decrease the size to purge old windows more frequently at the cost of potentially missing out
    // on de-duplicating late-arriving records.
    long maintainDurationPerEventInMs = TimeUnit.MINUTES.toMillis(10);
    StateStoreSupplier deduplicationStoreSupplier = Stores.create("eventId-store")
        // must match the return type of the Transformer's id extractor
        .withKeys(Serdes.String())
        .withValues(Serdes.Long())
        .persistent()
        .windowed(maintainDurationPerEventInMs, TimeUnit.MINUTES.toMillis(30), 3, false)
        .build();
    builder.addStateStore(deduplicationStoreSupplier);
    KStream<byte[], String> input = builder.stream(inputTopic);
    KStream<byte[], String> deduplicated = input.transform(
        // function as needed.
        () -> new DeduplicationTransformer<>(maintainDurationPerEventInMs, (key, value) -> value),
        "eventId-store");
    deduplicated.to(outputTopic);
    KafkaStreams streams = new KafkaStreams(builder, streamsConfiguration);
    streams.start();
    //
    // Step 2: Produce some input data to the input topic.
    //
    Properties producerConfig = new Properties();
    producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
    producerConfig.put(ProducerConfig.ACKS_CONFIG, "all");
    producerConfig.put(ProducerConfig.RETRIES_CONFIG, 0);
    producerConfig.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
    producerConfig.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    IntegrationTestUtils.produceValuesSynchronously(inputTopic, inputValues, producerConfig);
    //
    // Step 3: Verify the application's output data.
    //
    Properties consumerConfig = new Properties();
    consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
    consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, "deduplication-integration-test-standard-consumer");
    consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    consumerConfig.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
    consumerConfig.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    List<String> actualValues = IntegrationTestUtils.waitUntilMinValuesRecordsReceived(consumerConfig, outputTopic, expectedValues.size());
    streams.close();
    assertThat(actualValues).containsExactlyElementsOf(expectedValues);
}
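
A note on Example 13: in Kafka Streams code the byte-array serializer is usually reached through the byte-array serde rather than instantiated directly. A minimal sketch of that relationship (class and topic names here are illustrative, not from the project above):

import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.serialization.Serializer;

public class ByteArraySerdeSketch {

    public static void main(String[] args) {
        // Serdes.ByteArray() wraps a ByteArraySerializer/ByteArrayDeserializer pair.
        Serde<byte[]> byteArraySerde = Serdes.ByteArray();
        Serializer<byte[]> serializer = byteArraySerde.serializer(); // a ByteArraySerializer instance
        byte[] bytes = serializer.serialize("some-topic", new byte[] { 42 });
        System.out.println(bytes.length); // 1
    }
}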
Example 14
Project: gobblin-master  File: KafkaSimpleStreamingTest.java
/**
   * testExtractor checks that the extractor code does the right thing. First it creates a topic, and sets up a source to point
   * to it. workUnits are generated from the source (only a single wU should be returned). Then it writes a record to this topic
   * and reads back from the extractor to verify the right record is returned. A second record is then written and read back
   * through the extractor to verify poll works as expected. Finally we test the commit api by forcing a commit and then starting
   * a new extractor to ensure we fetch data from after the commit. The commit is also verified in Kafka directly
   * @throws IOException
   * @throws InterruptedException
   * @throws DataRecordException
   */
@Test(timeOut = 10000)
public void testExtractor() throws IOException, InterruptedException, DataRecordException {
    final String topic = "testSimpleStreamingExtractor";
    _kafkaTestHelper.provisionTopic(topic);
    Properties props = new Properties();
    props.put("bootstrap.servers", "localhost:" + _kafkaTestHelper.getKafkaServerPort());
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    props.put("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
    Producer<String, byte[]> producer = new KafkaProducer<>(props);
    final byte[] record_1 = { 0, 1, 3 };
    final byte[] record_2 = { 2, 4, 6 };
    final byte[] record_3 = { 5, 7, 9 };
    // Write a sample record to the topic
    producer.send(new ProducerRecord<String, byte[]>(topic, topic, record_1));
    producer.flush();
    KafkaSimpleStreamingExtractor<String, byte[]> kSSE = getStreamingExtractor(topic);
    TopicPartition tP = new TopicPartition(topic, 0);
    KafkaSimpleStreamingExtractor.KafkaWatermark kwm = new KafkaSimpleStreamingExtractor.KafkaWatermark(tP, new LongWatermark(0));
    byte[] reuse = new byte[1];
    RecordEnvelope<byte[]> oldRecord = new RecordEnvelope<>(reuse, kwm);
    Map<String, CheckpointableWatermark> committedWatermarks = new HashMap<>();
    WatermarkStorage mockWatermarkStorage = mock(WatermarkStorage.class);
    when(mockWatermarkStorage.getCommittedWatermarks(any(Class.class), any(Iterable.class))).thenReturn(committedWatermarks);
    kSSE.start(mockWatermarkStorage);
    // read and verify the record matches we just wrote
    RecordEnvelope<byte[]> record = kSSE.readRecord(oldRecord);
    Assert.assertEquals(record.getRecord(), record_1);
    // write a second record.
    producer.send(new ProducerRecord<String, byte[]>(topic, topic, record_2));
    producer.flush();
    // read the second record using same extractor to verify it matches whats expected
    record = kSSE.readRecord(oldRecord);
    Assert.assertEquals(record.getRecord(), record_2);
    // Commit the watermark
    committedWatermarks.put(record.getWatermark().getSource(), record.getWatermark());
    // write a third record.
    producer.send(new ProducerRecord<String, byte[]>(topic, topic, record_3));
    producer.flush();
    // recreate extractor to force a seek.
    kSSE = getStreamingExtractor(topic);
    kSSE.start(mockWatermarkStorage);
    record = kSSE.readRecord(oldRecord);
    // check it matches the data written
    Assert.assertEquals(record.getRecord(), record_3);
}
Example 15
Project: datacollector-master  File: KafkaProducer09IT.java
@Test
public void testKafkaProducer09WriteFailsRecordTooLarge() throws IOException, StageException {
    HashMap<String, Object> kafkaProducerConfigs = new HashMap<>();
    kafkaProducerConfigs.put("retries", 0);
    kafkaProducerConfigs.put("batch.size", 100);
    kafkaProducerConfigs.put("linger.ms", 0);
    kafkaProducerConfigs.put(KafkaConstants.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    kafkaProducerConfigs.put(KafkaConstants.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
    // Set the message size to 510 as "message.max.bytes" is set to 500
    final String message = StringUtils.leftPad("a", 510, "b");
    SdcKafkaProducer sdcKafkaProducer = createSdcKafkaProducer(port, kafkaProducerConfigs);
    sdcKafkaProducer.init();
    String topic = getNextTopic();
    sdcKafkaProducer.enqueueMessage(topic, message.getBytes(), "0");
    try {
        sdcKafkaProducer.write();
        fail("Expected exception but didn't get any");
    } catch (StageException se) {
        assertEquals(KafkaErrors.KAFKA_69, se.getErrorCode());
    } catch (Exception e) {
        fail("Expected Stage Exception but got " + e);
    }
}
Example 16
Project: Aletheia-master  File: KafkaBinarySender.java
protected Properties getProducerConfig() {
    final Properties producerProperties = (Properties) kafkaTopicDeliveryEndPoint.getProperties().clone();
    if (producerProperties.getProperty("value.serializer") != null || producerProperties.getProperty("key.serializer") != null) {
        logger.warn("serializer cannot be provided as producer properties. " + "Overriding manually to be the correct serialization type.");
    }
    producerProperties.setProperty("key.serializer", StringSerializer.class.getName());
    producerProperties.setProperty("value.serializer", ByteArraySerializer.class.getName());
    producerProperties.setProperty("client.id", kafkaTopicDeliveryEndPoint.getProperties().getProperty("client.id", UUID.randomUUID().toString()));
    producerProperties.setProperty("bootstrap.servers", kafkaTopicDeliveryEndPoint.getBrokerList());
    producerProperties.setProperty("metric.reporters", "com.outbrain.aletheia.datum.metrics.kafka.KafkaMetrics");
    logger.warn("Using producer config: {}", producerProperties);
    return producerProperties;
}
Example 17
Project: euphoria-master  File: KafkaUtils.java
public static Producer<byte[], byte[]> newProducer(String brokerList, Settings config) {
    final Properties ps = toProperties(config);
    ps.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList);
    ps.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    ps.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    if (ps.getProperty(ProducerConfig.ACKS_CONFIG) == null) {
        ps.setProperty(ProducerConfig.ACKS_CONFIG, "1");
    }
    return new KafkaProducer<>(ps);
}
Example 18
Project: heritrix3-master  File: KafkaCrawlLogFeed.java
protected KafkaProducer<String, byte[]> kafkaProducer() {
    if (kafkaProducer == null) {
        synchronized (this) {
            if (kafkaProducer == null) {
                final Properties props = new Properties();
                props.put("bootstrap.servers", getBrokerList());
                props.put("acks", "1");
                props.put("producer.type", "async");
                props.put("key.serializer", StringSerializer.class.getName());
                props.put("value.serializer", ByteArraySerializer.class.getName());
                /*
                     * XXX This mess here exists so that the kafka producer
                     * thread is in a thread group that is not the ToePool,
                     * so that it doesn't get interrupted at the end of the
                     * crawl in ToePool.cleanup(). 
                     */
                kafkaProducerThreads = new ThreadGroup(Thread.currentThread().getThreadGroup().getParent(), "KafkaProducerThreads");
                ThreadFactory threadFactory = new ThreadFactory() {

                    public Thread newThread(Runnable r) {
                        return new Thread(kafkaProducerThreads, r);
                    }
                };
                Callable<KafkaProducer<String, byte[]>> task = new Callable<KafkaProducer<String, byte[]>>() {

                    public KafkaProducer<String, byte[]> call() throws InterruptedException {
                        return new KafkaProducer<String, byte[]>(props);
                    }
                };
                ExecutorService executorService = Executors.newFixedThreadPool(1, threadFactory);
                Future<KafkaProducer<String, byte[]>> future = executorService.submit(task);
                try {
                    kafkaProducer = future.get();
                } catch (InterruptedException e) {
                    throw new RuntimeException(e);
                } catch (ExecutionException e) {
                    throw new RuntimeException(e);
                } finally {
                    executorService.shutdown();
                }
            }
        }
    }
    return kafkaProducer;
}
Example 19
Project: schema-registry-master  File: KafkaStore.java
@Override
public void init() throws StoreInitializationException {
    if (initialized.get()) {
        throw new StoreInitializationException("Illegal state while initializing store. Store was already initialized");
    }
    // create the schema topic if needed
    createSchemaTopic();
    // set the producer properties and initialize a Kafka producer client
    Properties props = new Properties();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapBrokers);
    props.put(ProducerConfig.ACKS_CONFIG, "-1");
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, org.apache.kafka.common.serialization.ByteArraySerializer.class);
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, org.apache.kafka.common.serialization.ByteArraySerializer.class);
    // Producer should not retry
    props.put(ProducerConfig.RETRIES_CONFIG, 0);
    props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, this.config.getString(SchemaRegistryConfig.KAFKASTORE_SECURITY_PROTOCOL_CONFIG));
    addSecurityConfigsToClientProperties(this.config, props);
    producer = new KafkaProducer<byte[], byte[]>(props);
    // start the background thread that subscribes to the Kafka topic and applies updates.
    // the thread must be created after the schema topic has been created.
    this.kafkaTopicReader = new KafkaStoreReaderThread<>(this.bootstrapBrokers, topic, groupId, this.storeUpdateHandler, serializer, this.localStore, this.noopKey, this.config);
    this.kafkaTopicReader.start();
    try {
        waitUntilKafkaReaderReachesLastOffset(initTimeout);
    } catch (StoreException e) {
        throw new StoreInitializationException(e);
    }
    boolean isInitialized = initialized.compareAndSet(false, true);
    if (!isInitialized) {
        throw new StoreInitializationException("Illegal state while initializing store. Store " + "was already initialized");
    }
}
Example 20
Project: flink-master  File: FlinkKafkaProducerBaseTest.java
/**
	 * Tests that the constructor defaults the key and value serializers in the config to ByteArraySerializer if they are not set
	 */
@Test
public void testKeyValueDeserializersSetIfMissing() throws Exception {
    Properties props = new Properties();
    props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:12345");
    // should set missing key and value serializers
    new DummyFlinkKafkaProducer<>(props, null);
    assertTrue(props.containsKey(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG));
    assertTrue(props.containsKey(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG));
    assertTrue(props.getProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG).equals(ByteArraySerializer.class.getCanonicalName()));
    assertTrue(props.getProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG).equals(ByteArraySerializer.class.getCanonicalName()));
}
Example 21
Project: camel-master  File: KafkaProducer.java
/**
     * Attempts to convert the object to the type expected by the specified serializer class
     */
protected Object tryConvertToSerializedType(Exchange exchange, Object object, String serializerClass) {
    Object answer = null;
    if (KafkaConstants.KAFKA_DEFAULT_SERIALIZER.equals(serializerClass)) {
        answer = exchange.getContext().getTypeConverter().tryConvertTo(String.class, exchange, object);
    } else if ("org.apache.kafka.common.serialization.ByteArraySerializer".equals(serializerClass)) {
        answer = exchange.getContext().getTypeConverter().tryConvertTo(byte[].class, exchange, object);
    } else if ("org.apache.kafka.common.serialization.ByteBufferSerializer".equals(serializerClass)) {
        answer = exchange.getContext().getTypeConverter().tryConvertTo(ByteBuffer.class, exchange, object);
    } else if ("org.apache.kafka.common.serialization.BytesSerializer".equals(serializerClass)) {
        // we need to convert to byte array first
        byte[] array = exchange.getContext().getTypeConverter().tryConvertTo(byte[].class, exchange, object);
        if (array != null) {
            answer = new Bytes(array);
        }
    }
    return answer != null ? answer : object;
}
Example 22
Project: ddth-kafka-master  File: KafkaHelper.java
/**
     * Builds default producer properties, and applies custom configurations
     * if any.
     * 
     * <p>
     * Note: custom configuration properties will be populated <i>after</i> and
     * <i>additional/overridden</i> to the default configuration.
     * </p>
     * 
     * @param type
     * @param bootstrapServers
     * @param customProps
     * @return
     * @since 1.3.2
     */
public static Properties buildKafkaProducerProps(ProducerType type, String bootstrapServers, Properties customProps) {
    Properties props = new Properties();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    // max request size: 128kb, it's also max message size
    props.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, String.valueOf(128 * 1024));
    // 60mb buffer & 256-byte batch size (batch.size is in bytes)
    props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, String.valueOf(60 * 1024 * 1024));
    props.put(ProducerConfig.BATCH_SIZE_CONFIG, String.valueOf(256));
    // KafkaProducer.send() / KafkaProducer.partitionsFor() max block: 60s
    props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, String.valueOf(60000));
    // props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "snappy");
    props.put(ProducerConfig.LINGER_MS_CONFIG, String.valueOf(10));
    // ack timeout 30 seconds
    props.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, String.valueOf(30000));
    // metadata fetch timeout: 10 seconds
    props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, String.valueOf(10000));
    props.put(ProducerConfig.RETRIES_CONFIG, String.valueOf(3));
    props.put(ProducerConfig.RETRY_BACKOFF_MS_CONFIG, String.valueOf(1000));
    switch(type) {
        // }
        case LEADER_ACK:
            {
                props.put(ProducerConfig.ACKS_CONFIG, "1");
                // props.put("producer.type", "sync");
                break;
            }
        case ALL_ACKS:
            {
                props.put(ProducerConfig.ACKS_CONFIG, "all");
                // props.put("producer.type", "sync");
                break;
            }
        case NO_ACK:
        default:
            {
                props.put(ProducerConfig.ACKS_CONFIG, "0");
                // props.put("producer.type", "sync");
                break;
            }
    }
    if (customProps != null) {
        // populate custom configurations
        props.putAll(customProps);
    }
    return props;
}
Example 23
Project: seldon-server-master  File: ItemSimilarityProcessor.java
public void sendMessages(List<JaccardSimilarity> sims, long timestamp) {
    Properties producerConfig = new Properties();
    producerConfig.put("bootstrap.servers", this.kafkaServers);
    producerConfig.put("key.serializer", "org.apache.kafka.common" + ".serialization.ByteArraySerializer");
    producerConfig.put("value.serializer", "org.apache.kafka.common" + ".serialization.StringSerializer");
    KafkaProducer producer = new KafkaProducer<byte[], String>(producerConfig);
    StringBuffer buf = new StringBuffer();
    producer.send(new ProducerRecord<byte[], String>(this.outputTopic, "START".getBytes(), "0,,,"));
    for (JaccardSimilarity s : sims) {
        buf.append(timestamp).append(",").append(s.item1).append(",").append(s.item2).append(",").append(s.similarity);
        producer.send(new ProducerRecord<byte[], String>(this.outputTopic, "A".getBytes(), buf.toString()));
        buf.delete(0, buf.length());
    }
    producer.send(new ProducerRecord<byte[], String>(this.outputTopic, "END".getBytes(), "0,,,"));
}
Example 24
Project: manifold-master  File: KafkaOutputConnector.java
/**
   * Connect.
   *
   * @param configParameters is the set of configuration parameters, which in
   * this case describe the target appliance, basic auth configuration, etc.
   * (This formerly came out of the ini file.)
   */
@Override
public void connect(ConfigParams configParameters) {
    super.connect(configParameters);
    Properties props = new Properties();
    String IP = params.getParameter(KafkaConfig.IP);
    String PORT = params.getParameter(KafkaConfig.PORT);
    //System.out.println("Kafka IP: " + IP);
    //System.out.println("Kafka Port: " + PORT);
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, IP + ":" + PORT);
    props.put(ProducerConfig.RETRIES_CONFIG, "3");
    props.put(ProducerConfig.ACKS_CONFIG, "all");
    props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "none");
    props.put(ProducerConfig.BATCH_SIZE_CONFIG, 200);
    props.put(ProducerConfig.BLOCK_ON_BUFFER_FULL_CONFIG, true);
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer");
    producer = new KafkaProducer(props);
}
Example 25
Project: kafka-rest-master  File: AbstractConsumerTest.java
protected void produceBinaryMessages(List<ProducerRecord<byte[], byte[]>> records) {
    Properties props = new Properties();
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
    props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList);
    props.setProperty(ProducerConfig.ACKS_CONFIG, "all");
    Producer<byte[], byte[]> producer = new KafkaProducer<byte[], byte[]>(props);
    for (ProducerRecord<byte[], byte[]> rec : records) {
        try {
            producer.send(rec).get();
        } catch (Exception e) {
            fail("Consumer test couldn't produce input messages to Kafka");
        }
    }
    producer.close();
}
Example 26
Project: flume-master  File: TestKafkaSource.java
private Properties createProducerProps(String bootStrapServers) {
    Properties props = new Properties();
    props.put(ProducerConfig.ACKS_CONFIG, "-1");
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer");
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootStrapServers);
    return props;
}
Example 27
Project: server-interface-master  File: KafkaWebsocketProducer.java
@SuppressWarnings("unchecked")
public void start() {
    if (producer == null) {
        producer = new KafkaProducer(producerConfig, new StringSerializer(), new ByteArraySerializer());
    }
}
Example 28
Project: helios-master  File: KafkaClientProvider.java
/**
   * Returns a producer that uses {@link StringSerializer} for
   * keys and {@link ByteArraySerializer} for values.
   * @return An {@link Optional} of {@link KafkaProducer}.
   */
public Optional<KafkaProducer<String, byte[]>> getDefaultProducer() {
    return getProducer(new StringSerializer(), new ByteArraySerializer());
}
Example 29
Project: comsat-master  File: FiberKafkaProducerTest.java
@Before
public void setUp() {
    mockProducer = new MockProducer<>(false, new ByteArraySerializer(), new ByteArraySerializer());
    fiberProducer = new FiberKafkaProducer<>(mockProducer);
    phaser = new co.paralleluniverse.strands.concurrent.Phaser(2);
}