Java Examples for org.apache.kafka.common.PartitionInfo
The following Java examples illustrate the usage of org.apache.kafka.common.PartitionInfo. The source code samples are taken from different open source projects.
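Before the examples, here is a minimal sketch of how PartitionInfo instances are typically obtained and inspected; the broker address and topic name are illustrative assumptions, not taken from any of the projects below.

// Illustrative only: fetch partition metadata for one topic and read the PartitionInfo accessors.
Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
    for (PartitionInfo info : consumer.partitionsFor("my-topic")) {
        System.out.printf("topic=%s partition=%d leader=%s replicas=%d isr=%d%n",
                info.topic(), info.partition(), info.leader(),
                info.replicas().length, info.inSyncReplicas().length);
    }
}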
Example 1
Project: SkyEye-master | File: KeyModPartitioner.java
@Override
public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
List<PartitionInfo> partitions = cluster.partitionsForTopic(topic);
int numPartitions = partitions.size();
int partitionNum = 0;
try {
partitionNum = Utils.murmur2(keyBytes);
} catch (Exception e) {
partitionNum = key.hashCode();
}
return Math.abs(partitionNum % numPartitions);
}
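A producer would normally pick up such a partitioner through its configuration. The following sketch shows one way to register it; the broker address, topic, and record values are assumptions for illustration.

// Illustrative producer setup that registers the custom partitioner; connection details are assumed.
Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
props.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, KeyModPartitioner.class.getName());
try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
    producer.send(new ProducerRecord<>("my-topic", "some-key", "some-value"));
}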
Example 2
Project: suro-master | File: KafkaRetentionPartitioner.java
public int getKey(String topic, List<PartitionInfo> partitions) {
if (topic == null) {
throw new IllegalArgumentException("topic is null");
}
if (partitions.isEmpty()) {
throw new IllegalArgumentException("no partitions for topic: " + topic);
}
final int numPartitions = partitions.size();
Integer index = indexCache.get(topic);
if (index != null) {
// stick to the same partition in cache
return index;
} else {
// randomly pick a new partition from [0, numPartitions) range
int partition = prng.nextInt(numPartitions);
// try to find a partition with leader
for (int i = 0; i < numPartitions; i++) {
if (partitions.get(partition).leader() != null) {
// found a partition with leader
index = indexCache.putIfAbsent(topic, partition);
return index != null ? index : partition;
} else {
// try next partition
partition = (partition + 1) % numPartitions;
}
}
// no partition with a leader was found; return the candidate partition anyway, but don't update the cache in this case.
return partition;
}
}
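The indexCache and prng fields used above are not part of the excerpt. Plausible declarations, assuming the putIfAbsent call implies a concurrent map, would be:

// Assumed field declarations for the excerpt above (not shown in the original snippet).
private final ConcurrentMap<String, Integer> indexCache = new ConcurrentHashMap<>();
private final Random prng = new Random();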
Example 3
Project: ja-micro-master | File: TopicMessageCounter.java
/**
* Gets the total message count for the topic.
* <b>WARNING: Don't use with compacted topics</b>
*/
@SuppressWarnings("unchecked")
public long getCount(String kafkaBrokers, String topic) {
KafkaConsumer consumer = buildConsumer(kafkaBrokers);
try {
@SuppressWarnings("unchecked") Map<String, List<PartitionInfo>> topics = consumer.listTopics();
List<PartitionInfo> partitionInfos = topics.get(topic);
if (partitionInfos == null) {
logger.warn("Partition information was not found for topic {}", topic);
return 0;
} else {
Collection<TopicPartition> partitions = new ArrayList<>();
for (PartitionInfo partitionInfo : partitionInfos) {
TopicPartition partition = new TopicPartition(topic, partitionInfo.partition());
partitions.add(partition);
}
Map<TopicPartition, Long> endingOffsets = consumer.endOffsets(partitions);
Map<TopicPartition, Long> beginningOffsets = consumer.beginningOffsets(partitions);
return diffOffsets(beginningOffsets, endingOffsets);
}
} finally {
consumer.close();
}
}
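The diffOffsets helper called above is not included in the excerpt. Under the assumption that it sums the per-partition difference between ending and beginning offsets, a minimal sketch could look like this:

// Hypothetical sketch of the diffOffsets helper (not shown in the excerpt):
// sums the per-partition difference between ending and beginning offsets.
private long diffOffsets(Map<TopicPartition, Long> beginningOffsets, Map<TopicPartition, Long> endingOffsets) {
    long total = 0;
    for (Map.Entry<TopicPartition, Long> entry : endingOffsets.entrySet()) {
        Long beginning = beginningOffsets.get(entry.getKey());
        if (beginning != null) {
            total += entry.getValue() - beginning;
        }
    }
    return total;
}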
Example 4
Project: kafka-master | File: KafkaConsumer.java
/**
* Get metadata about the partitions for a given topic. This method will issue a remote call to the server if it
* does not already have any metadata about the given topic.
*
* @param topic The topic to get partition metadata for
* @return The list of partitions
* @throws org.apache.kafka.common.errors.WakeupException if {@link #wakeup()} is called before or while this
* function is called
* @throws org.apache.kafka.common.errors.InterruptException if the calling thread is interrupted before or while
* this function is called
* @throws org.apache.kafka.common.errors.AuthorizationException if not authorized to the specified topic
* @throws org.apache.kafka.common.errors.TimeoutException if the topic metadata could not be fetched before
* expiration of the configured request timeout
* @throws org.apache.kafka.common.KafkaException for any other unrecoverable errors
*/
@Override
public List<PartitionInfo> partitionsFor(String topic) {
acquire();
try {
Cluster cluster = this.metadata.fetch();
List<PartitionInfo> parts = cluster.partitionsForTopic(topic);
if (!parts.isEmpty())
return parts;
Map<String, List<PartitionInfo>> topicMetadata = fetcher.getTopicMetadata(new MetadataRequest.Builder(Collections.singletonList(topic)), requestTimeoutMs);
return topicMetadata.get(topic);
} finally {
release();
}
}
Example 5
Project: tajo-master | File: KafkaTablespace.java
@Override
public List<Fragment> getSplits(String inputSourceId, TableDesc table, boolean requireSorted, EvalNode filterCondition) throws IOException, TajoException {
String topic = table.getMeta().getProperty(KafkaStorageConstants.KAFKA_TOPIC);
int fragmentSize = Integer.parseInt(table.getMeta().getProperty(KafkaStorageConstants.KAFKA_FRAGMENT_SIZE, KafkaStorageConstants.DEFAULT_FRAGMENT_SIZE));
// If no specific partitions are given, scan all partitions of the topic.
String partitions = table.getMeta().getProperty(KafkaStorageConstants.KAFKA_TOPIC_PARTITION, KafkaStorageConstants.DEFAULT_PARTITION);
List<PartitionInfo> partitionList;
if (partitions.equals(KafkaStorageConstants.DEFAULT_PARTITION)) {
partitionList = SimpleConsumerManager.getPartitions(uri, topic);
} else {
partitionList = new LinkedList<>();
// filter partitions.
List<PartitionInfo> topicPartitions = SimpleConsumerManager.getPartitions(uri, topic);
Map<String, PartitionInfo> topicPartitionsMap = new HashMap<>();
for (PartitionInfo partitionInfo : topicPartitions) {
topicPartitionsMap.put(Integer.toString(partitionInfo.partition()), partitionInfo);
}
for (String partitionId : partitions.split(",")) {
partitionList.add(topicPartitionsMap.get(partitionId));
}
}
List<Fragment> fragments = new ArrayList<Fragment>();
for (PartitionInfo partitionInfo : partitionList) {
int partitionId = partitionInfo.partition();
String leaderHost = partitionInfo.leader().host();
long lastOffset;
long startOffset;
try (SimpleConsumerManager simpleConsumerManager = new SimpleConsumerManager(uri, topic, partitionId)) {
lastOffset = simpleConsumerManager.getLatestOffset();
startOffset = simpleConsumerManager.getEarliestOffset();
}
long messageSize = lastOffset - startOffset;
if (0 == lastOffset || 0 == messageSize)
continue;
// If the partition's message count is at most fragmentSize (the message count of one fragment), emit a single fragment.
if (messageSize <= fragmentSize) {
fragments.add(new KafkaFragment(table.getUri(), inputSourceId, topic, startOffset, lastOffset, partitionId, leaderHost));
} else {
// If the partition's message count is greater than fragmentSize, split it into multiple fragments.
long nextFragmentStartOffset = startOffset;
while (nextFragmentStartOffset < lastOffset) {
// partition data: 0 1 2 3 4 5 6 7 8 9 10
// start offset: 0
// last offset: 11
// fragment size: 3
// result: (0, 1, 2), (3, 4, 5), (6, 7, 8), (9, 10)
// 1st nextFragmentStartOffset=0, nextFragmentLastOffset=3
// 2nd nextFragmentStartOffset=3, nextFragmentLastOffset=6
// 3rd nextFragmentStartOffset=6, nextFragmentLastOffset=9
// 4th nextFragmentStartOffset=9, nextFragmentLastOffset=12
long nextFragmentLastOffset = nextFragmentStartOffset + fragmentSize;
// The last fragment may hold fewer than fragmentSize messages, so Tajo takes the minimum of the two offsets.
long fragmentLastOffset = Math.min(nextFragmentLastOffset, lastOffset);
fragments.add(new KafkaFragment(table.getUri(), inputSourceId, topic, nextFragmentStartOffset, fragmentLastOffset, partitionId, leaderHost));
nextFragmentStartOffset = nextFragmentLastOffset;
}
}
}
return fragments;
}
Example 6
Project: flink-master | File: FlinkKafkaProducerBase.java
// ----------------------------------- Utilities --------------------------
/**
* Initializes the connection to Kafka.
*/
@Override
public void open(Configuration configuration) {
producer = getKafkaProducer(this.producerConfig);
RuntimeContext ctx = getRuntimeContext();
if (partitioner != null) {
// the fetched list is immutable, so we're creating a mutable copy in order to sort it
List<PartitionInfo> partitionsList = new ArrayList<>(producer.partitionsFor(defaultTopicId));
// sort the partitions by partition id to make sure the fetched partition list is the same across subtasks
Collections.sort(partitionsList, new Comparator<PartitionInfo>() {
@Override
public int compare(PartitionInfo o1, PartitionInfo o2) {
return Integer.compare(o1.partition(), o2.partition());
}
});
partitions = new int[partitionsList.size()];
for (int i = 0; i < partitions.length; i++) {
partitions[i] = partitionsList.get(i).partition();
}
partitioner.open(ctx.getIndexOfThisSubtask(), ctx.getNumberOfParallelSubtasks(), partitions);
}
LOG.info("Starting FlinkKafkaProducer ({}/{}) to produce into topic {}", ctx.getIndexOfThisSubtask() + 1, ctx.getNumberOfParallelSubtasks(), defaultTopicId);
// register Kafka metrics to Flink accumulators
if (!Boolean.parseBoolean(producerConfig.getProperty(KEY_DISABLE_METRICS, "false"))) {
Map<MetricName, ? extends Metric> metrics = this.producer.metrics();
if (metrics == null) {
// MapR's Kafka implementation returns null here.
LOG.info("Producer implementation does not support metrics");
} else {
final MetricGroup kafkaMetricGroup = getRuntimeContext().getMetricGroup().addGroup("KafkaProducer");
for (Map.Entry<MetricName, ? extends Metric> metric : metrics.entrySet()) {
kafkaMetricGroup.gauge(metric.getKey().name(), new KafkaMetricWrapper(metric.getValue()));
}
}
}
if (flushOnCheckpoint && !((StreamingRuntimeContext) this.getRuntimeContext()).isCheckpointingEnabled()) {
LOG.warn("Flushing on checkpoint is enabled, but checkpointing is not enabled. Disabling flushing.");
flushOnCheckpoint = false;
}
if (logFailuresOnly) {
callback = new Callback() {
@Override
public void onCompletion(RecordMetadata metadata, Exception e) {
if (e != null) {
LOG.error("Error while sending record to Kafka: " + e.getMessage(), e);
}
acknowledgeMessage();
}
};
} else {
callback = new Callback() {
@Override
public void onCompletion(RecordMetadata metadata, Exception exception) {
if (exception != null && asyncException == null) {
asyncException = exception;
}
acknowledgeMessage();
}
};
}
}
Example 7
Project: kafka-monitor-master | File: MultiClusterTopicManagementService.java
void maybeReassignPartitionAndElectLeader() throws Exception {
ZkUtils zkUtils = ZkUtils.apply(_zkConnect, ZK_SESSION_TIMEOUT_MS, ZK_CONNECTION_TIMEOUT_MS, JaasUtils.isZkSecurityEnabled());
try {
List<PartitionInfo> partitionInfoList = getPartitionInfo(zkUtils, _topic);
Collection<Broker> brokers = scala.collection.JavaConversions.asJavaCollection(zkUtils.getAllBrokersInCluster());
if (partitionInfoList.size() == 0)
throw new IllegalStateException("Topic " + _topic + " does not exist in cluster " + _zkConnect);
int currentReplicationFactor = getReplicationFactor(partitionInfoList);
if (_replicationFactor < currentReplicationFactor)
throw new RuntimeException(String.format("Configured replication factor %d " + "is smaller than the current replication factor %d of the topic %s in cluster %s", _replicationFactor, currentReplicationFactor, _topic, _zkConnect));
if (_replicationFactor > currentReplicationFactor && zkUtils.getPartitionsBeingReassigned().isEmpty()) {
LOG.info("MultiClusterTopicManagementService will increase the replication factor of the topic {} in cluster {}", _topic, _zkConnect);
reassignPartitions(zkUtils, brokers, _topic, partitionInfoList.size(), _replicationFactor);
}
if (someBrokerNotPreferredLeader(partitionInfoList, brokers) && zkUtils.getPartitionsBeingReassigned().isEmpty()) {
LOG.info("MultiClusterTopicManagementService will reassign partitions of the topic {} in cluster {}", _topic, _zkConnect);
reassignPartitions(zkUtils, brokers, _topic, partitionInfoList.size(), _replicationFactor);
}
if (someBrokerNotElectedLeader(partitionInfoList, brokers)) {
LOG.info("MultiClusterTopicManagementService will trigger preferred leader election for the topic {} in cluster {}", _topic, _zkConnect);
triggerPreferredLeaderElection(zkUtils, partitionInfoList);
}
} finally {
zkUtils.close();
}
}
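Several helpers referenced above (getPartitionInfo, getReplicationFactor, the leader checks) sit outside the excerpt. As one hedged illustration, the current replication factor could plausibly be derived from the PartitionInfo replica lists:

// Hypothetical sketch of the getReplicationFactor helper (not part of the excerpt):
// takes the replica count of the first partition as the topic's replication factor.
private static int getReplicationFactor(List<PartitionInfo> partitionInfoList) {
    if (partitionInfoList.isEmpty()) {
        throw new IllegalStateException("no partition info available");
    }
    return partitionInfoList.get(0).replicas().length;
}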
Example 8
Project: distributedlog-master | File: KafkaDistributedLogProducer.java
@Override
public List<PartitionInfo> partitionsFor(String s) {
    String[] streams = getStreamsForTopic(s);
    List<PartitionInfo> partitions = Lists.newArrayListWithExpectedSize(streams.length);
    for (int i = 0; i < streams.length; i++) {
        // TODO: maybe add getOwner from dl write proxy to return the owner of the partition
        partitions.add(new PartitionInfo(s, i, null, null, null));
    }
    return partitions;
}
Example 9
Project: Kylin-master | File: KafkaSource.java
@Override
public SourcePartition parsePartitionBeforeBuild(IBuildable buildable, SourcePartition srcPartition) {
checkSourceOffsets(srcPartition);
final SourcePartition result = SourcePartition.getCopyOf(srcPartition);
final CubeInstance cube = (CubeInstance) buildable;
if (result.getStartOffset() == 0) {
final CubeSegment last = cube.getLastSegment();
if (last != null) {
logger.debug("Last segment exists, continue from last segment " + last.getName() + "'s end position: " + last.getSourcePartitionOffsetEnd());
// from last seg's end position
result.setSourcePartitionOffsetStart(last.getSourcePartitionOffsetEnd());
} else if (cube.getDescriptor().getPartitionOffsetStart() != null && cube.getDescriptor().getPartitionOffsetStart().size() > 0) {
logger.debug("Last segment doesn't exist, use the start offset that be initiated previously: " + cube.getDescriptor().getPartitionOffsetStart());
result.setSourcePartitionOffsetStart(cube.getDescriptor().getPartitionOffsetStart());
} else {
// from the topic's earliest offset;
logger.debug("Last segment doesn't exist, and didn't initiate the start offset, will seek from topic's earliest offset.");
result.setSourcePartitionOffsetStart(KafkaClient.getEarliestOffsets(cube));
}
}
final KafkaConfig kafkaConfig = KafkaConfigManager.getInstance(KylinConfig.getInstanceFromEnv()).getKafkaConfig(cube.getRootFactTable());
final String brokers = KafkaClient.getKafkaBrokers(kafkaConfig);
final String topic = kafkaConfig.getTopic();
try (final KafkaConsumer consumer = KafkaClient.getKafkaConsumer(brokers, cube.getName(), null)) {
final List<PartitionInfo> partitionInfos = consumer.partitionsFor(topic);
for (PartitionInfo partitionInfo : partitionInfos) {
if (result.getSourcePartitionOffsetStart().containsKey(partitionInfo.partition()) == false) {
// has new partition added
logger.debug("has new partition added");
long earliest = KafkaClient.getEarliestOffset(consumer, topic, partitionInfo.partition());
logger.debug("new partition " + partitionInfo.partition() + " starts from " + earliest);
result.getSourcePartitionOffsetStart().put(partitionInfo.partition(), earliest);
}
}
}
if (result.getEndOffset() == Long.MAX_VALUE) {
logger.debug("Seek end offsets from topic");
Map<Integer, Long> latestOffsets = KafkaClient.getLatestOffsets(cube);
logger.debug("The end offsets are " + latestOffsets);
for (Integer partitionId : latestOffsets.keySet()) {
if (result.getSourcePartitionOffsetStart().containsKey(partitionId)) {
if (result.getSourcePartitionOffsetStart().get(partitionId) > latestOffsets.get(partitionId)) {
throw new IllegalArgumentException("Partition " + partitionId + " end offset (" + latestOffsets.get(partitionId) + ") is smaller than start offset ( " + result.getSourcePartitionOffsetStart().get(partitionId) + ")");
}
} else {
throw new IllegalStateException("New partition added in between, retry.");
}
}
result.setSourcePartitionOffsetEnd(latestOffsets);
}
long totalStartOffset = 0, totalEndOffset = 0;
for (Long v : result.getSourcePartitionOffsetStart().values()) {
totalStartOffset += v;
}
for (Long v : result.getSourcePartitionOffsetEnd().values()) {
totalEndOffset += v;
}
if (totalStartOffset > totalEndOffset) {
throw new IllegalArgumentException("Illegal offset: start: " + totalStartOffset + ", end: " + totalEndOffset);
}
if (totalStartOffset == totalEndOffset) {
throw new IllegalArgumentException("No new message comes, startOffset = endOffset:" + totalStartOffset);
}
result.setStartOffset(totalStartOffset);
result.setEndOffset(totalEndOffset);
logger.debug("parsePartitionBeforeBuild() return: " + result);
return result;
}
Example 10
Project: euphoria-master | File: KafkaSink.java
@Override
public Writer<Pair<byte[], byte[]>> openWriter(int partitionId) {
String cacheKey = brokers;
Producer<byte[], byte[]> producer = PRODUCERS.get(cacheKey);
if (producer == null) {
// ~ ok, let's create a new producer (this may take some time)
final Producer<byte[], byte[]> p = KafkaUtils.newProducer(brokers, config);
// ~ now, let's try to store it in our global cache
final Producer<byte[], byte[]> p1 = PRODUCERS.putIfAbsent(cacheKey, p);
if (p1 == null) {
producer = p;
} else {
// ~ looks like somebody managed to create concurrently a new
// producer in between and store it quicker into the global cache
producer = p1;
// ~ must close the created one to avoid leaking resources!
p.close();
}
}
final List<PartitionInfo> partitions = producer.partitionsFor(topic);
return new ProducerWriter(producer, topic, partitionId % partitions.size());
}
Example 11
Project: registry-master | File: KafkaAvroSerDesWithKafkaServerTest.java
private ConsumerRecords<String, Object> consumeMessage(String topicName, String bootstrapServers, String consumerGroup) throws InterruptedException {
Map<String, Object> props = new HashMap<>();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
props.putAll(SCHEMA_REGISTRY_CLIENT_CONF);
props.put(ConsumerConfig.GROUP_ID_CONFIG, consumerGroup);
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "30000");
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, KafkaAvroDeserializer.class.getName());
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, KafkaAvroDeserializer.class.getName());
KafkaConsumer<String, Object> consumer = new KafkaConsumer<>(props);
List<PartitionInfo> partitionInfos = consumer.partitionsFor(topicName);
Collection<TopicPartition> partitions = new ArrayList<>();
for (PartitionInfo partitionInfo : partitionInfos) {
partitions.add(new TopicPartition(partitionInfo.topic(), partitionInfo.partition()));
}
LOG.info("partitions [{}]", partitions);
LOG.info("subscribed topis: [{}] ", consumer.listTopics());
consumer.assign(partitions);
consumer.seekToBeginning(partitions);
ConsumerRecords<String, Object> consumerRecords = null;
int ct = 0;
while (ct++ < 100 && (consumerRecords == null || consumerRecords.isEmpty())) {
LOG.info("Polling for consuming messages");
consumerRecords = consumer.poll(500);
}
consumer.commitSync();
consumer.close();
return consumerRecords;
}
Example 12
Project: gobblin-master | File: KafkaSimpleStreamingSource.java
@Override
public List<WorkUnit> getWorkunits(SourceState state) {
Config config = ConfigUtils.propertiesToConfig(state.getProperties());
Consumer<String, byte[]> consumer = getKafkaConsumer(config);
LOG.debug("Consumer is {}", consumer);
String topic = ConfigUtils.getString(config, TOPIC_WHITELIST, // TODO: fix this to use the new API when KafkaWrapper is fixed
StringUtils.EMPTY);
List<WorkUnit> workUnits = new ArrayList<WorkUnit>();
List<PartitionInfo> topicPartitions;
topicPartitions = consumer.partitionsFor(topic);
LOG.info("Partition count is {}", topicPartitions.size());
for (PartitionInfo topicPartition : topicPartitions) {
Extract extract = this.createExtract(DEFAULT_TABLE_TYPE, DEFAULT_NAMESPACE_NAME, topicPartition.topic());
LOG.info("Partition info is {}", topicPartition);
WorkUnit workUnit = WorkUnit.create(extract);
setTopicNameInState(workUnit, topicPartition.topic());
workUnit.setProp(ConfigurationKeys.EXTRACT_TABLE_NAME_KEY, topicPartition.topic());
setPartitionId(workUnit, topicPartition.partition());
workUnits.add(workUnit);
}
return workUnits;
}
Example 13
Project: datacollector-master | File: MapRStreamsValidationUtil09.java
@Override
public int getPartitionCount(String metadataBrokerList, String topic, Map<String, Object> kafkaClientConfigs, int messageSendMaxRetries, long retryBackoffMs) throws StageException {
int partitionCount = -1;
try {
KafkaConsumer<String, String> kafkaConsumer = createTopicMetadataClient();
List<PartitionInfo> partitionInfoList = kafkaConsumer.partitionsFor(topic);
if (partitionInfoList != null) {
partitionCount = partitionInfoList.size();
}
} catch (KafkaException e) {
LOG.error(KafkaErrors.KAFKA_41.getMessage(), topic, e.toString(), e);
throw new StageException(KafkaErrors.KAFKA_41, topic, e.toString(), e);
}
return partitionCount;
}
Example 14
Project: beam-master | File: KafkaIO.java
/**
* The partitions are evenly distributed among the splits. The number of splits returned is
* {@code min(desiredNumSplits, totalNumPartitions)}, though better not to depend on the exact
* count.
*
* <p>It is important to assign the partitions deterministically so that we can support
* resuming a split from last checkpoint. The Kafka partitions are sorted by
* {@code <topic, partition>} and then assigned to splits in round-robin order.
*/
@Override
public List<UnboundedKafkaSource<K, V>> split(int desiredNumSplits, PipelineOptions options) throws Exception {
List<TopicPartition> partitions = new ArrayList<>(spec.getTopicPartitions());
if (partitions.isEmpty()) {
try (Consumer<?, ?> consumer = spec.getConsumerFactoryFn().apply(spec.getConsumerConfig())) {
for (String topic : spec.getTopics()) {
for (PartitionInfo p : consumer.partitionsFor(topic)) {
partitions.add(new TopicPartition(p.topic(), p.partition()));
}
}
}
}
Collections.sort(partitions, new Comparator<TopicPartition>() {
@Override
public int compare(TopicPartition tp1, TopicPartition tp2) {
return ComparisonChain.start().compare(tp1.topic(), tp2.topic()).compare(tp1.partition(), tp2.partition()).result();
}
});
checkArgument(desiredNumSplits > 0);
checkState(partitions.size() > 0, "Could not find any partitions. Please check Kafka configuration and topic names");
int numSplits = Math.min(desiredNumSplits, partitions.size());
List<List<TopicPartition>> assignments = new ArrayList<>(numSplits);
for (int i = 0; i < numSplits; i++) {
assignments.add(new ArrayList<TopicPartition>());
}
for (int i = 0; i < partitions.size(); i++) {
assignments.get(i % numSplits).add(partitions.get(i));
}
List<UnboundedKafkaSource<K, V>> result = new ArrayList<>(numSplits);
for (int i = 0; i < numSplits; i++) {
List<TopicPartition> assignedToSplit = assignments.get(i);
LOG.info("Partitions assigned to split {} (total {}): {}", i, assignedToSplit.size(), Joiner.on(",").join(assignedToSplit));
result.add(new UnboundedKafkaSource<>(spec.toBuilder().setTopics(Collections.<String>emptyList()).setTopicPartitions(assignedToSplit).build(), i));
}
return result;
}
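For illustration with assumed numbers: a single topic with five partitions p0..p4 and desiredNumSplits = 2 yields two splits; after sorting, the partitions are dealt out round-robin, so split 0 receives {p0, p2, p4} and split 1 receives {p1, p3}.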
Example 15
Project: flume-master | File: KafkaChannel.java
private Map<TopicPartition, OffsetAndMetadata> getKafkaOffsets(KafkaConsumer<String, byte[]> client) {
Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
List<PartitionInfo> partitions = client.partitionsFor(topicStr);
for (PartitionInfo partition : partitions) {
TopicPartition key = new TopicPartition(topicStr, partition.partition());
OffsetAndMetadata offsetAndMetadata = client.committed(key);
if (offsetAndMetadata != null) {
offsets.put(key, offsetAndMetadata);
}
}
return offsets;
}
Example 16
Project: wildfly-camel-master | File: SimpleKafkaPartitioner.java
@Override
public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
List<PartitionInfo> partitions = cluster.partitionsForTopic(topic);
int numPartitions = partitions.size();
return key.hashCode() % numPartitions;
}
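Because key.hashCode() can be negative, the expression above may produce a negative partition index. A defensive variant (an illustrative rewrite, not the project's code) might look like this:

// Illustrative defensive rewrite (not the original project code): masks the sign bit
// so the resulting partition index is always non-negative, and tolerates a null key.
@Override
public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
    List<PartitionInfo> partitions = cluster.partitionsForTopic(topic);
    int numPartitions = partitions.size();
    int hash = (key == null) ? 0 : key.hashCode();
    return (hash & Integer.MAX_VALUE) % numPartitions;
}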
Example 17
Project: comsat-master | File: FiberKafkaProducer.java
@Override
public List<PartitionInfo> partitionsFor(String topic) {
return producer.partitionsFor(topic);
}
Example 18
Project: message-queue-client-framework-master | File: KafkaMessageNewSender.java
/**
* Gets topic partitions.
*
* @param topic the topic
* @return the partitions
*/
public List<PartitionInfo> getPartitions(String topic) {
return kafkaProducer.partitionsFor(topic);
}
Example 19
Project: manifold-master | File: KafkaOutputConnector.java
/**
* Test the connection. Returns a string describing the connection integrity.
*
* @return the connection's status as a displayable string.
*/
@Override
public String check() throws ManifoldCFException {
try {
List<PartitionInfo> partitions = producer.partitionsFor(params.getParameter(KafkaConfig.TOPIC));
return super.check();
} catch (ManifoldCFException e) {
return "Connection failed: " + e.getMessage();
}
}
Example 20
Project: ddth-kafka-master | File: KafkaMsgConsumer.java
private Map<String, List<PartitionInfo>> getTopicInfo() {
if (topicInfo == null || lastTopicInfoFetched + 1000 < System.currentTimeMillis()) {
synchronized (metadataConsumer) {
topicInfo = metadataConsumer.listTopics();
}
lastTopicInfoFetched = System.currentTimeMillis();
}
return topicInfo;
}