获取 Kafka 队列中待消费 Lag

以下 Java 示例演示如何获取 Kafka 中某个消费组（consumer group）针对某个 topic 主题的待消费数据量（Lag）。

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;



/**
 * Computes the total consumer lag (log-end offset minus committed offset,
 * summed over all partitions) for a given consumer group and topic.
 */
public class GetKafkaLag {

	/** Comma-separated Kafka broker list used for every consumer connection. */
	public final static String KAFKA_BOOTSTRAP_SERVERS = "cdhcm.fahaicc.com:9092,cdh1.fahaicc.com:9092,cdh2.fahaicc.com:9092";

	/**
	 * Builds the consumer configuration used to query offsets.
	 *
	 * @param groupID consumer group whose committed offsets will be inspected
	 * @return properties holding the bootstrap servers, group id and String deserializers
	 */
	public static Properties getConsumeProperties(String groupID) {
		Properties props = new Properties();
		props.put("group.id", groupID);
		props.put("bootstrap.servers", KAFKA_BOOTSTRAP_SERVERS);
		props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
		props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
		return props;
	}

	public static void main(String[] args) {
		// 要查询的消费组 (consumer group to inspect)
		String groupID = "SYNC_ES";
		// 消费组中监控的topic (topic monitored within that group)
		String topic = "CRV_ES";

		long countLong = countByArgs(groupID, topic);
		// was silently discarded before — report the result
		System.out.println("Total lag for topic " + topic + ", group " + groupID + ": " + countLong);
	}

	/**
	 * Sums the per-partition lag of {@code groupID} on {@code topic}.
	 *
	 * <p>Lag per partition = log-end offset ("log size") minus the group's
	 * committed offset. A partition the group has never committed on counts
	 * as fully unconsumed (committed offset 0).
	 *
	 * @param groupID consumer group to inspect
	 * @param topic   topic whose partitions are measured
	 * @return total lag across all partitions, or 0 if the partition sets mismatch
	 */
	public static long countByArgs(String groupID, String topic) {
		Map<Integer, Long> endOffsetMap = new HashMap<>();
		Map<Integer, Long> commitOffsetMap = new HashMap<>();

		Properties consumeProps = getConsumeProperties(groupID);
		System.out.println("consumer properties:" + consumeProps);

		long lagSum = 0L;
		// try-with-resources: the consumer is closed even if a broker call throws
		// (the original leaked it on any exception before close()).
		try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(consumeProps)) {
			// 查询topic partitions — enumerate every partition of the topic
			List<TopicPartition> topicPartitions = new ArrayList<>();
			for (PartitionInfo partitionInfo : consumer.partitionsFor(topic)) {
				topicPartitions.add(new TopicPartition(partitionInfo.topic(), partitionInfo.partition()));
			}

			// 查询log size — log-end offset per partition
			Map<TopicPartition, Long> endOffsets = consumer.endOffsets(topicPartitions);
			for (Map.Entry<TopicPartition, Long> entry : endOffsets.entrySet()) {
				endOffsetMap.put(entry.getKey().partition(), entry.getValue());
			}
			for (Map.Entry<Integer, Long> entry : endOffsetMap.entrySet()) {
				System.out.println(String.format("at %s, topic:%s, partition:%s, logSize:%s",
						System.currentTimeMillis(), topic, entry.getKey(), entry.getValue()));
			}

			// 查询消费offset — committed() returns null when the group has never
			// committed on that partition; the original NPE'd here. Treat as 0.
			for (TopicPartition topicAndPartition : topicPartitions) {
				OffsetAndMetadata committed = consumer.committed(topicAndPartition);
				commitOffsetMap.put(topicAndPartition.partition(),
						committed == null ? 0L : committed.offset());
			}

			// 累加lag — sum end-offset minus committed-offset per partition
			if (endOffsetMap.size() == commitOffsetMap.size()) {
				for (Map.Entry<Integer, Long> entry : endOffsetMap.entrySet()) {
					Integer partition = entry.getKey();
					long endOffSet = entry.getValue();
					long commitOffSet = commitOffsetMap.get(partition);
					long diffOffset = endOffSet - commitOffSet;
					lagSum += diffOffset;
					System.out.println("Topic:" + topic + ", groupID:" + groupID + ", partition:" + partition
							+ ", endOffset:" + endOffSet + ", commitOffset:" + commitOffSet + ", diffOffset:" + diffOffset);
				}
				System.out.println("Topic:" + topic + ", groupID:" + groupID + ", LAG:" + lagSum);
			} else {
				System.out.println("this topic partitions lost");
			}
		}

		return lagSum;
	}
}

欢迎分享,转载请注明来源:内存溢出

原文地址:https://54852.com/zaji/5717902.html

(0)
打赏 微信扫一扫微信扫一扫 支付宝扫一扫支付宝扫一扫
上一篇 2022-12-18
下一篇 2022-12-18

发表评论

登录后才能评论

评论列表(0条)

    保存