Kafka client usage
Spring Boot integration with Kafka
- Manual offset commits require setting the Kafka listener property spring.kafka.listener.ack-mode: manual in the application configuration (see the listener below and the YAML that follows).
```java
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.annotation.PartitionOffset;
import org.springframework.kafka.annotation.TopicPartition;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;

@Component
public class MyKafkaListener {

    @Autowired
    private SaslClient saslClient;

    // Listen on all partitions of the topic
    @KafkaListener(topics = { "topic" }, groupId = "consumer-group")
    // Listen on specific partitions with initial offsets
    // @KafkaListener(
    //     topicPartitions = {
    //         @TopicPartition(topic = "topic", partitionOffsets = {
    //             @PartitionOffset(partition = "3", initialOffset = "323"),
    //             @PartitionOffset(partition = "4", initialOffset = "6629") })
    //     }, groupId = "consumer-group")
    public void onMessage(ConsumerRecord<String, String> record, Acknowledgment ack) {
        try {
            saslClient.consume(record);
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            ack.acknowledge(); // manual commit
        }
    }
}
```
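The SaslClient bean that the listener delegates to is not shown in the post. The sketch below is only an assumption of its shape, inferred from the single consume(ConsumerRecord) call above:

```java
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.stereotype.Component;

// Hypothetical implementation: the original post does not include this class.
// Only the consume(ConsumerRecord) entry point is implied by the listener above.
@Component
public class SaslClient {

    public void consume(ConsumerRecord<String, String> record) {
        // Inspect the record before handing it to business logic
        System.out.printf("partition=%d offset=%d key=%s%n",
                record.partition(), record.offset(), record.key());
        // business processing goes here
    }
}
```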
- Add the following to the YAML configuration:
```yaml
spring:
  kafka:
    listener:
      ack-mode: manual                  # enable manual offset commit
    bootstrap-servers:                  # broker address list
    consumer:
      isolation-level: read-committed
      enable-auto-commit: false         # disable auto commit
      #auto-commit-interval: 1000
      auto-offset-reset: earliest       # when a partition has no initial offset, or the offset is no longer valid, start from the earliest record
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      max-poll-records: 2
      properties:
        # SASL authentication
        security:
          protocol: SASL_PLAINTEXT
        sasl:
          mechanism: SCRAM-SHA-512
          jaas:
            config: org.apache.kafka.common.security.scram.ScramLoginModule required username="user" password="password";
        session:
          timeout:
            ms: 24000
        max:
          poll:
            interval:
              ms: 30000
```
Calling Kafka from plain Java
```java
// Topic
@Value("${kafkaData.topic}")
private String topic;

// Consumer group
@Value("${kafkaData.group}")
private String group;

@Value("${kafkaData.jaas}")
private String jaas;

@Value("${kafkaData.key}")
private String key;

// Broker addresses; must be the secure (SASL) access point
@Value("${kafkaData.brokers}")
private String brokers;

public void consume() throws Exception {
    Properties properties = new Properties();
    properties.put("security.protocol", "SASL_PLAINTEXT");
    properties.put("sasl.mechanism", "SCRAM-SHA-512");
    properties.put("bootstrap.servers", brokers);
    properties.put("group.id", group);
    properties.put("enable.auto.commit", "false");
    properties.put("auto.offset.reset", "earliest");
    properties.put("max.poll.records", 2); // maximum number of records returned per poll
    properties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    properties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    properties.put("sasl.jaas.config", jaas);

    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);
    consumer.subscribe(Arrays.asList(topic));

    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(3000));
        System.out.printf("poll records size = %d%n", records.count());
        try {
            for (ConsumerRecord<String, String> record : records) {
                // Decrypt the encrypted payload
                String publicDecrypt = RSAUtil.publicDecrypt(record.value(), RSAUtil.getPublicKey(key));
                JSONObject jsonObject = JSONObject.parseObject(publicDecrypt);
                String msg = jsonObject.getString("msg");
                String page = jsonObject.getString("page");
                String size = jsonObject.getString("size");
                String time = jsonObject.getString("time");
                String total = jsonObject.getString("total");
                String type = jsonObject.getString("type");
                String operation = jsonObject.getString("operation");
                // TODO: business processing
            }
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            consumer.commitAsync(); // manual commit
        }
    }
}
```
Other issues
- Offsets must be committed for every record that is consumed; otherwise the committed position for the partition drifts out of sync and later records can no longer be consumed (see the sketch below).
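The commitAsync() call in the loop above commits whatever the last poll() returned. If the committed position should advance record by record, one option is a synchronous commit with an explicit offset map after each record. A minimal sketch, assuming a KafkaConsumer configured as in the example above; the class and method names are placeholders:

```java
import java.time.Duration;
import java.util.Collections;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public final class PerRecordCommit {

    // Poll and commit the offset of each record immediately after it is processed.
    // The committed offset is record.offset() + 1, i.e. the next record to be read.
    static void run(KafkaConsumer<String, String> consumer) {
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(3000));
            for (ConsumerRecord<String, String> record : records) {
                // business processing for this record goes here
                Map<TopicPartition, OffsetAndMetadata> offset = Collections.singletonMap(
                        new TopicPartition(record.topic(), record.partition()),
                        new OffsetAndMetadata(record.offset() + 1));
                consumer.commitSync(offset); // commit exactly this record's position
            }
        }
    }
}
```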
Original article: https://blog.csdn.net/weixin_45569664/article/details/144425177