Optimize and rework the Kafka implementation

dev.data.processing.dataTreating
面包骑士 2024-09-28 20:08:09 +08:00
parent 0c0d9fca3c
commit 2c1765b3a6
9 changed files with 157 additions and 62 deletions

View File

@@ -25,8 +25,9 @@
         </dependency>
         <dependency>
-            <groupId>org.springframework.kafka</groupId>
-            <artifactId>spring-kafka</artifactId>
+            <groupId>org.apache.kafka</groupId>
+            <artifactId>kafka-clients</artifactId>
+            <version>3.0.0</version>
         </dependency>
     </dependencies>

View File

@@ -0,0 +1,54 @@
package com.muyu.common.kafka.config;

import com.muyu.common.kafka.constants.KafkaConstants;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.Deserializer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import java.util.HashMap;
import java.util.Map;

/**
 * Kafka consumer configuration.
 */
@Configuration
public class KafkaConsumerConfig {

    @Bean
    public KafkaConsumer<String, String> kafkaConsumer() {
        Map<String, Object> configs = new HashMap<>();
        // Kafka broker address, format: ip:port
        configs.put("bootstrap.servers", "47.116.173.119:9092");
        // Let the consumer commit offsets back to Kafka automatically
        configs.put("enable.auto.commit", true);
        // Interval between automatic offset commits, in milliseconds
        configs.put("auto.commit.interval.ms", 5000);
        // What to do when Kafka has no initial offset, or the current offset no longer exists:
        //   earliest - automatically reset to the earliest offset
        //   latest   - automatically reset to the latest offset
        //   none     - throw an exception
        configs.put("auto.offset.reset", "latest");
        // Maximum time (in milliseconds) the server blocks a fetch request
        configs.put("fetch.max.wait.ms", 500);
        // Minimum number of bytes the server should return for a fetch request
        configs.put("fetch.min.bytes", 1);
        // Heartbeat interval, in milliseconds
        configs.put("heartbeat.interval.ms", 3000);
        // Maximum number of records returned by a single call to poll()
        configs.put("max.poll.records", 500);
        // Consumer group
        configs.put("group.id", KafkaConstants.KafkaGrop);
        // Deserializer for record keys
        Deserializer<String> keyDeserializer = new StringDeserializer();
        // Deserializer for record values
        Deserializer<String> valueDeserializer = new StringDeserializer();
        // Create the Kafka consumer
        return new KafkaConsumer<>(configs, keyDeserializer, valueDeserializer);
    }
}
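A note on the auto-commit choice above: with enable.auto.commit=true, offsets are committed in the background every 5 seconds, so a record that was polled but not yet fully processed can still be marked as consumed if the process dies. A minimal sketch of the manual-commit alternative, assuming enable.auto.commit is flipped to false and process() is a hypothetical handler, not part of this commit:

    // Sketch only: at-least-once consumption with manual commits.
    ConsumerRecords<String, String> records = kafkaConsumer.poll(Duration.ofMillis(1000));
    for (ConsumerRecord<String, String> record : records) {
        process(record.value()); // hypothetical processing step
    }
    kafkaConsumer.commitSync(); // commit only after processing succeeded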

View File

@@ -1,45 +0,0 @@
package com.muyu.common.kafka.config;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import java.util.HashMap;
import java.util.Map;
/**
* @Author:
* @Name: KafkaProducerConfig
* @Description: kafka
* @CreatedDate: 2024/9/27 7:38
* @FilePath: com.muyu.common.kafka.config
* kafka
*/
@Configuration
public class KafkaProducerConfig {
@Bean
public ProducerFactory<String, Object> producerFactory() {
Map<String, Object> configProps = new HashMap<>();
configProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "47.116.173.119:9092");
configProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
configProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
// Add transaction-related configuration
configProps.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true);
configProps.put(ProducerConfig.ACKS_CONFIG, "all");
configProps.put(ProducerConfig.RETRIES_CONFIG, Integer.MAX_VALUE);
configProps.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "transactionalId");
return new DefaultKafkaProducerFactory<>(configProps);
}
@Bean
public KafkaTemplate<String, Object> kafkaTemplate() {
return new KafkaTemplate<>(producerFactory());
}
}
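The deleted spring-kafka configuration above enabled idempotence and transactions (ENABLE_IDEMPOTENCE_CONFIG, TRANSACTIONAL_ID_CONFIG); the raw-client producer that replaces it further down does not. If the same duplicate-free retry behavior is still wanted, a sketch of the equivalent entries for the new configs map, an assumption rather than part of this commit:

    // Sketch only: restoring idempotent-producer guarantees on kafka-clients.
    configs.put("enable.idempotence", true);      // broker dedupes retried sends per partition
    configs.put("acks", "all");                   // idempotence requires acks=all
    configs.put("retries", Integer.MAX_VALUE);    // aggressive retries are safe once idempotent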

View File

@@ -0,0 +1,45 @@
package com.muyu.common.kafka.config;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.common.serialization.Serializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import java.util.HashMap;
import java.util.Map;

/**
 * Kafka producer configuration.
 */
@Configuration
public class KafkaProviderConfig {

    @Bean
    public KafkaProducer<String, String> kafkaProducer() {
        Map<String, Object> configs = new HashMap<>();
        // Kafka broker address, format: ip:port
        configs.put("bootstrap.servers", "47.116.173.119:9092");
        // Number of times the client retries a failed send
        configs.put("retries", 2);
        // When several records are sent to the same partition, the producer batches them into
        // fewer requests, which helps both client and server throughput;
        // this setting controls the default batch size in bytes
        configs.put("batch.size", 16384);
        // Total memory, in bytes, the producer may use to buffer records waiting to be sent
        configs.put("buffer.memory", 33554432);
        // Number of acknowledgments the producer requires the leader to receive before a
        // request is considered complete; controls the durability of sent records:
        //   acks=0  - the producer does not wait for any acknowledgment; the record goes into
        //             the socket buffer and counts as sent. There is no guarantee the server
        //             received it, retries do not take effect, and the offset returned for
        //             each record is always -1.
        //   acks=1  - the leader writes the record to its local log and replies without waiting
        //             for the followers; if the leader fails right after acknowledging but
        //             before the followers replicate the record, the record is lost.
        //   acks=all (same as acks=-1) - the leader waits for the full set of in-sync replicas;
        //             the record survives as long as at least one in-sync replica stays alive.
        configs.put("acks", "-1");
        // Serializer for record keys
        Serializer<String> keySerializer = new StringSerializer();
        // Serializer for record values
        Serializer<String> valueSerializer = new StringSerializer();
        // Create the Kafka producer
        return new KafkaProducer<>(configs, keySerializer, valueSerializer);
    }
}
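For reference, a usage sketch for this bean: KafkaProducer.send() is asynchronous and only returns a Future<RecordMetadata>, so a caller that needs the acks=-1 guarantee before continuing has to block on the future. The topic and payload below are placeholders:

    // Sketch only: blocking send that waits for broker acknowledgment.
    ProducerRecord<String, String> record = new ProducerRecord<>("example-topic", "payload");
    RecordMetadata metadata = kafkaProducer.send(record).get();
    System.out.println("Stored at partition " + metadata.partition() + ", offset " + metadata.offset());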

View File

@@ -1,7 +1,5 @@
 package com.muyu.common.kafka.constants;
-import org.springframework.beans.factory.annotation.Value;
 /**
  * @Author:
  * @date: 2024/7/10

View File

@@ -1 +1,2 @@
-com.muyu.common.kafka.config.KafkaProducerConfig
+com.muyu.common.kafka.config.KafkaConsumerConfig
+com.muyu.common.kafka.config.KafkaProviderConfig

View File

@@ -16,7 +16,7 @@ import org.springframework.boot.autoconfigure.jdbc.DataSourceAutoConfiguration;
  * @FilePath: com.muyu.data.processing
  */
+@EnableMyFeignClients
 @SpringBootApplication
 public class MyDataApplication {
     public static void main(String[] args) {

View File

@@ -1,19 +1,21 @@
 package com.muyu.data.processing.controller;
+import com.alibaba.fastjson.JSONObject;
 import com.muyu.common.kafka.constants.KafkaConstants;
+import com.muyu.data.processing.domain.IotDbData;
 import com.muyu.data.processing.domain.req.TestReq;
 import com.muyu.data.processing.domain.resp.TestResp;
 import com.muyu.data.processing.strategy.root.RootStrategy;
 import jakarta.annotation.Resource;
-import org.springframework.kafka.core.KafkaTemplate;
-import org.springframework.messaging.MessageHeaders;
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.ProducerRecord;
 import org.springframework.transaction.annotation.Transactional;
 import org.springframework.web.bind.annotation.*;
 import lombok.extern.slf4j.Slf4j;
-import org.springframework.messaging.Message;
 /**
+ *
  * @Author:
  * @Name: Test
  * @Description:
@@ -25,13 +27,22 @@ import org.springframework.messaging.Message;
 @RequestMapping("/Test")
 public class TestController {
     @Resource
-    private KafkaTemplate<String, Object> kafkaTemplate;
+    private KafkaProducer<String, String> kafkaProducer;
     @GetMapping("/testKafka")
-    @Transactional
     public void sendMsg(@RequestParam("msg") String msg) {
         try {
-            kafkaTemplate.send(KafkaConstants.KafkaTopic, msg).get();
+            IotDbData iotDbData = IotDbData.builder()
+                    .timestamp(System.currentTimeMillis())
+                    .vin("vin666")
+                    .key("test")
+                    .label("test data")
+                    .value("Kafka test")
+                    .type("String")
+                    .build();
+            String jsonString = JSONObject.toJSONString(iotDbData);
+            ProducerRecord<String, String> producerRecord = new ProducerRecord<>(KafkaConstants.KafkaTopic, jsonString);
+            kafkaProducer.send(producerRecord);
             System.out.println("Synchronous message sent successfully: " + msg);
         } catch (Exception e) {
             e.printStackTrace();
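One caveat on this change: the old code blocked on .get(), so the "synchronous" log line was accurate; the new kafkaProducer.send(producerRecord) returns immediately, and a broker-side failure will never reach the catch block. A one-line sketch of keeping the send genuinely synchronous, if that behavior is still intended:

    // Sketch only: block until the broker acknowledges, so send failures hit the catch block.
    kafkaProducer.send(producerRecord).get();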

View File

@@ -1,11 +1,22 @@
 package com.muyu.data.processing.kafka;
+import cn.hutool.core.thread.ThreadUtil;
+import cn.hutool.json.JSONUtil;
+import com.alibaba.nacos.shaded.com.google.common.collect.Lists;
 import com.muyu.common.kafka.constants.KafkaConstants;
+import com.muyu.data.processing.domain.IotDbData;
+import jakarta.annotation.Resource;
 import lombok.extern.slf4j.Slf4j;
-import org.springframework.kafka.annotation.KafkaListener;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.springframework.beans.factory.InitializingBean;
 import org.springframework.stereotype.Component;
+import java.time.Duration;
+import java.util.Collection;
 /**
  * @Author:
  * @Name: KafkaConsumerService
@@ -16,10 +27,29 @@ import org.springframework.stereotype.Component;
 @Slf4j
 @Component
-public class KafkaConsumerService {
+public class KafkaConsumerService implements InitializingBean {
+    @Resource
+    private KafkaConsumer<String, String> kafkaConsumer;
-    @KafkaListener(topics = {KafkaConstants.KafkaTopic}, groupId = KafkaConstants.KafkaGrop)
-    public void listen(String msg) {
-        log.info("Kafka consumed message: {}", msg);
+    @Override
+    public void afterPropertiesSet() throws Exception {
+        Thread thread = new Thread(() -> {
+            log.info("Starting a thread to listen on topic: {}", KafkaConstants.KafkaTopic);
+            ThreadUtil.sleep(1000);
+            Collection<String> topics = Lists.newArrayList(KafkaConstants.KafkaTopic);
+            kafkaConsumer.subscribe(topics);
+            while (true) {
+                ConsumerRecords<String, String> consumerRecords = kafkaConsumer.poll(Duration.ofMillis(1000));
+                for (ConsumerRecord<String, String> consumerRecord : consumerRecords) {
+                    // 1. Take the payload out of the ConsumerRecord
+                    String originalMsg = consumerRecord.value();
+                    log.info("Raw data consumed from Kafka: {}", originalMsg);
+                    // 2. Convert the payload into a DTO object
+                    IotDbData iotDbData = JSONUtil.toBean(originalMsg, IotDbData.class);
+                    log.info("Converted consumed data into DTO: {}", iotDbData);
+                }
+            }
+        });
+        thread.start();
     }
 }
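Finally, the poll loop above has no exit path and the consumer is never closed, so a shutdown kills the thread mid-poll. A minimal sketch of a cooperative shutdown, assuming the class additionally implements DisposableBean; the wakeup/WakeupException handling is an assumption, not part of this commit:

    // Sketch only: cooperative shutdown for the polling thread.
    @Override
    public void destroy() {                 // org.springframework.beans.factory.DisposableBean
        kafkaConsumer.wakeup();             // makes a blocked poll() throw WakeupException
    }

    // ...and inside the polling thread:
    try {
        while (true) {
            ConsumerRecords<String, String> records = kafkaConsumer.poll(Duration.ofMillis(1000));
            // ... process records as above ...
        }
    } catch (org.apache.kafka.common.errors.WakeupException e) {
        // expected during shutdown
    } finally {
        kafkaConsumer.close();              // leave the group cleanly and flush auto-commit offsets
    }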