diff --git a/cloud-common/cloud-common-kafka/pom.xml b/cloud-common/cloud-common-kafka/pom.xml
new file mode 100644
index 0000000..44dbe21
--- /dev/null
+++ b/cloud-common/cloud-common-kafka/pom.xml
@@ -0,0 +1,35 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <groupId>com.muyu</groupId>
+        <artifactId>cloud-common</artifactId>
+        <version>3.6.3</version>
+    </parent>
+
+    <artifactId>cloud-common-kafka</artifactId>
+
+    <properties>
+        <maven.compiler.source>17</maven.compiler.source>
+        <maven.compiler.target>17</maven.compiler.target>
+        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+    </properties>
+
+    <dependencies>
+        <!-- Redis cache support -->
+        <dependency>
+            <groupId>com.muyu</groupId>
+            <artifactId>cloud-common-redis</artifactId>
+        </dependency>
+
+        <!-- Kafka client -->
+        <dependency>
+            <groupId>org.apache.kafka</groupId>
+            <artifactId>kafka-clients</artifactId>
+            <version>3.0.0</version>
+        </dependency>
+    </dependencies>
+</project>
diff --git a/cloud-common/cloud-common-kafka/src/main/java/com/muyu/common/kafka/config/KafkaConsumerConfig.java b/cloud-common/cloud-common-kafka/src/main/java/com/muyu/common/kafka/config/KafkaConsumerConfig.java
new file mode 100644
index 0000000..8055e42
--- /dev/null
+++ b/cloud-common/cloud-common-kafka/src/main/java/com/muyu/common/kafka/config/KafkaConsumerConfig.java
@@ -0,0 +1,51 @@
+package com.muyu.common.kafka.config;
+
+import com.muyu.common.kafka.constants.KafkaConstants;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.common.serialization.Deserializer;
+import org.apache.kafka.common.serialization.StringDeserializer;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Kafka message consumer configuration class.
+ */
+@Configuration
+public class KafkaConsumerConfig {
+
+    @Bean
+    public KafkaConsumer<String, String> kafkaConsumer() {
+        Map<String, Object> configs = new HashMap<>();
+        // Kafka broker address, in ip:port format
+        configs.put("bootstrap.servers", "60.204.221.52:9092");
+        // Automatically commit the consumer offset back to Kafka
+        configs.put("enable.auto.commit", true);
+        // Interval between automatic offset commits, in milliseconds
+        configs.put("auto.commit.interval.ms", 5000);
+        // Where to start when Kafka has no initial offset, or the current offset no longer exists:
+        //   earliest - reset to the earliest offset
+        //   latest   - reset to the latest offset
+        //   none     - throw an exception
+        configs.put("auto.offset.reset", "latest");
+        // Maximum time a fetch request may block on the broker, in milliseconds
+        configs.put("fetch.max.wait.ms", 500);
+        // Minimum number of bytes the broker should return for a fetch request
+        configs.put("fetch.min.bytes", 1);
+        // Heartbeat interval, in milliseconds
+        configs.put("heartbeat.interval.ms", 3000);
+        // Maximum number of records returned by a single call to poll()
+        configs.put("max.poll.records", 500);
+        // Consumer group id
+        configs.put("group.id", KafkaConstants.KafkaGrop);
+        // Deserializer for record keys
+        Deserializer<String> keyDeserializer = new StringDeserializer();
+        // Deserializer for record values
+        Deserializer<String> valueDeserializer = new StringDeserializer();
+        // Create the Kafka consumer
+        return new KafkaConsumer<>(configs, keyDeserializer, valueDeserializer);
+    }
+
+}
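
For reference, a minimal sketch of how the consumer bean above might be used. The `CarDataListener` class and the endless poll loop are illustrative, not part of this change; only `KafkaConstants.KafkaTopic` and the injected bean come from this module:

```java
package com.muyu.common.kafka.demo;

import com.muyu.common.kafka.constants.KafkaConstants;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.time.Duration;
import java.util.Collections;

public class CarDataListener {

    /**
     * Poll the carJsons topic with the injected consumer bean.
     */
    public void listen(KafkaConsumer<String, String> kafkaConsumer) {
        // Subscribe to the topic defined in KafkaConstants
        kafkaConsumer.subscribe(Collections.singleton(KafkaConstants.KafkaTopic));
        while (true) {
            // poll() returns at most max.poll.records (500) records per call
            ConsumerRecords<String, String> records = kafkaConsumer.poll(Duration.ofMillis(500));
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("offset=%d, key=%s, value=%s%n",
                        record.offset(), record.key(), record.value());
            }
            // Offsets are committed automatically every 5 s (enable.auto.commit=true)
        }
    }
}
```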
diff --git a/cloud-common/cloud-common-kafka/src/main/java/com/muyu/common/kafka/config/KafkaProviderConfig.java b/cloud-common/cloud-common-kafka/src/main/java/com/muyu/common/kafka/config/KafkaProviderConfig.java
new file mode 100644
index 0000000..07b56d3
--- /dev/null
+++ b/cloud-common/cloud-common-kafka/src/main/java/com/muyu/common/kafka/config/KafkaProviderConfig.java
@@ -0,0 +1,49 @@
+package com.muyu.common.kafka.config;
+
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.common.serialization.Serializer;
+import org.apache.kafka.common.serialization.StringSerializer;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Kafka message producer configuration class.
+ */
+@Configuration
+public class KafkaProviderConfig {
+
+    @Bean
+    public KafkaProducer<String, String> kafkaProducer() {
+        Map<String, Object> configs = new HashMap<>();
+        // Kafka broker address, in ip:port format
+        configs.put("bootstrap.servers", "47.116.173.119:9092");
+        // Number of retries after a failed send
+        configs.put("retries", 2);
+        // When multiple records go to the same partition, the producer batches them
+        // into fewer requests; this sets the default batch size in bytes
+        configs.put("batch.size", 16384);
+        // Total memory, in bytes, the producer may use to buffer records waiting to be sent
+        configs.put("buffer.memory", 33554432);
+        // Number of acknowledgements the producer requires the leader to have received
+        // before considering a request complete; controls durability of sent records:
+        //   acks=0  - the producer does not wait for any acknowledgement; the record is
+        //             considered sent once it hits the socket buffer, retries do not
+        //             apply, and the returned offset is always -1
+        //   acks=1  - the leader writes the record to its local log and responds without
+        //             waiting for followers; the record is lost if the leader fails
+        //             before the followers replicate it
+        //   acks=all (or -1) - the leader waits for all in-sync replicas to acknowledge;
+        //             the record survives as long as one in-sync replica remains
+        configs.put("acks", "-1");
+        // Serializer for record keys
+        Serializer<String> keySerializer = new StringSerializer();
+        // Serializer for record values
+        Serializer<String> valueSerializer = new StringSerializer();
+        // Create the Kafka producer
+        return new KafkaProducer<>(configs, keySerializer, valueSerializer);
+    }
+
+}
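
Likewise, a minimal sketch of sending a message with the producer bean above; `CarDataPublisher` and the `carJson` payload are hypothetical names used only for illustration:

```java
package com.muyu.common.kafka.demo;

import com.muyu.common.kafka.constants.KafkaConstants;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

public class CarDataPublisher {

    /**
     * Send one JSON payload to the carJsons topic with the injected producer bean.
     */
    public void publish(KafkaProducer<String, String> kafkaProducer, String carJson) {
        ProducerRecord<String, String> record =
                new ProducerRecord<>(KafkaConstants.KafkaTopic, carJson);
        // send() is asynchronous; the callback fires once the broker acknowledges
        // the record (acks=-1, so all in-sync replicas must confirm)
        kafkaProducer.send(record, (metadata, exception) -> {
            if (exception != null) {
                exception.printStackTrace();
            } else {
                System.out.printf("sent to %s-%d@%d%n",
                        metadata.topic(), metadata.partition(), metadata.offset());
            }
        });
    }
}
```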
diff --git a/cloud-common/cloud-common-kafka/src/main/java/com/muyu/common/kafka/constants/KafkaConstants.java b/cloud-common/cloud-common-kafka/src/main/java/com/muyu/common/kafka/constants/KafkaConstants.java
new file mode 100644
index 0000000..b1b7180
--- /dev/null
+++ b/cloud-common/cloud-common-kafka/src/main/java/com/muyu/common/kafka/constants/KafkaConstants.java
@@ -0,0 +1,13 @@
+package com.muyu.common.kafka.constants;
+
+/**
+ * Kafka constants shared by the producer and consumer configuration.
+ */
+public class KafkaConstants {
+
+    /** Topic that carries the vehicle JSON messages */
+    public final static String KafkaTopic = "carJsons";
+
+    /** Consumer group id referenced by KafkaConsumerConfig */
+    public final static String KafkaGrop = "kafka_grop";
+}
diff --git a/cloud-common/cloud-common-kafka/src/main/resources/META-INF/spring/org.springframework.boot.autoconfigure.AutoConfiguration.imports b/cloud-common/cloud-common-kafka/src/main/resources/META-INF/spring/org.springframework.boot.autoconfigure.AutoConfiguration.imports
new file mode 100644
index 0000000..f4a1fdb
--- /dev/null
+++ b/cloud-common/cloud-common-kafka/src/main/resources/META-INF/spring/org.springframework.boot.autoconfigure.AutoConfiguration.imports
@@ -0,0 +1,2 @@
+com.muyu.common.kafka.config.KafkaConsumerConfig
+com.muyu.common.kafka.config.KafkaProviderConfig
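
Because both configuration classes are listed in this `AutoConfiguration.imports` file, Spring Boot 3.x registers them in any service that declares a dependency on `cloud-common-kafka`, with no `@ComponentScan` or `@Import` needed. A minimal sketch of a downstream bean, assuming constructor injection; `VehicleDataService` is an illustrative name, not part of this change:

```java
package com.muyu.example;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.springframework.stereotype.Service;

@Service
public class VehicleDataService {

    // Provided by KafkaProviderConfig, which Spring Boot discovers
    // through the AutoConfiguration.imports file of cloud-common-kafka
    private final KafkaProducer<String, String> kafkaProducer;

    public VehicleDataService(KafkaProducer<String, String> kafkaProducer) {
        this.kafkaProducer = kafkaProducer;
    }
}
```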