
Integrating Kafka with Spring using spring-kafka

Integrating Spring and Kafka through XML configuration files:

    This article covers:

    1. Configuration for connecting to the Kafka brokers

    2. kafka-customer: the consumer configuration

    3. kafka-provider: the producer configuration

    4. KafkaUtils: sending messages to a given topic

    5. The consumer handling messages by topic

1. Installing Kafka

      There are plenty of installation guides online, so I won't repeat them here (newer Kafka releases ship with ZooKeeper bundled, so you only need to configure zookeeper.properties in the Kafka installation directory). Download: http://kafka.apache.org/downloads

Note: start ZooKeeper before starting the Kafka server.


2. Adding the Maven dependencies

   Since Kafka is integrated through Spring, in addition to the Spring dependencies you also need to add:

<dependency>
  <groupId>org.springframework.kafka</groupId>
  <artifactId>spring-kafka</artifactId>
  <version>2.1.4.RELEASE</version>
</dependency>
<dependency>
  <groupId>org.apache.kafka</groupId>
  <artifactId>kafka-clients</artifactId>
  <version>1.0.0</version>
</dependency>

3. Kafka connection configuration (kafka.properties)

#Kafka broker cluster
bootstrap.servers=192.168.11.38:9092,192.168.11.40:9092
#acks=all: send() only completes once every replica has the record, giving the strongest confirmation that the send succeeded; in theory no data is lost
acks=all
#Number of retries after a failed send
retries=10
#Batch size: records sent to the same partition are grouped into fewer requests, which helps both client and broker performance
batch.size=1638
#Upper bound on the batching delay: after 1 ms the request is sent whether or not the batch is full
linger.ms=1
#32 MB batching buffer
buffer.memory=33554432
#Consumer group ID (publish-subscribe): if several consumers must each receive the messages from one producer, every consumer needs its own group; within a single group only one consumer gets a given message
group.id=order-beta
#If true, the consumer's offsets are committed periodically in the background
enable.auto.commit=true
#Auto-commit interval, used when enable.auto.commit=true
auto.commit.interval.ms=1000
#Timeout used to detect consumer failures when Kafka's group management is in use
session.timeout.ms=15000
#Concurrency of the message listener container
concurrency=3

4. Consumer (customer) configuration

<?xml version="1.0" encoding="UTF-8"?>
<beans xmlns="http://www.springframework.org/schema/beans"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:context="http://www.springframework.org/schema/context"
xsi:schemaLocation="http://www.springframework.org/schema/beans
         http://www.springframework.org/schema/beans/spring-beans-3.0.xsd
         http://www.springframework.org/schema/context
         http://www.springframework.org/schema/context/spring-context.xsd">
    <context:property-placeholder location="classpath*:kafka.properties" />

    <!-- 1. Consumer properties -->
    <bean id="consumerProperties" class="java.util.HashMap">
        <constructor-arg>
            <map>
                <entry key="bootstrap.servers" value="${bootstrap.servers}" />
                <entry key="group.id" value="${group.id}" />
                <entry key="enable.auto.commit" value="${enable.auto.commit}" />
                <entry key="session.timeout.ms" value="${session.timeout.ms}" />
                <entry key="key.deserializer"
                       value="org.apache.kafka.common.serialization.StringDeserializer" />
                <entry key="value.deserializer"
                       value="org.apache.kafka.common.serialization.StringDeserializer" />
            </map>
        </constructor-arg>
    </bean>

    <!-- 2. Consumer factory -->
    <bean id="consumerFactory"
          class="org.springframework.kafka.core.DefaultKafkaConsumerFactory">
        <constructor-arg>
            <ref bean="consumerProperties" />
        </constructor-arg>
    </bean>

    <!-- 3. Message listener implementation -->
    <bean id="kafkaConsumerService" class="KafkaSendMessageServiceImpl" />

    <!-- 4. Listener container properties -->
    <bean id="containerProperties" class="org.springframework.kafka.listener.config.ContainerProperties">
        <!-- topics to subscribe to -->
        <constructor-arg name="topics">
            <list>
                <value>topic1</value>
                <value>topic2</value>
                <!-- <value>${templar.agreement.feedback.topic}</value>
                <value>${templar.aggrement.active.feedback.topic}</value>
                <value>${templar.aggrement.agreementRepaid.topic}</value>
                <value>${templar.aggrement.agreementWithhold.topic}</value>
                <value>${templar.aggrement.agreementRepayRemind.topic}</value> -->
            </list>
        </constructor-arg>
        <property name="messageListener" ref="kafkaConsumerService" />
    </bean>

    <!-- 5. Concurrent message listener container; its init-method doStart() starts the consumers -->
    <bean id="messageListenerContainer" class="org.springframework.kafka.listener.ConcurrentMessageListenerContainer"
          init-method="doStart">
        <constructor-arg ref="consumerFactory" />
        <constructor-arg ref="containerProperties" />
        <property name="concurrency" value="${concurrency}" />
    </bean>
</beans>
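
To start consuming, it is enough to load this application context: the messageListenerContainer bean is started by its init-method and begins polling the configured topics. Below is a minimal startup sketch, assuming the XML above is saved on the classpath as kafka-customer.xml (that file name and the KafkaConsumerStarter class are illustrative, not from the original setup):

import org.springframework.context.support.ClassPathXmlApplicationContext;

public class KafkaConsumerStarter {
    public static void main(String[] args) {
        // Loading the context creates messageListenerContainer; its init-method
        // doStart() launches ${concurrency} consumer threads for the listed topics.
        ClassPathXmlApplicationContext context =
                new ClassPathXmlApplicationContext("/kafka-customer.xml");
        // Stop the listener container cleanly when the JVM shuts down.
        context.registerShutdownHook();
    }
}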

5. Producer (provider) configuration: kafka-provider.xml

<?xml version="1.0" encoding="UTF-8"?>
<beans xmlns="http://www.springframework.org/schema/beans"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:context="http://www.springframework.org/schema/context"
xsi:schemaLocation="http://www.springframework.org/schema/beans
         http://www.springframework.org/schema/beans/spring-beans-3.0.xsd
         http://www.springframework.org/schema/context
         http://www.springframework.org/schema/context/spring-context.xsd">
    <context:property-placeholder location="classpath*:kafka.properties" />

    <!-- Producer properties -->
    <bean id="producerProperties" class="java.util.HashMap">
        <constructor-arg>
            <map>
                <entry key="bootstrap.servers" value="${bootstrap.servers}" />
                <entry key="group.id" value="${group.id}" />
                <entry key="retries" value="${retries}" />
                <entry key="batch.size" value="${batch.size}" />
                <entry key="linger.ms" value="${linger.ms}" />
                <entry key="buffer.memory" value="${buffer.memory}" />
                <entry key="acks" value="${acks}" />
                <entry key="key.serializer"
                       value="org.apache.kafka.common.serialization.StringSerializer" />
                <entry key="value.serializer"
                       value="org.apache.kafka.common.serialization.StringSerializer" />
            </map>
        </constructor-arg>
    </bean>

    <!-- Producer factory used by the KafkaTemplate -->
    <bean id="producerFactory"
          class="org.springframework.kafka.core.DefaultKafkaProducerFactory">
        <constructor-arg>
            <ref bean="producerProperties" />
        </constructor-arg>
    </bean>

    <!-- KafkaTemplate bean; to send messages, simply inject this bean and call its send methods -->
    <bean id="kafkaTemplate" class="org.springframework.kafka.core.KafkaTemplate">
        <constructor-arg ref="producerFactory" />
        <constructor-arg name="autoFlush" value="true" />
        <property name="defaultTopic" value="default" />
    </bean>
</beans>
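
As the comment on the kafkaTemplate bean says, application code normally just injects that bean and calls its send methods. A minimal sketch of such a caller, assuming it is itself a Spring-managed bean (OrderProducer and publishOrder are illustrative names, not part of the original post):

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;

public class OrderProducer {

    // Injected from the kafkaTemplate bean defined above.
    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    public void publishOrder(String orderJson) {
        // Send to topic1; with autoFlush=true each send is flushed immediately.
        kafkaTemplate.send("topic1", orderJson);
    }
}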

6. A simple KafkaUtils for sending messages to a Kafka topic

import org.springframework.context.support.ClassPathXmlApplicationContext;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.util.concurrent.FailureCallback;
import org.springframework.util.concurrent.ListenableFuture;
import org.springframework.util.concurrent.SuccessCallback;

public class KafkaSendMsgUtils {

    public static final ClassPathXmlApplicationContext CONTEXT =
            new ClassPathXmlApplicationContext("/kafka-provider.xml");

    public static <K, T> void sendMessage(String topic, Integer partition, Long timestamp, K key, T data) {
        KafkaTemplate<K, T> kafkaTemplate = (KafkaTemplate<K, T>) CONTEXT.getBean("kafkaTemplate");
        ListenableFuture<SendResult<K, T>> listenableFuture;
        if (kafkaTemplate.getDefaultTopic().equals(topic)) {
            listenableFuture = kafkaTemplate.sendDefault(partition, timestamp, key, data);
        } else {
            listenableFuture = kafkaTemplate.send(topic, partition, timestamp, key, data);
        }
        // Callback invoked when the send succeeds
        SuccessCallback<SendResult<K, T>> successCallback = new SuccessCallback<SendResult<K, T>>() {
            @Override
            public void onSuccess(SendResult<K, T> result) {
                // success business logic
                System.out.println("send succeeded");
            }
        };
        // Callback invoked when the send fails
        FailureCallback failureCallback = new FailureCallback() {
            @Override
            public void onFailure(Throwable ex) {
                // failure business logic
                throw new RuntimeException(ex);
            }
        };
        listenableFuture.addCallback(successCallback, failureCallback);
    }

    public static void main(String[] args) {
        for (int i = 0; i < 5; i++) {
            try {
                Thread.sleep(10000);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
            //KafkaTemplate<String, String> kafkaTemplate = (KafkaTemplate<String, String>) CONTEXT.getBean("kafkaTemplate");
            KafkaSendMsgUtils.sendMessage("topic1", 0, null, "key", "kafka-test");
        }
    }
}

7. The consumer receiving messages

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.listener.MessageListener;

public class KafkaSendMessageServiceImpl implements MessageListener<String, String> {

    @Override
    public void onMessage(ConsumerRecord<String, String> data) {
        // Dispatch by topic
        System.out.println("========");
        if ("topic1".equals(data.topic())) {
            // business logic 1
            System.out.println(data.value() + " consumed");
        } else if ("topic2".equals(data.topic())) {
            // business logic 2
            System.out.println(data.value() + " consumed from topic2");
        }
    }
}