-
Create folders:
sudo mkdir ka-data
sudo mkdir zoo-data
sudo mkdir zoo-datalog
sudo mkdir km-conf
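The same step as a one-liner, if you prefer (mkdir -p also skips folders that already exist):
sudo mkdir -p ka-data zoo-data zoo-datalog km-conf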
-
Create a .env file (you could also configure these values manually)
#ID must be unique for each node: 1, 2, 3, ...
#In SERVER_ZOO_HOST, the entry for the current node should use 0.0.0.0
ID=?
SERVER_KAFKA=kafka1.mydomain.com
SERVER_ZOO_HOST=server.1=0.0.0.0:2888:3888 server.2=zookeeper2.mydomain.com:2888:3888 server.3=zookeeper3.mydomain.com:2888:3888
SERVER_ZOO_CONNECT=zookeeper1.mydomain.com:2181,zookeeper2.mydomain.com:2181,zookeeper3.mydomain.com:2181
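For example, on the second node the file might look like this (an illustration only; kafka2.mydomain.com is a hypothetical hostname in the same style as the placeholders above). Note that server.2 is the one set to 0.0.0.0 here:
ID=2
SERVER_KAFKA=kafka2.mydomain.com
SERVER_ZOO_HOST=server.1=zookeeper1.mydomain.com:2888:3888 server.2=0.0.0.0:2888:3888 server.3=zookeeper3.mydomain.com:2888:3888
SERVER_ZOO_CONNECT=zookeeper1.mydomain.com:2181,zookeeper2.mydomain.com:2181,zookeeper3.mydomain.com:2181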
-
Create application.conf for Kafka-manager (Ref: https://github.com/lightbend/kafka-manager-TO-BE-DELETED/blob/master/conf/application.conf)
# Copyright 2015 Yahoo Inc. Licensed under the Apache License, Version 2.0
# See accompanying LICENSE file.
# This is the main configuration file for the application.
# ~~~~~
# Secret key
# ~~~~~
# The secret key is used to secure cryptographic functions.
# If you deploy your application to several instances be sure to use the same key!
play.crypto.secret="^<csmm5Fx4d=r2HEX8pelM3iBkFVv?k[mc;IZE<_Qoq8EkX_/7@Zt6dP05Pzea3U"
play.crypto.secret=${?APPLICATION_SECRET}
# The application languages
# ~~~~~
play.i18n.langs=["en"]
play.http.requestHandler = "play.http.DefaultHttpRequestHandler"
play.http.context = "/"
play.application.loader=loader.KafkaManagerLoader
kafka-manager.zkhosts="kafka-manager-zookeeper:2181"
kafka-manager.zkhosts=${?ZK_HOSTS}
pinned-dispatcher.type="PinnedDispatcher"
pinned-dispatcher.executor="thread-pool-executor"
application.features=["KMClusterManagerFeature","KMTopicManagerFeature","KMPreferredReplicaElectionFeature","KMReassignPartitionsFeature"]
akka {
  loggers = ["akka.event.slf4j.Slf4jLogger"]
  loglevel = "INFO"
}
basicAuthentication.enabled=false
basicAuthentication.enabled=${?KAFKA_MANAGER_AUTH_ENABLED}
basicAuthentication.username="admin"
basicAuthentication.username=${?KAFKA_MANAGER_USERNAME}
basicAuthentication.password="password"
basicAuthentication.password=${?KAFKA_MANAGER_PASSWORD}
basicAuthentication.realm="Kafka-Manager"
basicAuthentication.excluded=["/api/health"] # ping the health of your instance without authentication
kafka-manager.consumer.properties.file=${?CONSUMER_PROPERTIES_FILE}
-
Create the docker-compose files:
-
docker-compose-kafka.yml
version: '3.1'
services:
  kafka1:
    image: confluentinc/cp-kafka:5.1.2-1.0
    container_name: ka
    restart: always
    ports:
      - 9092:9092
      - 9999:9999
    volumes:
      - ./ka-data:/var/lib/kafka/data
    environment:
      KAFKA_BROKER_ID: ${ID}
      KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9092
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://${SERVER_KAFKA}:9092
      KAFKA_ZOOKEEPER_CONNECT: ${SERVER_ZOO_CONNECT}
      KAFKA_JMX_PORT: 9999
      KAFKA_JMX_HOSTNAME: ${SERVER_KAFKA}
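Once this broker is running (see the startup order at the end), a quick sanity check from inside the container looks like this (a sketch; it assumes the kafka-topics CLI that ships with the cp-kafka image, and uses the placeholder Zookeeper host from above):
docker exec ka kafka-topics --zookeeper zookeeper1.mydomain.com:2181 --list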
-
docker-compose-zookeeper.yml
version: '3.1'
services:
  zookeeper-1:
    image: zookeeper:3.4.13-1.0
    container_name: zk
    restart: always
    ports:
      - 2181:2181
      - 2888:2888
      - 3888:3888
    volumes:
      - ./zoo-data:/data
      - ./zoo-datalog:/datalog
    environment:
      ZOO_MY_ID: ${ID}
      ZOO_SERVERS: ${SERVER_ZOO_HOST}
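After the Zookeeper containers are started you can check whether this node has joined the ensemble (a sketch; it assumes zkServer.sh is on the PATH inside the official zookeeper image):
docker exec zk zkServer.sh status
# Should report "Mode: leader" or "Mode: follower" once a quorum is formed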
-
docker-compose-kafka-manager.yml
version: '3.1'
services:
  kafka-manager-1:
    image: sheepkiller/kafka-manager:1.3.1.8-1.0
    container_name: km
    restart: always
    ports:
      - 80:9000
    volumes:
      - ./km-conf:/opt
    environment:
      ZK_HOSTS: ${SERVER_ZOO_CONNECT}
      KM_CONFIGFILE: /opt/application.conf
      KAFKA_MANAGER_AUTH_ENABLED: "true"
      KAFKA_MANAGER_USERNAME: user
      KAFKA_MANAGER_PASSWORD: xxxxx
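Before starting anything, it is worth confirming that the .env values are substituted into the compose files (a sketch; docker-compose reads the .env file from the directory it is run in):
docker-compose -f docker-compose-zookeeper.yml config
# The rendered output should show your real values for ZOO_MY_ID and ZOO_SERVERS instead of ${ID} and ${SERVER_ZOO_HOST}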
-
Now you can start everything.
The order should be: Zookeeper -> Kafka-manager -> register the Zookeeper cluster in Kafka-manager -> Kafka
docker-compose -f docker-compose-zookeeper.yml up -d
docker-compose -f docker-compose-kafka-manager.yml up -d
# Now register the Zookeeper cluster in Kafka-manager
docker-compose -f docker-compose-kafka.yml up -d
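Once Kafka-manager is up you can hit its health endpoint, which the application.conf above excludes from basic authentication (a sketch; host port 80 maps to 9000 in the container):
curl http://localhost/api/health
docker ps   # all three containers should show as Up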