Installing the Kafka Service with Docker
```bash
docker pull wurstmeister/zookeeper
docker run -d --name zookeeper -p 2181:2181 -e TZ="Asia/Shanghai" --restart always wurstmeister/zookeeper

docker pull wurstmeister/kafka

# Template: replace the placeholders with your own ZooKeeper and Kafka addresses and ports
# docker run -d --name kafka -p 9092:9092 -e KAFKA_BROKER_ID=0 -e KAFKA_ZOOKEEPER_CONNECT=<your ZooKeeper address:port> -e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://<your Kafka address:port> -e KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092 -e TZ="Asia/Shanghai" wurstmeister/kafka
docker run -d --name kafka -p 9092:9092 -e KAFKA_BROKER_ID=0 -e KAFKA_ZOOKEEPER_CONNECT=127.0.0.1:2181 -e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://127.0.0.1:9092 -e KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092 -e TZ="Asia/Shanghai" wurstmeister/kafka
docker run -d --name kafka -p 9092:9092 -e KAFKA_BROKER_ID=0 -e KAFKA_ZOOKEEPER_CONNECT=172.17.0.3:2181 -e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://127.0.0.1:9092 -e KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092 wurstmeister/kafka
```
- -e KAFKA_ZOOKEEPER_CONNECT=127.0.0.1:2181: the ZooKeeper address and port; here the local address 127.0.0.1 and the default port 2181 are used (the last run command above points at the ZooKeeper container's bridge IP 172.17.0.3 instead; see the lookup sketch after this list).
- -e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://127.0.0.1:9092: the address and port the broker advertises to clients, set here to 127.0.0.1:9092.
- -e KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092: the address and port Kafka listens on; binding to 0.0.0.0:9092 accepts connections from any interface.
- -p 9092:9092: maps the container's port 9092 to port 9092 on the host, so the broker can be reached at 127.0.0.1:9092 from the host.
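The 172.17.0.3 used in the last run command is the ZooKeeper container's IP on Docker's default bridge network. A minimal sketch for looking it up, assuming the container is attached only to the default bridge:

```bash
# Print the ZooKeeper container's IP on the default bridge network
docker inspect -f '{{.NetworkSettings.IPAddress}}' zookeeper

# Point the broker at it, e.g. -e KAFKA_ZOOKEEPER_CONNECT=<that IP>:2181
```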
Installing a Kafka Client (EFAK)
```bash
docker pull nickzurich/efak:latest
docker run -d --name kafka-eagle -p 8048:8048 -e EFAK_CLUSTER_ZK_LIST="172.17.0.3:2181" nickzurich/efak:latest
```
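A minimal check that the EFAK (Kafka Eagle) container came up, assuming the image keeps EFAK's usual defaults; the web UI is published on port 8048 as mapped above:

```bash
# Wait for the EFAK web service to report it has started
docker logs -f kafka-eagle

# The UI should answer on the mapped port (EFAK's documented default login is admin / 123456)
curl -I http://127.0.0.1:8048
```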
Deploying with Docker Compose
```yaml
version: '3'
services:
  zookeeper:
    image: 'bitnami/zookeeper:3.8.0'
    container_name: zookeeper
    ports:
      - "2181:2181"
    environment:
      TZ: Asia/Shanghai
      ALLOW_ANONYMOUS_LOGIN: "yes"
      ZOO_SERVER_ID: 1
      ZOO_PORT_NUMBER: 2181
    network_mode: "host"
  kafka:
    image: 'bitnami/kafka:3.2.0'
    container_name: kafka
    ports:
      - "9092:9092"
    environment:
      TZ: Asia/Shanghai
      KAFKA_BROKER_ID: 1
      KAFKA_CFG_LISTENERS: PLAINTEXT://:9092
      KAFKA_CFG_ADVERTISED_LISTENERS: PLAINTEXT://127.0.0.1:9092
      KAFKA_CFG_ZOOKEEPER_CONNECT: 127.0.0.1:2181
      ALLOW_PLAINTEXT_LISTENER: "yes"
    volumes:
      - /Users/neozhang/dockercp/kafka/data:/bitnami/kafka/data
    depends_on:
      - zookeeper
    network_mode: "host"
  kafka-manager:
    image: sheepkiller/kafka-manager:latest
    container_name: kafka-manager
    ports:
      - "19092:19092"
    environment:
      ZK_HOSTS: 127.0.0.1:2181
      APPLICATION_SECRET: letmein
      KAFKA_MANAGER_USERNAME: neo
      KAFKA_MANAGER_PASSWORD: neo123
      KM_ARGS: -Dhttp.port=19092
    depends_on:
      - kafka
    network_mode: "host"
  kafka-ui:
    container_name: kafka-ui
    image: provectuslabs/kafka-ui:latest
    ports:
      - 8080:8080
    environment:
      DYNAMIC_CONFIG_ENABLED: 'true'
    volumes:
      - ~/kui/config.yml:/etc/kafkaui/dynamic_config.yaml
```
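A short sketch for bringing this stack up and checking it, assuming the file above is saved as docker-compose.yml in the current directory (older installs use the docker-compose binary instead of docker compose):

```bash
# Start ZooKeeper, Kafka, kafka-manager and kafka-ui in the background
docker compose up -d

# All four containers should show as running
docker compose ps

# Tail the broker log if Kafka does not come up
docker compose logs -f kafka
```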
Installing KnowStreaming
| version: "2" services: knowstreaming-manager: image: knowstreaming/knowstreaming-manager:latest container_name: knowstreaming-manager privileged: true restart: always depends_on: - elasticsearch-single expose: - 80 command: - /bin/sh - /ks-start.sh environment: TZ: Asia/Shanghai SERVER_MYSQL_ADDRESS: 127.0.0.1:3306 SERVER_MYSQL_DB: know_streaming SERVER_MYSQL_USER: root SERVER_MYSQL_PASSWORD: 123456 SERVER_ES_ADDRESS: elasticsearch-single:9200 JAVA_OPTS: -Xmx1g -Xms1g
knowstreaming-ui: image: knowstreaming/knowstreaming-ui:latest container_name: knowstreaming-ui restart: always ports: - '80:80' environment: TZ: Asia/Shanghai depends_on: - knowstreaming-manager
elasticsearch-single: image: docker.io/library/elasticsearch:7.6.2 container_name: elasticsearch-single restart: always expose: - 9200 - 9300
environment: TZ: Asia/Shanghai ES_JAVA_OPTS: -Xms512m -Xmx512m discovery.type: single-node
knowstreaming-init: image: knowstreaming/knowstreaming-manager:latest container_name: knowstreaming-init depends_on: - elasticsearch-single command: - /bin/bash - /es_template_create.sh environment: TZ: Asia/Shanghai SERVER_ES_ADDRESS: elasticsearch-single:9200
|
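Note that this compose file expects an external MySQL instance with the know_streaming schema reachable at 127.0.0.1:3306 (see the SERVER_MYSQL_* variables above); it does not start one itself. A hedged sketch for bringing the stack up and checking the UI:

```bash
# Start KnowStreaming; MySQL must already be reachable at 127.0.0.1:3306
docker compose up -d

# The manager should connect to MySQL and Elasticsearch during startup
docker compose logs -f knowstreaming-manager

# The UI is published on host port 80
curl -I http://127.0.0.1
```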
Kafka Quick Start
```bash
# Create and describe a test topic
/opt/kafka/bin/kafka-topics.sh --create --zookeeper 172.17.0.3:2181 --topic test --partitions 1 --replication-factor 1
/opt/kafka/bin/kafka-topics.sh --describe --zookeeper 172.17.0.3:2181 --topic test

# Produce messages
/opt/kafka/bin/kafka-console-producer.sh --broker-list localhost:9092 --topic test

# Consume messages
/opt/kafka/bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic test --from-beginning
```
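These scripts live inside the broker container, so they are normally run through docker exec. On Kafka 2.2 and newer the topic tools can also talk to the broker directly with --bootstrap-server instead of going through ZooKeeper (the --zookeeper flag was removed in Kafka 3.x). A sketch, assuming the container name kafka from the run commands above:

```bash
# Open a shell in the broker container
docker exec -it kafka bash

# Equivalent topic commands that go through the broker instead of ZooKeeper (Kafka 2.2+)
/opt/kafka/bin/kafka-topics.sh --create --bootstrap-server localhost:9092 --topic test --partitions 1 --replication-factor 1
/opt/kafka/bin/kafka-topics.sh --list --bootstrap-server localhost:9092
```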
Links
If this article helped you, feel free to buy the author a chicken drumstick ~(*^__^*). Thanks for the encouragement and support!
(WeChat Pay / Alipay donation QR codes)