
Installing Kafka with Docker

Installing the Kafka Server with Docker

docker pull wurstmeister/zookeeper

docker run -d --name zookeeper -p 2181:2181 -e TZ="Asia/Shanghai" --restart always wurstmeister/zookeeper

docker pull wurstmeister/kafka


# docker run -d --name kafka -p 9092:9092 -e KAFKA_BROKER_ID=0 -e KAFKA_ZOOKEEPER_CONNECT=<your zookeeper host:port> -e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://<your kafka host:port> -e KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092 -e TZ="Asia/Shanghai" wurstmeister/kafka
docker run -d --name kafka -p 9092:9092 -e KAFKA_BROKER_ID=0 -e KAFKA_ZOOKEEPER_CONNECT=127.0.0.1:2181 -e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://127.0.0.1:9092 -e KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092 -e TZ="Asia/Shanghai" wurstmeister/kafka



docker run -d --name kafka -p 9092:9092 -e KAFKA_BROKER_ID=0 -e KAFKA_ZOOKEEPER_CONNECT=172.17.0.3:2181 -e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://127.0.0.1:9092 -e KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092 wurstmeister/kafka


  • -e KAFKA_ZOOKEEPER_CONNECT=127.0.0.1:2181: the Zookeeper address and port. 127.0.0.1 with the default port 2181 only works when Kafka can actually reach Zookeeper over the loopback (e.g. host networking); on the default bridge network use the Zookeeper container's IP instead (see the lookup sketch after this list).
  • -e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://127.0.0.1:9092: the address and port the broker advertises to clients, here 127.0.0.1:9092.
  • -e KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092: the address and port Kafka listens on, 0.0.0.0:9092, so connections from outside the container are accepted.
  • -p 9092:9092: maps container port 9092 to host port 9092, so the broker can be reached at 127.0.0.1:9092 on the host.
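
Because 127.0.0.1 inside the kafka container refers to the container itself, KAFKA_ZOOKEEPER_CONNECT normally needs the Zookeeper container's bridge IP (172.17.0.3 in the second run command) unless host networking is used. A quick way to look that IP up and to confirm the broker started, assuming the container names used above:

# Bridge-network IP of the zookeeper container (value for KAFKA_ZOOKEEPER_CONNECT)
docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' zookeeper

# Confirm both containers are up and skim the broker log
docker ps
docker logs kafka | tail -n 20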

Installing a Kafka Client (EFAK)

docker pull nickzurich/efak:latest

docker run -d --name kafka-eagle -p 8048:8048 -e EFAK_CLUSTER_ZK_LIST="172.17.0.3:2181" nickzurich/efak:latest
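
EFAK (Kafka Eagle) is a web console rather than a command-line client; once the container starts it should be reachable on port 8048 (check the image's documentation for the default login). A quick check, assuming the container name above:

# Follow the EFAK startup log
docker logs -f kafka-eagle

# The console should respond on port 8048 once initialization finishes
curl -I http://127.0.0.1:8048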

Deploying with Docker Compose

version: '3'

services:
  zookeeper:
    image: 'bitnami/zookeeper:3.8.0'
    container_name: zookeeper
    ports:
      - "2181:2181"
    environment:
      TZ: Asia/Shanghai
      ALLOW_ANONYMOUS_LOGIN: "yes"
      ZOO_SERVER_ID: 1
      ZOO_PORT_NUMBER: 2181
    network_mode: "host"

  kafka:
    image: 'bitnami/kafka:3.2.0'
    container_name: kafka
    ports:
      - "9092:9092"
    environment:
      TZ: Asia/Shanghai
      # More variables: see https://github.com/bitnami/bitnami-docker-kafka/blob/master/README.md
      KAFKA_BROKER_ID: 1
      # Listener port
      KAFKA_CFG_LISTENERS: PLAINTEXT://:9092
      # Advertised address: 127.0.0.1 for local access, a 192.x address on a LAN, or the public IP for external access
      KAFKA_CFG_ADVERTISED_LISTENERS: PLAINTEXT://127.0.0.1:9092
      KAFKA_CFG_ZOOKEEPER_CONNECT: 127.0.0.1:2181
      ALLOW_PLAINTEXT_LISTENER: "yes"
    volumes:
      - /Users/neozhang/dockercp/kafka/data:/bitnami/kafka/data
    depends_on:
      - zookeeper
    network_mode: "host"

  kafka-manager:
    image: sheepkiller/kafka-manager:latest
    container_name: kafka-manager
    ports:
      - "19092:19092"
    environment:
      ZK_HOSTS: 127.0.0.1:2181
      APPLICATION_SECRET: letmein
      KAFKA_MANAGER_USERNAME: neo
      KAFKA_MANAGER_PASSWORD: neo123
      KM_ARGS: -Dhttp.port=19092
    depends_on:
      - kafka
    network_mode: "host"

  kafka-ui:
    container_name: kafka-ui
    image: provectuslabs/kafka-ui:latest
    ports:
      - 8080:8080
    environment:
      DYNAMIC_CONFIG_ENABLED: 'true'
    volumes:
      - ~/kui/config.yml:/etc/kafkaui/dynamic_config.yaml
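
Two things worth noting about this file: the zookeeper, kafka, and kafka-manager services all use network_mode: "host", and Docker ignores ports: mappings under host networking, so on Linux they bind straight to the host's 2181/9092/19092; and the kafka-ui volume assumes a config file already exists at ~/kui/config.yml. To bring the stack up, assuming the file is saved as docker-compose.yml:

docker compose up -d      # or docker-compose up -d with the standalone binary
docker compose ps
docker compose logs -f kafka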

Installing KnowStreaming

version: "2"
services:
  # * Do not rename the knowstreaming-manager service; the UI relies on this name
  knowstreaming-manager:
    image: knowstreaming/knowstreaming-manager:latest
    container_name: knowstreaming-manager
    privileged: true
    restart: always
    depends_on:
      - elasticsearch-single
    expose:
      - 80
    command:
      - /bin/sh
      - /ks-start.sh
    environment:
      TZ: Asia/Shanghai
      # MySQL address
      SERVER_MYSQL_ADDRESS: 127.0.0.1:3306
      # MySQL database name
      SERVER_MYSQL_DB: know_streaming
      # MySQL user
      SERVER_MYSQL_USER: root
      # MySQL password
      SERVER_MYSQL_PASSWORD: 123456
      # Elasticsearch address
      SERVER_ES_ADDRESS: elasticsearch-single:9200
      # JVM options for the service
      JAVA_OPTS: -Xmx1g -Xms1g
    # Hostnames used in Kafka's ADVERTISED_LISTENERS can be resolved like this:
    # extra_hosts:
    #   - "hostname:x.x.x.x"
    # Service log path
    # volumes:
    #   - /ks/manage/log:/logs

  knowstreaming-ui:
    image: knowstreaming/knowstreaming-ui:latest
    container_name: knowstreaming-ui
    restart: always
    ports:
      - '80:80'
    environment:
      TZ: Asia/Shanghai
    depends_on:
      - knowstreaming-manager
    # extra_hosts:
    #   - "hostname:x.x.x.x"

  elasticsearch-single:
    image: docker.io/library/elasticsearch:7.6.2
    container_name: elasticsearch-single
    restart: always
    expose:
      - 9200
      - 9300
    # ports:
    #   - '9200:9200'
    #   - '9300:9300'
    environment:
      TZ: Asia/Shanghai
      # JVM options for Elasticsearch
      ES_JAVA_OPTS: -Xms512m -Xmx512m
      # Single-node setup; for a multi-node cluster see https://www.elastic.co/guide/en/elasticsearch/reference/7.6/docker.html#docker-compose-file
      discovery.type: single-node
    # Data persistence path
    # volumes:
    #   - /ks/es/data:/usr/share/elasticsearch/data

  # ES init job; uses the same image as the manager
  # The first Elasticsearch start needs templates and indices initialized; after that they are created automatically
  knowstreaming-init:
    image: knowstreaming/knowstreaming-manager:latest
    container_name: knowstreaming-init
    depends_on:
      - elasticsearch-single
    command:
      - /bin/bash
      - /es_template_create.sh
    environment:
      TZ: Asia/Shanghai
      # Elasticsearch address
      SERVER_ES_ADDRESS: elasticsearch-single:9200
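
Note that this compose file contains no MySQL service: knowstreaming-manager expects an existing MySQL at 127.0.0.1:3306 with a know_streaming database and the credentials above (the KnowStreaming project documents the init SQL for it), so prepare that first. Startup follows the same pattern, with the UI on port 80:

docker compose up -d
# knowstreaming-init should exit after creating the ES templates and indices
docker compose ps
# then open http://<host>/ for the KnowStreaming UI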

Kafka Quick Start

# Create a topic
/opt/kafka/bin/kafka-topics.sh --create --zookeeper 172.17.0.3:2181 --topic test --partitions 1 --replication-factor 1

# Describe the topic
/opt/kafka/bin/kafka-topics.sh --describe --zookeeper 172.17.0.3:2181 --topic test

# Produce messages
/opt/kafka/bin/kafka-console-producer.sh --broker-list localhost:9092 --topic test

# Consume messages from the beginning of the topic
/opt/kafka/bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic test --from-beginning
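
These paths match the wurstmeister/kafka image, where the scripts live under /opt/kafka/bin; run them inside the broker container with docker exec -it kafka bash. On Kafka 3.x (for example the bitnami/kafka:3.2.0 service above, whose scripts sit under /opt/bitnami/kafka/bin), the --zookeeper flag was removed from kafka-topics.sh, so topic management goes through the broker instead; a rough equivalent:

# Kafka 3.x: create and inspect the topic via the broker
kafka-topics.sh --create --bootstrap-server localhost:9092 --topic test --partitions 1 --replication-factor 1
kafka-topics.sh --describe --bootstrap-server localhost:9092 --topic test

# The console producer also accepts --bootstrap-server in place of the older --broker-list
kafka-console-producer.sh --bootstrap-server localhost:9092 --topic test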

