This commit is contained in:
2025-11-01 21:18:29 +08:00
parent 35478d43d6
commit a607719b46
119 changed files with 815 additions and 1221 deletions

109
kafka/ADD-4/ccmd Executable file
View File

@@ -0,0 +1,109 @@
#!/bin/bash
##################################################
# Entrypoint for a Kafka (KRaft mode) container.
#
# Mount dir
#   - /opt/kafka/data   kafka data dir (persistent)
#   - /opt/kafka/logs   process log dir
# ENV
#   - CONF            which config to run: broker|controller|server (required)
#   - CLUSTER_ID      kraft cluster uuid, identical on every node (required)
#   - LEAD_CONTROLLER host:port of an existing controller to join;
#                     must be UNSET on the first controller and on brokers
#   - KAFKA_HEAP_OPTS, EXTRA_ARGS
#                     presumably consumed by the kafka launch scripts —
#                     not referenced in this script (TODO confirm)
#   - _CONF_*         each var becomes a property appended to the config file
##################################################
set -euo pipefail
export LANG=en_US.UTF-8
trap Quit EXIT                        # Quit also decides the container's exit status
PIDS=                                 # pids of background processes supervised by Main
GOT_SIGTERM=                          # set to 1 by the SIGTERM trap installed in Main
CONF=${CONF:?}                        # fail fast when CONF is missing
CONF_FILE=config/$CONF.properties     # relative path: Main cd's to /opt/kafka first
CLUSTER_ID=${CLUSTER_ID:?}            # fail fast when CLUSTER_ID is missing
LEAD_CONTROLLER=${LEAD_CONTROLLER:-}
function Print {
    # Print a timestamped message to stdout; with '-f FILE' as the first two
    # arguments, also append the line to FILE.
    # Fix: the original passed "$*" inside the date FORMAT string, so any
    # '%'-sequence in a message was expanded by date; build the line with the
    # message outside the format instead. Also quote "$file" for tee.
    local file=/dev/null
    [ '-f' = "$1" ] && file=$2 && shift 2
    echo "[$(date +'%F %T')] $*" | tee -a "$file"
}
function Quit {
    # EXIT trap: make sure no java process survives the container, then
    # report. The final test sets the script's exit status: success only
    # when we are stopping because a SIGTERM was received.
    while pkill -f java; do
        Print killing java ...
        sleep 1
    done
    Print Container stopped.
    test -n "$GOT_SIGTERM"
}
function Usage {
    # Print the container's runtime requirements (informational banner only;
    # nothing is validated here).
    Print 'This container should run with
**root user**
**/opt/kafka/{data,logs} mounted from host**
'
}
function ModifyConf {
    # Rebuild $CONF_FILE from its pristine $CONF_FILE.origin copy, then append
    # every _CONF_* environment variable as a property, deleting any existing
    # default for the same key first. When no .origin exists (image was built
    # without one) the config is left untouched.
    local kv= key=
    ! cp -f "$CONF_FILE.origin" "$CONF_FILE" \
        && Print Skipped modifying $CONF_FILE ... \
        && return 0
    Print Modifying $CONF_FILE ...
    echo -e "\n#### Docker" >> "$CONF_FILE"
    while read -r kv; do
        [ -z "$kv" ] && break
        key=${kv%%=*}
        Print Modifying property: $key ...
        # Escape dots so property names like node.id match literally in sed.
        sed -i "/^${key//./\\.} *=/d" "$CONF_FILE"
        echo "$kv" >> "$CONF_FILE"
    done <<< "$(env | grep '^_CONF_' | sed 's/^_CONF_//')"
}
function StartProc {
    # Format storage (idempotent thanks to -g), start kafka in the background
    # and, on the first boot of a follower controller, join the kraft quorum.
    # data/$CONF.lock marks that this node has already been set up.
    local pid=
    # Fix: the original pattern ^broker|controller|server$ had an alternation
    # precedence bug (it matched e.g. "brokerX" or "myserver").
    [[ ! "$CONF" =~ ^(broker|controller|server)$ ]] \
        && Print Unknown conf: $CONF! \
        && exit 1
    Print Formatting storage ...
    if [[ -z "$LEAD_CONTROLLER" && 'broker' != "$CONF" ]]; then
        # First (bootstrap) controller: format as standalone.
        kafka-storage.sh format -g -t "$CLUSTER_ID" -c "$CONF_FILE" -s
    else
        # Brokers and follower controllers: format with no initial controllers.
        kafka-storage.sh format -g -t "$CLUSTER_ID" -c "$CONF_FILE" -N
    fi
    Print Starting kafka ...
    kafka-server-start.sh "$CONF_FILE" &>> logs/kafka.out &
    pid=$!                      # capture explicitly instead of reusing $! later
    PIDS="$PIDS $pid"
    Print Kafka started.
    [ -e "data/$CONF.lock" ] && return 0
    [[ -z "$LEAD_CONTROLLER" || 'broker' == "$CONF" ]] \
        && touch "data/$CONF.lock" \
        && return 0
    Print Join in kraft cluster with $LEAD_CONTROLLER after 10s ...
    sleep 10
    # Fix: exit with a non-zero status (the original bare 'exit' returned the
    # status of Print, i.e. 0) and check the pid captured above.
    [ ! -e /proc/$pid ] && Print Unexpected error! && exit 1
    timeout 10 kafka-metadata-quorum.sh --command-config "$CONF_FILE" \
        --bootstrap-controller "$LEAD_CONTROLLER" add-controller \
        && touch "data/$CONF.lock" \
        && return 0
    Print Failed to join cluster with $LEAD_CONTROLLER!
    exit 1
}
function Main {
    # Entrypoint: show requirements, render the config, launch kafka, then
    # supervise until SIGTERM arrives or a watched process disappears.
    local p=
    cd /opt/kafka
    Usage
    ModifyConf
    StartProc
    trap "GOT_SIGTERM=1; Print Got SIGTERM ..." SIGTERM
    until [ -n "$GOT_SIGTERM" ] || ! sleep 2; do
        for p in $PIDS; do
            [ -e /proc/$p ] || { Print Unexpected error! && exit; }
        done
    done
}
# Script entry point — all work happens inside Main.
Main

View File

@@ -0,0 +1,31 @@
# 部署 kafka 集群
- 根据实际环境修改 docker-compose.yml
* CONF: 这里启动三个 controller 节点和两个 broker 节点,可用于线上
* CLUSTER_ID: 这里要替换成完全一样的 uuid,生成命令:
```
docker run --rm harbor.boyachain.cn:20443/general/kafka:4.0 kafka-storage.sh random-uuid
```
* LEAD_CONTROLLER: 指定 bootstrap kraft 集群的节点 **controller 地址**;bootstrap kraft 集群的**第一个节点不能设置该变量**;broker 节点无需设置该变量
- 创建目录
```
grep '\<source:' docker-compose.yml | cut -d: -f2 | xargs mkdir -p
```
- 启动
```
docker-compose up -d
```
- 查看集群状态
```
docker exec kafka-controller2 kafka-metadata-quorum.sh \
--bootstrap-controller 127.10.11.3:9093 \
describe --status
docker exec kafka-broker11 kafka-metadata-quorum.sh \
--bootstrap-controller 127.10.11.2:9093 \
describe --replication
```

View File

@@ -0,0 +1,123 @@
services:
kafka-controller1:
image: harbor.boyachain.cn:20443/general/kafka:4.0
container_name: kafka-controller1
restart: no
environment:
CONF: controller
CLUSTER_ID: xxxx
_CONF_node.id: 1
_CONF_controller.quorum.bootstrap.servers: 127.10.11.1:9093,127.10.11.2:9093,127.10.11.3:9093
_CONF_listeners: CONTROLLER://127.10.11.1:9093
_CONF_offsets.topic.replication.factor: 3
_CONF_share.coordinator.state.topic.replication.factor: 3
_CONF_share.coordinator.state.topic.min.isr: 3
_CONF_transaction.state.log.replication.factor: 3
_CONF_transaction.state.log.min.isr: 3
network_mode: host
volumes:
- type: bind
source: ./kafka-controller1/data
target: /opt/kafka/data
- type: bind
source: ./kafka-controller1/logs
target: /opt/kafka/logs
kafka-controller2:
image: harbor.boyachain.cn:20443/general/kafka:4.0
container_name: kafka-controller2
restart: no
environment:
CONF: controller
CLUSTER_ID: xxxx
LEAD_CONTROLLER: 127.10.11.1:9093
_CONF_node.id: 2
_CONF_controller.quorum.bootstrap.servers: 127.10.11.1:9093,127.10.11.2:9093,127.10.11.3:9093
_CONF_listeners: CONTROLLER://127.10.11.2:9093
_CONF_offsets.topic.replication.factor: 3
_CONF_share.coordinator.state.topic.replication.factor: 3
_CONF_share.coordinator.state.topic.min.isr: 3
_CONF_transaction.state.log.replication.factor: 3
_CONF_transaction.state.log.min.isr: 3
network_mode: host
volumes:
- type: bind
source: ./kafka-controller2/data
target: /opt/kafka/data
- type: bind
source: ./kafka-controller2/logs
target: /opt/kafka/logs
kafka-controller3:
image: harbor.boyachain.cn:20443/general/kafka:4.0
container_name: kafka-controller3
restart: no
environment:
CONF: controller
CLUSTER_ID: xxxx
LEAD_CONTROLLER: 127.10.11.1:9093
_CONF_node.id: 3
_CONF_controller.quorum.bootstrap.servers: 127.10.11.1:9093,127.10.11.2:9093,127.10.11.3:9093
_CONF_listeners: CONTROLLER://127.10.11.3:9093
_CONF_offsets.topic.replication.factor: 3
_CONF_share.coordinator.state.topic.replication.factor: 3
_CONF_share.coordinator.state.topic.min.isr: 3
_CONF_transaction.state.log.replication.factor: 3
_CONF_transaction.state.log.min.isr: 3
network_mode: host
volumes:
- type: bind
source: ./kafka-controller3/data
target: /opt/kafka/data
- type: bind
source: ./kafka-controller3/logs
target: /opt/kafka/logs
  kafka-broker10:
    image: harbor.boyachain.cn:20443/general/kafka:4.0
    container_name: kafka-broker10
    restart: no
    environment:
      CONF: broker
      CLUSTER_ID: xxxx
      _CONF_node.id: 10
      _CONF_controller.quorum.bootstrap.servers: 127.10.11.1:9093,127.10.11.2:9093,127.10.11.3:9093
      _CONF_listeners: PLAINTEXT://127.10.11.10:9092
      _CONF_offsets.topic.replication.factor: 3
      _CONF_share.coordinator.state.topic.replication.factor: 3
      _CONF_share.coordinator.state.topic.min.isr: 3
      _CONF_transaction.state.log.replication.factor: 3
      _CONF_transaction.state.log.min.isr: 3
    network_mode: host
    volumes:
      - type: bind
        source: ./kafka-broker10/data
        target: /opt/kafka/data
      - type: bind
        source: ./kafka-broker10/logs
        target: /opt/kafka/logs
kafka-broker11:
image: harbor.boyachain.cn:20443/general/kafka:4.0
container_name: kafka-broker11
restart: no
environment:
CONF: broker
CLUSTER_ID: xxxx
_CONF_node.id: 11
_CONF_controller.quorum.bootstrap.servers: 127.10.11.1:9093,127.10.11.2:9093,127.10.11.3:9093
_CONF_listeners: PLAINTEXT://127.10.11.11:9092
_CONF_offsets.topic.replication.factor: 3
_CONF_share.coordinator.state.topic.replication.factor: 3
_CONF_share.coordinator.state.topic.min.isr: 3
_CONF_transaction.state.log.replication.factor: 3
_CONF_transaction.state.log.min.isr: 3
network_mode: host
volumes:
- type: bind
source: ./kafka-broker11/data
target: /opt/kafka/data
- type: bind
source: ./kafka-broker11/logs
target: /opt/kafka/logs

View File

@@ -1,7 +1,11 @@
# 部署 kafka 单节点
- 根据实际环境修改
- docker-compose.yml
- 根据实际环境修改 docker-compose.yml
* CONF: 单节点只能用 server, 使用内置的 server 配置文件,该文件中默认配置的角色是 "controller,broker"
* CLUSTER_ID: 这里要替换成 uuid,生成命令:
```
docker run --rm harbor.boyachain.cn:20443/general/kafka:4.0 kafka-storage.sh random-uuid
```
- 创建目录
```

View File

@@ -1,38 +1,16 @@
version: "3.7"
services:
zk:
image: harbor.colben.cn/general/zookeeper:3.6
container_name: zk
restart: on-failure
environment:
MYID: 1
JVMFLAGS: "-Xmx1G -Xms1G"
_CONF_reconfigEnabled: "false"
_CONF_standaloneEnabled: "true"
_CONF_clientPort: 2181
networks:
kafka:
volumes:
- type: bind
source: ./zk/dataLog
target: /opt/zk/dataLog
- type: bind
source: ./zk/data
target: /opt/zk/data
- type: bind
source: ./zk/logs
target: /opt/zk/logs
kafka:
image: harbor.colben.cn/general/kafka:2.7
image: harbor.boyachain.cn:20443/general/kafka:4.0
container_name: kafka
restart: on-failure
environment:
KAFKA_OPTS: "-Xmx1G -Xms1G"
_CONF_zookeeper.connect: "zk:2181"
networks:
kafka:
KAFKA_HEAP_OPTS: '-Xmx1G -Xms1G'
CONF: server
CLUSTER_ID: xxxx
_CONF_node.id: 1
_CONF_controller.quorum.bootstrap.servers: 127.10.11.1:9093
_CONF_listeners: PLAINTEXT://127.10.11.1:9092,CONTROLLER://127.10.11.1:9093
network_mode: host
volumes:
- type: bind
source: ./kafka/data
@@ -41,6 +19,3 @@ services:
source: ./kafka/logs
target: /opt/kafka/logs
networks:
kafka:

View File

@@ -1,15 +0,0 @@
# 部署 kafka 集群
- 根据实际环境修改
- docker-compose.yml
- 创建目录
```
grep '\<source:' docker-compose.yml | cut -d: -f2 | xargs mkdir -p
```
- 启动
```
docker-compose up -d
```

View File

@@ -1,138 +0,0 @@
version: "3.7"
services:
zk1:
image: harbor.colben.cn/general/zookeeper:3.6
container_name: zk1
restart: on-failure
environment:
MYID: 1
JVMFLAGS: "-Xmx1G -Xms1G"
_CONF_reconfigEnabled: "false"
_CONF_standaloneEnabled: "false"
_CONF_server.1: "zk1:2888:3888;2181"
_CONF_server.2: "zk2:2888:3888;2181"
_CONF_server.3: "zk3:2888:3888;2181"
networks:
kafka:
volumes:
- type: bind
source: ./zk1/dataLog
target: /opt/zk/dataLog
- type: bind
source: ./zk1/data
target: /opt/zk/data
- type: bind
source: ./zk1/logs
target: /opt/zk/logs
zk2:
image: harbor.colben.cn/general/zookeeper:3.6
container_name: zk2
restart: on-failure
environment:
MYID: 2
JVMFLAGS: "-Xmx1G -Xms1G"
_CONF_reconfigEnabled: "false"
_CONF_standaloneEnabled: "false"
_CONF_server.1: "zk1:2888:3888;2181"
_CONF_server.2: "zk2:2888:3888;2181"
_CONF_server.3: "zk3:2888:3888;2181"
networks:
kafka:
volumes:
- type: bind
source: ./zk2/dataLog
target: /opt/zk/dataLog
- type: bind
source: ./zk2/data
target: /opt/zk/data
- type: bind
source: ./zk2/logs
target: /opt/zk/logs
zk3:
image: harbor.colben.cn/general/zookeeper:3.6
container_name: zk3
restart: on-failure
environment:
MYID: 3
JVMFLAGS: "-Xmx1G -Xms1G"
_CONF_reconfigEnabled: "false"
_CONF_standaloneEnabled: "false"
_CONF_server.1: "zk1:2888:3888;2181"
_CONF_server.2: "zk2:2888:3888;2181"
_CONF_server.3: "zk3:2888:3888;2181"
networks:
kafka:
volumes:
- type: bind
source: ./zk3/dataLog
target: /opt/zk/dataLog
- type: bind
source: ./zk3/data
target: /opt/zk/data
- type: bind
source: ./zk3/logs
target: /opt/zk/logs
kafka1:
image: harbor.colben.cn/general/kafka:2.7
container_name: kafka1
restart: on-failure
environment:
KAFKA_OPTS: "-Xmx1G -Xms1G"
_CONF_broker.id: 1
_CONF_listeners: "PLAINTEXT://kafka1:9092"
_CONF_zookeeper.connect: "zk1:2181,zk2:2181,zk3:2181"
networks:
kafka:
volumes:
- type: bind
source: ./kafka1/data
target: /opt/kafka/data
- type: bind
source: ./kafka1/logs
target: /opt/kafka/logs
kafka2:
image: harbor.colben.cn/general/kafka:2.7
container_name: kafka2
restart: on-failure
environment:
KAFKA_OPTS: "-Xmx1G -Xms1G"
_CONF_broker.id: 2
_CONF_listeners: "PLAINTEXT://kafka2:9092"
_CONF_zookeeper.connect: "zk1:2181,zk2:2181,zk3:2181"
networks:
kafka:
volumes:
- type: bind
source: ./kafka2/data
target: /opt/kafka/data
- type: bind
source: ./kafka2/logs
target: /opt/kafka/logs
kafka3:
image: harbor.colben.cn/general/kafka:2.7
container_name: kafka3
restart: on-failure
environment:
KAFKA_OPTS: "-Xmx1G -Xms1G"
_CONF_broker.id: 3
_CONF_listeners: "PLAINTEXT://kafka3:9092"
_CONF_zookeeper.connect: "zk1:2181,zk2:2181,zk3:2181"
networks:
kafka:
volumes:
- type: bind
source: ./kafka3/data
target: /opt/kafka/data
- type: bind
source: ./kafka3/logs
target: /opt/kafka/logs
networks:
kafka:

View File

@@ -0,0 +1,31 @@
# 部署 kafka 集群
- 根据实际环境修改 docker-compose.yml
* CONF: 这里启动三个 server 节点,简单试用集群环境
* CLUSTER_ID: 这里要替换成完全一样的 uuid,生成命令:
```
docker run --rm harbor.boyachain.cn:20443/general/kafka:4.0 kafka-storage.sh random-uuid
```
* LEAD_CONTROLLER: 指定 bootstrap kraft 集群的节点 **controller 地址**;bootstrap kraft 集群的**第一个节点不能设置该变量**
- 创建目录
```
grep '\<source:' docker-compose.yml | cut -d: -f2 | xargs mkdir -p
```
- 启动
```
docker-compose up -d
```
- 查看集群状态
```
docker exec kafka1 kafka-metadata-quorum.sh \
--bootstrap-controller 127.10.11.1:9093 \
describe --status
docker exec kafka1 kafka-metadata-quorum.sh \
--bootstrap-controller 127.10.11.1:9093 \
describe --replication
```

View File

@@ -0,0 +1,75 @@
services:
kafka1:
image: harbor.boyachain.cn:20443/general/kafka:4.0
container_name: kafka1
restart: no
environment:
CONF: server
CLUSTER_ID: xxxx
_CONF_node.id: 1
_CONF_controller.quorum.bootstrap.servers: 127.10.11.1:9093,127.10.11.2:9093,127.10.11.3:9093
_CONF_listeners: PLAINTEXT://127.10.11.1:9092,CONTROLLER://127.10.11.1:9093
_CONF_offsets.topic.replication.factor: 2
_CONF_share.coordinator.state.topic.replication.factor: 2
_CONF_share.coordinator.state.topic.min.isr: 2
_CONF_transaction.state.log.replication.factor: 2
_CONF_transaction.state.log.min.isr: 2
network_mode: host
volumes:
- type: bind
source: ./kafka1/data
target: /opt/kafka/data
- type: bind
source: ./kafka1/logs
target: /opt/kafka/logs
kafka2:
image: harbor.boyachain.cn:20443/general/kafka:4.0
container_name: kafka2
restart: no
environment:
CONF: server
CLUSTER_ID: xxxx
LEAD_CONTROLLER: 127.10.11.1:9093
_CONF_node.id: 2
_CONF_controller.quorum.bootstrap.servers: 127.10.11.1:9093,127.10.11.2:9093,127.10.11.3:9093
_CONF_listeners: PLAINTEXT://127.10.11.2:9092,CONTROLLER://127.10.11.2:9093
_CONF_offsets.topic.replication.factor: 2
_CONF_share.coordinator.state.topic.replication.factor: 2
_CONF_share.coordinator.state.topic.min.isr: 2
_CONF_transaction.state.log.replication.factor: 2
_CONF_transaction.state.log.min.isr: 2
network_mode: host
volumes:
- type: bind
source: ./kafka2/data
target: /opt/kafka/data
- type: bind
source: ./kafka2/logs
target: /opt/kafka/logs
kafka3:
image: harbor.boyachain.cn:20443/general/kafka:4.0
container_name: kafka3
restart: no
environment:
CONF: server
CLUSTER_ID: xxxx
LEAD_CONTROLLER: 127.10.11.1:9093
_CONF_node.id: 3
_CONF_controller.quorum.bootstrap.servers: 127.10.11.1:9093,127.10.11.2:9093,127.10.11.3:9093
_CONF_listeners: PLAINTEXT://127.10.11.3:9092,CONTROLLER://127.10.11.3:9093
_CONF_offsets.topic.replication.factor: 2
_CONF_share.coordinator.state.topic.replication.factor: 2
_CONF_share.coordinator.state.topic.min.isr: 2
_CONF_transaction.state.log.replication.factor: 2
_CONF_transaction.state.log.min.isr: 2
network_mode: host
volumes:
- type: bind
source: ./kafka3/data
target: /opt/kafka/data
- type: bind
source: ./kafka3/logs
target: /opt/kafka/logs

View File

@@ -1,6 +1,7 @@
ARG ARCH
FROM harbor.colben.cn/general/jdk$ARCH:8
FROM harbor.colben.cn/general/jdk$ARCH:17
MAINTAINER Colben colbenlee@gmail.com
ADD --chown=root:root /ADD/ /opt/
ENV PATH=/opt/kafka/bin:$PATH
CMD ["/opt/ccmd"]

View File

@@ -24,6 +24,7 @@ fi
function Quit {
local exitCode=$?
rm -rf $ROOT_DIR/ADD/
[ 0 -ne $exitCode ] && Error Failed to build or push image!
[ -z "${END:-}" ] && echo && Error Interrupted manually!
Print Succeeded to build and push image.
@@ -41,17 +42,41 @@ function YesOrNo {
done
}
function ModifyKafkaV2 {
cd kafka/config
cp server.properties server.properties.sample
sed -i '/^log4j\.rootLogger/clog4j.rootLogger=INFO, connectAppender' connect-log4j.properties
sed -i '/^log4j\.rootLogger/clog4j.rootLogger=INFO, kafkaAppender' log4j.properties
}
function ModifyKafkaV4 {
local f=
cd kafka/config
for f in {broker,controller,server}.properties; do
sed -i \
-e '/^node\.id/s/^/#/' \
-e '/^controller\.quorum\.bootstrap\.servers/s/^/#/' \
-e '/^listeners/s/^/#/' \
-e '/^advertised\.listeners/s/^/#/' \
-e '/^log\.dirs/clog.dirs=/opt/kafka/data' \
$f
mv $f $f.origin
done
sed -i '/ref: STDOUT/d' connect-log4j2.yaml
sed -i '/ref: STDOUT/d' log4j2.yaml
}
function Update {
Warn Preparing kafka $VERSION ...
cd $ROOT_DIR/ADD
rm -rf $(ls | grep -v ccmd || true)
cd $ROOT_DIR
rm -rf ADD
cp -af ADD-${VERSION%%.*} ADD
cd ADD
tar zxf /release/RUNTIME/kafka_2.13-$VERSION.tgz
mv kafka_2.13-$VERSION kafka
cp kafka/config/server.properties kafka/config/server.properties.sample
sed -i '/^log4j\.rootLogger/clog4j.rootLogger=INFO, connectAppender' kafka/config/connect-log4j.properties
sed -i '/^log4j\.rootLogger/clog4j.rootLogger=INFO, kafkaAppender' kafka/config/log4j.properties
mkdir kafka/{data,logs}
rm -rf kafka/site-docs
rm -rf kafka/{LICENSE,licenses,NOTICE,site-docs}
ModifyKafkaV${VERSION%%.*}
}
function Build {
@@ -61,7 +86,7 @@ function Build {
&& Warn Removing image $IMAGE ... \
&& docker rmi $IMAGE
Warn Building image: $IMAGE ...
docker build --force-rm --build-arg ARCH="$ARCH" --build-arg VERSION="$VERSION" -t $IMAGE .
docker build --force-rm --build-arg ARCH="$ARCH" -t $IMAGE .
YesOrNo Push image: $IMAGE? && docker push $IMAGE
}