
Configuring an external etcd cluster for k8s

misankim 2023. 3. 7. 08:41

 

 

# Two options for k8s HA

 

## stacked etcd -> HA by running multiple master nodes, each hosting its own etcd member

 

Simple to set up, but the number of etcd pods grows with the number of master nodes.
Because etcd runs on the master nodes, load on a master node also affects etcd.

 

## external etcd -> HA with a separate etcd cluster outside the master nodes

 

Requires more nodes: master HA plus etcd HA.
Because the masters and etcd are separated, the number of masters and the number of etcd members can be sized independently, and each is less affected by load on the other. Either way, etcd needs a quorum of (N/2)+1 members, so the 3-member cluster built here tolerates the loss of one member (the failure test at the end demonstrates this).



#################################################################################



# Prerequisites -> the state just before initializing the cluster with kubeadm init

 

Docker installed
kubeadm, kubelet, and (optionally) kubectl installed
Three separate etcd nodes for the external etcd cluster
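
For reference, on CentOS 7 (the centos user used later suggests that environment) the prerequisites might be installed roughly like this, assuming the Google-hosted Kubernetes yum repo of this post's era:

yum install -y docker && systemctl enable --now docker

cat << EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
EOF

yum install -y kubeadm kubelet kubectl
systemctl enable kubelet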

 

# Configure kubelet as the service manager for etcd (on all etcd nodes)

 

mkdir -p /etc/systemd/system/kubelet.service.d/

cat << EOF > /etc/systemd/system/kubelet.service.d/20-etcd-service-manager.conf
[Service]
ExecStart=
#  Replace "systemd" with the cgroup driver of your container runtime. The default value in the kubelet is "cgroupfs".
ExecStart=/usr/bin/kubelet --address=127.0.0.1 --pod-manifest-path=/etc/kubernetes/manifests --cgroup-driver=systemd
Restart=always
EOF

systemctl daemon-reload && systemctl restart kubelet
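
(Note) kubelet now runs in standalone mode, watching /etc/kubernetes/manifests for static pod manifests. A quick sanity check:

systemctl status kubelet --no-pager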

 

# Write the kubeadm ClusterConfiguration files

 

Adjust each host's IP to match your environment.

 

export HOST0=172.16.0.252
export HOST1=172.16.0.85
export HOST2=172.16.0.203

mkdir -p /tmp/${HOST0}/ /tmp/${HOST1}/ /tmp/${HOST2}/

ETCDHOSTS=(${HOST0} ${HOST1} ${HOST2})
NAMES=("infra0" "infra1" "infra2")

for i in "${!ETCDHOSTS[@]}"; do
HOST=${ETCDHOSTS[$i]}
NAME=${NAMES[$i]}
cat << EOF > /tmp/${HOST}/kubeadmcfg.yaml
apiVersion: "kubeadm.k8s.io/v1beta2"
kind: ClusterConfiguration
etcd:
    local:
        serverCertSANs:
        - "${HOST}"
        peerCertSANs:
        - "${HOST}"
        extraArgs:
            initial-cluster: ${NAMES[0]}=https://${ETCDHOSTS[0]}:2380,${NAMES[1]}=https://${ETCDHOSTS[1]}:2380,${NAMES[2]}=https://${ETCDHOSTS[2]}:2380
            initial-cluster-state: new
            name: ${NAME}
            listen-peer-urls: https://${HOST}:2380
            listen-client-urls: https://${HOST}:2379
            advertise-client-urls: https://${HOST}:2379
            initial-advertise-peer-urls: https://${HOST}:2380
EOF
done
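
Sanity-check one of the generated files, for example:

cat /tmp/${HOST0}/kubeadmcfg.yaml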

 

# Generate the CA certificate

 

kubeadm init phase certs etcd-ca


ls -l /etc/kubernetes/pki/etcd/
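
This phase writes the etcd CA pair, so the listing should show:

ca.crt
ca.key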

 

# Generate certificates for each etcd cluster member

All certificates are generated on this one node. Each host's set is staged under /tmp/${HOST}/, and everything except the CA pair is then deleted from /etc/kubernetes/pki, so the next host's certificates are signed by the same CA. HOST0's certificates are generated last and left in place.

 

kubeadm init phase certs etcd-server --config=/tmp/${HOST2}/kubeadmcfg.yaml
kubeadm init phase certs etcd-peer --config=/tmp/${HOST2}/kubeadmcfg.yaml
kubeadm init phase certs etcd-healthcheck-client --config=/tmp/${HOST2}/kubeadmcfg.yaml
kubeadm init phase certs apiserver-etcd-client --config=/tmp/${HOST2}/kubeadmcfg.yaml
cp -R /etc/kubernetes/pki /tmp/${HOST2}/
find /etc/kubernetes/pki -not -name ca.crt -not -name ca.key -type f -delete


kubeadm init phase certs etcd-server --config=/tmp/${HOST1}/kubeadmcfg.yaml
kubeadm init phase certs etcd-peer --config=/tmp/${HOST1}/kubeadmcfg.yaml
kubeadm init phase certs etcd-healthcheck-client --config=/tmp/${HOST1}/kubeadmcfg.yaml
kubeadm init phase certs apiserver-etcd-client --config=/tmp/${HOST1}/kubeadmcfg.yaml
cp -R /etc/kubernetes/pki /tmp/${HOST1}/
find /etc/kubernetes/pki -not -name ca.crt -not -name ca.key -type f -delete


kubeadm init phase certs etcd-server --config=/tmp/${HOST0}/kubeadmcfg.yaml
kubeadm init phase certs etcd-peer --config=/tmp/${HOST0}/kubeadmcfg.yaml
kubeadm init phase certs etcd-healthcheck-client --config=/tmp/${HOST0}/kubeadmcfg.yaml
kubeadm init phase certs apiserver-etcd-client --config=/tmp/${HOST0}/kubeadmcfg.yaml


find /tmp/${HOST2} -name ca.key -type f -delete
find /tmp/${HOST1} -name ca.key -type f -delete
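
The staged tree for HOST1 and HOST2 should now look like the following (ca.key has been removed from these copies; HOST0 keeps its full set under /etc/kubernetes/pki):

/tmp/${HOST2}
├── kubeadmcfg.yaml
└── pki
    ├── apiserver-etcd-client.crt
    ├── apiserver-etcd-client.key
    └── etcd
        ├── ca.crt
        ├── healthcheck-client.crt
        ├── healthcheck-client.key
        ├── peer.crt
        ├── peer.key
        ├── server.crt
        └── server.key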

 

 

# Copy the certificates and ClusterConfiguration files to the other etcd nodes

 

touch ~/.ssh/id_rsa.pem
chmod 600 ~/.ssh/id_rsa.pem
vim ~/.ssh/id_rsa.pem

 

Paste the SSH private key into id_rsa.pem (it is used to copy the certificates and cluster configuration files to the other nodes).

 

mv /tmp/${HOST0} /tmp/KUBE

(HOST0's certificates are already in place under /etc/kubernetes/pki, so only its kubeadmcfg.yaml is kept locally; HOST1 and HOST2 each receive their staged set.)

rsync -avP -e 'ssh -i ~/.ssh/id_rsa.pem' /tmp/${HOST1}/ centos@${HOST1}:/tmp/KUBE/
rsync -avP -e 'ssh -i ~/.ssh/id_rsa.pem' /tmp/${HOST2}/ centos@${HOST2}:/tmp/KUBE/


ssh -i ~/.ssh/id_rsa.pem centos@${HOST1} "sudo chown -R root.root /tmp/KUBE && sudo mv /tmp/KUBE/pki /etc/kubernetes"
ssh -i ~/.ssh/id_rsa.pem centos@${HOST2} "sudo chown -R root.root /tmp/KUBE && sudo mv /tmp/KUBE/pki /etc/kubernetes"

 

# Create the etcd cluster from static etcd pods (run on all etcd nodes)

 

kubeadm init phase etcd local --config=/tmp/KUBE/kubeadmcfg.yaml
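
This writes the static pod manifest to /etc/kubernetes/manifests/etcd.yaml, which the kubelet configured earlier picks up. Once it has been run on all three nodes, the local container can be checked with, for example:

docker ps | grep etcd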

 

# Check etcd cluster health

 

export HOST0=172.16.0.252
export HOST1=172.16.0.85
export HOST2=172.16.0.203

K8S_VERSION=`kubelet --version | awk -Fv '{ print $2 }'`
ETCD_TAG=`kubeadm config images list --kubernetes-version ${K8S_VERSION} | grep etcd | awk -F: '{ print $2 }'`

docker run --rm -it \
--net host \
-v /etc/kubernetes:/etc/kubernetes k8s.gcr.io/etcd:${ETCD_TAG} etcdctl \
--cert /etc/kubernetes/pki/etcd/peer.crt \
--key /etc/kubernetes/pki/etcd/peer.key \
--cacert /etc/kubernetes/pki/etcd/ca.crt \
--endpoints https://${HOST0}:2379 endpoint health --cluster

 

Verify that the output looks like the following:

 

https://172.16.0.85:2379 is healthy: successfully committed proposal: took = 15.654453ms
https://172.16.0.252:2379 is healthy: successfully committed proposal: took = 16.482114ms
https://172.16.0.203:2379 is healthy: successfully committed proposal: took = 16.626585ms

 

(Note) Checking the etcd cluster leader

 

docker run --rm -it \
--net host \
-v /etc/kubernetes:/etc/kubernetes k8s.gcr.io/etcd:${ETCD_TAG} etcdctl \
--cert /etc/kubernetes/pki/etcd/peer.crt \
--key /etc/kubernetes/pki/etcd/peer.key \
--cacert /etc/kubernetes/pki/etcd/ca.crt \
--endpoints https://${HOST0}:2379,https://${HOST1}:2379,https://${HOST2}:2379 -w table endpoint status

 

(Note) Listing all keys in the etcd cluster

 

docker run --rm -it \
--net host \
-v /etc/kubernetes:/etc/kubernetes k8s.gcr.io/etcd:${ETCD_TAG} etcdctl \
--cert /etc/kubernetes/pki/etcd/peer.crt \
--key /etc/kubernetes/pki/etcd/peer.key \
--cacert /etc/kubernetes/pki/etcd/ca.crt \
--endpoints https://${HOST0}:2379 get / --prefix --keys-only

 

# Copy the etcd cluster certificates to the master node

 

export MASTER0=172.16.0.102

rsync -avP -e 'ssh -i ~/.ssh/id_rsa.pem' /etc/kubernetes/pki/etcd/ca.crt centos@${MASTER0}:/home/centos/
rsync -avP -e 'ssh -i ~/.ssh/id_rsa.pem' /etc/kubernetes/pki/apiserver-etcd-client.* centos@${MASTER0}:/home/centos/

ssh -i ~/.ssh/id_rsa.pem centos@${MASTER0} "sudo mkdir -p /etc/kubernetes/pki/etcd; sudo mv /home/centos/ca.crt /etc/kubernetes/pki/etcd && sudo mv /home/centos/apiserver-etcd-client.* /etc/kubernetes/pki/ && sudo chown root.root -R /etc/kubernetes/pki/"
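
(Note) To confirm the master can reach etcd with the copied client certificate, the same etcdctl-in-docker pattern used above also works from the master (assuming Docker and ETCD_TAG are set up there; adjust the endpoint IP to one of your etcd nodes):

docker run --rm -it \
--net host \
-v /etc/kubernetes:/etc/kubernetes k8s.gcr.io/etcd:${ETCD_TAG} etcdctl \
--cert /etc/kubernetes/pki/apiserver-etcd-client.crt \
--key /etc/kubernetes/pki/apiserver-etcd-client.key \
--cacert /etc/kubernetes/pki/etcd/ca.crt \
--endpoints https://172.16.0.252:2379 endpoint health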



#################################################################################



# Write the ClusterConfiguration file on the master node

 

export ETCD_0_IP=172.16.0.252
export ETCD_1_IP=172.16.0.85
export ETCD_2_IP=172.16.0.203
export MASTER0=172.16.0.102

 

(If an external load balancer fronts the master nodes (kube-apiserver))

 

export LOAD_BALANCER_DNS=internal-kube-int-clb-1095060359.ap-northeast-1.elb.amazonaws.com
export LOAD_BALANCER_PORT=6443

 

(If there is a single master node)

 

export LOAD_BALANCER_DNS=${MASTER0}
export LOAD_BALANCER_PORT=6443



cat << EOF > kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: stable
networking:
  podSubnet: "10.244.0.0/16"
controlPlaneEndpoint: "${LOAD_BALANCER_DNS}:${LOAD_BALANCER_PORT}"
etcd:
    external:
        endpoints:
        - https://${ETCD_0_IP}:2379
        - https://${ETCD_1_IP}:2379
        - https://${ETCD_2_IP}:2379
        caFile: /etc/kubernetes/pki/etcd/ca.crt
        certFile: /etc/kubernetes/pki/apiserver-etcd-client.crt
        keyFile: /etc/kubernetes/pki/apiserver-etcd-client.key
EOF



(The .networking.podSubnet option was added for the flannel CNI add-on.)
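
(Note) kubeadm can run its preflight checks as a separate phase, so the config can be validated before the actual initialization:

kubeadm init phase preflight --config kubeadm-config.yaml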

 

# Initialize the cluster
kubeadm init --config kubeadm-config.yaml --upload-certs

 

From the output, copy both the command for joining additional master nodes and the command for joining worker nodes.

 

<Master node join command>

 

kubeadm join internal-kube-int-clb-1095060359.ap-northeast-1.elb.amazonaws.com:6443 --token 38bx1r.mxla2zdx1yjum4na \
    --discovery-token-ca-cert-hash sha256:709954c7a7b0f6822a470d7253241c77f250dfc0825e4fdb4d1067f19c883daa \
    --control-plane --certificate-key f57e5c45c69ffff165c9a05e198366478219947802405e8afb8e46a9a6b84bcb

 

<Worker node join command>

 

kubeadm join internal-kube-int-clb-1095060359.ap-northeast-1.elb.amazonaws.com:6443 --token 38bx1r.mxla2zdx1yjum4na \
    --discovery-token-ca-cert-hash sha256:709954c7a7b0f6822a470d7253241c77f250dfc0825e4fdb4d1067f19c883daa
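
(Note) The bootstrap token is valid for 24 hours and the uploaded certificate key for 2 hours. If they have expired, new ones can be generated on an existing master node:

kubeadm token create --print-join-command
kubeadm init phase upload-certs --upload-certs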

 

(Note) If --upload-certs was not passed to kubeadm init, the certificates must be copied to the other master nodes manually.

 

Run on the first master node:

 

USER=ubuntu # customizable
CONTROL_PLANE_IPS="10.0.0.7 10.0.0.8"
for host in ${CONTROL_PLANE_IPS}; do
    scp /etc/kubernetes/pki/ca.crt "${USER}"@$host:
    scp /etc/kubernetes/pki/ca.key "${USER}"@$host:
    scp /etc/kubernetes/pki/sa.key "${USER}"@$host:
    scp /etc/kubernetes/pki/sa.pub "${USER}"@$host:
    scp /etc/kubernetes/pki/front-proxy-ca.crt "${USER}"@$host:
    scp /etc/kubernetes/pki/front-proxy-ca.key "${USER}"@$host:
    scp /etc/kubernetes/pki/etcd/ca.crt "${USER}"@$host:etcd-ca.crt
    # With external etcd (as in this guide), skip the next line: etcd/ca.key does not exist on the master.
    # scp /etc/kubernetes/pki/etcd/ca.key "${USER}"@$host:etcd-ca.key
    # Instead, also copy the etcd client certificate pair used by kube-apiserver:
    scp /etc/kubernetes/pki/apiserver-etcd-client.crt "${USER}"@$host:
    scp /etc/kubernetes/pki/apiserver-etcd-client.key "${USER}"@$host:
done

 

Run on each remaining master node before joining:

 

USER=ubuntu # customizable
mkdir -p /etc/kubernetes/pki/etcd
mv /home/${USER}/ca.crt /etc/kubernetes/pki/
mv /home/${USER}/ca.key /etc/kubernetes/pki/
mv /home/${USER}/sa.pub /etc/kubernetes/pki/
mv /home/${USER}/sa.key /etc/kubernetes/pki/
mv /home/${USER}/front-proxy-ca.crt /etc/kubernetes/pki/
mv /home/${USER}/front-proxy-ca.key /etc/kubernetes/pki/
mv /home/${USER}/etcd-ca.crt /etc/kubernetes/pki/etcd/ca.crt
# With external etcd (as in this guide), there is no etcd-ca.key; move the etcd client pair instead
# mv /home/${USER}/etcd-ca.key /etc/kubernetes/pki/etcd/ca.key
mv /home/${USER}/apiserver-etcd-client.crt /etc/kubernetes/pki/
mv /home/${USER}/apiserver-etcd-client.key /etc/kubernetes/pki/



#################################################################################



# From here on, the procedure is identical to a stacked-etcd master HA setup

 

## Copy the kubeconfig file so kubectl can be used

 

### As the root user

 

mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config

 

### As a non-root user

 

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
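
(As root, simply exporting the admin kubeconfig also works:)

export KUBECONFIG=/etc/kubernetes/admin.conf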

 

## Install the flannel CNI plugin on the master node

 

kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml


kubectl get all -n kube-system

 

# Set up the remaining master nodes

 

Run the master-join command that was printed during cluster initialization:

 

kubeadm join internal-kube-int-clb-1095060359.ap-northeast-1.elb.amazonaws.com:6443 --token 38bx1r.mxla2zdx1yjum4na \
    --discovery-token-ca-cert-hash sha256:709954c7a7b0f6822a470d7253241c77f250dfc0825e4fdb4d1067f19c883daa \
    --control-plane --certificate-key f57e5c45c69ffff165c9a05e198366478219947802405e8afb8e46a9a6b84bcb

 

On the first master node, verify the node was added successfully:
kubectl get node
kubectl get pod -n kube-system

 

Add all master nodes to the load balancer listener.

 

(Note) Removing a node

 

Remove the node from a master node (in production, kubectl drain it first):
kubectl get node
kubectl delete node ip-172-16-100-193.ap-northeast-1.compute.internal

 

Run on the node being removed:
kubeadm reset
rm -rf /etc/kubernetes/pki

 

(Note) If an etcd-related error occurs while removing a node, remove it from the etcd member list (check and adjust the pod name and member ID):
kubectl exec -it -n kube-system etcd-kube-1.novalocal -- etcdctl member list --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/peer.crt --key=/etc/kubernetes/pki/etcd/peer.key

kubectl exec -it -n kube-system etcd-kube-1.novalocal -- etcdctl member remove e1417d86c63ba965 --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/peer.crt --key=/etc/kubernetes/pki/etcd/peer.key

 

(Note) If master setup was interrupted and only containers were created, delete them:
docker rm -f $(docker ps -aq)

 

# Set up the worker nodes

 

Run the worker-join command that was printed during cluster initialization:

 

kubeadm join internal-kube-int-clb-1095060359.ap-northeast-1.elb.amazonaws.com:6443 --token 38bx1r.mxla2zdx1yjum4na \
    --discovery-token-ca-cert-hash sha256:709954c7a7b0f6822a470d7253241c77f250dfc0825e4fdb4d1067f19c883daa

 

On the first master node, verify the node was added successfully:
kubectl get node
kubectl get pod -n kube-system

 

Check the cluster configuration:
kubeadm config view
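
(kubeadm config view was removed in later kubeadm releases; the same ClusterConfiguration can always be read from its ConfigMap:)

kubectl get configmap kubeadm-config -n kube-system -o yaml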

 

(Note) kube-system objects in a working cluster with a 3-node etcd cluster, 2 master nodes (kube-apiserver), and 1 worker node

 

[root@ip-172-16-0-102 ~]# kubectl get all -n kube-system
NAME                                                                          READY   STATUS    RESTARTS   AGE
pod/coredns-66bff467f8-2fhkc                                                  1/1     Running   0          19m
pod/coredns-66bff467f8-zhltk                                                  1/1     Running   0          19m
pod/kube-apiserver-ip-172-16-0-102.ap-northeast-1.compute.internal            1/1     Running   0          20m
pod/kube-apiserver-ip-172-16-0-200.ap-northeast-1.compute.internal            1/1     Running   0          93s
pod/kube-controller-manager-ip-172-16-0-102.ap-northeast-1.compute.internal   1/1     Running   0          20m
pod/kube-controller-manager-ip-172-16-0-200.ap-northeast-1.compute.internal   1/1     Running   0          93s
pod/kube-flannel-ds-amd64-8qqh2                                               1/1     Running   0          50s
pod/kube-flannel-ds-amd64-f7fh4                                               1/1     Running   0          8m21s
pod/kube-flannel-ds-amd64-rhr7d                                               1/1     Running   0          94s
pod/kube-proxy-dqbd5                                                          1/1     Running   0          19m
pod/kube-proxy-j88xf                                                          1/1     Running   0          50s
pod/kube-proxy-zwmfc                                                          1/1     Running   0          94s
pod/kube-scheduler-ip-172-16-0-102.ap-northeast-1.compute.internal            1/1     Running   0          20m
pod/kube-scheduler-ip-172-16-0-200.ap-northeast-1.compute.internal            1/1     Running   0          93s

NAME               TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)                  AGE
service/kube-dns   ClusterIP   10.96.0.10   <none>        53/UDP,53/TCP,9153/TCP   20m

NAME                                     DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR            AGE
daemonset.apps/kube-flannel-ds-amd64     3         3         3       3            3           <none>                   8m21s
daemonset.apps/kube-flannel-ds-arm       0         0         0       0            0           <none>                   8m21s
daemonset.apps/kube-flannel-ds-arm64     0         0         0       0            0           <none>                   8m21s
daemonset.apps/kube-flannel-ds-ppc64le   0         0         0       0            0           <none>                   8m21s
daemonset.apps/kube-flannel-ds-s390x     0         0         0       0            0           <none>                   8m21s
daemonset.apps/kube-proxy                3         3         3       3            3           kubernetes.io/os=linux   20m

NAME                      READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/coredns   2/2     2            2           20m

NAME                                 DESIRED   CURRENT   READY   AGE
replicaset.apps/coredns-66bff467f8   2         2         2       19m

 

(Note) When an etcd cluster node fails

 

Check member status while the cluster is healthy:

 

[root@ip-172-16-0-85 ~]# docker run --rm -it --net host -v /etc/kubernetes:/etc/kubernetes k8s.gcr.io/etcd:${ETCD_TAG} etcdctl --cert /etc/kubernetes/pki/etcd/peer.crt --key /etc/kubernetes/pki/etcd/peer.key --cacert /etc/kubernetes/pki/etcd/ca.crt --endpoints https://${HOST0}:2379,https://${HOST1}:2379,https://${HOST2}:2379 -w table endpoint status

+---------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
|         ENDPOINT          |        ID        | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
+---------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| https://172.16.0.252:2379 |  f7b21a7cc012fa9 |   3.4.3 |  4.4 MB |      true |      false |         4 |      31759 |              31759 |        |
|  https://172.16.0.85:2379 | 3ba0c7dd35315f65 |   3.4.3 |  4.3 MB |     false |      false |         4 |      31759 |              31759 |        |
| https://172.16.0.203:2379 | 1e035b94290c1275 |   3.4.3 |  4.4 MB |     false |      false |         4 |      31759 |              31759 |        |
+---------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+

 

Take down the current leader, 172.16.0.252, then check member status again. A new leader (172.16.0.85) has been elected, and the raft term has increased from 4 to 5:

 

[root@ip-172-16-0-85 ~]# docker run --rm -it \
> --net host \
> -v /etc/kubernetes:/etc/kubernetes k8s.gcr.io/etcd:${ETCD_TAG} etcdctl \
> --cert /etc/kubernetes/pki/etcd/peer.crt \
> --key /etc/kubernetes/pki/etcd/peer.key \
> --cacert /etc/kubernetes/pki/etcd/ca.crt \
> --endpoints https://${HOST0}:2379,https://${HOST1}:2379,https://${HOST2}:2379 -w table endpoint status

{"level":"warn","ts":"2020-07-22T03:01:28.248Z","caller":"clientv3/retry_interceptor.go:61","msg":"retrying of unary invoker failed","target":"passthrough:///https://172.16.0.252:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest connection error: connection error: desc = \"transport: Error while dialing dial tcp 172.16.0.252:2379: connect: no route to host\""}
Failed to get the status of endpoint https://172.16.0.252:2379 (context deadline exceeded)

+---------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
|         ENDPOINT          |        ID        | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
+---------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
|  https://172.16.0.85:2379 | 3ba0c7dd35315f65 |   3.4.3 |  4.3 MB |      true |      false |         5 |      32148 |              32148 |        |
| https://172.16.0.203:2379 | 1e035b94290c1275 |   3.4.3 |  4.4 MB |     false |      false |         5 |      32148 |              32148 |        |
+---------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+



[root@ip-172-16-0-85 ~]# docker run --rm -it \
> --net host \
> -v /etc/kubernetes:/etc/kubernetes k8s.gcr.io/etcd:${ETCD_TAG} etcdctl \
> --cert /etc/kubernetes/pki/etcd/peer.crt \
> --key /etc/kubernetes/pki/etcd/peer.key \
> --cacert /etc/kubernetes/pki/etcd/ca.crt \
> --endpoints https://${HOST2}:2379 endpoint health --cluster

{"level":"warn","ts":"2020-07-22T03:01:08.515Z","caller":"clientv3/retry_interceptor.go:61","msg":"retrying of unary invoker failed","target":"endpoint://client-7412d93b-61b1-488f-99c1-9256db52cc60/172.16.0.252:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest connection error: connection error: desc = \"transport: Error while dialing dial tcp 172.16.0.252:2379: connect: no route to host\""}

https://172.16.0.85:2379 is healthy: successfully committed proposal: took = 13.814777ms
https://172.16.0.203:2379 is healthy: successfully committed proposal: took = 14.817601ms
https://172.16.0.252:2379 is unhealthy: failed to commit proposal: context deadline exceeded
Error: unhealthy cluster

 

Bring the downed 172.16.0.252 host back up and check again; it rejoins as a follower:

 

[root@ip-172-16-0-85 ~]# docker run --rm -it --net host -v /etc/kubernetes:/etc/kubernetes k8s.gcr.io/etcd:${ETCD_TAG} etcdctl --cert /etc/kubernetes/pki/etcd/peer.crt --key /etc/kubernetes/pki/etcd/peer.key --cacert /etc/kubernetes/pki/etcd/ca.crt --endpoints https://${HOST0}:2379,https://${HOST1}:2379,https://${HOST2}:2379 -w table endpoint status

+---------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
|         ENDPOINT          |        ID        | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
+---------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| https://172.16.0.252:2379 |  f7b21a7cc012fa9 |   3.4.3 |  4.4 MB |     false |      false |         5 |      33297 |              33297 |        |
|  https://172.16.0.85:2379 | 3ba0c7dd35315f65 |   3.4.3 |  4.3 MB |      true |      false |         5 |      33297 |              33297 |        |
| https://172.16.0.203:2379 | 1e035b94290c1275 |   3.4.3 |  4.4 MB |     false |      false |         5 |      33297 |              33297 |        |
+---------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+



[root@ip-172-16-0-85 ~]# docker run --rm -it --net host -v /etc/kubernetes:/etc/kubernetes k8s.gcr.io/etcd:${ETCD_TAG} etcdctl --cert /etc/kubernetes/pki/etcd/peer.crt --key /etc/kubernetes/pki/etcd/peer.key --cacert /etc/kubernetes/pki/etcd/ca.crt --endpoints https://${HOST2}:2379 endpoint health --cluster

https://172.16.0.85:2379 is healthy: successfully committed proposal: took = 15.654453ms
https://172.16.0.252:2379 is healthy: successfully committed proposal: took = 16.482114ms
https://172.16.0.203:2379 is healthy: successfully committed proposal: took = 16.626585ms