We have 6 servers:
3 masters:
kub-master1-121
kub-master2-122
kub-master3-123
3 workers:
kub-worker1-124
kub-worker2-125
kub-worker3-126
What we are going to end up with:
[root@kub-master1-121 ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.1.121 kub-master1-121
192.168.1.122 kub-master2-122
192.168.1.123 kub-master3-123
192.168.1.124 kub-worker1-124
192.168.1.125 kub-worker2-125
192.168.1.126 kub-worker3-126
[root@kub-master1-121 ~]# ssh-keygen
ssh-copy-id kub-master1-121
ssh-copy-id kub-master2-122
ssh-copy-id kub-master3-123
ssh-copy-id kub-worker1-124
ssh-copy-id kub-worker2-125
ssh-copy-id kub-worker3-126
Disable swap on ALL nodes:
[root@kub-master1-121 ~]# cat /etc/fstab | grep swap
#/dev/mapper/centos-swap swap swap defaults 0 0
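To turn swap off immediately as well (not just in fstab), the usual commands are the following; a sketch, assuming the default CentOS swap entry shown above:
swapoff -a
sed -i '/ swap / s/^/#/' /etc/fstab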
and SELinux:
[root@kub-master1-121 ~]# cat /etc/selinux/config | grep ^SELINUX
SELINUX=disabled
SELINUXTYPE=targeted
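Applying the SELinux change without a reboot is usually done like this (a sketch; the config above already shows the desired end state):
setenforce 0
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config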
Installing additional software:
1. ETCD
[root@kub-master1-121 ~]# yum install etcd -y
[root@kub-master2-122 ~]# yum install etcd -y
[root@kub-master3-123 ~]# yum install etcd -y
[root@kub-master1-121 ~]# mv /etc/etcd/etcd.conf /etc/etcd/etcd.conf.backup
[root@kub-master2-122 ~]# mv /etc/etcd/etcd.conf /etc/etcd/etcd.conf.backup
[root@kub-master3-123 ~]# mv /etc/etcd/etcd.conf /etc/etcd/etcd.conf.backup
[root@kub-master1-121 ~]# cat /etc/etcd/etcd.conf
# [member]
ETCD_NAME=etcd1
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="http://192.168.1.121:2380"
ETCD_LISTEN_CLIENT_URLS="http://192.168.1.121:2379,http://127.0.0.1:2379"
#[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.1.121:2380"
ETCD_INITIAL_CLUSTER="etcd1=http://192.168.1.121:2380,etcd2=http://192.168.1.122:2380,etcd3=http://192.168.1.123:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="ab5f20b33aa4"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.1.121:2379"
[root@kub-master2-122 ~]# cat /etc/etcd/etcd.conf
# [member]
ETCD_NAME=etcd2
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="http://192.168.1.122:2380"
ETCD_LISTEN_CLIENT_URLS="http://192.168.1.122:2379,http://127.0.0.1:2379"
#[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.1.122:2380"
ETCD_INITIAL_CLUSTER="etcd1=http://192.168.1.121:2380,etcd2=http://192.168.1.122:2380,etcd3=http://192.168.1.123:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="ab5f20b33aa4"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.1.122:2379"
[root@kub-master3-123 ~]# cat /etc/etcd/etcd.conf
# [member]
ETCD_NAME=etcd3
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="http://192.168.1.123:2380"
ETCD_LISTEN_CLIENT_URLS="http://192.168.1.123:2379,http://127.0.0.1:2379"
#[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.1.123:2380"
ETCD_INITIAL_CLUSTER="etcd1=http://192.168.1.121:2380,etcd2=http://192.168.1.122:2380,etcd3=http://192.168.1.123:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="ab5f20b33aa4"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.1.123:2379"
[root@kub-master1-121 ~]# systemctl enable etcd
[root@kub-master1-121 ~]# systemctl start etcd
[root@kub-master2-122 ~]# systemctl enable etcd
[root@kub-master2-122 ~]# systemctl start etcd
[root@kub-master3-123 ~]# systemctl enable etcd
[root@kub-master3-123 ~]# systemctl start etcd
Check the cluster state:
[root@kub-master1-121 ~]# etcdctl member list
511bd140b9078cd1: name=etcd3 peerURLs=http://192.168.1.123:2380 clientURLs=http://192.168.1.123:2379 isLeader=true
bae29871b5c48ed7: name=etcd1 peerURLs=http://192.168.1.121:2380 clientURLs=http://192.168.1.121:2379 isLeader=false
d652a4faf8890f2a: name=etcd2 peerURLs=http://192.168.1.122:2380 clientURLs=http://192.168.1.122:2379 isLeader=false
[root@kub-master1-121 ~]# ETCDCTL_API=3 etcdctl member list
511bd140b9078cd1, started, etcd3, http://192.168.1.123:2380, http://192.168.1.123:2379
bae29871b5c48ed7, started, etcd1, http://192.168.1.121:2380, http://192.168.1.121:2379
d652a4faf8890f2a, started, etcd2, http://192.168.1.122:2380, http://192.168.1.122:2379
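Optionally, member health can also be checked (standard etcdctl commands, not part of the original listing):
etcdctl cluster-health
ETCDCTL_API=3 etcdctl --endpoints=http://192.168.1.121:2379,http://192.168.1.122:2379,http://192.168.1.123:2379 endpoint health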
2. DOCKER
On ALL nodes:
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
yum install docker-ce -y && systemctl enable docker && systemctl start docker
cat <<EOF > /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ],
  "experimental": true
}
EOF
systemctl restart docker
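To confirm that Docker picked up the systemd cgroup driver after the restart (a quick check, not in the original):
docker info | grep -i 'cgroup driver'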
3. kubelet kubeadm kubectl
On ALL nodes add the Kubernetes repository (the heredoc below is presumably written to /etc/yum.repos.d/kubernetes.repo):
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF
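The repo file alone is not enough: the packages themselves and the bridge sysctl settings that "sysctl -p" below applies still have to be added. A sketch, assuming the standard package names and sysctl keys (they are not shown in the original):
yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
systemctl enable kubelet
cat <<EOF >> /etc/sysctl.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF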
sysctl -p
Create the kubeadm configuration file kub.yaml:
apiVersion: kubeadm.k8s.io/v1beta1
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.1.121       # address the API server listens on
---
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: stable               # cluster version to install
apiServer:                              # hosts kubeadm generates certificates for
  certSANs:
  - 127.0.0.1
  - 192.168.1.121
  - 192.168.1.122
  - 192.168.1.123
  - kub-master1-121
  - kub-master2-122
  - kub-master3-123
controlPlaneEndpoint: 192.168.1.121     # address of the master or load balancer
etcd:                                   # external etcd cluster endpoints
  external:
    endpoints:
    - http://192.168.1.121:2379
    - http://192.168.1.122:2379
    - http://192.168.1.123:2379
networking:
  podSubnet: 192.168.0.0/16             # pod subnet; each CNI has its own
kubeadm config migrate --old-config kub.yaml --new-config kub-new.yaml
cat kub-new.yaml
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: ldjjki.uw6e6mawrrawdngi
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.1.121
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: kub-master1-121
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  certSANs:
  - 127.0.0.1
  - 192.168.1.121
  - 192.168.1.122
  - 192.168.1.123
  - kub-master1-121
  - kub-master2-122
  - kub-master3-123
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: 192.168.1.121:6443
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  external:
    caFile: ""
    certFile: ""
    endpoints:
    - http://192.168.1.121:2379
    - http://192.168.1.122:2379
    - http://192.168.1.123:2379
    keyFile: ""
imageRepository: k8s.gcr.io
kind: ClusterConfiguration
kubernetesVersion: v1.17.3
networking:
  dnsDomain: cluster.local
  podSubnet: 192.168.0.0/16
  serviceSubnet: 10.96.0.0/12
scheduler: {}
[root@kub-master1-121 ~]# kubeadm config migrate --old-config kub.yaml --new-config kub-new.yaml
W0308 13:19:18.597335 1578 validation.go:28] Cannot validate kube-proxy config - no validator is available
W0308 13:19:18.597433 1578 validation.go:28] Cannot validate kubelet config - no validator is available
[root@kub-master1-121 ~]# kubeadm init --config=kub-new.yaml --upload-certs
W0308 13:19:28.500151 1593 validation.go:28] Cannot validate kube-proxy config - no validator is available
W0308 13:19:28.500236 1593 validation.go:28] Cannot validate kubelet config - no validator is available
[init] Using Kubernetes version: v1.17.3
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [kub-master1-121 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local kub-master1-121 kub-master2-122 kub-master3-123] and IPs [10.96.0.1 192.168.1.121 192.168.1.121 127.0.0.1 192.168.1.121 192.168.1.122 192.168.1.123]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] External etcd mode: Skipping etcd/ca certificate authority generation
[certs] External etcd mode: Skipping etcd/server certificate generation
[certs] External etcd mode: Skipping etcd/peer certificate generation
[certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation
[certs] External etcd mode: Skipping apiserver-etcd-client certificate generation
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
W0308 13:20:30.344659 1593 manifests.go:214] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
[control-plane] Creating static Pod manifest for "kube-scheduler"
W0308 13:20:30.345737 1593 manifests.go:214] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 36.007034 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.17" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key: 919f0dc3d7af021ddb78cffd97d3babe0f603deaca7aa89a5d7f34c14d88af05
[mark-control-plane] Marking the node kub-master1-121 as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node kub-master1-121 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: 1n0mn1.0z0jfpqk6og5s8p0
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join 192.168.1.121:6443 --token 1n0mn1.0z0jfpqk6og5s8p0 \
    --discovery-token-ca-cert-hash sha256:09ef212a4f96a7652ec9e3756e2abdb6d4cc08577636e46882e7e4b6092d1f21 \
    --control-plane --certificate-key 919f0dc3d7af021ddb78cffd97d3babe0f603deaca7aa89a5d7f34c14d88af05

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.1.121:6443 --token 1n0mn1.0z0jfpqk6og5s8p0 \
    --discovery-token-ca-cert-hash sha256:09ef212a4f96a7652ec9e3756e2abdb6d4cc08577636e46882e7e4b6092d1f21
Create a directory and put the kubeconfig file there to connect to the Kubernetes API:
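These are the commands that kubeadm init printed above:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config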
[root@kub-master1-121 ~]# kubectl get nodes
NAME              STATUS     ROLES    AGE    VERSION
kub-master1-121   NotReady   master   3m5s   v1.17.3
Copy the certificates to the other masters and join them to the control plane:
[root@kub-master1-121 ~]# scp -r /etc/kubernetes/pki kub-master2-122:/etc/kubernetes/pki
[root@kub-master1-121 ~]# scp -r /etc/kubernetes/pki kub-master3-123:/etc/kubernetes/pki
kubeadm join 192.168.1.121:6443 --token 1n0mn1.0z0jfpqk6og5s8p0 \
    --discovery-token-ca-cert-hash sha256:09ef212a4f96a7652ec9e3756e2abdb6d4cc08577636e46882e7e4b6092d1f21 \
    --control-plane --certificate-key 919f0dc3d7af021ddb78cffd97d3babe0f603deaca7aa89a5d7f34c14d88af05
[root@kub-master2-122 ~]# kubeadm join 192.168.1.121:6443 --token 1n0mn1.0z0jfpqk6og5s8p0 \
>     --discovery-token-ca-cert-hash sha256:09ef212a4f96a7652ec9e3756e2abdb6d4cc08577636e46882e7e4b6092d1f21 \
>     --control-plane --certificate-key 919f0dc3d7af021ddb78cffd97d3babe0f603deaca7aa89a5d7f34c14d88af05
[preflight] Running pre-flight checks
	[WARNING Hostname]: hostname "kub-master2-122" could not be reached
	[WARNING Hostname]: hostname "kub-master2-122": lookup kub-master2-122 on 8.8.8.8:53: no such host
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[preflight] Running pre-flight checks before initializing the new control plane instance
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[download-certs] Downloading the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Using the existing "front-proxy-client" certificate and key
[certs] Using the existing "apiserver" certificate and key
[certs] Using the existing "apiserver-kubelet-client" certificate and key
[certs] Valid certificates and keys now exist in "/etc/kubernetes/pki"
[certs] Using the existing "sa" key
[kubeconfig] Generating kubeconfig files
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
W0308 13:32:27.198326 2133 manifests.go:214] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
W0308 13:32:27.202677 2133 manifests.go:214] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
[control-plane] Creating static Pod manifest for "kube-scheduler"
W0308 13:32:27.204261 2133 manifests.go:214] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
[check-etcd] Skipping etcd check in external mode
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.17" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
[control-plane-join] using external etcd - no local stacked instance added
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[mark-control-plane] Marking the node kub-master2-122 as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node kub-master2-122 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]

This node has joined the cluster and a new control plane instance was created:

* Certificate signing request was sent to apiserver and approval was received.
* The Kubelet was informed of the new secure connection details.
* Control plane (master) label and taint were applied to the new node.
* The Kubernetes control plane instances scaled up.

To start administering your cluster from this node, you need to run the following as a regular user:

	mkdir -p $HOME/.kube
	sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
	sudo chown $(id -u):$(id -g) $HOME/.kube/config

Run 'kubectl get nodes' to see this node join the cluster.
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
After successful completion we will see the new masters:
kubectl get nodes
[root@kub-master1-121 ~]# kubectl get nodes
NAME              STATUS     ROLES    AGE     VERSION
kub-master1-121   NotReady   master   8m35s   v1.17.3
kub-master2-122   NotReady   master   2m34s   v1.17.3
kub-master3-123   NotReady   master   4s      v1.17.3
At this point the layout is as follows:
all our masters currently point at the first master:
192.168.1.121
Let's change that so each master points at itself; to do this we need to edit a few files:
[root@kub-master2-122 ~]# sed -i 's|server: https://192.168.1.121:6443|server: https://192.168.1.122:6443|g' /root/.kube/config
[root@kub-master2-122 ~]# sed -i 's|server: https://192.168.1.121:6443|server: https://192.168.1.122:6443|g' /etc/kubernetes/kubelet.conf
[root@kub-master2-122 ~]# sed -i 's|server: https://192.168.1.121:6443|server: https://192.168.1.122:6443|g' /etc/kubernetes/admin.conf
[root@kub-master2-122 ~]# sed -i 's|server: https://192.168.1.121:6443|server: https://192.168.1.122:6443|g' /etc/kubernetes/scheduler.conf
[root@kub-master2-122 ~]# sed -i 's|server: https://192.168.1.121:6443|server: https://192.168.1.122:6443|g' /etc/kubernetes/controller-manager.conf
[root@kub-master2-122 ~]# systemctl restart kubelet && systemctl restart docker
[root@kub-master3-123 ~]# sed -i 's|server: https://192.168.1.121:6443|server: https://192.168.1.123:6443|g' /root/.kube/config
[root@kub-master3-123 ~]# sed -i 's|server: https://192.168.1.121:6443|server: https://192.168.1.123:6443|g' /etc/kubernetes/kubelet.conf
[root@kub-master3-123 ~]# sed -i 's|server: https://192.168.1.121:6443|server: https://192.168.1.123:6443|g' /etc/kubernetes/admin.conf
[root@kub-master3-123 ~]# sed -i 's|server: https://192.168.1.121:6443|server: https://192.168.1.123:6443|g' /etc/kubernetes/scheduler.conf
[root@kub-master3-123 ~]# sed -i 's|server: https://192.168.1.121:6443|server: https://192.168.1.123:6443|g' /etc/kubernetes/controller-manager.conf
[root@kub-master3-123 ~]# systemctl restart kubelet && systemctl restart docker
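A quick way to verify that every master now points at itself (not in the original notes):
grep 'server:' /root/.kube/config /etc/kubernetes/*.conf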
The uploaded certificates will be deleted after 2 hours, the join token after 24 hours, and the certificate-upload token after 1 hour.
Let's look at our tokens:
kubeadm token list
[root@kub-master1-121 ~]# kubeadm token list
TOKEN                     TTL   EXPIRES                     USAGES                   DESCRIPTION                                           EXTRA GROUPS
d1xgzd.nem9mqmqn32qz3sd   23h   2020-03-09T12:26:26+06:00   authentication,signing   <none>                                                system:bootstrappers:kubeadm:default-node-token
d92gl6.4bt0eut311v19w6g   1h    2020-03-08T14:26:25+06:00   <none>                   Proxy for managing TTL for the kubeadm-certs secret   <none>
Worker nodes are added with the following command:
kubeadm join 192.168.1.121:6443 --token 1n0mn1.0z0jfpqk6og5s8p0 \
--discovery-token-ca-cert-hash sha256:09ef212a4f96a7652ec9e3756e2abdb6d4cc08577636e46882e7e4b6092d1f21
The output will look roughly like this:
[root@kub-worker1-124 ~]# kubeadm join 192.168.1.121:6443 --token 1n0mn1.0z0jfpqk6og5s8p0 \
>     --discovery-token-ca-cert-hash sha256:09ef212a4f96a7652ec9e3756e2abdb6d4cc08577636e46882e7e4b6092d1f21
W0308 13:37:05.292301 1605 join.go:346] [preflight] WARNING: JoinControlPane.controlPlane settings will be ignored when control-plane flag is not set.
[preflight] Running pre-flight checks
	[WARNING Hostname]: hostname "kub-worker1-124" could not be reached
	[WARNING Hostname]: hostname "kub-worker1-124": lookup kub-worker1-124 on 8.8.8.8:53: no such host
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.17" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
Check that they have all been added:
[root@kub-master1-121 ~]# kubectl get nodes
NAME              STATUS     ROLES    AGE     VERSION
kub-master1-121   NotReady   master   14m     v1.17.3
kub-master2-122   NotReady   master   8m8s    v1.17.3
kub-master3-123   NotReady   master   5m38s   v1.17.3
kub-worker1-124   NotReady   <none>   115s    v1.17.3
kub-worker2-125   NotReady   <none>   43s     v1.17.3
kub-worker3-126   NotReady   <none>   8s      v1.17.3
All servers have been added, but they are in NotReady status. This is because there is no pod network yet. In this example we will use Calico as the network, so let's install it into the cluster.
Go to the official site; at the time of writing the latest version is:
https://docs.projectcalico.org/v3.11/introduction/
and download it.
The output looks like this:
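A likely way to fetch the manifest (the exact v3.11 manifest URL is an assumption; it is not given in the original), after which kubectl apply -f calico.yaml is run as shown below:
curl -O https://docs.projectcalico.org/v3.11/manifests/calico.yaml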
[root@kub-master1-121 ~]# kubectl apply -f calico.yaml
configmap/calico-config created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
daemonset.apps/calico-node created
serviceaccount/calico-node created
deployment.apps/calico-kube-controllers created
serviceaccount/calico-kube-controllers created
Or you can apply the latest version straight away:
kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml
Wait a couple of minutes and check:
kubectl get nodes
[root@kub-master1-121 ~]# kubectl get nodes
NAME              STATUS   ROLES    AGE     VERSION
kub-master1-121   Ready    master   18m     v1.17.3
kub-master2-122   Ready    master   12m     v1.17.3
kub-master3-123   Ready    master   10m     v1.17.3
kub-worker1-124   Ready    <none>   6m26s   v1.17.3
kub-worker2-125   Ready    <none>   5m14s   v1.17.3
kub-worker3-126   Ready    <none>   4m39s   v1.17.3
The basic installation is complete.
At this point the layout is as follows:
================================================
To generate a token for adding workers:
kubeadm token generate
kubeadm token create <generated-token> --print-join-command --ttl=24h
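If you only have a token and still need the discovery hash, it can be computed from the cluster CA certificate (the standard approach from the kubeadm documentation; not part of the original notes):
openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'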
=================================================
Running pods on master nodes
To allow pods to be scheduled on the master nodes, run the following command on any of the masters:
kubectl taint nodes --all node-role.kubernetes.io/master-
4. Installing HAProxy on the worker nodes
We now have a working cluster with three master nodes and three worker nodes.
The problem is that our worker nodes currently have no HA.
If you look at the kubelet config file, you will see that each worker node talks to only one of the three master nodes.
[root@kub-master1-121 ~]# cat /etc/kubernetes/kubelet.conf | grep server:
server: https://192.168.1.121:6443
With this configuration, if kub-master1-121 goes down, a worker node loses contact with the cluster API server. To make the cluster fully HA, we will install a load balancer (HAProxy) on each worker that will round-robin requests across the three master nodes, and in the kubelet configs on the worker nodes we will change the server address to 127.0.0.1:6443.
First, install HAProxy on each worker node.
[root@kub-worker1-124 ~]# yum -y install haproxy
[root@kub-worker2-125 ~]# yum -y install haproxy
[root@kub-worker3-126 ~]# yum -y install haproxy
[root@kub-worker1-124 ~]# mv /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg.backup
[root@kub-worker2-125 ~]# mv /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg.backup
[root@kub-worker3-126 ~]# mv /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg.backup
HAProxy logging is enabled as follows.
Add the following parameter:
cat /etc/sysconfig/rsyslog
SYSLOGD_OPTIONS="-c 2 -r"
In the rsyslog config, uncomment the following parameters:
cat /etc/rsyslog.conf
$ModLoad imudp
$UDPServerRun 514
$ModLoad imtcp
$InputTCPServerRun 514
local2.* /var/log/haproxy.log
The full config looks like this:
[root@kub-worker1-124 ~]# cat /etc/rsyslog.conf
# rsyslog configuration file

# For more information see /usr/share/doc/rsyslog-*/rsyslog_conf.html
# If you experience problems, see http://www.rsyslog.com/doc/troubleshoot.html

#### MODULES ####

# The imjournal module bellow is now used as a message source instead of imuxsock.
$ModLoad imuxsock # provides support for local system logging (e.g. via logger command)
$ModLoad imjournal # provides access to the systemd journal
#$ModLoad imklog # reads kernel messages (the same are read from journald)
#$ModLoad immark  # provides --MARK-- message capability

# Provides UDP syslog reception
$ModLoad imudp
$UDPServerRun 514

# Provides TCP syslog reception
$ModLoad imtcp
$InputTCPServerRun 514

#### GLOBAL DIRECTIVES ####

# Where to place auxiliary files
$WorkDirectory /var/lib/rsyslog

# Use default timestamp format
$ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat

# File syncing capability is disabled by default. This feature is usually not required,
# not useful and an extreme performance hit
#$ActionFileEnableSync on

# Include all config files in /etc/rsyslog.d/
$IncludeConfig /etc/rsyslog.d/*.conf

# Turn off message reception via local log socket;
# local messages are retrieved through imjournal now.
$OmitLocalLogging on

# File to store the position in the journal
$IMJournalStateFile imjournal.state

#### RULES ####

# Log all kernel messages to the console.
# Logging much else clutters up the screen.
#kern.*                                                 /dev/console

# Log anything (except mail) of level info or higher.
# Don't log private authentication messages!
*.info;mail.none;authpriv.none;cron.none                /var/log/messages

# The authpriv file has restricted access.
authpriv.*                                              /var/log/secure

# Log all the mail messages in one place.
mail.*                                                  -/var/log/maillog

# Log cron stuff
cron.*                                                  /var/log/cron

# Everybody gets emergency messages
*.emerg                                                 :omusrmsg:*

# Save news errors of level crit and higher in a special file.
uucp,news.crit                                          /var/log/spooler

# Save boot messages also to boot.log
local7.*                                                /var/log/boot.log

local2.*                                                /var/log/haproxy.log

# ### begin forwarding rule ###
# The statement between the begin ... end define a SINGLE forwarding
# rule. They belong together, do NOT split them. If you create multiple
# forwarding rules, duplicate the whole block!
# Remote Logging (we use TCP for reliable delivery)
#
# An on-disk queue is created for this action. If the remote host is
# down, messages are spooled to disk and sent when it is up again.
#$ActionQueueFileName fwdRule1 # unique name prefix for spool files
#$ActionQueueMaxDiskSpace 1g   # 1gb space limit (use as much as possible)
#$ActionQueueSaveOnShutdown on # save messages to disk on shutdown
#$ActionQueueType LinkedList   # run asynchronously
#$ActionResumeRetryCount -1    # infinite retries if host is down
# remote host is: name/ip:port, e.g. 192.168.0.1:514, port optional
#*.* @@remote-host:514
# ### end of the forwarding rule ###
The HAProxy config looks like this:
cat /etc/haproxy/haproxy.cfg
global
    log         127.0.0.1 local2
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon
    stats socket /var/lib/haproxy/stats

defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000

frontend front
    bind *:6443
    option tcplog
    mode tcp
    default_backend backend_servers

backend backend_servers
    mode tcp
    balance roundrobin
    server kub-master1-121 192.168.1.121:6443 check
    server kub-master2-122 192.168.1.122:6443 check
    server kub-master3-123 192.168.1.123:6443 check
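Before restarting HAProxy, the configuration can be validated with its built-in syntax check (an extra step, not in the original):
haproxy -c -f /etc/haproxy/haproxy.cfg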
Now copy the configs to the remaining workers:
[root@kub-worker1-124 ~]# scp /etc/rsyslog.conf 192.168.1.125:/etc/rsyslog.conf
[root@kub-worker1-124 ~]# scp /etc/rsyslog.conf 192.168.1.126:/etc/rsyslog.conf
[root@kub-worker1-124 ~]# scp /etc/haproxy/haproxy.cfg 192.168.1.125:/etc/haproxy/haproxy.cfg
[root@kub-worker1-124 ~]# scp /etc/haproxy/haproxy.cfg 192.168.1.126:/etc/haproxy/haproxy.cfg
Restart rsyslog, then enable and start HAProxy:
[root@kub-worker1-124 ~]# systemctl restart rsyslog
[root@kub-worker1-124 ~]# systemctl enable haproxy
[root@kub-worker1-124 ~]# systemctl start haproxy
[root@kub-worker2-125 ~]# systemctl restart rsyslog
[root@kub-worker2-125 ~]# systemctl enable haproxy
[root@kub-worker2-125 ~]# systemctl start haproxy
[root@kub-worker3-126 ~]# systemctl restart rsyslog
[root@kub-worker3-126 ~]# systemctl enable haproxy
[root@kub-worker3-126 ~]# systemctl start haproxy
Now we need to tell kubelet to talk to localhost instead of a master node. To do this, edit the server value in /etc/kubernetes/kubelet.conf
and, if it exists, in the file:
/etc/kubernetes/bootstrap-kubelet.conf
on all worker nodes.
The server value should look like this:
server: https://127.0.0.1:6443
Run on all workers:
sed -i 's|server: https://192.168.1.121:6443|server: https://127.0.0.1:6443|g' /etc/kubernetes/kubelet.conf
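If /etc/kubernetes/bootstrap-kubelet.conf is present (see above), adjust it the same way; a small sketch:
[ -f /etc/kubernetes/bootstrap-kubelet.conf ] && sed -i 's|server: https://192.168.1.121:6443|server: https://127.0.0.1:6443|g' /etc/kubernetes/bootstrap-kubelet.conf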
Then restart kubelet and Docker:
[root@kub-worker1-124 ~]# systemctl restart kubelet && systemctl restart docker
[root@kub-worker2-125 ~]# systemctl restart kubelet && systemctl restart docker
[root@kub-worker3-126 ~]# systemctl restart kubelet && systemctl restart docker
Check that everything is OK:
[root@kub-master1-121 ~]# kubectl get nodes
NAME              STATUS   ROLES    AGE    VERSION
kub-master1-121   Ready    master   3h1m   v1.17.3
kub-master2-122   Ready    master   169m   v1.17.3
kub-master3-123   Ready    master   169m   v1.17.3
kub-worker1-124   Ready    <none>   164m   v1.17.3
kub-worker2-125   Ready    <none>   157m   v1.17.3
kub-worker3-126   Ready    <none>   163m   v1.17.3
We don't have any applications in the cluster yet to test HA with, but we can stop kubelet on the first master node and make sure the cluster stays operational.
[root@kub-master1-121 ~]# systemctl stop kubelet && systemctl stop docker
Check from the first master node (its own API server is now down):
[root@kub-master1-121 ~]# kubectl get nodes
The connection to the server 192.168.1.121:6443 was refused - did you specify the right host or port?
As expected. Now check from the second master:
[root@kub-master2-122 ~]# kubectl get nodes
NAME              STATUS     ROLES    AGE     VERSION
kub-master1-121   NotReady   master   4h4m    v1.17.3
kub-master2-122   Ready      master   3h52m   v1.17.3
kub-master3-123   Ready      master   3h52m   v1.17.3
kub-worker1-124   Ready      <none>   3h47m   v1.17.3
kub-worker2-125   Ready      <none>   3h40m   v1.17.3
kub-worker3-126   Ready      <none>   3h46m   v1.17.3
A sign that everything went smoothly is the READY column, where the numbers before and after the slash are equal:
[root@kub-master1-121 ~]# kubectl get pods --all-namespaces
NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE
kube-system   calico-kube-controllers-5b644bc49c-p7pkf   1/1     Running   13         135m
kube-system   calico-node-6m7wj                          1/1     Running   1          4h12m
kube-system   calico-node-jbklw                          1/1     Running   21         4h12m
kube-system   calico-node-p57q2                          1/1     Running   2          4h12m
kube-system   calico-node-pmhwz                          1/1     Running   1          4h12m
kube-system   calico-node-qw5kp                          1/1     Running   3          4h12m
kube-system   calico-node-rrjdd                          1/1     Running   1          4h12m
kube-system   coredns-6955765f44-2gtxc                   1/1     Running   12         135m
kube-system   kube-apiserver-kub-master1-121             1/1     Running   3          4h36m
kube-system   kube-apiserver-kub-master2-122             1/1     Running   12         4h25m
kube-system   kube-apiserver-kub-master3-123             1/1     Running   2          4h25m
kube-system   kube-controller-manager-kub-master1-121    1/1     Running   3          4h36m
kube-system   kube-controller-manager-kub-master2-122    1/1     Running   12         4h25m
kube-system   kube-controller-manager-kub-master3-123    1/1     Running   2          4h25m
kube-system   kube-proxy-44ftq                           1/1     Running   2          4h25m
kube-system   kube-proxy-4zpn2                           1/1     Running   1          4h19m
kube-system   kube-proxy-7b8bw                           1/1     Running   1          4h13m
kube-system   kube-proxy-7jgpq                           1/1     Running   3          4h36m
kube-system   kube-proxy-9ghgd                           1/1     Running   12         4h25m
kube-system   kube-proxy-f6lgs                           1/1     Running   1          4h20m
kube-system   kube-scheduler-kub-master1-121             1/1     Running   4          4h36m
kube-system   kube-scheduler-kub-master2-122             1/1     Running   12         4h25m
kube-system   kube-scheduler-kub-master3-123             1/1     Running   3          4h25m
At this point the layout is as follows:
5. Installing an Ingress controller
An Ingress controller is a Kubernetes add-on that lets us expose our applications to the outside world. A detailed description is in the Kubernetes documentation. There are quite a few Ingress controllers; here I describe the one from Nginx. Documentation on running, configuring, and installing the Nginx Ingress controller is available on the official site.
Let's start the installation; all commands can be run from
kub-master1-121
Install the controller itself. This command is required regardless of the deployment environment:
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/mandatory.yaml
The contents of this file:
wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/mandatory.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

---
kind: ConfigMap
apiVersion: v1
metadata:
  name: nginx-configuration
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tcp-services
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

---
kind: ConfigMap
apiVersion: v1
metadata:
  name: udp-services
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nginx-ingress-serviceaccount
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: nginx-ingress-clusterrole
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
      - endpoints
      - nodes
      - pods
      - secrets
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - services
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - events
    verbs:
      - create
      - patch
  - apiGroups:
      - "extensions"
      - "networking.k8s.io"
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - "extensions"
      - "networking.k8s.io"
    resources:
      - ingresses/status
    verbs:
      - update

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
  name: nginx-ingress-role
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
      - pods
      - secrets
      - namespaces
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - configmaps
    resourceNames:
      # Defaults to "<election-id>-<ingress-class>"
      # Here: "<ingress-controller-leader>-<nginx>"
      # This has to be adapted if you change either parameter
      # when launching the nginx-ingress-controller.
      - "ingress-controller-leader-nginx"
    verbs:
      - get
      - update
  - apiGroups:
      - ""
    resources:
      - configmaps
    verbs:
      - create
  - apiGroups:
      - ""
    resources:
      - endpoints
    verbs:
      - get

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: nginx-ingress-role-nisa-binding
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: nginx-ingress-role
subjects:
  - kind: ServiceAccount
    name: nginx-ingress-serviceaccount
    namespace: ingress-nginx

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: nginx-ingress-clusterrole-nisa-binding
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: nginx-ingress-clusterrole
subjects:
  - kind: ServiceAccount
    name: nginx-ingress-serviceaccount
    namespace: ingress-nginx

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-ingress-controller
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: ingress-nginx
      app.kubernetes.io/part-of: ingress-nginx
  template:
    metadata:
      labels:
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/part-of: ingress-nginx
      annotations:
        prometheus.io/port: "10254"
        prometheus.io/scrape: "true"
    spec:
      # wait up to five minutes for the drain of connections
      terminationGracePeriodSeconds: 300
      serviceAccountName: nginx-ingress-serviceaccount
      nodeSelector:
        kubernetes.io/os: linux
      containers:
        - name: nginx-ingress-controller
          image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.30.0
          args:
            - /nginx-ingress-controller
            - --configmap=$(POD_NAMESPACE)/nginx-configuration
            - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services
            - --udp-services-configmap=$(POD_NAMESPACE)/udp-services
            - --publish-service=$(POD_NAMESPACE)/ingress-nginx
            - --annotations-prefix=nginx.ingress.kubernetes.io
          securityContext:
            allowPrivilegeEscalation: true
            capabilities:
              drop:
                - ALL
              add:
                - NET_BIND_SERVICE
            # www-data -> 101
            runAsUser: 101
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          ports:
            - name: http
              containerPort: 80
              protocol: TCP
            - name: https
              containerPort: 443
              protocol: TCP
          livenessProbe:
            failureThreshold: 3
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            initialDelaySeconds: 10
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 10
          readinessProbe:
            failureThreshold: 3
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 10
          lifecycle:
            preStop:
              exec:
                command:
                  - /wait-shutdown

---
apiVersion: v1
kind: LimitRange
metadata:
  name: ingress-nginx
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
spec:
  limits:
  - min:
      memory: 90Mi
      cpu: 100m
    type: Container
Next, the docs suggest creating the Ingress service yourself and exposing it via NodePort:
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.30.0/deploy/static/provider/baremetal/service-nodeport.yaml
service/ingress-nginx created
This file looks like this:
wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.30.0/deploy/static/provider/baremetal/service-nodeport.yaml
cat service-nodeport.yaml
apiVersion: v1
kind: Service
metadata:
  name: ingress-nginx
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
spec:
  type: NodePort
  ports:
    - name: http
      port: 80
      targetPort: 80
      protocol: TCP
    - name: https
      port: 443
      targetPort: 443
      protocol: TCP
  selector:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

---
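To confirm that the controller came up and to see which NodePorts were assigned, the standard checks are (not part of the original article):
kubectl get pods -n ingress-nginx
kubectl get svc -n ingress-nginx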
And that's it: we have a cluster that keeps working even if a master node goes down, with the layout we wanted from the start.
The cluster installation is complete, everything works.
==================================================
Troubleshooting:
Show full information about a pod:
kubectl describe pod -n kube-system calico-kube-controllers-5c45f5bd9f-plrfr
Show container logs:
kubectl logs -n ingress-nginx nginx-ingress-controller-5bb8fb4bb6-rm5pv
Show all pods in all namespaces:
kubectl get pods --all-namespaces
Show information about all nodes:
kubectl describe node
kubernetes commands
kubectl get pods && kubectl get services --all-namespaces
kubectl get nodes - list nodes
kubectl taint nodes --all node-role.kubernetes.io/master- - allow pods to run on master nodes.
kubectl get pod redis --watch
kubectl cluster-info dump - collect all cluster info (logs)
kubectl delete service,deployment nginx - delete the nginx service and deployment
kubectl get pod NAME --output=yaml - show the pod definition in YAML
kubectl get deploy elasticsearch -o yaml - show the deployment in YAML
kubectl describe pod NAME --namespace=NAME - show pod details
kubectl delete pod NAME --grace-period=0 --force
kubectl logs NAME - view container logs
kubectl logs -f NAME - follow container logs
kubectl exec -ti NAME bash - interactive bash session in the container
kubectl expose deployment nginx --type=NodePort --port=80 - expose the service externally
kubectl create -f ingress.yml - create an ingress entry point
kubectl delete service NAME -n NAMESPACE
kubectl delete service NAME
kubectl delete deployment NAME
kubectl get deployments - list Deployments
kubectl get rs - list ReplicaSets
kubectl rollout history deployment/nginx
kubectl set resources deployment/nginx -c=nginx --limits=cpu=200m,memory=512Mi
kubectl exec -ti -n monitoring prometheus-deployment-6bf45557bd-f5bvx -- /bin/sh
kubectl --namespace kube-system delete deployment kubernetes-dashboard
==================rollout and rollback===============
kubectl set image deployment.v1.apps/nginx-deployment nginx=nginx:1.91 --record=true
kubectl describe deployment - describe the deployment
kubectl rollout history deployment/nginx - show the rollout history
kubectl rollout history deployment/nginx --revision=1 - show details of revision 1
kubectl rollout undo deployment/nginx --to-revision=2 - roll back to revision 2
kubectl rollout status - check the rollout status
==================scale=================================
kubectl scale deployment/nginx --replicas=10
===================Cronjobs=============================
kubectl create -f cronjob.yml
kubectl get cronjob NAME
kubectl delete cronjob NAME
kubectl get jobs --watch
========================Configmap================================
kubectl get configmap NAME -n NAMESPACE
kubectl describe configmap NAME -n NAMESPACE
kubectl delete configmap NAME -n NAMESPACE
=======================proxy=====================================
kubectl proxy --address="192.168.0.105" -p 8001 --accept-hosts='^*$'
=====================clusterrole=================================
kubectl delete clusterrolebinding cluster-system-anonymous - delete a cluster role binding
kubectl get clusterrole - list cluster roles
kubectl get clusterrole system:NAME -o yaml
oc create clusterrolebinding grafana1 --clusterrole=cluster-monitoring-operator --user=grafana1
====================volumes======================================
kubectl get pv NAME - show PersistentVolumes
kubectl delete pv NAME - delete a PersistentVolume
kubectl delete pvc NAME - delete a PersistentVolumeClaim
kubectl get pvc NAME_PV - list claims for a PersistentVolume
kubectl describe pvc grafana-pv-volume - describe the PVC
======================secrets====================================
kubectl create secret generic db-user-pass --from-file=./username.txt --from-file=./password.txt - create the db-user-pass secret from files
kubectl get secrets - list secrets
kubectl describe secrets/db-user-pass
kubectl get secret db-user-pass -o yaml - show the login and password stored in the db-user-pass secret
kubectl -n kube-system describe secret deployment-controller-token-9pn6n - get a token for the dashboard
=====================network=====================================
kubectl expose deployment hello-world --type=LoadBalancer --name=my-service - create a Service of type LoadBalancer for the hello-world Deployment.
=====================load balancer===============================
1. Example of a load balancer across 5 pods.
kubectl run hello-world --replicas=5 --labels="run=load-balancer-example" --image=gcr.io/google-samples/node-hello:1.0 --port=8080
kubectl expose deployment hello-world --type=LoadBalancer --name=my-service
====================================kube-adm======================
kubectl -n kube-system get cm kubeadm-config -oyaml - view the kubeadm config (controlPlaneEndpoint) to change the kube-apiserver IP
kubectl -n kube-system edit cm kubeadm-config -oyaml - edit controlPlaneEndpoint to change the kube-apiserver IP
===================================INTERACTIVE CONTAINER===========================================
kubectl run -i --tty --rm debug --image=python:3.8.0b1-slim --restart=Never -- sh
=================================ROLES Troubleshooting=============================================
oc adm policy add-role-to-user admin nurlan -n load-test - grant the admin role on the load-test namespace to user nurlan
oc get rolebinding.rbac -n openshift-monitoring
oc describe rolebinding.rbac -n NAMESPACE
oc get clusterrole - list cluster roles
oc get clusterrolebind - list clusterrolebindings
oc create clusterrolebinding grafana1 --clusterrole=cluster-monitoring-operator --user=grafana1 - create a clusterrolebinding named grafana1 for user grafana1
oc get clusterrolebinding grafana1 - show the role binding for user grafana1