Kubernetes Basics – Deploying a Kubernetes Cluster with kubeadm

Environment

Hostname   Internal IP     Public IP   Load balancer
master1    192.168.3.29    x.x.x.x     192.168.3.33:6443 (*:6443)
master2    192.168.3.30    n/a
master3    192.168.3.31    n/a
node1      192.168.3.27    n/a         n/a
node2      192.168.3.32    n/a         n/a
node3      192.168.3.28    n/a         n/a

The load balancer at 192.168.3.33 forwards port 6443 to the three master nodes.

The host operating system is CentOS 7.9.

Configuring the base environment

cat /etc/hosts
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4

192.168.3.29 master1
192.168.3.30 master2
192.168.3.31 master3
192.168.3.27 node1
192.168.3.32 node2
192.168.3.28 node3
# populate known_hosts for all nodes
ssh-keyscan -t rsa -f /etc/hosts >> ~/.ssh/known_hosts

Configure passwordless SSH from master1 to all other nodes

ssh-keygen
yum install -y sshpass
# "password" is a placeholder for the actual root password of the nodes
cat /etc/hosts | grep 192 | awk '{system("sshpass -p password ssh-copy-id root@" $2)}'

Configuring Ansible

yum install -y ansible
# Append the following host groups to /etc/ansible/hosts

cat <<EOF >> /etc/ansible/hosts
[all]
master1
master2
master3
node1
node2
node3

[master]
master1
master2
master3

[node]
node1
node2
node3
EOF

# Test
ansible all -m shell -a uname

Mount the data disk of each host under /data

ansible all -m shell -a 'parted /dev/vdb mklabel gpt && parted /dev/vdb mkpart primary ext4 0% 100% && mkfs.ext4 /dev/vdb1'
ansible all -m shell -a 'echo "/dev/vdb1 /data ext4 defaults 1 1" >> /etc/fstab'
ansible all -m shell -a 'mkdir -p /data && mount -a'
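As a quick check that the data disk is really mounted on every node:

ansible all -m shell -a 'df -h /data'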

Copy the hosts file to all other nodes

ansible all -m copy -a 'src=/etc/hosts dest=/etc/hosts'

Preparing an HTTP proxy

Some package repositories and image registries are slow or unreachable when installing packages and pulling images, so an HTTP proxy server is useful. With a proxy in place, we can deploy applications and components from their upstream manifests without rewriting image references. The simplest approach is to rent a server overseas from any cloud provider and install squid on it.

# Make sure master1 can reach the overseas server's SSH service
# On the overseas server:
yum install -y squid
systemctl start squid
# On master1:
# The empty bind address in -L below makes the tunnel listen on all of master1's
# interfaces, so other nodes can use http://master1:8128 as their proxy. If the
# port only binds to 127.0.0.1, set GatewayPorts yes in the ssh client config
# (/etc/ssh/ssh_config) or use an explicit 0.0.0.0 bind address.
ssh -Nf -L :8128:localhost:3128 username@<overseas-server-ip>
# Check that the local tunnel port is listening
netstat -tnpl | grep 8128
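A simple way to confirm that the tunnel really works as an HTTP proxy (assuming squid's default configuration allows this client):

curl -x http://localhost:8128 -sI https://download.docker.com | head -n 1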

Installing and configuring Docker

ansible all -m shell -a 'yum install -y yum-utils'
ansible all -m shell -a 'yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo'
ansible all -m shell -a 'yum install -y docker-ce docker-ce-cli containerd.io'

# If yum cannot reach the package repository, install through the HTTP proxy instead
ansible all -m shell -a 'https_proxy=http://master1:8128 http_proxy=http://master1:8128 yum install -y docker-ce docker-ce-cli containerd.io'
ansible all -m shell -a 'systemctl enable docker'

Configuring the Docker daemon

ansible all -m shell -a 'mkdir -p /etc/docker'
cat <<EOF > /etc/docker/daemon.json
{
  "data-root": "/data/lib/docker",
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "20m",
    "max-file": "5"
  }
}
EOF
ansible all -m shell -a 'mkdir -p /data/lib/docker'
ansible all -m copy -a 'src=/etc/docker/daemon.json dest=/etc/docker/daemon.json'

Configure dockerd to use the proxy

mkdir -p /etc/systemd/system/docker.service.d/
cat <<EOF > /etc/systemd/system/docker.service.d/http-proxy.conf
[Service]
Environment="HTTP_PROXY=http://master1:8128"
Environment="HTTPS_PROXY=http://master1:8128"
Environment="NO_PROXY=192.168.0.0/16,127.0.0.1,registry.local"
EOF

ansible all -m shell -a 'mkdir -p /etc/systemd/system/docker.service.d/'
ansible all -m copy -a 'src=/etc/systemd/system/docker.service.d/http-proxy.conf dest=/etc/systemd/system/docker.service.d/http-proxy.conf'
ansible all -m shell -a 'systemctl daemon-reload'
ansible all -m shell -a 'systemctl restart docker'
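To confirm that dockerd picked up the data root, the systemd cgroup driver and the proxy settings, docker info can be grepped on every node:

ansible all -m shell -a 'docker info 2>/dev/null | grep -iE "root dir|cgroup driver|proxy"'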

System configuration

# Let iptables see bridged traffic (load br_netfilter at boot)
cat <<EOF | tee /etc/modules-load.d/k8s.conf
br_netfilter
EOF

# kube-proxy also sets these automatically when it starts
cat <<EOF | tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
ansible all -m copy -a 'src=/etc/modules-load.d/k8s.conf dest=/etc/modules-load.d/k8s.conf'
ansible all -m copy -a 'src=/etc/sysctl.d/k8s.conf dest=/etc/sysctl.d/k8s.conf'
ansible all -m shell -a 'modprobe br_netfilter'
ansible all -m shell -a 'sysctl --system'
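Optionally verify that the bridge sysctls are now in effect on all nodes:

ansible all -m shell -a 'sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables'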

Configuring the Kubernetes yum repository

cat <<EOF | tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-\$basearch
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
exclude=kubelet kubeadm kubectl
EOF
ansible all -m copy -a 'src=/etc/yum.repos.d/kubernetes.repo dest=/etc/yum.repos.d/kubernetes.repo'
ansible all -m shell -a 'setenforce 0'
ansible all -m shell -a "sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config"
ansible all -m shell -a 'https_proxy=http://master1:8128 http_proxy=http://master1:8128 yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes'
ansible all -m shell -a 'systemctl enable --now kubelet'
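A quick version check across the nodes helps catch mismatched packages before running kubeadm:

ansible all -m shell -a 'kubeadm version -o short && kubelet --version'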

Deploying Kubernetes

Note that some cloud providers' load balancers (for example Tencent Cloud and Alibaba Cloud) have a loopback problem: a backend node cannot reach itself through the load balancer's address. That is why kubeadm init below first uses master1's own address as the control-plane endpoint, and the endpoint is switched to the load balancer address 192.168.3.33:6443 afterwards.

Installing Kubernetes

On master1:

kubeadm -v=5 init --control-plane-endpoint "192.168.3.29:6443" --upload-certs --pod-network-cidr=10.248.0.0/13 --service-cidr=172.17.0.0/16 --apiserver-cert-extra-sans=192.168.3.33,k8s.dev
# After the control-plane initialization completes
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config

# Change the apiserver address in /etc/kubernetes/kubelet.conf to https://192.168.3.33:6443
# then run: systemctl restart kubelet

# On master2 and master3
kubeadm -v=5 join 192.168.3.33:6443 --token 8oi1o6.uoz4dggqrdbddno1 --discovery-token-ca-cert-hash sha256:0ce53869ef91625973548d6995294871c6631ea183b55c8c7e2c5599d60e0270 --control-plane --certificate-key 77ce31db5efbd44cd9d6ae10443b87db485ae8c8548435562769aecba229f3ee
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config

# On master1
kubectl edit cm -n kube-system kubeadm-config
# change controlPlaneEndpoint to 192.168.3.33:6443

# On all worker nodes
kubeadm join 192.168.3.33:6443 --token 8oi1o6.uoz4dggqrdbddno1 --discovery-token-ca-cert-hash sha256:0ce53869ef91625973548d6995294871c6631ea183b55c8c7e2c5599d60e0270
# Worker nodes do not get /etc/kubernetes/admin.conf, so there is nothing to copy
# into ~/.kube here; run kubectl from the master nodes.
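Back on master1, all six nodes should now be registered; they stay NotReady until the network plugin is installed in a later step:

kubectl get nodes -o wide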

Configuring kubectl auto-completion

ansible master -m shell -a 'yum install -y bash-completion'
ansible master -m shell -a 'kubectl completion bash > /root/.kube/completion.bash.inc'
ansible master -m shell -a 'echo "source /root/.kube/completion.bash.inc" >> /root/.bash_profile'

Installing the network plugin (Canal)

curl https://docs.projectcalico.org/manifests/canal.yaml -O
# Edit canal.yaml and change the pod CIDR settings:
#            - name: CALICO_IPV4POOL_CIDR
#              value: "10.248.0.0/13"

#  net-conf.json: |
#    {
#      "Network": "10.248.0.0/13",
#      "Backend": {
#        "Type": "vxlan"
#      }
#    }
kubectl apply -f canal.yaml
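The nodes turn Ready once the canal pods are running; progress can be watched with something like:

kubectl -n kube-system get pods -o wide | grep -E 'canal|calico|coredns'
kubectl get nodes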

Installing Helm

https_proxy=http://localhost:8128 curl https://get.helm.sh/helm-v3.6.3-linux-amd64.tar.gz -O
tar xzf helm-v3.6.3-linux-amd64.tar.gz
# mode=0755 keeps the binary executable after the copy
ansible master -m copy -a 'src=linux-amd64/helm dest=/usr/bin/ mode=0755'

helm completion bash > ~/.helm_completion.bash.inc
echo 'source ~/.helm_completion.bash.inc' >> ~/.bash_profile
source ~/.bash_profile
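Sanity-check that the binary is in place on all master nodes:

ansible master -m shell -a 'helm version --short'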

Deploying nfs-provisioner

ansible all -m shell -a 'https_proxy=http://master1:8128 yum install -y nfs-utils'
ssh node1
systemctl enable nfs-server
mkdir -p /data/nfs
chmod 777 /data/nfs
echo '/data/nfs *(rw)' >> /etc/exports
systemctl restart nfs-server
exit
helm repo add nfs-subdir-external-provisioner https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner/
https_proxy=http://master1:8128 no_proxy=192.168.3.29 helm install -n kube-system nfs-subdir-external-provisioner nfs-subdir-external-provisioner/nfs-subdir-external-provisioner --set nfs.server=192.168.3.27 --set nfs.path=/data/nfs

cat <<EOF > storageclass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
provisioner: cluster.local/nfs-subdir-external-provisioner # or choose another name; must match the deployment's PROVISIONER_NAME env var
parameters:
  pathPattern: "\${.PVC.namespace}/\${.PVC.annotations.nfs.io/storage-path}" # waits for nfs.io/storage-path annotation, if not specified will accept as empty string.
  onDelete: delete
EOF
kubectl apply -f storageclass.yaml
kubectl patch storageclass managed-nfs-storage -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
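managed-nfs-storage should now be marked as the default class. Note that the chart installed above (with default values) also creates a class named nfs-client, which is what the test PVC below references:

kubectl get storageclass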

cat <<EOF > test-pvc.yaml
kind: Pod
apiVersion: v1
metadata:
  name: test-pod
spec:
  containers:
  - name: test-pod
    image: gcr.io/google_containers/busybox:1.24
    command:
      - "/bin/sh"
    args:
      - "-c"
      - "touch /mnt/SUCCESS && exit 0 || exit 1"
    volumeMounts:
      - name: nfs-pvc
        mountPath: "/mnt"
  restartPolicy: "Never"
  volumes:
    - name: nfs-pvc
      persistentVolumeClaim:
        claimName: test-claim
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-claim
spec:
  storageClassName: nfs-client
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Mi
EOF
kubectl apply -f test-pvc.yaml
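If provisioning works, the claim binds, the pod completes, and a SUCCESS file appears under the export on node1. The exact subdirectory name depends on the provisioner's naming and pathPattern, so the find below is only indicative:

kubectl get pvc test-claim
kubectl get pod test-pod
ssh node1 "find /data/nfs -name SUCCESS"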

Deploying metrics-server

kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
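On kubeadm clusters the kubelet serving certificates are usually self-signed, so metrics-server may fail to scrape the nodes with the stock manifest. A common workaround for lab clusters (an assumption here, not something the upstream manifest enables) is to add --kubelet-insecure-tls and then verify with kubectl top:

kubectl -n kube-system patch deployment metrics-server --type=json \
  -p='[{"op":"add","path":"/spec/template/spec/containers/0/args/-","value":"--kubelet-insecure-tls"}]'
kubectl top nodes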

Deploying the dashboard

kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.3.1/aio/deploy/recommended.yaml

cat <<EOF > admin-user.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
EOF
kubectl apply -f admin-user.yaml
kubectl -n kubernetes-dashboard get secret $(kubectl -n kubernetes-dashboard get sa/admin-user -o jsonpath="{.secrets[0].name}") -o go-template="{{.data.token | base64decode}}"
kubectl proxy
# Open http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/ in a browser

Deploying MetalLB

kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.10.2/manifests/namespace.yaml
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.10.2/manifests/metallb.yaml
cat <<EOF > metallb-config.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: default
      protocol: layer2
      addresses:
      - 192.168.64.1-192.168.79.244
EOF
kubectl apply -f metallb-config.yaml
kubectl create service loadbalancer test --tcp=80:80
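The test service should receive an EXTERNAL-IP from the 192.168.64.1-192.168.79.244 pool; since no pods back it yet, this only verifies address allocation:

kubectl get svc test
# clean up afterwards
kubectl delete svc test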

Offline installation

An offline (air-gapped) installation additionally requires the Docker and Kubernetes packages, a private image registry, the container images, and adjustments to parameters and charts.
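A minimal sketch of collecting those artifacts on a machine that still has internet access (the /data/offline paths are placeholders; adjust versions and registry names to your environment):

# download the RPMs for later offline installation (yumdownloader comes with yum-utils;
# temporarily drop the exclude= line from kubernetes.repo so the kube packages are visible)
yumdownloader --resolve --destdir=/data/offline/rpms docker-ce docker-ce-cli containerd.io
yumdownloader --resolve --destdir=/data/offline/rpms kubelet kubeadm kubectl
# pull and archive the control-plane images needed by this kubeadm release
kubeadm config images list > /data/offline/images.txt
kubeadm config images pull
docker save $(cat /data/offline/images.txt) -o /data/offline/k8s-images.tar
# on the offline nodes: install the RPMs, then docker load -i k8s-images.tar,
# or push the images to a private registry such as registry.local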
