
K8S Installation Notes
  1. Prepare the environment

    1.1 Stop the firewall and disable it at boot

    systemctl stop firewalld && systemctl disable firewalld
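
    A quick sanity check, assuming firewalld is the only firewall on the host, to confirm it is really stopped and will stay off:

    firewall-cmd --state             # prints "not running"
    systemctl is-enabled firewalld   # prints "disabled"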

    1.2 Disable swap and SELinux

    swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab

    setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/sysconfig/selinux
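
    To verify both changes took hold (a minimal check; note getenforce reports Permissive until the next reboot, since the sed edit only affects future boots):

    free -h | grep -i swap   # swap total should be 0
    getenforce               # Permissive now, Disabled after a reboot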

    1.3 Install base packages

    yum -y install yum-utils git wget vim net-tools curl ntpdate ipset

    1.4 Tune kernel parameters

    cat > /etc/sysctl.d/k8s.conf <<EOF
    net.bridge.bridge-nf-call-ip6tables = 1
    net.bridge.bridge-nf-call-iptables = 1
    net.ipv4.ip_forward = 1
    net.ipv4.tcp_tw_recycle = 0
    # disable ipv6
    net.ipv6.conf.all.disable_ipv6 = 1
    # keep swapping to a minimum
    vm.swappiness=0
    EOF
    modprobe br_netfilter
    sysctl -p /etc/sysctl.d/k8s.conf
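
    Note that modprobe br_netfilter does not persist across reboots, so the bridge-nf sysctls above would fail on the next boot; a sketch of making the module stick, using systemd's modules-load mechanism:

    cat > /etc/modules-load.d/k8s.conf <<EOF
    br_netfilter
    EOF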

    1.5 Sync the clock

    timedatectl set-timezone Asia/Shanghai && ntpdate ntp1.aliyun.com
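
    ntpdate is a one-shot sync, so the clock will drift again over time; one option (illustrative, reusing the same Aliyun NTP server) is a periodic cron entry:

    (crontab -l 2>/dev/null; echo '*/30 * * * * /usr/sbin/ntpdate ntp1.aliyun.com >/dev/null 2>&1') | crontab -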

    1.6 Prerequisites for enabling ipvs in kube-proxy

    cat > /etc/sysconfig/modules/ipvs.modules <<EOF
    #!/bin/bash
    modprobe -- ip_vs
    modprobe -- ip_vs_rr
    modprobe -- ip_vs_wrr
    modprobe -- ip_vs_sh
    modprobe -- nf_conntrack_ipv4
    EOF
    chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
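
    One caveat: on kernels 4.19 and later, nf_conntrack_ipv4 was merged into nf_conntrack, so the last modprobe above fails there; a hedged fallback line for the script:

    # kernels >= 4.19 no longer ship the ipv4-specific module
    modprobe -- nf_conntrack_ipv4 2>/dev/null || modprobe -- nf_conntrack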

    1.7 Install docker-ce

    # add the yum repo
    yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
    # list the installable versions
    yum list docker-ce --showduplicates | sort -r
    yum install -y docker-ce-18.06.3.ce-3.el7
    systemctl enable docker
    systemctl start docker
    docker info

    1.8 Change the docker cgroup driver to systemd

    cat > /etc/docker/daemon.json <<EOF
    {
      "exec-opts": ["native.cgroupdriver=systemd"]
    }
    EOF
    systemctl daemon-reload
    systemctl restart docker
    docker info | grep Cgroup
    # Cgroup Driver: systemd
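
    Since daemon.json is being written here anyway, a registry mirror can ride along in the same file to speed up image pulls; a sketch (the mirror URL is just an example, substitute your own accelerator address):

    cat > /etc/docker/daemon.json <<EOF
    {
      "exec-opts": ["native.cgroupdriver=systemd"],
      "registry-mirrors": ["https://registry.docker-cn.com"]
    }
    EOF
    systemctl restart docker
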
  2. Install Kubeadm

    cat <<EOF > /etc/yum.repos.d/kubernetes.repo
    [kubernetes]
    name=Kubernetes
    baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
    enabled=1
    gpgcheck=0
    repo_gpgcheck=0
    gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
    EOF
    yum makecache
    yum install -y kubelet kubeadm kubectl
    systemctl enable kubelet
    systemctl start kubelet
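
    Installing with no version pins pulls the latest packages, which can end up newer than the kubernetesVersion in the init config below (the nodes here indeed report v1.18.3 against a v1.18.0 config); to keep everything in lockstep the versions can be pinned, e.g.:

    yum install -y kubelet-1.18.0 kubeadm-1.18.0 kubectl-1.18.0

    It is also normal for kubelet to crash-loop at this point; it settles down once kubeadm init (or join) has generated its config.
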
  3. Initialize the master node

    kubeadm config print init-defaults > kubeadm-config.yaml
    # edit advertiseAddress to this host's IP (the full config used is shown below)
    kubeadm init --config=kubeadm-config.yaml --upload-certs | tee kubeadm-init.log
    apiVersion: kubeadm.k8s.io/v1beta2
    bootstrapTokens:
    - groups:
      - system:bootstrappers:kubeadm:default-node-token
      token: abcdef.0123456789abcdef
      ttl: 24h0m0s
      usages:
      - signing
      - authentication
    kind: InitConfiguration
    localAPIEndpoint:
      advertiseAddress: 192.168.19.200
      bindPort: 6443
    nodeRegistration:
      criSocket: /var/run/dockershim.sock
      name: master.kube.ldsec.co
      taints:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
    ---
    apiServer:
      timeoutForControlPlane: 4m0s
    apiVersion: kubeadm.k8s.io/v1beta2
    certificatesDir: /etc/kubernetes/pki
    clusterName: kubernetes
    controllerManager: {}
    dns:
      type: CoreDNS
    etcd:
      local:
        dataDir: /var/lib/etcd
    imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
    kind: ClusterConfiguration
    kubernetesVersion: v1.18.0
    networking:
      dnsDomain: cluster.local
      podSubnet: 10.244.0.0/16
      serviceSubnet: 10.96.0.0/12
    scheduler: {}
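
    Once init succeeds, kubeadm prints follow-up instructions in its output; the usual kubeconfig setup from that output is:

    mkdir -p $HOME/.kube
    sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config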

    Check the cluster status and confirm that every component is Healthy:

    [root@master ~]# kubectl get cs
    NAME                 STATUS    MESSAGE             ERROR
    scheduler            Healthy   ok
    controller-manager   Healthy   ok
    etcd-0               Healthy   {"health":"true"}

    Jump ahead to step 5 first and install a network plugin.

  4. Initialize the worker nodes

    kubeadm join 192.168.19.200:6443 --token abcdef.0123456789abcdef \
    > --discovery-token-ca-cert-hash sha256:8dc469c0ba7b95c579ff102ca05b595db59d561b67d77ea77243df1ad9e11566
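
    The token and hash come from the tail of kubeadm-init.log; if the token has expired (the ttl above is 24h) or the log is lost, a fresh join command can be printed on the master:

    kubeadm token create --print-join-command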

    Back on the master node, check how the join went:

    [root@master ~]# kubectl get node
    NAME                   STATUS     ROLES    AGE   VERSION
    master.kube.ldsec.co   Ready      master   18m   v1.18.3
    node1.kube.ldsec.co    NotReady   <none>   17s   v1.18.3
    node2.kube.ldsec.co    NotReady   <none>   15s   v1.18.3

    After waiting a while:

    [root@master ~]# kubectl get node
    NAME                   STATUS   ROLES    AGE   VERSION
    master.kube.ldsec.co   Ready    master   36m   v1.18.3
    node1.kube.ldsec.co    Ready    <none>   17m   v1.18.3
    node2.kube.ldsec.co    Ready    <none>   17m   v1.18.3

    Run kubectl get pod -A -o wide to see in detail where each component is deployed.

  5. Install the Pod Network (on the master; nodes pick it up automatically when they join)

    [root@master ~]# curl -O https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
      % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                     Dload  Upload   Total   Spent    Left  Speed
    100 14366  100 14366    0     0   9618      0  0:00:01  0:00:01 --:--:--  9615
    [root@master ~]# kubectl apply -f kube-flannel.yml
    podsecuritypolicy.policy/psp.flannel.unprivileged created
    clusterrole.rbac.authorization.k8s.io/flannel created
    clusterrolebinding.rbac.authorization.k8s.io/flannel created
    serviceaccount/flannel created
    configmap/kube-flannel-cfg created
    daemonset.apps/kube-flannel-ds-amd64 created
    daemonset.apps/kube-flannel-ds-arm64 created
    daemonset.apps/kube-flannel-ds-arm created
    daemonset.apps/kube-flannel-ds-ppc64le created
    daemonset.apps/kube-flannel-ds-s390x created
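
    This works without editing the manifest because flannel's default net-conf matches the podSubnet chosen in kubeadm-config.yaml; the relevant fragment of the kube-flannel-cfg ConfigMap looks roughly like:

    net-conf.json: |
      {
        "Network": "10.244.0.0/16",
        "Backend": {
          "Type": "vxlan"
        }
      }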

    Make sure all the Pods are in the Running state:

    [root@master ~]# kubectl get pod -A
    NAMESPACE     NAME                                           READY   STATUS    RESTARTS   AGE
    kube-system   coredns-546565776c-gsd8s                       1/1     Running   0          9m48s
    kube-system   coredns-546565776c-wbrqz                       1/1     Running   0          9m48s
    kube-system   etcd-master.kube.ldsec.co                      1/1     Running   0          10m
    kube-system   kube-apiserver-master.kube.ldsec.co            1/1     Running   0          10m
    kube-system   kube-controller-manager-master.kube.ldsec.co   1/1     Running   0          10m
    kube-system   kube-flannel-ds-amd64-xqc9w                    1/1     Running   0          2m16s
    kube-system   kube-proxy-x224k                               1/1     Running   0          9m48s
    kube-system   kube-scheduler-master.kube.ldsec.co            1/1     Running   0          10m
  6. Enable IPVS

    1. Install the admin tool

      yum install -y ipvsadm

    2. Edit the kube-proxy config

      kubectl edit cm kube-proxy -n kube-system

    3. Change mode: "" to mode: "ipvs" and save.

    4. Then restart the kube-proxy pods on each node:

      [root@master ~]# kubectl get pod -n kube-system | grep kube-proxy | awk '{system("kubectl delete pod "$1" -n kube-system")}'
      pod "kube-proxy-56cml" deleted
      pod "kube-proxy-rwhtx" deleted
      pod "kube-proxy-x224k" deleted
      [root@master ~]# kubectl get pod -n kube-system | grep kube-proxy
      kube-proxy-4rvxk   1/1   Running   0   20s
      kube-proxy-l76jv   1/1   Running   0   21s
      kube-proxy-m2wtd   1/1   Running   0   23s
      [root@master ~]# kubectl logs kube-proxy-4rvxk -n kube-system
      I0529 08:31:50.726783 1 node.go:136] Successfully retrieved node IP: 192.168.19.200
      I0529 08:31:50.726846 1 server_others.go:259] Using ipvs Proxier.
      W0529 08:31:50.727174 1 proxier.go:429] IPVS scheduler not specified, use rr by default
      I0529 08:31:50.727354 1 server.go:583] Version: v1.18.0
      I0529 08:31:50.727804 1 conntrack.go:52] Setting nf_conntrack_max to 131072
      I0529 08:31:50.728947 1 config.go:315] Starting service config controller
      I0529 08:31:50.728982 1 shared_informer.go:223] Waiting for caches to sync for service config
      I0529 08:31:50.729024 1 config.go:133] Starting endpoints config controller
      I0529 08:31:50.729037 1 shared_informer.go:223] Waiting for caches to sync for endpoints config
      I0529 08:31:50.829591 1 shared_informer.go:230] Caches are synced for endpoints config
      I0529 08:31:50.829591 1 shared_informer.go:230] Caches are synced for service config

      The log prints "Using ipvs Proxier", which confirms that ipvs mode is now enabled.
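
      Besides the log line, the ipvsadm tool installed earlier can dump the virtual-server table that kube-proxy actually programmed:

      ipvsadm -Ln   # each ClusterIP:port should appear as a virtual server with rr scheduling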