一. Software and image lists
1. Software list
| Software | Version |
| --- | --- |
| kubelet | 1.29.4-150500.2.1.x86_64 |
| kubernetes-cni | 1.3.0-150500.1.1.x86_64 |
| kubectl | 1.29.4-150500.2.1.x86_64 |
| kubeadm | 1.29.4-150500.2.1.x86_64 |
| containerd.io | 1.6.31-3.1.el7.x86_64 |
2. Image list
| Image | Tag |
| --- | --- |
| docker.io/calico/cni | v3.26.1 |
| docker.io/calico/node | v3.26.1 |
| registry.cn-hangzhou.aliyuncs.com/google_containers/coredns | v1.11.1 |
| registry.cn-hangzhou.aliyuncs.com/google_containers/etcd | 3.5.12-0 |
| registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver | v1.29.4 |
| registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager | v1.29.4 |
| registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy | v1.29.4 |
| registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler | v1.29.4 |
| registry.cn-hangzhou.aliyuncs.com/google_containers/pause | 3.9 |
| registry.cn-hangzhou.aliyuncs.com/google_containers/kube-webhook-certgen | v1.4.0 |
| registry.cn-hangzhou.aliyuncs.com/google_containers/metrics-server | v0.5.2 |
| registry.cn-hangzhou.aliyuncs.com/google_containers/nginx-ingress-controller | v1.10.0 |
二. Host information
| Hostname | IP address | OS version |
| --- | --- | --- |
| baseos | 192.168.3.35 | CentOS 7.9.2009 |
| master1 | 192.168.3.34 | CentOS 7.9.2009 |
| master2 | 192.168.3.36 | CentOS 7.9.2009 |
| master3 | 192.168.3.38 | CentOS 7.9.2009 |
| node1 | 192.168.1.114 | CentOS 7.9.2009 |
| node2 | 192.168.1.115 | CentOS 7.9.2009 |

baseos (192.168.3.35) hosts the nginx load balancer that is later used as the controlPlaneEndpoint.
三. Deploy nginx as a load balancer for the k8s apiserver
sudo yum install -y epel-release
sudo yum install -y nginx
sudo yum install -y nginx-all-modules.noarch
sudo systemctl start nginx
sudo systemctl enable nginx
cat > /etc/nginx/nginx.conf << "EOF"
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;

include /usr/share/nginx/modules/*.conf;

events {
    worker_connections 1024;
}

# Layer-4 (stream) load balancing for the master apiservers
stream {
    log_format main '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';
    access_log /var/log/nginx/k8s-access.log main;

    upstream k8s-apiserver {
        server 192.168.3.34:6443;
        server 192.168.3.36:6443;
        server 192.168.3.38:6443;
    }

    server {
        listen 6443;  # if nginx shares a host with a master node, this port must not be 6443 or it will clash with the local apiserver
        proxy_pass k8s-apiserver;
    }
}

http {
    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';
    access_log /var/log/nginx/access.log main;

    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;

    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    server {
        listen 80 default_server;
        server_name _;
        location / {
        }
    }
}
EOF
sudo systemctl restart nginx
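Before moving on, it is worth confirming that the layer-4 proxy is listening and forwarding. A minimal check, not part of the original steps (the /healthz endpoint only answers once an apiserver is actually running behind the upstream, so a connection error or 401 at this stage is expected):
ss -lntp | grep 6443
curl -k https://192.168.3.35:6443/healthz
tail -n 20 /var/log/nginx/k8s-access.log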
四. Base system configuration (all nodes)
1. Disable SELinux, the firewall, and swap
systemctl stop firewalld
systemctl disable firewalld
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
sed -ri 's/.*swap.*/#&/' /etc/fstab
swapoff -a
Set the hostname on each node (master1 is shown here; run the matching command on the other nodes):
hostnamectl set-hostname master1
cat >> /etc/hosts << EOF
192.168.3.35 baseos
192.168.3.34 master1
192.168.3.36 master2
192.168.3.38 master3
192.168.1.114 node1
192.168.1.115 node2
EOF
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.ipv4.tcp_tw_recycle = 0
vm.swappiness = 0
vm.overcommit_memory = 1
vm.panic_on_oom = 0
fs.inotify.max_user_instances = 8192
fs.inotify.max_user_watches = 1048576
fs.file-max = 52706963
fs.nr_open = 52706963
net.ipv6.conf.all.disable_ipv6 = 1
net.netfilter.nf_conntrack_max = 2310720
EOF
sysctl --system
cat << EOF | sudo tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
sudo modprobe overlay
sudo modprobe br_netfilter
The bridge-nf-call and ip_forward settings required by the Kubernetes docs are already included in the k8s.conf written above; re-apply them now that br_netfilter is loaded:
sudo sysctl --system
lsmod | grep br_netfilter
lsmod | grep overlay
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward
yum install chrony -y
systemctl start chronyd && systemctl enable chronyd && chronyc sources
ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
echo 'LANG="en_US.UTF-8"' | sudo tee -a /etc/profile && source /etc/profile
yum -y install ipset ipvsadm
mkdir -p /etc/sysconfig/ipvsadm
cat > /etc/sysconfig/ipvsadm/ipvs.modules << EOF
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF
chmod 755 /etc/sysconfig/ipvsadm/ipvs.modules && bash /etc/sysconfig/ipvsadm/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack
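The ip_vs modules loaded above only matter if kube-proxy is switched to IPVS mode; kubeadm defaults to iptables. If IPVS is the goal, one option (an optional sketch, not part of the original procedure) is to append a KubeProxyConfiguration document to the kubeadm-init.yaml generated in section 七 before running kubeadm init:
cat >> kubeadm-init.yaml << EOF
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
EOF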
五. Install the container runtime (all nodes)
yum -y remove docker*
rpm -qa|grep docker
sudo curl -o /etc/yum.repos.d/docker-ce.repo https://download.docker.com/linux/centos/docker-ce.repo
sudo sed -i 's+download.docker.com+mirrors.aliyun.com/docker-ce+' /etc/yum.repos.d/docker-ce.repo
yum install -y containerd.io
containerd config default | sudo tee /etc/containerd/config.toml
sudo sed -i 's/\bSystemdCgroup = false\b/SystemdCgroup = true/' /etc/containerd/config.toml
sudo sed -i 's/sandbox_image = ".\+"/sandbox_image = "registry.cn-hangzhou.aliyuncs.com\/google_containers\/pause:3.9"/' /etc/containerd/config.toml
systemctl start containerd
systemctl enable containerd
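A quick sanity check that containerd is running and that the two sed edits above actually landed in the generated config (suggested verification only):
containerd --version
ctr version
grep SystemdCgroup /etc/containerd/config.toml
grep sandbox_image /etc/containerd/config.toml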
六. Install kubeadm, kubelet and kubectl (all nodes)
cat << EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://pkgs.k8s.io/core:/stable:/v1.29/rpm/
enabled=1
gpgcheck=1
gpgkey=https://pkgs.k8s.io/core:/stable:/v1.29/rpm/repodata/repomd.xml.key
exclude=kubelet kubeadm kubectl cri-tools kubernetes-cni
EOF
yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
sudo sed -i 's/^KUBELET_EXTRA_ARGS=.*/KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"/' /etc/sysconfig/kubelet
sudo systemctl enable --now kubelet
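Optionally, pre-pull the control-plane images on every node so that kubeadm init/join does not have to download them during bootstrap; this uses the same aliyun mirror that the kubeadm config below points at:
kubeadm config images pull --image-repository registry.cn-hangzhou.aliyuncs.com/google_containers --kubernetes-version v1.29.4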
七. kubeadm initialization (master1 node)
1. The certificates kubeadm generates are only valid for one year by default, so download the Kubernetes source, extend the certificate validity, and build a patched kubeadm
rpm --import https://mirror.go-repo.io/centos/RPM-GPG-KEY-GO-REPO
curl -s https://mirror.go-repo.io/centos/go-repo.repo | tee /etc/yum.repos.d/go-repo.repo
yum install golang -y
wget https://github.com/kubernetes/kubernetes/archive/refs/tags/v1.29.4.tar.gz
tar -zxvf v1.29.4.tar.gz
cd kubernetes-1.29.4/staging/src/k8s.io/client-go/util/cert/
vi cert.go
Change the NotAfter line in NewSelfSignedCACert to:
NotAfter: now.Add(duration365d * 100).UTC(),
cd ../../../../../../cmd/kubeadm/app/constants/
vi constants.go
Change the CertificateValidity constant to:
CertificateValidity = time.Hour * 24 * 365 * 100
cd ../../../../
make WHAT=cmd/kubeadm GOFLAGS=-v
mv /usr/bin/kubeadm /usr/bin/kubeadm.old
cp _output/bin/kubeadm /usr/bin/
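Confirm that the rebuilt binary is the one now on PATH (it should report a v1.29.4 build):
kubeadm version -o short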
kubeadm config print init-defaults > kubeadm-init.yaml
2. Configure kubeadm-init.yaml
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.3.34
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  name: master1
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
  certSANs:
  - 192.168.3.34
  - 192.168.3.35
  - 192.168.3.36
  - 192.168.3.38
  - master1
  - master2
  - master3
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "192.168.3.35:6443"
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: 1.29.4
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  podSubnet: 30.100.0.0/16
scheduler: {}
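Optionally, run a dry run first to catch typos or field errors in kubeadm-init.yaml without modifying the node:
kubeadm init --config=kubeadm-init.yaml --dry-run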
3. Initialize the master1 node
kubeadm init --config=kubeadm-init.yaml
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:

  kubeadm join 192.168.3.35:6443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:bc586fecb737f0e560b7a151d6efe289209a12affe5a8d5c5f980df6182de21e \
        --control-plane

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.3.35:6443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:bc586fecb737f0e560b7a151d6efe289209a12affe5a8d5c5f980df6182de21e
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
kubectl get node
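Since kubeadm was rebuilt for a longer certificate validity, check that the generated certificates really did pick up the extended lifetime:
kubeadm certs check-expiration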
八. Install the network plugin (master1 node)
wget https://raw.githubusercontent.com/projectcalico/calico/v3.26.1/manifests/calico.yaml
vim calico.yaml
            - name: CALICO_IPV4POOL_CIDR
              value: "30.100.0.0/16"
            - name: IP_AUTODETECTION_METHOD
              value: "interface=eth0"
kubectl apply -f calico.yaml
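Wait for the calico pods to become Ready; the nodes move from NotReady to Ready once the CNI is functional:
kubectl -n kube-system get pods -l k8s-app=calico-node -o wide
kubectl get nodes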
九. Join the other master nodes to the cluster
On master2, create the directories that will receive the certificates:
mkdir -p /etc/kubernetes/pki/etcd && mkdir -p ~/.kube/
On master1, copy the CA material and admin kubeconfig to master2:
scp /etc/kubernetes/admin.conf root@192.168.3.36:/etc/kubernetes/
scp /etc/kubernetes/pki/ca.* root@192.168.3.36:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/sa.* root@192.168.3.36:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/front-proxy-ca.* root@192.168.3.36:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/etcd/ca.* root@192.168.3.36:/etc/kubernetes/pki/etcd/
On master3, create the same directories:
mkdir -p /etc/kubernetes/pki/etcd && mkdir -p ~/.kube/
On master1, copy the CA material and admin kubeconfig to master3:
scp /etc/kubernetes/admin.conf root@192.168.3.38:/etc/kubernetes/
scp /etc/kubernetes/pki/ca.* root@192.168.3.38:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/sa.* root@192.168.3.38:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/front-proxy-ca.* root@192.168.3.38:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/etcd/ca.* root@192.168.3.38:/etc/kubernetes/pki/etcd/
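As an alternative to copying the PKI files by hand, kubeadm can distribute the control-plane certificates itself: run the upload-certs phase on master1 and pass the printed key when joining. This is a sketch of the built-in mechanism rather than the procedure used above; <certificate-key> stands for the value printed by the first command:
kubeadm init phase upload-certs --upload-certs
kubeadm join 192.168.3.35:6443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:bc586fecb737f0e560b7a151d6efe289209a12affe5a8d5c5f980df6182de21e \
        --control-plane --certificate-key <certificate-key>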
Run the control-plane join on master2 and master3:
kubeadm join 192.168.3.35:6443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:bc586fecb737f0e560b7a151d6efe289209a12affe5a8d5c5f980df6182de21e \
        --control-plane
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
kubectl get node
To widen the NodePort range, add the following flag to the kube-apiserver static pod manifest on each master node:
vim /etc/kubernetes/manifests/kube-apiserver.yaml
    - --service-node-port-range=1-65535
systemctl restart kubelet
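Confirm that the running apiserver picked up the new flag (the static pod is re-created automatically when its manifest changes):
kubectl -n kube-system get pods -l component=kube-apiserver -o yaml | grep service-node-port-range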
十. Join the worker nodes to the cluster
kubeadm join 192.168.3.35:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:bc586fecb737f0e560b7a151d6efe289209a12affe5a8d5c5f980df6182de21e
十一. Check cluster status
[root@master1 ~]# kubectl get nodes
NAME      STATUS   ROLES           AGE     VERSION
master1   Ready    control-plane   18h     v1.29.4
master2   Ready    control-plane   17h     v1.29.4
master3   Ready    control-plane   6h      v1.29.4
node1     Ready    <none>          6h21m   v1.29.4
node2     Ready    <none>          6h20m   v1.29.4
十二. Install ingress-nginx
wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/cloud/deploy.yaml
sed -i 's+registry.k8s.io/ingress-nginx/controller:v1.10.0@sha256:42b3f0e5d0846876b1791cd3afeb5f1cbbe4259d6f35651dcc1b5c980925379c+registry.cn-hangzhou.aliyuncs.com/google_containers/nginx-ingress-controller:v1.10.0+' deploy.yaml
sed -i 's+registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.0@sha256:44d1d0e9f19c63f58b380c5fddaca7cf22c7cee564adeff365225a5df5ef3334+registry.cn-hangzhou.aliyuncs.com/google_containers/kube-webhook-certgen:v1.4.0+' deploy.yaml
kubectl apply -f deploy.yaml
The resulting deploy.yaml (after the image substitutions above) is reproduced below for reference:
apiVersion: v1
kind: Namespace
metadata:
  labels:
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
  name: ingress-nginx
---
apiVersion: v1
automountServiceAccountToken: true
kind: ServiceAccount
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.10.0
  name: ingress-nginx
  namespace: ingress-nginx
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.10.0
  name: ingress-nginx-admission
  namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.10.0
  name: ingress-nginx
  namespace: ingress-nginx
rules:
- apiGroups:
  - ""
  resources:
  - namespaces
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - configmaps
  - pods
  - secrets
  - endpoints
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - services
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - networking.k8s.io
  resources:
  - ingresses
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - networking.k8s.io
  resources:
  - ingresses/status
  verbs:
  - update
- apiGroups:
  - networking.k8s.io
  resources:
  - ingressclasses
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - coordination.k8s.io
  resourceNames:
  - ingress-nginx-leader
  resources:
  - leases
  verbs:
  - get
  - update
- apiGroups:
  - coordination.k8s.io
  resources:
  - leases
  verbs:
  - create
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
  - patch
- apiGroups:
  - discovery.k8s.io
  resources:
  - endpointslices
  verbs:
  - list
  - watch
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.10.0
  name: ingress-nginx-admission
  namespace: ingress-nginx
rules:
- apiGroups:
  - ""
  resources:
  - secrets
  verbs:
  - get
  - create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.10.0
  name: ingress-nginx
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  - endpoints
  - nodes
  - pods
  - secrets
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - coordination.k8s.io
  resources:
  - leases
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - services
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - networking.k8s.io
  resources:
  - ingresses
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
  - patch
- apiGroups:
  - networking.k8s.io
  resources:
  - ingresses/status
  verbs:
  - update
- apiGroups:
  - networking.k8s.io
  resources:
  - ingressclasses
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - discovery.k8s.io
  resources:
  - endpointslices
  verbs:
  - list
  - watch
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.10.0
  name: ingress-nginx-admission
rules:
- apiGroups:
  - admissionregistration.k8s.io
  resources:
  - validatingwebhookconfigurations
  verbs:
  - get
  - update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.10.0
  name: ingress-nginx
  namespace: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: ingress-nginx
subjects:
- kind: ServiceAccount
  name: ingress-nginx
  namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.10.0
  name: ingress-nginx-admission
  namespace: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
  name: ingress-nginx-admission
  namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.10.0
  name: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: ingress-nginx
subjects:
- kind: ServiceAccount
  name: ingress-nginx
  namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.10.0
  name: ingress-nginx-admission
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
  name: ingress-nginx-admission
  namespace: ingress-nginx
---
apiVersion: v1
data:
  allow-snippet-annotations: "false"
kind: ConfigMap
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.10.0
  name: ingress-nginx-controller
  namespace: ingress-nginx
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.10.0
  name: ingress-nginx-controller
  namespace: ingress-nginx
spec:
  externalTrafficPolicy: Local
  ipFamilies:
  - IPv4
  ipFamilyPolicy: SingleStack
  ports:
  - appProtocol: http
    name: http
    port: 80
    protocol: TCP
    targetPort: http
  - appProtocol: https
    name: https
    port: 443
    protocol: TCP
    targetPort: https
  selector:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
  type: LoadBalancer
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.10.0
  name: ingress-nginx-controller-admission
  namespace: ingress-nginx
spec:
  ports:
  - appProtocol: https
    name: https-webhook
    port: 443
    targetPort: webhook
  selector:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
  type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.10.0
  name: ingress-nginx-controller
  namespace: ingress-nginx
spec:
  minReadySeconds: 0
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app.kubernetes.io/component: controller
      app.kubernetes.io/instance: ingress-nginx
      app.kubernetes.io/name: ingress-nginx
  strategy:
    rollingUpdate:
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      labels:
        app.kubernetes.io/component: controller
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/part-of: ingress-nginx
        app.kubernetes.io/version: 1.10.0
    spec:
      containers:
      - args:
        - /nginx-ingress-controller
        - --publish-service=$(POD_NAMESPACE)/ingress-nginx-controller
        - --election-id=ingress-nginx-leader
        - --controller-class=k8s.io/ingress-nginx
        - --ingress-class=nginx
        - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
        - --validating-webhook=:8443
        - --validating-webhook-certificate=/usr/local/certificates/cert
        - --validating-webhook-key=/usr/local/certificates/key
        - --enable-metrics=false
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: LD_PRELOAD
          value: /usr/local/lib/libmimalloc.so
        image: registry.cn-hangzhou.aliyuncs.com/google_containers/nginx-ingress-controller:v1.10.0
        imagePullPolicy: IfNotPresent
        lifecycle:
          preStop:
            exec:
              command:
              - /wait-shutdown
        livenessProbe:
          failureThreshold: 5
          httpGet:
            path: /healthz
            port: 10254
            scheme: HTTP
          initialDelaySeconds: 10
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 1
        name: controller
        ports:
        - containerPort: 80
          name: http
          protocol: TCP
        - containerPort: 443
          name: https
          protocol: TCP
        - containerPort: 8443
          name: webhook
          protocol: TCP
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /healthz
            port: 10254
            scheme: HTTP
          initialDelaySeconds: 10
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 1
        resources:
          requests:
            cpu: 100m
            memory: 90Mi
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - ALL
          readOnlyRootFilesystem: false
          runAsNonRoot: true
          runAsUser: 101
          seccompProfile:
            type: RuntimeDefault
        volumeMounts:
        - mountPath: /usr/local/certificates/
          name: webhook-cert
          readOnly: true
      dnsPolicy: ClusterFirst
      nodeSelector:
        kubernetes.io/os: linux
      serviceAccountName: ingress-nginx
      terminationGracePeriodSeconds: 300
      volumes:
      - name: webhook-cert
        secret:
          secretName: ingress-nginx-admission
---
apiVersion: batch/v1
kind: Job
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.10.0
  name: ingress-nginx-admission-create
  namespace: ingress-nginx
spec:
  template:
    metadata:
      labels:
        app.kubernetes.io/component: admission-webhook
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/part-of: ingress-nginx
        app.kubernetes.io/version: 1.10.0
      name: ingress-nginx-admission-create
    spec:
      containers:
      - args:
        - create
        - --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc
        - --namespace=$(POD_NAMESPACE)
        - --secret-name=ingress-nginx-admission
        env:
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        image: registry.cn-hangzhou.aliyuncs.com/google_containers/kube-webhook-certgen:v1.4.0
        imagePullPolicy: IfNotPresent
        name: create
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            drop:
            - ALL
          readOnlyRootFilesystem: true
          runAsNonRoot: true
          runAsUser: 65532
          seccompProfile:
            type: RuntimeDefault
      nodeSelector:
        kubernetes.io/os: linux
      restartPolicy: OnFailure
      serviceAccountName: ingress-nginx-admission
---
apiVersion: batch/v1
kind: Job
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.10.0
  name: ingress-nginx-admission-patch
  namespace: ingress-nginx
spec:
  template:
    metadata:
      labels:
        app.kubernetes.io/component: admission-webhook
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/part-of: ingress-nginx
        app.kubernetes.io/version: 1.10.0
      name: ingress-nginx-admission-patch
    spec:
      containers:
      - args:
        - patch
        - --webhook-name=ingress-nginx-admission
        - --namespace=$(POD_NAMESPACE)
        - --patch-mutating=false
        - --secret-name=ingress-nginx-admission
        - --patch-failure-policy=Fail
        env:
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        image: registry.cn-hangzhou.aliyuncs.com/google_containers/kube-webhook-certgen:v1.4.0
        imagePullPolicy: IfNotPresent
        name: patch
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            drop:
            - ALL
          readOnlyRootFilesystem: true
          runAsNonRoot: true
          runAsUser: 65532
          seccompProfile:
            type: RuntimeDefault
      nodeSelector:
        kubernetes.io/os: linux
      restartPolicy: OnFailure
      serviceAccountName: ingress-nginx-admission
---
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.10.0
  name: nginx
spec:
  controller: k8s.io/ingress-nginx
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
  labels:
    app.kubernetes.io/component: admission-webhook
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.10.0
  name: ingress-nginx-admission
webhooks:
- admissionReviewVersions:
  - v1
  clientConfig:
    service:
      name: ingress-nginx-controller-admission
      namespace: ingress-nginx
      path: /networking/v1/ingresses
  failurePolicy: Fail
  matchPolicy: Equivalent
  name: validate.nginx.ingress.kubernetes.io
  rules:
  - apiGroups:
    - networking.k8s.io
    apiVersions:
    - v1
    operations:
    - CREATE
    - UPDATE
    resources:
    - ingresses
  sideEffects: None
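To smoke-test the controller, a throwaway deployment, Service and Ingress can be created and queried with a Host header. The names demo-web and demo.example.com are placeholders used only for this check, not part of the original deployment; on bare metal the LoadBalancer Service will stay Pending, so use a node IP plus the NodePort shown by kubectl get svc:
kubectl get pods -n ingress-nginx
kubectl get svc -n ingress-nginx
kubectl create deployment demo-web --image=nginx:alpine
kubectl expose deployment demo-web --port=80
cat << EOF | kubectl apply -f -
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: demo-web
spec:
  ingressClassName: nginx
  rules:
  - host: demo.example.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: demo-web
            port:
              number: 80
EOF
curl -H "Host: demo.example.com" http://<node-ip>:<http-nodeport>/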
十三. Deploy metrics-server
Prepare components.yaml (the upstream metrics-server v0.5.2 manifest, with the image switched to the aliyun mirror and the --kubelet-insecure-tls flag included), then apply it; the apply command is shown after the manifest below:
vim components.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
    rbac.authorization.k8s.io/aggregate-to-view: "true"
  name: system:aggregated-metrics-reader
rules:
- apiGroups:
  - metrics.k8s.io
  resources:
  - pods
  - nodes
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  - nodes/stats
  - namespaces
  - configmaps
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  ports:
  - name: https
    port: 443
    protocol: TCP
    targetPort: https
  selector:
    k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  strategy:
    rollingUpdate:
      maxUnavailable: 0
  template:
    metadata:
      labels:
        k8s-app: metrics-server
    spec:
      containers:
      - args:
        - --cert-dir=/tmp
        - --secure-port=4443
        - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
        - --kubelet-use-node-status-port
        - --metric-resolution=15s
        - --kubelet-insecure-tls
        image: registry.cn-hangzhou.aliyuncs.com/google_containers/metrics-server:v0.5.2
        imagePullPolicy: IfNotPresent
        livenessProbe:
          failureThreshold: 3
          httpGet:
            path: /livez
            port: https
            scheme: HTTPS
          periodSeconds: 10
        name: metrics-server
        ports:
        - containerPort: 4443
          name: https
          protocol: TCP
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /readyz
            port: https
            scheme: HTTPS
          initialDelaySeconds: 20
          periodSeconds: 10
        resources:
          requests:
            cpu: 100m
            memory: 200Mi
        securityContext:
          readOnlyRootFilesystem: true
          runAsNonRoot: true
          runAsUser: 1000
        volumeMounts:
        - mountPath: /tmp
          name: tmp-dir
      nodeSelector:
        kubernetes.io/os: linux
      priorityClassName: system-cluster-critical
      serviceAccountName: metrics-server
      volumes:
      - emptyDir: {}
        name: tmp-dir
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  labels:
    k8s-app: metrics-server
  name: v1beta1.metrics.k8s.io
spec:
  group: metrics.k8s.io
  groupPriorityMinimum: 100
  insecureSkipTLSVerify: true
  service:
    name: metrics-server
    namespace: kube-system
  version: v1beta1
  versionPriority: 100
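Apply the manifest and check that the metrics API responds; kubectl top usually starts returning values within a minute or two:
kubectl apply -f components.yaml
kubectl -n kube-system get pods -l k8s-app=metrics-server
kubectl top nodes
kubectl top pods -n kube-system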