# 注意! # 若虚拟机是进行克隆的那么网卡的UUID会重复 # 若UUID重复需要重新生成新的UUID # UUID重复无法获取到IPV6地址 # # 查看当前的网卡列表和 UUID: # nmcli con show # 删除要更改 UUID 的网络连接: # nmcli con delete uuid <原 UUID> # 重新生成 UUID: # nmcli con add type ethernet ifname <接口名称> con-name <新名称> # 重新启用网络连接: # nmcli con up <新名称> # 更改网卡的UUID ssh root@192.168.1.31 "nmcli con delete uuid 708a1497-2192-43a5-9f03-2ab936fb3c44;nmcli con add type ethernet ifname eth0 con-name eth0;nmcli con up eth0" ssh root@192.168.1.32 "nmcli con delete uuid 708a1497-2192-43a5-9f03-2ab936fb3c44;nmcli con add type ethernet ifname eth0 con-name eth0;nmcli con up eth0" ssh root@192.168.1.33 "nmcli con delete uuid 708a1497-2192-43a5-9f03-2ab936fb3c44;nmcli con add type ethernet ifname eth0 con-name eth0;nmcli con up eth0" ssh root@192.168.1.34 "nmcli con delete uuid 708a1497-2192-43a5-9f03-2ab936fb3c44;nmcli con add type ethernet ifname eth0 con-name eth0;nmcli con up eth0" ssh root@192.168.1.35 "nmcli con delete uuid 708a1497-2192-43a5-9f03-2ab936fb3c44;nmcli con add type ethernet ifname eth0 con-name eth0;nmcli con up eth0" # 参数解释 # # ssh ssh root@192.168.1.31 # 使用SSH登录到IP为192.168.1.31的主机,使用root用户身份。 # # nmcli con delete uuid 708a1497-2192-43a5-9f03-2ab936fb3c44 # 删除 UUID 为 708a1497-2192-43a5-9f03-2ab936fb3c44 的网络连接,这是 NetworkManager 中一种特定网络配置的唯一标识符。 # # nmcli con add type ethernet ifname eth0 con-name eth0 # 添加一种以太网连接类型,并指定接口名为 eth0,连接名称也为 eth0。 # # nmcli con up eth0 # 开启 eth0 这个网络连接。 # # 简单来说,这个命令的作用是删除一个特定的网络连接配置,并添加一个名为 eth0 的以太网连接,然后启用这个新的连接。 # 修改静态的IPv4地址 ssh root@192.168.1.104 "nmcli con mod eth0 ipv4.addresses 192.168.1.31/24; nmcli con mod eth0 ipv4.gateway 192.168.1.1; nmcli con mod eth0 ipv4.method manual; nmcli con mod eth0 ipv4.dns "8.8.8.8"; nmcli con up eth0" ssh root@192.168.1.106 "nmcli con mod eth0 ipv4.addresses 192.168.1.32/24; nmcli con mod eth0 ipv4.gateway 192.168.1.1; nmcli con mod eth0 ipv4.method manual; nmcli con mod eth0 ipv4.dns "8.8.8.8"; nmcli con up eth0" ssh root@192.168.1.107 "nmcli con mod eth0 ipv4.addresses 
192.168.1.33/24; nmcli con mod eth0 ipv4.gateway 192.168.1.1; nmcli con mod eth0 ipv4.method manual; nmcli con mod eth0 ipv4.dns "8.8.8.8"; nmcli con up eth0" ssh root@192.168.1.109 "nmcli con mod eth0 ipv4.addresses 192.168.1.34/24; nmcli con mod eth0 ipv4.gateway 192.168.1.1; nmcli con mod eth0 ipv4.method manual; nmcli con mod eth0 ipv4.dns "8.8.8.8"; nmcli con up eth0" ssh root@192.168.1.110 "nmcli con mod eth0 ipv4.addresses 192.168.1.35/24; nmcli con mod eth0 ipv4.gateway 192.168.1.1; nmcli con mod eth0 ipv4.method manual; nmcli con mod eth0 ipv4.dns "8.8.8.8"; nmcli con up eth0" # 参数解释 # # ssh root@192.168.1.154 # 使用SSH登录到IP为192.168.1.154的主机,使用root用户身份。 # # "nmcli con mod eth0 ipv4.addresses 192.168.1.31/24" # 修改eth0网络连接的IPv4地址为192.168.1.31,子网掩码为 24。 # # "nmcli con mod eth0 ipv4.gateway 192.168.1.1" # 修改eth0网络连接的IPv4网关为192.168.1.1。 # # "nmcli con mod eth0 ipv4.method manual" # 将eth0网络连接的IPv4配置方法设置为手动。 # # "nmcli con mod eth0 ipv4.dns "8.8.8.8" # 将eth0网络连接的IPv4 DNS服务器设置为 8.8.8.8。 # # "nmcli con up eth0" # 启动eth0网络连接。 # # 总体来说,这条命令是通过SSH远程登录到指定的主机,并使用网络管理命令 (nmcli) 修改eth0网络连接的配置,包括IP地址、网关、配置方法和DNS服务器,并启动该网络连接。 # 没有IPv6选择不配置即可 ssh root@192.168.1.31 "nmcli con mod eth0 ipv6.addresses fc00:43f4:1eea:1::10; nmcli con mod eth0 ipv6.gateway fc00:43f4:1eea:1::1; nmcli con mod eth0 ipv6.method manual; nmcli con mod eth0 ipv6.dns "2400:3200::1"; nmcli con up eth0" ssh root@192.168.1.32 "nmcli con mod eth0 ipv6.addresses fc00:43f4:1eea:1::20; nmcli con mod eth0 ipv6.gateway fc00:43f4:1eea:1::1; nmcli con mod eth0 ipv6.method manual; nmcli con mod eth0 ipv6.dns "2400:3200::1"; nmcli con up eth0" ssh root@192.168.1.33 "nmcli con mod eth0 ipv6.addresses fc00:43f4:1eea:1::30; nmcli con mod eth0 ipv6.gateway fc00:43f4:1eea:1::1; nmcli con mod eth0 ipv6.method manual; nmcli con mod eth0 ipv6.dns "2400:3200::1"; nmcli con up eth0" ssh root@192.168.1.34 "nmcli con mod eth0 ipv6.addresses fc00:43f4:1eea:1::40; nmcli con mod eth0 ipv6.gateway fc00:43f4:1eea:1::1; nmcli con mod 
eth0 ipv6.method manual; nmcli con mod eth0 ipv6.dns "2400:3200::1"; nmcli con up eth0" ssh root@192.168.1.35 "nmcli con mod eth0 ipv6.addresses fc00:43f4:1eea:1::50; nmcli con mod eth0 ipv6.gateway fc00:43f4:1eea:1::1; nmcli con mod eth0 ipv6.method manual; nmcli con mod eth0 ipv6.dns "2400:3200::1"; nmcli con up eth0" # 参数解释 # # ssh root@192.168.1.31 # 通过SSH连接到IP地址为192.168.1.31的远程主机,使用root用户进行登录。 # # "nmcli con mod eth0 ipv6.addresses fc00:43f4:1eea:1::10" # 使用nmcli命令修改eth0接口的IPv6地址为fc00:43f4:1eea:1::10。 # # "nmcli con mod eth0 ipv6.gateway fc00:43f4:1eea:1::1" # 使用nmcli命令修改eth0接口的IPv6网关为fc00:43f4:1eea:1::1。 # # "nmcli con mod eth0 ipv6.method manual" # 使用nmcli命令将eth0接口的IPv6配置方法修改为手动配置。 # # "nmcli con mod eth0 ipv6.dns "2400:3200::1" # 使用nmcli命令设置eth0接口的IPv6 DNS服务器为2400:3200::1。 # # "nmcli con up eth0" # 使用nmcli命令启动eth0接口。 # # 这个命令的目的是在远程主机上配置eth0接口的IPv6地址、网关、配置方法和DNS服务器,并启动eth0接口。 # 查看网卡配置 # nmcli device show eth0 # nmcli con show eth0 [root@localhost ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth0 TYPE=Ethernet PROXY_METHOD=none BROWSER_ONLY=no BOOTPROTO=none DEFROUTE=yes IPV4_FAILURE_FATAL=no IPV6INIT=yes IPV6_AUTOCONF=yes IPV6_DEFROUTE=yes IPV6_FAILURE_FATAL=no IPV6_ADDR_GEN_MODE=stable-privacy NAME=eth0 UUID=2aaddf95-3f36-4a48-8626-b55ebf7f53e7 DEVICE=eth0 ONBOOT=yes IPADDR=192.168.1.31 PREFIX=24 GATEWAY=192.168.1.1 DNS1=8.8.8.8 [root@localhost ~]# # 参数解释 # # TYPE=Ethernet # 指定连接类型为以太网。 # # PROXY_METHOD=none # 指定不使用代理方法。 # # BROWSER_ONLY=no # 指定不仅仅在浏览器中使用代理。 # # BOOTPROTO=none # 指定自动分配地址的方式为无(即手动配置IP地址)。 # # DEFROUTE=yes # 指定默认路由开启。 # # IPV4_FAILURE_FATAL=no # 指定IPv4连接失败时不宣告严重错误。 # # IPV6INIT=yes # 指定启用IPv6。 # # IPV6_AUTOCONF=no # 指定不自动配置IPv6地址。 # # IPV6_DEFROUTE=yes # 指定默认IPv6路由开启。 # # IPV6_FAILURE_FATAL=no # 指定IPv6连接失败时不宣告严重错误。 # # IPV6_ADDR_GEN_MODE=stable-privacy # 指定IPv6地址生成模式为稳定隐私模式。 # # NAME=eth0 # 指定设备名称为eth0。 # # UUID=424fd260-c480-4899-97e6-6fc9722031e8 # 指定设备的唯一标识符。 # # DEVICE=eth0 # 指定设备名称为eth0。 # # ONBOOT=yes # 指定开机自动启用这个连接。 # # 
IPADDR=192.168.1.31 # 指定IPv4地址为192.168.1.31。 # # PREFIX=24 # 指定IPv4地址的子网掩码为24。 # # GATEWAY=192.168.8.1 # 指定IPv4的网关地址为192.168.8.1。 # # DNS1=8.8.8.8 # 指定首选DNS服务器为8.8.8.8。 # # IPV6ADDR=fc00:43f4:1eea:1::10/128 # 指定IPv6地址为fc00:43f4:1eea:1::10,子网掩码为128。 # # IPV6_DEFAULTGW=fc00:43f4:1eea:1::1 # 指定IPv6的默认网关地址为fc00:43f4:1eea:1::1。 # # DNS2=2400:3200::1 # 指定备用DNS服务器为2400:3200::1。
# HAProxy health-check fragment, generated via a here-doc — hence the \$
# escapes, which stop the outer shell from expanding the variables while the
# script file is being written. Probes up to 3 times, 1 second apart, for a
# running haproxy process.
err=0
for k in \$(seq 1 3)
do
    # pgrep prints matching PIDs; empty output means haproxy is not running
    check_code=\$(pgrep haproxy)
    if [[ \$check_code == "" ]]; then
        # haproxy missing on this probe: count the failure and retry after 1s
        err=\$(expr \$err + 1)
        sleep 1
        continue
    else
        # haproxy found: clear the failure counter and stop probing
        err=0
        break
    fi
done

# If all 3 probes failed, stop keepalived so the VIP fails over to a healthy
# node; otherwise report success.
if [[ \$err != "0" ]]; then
    echo "systemctl stop keepalived"
    /usr/bin/systemctl stop keepalived
    exit 1
else
    exit 0
fi
EOF

# Grant execute permission to the script
[root@k8s-master01 ~]# kubeadm init --config=kubeadm.yaml [init] Using Kubernetes version: v1.30.0 [preflight] Running pre-flight checks [preflight] Pulling images required for setting up a Kubernetes cluster [preflight] This might take a minute or two, depending on the speed of your internet connection [preflight] You can also perform this action in beforehand using 'kubeadm config images pull' W0505 03:06:30.873603 10998 checks.go:844] detected that the sandbox image "m.daocloud.io/registry.k8s.io/pause:3.8" of the container runtime is inconsistent with that used by kubeadm.It is recommended to use "registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.9" as the CRI sandbox image. [certs] Using certificateDir folder "/etc/kubernetes/pki" [certs] Generating "ca" certificate and key [certs] Generating "apiserver" certificate and key [certs] apiserver serving cert is signed for DNS names [k8s-master01 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local x.oiox.cn] and IPs [10.96.0.1 192.168.1.31 192.168.1.36 192.168.1.32 192.168.1.33 192.168.1.34 192.168.1.35 192.168.1.60 127.0.0.1] [certs] Generating "apiserver-kubelet-client" certificate and key [certs] Generating "front-proxy-ca" certificate and key [certs] Generating "front-proxy-client" certificate and key [certs] Generating "etcd/ca" certificate and key [certs] Generating "etcd/server" certificate and key [certs] etcd/server serving cert is signed for DNS names [k8s-master01 localhost] and IPs [192.168.1.31 127.0.0.1 ::1] [certs] Generating "etcd/peer" certificate and key [certs] etcd/peer serving cert is signed for DNS names [k8s-master01 localhost] and IPs [192.168.1.31 127.0.0.1 ::1] [certs] Generating "etcd/healthcheck-client" certificate and key [certs] Generating "apiserver-etcd-client" certificate and key [certs] Generating "sa" key and public key [kubeconfig] Using kubeconfig folder "/etc/kubernetes" W0505 03:06:33.121345 10998 endpoint.go:57] [endpoint] 
WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address [kubeconfig] Writing "admin.conf" kubeconfig file W0505 03:06:33.297328 10998 endpoint.go:57] [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address [kubeconfig] Writing "super-admin.conf" kubeconfig file W0505 03:06:33.403541 10998 endpoint.go:57] [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address [kubeconfig] Writing "kubelet.conf" kubeconfig file W0505 03:06:33.552221 10998 endpoint.go:57] [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address [kubeconfig] Writing "controller-manager.conf" kubeconfig file W0505 03:06:33.625848 10998 endpoint.go:57] [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address [kubeconfig] Writing "scheduler.conf" kubeconfig file [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests" [control-plane] Using manifest folder "/etc/kubernetes/manifests" [control-plane] Creating static Pod manifest for "kube-apiserver" [control-plane] Creating static Pod manifest for "kube-controller-manager" [control-plane] Creating static Pod manifest for "kube-scheduler" [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env" [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml" [kubelet-start] Starting the kubelet [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests" [kubelet-check] Waiting for a healthy kubelet. This can take up to 4m0s [kubelet-check] The kubelet is healthy after 501.155946ms [api-check] Waiting for a healthy API server. 
This can take up to 4m0s [api-check] The API server is healthy after 16.665034989s [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster [upload-certs] Skipping phase. Please see --upload-certs [mark-control-plane] Marking the node k8s-master01 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers] [mark-control-plane] Marking the node k8s-master01 as control-plane by adding the taints [node-role.kubernetes.io/master:PreferNoSchedule] [bootstrap-token] Using token: abcdef.0123456789abcdef [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key [addons] Applied essential addon: CoreDNS W0505 03:06:54.233183 10998 endpoint.go:57] [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address [addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster. Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of control-plane nodes by copying certificate authorities and service account keys on each node and then running the following as root:
kubectl get po -owide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES busybox 1/1 Running 0 17m 172.27.14.193 k8s-node02 <none> <none>
kubectl exec -ti busybox -- sh / # ping 192.168.1.34 PING 192.168.1.34 (192.168.1.34): 56 data bytes 64 bytes from 192.168.1.34: seq=0 ttl=63 time=0.358 ms 64 bytes from 192.168.1.34: seq=1 ttl=63 time=0.668 ms 64 bytes from 192.168.1.34: seq=2 ttl=63 time=0.637 ms 64 bytes from 192.168.1.34: seq=3 ttl=63 time=0.624 ms 64 bytes from 192.168.1.34: seq=4 ttl=63 time=0.907 ms # 可以连通证明这个pod是可以跨命名空间和跨主机通信的
[root@k8s-master01 ~]# kubectl apply -f cby.yaml # 查看pod情况 [root@k8s-master01 ~]# kubectl get pod NAME READY STATUS RESTARTS AGE chenby-868fd8f687-727hd 1/1 Running 0 23s chenby-868fd8f687-lrxsr 1/1 Running 0 23s chenby-868fd8f687-n7f2k 1/1 Running 0 23s [root@k8s-master01 ~]# # 查看svc情况 [root@k8s-master01 ~]# kubectl get svc NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE chenby NodePort 2408:822a:730:af01::4466 <none> 80:30921/TCP 2m40s kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 58m [root@k8s-master01 ~]# # 在集群内访问,需要在pod所在的节点上执行测试 [root@k8s-node01 ~]# curl -g -6 [2408:822a:730:af01::4466] <!DOCTYPE html> <html> <head> <title>Welcome to nginx!</title> <style> html { color-scheme: light dark; } body { width: 35em; margin: 0 auto; font-family: Tahoma, Verdana, Arial, sans-serif; } </style> </head> <body> <h1>Welcome to nginx!</h1> <p>If you see this page, the nginx web server is successfully installed and working. Further configuration is required.</p>
<p>For online documentation and support please refer to <a href="http://nginx.org/">nginx.org</a>.<br/> Commercial support is available at <a href="http://nginx.com/">nginx.com</a>.</p>
<p><em>Thank you for using nginx.</em></p> </body> </html> [root@k8s-node01 ~]# # 在集群内访问node地址,集群内需要在pod所在的节点上执行测试,集群外任意节点即可访问 [root@k8s-node01 ~]# curl -g -6 [2408:822a:730:af01::bcf]:30921 <!DOCTYPE html> <html> <head> <title>Welcome to nginx!</title> <style> html { color-scheme: light dark; } body { width: 35em; margin: 0 auto; font-family: Tahoma, Verdana, Arial, sans-serif; } </style> </head> <body> <h1>Welcome to nginx!</h1> <p>If you see this page, the nginx web server is successfully installed and working. Further configuration is required.</p>
<p>For online documentation and support please refer to <a href="http://nginx.org/">nginx.org</a>.<br/> Commercial support is available at <a href="http://nginx.com/">nginx.com</a>.</p>
<p><em>Thank you for using nginx.</em></p> </body> </html> [root@k8s-node01 ~]# # 测试ipv4地址 [root@k8s-master01 ~]# curl http://192.168.1.31:30921/ <!DOCTYPE html> <html> <head> <title>Welcome to nginx!</title> <style> html { color-scheme: light dark; } body { width: 35em; margin: 0 auto; font-family: Tahoma, Verdana, Arial, sans-serif; } </style> </head> <body> <h1>Welcome to nginx!</h1> <p>If you see this page, the nginx web server is successfully installed and working. Further configuration is required.</p>
<p>For online documentation and support please refer to <a href="http://nginx.org/">nginx.org</a>.<br/> Commercial support is available at <a href="http://nginx.com/">nginx.com</a>.</p>
<p><em>Thank you for using nginx.</em></p> </body> </html> [root@k8s-master01 ~]#
# Enter the chart directory and rewrite the image registries to proxy mirrors.
# NOTE: in the collapsed original these commands sat on a line starting with
# '#', so they would never have executed — restored as separate commands.
cd kube-prometheus-stack/

# kube-state-metrics sub-chart: replace registry.k8s.io / quay.io with
# dockerproxy mirrors reachable from inside mainland China.
sed -i "s#registry.k8s.io#k8s.dockerproxy.com#g" charts/kube-state-metrics/values.yaml
sed -i "s#quay.io#quay.dockerproxy.com#g" charts/kube-state-metrics/values.yaml

# Same substitutions for the top-level chart values.
sed -i "s#registry.k8s.io#k8s.dockerproxy.com#g" values.yaml
sed -i "s#quay.io#quay.dockerproxy.com#g" values.yaml
安装
1 2 3 4 5 6 7 8 9 10 11 12
# 进行安装 helm install op . --create-namespace --namespace op NAME: op LAST DEPLOYED: Sun May 5 12:43:26 2024 NAMESPACE: op STATUS: deployed REVISION: 1 NOTES: kube-prometheus-stack has been installed. Check its status by running: kubectl --namespace op get pods -l "release=op"
Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create & configure Alertmanager and Prometheus instances using the Operator.
修改 svc
1 2 3 4
# 修改 svc 将其设置为NodePort kubectl edit svc -n op op-grafana kubectl edit svc -n op op-kube-prometheus-stack-prometheus type: NodePort