https://www.tigera.io/project-calico/
https://docs.projectcalico.org/getting-started/kubernetes/
https://github.com/projectcalico/calicoctl
https://docs.projectcalico.org/getting-started/kubernetes/quickstart
Installing Calico and network policy locally (on-premises)
https://docs.projectcalico.org/getting-started/kubernetes/self-managed-onprem/onpremises
[root@VM-3-8-centos ~]# kubeadm version
kubeadm version: &version.Info{Major:"1", Minor:"18", GitVersion:"v1.18.5", GitCommit:"e6503f8d8f769ace2f338794c914a96fc335df0f", GitTreeState:"clean", BuildDate:"2020-06-26T03:45:16Z", GoVersion:"go1.13.9", Compiler:"gc", Platform:"linux/amd64"}
[root@VM-3-8-centos ~]#
[root@VM-3-8-centos ~]#
[root@VM-3-8-centos ~]# docker version
Client: Docker Engine - Community
Version: 20.10.8
API version: 1.41
Go version: go1.16.6
Git commit: 3967b7d
Built: Fri Jul 30 19:55:49 2021
OS/Arch: linux/amd64
Context: default
Experimental: true
Server: Docker Engine - Community
Engine:
Version: 20.10.8
API version: 1.41 (minimum version 1.12)
Go version: go1.16.6
Git commit: 75249d8
Built: Fri Jul 30 19:54:13 2021
OS/Arch: linux/amd64
Experimental: false
containerd:
Version: 1.4.9
GitCommit: e25210fe30a0a703442421b0f60afac609f950a3
runc:
Version: 1.0.1
GitCommit: v1.0.1-0-g4144b63
docker-init:
Version: 0.19.0
GitCommit: de40ad0
[root@VM-3-8-centos ~]#
kubeadm init --pod-network-cidr=192.168.0.0/16 \
--kubernetes-version v1.18.5
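The pod CIDR passed here, 192.168.0.0/16, matches the default IP pool that Calico's custom-resources.yaml creates later; if you change one, change the other. The same init can also be expressed as a kubeadm config file — a minimal sketch using the values from this run:

cat > kubeadm-config.yaml <<EOF
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.18.5
networking:
  podSubnet: 192.168.0.0/16
EOF
kubeadm init --config kubeadm-config.yaml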
[root@VM-3-8-centos ~]# kubeadm init --pod-network-cidr=192.168.0.0/16 --kubernetes-version v1.18.5
W0828 00:31:31.194429 7679 configset.go:202] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
[init] Using Kubernetes version: v1.18.5
[preflight] Running pre-flight checks
[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
[WARNING SystemVerification]: this Docker version is not on the list of validated versions: 20.10.8. Latest validated version: 19.03
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [vm-3-8-centos kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 172.16.3.8]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [vm-3-8-centos localhost] and IPs [172.16.3.8 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [vm-3-8-centos localhost] and IPs [172.16.3.8 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
W0828 00:31:35.673867 7679 manifests.go:225] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
[control-plane] Creating static Pod manifest for "kube-scheduler"
W0828 00:31:35.674734 7679 manifests.go:225] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 22.502066 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.18" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node vm-3-8-centos as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node vm-3-8-centos as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: pxbzag.uy6wej7j9y81kufc
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 172.16.3.8:6443 --token pxbzag.uy6wej7j9y81kufc \
--discovery-token-ca-cert-hash sha256:8b323f512affef8cc644987452be758fa13b4f95eaa8a571f7b3a0b501ec4fad
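Note the two pre-flight warnings above. The Docker-version one is harmless for a lab, but the cgroup driver mismatch is worth fixing before serious use. A common fix (assuming the stock Docker paths, and ideally done before kubeadm init) is switching Docker to the systemd driver — careful, this overwrites any existing daemon.json:

cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
systemctl restart docker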
[root@VM-3-8-centos ~]# mkdir -p $HOME/.kube
[root@VM-3-8-centos ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@VM-3-8-centos ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
[root@VM-3-8-centos ~]#
[root@VM-3-8-centos ~]#
[root@VM-3-8-centos ~]#
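The bootstrap token in the join command above expires (24 hours by default). If a worker needs to join later, a fresh join command can be printed on the control plane at any time:

kubeadm token create --print-join-command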
[root@VM-3-8-centos ~]# kubectl create -f https://docs.projectcalico.org/manifests/tigera-operator.yaml
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/kubecontrollersconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/apiservers.operator.tigera.io created
customresourcedefinition.apiextensions.k8s.io/imagesets.operator.tigera.io created
customresourcedefinition.apiextensions.k8s.io/installations.operator.tigera.io created
customresourcedefinition.apiextensions.k8s.io/tigerastatuses.operator.tigera.io created
namespace/tigera-operator created
podsecuritypolicy.policy/tigera-operator created
serviceaccount/tigera-operator created
clusterrole.rbac.authorization.k8s.io/tigera-operator created
clusterrolebinding.rbac.authorization.k8s.io/tigera-operator created
deployment.apps/tigera-operator created
[root@VM-3-8-centos ~]# kubectl create -f https://docs.projectcalico.org/manifests/custom-resources.yaml
installation.operator.tigera.io/default created
apiserver.operator.tigera.io/default created
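custom-resources.yaml creates an Installation resource whose default IP pool is 192.168.0.0/16 — which is why it lines up with the --pod-network-cidr used above. If your pod CIDR differs, edit the manifest before applying it. A sketch of the relevant part, with field values mirroring what this operator version produced (compare the ippool output further down):

kubectl apply -f - <<EOF
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  calicoNetwork:
    ipPools:
    - cidr: 192.168.0.0/16
      blockSize: 26
      encapsulation: VXLANCrossSubnet
      natOutgoing: Enabled
      nodeSelector: all()
EOF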
[root@VM-3-8-centos ~]#
[root@VM-3-8-centos ~]#
[root@VM-3-8-centos ~]# kubectl get node -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
vm-3-5-centos Ready <none> 11m v1.18.5 172.16.3.5 <none> CentOS Linux 7 (Core) 3.10.0-1160.31.1.el7.x86_64 docker://20.10.8
vm-3-8-centos Ready master 12m v1.18.5 172.16.3.8 <none> CentOS Linux 7 (Core) 3.10.0-1160.31.1.el7.x86_64 docker://20.10.8
[root@VM-3-8-centos ~]#
[root@VM-3-8-centos ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
vm-3-5-centos Ready <none> 11m v1.18.5
vm-3-8-centos Ready master 12m v1.18.5
[root@VM-3-8-centos ~]#
[root@VM-3-8-centos ~]# ip route
default via 172.16.3.1 dev eth0
169.254.0.0/16 dev eth0 scope link metric 1002
172.16.3.0/24 dev eth0 proto kernel scope link src 172.16.3.8
172.17.0.0/16 dev docker0 proto kernel scope link src 172.17.0.1
blackhole 192.168.23.64/26 proto 80
192.168.23.65 dev cali03673d3f0fd scope link
192.168.23.68 dev calib6ee052bebf scope link
192.168.245.192/26 via 172.16.3.5 dev eth0 proto 80 onlink
[root@VM-3-8-centos ~]#
[root@VM-3-8-centos ~]# route -n
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
0.0.0.0 172.16.3.1 0.0.0.0 UG 0 0 0 eth0
169.254.0.0 0.0.0.0 255.255.0.0 U 1002 0 0 eth0
172.16.3.0 0.0.0.0 255.255.255.0 U 0 0 0 eth0
172.17.0.0 0.0.0.0 255.255.0.0 U 0 0 0 docker0
192.168.23.64 0.0.0.0 255.255.255.192 U 0 0 0 *
192.168.23.65 0.0.0.0 255.255.255.255 UH 0 0 0 cali03673d3f0fd
192.168.23.68 0.0.0.0 255.255.255.255 UH 0 0 0 calib6ee052bebf
192.168.245.192 172.16.3.5 255.255.255.192 UG 0 0 0 eth0
[root@VM-3-8-centos ~]#
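Reading the master's table: each local pod gets a /32 route through its cali* veth, the node's own /26 allocation block (192.168.23.64/26) is blackholed as a catch-all for unassigned addresses, and the worker's block (192.168.245.192/26) is routed directly at 172.16.3.5 — unencapsulated, because both nodes sit in 172.16.3.0/24 and the pool (shown later) uses VXLAN only across subnets. ip route get makes the chosen path explicit; the IPs below are taken from the tables above:

ip route get 192.168.23.65      # local pod: via its cali* interface
ip route get 192.168.245.199    # remote pod: via eth0 to 172.16.3.5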
[root@VM-3-8-centos ~]# kubectl create deployment nginx --image=nginx
deployment.apps/nginx created
[root@VM-3-8-centos ~]# kubectl create deployment web --image=nginx
deployment.apps/web created
[root@VM-3-8-centos ~]#
[root@VM-3-8-centos ~]# kubectl scale deployment nginx --replicas=2
deployment.apps/nginx scaled
[root@VM-3-8-centos ~]# kubectl scale deployment web --replicas=2
deployment.apps/web scaled
[root@VM-3-8-centos ~]#
[root@VM-3-8-centos ~]# kubectl get pod -A -owide
NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
calico-apiserver calico-apiserver-6fc8878944-gjd9l 1/1 Running 0 53m 192.168.245.196 vm-3-5-centos <none> <none>
calico-system calico-kube-controllers-68fb6b86bc-8t948 1/1 Running 0 54m 192.168.245.193 vm-3-5-centos <none> <none>
calico-system calico-node-nl6xt 1/1 Running 0 54m 172.16.3.8 vm-3-8-centos <none> <none>
calico-system calico-node-qdvt2 1/1 Running 0 54m 172.16.3.5 vm-3-5-centos <none> <none>
calico-system calico-typha-555dd6c6bb-5nr7v 1/1 Running 0 54m 172.16.3.8 vm-3-8-centos <none> <none>
calico-system calico-typha-555dd6c6bb-q9sz8 1/1 Running 0 54m 172.16.3.5 vm-3-5-centos <none> <none>
default fluentd-elasticsearch-45958 1/1 Running 0 7s 192.168.23.74 vm-3-8-centos <none> <none>
default fluentd-elasticsearch-9lpg5 1/1 Running 0 8s 192.168.245.210 vm-3-5-centos <none> <none>
default nginx-f89759699-7fjqj 1/1 Running 0 48m 192.168.245.201 vm-3-5-centos <none> <none>
default nginx-f89759699-vfhvz 1/1 Running 0 50m 192.168.245.199 vm-3-5-centos <none> <none>
default web-5dcb957ccc-kh7cr 1/1 Running 0 48m 192.168.245.202 vm-3-5-centos <none> <none>
default web-5dcb957ccc-wr774 1/1 Running 0 50m 192.168.245.200 vm-3-5-centos <none> <none>
kube-system coredns-66bff467f8-glnnz 1/1 Running 0 56m 192.168.245.194 vm-3-5-centos <none> <none>
kube-system coredns-66bff467f8-znhtt 1/1 Running 0 56m 192.168.23.65 vm-3-8-centos <none> <none>
kube-system etcd-vm-3-8-centos 1/1 Running 0 56m 172.16.3.8 vm-3-8-centos <none> <none>
kube-system kube-apiserver-vm-3-8-centos 1/1 Running 0 56m 172.16.3.8 vm-3-8-centos <none> <none>
kube-system kube-controller-manager-vm-3-8-centos 1/1 Running 0 56m 172.16.3.8 vm-3-8-centos <none> <none>
kube-system kube-proxy-2b9wn 1/1 Running 0 55m 172.16.3.5 vm-3-5-centos <none> <none>
kube-system kube-proxy-kmzmg 1/1 Running 0 56m 172.16.3.8 vm-3-8-centos <none> <none>
kube-system kube-scheduler-vm-3-8-centos 1/1 Running 0 56m 172.16.3.8 vm-3-8-centos <none> <none>
tigera-operator tigera-operator-86699b977b-mxt4d 1/1 Running 0 55m 172.16.3.5 vm-3-5-centos <none> <none>
[root@VM-3-8-centos ~]#
[root@VM-3-8-centos ~]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
fluentd-elasticsearch-45958 1/1 Running 0 40s 192.168.23.74 vm-3-8-centos <none> <none>
fluentd-elasticsearch-9lpg5 1/1 Running 0 41s 192.168.245.210 vm-3-5-centos <none> <none>
nginx-f89759699-7fjqj 1/1 Running 0 49m 192.168.245.201 vm-3-5-centos <none> <none>
nginx-f89759699-vfhvz 1/1 Running 0 51m 192.168.245.199 vm-3-5-centos <none> <none>
web-5dcb957ccc-kh7cr 1/1 Running 0 49m 192.168.245.202 vm-3-5-centos <none> <none>
web-5dcb957ccc-wr774 1/1 Running 0 51m 192.168.245.200 vm-3-5-centos <none> <none>
[root@VM-3-8-centos ~]#
[root@VM-3-8-centos ~]# ping 192.168.245.199 -c 2
PING 192.168.245.199 (192.168.245.199) 56(84) bytes of data.
^C
--- 192.168.245.199 ping statistics ---
2 packets transmitted, 0 received, 100% packet loss, time 1000ms
[root@VM-3-8-centos ~]# ping 192.168.245.200 -c 2
PING 192.168.245.200 (192.168.245.200) 56(84) bytes of data.
^C
--- 192.168.245.200 ping statistics ---
2 packets transmitted, 0 received, 100% packet loss, time 999ms
[root@VM-3-8-centos ~]# ping 192.168.245.201 -c 2
PING 192.168.245.201 (192.168.245.201) 56(84) bytes of data.
^C
--- 192.168.245.201 ping statistics ---
2 packets transmitted, 0 received, 100% packet loss, time 999ms
[root@VM-3-8-centos ~]# ping 192.168.245.202 -c 2
PING 192.168.245.202 (192.168.245.202) 56(84) bytes of data.
^C
--- 192.168.245.202 ping statistics ---
2 packets transmitted, 0 received, 100% packet loss, time 999ms
[root@VM-3-8-centos ~]# ping 192.168.23.74 -c 2
PING 192.168.23.74 (192.168.23.74) 56(84) bytes of data.
64 bytes from 192.168.23.74: icmp_seq=1 ttl=64 time=0.068 ms
64 bytes from 192.168.23.74: icmp_seq=2 ttl=64 time=0.044 ms
--- 192.168.23.74 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 999ms
rtt min/avg/max/mdev = 0.044/0.056/0.068/0.012 ms
[root@VM-3-8-centos ~]# ping 192.168.245.210
PING 192.168.245.210 (192.168.245.210) 56(84) bytes of data.
^C
--- 192.168.245.210 ping statistics ---
2 packets transmitted, 0 received, 100% packet loss, time 999ms
[root@VM-3-8-centos ~]#
[root@VM-3-5-centos ~]# ping 192.168.245.200 -c 2
PING 192.168.245.200 (192.168.245.200) 56(84) bytes of data.
64 bytes from 192.168.245.200: icmp_seq=1 ttl=64 time=0.061 ms
64 bytes from 192.168.245.200: icmp_seq=2 ttl=64 time=0.036 ms
--- 192.168.245.200 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 999ms
rtt min/avg/max/mdev = 0.036/0.048/0.061/0.014 ms
[root@VM-3-5-centos ~]# ping 192.168.245.201 -c 2
PING 192.168.245.201 (192.168.245.201) 56(84) bytes of data.
64 bytes from 192.168.245.201: icmp_seq=1 ttl=64 time=0.063 ms
64 bytes from 192.168.245.201: icmp_seq=2 ttl=64 time=0.046 ms
--- 192.168.245.201 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1000ms
rtt min/avg/max/mdev = 0.046/0.054/0.063/0.011 ms
[root@VM-3-5-centos ~]# ping 192.168.245.202 -c 2
PING 192.168.245.202 (192.168.245.202) 56(84) bytes of data.
64 bytes from 192.168.245.202: icmp_seq=1 ttl=64 time=0.063 ms
64 bytes from 192.168.245.202: icmp_seq=2 ttl=64 time=0.044 ms
--- 192.168.245.202 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 999ms
rtt min/avg/max/mdev = 0.044/0.053/0.063/0.012 ms
[root@VM-3-5-centos ~]# ping 192.168.245.199 -c 2
PING 192.168.245.199 (192.168.245.199) 56(84) bytes of data.
64 bytes from 192.168.245.199: icmp_seq=1 ttl=64 time=0.064 ms
64 bytes from 192.168.245.199: icmp_seq=2 ttl=64 time=0.042 ms
--- 192.168.245.199 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 999ms
rtt min/avg/max/mdev = 0.042/0.053/0.064/0.011 ms
[root@VM-3-5-centos ~]#
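So the master cannot reach any pod on the worker, while each node reaches its own pods fine. Since the nodes share a subnet, this cross-node traffic travels unencapsulated with pod IPs on the wire, and cloud VPC fabrics often drop packets whose source or destination IPs they do not recognize. Capturing on both ends shows whether the packets leave and whether they arrive — a quick check, assuming tcpdump is available on both nodes:

# on vm-3-8-centos (sender):
tcpdump -ni eth0 icmp and host 192.168.245.199
# on vm-3-5-centos (receiver):
tcpdump -ni eth0 icmp and host 192.168.245.199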
[root@VM-3-8-centos ~]# kubectl apply -f https://k8s.io/examples/controllers/daemonset.yaml
daemonset.apps/fluentd-elasticsearch created
[root@VM-3-8-centos ~]#
[root@VM-3-8-centos ~]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
fluentd-elasticsearch-45958 1/1 Running 0 40s 192.168.23.74 vm-3-8-centos <none> <none>
fluentd-elasticsearch-9lpg5 1/1 Running 0 41s 192.168.245.210 vm-3-5-centos <none> <none>
nginx-f89759699-7fjqj 1/1 Running 0 49m 192.168.245.201 vm-3-5-centos <none> <none>
nginx-f89759699-vfhvz 1/1 Running 0 51m 192.168.245.199 vm-3-5-centos <none> <none>
web-5dcb957ccc-kh7cr 1/1 Running 0 49m 192.168.245.202 vm-3-5-centos <none> <none>
web-5dcb957ccc-wr774 1/1 Running 0 51m 192.168.245.200 vm-3-5-centos <none> <none>
[root@VM-3-8-centos ~]#
[root@VM-3-8-centos ~]# kubectl label nodes vm-3-5-centos k8s-app=fluentd-logging
node/vm-3-5-centos labeled
[root@VM-3-8-centos ~]# kubectl label nodes vm-3-8-centos k8s-app=fluentd-logging
node/vm-3-8-centos labeled
[root@VM-3-8-centos ~]#
[root@VM-3-8-centos ~]# kubectl label nodes vm-3-8-centos k8s-app-
node/vm-3-8-centos labeled
[root@VM-3-8-centos ~]# kubectl label nodes vm-3-5-centos k8s-app-
node/vm-3-5-centos labeled
[root@VM-3-8-centos ~]#
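With the labels in place (before they were removed above), a DaemonSet can be pinned to matching nodes through spec.template.spec.nodeSelector. A hypothetical patch against the example DaemonSet — the upstream manifest itself carries no such selector:

kubectl patch daemonset fluentd-elasticsearch -p '{"spec":{"template":{"spec":{"nodeSelector":{"k8s-app":"fluentd-logging"}}}}}'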
[root@VM-3-8-centos ~]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
fluentd-elasticsearch-45958 1/1 Running 0 40s 192.168.23.74 vm-3-8-centos <none> <none>
fluentd-elasticsearch-9lpg5 1/1 Running 0 41s 192.168.245.210 vm-3-5-centos <none> <none>
nginx-f89759699-7fjqj 1/1 Running 0 49m 192.168.245.201 vm-3-5-centos <none> <none>
nginx-f89759699-vfhvz 1/1 Running 0 51m 192.168.245.199 vm-3-5-centos <none> <none>
web-5dcb957ccc-kh7cr 1/1 Running 0 49m 192.168.245.202 vm-3-5-centos <none> <none>
web-5dcb957ccc-wr774 1/1 Running 0 51m 192.168.245.200 vm-3-5-centos <none> <none>
[root@VM-3-8-centos ~]# kubectl exec -it fluentd-elasticsearch-45958 /bin/bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl kubectl exec [POD] -- [COMMAND] instead.
root@fluentd-elasticsearch-45958:/# ip route
default via 169.254.1.1 dev eth0
169.254.1.1 dev eth0 scope link
root@fluentd-elasticsearch-45958:/# route -n
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
0.0.0.0 169.254.1.1 0.0.0.0 UG 0 0 0 eth0
169.254.1.1 0.0.0.0 255.255.255.255 UH 0 0 0 eth0
root@fluentd-elasticsearch-45958:/#
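Inside the pod, the only route points at 169.254.1.1, an address configured on no interface anywhere. The trick is that Calico enables proxy ARP on the host side of each pod's veth, so the host answers ARP for 169.254.1.1 and all pod traffic lands in the host routing table. This can be verified back on the node — substitute the cali* interface that ip route reports for the pod's IP:

ip route get 192.168.23.74                               # shows the cali* device
cat /proc/sys/net/ipv4/conf/<cali-interface>/proxy_arp   # prints 1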
[root@VM-3-5-centos ~]# route -n
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
0.0.0.0 172.16.3.1 0.0.0.0 UG 0 0 0 eth0
169.254.0.0 0.0.0.0 255.255.0.0 U 1002 0 0 eth0
172.16.3.0 0.0.0.0 255.255.255.0 U 0 0 0 eth0
172.17.0.0 0.0.0.0 255.255.0.0 U 0 0 0 docker0
192.168.23.64 172.16.3.8 255.255.255.192 UG 0 0 0 eth0
192.168.245.192 0.0.0.0 255.255.255.192 U 0 0 0 *
192.168.245.193 0.0.0.0 255.255.255.255 UH 0 0 0 cali24419a74e7b
192.168.245.194 0.0.0.0 255.255.255.255 UH 0 0 0 calidd7194c468b
192.168.245.196 0.0.0.0 255.255.255.255 UH 0 0 0 calif16548a36fa
192.168.245.199 0.0.0.0 255.255.255.255 UH 0 0 0 cali70ecdcd696c
192.168.245.200 0.0.0.0 255.255.255.255 UH 0 0 0 cali43776891603
192.168.245.201 0.0.0.0 255.255.255.255 UH 0 0 0 cali46ceb8a3746
192.168.245.202 0.0.0.0 255.255.255.255 UH 0 0 0 cali06090aff34d
192.168.245.208 0.0.0.0 255.255.255.255 UH 0 0 0 calid771311ffd3
[root@VM-3-5-centos ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
link/ether 52:54:00:67:62:dd brd ff:ff:ff:ff:ff:ff
inet 172.16.3.5/24 brd 172.16.3.255 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::5054:ff:fe67:62dd/64 scope link
valid_lft forever preferred_lft forever
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
link/ether 02:42:0a:f1:68:03 brd ff:ff:ff:ff:ff:ff
inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
valid_lft forever preferred_lft forever
4: cali24419a74e7b@if3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
link/ether ee:ee:ee:ee:ee:ee brd ff:ff:ff:ff:ff:ff link-netnsid 0
inet6 fe80::ecee:eeff:feee:eeee/64 scope link
valid_lft forever preferred_lft forever
5: calidd7194c468b@if3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
link/ether ee:ee:ee:ee:ee:ee brd ff:ff:ff:ff:ff:ff link-netnsid 1
inet6 fe80::ecee:eeff:feee:eeee/64 scope link
valid_lft forever preferred_lft forever
6: vxlan.calico: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN group default
link/ether 66:c1:b0:80:b4:d7 brd ff:ff:ff:ff:ff:ff
inet 192.168.245.192/32 scope global vxlan.calico
valid_lft forever preferred_lft forever
inet6 fe80::64c1:b0ff:fe80:b4d7/64 scope link
valid_lft forever preferred_lft forever
8: calif16548a36fa@if3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UP group default
link/ether ee:ee:ee:ee:ee:ee brd ff:ff:ff:ff:ff:ff link-netnsid 2
inet6 fe80::ecee:eeff:feee:eeee/64 scope link
valid_lft forever preferred_lft forever
11: cali70ecdcd696c@if3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UP group default
link/ether ee:ee:ee:ee:ee:ee brd ff:ff:ff:ff:ff:ff link-netnsid 5
inet6 fe80::ecee:eeff:feee:eeee/64 scope link
valid_lft forever preferred_lft forever
12: cali43776891603@if3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UP group default
link/ether ee:ee:ee:ee:ee:ee brd ff:ff:ff:ff:ff:ff link-netnsid 6
inet6 fe80::ecee:eeff:feee:eeee/64 scope link
valid_lft forever preferred_lft forever
13: cali46ceb8a3746@if3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UP group default
link/ether ee:ee:ee:ee:ee:ee brd ff:ff:ff:ff:ff:ff link-netnsid 3
inet6 fe80::ecee:eeff:feee:eeee/64 scope link
valid_lft forever preferred_lft forever
14: cali06090aff34d@if3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UP group default
link/ether ee:ee:ee:ee:ee:ee brd ff:ff:ff:ff:ff:ff link-netnsid 4
inet6 fe80::ecee:eeff:feee:eeee/64 scope link
valid_lft forever preferred_lft forever
20: calid771311ffd3@if3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UP group default
link/ether ee:ee:ee:ee:ee:ee brd ff:ff:ff:ff:ff:ff link-netnsid 7
inet6 fe80::ecee:eeff:feee:eeee/64 scope link
valid_lft forever preferred_lft forever
[root@VM-3-5-centos ~]#
https://docs.projectcalico.org/networking/configuring
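calicoctl is used below but its installation isn't shown. One common route (an assumption — check the project's releases page for the current download URL) is to drop the release binary on the control-plane node and point it at the Kubernetes datastore, since the cluster type below includes kdd:

curl -L -o /usr/local/bin/calicoctl \
  https://github.com/projectcalico/calicoctl/releases/download/v3.20.0/calicoctl
chmod +x /usr/local/bin/calicoctl
export DATASTORE_TYPE=kubernetes KUBECONFIG=$HOME/.kube/config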
[root@VM-3-8-centos ~]# calicoctl version
Client Version: v3.20.0
Git commit: 38b00edd
Cluster Version: v3.20.0
Cluster Type: typha,kdd,k8s,operator,bgp,kubeadm
[root@VM-3-8-centos ~]#
[root@VM-3-8-centos ~]# calicoctl get nodes
NAME
vm-3-5-centos
vm-3-8-centos
[root@VM-3-8-centos ~]#
[root@VM-3-8-centos ~]# calicoctl node status
Calico process is running.
IPv4 BGP status
+--------------+-------------------+-------+----------+-------------+
| PEER ADDRESS | PEER TYPE | STATE | SINCE | INFO |
+--------------+-------------------+-------+----------+-------------+
| 172.16.3.5 | node-to-node mesh | up | 16:34:37 | Established |
+--------------+-------------------+-------+----------+-------------+
IPv6 BGP status
No IPv6 peers found.
[root@VM-3-8-centos ~]#
[root@VM-3-8-centos ~]# calicoctl ipam show
+----------+----------------+-----------+------------+--------------+
| GROUPING | CIDR | IPS TOTAL | IPS IN USE | IPS FREE |
+----------+----------------+-----------+------------+--------------+
| IP Pool | 192.168.0.0/16 | 65536 | 12 (0%) | 65524 (100%) |
+----------+----------------+-----------+------------+--------------+
[root@VM-3-8-centos ~]#
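The 12 in-use addresses are spread across the per-node /26 blocks seen in the routing tables earlier. The block-level view makes that split explicit:

calicoctl ipam show --show-blocks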
[root@VM-3-8-centos ~]# calicoctl node diags
Collecting diagnostics
Using temp dir: /tmp/calico831055576
Dumping netstat
Dumping routes (IPv4)
Dumping routes (IPv6)
Dumping interface info (IPv4)
Dumping interface info (IPv6)
Dumping iptables (IPv4)
Dumping iptables (IPv6)
Dumping ipsets
Dumping ipsets (container)
Copying journal for calico-node.service
Dumping felix stats
Failed to run command: pkill -SIGUSR1 felix
Error:
Copying Calico logs
Error creating log directory: mkdir /tmp/calico831055576/diagnostics/logs: file exists
Diags saved to /tmp/calico831055576/diags-20210828_014651.tar.gz
If required, you can upload the diagnostics bundle to a file sharing service
such as transfer.sh using curl or similar. For example:
curl --upload-file /tmp/calico831055576/diags-20210828_014651.tar.gz https://transfer.sh//tmp/calico831055576/diags-20210828_014651.tar.gz
[root@VM-3-8-centos ~]#
[root@VM-3-8-centos ~]#
[root@VM-3-8-centos ~]# calicoctl node checksystem
Checking kernel version...
3.10.0-1160.31.1.el7.x86_64 OK
Checking kernel modules...
ipt_REJECT OK
ipt_rpfilter OK
xt_multiport OK
vfio-pci OK
xt_bpf OK
ip_tables OK
xt_conntrack OK
xt_icmp OK
xt_mark OK
xt_u32 OK
ip_set OK
xt_addrtype OK
xt_rpfilter OK
ipt_set OK
nf_conntrack_netlink OK
xt_icmp6 OK
xt_set OK
ip6_tables OK
ipt_ipvs OK
System meets minimum system requirements to run Calico!
[root@VM-3-8-centos ~]#
[root@VM-3-8-centos ~]# calicoctl get ippool -o wide
NAME CIDR NAT IPIPMODE VXLANMODE DISABLED SELECTOR
default-ipv4-ippool 192.168.0.0/16 true Never CrossSubnet false all()
[root@VM-3-8-centos ~]#
[root@VM-3-8-centos ~]# calicoctl get ipPool -o yaml
apiVersion: projectcalico.org/v3
items:
- apiVersion: projectcalico.org/v3
  kind: IPPool
  metadata:
    creationTimestamp: "2021-08-27T16:34:31Z"
    name: default-ipv4-ippool
    resourceVersion: "1143"
    uid: 88605ae1-6e23-4a89-b85d-6e13ef84d0e7
  spec:
    blockSize: 26
    cidr: 192.168.0.0/16
    ipipMode: Never
    natOutgoing: true
    nodeSelector: all()
    vxlanMode: CrossSubnet
kind: IPPoolList
metadata:
  resourceVersion: "18926"
[root@VM-3-8-centos ~]#
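blockSize: 26 is where the /26 block routes come from, and the ipipMode: Never / vxlanMode: CrossSubnet pair explains the unencapsulated same-subnet path discussed around the failed pings. If that native path is what the underlying network drops, forcing VXLAN for all pod traffic is one experiment worth trying — a sketch using calicoctl patch:

calicoctl patch ippool default-ipv4-ippool -p '{"spec":{"vxlanMode":"Always"}}'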
[root@VM-3-8-centos ~]# calicoctl get profile
NAME
projectcalico-default-allow
kns.calico-apiserver
kns.calico-system
kns.default
kns.kube-node-lease
kns.kube-public
kns.kube-system
kns.tigera-operator
ksa.calico-apiserver.calico-apiserver
ksa.calico-apiserver.default
ksa.calico-system.calico-kube-controllers
ksa.calico-system.calico-node
ksa.calico-system.calico-typha
ksa.calico-system.default
ksa.default.default
ksa.kube-node-lease.default
ksa.kube-public.default
ksa.kube-system.attachdetach-controller
ksa.kube-system.bootstrap-signer
ksa.kube-system.certificate-controller
ksa.kube-system.clusterrole-aggregation-controller
ksa.kube-system.coredns
ksa.kube-system.cronjob-controller
ksa.kube-system.daemon-set-controller
ksa.kube-system.default
ksa.kube-system.deployment-controller
ksa.kube-system.disruption-controller
ksa.kube-system.endpoint-controller
ksa.kube-system.endpointslice-controller
ksa.kube-system.expand-controller
ksa.kube-system.generic-garbage-collector
ksa.kube-system.horizontal-pod-autoscaler
ksa.kube-system.job-controller
ksa.kube-system.kube-proxy
ksa.kube-system.namespace-controller
ksa.kube-system.node-controller
ksa.kube-system.persistent-volume-binder
ksa.kube-system.pod-garbage-collector
ksa.kube-system.pv-protection-controller
ksa.kube-system.pvc-protection-controller
ksa.kube-system.replicaset-controller
ksa.kube-system.replication-controller
ksa.kube-system.resourcequota-controller
ksa.kube-system.service-account-controller
ksa.kube-system.service-controller
ksa.kube-system.statefulset-controller
ksa.kube-system.token-cleaner
ksa.kube-system.ttl-controller
ksa.tigera-operator.default
ksa.tigera-operator.tigera-operator
[root@VM-3-8-centos ~]#
[root@VM-3-8-centos ~]# calicoctl get profile kns.kube-node-lease
NAME
kns.kube-node-lease
[root@VM-3-8-centos ~]# calicoctl get profile kns.kube-node-lease -o json
{
  "kind": "Profile",
  "apiVersion": "projectcalico.org/v3",
  "metadata": {
    "name": "kns.kube-node-lease",
    "uid": "c8d845ab-5e4e-4f8f-ac65-4605efe796c6",
    "resourceVersion": "41",
    "creationTimestamp": "2021-08-27T16:31:56Z"
  },
  "spec": {
    "ingress": [
      {
        "action": "Allow",
        "source": {},
        "destination": {}
      }
    ],
    "egress": [
      {
        "action": "Allow",
        "source": {},
        "destination": {}
      }
    ],
    "labelsToApply": {
      "pcns.projectcalico.org/name": "kube-node-lease"
    }
  }
}
[root@VM-3-8-centos ~]#
[root@VM-3-8-centos ~]# calicoctl get profile kns.kube-node-lease -o json > profile.json
https://docs.projectcalico.org/master/reference/calicoctl/create
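Profiles like the one above are what give every namespace its default allow-all behavior; actual restrictions come from NetworkPolicy resources, created with calicoctl as the linked page describes. As a sketch, the policy below would restrict the web deployment (whose pods carry app=web from kubectl create deployment) to TCP/80 ingress:

calicoctl create -f - <<EOF
apiVersion: projectcalico.org/v3
kind: NetworkPolicy
metadata:
  name: web-allow-80
  namespace: default
spec:
  selector: app == 'web'
  ingress:
  - action: Allow
    protocol: TCP
    destination:
      ports:
      - 80
EOF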
[root@VM-3-8-centos ~]# calicoctl get bgpconfig
NAME LOGSEVERITY MESHENABLED ASNUMBER
[root@VM-3-8-centos ~]#
[root@VM-3-8-centos ~]# calicoctl get ippool -o wide
NAME CIDR NAT IPIPMODE VXLANMODE DISABLED SELECTOR
default-ipv4-ippool 192.168.0.0/16 true Never CrossSubnet false all()
[root@VM-3-8-centos ~]#
[root@VM-3-8-centos ~]# calicoctl get nodes --output=wide
NAME ASN IPV4 IPV6
vm-3-5-centos (64512) 172.16.3.5/24
vm-3-8-centos (64512) 172.16.3.8/24
[root@VM-3-8-centos ~]#
[root@VM-3-8-centos ~]# calicoctl get bgppeer
NAME PEERIP NODE ASN
[root@VM-3-8-centos ~]#
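No BGPConfiguration or BGPPeer objects exist, so the two nodes simply run the default full node-to-node mesh under AS 64512, as the node status output showed. Peering with external infrastructure would be done by creating a BGPPeer — a purely illustrative sketch, with a made-up peer address and AS number:

calicoctl create -f - <<EOF
apiVersion: projectcalico.org/v3
kind: BGPPeer
metadata:
  name: rack1-tor
spec:
  peerIP: 172.16.3.1
  asNumber: 64513
EOF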
[root@VM-3-5-centos ~]# cat /etc/cni/net.d/10-calico.conflist
{
  "name": "k8s-pod-network",
  "cniVersion": "0.3.1",
  "plugins": [
    {
      "type": "calico",
      "datastore_type": "kubernetes",
      "mtu": 0,
      "nodename_file_optional": false,
      "log_level": "Info",
      "log_file_path": "/var/log/calico/cni/cni.log",
      "ipam": { "type": "calico-ipam", "assign_ipv4": "true", "assign_ipv6": "false" },
      "container_settings": {
        "allow_ip_forwarding": false
      },
      "policy": {
        "type": "k8s"
      },
      "kubernetes": {
        "k8s_api_root": "https://10.96.0.1:443",
        "kubeconfig": "/etc/cni/net.d/calico-kubeconfig"
      }
    },
    {
      "type": "bandwidth",
      "capabilities": { "bandwidth": true }
    },
    { "type": "portmap", "snat": true, "capabilities": { "portMappings": true } }
  ]
}[root@VM-3-5-centos ~]#
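Two details in this file are easy to misread: "mtu": 0 tells Calico to auto-detect the MTU rather than literally setting 0 (the detected value showed up as 1450 on vxlan.calico in the ip a output earlier), and the portmap plugin is what implements hostPort mappings. The effective MTU can be confirmed on the device itself:

ip link show vxlan.calico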