# Calico Version v2.6.8
# https://docs.projectcalico.org/v2.6/releases#v2.6.8
# This manifest includes the following component versions:
# calico/node:v2.6.8
# calico/cni:v1.11.4
# calico/kube-controllers:v1.0.3
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-config
  namespace: kube-system
data:
  # The location of your etcd cluster. This uses the Service clusterIP
  # defined below.
  etcd_endpoints: "http://10.96.232.136:6666"
  # Configure the Calico backend to use.
  calico_backend: "bird"
  # The CNI network configuration to install on each node.
  cni_network_config: |-
    {
        "name": "k8s-pod-network",
        "cniVersion": "0.1.0",
        "type": "calico",
        "etcd_endpoints": "__ETCD_ENDPOINTS__",
        "log_level": "info",
        "mtu": 1500,
        "ipam": {
            "type": "calico-ipam"
        },
        "policy": {
            "type": "k8s",
            "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
            "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
        },
        "kubernetes": {
            "kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__"
        }
    }
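# Note: the __ETCD_ENDPOINTS__, __KUBERNETES_SERVICE_*__, __SERVICEACCOUNT_TOKEN__ and
# __KUBECONFIG_FILENAME__ placeholders above are not meant to be filled in by hand; the
# install-cni container in the calico-node DaemonSet below substitutes them when it
# writes the CNI config onto each node.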
---
# This manifest installs the Calico etcd on the kubeadm master. This uses a DaemonSet
# to force it to run on the master even when the master isn't schedulable, and uses
# nodeSelector to ensure it only runs on the master.
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: calico-etcd
  namespace: kube-system
  labels:
    k8s-app: calico-etcd
spec:
  template:
    metadata:
      labels:
        k8s-app: calico-etcd
      annotations:
        # Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
        # reserves resources for critical add-on pods so that they can be rescheduled after
        # a failure. This annotation works in tandem with the toleration below.
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      tolerations:
      # This taint is set by all kubelets running `--cloud-provider=external`,
      # so we should tolerate it to schedule the Calico pods.
      - key: node.cloudprovider.kubernetes.io/uninitialized
        value: "true"
        effect: NoSchedule
      # This toleration allows the pod to run on the master.
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      # Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
      # This, along with the annotation above, marks this pod as a critical add-on.
      - key: CriticalAddonsOnly
        operator: Exists
      # Only run this pod on the master.
      nodeSelector:
        node-role.kubernetes.io/master: ""
      hostNetwork: true
      containers:
      - name: calico-etcd
        image: quay.io/coreos/etcd:v3.1.10
        env:
        - name: CALICO_ETCD_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        command: ["/bin/sh", "-c"]
        args: ["/usr/local/bin/etcd --name=calico --data-dir=/var/etcd/calico-data --advertise-client-urls=http://$CALICO_ETCD_IP:6666 --listen-client-urls=http://0.0.0.0:6666 --listen-peer-urls=http://0.0.0.0:6667"]
        volumeMounts:
        - name: var-etcd
          mountPath: /var/etcd
      volumes:
      - name: var-etcd
        hostPath:
          path: /var/etcd
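# Note: the pod above runs a single etcd member whose data lives on the master's
# /var/etcd hostPath. It is intended only as the datastore for this self-hosted
# Calico install, not as a highly available etcd cluster.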
---
# This manifest installs the Service which gets traffic to the Calico
# etcd.
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: calico-etcd
  name: calico-etcd
  namespace: kube-system
spec:
  # Select the calico-etcd pod running on the master.
  selector:
    k8s-app: calico-etcd
  # This ClusterIP needs to be known in advance, since we cannot rely
  # on DNS to get access to etcd.
  clusterIP: 10.96.232.136
  ports:
  - port: 6666
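# Note: this clusterIP must match the etcd_endpoints value in the calico-config ConfigMap
# above; 10.96.232.136 assumes the default kubeadm Service CIDR (10.96.0.0/12), so choose
# an address inside your cluster's Service CIDR if it differs.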
---
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
  name: calico-node
  namespace: kube-system
  labels:
    k8s-app: calico-node
spec:
  selector:
    matchLabels:
      k8s-app: calico-node
  template:
    metadata:
      labels:
        k8s-app: calico-node
      annotations:
        # Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
        # reserves resources for critical add-on pods so that they can be rescheduled after
        # a failure. This annotation works in tandem with the toleration below.
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      hostNetwork: true
      tolerations:
      # This taint is set by all kubelets running `--cloud-provider=external`,
      # so we should tolerate it to schedule the Calico pods.
      - key: node.cloudprovider.kubernetes.io/uninitialized
        value: "true"
        effect: NoSchedule
      # This toleration allows the pod to run on the master.
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      # Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
      # This, along with the annotation above, marks this pod as a critical add-on.
      - key: CriticalAddonsOnly
        operator: Exists
      serviceAccountName: calico-cni-plugin
      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
      # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
      terminationGracePeriodSeconds: 0
      containers:
      # Runs the calico/node container on each Kubernetes node. This
      # container programs network policy and routes on each
      # host.
      - name: calico-node
        image: quay.io/calico/node:v2.6.8
        env:
        # The location of the Calico etcd cluster.
        - name: ETCD_ENDPOINTS
          valueFrom:
            configMapKeyRef:
              name: calico-config
              key: etcd_endpoints
        # Enable BGP. Disable to enforce policy only.
        - name: CALICO_NETWORKING_BACKEND
          valueFrom:
            configMapKeyRef:
              name: calico-config
              key: calico_backend
        # Cluster type to identify the deployment type.
        - name: CLUSTER_TYPE
          value: "kubeadm,bgp"
        # Set the noderef for the node controller.
        - name: CALICO_K8S_NODE_REF
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        # Disable file logging so `kubectl logs` works.
        - name: CALICO_DISABLE_FILE_LOGGING
          value: "true"
        # Set Felix endpoint to host default action to ACCEPT.
        - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
          value: "ACCEPT"
        # Configure the IP Pool from which Pod IPs will be chosen.
        - name: CALICO_IPV4POOL_CIDR
          value: "192.168.0.0/16"
        - name: CALICO_IPV4POOL_IPIP
          value: "always"
        # Disable IPv6 on Kubernetes.
        - name: FELIX_IPV6SUPPORT
          value: "false"
        # Set MTU for tunnel device used if ipip is enabled
        - name: FELIX_IPINIPMTU
          value: "1440"
        # Set Felix logging to "info"
        - name: FELIX_LOGSEVERITYSCREEN
          value: "info"
        # Auto-detect the BGP IP address.
        - name: IP
          value: ""
        - name: FELIX_HEALTHENABLED
          value: "true"
        securityContext:
          privileged: true
        resources:
          requests:
            cpu: 250m
        livenessProbe:
          httpGet:
            path: /liveness
            port: 9099
          periodSeconds: 10
          initialDelaySeconds: 10
          failureThreshold: 6
        readinessProbe:
          httpGet:
            path: /readiness
            port: 9099
          periodSeconds: 10
        volumeMounts:
        - mountPath: /lib/modules
          name: lib-modules
          readOnly: true
        - mountPath: /var/run/calico
          name: var-run-calico
          readOnly: false
      # This container installs the Calico CNI binaries
      # and CNI network config file on each node.
      - name: install-cni
        image: quay.io/calico/cni:v1.11.4
        command: ["/install-cni.sh"]
        env:
        # The location of the Calico etcd cluster.
        - name: ETCD_ENDPOINTS
          valueFrom:
            configMapKeyRef:
              name: calico-config
              key: etcd_endpoints
        # The CNI network config to install on each node.
        - name: CNI_NETWORK_CONFIG
          valueFrom:
            configMapKeyRef:
              name: calico-config
              key: cni_network_config
        volumeMounts:
        - mountPath: /host/opt/cni/bin
          name: cni-bin-dir
        - mountPath: /host/etc/cni/net.d
          name: cni-net-dir
      volumes:
      # Used by calico/node.
      - name: lib-modules
        hostPath:
          path: /lib/modules
      - name: var-run-calico
        hostPath:
          path: /var/run/calico
      # Used to install CNI.
      - name: cni-bin-dir
        hostPath:
          path: /opt/cni/bin
      - name: cni-net-dir
        hostPath:
          path: /etc/cni/net.d
---
# This manifest deploys the Calico Kubernetes controllers.
# See https://github.com/projectcalico/kube-controllers
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: calico-kube-controllers
  namespace: kube-system
  labels:
    k8s-app: calico-kube-controllers
spec:
  # The controllers can only have a single active instance.
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      name: calico-kube-controllers
      namespace: kube-system
      labels:
        k8s-app: calico-kube-controllers
      annotations:
        # Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
        # reserves resources for critical add-on pods so that they can be rescheduled after
        # a failure. This annotation works in tandem with the toleration below.
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      # The controllers must run in the host network namespace so that
      # they aren't governed by policy that would prevent them from working.
      hostNetwork: true
      tolerations:
      # This taint is set by all kubelets running `--cloud-provider=external`,
      # so we should tolerate it to schedule the Calico pods.
      - key: node.cloudprovider.kubernetes.io/uninitialized
        value: "true"
        effect: NoSchedule
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      # Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
      # This, along with the annotation above, marks this pod as a critical add-on.
      - key: CriticalAddonsOnly
        operator: Exists
      serviceAccountName: calico-kube-controllers
      containers:
      - name: calico-kube-controllers
        image: quay.io/calico/kube-controllers:v1.0.3
        env:
        # The location of the Calico etcd cluster.
        - name: ETCD_ENDPOINTS
          valueFrom:
            configMapKeyRef:
              name: calico-config
              key: etcd_endpoints
        # The location of the Kubernetes API. Use the default Kubernetes
        # service for API access.
        - name: K8S_API
          value: "https://kubernetes.default:443"
        # Choose which controllers to run.
        - name: ENABLED_CONTROLLERS
          value: policy,profile,workloadendpoint,node
        # Since we're running in the host namespace and might not have KubeDNS
        # access, configure the container's /etc/hosts to resolve
        # kubernetes.default to the correct service clusterIP.
        - name: CONFIGURE_ETC_HOSTS
          value: "true"
---
# This deployment turns off the old "policy-controller". It should remain at 0 replicas, and then
# be removed entirely once the new kube-controllers deployment has been deployed above.
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: calico-policy-controller
  namespace: kube-system
  labels:
    k8s-app: calico-policy-controller
spec:
  # Turn this deployment off in favor of the kube-controllers deployment above.
  replicas: 0
  strategy:
    type: Recreate
  template:
    metadata:
      name: calico-policy-controller
      namespace: kube-system
      labels:
        k8s-app: calico-policy-controller
    spec:
      hostNetwork: true
      serviceAccountName: calico-kube-controllers
      containers:
      - name: calico-policy-controller
        image: quay.io/calico/kube-controllers:v1.0.3
        env:
        - name: ETCD_ENDPOINTS
          valueFrom:
            configMapKeyRef:
              name: calico-config
              key: etcd_endpoints
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: calico-cni-plugin
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-cni-plugin
subjects:
- kind: ServiceAccount
  name: calico-cni-plugin
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: calico-cni-plugin
rules:
- apiGroups: [""]
  resources:
  - pods
  - nodes
  verbs:
  - get
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-cni-plugin
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: calico-kube-controllers
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-kube-controllers
subjects:
- kind: ServiceAccount
  name: calico-kube-controllers
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: calico-kube-controllers
rules:
- apiGroups:
  - ""
  - extensions
  resources:
  - pods
  - namespaces
  - networkpolicies
  - nodes
  verbs:
  - watch
  - list
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-kube-controllers
  namespace: kube-system
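
# Example usage (assuming this file is saved as calico.yaml): after running
# `kubeadm init --pod-network-cidr=192.168.0.0/16`, the manifest can be applied with:
#   kubectl apply -f calico.yaml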