Networking
About Flannel: https://blog.laputa.io/kubernetes-flannel-networking-6a1cb1f8ec7c
A ServiceAccount is required to log in to the Dashboard. A ClusterRoleBinding is used to assign the new ServiceAccount (admin-user) the cluster-admin role on the cluster.
cat <<EOF | kubectl create -f -
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system
EOF
This means the account can control all aspects of Kubernetes. With ClusterRoleBindings and RBAC, different levels of permission can be defined based on security requirements, as sketched below. More information on creating a user for the Dashboard can be found in the Dashboard documentation.
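As an illustration of a more restricted permission level, a namespaced Role and RoleBinding could grant read-only access to pods in a single namespace instead of cluster-admin. This is a minimal sketch; the pod-reader and read-pods names are hypothetical, chosen only for this example:
cat <<EOF | kubectl create -f -
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: pod-reader        # hypothetical name, for illustration only
  namespace: default
rules:
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: read-pods         # hypothetical name, for illustration only
  namespace: default
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: pod-reader
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system
EOF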
Once the ServiceAccount has been created, the login token can be found with:
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
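The token is pasted into the Token field on the Dashboard login screen. One way to reach the Dashboard, assuming it is deployed as the kubernetes-dashboard service in the kube-system namespace, is through the API server proxy:
kubectl proxy
The Dashboard is then available at http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/.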
cluster-ip.yaml
apiVersion: v1
kind: Service
metadata:
  name: webapp1-clusterip-svc
  labels:
    app: webapp1-clusterip
spec:
  ports:
  - port: 80
  selector:
    app: webapp1-clusterip
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: webapp1-clusterip-deployment
spec:
  replicas: 2
  selector:
    matchLabels:
      app: webapp1-clusterip
  template:
    metadata:
      labels:
        app: webapp1-clusterip
    spec:
      containers:
      - name: webapp1-clusterip-pod
        image: katacoda/docker-http-server:latest
        ports:
        - containerPort: 80
---
export CLUSTER_IP=$(kubectl get services/webapp1-clusterip-svc -o go-template='{{.spec.clusterIP}}')
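The ClusterIP is reachable only from within the cluster, for example from a node or another pod. As a quick sketch, assuming the katacoda/docker-http-server containers answer plain HTTP on port 80, the service can be tested with curl; repeating the request should show responses load-balanced across the two replicas:
curl http://$CLUSTER_IP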
clusterip-target.yaml
apiVersion: v1
kind: Service
metadata:
  name: webapp1-clusterip-targetport-svc
  labels:
    app: webapp1-clusterip-targetport
spec:
  ports:
  - port: 8080
    targetPort: 80
  selector:
    app: webapp1-clusterip-targetport
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: webapp1-clusterip-targetport-deployment
spec:
  replicas: 2
  selector:
    matchLabels:
      app: webapp1-clusterip-targetport
  template:
    metadata:
      labels:
        app: webapp1-clusterip-targetport
    spec:
      containers:
      - name: webapp1-clusterip-targetport-pod
        image: katacoda/docker-http-server:latest
        ports:
        - containerPort: 80
---
export CLUSTER_IP=$(kubectl get services/webapp1-clusterip-targetport-svc -o go-template='{{.spec.clusterIP}}')
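Here the service listens on port 8080 and forwards to targetPort 80 on the pods, so the test (same HTTP assumption as before) uses the service port:
curl http://$CLUSTER_IP:8080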
nodeport.yaml
apiVersion: v1
kind: Service
metadata:
  name: webapp1-nodeport-svc
  labels:
    app: webapp1-nodeport
spec:
  type: NodePort
  ports:
  - port: 80
    nodePort: 30080
  selector:
    app: webapp1-nodeport
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: webapp1-nodeport-deployment
spec:
  replicas: 2
  selector:
    matchLabels:
      app: webapp1-nodeport
  template:
    metadata:
      labels:
        app: webapp1-nodeport
    spec:
      containers:
      - name: webapp1-nodeport-pod
        image: katacoda/docker-http-server:latest
        ports:
        - containerPort: 80
---
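A NodePort service is additionally exposed on port 30080 of every node. A minimal test sketch, assuming the first node's InternalIP is reachable from the shell running curl:
export NODE_IP=$(kubectl get nodes -o jsonpath='{.items[0].status.addresses[?(@.type=="InternalIP")].address}')
curl http://$NODE_IP:30080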
externalip.yaml
apiVersion: v1
kind: Service
metadata:
  name: webapp1-externalip-svc
  labels:
    app: webapp1-externalip
spec:
  ports:
  - port: 80
  externalIPs:
  - 172.17.0.82
  selector:
    app: webapp1-externalip
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: webapp1-externalip-deployment
spec:
  replicas: 2
  selector:
    matchLabels:
      app: webapp1-externalip
  template:
    metadata:
      labels:
        app: webapp1-externalip
    spec:
      containers:
      - name: webapp1-externalip-pod
        image: katacoda/docker-http-server:latest
        ports:
        - containerPort: 80
---
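An externalIPs service is reachable on the listed address, which must route to one of the cluster nodes; in this example 172.17.0.82 is assumed to be a node's IP:
curl http://172.17.0.82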
cloudprovider.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-keepalived-vip
  namespace: kube-system
spec:
  selector:
    matchLabels:
      name: kube-keepalived-vip
  template:
    metadata:
      labels:
        name: kube-keepalived-vip
    spec:
      hostNetwork: true
      containers:
      - image: gcr.io/google_containers/kube-keepalived-vip:0.9
        name: kube-keepalived-vip
        imagePullPolicy: Always
        securityContext:
          privileged: true
        volumeMounts:
        - mountPath: /lib/modules
          name: modules
          readOnly: true
        - mountPath: /dev
          name: dev
        # use the downward API to expose the pod's name and namespace
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        args:
        - --services-configmap=kube-system/vip-configmap
        # unicast uses the IPs of the nodes instead of multicast;
        # this is useful when running in cloud providers (like AWS)
        #- --use-unicast=true
      volumes:
      - name: modules
        hostPath:
          path: /lib/modules
      - name: dev
        hostPath:
          path: /dev
      nodeSelector:
      # type: worker # adjust this to match your worker nodes
---
## We also create an empty ConfigMap to hold our config
apiVersion: v1
kind: ConfigMap
metadata:
  name: vip-configmap
  namespace: kube-system
data:
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: keepalived-cloud-provider
  name: keepalived-cloud-provider
  namespace: kube-system
spec:
  replicas: 1
  revisionHistoryLimit: 2
  selector:
    matchLabels:
      app: keepalived-cloud-provider
  strategy:
    type: RollingUpdate
  template:
    metadata:
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ""
        scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
      labels:
        app: keepalived-cloud-provider
    spec:
      containers:
      - name: keepalived-cloud-provider
        image: quay.io/munnerz/keepalived-cloud-provider:0.0.1
        imagePullPolicy: IfNotPresent
        env:
        - name: KEEPALIVED_NAMESPACE
          value: kube-system
        - name: KEEPALIVED_CONFIG_MAP
          value: vip-configmap
        - name: KEEPALIVED_SERVICE_CIDR
          value: 10.10.0.0/26 # pick a CIDR that is explicitly reserved for keepalived
        volumeMounts:
        - name: certs
          mountPath: /etc/ssl/certs
        resources:
          requests:
            cpu: 200m
        livenessProbe:
          httpGet:
            path: /healthz
            port: 10252
            host: 127.0.0.1
          initialDelaySeconds: 15
          timeoutSeconds: 15
          failureThreshold: 8
      volumes:
      - name: certs
        hostPath:
          path: /etc/ssl/certs
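Before creating LoadBalancer services, it is worth verifying that both keepalived components came up; this check uses only standard kubectl commands:
kubectl -n kube-system get daemonset kube-keepalived-vip
kubectl -n kube-system get deployment keepalived-cloud-provider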
loadbalancer.yaml
apiVersion: v1
kind: Service
metadata:
  name: webapp1-loadbalancer-svc
  labels:
    app: webapp1-loadbalancer
spec:
  type: LoadBalancer
  ports:
  - port: 80
  selector:
    app: webapp1-loadbalancer
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: webapp1-loadbalancer-deployment
spec:
  replicas: 2
  selector:
    matchLabels:
      app: webapp1-loadbalancer
  template:
    metadata:
      labels:
        app: webapp1-loadbalancer
    spec:
      containers:
      - name: webapp1-loadbalancer-pod
        image: katacoda/docker-http-server:latest
        ports:
        - containerPort: 80
---
export LoadBalancerIP=$(kubectl get services/webapp1-loadbalancer-svc -o go-template='{{(index .status.loadBalancer.ingress 0).ip}}')
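Once keepalived has allocated an address from KEEPALIVED_SERVICE_CIDR, the service can be tested through that IP (same HTTP assumption as the earlier examples):
curl http://$LoadBalancerIP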