helm
$ curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
Downloading https://get.helm.sh/helm-v3.15.4-linux-amd64.tar.gz
Verifying checksum... Done.
Preparing to install helm into /usr/local/bin
helm installed into /usr/local/bin/helm
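a quick sanity check that the client landed on the PATH:
# verify
$ helm version --short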
CNI
calico
$ kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.28.1/manifests/tigera-operator.yaml
$ kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.28.1/manifests/custom-resources.yaml
# verify
$ kubectl get pods -n calico-system
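pods can take a few minutes to settle; one way to block until the operator has rolled everything out (the timeout value here is arbitrary):
$ kubectl wait --for=condition=Ready pods --all -n calico-system --timeout=300s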
calico tools
# calicoctl
$ curl -L https://github.com/projectcalico/calico/releases/download/v3.28.1/calicoctl-linux-amd64 -o calicoctl
$ chmod +x calicoctl
$ sudo mv calicoctl /usr/local/bin/

# kubectl-calico
$ curl -L https://github.com/projectcalico/calico/releases/download/v3.28.1/calicoctl-linux-amd64 -o kubectl-calico
$ chmod +x kubectl-calico
$ sudo mv kubectl-calico /usr/local/bin/
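both binaries should now respond; kubectl-calico is discovered as a kubectl plugin, so it is invoked as `kubectl calico`:
# verify
$ calicoctl version
$ kubectl calico version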
flannel
[!NOTE|label:references:]
- #2695 dial tcp 10.96.0.1:443 timeout
- Analysis of the "dial tcp 10.96.0.1:443: i/o timeout" error when installing k8s add-ons
- kube-flannel.yml
- Kubernetes / Flannel – Failed to list *v1.Service
$ cat /run/flannel/subnet.env
$ kubectl get nodes k8s-node-01 -o jsonpath='{.spec.podCIDR}'
modify
$ kubectl edit cm -n kube-system kube-flannel-cfg
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
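editing the ConfigMap alone does not reconfigure running pods; a sketch, assuming the DaemonSet is named kube-flannel-ds in kube-system (name and namespace vary by manifest version), is to restart it so the pods re-read net-conf.json:
$ kubectl -n kube-system rollout restart daemonset kube-flannel-ds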
check
$ kubectl get nodes -o jsonpath='{.items[*].spec.podCIDR}'

# e.g. with flannel:
$ kubectl get nodes -o jsonpath='{.items[*].spec.podCIDR}'
10.244.21.0/24 10.244.4.0/24 10.244.1.0/24 10.244.10.0/24 10.244.20.0/24 10.244.7.0/24 10.244.5.0/24 10.244.17.0/24 10.244.3.0/24 10.244.0.0/24 10.244.6.0/24 10.244.12.0/24 10.244.13.0/24 10.244.16.0/24 10.244.15.0/24
ingress
ingress-nginx
$ helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
$ helm upgrade --install ingress-nginx ingress-nginx/ingress-nginx --namespace ingress-nginx --create-namespace
# or
$ helm upgrade --install ingress-nginx ingress-nginx \
--repo https://kubernetes.github.io/ingress-nginx \
--namespace ingress-nginx --create-namespace
# check value
$ helm show values ingress-nginx --repo https://kubernetes.github.io/ingress-nginx
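verify the controller is up and note the service endpoints:
$ kubectl -n ingress-nginx get pods,svc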
monitoring
metrics-server
$ helm repo add metrics-server https://kubernetes-sigs.github.io/metrics-server/
$ helm upgrade --install metrics-server metrics-server/metrics-server --namespace monitoring --create-namespace
# without tls: https://github.com/kubernetes-sigs/metrics-server/issues/1221
$ helm upgrade metrics-server metrics-server/metrics-server --set args="{--kubelet-insecure-tls}" --namespace monitoring
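once the metrics APIService is ready, resource metrics should be queryable:
$ kubectl top nodes
$ kubectl top pods -n monitoring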
kubernetes-dashboard
[!NOTE|label:references:]
- Configure Kubernetes Dashboard Web UI hosted with Nginx Ingress Controller
- Enabling Kubernetes Dashboard over HTTPS with RBAC Authorization
- Admin User
- Cluster Admin User
- Read-only User
- Access via kubeconfig
# add kubernetes-dashboard repository
$ helm repo add kubernetes-dashboard https://kubernetes.github.io/dashboard/
# deploy a Helm Release named "kubernetes-dashboard" using the kubernetes-dashboard chart
$ helm upgrade --install kubernetes-dashboard kubernetes-dashboard/kubernetes-dashboard --create-namespace --namespace kubernetes-dashboard
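for a quick local check before wiring up ingress, port-forward the kong proxy that the v7.x chart places in front of the dashboard (the service name follows from the release name above):
$ kubectl -n kubernetes-dashboard port-forward svc/kubernetes-dashboard-kong-proxy 8443:443
# then browse to https://localhost:8443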
ingress for kubernetes-dashboard
# v7.x
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: kubernetes-dashboard
  namespace: monitoring
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
    nginx.ingress.kubernetes.io/secure-backends: "true"
spec:
  ingressClassName: nginx
  tls:
  - hosts:
    - sms-k8s-dashboard.sample.com
    secretName: sample-tls
  rules:
  - host: sms-k8s-dashboard.sample.com
    http:
      paths:
      - path: /
        backend:
          service:
            name: kubernetes-dashboard-kong-proxy
            port:
              number: 443
        pathType: Prefix
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
    nginx.ingress.kubernetes.io/secure-backends: "true"
spec:
  ingressClassName: nginx
  tls:
  - hosts:
    - sms-k8s-dashboard.sample.com
    secretName: sample-tls
  rules:
  - host: sms-k8s-dashboard.sample.com
    http:
      paths:
      - path: /
        backend:
          service:
            # or kubernetes-dashboard-kong-proxy for latest version
            name: kubernetes-dashboard
            port:
              number: 443
        pathType: Prefix
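save whichever manifest matches the deployed chart version and apply it (the filename here is arbitrary):
$ kubectl apply -f kubernetes-dashboard-ingress.yaml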
RBAC
create admin user for kubernetes-dashboard
[!NOTE|label:references:]
- Creating sample user
- using ClusterRole `cluster-admin` for kubernetes-dashboard-admin
- Kubernetes - Dashboard: configure login with username and password
create ServiceAccount in the target namespace
$ kubectl -n monitoring create serviceaccount kubernetes-dashboard-admin
create ClusterRoleBinding for ServiceAccount
# --clusterrole uses the default cluster-admin ClusterRole; --serviceaccount is <namespace>:<name>
$ kubectl create clusterrolebinding kubernetes-dashboard-admin \
    --clusterrole=cluster-admin \
    --serviceaccount=monitoring:kubernetes-dashboard-admin

$ kubectl get clusterrolebinding kubernetes-dashboard-admin -o yaml | grep -v -E 'uid:|resourceVersion:|creationTimestamp:'
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard-admin
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard-admin
  namespace: monitoring
generate token
# admin
$ kubectl -n monitoring create token kubernetes-dashboard-admin

# normal user
$ kubectl -n monitoring create token kubernetes-dashboard-metrics-scraper
manually create a long-lived api token for a serviceaccount
$ kubectl -n monitoring apply -f - <<EOF
apiVersion: v1
kind: Secret
metadata:
  name: kubernetes-dashboard-admin-token
  namespace: monitoring
  annotations:
    kubernetes.io/service-account.name: kubernetes-dashboard-admin
type: kubernetes.io/service-account-token
EOF

# get token by service account
$ kubectl -n monitoring get secrets -o jsonpath="{.items[?(@.metadata.annotations['kubernetes\.io/service-account\.name']=='kubernetes-dashboard-admin')].data.token}" | base64 -d
# or
$ kubectl -n monitoring get secrets -o jsonpath="{.items[?(@.metadata.annotations.kubernetes\.io/service-account\.name=='kubernetes-dashboard-admin')].data.token}" | base64 -d
# or get the token via describe
$ kubectl -n monitoring describe secrets $(kubectl -n monitoring get secret | grep kubernetes-dashboard-admin | awk '{print $1}') | grep 'token:' | awk '{print $2}'
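a hedged smoke test for the extracted token; the API server address below is an assumption, substitute your own:
$ TOKEN=$(kubectl -n monitoring get secret kubernetes-dashboard-admin-token -o jsonpath='{.data.token}' | base64 -d)
$ curl -sk -H "Authorization: Bearer ${TOKEN}" https://127.0.0.1:6443/api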
or modify the ClusterRole kubernetes-dashboard-metrics-scraper manually

[!TIP]
for v7.x

clusterrole: kubernetes-dashboard-metrics-scraper
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    meta.helm.sh/release-name: kubernetes-dashboard
    meta.helm.sh/release-namespace: monitoring
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: kubernetes-dashboard
    helm.sh/chart: kubernetes-dashboard-7.5.0
  name: kubernetes-dashboard-metrics-scraper
rules:
- apiGroups:
  - '*'
  resources:
  - '*'
  verbs:
  - '*'
- original
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    meta.helm.sh/release-name: kubernetes-dashboard
    meta.helm.sh/release-namespace: monitoring
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: kubernetes-dashboard
    helm.sh/chart: kubernetes-dashboard-7.5.0
  name: kubernetes-dashboard-metrics-scraper
rules:
- apiGroups:
  - metrics.k8s.io
  resources:
  - pods
  - nodes
  verbs:
  - get
  - list
  - watch
clusterrolebinding: kubernetes-dashboard-metrics-scraper
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    meta.helm.sh/release-name: kubernetes-dashboard
    meta.helm.sh/release-namespace: monitoring
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: kubernetes-dashboard
    helm.sh/chart: kubernetes-dashboard-7.5.0
  name: kubernetes-dashboard-metrics-scraper
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard-metrics-scraper
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard-metrics-scraper
  namespace: monitoring
serviceaccount: kubernetes-dashboard-metrics-scraper
apiVersion: v1
kind: ServiceAccount
metadata:
  annotations:
    meta.helm.sh/release-name: kubernetes-dashboard
    meta.helm.sh/release-namespace: monitoring
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: kubernetes-dashboard
    helm.sh/chart: kubernetes-dashboard-7.5.0
  name: kubernetes-dashboard-metrics-scraper
  namespace: monitoring
generate token
$ kubectl -n monitoring create token kubernetes-dashboard-metrics-scraper
ey**********************WAA
older version
clusterrole
$ kubectl get clusterrole kubernetes-dashboard -o yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
rules:
- apiGroups:
  - '*'
  resources:
  - '*'
  verbs:
  - '*'
clusterrolebinding
$ kubectl -n kube-system get clusterrolebindings kubernetes-dashboard -o yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system
serviceaccount
$ kubectl -n kube-system get sa kubernetes-dashboard -o yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
generate token
$ kubectl -n kube-system create token kubernetes-dashboard
ey**********************WAA
prometheus stack
$ helm repo add prometheus-stack https://prometheus-community.github.io/helm-charts
"prometheus-stack" has been added to your repositories
$ helm upgrade --install prometheus-stack prometheus-stack/kube-prometheus-stack --namespace monitoring
# if node-exporter has been deployed by kubespray, assign another port for kube-prometheus-stack
$ helm upgrade --install prometheus-stack prometheus-stack/kube-prometheus-stack \
--namespace monitoring \
--set prometheus-node-exporter.service.port=9200
Release "prometheus-stack" has been upgraded. Happy Helming!
NAME: prometheus-stack
LAST DEPLOYED: Tue Sep 10 22:53:40 2024
NAMESPACE: monitoring
STATUS: deployed
REVISION: 2
NOTES:
kube-prometheus-stack has been installed. Check its status by running:
kubectl --namespace monitoring get pods -l "release=prometheus-stack"
Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create & configure Alertmanager and Prometheus instances using the Operator.
ingress
grafana
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: prometheus-stack-grafana
  namespace: monitoring
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/secure-backends: "true"
spec:
  ingressClassName: nginx
  tls:
  - hosts:
    - sms-k8s-grafana.sample.com
    secretName: sample-tls
  rules:
  - host: sms-k8s-grafana.sample.com
    http:
      paths:
      - path: /
        backend:
          service:
            name: prometheus-stack-grafana
            port:
              number: 80
        pathType: Prefix
prometheus
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: prometheus
  namespace: monitoring
  annotations:
    kubernetes.io/ingress.class: "nginx"
spec:
  ingressClassName: nginx
  tls:
  - hosts:
    - sms-k8s-prometheus.sample.com
    secretName: sample-tls
  rules:
  - host: sms-k8s-prometheus.sample.com
    http:
      paths:
      - path: /
        backend:
          service:
            name: prometheus-stack-kube-prom-prometheus
            port:
              number: 9090
        pathType: Prefix
alertmanager
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: alertmanager
  namespace: monitoring
  annotations:
    kubernetes.io/ingress.class: "nginx"
spec:
  ingressClassName: nginx
  tls:
  - hosts:
    - sms-k8s-alertmgr.sample.com
    secretName: sample-tls
  rules:
  - host: sms-k8s-alertmgr.sample.com
    http:
      paths:
      - path: /
        backend:
          service:
            name: prometheus-stack-kube-prom-alertmanager
            port:
              number: 9093
        pathType: Prefix
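apply the three manifests once saved; the filenames here are arbitrary:
$ kubectl apply -f grafana-ingress.yaml -f prometheus-ingress.yaml -f alertmanager-ingress.yaml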
admin account for grafana
# account
$ kubectl get secret --namespace monitoring prometheus-stack-grafana -o jsonpath='{.data.admin-user}' | base64 -d; echo
# password
$ kubectl get secret --namespace monitoring prometheus-stack-grafana -o jsonpath='{.data.admin-password}' | base64 -d; echo
grafana
$ helm repo add grafana https://grafana.github.io/helm-charts
$ helm repo list
NAME URL
kubernetes-dashboard https://kubernetes.github.io/dashboard/
ingress-nginx https://kubernetes.github.io/ingress-nginx
grafana https://grafana.github.io/helm-charts
$ helm repo update
$ helm search repo grafana/grafana
$ helm install grafana grafana/grafana --namespace monitoring --create-namespace
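the standalone chart generates its own admin secret, named after the release:
$ kubectl get secret --namespace monitoring grafana -o jsonpath='{.data.admin-password}' | base64 -d; echo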
tls
import tls
$ sudo kubectl -n kube-system create secret tls sample-tls --cert star_sample_com.full.crt --key star_sample_com.key --dry-run=client -o yaml > kube-system.sample-tls.yaml
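then apply the generated manifest:
$ kubectl apply -f kube-system.sample-tls.yaml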
copy tls
$ kubectl --namespace=kube-system get secrets sample-tls -o yaml | grep -v '^\s*namespace:\s' | kubectl apply --namespace=monitoring -f -
secret/sample-tls created
$ kubectl --namespace=kube-system get secrets sample-tls -o yaml | grep -v '^\s*namespace:\s' | kubectl apply --namespace=kubernetes-dashboard -f -
secret/sample-tls created
$ kubectl --namespace=kube-system get secrets sample-tls -o yaml | grep -v '^\s*namespace:\s' | kubectl apply --namespace=ingress-nginx -f -
secret/sample-tls created
$ kubectl --namespace=kube-system get secrets sample-tls -o yaml | grep -v '^\s*namespace:\s' | kubectl apply --namespace=default -f -
secret/sample-tls created
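confirm the secret now exists in every target namespace:
$ kubectl get secrets --all-namespaces --field-selector metadata.name=sample-tls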