- filter via `--field-selector`
- sort via `--sort-by`
- run & create
- list
  - watch pods with timestamp
  - list pod status with timestamp
  - list pod with nodename
  - list all ready pods
  - list error status pods
  - list all pods statuses only
  - list running images
  - list running pods
  - list pods on nodes
  - list all containers
  - list container images by pod
  - get port enabled in pod
  - get podIP
  - get the first deploy name in namespace
  - get all deploy names
  - item.metadata.name
- output
- management
- resource management
- troubleshooting
[!NOTE|label:pod phase]

| VALUE | DESCRIPTION |
|-------|-------------|
| `Pending` | The Pod has been accepted by the Kubernetes cluster, but one or more of the containers has not been set up and made ready to run. |
| `Running` | The Pod has been bound to a node, and all of the containers have been created. |
| `Succeeded` | All containers in the Pod have terminated in success, and will not be restarted. |
| `Failed` | All containers in the Pod have terminated, and at least one container has terminated in failure. |
| `Unknown` | For some reason the state of the Pod could not be obtained. |
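to tally pods per phase across the cluster, a minimal sketch assuming `jq` is installed:
$ kubectl get po --all-namespaces -o json |
  jq -r '.items[].status.phase' |
  sort |
  uniq -c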
filter via --field-selector
list all `Failed` pods
$ kubectl -n <namespace> get po \
--field-selector status.phase=Failed
filter via Node Name
$ kubectl -n <namespace> get po \
[-o wide] \
--field-selector spec.nodeName=master-node01
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
devops-jenkins-659f4c6d44-d2w76 1/1 Running 0 2d22h **.***.*.** master-node01 <none> <none>
filter all pods running on a particular node
$ kubectl --all-namespaces get po \
  [-o wide] \
  --field-selector spec.nodeName=<node_name>
filter all pods running on a particular node via --template
$ kubectl -n <namespace> get po \
  --template '{{range .items}}{{if eq .spec.nodeName "<nodeName>"}}{{.metadata.name}}{{"\n"}}{{end}}{{end}}'
- via api
$ curl --cacert ca.crt \
       --cert apiserver.crt \
       --key apiserver.key \
       https://<server>:<port>/api/v1/namespaces/<namespace>/pods?fieldSelector=spec.nodeName%3Dsomenodename
filter via json
$ kubectl get po -o json |
jq -r '.items | sort_by(.spec.nodeName)[] | [.spec.nodeName,.metadata.name] | @tsv'
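the same idea can count pods per node; a sketch, again assuming `jq`:
$ kubectl get po -o json |
  jq -r '.items | group_by(.spec.nodeName)[] | "\(.[0].spec.nodeName)\t\(length)"'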
list pod details for failed pods
$ ns='my-namespace'
$ keyword='tester'
$ for p in $(kubectl -n ${ns} get po --field-selector status.phase=Failed -o=name | /bin/grep ${keyword}); do
    echo "--- ${p} --- ";
    kubectl -n ${ns} describe ${p} | grep -E 'Annotations|Status|Reason|Message';
  done
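to pull the per-container termination reason directly, a sketch via jsonpath (the fields are empty for containers that never terminated):
$ kubectl -n <namespace> get po <pod-name> \
  -o jsonpath='{range .status.containerStatuses[*]}{.name}{"\t"}{.state.terminated.reason}{"\n"}{end}'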
sort via --sort-by
sorting pods by nodeName
$ kubectl -n <namespace> get po \
-o wide \
--sort-by="{.spec.nodeName}"
sort pods by restartCount
$ kubectl -n <namespace> get po --sort-by="{.status.containerStatuses[:1].restartCount}"
sort by restart count
$ kubectl -n <namespace> get pods --sort-by=.status.containerStatuses[0].restartCount
sort via start time
$ kubectl -n <namespace> get po \
--sort-by=.status.startTime
get the newest pod
`-1:` means the last item in the list; `--sort-by` sorts ascending by creation time, so the last pod is the most recently created
$ kubectl -n <namespace> get pods \
  --sort-by=.metadata.creationTimestamp \
  -o jsonpath='{.items[-1:].metadata.name}'
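conversely, index `0` is the first item in the sorted list, i.e. the oldest pod:
$ kubectl -n <namespace> get pods \
  --sort-by=.metadata.creationTimestamp \
  -o jsonpath='{.items[0].metadata.name}'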
sort via creation time
$ kubectl -n <namespace> get pods \
--sort-by=.metadata.creationTimestamp
run & create
pod
# create and login
$ kubectl run debug --image=busybox -it --rm
# create and sleep
$ kubectl run debug --image=busybox -- sleep infinity
pod/debug created
# created with specific nodeSelector
$ kubectl run debug \
  --image=busybox \
  --overrides='{"spec": { "nodeSelector": {"kubernetes.io/hostname": "k8s-node-01"} }}' \
  -- sleep infinity
$ kubectl get pod
NAME READY STATUS RESTARTS AGE
debug 1/1 Running 0 6s
# delete
$ kubectl delete pod/debug
pod "debug" deleted
# attach
$ kubectl attach <pod-name> -c <container-name> -it
# i.e.:
$ kubectl attach debug -c debug -it
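the same debug pod can also be created declaratively; a minimal sketch of an equivalent manifest applied via stdin:
$ kubectl apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: debug
spec:
  containers:
    - name: debug
      image: busybox
      command: [ 'sleep', 'infinity' ]
EOF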
deploy
# format
$ kubectl create deployment <name> --image=<image:tag> [--replicas=n]
# i.e.:
$ kubectl create deployment nginx --image=nginx --replicas=2
deployment.apps/nginx created
# optional
$ kubectl scale deployment nginx --replicas=3
$ kubectl get pod
NAME READY STATUS RESTARTS AGE
nginx-6799fc88d8-6clhp 1/1 Running 0 9s
nginx-6799fc88d8-cjz56 1/1 Running 0 9s
# delete
$ kubectl delete deployment nginx
deployment.apps "nginx" deleted
svc
$ kubectl expose deployment <name> --port=80 --target-port=9376
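to verify what the service exposes, the cluster IP and port can be read back via jsonpath; for example:
$ kubectl get svc <name> -o jsonpath='{.spec.clusterIP}:{.spec.ports[0].port}{"\n"}'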
list
watch pods with timestamp
[!NOTE|label:references:]
$ kubectl get pods --watch-only |
  while read -r line ; do echo -e "$(date +"%Y-%m-%d %H:%M:%S.%3N")\t pods\t $line" ; done
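on newer kubectl (roughly v1.16+), `--output-watch-events` shows the watch event type (ADDED/MODIFIED/DELETED) per line, which complements the manual timestamping above:
$ kubectl get pods --watch --output-watch-events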
list pod status with timestamp
[!NOTE|label:references:]
via events
$ kubectl get events \
  -o custom-columns=FirstSeen:.firstTimestamp,LastSeen:.lastTimestamp,Count:.count,From:.source.component,Type:.type,Reason:.reason,Message:.message \
  --field-selector involvedObject.kind=Pod,involvedObject.name=<pod-name>
via pod json
$ kubectl get po <pod-name> -o json | jq -r '.status.conditions'
[
  {
    "lastProbeTime": null,
    "lastTransitionTime": "2023-09-28T08:15:33Z",
    "status": "True",
    "type": "Initialized"
  },
  {
    "lastProbeTime": null,
    "lastTransitionTime": "2023-11-23T02:33:09Z",
    "message": "containers with unready status: [config-reload]",
    "reason": "ContainersNotReady",
    "status": "False",
    "type": "Ready"
  },
  {
    "lastProbeTime": null,
    "lastTransitionTime": "2023-11-23T02:33:09Z",
    "message": "containers with unready status: [config-reload]",
    "reason": "ContainersNotReady",
    "status": "False",
    "type": "ContainersReady"
  },
  {
    "lastProbeTime": null,
    "lastTransitionTime": "2023-09-28T08:15:16Z",
    "status": "True",
    "type": "PodScheduled"
  }
]
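a compact tabular view of the same conditions, assuming `jq`:
$ kubectl get po <pod-name> -o json |
  jq -r '.status.conditions[] | [.type, .status, .lastTransitionTime] | @tsv'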
list pod with nodename
filter
$ kubectl get po --all-namespaces -o wide --field-selector spec.nodeName=<nodeName>
-
$ kubectl get pods \
  --all-namespaces \
  --output 'jsonpath={range .items[*]}{.spec.nodeName}{"\t"}{.metadata.namespace}{"\t"}{.metadata.name}{"\n"}{end}'
-
$ kubectl get pod \
  --all-namespaces \
  -o=custom-columns=NAME:.metadata.name,STATUS:.status.phase,NODE:.spec.nodeName
-
$ kubectl get pod \
  --all-namespaces \
  -o json |
  jq '.items[] | .spec.nodeName + " " + .status.podIP'
list all ready pods
[!NOTE|label:references]
$ kubectl get pods --all-namespaces -o json |
  jq -r '.items[]
         | select(any(.status.conditions[]?; .type == "Ready" and .status == "True"))
         | .metadata.namespace + "\t" + .metadata.name'
list all `ImagePullBackOff` pods
[!NOTE|label:references]
$ kubectl get pod --all-namespaces \
  -o=json |
  jq '.items[] | select(any( .status.containerStatuses[]; .state.waiting.reason=="ImagePullBackOff")) | .metadata.name'

# or
$ kubectl get pod --all-namespaces \
  -o jsonpath='{.items[?(@.status.containerStatuses[*].state.waiting.reason=="ImagePullBackOff")].metadata.name}'
list error status pods
$ kubectl -n <namespace> get po \
--field-selector status.phase=Failed
list and delete all error status pods
$ for i in $(kubectl get po --no-headers --all-namespaces --field-selector status.phase=Failed -o=custom-columns=NAMESPACE:.metadata.namespace | sort -u); do
kubectl -n $i delete po --field-selector status.phase=Failed --force --grace-period=0
done
or
$ kubectl -n <namespace> delete po \
  --field-selector status.phase=Failed
-
$ kubectl -n <namespace> get po \
  --field-selector=status.phase!=Running
-
$ kubectl --all-namespaces get po \
  --field-selector=status.phase!=Running,status.phase!=Succeeded
-
$ kubectl get po --all-namespaces -o json |
  jq -r '.items[]
         | select(.status.phase != "Running"
             or ([ .status.conditions[] | select(.type == "Ready" and .status == "False") ] | length) == 1)
         | .metadata.namespace + "/" + .metadata.name'
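before force-deleting, a client-side dry run (kubectl v1.18+) can preview which pods would match:
$ kubectl -n <namespace> delete po \
  --field-selector status.phase=Failed \
  --dry-run=client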
list all pods statuses only
$ kubectl -n <namespace> get po \
-o=jsonpath='{.items[*].status.phase}'
Running Running Running Running Running Running Running Running Running
list running images
$ kubectl -n <namespace> get po -o jsonpath="{..image}" |
tr -s '[[:space:]]' '\n' |
sort |
uniq -c
2 gcr.io/kubernetes-helm/tiller:v2.14.3
6 k8s.gcr.io/coredns:1.2.2
6 k8s.gcr.io/etcd:3.2.24
6 k8s.gcr.io/kube-apiserver:v1.12.3
6 k8s.gcr.io/kube-controller-manager:v1.12.3
30 k8s.gcr.io/kube-proxy:v1.12.3
6 k8s.gcr.io/kube-scheduler:v1.12.3
4 k8s.gcr.io/metrics-server-amd64:v0.3.6
30 k8s.gcr.io/node-problem-detector:v0.8.1
2 kubernetesui/dashboard:v2.0.0-beta1
4 kubernetesui/metrics-scraper:v1.0.1
60 quay.io/coreos/flannel:v0.10.0-amd64
list running pods
$ kubectl -n <namespace> get po \
-o=custom-columns=NAME:.metadata.name,STATUS:.status.phase,NODE:.spec.nodeName
NAME STATUS NODE
coredns-59dd98b545-7t25l Running k8s-node01
coredns-59dd98b545-lnklx Running k8s-node02
coredns-59dd98b545-ltj5p Running k8s-node03
...
list pods on nodes
specific nodes
$ kubectl get pods --all-namespaces \
  -o wide \
  --field-selector spec.nodeName=<node>
all nodes
$ kubectl get pods -o wide \
  --sort-by="{.spec.nodeName}"
via label filter
$ for n in $(kubectl get nodes -l your_label_key=your_label_value --no-headers | cut -d ' ' -f1); do
    kubectl get pods --all-namespaces --no-headers --field-selector spec.nodeName=${n};
  done
via API
$ curl --cacert ca.crt \
       --cert apiserver.crt \
       --key apiserver.key \
       https://<server>:<port>/api/v1/namespaces/<namespace>/pods?fieldSelector=spec.nodeName%3Dsomenodename
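`kubectl describe node` also lists the non-terminated pods on a node; a sketch that trims the rest of the output (section headings may vary between versions):
$ kubectl describe node <node_name> | sed -n '/Non-terminated Pods/,/Allocated resources/p'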
list all containers
[!NOTE|label:references:]
- list container images
$ kubectl get po -o jsonpath="{.items[*].spec['initContainers', 'containers'][*].image}" |
tr -s '[[:space:]]' '\n' |
sort |
uniq -c
2 jenkins:2.452.2-lts-jdk17
2 docker.io/kiwigrid/k8s-sidecar:1.27.4
# or
$ kubectl get pods -o go-template --template="{{range .items}}{{range .spec.containers}}{{.image}} {{end}}{{end}}"
jenkins:2.452.2-lts-jdk17 docker.io/kiwigrid/k8s-sidecar:1.27.4
- list all container images in all namespaces
$ kubectl get pods \
  --all-namespaces \
  -o jsonpath="{.items[*].spec.containers[*].image}" |
  tr -s '[[:space:]]' '\n' |
  sort |
  uniq -c

# or
$ kubectl get pods \
  --all-namespaces \
  -o jsonpath="{.items[*].spec.containers[*].image}"
list container images by pod
$ kubectl get pods \
--all-namespaces \
-o=jsonpath='{range .items[*]}{"\n"}{.metadata.name}{":\t"}{range .spec.containers[*]}{.image}{", "}{end}{end}' |
sort
list container names
$ kubectl get po -o jsonpath="{.items[*].spec['initContainers', 'containers'][*].name}" | tr -s '[[:space:]]' '\n' | sort | uniq -c 1 config-reload 1 config-reload-init 1 init 1 jenkins
list container image by pod
$ kubectl get po -o jsonpath='{range .items[*]}{"\n"}{.metadata.name}{":\t"}{range .spec.containers[*]}{.image}{", "}{end}{end}' | sort
staging-jenkins-0:    jenkins:2.452.2-lts-jdk17, docker.io/kiwigrid/k8s-sidecar:1.27.4,
get port enabled in pod
$ kubectl get po jenkins-0 -o jsonpath='{.spec.containers[*].ports[*]}'
{"containerPort":8080,"name":"http","protocol":"TCP"} {"containerPort":50000,"name":"agent-listener","protocol":"TCP"} {"containerPort":50017,"name":"sshd-listener","protocol":"TCP"}
# or
$ kubectl get po jenkins-0 -o jsonpath="{range .spec.containers[*].ports[*]}{@.*}{'\n'}{end}" | column -t
http 8080 TCP
agent-listener 50000 TCP
sshd-listener 50017 TCP
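a custom-columns variant of the same query, listing container ports per pod:
$ kubectl get po -o custom-columns='NAME:.metadata.name,PORTS:.spec.containers[*].ports[*].containerPort'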
get podIP
$ kubectl get po <pod-name> -o go-template='{{.status.podIP}}{{"\n"}}'
10.244.140.106
get the first deploy name in namespace
$ kubectl -n <namespace> get deploy -o=jsonpath={.items[0].metadata.name}
get all deploy names
$ kubectl -n <namespace> get deploy -o=jsonpath='{.items[*].metadata.name}'
item.metadata.name
- list via jsonpath={.items..metadata.name}
$ kubectl -n kube-system get po --output=jsonpath={.items..metadata.name}
coredns-c7ddbcccb-5cj5z coredns-c7ddbcccb-lxsw6 coredns-c7ddbcccb-prjfk ...
- or
$ kubectl -n kube-system get po -o jsonpath="{range .items[*]}{@.metadata.name}{'\n'}{end}" | head -10
coredns-c7ddbcccb-5cj5z
coredns-c7ddbcccb-lxsw6
coredns-c7ddbcccb-prjfk
etcd-node03
etcd-node04
etcd-node01
kube-apiserver-node03
kube-apiserver-node04
kube-apiserver-node01
kube-controller-manager-node03
output
-o name
$ kubectl -n kube-system get pods -o name | head
pod/coredns-c7ddbcccb-5cj5z
pod/coredns-c7ddbcccb-lxsw6
pod/coredns-c7ddbcccb-prjfk
pod/etcd-node03
pod/etcd-node04
pod/etcd-node01
pod/kube-apiserver-node03
pod/kube-apiserver-node04
pod/kube-apiserver-node01
pod/kube-controller-manager-node03
--template
$ kubectl -n kube-system get pods \
-o go-template \
--template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}' |
head
coredns-c7ddbcccb-5cj5z
coredns-c7ddbcccb-lxsw6
coredns-c7ddbcccb-prjfk
etcd-node03
etcd-node04
etcd-node01
kube-apiserver-node03
kube-apiserver-node04
kube-apiserver-node01
kube-controller-manager-node03
or
$ kubectl -n kube-system get pods \
  --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}' |
  head
coredns-c7ddbcccb-5cj5z
coredns-c7ddbcccb-lxsw6
coredns-c7ddbcccb-prjfk
etcd-node03
etcd-node04
etcd-node01
kube-apiserver-node03
kube-apiserver-node04
kube-apiserver-node01
kube-controller-manager-node03
custom-columns
$ kubectl get po --all-namespaces \
  -o=custom-columns='NAMESPACE:.metadata.namespace,NAME:.metadata.name,NODE:.spec.nodeName,IPS:.status.podIPs,PHASE:.status.phase,RESTARTS:.status.containerStatuses[0].restartCount,IMAGE:.spec.containers[0].image,CREATED:.metadata.creationTimestamp,LABELS:.metadata.labels,QOS-CLASS:.status.qosClass'
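column specs can also live in a file via `-o custom-columns-file`; a sketch with a hypothetical `columns.tpl` (header row, then a row of jsonpath expressions):
$ cat columns.tpl
NAME            PHASE          NODE
metadata.name   status.phase   spec.nodeName
$ kubectl get po -o custom-columns-file=columns.tpl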
list all images running in a particular namespace
$ kubectl -n <namespace> get po \
--output=custom-columns="NAME:.metadata.name,IMAGE:.spec.containers[*].image"
- list all images excluding 'k8s.gcr.io/coredns:1.6.2'
$ kubectl --all-namespaces get pods \
  -o=custom-columns='DATA:spec.containers[?(@.image!="k8s.gcr.io/coredns:1.6.2")].image'
list via -o custom-columns=":metadata.name"
$ kubectl -n kube-system get pods -o custom-columns=":metadata.name" | head
coredns-c7ddbcccb-5cj5z
coredns-c7ddbcccb-lxsw6
coredns-c7ddbcccb-prjfk
etcd-node03
etcd-node04
etcd-node01
kube-apiserver-node03
kube-apiserver-node04
kube-apiserver-node01
QOS
$ kubectl -n kube-system get po \
-o custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,QOS-CLASS:.status.qosClass
NAME NAMESPACE QOS-CLASS
coredns-59dd98b545-7t25l kube-system Burstable
coredns-59dd98b545-lnklx kube-system Burstable
coredns-59dd98b545-ltj5p kube-system Burstable
etcd-k8s-node01 kube-system BestEffort
etcd-k8s-node02 kube-system BestEffort
etcd-k8s-node03 kube-system BestEffort
kube-apiserver-k8s-node01 kube-system Burstable
kube-apiserver-k8s-node02 kube-system Burstable
kube-apiserver-k8s-node03 kube-system Burstable
kube-controller-manager-k8s-node01 kube-system Burstable
kube-controller-manager-k8s-node02 kube-system Burstable
kube-controller-manager-k8s-node03 kube-system Burstable
kube-flannel-ds-amd64-627bn kube-system Guaranteed
kube-flannel-ds-amd64-7hdqd kube-system Guaranteed
kube-flannel-ds-amd64-b4th7 kube-system Guaranteed
...
management
execute in pod
$ kubectl -n devops exec -it devops-jenkins-659f4c6d44-d2w76 -- /bin/bash
jenkins@devops-jenkins-659f4c6d44-d2w76:/$ echo $HOME
/var/jenkins_home
jenkins@devops-jenkins-659f4c6d44-d2w76:/$ hostname
devops-jenkins-659f4c6d44-d2w76
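a single command can also run non-interactively, without allocating a TTY; for example:
$ kubectl -n devops exec devops-jenkins-659f4c6d44-d2w76 -- printenv HOME
/var/jenkins_home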
restart po
reference:
- Restarting Kubernetes Pods
- How to Restart Kubernetes Pods
- How to gracefully remove a node from Kubernetes?

for kubernetes version 1.15+:
$ kubectl -n <namespace> rollout restart deployment <name>
$ kubectl -n <namespace> get po <po-name> -o yaml | kubectl replace --force -f -
- result
$ kubectl -n <namespace> get po -w
NAME                    READY   STATUS              RESTARTS   AGE
mypo-659f4c6d44-72hb5   1/1     Running             0          47h
mypo-659f4c6d44-72hb5   1/1     Terminating         0          47h
mypo-659f4c6d44-d2w76   0/1     Pending             0          0s
mypo-659f4c6d44-d2w76   0/1     Pending             0          0s
mypo-659f4c6d44-d2w76   0/1     ContainerCreating   0          0s
mypo-659f4c6d44-d2w76   1/1     Running             0          2s
mypo-659f4c6d44-72hb5   0/1     Terminating         0          47h
mypo-659f4c6d44-72hb5   0/1     Terminating         0          47h
mypo-659f4c6d44-72hb5   0/1     Terminating         0          47h
mypo-659f4c6d44-72hb5   0/1     Pending             0          0s
mypo-659f4c6d44-72hb5   0/1     Terminating         0          0s
mypo-659f4c6d44-72hb5   0/1     Terminating         0          0s
mypo-659f4c6d44-72hb5   0/1     Terminating         0          0s
mypo-659f4c6d44-72hb5   0/1     Terminating         0          1s
mypo-659f4c6d44-72hb5   0/1     Terminating         0          1s
- or
$ kubectl -n <namespace> scale deployment <name> --replicas=0
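whichever method is used, the rollout can be watched until all replicas are ready again:
$ kubectl -n <namespace> rollout status deployment <name>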
resource management
references:
- resource management for pods and containers
- assign cpu resources to containers and pods
- managing kubernetes resource limits
troubleshooting
[!NOTE|label:references:]
# run-nginx.yml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: my-nginx
spec:
  replicas: 2
  template:
    metadata:
      labels:
        run: my-nginx
    spec:
      containers:
        - name: my-nginx
          image: nginx:1.10.1
          ports:
            - containerPort: 80
kubectl run
$ kubectl run ubuntu-marslo \
  --image=ubuntu:18.04 \
  --overrides='{"spec": { "nodeSelector": {"kubernetes.io/hostname": "k8s-node-01"}}}' \
  -- sleep infinity

# or
$ kubectl run ubuntu-marslo \
  --image=ubuntu:18.04 \
  --overrides='{"spec": { "nodeSelector": {"kubernetes.io/hostname": "k8s-node-01"}}}' \
  -it \
  --rm
debug svc
[!NOTE|label:references:]
- DNS for Services and Pods
- svc in cluster can be visited via `CLUSTER-IP` or `<svc-name>.<namespace>.svc.cluster.local`
# current svc
$ kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
jenkins ClusterIP 10.111.230.13 <none> 8080/TCP,30338/TCP 18h
# create new pod
$ kubectl run ubuntu-marslo \
--image=ubuntu:18.04 \
--overrides='{"spec": { "nodeSelector": {"kubernetes.io/hostname": "k8s-node-01"}}}' \
-it \
--rm
# check DNS
<ubuntu-marslo> $ cat /etc/resolv.conf
nameserver 10.96.0.10
search devops.svc.cluster.local svc.cluster.local cluster.local company.com
options ndots:5
# debug
$ nc -zv jenkins.devops.svc.cluster.local 30338
$ nc -zv 10.111.230.13 30338
$ ssh -l marslo -p 30338 -i ~/.ssh/id_rsa jenkins.devops.svc.cluster.local list-plugins
$ ssh -l marslo -p 30338 -i ~/.ssh/id_rsa 10.111.230.13 list-plugins
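name resolution can be checked from inside the debug pod as well; a sketch (the ubuntu:18.04 image does not ship `nslookup`, so install `dnsutils` first):
<ubuntu-marslo> $ apt-get update && apt-get install -y dnsutils
<ubuntu-marslo> $ nslookup jenkins.devops.svc.cluster.local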