[!NOTE|label:pod phase]
VALUE DESCRIPTION
Pending The Pod has been accepted by the Kubernetes cluster, but one or more of the containers has not been set up and made ready to run
Running The Pod has been bound to a node, and all of the containers have been created
Succeeded All containers in the Pod have terminated in success, and will not be restarted.
Failed All containers in the Pod have terminated, and at least one container has terminated in failure
Unknown For some reason the state of the Pod could not be obtained
list all Failed
pods
Copy $ kubectl -n <namespace> get po \
--field-selector status.phase=Failed
filter via Node Name
Copy $ kubectl -n <namespace> get po \
[-o wide] \
--field-selector spec.nodeName=master-node01
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
devops-jenkins-659f4c6d44-d2w76 1/1 Running 0 2d22h **.***.*.** master-node01 <none> <none>
filter all pods running in particular node
Copy $ kubectl --all-namespaces get po \
[-o wide] \
--field-selector spec.nodeName=<node_name>
filter all pods running in particular node via --template
Copy $ kubectl -n <namespace> get po \
--template '{{range .items}}{{if eq .spec.nodeName "<nodeName>"}}{{.metadata.name}}{{"\n"}}{{end}}{{end}}'
via api
Copy $ curl --cacert ca.crt \
--cert apiserver.crt \
--key apiserver.key \
https://<server>:<port>/api/v1/namespaces/<namespace>/pods?fieldSelector=spec.nodeName%3Dsomenodename
Copy $ kubectl get po -o json |
jq -r '.items | sort_by(.spec.nodeName)[] | [.spec.nodeName,.metadata.name] | @tsv'
list pod details for failure pods
Copy $ ns='my-namespace'
$ keyword='tester'
$ for p in $( kubectl -n ${ns} get po --field-selector status.phase=Failed -o=name | /bin/grep ${keyword}); do
echo "--- ${p} --- " ;
kubectl -n ${ns} describe ${p} | grep -E 'Annotations|Status|Reason|Message' ;
done
sorting pods by nodeName
Copy $ kubectl -n <namespace> get po \
-o wide \
--sort-by="{.spec.nodeName}"
sort pods by restartCount
Copy $ kubectl -n <namespace> get po --sort-by="{.status.containerStatuses[:1].restartCount}"
sort by restart count
Copy $ kubectl -n <namespace> get pods --sort-by=.status.containerStatuses[0].restartCount
sort via start time
Copy $ kubectl -n <namespace> get po \
--sort-by=.status.startTime
get the oldest pod
-1:
means the last in the list
Copy $ kubectl -n <namespace> get pods \
--sort-by=.metadata.creationTimestamp \
-o jsonpath='{.items[-1:].metadata.name}'
sort via created time
Copy $ kubectl -n <namespace> get pods \
--sort-by=.metadata.creationTimestamp
run & create
pod
Copy # create and login
$ kubectl run debug --image=busybox -it --rm
# create and sleep
$ kubectl run debug --image=busybox -- sleep infinity
pod/debug created
# created with specific nodeSelector
$ kubectl run debug \
--image=busybox \
--overrides= '{"spec": { "nodeSelector": {"kubernetes.io/hostname": "k8s-node-01"} }}'
$ kubectl get pod
NAME READY STATUS RESTARTS AGE
debug 1/1 Running 0 6s
# delete
$ kubectl delete pod/debug
pod "debug" deleted
# attach
$ kubectl attach <pod-name> -c <container-name> -it
# i.e.:
$ kubectl attach debug -c debug -it
deploy
Copy # format
$ kubectl create deployment <name> --image=<image:tag> [--replicas=n]
# i.e.:
$ kubectl create deployment nginx --image=nginx --replicas=2
deployment.apps/nginx created
# optional
$ kubectl scale deployment nginx --replicas=3
$ kubectl get pod
NAME READY STATUS RESTARTS AGE
nginx-6799fc88d8-6clhp 1/1 Running 0 9s
nginx-6799fc88d8-cjz56 1/1 Running 0 9s
# delete
$ kubectl delete deployment nginx
deployment.apps "nginx" deleted
svc
Copy $ kubectl expose deployment <name> --port=80 --target-port=9376
list
watch pods with timestamp
[!NOTE|label:references:]
Copy $ kubectl get pods --watch-only |
while read line ; do echo -e "$(date +"%Y-%m-%d %H:%M:%S.%3N")\t pods\t $line" ; done
list pod status with timestamp
[!NOTE|label:references:]
via events
Copy $ kubectl get events -o custom-columns=FirstSeen:.firstTimestamp,LastSeen:.lastTimestamp,Count:.count,From:.source.component,Type:.type,Reason:.reason,Message:.message \
--field-selector involvedObject.kind=Pod,involvedObject.name=<pod-name>
via pod json
Copy $ kubectl get po <pod-name> -o json | jq -r '.status.conditions'
[
{
"lastProbeTime" : null,
"lastTransitionTime" : "2023-09-28T08:15:33Z" ,
"status" : "True" ,
"type" : "Initialized"
},
{
"lastProbeTime" : null,
"lastTransitionTime" : "2023-11-23T02:33:09Z" ,
"message" : "containers with unready status: [config-reload]" ,
"reason" : "ContainersNotReady" ,
"status" : "False" ,
"type" : "Ready"
},
{
"lastProbeTime" : null,
"lastTransitionTime" : "2023-11-23T02:33:09Z" ,
"message" : "containers with unready status: [config-reload]" ,
"reason" : "ContainersNotReady" ,
"status" : "False" ,
"type" : "ContainersReady"
},
{
"lastProbeTime" : null,
"lastTransitionTime" : "2023-09-28T08:15:16Z" ,
"status" : "True" ,
"type" : "PodScheduled"
}
]
list pod with nodename
filter
Copy $ kubectl get po --all-namespaces -o wide --field-selector spec.nodeName=<nodeName>
list
Copy $ kubectl get pods \
--all-namespaces \
--output 'jsonpath={range .items[*]}{.spec.nodeName}{"\t"}{.metadata.namespace}{"\t"}{.metadata.name}{"\n"}{end}'
or list with custom-columns
Copy $ kubectl get pod \
--all-namespaces \
-o=custom-columns=NAME:.metadata.name,STATUS:.status.phase,NODE:.spec.nodeName
list nodeName with podIP
Copy $ kubectl get pod \
--all-namespaces \
-o json |
jq '.items[] | .spec.nodeName + " " + .status.podIP'
[!NOTE|label:references]
Copy $ kubectl get pods --all-namespaces -o json |
jq -r '.items[] | select(.status.phase == "Ready" or ([ .status.conditions[] | select(.type == "Ready") ] | length ) == 1 ) | .metadata.namespace + "\t" + .metadata.name'
list all ImagePullBackOff
pods
[!NOTE|label:references]
Copy $ kubectl get pod --all-namespaces \
-o=json |
jq '.items[]|select(any( .status.containerStatuses[]; .state.waiting.reason=="ImagePullBackOff"))|.metadata.name'
# or
$ kubectl get pod --all-namespaces \
-o jsonpath='{.items[?(@.status.containerStatuses[*].state.waiting.reason=="ImagePullBackOff")].metadata.name}'
Copy $ kubectl -n <namespace> get po \
--field-selector status.phase=Failed
list and delete all error status pods
Copy $ for i in $(kubectl get po --no-headers --all-namespaces --field-selector status.phase=Failed -o=custom-columns=NAMESPACE:.metadata.namespace | sort -u); do
kubectl -n $i delete po --field-selector status.phase=Failed --force --grace-period=0
done
or
Copy $ kubectl -n <namespace> delete po \
--field-selector status.phase=Failed
or
Copy $ kubectl -n <namespace> get po \
--field-selector=status.phase!=Running
or
Copy $ kubectl --all-namespaces get po \
--field-selector=status.phase!=Running,status.phase!=Succeeded
or
Copy $ kubectl get po --all-namespaces -o json \
| jq -r '.items[] \
| select(.status.phase != "Running" \
or ([ .status.conditions[] | select(.type == "Ready" and .status == "False") ] | length ) == 1 \
) \
| .metadata.namespace + "/" + .metadata.name'
Copy $ kubectl -n <namespace> get po \
-o=jsonpath='{.items[*].status.phase}'
Running Running Running Running Running Running Running Running Running
list running images
Copy $ kubectl -n <namespace> get po -o jsonpath="{..image}" |
tr -s '[[:space:]]' '\n' |
sort |
uniq -c
2 gcr.io/kubernetes-helm/tiller:v2.14.3
6 k8s.gcr.io/coredns:1.2.2
6 k8s.gcr.io/etcd:3.2.24
6 k8s.gcr.io/kube-apiserver:v1.12.3
6 k8s.gcr.io/kube-controller-manager:v1.12.3
30 k8s.gcr.io/kube-proxy:v1.12.3
6 k8s.gcr.io/kube-scheduler:v1.12.3
4 k8s.gcr.io/metrics-server-amd64:v0.3.6
30 k8s.gcr.io/node-problem-detector:v0.8.1
2 kubernetesui/dashboard:v2.0.0-beta1
4 kubernetesui/metrics-scraper:v1.0.1
60 quay.io/coreos/flannel:v0.10.0-amd64
list running pods
Copy $ kubectl -n <namespace> get po \
-o=custom-columns=NAME:.metadata.name,STATUS:.status.phase,NODE:.spec.nodeName
NAME STATUS NODE
coredns-59dd98b545-7t25l Running k8s-node01
coredns-59dd98b545-lnklx Running k8s-node02
coredns-59dd98b545-ltj5p Running k8s-node03
...
specific nodes
Copy $ kubectl get pods --all-namespaces \
-o wide \
--field-selector spec.nodeName=<node>
all nodes
Copy $ kubectl get pods -o wide \
--sort-by= "{.spec.nodeName}"
via label filter
Copy $ for n in $( kubectl get nodes -l your_label_key=your_label_value --no-headers | cut -d " " -f1 ); do
kubectl get pods --all-namespaces --no-headers --field-selector spec.nodeName= ${n}
done
via API
Copy $ curl --cacert ca.crt \
--cert apiserver.crt \
--key apiserver.key \
https://<server>:<port>/api/v1/namespaces/<namespace>/pods?fieldSelector=spec.nodeName%3Dsomenodename
list all containers
[!NOTE|label:references:]
list container images
Copy $ kubectl get po -o jsonpath= "{.items[*].spec['initContainers', 'containers'][*].image}" |
tr -s '[[:space:]]' '\n' |
sort |
uniq -c
2 jenkins:2.452.2-lts-jdk17
2 docker.io/kiwigrid/k8s-sidecar:1.27.4
# or
$ kubectl get pods -o go-template --template= "{{range .items}}{{range .spec.containers}}{{.image}} {{end}}{{end}}"
jenkins:2.452.2-lts-jdk17 docker.io/kiwigrid/k8s-sidecar:1.27.4
for all namespaces
Copy $ kubectl get pods \
--all-namespaces \
-o jsonpath= "{.items[*].spec.containers[*].image}" |
tr -s '[[:space:]]' '\n' |
sort |
uniq -c
# or
$ kubectl get pods \
--all-namespaces \
-o jsonpath= "{.items[*].spec.containers[*].image}"
Copy $ kubectl get pods \
--all-namespaces \
-o=jsonpath='{range .items[*]}{"\n"}{.metadata.name}{":\t"}{range .spec.containers[*]}{.image}{", "}{end}{end}' |
sort
list container names
Copy $ kubectl get po -o jsonpath= "{.items[*].spec['initContainers', 'containers'][*].name}" |
tr -s '[[:space:]]' '\n' |
sort |
uniq -c
1 config-reload
1 config-reload-init
1 init
1 jenkins
list container image by pod
Copy $ kubectl get po -o jsonpath='{range .items[*]}{"\n"}{.metadata.name}{":\t"}{range .spec.containers[*]}{.image}{", "}{end}{end}' | sort
staging-jenkins-0: jenkins:2.452.2-lts-jdk17, docker.io/kiwigrid/k8s-sidecar:1.27.4,
get port enabled in pod
Copy $ kubectl get po jenkins-0 -o jsonpath= '{.spec.containers[*].ports[*]}'
{"containerPort":8080,"name":"http","protocol":"TCP"} {"containerPort":50000,"name":"agent-listener","protocol":"TCP"} {"containerPort":50017,"name":"sshd-listener","protocol":"TCP"}
# or
$ kubectl get po jenkins-0 -o jsonpath= "{range .spec.containers[*].ports[*]}{@.*}{'\n'}{end}" | column -t
http 8080 TCP
agent-listener 50000 TCP
sshd-listener 50017 TCP
Copy $ kubectl get po <pod-name> -o go-template='{{range .items}}{{.status.podIP}}{{"\n"}}{{end}}'
10.244.140.106
get the first deploy name in namespace
Copy $ kubectl -n <namespace> get deploy -o=jsonpath='{.items[0].metadata.name}'
get all deploy names
Copy $ kubectl -n <namespace> get deploy -o=jsonpath='{.items[*].metadata.name}'
item.metadata.name
list via jsonpath={.items..metadata.name}
Copy $ kubectl -n kube-system get po --output=jsonpath={.items..metadata.name}
coredns-c7ddbcccb-5cj5z coredns-c7ddbcccb-lxsw6 coredns-c7ddbcccb-prjfk ...
or
Copy $ kubectl -n kube-system get po -o jsonpath= "{range .items[*]}{@.metadata.name}{'\n'}{end}" |
head -10
coredns-c7ddbcccb-5cj5z
coredns-c7ddbcccb-lxsw6
coredns-c7ddbcccb-prjfk
etcd-node03
etcd-node04
etcd-node01
kube-apiserver-node03
kube-apiserver-node04
kube-apiserver-node01
kube-controller-manager-node03
output
-o name
Copy $ kubectl -n kube-system get pods -o name | head
pod/coredns-c7ddbcccb-5cj5z
pod/coredns-c7ddbcccb-lxsw6
pod/coredns-c7ddbcccb-prjfk
pod/etcd-node03
pod/etcd-node04
pod/etcd-node01
pod/kube-apiserver-node03
pod/kube-apiserver-node04
pod/kube-apiserver-node01
pod/kube-controller-manager-node03
--template
Copy $ kubectl -n kube-system get pods \
-o go-template \
--template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}' |
head
coredns-c7ddbcccb-5cj5z
coredns-c7ddbcccb-lxsw6
coredns-c7ddbcccb-prjfk
etcd-node03
etcd-node04
etcd-node01
kube-apiserver-node03
kube-apiserver-node04
kube-apiserver-node01
kube-controller-manager-node03
or
Copy $ kubectl -n kube-system get pods \
--template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}' |
head
coredns-c7ddbcccb-5cj5z
coredns-c7ddbcccb-lxsw6
coredns-c7ddbcccb-prjfk
etcd-node03
etcd-node04
etcd-node01
kube-apiserver-node03
kube-apiserver-node04
kube-apiserver-node01
kube-controller-manager-node03
custom-columns
Name:.metadata.name
Copy $ kubectl get po --all-namespaces \
-o=custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,RESTARTS:.status.containerStatuses[0].restartCount,IMAGE:.spec.containers[0].image,CREATED:.metadata.creationTimestamp,QOS-CLASS:.status.qosClass
list all images running in particular namespace
Copy $ kubectl -n <namespace> get po \
--output=custom-columns="NAME:.metadata.name,IMAGE:.spec.containers[*].image"
list all images exclude 'k8s.gcr.io/coredns:1.6.2'
Copy $ kubectl --all-namespaces get pods \
-o=custom-columns='DATA:spec.containers[?(@.image!="k8s.gcr.io/coredns:1.6.2")].image'
list via -o custom-columns=":metadata.name"
Copy $ kubectl -n kube-system get pods -o custom-columns=":metadata.name" | head
coredns-c7ddbcccb-5cj5z
coredns-c7ddbcccb-lxsw6
coredns-c7ddbcccb-prjfk
etcd-node03
etcd-node04
etcd-node01
kube-apiserver-node03
kube-apiserver-node04
kube-apiserver-node01
QOS
Copy $ kubectl -n kube-system get po \
-o custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,QOS-CLASS:.status.qosClass
NAME NAMESPACE QOS-CLASS
coredns-59dd98b545-7t25l kube-system Burstable
coredns-59dd98b545-lnklx kube-system Burstable
coredns-59dd98b545-ltj5p kube-system Burstable
etcd-k8s-node01 kube-system BestEffort
etcd-k8s-node02 kube-system BestEffort
etcd-k8s-node03 kube-system BestEffort
kube-apiserver-k8s-node01 kube-system Burstable
kube-apiserver-k8s-node02 kube-system Burstable
kube-apiserver-k8s-node03 kube-system Burstable
kube-controller-manager-k8s-node01 kube-system Burstable
kube-controller-manager-k8s-node02 kube-system Burstable
kube-controller-manager-k8s-node03 kube-system Burstable
kube-flannel-ds-amd64-627bn kube-system Guaranteed
kube-flannel-ds-amd64-7hdqd kube-system Guaranteed
kube-flannel-ds-amd64-b4th7 kube-system Guaranteed
...
management
execute in pod
Copy $ kubectl -n devops exec -it devops-jenkins-659f4c6d44-d2w76 -- /bin/bash
jenkins@devops-jenkins-659f4c6d44-d2w76:/$ echo $HOME
/var/jenkins_home
jenkins@devops-jenkins-659f4c6d44-d2w76:/$ hostname
devops-jenkins-659f4c6d44-d2w76
restart po
reference:
kubectl -n <namespace> rollout restart deployment <name>
Copy $ kubectl -n <namespace> get po <po-name> -o yaml | kubectl replace --force -f -
result
Copy $ kubectl -n <namespace> get po -w
NAME READY STATUS RESTARTS AGE
mypo-659f4c6d44-72hb5 1/1 Running 0 47h
mypo-659f4c6d44-72hb5 1/1 Terminating 0 47h
mypo-659f4c6d44-d2w76 0/1 Pending 0 0s
mypo-659f4c6d44-d2w76 0/1 Pending 0 0s
mypo-659f4c6d44-d2w76 0/1 ContainerCreating 0 0s
mypo-659f4c6d44-d2w76 1/1 Running 0 2s
mypo-659f4c6d44-72hb5 0/1 Terminating 0 47h
mypo-659f4c6d44-72hb5 0/1 Terminating 0 47h
mypo-659f4c6d44-72hb5 0/1 Terminating 0 47h
mypo-659f4c6d44-72hb5 0/1 Pending 0 0s
mypo-659f4c6d44-72hb5 0/1 Terminating 0 0s
mypo-659f4c6d44-72hb5 0/1 Terminating 0 0s
mypo-659f4c6d44-72hb5 0/1 Terminating 0 0s
mypo-659f4c6d44-72hb5 0/1 Terminating 0 1s
mypo-659f4c6d44-72hb5 0/1 Terminating 0 1s
or
Copy $ kubectl -n <namespace> scale deployment <name> --replicas=0
resource management
troubleshooting
[!NOTE|label:references:]
simple pods
Copy # run-nginx.yml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: my-nginx
spec:
replicas: 2
template:
metadata:
labels:
run: my-nginx
spec:
containers:
- name: my-nginx
image: nginx:1.10.1
ports:
- containerPort: 80
kubectl run
Copy $ kubectl run ubuntu-marslo \
--image=ubuntu:18.04 \
--overrides='{"spec": { "nodeSelector": {"kubernetes.io/hostname": "k8s-node-01"}}}' \
-- sleep infinity
# or
$ kubectl run ubuntu-marslo \
--image=ubuntu:18.04 \
--overrides='{"spec": { "nodeSelector": {"kubernetes.io/hostname": "k8s-node-01"}}}' \
-it \
--rm
debug svc
[!NOTE|label:references:]
svc in cluster can be visited via
<svc-name>.<namespace>.svc.cluster.local
Copy # current svc
$ kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
jenkins ClusterIP 10.111.230.13 <none> 8080/TCP,30338/TCP 18h
# create new pod
$ kubectl run ubuntu-marslo \
--image=ubuntu:18.04 \
--overrides='{"spec": { "nodeSelector": {"kubernetes.io/hostname": "k8s-node-01"}}}' \
-it \
--rm
# check DNS
<ubuntu-marslo> $ cat /etc/resolv.conf
nameserver 10.96.0.10
search devops.svc.cluster.local svc.cluster.local cluster.local company.com
options ndots:5
# debug
$ nc -zv jenkins.devops.svc.cluster.local 30338
$ nc -zv 10.111.230.13 30338
$ ssh -l marslo -p 30338 -i ~/.ssh/id_rsa jenkins.devops.svc.cluster.local list-plugins
$ ssh -l marslo -p 30338 -i ~/.ssh/id_rsa 10.111.230.13 list-plugins
Last updated 4 months ago