addons
$ curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
Downloading https://get.helm.sh/helm-v3.15.4-linux-amd64.tar.gz
Verifying checksum... Done.
Preparing to install helm into /usr/local/bin
helm installed into /usr/local/bin/helm
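To sanity-check the install, helm should report its client version (v3.15.4 for the download above):
$ helm version --short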
$ kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.28.1/manifests/tigera-operator.yaml
$ kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.28.1/manifests/custom-resources.yaml
# verify
$ kubectl get pods -n calico-system
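Optionally, wait for all Calico pods to become Ready before moving on (the 300s timeout is an arbitrary choice):
$ kubectl wait pods --all -n calico-system --for=condition=Ready --timeout=300s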
calico tools
# calicoctl
$ curl -L https://github.com/projectcalico/calico/releases/download/v3.28.1/calicoctl-linux-amd64 -o calicoctl
$ chmod +x calicoctl
$ sudo mv calicoctl /usr/local/bin/
# kubectl-calico
$ curl -L https://github.com/projectcalico/calico/releases/download/v3.28.1/calicoctl-linux-amd64 -o kubectl-calico
$ chmod +x kubectl-calico
$ sudo mv kubectl-calico /usr/local/bin/
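Both binaries expose the same CLI, and kubectl treats any kubectl-* executable on the PATH as a plugin, so either invocation below works (calicoctl may additionally need DATASTORE_TYPE=kubernetes and KUBECONFIG exported):
$ calicoctl get ippools -o wide
# same thing via the kubectl plugin
$ kubectl calico get ippools -o wide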
$ cat /run/flannel/subnet.env
$ kubectl get nodes k8s-node-01 -o jsonpath='{.spec.podCIDR}'
modify
$ kubectl edit cm -n kube-system kube-flannel-cfg
net-conf.json: |
{
"Network": "10.244.0.0/16",
"Backend": {
"Type": "vxlan"
}
}
check
$ kubectl get nodes -o jsonpath='{.items[*].spec.podCIDR}'
# e.g.: flannel
$ kubectl get nodes -o jsonpath='{.items[*].spec.podCIDR}'
10.244.21.0/24 10.244.4.0/24 10.244.1.0/24 10.244.10.0/24 10.244.20.0/24 10.244.7.0/24 10.244.5.0/24 10.244.17.0/24 10.244.3.0/24 10.244.0.0/24 10.244.6.0/24 10.244.12.0/24 10.244.13.0/24 10.244.16.0/24 10.244.15.0/24
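Note that editing the ConfigMap alone does not reconfigure running pods; the flannel DaemonSet usually needs a restart to pick up the change (name and namespace vary by install method; kube-flannel-ds in kube-system is assumed here):
$ kubectl -n kube-system rollout restart daemonset kube-flannel-ds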
$ helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
$ helm upgrade --install ingress-nginx ingress-nginx/ingress-nginx --namespace ingress-nginx --create-namespace
# or
$ helm upgrade --install ingress-nginx ingress-nginx \
--repo https://kubernetes.github.io/ingress-nginx \
--namespace ingress-nginx --create-namespace
# check value
$ helm show values ingress-nginx --repo https://kubernetes.github.io/ingress-nginx
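After the release deploys, the controller pod and its Service should be up (Service type depends on the environment):
$ kubectl -n ingress-nginx get pods,svc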
$ helm repo add metrics-server https://kubernetes-sigs.github.io/metrics-server/
$ helm upgrade --install metrics-server metrics-server/metrics-server --namespace monitoring --create-namespace
# without tls: https://github.com/kubernetes-sigs/metrics-server/issues/1221
$ helm upgrade metrics-server metrics-server/metrics-server --set args="{--kubelet-insecure-tls}" --namespace monitoring
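Once the metrics-server APIService is available, kubectl top should return data:
$ kubectl top nodes
$ kubectl -n kube-system top pods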
[!NOTE|label:references:]
Admin User
Cluster Admin User
Read-only User
Access via kubeconfig
# add kubernetes-dashboard repository
$ helm repo add kubernetes-dashboard https://kubernetes.github.io/dashboard/
# deploy a Helm Release named "kubernetes-dashboard" using the kubernetes-dashboard chart
$ helm upgrade --install kubernetes-dashboard kubernetes-dashboard/kubernetes-dashboard --create-namespace --namespace kubernetes-dashboard
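Without an Ingress, the v7 chart can be reached by port-forwarding its kong proxy Service and browsing https://localhost:8443 (the Service name below assumes the release name used above):
$ kubectl -n kubernetes-dashboard port-forward svc/kubernetes-dashboard-kong-proxy 8443:443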
# v7.x
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: kubernetes-dashboard
namespace: monitoring
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
nginx.ingress.kubernetes.io/secure-backends: "true"
spec:
ingressClassName: nginx
tls:
- hosts:
- sms-k8s-dashboard.sample.com
secretName: sample-tls
rules:
- host: sms-k8s-dashboard.sample.com
http:
paths:
- path: /
backend:
service:
name: kubernetes-dashboard-kong-proxy
port:
number: 443
pathType: Prefix
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: kubernetes-dashboard
namespace: kube-system
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
nginx.ingress.kubernetes.io/secure-backends: "true"
spec:
ingressClassName: nginx
tls:
- hosts:
- sms-k8s-dashboard.sample.com
secretName: sample-tls
rules:
- host: sms-k8s-dashboard.sample.com
http:
paths:
- path: /
backend:
service:
# or kubernetes-dashboard-kong-proxy for latest version
name: kubernetes-dashboard
port:
number: 443
pathType: Prefix
using ClusterRole cluster-admin for kubernetes-dashboard-admin
create ServiceAccount in the monitoring namespace
$ kubectl -n monitoring create serviceaccount kubernetes-dashboard-admin
create ClusterRoleBinding for ServiceAccount
# --clusterrole: the built-in cluster-admin ClusterRole; --serviceaccount: <namespace>:<name>
$ kubectl create clusterrolebinding kubernetes-dashboard-admin \
    --clusterrole=cluster-admin \
    --serviceaccount=monitoring:kubernetes-dashboard-admin
$ kubectl get clusterrolebinding kubernetes-dashboard-admin -o yaml | grep -v -E 'uid:|resourceVersion:|creationTimestamp:'
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard-admin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard-admin
namespace: monitoring
generate token
# admin
$ kubectl -n monitoring create token kubernetes-dashboard-admin
# normal user
$ kubectl -n monitoring create token kubernetes-dashboard-metrics-scraper
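Tokens from kubectl create token are short-lived; for a long-lived token, create a service-account-token Secret and read the token back from it: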
$ kubectl -n monitoring apply -f - <<EOF
apiVersion: v1
kind: Secret
metadata:
name: kubernetes-dashboard-admin-token
namespace: monitoring
annotations:
kubernetes.io/service-account.name: kubernetes-dashboard-admin
type: kubernetes.io/service-account-token
EOF
# get token by service account
$ kubectl -n monitoring get secrets -o jsonpath="{.items[?(@.metadata.annotations['kubernetes\.io/service-account\.name']=='kubernetes-dashboard-admin')].data.token}" | base64 -d
# or
$ kubectl -n monitoring get secrets -o jsonpath="{.items[?(@.metadata.annotations.kubernetes\.io/service-account\.name=='kubernetes-dashboard-admin')].data.token}" | base64 -d
# or get the token by describing the secret
$ kubectl -n monitoring describe secrets $(kubectl -n monitoring get secret | grep kubernetes-dashboard-admin | awk '{print $1}') | grep 'token' | awk '{print $2}'
or modify the ClusterRole kubernetes-dashboard-metrics-scraper manually
[!TIP|label:for v7.x]
clusterrole: kubernetes-dashboard-metrics-scraper
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
meta.helm.sh/release-name: kubernetes-dashboard
meta.helm.sh/release-namespace: monitoring
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-7.5.0
name: kubernetes-dashboard-metrics-scraper
rules:
- apiGroups:
- '*'
resources:
- '*'
verbs:
- '*'
original
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
meta.helm.sh/release-name: kubernetes-dashboard
meta.helm.sh/release-namespace: monitoring
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-7.5.0
name: kubernetes-dashboard-metrics-scraper
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
clusterrolebinding: kubernetes-dashboard-metrics-scraper
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
meta.helm.sh/release-name: kubernetes-dashboard
meta.helm.sh/release-namespace: monitoring
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-7.5.0
name: kubernetes-dashboard-metrics-scraper
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard-metrics-scraper
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard-metrics-scraper
namespace: monitoring
serviceaccount: kubernetes-dashboard-metrics-scraper
apiVersion: v1
kind: ServiceAccount
metadata:
annotations:
meta.helm.sh/release-name: kubernetes-dashboard
meta.helm.sh/release-namespace: monitoring
labels:
app.kubernetes.io/instance: kubernetes-dashboard
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: kubernetes-dashboard
helm.sh/chart: kubernetes-dashboard-7.5.0
name: kubernetes-dashboard-metrics-scraper
namespace: monitoring
generate token
$ kubectl -n monitoring create token kubernetes-dashboard-metrics-scraper
ey**********************WAA
older version
clusterrole
$ kubectl get clusterrole kubernetes-dashboard -o yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
rules:
- apiGroups:
- '*'
resources:
- '*'
verbs:
- '*'
clusterrolebinding
$ kubectl -n kube-system get clusterrolebindings kubernetes-dashboard -o yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
serviceaccount
$ kubectl -n kube-system get sa kubernetes-dashboard -o yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
generate token
$ kubectl -n kube-system create token kubernetes-dashboard
ey**********************WAA
[!NOTE|label:references:]
cluster-level Grafana dashboards: 13332, 13824, 14518
$ helm repo add prometheus-stack https://prometheus-community.github.io/helm-charts
"prometheus-stack" has been added to your repositories
$ helm upgrade --install prometheus-stack prometheus-stack/kube-prometheus-stack --namespace monitoring
# if node-exporter has already been deployed by kubespray, assign another port for kube-prometheus-stack
$ helm upgrade --install prometheus-stack prometheus-stack/kube-prometheus-stack \
--namespace monitoring \
--set prometheus-node-exporter.service.port=9200
Release "prometheus-stack" has been upgraded. Happy Helming!
NAME: prometheus-stack
LAST DEPLOYED: Tue Sep 10 22:53:40 2024
NAMESPACE: monitoring
STATUS: deployed
REVISION: 2
NOTES:
kube-prometheus-stack has been installed. Check its status by running:
kubectl --namespace monitoring get pods -l "release=prometheus-stack"
Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create & configure Alertmanager and Prometheus instances using the Operator.
ingress
grafana
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: prometheus-stack-grafana
namespace: monitoring
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/secure-backends: "true"
spec:
ingressClassName: nginx
tls:
- hosts:
- sms-k8s-grafana.sample.com
secretName: sample-tls
rules:
- host: sms-k8s-grafana.sample.com
http:
paths:
- path: /
backend:
service:
name: prometheus-stack-grafana
port:
number: 80
pathType: Prefix
prometheus
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: prometheus
  namespace: monitoring
  annotations:
    kubernetes.io/ingress.class: "nginx"
spec:
  ingressClassName: nginx
  tls:
    - hosts:
        - sms-k8s-prometheus.sample.com
      secretName: sample-tls
  rules:
    - host: sms-k8s-prometheus.sample.com
      http:
        paths:
          - path: /
            backend:
              service:
                name: prometheus-stack-kube-prom-prometheus
                port:
                  number: 9090
            pathType: Prefix
alertmanager
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: alertmanager
  namespace: monitoring
  annotations:
    kubernetes.io/ingress.class: "nginx"
spec:
  ingressClassName: nginx
  tls:
    - hosts:
        - sms-k8s-alertmgr.sample.com
      secretName: sample-tls
  rules:
    - host: sms-k8s-alertmgr.sample.com
      http:
        paths:
          - path: /
            backend:
              service:
                name: prometheus-stack-kube-prom-alertmanager
                port:
                  number: 9093
            pathType: Prefix
admin account for grafana
# account
$ kubectl get secret --namespace monitoring prometheus-stack-grafana -o jsonpath='{.data.admin-user}' | base64 -d; echo
# password
$ kubectl get secret --namespace monitoring prometheus-stack-grafana -o jsonpath='{.data.admin-password}' | base64 -d; echo
$ helm repo add grafana https://grafana.github.io/helm-charts
$ helm repo list
NAME URL
kubernetes-dashboard https://kubernetes.github.io/dashboard/
ingress-nginx https://kubernetes.github.io/ingress-nginx
grafana https://grafana.github.io/helm-charts
$ helm repo update
$ helm search repo grafana/grafana
$ helm install grafana grafana/grafana --namespace monitoring --create-namespace
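The standalone chart generates an admin password at install time; it can be read back from the release Secret (named after the release, grafana here):
$ kubectl -n monitoring get secret grafana -o jsonpath='{.data.admin-password}' | base64 -d; echo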
tls secret
# generate the secret manifest with --dry-run, then apply it into kube-system
$ sudo kubectl -n kube-system create secret tls sample-tls --cert star_sample_com.full.crt --key star_sample_com.key --dry-run=client -o yaml > kube-system.sample-tls.yaml
$ kubectl apply -f kube-system.sample-tls.yaml
# copy the secret into other namespaces (secrets are namespace-scoped)
$ kubectl --namespace=kube-system get secrets sample-tls -o yaml | grep -v '^\s*namespace:\s' | kubectl apply --namespace=monitoring -f -
secret/sample-tls created
$ kubectl --namespace=kube-system get secrets sample-tls -o yaml | grep -v '^\s*namespace:\s' | kubectl apply --namespace=kubernetes-dashboard -f -
secret/sample-tls created
$ kubectl --namespace=kube-system get secrets sample-tls -o yaml | grep -v '^\s*namespace:\s' | kubectl apply --namespace=ingress-nginx -f -
secret/sample-tls created
$ kubectl --namespace=kube-system get secrets sample-tls -o yaml | grep -v '^\s*namespace:\s' | kubectl apply --namespace=default -f -
secret/sample-tls created
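To confirm the copies, list the secret across all namespaces with a field selector:
$ kubectl get secrets --all-namespaces --field-selector metadata.name=sample-tls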