CKS



1. Fix insecure findings reported by kube-bench

ssh master01
vim /etc/kubernetes/manifests/kube-apiserver.yaml
- --authorization-mode=Node,RBAC   # make sure AlwaysAllow is not used; Node,RBAC is the expected value

vim /var/lib/kubelet/config.yaml
authentication:
  anonymous:
    enabled: false              # change true to false
  webhook:
    cacheTTL: 0s
    enabled: true               # leave this webhook enabled: true as it is
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.crt
authorization:                  # under authorization:
  mode: Webhook                 # change to Webhook (note the capital W)
  webhook:

# After editing, reload the config and restart kubelet
systemctl daemon-reload
systemctl restart kubelet.service

vim /etc/kubernetes/manifests/etcd.yaml
- --client-cert-auth=true   # change to true
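
To confirm the findings are cleared, kube-bench can be re-run against the relevant targets (a quick check, assuming kube-bench is installed on the node):
kube-bench run --targets=master,etcd,node | grep FAIL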

2. Pod using a ServiceAccount

apiVersion: v1
kind: ServiceAccount
metadata:
  name: database-sa
  namespace: prod
automountServiceAccountToken: false
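
Apply the ServiceAccount and confirm token auto-mount is off (the manifest filename here is illustrative):
kubectl apply -f database-sa.yaml
kubectl get sa database-sa -n prod -o yaml | grep automount   # expect automountServiceAccountToken: false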

# vi app-pod.yaml …
spec:
  serviceAccountName: database-sa   # reference the ServiceAccount created above
  containers:
  - image: nginx
kubectl apply -f app-pod.yaml

kubectl get pod -n qa -o yaml|grep -i "serviceaccount:"
kubectl get sa -n qa
kubectl delete sa fraont-sa -n qa

3. Network policy (default deny)

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: denypolicy          # just change the name and the namespace
  namespace: testing        # copied from the official docs; add these fields
spec:
  podSelector: {}
  policyTypes:
  - Ingress
  - Egress
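
Apply and check the policy (the filename is illustrative):
kubectl apply -f denypolicy.yaml
kubectl describe networkpolicy denypolicy -n testing   # should show no allowed ingress/egress rules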

4. Security context

kubectl edit deployment web-deployment -n app
Add under the template's spec (the exam environment may already have a securityContext):
securityContext:
  runAsUser: 30000

Add to each container:
securityContext:
  allowPrivilegeEscalation: false
  readOnlyRootFilesystem: true
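
For orientation, a minimal sketch of where the two blocks sit in the Deployment (container name and image below are illustrative):
spec:
  template:
    spec:
      securityContext:                    # pod-level
        runAsUser: 30000
      containers:
      - name: web                         # illustrative name
        image: nginx
        securityContext:                  # container-level
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true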

5. RBAC

kubectl get role -n monitoring
kubectl edit role web-role-1 -n monitoring
  name: web-role-1
  namespace: monitoring
  resourceVersion: "9528"
rules:
- apiGroups:            # "" is the default core API group
    - ""
  resources:
    - services          # grant access to the services resource only
  verbs:
    - get               # the specific verb allowed

kubectl create rolebinding web-role-1-binding --role=web-role-1 --serviceaccount=monitoring:service-account-web -n monitoring

kubectl create role web-role-2 --verb=delete --resource=namespaces -n monitoring
kubectl create rolebinding web-role-2-binding --role=web-role-2 --serviceaccount=monitoring:service-account-web -n monitoring
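
To verify the bindings, kubectl auth can-i can impersonate the ServiceAccount (an optional sanity check):
kubectl auth can-i get services -n monitoring --as=system:serviceaccount:monitoring:service-account-web      # expect yes
kubectl auth can-i delete namespaces -n monitoring --as=system:serviceaccount:monitoring:service-account-web # expect yes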

6. Audit logging

ssh master01 
sudo -i
vi /etc/kubernetes/logpolicy/sample-policy.yaml
apiVersion: audit.k8s.io/v1
kind: Policy
omitStages:
  - "RequestReceived"
rules:
  - level: RequestResponse
    resources:
    - group: ""
      resources: ["persistentvolumes"] 
  - level: Request
    resources:
    - group: ""
      resources: ["configmaps"]   
    namespaces: ["front-apps"]
  - level: Metadata
    resources:
    - group: ""
      resources: ["secrets", "configmaps"]
  - level: Metadata
    omitStages:
      - "RequestReceived"

vi /etc/kubernetes/manifests/kube-apiserver.yaml
- --audit-policy-file=/etc/kubernetes/logpolicy/sample-policy.yaml
- --audit-log-path=/var/log/kubernetes/audit-logs.txt
- --audit-log-maxage=5
- --audit-log-maxbackup=2

# Add under the container's volumeMounts (note: the exam environment usually has these mounts configured already)
- mountPath: /etc/kubernetes/logpolicy/sample-policy.yaml
  name: log-policy
- mountPath: /var/log/kubernetes/audit-logs.txt
  name: audit-log
# Add under volumes
- name: log-policy
  hostPath:
    path: /etc/kubernetes/logpolicy/sample-policy.yaml
    type: File
- name: audit-log
  hostPath:
    path: /var/log/kubernetes/audit-logs.txt
    type: FileOrCreate

systemctl restart kubelet
vi /var/log/kubernetes/audit-logs.txt   # check that audit entries are being written
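
A quicker check than opening the file (assuming some matching API activity has already occurred):
tail -n 3 /var/log/kubernetes/audit-logs.txt
grep RequestResponse /var/log/kubernetes/audit-logs.txt | head -n 1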

7. Create a Secret

kubectl get secrets -n istio-system dev-john -o jsonpath={.data.username} | base64 -d > /home/candidate/old-username.txt
kubectl get secrets -n istio-system dev-john -o jsonpath={.data.password} | base64 -d > /home/candidate/password.txt

kubectl create secret generic dev-mark -n istio-system --from-literal=username=thanos --from-literal=password=aV7HR7nU3JLx

apiVersion: v1
kind: Pod
metadata:
  name: dev-pod
  namespace: istio-system
spec:
  containers:
  - name: test-secret-container
    image: nginx
    volumeMounts:
    - name: test-secret-volume
      mountPath: /etc/secret
  volumes:
  - name: test-secret-volume
    secret:
      secretName: dev-mark

kubectl  apply -f k8s-secret.yaml
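
To confirm the Secret is mounted, exec into the pod (optional check):
kubectl exec -n istio-system dev-pod -- ls /etc/secret           # should list username and password
kubectl exec -n istio-system dev-pod -- cat /etc/secret/username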

8. Harden a Dockerfile and a Deployment

vi /home/candidate/KSSC00301/Dockerfile
FROM ubuntu:16.04   # change the base image as required
USER nobody         # only change the USER line that sits just above CMD

vim /cks/docker/deployment.yaml
  template:
    metadata:
      labels:
        app: couchdb        # change the run key to app here
        version: stable
    spec:
      containers:
      # in the existing container entry, set:
        securityContext:
          capabilities:
            add: ["NET_BIND_SERVICE"]
            drop: ["all"]
          privileged: false
          readOnlyRootFilesystem: true
          runAsUser: 65535
        resources:

9. Run containers securely with gVisor

vim /cks/gVisor/rc.yaml
apiVersion: node.k8s.io/v1
kind: RuntimeClass
metadata:
  name: untrusted   # the name used to reference this RuntimeClass; RuntimeClass is a cluster-scoped resource
handler: runsc      # the name of the corresponding CRI handler configuration

kubectl  apply -f /cks/gVisor/rc.yaml

kubectl edit deploy run-test -n server
spec:
      runtimeClassName: untrusted # add this line under the second (pod template) spec; it may get re-ordered after saving
      containers:
      - image: nginx:1.19
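
To verify the pods are actually sandboxed, dmesg inside a gVisor container shows gVisor's own kernel messages (<pod-name> is a placeholder):
kubectl get pods -n server
kubectl exec -n server <pod-name> -- dmesg | grep -i gvisor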

10. Tighten the TLS configuration of the API server and etcd

ssh master
sudo -i
vim  /etc/kubernetes/manifests/kube-apiserver.yaml
- --tls-cipher-suites=TLS_AES_128_GCM_SHA256
- --tls-min-version=VersionTLS13

vim /etc/kubernetes/manifests/etcd.yaml
- --cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256

systemctl  daemon-reload
systemctl  restart kubelet
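
The apiserver and etcd are static pods, so they restart on their own after the manifest edits; a quick way to confirm they came back up (optional):
crictl ps | grep -E "kube-apiserver|etcd"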

11. Network policy (pod-restriction)

kubectl  get pods -n dev-team  --show-labels
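
Also confirm the namespace label the policy will match on (the name=qaqa label is assumed by the policy below):
kubectl get ns --show-labels | grep qaqa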

vim /cks/net/po.yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: pod-restriction # change the name
  namespace: dev-team   # change the namespace
spec:
  podSelector:
    matchLabels:
      environment: testing # label of the target pods, taken from kubectl get pods -n dev-team --show-labels
  policyTypes:
  - Ingress
  ingress:
  - from:
    - namespaceSelector:
        matchLabels:
          name: qaqa # match namespaces carrying the label name=qaqa
  - from:       # add a second from entry
    - namespaceSelector: {}
      podSelector:
        matchLabels:
          environment: testing # match pods labeled environment=testing in any namespace

kubectl apply -f /cks/net/po.yaml

12. ImagePolicyWebhook

kubectl  config use-context  KSSH
1. Switch to root on the master node:
ssh master
sudo -i

2. Edit admission_configuration.json (the question gives this directory) and change defaultAllow to false:
root@master:~# vim /etc/kubernetes/epconfig/admission_configuration.json
{
  "imagePolicy": {
     "kubeConfigFile": "/etc/kubernetes/epconfig/kubeconfig.yml",
     "retryBackoff": 500,
     "defaultAllow": false # true改为false
  }
}

3. Edit /etc/kubernetes/epconfig/kubeconfig.yml and add the webhook server address (back up the file before editing):

root@master:~# vim /etc/kubernetes/epconfig/kubeconfig.yml
apiVersion: v1
kind: Config
clusters:
- cluster:
    certificate-authority: /etc/kubernetes/epconfig/server.crt
    # add the webhook server address (the line below), placed before the name field
    server: https://image-bouncer-webhook.default.svc:1323/image_policy 
  name: bouncer_webhook
contexts:

4. Edit kube-apiserver.yaml and add the ImagePolicyWebhook settings (as documented on the official site):
    - --enable-admission-plugins=NodeRestriction,ImagePolicyWebhook # append ImagePolicyWebhook
    - --admission-control-config-file=/etc/kubernetes/epconfig/admission_configuration.json # add this line
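
If /etc/kubernetes/epconfig is not already mounted into the apiserver pod (the exam environment usually has it), a hostPath mount along these lines would be needed (sketch):
# under the container's volumeMounts
- mountPath: /etc/kubernetes/epconfig
  name: epconfig
  readOnly: true
# under volumes
- name: epconfig
  hostPath:
    path: /etc/kubernetes/epconfig
    type: DirectoryOrCreate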

5. Wait for the apiserver to restart automatically and come back healthy.
# After the change, restart the kubelet service
root@master:~# systemctl  daemon-reload
root@master:~# systemctl  restart kubelet
# Wait about 3 minutes for the cluster to apply the policy, then confirm kube-apiserver is Running
root@master:~# kubectl  get pods -n kube-system

# Test the configuration by trying to deploy the vulnerable resource /cks/img/web1.yaml
# If the pod cannot be created (the apply is rejected with an error), the setup works.
root@master:~# kubectl apply -f  /cks/img/web1.yaml

13. Scan images for security vulnerabilities with Trivy

ssh master
kubectl get pods --namespace kamino --output=custom-columns="NAME:.metadata.name,IMAGE:.spec.containers[*].image"

sudo trivy image -s HIGH,CRITICAL nginx:1.19
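
To scan every image used in the namespace in one pass, a small loop works (sketch; namespace and severity levels as above):
for img in $(kubectl get pods -n kamino -o jsonpath='{.items[*].spec.containers[*].image}' | tr ' ' '\n' | sort -u); do
  echo "== $img =="
  sudo trivy image -s HIGH,CRITICAL "$img"
done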
kubectl  delete pod -n kamino tri222 # also delete any other pods whose images have HIGH/CRITICAL findings

14. Enable Kubernetes API authentication

vim /etc/kubernetes/manifests/kube-apiserver.yaml
     - --authorization-mode=Node,RBAC
     - --enable-admission-plugins=NodeRestriction # change AlwaysAdmit to NodeRestriction
     - --anonymous-auth=false   # add this line if missing; otherwise change true to false

systemctl  daemon-reload
systemctl  restart kubelet 

kubectl delete clusterrolebinding system:anonymous
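
With anonymous auth disabled, an unauthenticated request to the API server should now be rejected with 401 Unauthorized (optional check from the master):
curl -k https://127.0.0.1:6443/api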

15. AppArmor

ssh node2
apparmor_status | grep nginx
apparmor_parser /etc/apparmor.d/nginx_apparmor

exit
exit
vim /cks/KSSH00401/nginx-deploy.yaml
apiVersion: v1
kind: Pod
metadata:
  name: podx
  annotations:
    # Tell Kubernetes to apply the AppArmor profile loaded on node2 above.
    container.apparmor.security.beta.kubernetes.io/podx: localhost/nginx-profile-3
    # The suffix after .../ (here podx) must match the container name under containers;
    # nginx-profile-3 is the profile name loaded with apparmor_parser on worker node2.
spec:
  containers:
  - name: podx

kubectl  get pods
kubectl  exec podx -- cat /proc/1/attr/current
nginx-profile-3 (enforce)

16. Sysdig

ssh node2
sudo -i
sudo sysdig -M 30 -p "%evt.time,%user.uid/%user.name,%proc.name" k8s.pod.name=<pod-name> > /opt/KSR00101/alerts/report

Or:

crictl ps | grep tomcat
sudo sysdig -M 30 -p "%evt.time,%user.uid/%user.name,%proc.name" container.name=<container-name> > /opt/KSR00101/alerts/report
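
If unsure of the exact output field names, sysdig can list the fields it supports (optional):
sudo sysdig -l | grep -E "evt.time|user.uid|user.name|proc.name"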
