k8s集群部署EFK

ELK:Elasticsearch + Logstash + Kibana,传统日志收集方案,Logstash 资源占用较高。

EFK:Elasticsearch + Filebeat + Kibana,用轻量级的 Filebeat 替代 Logstash 采集日志,本文采用此方案。

本方案使用helm方式部署EFK,因此服务器上需要提前准备好helm与kubectl,helm推荐使用3.x版本

EFK官方helm模板下载地址:https://github.com/elastic/helm-charts/archive/refs/tags/v7.17.3.tar.gz

一、单例部署

1.使用NFS创建Elasticsearch PV

  1. 服务器安装nfs服务端
    #安装nfs
    yum -y install nfs-utils
    
    #配置共享目录和访问权限
    cat >> /etc/exports << EOF
    /data/nfs/efk *(rw,no_root_squash,no_all_squash,sync)
    EOF
    
    #创建共享目录并赋权
    mkdir -p /data/nfs/efk
    chmod 777 -R /data/nfs/efk
    
    #启动nfs
    systemctl start nfs
    
    #加入开机自启
    systemctl enable nfs
  2. 编写es-pv.yaml文件
    # es-pv.yaml
    apiVersion: v1
    kind: PersistentVolume
    metadata:
      name: elasticsearch
    spec:
      capacity:
        storage: 30Gi
      accessModes:
        - ReadWriteOnce
      persistentVolumeReclaimPolicy: Retain
      nfs:
        path: /data/nfs/efk
        server: 192.168.0.164
  3. 执行导入pv
    kubectl create -f es-pv.yaml

2.部署elasticsearch

1.更改elasticsearch/values.yaml文件

    # vim elasticsearch/values.yaml
    
    # 根据自己节点数进行更改
    replicas: 1
    minimumMasterNodes: 1
    
    .......
    
    
    # 调整资源大小
    resources:
      requests:
        cpu: "1000m"
        memory: "2Gi"
      limits:
        cpu: "1000m"
        memory: "2Gi"
    ......
    
    # 调整数据卷大小
    volumeClaimTemplate:
      accessModes: ["ReadWriteOnce"]
      resources:
        requests:
          storage: 10Gi
    ......
  2. 部署elasticsearch
    helm install es --namespace=efk ./elasticsearch

3.部署filebeat

  1. 更改filebeat/values.yaml文件
    # 调整资源大小 (默认即可)
      resources:
        requests:
          cpu: "100m"
          memory: "100Mi"
        limits:
          cpu: "1000m"
          memory: "200Mi"
          
    ......
    
    #更改日志搜集策略(指定命名空间),drop_fields:删除无用字段
    
      filebeatConfig:
        filebeat.yml: |
          filebeat.autodiscover:
            providers:
              - type: kubernetes
                templates:
                  - condition:
                      and:
                        - or:
                            - equals:
                                kubernetes.namespace: dev
                            - equals:
                                kubernetes.namespace: app
                    config:
                       - type: container
                         paths:
                          - /var/log/containers/${data.kubernetes.pod.name}_${data.kubernetes.namespace}_*.log
            processors:
            - add_kubernetes_metadata:
                host: ${NODE_NAME}
                matchers:
                - logs_path:
                    logs_path: "/var/log/containers/"
    
          output.elasticsearch:
            host: '${NODE_NAME}'
            hosts: '${ELASTICSEARCH_HOSTS:elasticsearch-master:9200}'
          processors:
          - drop_fields:  
              fields: ["_id", "_index", "_score","_type" , "agent.ephemeral_id", "agent.hostname", "agent.id", "agent.name", "agent.type", "agent.version", "container.id", "container.image.name", "container.runtime", "ecs.version", "host.name", "input.type", "kubernetes.labels.pod-template-hash", "kubernetes.labels.workload_user_cattle_io/workloadselector", "kubernetes.namespace_labels.cattle_io/creator", "kubernetes.namespace_labels.field_cattle_io/projectId", "kubernetes.namespace_uid", "kubernetes.node.labels.beta_kubernetes_io/arch", "kubernetes.node.labels.beta_kubernetes_io/os", "kubernetes.node.labels.kubernetes_io/arch", "kubernetes.node.labels.kubernetes_io/hostname", "kubernetes.node.labels.kubernetes_io/os", "kubernetes.node.labels.node-role_kubernetes_io/worker", "kubernetes.node.labels.kubernetes_io/hostname", "kubernetes.node.uid", "kubernetes.pod.uid", "kubernetes.replicaset.name", "log.file.path", "log.offset", "stream" ]
    
    
    ......
    
    #数据持久化路径
    hostPathRoot: /var/lib
    
    ......
  2. 部署filebeat
    helm install fb --namespace=efk ./filebeat

**注:当docker的数据存放目录不在/var/lib/docker下时,需要修改filebeat/templates/daemonset.yaml文件中的docker路径**

4.部署kibana

  1. 更改kibana/values.yaml文件
    # 调整资源大小 (默认即可)
    resources:
      requests:
        cpu: "1000m"
        memory: "2Gi"
      limits:
        cpu: "1000m"
        memory: "2Gi"
    
    ......
    
    # 更改为NodePort模式
    service:
      type: NodePort
      port: 5601
      nodePort: "30005"
     
    ......
  2. 部署kibana
    helm install kb --namespace=efk ./kibana

二、加密部署

1.使用NFS创建Elasticsearch PV

  1. 服务器安装nfs服务端
    #安装nfs
    yum -y install nfs-utils
    
    #配置共享目录和访问权限
    cat >> /etc/exports << EOF
    /data/nfs/efk *(rw,no_root_squash,no_all_squash,sync)
    EOF
    
    #创建共享目录并赋权
    mkdir -p /data/nfs/efk
    chmod 777 -R /data/nfs/efk
    
    #启动nfs
    systemctl start nfs
    
    #加入开机自启
    systemctl enable nfs
  2. 编写es-pv.yaml文件
    # es-pv.yaml
    apiVersion: v1
    kind: PersistentVolume
    metadata:
      name: elasticsearch
    spec:
      capacity:
        storage: 30Gi
      accessModes:
        - ReadWriteOnce
      persistentVolumeReclaimPolicy: Retain
      nfs:
        path: /data/nfs/efk
        server: 192.168.0.164
  3. 执行导入pv
    kubectl create -f es-pv.yaml

2.部署elasticsearch

  1. 更改elasticsearch/values.yaml文件
    # 根据自己节点数进行更改
    replicas: 1
    minimumMasterNodes: 1
    
    .......
    
    
    # 调整资源大小
    resources:
      requests:
        cpu: "1000m"
        memory: "2Gi"
      limits:
        cpu: "1000m"
        memory: "2Gi"
    ......
    
    
    # 调整数据卷大小
    volumeClaimTemplate:
      accessModes: ["ReadWriteOnce"]
      resources:
        requests:
          storage: 10Gi
    ......
    
    
    # 配置加密
    # 开启https
    protocol: https
    
    #加密
    esConfig:
      elasticsearch.yml: |
        xpack.security.enabled: true
        xpack.security.transport.ssl.enabled: true
        xpack.security.transport.ssl.verification_mode: certificate
        xpack.security.transport.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
        xpack.security.transport.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
        xpack.security.http.ssl.enabled: true
        xpack.security.http.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
        xpack.security.http.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
    
    extraEnvs:
      - name: ELASTIC_PASSWORD
        valueFrom:
          secretKeyRef:
            name: elastic-credentials
            key: password
    
    secretMounts:
      - name: elastic-certificates
        secretName: elastic-certificates
        path: /usr/share/elasticsearch/config/certs
    
    ......
  2. 生成证书密钥
    • 生成elastic-certificates.p12
          # docker运行elasticsearch容器
          docker run -d --name es -e discovery.type=single-node docker.elastic.co/elasticsearch/elasticsearch:7.17.3
          # 进入容器
          docker exec -it es /bin/bash
          # 生成证书
          # 注意一路回车不要输入密码
          /usr/share/elasticsearch/bin/elasticsearch-certutil cert
          # 退出容器后将证书文件复制出来
          docker cp es:/usr/share/elasticsearch/elastic-certificates.p12 .
    
    • 生成elastic-certificate.pem
          # 根据elastic-certificates.p12生成elastic-certificate.pem,elastic-certificate.pem文件会在之后给filebeat和kibana使用
          openssl pkcs12 -nodes -passin pass:'' -in elastic-certificates.p12 -out elastic-certificate.pem
    
    • k8s创建密文
          # k8s集群配置EFK登录账号与密码
          kubectl create secret generic elastic-credentials --from-literal=username=elastic --from-literal=password=@123.com -n efk
          
          # k8s集群创建elastic-certificates.p12证书
          kubectl create secret generic elastic-certificates --from-file=elastic-certificates.p12 -n efk
          
          # k8s集群创建elastic-certificate.pem证书
          kubectl create secret generic elastic-certificate-pem --from-file=elastic-certificate.pem -n efk
    
  3. 部署elasticsearch
    helm install es --namespace=efk ./elasticsearch

3.部署filebeat

  1. 更改filebeat/values.yaml文件
    # 调整资源大小 (默认即可)
      resources:
        requests:
          cpu: "100m"
          memory: "100Mi"
        limits:
          cpu: "1000m"
          memory: "200Mi"
          
    ......
    
    
    #更改日志搜集策略(指定命名空间),drop_fields:删除无用字段
    
      filebeatConfig:
        filebeat.yml: |
          filebeat.autodiscover:
            providers:
              - type: kubernetes
                templates:
                  - condition:
                      and:
                        - or:
                            - equals:
                                kubernetes.namespace: dev
                            - equals:
                                kubernetes.namespace: app
                    config:
                       - type: container
                         paths:
                          - /var/log/containers/${data.kubernetes.pod.name}_${data.kubernetes.namespace}_*.log
            processors:
            - add_kubernetes_metadata:
                host: ${NODE_NAME}
                matchers:
                - logs_path:
                    logs_path: "/var/log/containers/"
    
          output.elasticsearch:
            host: '${NODE_NAME}'
            hosts: '${ELASTICSEARCH_HOSTS:elasticsearch-master:9200}'
          processors:
          - drop_fields:  
              fields: ["_id", "_index", "_score","_type" , "agent.ephemeral_id", "agent.hostname", "agent.id", "agent.name", "agent.type", "agent.version", "container.id", "container.image.name", "container.runtime", "ecs.version", "host.name", "input.type", "kubernetes.labels.pod-template-hash", "kubernetes.labels.workload_user_cattle_io/workloadselector", "kubernetes.namespace_labels.cattle_io/creator", "kubernetes.namespace_labels.field_cattle_io/projectId", "kubernetes.namespace_uid", "kubernetes.node.labels.beta_kubernetes_io/arch", "kubernetes.node.labels.beta_kubernetes_io/os", "kubernetes.node.labels.kubernetes_io/arch", "kubernetes.node.labels.kubernetes_io/hostname", "kubernetes.node.labels.kubernetes_io/os", "kubernetes.node.labels.node-role_kubernetes_io/worker", "kubernetes.node.labels.kubernetes_io/hostname", "kubernetes.node.uid", "kubernetes.pod.uid", "kubernetes.replicaset.name", "log.file.path", "log.offset", "stream" ]
    
    
    ......
    
    # 配置加密
    # 注意:以下 filebeatConfig 与上文的 filebeatConfig 是同一个配置键,
    # 需合并为一个配置块,values.yaml 中不能出现重复的 filebeatConfig 键
      filebeatConfig:
        filebeat.yml: |
          filebeat.inputs:
          - type: container
            paths:
              - /var/log/containers/*.log
            processors:
            - add_kubernetes_metadata:
                host: ${NODE_NAME}
                matchers:
                - logs_path:
                    logs_path: "/var/log/containers/"
    
          output.elasticsearch:
            username: '${ELASTICSEARCH_USERNAME}'
            password: '${ELASTICSEARCH_PASSWORD}'
            protocol: https
            hosts: ["elasticsearch-master:9200"]
            ssl.certificate_authorities: /usr/share/filebeat/config/certs/elastic-certificate.pem
            ssl.verification_mode: certificate #此处examples/security模板中没有,缺少无法启动
    
      secretMounts:
        - name: elastic-certificate-pem
          secretName: elastic-certificate-pem
          path: /usr/share/filebeat/config/certs
    
      extraEnvs:
        - name: 'ELASTICSEARCH_USERNAME'
          valueFrom:
            secretKeyRef:
              name: elastic-credentials
              key: username
        - name: 'ELASTICSEARCH_PASSWORD'
          valueFrom:
            secretKeyRef:
              name: elastic-credentials
              key: password
    
    ......
    
    #数据持久化路径
    hostPathRoot: /var/lib
    
    ......
  2. 部署filebeat
    helm install fb --namespace=efk ./filebeat

注:当docker的数据存放目录不在/var/lib/docker下时,需要修改filebeat/templates/daemonset.yaml文件中的docker路径

4.部署kibana

  1. 更改kibana/values.yaml文件
    # 调整资源大小 (默认即可)
    resources:
      requests:
        cpu: "1000m"
        memory: "2Gi"
      limits:
        cpu: "1000m"
        memory: "2Gi"
    
    ......
    
    # 更改为NodePort模式
    service:
      type: NodePort
      port: 5601
      nodePort: "30005"
     
    ......
    
    # 配置加密
    # 连接elasticsearch使用https
    elasticsearchHosts: "https://elasticsearch-master:9200"
    
    extraEnvs:
      - name: "NODE_OPTIONS"
        value: "--max-old-space-size=1800"
      - name: 'ELASTICSEARCH_USERNAME'
        valueFrom:
          secretKeyRef:
            name: elastic-credentials
            key: username
      - name: 'ELASTICSEARCH_PASSWORD'
        valueFrom:
          secretKeyRef:
            name: elastic-credentials
            key: password
      #- name: 'KIBANA_ENCRYPTION_KEY'
      #  valueFrom:
      #    secretKeyRef:
      #      name: kibana
      #      key: encryptionkey
    
    kibanaConfig:
      kibana.yml: |
        i18n.locale: zh-CN
        server.ssl:
          enabled: true
          key: /usr/share/kibana/config/certs/elastic-certificate.pem
          certificate: /usr/share/kibana/config/certs/elastic-certificate.pem
        #xpack.security.encryptionKey: ${KIBANA_ENCRYPTION_KEY} (删除此行有格式问题)
        xpack.security.encryptionKey: "something_at_least_32_characters"  # 请替换为自定义的至少32个字符的随机字符串
        elasticsearch.ssl:
          certificateAuthorities: /usr/share/kibana/config/certs/elastic-certificate.pem
          verificationMode: certificate
    
    protocol: https
    
    secretMounts:
      - name: elastic-certificate-pem
        secretName: elastic-certificate-pem
        path: /usr/share/kibana/config/certs
  2. 部署kibana
    helm install kb --namespace=efk ./kibana

修改后的模板地址:https://download.chencc.xyz/k8s集群使用helm部署efk/