---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: offline-process-schedule
  namespace: qa
  labels:
    app: offline-process-schedule
spec:
  serviceName: offline-process-schedule
  replicas: 1
  selector:
    matchLabels:
      app: offline-process-schedule
  template:
    metadata:
      labels:
        app: offline-process-schedule
    spec:
      terminationGracePeriodSeconds: 180
      dnsPolicy: "None"
      dnsConfig:
        nameservers:
          - 223.5.5.5
          - 10.108.30.101
          - 10.26.22.50
        searches:
          - pro.svc.cluster.local
          - svc.cluster.local
          - cluster.local
        options:
          - name: ndots
            value: "5"
      initContainers:
        - name: init
          image: hub.evbj.easou.com/dev/busybox
          # busybox chmod expects options before the mode and path arguments
          command: [ "chmod", "-R", "777", "/data" ]
          imagePullPolicy: IfNotPresent
          volumeMounts:
            - name: volume
              mountPath: /data
      #nodeSelector:
      #  app.touchrs: touchrs
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 100
              podAffinityTerm:
                labelSelector:
                  matchExpressions:
                    - key: app
                      operator: In
                      values:
                        - offline-process-schedule
                topologyKey: kubernetes.io/hostname
      containers:
        - name: offline-process-schedule
          image: hub.evbj.easou.com/qa/offline-process-schedule:r188114
          imagePullPolicy: IfNotPresent
          env:
            - name: MY_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: MY_POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: MY_POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: MY_POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: JAVA_OPTS
              value: "-server -Xdebug -Xrunjdwp:transport=dt_socket,server=y,address=8001,suspend=n -Xmx2g -Xms2g -Xss512k -Djava.awt.headless=true -Djava.security.egd=file:/dev/./urandom -Dspring.profiles.active=test"
          ports:
            - containerPort: 8080
              name: port
            - containerPort: 8001
              name: "jvm-debug"
          volumeMounts:
            - name: volume
              mountPath: /data
          lifecycle:
            preStop:
              # ask Spring Boot to shut down gracefully before the pod is killed
              exec:
                command: ["/bin/sh", "-c", "curl -H 'loginName:admin' -H 'password:ea1so2ua3dm4in5' -X POST 127.0.0.1:8080/actuator/shutdown"]
          livenessProbe:
            httpGet:
              path: /actuator/health
              port: 8080
              httpHeaders:
                - name: loginName
                  value: admin
                - name: password
                  value: ea1so2ua3dm4in5
            initialDelaySeconds: 40   # first probe fires 40s after container start
            periodSeconds: 15         # probe every 15s
            timeoutSeconds: 3         # probe timeout is 3s
            successThreshold: 1       # one success marks the container healthy
            failureThreshold: 5       # five consecutive failures mark it unhealthy (default is 3)
          # plain TCP readiness check; an HTTP-based alternative is sketched
          # at the end of this file
          readinessProbe:
            tcpSocket:
              port: 8080
            initialDelaySeconds: 40
            periodSeconds: 15
        - name: filebeat
          image: hub.evbj.easou.com/pro/filebeat:7.13.3
          imagePullPolicy: IfNotPresent
          volumeMounts:
            - name: volume            # log volume shared by the app container and filebeat
              mountPath: /data
            - name: filebeat-config
              mountPath: /usr/share/filebeat/filebeat.yml
              subPath: filebeat.yml
      volumes:
        - name: filebeat-config
          configMap:
            name: filebeat-config-offline-process-schedule
            items:
              - key: filebeat.yml
                path: filebeat.yml
  volumeClaimTemplates:
    - metadata:
        name: volume
      spec:
        accessModes: [ "ReadWriteOnce" ]
        storageClassName: rbd
        resources:
          requests:
            storage: 100Gi
---
apiVersion: v1
kind: Service
metadata:
  name: offline-process-schedule-svc
  namespace: qa
  labels:
    app: offline-process-schedule-svc
spec:
  type: NodePort
  ports:
    - port: 80
      targetPort: 8080
      name: "main"
  selector:
    app: offline-process-schedule
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: offline-process-schedule-ingress
  namespace: qa
spec:
  ingressClassName: nginx
  rules:
    - host: qa-offline-process-schedule.ieasou.cn
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              service:
                name: offline-process-schedule-svc
                port:
                  number: 80   # must reference the Service port (80), which forwards to targetPort 8080
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-config-offline-process-schedule
  namespace: qa
data:
  filebeat.yml: |
    filebeat.inputs:
      - type: log
        enabled: true
        # merge multi-line records (e.g. stack traces) into a single event;
        # a worked example of the grouping is sketched at the end of this file
        multiline.pattern: '^\[[0-9]{4}-[0-9]{2}-[0-9]{2}'
        multiline.negate: true
        multiline.match: after
        multiline.timeout: 30s
        fields:
          log_topic: offline-process-schedule
        fields_under_root: true
        paths:
          - "/data/logs/app/*.log"
          - "/data/logs/*.log"
    output.kafka:
      version: 2.0.0
      enabled: true
      hosts:
        - "kafka-0.kafka-headless.qa.svc.cluster.local:9093"
        - "kafka-1.kafka-headless.qa.svc.cluster.local:9093"
        - "kafka-2.kafka-headless.qa.svc.cluster.local:9093"
      topic: 'offline-process-topic'
      required_acks: 1
      partition.round_robin:
        reachable_only: false
      compression: gzip
      max_message_bytes: 1000000
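# --- Note: HTTP-based readiness check (optional sketch) ---------------------
# The readinessProbe above only verifies that port 8080 accepts TCP
# connections. Below is a minimal sketch of an alternative, assuming the same
# actuator endpoint and auth headers already used by the livenessProbe, so
# readiness would reflect application health rather than just an open socket.
# Kept commented out; adopt only if the plain TCP check is not deliberate.
#
#          readinessProbe:
#            httpGet:
#              path: /actuator/health
#              port: 8080
#              httpHeaders:
#                - name: loginName
#                  value: admin
#                - name: password
#                  value: ea1so2ua3dm4in5
#            initialDelaySeconds: 40
#            periodSeconds: 15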
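# --- Note: how the multiline settings group log lines ------------------------
# With multiline.negate: true and multiline.match: after, any line starting
# with "[YYYY-MM-DD" opens a new event, and every following line that does NOT
# match the pattern is appended to the event before it. Hypothetical log lines
# for illustration (not taken from this application):
#
#   [2024-05-01 12:00:00] ERROR schedule job failed      <- starts a new event
#   java.lang.NullPointerException                       <- appended to it
#       at com.example.Job.run(Job.java:42)              <- appended to it
#   [2024-05-01 12:00:05] INFO next run queued           <- starts the next event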