---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: realtime-yuewen-v2
  namespace: pro
  labels:
    app: realtime-yuewen-v2
spec:
  serviceName: realtime-yuewen-v2
  replicas: 2
  selector:
    matchLabels:
      app: realtime-yuewen-v2
  template:
    metadata:
      labels:
        app: realtime-yuewen-v2
    spec:
      terminationGracePeriodSeconds: 30
      dnsPolicy: ClusterFirstWithHostNet
      dnsConfig:
        nameservers:
          - 10.108.30.101
          - 223.5.5.5
      initContainers:
        - name: init
          image: hub.evbj.easou.com/dev/busybox
          command: [ "chmod", "777", "-R", "/data" ]
          imagePullPolicy: IfNotPresent
          volumeMounts:
            - name: volume
              mountPath: /data
      #nodeSelector:
      #  app.touchrs: touchrs
      #affinity:
      #  podAntiAffinity:
      #    preferredDuringSchedulingIgnoredDuringExecution:
      #      - weight: 100
      #        podAffinityTerm:
      #          labelSelector:
      #            matchExpressions:
      #              - key: app
      #                operator: In
      #                values:
      #                  - realtime-yuewen-v2
      #          topologyKey: kubernetes.io/hostname
      containers:
        - name: realtime-yuewen-v2
          image: hub.evbj.easou.com/pro/realtime-yuewen-v2:r188687
          imagePullPolicy: IfNotPresent
          env:
            - name: MY_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: MY_POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: MY_POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: MY_POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: JAVA_OPTS
              value: "-server -Xmx2g -Xms2g -Xss512k -Djava.awt.headless=true -Djava.security.egd=file:/dev/./urandom -Dspring.profiles.active=product"
          ports:
            - containerPort: 8080
              name: port
            - containerPort: 8001
              name: "jvm-debug"
          volumeMounts:
            - name: volume
              mountPath: /data
          lifecycle:
            preStop:
              exec:
                command: [ "/bin/sh", "-c", "curl -H 'loginName:admin' -H 'password:ea1so2ua3dm4in5' -X POST 127.0.0.1:8080/actuator/shutdown" ]
          livenessProbe:
            httpGet:
              path: /actuator/health
              port: 8080
              httpHeaders:
                - name: loginName
                  value: admin
                - name: password
                  value: ea1so2ua3dm4in5
            initialDelaySeconds: 10  # first probe 10s after the container starts
            periodSeconds: 10        # probe every 10s
            timeoutSeconds: 3        # probe timeout of 3s
            successThreshold: 1      # one success marks the container healthy
            failureThreshold: 2      # two consecutive failures mark it unhealthy (default is 3)
          readinessProbe:
            tcpSocket:
              port: 8080
            initialDelaySeconds: 40
            periodSeconds: 15
        - image: hub.evbj.easou.com/dev/flume:1.7.0-20220930-v1
          imagePullPolicy: IfNotPresent
          name: flume
          volumeMounts:
            - name: volume  # log volume shared with the app container so flume can tail its logs
              mountPath: /data
            - name: flume-config
              mountPath: /app/soft/flume-1.7.0/conf/flume.conf
              subPath: flume.conf
      volumes:
        - name: flume-config
          configMap:
            name: flume-config-realtime-yuewen-v2
            items:
              - key: flume.conf
                path: flume.conf
  volumeClaimTemplates:
    - metadata:
        name: volume
      spec:
        accessModes: [ "ReadWriteOnce" ]
        storageClassName: rbd
        resources:
          requests:
            storage: 100Gi
---
apiVersion: v1
kind: Service
metadata:
  name: realtime-yuewen-v2-svc
  namespace: pro
  labels:
    app: realtime-yuewen-v2-svc
spec:
  type: NodePort
  ports:
    - port: 80
      targetPort: 8080
      name: "main"
  selector:
    app: realtime-yuewen-v2
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: realtime-yuewen-v2-ingress
  namespace: pro
spec:
  rules:
    - host: ywrealtimev2.appeasou.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: realtime-yuewen-v2-svc
                port:
                  number: 80  # the Service exposes port 80 (targetPort 8080), so the Ingress must reference 80
    - host: pro-realtime-yuewen-v2.ieasou.cn
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: realtime-yuewen-v2-svc
                port:
                  number: 80
  ingressClassName: nginx
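# --- Usage sketch (comments only, not part of the manifests): one way to apply
# and verify the StatefulSet above. The manifest filename is illustrative; the
# loginName/password headers are the actuator credentials configured in the
# probes above.
#
#   kubectl apply -f realtime-yuewen-v2.yaml
#   kubectl -n pro rollout status statefulset/realtime-yuewen-v2
#   kubectl -n pro port-forward statefulset/realtime-yuewen-v2 8080:8080
#   curl -H 'loginName:admin' -H 'password:ea1so2ua3dm4in5' http://127.0.0.1:8080/actuator/health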
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: flume-config-realtime-yuewen-v2
  namespace: pro
data:
  flume.conf: |
    a1.sources = r1 r2
    a1.channels = c1 c2
    a1.sinks =

    # Source r1: tail ywbuy_earning logs from the shared /data volume
    a1.sources.r1.type = com.github.ningg.flume.source.SpoolDirectoryTailFileSource
    a1.sources.r1.channels = c1
    a1.sources.r1.spoolDir = /data/logs/app/
    a1.sources.r1.fileSuffix = .COMPLETED
    a1.sources.r1.deletePolicy = never
    a1.sources.r1.ignorePattern = ^$
    a1.sources.r1.targetPattern = ywbuy_earning.*(\\d){10}.*
    a1.sources.r1.targetFilename = yyyyMMddHH
    a1.sources.r1.trackerDir = .flumespooltail_earning
    a1.sources.r1.consumeOrder = oldest
    a1.sources.r1.bufferMaxLineLength = 500000
    a1.sources.r1.inputCharset = UTF-8
    a1.sources.r1.decodeErrorPolicy = REPLACE
    a1.sources.r1.deserializer = LINE
    a1.sources.r1.interceptors = i1 i2 i3 i4
    # interceptor i1: drop blank lines
    a1.sources.r1.interceptors.i1.type = regex_filter
    a1.sources.r1.interceptors.i1.regex = ^\\s*$
    a1.sources.r1.interceptors.i1.excludeEvents = true
    # interceptor i2: extract headers from the source file's basename
    a1.sources.r1.fileHeader = true
    a1.sources.r1.basenameHeader = true
    a1.sources.r1.interceptors.i2.type = com.easou.flume.interceptor.RegexExtractorExtInterceptor$Builder
    a1.sources.r1.interceptors.i2.regex = (.*)\\.(.*)\\.(.*)
    a1.sources.r1.interceptors.i2.extractorHeader = true
    a1.sources.r1.interceptors.i2.extractorHeaderKey = basename
    a1.sources.r1.interceptors.i2.serializers = s1 s2 s3
    a1.sources.r1.interceptors.i2.serializers.s1.name = log_type
    a1.sources.r1.interceptors.i2.serializers.s2.name = file_type
    a1.sources.r1.interceptors.i2.serializers.s3.name = log_data
    # interceptor i3: stamp events with server time
    a1.sources.r1.interceptors.i3.type = com.easou.flume.interceptor.ServerTimeInterceptor$Builder
    a1.sources.r1.interceptors.i3.isServerTime = true
    a1.sources.r1.interceptors.i3.timeType = yyyyMMddHHmmss
    # interceptor i4: standard timestamp interceptor
    a1.sources.r1.interceptors.i4.type = timestamp

    # Channel c1: Kafka channel (no sink needed; events land directly in Kafka)
    a1.channels.c1.type = org.apache.flume.channel.kafka.KafkaChannel
    a1.channels.c1.kafka.bootstrap.servers = 10.26.22.124:8092,10.26.22.121:8092,10.26.22.120:8092,10.26.22.102:8092,10.26.22.122:8092
    a1.channels.c1.kafka.topic = yuewen_new_buydetail
    # Enable Kafka producer idempotence so retries do not duplicate data
    a1.channels.c1.kafka.producer.enable.idempotence = true
    a1.channels.c1.kafka.producer.acks = all
    a1.channels.c1.kafka.producer.max.in.flight.requests.per.connection = 1
    # Retry 20 times with a 30s timeout each, so one send can take up to ~10 minutes.
    # The aim is to keep Kafka from throwing, because on an exception the flume
    # source retries and duplicates data.
    a1.channels.c1.kafka.producer.retries = 20
    a1.channels.c1.kafka.producer.request.timeout.ms = 30000
    a1.channels.c1.kafka.consumer.group.id = integral2kafka

    #####################
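    # Worked example (illustrative filename, not taken from the config): a spooled
    # file named ywbuy_earning.log.2024010112 matches r1's targetPattern, and
    # interceptor i2's regex (.*)\\.(.*)\\.(.*) splits its basename into the headers
    #   log_type  = ywbuy_earning
    #   file_type = log
    #   log_data  = 2024010112
    # which ride along with every event into the Kafka channel. Source r2 below
    # applies the same interceptor chain to ywbuy_expending files.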
    # Source r2: same pipeline for ywbuy_expending logs
    a1.sources.r2.type = com.github.ningg.flume.source.SpoolDirectoryTailFileSource
    a1.sources.r2.channels = c2
    a1.sources.r2.spoolDir = /data/logs/app/
    a1.sources.r2.fileSuffix = .COMPLETED
    a1.sources.r2.deletePolicy = never
    a1.sources.r2.ignorePattern = ^$
    a1.sources.r2.targetPattern = ywbuy_expending.*(\\d){10}.*
    a1.sources.r2.targetFilename = yyyyMMddHH
    a1.sources.r2.trackerDir = .flumespooltail_expending
    a1.sources.r2.consumeOrder = oldest
    a1.sources.r2.bufferMaxLineLength = 500000
    a1.sources.r2.inputCharset = UTF-8
    a1.sources.r2.decodeErrorPolicy = REPLACE
    a1.sources.r2.deserializer = LINE
    a1.sources.r2.interceptors = i1 i2 i3 i4
    # interceptor i1: drop blank lines
    a1.sources.r2.interceptors.i1.type = regex_filter
    a1.sources.r2.interceptors.i1.regex = ^\\s*$
    a1.sources.r2.interceptors.i1.excludeEvents = true
    # interceptor i2: extract headers from the source file's basename
    a1.sources.r2.fileHeader = true
    a1.sources.r2.basenameHeader = true
    a1.sources.r2.interceptors.i2.type = com.easou.flume.interceptor.RegexExtractorExtInterceptor$Builder
    a1.sources.r2.interceptors.i2.regex = (.*)\\.(.*)\\.(.*)
    a1.sources.r2.interceptors.i2.extractorHeader = true
    a1.sources.r2.interceptors.i2.extractorHeaderKey = basename
    a1.sources.r2.interceptors.i2.serializers = s1 s2 s3
    a1.sources.r2.interceptors.i2.serializers.s1.name = log_type
    a1.sources.r2.interceptors.i2.serializers.s2.name = file_type
    a1.sources.r2.interceptors.i2.serializers.s3.name = log_data
    # interceptor i3: stamp events with server time
    a1.sources.r2.interceptors.i3.type = com.easou.flume.interceptor.ServerTimeInterceptor$Builder
    a1.sources.r2.interceptors.i3.isServerTime = true
    a1.sources.r2.interceptors.i3.timeType = yyyyMMddHHmmss
    # interceptor i4: standard timestamp interceptor
    a1.sources.r2.interceptors.i4.type = timestamp

    # Channel c2: Kafka channel, same brokers and topic as c1
    a1.channels.c2.type = org.apache.flume.channel.kafka.KafkaChannel
    a1.channels.c2.kafka.bootstrap.servers = 10.26.22.124:8092,10.26.22.121:8092,10.26.22.120:8092,10.26.22.102:8092,10.26.22.122:8092
    a1.channels.c2.kafka.topic = yuewen_new_buydetail
    # Enable Kafka producer idempotence so retries do not duplicate data
    a1.channels.c2.kafka.producer.enable.idempotence = true
    a1.channels.c2.kafka.producer.acks = all
    a1.channels.c2.kafka.producer.max.in.flight.requests.per.connection = 1
    # Retry 20 times with a 30s timeout each, so one send can take up to ~10 minutes.
    # The aim is to keep Kafka from throwing, because on an exception the flume
    # source retries and duplicates data.
    a1.channels.c2.kafka.producer.retries = 20
    a1.channels.c2.kafka.producer.request.timeout.ms = 30000
    a1.channels.c2.kafka.consumer.group.id = integral2kafka

    # ---- Legacy exec-source configuration, kept commented out for reference ----
    # a1.channels.c1.type = org.apache.flume.channel.kafka.KafkaChannel
    # a1.channels.c1.kafka.bootstrap.servers = 10.26.27.71:8092,10.26.22.102:8092,10.26.22.120:8092,10.26.22.121:8092,10.26.22.122:8092
    # a1.channels.c1.kafka.topic = yuewen_new_buydetail
    # a1.channels.c1.kafka.consumer.group.id = flume2kafka
    #
    # # Define an Avro source called r1 on a1 and tell it
    # # to bind to 0.0.0.0:41414. Connect it to channel c1.
    # # Note that two channels are split out here, one going to k1 and one to k2.
    # # With only one channel c1 (both sinks set to c1), messages would be spread
    # # randomly across the two sinks instead of each sink receiving a copy.
    # a1.sources.r1.channels = c1
    # a1.sources.r1.type = exec
    # a1.sources.r1.command = tail -F /data/logs/app/access.log
    # a1.sources.r1.restart = true
    # a1.sources.r1.selector.optional = c1
    # a1.sources.r1.batchSize = 100000
    # a1.sources.r1.interceptors = i1 i2 i3
    # a1.sources.r1.interceptors.i1.type = timestamp
    # a1.sources.r1.interceptors.i2.type = host
    # a1.sources.r1.interceptors.i2.useIP = false
    # a1.sources.r1.interceptors.i3.type = regex_filter
    # a1.sources.r1.interceptors.i3.regex = ^\\s*$
    # a1.sources.r1.interceptors.i3.excludeEvents = true
    # # The default selector type is replicating; multiplexing is also available.
    # #a1.sources.r1.selector.type = replicating
    #
    # # Finally, now that we've defined all of our components, tell
    # # a1 which ones we want to activate.
    # a1.sources = r1
    # a1.channels = c1
    # a1.sinks =
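# --- Pipeline smoke test (comments only, a sketch): assumes the stock Kafka
# console consumer is available on a host that can reach the brokers listed in
# the channel configuration above.
#
#   kafka-console-consumer.sh \
#     --bootstrap-server 10.26.22.124:8092 \
#     --topic yuewen_new_buydetail \
#     --max-messages 10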