
deploy pro/realtime-yuewen-v2:r188684, testing whether the commit goes through

pmrd committed 1 year ago, commit a14d36dd9f
3 changed files with 140 additions and 32 deletions
  1. .drone.yml       +4   −4
  2. Dockerfile       +1   −1
  3. deploy-tmp.yaml  +135 −27

+ 4 - 4
.drone.yml

@@ -21,7 +21,7 @@ steps:
   environment:
     JNLP_ENV: pro
     JNLP_REPLICAS: 2
-    JNLP_TAG: r188682
+    JNLP_TAG: r188684
     JNLP_VERSION: v1  # default v1, v2 for canary
    DEPLOY_ENV: k8s-1 # choose which cluster to deploy the application to: k8s-1 or k8s-2
     JNLP_REPO: hub.evbj.easou.com
@@ -54,8 +54,8 @@ steps:
     corpid: ww419ee4063735e1c0
     corp_secret: zpiRBLETH9eLwIMQ4eJ_r_dcm3BPSGeLHvTcft8Ot-M
     agent_id: 1000004
-    title: "Pipeline pro/realtime-yuewen-v2:r188682 Success"
-    description: "${DRONE_BUILD_LINK} pro/realtime-yuewen-v2:r188682 部署完成 测试是否能提交"
+    title: "Pipeline pro/realtime-yuewen-v2:r188684 Success"
+    description: "${DRONE_BUILD_LINK} pro/realtime-yuewen-v2:r188684 部署完成 测试是否能提交"
     msg_url: ${DRONE_BUILD_LINK}
     btn_txt: "否"
   when:
@@ -68,7 +68,7 @@ steps:
     corpid: ww419ee4063735e1c0
     corp_secret: zpiRBLETH9eLwIMQ4eJ_r_dcm3BPSGeLHvTcft8Ot-M
     agent_id: 1000004
-    title: "Pipeline pro/realtime-yuewen-v2:r188682 Failure"
+    title: "Pipeline pro/realtime-yuewen-v2:r188684 Failure"
     description: "${DRONE_BUILD_LINK} 部署失败,请检查配置!测试是否能提交"
     msg_url: ${DRONE_BUILD_LINK}
     btn_txt: "否"

+ 1 - 1
Dockerfile

@@ -1 +1 @@
-FROM hub.evbj.easou.com/pro/realtime-yuewen-v2:r188682
+FROM hub.evbj.easou.com/pro/realtime-yuewen-v2:r188684

+ 135 - 27
deploy-tmp.yaml

@@ -47,7 +47,7 @@ spec:
       #              topologyKey: kubernetes.io/hostname
       containers:
         - name: realtime-yuewen-v2
-          image: hub.evbj.easou.com/pro/realtime-yuewen-v2:r188682
+          image: hub.evbj.easou.com/pro/realtime-yuewen-v2:r188684
           imagePullPolicy: IfNotPresent
           env:
             - name: MY_NODE_NAME
@@ -178,35 +178,143 @@ metadata:
   namespace: pro
 data:
   flume.conf: |
+    a1.sources = r1 r2
+    a1.channels = c1 c2
+    a1.sinks =
+    
+    a1.sources.r1.type = com.github.ningg.flume.source.SpoolDirectoryTailFileSource
+    a1.sources.r1.channels = c1
+    a1.sources.r1.spoolDir = /data/logs/app/
+    a1.sources.r1.fileSuffix = .COMPLETED
+    a1.sources.r1.deletePolicy = never
+    a1.sources.r1.ignorePattern = ^$
+    a1.sources.r1.targetPattern = ywbuy_earning.*(\\d){10}.*
+    a1.sources.r1.targetFilename = yyyyMMddHH
+    a1.sources.r1.trackerDir = .flumespooltail_earning
+    a1.sources.r1.consumeOrder = oldest
+    a1.sources.r1.bufferMaxLineLength = 500000
+    a1.sources.r1.inputCharset = UTF-8
+    a1.sources.r1.decodeErrorPolicy = REPLACE
+    a1.sources.r1.deserializer = LINE
+    a1.sources.r1.interceptors = i1 i2 i3 i4
+    
+    # configure interceptor i1
+    a1.sources.r1.interceptors.i1.type = regex_filter
+    a1.sources.r1.interceptors.i1.regex = ^\\s*$
+    a1.sources.r1.interceptors.i1.excludeEvents = true
+    # configure interceptor i2
+    a1.sources.r1.fileHeader = true
+    a1.sources.r1.basenameHeader = true
+    a1.sources.r1.interceptors.i2.type = com.easou.flume.interceptor.RegexExtractorExtInterceptor$Builder
+    a1.sources.r1.interceptors.i2.regex = (.*)\\.(.*)\\.(.*)
+    a1.sources.r1.interceptors.i2.extractorHeader = true
+    a1.sources.r1.interceptors.i2.extractorHeaderKey = basename
+    a1.sources.r1.interceptors.i2.serializers = s1 s2 s3
+    a1.sources.r1.interceptors.i2.serializers.s1.name = log_type
+    a1.sources.r1.interceptors.i2.serializers.s2.name = file_type
+    a1.sources.r1.interceptors.i2.serializers.s3.name = log_data
+    # configure interceptor i3
+    a1.sources.r1.interceptors.i3.type=com.easou.flume.interceptor.ServerTimeInterceptor$Builder
+    a1.sources.r1.interceptors.i3.isServerTime=true
+    a1.sources.r1.interceptors.i3.timeType=yyyyMMddHHmmss
+    # configure interceptor i4
+    a1.sources.r1.interceptors.i4.type = timestamp
+    
+    
     a1.channels.c1.type = org.apache.flume.channel.kafka.KafkaChannel
-    a1.channels.c1.kafka.bootstrap.servers = 10.26.27.71:8092,10.26.22.102:8092,10.26.22.120:8092,10.26.22.121:8092,10.26.22.122:8092
+    a1.channels.c1.kafka.bootstrap.servers = 10.26.22.124:8092,10.26.22.121:8092,10.26.22.120:8092,10.26.22.102:8092,10.26.22.122:8092
     a1.channels.c1.kafka.topic = yuewen_new_buydetail
-    a1.channels.c1.kafka.consumer.group.id = flume2kafka
+    # Enable Kafka idempotence so that retries do not produce duplicate records
+    a1.channels.c1.kafka.producer.enable.idempotence = true
+    a1.channels.c1.kafka.producer.acks = all
+    a1.channels.c1.kafka.producer.max.in.flight.requests.per.connection = 1
+    # Retry 20 times with a 30-second timeout each, so one send takes at most about 10 minutes. Try to keep Kafka from raising exceptions, because on an exception the Flume source retries and duplicates data.
+    a1.channels.c1.kafka.producer.retries = 20
+    a1.channels.c1.kafka.producer.request.timeout.ms = 30000
+    a1.channels.c1.kafka.consumer.group.id = integral2kafka
     
-    # Define an Avro source called r1 on a1 and tell it
-    # to bind to 0.0.0.0:41414. Connect it to channel c1.
-    # Note: two channels are split off here, one going to k1 and one to k2
-    # If only channel c1 were used here and both sink channels below were c1, messages would be distributed randomly across the two sinks instead of each sink getting a full copy
-    a1.sources.r1.channels = c1
-    a1.sources.r1.type = exec
-    a1.sources.r1.command = tail -F /data/logs/app/access.log
-    a1.sources.r1.restart = true
-    a1.sources.r1.selector.optional = c1
-    a1.sources.r1.batchSize = 100000
-    a1.sources.r1.interceptors = i1 i2 i3
-    a1.sources.r1.interceptors.i1.type = timestamp
-    a1.sources.r1.interceptors.i2.type = host
-    a1.sources.r1.interceptors.i2.useIP = false
-    a1.sources.r1.interceptors.i3.type = regex_filter
-    a1.sources.r1.interceptors.i3.regex = ^\\s*$
-    a1.sources.r1.interceptors.i3.excludeEvents = true
-    # The default is replicating; multiplexing is also available
-    #a1.sources.r1.selector.type = replicationg
+    #####################
     
-    # Finally, now that we've defined all of our components, tell
-    # a1 which ones we want to activate.
-    a1.sources = r1
-    a1.channels = c1
-    a1.sinks =
+    a1.sources.r2.type = com.github.ningg.flume.source.SpoolDirectoryTailFileSource
+    a1.sources.r2.channels = c2
+    a1.sources.r2.spoolDir = /data/logs/app/
+    a1.sources.r2.fileSuffix = .COMPLETED
+    a1.sources.r2.deletePolicy = never
+    a1.sources.r2.ignorePattern = ^$
+    a1.sources.r2.targetPattern = ywbuy_expending.*(\\d){10}.*
+    a1.sources.r2.targetFilename = yyyyMMddHH
+    a1.sources.r2.trackerDir = .flumespooltail_expending
+    a1.sources.r2.consumeOrder = oldest
+    a1.sources.r2.bufferMaxLineLength = 500000
+    a1.sources.r2.inputCharset = UTF-8
+    a1.sources.r2.decodeErrorPolicy = REPLACE
+    a1.sources.r2.deserializer = LINE
+    a1.sources.r2.interceptors = i1 i2 i3 i4
+    
+    # configure interceptor i1
+    a1.sources.r2.interceptors.i1.type = regex_filter
+    a1.sources.r2.interceptors.i1.regex = ^\\s*$
+    a1.sources.r2.interceptors.i1.excludeEvents = true
+    # configure interceptor i2
+    a1.sources.r2.fileHeader = true
+    a1.sources.r2.basenameHeader = true
+    a1.sources.r2.interceptors.i2.type = com.easou.flume.interceptor.RegexExtractorExtInterceptor$Builder
+    a1.sources.r2.interceptors.i2.regex = (.*)\\.(.*)\\.(.*)
+    a1.sources.r2.interceptors.i2.extractorHeader = true
+    a1.sources.r2.interceptors.i2.extractorHeaderKey = basename
+    a1.sources.r2.interceptors.i2.serializers = s1 s2 s3
+    a1.sources.r2.interceptors.i2.serializers.s1.name = log_type
+    a1.sources.r2.interceptors.i2.serializers.s2.name = file_type
+    a1.sources.r2.interceptors.i2.serializers.s3.name = log_data
+    # configure interceptor i3
+    a1.sources.r2.interceptors.i3.type=com.easou.flume.interceptor.ServerTimeInterceptor$Builder
+    a1.sources.r2.interceptors.i3.isServerTime=true
+    a1.sources.r2.interceptors.i3.timeType=yyyyMMddHHmmss
+    # configure interceptor i4
+    a1.sources.r2.interceptors.i4.type = timestamp
+    
+    
+    a1.channels.c2.type = org.apache.flume.channel.kafka.KafkaChannel
+    a1.channels.c2.kafka.bootstrap.servers = 10.26.22.124:8092,10.26.22.121:8092,10.26.22.120:8092,10.26.22.102:8092,10.26.22.122:8092
+    a1.channels.c2.kafka.topic = yuewen_new_buydetail
+    # Enable Kafka idempotence so that retries do not produce duplicate records
+    a1.channels.c2.kafka.producer.enable.idempotence = true
+    a1.channels.c2.kafka.producer.acks = all
+    a1.channels.c2.kafka.producer.max.in.flight.requests.per.connection = 1
+    # Retry 20 times with a 30-second timeout each, so one send takes at most about 10 minutes. Try to keep Kafka from raising exceptions, because on an exception the Flume source retries and duplicates data.
+    a1.channels.c2.kafka.producer.retries = 20
+    a1.channels.c2.kafka.producer.request.timeout.ms = 30000
+    a1.channels.c2.kafka.consumer.group.id = integral2kafka
+
+#    a1.channels.c1.type = org.apache.flume.channel.kafka.KafkaChannel
+#    a1.channels.c1.kafka.bootstrap.servers = 10.26.27.71:8092,10.26.22.102:8092,10.26.22.120:8092,10.26.22.121:8092,10.26.22.122:8092
+#    a1.channels.c1.kafka.topic = yuewen_new_buydetail
+#    a1.channels.c1.kafka.consumer.group.id = flume2kafka
+#
+#    # Define an Avro source called r1 on a1 and tell it
+#    # to bind to 0.0.0.0:41414. Connect it to channel c1.
#    # Note: two channels are split off here, one going to k1 and one to k2
#    # If only channel c1 were used here and both sink channels below were c1, messages would be distributed randomly across the two sinks instead of each sink getting a full copy
+#    a1.sources.r1.channels = c1
+#    a1.sources.r1.type = exec
+#    a1.sources.r1.command = tail -F /data/logs/app/access.log
+#    a1.sources.r1.restart = true
+#    a1.sources.r1.selector.optional = c1
+#    a1.sources.r1.batchSize = 100000
+#    a1.sources.r1.interceptors = i1 i2 i3
+#    a1.sources.r1.interceptors.i1.type = timestamp
+#    a1.sources.r1.interceptors.i2.type = host
+#    a1.sources.r1.interceptors.i2.useIP = false
+#    a1.sources.r1.interceptors.i3.type = regex_filter
+#    a1.sources.r1.interceptors.i3.regex = ^\\s*$
+#    a1.sources.r1.interceptors.i3.excludeEvents = true
#    # The default is replicating; multiplexing is also available
+#    #a1.sources.r1.selector.type = replicationg
+#
+#    # Finally, now that we've defined all of our components, tell
+#    # a1 which ones we want to activate.
+#    a1.sources = r1
+#    a1.channels = c1
+#    a1.sinks =
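
The producer settings added to both KafkaChannels above are the standard Kafka idempotent-producer combination: enable.idempotence, acks=all, a single in-flight request, and bounded retries (20 x 30 s, roughly 10 minutes worst case per send). For reference, a minimal standalone sketch of the same guarantees using the plain Kafka Java client; the broker list and topic are copied from this flume.conf, while the class name and test message are illustrative only and not part of this deployment:

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class IdempotentProducerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Broker list and topic copied from the flume.conf added in this commit.
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,
                "10.26.22.124:8092,10.26.22.121:8092,10.26.22.120:8092,10.26.22.102:8092,10.26.22.122:8092");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        // The same guarantees the KafkaChannel asks for: idempotent writes, acks from all
        // in-sync replicas, one in-flight request (keeps ordering under retries),
        // and 20 retries with a 30-second request timeout each.
        props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true");
        props.put(ProducerConfig.ACKS_CONFIG, "all");
        props.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "1");
        props.put(ProducerConfig.RETRIES_CONFIG, "20");
        props.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, "30000");

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // A retried duplicate of this record is de-duplicated broker-side
            // via the idempotent producer's sequence numbers.
            producer.send(new ProducerRecord<>("yuewen_new_buydetail", "test message"));
            producer.flush();
        }
    }
}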