
deploy pro/realtime-yuewen-v2:r188822 test whether commits go through

pmrd 1 year ago
parent
commit
cde1158064
3 changed files with 36 additions and 57 deletions
  1. .drone.yml (+4 -4)
  2. Dockerfile (+1 -1)
  3. deploy-tmp.yaml (+31 -52)

+ 4 - 4
.drone.yml

@@ -21,7 +21,7 @@ steps:
   environment:
     JNLP_ENV: pro
     JNLP_REPLICAS: 1
-    JNLP_TAG: r188820
+    JNLP_TAG: r188822
     JNLP_VERSION: v1  # default v1, v2 for canary
     DEPLOY_ENV: k8s-1 # choose which cluster to deploy the app to: k8s-1 or k8s-2
     JNLP_REPO: hub.evbj.easou.com
@@ -54,8 +54,8 @@ steps:
     corpid: ww419ee4063735e1c0
     corp_secret: zpiRBLETH9eLwIMQ4eJ_r_dcm3BPSGeLHvTcft8Ot-M
     agent_id: 1000004
-    title: "Pipeline pro/realtime-yuewen-v2:r188820 Success"
-    description: "${DRONE_BUILD_LINK} pro/realtime-yuewen-v2:r188820 deployment complete; test whether commits go through"
+    title: "Pipeline pro/realtime-yuewen-v2:r188822 Success"
+    description: "${DRONE_BUILD_LINK} pro/realtime-yuewen-v2:r188822 deployment complete; test whether commits go through"
     msg_url: ${DRONE_BUILD_LINK}
     btn_txt: "否"
   when:
@@ -68,7 +68,7 @@ steps:
     corpid: ww419ee4063735e1c0
     corp_secret: zpiRBLETH9eLwIMQ4eJ_r_dcm3BPSGeLHvTcft8Ot-M
     agent_id: 1000004
-    title: "Pipeline pro/realtime-yuewen-v2:r188820 Failure"
+    title: "Pipeline pro/realtime-yuewen-v2:r188822 Failure"
     description: "${DRONE_BUILD_LINK} 部署失败,请检查配置!测试是否能提交"
     msg_url: ${DRONE_BUILD_LINK}
     btn_txt: "否"
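
Note that the tag bump touches four strings in .drone.yml alone (the JNLP_TAG value plus three notification strings). A minimal sketch of one way to cut that down, assuming Drone's YAML parser resolves standard anchors and tolerates the extra top-level key (the _release_tag key name is hypothetical); the titles and descriptions embed the tag inside larger strings, which a plain alias cannot substitute, so those would still be edited by hand:

    # Hypothetical sketch, not part of this commit: define the tag once
    # and alias it wherever the tag is a whole YAML value.
    _release_tag: &release_tag r188822

    steps:
      - name: deploy
        environment:
          JNLP_ENV: pro
          JNLP_TAG: *release_tag  # resolves to r188822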

+ 1 - 1
Dockerfile

@@ -1 +1 @@
-FROM hub.evbj.easou.com/pro/realtime-yuewen-v2:r188820
+FROM hub.evbj.easou.com/pro/realtime-yuewen-v2:r188822
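
The Dockerfile change simply rebases on the new image tag; the FROM line and the deployment image must point at the same tag the pipeline publishes. For context, a publish step along these lines would produce that tag; this is a hedged sketch using the public plugins/docker image, not taken from this repository, and the secret names are assumptions:

    # Hypothetical Drone step that builds and pushes the retagged image;
    # docker_username/docker_password are assumed secret names.
    - name: publish
      image: plugins/docker
      settings:
        registry: hub.evbj.easou.com
        repo: hub.evbj.easou.com/pro/realtime-yuewen-v2
        tags:
          - r188822
        username:
          from_secret: docker_username
        password:
          from_secret: docker_password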

+ 31 - 52
deploy-tmp.yaml

@@ -47,7 +47,7 @@ spec:
       #              topologyKey: kubernetes.io/hostname
       containers:
         - name: realtime-yuewen-v2
-          image: hub.evbj.easou.com/pro/realtime-yuewen-v2:r188820
+          image: hub.evbj.easou.com/pro/realtime-yuewen-v2:r188822
           imagePullPolicy: IfNotPresent
           env:
             - name: MY_NODE_NAME
@@ -105,6 +105,9 @@ spec:
           volumeMounts:
            - name: volume # the log volume is mounted in both nginx and filebeat
               mountPath: /data
+            - name: flume-config
+              mountPath: /app/soft/flume-1.7.0/conf/flume.conf
+              subPath: flume.conf
       volumes:
         - name: flume-config
           configMap:
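
The added volumeMount uses subPath to overlay the single flume.conf key from the ConfigMap onto Flume's conf directory without hiding the directory's other files. A minimal self-contained sketch of the pattern (the ConfigMap name flume-conf is an assumption, since the diff truncates it); note that a subPath mount is not refreshed when the ConfigMap changes, so the pod must be restarted to pick up edits:

    # Minimal sketch of the subPath pattern; only the volume name
    # flume-config and the mount path come from the diff above.
    apiVersion: v1
    kind: Pod
    metadata:
      name: flume-subpath-demo
    spec:
      containers:
        - name: app
          image: hub.evbj.easou.com/pro/realtime-yuewen-v2:r188822
          volumeMounts:
            - name: flume-config
              # mounts only flume.conf; the rest of conf/ stays intact
              mountPath: /app/soft/flume-1.7.0/conf/flume.conf
              subPath: flume.conf
      volumes:
        - name: flume-config
          configMap:
            name: flume-conf  # assumed; the name is cut off in the diff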
@@ -175,59 +178,35 @@ metadata:
   namespace: pro
 data:
   flume.conf: |
-    a1.sources = r1
-    a1.channels = c1
-    a1.sinks =
+    a1.channels.c1.type = org.apache.flume.channel.kafka.KafkaChannel
+    a1.channels.c1.kafka.bootstrap.servers = 10.40.20.44:8092,10.40.20.43:8092,10.40.20.41:8092,10.40.20.42:8092,10.40.20.40:8092
+    a1.channels.c1.kafka.topic = yuewen_buydetail
+    a1.channels.c1.kafka.consumer.group.id = flume2kafka
     
-    a1.sources.r1.type = com.github.ningg.flume.source.SpoolDirectoryTailFileSource
+    # Define an exec source called r1 on a1 that tails the access
+    # log, and connect it to channel c1.
+    # Note: two channels are split out here, one going to sink k1 and one to k2.
+    # If only one channel c1 were used and both sinks read from c1, each message would go to one of the two sinks at random instead of both sinks receiving a copy.
     a1.sources.r1.channels = c1
-    a1.sources.r1.spoolDir = /data/logs/app/
-    a1.sources.r1.fileSuffix = .COMPLETED
-    a1.sources.r1.deletePolicy = never
-    a1.sources.r1.ignorePattern = ^$
-    a1.sources.r1.targetPattern = access.*(\\d){10}.*
-    a1.sources.r1.targetFilename = yyyyMMddHH
-    a1.sources.r1.trackerDir = .flumespooltail_earning
-    a1.sources.r1.consumeOrder = oldest
-    a1.sources.r1.bufferMaxLineLength = 500000
-    a1.sources.r1.inputCharset = UTF-8
-    a1.sources.r1.decodeErrorPolicy = REPLACE
-    a1.sources.r1.deserializer = LINE
-    a1.sources.r1.interceptors = i1 i2 i3 i4
-    
-    #配置 interceptor i1
-    a1.sources.r1.interceptors.i1.type = regex_filter
-    a1.sources.r1.interceptors.i1.regex = ^\\s*$
-    a1.sources.r1.interceptors.i1.excludeEvents = true
-    #配置 interceptor i2
-    a1.sources.r1.fileHeader = true
-    a1.sources.r1.basenameHeader = true
-    a1.sources.r1.interceptors.i2.type = com.easou.flume.interceptor.RegexExtractorExtInterceptor$Builder
-    a1.sources.r1.interceptors.i2.regex = (.*)\\.(.*)\\.(.*)
-    a1.sources.r1.interceptors.i2.extractorHeader = true
-    a1.sources.r1.interceptors.i2.extractorHeaderKey = basename
-    a1.sources.r1.interceptors.i2.serializers = s1 s2 s3
-    a1.sources.r1.interceptors.i2.serializers.s1.name = log_type
-    a1.sources.r1.interceptors.i2.serializers.s2.name = file_type
-    a1.sources.r1.interceptors.i2.serializers.s3.name = log_data
-    #配置 interceptor i3
-    a1.sources.r1.interceptors.i3.type=com.easou.flume.interceptor.ServerTimeInterceptor$Builder
-    a1.sources.r1.interceptors.i3.isServerTime=true
-    a1.sources.r1.interceptors.i3.timeType=yyyyMMddHHmmss
-    #配置 interceptor i4
-    a1.sources.r1.interceptors.i4.type = timestamp
+    a1.sources.r1.type = exec
+    a1.sources.r1.command = tail -F /data/logs/app/access.log
+    a1.sources.r1.restart = true
+    a1.sources.r1.selector.optional = c1
+    a1.sources.r1.batchSize = 100000
+    a1.sources.r1.interceptors = i1 i2 i3
+    a1.sources.r1.interceptors.i1.type = timestamp
+    a1.sources.r1.interceptors.i2.type = host
+    a1.sources.r1.interceptors.i2.useIP = false
+    a1.sources.r1.interceptors.i3.type = regex_filter
+    a1.sources.r1.interceptors.i3.regex = ^\\s*$
+    a1.sources.r1.interceptors.i3.excludeEvents = true
+    # the default is replicating; multiplexing is also available
+    #a1.sources.r1.selector.type = replicating
     
-    
-    a1.channels.c1.type = org.apache.flume.channel.kafka.KafkaChannel
-    a1.channels.c1.kafka.bootstrap.servers = 10.26.27.71:8092,10.26.22.102:8092,10.26.22.120:8092,10.26.22.121:8092,10.26.22.122:8092
-    a1.channels.c1.kafka.topic = yuewen_buydetail
-    # enable Kafka idempotence so retries do not produce duplicate data
-    a1.channels.c1.kafka.producer.enable.idempotence = true
-    a1.channels.c1.kafka.producer.acks = all
-    a1.channels.c1.kafka.producer.max.in.flight.requests.per.connection = 1
-    # retry 20 times with a 30-second timeout each, so one send takes at most about 10 minutes; keep Kafka from raising exceptions where possible, because the Flume source retries after an exception and that duplicates data
-    a1.channels.c1.kafka.producer.retries = 20
-    a1.channels.c1.kafka.producer.request.timeout.ms = 30000
-    a1.channels.c1.kafka.consumer.group.id = integral2kafka
+    # Finally, now that we've defined all of our components, tell
+    # a1 which ones we want to activate.
+    a1.sources = r1
+    a1.channels = c1
+    a1.sinks =
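
Spliced together in file order, the + lines above yield the following flume.conf (shown as the ConfigMap data it becomes); the section comments are added here for readability:

    data:
      flume.conf: |
        # Kafka channel: events go straight to the topic, so no sink is needed
        a1.channels.c1.type = org.apache.flume.channel.kafka.KafkaChannel
        a1.channels.c1.kafka.bootstrap.servers = 10.40.20.44:8092,10.40.20.43:8092,10.40.20.41:8092,10.40.20.42:8092,10.40.20.40:8092
        a1.channels.c1.kafka.topic = yuewen_buydetail
        a1.channels.c1.kafka.consumer.group.id = flume2kafka

        # exec source tailing the access log into channel c1
        a1.sources.r1.channels = c1
        a1.sources.r1.type = exec
        a1.sources.r1.command = tail -F /data/logs/app/access.log
        a1.sources.r1.restart = true
        a1.sources.r1.selector.optional = c1
        a1.sources.r1.batchSize = 100000
        a1.sources.r1.interceptors = i1 i2 i3
        a1.sources.r1.interceptors.i1.type = timestamp
        a1.sources.r1.interceptors.i2.type = host
        a1.sources.r1.interceptors.i2.useIP = false
        a1.sources.r1.interceptors.i3.type = regex_filter
        a1.sources.r1.interceptors.i3.regex = ^\\s*$
        a1.sources.r1.interceptors.i3.excludeEvents = true

        # activate the components; sinks stay empty because the Kafka
        # channel delivers events directly
        a1.sources = r1
        a1.channels = c1
        a1.sinks =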