@@ -47,7 +47,7 @@ spec:
         # topologyKey: kubernetes.io/hostname
       containers:
       - name: realtime-yuewen-v2
-        image: hub.evbj.easou.com/pro/realtime-yuewen-v2:r188682
+        image: hub.evbj.easou.com/pro/realtime-yuewen-v2:r188684
         imagePullPolicy: IfNotPresent
         env:
         - name: MY_NODE_NAME
@@ -178,35 +178,143 @@ metadata:
   namespace: pro
 data:
   flume.conf: |
+    a1.sources = r1 r2
+    a1.channels = c1 c2
+    a1.sinks =
+
+    a1.sources.r1.type = com.github.ningg.flume.source.SpoolDirectoryTailFileSource
+    a1.sources.r1.channels = c1
+    a1.sources.r1.spoolDir = /data/logs/app/
+    a1.sources.r1.fileSuffix = .COMPLETED
+    a1.sources.r1.deletePolicy = never
+    a1.sources.r1.ignorePattern = ^$
+    a1.sources.r1.targetPattern = ywbuy_earning.*(\\d){10}.*
+    a1.sources.r1.targetFilename = yyyyMMddHH
+    a1.sources.r1.trackerDir = .flumespooltail_earning
+    a1.sources.r1.consumeOrder = oldest
+    a1.sources.r1.bufferMaxLineLength = 500000
+    a1.sources.r1.inputCharset = UTF-8
+    a1.sources.r1.decodeErrorPolicy = REPLACE
+    a1.sources.r1.deserializer = LINE
+    a1.sources.r1.interceptors = i1 i2 i3 i4
+
+    # Configure interceptor i1
+    a1.sources.r1.interceptors.i1.type = regex_filter
+    a1.sources.r1.interceptors.i1.regex = ^\\s*$
+    a1.sources.r1.interceptors.i1.excludeEvents = true
+    # Configure interceptor i2
+    a1.sources.r1.fileHeader = true
+    a1.sources.r1.basenameHeader = true
+    a1.sources.r1.interceptors.i2.type = com.easou.flume.interceptor.RegexExtractorExtInterceptor$Builder
+    a1.sources.r1.interceptors.i2.regex = (.*)\\.(.*)\\.(.*)
+    a1.sources.r1.interceptors.i2.extractorHeader = true
+    a1.sources.r1.interceptors.i2.extractorHeaderKey = basename
+    a1.sources.r1.interceptors.i2.serializers = s1 s2 s3
+    a1.sources.r1.interceptors.i2.serializers.s1.name = log_type
+    a1.sources.r1.interceptors.i2.serializers.s2.name = file_type
+    a1.sources.r1.interceptors.i2.serializers.s3.name = log_data
+    # Configure interceptor i3
+    a1.sources.r1.interceptors.i3.type = com.easou.flume.interceptor.ServerTimeInterceptor$Builder
+    a1.sources.r1.interceptors.i3.isServerTime = true
+    a1.sources.r1.interceptors.i3.timeType = yyyyMMddHHmmss
+    # Configure interceptor i4
+    a1.sources.r1.interceptors.i4.type = timestamp
+
+
     a1.channels.c1.type = org.apache.flume.channel.kafka.KafkaChannel
-    a1.channels.c1.kafka.bootstrap.servers = 10.26.27.71:8092,10.26.22.102:8092,10.26.22.120:8092,10.26.22.121:8092,10.26.22.122:8092
+    a1.channels.c1.kafka.bootstrap.servers = 10.26.22.124:8092,10.26.22.121:8092,10.26.22.120:8092,10.26.22.102:8092,10.26.22.122:8092
     a1.channels.c1.kafka.topic = yuewen_new_buydetail
-    a1.channels.c1.kafka.consumer.group.id = flume2kafka
+    # Enable Kafka idempotence so that retries do not produce duplicate data
+    a1.channels.c1.kafka.producer.enable.idempotence = true
+    a1.channels.c1.kafka.producer.acks = all
+    a1.channels.c1.kafka.producer.max.in.flight.requests.per.connection = 1
+    # Retry 20 times with a 30-second timeout each, so a single send can take up to about 10 minutes. Try hard to keep Kafka from raising exceptions, because after an exception the Flume source retries, which duplicates data
+    a1.channels.c1.kafka.producer.retries = 20
+    a1.channels.c1.kafka.producer.request.timeout.ms = 30000
+    a1.channels.c1.kafka.consumer.group.id = integral2kafka
-    # Define an Avro source called r1 on a1 and tell it
-    # to bind to 0.0.0.0:41414. Connect it to channel c1.
-    # Note: two channels are split off here, one going to k1 and one to k2.
-    # If only one channel c1 were used here and both sinks below read from c1, messages would be distributed randomly between the two sinks instead of each sink receiving a full copy.
-    a1.sources.r1.channels = c1
-    a1.sources.r1.type = exec
-    a1.sources.r1.command = tail -F /data/logs/app/access.log
-    a1.sources.r1.restart = true
-    a1.sources.r1.selector.optional = c1
-    a1.sources.r1.batchSize = 100000
-    a1.sources.r1.interceptors = i1 i2 i3
-    a1.sources.r1.interceptors.i1.type = timestamp
-    a1.sources.r1.interceptors.i2.type = host
-    a1.sources.r1.interceptors.i2.useIP = false
-    a1.sources.r1.interceptors.i3.type = regex_filter
-    a1.sources.r1.interceptors.i3.regex = ^\\s*$
-    a1.sources.r1.interceptors.i3.excludeEvents = true
-    # The default is replicating; there is also multiplexing
-    #a1.sources.r1.selector.type = replicating
+    #####################
-    # Finally, now that we've defined all of our components, tell
-    # a1 which ones we want to activate.
-    a1.sources = r1
-    a1.channels = c1
-    a1.sinks =
+    a1.sources.r2.type = com.github.ningg.flume.source.SpoolDirectoryTailFileSource
+    a1.sources.r2.channels = c2
+    a1.sources.r2.spoolDir = /data/logs/app/
+    a1.sources.r2.fileSuffix = .COMPLETED
+    a1.sources.r2.deletePolicy = never
+    a1.sources.r2.ignorePattern = ^$
+    a1.sources.r2.targetPattern = ywbuy_expending.*(\\d){10}.*
+    a1.sources.r2.targetFilename = yyyyMMddHH
+    a1.sources.r2.trackerDir = .flumespooltail_expending
+    a1.sources.r2.consumeOrder = oldest
+    a1.sources.r2.bufferMaxLineLength = 500000
+    a1.sources.r2.inputCharset = UTF-8
+    a1.sources.r2.decodeErrorPolicy = REPLACE
+    a1.sources.r2.deserializer = LINE
+    a1.sources.r2.interceptors = i1 i2 i3 i4
+
+    # Configure interceptor i1
+    a1.sources.r2.interceptors.i1.type = regex_filter
+    a1.sources.r2.interceptors.i1.regex = ^\\s*$
+    a1.sources.r2.interceptors.i1.excludeEvents = true
+    # Configure interceptor i2
+    a1.sources.r2.fileHeader = true
+    a1.sources.r2.basenameHeader = true
+    a1.sources.r2.interceptors.i2.type = com.easou.flume.interceptor.RegexExtractorExtInterceptor$Builder
+    a1.sources.r2.interceptors.i2.regex = (.*)\\.(.*)\\.(.*)
+    a1.sources.r2.interceptors.i2.extractorHeader = true
+    a1.sources.r2.interceptors.i2.extractorHeaderKey = basename
+    a1.sources.r2.interceptors.i2.serializers = s1 s2 s3
+    a1.sources.r2.interceptors.i2.serializers.s1.name = log_type
+    a1.sources.r2.interceptors.i2.serializers.s2.name = file_type
+    a1.sources.r2.interceptors.i2.serializers.s3.name = log_data
+    # Configure interceptor i3
+    a1.sources.r2.interceptors.i3.type = com.easou.flume.interceptor.ServerTimeInterceptor$Builder
+    a1.sources.r2.interceptors.i3.isServerTime = true
+    a1.sources.r2.interceptors.i3.timeType = yyyyMMddHHmmss
+    # Configure interceptor i4
+    a1.sources.r2.interceptors.i4.type = timestamp
+
+
+    a1.channels.c2.type = org.apache.flume.channel.kafka.KafkaChannel
+    a1.channels.c2.kafka.bootstrap.servers = 10.26.22.124:8092,10.26.22.121:8092,10.26.22.120:8092,10.26.22.102:8092,10.26.22.122:8092
+    a1.channels.c2.kafka.topic = yuewen_new_buydetail
+    # Enable Kafka idempotence so that retries do not produce duplicate data
+    a1.channels.c2.kafka.producer.enable.idempotence = true
+    a1.channels.c2.kafka.producer.acks = all
+    a1.channels.c2.kafka.producer.max.in.flight.requests.per.connection = 1
+    # Retry 20 times with a 30-second timeout each, so a single send can take up to about 10 minutes. Try hard to keep Kafka from raising exceptions, because after an exception the Flume source retries, which duplicates data
+    a1.channels.c2.kafka.producer.retries = 20
+    a1.channels.c2.kafka.producer.request.timeout.ms = 30000
+    a1.channels.c2.kafka.consumer.group.id = integral2kafka
+
+    # a1.channels.c1.type = org.apache.flume.channel.kafka.KafkaChannel
+    # a1.channels.c1.kafka.bootstrap.servers = 10.26.27.71:8092,10.26.22.102:8092,10.26.22.120:8092,10.26.22.121:8092,10.26.22.122:8092
+    # a1.channels.c1.kafka.topic = yuewen_new_buydetail
+    # a1.channels.c1.kafka.consumer.group.id = flume2kafka
+    #
+    # # Define an Avro source called r1 on a1 and tell it
+    # # to bind to 0.0.0.0:41414. Connect it to channel c1.
+    # # Note: two channels are split off here, one going to k1 and one to k2.
+    # # If only one channel c1 were used here and both sinks below read from c1, messages would be distributed randomly between the two sinks instead of each sink receiving a full copy.
+    # a1.sources.r1.channels = c1
+    # a1.sources.r1.type = exec
+    # a1.sources.r1.command = tail -F /data/logs/app/access.log
+    # a1.sources.r1.restart = true
+    # a1.sources.r1.selector.optional = c1
+    # a1.sources.r1.batchSize = 100000
+    # a1.sources.r1.interceptors = i1 i2 i3
+    # a1.sources.r1.interceptors.i1.type = timestamp
+    # a1.sources.r1.interceptors.i2.type = host
+    # a1.sources.r1.interceptors.i2.useIP = false
+    # a1.sources.r1.interceptors.i3.type = regex_filter
+    # a1.sources.r1.interceptors.i3.regex = ^\\s*$
+    # a1.sources.r1.interceptors.i3.excludeEvents = true
+    # # The default is replicating; there is also multiplexing
+    # #a1.sources.r1.selector.type = replicating
+    #
+    # # Finally, now that we've defined all of our components, tell
+    # # a1 which ones we want to activate.
+    # a1.sources = r1
+    # a1.channels = c1
+    # a1.sinks =
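
Note: Flume's KafkaChannel passes any property prefixed with kafka.producer. through to the underlying Kafka producer client, so the delivery guarantees configured above can be reproduced and verified with a plain Java producer. Below is a minimal sketch under those assumptions; the broker list and topic are taken from the config above, while the class name, the String serializers, and the test payload are illustrative only and not part of this change.

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;

// Minimal sketch (hypothetical class name): a standalone producer using the
// same delivery settings the KafkaChannel above sets via kafka.producer.*.
public class IdempotentProducerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Broker list copied from the flume.conf above.
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,
                "10.26.22.124:8092,10.26.22.121:8092,10.26.22.120:8092,10.26.22.102:8092,10.26.22.122:8092");
        // Idempotence: the broker de-duplicates retried batches by producer id
        // and sequence number, so producer-level retries cannot duplicate data.
        props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true");
        // acks=all is required by idempotence; max.in.flight=1 additionally
        // serializes requests, matching the channel config above.
        props.put(ProducerConfig.ACKS_CONFIG, "all");
        props.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "1");
        // 20 retries x 30 s request timeout: per the comment in the config,
        // one send can take up to roughly 10 minutes before failing outright.
        props.put(ProducerConfig.RETRIES_CONFIG, "20");
        props.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, "30000");
        // String serializers are an assumption for this sketch.
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringSerializer");
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringSerializer");

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // Illustrative payload; real events come from the spooled log files.
            producer.send(new ProducerRecord<>("yuewen_new_buydetail", "test-event"));
            producer.flush();
        }
    }
}

The design intent follows from the config's own comments: with idempotence enabled, raising retries to 20 trades latency for delivery certainty without risking broker-side duplicates, so the only remaining duplication path is the layer above (the Flume source re-reading a file after an exception), which the generous timeout budget is meant to avoid triggering.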