@@ -47,7 +47,7 @@ spec:
             # topologyKey: kubernetes.io/hostname
       containers:
       - name: realtime-yuewen-v2
-        image: hub.evbj.easou.com/pro/realtime-yuewen-v2:r188820
+        image: hub.evbj.easou.com/pro/realtime-yuewen-v2:r188822
         imagePullPolicy: IfNotPresent
         env:
         - name: MY_NODE_NAME
@@ -105,6 +105,9 @@ spec:
         volumeMounts:
         - name: volume # the log volume is mounted into both the nginx and filebeat containers
           mountPath: /data
+        - name: flume-config
+          mountPath: /app/soft/flume-1.7.0/conf/flume.conf
+          subPath: flume.conf
       volumes:
       - name: flume-config
         configMap:
@@ -175,59 +178,35 @@ metadata:
   namespace: pro
 data:
   flume.conf: |
-    a1.sources = r1
-    a1.channels = c1
-    a1.sinks =
+    a1.channels.c1.type = org.apache.flume.channel.kafka.KafkaChannel
+    a1.channels.c1.kafka.bootstrap.servers = 10.40.20.44:8092,10.40.20.43:8092,10.40.20.41:8092,10.40.20.42:8092,10.40.20.40:8092
+    a1.channels.c1.kafka.topic = yuewen_buydetail
+    a1.channels.c1.kafka.consumer.group.id = flume2kafka
-    a1.sources.r1.type = com.github.ningg.flume.source.SpoolDirectoryTailFileSource
+    # Define an exec source called r1 on a1 that tails the access log,
+    # and connect it to channel c1.
+    # Note: to fan events out to two sinks, split the flow into two channels here, one per sink (e.g. k1 and k2)
+    # If only one channel c1 were declared and both sinks used c1, each message would go to just one of the two sinks at random, instead of both sinks receiving a full copy
     a1.sources.r1.channels = c1
-    a1.sources.r1.spoolDir = /data/logs/app/
-    a1.sources.r1.fileSuffix = .COMPLETED
-    a1.sources.r1.deletePolicy = never
-    a1.sources.r1.ignorePattern = ^$
-    a1.sources.r1.targetPattern = access.*(\\d){10}.*
-    a1.sources.r1.targetFilename = yyyyMMddHH
-    a1.sources.r1.trackerDir = .flumespooltail_earning
-    a1.sources.r1.consumeOrder = oldest
-    a1.sources.r1.bufferMaxLineLength = 500000
-    a1.sources.r1.inputCharset = UTF-8
-    a1.sources.r1.decodeErrorPolicy = REPLACE
-    a1.sources.r1.deserializer = LINE
-    a1.sources.r1.interceptors = i1 i2 i3 i4
-
-    # configure interceptor i1
-    a1.sources.r1.interceptors.i1.type = regex_filter
-    a1.sources.r1.interceptors.i1.regex = ^\\s*$
-    a1.sources.r1.interceptors.i1.excludeEvents = true
-    # configure interceptor i2
-    a1.sources.r1.fileHeader = true
-    a1.sources.r1.basenameHeader = true
-    a1.sources.r1.interceptors.i2.type = com.easou.flume.interceptor.RegexExtractorExtInterceptor$Builder
-    a1.sources.r1.interceptors.i2.regex = (.*)\\.(.*)\\.(.*)
-    a1.sources.r1.interceptors.i2.extractorHeader = true
-    a1.sources.r1.interceptors.i2.extractorHeaderKey = basename
-    a1.sources.r1.interceptors.i2.serializers = s1 s2 s3
-    a1.sources.r1.interceptors.i2.serializers.s1.name = log_type
-    a1.sources.r1.interceptors.i2.serializers.s2.name = file_type
-    a1.sources.r1.interceptors.i2.serializers.s3.name = log_data
-    # configure interceptor i3
-    a1.sources.r1.interceptors.i3.type=com.easou.flume.interceptor.ServerTimeInterceptor$Builder
-    a1.sources.r1.interceptors.i3.isServerTime=true
-    a1.sources.r1.interceptors.i3.timeType=yyyyMMddHHmmss
-    # configure interceptor i4
-    a1.sources.r1.interceptors.i4.type = timestamp
+    a1.sources.r1.type = exec
+    a1.sources.r1.command = tail -F /data/logs/app/access.log
+    a1.sources.r1.restart = true
+    a1.sources.r1.selector.optional = c1
+    a1.sources.r1.batchSize = 100000
+    a1.sources.r1.interceptors = i1 i2 i3
+    a1.sources.r1.interceptors.i1.type = timestamp
+    a1.sources.r1.interceptors.i2.type = host
+    a1.sources.r1.interceptors.i2.useIP = false
+    a1.sources.r1.interceptors.i3.type = regex_filter
+    a1.sources.r1.interceptors.i3.regex = ^\\s*$
+    a1.sources.r1.interceptors.i3.excludeEvents = true
+    # the default selector type is replicating; multiplexing is also available
+    #a1.sources.r1.selector.type = replicating
-
-    a1.channels.c1.type = org.apache.flume.channel.kafka.KafkaChannel
-    a1.channels.c1.kafka.bootstrap.servers = 10.26.27.71:8092,10.26.22.102:8092,10.26.22.120:8092,10.26.22.121:8092,10.26.22.122:8092
-    a1.channels.c1.kafka.topic = yuewen_buydetail
-    # enable Kafka idempotence so that retries do not produce duplicate records
-    a1.channels.c1.kafka.producer.enable.idempotence = true
-    a1.channels.c1.kafka.producer.acks = all
-    a1.channels.c1.kafka.producer.max.in.flight.requests.per.connection = 1
-    # retry 20 times with a 30-second timeout per attempt, so a single send can take up to roughly 10 minutes; try to keep Kafka from throwing exceptions, because on an exception the Flume source retries and data gets duplicated
-    a1.channels.c1.kafka.producer.retries = 20
-    a1.channels.c1.kafka.producer.request.timeout.ms = 30000
-    a1.channels.c1.kafka.consumer.group.id = integral2kafka
+    # Finally, now that we've defined all of our components, tell
+    # a1 which ones we want to activate.
+    a1.sources = r1
+    a1.channels = c1
+    a1.sinks =
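
The hunk at @@ -105,6 +105,9 mounts a single key out of a ConfigMap: subPath: flume.conf picks only the flume.conf entry and places it at /app/soft/flume-1.7.0/conf/flume.conf, leaving the rest of the Flume conf directory in the image untouched. For reference, here is a minimal sketch of the ConfigMap this relies on, assuming it is named flume-config (the volumes: stanza references a ConfigMap, but its name: field falls outside the hunk) and abridging the flume.conf body to the lines added above:

apiVersion: v1
kind: ConfigMap
metadata:
  name: flume-config            # assumed; must match the configMap name referenced under volumes:
  namespace: pro
data:
  flume.conf: |
    # exec source r1 tails the access log and hands events to channel c1
    a1.sources = r1
    a1.channels = c1
    a1.sinks =
    a1.sources.r1.type = exec
    a1.sources.r1.command = tail -F /data/logs/app/access.log
    a1.sources.r1.channels = c1
    # the Kafka channel writes every event straight into the topic
    a1.channels.c1.type = org.apache.flume.channel.kafka.KafkaChannel
    a1.channels.c1.kafka.bootstrap.servers = 10.40.20.44:8092,10.40.20.43:8092,10.40.20.41:8092,10.40.20.42:8092,10.40.20.40:8092
    a1.channels.c1.kafka.topic = yuewen_buydetail

Two caveats: files mounted via subPath are not refreshed when the ConfigMap is updated, so a pod restart is needed to pick up a new flume.conf (the image bump to r188822 forces a rollout here anyway); and the agent deliberately declares no sinks (a1.sinks =), because the Kafka channel itself delivers every event from the exec source into the yuewen_buydetail topic for downstream consumers to read.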