@@ -0,0 +1,73 @@
+a1.channels.c1.type = org.apache.flume.channel.kafka.KafkaChannel
+a1.channels.c1.kafka.bootstrap.servers = 10.26.22.124:8092,10.26.22.121:8092,10.26.22.120:8092,10.26.22.102:8092,10.26.22.122:8092
+a1.channels.c1.kafka.topic = api_searchlog_novel
+a1.channels.c1.kafka.consumer.group.id = flume2kafka
+# Define an exec source called r1 on a1 that tails the access log
+# and connect it to channel c1.
+# Note: in a sink-based layout the flow is split into two channels here, one feeding
+# sink k1 and one feeding sink k2 (see the sketch after this config).
+# If only one channel c1 were used and both sinks read from c1, events would be
+# distributed randomly across the two sinks instead of each sink receiving a full copy.
+a1.sources.r1.channels = c1
+a1.sources.r1.type = exec
+a1.sources.r1.command = tail -F /log/resin-books/words/access.log
+a1.sources.r1.restart = true
+a1.sources.r1.batchSize = 10000
+a1.sources.r1.interceptors = i1 i2 i3
+a1.sources.r1.interceptors.i1.type = timestamp
+a1.sources.r1.interceptors.i2.type = host
+a1.sources.r1.interceptors.i2.useIP = false
+# i3 drops blank lines: events matching the regex are excluded
+a1.sources.r1.interceptors.i3.type = regex_filter
+a1.sources.r1.interceptors.i3.regex = ^\\s*$
+a1.sources.r1.interceptors.i3.excludeEvents = true
+# The default channel selector type is replicating; multiplexing is also available (see the sketches after this config).
+#a1.sources.r1.selector.type = replicating
+
+# Finally, now that we've defined all of our components, tell
+# a1 which ones we want to activate.
+a1.sources = r1 r2 r3
+a1.channels = c1 c2 c3
+# No sinks are needed: each Kafka channel writes events directly to its Kafka topic.
+a1.sinks =
+
+######### searchrecord: apirs BI search-statistics log
+a1.channels.c2.type = org.apache.flume.channel.kafka.KafkaChannel
+a1.channels.c2.kafka.bootstrap.servers = 10.26.22.124:8092,10.26.22.121:8092,10.26.22.120:8092,10.26.22.102:8092,10.26.22.122:8092
+a1.channels.c2.kafka.topic = api_searchrecord_bi
+a1.channels.c2.kafka.consumer.group.id = flume2kafka
+# Define an exec source called r2 on a1 that tails the searchrecord log
+# and connect it to channel c2.
+# The same note as for r1 applies: split into two channels if each of two sinks should
+# receive a full copy; a single shared channel would distribute events randomly between them.
+a1.sources.r2.channels = c2
+a1.sources.r2.type = exec
+a1.sources.r2.command = tail -F /log/resin-books/search/searchrecord.log
+a1.sources.r2.restart = true
+a1.sources.r2.batchSize = 10000
+a1.sources.r2.interceptors = i1 i2 i3
+a1.sources.r2.interceptors.i1.type = timestamp
+a1.sources.r2.interceptors.i2.type = host
+a1.sources.r2.interceptors.i2.useIP = false
+a1.sources.r2.interceptors.i3.type = regex_filter
+a1.sources.r2.interceptors.i3.regex = ^\\s*$
+a1.sources.r2.interceptors.i3.excludeEvents = true
+# The default channel selector type is replicating; multiplexing is also available.
+#a1.sources.r2.selector.type = replicating
+
+#####bookincharge
+a1.channels.c3.type = org.apache.flume.channel.kafka.KafkaChannel
+a1.channels.c3.kafka.bootstrap.servers = 10.26.22.124:8092,10.26.22.121:8092,10.26.22.120:8092,10.26.22.102:8092,10.26.22.122:8092
+a1.channels.c3.kafka.topic = api_bookincharge_novel
+a1.channels.c3.kafka.consumer.group.id = flume2kafka
+# Define an exec source called r3 on a1 that tails the bookincharge log
+# and connect it to channel c3.
+a1.sources.r3.channels = c3
+a1.sources.r3.type = exec
+a1.sources.r3.command = tail -F /data/log/bookincharge/bookincharge.log
+a1.sources.r3.restart = true
+a1.sources.r3.batchSize = 10000
+a1.sources.r3.interceptors = i1 i2 i3
+a1.sources.r3.interceptors.i1.type = timestamp
+a1.sources.r3.interceptors.i2.type = host
+a1.sources.r3.interceptors.i2.useIP = false
+a1.sources.r3.interceptors.i3.type = regex_filter
+a1.sources.r3.interceptors.i3.regex = ^\\s*$
+a1.sources.r3.interceptors.i3.excludeEvents = true
+# The default channel selector type is replicating; multiplexing is also available.
+#a1.sources.r3.selector.type = replicating
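
The k1/k2 comments above come from a sink-based layout rather than the Kafka-channel layout this file actually uses (here each channel is itself the Kafka destination, so no sinks are declared). A minimal standalone sketch of what those comments describe, using hypothetical memory channels and logger sinks instead of the real components above:

# replicating (the default) copies every event into each channel listed on the source
a1.sources = r1
a1.sources.r1.type = exec
a1.sources.r1.command = tail -F /log/resin-books/words/access.log
a1.sources.r1.selector.type = replicating
a1.sources.r1.channels = c1 c2

a1.channels = c1 c2
a1.channels.c1.type = memory
a1.channels.c2.type = memory

# one sink per channel, so k1 and k2 each receive a full copy of the stream
a1.sinks = k1 k2
a1.sinks.k1.type = logger
a1.sinks.k1.channel = c1
a1.sinks.k2.type = logger
a1.sinks.k2.channel = c2

# If both sinks drained the same channel instead (k1.channel = c1, k2.channel = c1),
# they would compete for events and each event would reach only one of them.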
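
The selector comments also mention multiplexing, which routes each event to one channel based on a header value instead of copying it to all of them. A sketch under the assumption of a hypothetical "logtype" header (nothing in the config above sets such a header):

a1.sources.r1.selector.type = multiplexing
a1.sources.r1.selector.header = logtype
# events whose logtype header is "search" go to c1, "bi" to c2
a1.sources.r1.selector.mapping.search = c1
a1.sources.r1.selector.mapping.bi = c2
# events with no matching header value fall back to c3
a1.sources.r1.selector.default = c3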
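
Since this agent declares no sinks, the data only becomes visible on the Kafka side, and with the channel defaults the topics hold Avro-serialized Flume events rather than raw lines. One way to eyeball them is a second Flume agent that drains the same topic through a Kafka channel into a logger sink; the agent name a2 and consumer group flume-verify below are chosen purely for illustration:

# channel-plus-sink agent: reads the topic that a1's c1 channel writes to
a2.channels = v1
a2.channels.v1.type = org.apache.flume.channel.kafka.KafkaChannel
a2.channels.v1.kafka.bootstrap.servers = 10.26.22.124:8092,10.26.22.121:8092,10.26.22.120:8092,10.26.22.102:8092,10.26.22.122:8092
a2.channels.v1.kafka.topic = api_searchlog_novel
a2.channels.v1.kafka.consumer.group.id = flume-verify

# no source: the channel is fed by whatever is already in the Kafka topic
a2.sources =

a2.sinks = s1
a2.sinks.s1.type = logger
a2.sinks.s1.channel = v1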