@@ -10,12 +10,19 @@ a1.channels.c2.capacity = 1000000
a1.channels.c2.write-timeout = 250
a1.channels.c2.keep-alive = 250

-
+a1.channels.c3.type = org.apache.flume.channel.kafka.KafkaChannel
+#a1.channels.c3.transactionCapacity = 100000
+#a1.channels.c3.capacity = 10000000
+a1.channels.c3.brokerList = 10.26.27.212:8092,10.26.22.76:8092,10.26.22.72:8092,10.26.22.73:8092,10.26.22.74:8092
+a1.channels.c3.topic = api_easou_com_nginx
+a1.channels.c3.zookeeperConnect = 10.26.27.212:2181,10.26.22.76:2181,10.26.22.72:2181,10.26.22.73:2181,10.26.22.74:2181
+#a1.channels.c3.write-timeout = 150
+#a1.channels.c3.keep-alive = 260
# Define an Avro source called r1 on a1 and tell it
# to bind to 0.0.0.0:41414. Connect it to channel c1.
# Note: two channels are split out here, one feeding sink k1 and the other feeding sink k2
# If only a single channel c1 were used here and both sinks below read from c1, events would be split randomly between the two sinks instead of each sink receiving a full copy
-a1.sources.r1.channels = c1 c2
+a1.sources.r1.channels = c1 c2 c3
a1.sources.r1.type = exec
a1.sources.r1.command = tail -F /log/nginx/visit/access.log
a1.sources.r1.restart = true
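
A Flume source with more than one channel uses the replicating channel selector by default, which is what the comment in the hunk above relies on: every event tailed from the access log is copied to c1, c2, and the new Kafka channel c3, rather than being load-balanced across them. A minimal sketch that spells this default out explicitly (an optional line, not part of the change above):

a1.sources.r1.selector.type = replicating
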
@@ -41,5 +48,5 @@ a1.sinks.k2.connect-timeout = 100000
# Finally, now that we've defined all of our components, tell
# a1 which ones we want to activate.
a1.sources = r1
-a1.channels = c1 c2
+a1.channels = c1 c2 c3
a1.sinks = k1 k2
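
Note that the list of active sinks stays at k1 and k2, so no Flume sink drains c3. With a Kafka channel this can be deliberate: events written to c3 are persisted in the Kafka topic api_easou_com_nginx, where any downstream Kafka consumer can read them. If a Flume sink on c3 were wanted as well, a sketch might look like the following (k3 is a hypothetical name and the logger sink is used only for illustration):

a1.sinks = k1 k2 k3
a1.sinks.k3.type = logger
a1.sinks.k3.channel = c3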