zk/es/kibana can now be deployed via the script

yangxg · 3 years ago · commit b766d08fc7

deploy-tmp-elasticsearch.yaml (+7 -7)

@@ -2,8 +2,8 @@
 apiVersion: apps/v1
 kind: StatefulSet
 metadata:
-  name: es
-  namespace: qa
+  name: elasticsearch
+  namespace: pro
 spec:
   serviceName: elasticsearch
   replicas: 3
@@ -56,7 +56,7 @@ spec:
                 fieldRef:
                   fieldPath: metadata.name
             - name: cluster.initial_master_nodes
-              value: "es-0,es-1,es-2"
+              value: "elasticsearch-0,elasticsearch-1,elasticsearch-2"
             - name: discovery.zen.minimum_master_nodes
               value: "2"
             - name: discovery.seed_hosts
@@ -73,13 +73,13 @@ spec:
         storageClassName: rbd
         resources:
           requests:
-            storage: 100Gi
+            storage: 500Gi
 ---
 apiVersion: v1
 kind: Service
 metadata:
   name: elasticsearch
-  namespace: qa
+  namespace: pro
   labels:
     app: elasticsearch
 spec:
@@ -96,7 +96,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: elasticsearch-cs
-  namespace: qa
+  namespace: pro
   labels:
     app: elasticsearch
 spec:
@@ -104,6 +104,6 @@ spec:
   ports:
     - port: 9200
       name: client
-      nodePort: 30029
+      nodePort: 30030
   selector:
     app: elasticsearch
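
A quick post-apply check of the rename and the new client NodePort; a minimal sketch, assuming kubectl access to the pro cluster, with NODE_IP standing in for any worker node's address:

    kubectl -n pro rollout status statefulset/elasticsearch
    kubectl -n pro get pvc | grep elasticsearch            # each claim should now request 500Gi
    curl -s "http://NODE_IP:30030/_cluster/health?pretty"  # client NodePort moved from 30029 to 30030

Note that renaming the StatefulSet (es -> elasticsearch) also renames the PVCs derived from its volumeClaimTemplates, so data under the old es-0..es-2 claims is not carried over.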

deploy-tmp-kafka-manager.yaml (+84 -0)

@@ -0,0 +1,84 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: kafka-manager
+  namespace: pro
+  labels:
+    app: kafka-manager
+spec:
+  type: NodePort
+  ports:
+  - name: kafka
+    port: 9000
+    targetPort: 9000
+    nodePort: 30031
+  selector:
+    app: kafka-manager
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: kafka-manager
+  namespace: pro
+  labels:
+    app: kafka-manager
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: kafka-manager
+  template:
+    metadata:
+      labels:
+        app: kafka-manager
+    spec:
+      containers:
+      - name: kafka-manager
+        image: zenko/kafka-manager:1.3.3.22
+        imagePullPolicy: IfNotPresent
+        ports:
+          - name: kafka-manager
+            containerPort: 9000
+            protocol: TCP
+        env:
+          - name: ZK_HOSTS
+            value: "zk-cs.pro.svc.cluster.local:2181"
+        livenessProbe:
+          httpGet:
+            path: /api/health
+            port: kafka-manager
+          initialDelaySeconds: 30
+          periodSeconds: 10
+          timeoutSeconds: 3
+          successThreshold: 1
+          failureThreshold: 5
+        readinessProbe:
+          httpGet:
+            path: /api/health
+            port: kafka-manager
+          initialDelaySeconds: 30
+          periodSeconds: 15
+        resources:
+          limits:
+            cpu: 500m
+            memory: 512Mi
+          requests:
+            cpu: 250m
+            memory: 256Mi
+---
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: kafka-manager-ingress
+  namespace: pro
+  annotations:
+    kubernetes.io/ingress.class: nginx
+spec:
+  rules:
+    - host: pro-kafka-manager.ieasou.cn
+      http:
+        paths:
+          - path: /
+            backend:
+              serviceName: kafka-manager
+              servicePort: 80
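
One thing to watch in this new file: the kafka-manager Service only exposes port 9000, but the Ingress backend points at servicePort 80, so requests for pro-kafka-manager.ieasou.cn have no matching Service port to route to. A hedged fix, assuming the Service stays as committed, is to point the backend at 9000:

    kubectl -n pro patch ingress kafka-manager-ingress --type=json \
      -p='[{"op":"replace","path":"/spec/rules/0/http/paths/0/backend/servicePort","value":9000}]'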

deploy-tmp-kafka.yaml (+4 -4)

@@ -2,7 +2,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: kafka-svc
-  namespace: qa
+  namespace: pro
   labels:
     app: kafka
 spec:
@@ -17,7 +17,7 @@ apiVersion: policy/v1beta1
 kind: PodDisruptionBudget
 metadata:
   name: kafka-pdb
-  namespace: qa
+  namespace: pro
 spec:
   selector:
     matchLabels:
@@ -28,7 +28,7 @@ apiVersion: apps/v1
 kind: StatefulSet
 metadata:
   name: kafka
-  namespace: qa
+  namespace: pro
 spec:
   selector:
     matchLabels:
@@ -81,7 +81,7 @@ spec:
             - -c
             - "exec kafka-server-start.sh /opt/kafka/config/server.properties --override broker.id=${HOSTNAME##*-} \
           --override listeners=PLAINTEXT://:9093 \
-          --override zookeeper.connect=zk-cs.qa.svc.cluster.local:2181 \   # ZooKeeper connection string (critical). Format: <zookeeper-service-name>.<namespace>.svc.cluster.local:2181
+          --override zookeeper.connect=zk-cs.pro.svc.cluster.local:2181 \   # ZooKeeper connection string (critical). Format: <zookeeper-service-name>.<namespace>.svc.cluster.local:2181
           --override log.dir=/var/lib/kafka "
           env:
             - name: KAFKA_HEAP_OPTS
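
The zookeeper.connect override is the piece most worth verifying after the namespace move. A minimal sketch, assuming the zk image ships zkCli.sh (the standard kubernetes-zookeeper tutorial image does):

    kubectl -n pro exec zk-0 -- zkCli.sh ls /brokers/ids        # expect [0, 1, 2] once all brokers have joined
    kubectl -n pro run dns-probe --rm -it --restart=Never --image=busybox -- \
      nslookup zk-cs.pro.svc.cluster.local                      # the Service name the brokers dial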

deploy-tmp-kibana.yaml (+5 -5)

@@ -3,7 +3,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: kibana-svc
-  namespace: qa
+  namespace: pro
   labels:
     app: kibana-svc
 spec:
@@ -19,7 +19,7 @@ apiVersion: apps/v1
 kind: StatefulSet
 metadata:
   name: kibana
-  namespace: qa
+  namespace: pro
   labels:
     app: kibana
 spec:
@@ -52,13 +52,13 @@ spec:
 apiVersion: extensions/v1beta1
 kind: Ingress
 metadata:
-  name: kibana-ingress
-  namespace: qa
+  name: kibana-pro-ingress
+  namespace: pro
   annotations:
     kubernetes.io/ingress.class: nginx
 spec:
   rules:
-    - host: qa-kibana.ieasou.cn
+    - host: pro-kibana.ieasou.cn
       http:
         paths:
           - path: /
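
A quick reachability check for the renamed Ingress; a sketch, with INGRESS_IP standing in for the nginx ingress controller's address:

    curl -sI -H 'Host: pro-kibana.ieasou.cn' "http://INGRESS_IP/" | head -n1   # expect 200 or a Kibana redirect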

deploy-tmp-logstash.yaml (+4 -4)

@@ -3,12 +3,12 @@ kind: ConfigMap
 apiVersion: v1
 metadata:
   name: logstash-config
-  namespace: qa
+  namespace: pro
 data:
   logstash-config-named-k8s: |
     input {
       kafka {
-          bootstrap_servers => ["kafka-0.kafka-svc.qa.svc.cluster.local:9093,kafka-1.kafka-svc.qa.svc.cluster.local:9093,kafka-2.kafka-svc.qa.svc.cluster.local:9093"]
+          bootstrap_servers => ["kafka-0.kafka-svc.pro.svc.cluster.local:9093,kafka-1.kafka-svc.pro.svc.cluster.local:9093,kafka-2.kafka-svc.pro.svc.cluster.local:9093"]
           group_id => "es-test"
           topics => ["offline-process-topic"]
           codec => json
@@ -77,7 +77,7 @@ kind: Deployment
 apiVersion: apps/v1
 metadata:
   name: logstash-k8s-named
-  namespace: qa
+  namespace: pro
   labels:
     app: logstash-k8s-named
 spec:
@@ -117,7 +117,7 @@ metadata:
   labels:
     app: logstash
   name: logstash
-  namespace: qa
+  namespace: pro
 spec:
   ports:
   - name: http
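
With bootstrap_servers now pointing at the pro brokers, an end-to-end smoke test is cheap. A sketch, assuming the Kafka image puts its bin/ scripts on PATH (the bare kafka-server-start.sh call above suggests it does):

    kubectl -n pro exec kafka-0 -- sh -c \
      'echo "{\"msg\":\"ping\"}" | kafka-console-producer.sh --broker-list localhost:9093 --topic offline-process-topic'
    kubectl -n pro logs deploy/logstash-k8s-named --tail=20   # the test record should show up in the pipeline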

deploy-tmp-zookeeper.yaml (+4 -4)

@@ -2,7 +2,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: zk-hs
-  namespace: qa
+  namespace: pro
   labels:
     app: zk
 spec:
@@ -19,7 +19,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: zk-cs
-  namespace: qa
+  namespace: pro
   labels:
     app: zk
 spec:
@@ -33,7 +33,7 @@ apiVersion: policy/v1beta1
 kind: PodDisruptionBudget
 metadata:
   name: zk-pdb
-  namespace: qa
+  namespace: pro
 spec:
   selector:
     matchLabels:
@@ -44,7 +44,7 @@ apiVersion: apps/v1
 kind: StatefulSet
 metadata:
   name: zk
-  namespace: qa
+  namespace: pro
 spec:
   selector:
     matchLabels:
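
After the move, the ensemble itself is worth a quick poll. A sketch using the ruok four-letter word, assuming nc is present in the zk image (newer ZooKeeper releases may also require whitelisting ruok):

    for i in 0 1 2; do
      kubectl -n pro exec "zk-$i" -- sh -c 'echo ruok | nc 127.0.0.1 2181'   # each should answer imok
    done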

deploy-tmp.yaml (+4 -4)

@@ -3,7 +3,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: kibana-svc
-  namespace: qa
+  namespace: pro
   labels:
     app: kibana-svc
 spec:
@@ -19,7 +19,7 @@ apiVersion: apps/v1
 kind: StatefulSet
 metadata:
   name: kibana
-  namespace: qa
+  namespace: pro
   labels:
     app: kibana
 spec:
@@ -51,12 +51,12 @@ apiVersion: extensions/v1beta1
 kind: Ingress
 metadata:
   name: kibana-ingress
-  namespace: qa
+  namespace: pro
   annotations:
     kubernetes.io/ingress.class: nginx
 spec:
   rules:
-    - host: qa-kibana.ieasou.cn
+    - host: pro-kibana.ieasou.cn
       http:
         paths:
           - path: /
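
Note that deploy-tmp.yaml now mirrors deploy-tmp-kibana.yaml: both declare the kibana Service and StatefulSet in pro, and their two Ingresses (kibana-ingress here, kibana-pro-ingress above) claim the same host pro-kibana.ieasou.cn. Worth confirming only one set is actually applied; a sketch:

    kubectl -n pro get ingress -o wide   # two rules for pro-kibana.ieasou.cn means both files were applied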

drone.elasticsearch.yml (+5 -5)

@@ -22,7 +22,7 @@ steps:
       insecure: true
       mirror: https://ci7pm4nx.mirror.aliyuncs.com
       registry: hub.evbj.easou.com
-      repo: hub.evbj.easou.com/qa/elasticsearch
+      repo: hub.evbj.easou.com/pro/elasticsearch
       tag: v1.0
   #    build_args:
   #      - JAR_FILE=
@@ -33,7 +33,7 @@ steps:
     pull: always
     # privileged: true
     environment:
-      JNLP_ENV: qa
+      JNLP_ENV: pro
       JNLP_REPLICAS: 1
       JNLP_TAG: v1.0
       JNLP_VERSION: v1  # default v1, v2 for canary
@@ -70,8 +70,8 @@ steps:
       corpid: ww419ee4063735e1c0
       corp_secret: zpiRBLETH9eLwIMQ4eJ_r_dcm3BPSGeLHvTcft8Ot-M
       agent_id: 1000004
-      title: "Pipeline qa/elasticsearch:v1.0 Success"
-      description: "${DRONE_BUILD_LINK} qa/elasticsearch:v1.0 部署完成"
+      title: "Pipeline pro/elasticsearch:v1.0 Success"
+      description: "${DRONE_BUILD_LINK} pro/elasticsearch:v1.0 部署完成"
       msg_url: ${DRONE_BUILD_LINK}
       btn_txt: "否"
     when:
@@ -84,7 +84,7 @@ steps:
       corpid: ww419ee4063735e1c0
       corp_secret: zpiRBLETH9eLwIMQ4eJ_r_dcm3BPSGeLHvTcft8Ot-M
       agent_id: 1000004
-      title: "Pipeline qa/elasticsearch:v1.0 Failure"
+      title: "Pipeline pro/elasticsearch:v1.0 Failure"
       description: "${DRONE_BUILD_LINK} 部署失败,请检查配置!"
       msg_url: ${DRONE_BUILD_LINK}
       btn_txt: "否"

drone.kibana.yml (+5 -5)

@@ -22,7 +22,7 @@ steps:
       insecure: true
       mirror: https://ci7pm4nx.mirror.aliyuncs.com
       registry: hub.evbj.easou.com
-      repo: hub.evbj.easou.com/qa/kibana
+      repo: hub.evbj.easou.com/pro/kibana
       tag: v1.0
   #    build_args:
   #      - JAR_FILE=
@@ -33,7 +33,7 @@ steps:
     pull: always
     # privileged: true
     environment:
-      JNLP_ENV: qa
+      JNLP_ENV: pro
       JNLP_REPLICAS: 1
       JNLP_TAG: v1.0
       JNLP_VERSION: v1  # default v1, v2 for canary
@@ -70,8 +70,8 @@ steps:
       corpid: ww419ee4063735e1c0
       corp_secret: zpiRBLETH9eLwIMQ4eJ_r_dcm3BPSGeLHvTcft8Ot-M
       agent_id: 1000004
-      title: "Pipeline qa/kibana:v1.0 Success"
-      description: "${DRONE_BUILD_LINK} qa/kibana:v1.0 部署完成"
+      title: "Pipeline pro/kibana:v1.0 Success"
+      description: "${DRONE_BUILD_LINK} pro/kibana:v1.0 部署完成"
       msg_url: ${DRONE_BUILD_LINK}
       btn_txt: "否"
     when:
@@ -84,7 +84,7 @@ steps:
       corpid: ww419ee4063735e1c0
       corp_secret: zpiRBLETH9eLwIMQ4eJ_r_dcm3BPSGeLHvTcft8Ot-M
       agent_id: 1000004
-      title: "Pipeline qa/kibana:v1.0 Failure"
+      title: "Pipeline pro/kibana:v1.0 Failure"
       description: "${DRONE_BUILD_LINK} 部署失败,请检查配置!"
       msg_url: ${DRONE_BUILD_LINK}
       btn_txt: "否"

drone.zookeeper.yml (+5 -5)

@@ -22,7 +22,7 @@ steps:
       insecure: true
       mirror: https://ci7pm4nx.mirror.aliyuncs.com
       registry: hub.evbj.easou.com
-      repo: hub.evbj.easou.com/qa/zookeeper
+      repo: hub.evbj.easou.com/pro/zookeeper
       tag: v1.0
   #    build_args:
   #      - JAR_FILE=
@@ -33,7 +33,7 @@ steps:
     pull: always
     # privileged: true
     environment:
-      JNLP_ENV: qa
+      JNLP_ENV: pro
       JNLP_REPLICAS: 1
       JNLP_TAG: v1.0
       JNLP_VERSION: v1  # default v1, v2 for canary
@@ -70,8 +70,8 @@ steps:
       corpid: ww419ee4063735e1c0
       corp_secret: zpiRBLETH9eLwIMQ4eJ_r_dcm3BPSGeLHvTcft8Ot-M
       agent_id: 1000004
-      title: "Pipeline qa/zookeeper:v1.0 Success"
-      description: "${DRONE_BUILD_LINK} qa/zookeeper:v1.0 部署完成"
+      title: "Pipeline pro/zookeeper:v1.0 Success"
+      description: "${DRONE_BUILD_LINK} pro/zookeeper:v1.0 部署完成"
       msg_url: ${DRONE_BUILD_LINK}
       btn_txt: "否"
     when:
@@ -84,7 +84,7 @@ steps:
       corpid: ww419ee4063735e1c0
       corp_secret: zpiRBLETH9eLwIMQ4eJ_r_dcm3BPSGeLHvTcft8Ot-M
       agent_id: 1000004
-      title: "Pipeline qa/zookeeper:v1.0 Failure"
+      title: "Pipeline pro/zookeeper:v1.0 Failure"
       description: "${DRONE_BUILD_LINK} 部署失败,请检查配置!"
       msg_url: ${DRONE_BUILD_LINK}
       btn_txt: "否"

k8s-deploy.sh (+1 -1)

@@ -41,7 +41,7 @@ while [ 1 ]; do
     continue
   fi
 
-  if [ $input1 != 1 -o $input1 -lt 0 -o $input1 -gt 2 ]; then
+  if [ $input1 -ge 0 -o $input1 -lt 2 ]; then
     ENV=${envs[$input1]}
     break
   else
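
As committed, the new guard is a tautology: every integer satisfies [ $input1 -ge 0 -o $input1 -lt 2 ] (a negative value is < 2, anything else is >= 0), so invalid menu picks still index into envs. A tighter condition, as a sketch, assuming envs is the bash array of environment names the script indexes into:

    if [ "$input1" -ge 0 ] && [ "$input1" -lt "${#envs[@]}" ]; then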

offline-process-charge.yml (+0 -169)

@@ -1,169 +0,0 @@
----
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  name: offline-process-charge
-  namespace: qa
-  labels:
-    app: offline-process-charge
-spec:
-  serviceName: offline-process-charge
-  replicas: 1
-  selector:
-    matchLabels:
-      app: offline-process-charge
-  template:
-    metadata:
-      labels:
-        app: offline-process-charge
-    spec:
-      terminationGracePeriodSeconds: 60
-      initContainers:
-        - name: init
-          image: hub.evbj.easou.com/dev/busybox
-          command: [ "chmod","777","-R","/data" ]
-          imagePullPolicy: Always
-          volumeMounts:
-            - name: volume
-              mountPath: /data
-      #nodeSelector:
-      #  app.touchrs: touchrs
-      containers:
-        - name: offline-process-charge
-          image: hub.evbj.easou.com/qa/offline-process-charge:v1.0.41
-          imagePullPolicy: Always
-          env:
-            - name: JAVA_OPTS
-              value: "-server  -Xdebug -Xrunjdwp:transport=dt_socket,server=y,address=8001,suspend=n  -Xmx2g -Xms2g -Xss512k -Djava.awt.headless=true -Djava.security.egd=file:/dev/./urandom -Dspring.profiles.active=test"
-          ports:
-            - containerPort: 8080
-              name: port
-            - containerPort: 8001
-              name: "jvm-debug"
-          volumeMounts:
-            - name: volume
-              mountPath: /data
-          lifecycle:
-            preStop:
-              exec:
-                command: ["/bin/sh","-c","curl -X POST 127.0.0.1:8080/actuator/shutdown"]
-          livenessProbe:
-            httpGet:
-              path: /actuator/health
-              port: 8080
-              httpHeaders:
-                - name: Custom-Header
-                  value: Awesome
-                - name: Custom-Header
-                  value: Awesome
-            initialDelaySeconds: 30        # first probe 30s after startup
-            periodSeconds: 10              # probe every 10s
-            timeoutSeconds: 3              # 3s timeout
-            successThreshold: 1            # one success marks the container healthy
-            failureThreshold: 5            # unhealthy after 5 consecutive failures (default 3)
-          readinessProbe:
-            tcpSocket:
-              port: 8080
-            initialDelaySeconds: 30
-            periodSeconds: 15
-        - image: docker.elastic.co/beats/filebeat:6.8.12
-          imagePullPolicy: Always
-          name: filebeat
-          volumeMounts:
-            - name: volume # the log volume is mounted in both the app container and filebeat
-              mountPath: /data
-            - name: filebeat-config
-              mountPath: /usr/share/filebeat/filebeat.yml
-              subPath: filebeat.yml
-      volumes:
-        - name: filebeat-config
-          configMap:
-            name: filebeat-config-offline-process-charge
-            items:
-              - key: filebeat.yml
-                path: filebeat.yml
-
-  volumeClaimTemplates:
-    - metadata:
-        name: volume
-      spec:
-        accessModes: [ "ReadWriteOnce" ]
-        storageClassName: rbd
-        resources:
-          requests:
-            storage: 100Gi
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: offline-process-charge-svc
-  namespace: qa
-  labels:
-    app: offline-process-charge-svc
-spec:
-  type: NodePort
-  ports:
-    - port: 80
-      targetPort: 8080
-      name: "main"
-  selector:
-    app: offline-process-charge
----
-apiVersion: extensions/v1beta1
-kind: Ingress
-metadata:
-  name: offline-process-charge-ingress
-  namespace: qa
-  annotations:
-    kubernetes.io/ingress.class: nginx
-spec:
-  rules:
-    - host: qa-offline-process-charge.ieasou.cn
-      http:
-        paths:
-          - path: /
-            backend:
-              serviceName: offline-process-charge-svc
-              servicePort: 80
----
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: filebeat-config-offline-process-charge
-  namespace: qa
-data:
-  filebeat.yml: |
-    filebeat.inputs:
-    - type: log
-      enabled: true
-      # merge multi-line log entries
-      multiline.pattern: '^\[[0-9]{4}-[0-9]{2}-[0-9]{2}'
-      multiline.negate: true
-      multiline.match: after
-      multiline.timeout: 30
-      fields:
-          log_topic: offline-process-charge
-      fields_under_root: true
-      paths:
-        - "/data/logs/app/*.log"
-        - "/data/logs/*.log"
-    setup.template.enabled: true
-    setup.template.name: offline-process-charge
-    setup.template.pattern: offline-process-charge*
-    setup.dashboards.enabled: true
-    output.kafka:
-      version: 0.10.2.1
-      enable: true
-      # initial brokers for reading cluster metadata
-      hosts: ["kafka-0.kafka-svc.qa.svc.cluster.local:9093", "kafka-1.kafka-svc.qa.svc.cluster.local:9093", "kafka-2.kafka-svc.qa.svc.cluster.local:9093"]
-      # message topic selection + partitioning
-      # topic: '%{[fields.log_topic]}'
-      topic: 'offline-process-topic'
-      partition.round_robin:
-        reachable_only: false
-      required_acks: 0
-      compression: gzip
-      max_message_bytes: 1000000
-    setup.kibana:
-      host: 'qa-kibana.ieasou.cn:80'
-#       host: 'kibana-svc:80'