Set up an ELK environment, integrate it with frostmourne, and get monitoring alerts working

February 6, 2024

1. Deploy Elasticsearch

Pull the image

Run the pull command below (version 8.6.0 is pinned here rather than pulling the latest):

docker pull docker.elastic.co/elasticsearch/elasticsearch:8.6.0

#Create directories and assign permissions

cd /data/elk_monitor

mkdir -p es/{config,config/certs,data,plugins}

chmod -R 777 es/data*

#Certificate configuration

Place es_config.tgz under config/certs (it provides the elastic-certificates.p12 referenced by the configuration below).
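
The exact contents of es_config.tgz are not shown. If you need to generate the elastic-certificates.p12 that the configuration references yourself, one common approach (a sketch only, not necessarily how the archive was built) is elasticsearch-certutil from the same image:

#run as root so the container can write into the mounted host directory
docker run --rm -it --user root \
  -v /data/elk_monitor/es/config/certs:/certs \
  docker.elastic.co/elasticsearch/elasticsearch:8.6.0 bash
#inside the container (press Enter at the password prompts to leave them empty,
#since the config below does not set keystore/truststore passwords):
bin/elasticsearch-certutil ca --out /certs/elastic-stack-ca.p12
bin/elasticsearch-certutil cert --ca /certs/elastic-stack-ca.p12 --out /certs/elastic-certificates.p12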

#Configuration file: config/es.yml

cluster.name: elasticsearch-cluster
node.name: es1
network.host: 0.0.0.0
network.publish_host: es1 
http.port: 9200
http.cors.enabled: true
http.cors.allow-origin: "*"
http.cors.allow-headers: Authorization
cluster.initial_master_nodes: es1
discovery.seed_hosts: ["es1:9300"]
transport.profiles.default.port: 9300
#xpack
#xpack.security.enabled: false
xpack.security.enabled: true
xpack.security.audit.enabled: false
#xpack.security.transport.ssl.enabled: false
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.keystore.path: certs/elastic-certificates.p12
xpack.security.transport.ssl.truststore.path: certs/elastic-certificates.p12

#docker-compose.yaml

version: '3.8'
services:
  es1:
    # Image name
    image: docker.elastic.co/elasticsearch/elasticsearch:8.6.0 
    # Container name
    container_name: es1
    hostname: es1
    environment:
      - "ES_JAVA_OPTS=-Xms256m -Xmx256m"
      - ELASTIC_PASSWORD=vsUZGKNvjWRtTKPmDG
    # Map files to the host
    volumes:
      - /data/elk_monitor/es/config/es.yml:/usr/share/elasticsearch/config/elasticsearch.yml
      - /data/elk_monitor/es/plugins:/usr/share/elasticsearch/plugins
      - /data/elk_monitor/es/data:/usr/share/elasticsearch/data
      - /data/elk_monitor/es/config/certs:/usr/share/elasticsearch/config/certs
    # Run privileged
    privileged: true
    # Port mapping
    ports:
      - 9200:9200
      - 9300:9300
    networks:
      - meta
    deploy:
      replicas: 1
networks:
  meta:
    external: true # use a network that has already been created
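
The compose file declares meta as an external network, so it must exist before the first startup. If it has not been created yet (an assumption about your host), create it once:

docker network create meta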

#Start

docker-compose up -d

#Check

curl http://192.168.10.14:9200/_cluster/health?pretty -u elastic:vsUZGKNvjWRtTKPmDG

Browser access: http://192.168.10.14:9200 (log in as elastic with the password above)

#Deploy Kibana

mkdir -p kibana/config

#kibana.yml configuration

elasticsearch.hosts: ["http://es1:9200"]
server.host: 0.0.0.0
server.port: 5601
elasticsearch.username: "kibana_system"
elasticsearch.password: "vsUZGKNvjWRtTKPmDG"
i18n.locale: "zh-CN"
xpack.encryptedSavedObjects.encryptionKey: dcbf819d8874e8242eaf107d538fe874
xpack.reporting.encryptionKey: ba2d98e6dad4fa73d77f5f34a568cfdd
xpack.security.encryptionKey: 3871dfc1bd945e1141364e4244800b39

#Passwords must be configured from inside an ES node

#xpack security must already be enabled, otherwise this errors out; enter one of the nodes and run:

bin/elasticsearch-setup-passwords interactive

Password: vsUZGKNvjWRtTKPmDG
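
A convenient way to run this from the host is through docker exec against the es1 container defined above (a sketch, assuming the container is already up):

docker exec -it es1 /usr/share/elasticsearch/bin/elasticsearch-setup-passwords interactive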

#docker-compose.yaml configuration

version: '3.8'
services:
  kibana:
    # Image name
    image: docker.elastic.co/kibana/kibana:8.6.0
    # Container name
    hostname: kibana
    container_name: kibana
    ports:
      - 5601:5601
    volumes:
      - /data/elk_monitor/kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml
    networks:
      - meta 
    deploy:
      replicas: 1
networks:
  meta:
    external: true # use a network that has already been created

#Start

docker-compose up -d

#UI access

http://192.168.10.14:5601

Account: elastic

Password: vsUZGKNvjWRtTKPmDG

#Deploy Logstash

http://www.voycn.com/article/logstash-es-kaiqiquanxianrenzheng-x-pack

mkdir -p logstash/conf.d

#logstash.yml configuration

http.host: "0.0.0.0"
xpack.monitoring.elasticsearch.hosts: [ "http://es1:9200" ]
path.logs: /var/log/logstash
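
The compose file below mounts conf.d into the container, but no pipeline file is shown. A minimal sketch of one (the file name beats-to-es.conf and the index pattern are assumptions) that accepts Beats traffic on port 5044 and writes to the secured cluster:

#/data/elk_monitor/logstash/conf.d/beats-to-es.conf
input {
  beats {
    port => 5044
  }
}
output {
  elasticsearch {
    hosts    => ["http://es1:9200"]
    user     => "elastic"
    password => "vsUZGKNvjWRtTKPmDG"
    index    => "logstash-%{+YYYY.MM.dd}"
  }
}

Depending on the image defaults, you may need to point Logstash at this directory (for example via path.config, or by mounting the files into /usr/share/logstash/pipeline instead).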

#docker-compose.yaml

version: '3.8'
services:
  logstash:
    # Image name
    image: docker.elastic.co/logstash/logstash:8.6.0
    # Container name
    hostname: logstash
    container_name: logstash
    ports:
      - 5044:5044
    volumes:
      - /data/elk_monitor/logstash/logstash.yml:/usr/share/logstash/config/logstash.yml
      - /data/elk_monitor/logstash/conf.d:/usr/share/logstash/conf.d
    networks:
      - meta 
    deploy:
      replicas: 1
networks:
  meta:
    external: true # use a network that has already been created

#Start the service

docker-compose up -d

#Verify:

#Test standard input/output and file output
logstash -e "input { stdin {} } output { stdout {} }"
#Output to a file
logstash -e 'input { stdin {} } output { file { path => "/tmp/test-%{+YYYY.MM.dd}.log" } }'
#Test output to Elasticsearch (security is enabled, so pass the credentials)
logstash -e 'input { stdin {} } output { elasticsearch { hosts => ["192.168.10.14:9200"] user => "elastic" password => "vsUZGKNvjWRtTKPmDG" index => "logstash-test-%{+YYYY.MM.dd}" } }'
#Check ES
curl "http://192.168.10.14:9200/_cat/indices?v" -u elastic:vsUZGKNvjWRtTKPmDG
#If Logstash complains that the data directory (/usr/share/logstash/data) is locked, remove the stale lock file:
#rm -f /usr/share/logstash/data/.lock

#Deploy Filebeat

docker pull docker.elastic.co/beats/filebeat:8.6.0

#Start Filebeat temporarily to copy its default files out of the image

#Start
docker run -d --name=filebeat docker.elastic.co/beats/filebeat:8.6.0
#Copy to the host
docker cp filebeat:/usr/share/filebeat /data/elk_monitor/

#filebeat.yml configuration

filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/messages/*.log
setup.kibana:
  host: "192.168.10.14:5601"
output.elasticsearch.allow_older_versions: true
output.elasticsearch:
  hosts: '192.168.10.14:9200'
  indices:
    - index: "filebeat-%{+yyyy.MM.dd}"
#----------------------------- Logstash output --------------------------------
#output.logstash:
  # The Logstash hosts
#  hosts: '192.168.10.14:5044'

#Fix directory permissions; the container runs as the uid 1000 account

chown -R meng.meng data*

chmod -R 777 data*

#docker-compose.yaml

version: '3.8'
services:
  filebeat:
    # Image name
    image: docker.elastic.co/beats/filebeat:8.6.0
    # Container name
    hostname: filebeat
    container_name: filebeat
    #ports:
    #  - 5044:5044
    volumes:
      - /data/elk_monitor/filebeat:/usr/share/filebeat
#      - /var/log/messages:/var/log/messages
      - /data/logs:/data/logs
    networks:
      - meta 
    deploy:
      replicas: 1
networks:
  meta:
    external: true # use a network that has already been created

#Start

docker-compose up -d

#Test

setup.kibana:
  host: "192.168.10.14:5601"
output.elasticsearch.allow_older_versions: true
filebeat.inputs:
- type: log
  paths:
    - /data/logs/t1.log
  json.keys_under_root: true
  json.overwrite_keys: true
  fields:
    index: 't1_history'
- type: log
  paths:
    - /data/logs/t2.log
  json.keys_under_root: true
  json.overwrite_keys: true
  fields:
    index: 't2_history'
output.elasticsearch:
  hosts: ["192.168.10.14:9200"]
  username: "elastic"
  password: "vsUZGKNvjWRtTKPmDG"
  indices:
    - index: "t1_history"
      when.contains:
        fields:
          index: "t1_history"
    - index: "t2_history"
      when.contains:
        fields:
          index: "t2_history"
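
After swapping in this test configuration, restart the container so the new inputs and outputs take effect (assuming the filebeat container from the compose file above is already running):

docker restart filebeat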

cd /data/logs

echo {\"T1\":\"2023-07-26\"} >> t1.log ; echo {\"T2\":\"2023-07-26\"} >> t2.log

#Check the indices

curl http://192.168.10.14:9200/_cat/indices?pretty -u elastic:vsUZGKNvjWRtTKPmDG
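
To confirm the test documents actually landed, you can also search one of the indices directly (the index names are the ones defined in the Filebeat config above), for example:

curl "http://192.168.10.14:9200/t1_history/_search?pretty" -u elastic:vsUZGKNvjWRtTKPmDG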

#Create an index view (data view) in Kibana

#Deploy frostmourne

mkdir /data/frostmourne

#Create the database

#CREATE DATABASE frostmourne DEFAULT CHARACTER SET utf8mb4;

#Import the SQL backup

mysql -uroot -pMFkda949qr -h192.168.10.14
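
The command above only opens a MySQL session. To actually load the backup, create the database and feed the dump file into it (frostmourne.sql is a hypothetical name for your backup file):

mysql -uroot -pMFkda949qr -h192.168.10.14 -e "CREATE DATABASE IF NOT EXISTS frostmourne DEFAULT CHARACTER SET utf8mb4;"
mysql -uroot -pMFkda949qr -h192.168.10.14 frostmourne < frostmourne.sql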

#docker-compose.yaml configuration

version: '3.6'
services:
  frostmourne-monitor:
    image: registry.cn-hangzhou.aliyuncs.com/kechangqing/frostmourne:1.0
    container_name: frostmourne-monitor
    environment:
      datasource_frostmourne_url: jdbc:mysql://192.168.10.14:3306/frostmourne?useSSL=false&verifyServerCertificate=false&useUnicode=true&characterEncoding=utf-8&allowMultiQueries=true&serverTimezone=GMT+8
      datasource_frostmourne_username: root
      datasource_frostmourne_password: MFkda949qr
      frostmourne_monitor_address: http://192.168.10.14:10054
      # initial_password: 'monitor2023'
      alarmlog_reserve_days: 30
    ports:
      - '10054:10054'
    # expose:
    #   - '10054'
    networks:
      - meta
    command: bash -c "/opt/frostmourne/start.sh"
networks:
  meta:
    external: true

#Start the service

docker-compose up -d

#Verify

http://192.168.10.14:10054

Account: admin

Password: 123456

#Configure the data source

#With password authentication

#Configure the data name (index pattern); time field:

@timestamp

#Query results

#Monitor query configuration

https://gitee.com/monitor_group/frostmourne/blob/master/doc/wiki/query-string.md

https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-query-string-query.html#query-string-syntax

For example, to count how many times T2 appears in the message field:
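
Following the query-string syntax linked above, the condition entered in the monitor's query box can be as simple as the line below (the field name message is an assumption about how your logs are indexed):

message: "T2"

frostmourne then counts the documents matching this query in each scheduling window and raises an alarm when the count crosses the configured threshold.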

#Alert output