Collecting Logs with ELK (Part 1): Setting Up ELK

I. Setting Up ELK

This walkthrough uses Elasticsearch 7.7.0, Kibana 7.7.0, and Logstash 7.7.0.

1. Create the directories and the docker-compose file

mkdir elk
cd elk
mkdir elasticsearch kibana logstash
touch docker-compose.yml
cd elasticsearch
mkdir es-1 es-2 es-3
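
After these commands the layout under elk/ should look roughly like this (the elasticsearch.yml, kibana.yml, logstash.yml, and pipeline files are added in the steps below):

elk/
├── docker-compose.yml
├── elasticsearch/
│   ├── es-1/
│   ├── es-2/
│   └── es-3/
├── kibana/
└── logstash/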

2. Edit the docker-compose.yml file

version: "2.2"
services:
  elasticsearch-head:
    image: mobz/elasticsearch-head:5
    container_name: elasticsearch-head
    ports:
      - 9100:9100
    expose:
      - 9100
    restart: always
  es-1:
    image: elasticsearch:7.7.0
    container_name: es-1
    environment:
      - cluster.name=es
      - discovery.seed_hosts=es-2,es-3
      - cluster.initial_master_nodes=es-1,es-2,es-3
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m" 
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - /Users/lengqi/myFile/elk/elasticsearch/es-1/logs:/usr/share/elasticsearch/logs
      - /Users/lengqi/myFile/elk/elasticsearch/es-1/data:/usr/share/elasticsearch/data
      - /Users/lengqi/myFile/elk/elasticsearch/es-1/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
    ports:
      - 9200:9200
    networks:
      - elastic
  es-2:
    image: elasticsearch:7.7.0
    container_name: es-2
    environment:
      - cluster.name=es
      - discovery.seed_hosts=es-1,es-3
      - cluster.initial_master_nodes=es-1,es-2,es-3
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"   
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - /Users/lengqi/myFile/elk/elasticsearch/es-2/logs:/usr/share/elasticsearch/logs
      - /Users/lengqi/myFile/elk/elasticsearch/es-2/data:/usr/share/elasticsearch/data
      - /Users/lengqi/myFile/elk/elasticsearch/es-2/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
    ports:
      - 9201:9201
    networks:
      - elastic
  es-3:
    image: elasticsearch:7.7.0
    container_name: es-3
    environment:
      - cluster.name=es
      - discovery.seed_hosts=es-1,es-2
      - cluster.initial_master_nodes=es-1,es-2,es-3
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m" 
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - /Users/lengqi/myFile/elk/elasticsearch/es-3/logs:/usr/share/elasticsearch/logs
      - /Users/lengqi/myFile/elk/elasticsearch/es-3/data:/usr/share/elasticsearch/data
      - /Users/lengqi/myFile/elk/elasticsearch/es-3/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
    ports:
      - 9202:9202
    networks:
      - elastic
  kibana:
    image: kibana:7.7.0
    container_name: kibana
    depends_on:
      - es-1
    environment:
      ELASTICSEARCH_URL: http://es-1:9200
      ELASTICSEARCH_HOSTS: http://es-1:9200
    volumes:
      - /Users/lengqi/myFile/elk/kibana/kibana.yml:/usr/share/kibana/config/kibana.yml
    networks:
      - elastic
    ports:
      - 5601:5601
  logstash:
    image: logstash:7.7.0
    container_name: logstash
    volumes:
      - /Users/lengqi/myFile/elk/logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml
      - /Users/lengqi/myFile/elk/logstash/pipeline:/usr/share/logstash/pipeline
    ports:
      - 5044:5044
    environment:
      LS_JAVA_OPTS: "-Xmx512m -Xms512m"
    networks:
      - elastic
    depends_on:
      - es-1
networks:
  elastic:
    driver: bridge 
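
Before starting anything, it can be worth validating the file; docker-compose config parses the YAML, resolves it, and fails loudly on syntax errors:

docker-compose -f docker-compose.yml config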

3. Add an elasticsearch.yml file for each Elasticsearch node

cd es-1
vim elasticsearch.yml

Contents of elasticsearch.yml for es-1:

cluster.name: fupingstar_es_cluster # cluster name; nodes with the same cluster name automatically form a cluster
node.name: es-1  # node name
network.host: 0.0.0.0 # sets both bind_host and publish_host
http.port: 9200  # port for REST clients
transport.tcp.port: 9300  # port for communication between cluster nodes
node.master: true # master-eligible role
node.data: true # data role
node.ingest: true # ingest role: pre-processes documents before indexing; supports pipelines, acting like a filter
bootstrap.memory_lock: false
node.max_local_storage_nodes: 1
http.cors.enabled: true # CORS settings (needed by elasticsearch-head)
http.cors.allow-origin: "*" # CORS settings

Configuration file for node es-2 (only node.name and http.port differ):

cluster.name: fupingstar_es_cluster # cluster name; nodes with the same cluster name automatically form a cluster
node.name: es-2  # node name
network.host: 0.0.0.0 # sets both bind_host and publish_host
http.port: 9201  # port for REST clients
transport.tcp.port: 9300  # port for communication between cluster nodes
node.master: true # master-eligible role
node.data: true # data role
node.ingest: true # ingest role: pre-processes documents before indexing; supports pipelines, acting like a filter
bootstrap.memory_lock: false
node.max_local_storage_nodes: 1
http.cors.enabled: true # CORS settings (needed by elasticsearch-head)
http.cors.allow-origin: "*" # CORS settings

Configuration file for node es-3:

cluster.name: fupingstar_es_cluster # cluster name; nodes with the same cluster name automatically form a cluster
node.name: es-3  # node name
network.host: 0.0.0.0 # sets both bind_host and publish_host
http.port: 9202  # port for REST clients
transport.tcp.port: 9300  # port for communication between cluster nodes
node.master: true # master-eligible role
node.data: true # data role
node.ingest: true # ingest role: pre-processes documents before indexing; supports pipelines, acting like a filter
bootstrap.memory_lock: false
node.max_local_storage_nodes: 1
http.cors.enabled: true # CORS settings (needed by elasticsearch-head)
http.cors.allow-origin: "*" # CORS settings
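
One more thing to prepare before startup: the logs and data directories bind-mounted in docker-compose.yml must exist on the host, and the elasticsearch user inside the container (typically uid 1000) needs write access to them. A minimal sketch, assuming the paths used in the compose file above; the wide-open permissions are only for a local test setup:

cd /Users/lengqi/myFile/elk/elasticsearch
for node in es-1 es-2 es-3; do
  mkdir -p "$node/logs" "$node/data"
  chmod -R 777 "$node/logs" "$node/data"   # permissive on purpose; tighten for anything beyond local testing
done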

4. Write kibana.yml

server.name: kibana
server.host: "0.0.0.0"
xpack.monitoring.ui.container.elasticsearch.enabled: true
i18n.locale: zh-CN

5. Write logstash.yml

http.host: "0.0.0.0"

The logstash.conf under the pipeline directory will be written once Kafka is set up, in a later part; a placeholder sketch follows below.
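
Until then, the mounted pipeline directory should not be left completely empty, or Logstash will typically complain that no configuration was found. A hypothetical placeholder pipeline, assuming a Beats input on the 5044 port mapped in docker-compose.yml and output to the es-1 node (replace it with the Kafka pipeline later):

cat > /Users/lengqi/myFile/elk/logstash/pipeline/logstash.conf <<'EOF'
input {
  beats {
    port => 5044
  }
}
output {
  elasticsearch {
    hosts => ["http://es-1:9200"]
    index => "logstash-%{+YYYY.MM.dd}"
  }
}
EOF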

6. Start everything with docker-compose

docker-compose -f docker-compose.yml up -d
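
To see whether everything came up, the usual docker-compose commands are enough:

docker-compose ps                      # all six containers should be "Up"
docker-compose logs --tail=100 es-1    # inspect any node that failed to start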

Startup may fail at this point. The fix is to create the elastic network manually:

docker network create --driver bridge elastic
docker network ls

The elastic network should now appear in the docker network ls output, confirming it was created.
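
Note that when the network is created by hand like this, Compose may still create its own project-scoped network for the elastic entry defined in docker-compose.yml. If the services should join the manually created network instead, it can be declared as external there (an optional tweak, not part of the original setup):

networks:
  elastic:
    external: true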

Open 127.0.0.1:9100 (elasticsearch-head). A cluster health of green means the Elasticsearch cluster is up.

Alternatively, check http://localhost:9200/_cat/health?v
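
The same checks from the command line, using the standard _cat APIs:

curl "http://localhost:9200/_cat/health?v"   # status should be green
curl "http://localhost:9200/_cat/nodes?v"    # should list es-1, es-2 and es-3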

Finally, open Kibana at http://localhost:5601.
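
If a command-line check is preferred, Kibana's status API reports the same readiness information; a green overall state means Kibana is up and connected to the cluster:

curl -s "http://localhost:5601/api/status"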

