A Simple Docker Deployment of ELK

This is the ELK deployment document written by my ops colleague; I'm reposting it here for the record.

Service Layout

Architecture: Filebeat -> Kafka -> Logstash -> ES

  • For the Kafka cluster deployment, see the separate doc: kafka集群部署

    Service        Path / Data Dir       Port   Config File
    elasticsearch  /data/elasticsearch   9200   /data/elasticsearch/config/elasticsearch.yml
    logstash       /data/logstash        -      /data/logstash/config/logstash.yml
    kibana         /data/kibana          5601   /data/kibana/config/kibana.yml
    filebeat       /data/filebeat        -      /data/filebeat/config/filebeat.yml

Indexing Service - Elasticsearch

Create the data directories

mkdir -pv /data/elasticsearch/{config,data,logs}
chown 1000 /data/elasticsearch/{data,logs}

Host configuration changes

vim /etc/sysctl.conf
Add:
vm.max_map_count=655360
sysctl -p

vim /etc/security/limits.conf
Add:
* soft memlock unlimited
* hard memlock unlimited
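
Optionally verify both settings before starting the container (the memlock limits only take effect in new sessions):

sysctl vm.max_map_count   # expect: vm.max_map_count = 655360
ulimit -l                 # expect: unlimited (in a fresh login shell)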

Configuration file

cat > /data/elasticsearch/config/elasticsearch.yml << 'EOF'
cluster.name: ccms-es-cluster
node.name: ccms-es1
network.host: 172.16.20.51
http.port: 9200
bootstrap.memory_lock: true

# Allow cross-origin (CORS) access
http.cors.enabled: true
http.cors.allow-origin: "*"
http.cors.allow-methods: "OPTIONS, HEAD, GET, POST, PUT, DELETE"
http.cors.allow-headers: "Authorization, X-Requested-With, Content-Type, Content-Length, X-User"

# Cluster
node.master: true
node.data: true
transport.tcp.port: 9300
discovery.seed_hosts: ["172.16.20.51","172.16.20.52","172.16.20.53"]
cluster.initial_master_nodes: ["ccms-es1","ccms-es2","ccms-es3"]

cluster.routing.allocation.same_shard.host: true
cluster.routing.allocation.node_initial_primaries_recoveries: 4
cluster.routing.allocation.node_concurrent_recoveries: 4

# X-Pack
xpack.security.enabled: true
xpack.license.self_generated.type: basic
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.keystore.path: elastic-certificates.p12
xpack.security.transport.ssl.truststore.path: elastic-certificates.p12
EOF

chown 1000 /data/elasticsearch/config/*
# After the container starts, generate the certificate first, distribute it to each node's config directory, then restart the ES containers

The old discovery.zen.minimum_master_nodes formula is node count / 2 + 1 (here, 3 nodes / 2 + 1 = 2); note that 7.x ignores this setting and manages the master quorum itself via cluster.initial_master_nodes.

# Set the ES passwords:
# auto-generate passwords:
elasticsearch-setup-passwords auto
# or
# set your own passwords interactively:
elasticsearch-setup-passwords interactive

# es-head login:
http://172.16.20.52:9200/?auth_user=elastic&auth_password=elastic123456
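
Once the passwords are set, authentication can be sanity-checked with curl (substitute whichever elastic password you chose):

curl -u elastic:<password> 'http://172.16.20.51:9200/_cluster/health?pretty'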

# Generate the certificates (no passwords on them): first create a CA, then sign the
# node certificate that elasticsearch.yml references; run inside the container:
cd /usr/share/elasticsearch
elasticsearch-certutil ca --out config/elastic-stack-ca.p12 --pass ""
elasticsearch-certutil cert --ca config/elastic-stack-ca.p12 --ca-pass "" --out config/elastic-certificates.p12 --pass ""

docker-compose orchestration

mkdir -pv /data/docker-compose/elasticsearch/
cat > /data/docker-compose/elasticsearch/docker-compose.yml << EOF
version: "3"
services:
  es:
    container_name: es
    image: elasticsearch:7.11.1
    network_mode: host
    restart: always
    volumes:
      - /etc/localtime:/etc/localtime
      - /data/elasticsearch/config:/usr/share/elasticsearch/config
      - /data/elasticsearch/data:/usr/share/elasticsearch/data
      - /data/elasticsearch/logs:/usr/share/elasticsearch/logs
    environment:
      TZ: Asia/Shanghai
      bootstrap.memory_lock: true
      ES_JAVA_OPTS: "-Xmx8G -Xms8G"
      ELASTIC_PASSWORD: "G1T@es2022#ccms"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    deploy:
      resources:
        limits:
           memory: 10G
EOF
# 1. Fix the es-head CORS error (browser reports: Request header field Content-Type is not allowed by Access-Control-Allow-Headers)
# Add to the ES config file:
http.cors.enabled: true
http.cors.allow-origin: "*"
http.cors.allow-methods: "OPTIONS, HEAD, GET, POST, PUT, DELETE"
http.cors.allow-headers: "X-Requested-With, Content-Type, Content-Length, X-User"

# 2. Fix the blank Data Browser tab in es-head (browser reports: 406 Not Acceptable)
# Edit the es-head source file vendor.js
# around line 6886:
contentType: "application/x-www-form-urlencoded" --> contentType: "application/json;charset=UTF-8"
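
The same edit as a one-liner (the vendor.js path depends on where es-head is installed; the path below is a placeholder):

sed -i 's#application/x-www-form-urlencoded#application/json;charset=UTF-8#g' /path/to/elasticsearch-head/_site/vendor.js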

Start

docker-compose up -d

Log Collection - Filebeat

Create the data directories

mkdir -pv /data/filebeat/{config,data}

Configuration file

Shipping to Kafka

cat > /data/filebeat/config/filebeat.yml << 'EOF'
###################### Filebeat Configuration Example #########################
filebeat.name: ccms-test-08
filebeat.idle_timeout: 5s
filebeat.spool_size: 2048  # legacy option (pre-6.x); newer Filebeat uses the internal queue

#----------------------------------input from ccms servers--------------------------------#
filebeat.inputs:
- type: log
  enabled: true
  paths:
   - /opt/ccms-auto-deploy/credit-business/*/*/target/logs/*.log
   - /opt/ccms-auto-deploy/credit-support/*/*/target/logs/*.log
  fields:
    kafka_topic: topic-ccms-dev
  fields_under_root: true

  # multiline log handling (lines not starting with "[" are appended to the preceding event)
  multiline.pattern: '^\['
  multiline.negate: true
  multiline.match: after

  encoding: plain
  tail_files: false

  # how often to check the directories for new/updated files
  scan_frequency: 3s
  # poll for changes every 1s; while a file stays unchanged, double the interval up to 5s
  backoff: 1s
  max_backoff: 5s
  backoff_factor: 2

#----------------------------------input from nginx access_log--------------------------------#
- type: log
  enabled: true
  paths:
   - /data/nginx/logs/ccms-access.log
  fields:
    kafka_topic: topic-nginx-access
  fields_under_root: true

  encoding: plain
  tail_files: false

  json.keys_under_root: true
  json.overwrite_keys: true
  json.add_error_key: false

  # how often to check the directories for new/updated files
  scan_frequency: 3s
  # poll for changes every 1s; while a file stays unchanged, double the interval up to 5s
  backoff: 1s
  max_backoff: 5s
  backoff_factor: 2

#----------------------------------Kafka output--------------------------------#
output.kafka:
  enabled: true
  hosts: ['3.1.101.33:9092','3.1.101.34:9092','3.1.101.35:9092']
  topic: '%{[kafka_topic]}'
EOF
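
Before starting the container, the config can be validated with Filebeat's built-in checks: 'test config' parses the YAML, and 'test output' tries to reach the Kafka brokers. A throwaway-container way to run them:

docker run --rm -v /data/filebeat/config/filebeat.yml:/usr/share/filebeat/filebeat.yml elastic/filebeat:7.11.1 test config
docker run --rm -v /data/filebeat/config/filebeat.yml:/usr/share/filebeat/filebeat.yml elastic/filebeat:7.11.1 test output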

docker-compose orchestration

mkdir -pv /data/docker-compose/filebeat
cat > /data/docker-compose/filebeat/docker-compose.yml << EOF
version: "3"
services:
  filebeat:
    container_name: filebeat
    image: elastic/filebeat:7.11.1
    user: root
    restart: always
    volumes:
      - /etc/localtime:/etc/localtime
      - /data/filebeat/config/filebeat.yml:/usr/share/filebeat/filebeat.yml
      - /data/filebeat/data:/usr/share/filebeat/data/registry
      - /opt/ccms-auto-deploy:/opt/ccms-auto-deploy
      - /data/nginx/logs:/data/nginx/logs/
    deploy:
      resources:
        limits:
           memory: 4G
        reservations:
           memory: 1G
EOF

Start

docker-compose up -d
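
To confirm events are actually reaching Kafka, tail the topic with a console consumer on one of the brokers (script path depends on your Kafka installation; topic name from the Filebeat config above):

kafka-console-consumer.sh --bootstrap-server 3.1.101.33:9092 --topic topic-ccms-dev --from-beginning --max-messages 5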

Install the Kibana dashboards

docker-compose exec filebeat filebeat setup --dashboards
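
Note: dashboard loading goes through Kibana's API, and `filebeat setup` also expects the Elasticsearch output, so with the Kafka output enabled you may need -E overrides (hosts and credentials below follow this doc's values as an assumption; adjust as needed):

docker-compose exec filebeat filebeat setup --dashboards -E setup.kibana.host=172.16.20.51:5601 -E output.kafka.enabled=false -E 'output.elasticsearch.hosts=["172.16.20.51:9200"]' -E output.elasticsearch.username=elastic -E output.elasticsearch.password=<password>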

Filtering Service - Logstash

Create the data directories

mkdir -pv /data/logstash/{config,data,pipeline,logs}
chown 1000:1000 /data/logstash/{config,data,pipeline,logs}

Configuration files

logstash.yml

cat > /data/logstash/config/logstash.yml << 'EOF'
node.name: logstash-node1
http.host: "0.0.0.0"
path.data: /usr/share/logstash/data
path.logs: /usr/share/logstash/logs
config.reload.automatic: true
config.reload.interval: 5s
config.test_and_exit: false
EOF

When using pipelines.yml, do not set path.config in logstash.yml, otherwise the pipeline definitions will be ignored.

pipelines.yml

cat > /data/logstash/config/pipelines.yml << 'EOF'
- pipeline.id: ccms-credit-java
  path.config: "/usr/share/logstash/pipeline/ccms-credit-java.conf"
- pipeline.id: ccms-credit-nginx-access
  path.config: "/usr/share/logstash/pipeline/ccms-credit-nginx-access.conf"
- pipeline.id: ccms-credit-nginx-error
  path.config: "/usr/share/logstash/pipeline/ccms-credit-nginx-error.conf"
EOF

Pipeline config files

pipeline/ccms-credit-java.conf

cat > /data/logstash/pipeline/ccms-credit-java.conf<< 'EOF'
input {
   kafka {
    topics_pattern => "topic-ccms-credit-sit-java"
    bootstrap_servers => "172.16.20.51:9092,172.16.20.52:9092,172.16.20.53:9092"
    consumer_threads => 4
    decorate_events => true
    group_id => "kafka-ccms-credit-sit-java"
    add_field => {"logstash-server" => "172.16.20.51"}
   }
}

filter {
    json {
      source => "message"
    }

    grok {
      match => { "message" => "\[%{TIMESTAMP_ISO8601:currentDateTime}\] \[%{LOGLEVEL:level}\] \[%{DATA:traceInfo}\] \[%{NOTSPACE:class}\] \[%{DATA:hostName}\] \[%{IP:hostIp}\] \[%{DATA:applicationName}\] \[%{DATA:location}\] \[%{DATA:messageInfo}\] ## %{QUOTEDSTRING:throwable}" }
    }

    mutate{
      enable_metric => "false"
      remove_field => ["ecs","tags","input","agent","@version","log","port","host","message"]
    }

    date {
      match => [ "currentDateTime", "ISO8601" ]
    }
}

output {
        elasticsearch {
        hosts => ["172.16.20.51:9200","172.16.20.52:9200","172.16.20.53:9200"]
        user => "elastic"
        password => "G1T@es2022#ccms"
        index => "index-ccms-credit-sit-java_%{+YYY-MM-dd}"
        sniffing => true
        template_overwrite => true
    }
}
EOF
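
For reference, a hypothetical application log line in the shape this grok pattern expects (each bracketed field in order, then " ## " and a quoted throwable):

[2024-11-06 18:17:37,123] [INFO] [traceId=abc123] [com.example.CreditService] [app-host-1] [172.16.20.51] [ccms-credit] [CreditService.java:88] [apply() finished in 35ms] ## ""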

pipeline/ccms-credit-nginx-access.conf

cat > /data/logstash/pipeline/ccms-credit-nginx-access.conf << 'EOF'
input {
   kafka {
    topics_pattern => "topic-ccms-credit-sit-nginx-access"
    bootstrap_servers => "172.16.20.51:9092,172.16.20.52:9092,172.16.20.53:9092"
    codec => "json"
    consumer_threads => 4
    decorate_events => true
    group_id => "kafka-ccms-credit-sit-nginx-access"
    add_field => {"logstash-server" => "172.16.20.51"}
   }
}

filter {
  geoip {
      source => "client_ip"
      target => "geoip"
      add_field => [ "[geoip][coordinates]", "%{[geoip][longitude]}" ]
      add_field => [ "[geoip][coordinates]", "%{[geoip][latitude]}" ]
      remove_field => [ "[geoip][latitude]", "[geoip][longitude]", "[geoip][country_code2]","[geoip][country_code3]", "[geoip][timezone]", "[geoip][continent_code]", "[geoip][dma_code]", "[geoip][region_code]" ]
  }

  mutate {
    convert => [ "size", "integer" ]
    convert => [ "status", "integer" ]
    convert => [ "responsetime", "float" ]
    convert => [ "upstreamtime", "float" ]
    convert => [ "[geoip][coordinates]", "float" ]
    # drop Filebeat fields we don't need; be careful what you remove here, since dropped fields can no longer be used in conditions or in ES
    remove_field => [ "ecs","agent","host","cloud","@version","input","logs_type" ]
  }


  useragent {
    source => "http_user_agent"
    target => "ua"
    # drop useragent fields we don't need
    remove_field => [ "[ua][minor]","[ua][major]","[ua][build]","[ua][patch]","[ua][os_minor]","[ua][os_major]" ]
  }

}

output {
        elasticsearch {
        hosts => ["172.16.20.51:9200","172.16.20.52:9200","172.16.20.53:9200"]
        user => "elastic"
        password => "G1T@es2022#ccms"
        index => "logstash-ccms-credit-sit-nginx-access_%{+YYY-MM-dd}"
        sniffing => true
        template_overwrite => true
    }
}
EOF
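
This pipeline assumes nginx writes the access log as JSON (hence the json codec and the client_ip/size/status/responsetime/upstreamtime/http_user_agent fields above); a hypothetical nginx log_format that would produce matching events:

log_format json escape=json '{"client_ip":"$remote_addr","status":"$status","size":"$body_bytes_sent","responsetime":"$request_time","upstreamtime":"$upstream_response_time","http_user_agent":"$http_user_agent","request":"$request"}';
access_log /data/nginx/logs/ccms-access.log json;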

pipeline/ccms-credit-nginx-error.conf

cat > /data/logstash/pipeline/ccms-credit-nginx-error.conf << 'EOF'
input {
   kafka {
    topics_pattern => "topic-ccms-credit-sit-nginx-error"
    bootstrap_servers => "172.16.20.51:9092,172.16.20.52:9092,172.16.20.53:9092"
    consumer_threads => 4
    decorate_events => true
    group_id => "kafka-ccms-credit-sit-nginx-error"
    add_field => {"logstash-server" => "172.16.20.51"}
    enable_metric => true
   }
}

filter {
    json {
      source => "message"
    }

    grok {
      match => [
        "message", "%{DATESTAMP:currentDateTime}\s{1,}\[%{LOGLEVEL:level}\]\s{1,}(%{NUMBER:pid:int}#%{NUMBER}:\s{1,}\*%{NUMBER})\s{1,}(%{GREEDYDATA:messageInfo})(?:,\s{1,}client:\s{1,}(?<client>%{IP}|%{HOSTNAME}))(?:,\s{1,}server:\s{1,}%{IPORHOST:server})(?:, request: %{QS:request})?(?:, upstream: \"%{URI:endpoint}\")?(?:, host: \"%{HOSTPORT:host}\")?(?:, referrer: \"%{URI:referrer}\")?",
        "message", "%{DATESTAMP:currentDateTime}\s{1,}\[%{DATA:level}\]\s{1,}%{GREEDYDATA:messageInfo}"]
    }

    date{
      match => ["currentDateTime", "yy/MM/dd HH:mm:ss", "ISO8601"]
      timezone => "+08:00"
      target => "@timestamp"
    }

    mutate{
      enable_metric => "false"
      remove_field => [ "ecs","tags","input","agent","@version","log","port","host","message" ]
    }
}

output {
        elasticsearch {
        hosts => ["172.16.20.51:9200","172.16.20.52:9200","172.16.20.53:9200"]
        user => "elastic"
        password => "G1T@es2022#ccms"
        index => "logstash-ccms-credit-sit-nginx-error_%{+YYY-MM-dd}"
        sniffing => true
        template_overwrite => true
    }
}
EOF
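
For reference, a typical nginx error-log line that the first grok pattern targets. Note that DATESTAMP captures only a two-digit year (the leading "20" is left behind), which is why the date filter parses with "yy/MM/dd HH:mm:ss":

2024/11/06 18:17:37 [error] 1234#0: *56 connect() failed (111: Connection refused) while connecting to upstream, client: 10.0.0.1, server: ccms.example.com, request: "GET /api/health HTTP/1.1", upstream: "http://172.16.20.60:8080/api/health", host: "ccms.example.com:443"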

docker-compose orchestration

mkdir -pv /data/docker-compose/logstash
cat > /data/docker-compose/logstash/docker-compose.yml << EOF
version: "3"
services:
  logstash:
    container_name: logstash
    image: 172.16.20.50:8005/public/logstash:7.11.1
    user: root
    network_mode: host
    restart: always
    volumes:
      - /etc/localtime:/etc/localtime
      - /data/logstash/config:/usr/share/logstash/config
      - /data/logstash/data:/usr/share/logstash/data
      - /data/logstash/pipeline:/usr/share/logstash/pipeline
    environment:
      TZ: Asia/Shanghai
      LS_JAVA_OPTS: "-Xmx8G -Xms8G"
    deploy:
      resources:
        limits:
           memory: 10G
EOF

Start

docker-compose up -d
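
With http.host bound to 0.0.0.0, the running pipelines can be inspected through Logstash's monitoring API on its default port 9600 (node IP assumed to be this host):

curl -s 'http://172.16.20.51:9600/_node/pipelines?pretty'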

Visualization Service - Kibana

Create the data directories

mkdir -pv /data/kibana/{config,logs}
chown 1000 /data/kibana/{config,logs}

Configuration file

cat > /data/kibana/config/kibana.yml << 'EOF'
# Default Kibana configuration for docker target
server.name: ccms-kibana
server.port: 5601
server.host: "0"
elasticsearch.hosts: [ "http://172.16.20.51:9200","http://172.16.20.52:9200","http://172.16.20.53:9200" ]
monitoring.ui.container.elasticsearch.enabled: true
i18n.locale: "zh-CN"
map.tilemap.url: 'http://webrd02.is.autonavi.com/appmaptile?lang=zh_cn&size=1&scale=1&style=7&x={x}&y={y}&z={z}'

xpack.security.enabled: true
xpack.security.encryptionKey: "fhjskloppd678ehkdfdlliverpoolfcr"
elasticsearch.username: "elastic"
elasticsearch.password: "G1T@es2022#ccms"
EOF

docker-compose orchestration

mkdir -pv /data/docker-compose/kibana/
cat > /data/docker-compose/kibana/docker-compose.yml << EOF
version: "3"
services:
  kibana:
    container_name: kibana
    image: kibana:7.11.1
    restart: always
    ports:
      - "5601:5601"
    volumes:
      - /data/kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml
EOF

Start

docker-compose up -d
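
A quick liveness check once the container is up, via Kibana's status API (adjust the host to wherever Kibana runs; credentials as configured above):

curl -s -u elastic:'G1T@es2022#ccms' 'http://172.16.20.51:5601/api/status'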
