Geek Time Ops Advanced Training Camp: Week 6 Assignment
2022-12-01, UK
Use the logstash filter to convert nginx's default access log and error log into JSON and write them to elasticsearch
Collect JSON-format nginx access logs with logstash
Collect Java logs with logstash and merge multi-line entries
Collect syslog-type logs with logstash (using haproxy in place of a network device)
Collect logs with logstash and write them to Redis, then consume them into elasticsearch with another logstash, keeping the JSON logs parsed
Deploy a single-node ELK stack with docker-compose
Mandatory Assignment
Logstash filter
Install logstash
Download and install the public signing key:
$ rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch

Create "logstash.repo" in "/etc/yum.repos.d/":
[logstash-8.x]
name=Elastic repository for 8.x packages
baseurl=https://artifacts.elastic.co/packages/8.x/yum
gpgcheck=1
gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
enabled=1
autorefresh=1
type=rpm-md

Install with the "yum" command:
$ yum install logstash

Configure logstash
Configure logstash with the following file ("/etc/logstash/logstash.yml"):
path.data: /var/lib/logstash
path.config: /etc/logstash/conf.d
path.logs: /var/log/logstash

"/etc/logstash/conf.d/logstash-nginx-es.conf"
input {
  beats {
    port => 5400
    ssl => true
    ssl_certificate_authorities => ["/etc/elk-certs/elk-ssl.crt"]
    ssl_certificate => "/etc/elk-certs/elk-ssl.crt"
    ssl_key => "/etc/elk-certs/elk-ssl.key"
    ssl_verify_mode => "force_peer"
  }
}
filter {
  grok {
    match => [ "message", "%{COMBINEDAPACHELOG}+%{GREEDYDATA:extra_fields}" ]
    overwrite => [ "message" ]
  }
  mutate {
    convert => ["response", "integer"]
    convert => ["bytes", "integer"]
    convert => ["responsetime", "float"]
  }
  geoip {
    source => "clientip"
    add_tag => [ "nginx-geoip" ]
  }
  date {
    match => [ "timestamp", "dd/MMM/YYYY:HH:mm:ss Z" ]
    remove_field => [ "timestamp" ]
  }
  useragent {
    source => "agent"
  }
}
output {
  elasticsearch {
    hosts => ["localhost:9200"]
    index => "weblogs-%{+YYYY.MM.dd}"
    # document_type is deprecated on Elasticsearch 8.x and can be dropped
    document_type => "nginx_logs"
  }
  stdout { codec => rubydebug }
}
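Before starting the service, the pipeline syntax can be validated with logstash's built-in config test, which parses the file and exits:

$ /usr/share/logstash/bin/logstash --config.test_and_exit -f /etc/logstash/conf.d/logstash-nginx-es.conf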
log-json.conf

# Match the format for Elasticsearch
log_format json escape=json '{ '
  '"fileset": { '
    '"module": "nginx", '
    '"name": "access" '
  '}, '
  '"nginx": { '
    '"access": { '
      '"remote_ip": "$remote_addr", '
      '"user_name": "$remote_user", '
      '"time": "$time_local", '
      '"method": "$request_method", '
      '"host": "$host", '
      '"url": "$request_uri", '
      '"http_protocol": "$server_protocol", '
      '"response_code": "$status", '
      '"body_sent": { '
        '"bytes": "$body_bytes_sent" '
      '}, '
      '"referrer": "$http_referer", '
      '"agent": "$http_user_agent" '
    '}, '
    '"request": "$request", '
    '"connection": "$connection", '
    '"pipe": "$pipe", '
    '"connection_requests": "$connection_requests", '
    '"time": { '
      '"iso8601": "$time_iso8601", '
      '"msec": "$msec", '
      '"request": "$request_time" '
    '}, '
    '"bytes": { '
      '"request_length": "$request_length", '
      '"body_sent": "$body_bytes_sent", '
      '"sent": "$bytes_sent" '
    '}, '
    '"http": { '
      '"x_forwarded_for": "$http_x_forwarded_for", '
      '"x_forwarded_proto": "$http_x_forwarded_proto", '
      '"x_real_ip": "$http_x_real_ip", '
      '"x_scheme": "$http_x_scheme" '
    '}, '
    '"upstream": { '
      '"addr": "$upstream_addr", '
      '"status": "$upstream_status", '
      '"response_time": "$upstream_response_time", '
      '"connect_time": "$upstream_connect_time", '
      '"header_time": "$upstream_header_time" '
    '} '
  '} '
'}';
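Defining the format alone changes nothing; assuming the log_format block above sits in the http context of nginx.conf, an access_log directive referencing it by name switches the access log to JSON (the path here is chosen to match the Filebeat input configured later):

access_log /var/log/nginx/access.log json;

Then reload nginx:

$ nginx -s reload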
Start logstash

Start logstash with the following commands:
$ systemctl enable logstash
$ systemctl restart logstash.service
$ netstat -tulpn | grep 5400
tcp6       0      0 :::5400                 :::*                    LISTEN      21329/java

Filebeat
Install Filebeat
$ curl -L -O https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-8.5.2-x86_64.rpm
$ rpm -vi filebeat-8.5.2-x86_64.rpm

Now configure Filebeat by modifying this file:
/etc/filebeat/filebeat.yml
filebeat.inputs:
- type: log
  paths:
    - /var/log/nginx/*.log
  exclude_files: ['\.gz$']
output.logstash:
  hosts: ["127.0.0.1:5400"]
  ssl.certificate_authorities: ["/etc/elk-certs/elk-ssl.crt"]
  ssl.certificate: "/etc/elk-certs/elk-ssl.crt"
  ssl.key: "/etc/elk-certs/elk-ssl.key"
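Filebeat's built-in test subcommands can verify the config file and the connection to logstash before the shipper is started:

$ filebeat test config
$ filebeat test output
$ systemctl enable filebeat
$ systemctl restart filebeat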
Redis

Install Redis server
$ amazon-linux-extras install epel
$ yum install redis
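The Redis server itself presumably needs to be running before logstash can write to it (service name assumed to be "redis" on this distribution):

$ systemctl enable redis
$ systemctl start redis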
Configure input.conf

input {
  file {
    path => "/home/tony/logstash/*.log"
    type => "cdn"
  }
}
output {
  redis {
    host => "localhost"
    # the legacy "type" option was removed from outputs in logstash 2.x
    data_type => "list"
    key => "logstash"
  }
}
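With the shipper pipeline running, a quick redis-cli check confirms events are queuing under the configured key; a growing list length means delivery is working:

$ redis-cli llen logstash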
Configure output.conf

input {
  redis {
    host => "localhost"
    type => "redis"
    data_type => "list"
    key => "logstash"
  }
}
filter {
  # The original used logstash 1.x syntax (grok "type"/"pattern", mutate "remove",
  # elasticsearch_http); the equivalent on a current release looks like this:
  if [type] == "cdn" {
    grok {
      match => { "message" => "%{IPORHOST:message_ip} %{IPORHOST:message_hostname} - %{HTTPDATE:message_date} \"%{WORD:message_httpmethod} %{URI:message_uri} HTTP/%{NUMBER:message_httpversion}\" %{NUMBER:message_responsecode}" }
    }

    kv {
      field_split => "&?"
    }

    mutate {
      # @message/@source* are legacy 1.x event fields; removing them is a no-op if absent
      remove_field => [ "@message", "@source", "@source_path", "@source_host" ]
    }

    date {
      match => [ "message_date", "dd/MMM/YYYY:HH:mm:ss Z" ]
    }
  }
}

output {
  stdout { codec => rubydebug }
  # elasticsearch_http was long ago merged into the elasticsearch output
  elasticsearch {
    hosts => ["localhost:9200"]
#   index => "logstash"
  }
}
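To run the shipper (input.conf) and the consumer (output.conf) side by side on one host, they can be declared as separate pipelines; a minimal /etc/logstash/pipelines.yml sketch, assuming the two configs are split into their own directories:

- pipeline.id: redis-shipper
  path.config: "/etc/logstash/conf.d/shipper/*.conf"
- pipeline.id: redis-consumer
  path.config: "/etc/logstash/conf.d/consumer/*.conf"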
Install ELK with docker-compose
docker-compose.yml file
version: '3.7'
services:
  # The 'setup' service runs a one-off script which initializes users inside
  # Elasticsearch (such as 'logstash_internal' and 'kibana_system') with the
  # values of the passwords defined in the '.env' file.
  #
  # This task is only performed during the *initial* startup of the stack. On all
  # subsequent runs, the service simply returns immediately, without performing
  # any modification to existing users.
  setup:
    build:
      context: setup/
      args:
        ELASTIC_VERSION: ${ELASTIC_VERSION}
    init: true
    volumes:
      - ./setup/entrypoint.sh:/entrypoint.sh:ro,Z
      - ./setup/helpers.sh:/helpers.sh:ro,Z
      - ./setup/roles:/roles:ro,Z
      - setup:/state:Z
    environment:
      ELASTIC_PASSWORD: ${ELASTIC_PASSWORD:-}
      LOGSTASH_INTERNAL_PASSWORD: ${LOGSTASH_INTERNAL_PASSWORD:-}
      KIBANA_SYSTEM_PASSWORD: ${KIBANA_SYSTEM_PASSWORD:-}
      METRICBEAT_INTERNAL_PASSWORD: ${METRICBEAT_INTERNAL_PASSWORD:-}
      FILEBEAT_INTERNAL_PASSWORD: ${FILEBEAT_INTERNAL_PASSWORD:-}
      HEARTBEAT_INTERNAL_PASSWORD: ${HEARTBEAT_INTERNAL_PASSWORD:-}
      MONITORING_INTERNAL_PASSWORD: ${MONITORING_INTERNAL_PASSWORD:-}
      BEATS_SYSTEM_PASSWORD: ${BEATS_SYSTEM_PASSWORD:-}
    networks:
      - elk
    depends_on:
      - elasticsearch
  elasticsearch:
    build:
      context: elasticsearch/
      args:
        ELASTIC_VERSION: ${ELASTIC_VERSION}
    volumes:
      - ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro,Z
      - elasticsearch:/usr/share/elasticsearch/data:Z
    ports:
      - 9200:9200
      - 9300:9300
    environment:
      node.name: elasticsearch
      ES_JAVA_OPTS: -Xms512m -Xmx512m
      # Bootstrap password.
      # Used to initialize the keystore during the initial startup of
      # Elasticsearch. Ignored on subsequent runs.
      ELASTIC_PASSWORD: ${ELASTIC_PASSWORD:-}
      # Use single node discovery in order to disable production mode and avoid bootstrap checks.
      # see: https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks.html
      discovery.type: single-node
    networks:
      - elk
  logstash:
    build:
      context: logstash/
      args:
        ELASTIC_VERSION: ${ELASTIC_VERSION}
    volumes:
      - ./logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml:ro,Z
      - ./logstash/pipeline:/usr/share/logstash/pipeline:ro,Z
    ports:
      - 5044:5044
      - 50000:50000/tcp
      - 50000:50000/udp
      - 9600:9600
    environment:
      LS_JAVA_OPTS: -Xms256m -Xmx256m
      LOGSTASH_INTERNAL_PASSWORD: ${LOGSTASH_INTERNAL_PASSWORD:-}
    networks:
      - elk
    depends_on:
      - elasticsearch
  kibana:
    build:
      context: kibana/
      args:
        ELASTIC_VERSION: ${ELASTIC_VERSION}
    volumes:
      - ./kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml:ro,Z
    ports:
      - 5601:5601
    environment:
      KIBANA_SYSTEM_PASSWORD: ${KIBANA_SYSTEM_PASSWORD:-}
    networks:
      - elk
    depends_on:
      - elasticsearch
networks:
  elk:
    driver: bridge
volumes:
  setup:
  elasticsearch:

Install
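The compose file resolves the ${...} references from a .env file next to docker-compose.yml; a minimal sketch, assuming the stock variable names used above (all values are placeholders to change):

ELASTIC_VERSION=8.5.2
ELASTIC_PASSWORD='changeme'
LOGSTASH_INTERNAL_PASSWORD='changeme'
KIBANA_SYSTEM_PASSWORD='changeme'
METRICBEAT_INTERNAL_PASSWORD='changeme'
FILEBEAT_INTERNAL_PASSWORD='changeme'
HEARTBEAT_INTERNAL_PASSWORD='changeme'
MONITORING_INTERNAL_PASSWORD='changeme'
BEATS_SYSTEM_PASSWORD='changeme'

Elasticsearch in a container also needs the host's vm.max_map_count raised (a standard bootstrap requirement); then the stack can be built and started, presumably from the directory holding docker-compose.yml:

$ sysctl -w vm.max_map_count=262144
$ docker-compose build
$ docker-compose up -d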
Now you should be able to access the ELK stack with:
URL: http://localhost:5601
Login: elastic
Password: changeme
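A quick way to confirm Elasticsearch itself is reachable, using the same default credentials:

$ curl -u elastic:changeme http://localhost:9200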