In this example we are going to forward Docker PHP-FPM and Nginx logs to Elasticsearch using Fluent-Bit. Our monitoring stack is EFK (Elasticsearch, Fluent-Bit, Kibana). The reason we are not going with a FileBeat and Logstash combination is that Fluent-Bit is far lighter on system resources: we are talking about a few KB/MB compared to hundreds of MB! It also comes with built-in persistence mechanisms such as memory and filesystem, whereas a FileBeat and Logstash solution requires an extra tool like Redis for persistence.


Flow


PHP-FPM and Nginx logs are tailed by Fluent-Bit and then forwarded to Elasticsearch. It is as simple as that!


Application setup


Structure


.
├── docker
│   ├── docker-compose.yml
│   ├── nginx
│   │   ├── app.conf
│   │   ├── Dockerfile
│   │   └── nginx.conf
│   └── php
│       ├── Dockerfile
│       ├── php.ini
│       └── www.conf
└── index.php
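
index.php


The index.php can be any script that produces output. A minimal hypothetical placeholder like the one below is enough to exercise the stack; it echoes the request ID that nginx forwards via the HTTP_X_REQUEST_ID fastcgi param.


<?php

// Hypothetical placeholder: print the request ID that nginx forwards
// through the HTTP_X_REQUEST_ID fastcgi param (see app.conf below).
echo 'Hello from request '.($_SERVER['HTTP_X_REQUEST_ID'] ?? 'unknown');
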

docker/docker-compose.yml


You can find the available Fluentd logging driver options in the Docker documentation and the Fluentd documentation.


version: "3.4"

services:

inanzzz_wait_php:
build:
context: "./php"
hostname: "wait-php"
volumes:
- "..:/app"
environment:
PS1: "\\u@\\h:\\w\\$$ "
logging:
driver: "fluentd"
options:
fluentd-address: "inanzzz_monitoring_fluent_bit"
tag: "wait_php"
mode: "non-blocking"
max-buffer-size: "2m"
fluentd-async-connect: "true"

inanzzz_wait_nginx:
build:
context: "./nginx"
hostname: "wait-nginx"
ports:
- "1080:80"
volumes:
- "..:/app"
depends_on:
- "inanzzz_wait_php"
environment:
PS1: "\\u@\\h:\\w\\$$ "
logging:
driver: "fluentd"
options:
fluentd-address: "inanzzz_monitoring_fluent_bit"
tag: "wait_nginx"
mode: "non-blocking"
max-buffer-size: "2m"
fluentd-async-connect: "true"
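
To bring the application stack up, build and start it from within the docker folder:


$ cd docker
$ docker-compose up -d --build


One caveat to be aware of: the fluentd logging driver runs inside the Docker daemon on the host, not inside the containers, so fluentd-address must be resolvable from the host itself. If the container name above does not resolve in your environment, pointing it at the published port instead (fluentd-address: "127.0.0.1:24224") is a common workaround; fluentd-async-connect: "true" also lets the containers start even when the connection is not yet up.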

docker/nginx/app.conf


server {
    listen 80 default_server;

    server_name localhost;

    root /app;

    index index.php;

    location ~ \.php$ {
        try_files $uri =404;
        fastcgi_pass inanzzz_wait_php:9000;
        fastcgi_split_path_info ^(.+\.php)(/.+)$;
        fastcgi_index index.php;
        include fastcgi_params;
        fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
        fastcgi_param PATH_INFO $fastcgi_path_info;
        fastcgi_param HTTP_X_REQUEST_ID $request_id;
    }
}
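
Once the container is up, you can sanity-check the vhost syntax from inside it (assuming the service name above):


$ docker-compose exec inanzzz_wait_nginx nginx -t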

docker/nginx/Dockerfile


FROM nginx:1.15.8-alpine

WORKDIR /app

COPY app.conf /etc/nginx/conf.d/default.conf
COPY nginx.conf /etc/nginx/nginx.conf

docker/nginx/nginx.conf


user nginx;

worker_processes 1;

error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;

events {
    worker_connections 1024;
}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    log_format json_combined escape=json
        '{'
        '"time_local":"$time_iso8601",'
        '"client_ip":"$http_x_forwarded_for",'
        '"remote_addr":"$remote_addr",'
        '"remote_user":"$remote_user",'
        '"request":"$request",'
        '"status":"$status",'
        '"body_bytes_sent":"$body_bytes_sent",'
        '"request_time":"$request_time",'
        '"http_referrer":"$http_referer",'
        '"http_user_agent":"$http_user_agent",'
        '"request_id":"$request_id"'
        '}';

    access_log /var/log/nginx/access.log json_combined;
    # "error_log off" would write to a file literally named "off"; discard instead
    error_log /dev/null;

    sendfile on;

    keepalive_timeout 65;

    include /etc/nginx/conf.d/*.conf;
}
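
With this format, every access log entry is emitted as a single JSON line on stdout, which is exactly what the fluentd logging driver picks up and ships. An entry looks roughly like this (illustrative values):


{"time_local":"2019-02-10T18:48:58+00:00","client_ip":"","remote_addr":"172.18.0.1","remote_user":"","request":"GET /index.php HTTP/1.1","status":"200","body_bytes_sent":"27","request_time":"0.012","http_referrer":"","http_user_agent":"curl/7.58.0","request_id":"a9f1c7e3b2d4486f8a0c1d2e3f405162"}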

docker/php/Dockerfile


FROM php:7.2.13-fpm-alpine3.8

WORKDIR /app

COPY php.ini /usr/local/etc/php/conf.d/php.override.ini
COPY www.conf /usr/local/etc/php-fpm.d/www.conf

CMD ["php-fpm", "--nodaemonize"]

docker/php/php.ini


[PHP]
date.timezone=UTC
log_errors=On
error_reporting=E_ALL & ~E_DEPRECATED & ~E_STRICT
display_errors=Off
max_execution_time=60
memory_limit=256M

docker/php/www.conf


[global]
daemonize=no

[www]
user=www-data
group=www-data

; bind to port 9000 on all interfaces so that the nginx container can reach PHP-FPM
listen=9000

pm=dynamic
pm.max_children=40
pm.start_servers=2
pm.min_spare_servers=2
pm.max_spare_servers=4
pm.max_requests=500

access.format='{"time_local":"%{%Y-%m-%dT%H:%M:%S%z}T","client_ip":"%{HTTP_X_FORWARDED_FOR}e","remote_addr":"%R","remote_user":"%u","request":"%m %{REQUEST_URI}e %{SERVER_PROTOCOL}e","status":"%s","body_bytes_sent":"%l","request_time":"%d","http_referrer":"%{HTTP_REFERER}e","http_user_agent":"%{HTTP_USER_AGENT}e","request_id":"%{HTTP_X_REQUEST_ID}e"}'
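
The access.format above mirrors the nginx json_combined format, so both containers emit the same JSON shape and the same Fluent-Bit parser can handle either stream. A PHP-FPM access line would look roughly like this (illustrative values):


{"time_local":"2019-02-10T18:48:58+0000","client_ip":"","remote_addr":"172.18.0.3","remote_user":"","request":"GET /index.php HTTP/1.1","status":"200","body_bytes_sent":"-","request_time":"0.012","http_referrer":"","http_user_agent":"curl/7.58.0","request_id":"a9f1c7e3b2d4486f8a0c1d2e3f405162"}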

Monitoring setup


Structure


.
├── data
├── docker-compose.yml
└── fluent-bit.conf
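
Elasticsearch runs as uid 1000 inside the container, so on Linux hosts the bind-mounted data directory may need matching ownership before the first run (a common gotcha; adjust paths to your setup):


$ mkdir -p data/elasticsearch
$ sudo chown -R 1000:1000 data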

docker-compose.yml


version: "3.4"

services:

inanzzz_monitoring_elasticsearch:
image: "docker.elastic.co/elasticsearch/elasticsearch:7.1.1"
hostname: "monitoring-elasticsearch"
environment:
node.name: "monitoring_node"
discovery.type: "single-node"
cluster.name: "monitoring_cluster"
bootstrap.memory_lock: "true"
ES_JAVA_OPTS: "-Xms512m -Xmx512m"
PS1: "\\u@\\h:\\w\\$$ "
ulimits:
memlock:
soft: "-1"
hard: "-1"
ports:
- "9200:9200"
volumes:
- "./data/elasticsearch:/usr/share/elasticsearch/data"

inanzzz_monitoring_fluent_bit:
image: "fluent/fluent-bit:1.0.4"
hostname: "monitoring-fluent-bit"
ports:
- "24224:24224"
depends_on:
- "inanzzz_monitoring_elasticsearch"
volumes:
- "./fluent-bit.conf:/fluent-bit/etc/fluent-bit.conf:ro"

inanzzz_monitoring_kibana:
image: "docker.elastic.co/kibana/kibana:7.1.1"
hostname: "monitoring-kibana"
environment:
ELASTICSEARCH_HOSTS: "http://inanzzz_monitoring_elasticsearch:9200"
ports:
- "5601:5601"
depends_on:
- "inanzzz_monitoring_elasticsearch"

inanzzz_monitoring_elastichq:
image: "elastichq/elasticsearch-hq"
hostname: "monitoring-elastichq"
ports:
- "5000:5000"
depends_on:
- "inanzzz_monitoring_elasticsearch"

fluent-bit.conf


If you do not have an extremely heavy log flow, you can remove all the storage.* entries. Persistence will then fall back to the default memory option.


[SERVICE]
    Flush                     5
    Daemon                    Off
    Log_Level                 debug
    storage.path              /var/log/flb-storage
    storage.sync              normal
    storage.checksum          off
    storage.backlog.mem_limit 5M

[INPUT]
    Name         forward
    storage.type filesystem
    Listen       inanzzz_monitoring_fluent_bit
    Port         24224

[OUTPUT]
    Name            es
    Host            inanzzz_monitoring_elasticsearch
    Port            9200
    Match           wait_*
    Index           wait
    Type            logs
    Include_Tag_Key On
    Tag_Key         tag

You can use $ curl -X GET 0.0.0.0:9200/_cat/indices to list all the indexes, and $ curl -X GET 0.0.0.0:9200/wait/_search to see the content of the wait index. In addition, you can access Kibana at http://localhost:5601 and ElasticHQ at http://192.168.99.30:5000.


Tests


Send a few requests to your application. The result will look similar to the one below.
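
Without a parser in place, the whole JSON log line lands in a single log field, next to the metadata that the fluentd driver adds (container_id, container_name, source) and the tag we configured. A hit from the wait index looks roughly like this (illustrative, shortened values):


{
    "_index": "wait",
    "_type": "logs",
    "_source": {
        "container_id": "3e27cd4e22f7",
        "container_name": "/docker_inanzzz_wait_nginx_1",
        "source": "stdout",
        "log": "{\"time_local\":\"2019-02-10T18:48:58+00:00\",\"remote_addr\":\"172.18.0.1\",\"request\":\"GET /index.php HTTP/1.1\",\"status\":\"200\",\"request_id\":\"a9f1c7e3b2d4486f8a0c1d2e3f405162\"}",
        "tag": "wait_nginx"
    }
}


Everything interesting is buried inside the log string, which is why the next section splits it into individual fields.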




Splitting the log


Use the example below to split the log line into individual log fields. This is the preferred configuration.


fluent-bit.conf


[SERVICE]
    Flush                     5
    Daemon                    Off
    Log_Level                 debug
    storage.path              /var/log/flb-storage
    storage.sync              normal
    storage.checksum          off
    storage.backlog.mem_limit 5M
    Parsers_File              parsers.conf

[INPUT]
    Name         forward
    storage.type filesystem
    Listen       inanzzz_monitoring_fluent_bit
    Port         24224

[FILTER]
    Name         parser
    Parser       docker
    Match        wait_*
    Key_Name     log
    Reserve_Data On
    Preserve_Key On

[OUTPUT]
    Name            es
    Host            inanzzz_monitoring_elasticsearch
    Port            9200
    Match           wait_*
    Index           wait
    Type            logs
    Include_Tag_Key On
    Tag_Key         tag
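
Note that the monitoring docker-compose.yml above mounts only fluent-bit.conf. The stock Fluent-Bit image ships its own parsers.conf at /fluent-bit/etc/parsers.conf, and Parsers_File is resolved relative to the main config file, so for the custom parsers below to take effect the file has to be mounted over it as well, along these lines:


volumes:
    - "./fluent-bit.conf:/fluent-bit/etc/fluent-bit.conf:ro"
    - "./parsers.conf:/fluent-bit/etc/parsers.conf:ro"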

parsers.conf


# When this parser is used, empty fields are removed from the log, so their values go missing.
[PARSER]
    Name   json_regex
    Format regex
    Regex  ^{"time_local":"(?<time_local>.*?)","client_ip":"(?<client_ip>.*?)","remote_addr":"(?<remote_addr>.*?)","remote_user":"(?<remote_user>.*?)","request":"(?<request>.*?)","status":"(?<status>.*?)","body_bytes_sent":"(?<body_bytes_sent>.*?)","request_time":"(?<request_time>.*?)","http_referrer":"(?<http_referrer>.*?)","http_user_agent":"(?<http_user_agent>.*?)","request_id":"(?<request_id>.*?)"}$

# This doesn't have the problem mentioned above so this is the preferred choice.
[PARSER]
    Name        docker
    Format      json
    Time_Key    time
    Time_Format %Y-%m-%dT%H:%M:%S.%L
    Time_Keep   On
    # Command         | Decoder      | Field | Optional Action
    # ================|==============|=======|=================
    Decode_Field_As     escaped_utf8   log     do_next
    Decode_Field_As     json           log

Result
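
After the parser filter runs, the fields of the JSON log line are promoted to top-level document fields, while Reserve_Data and Preserve_Key keep the driver metadata and the original log string. A parsed document in the wait index would look roughly like this (illustrative, shortened values):


{
    "_index": "wait",
    "_type": "logs",
    "_source": {
        "container_name": "/docker_inanzzz_wait_nginx_1",
        "source": "stdout",
        "log": "{\"time_local\":\"2019-02-10T18:48:58+00:00\",\"request\":\"GET /index.php HTTP/1.1\",\"status\":\"200\"}",
        "tag": "wait_nginx",
        "time_local": "2019-02-10T18:48:58+00:00",
        "remote_addr": "172.18.0.1",
        "request": "GET /index.php HTTP/1.1",
        "status": "200",
        "request_time": "0.012",
        "http_user_agent": "curl/7.58.0",
        "request_id": "a9f1c7e3b2d4486f8a0c1d2e3f405162"
    }
}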