Рассмотрим установку стека EFK в docker swarm:
Создаём следующие директории:
# Create the project directory tree for the EFK stack configs.
mkdir -p \
  docker-efk/curator \
  docker-efk/elasticsearch/config \
  docker-efk/fluentd/config \
  docker-efk/fluentd/plugins \
  docker-efk/kibana/config
Curator
cat curator/Dockerfile
[codesyntax lang="php" blockstate="collapsed"]
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 |
# Curator image: runs elasticsearch-curator on a schedule via go-cron.
FROM centos:7

# MAINTAINER is deprecated; LABEL is the supported replacement.
LABEL maintainer="play-backend"

# Set the container timezone.
ENV TZ=Asia/Bishkek
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

RUN mkdir /etc/curator
ADD action_file.yml /etc/curator
ADD config.yml /etc/curator

# Install pip and curator in one layer.
# CentOS 7 ships Python 2.7; the generic bootstrap.pypa.io/get-pip.py has
# dropped Python 2 support, so use the pinned 2.7 bootstrap script.
# (https://www.elastic.co/guide/en/elasticsearch/client/curator/4.2/installation.html)
RUN curl "https://bootstrap.pypa.io/pip/2.7/get-pip.py" -o "get-pip.py" \
    && python get-pip.py \
    && pip install elasticsearch-curator

# go-cron: a tiny cron daemon that runs a single command on a schedule
# and exposes an HTTP health endpoint on the given port.
RUN curl -L -o /usr/local/bin/go-cron-linux.gz https://github.com/odise/go-cron/releases/download/v0.0.7/go-cron-linux.gz \
    && gunzip /usr/local/bin/go-cron-linux.gz \
    && chmod u+x /usr/local/bin/go-cron-linux

ENV PATH=/usr/local/bin:$PATH

# Defaults only; both are overridden by the stack's environment section.
# (ENV KEY=value is the current form; the space-separated form is legacy.)
ENV SCHEDULE="* * * * * *"
ENV COMMAND="echo test go-cron"

EXPOSE 8080

CMD go-cron-linux -s "$SCHEDULE" -p 8080 -- /bin/bash -c "$COMMAND"
[/codesyntax]
cat curator/action_file.yml
[codesyntax lang="php" blockstate="collapsed"]
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 |
---
# Curator action file: delete old fluentd-* indices.
actions:
  1:
    action: delete_indices
    # NOTE: the original description ("Alias indices older than 14 days,
    # with a prefix of play-backend") did not match the actual action below:
    # it deletes indices prefixed 'fluentd-' that are older than 1 day.
    description: >-
      Delete indices older than 1 day, with a prefix of fluentd-.
    options:
      disable_action: False
      timeout_override:
      ignore_empty_list: True
      continue_if_exception: False
    filters:
    - filtertype: pattern
      kind: prefix
      value: fluentd-
      exclude:
    - filtertype: age
      source: field_stats
      field: '@timestamp'
      stats_result: min_value
      direction: older
      unit: days
      unit_count: 1
[/codesyntax]
cat curator/config.yml
[codesyntax lang="php" blockstate="collapsed"]
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 |
# Curator client configuration.
client:
  hosts:
    - elasticsearch
  port: 9200
  url_prefix:
  use_ssl: False
  certificate:
  client_cert:
  client_key:
  aws_key:
  aws_secret_key:
  aws_region:
  ssl_no_validate: False
  http_auth:
  timeout: 30
  master_only: False

logging:
  # The original listed 'loglevel' twice (INFO, then DEBUG) — a duplicate
  # YAML key; with last-key-wins parsers DEBUG was the effective value,
  # so only DEBUG is kept here.
  loglevel: DEBUG
  logfile:
  logformat: default
  blacklist: ['elasticsearch', 'urllib3']
[/codesyntax]
Elasticsearch
cat elasticsearch/config/elasticsearch.yml
[codesyntax lang="php" blockstate="collapsed"]
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 |
---
## Default Elasticsearch configuration from elasticsearch-docker.
## from https://github.com/elastic/elasticsearch-docker/blob/master/build/elasticsearch/elasticsearch.yml
#
cluster.name: "docker-cluster"
network.host: 0.0.0.0

# minimum_master_nodes need to be explicitly set when bound on a public IP
# set to 1 to allow single node clusters
# Details: https://github.com/elastic/elasticsearch/pull/17288
discovery.zen.minimum_master_nodes: 1

## Use single node discovery in order to disable production mode and avoid bootstrap checks
## see https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks.html
#
discovery.type: single-node
[/codesyntax]
Kibana
cat kibana/config/kibana.yml
[codesyntax lang="php" blockstate="collapsed"]
1 2 3 4 5 6 7 8 |
---
## Default Kibana configuration from kibana-docker.
## from https://github.com/elastic/kibana-docker/blob/master/build/kibana/config/kibana.yml
#
server.name: kibana
server.host: "0"
elasticsearch.url: http://elasticsearch:9200
[/codesyntax]
Fluent
cat fluentd/Dockerfile
[codesyntax lang="php" blockstate="collapsed"]
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 |
# Fluentd image with Elasticsearch and assorted output plugins.
FROM fluent/fluentd:v0.12-onbuild

# MAINTAINER is deprecated; LABEL is the supported replacement.
LABEL maintainer="YOUR_NAME <...@...>"

ENV TZ=Asia/Bishkek

RUN apk add ruby-bigdecimal
RUN apk add --no-cache tzdata

# Build dependencies are grouped as the virtual package .build-deps so
# they can be removed in one step after the gems are installed.
RUN apk add --update --virtual .build-deps \
        sudo build-base ruby-dev ruby-bigdecimal \
    && apk add postgresql-dev \
    && ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

# Install plugins, then drop build deps and caches to keep the image small.
# NOTE: the original ended the last rm -rf argument with a stray trailing
# '\', which pulled the following COPY instruction into the RUN shell line
# and broke the build. That backslash is removed here.
RUN gem install fluent-plugin-secure-forward \
        fluent-plugin-bigquery bigdecimal \
        fluent-plugin-elasticsearch \
        fluent-plugin-pgjson \
        fluent-plugin-out-http \
        fluent-plugin-record-modifier \
        fluent-plugin-exclude-filter \
        fluent-plugin-splunk-enterprise \
    && sudo gem sources --clear-all \
    && apk del .build-deps \
    && rm -rf /var/cache/apk/* \
        /home/fluent/.gem/ruby/2.3.0/cache/*.gem

COPY fluent.conf /fluentd/etc/

VOLUME ["/fluentd/etc/"]
EXPOSE 24224 42185
[/codesyntax]
cat fluentd/fluent.conf
[codesyntax lang="php" blockstate="collapsed"]
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 |
# fluentd/conf/fluent.conf
#
# Inputs: 'forward' on 24224 and 'syslog' on 42185.
<source>
  @type forward
  port 24224
  bind 0.0.0.0
  source_hostname_key host
  use_record_host true
</source>

<source>
  # '@type' replaces the deprecated bare 'type' form used originally.
  @type syslog
  port 42185
  tag syslog
  source_hostname_key host
  use_record_host true
</source>

# Fluentd evaluates <match> blocks in order. The syslog match must come
# BEFORE the catch-all '*.**' — in the original it came after, so the
# catch-all swallowed all syslog.* events and this block never ran.
<match syslog.**>
  @type elasticsearch
  host elasticsearch
  port 9200
  logstash_format true
  logstash_dateformat %Y%m%d
  include_tag_key true
  flush_interval 1s # for testing
</match>

# Everything else: to Elasticsearch (fluentd-YYYYMMDD indices) and,
# for debugging, to container stdout.
<match *.**>
  @type copy
  <store>
    @type elasticsearch
    host elasticsearch
    port 9200
    logstash_format true
    logstash_prefix fluentd
    logstash_dateformat %Y%m%d
    include_tag_key true
    type_name access_log
    tag_key @log_name
    flush_interval 1s
  </store>
  <store>
    @type stdout
  </store>
</match>
[/codesyntax]
Точно такой же конфиг помещаем в:
cat fluentd/config/fluent.conf
[codesyntax lang="php" blockstate="collapsed"]
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 |
# fluentd/conf/fluent.conf
#
# Inputs: 'forward' on 24224 and 'syslog' on 42185.
<source>
  @type forward
  port 24224
  bind 0.0.0.0
  source_hostname_key host
  use_record_host true
</source>

<source>
  # '@type' replaces the deprecated bare 'type' form used originally.
  @type syslog
  port 42185
  tag syslog
  source_hostname_key host
  use_record_host true
</source>

# Fluentd evaluates <match> blocks in order. The syslog match must come
# BEFORE the catch-all '*.**' — in the original it came after, so the
# catch-all swallowed all syslog.* events and this block never ran.
<match syslog.**>
  @type elasticsearch
  host elasticsearch
  port 9200
  logstash_format true
  logstash_dateformat %Y%m%d
  include_tag_key true
  flush_interval 1s # for testing
</match>

# Everything else: to Elasticsearch (fluentd-YYYYMMDD indices) and,
# for debugging, to container stdout.
<match *.**>
  @type copy
  <store>
    @type elasticsearch
    host elasticsearch
    port 9200
    logstash_format true
    logstash_prefix fluentd
    logstash_dateformat %Y%m%d
    include_tag_key true
    type_name access_log
    tag_key @log_name
    flush_interval 1s
  </store>
  <store>
    @type stdout
  </store>
</match>
[/codesyntax]
# Switch into the fluentd build context.
cd fluentd/
далее собираем наш образ:
# Build the fluentd image (tag: fluent) from the Dockerfile in this directory.
docker build -t fluent .
Если у нас есть прокси-сервер, то выполняем команду:
# Same build, but pass the proxy address into the build environment.
docker build -t fluent --build-arg http_proxy=http://адрес_прокси:3128 .
создаём архив, копируем его на остальные ноды и загружаем:
# Export the image to a tarball, copy it to the other swarm nodes and load
# it there (avoids the need for a shared registry).
docker save -o fluent.tar fluent
scp fluent.tar node2:~/
scp fluent.tar node3:~/
ssh -t node2 "docker load -i fluent.tar"
ssh -t node3 "docker load -i fluent.tar"
То же самое проделываем с образом куратора:
# Build the curator image from its own context; --no-cache forces a clean build.
cd curator/
docker build -t curator --no-cache .
Если есть прокси, то используем команду, в которой указан как http-, так и https-прокси:
# Proxy variant: pass both http and https proxy settings as build args.
docker build -t curator --no-cache --build-arg http_proxy=http://адрес_прокси:3128 --build-arg https_proxy=http://адрес_прокси:3128 .
создаём архив, копируем его на остальные ноды и загружаем:
# Export the curator image, copy it to the other swarm nodes and load it there.
docker save -o curator.tar curator
scp curator.tar node2:~/
scp curator.tar node3:~/
ssh -t node2 "docker load -i curator.tar"
ssh -t node3 "docker load -i curator.tar"
далее создаём основной конфиг для деплоя:
cat docker-stack-proxy.yml
[codesyntax lang="php" blockstate="collapsed"]
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 |
version: '3.3'

volumes:
  elastic:
    driver: "rexray/rbd:latest"
    driver_opts:
      size: 3

services:
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch-oss:6.4.0
    ports:
      - "9200:9200"
      - "9300:9300"
    configs:
      - source: elastic_config
        target: /usr/share/elasticsearch/config/elasticsearch.yml
    volumes:
      - elastic:/usr/share/elasticsearch
    environment:
      # JVM heap size for Elasticsearch.
      ES_JAVA_OPTS: "-Xmx256m -Xms256m"
    networks:
      - efk
    deploy:
      mode: replicated
      replicas: 1

  fluentd:
    image: fluent
    ports:
      - "24224:24224"
      - "24224:24224/udp"
      - "42185:42185"
      - "42185:42185/udp"
    configs:
      - source: fluent_config
        # NOTE: swarm config targets must be absolute paths inside the
        # container; the original used the relative './fluentd/etc/fluent.conf'.
        target: /fluentd/etc/fluent.conf
    # (The original set LS_JAVA_OPTS here — a Logstash JVM variable that
    # Fluentd, being Ruby, ignores — so it is dropped.)
    networks:
      - efk
    deploy:
      mode: replicated
      replicas: 1

  kibana:
    image: docker.elastic.co/kibana/kibana-oss:6.4.0
    ports:
      - "5601"
    configs:
      - source: kibana_config
        target: /usr/share/kibana/config/kibana.yml
    networks:
      - efk
      - proxy
    deploy:
      mode: replicated
      replicas: 1
      labels:
        # Docker Flow Proxy labels: publish kibana on kibana.test.ru:5601.
        - com.df.notify=true
        - com.df.serviceDomain=kibana.test.ru
        - com.df.port=5601

  curator:
    image: curator
    configs:
      - source: curator_action
        target: /etc/curator/action_file.yml
      - source: curator_config
        target: /etc/curator/config.yml
    environment:
      ELASTICSEARCH_HOST: elasticsearch
      ELASTICSEARCH_PORT: 9200
      # go-cron: run curator every minute.
      SCHEDULE: "* * * * *"
      COMMAND: "curator --config /etc/curator/config.yml /etc/curator/action_file.yml"
    networks:
      - efk
    deploy:
      mode: replicated
      replicas: 1

configs:
  elastic_config:
    file: ./elasticsearch/config/elasticsearch.yml
  fluent_config:
    file: ./fluentd/config/fluent.conf
  kibana_config:
    file: ./kibana/config/kibana.yml
  curator_action:
    file: ./curator/action_file.yml
  curator_config:
    file: ./curator/config.yml

networks:
  efk:
    driver: overlay
    attachable: true
  proxy:
    external: true
[/codesyntax]
Рассмотрим подробнее, что происходит в stack-файле:
environment:
ES_JAVA_OPTS: "-Xmx256m -Xms256m"
тут задаётся переменная, в которой мы указываем количество выделяемой оперативной памяти для приложения
configs:
- source: elastic_config
target: /usr/share/elasticsearch/config/elasticsearch.yml
тут мы указываем что конфиг из переменной elastic_config кладём в контейнер по пути: /usr/share/elasticsearch/config/elasticsearch.yml
configs:
elastic_config:
file: ./elasticsearch/config/elasticsearch.yml
тут мы указываем, откуда конфигу elastic_config взять файл на хост-машине
environment:
ELASTICSEARCH_HOST: elasticsearch
ELASTICSEARCH_PORT: 9200
SCHEDULE: "* * * * *"
COMMAND: "curator --config /etc/curator/config.yml /etc/curator/action_file.yml"
тут мы указываем переменные для сервиса curator и добавляем в cron задание на выполнение каждую минуту, с указанием команды.