Commit 82003051 authored by Pavel Taskov

docker configuration upload

parent 5165a826
ELK_VERSION=7.4.1
# Declare files that will always have LF line endings on checkout.
*.sh text eol=lf
<!--
Because we focus purely on integrating the Elastic stack using Docker and not on the individual stack components themselves, we kindly ask our users to submit questions about Elastic products in the Elastic Discussion Forums @ https://discuss.elastic.co/.
General questions regarding this project can be asked in the docker-elk Gitter chat room @ https://gitter.im/deviantony/docker-elk.
-->
### Problem description
<!-- Be as descriptive as possible regarding the encountered issue versus the expected outcome. -->
### Extra information
<!-- Please include the following information in your issue report. -->
#### Stack configuration
<!-- Detail all performed configuration changes, including to Dockerfiles. -->
#### Docker setup
```
<!-- Replace this comment with the full output of the `docker version` command. -->
```
```
<!-- Replace this comment with the full output of the `docker-compose version` command. -->
```
#### Docker logs
```
<!-- Replace this comment with the full output of the `docker-compose logs` command. -->
```
language: minimal

services: docker

env:
  - DOCKER_COMPOSE_VERSION=1.14.0

before_install:
  - sudo apt-get update
  - sudo apt-get install -y expect jq

install:
  # Install Docker Compose
  - curl -L "https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-$(uname -s)-$(uname -m)" -o "$HOME/bin/docker-compose"
  - chmod +x "$HOME/bin/docker-compose"
  - docker-compose --version

before_script:
  # Build images
  - docker-compose build

  # Use built-in users
  - sed -i 's/\(elasticsearch.username:\) elastic/\1 kibana/g' kibana/config/kibana.yml
  - sed -i 's/\(xpack.monitoring.elasticsearch.username:\) elastic/\1 logstash_system/g' logstash/config/logstash.yml

script:
  # Compose
  - docker-compose up -d elasticsearch
  - sleep 30
  - .travis/elasticsearch-setup-passwords.exp
  - docker-compose up -d
  - .travis/run-tests.sh
  - docker-compose ps
  - docker-compose logs elasticsearch
  - docker-compose logs kibana
  - docker-compose logs logstash
  - docker-compose down -v

  # Swarm
  - docker swarm init
  - docker stack deploy -c ./docker-stack.yml elk
  - docker service scale elk_kibana=0 --detach=false
  - docker service scale elk_logstash=0 --detach=false
  - sleep 40
  - .travis/elasticsearch-setup-passwords.exp swarm
  - docker service scale elk_kibana=1 --detach=false
  - docker service scale elk_logstash=1 --detach=false
  - .travis/run-tests.sh swarm
  - docker stack services elk
  - docker service logs elk_elasticsearch
  - docker service logs elk_kibana
  - docker service logs elk_logstash
  - docker stack rm elk
#!/usr/bin/expect -f
# List of expected users with dummy password
set user "(elastic|apm_system|kibana|logstash_system|beats_system|remote_monitoring_user)"
set password "changeme"
# Find elasticsearch container id
set MODE [lindex $argv 0]
if { [string match "swarm" $MODE] } {
  set cid [exec docker ps -q -f label=com.docker.swarm.service.name=elk_elasticsearch]
} else {
  set cid [exec docker ps -q -f label=com.docker.compose.service=elasticsearch]
}

set cmd "docker exec -it $cid bin/elasticsearch-setup-passwords interactive -s -b"

spawn {*}$cmd

expect {
  -re "(E|Ree)nter password for \\\[$user\\\]: " {
    send "$password\r"
    exp_continue
  }
  eof
}
lassign [wait] pid spawnid os_error_flag value
exit $value
#!/usr/bin/env bash
set -eu
set -o pipefail
function log {
  echo -e "\n[+] $1\n"
}

function poll_ready {
  local svc=$1
  local url=$2

  local -a args=( '-s' '-D-' '-w' '%{http_code}' "$url" )
  if [ "$#" -ge 3 ]; then
    args+=( '-u' "$3" )
  fi

  local label
  if [ "$MODE" == "swarm" ]; then
    label="com.docker.swarm.service.name=elk_${svc}"
  else
    label="com.docker.compose.service=${svc}"
  fi

  local -i result=1
  local cid
  local output

  # retry for max 90s (18*5s)
  for _ in $(seq 1 18); do
    cid="$(docker ps -q -f label="$label")"
    if [ -z "${cid:-}" ]; then
      echo "Container exited"
      return 1
    fi

    set +e
    output="$(curl "${args[@]}")"
    set -e
    if [ "${output: -3}" -eq 200 ]; then
      result=0
      break
    fi

    echo -n '.'
    sleep 5
  done

  echo -e "\n${output::-3}"

  return $result
}

declare MODE=""
if [ "$#" -ge 1 ]; then
  MODE=$1
fi
log 'Waiting for Elasticsearch readiness'
poll_ready elasticsearch 'http://localhost:9200/' 'elastic:changeme'
log 'Waiting for Kibana readiness'
poll_ready kibana 'http://localhost:5601/api/status' 'kibana:changeme'
log 'Waiting for Logstash readiness'
poll_ready logstash 'http://localhost:9600/_node/pipelines/main?pretty'
log 'Creating Logstash index pattern in Kibana'
source .env
curl -X POST -D- 'http://localhost:5601/api/saved_objects/index-pattern' \
  -s -w '\n' \
  -H 'Content-Type: application/json' \
  -H "kbn-version: ${ELK_VERSION}" \
  -u elastic:changeme \
  -d '{"attributes":{"title":"logstash-*","timeFieldName":"@timestamp"}}'
log 'Searching index pattern via Kibana API'
response="$(curl 'http://localhost:5601/api/saved_objects/_find?type=index-pattern' -u elastic:changeme)"
echo "$response"
count="$(jq -rn --argjson data "${response}" '$data.total')"
if [[ $count -ne 1 ]]; then
  echo "Expected 1 index pattern, got ${count}"
  exit 1
fi
log 'Sending message to Logstash TCP input'
echo 'dockerelk' | nc localhost 5000
sleep 1
curl -X POST 'http://localhost:9200/_refresh' -u elastic:changeme \
  -s -w '\n'
log 'Searching message in Elasticsearch'
response="$(curl 'http://localhost:9200/_count?q=message:dockerelk&pretty' -u elastic:changeme)"
echo "$response"
count="$(jq -rn --argjson data "${response}" '$data.count')"
if [[ $count -ne 1 ]]; then
  echo "Expected 1 document, got ${count}"
  exit 1
fi
The MIT License (MIT)
Copyright (c) 2015 Anthony Lapenna
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
version: '3.2'

services:
  elasticsearch:
    build:
      context: elasticsearch/
      args:
        ELK_VERSION: $ELK_VERSION
    volumes:
      - type: bind
        source: ./elasticsearch/config/elasticsearch.yml
        target: /usr/share/elasticsearch/config/elasticsearch.yml
        read_only: true
      - type: volume
        source: elasticsearch
        target: /usr/share/elasticsearch/data
    ports:
      - "9200:9200"
      - "9300:9300"
    environment:
      ES_JAVA_OPTS: "-Xmx256m -Xms256m"
      ELASTIC_PASSWORD: changeme
    networks:
      - elk

  logstash:
    build:
      context: logstash/
      args:
        ELK_VERSION: $ELK_VERSION
    volumes:
      - type: bind
        source: ./logstash/config/logstash.yml
        target: /usr/share/logstash/config/logstash.yml
        read_only: true
      - type: bind
        source: ./logstash/pipeline
        target: /usr/share/logstash/pipeline
        read_only: true
    ports:
      - "5000:5000"
      - "9600:9600"
    environment:
      LS_JAVA_OPTS: "-Xmx256m -Xms256m"
    networks:
      - elk
    depends_on:
      - elasticsearch

  kibana:
    build:
      context: kibana/
      args:
        ELK_VERSION: $ELK_VERSION
    volumes:
      - type: bind
        source: ./kibana/config/kibana.yml
        target: /usr/share/kibana/config/kibana.yml
        read_only: true
    ports:
      - "5601:5601"
    networks:
      - elk
    depends_on:
      - elasticsearch

networks:
  elk:
    driver: bridge

volumes:
  elasticsearch:
version: '3.3'

services:
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.4.1
    ports:
      - "9200:9200"
      - "9300:9300"
    configs:
      - source: elastic_config
        target: /usr/share/elasticsearch/config/elasticsearch.yml
    environment:
      ES_JAVA_OPTS: "-Xmx256m -Xms256m"
      ELASTIC_PASSWORD: changeme
    networks:
      - elk
    deploy:
      mode: replicated
      replicas: 1

  logstash:
    image: docker.elastic.co/logstash/logstash:7.4.1
    ports:
      - "5000:5000"
      - "9600:9600"
    configs:
      - source: logstash_config
        target: /usr/share/logstash/config/logstash.yml
      - source: logstash_pipeline
        target: /usr/share/logstash/pipeline/logstash.conf
    environment:
      LS_JAVA_OPTS: "-Xmx256m -Xms256m"
    networks:
      - elk
    deploy:
      mode: replicated
      replicas: 1

  kibana:
    image: docker.elastic.co/kibana/kibana:7.4.1
    ports:
      - "5601:5601"
    configs:
      - source: kibana_config
        target: /usr/share/kibana/config/kibana.yml
    networks:
      - elk
    deploy:
      mode: replicated
      replicas: 1

configs:
  elastic_config:
    file: ./elasticsearch/config/elasticsearch.yml
  logstash_config:
    file: ./logstash/config/logstash.yml
  logstash_pipeline:
    file: ./logstash/pipeline/logstash.conf
  kibana_config:
    file: ./kibana/config/kibana.yml

networks:
  elk:
    driver: overlay
ARG ELK_VERSION
# https://github.com/elastic/elasticsearch-docker
FROM docker.elastic.co/elasticsearch/elasticsearch:${ELK_VERSION}
# Add your elasticsearch plugins setup here
# Example: RUN elasticsearch-plugin install analysis-icu
---
## Default Elasticsearch configuration from Elasticsearch base image.
## https://github.com/elastic/elasticsearch/blob/master/distribution/docker/src/docker/config/elasticsearch.yml
#
cluster.name: "docker-cluster"
network.host: 0.0.0.0
## Use single node discovery in order to disable production mode and avoid bootstrap checks
## see https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks.html
#
discovery.type: single-node
## X-Pack settings
## see https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-xpack.html
#
xpack.license.self_generated.type: trial
xpack.security.enabled: false
xpack.monitoring.collection.enabled: true
Third-party extensions that enable extra integrations with the ELK stack.
ARG ELK_VERSION
FROM docker.elastic.co/apm/apm-server:${ELK_VERSION}
# APM Server extension
Adds a container for Elastic APM Server. It forwards captured errors and traces to Elasticsearch, enabling their
visualisation in Kibana.
## Usage
If you want to include the APM server, run Docker Compose from the root of the repository with an additional command
line argument referencing the `apm-server-compose.yml` file:
```console
$ docker-compose -f docker-compose.yml -f extensions/apm-server/apm-server-compose.yml up
```
## Connecting an agent to APM-Server
The most basic configuration to send traces to APM server is to specify the `SERVICE_NAME` and `SERVICE_URL`. Here is an
example Python FLASK configuration:
```python
import elasticapm
from elasticapm.contrib.flask import ElasticAPM
from flask import Flask
app = Flask(__name__)
app.config['ELASTIC_APM'] = {
    # Set required service name. Allowed characters:
    # a-z, A-Z, 0-9, -, _, and space
    'SERVICE_NAME': 'PYTHON_FLASK_TEST_APP',

    # Set custom APM Server URL (default: http://localhost:8200)
    'SERVER_URL': 'http://apm-server:8200',

    'DEBUG': True,
}
```
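With this configuration in place, the agent still has to be attached to the Flask application. A minimal sketch using
the `elasticapm` package's Flask integration (the `/boom` route is purely illustrative):

```python
from elasticapm.contrib.flask import ElasticAPM
from flask import Flask

app = Flask(__name__)
app.config['ELASTIC_APM'] = {
    'SERVICE_NAME': 'PYTHON_FLASK_TEST_APP',
    'SERVER_URL': 'http://apm-server:8200',
}

# Instrument the app; unhandled exceptions are reported to the APM Server automatically.
apm = ElasticAPM(app)

@app.route('/boom')
def boom():
    # Visiting /boom should produce an error visible in the APM UI in Kibana.
    raise RuntimeError('test error for APM')
```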
More configuration settings can be found under the **Configuration** section for each language:
https://www.elastic.co/guide/en/apm/agent/index.html
## Checking connectivity and importing default APM dashboards
From the Kibana Dashboard:

1. Click the `Add APM` button under the _Add Data to Kibana_ section.
2. Ignore the install instructions and press the `Check APM Server status` button.
3. Press the `Check agent status` button.
4. Press `Load Kibana objects` to import the default dashboards.
5. Finally, press the `APM dashboard` button at the bottom right.
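Before stepping through the Kibana UI, a quick way to check that the APM Server container is actually up is to query
its root endpoint, which should return a small JSON document including the server version (this assumes the default
`8200` port mapping from `apm-server-compose.yml`):

```console
$ curl http://localhost:8200/
```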
## See also
[Running APM Server on Docker](https://www.elastic.co/guide/en/apm/server/current/running-on-docker.html)
version: '3.2'

services:
  apm-server:
    build:
      context: extensions/apm-server/
      args:
        ELK_VERSION: $ELK_VERSION
    volumes:
      - ./extensions/apm-server/config/apm-server.yml:/usr/share/apm-server/apm-server.yml:ro
    ports:
      - "8200:8200"
    networks:
      - elk
    depends_on:
      - elasticsearch
apm-server:
  host: 0.0.0.0:8200

output:
  elasticsearch:
    hosts: ['http://elasticsearch:9200']
    username: elastic
    password: changeme
FROM alpine:3.8
ENV CURATOR_VERSION=5.5.4
RUN apk --update add --no-cache tini python py-pip \
&& pip install elasticsearch-curator==${CURATOR_VERSION}
COPY entrypoint.sh /
WORKDIR /usr/share/curator
COPY config ./config
ENTRYPOINT ["/entrypoint.sh"]
# Curator
Elasticsearch Curator helps you curate or manage your indices.
## Usage
If you want to include the Curator extension, run Docker Compose from the root of the repository with an additional
command line argument referencing the `curator-compose.yml` file:
```console
$ docker-compose -f docker-compose.yml -f extensions/curator/curator-compose.yml up
```
All configuration files are available in the `config/` directory.
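To trigger a one-off run without waiting for cron, you can bypass the image's crond entrypoint and call Curator
directly (a sketch; `--dry-run` makes Curator only log what it would delete):

```console
$ docker-compose -f docker-compose.yml -f extensions/curator/curator-compose.yml \
    run --rm --entrypoint /usr/bin/curator curator \
    --config /usr/share/curator/config/curator.yml --dry-run \
    /usr/share/curator/config/delete_log_files_curator.yml
```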
## Documentation
https://github.com/elastic/curator
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
client:
  hosts:
    - ${ELASTICSEARCH_HOST}
  port: 9200
  url_prefix:
  use_ssl: False
  certificate:
  client_cert:
  client_key:
  aws_key:
  aws_secret_key:
  aws_region:
  ssl_no_validate: False
  http_auth:
  timeout: 30
  master_only: False

logging:
  loglevel: DEBUG
  logfile:
  logformat: default
actions:
  1:
    action: delete_indices
    description: >-
      Delete indices. Find which to delete by first limiting the list to logstash-
      prefixed indices. Then further filter those to prevent deletion of anything
      less than ${UNIT_COUNT} days old. Ignore the error if the filter does not result in an
      actionable list of indices (ignore_empty_list) and exit cleanly.
    options:
      ignore_empty_list: True
      disable_action: False
    filters:
      - filtertype: pattern
        kind: prefix
        value: logstash-
      - filtertype: age
        source: creation_date
        direction: older
        unit: days
        unit_count: ${UNIT_COUNT}
version: '3.2'

services:
  curator:
    build:
      context: extensions/curator/
    environment:
      ELASTICSEARCH_HOST: elasticsearch
      CRON: 0 0 * * *
      CONFIG_FILE: /usr/share/curator/config/curator.yml
      COMMAND: /usr/share/curator/config/delete_log_files_curator.yml
      UNIT_COUNT: 2
    networks:
      - elk
    depends_on:
      - elasticsearch
#!/bin/sh
echo "$CRON /usr/bin/curator --config ${CONFIG_FILE} ${COMMAND}" >>/etc/crontabs/root
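# With the defaults from curator-compose.yml, the appended crontab line reads:
#   0 0 * * * /usr/bin/curator --config /usr/share/curator/config/curator.yml /usr/share/curator/config/delete_log_files_curator.yml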
# https://github.com/krallin/tini/blob/master/README.md#subreaping
tini -s -- crond -f -d 8 -l 8
# uses ONBUILD instructions described here:
# https://github.com/gliderlabs/logspout/tree/master/custom
FROM gliderlabs/logspout:master
ENV SYSLOG_FORMAT rfc3164
# Logspout extension
Logspout collects all Docker logs using the Docker logs API, and forwards them to Logstash without any additional
configuration.
## Usage
If you want to include the Logspout extension, run Docker Compose from the root of the repository with an additional
command line argument referencing the `logspout-compose.yml` file:
```console
$ docker-compose -f docker-compose.yml -f extensions/logspout/logspout-compose.yml up
```
In your Logstash pipeline configuration, enable the `udp` input and set the input codec to `json`:
```
input {
udp {
port => 5000
codec => json
}
}
```
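To check end to end that container logs reach Elasticsearch, start any container that writes a distinctive line to
stdout, give Logstash a few seconds to flush, then count matching documents (a sketch, assuming the `udp` input above
and the default `elastic:changeme` credentials):

```console
$ docker run --rm alpine echo 'logspouttest'
$ curl 'http://localhost:9200/_count?q=message:logspouttest' -u elastic:changeme
```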
## Documentation
https://github.com/looplab/logspout-logstash
#!/bin/sh
# unmodified from:
# https://github.com/gliderlabs/logspout/blob/67ee3831cbd0594361bb3381380c65bdbeb3c20f/custom/build.sh
set -e
apk add --update go git mercurial build-base
mkdir -p /go/src/github.com/gliderlabs
cp -r /src /go/src/github.com/gliderlabs/logspout
cd /go/src/github.com/gliderlabs/logspout
export GOPATH=/go
go get
go build -ldflags "-X main.Version=$1" -o /bin/logspout
apk del go git mercurial build-base
rm -rf /go /var/cache/apk/* /root/.glide
# backwards compatibility
ln -fs /tmp/docker.sock /var/run/docker.sock
version: '3.2'

services:
  logspout:
    build:
      context: extensions/logspout
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
    environment:
      ROUTE_URIS: logstash://logstash:5000
      LOGSTASH_TAGS: docker-elk
    networks:
      - elk
    depends_on:
      - logstash
    restart: on-failure
package main
// installs the Logstash adapter for Logspout, and required dependencies
// https://github.com/looplab/logspout-logstash
import (
	_ "github.com/looplab/logspout-logstash"
	_ "github.com/gliderlabs/logspout/transports/udp"
	_ "github.com/gliderlabs/logspout/transports/tcp"
)
ARG ELK_VERSION
# https://github.com/elastic/kibana-docker
FROM docker.elastic.co/kibana/kibana:${ELK_VERSION}
# Add your kibana plugins setup here
# Example: RUN kibana-plugin install <name|url>
---
## Default Kibana configuration from Kibana base image.
## https://github.com/elastic/kibana/blob/master/src/dev/build/tasks/os_packages/docker_generator/templates/kibana_yml.template.js
#
server.name: kibana
server.host: "0"
elasticsearch.hosts: [ "http://elasticsearch:9200" ]
xpack.monitoring.ui.container.elasticsearch.enabled: true
## X-Pack security credentials
#
elasticsearch.username: elastic
elasticsearch.password: changeme
ARG ELK_VERSION
# https://github.com/elastic/logstash-docker
FROM docker.elastic.co/logstash/logstash:${ELK_VERSION}