Commit d03da149 authored by Pavel Taskov

base docker compose

parent 82003051
ELK_VERSION=7.4.1
# Declare files that will always have LF line endings on checkout.
*.sh text eol=lf
<!--
Because we focus purely on integrating the Elastic stack using Docker and not on the individual stack components themselves, we kindly ask our users to submit questions about Elastic products in the Elastic Discussion Forums @ https://discuss.elastic.co/.
General questions regarding this project can be asked in the docker-elk Gitter chat room @ https://gitter.im/deviantony/docker-elk.
-->
### Problem description
<!-- Be as descriptive as possible regarding the encountered issue versus the expected outcome. -->
### Extra information
<!-- Please include the following information in your issue report. -->
#### Stack configuration
<!-- Detail all performed configuration changes, including to Dockerfiles. -->
#### Docker setup
```
<!-- Replace this comment with the full output of the `docker version` command. -->
```
```
<!-- Replace this comment with the full output of the `docker-compose version` command. -->
```
#### Docker logs
```
<!-- Replace this comment with the full output of the `docker-compose logs` command. -->
```
language: minimal

services: docker

env:
  - DOCKER_COMPOSE_VERSION=1.14.0

before_install:
  - sudo apt-get update
  - sudo apt-get install -y expect jq

install:
  # Install Docker Compose
  - curl -L "https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-$(uname -s)-$(uname -m)" -o "$HOME/bin/docker-compose"
  - chmod +x "$HOME/bin/docker-compose"
  - docker-compose --version

before_script:
  # Build images
  - docker-compose build

  # Use built-in users
  - sed -i 's/\(elasticsearch.username:\) elastic/\1 kibana/g' kibana/config/kibana.yml
  - sed -i 's/\(xpack.monitoring.elasticsearch.username:\) elastic/\1 logstash_system/g' logstash/config/logstash.yml

script:
  # Compose
  - docker-compose up -d elasticsearch
  - sleep 30
  - .travis/elasticsearch-setup-passwords.exp
  - docker-compose up -d
  - .travis/run-tests.sh
  - docker-compose ps
  - docker-compose logs elasticsearch
  - docker-compose logs kibana
  - docker-compose logs logstash
  - docker-compose down -v

  # Swarm
  - docker swarm init
  - docker stack deploy -c ./docker-stack.yml elk
  - docker service scale elk_kibana=0 --detach=false
  - docker service scale elk_logstash=0 --detach=false
  - sleep 40
  - .travis/elasticsearch-setup-passwords.exp swarm
  - docker service scale elk_kibana=1 --detach=false
  - docker service scale elk_logstash=1 --detach=false
  - .travis/run-tests.sh swarm
  - docker stack services elk
  - docker service logs elk_elasticsearch
  - docker service logs elk_kibana
  - docker service logs elk_logstash
  - docker stack rm elk
#!/usr/bin/expect -f

# List of expected users with dummy password
set user "(elastic|apm_system|kibana|logstash_system|beats_system|remote_monitoring_user)"
set password "changeme"

# Find elasticsearch container id
set MODE [lindex $argv 0]
if { [string match "swarm" $MODE] } {
	set cid [exec docker ps -q -f label=com.docker.swarm.service.name=elk_elasticsearch]
} else {
	set cid [exec docker ps -q -f label=com.docker.compose.service=elasticsearch]
}

set cmd "docker exec -it $cid bin/elasticsearch-setup-passwords interactive -s -b"

spawn {*}$cmd

expect {
	-re "(E|Ree)nter password for \\\[$user\\\]: " {
		send "$password\r"
		exp_continue
	}
	eof
}

lassign [wait] pid spawnid os_error_flag value
exit $value
#!/usr/bin/env bash
set -eu
set -o pipefail
function log {
	echo -e "\n[+] $1\n"
}

function poll_ready {
	local svc=$1
	local url=$2

	local -a args=( '-s' '-D-' '-w' '%{http_code}' "$url" )
	if [ "$#" -ge 3 ]; then
		args+=( '-u' "$3" )
	fi

	local label
	if [ "$MODE" == "swarm" ]; then
		label="com.docker.swarm.service.name=elk_${svc}"
	else
		label="com.docker.compose.service=${svc}"
	fi

	local -i result=1
	local cid
	local output

	# retry for max 90s (18*5s)
	for _ in $(seq 1 18); do
		cid="$(docker ps -q -f label="$label")"
		if [ -z "${cid:-}" ]; then
			echo "Container exited"
			return 1
		fi

		set +e
		output="$(curl "${args[@]}")"
		set -e
		if [ "${output: -3}" -eq 200 ]; then
			result=0
			break
		fi

		echo -n '.'
		sleep 5
	done

	echo -e "\n${output::-3}"

	return $result
}

declare MODE=""
if [ "$#" -ge 1 ]; then
	MODE=$1
fi
log 'Waiting for Elasticsearch readiness'
poll_ready elasticsearch 'http://localhost:9200/' 'elastic:changeme'
log 'Waiting for Kibana readiness'
poll_ready kibana 'http://localhost:5601/api/status' 'kibana:changeme'
log 'Waiting for Logstash readiness'
poll_ready logstash 'http://localhost:9600/_node/pipelines/main?pretty'
log 'Creating Logstash index pattern in Kibana'
source .env
curl -X POST -D- 'http://localhost:5601/api/saved_objects/index-pattern' \
	-s -w '\n' \
	-H 'Content-Type: application/json' \
	-H "kbn-version: ${ELK_VERSION}" \
	-u elastic:changeme \
	-d '{"attributes":{"title":"logstash-*","timeFieldName":"@timestamp"}}'
log 'Searching index pattern via Kibana API'
response="$(curl 'http://localhost:5601/api/saved_objects/_find?type=index-pattern' -u elastic:changeme)"
echo "$response"
count="$(jq -rn --argjson data "${response}" '$data.total')"
if [[ $count -ne 1 ]]; then
	echo "Expected 1 index pattern, got ${count}"
	exit 1
fi
log 'Sending message to Logstash TCP input'
echo 'dockerelk' | nc localhost 5000
sleep 1
curl -X POST 'http://localhost:9200/_refresh' -u elastic:changeme \
	-s -w '\n'
log 'Searching message in Elasticsearch'
response="$(curl 'http://localhost:9200/_count?q=message:dockerelk&pretty' -u elastic:changeme)"
echo "$response"
count="$(jq -rn --argjson data "${response}" '$data.count')"
if [[ $count -ne 1 ]]; then
	echo "Expected 1 document, got ${count}"
	exit 1
fi
The MIT License (MIT)
Copyright (c) 2015 Anthony Lapenna
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
# Elastic stack (ELK) on Docker
[![Join the chat at https://gitter.im/deviantony/docker-elk](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/deviantony/docker-elk?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[![Elastic Stack version](https://img.shields.io/badge/ELK-7.4.1-blue.svg?style=flat)](https://github.com/deviantony/docker-elk/issues/441)
[![Build Status](https://api.travis-ci.org/deviantony/docker-elk.svg?branch=master)](https://travis-ci.org/deviantony/docker-elk)
Run the latest version of the [Elastic stack][elk-stack] with Docker and Docker Compose.
It gives you the ability to analyze any data set by using the searching/aggregation capabilities of Elasticsearch and
the visualization power of Kibana.
> :information_source: The Docker images backing this stack include [Stack Features][stack-features] (formerly X-Pack)
with [paid features][paid-features] enabled by default (see [How to disable paid
features](#how-to-disable-paid-features) to disable them). The [trial license][trial-license] is valid for 30 days.
Based on the official Docker images from Elastic:
* [Elasticsearch](https://github.com/elastic/elasticsearch/tree/master/distribution/docker)
* [Logstash](https://github.com/elastic/logstash/tree/master/docker)
* [Kibana](https://github.com/elastic/kibana/tree/master/src/dev/build/tasks/os_packages/docker_generator)
Other available stack variants:
* [`searchguard`](https://github.com/deviantony/docker-elk/tree/searchguard): Search Guard support
## Contents
1. [Requirements](#requirements)
* [Host setup](#host-setup)
* [SELinux](#selinux)
* [Docker for Desktop](#docker-for-desktop)
* [Windows](#windows)
* [macOS](#macos)
2. [Usage](#usage)
* [Bringing up the stack](#bringing-up-the-stack)
* [Cleanup](#cleanup)
* [Initial setup](#initial-setup)
* [Setting up user authentication](#setting-up-user-authentication)
* [Injecting data](#injecting-data)
* [Default Kibana index pattern creation](#default-kibana-index-pattern-creation)
3. [Configuration](#configuration)
* [How to configure Elasticsearch](#how-to-configure-elasticsearch)
* [How to configure Kibana](#how-to-configure-kibana)
* [How to configure Logstash](#how-to-configure-logstash)
* [How to disable paid features](#how-to-disable-paid-features)
* [How to scale out the Elasticsearch cluster](#how-to-scale-out-the-elasticsearch-cluster)
4. [Extensibility](#extensibility)
* [How to add plugins](#how-to-add-plugins)
* [How to enable the provided extensions](#how-to-enable-the-provided-extensions)
5. [JVM tuning](#jvm-tuning)
* [How to specify the amount of memory used by a service](#how-to-specify-the-amount-of-memory-used-by-a-service)
* [How to enable a remote JMX connection to a service](#how-to-enable-a-remote-jmx-connection-to-a-service)
6. [Going further](#going-further)
* [Using a newer stack version](#using-a-newer-stack-version)
* [Plugins and integrations](#plugins-and-integrations)
* [Swarm mode](#swarm-mode)
## Requirements
### Host setup
* [Docker Engine](https://docs.docker.com/install/) version **17.05+**
* [Docker Compose](https://docs.docker.com/compose/install/) version **1.12.0+**
* 1.5 GB of RAM
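You can quickly check the versions installed on your host with the following commands:

```console
$ docker version --format '{{.Server.Version}}'
$ docker-compose version --short
```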
By default, the stack exposes the following ports:
* 5000: Logstash TCP input
* 9200: Elasticsearch HTTP
* 9300: Elasticsearch TCP transport
* 5601: Kibana
> :information_source: Elasticsearch's [bootstrap checks][bootstrap-checks] were purposely disabled to facilitate the
> setup of the Elastic stack in development environments. For production setups, we recommend users to set up their host
> according to the instructions from the Elasticsearch documentation: [Important System Configuration][es-sys-config].
### SELinux
On distributions where SELinux is enabled out of the box, you will need to either relabel the files or set SELinux to
Permissive mode for docker-elk to start properly. For example, on Red Hat and CentOS, the following command applies the
proper context:
```console
$ chcon -R system_u:object_r:admin_home_t:s0 docker-elk/
```
### Docker for Desktop
#### Windows
Ensure the [Shared Drives][win-shareddrives] feature is enabled for the `C:` drive.
#### macOS
The default Docker for Mac configuration allows mounting files from `/Users/`, `/Volumes/`, `/private/`, and `/tmp`
exclusively. Make sure the repository is cloned in one of those locations or follow the instructions from the
[documentation][mac-mounts] to add more locations.
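For example, cloning the repository somewhere under your home directory (which lives under `/Users/`) satisfies this
constraint:

```console
$ git clone https://github.com/deviantony/docker-elk.git ~/docker-elk
```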
## Usage
### Bringing up the stack
Clone this repository, then start the stack using Docker Compose:
```console
$ docker-compose up
```
You can also run all services in the background (detached mode) by adding the `-d` flag to the above command.
> :information_source: You must run `docker-compose build` first whenever you switch branches or update a base image.
If you are starting the stack for the very first time, please read the section below attentively.
### Cleanup
Elasticsearch data is persisted inside a volume by default.
In order to entirely shut down the stack and remove all persisted data, use the following Docker Compose command:
```console
$ docker-compose down -v
```
## Initial setup
### Setting up user authentication
> :information_source: Refer to [How to disable paid features](#how-to-disable-paid-features) to disable authentication.
The stack is pre-configured with the following **privileged** bootstrap user:
* user: *elastic*
* password: *changeme*
Although all stack components work out-of-the-box with this user, we strongly recommend using the unprivileged [built-in
users][builtin-users] instead for increased security. Passwords for these users must be initialized:
```console
$ docker-compose exec -T elasticsearch bin/elasticsearch-setup-passwords auto --batch
```
Passwords for all 6 built-in users will be randomly generated. Take note of them and replace the `elastic` username with
`kibana` and `logstash_system` inside the Kibana and Logstash configuration files respectively. See the
[Configuration](#configuration) section below.
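For instance, the credentials sections of both files would end up looking like this (the password values are
placeholders for the generated ones):

```yml
# kibana/config/kibana.yml
elasticsearch.username: kibana
elasticsearch.password: <your generated kibana password>

# logstash/config/logstash.yml
xpack.monitoring.elasticsearch.username: logstash_system
xpack.monitoring.elasticsearch.password: <your generated logstash_system password>
```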
> :information_source: Do not use the `logstash_system` user inside the Logstash *pipeline* file; it does not have
> sufficient permissions to create indices. Follow the instructions at [Configuring Security in Logstash][ls-security]
> to create a user with suitable roles.
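As a rough sketch of what those instructions entail, assuming the default `logstash-*` index pattern (refer to the
linked guide for the authoritative steps; the role and user names below are only examples):

```console
$ curl -X POST 'http://localhost:9200/_security/role/logstash_writer' \
    -H 'Content-Type: application/json' \
    -u elastic:<your generated elastic password> \
    -d '{"cluster":["manage_index_templates","monitor"],"indices":[{"names":["logstash-*"],"privileges":["write","create","create_index"]}]}'
$ curl -X POST 'http://localhost:9200/_security/user/logstash_internal' \
    -H 'Content-Type: application/json' \
    -u elastic:<your generated elastic password> \
    -d '{"password":"<a strong password>","roles":["logstash_writer"]}'
```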
Restart Kibana and Logstash to apply the passwords you just wrote to the configuration files.
```console
$ docker-compose restart kibana logstash
```
> :information_source: Learn more about the security of the Elastic stack at [Tutorial: Getting started with
> security][sec-tutorial].
### Injecting data
Give Kibana about a minute to initialize, then access the Kibana web UI by hitting
[http://localhost:5601](http://localhost:5601) with a web browser and use the following default credentials to log in:
* user: *elastic*
* password: *\<your generated elastic password>*
Now that the stack is running, you can go ahead and inject some log entries. The shipped Logstash configuration allows
you to send content via TCP:
```console
$ nc localhost 5000 < /path/to/logfile.log
```
You can also load the sample data provided by your Kibana installation.
### Default Kibana index pattern creation
When Kibana launches for the first time, it is not configured with any index pattern.
#### Via the Kibana web UI
> :information_source: You need to inject data into Logstash before being able to configure a Logstash index pattern via
the Kibana web UI. Then all you have to do is hit the *Create* button.
Refer to [Connect Kibana with Elasticsearch][connect-kibana] for detailed instructions about the index pattern
configuration.
#### On the command line
Create an index pattern via the Kibana API:
```console
$ curl -XPOST -D- 'http://localhost:5601/api/saved_objects/index-pattern' \
    -H 'Content-Type: application/json' \
    -H 'kbn-version: 7.4.1' \
    -u elastic:<your generated elastic password> \
    -d '{"attributes":{"title":"logstash-*","timeFieldName":"@timestamp"}}'
```
The created pattern will automatically be marked as the default index pattern as soon as the Kibana UI is opened for the first time.
## Configuration
> :information_source: Configuration is not dynamically reloaded; you will need to restart individual components after
any configuration change.
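For example, after editing `logstash/config/logstash.yml`:

```console
$ docker-compose restart logstash
```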
### How to configure Elasticsearch
The Elasticsearch configuration is stored in [`elasticsearch/config/elasticsearch.yml`][config-es].
You can also specify the options you want to override by setting environment variables inside the Compose file:
```yml
elasticsearch:
  environment:
    network.host: _non_loopback_
    cluster.name: my-cluster
```
Please refer to the following documentation page for more details about how to configure Elasticsearch inside Docker
containers: [Install Elasticsearch with Docker][es-docker].
### How to configure Kibana
The Kibana default configuration is stored in [`kibana/config/kibana.yml`][config-kbn].
It is also possible to map the entire `config` directory instead of a single file.
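A sketch of what such a mapping could look like in `docker-compose.yml`, replacing the single-file bind mount:

```yml
kibana:
  volumes:
    - type: bind
      source: ./kibana/config
      target: /usr/share/kibana/config
      read_only: true
```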
Please refer to the following documentation page for more details about how to configure Kibana inside Docker
containers: [Running Kibana on Docker][kbn-docker].
### How to configure Logstash
The Logstash configuration is stored in [`logstash/config/logstash.yml`][config-ls].
It is also possible to map the entire `config` directory instead of a single file; however, be aware that Logstash
expects a [`log4j2.properties`][log4j-props] file for its own logging.
Please refer to the following documentation page for more details about how to configure Logstash inside Docker
containers: [Configuring Logstash for Docker][ls-docker].
### How to disable paid features
Switch the value of Elasticsearch's `xpack.license.self_generated.type` option from `trial` to `basic` (see [License
settings][trial-license]).
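The resulting line in `elasticsearch/config/elasticsearch.yml` reads:

```yml
xpack.license.self_generated.type: basic
```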
### How to scale out the Elasticsearch cluster
Follow the instructions from the Wiki: [Scaling out Elasticsearch](https://github.com/deviantony/docker-elk/wiki/Elasticsearch-cluster)
## Extensibility
### How to add plugins
To add plugins to any ELK component you have to:

1. Add a `RUN` statement to the corresponding `Dockerfile` (e.g. `RUN logstash-plugin install logstash-filter-json`; see the sketch after this list)
2. Add the associated plugin code configuration to the service configuration (e.g. Logstash input/output)
3. Rebuild the images using the `docker-compose build` command
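Putting these steps together for Logstash, the corresponding `Dockerfile` might look like this sketch (the plugin name
is only an example):

```Dockerfile
ARG ELK_VERSION

# https://github.com/elastic/logstash-docker
FROM docker.elastic.co/logstash/logstash:${ELK_VERSION}

# example plugin installation; replace with the plugin(s) you need
RUN logstash-plugin install logstash-filter-json
```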
### How to enable the provided extensions
A few extensions are available inside the [`extensions`](extensions) directory. These extensions provide features which
are not part of the standard Elastic stack, but can be used to enrich it with extra integrations.
The documentation for these extensions is provided inside each individual subdirectory, on a per-extension basis. Some
of them require manual changes to the default ELK configuration.
## JVM tuning
### How to specify the amount of memory used by a service
By default, both Elasticsearch and Logstash start with [1/4 of the total host
memory](https://docs.oracle.com/javase/8/docs/technotes/guides/vm/gctuning/parallel.html#default_heap_size) allocated to
the JVM Heap Size.
The startup scripts for Elasticsearch and Logstash can append extra JVM options from the value of an environment
variable, allowing the user to adjust the amount of memory that can be used by each component:
| Service | Environment variable |
|---------------|----------------------|
| Elasticsearch | ES_JAVA_OPTS |
| Logstash | LS_JAVA_OPTS |
To accommodate environments where memory is scarce (Docker for Mac has only 2 GB available by default), the Heap Size
allocation is capped by default to 256MB per service in the `docker-compose.yml` file. If you want to override the
default JVM configuration, edit the matching environment variable(s) in the `docker-compose.yml` file.
For example, to increase the maximum JVM Heap Size for Logstash:
```yml
logstash:
  environment:
    LS_JAVA_OPTS: -Xmx1g -Xms1g
```
### How to enable a remote JMX connection to a service
As for the Java Heap memory (see above), you can specify JVM options to enable JMX and map the JMX port on the Docker
host.
Update the `{ES,LS}_JAVA_OPTS` environment variable with the following content (the JMX service is mapped to port
18080 in this example; you can change that). Do not forget to update the `-Djava.rmi.server.hostname` option with the
IP address of your Docker host (replace **DOCKER_HOST_IP**):
```yml
logstash:
  environment:
    LS_JAVA_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.port=18080 -Dcom.sun.management.jmxremote.rmi.port=18080 -Djava.rmi.server.hostname=DOCKER_HOST_IP -Dcom.sun.management.jmxremote.local.only=false
```
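You can then attach any JMX client to the mapped port, for example:

```console
$ jconsole DOCKER_HOST_IP:18080
```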
## Going further
### Using a newer stack version
To use a different Elastic Stack version than the one currently available in the repository, simply change the version
number inside the `.env` file, and rebuild the stack with:
```console
$ docker-compose build
$ docker-compose up
```
> :information_source: Always pay attention to the [upgrade instructions][upgrade] for each individual component before
performing a stack upgrade.
### Plugins and integrations
See the following Wiki pages:
* [External applications](https://github.com/deviantony/docker-elk/wiki/External-applications)
* [Popular integrations](https://github.com/deviantony/docker-elk/wiki/Popular-integrations)
### Swarm mode
Experimental support for Docker [Swarm mode][swarm-mode] is provided in the form of a `docker-stack.yml` file, which can
be deployed in an existing Swarm cluster using the following command:
```console
$ docker stack deploy -c docker-stack.yml elk
```
If all components get deployed without any error, the following command will show 3 running services:
```console
$ docker stack services elk
```
> :information_source: To scale Elasticsearch in Swarm mode, configure *zen* to use the DNS name `tasks.elasticsearch`
instead of `elasticsearch`.
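On Elasticsearch 7.x, one way to express this in `elasticsearch/config/elasticsearch.yml` is via the seed hosts
setting (a sketch; the bundled configuration uses single-node discovery, which you would remove first):

```yml
discovery.seed_hosts: tasks.elasticsearch
```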
[elk-stack]: https://www.elastic.co/elk-stack
[stack-features]: https://www.elastic.co/products/stack
[paid-features]: https://www.elastic.co/subscriptions
[trial-license]: https://www.elastic.co/guide/en/elasticsearch/reference/current/license-settings.html
[bootstrap-checks]: https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks.html
[es-sys-config]: https://www.elastic.co/guide/en/elasticsearch/reference/current/system-config.html
[win-shareddrives]: https://docs.docker.com/docker-for-windows/#shared-drives
[mac-mounts]: https://docs.docker.com/docker-for-mac/osxfs/
[builtin-users]: https://www.elastic.co/guide/en/x-pack/current/setting-up-authentication.html#built-in-users
[ls-security]: https://www.elastic.co/guide/en/logstash/current/ls-security.html
[sec-tutorial]: https://www.elastic.co/guide/en/elastic-stack-overview/current/security-getting-started.html
[connect-kibana]: https://www.elastic.co/guide/en/kibana/current/connect-to-elasticsearch.html
[config-es]: ./elasticsearch/config/elasticsearch.yml
[config-kbn]: ./kibana/config/kibana.yml
[config-ls]: ./logstash/config/logstash.yml
[es-docker]: https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html
[kbn-docker]: https://www.elastic.co/guide/en/kibana/current/docker.html
[ls-docker]: https://www.elastic.co/guide/en/logstash/current/docker-config.html
[log4j-props]: https://github.com/elastic/logstash/tree/7.3/docker/data/logstash/config
[esuser]: https://github.com/elastic/elasticsearch/blob/7.3/distribution/docker/src/docker/Dockerfile#L18-L19
[upgrade]: https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-upgrade.html
[swarm-mode]: https://docs.docker.com/engine/swarm/
version: '3.2'

services:
  elasticsearch:
    build:
      context: elasticsearch/
      args:
        ELK_VERSION: $ELK_VERSION
    volumes:
      - type: bind
        source: ./elasticsearch/config/elasticsearch.yml
        target: /usr/share/elasticsearch/config/elasticsearch.yml
        read_only: true
      - type: volume
        source: elasticsearch
        target: /usr/share/elasticsearch/data
    ports:
      - "9200:9200"
      - "9300:9300"
    environment:
      ES_JAVA_OPTS: "-Xmx256m -Xms256m"
      ELASTIC_PASSWORD: changeme
    networks:
      - elk

  logstash:
    build:
      context: logstash/
      args:
        ELK_VERSION: $ELK_VERSION
    volumes:
      - type: bind
        source: ./logstash/config/logstash.yml
        target: /usr/share/logstash/config/logstash.yml
        read_only: true
      - type: bind
        source: ./logstash/pipeline
        target: /usr/share/logstash/pipeline
        read_only: true
    ports:
      - "5000:5000"
      - "9600:9600"
    environment:
      LS_JAVA_OPTS: "-Xmx256m -Xms256m"
    networks:
      - elk
    depends_on:
      - elasticsearch

  kibana:
    build:
      context: kibana/
      args:
        ELK_VERSION: $ELK_VERSION
    volumes:
      - type: bind
        source: ./kibana/config/kibana.yml
        target: /usr/share/kibana/config/kibana.yml
        read_only: true
    ports:
      - "5601:5601"
    networks:
      - elk
    depends_on:
      - elasticsearch

networks:
  elk:
    driver: bridge

volumes:
  elasticsearch:
version: '3.3'

services:
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.4.1
    ports:
      - "9200:9200"
      - "9300:9300"
    configs:
      - source: elastic_config
        target: /usr/share/elasticsearch/config/elasticsearch.yml
    environment:
      ES_JAVA_OPTS: "-Xmx256m -Xms256m"
      ELASTIC_PASSWORD: changeme
    networks:
      - elk
    deploy:
      mode: replicated
      replicas: 1

  logstash:
    image: docker.elastic.co/logstash/logstash:7.4.1
    ports:
      - "5000:5000"
      - "9600:9600"
    configs:
      - source: logstash_config
        target: /usr/share/logstash/config/logstash.yml
      - source: logstash_pipeline
        target: /usr/share/logstash/pipeline/logstash.conf
    environment:
      LS_JAVA_OPTS: "-Xmx256m -Xms256m"
    networks:
      - elk
    deploy:
      mode: replicated
      replicas: 1

  kibana:
    image: docker.elastic.co/kibana/kibana:7.4.1
    ports:
      - "5601:5601"
    configs:
      - source: kibana_config
        target: /usr/share/kibana/config/kibana.yml
    networks:
      - elk
    deploy:
      mode: replicated
      replicas: 1

configs:
  elastic_config:
    file: ./elasticsearch/config/elasticsearch.yml
  logstash_config:
    file: ./logstash/config/logstash.yml
  logstash_pipeline:
    file: ./logstash/pipeline/logstash.conf
  kibana_config:
    file: ./kibana/config/kibana.yml

networks:
  elk:
    driver: overlay
ARG ELK_VERSION
# https://github.com/elastic/elasticsearch-docker
FROM docker.elastic.co/elasticsearch/elasticsearch:${ELK_VERSION}
# Add your elasticsearch plugins setup here
# Example: RUN elasticsearch-plugin install analysis-icu
---
## Default Elasticsearch configuration from Elasticsearch base image.
## https://github.com/elastic/elasticsearch/blob/master/distribution/docker/src/docker/config/elasticsearch.yml
#
cluster.name: "docker-cluster"
network.host: 0.0.0.0
## Use single node discovery in order to disable production mode and avoid bootstrap checks
## see https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks.html
#
discovery.type: single-node
## X-Pack settings
## see https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-xpack.html
#
xpack.license.self_generated.type: trial
xpack.security.enabled: false
xpack.monitoring.collection.enabled: true
Third-party extensions that enable extra integrations with the ELK stack.
ARG ELK_VERSION
FROM docker.elastic.co/apm/apm-server:${ELK_VERSION}
# APM Server extension
Adds a container for Elastic APM Server. It forwards captured errors and traces to Elasticsearch, enabling their
visualization in Kibana.
## Usage
If you want to include the APM server, run Docker Compose from the root of the repository with an additional command
line argument referencing the `apm-server-compose.yml` file:
```console
$ docker-compose -f docker-compose.yml -f extensions/apm-server/apm-server-compose.yml up
```
## Connecting an agent to APM-Server
The most basic configuration for sending traces to the APM server is to specify `SERVICE_NAME` and `SERVER_URL`. Here
is an example Python Flask configuration:
```python
import elasticapm
from elasticapm.contrib.flask import ElasticAPM
from flask import Flask

app = Flask(__name__)
app.config['ELASTIC_APM'] = {
    # Set required service name. Allowed characters:
    # a-z, A-Z, 0-9, -, _, and space
    'SERVICE_NAME': 'PYTHON_FLASK_TEST_APP',
    # Set custom APM Server URL (default: http://localhost:8200)
    'SERVER_URL': 'http://apm-server:8200',
    'DEBUG': True,
}
apm = ElasticAPM(app)
```
More configuration settings can be found under the **Configuration** section for each language:
https://www.elastic.co/guide/en/apm/agent/index.html
## Checking connectivity and importing default APM dashboards
From the Kibana Dashboard:
1. Click the *Add APM* button under the *Add Data to Kibana* section.
2. Ignore the install instructions and click the *Check APM Server status* button.
3. Click *Check agent status*.
4. Click *Load Kibana objects* to load the default dashboards.
5. Finally, click *APM dashboard* at the bottom right.
## See also
[Running APM Server on Docker](https://www.elastic.co/guide/en/apm/server/current/running-on-docker.html)
version: '3.2'

services:
  apm-server:
    build:
      context: extensions/apm-server/
      args:
        ELK_VERSION: $ELK_VERSION
    volumes:
      - ./extensions/apm-server/config/apm-server.yml:/usr/share/apm-server/apm-server.yml:ro
    ports:
      - "8200:8200"
    networks:
      - elk
    depends_on:
      - elasticsearch
apm-server:
  host: 0.0.0.0:8200

output:
  elasticsearch:
    hosts: ['http://elasticsearch:9200']
    username: elastic
    password: changeme
FROM alpine:3.8
ENV CURATOR_VERSION=5.5.4
RUN apk --update add --no-cache tini python py-pip \
&& pip install elasticsearch-curator==${CURATOR_VERSION}
COPY entrypoint.sh /
WORKDIR /usr/share/curator
COPY config ./config
ENTRYPOINT ["/entrypoint.sh"]
# Curator
Elasticsearch Curator helps you curate or manage your indices.
## Usage
If you want to include the Curator extension, run Docker Compose from the root of the repository with an additional
command line argument referencing the `curator-compose.yml` file:
```console
$ docker-compose -f docker-compose.yml -f extensions/curator/curator-compose.yml up
```
All configuration files are available in the `config/` directory.
## Documentation
https://github.com/elastic/curator
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
client:
  hosts:
    - ${ELASTICSEARCH_HOST}
  port: 9200
  url_prefix:
  use_ssl: False
  certificate:
  client_cert:
  client_key:
  aws_key:
  aws_secret_key:
  aws_region:
  ssl_no_validate: False
  http_auth:
  timeout: 30
  master_only: False

logging:
  loglevel: DEBUG
  logfile:
  logformat: default
actions:
  1:
    action: delete_indices
    description: >-
      Delete indices. Find which to delete by first limiting the list to logstash-
      prefixed indices. Then further filter those to prevent deletion of anything
      less than ${UNIT_COUNT} days old. Ignore the error if the filter does not result in an
      actionable list of indices (ignore_empty_list) and exit cleanly.
    options:
      ignore_empty_list: True
      disable_action: False
    filters:
      - filtertype: pattern
        kind: prefix
        value: logstash-
      - filtertype: age
        source: creation_date
        direction: older
        unit: days
        unit_count: ${UNIT_COUNT}
version: '3.2'

services:
  curator:
    build:
      context: extensions/curator/
    environment:
      ELASTICSEARCH_HOST: elasticsearch
      CRON: 0 0 * * *
      CONFIG_FILE: /usr/share/curator/config/curator.yml
      COMMAND: /usr/share/curator/config/delete_log_files_curator.yml
      UNIT_COUNT: 2
    networks:
      - elk
    depends_on:
      - elasticsearch
#!/bin/sh
echo "$CRON /usr/bin/curator --config ${CONFIG_FILE} ${COMMAND}" >>/etc/crontabs/root
# https://github.com/krallin/tini/blob/master/README.md#subreaping
tini -s -- crond -f -d 8 -l 8
# uses ONBUILD instructions described here:
# https://github.com/gliderlabs/logspout/tree/master/custom
FROM gliderlabs/logspout:master
ENV SYSLOG_FORMAT rfc3164
# Logspout extension
Logspout collects all Docker logs using the Docker logs API, and forwards them to Logstash without any additional
configuration.
## Usage
If you want to include the Logspout extension, run Docker Compose from the root of the repository with an additional
command line argument referencing the `logspout-compose.yml` file:
```console
$ docker-compose -f docker-compose.yml -f extensions/logspout/logspout-compose.yml up
```
In your Logstash pipeline configuration, enable the `udp` input and set the input codec to `json`:
```
input {
	udp {
		port => 5000
		codec => json
	}
}
```
## Documentation
https://github.com/looplab/logspout-logstash
#!/bin/sh
# unmodified from:
# https://github.com/gliderlabs/logspout/blob/67ee3831cbd0594361bb3381380c65bdbeb3c20f/custom/build.sh
set -e
apk add --update go git mercurial build-base
mkdir -p /go/src/github.com/gliderlabs
cp -r /src /go/src/github.com/gliderlabs/logspout
cd /go/src/github.com/gliderlabs/logspout
export GOPATH=/go
go get
go build -ldflags "-X main.Version=$1" -o /bin/logspout
apk del go git mercurial build-base
rm -rf /go /var/cache/apk/* /root/.glide
# backwards compatibility
ln -fs /tmp/docker.sock /var/run/docker.sock
version: '3.2'

services:
  logspout:
    build:
      context: extensions/logspout
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
    environment:
      ROUTE_URIS: logstash://logstash:5000
      LOGSTASH_TAGS: docker-elk
    networks:
      - elk
    depends_on:
      - logstash
    restart: on-failure
package main
// installs the Logstash adapter for Logspout, and required dependencies
// https://github.com/looplab/logspout-logstash
import (
	_ "github.com/gliderlabs/logspout/transports/tcp"
	_ "github.com/gliderlabs/logspout/transports/udp"
	_ "github.com/looplab/logspout-logstash"
)
ARG ELK_VERSION
# https://github.com/elastic/kibana-docker
FROM docker.elastic.co/kibana/kibana:${ELK_VERSION}
# Add your kibana plugins setup here
# Example: RUN kibana-plugin install <name|url>
---
## Default Kibana configuration from Kibana base image.
## https://github.com/elastic/kibana/blob/master/src/dev/build/tasks/os_packages/docker_generator/templates/kibana_yml.template.js
#
server.name: kibana
server.host: "0"
elasticsearch.hosts: [ "http://elasticsearch:9200" ]
xpack.monitoring.ui.container.elasticsearch.enabled: true
## X-Pack security credentials
#
elasticsearch.username: elastic
elasticsearch.password: changeme
ARG ELK_VERSION
# https://github.com/elastic/logstash-docker
FROM docker.elastic.co/logstash/logstash:${ELK_VERSION}
# Add your logstash plugins setup here
# Example: RUN logstash-plugin install logstash-filter-json
---
## Default Logstash configuration from Logstash base image.
## https://github.com/elastic/logstash/blob/master/docker/data/logstash/config/logstash-full.yml
#
http.host: "0.0.0.0"
xpack.monitoring.elasticsearch.hosts: [ "http://elasticsearch:9200" ]
## X-Pack security credentials
#
xpack.monitoring.enabled: true
xpack.monitoring.elasticsearch.username: elastic
xpack.monitoring.elasticsearch.password: changeme
input {
	tcp {
		port => 5000
	}
}

## Add your filters / logstash plugins configuration here

output {
	elasticsearch {
		hosts => "elasticsearch:9200"
		user => "elastic"
		password => "changeme"
	}
}
input {
	file {
		path => ["/home/tacserver/Downloads/data/attachments/Offer.csv"]
		start_position => "beginning"
	}
}

filter {
	csv {
		separator => ","
		columns => [ "id", "car", "priceForKm", "priceForTime",
			"startDate", "endDate", "deposit", "startPlace", "endPlaces", "available" ]
	}
	# mutate/convert only supports integer, float, string and boolean targets;
	# geo_point fields (startPlace, endPlaces) must instead be declared in the
	# index mapping, and dates are parsed with the date filter below.
	mutate { convert => ["priceForKm", "integer"] }
	mutate { convert => ["priceForTime", "integer"] }
	mutate { convert => ["deposit", "float"] }
	mutate { convert => ["available", "boolean"] }
	# assumes ISO8601-formatted timestamps in the CSV
	date {
		match  => ["startDate", "ISO8601"]
		target => "startDate"
	}
	date {
		match  => ["endDate", "ISO8601"]
		target => "endDate"
	}
}

output {
	elasticsearch {
		hosts => ["localhost:9200"]
		index => "offers55"
	}
	stdout {}
}