🔧 build:更新 docker compose 文件

develop
xiaojin 5 years ago
parent 35676fc89b
commit 1d1b0338f1

@ -0,0 +1,58 @@
## Compose sample application
### Elasticsearch, Logstash, and Kibana (ELK) in single-node
Project structure:
```
.
└── docker-compose.yml
```
[_docker-compose.yml_](docker-compose.yml)
```
services:
  elasticsearch:
    image: elasticsearch:7.8.0
    ...
  logstash:
    image: logstash:7.8.0
    ...
  kibana:
    image: kibana:7.8.0
    ...
```
## Deploy with docker-compose
```
$ docker-compose up -d
Creating network "elasticsearch-logstash-kibana_elastic" with driver "bridge"
Creating es ... done
Creating log ... done
Creating kib ... done
```
## Expected result
Listing containers must show three containers running and the port mapping as below:
```
$ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
173f0634ed33 logstash:7.8.0 "/usr/local/bin/dock…" 43 seconds ago Up 41 seconds 0.0.0.0:5000->5000/tcp, 0.0.0.0:5044->5044/tcp, 0.0.0.0:9600->9600/tcp, 0.0.0.0:5000->5000/udp log
b448fd3e9b30 kibana:7.8.0 "/usr/local/bin/dumb…" 43 seconds ago Up 42 seconds 0.0.0.0:5601->5601/tcp kib
366d358fb03d elasticsearch:7.8.0 "/tini -- /usr/local…" 43 seconds ago Up 42 seconds (healthy) 0.0.0.0:9200->9200/tcp, 0.0.0.0:9300->9300/tcp es
```
After the application starts, navigate to below links in your web browser:
* Elasticsearch: [`http://localhost:9200`](http://localhost:9200)
* Logstash: [`http://localhost:9600`](http://localhost:9600)
* Kibana: [`http://localhost:5601`](http://localhost:5601)
Stop and remove the containers
```
$ docker-compose down
```
## Attribution
The [example Nginx logs](https://github.com/docker/awesome-compose/tree/master/elasticsearch-logstash-kibana/logstash/nginx.log) are copied from [here](https://github.com/elastic/examples/blob/master/Common%20Data%20Formats/nginx_json_logs/nginx_json_logs).

@ -0,0 +1,50 @@
# Single-node ELK stack: Elasticsearch + Logstash + Kibana (all 7.8.0),
# joined on a dedicated bridge network "elastic".
version: '3.8'
services:
  elasticsearch:
    image: elasticsearch:7.8.0
    container_name: es
    environment:
      # Single-node mode: skip cluster bootstrap/discovery.
      discovery.type: single-node
      # Cap the JVM heap so the container stays small.
      ES_JAVA_OPTS: "-Xms512m -Xmx512m"
    ports:
      - "9200:9200"  # REST API
      - "9300:9300"  # transport
    healthcheck:
      test: ["CMD-SHELL", "curl --silent --fail localhost:9200/_cluster/health || exit 1"]
      interval: 10s
      timeout: 10s
      retries: 3
    networks:
      - elastic
  logstash:
    image: logstash:7.8.0
    container_name: log
    environment:
      # NOTE(review): discovery.seed_hosts is an Elasticsearch setting and is
      # likely a no-op for Logstash — confirm whether it can be removed.
      discovery.seed_hosts: logstash
      LS_JAVA_OPTS: "-Xms512m -Xmx512m"
    volumes:
      # Pipeline definition and the sample nginx log it ingests.
      - ./logstash/pipeline/logstash-nginx.config:/usr/share/logstash/pipeline/logstash-nginx.config
      - ./logstash/nginx.log:/home/nginx.log
    ports:
      - "5000:5000/tcp"
      - "5000:5000/udp"
      - "5044:5044"  # Beats input
      - "9600:9600"  # monitoring API
    depends_on:
      - elasticsearch
    networks:
      - elastic
    command: logstash -f /usr/share/logstash/pipeline/logstash-nginx.config
  kibana:
    image: kibana:7.8.0
    container_name: kib
    ports:
      - "5601:5601"  # web UI
    depends_on:
      - elasticsearch
    networks:
      - elastic
networks:
  elastic:
    driver: bridge

@ -0,0 +1,30 @@
# Pipeline: tail the mounted nginx JSON log, enrich each event, and index it.

input {
  file {
    path => "/home/nginx.log"
    # Read the file from the start on every run; writing sincedb to
    # /dev/null means the read position is never persisted.
    start_position => "beginning"
    sincedb_path => "/dev/null"
  }
}

filter {
  # Each log line is a JSON document in the "message" field.
  json {
    source => "message"
  }
  # GeoIP lookup — assumes the parsed JSON carries a "remote_ip" field;
  # confirm against the nginx log format.
  geoip {
    source => "remote_ip"
  }
  # Expand the raw user-agent string into structured fields.
  useragent {
    source => "agent"
    target => "useragent"
  }
}

output {
  # Index into the "es" service (see docker-compose.yml).
  elasticsearch {
    hosts => ["http://es:9200"]
    index => "nginx"
  }
  # Echo events to the container log for debugging.
  stdout {
    codec => rubydebug
  }
}

@ -0,0 +1,66 @@
## Compose sample
### Prometheus & Grafana
Project structure:
```
.
├── docker-compose.yml
├── grafana
│   └── datasource.yml
├── prometheus
│   └── prometheus.yml
└── README.md
```
[_docker-compose.yml_](docker-compose.yml)
```
version: "3.7"
services:
  prometheus:
    image: prom/prometheus
    ...
    ports:
      - 9090:9090
  grafana:
    image: grafana/grafana
    ...
    ports:
      - 3000:3000
```
The compose file defines a stack with two services `prometheus` and `grafana`.
When deploying the stack, docker-compose maps the default port of each service to the equivalent port on the host, making it easier to inspect each service's web interface.
Make sure the ports 9090 and 3000 on the host are not already in use.
## Deploy with docker-compose
```
$ docker-compose up -d
Creating network "prometheus-grafana_default" with the default driver
Creating volume "prometheus-grafana_prom_data" with default driver
...
Creating grafana ... done
Creating prometheus ... done
Attaching to prometheus, grafana
```
## Expected result
Listing containers must show two containers running and the port mapping as below:
```
$ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
dbdec637814f prom/prometheus "/bin/prometheus --c…" 8 minutes ago Up 8 minutes 0.0.0.0:9090->9090/tcp prometheus
79f667cb7dc2 grafana/grafana "/run.sh" 8 minutes ago Up 8 minutes 0.0.0.0:3000->3000/tcp grafana
```
Navigate to `http://localhost:3000` in your web browser and use the login credentials specified in the compose file to access Grafana. It is already configured with prometheus as the default datasource.
![page](output.jpg)
Navigate to `http://localhost:9090` in your web browser to access directly the web interface of prometheus.
Stop and remove the containers. Use `-v` to remove the volumes if looking to erase all data.
```
$ docker-compose down -v
```

@ -0,0 +1,26 @@
# Prometheus + Grafana stack. Grafana is pre-provisioned with Prometheus as
# a datasource via the mounted ./grafana directory.
version: "3.7"
services:
  prometheus:
    image: prom/prometheus
    container_name: prometheus
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
    ports:
      # Quoted to avoid YAML's ambiguous colon-separated scalar parsing.
      - "9090:9090"
    restart: unless-stopped
    volumes:
      - ./prometheus:/etc/prometheus   # config (prometheus.yml)
      - prom_data:/prometheus          # persisted TSDB data
  grafana:
    image: grafana/grafana
    container_name: grafana
    ports:
      - "3000:3000"
    restart: unless-stopped
    environment:
      # Login credentials referenced by the README.
      - GF_SECURITY_ADMIN_USER=admin
      - GF_SECURITY_ADMIN_PASSWORD=grafana
    volumes:
      # Datasource provisioning (datasource.yml).
      - ./grafana:/etc/grafana/provisioning/datasources
volumes:
  prom_data:

@ -0,0 +1,9 @@
# Grafana datasource provisioning: register Prometheus (by its Compose
# service name) as the default datasource.
apiVersion: 1
datasources:
  - name: Prometheus
    type: prometheus
    url: http://prometheus:9090
    isDefault: true
    access: proxy   # Grafana backend proxies queries to Prometheus
    editable: true

Binary file not shown.

After

Width:  |  Height:  |  Size: 101 KiB

@ -0,0 +1,21 @@
# Prometheus server configuration: default intervals, an (empty) Alertmanager
# stanza, and a single self-scrape job.
global:
  scrape_interval: 15s
  scrape_timeout: 10s
  evaluation_interval: 15s
alerting:
  alertmanagers:
    - static_configs:
        # No Alertmanager deployed in this stack; targets intentionally empty.
        - targets: []
      scheme: http
      timeout: 10s
      api_version: v1
scrape_configs:
  # Prometheus scraping its own /metrics endpoint.
  - job_name: prometheus
    honor_timestamps: true
    scrape_interval: 15s
    scrape_timeout: 10s
    metrics_path: /metrics
    scheme: http
    static_configs:
      - targets:
          - localhost:9090

@ -1,3 +0,0 @@
grafana
prometheus
alertmanager

@ -1,6 +0,0 @@
# prometheus
普罗米修斯监控系统
本配置实现 Prometheus+Alertmanager+Grafana

@ -1,51 +0,0 @@
# Prometheus + Alertmanager + Grafana stack on a shared bridge network,
# with host-path volumes for data/config and 1g-capped container logs.
version: '3'
services:
  prometheus:
    image: prom/prometheus:v2.15.1
    container_name: "prometheus"
    logging:
      options:
        max-size: "1g"
    networks:
      - "prometheusnet"
    volumes:
      - ./prometheus/work:/prometheus
      - ./prometheus/etc:/etc/prometheus
    ports:
      - 9999:9090
    command:
      - --web.enable-lifecycle
      - --config.file=/etc/prometheus/prometheus.yml
  alertmanager:
    image: prom/alertmanager:v0.20.0
    container_name: "alertmanager"
    logging:
      options:
        max-size: "1g"
    networks:
      - "prometheusnet"
    volumes:
      - ./alertmanager/work:/alertmanager
      - ./alertmanager/etc:/etc/alertmanager
    ports:
      - 9093:9093
  grafana:
    image: grafana/grafana:6.1.4
    container_name: "grafana"
    logging:
      options:
        max-size: "1g"
    networks:
      - "prometheusnet"
    volumes:
      - ./grafana/lib:/var/lib/grafana
      - ./grafana/logs:/var/log/grafana
    environment:
      GF_SECURITY_ADMIN_PASSWORD: 123456
    ports:
      - 3000:3000
networks:
  prometheusnet:
    driver: bridge

@ -1 +0,0 @@
data

@ -1,3 +0,0 @@
# redis
redis 3.x版本如果自己需要其它版本请参考[https://hub.docker.com/r/library/redis/tags/](https://hub.docker.com/r/library/redis/tags/)修改docker-compose.yml中的版本`image: redis:x.x.x`

@ -1,18 +0,0 @@
# Single Redis 3.x container with a host-mounted data directory.
version: '3'
services:
  redis:
    image: redis:3
    container_name: "redis3"
    restart: always
    ports:
      - 6379:6379
    volumes:
      - ./data:/data
    networks:
      - "redisnet"
    # command: redis-server --appendonly yes
networks:
  redisnet:
    driver: bridge

@ -0,0 +1,6 @@
*~
.DS_Store
Thumbs.db
*.swp
.env

@ -0,0 +1,23 @@
# Launch Redis cluster via docker-compose for local development purpose
This repo is cloned from: https://github.com/yowko/docker-compose-redis-cluster. Here are some changes made to fit my own needs:
1. Upgrade Redis version by using image `6.0.7-alpine`
2. Simplify the usage by removing the master/slave password (CAUTION: DO NOT USE IT ON PRODUCTION ENVIRONMENT)
## Prerequisites
- docker
- docker-compose
## Usage
#### Start Redis cluster
```bash
ip=$(ipconfig getifaddr en0) docker-compose up -d --build
```
The redis cluster will be ready on `127.0.0.1:7000`
#### Stop Redis cluster
```bash
docker-compose down -v
```

@ -0,0 +1,73 @@
# Six-node Redis cluster (3 masters + 3 replicas) plus a one-shot container
# that forms the cluster. Each node announces the host IP (${ip}, supplied on
# the command line — see the README) so peers connect via the published ports.
# Port N is the client port; port N+10000 is the cluster bus port.
version: '3.4'
services:
  redis-node1:
    build:
      context: redis
    ports:
      - "7000:7000"
      - "17000:17000"
    restart: always
    entrypoint: [redis-server, /etc/redis/rediscluster.conf, --port, "7000", --cluster-announce-ip, "${ip}"]
  redis-node2:
    build:
      context: redis
    ports:
      - "7001:7001"
      - "17001:17001"
    restart: always
    entrypoint: [redis-server, /etc/redis/rediscluster.conf, --port, "7001", --cluster-announce-ip, "${ip}"]
  redis-node3:
    build:
      context: redis
    ports:
      - "7002:7002"
      - "17002:17002"
    restart: always
    entrypoint: [redis-server, /etc/redis/rediscluster.conf, --port, "7002", --cluster-announce-ip, "${ip}"]
  # Nodes 4-6 start after the first three, which become the masters.
  redis-node4:
    build:
      context: redis
    ports:
      - "7003:7003"
      - "17003:17003"
    restart: always
    entrypoint: [redis-server, /etc/redis/rediscluster.conf, --port, "7003", --cluster-announce-ip, "${ip}"]
    depends_on:
      - redis-node1
      - redis-node2
      - redis-node3
  redis-node5:
    build:
      context: redis
    ports:
      - "7004:7004"
      - "17004:17004"
    restart: always
    entrypoint: [redis-server, /etc/redis/rediscluster.conf, --port, "7004", --cluster-announce-ip, "${ip}"]
    depends_on:
      - redis-node1
      - redis-node2
      - redis-node3
  redis-node6:
    build:
      context: redis
    ports:
      - "7005:7005"
      - "17005:17005"
    restart: always
    entrypoint: [redis-server, /etc/redis/rediscluster.conf, --port, "7005", --cluster-announce-ip, "${ip}"]
    depends_on:
      - redis-node1
      - redis-node2
      - redis-node3
  # One-shot: joins the six nodes into a cluster with one replica per master.
  # NOTE(review): uses redis:6.0.3 for redis-cli while the nodes build from
  # 6.0.7-alpine — consider aligning the versions.
  redis-cluster-creator:
    image: redis:6.0.3
    entrypoint: [/bin/sh, -c, 'echo "yes" | redis-cli --cluster create ${ip}:7000 ${ip}:7001 ${ip}:7002 ${ip}:7003 ${ip}:7004 ${ip}:7005 --cluster-replicas 1']
    depends_on:
      - redis-node1
      - redis-node2
      - redis-node3
      - redis-node4
      - redis-node5
      - redis-node6

@ -0,0 +1,5 @@
FROM redis:6.0.7-alpine

# Bake the cluster config into the image. The compose services override this
# entrypoint with per-node ports, so the default below is only a fallback.
COPY rediscluster.conf /etc/redis/rediscluster.conf

# Exec form so redis-server runs as PID 1 and receives SIGTERM directly;
# shell form would wrap it in /bin/sh and break graceful shutdown.
ENTRYPOINT ["redis-server", "/etc/redis/rediscluster.conf"]

@ -0,0 +1,15 @@
# Listen on all interfaces so other containers/hosts can connect
bind 0.0.0.0
# Run this instance as a cluster node
cluster-enabled yes
# Per-node cluster state file (generated and managed by Redis itself)
cluster-config-file nodes.conf
# Milliseconds a node may be unreachable before it is considered failing
cluster-node-timeout 5000
# Password this node uses to authenticate with its master when replicating
#masterauth <changeforyourneed>
# Password clients must supply to this node (pair with masterauth cluster-wide)
#requirepass <changeforyourneed>
Loading…
Cancel
Save