# Docker
# Log config
# Cap the json-file log driver at 3 files x 10 MB per container so
# container logs cannot fill the disk. Restart dockerd afterwards to apply.
# Quoted 'EOF' keeps the JSON body literal (no shell expansion).
cat <<'EOF' | sudo tee /etc/docker/daemon.json
{
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "10m",
    "max-file": "3"
  }
}
EOF

# Admin
# Find what's using space: per-image/container/volume breakdown, then
# list the containers that reference a given volume (name or mount point).
docker system df -v
# Quote the filter so a mount-point path with spaces stays one argument.
docker ps -a --filter "volume=$VOLUME_NAME_OR_MOUNT_POINT"

# MongoDB Container
# MongoDB: persist data in a named volume and run on the private network.
# Root credentials are taken from the environment (do not hardcode them).
docker volume create --name mongo-data
# run mongo container
docker run -d \
  --restart always \
  --name mongo \
  --network private \
  --hostname "$HOSTNAME" \
  -e "MONGO_INITDB_ROOT_USERNAME=$MONGO_INITDB_ROOT_USERNAME" \
  -e "MONGO_INITDB_ROOT_PASSWORD=$MONGO_INITDB_ROOT_PASSWORD" \
  -v mongo-data:/data/db \
  -p 27017:27017 \
  mongo:4.2-bionic

# Neo4J Container
# Neo4j: named volume for the database plus host bind mounts for logs,
# import files and plugins.
# NOTE(review): NEO4J_AUTH=neo4j/test is a placeholder password — change it
# before exposing 7474/7687, and consider pinning a version instead of :latest.
docker volume create --name neo4j-data
# run neo4j container
docker run -d \
  --restart always \
  --name neo4j \
  --network private \
  --env "NEO4J_AUTH=neo4j/test" \
  -v neo4j-data:/data \
  -v /data/neo4j/logs:/logs \
  -v /data/neo4j/import:/var/lib/neo4j/import \
  -v /data/neo4j/plugins:/plugins \
  -p 7474:7474 \
  -p 7687:7687 \
  neo4j:latest

# TimescaleDB Container
# TimescaleDB (PostGIS build): persist Postgres data in a named volume.
# Password comes from the environment; --restart always added for
# consistency with the other long-running containers in this file.
docker volume create --name timescaledb-data
docker run -d \
  --restart always \
  --name timescaledb \
  --network private \
  -e "POSTGRES_PASSWORD=$POSTGRES_PASSWORD" \
  -v timescaledb-data:/var/lib/postgresql/data \
  -p 5432:5432 \
  timescale/timescaledb-postgis:latest-pg12

# RabbitMQ Container
# RabbitMQ with the management UI (15672). The Erlang cookie must match
# across clustered nodes — replace the placeholder with a real secret.
docker run -d \
  --restart always \
  --hostname rabbitmq \
  --name rabbitmq \
  --network private \
  -e RABBITMQ_ERLANG_COOKIE='secret cookie here' \
  -p 5672:5672 \
  -p 15672:15672 \
  rabbitmq:3-management-alpine

# Logging
# Filebeat: autodiscover Docker containers via hints and ship their logs
# to Logstash. -p so a rerun does not fail when the directory exists.
sudo mkdir -p /etc/filebeat
# Quoted 'EOF': the ${...} placeholders must reach Filebeat unexpanded —
# Filebeat resolves them at runtime, not the shell.
cat << 'EOF' | sudo tee /etc/filebeat/filebeat.docker.yml
filebeat.config:
  modules:
    path: ${path.config}/modules.d/*.yml
    reload.enabled: false
filebeat.autodiscover:
  providers:
    - type: docker
      hints.enabled: true
processors:
  - add_cloud_metadata: ~
  - add_fields:
      target: 'host'
      fields:
        location: 'onprem'
output.logstash:
  hosts: '${LOGSTASH_HOSTS:}'
  username: '${LOGSTASH_USERNAME:}'
  password: '${LOGSTASH_PASSWORD:}'
EOF
# Replace any previous filebeat container; ignore errors when none exists.
docker stop filebeat 2>/dev/null || true
docker rm filebeat 2>/dev/null || true
# root + docker.sock/containers mounts are required so Filebeat can read
# every container's json log files.
docker run -d \
  --restart always \
  --hostname="$HOSTNAME" \
  --name=filebeat \
  --user=root \
  --volume="/etc/filebeat/filebeat.docker.yml:/usr/share/filebeat/filebeat.yml:ro" \
  --volume="/var/lib/docker/containers:/var/lib/docker/containers:ro" \
  --volume="/var/run/docker.sock:/var/run/docker.sock:ro" \
  docker.elastic.co/beats/filebeat-oss:7.5.2 filebeat -e -strict.perms=false \
  -E "output.logstash.hosts=[$LOGSTASH_HOSTS]" \
  -E setup.kibana.host=https://kibana.directed.tools:443/

# Redis
# Redis: persist the dump/AOF files in a named volume on the private network.
docker volume create --name redis-data
docker run -d \
  --restart always \
  --name redis \
  --network private \
  -v redis-data:/data \
  -p 6379:6379 \
  redis:alpine

# Arango DB
# ArangoDB: named volume for the database; root password from the environment.
docker volume create --name arango-data
# run arango container
docker run -d \
  --restart always \
  --name arango \
  --network private \
  -e "ARANGO_ROOT_PASSWORD=$ARANGO_ROOT_PASSWORD" \
  -v arango-data:/var/lib/arangodb3 \
  -p 8529:8529 \
  arangodb
# (page-export footer removed: "Was this helpful?")