Recommended RAM ratios for ELK with docker-compose

Robbo_UK · Jul 26, 2018

I have a production server with 8GB of RAM. I'm looking to host Elasticsearch, Logstash and Kibana on the server using Docker Compose.

What would be the recommended Java memory sizes for each of the containers, and how might I configure this?

My docker-compose file looks like the following:

---
version: '3'
services:

  kibana:
    build:
      context: kibana/
    container_name: kibana
    volumes:
      - ./kibana/config/:/usr/share/kibana/config:ro
    networks: ['elk']
    depends_on:
      - elasticsearch
    restart: always

  elasticsearch:
    build:
      context: elasticsearch/
    container_name: elasticsearch
    networks: ['elk']
    volumes:
      - ./elastic-data:/usr/share/elasticsearch/data
      - ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
    restart: always
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    environment:
      - cluster.name=es-docker
      - node.name=node1
      - bootstrap.memory_lock=true

  logstash:
    build:
      context: logstash/
    container_name: logstash
    volumes:
      - ./logstash/pipeline/logstash.conf:/usr/share/logstash/pipeline/logstash.conf:ro
    networks: ['elk']
    ports:
      - "5044:5044"
    depends_on:
      - elasticsearch
    restart: always

networks: {elk: {}}

Now, searching around in the Elastic documentation, I'm seeing settings like - "ES_JAVA_OPTS=-Xms512m -Xmx512m" etc.

So what I would like to know: for the above docker-compose, what Java heap sizes / memory limits should I allow, and how do I update the compose file to include them?

My thoughts are 4GB for Elasticsearch, 2GB for Logstash and 1GB for Kibana, with 1GB reserved for the host (4 + 2 + 1 + 1 = 8GB, so the whole box is accounted for).

Answer

Val · Jul 30, 2018

Following up on our discussion in the comments above, and supposing those sizes are right, what you need to do now is size each Docker container accordingly. Note that since you're not using Swarm, you don't really need the v3 format; v2 is sufficient, and mem_limit is only honored there (in v3, memory limits move under deploy.resources, which only applies in Swarm mode), hence I've changed the version line below. I've also added a mem_limit for each container and the heap sizing in the environment section of the elasticsearch and logstash containers.

version: '2.3'
services:

  kibana:
    build:
      context: kibana/
    container_name: kibana
    volumes:
      - ./kibana/config/:/usr/share/kibana/config:ro
    networks: ['elk']
    depends_on:
      - elasticsearch
    restart: always
    mem_limit: 1g

  elasticsearch:
    build:
      context: elasticsearch/
    container_name: elasticsearch
    networks: ['elk']
    volumes:
      - ./elastic-data:/usr/share/elasticsearch/data
      - ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
    restart: always
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    environment:
      - cluster.name=es-docker
      - node.name=node1
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms2g -Xmx2g"
    mem_limit: 4g

  logstash:
    build:
      context: logstash/
    container_name: logstash
    volumes:
      - ./logstash/pipeline/logstash.conf:/usr/share/logstash/pipeline/logstash.conf:ro
    networks: ['elk']
    ports:
      - "5044:5044"
    depends_on:
      - elasticsearch
    restart: always
    mem_limit: 2g
    environment:
      - "LS_JAVA_OPTS=-Xmx1g -Xms1g"

networks: {elk: {}}
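
Once the stack is up, here's a quick way to sanity-check that the limits are actually being applied (a minimal sketch; it assumes the container_name values above, that security isn't enabled on the cluster, and that curl is available inside the elasticsearch image):

# Build and start the stack in the background
docker-compose up -d --build

# Compare live memory usage against the mem_limit values set above
docker stats kibana elasticsearch logstash

# Optionally, confirm the heap Elasticsearch actually started with
docker exec elasticsearch curl -s 'localhost:9200/_cat/nodes?h=name,heap.max'

As a rule of thumb, keeping the JVM heap at roughly half of each container's memory limit (2g of heap inside a 4g Elasticsearch container, 1g inside a 2g Logstash container) leaves room for off-heap memory and the filesystem cache, which is in line with Elastic's own heap sizing guidance.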