Move CI flow from Travis CI to GitHub Actions

parent 081589dd
name: CI

on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]

jobs:

  test-compose:
    name: 'Test suite: Compose'
    # List of supported runners:
    # https://docs.github.com/en/free-pro-team@latest/actions/reference/specifications-for-github-hosted-runners#supported-runners-and-hardware-resources
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v2

      #####################################################
      #                                                   #
      # Install all dependencies required by test suites. #
      #                                                   #
      #####################################################

      - name: Prepare environment
        run: |
          # Install Linux packages
          #
          # List of packages pre-installed in the runner:
          # https://docs.github.com/en/free-pro-team@latest/actions/reference/specifications-for-github-hosted-runners#supported-software
          sudo apt install -y expect

          # Pre-build container images
          docker-compose build

      ########################################################
      #                                                      #
      # Ensure §"Initial setup" of the README remains valid. #
      #                                                      #
      ########################################################

      - name: Set password of every built-in user to 'testpasswd'
        run: |
          # Change password of 'elastic' user from 'changeme' to 'testpasswd' in config files
          sed -i -e 's/\(xpack.monitoring.elasticsearch.username:\) elastic/\1 logstash_system/g' -e 's/\(xpack.monitoring.elasticsearch.password:\) changeme/\1 testpasswd/g' logstash/config/logstash.yml
          sed -i 's/\(password =>\) "changeme"/\1 "testpasswd"/g' logstash/pipeline/logstash.conf
          sed -i -e 's/\(elasticsearch.username:\) elastic/\1 kibana_system/g' -e 's/\(elasticsearch.password:\) changeme/\1 testpasswd/g' kibana/config/kibana.yml
          sed -i -e 's/\(elasticsearch.password:\) changeme/\1 testpasswd/g' -e 's/\(secret_management.encryption_keys:\)/\1 [test-encrypt]/g' extensions/enterprise-search/config/enterprise-search.yml
          sed -i 's/\(password:\) changeme/\1 testpasswd/g' extensions/apm-server/config/apm-server.yml

          # Run Elasticsearch and wait for its availability
          docker-compose up -d elasticsearch
          source .github/workflows/scripts/lib/testing.sh
          poll_ready "$(container_id elasticsearch)" "http://$(service_ip elasticsearch):9200/" 'elastic:changeme'

          # Set passwords
          .github/workflows/scripts/elasticsearch-setup-passwords.exp

      ##########################################################
      #                                                        #
      # Test core components: Elasticsearch, Logstash, Kibana. #
      #                                                        #
      ##########################################################

      - name: Run the stack
        run: docker-compose up -d

      - name: Execute core test suite
        run: .github/workflows/scripts/run-tests-core.sh

      - name: 'debug: Display state and logs (core)'
        # https://docs.github.com/en/free-pro-team@latest/actions/reference/workflow-syntax-for-github-actions#jobsjob_idif
        # https://docs.github.com/en/free-pro-team@latest/actions/reference/context-and-expression-syntax-for-github-actions#job-status-check-functions
        if: always()
        run: |
          docker-compose ps
          docker-compose logs elasticsearch
          docker-compose logs logstash
          docker-compose logs kibana
          # next steps don't need Logstash
          docker-compose stop logstash

      ##############################
      #                            #
      # Test supported extensions. #
      #                            #
      ##############################

      #
      # Enterprise Search
      #

      - name: Execute Enterprise Search test suite
        run: |
          # Set mandatory Elasticsearch settings
          sed -i '$ a xpack.security.authc.api_key.enabled: true' elasticsearch/config/elasticsearch.yml

          # Restart Elasticsearch for changes to take effect
          docker-compose restart elasticsearch

          # Run Enterprise Search and execute tests
          docker-compose -f docker-compose.yml -f extensions/enterprise-search/enterprise-search-compose.yml up -d enterprise-search
          .github/workflows/scripts/run-tests-enterprise-search.sh

          # Revert changes to Elasticsearch configuration
          sed -i '/xpack.security.authc.api_key.enabled: true/d' elasticsearch/config/elasticsearch.yml
          docker-compose restart elasticsearch

      - name: 'debug: Display state and logs (Enterprise Search)'
        if: always()
        run: |
          docker-compose -f docker-compose.yml -f extensions/enterprise-search/enterprise-search-compose.yml ps
          docker-compose -f docker-compose.yml -f extensions/enterprise-search/enterprise-search-compose.yml logs enterprise-search
          # next steps don't need Enterprise Search
          docker-compose -f docker-compose.yml -f extensions/enterprise-search/enterprise-search-compose.yml stop enterprise-search

      #
      # APM Server
      #

      - name: Execute APM Server test suite
        run: |
          docker-compose -f docker-compose.yml -f extensions/apm-server/apm-server-compose.yml up -d apm-server
          .github/workflows/scripts/run-tests-apm-server.sh

      - name: 'debug: Display state and logs (APM Server)'
        if: always()
        run: |
          docker-compose -f docker-compose.yml -f extensions/apm-server/apm-server-compose.yml ps
          docker-compose -f docker-compose.yml -f extensions/apm-server/apm-server-compose.yml logs apm-server
          # next steps don't need APM Server
          docker-compose -f docker-compose.yml -f extensions/apm-server/apm-server-compose.yml stop apm-server

      ##############
      #            #
      # Tear down. #
      #            #
      ##############

      - name: Terminate all components
        if: always()
        run: >-
          docker-compose
          -f docker-compose.yml
          -f extensions/enterprise-search/enterprise-search-compose.yml
          -f extensions/apm-server/apm-server-compose.yml
          down -v
  test-swarm:
    name: 'Test suite: Swarm'
    runs-on: ubuntu-latest

    env:
      MODE: swarm

    steps:
      - uses: actions/checkout@v2

      #####################################################
      #                                                   #
      # Install all dependencies required by test suites. #
      #                                                   #
      #####################################################

      - name: Prepare environment
        run: |
          # Install Linux packages
          sudo apt install -y expect

          # Enable Swarm mode
          docker swarm init

      ########################################################
      #                                                      #
      # Ensure §"Initial setup" of the README remains valid. #
      #                                                      #
      ########################################################

      - name: Set password of every built-in user to 'testpasswd'
        run: |
          # Change password of 'elastic' user from 'changeme' to 'testpasswd' in config files
          sed -i -e 's/\(xpack.monitoring.elasticsearch.username:\) elastic/\1 logstash_system/g' -e 's/\(xpack.monitoring.elasticsearch.password:\) changeme/\1 testpasswd/g' logstash/config/logstash.yml
          sed -i 's/\(password =>\) "changeme"/\1 "testpasswd"/g' logstash/pipeline/logstash.conf
          sed -i -e 's/\(elasticsearch.username:\) elastic/\1 kibana_system/g' -e 's/\(elasticsearch.password:\) changeme/\1 testpasswd/g' kibana/config/kibana.yml

          # Run Elasticsearch and wait for its availability
          docker stack deploy -c ./docker-stack.yml elk
          docker service scale elk_logstash=0 elk_kibana=0
          source .github/workflows/scripts/lib/testing.sh
          poll_ready "$(container_id elasticsearch)" "http://$(service_ip elasticsearch):9200/" 'elastic:changeme'

          # Set passwords
          .github/workflows/scripts/elasticsearch-setup-passwords.exp swarm

      ##########################################################
      #                                                        #
      # Test core components: Elasticsearch, Logstash, Kibana. #
      #                                                        #
      ##########################################################

      - name: Run the stack
        run: docker service scale elk_logstash=1 elk_kibana=1

      - name: Execute core test suite
        run: .github/workflows/scripts/run-tests-core.sh swarm

      - name: 'debug: Display state and logs (core)'
        if: always()
        run: |
          docker stack services elk
          docker service logs elk_elasticsearch
          docker service logs elk_kibana
          docker service logs elk_logstash

      ##############
      #            #
      # Tear down. #
      #            #
      ##############

      - name: Terminate all components
        if: always()
        run: docker stack rm elk
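
For reference, the Compose job above boils down to the following local command sequence. This is an illustrative sketch, not part of the committed files; it assumes Docker, Docker Compose and expect are installed, and that the sed edits from the "Set password of every built-in user to 'testpasswd'" step have already been applied:

# Pre-build container images and start Elasticsearch alone
docker-compose build
docker-compose up -d elasticsearch

# Wait for Elasticsearch, then set the built-in users' passwords
source .github/workflows/scripts/lib/testing.sh
poll_ready "$(container_id elasticsearch)" "http://$(service_ip elasticsearch):9200/" 'elastic:changeme'
.github/workflows/scripts/elasticsearch-setup-passwords.exp

# Start the remaining services, run the core test suite, and tear down
docker-compose up -d
.github/workflows/scripts/run-tests-core.sh
docker-compose down -v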
#!/usr/bin/env bash

# Log a message.
function log {
    echo -e "\n[+] $1\n"
}

# Log an error.
function err {
    echo -e "\n[x] $1\n"
}

# Return the ID of the container running the given service.
function container_id {
    local svc=$1

    local label
    if [[ "$MODE" == "swarm" ]]; then
        label="com.docker.swarm.service.name=elk_${svc}"
    else
        label="com.docker.compose.service=${svc}"
    fi

    local cid

    # retry for max 60s (30*2s)
    for _ in $(seq 1 30); do
        cid="$(docker container ls -aq -f label="$label")"
        if [ -n "$cid" ]; then
            break
        fi

        echo -n '.' >&2
        sleep 2
    done
    echo -e '\n' >&2

    if [ -z "${cid:-}" ]; then
        err "Timed out waiting for creation of container with label ${label}"
        return 1
    fi

    echo "$cid"
}
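
# Example (illustrative, not part of the original file): in Compose mode,
# 'container_id elasticsearch' filters containers on the label
# 'com.docker.compose.service=elasticsearch'; with MODE=swarm, it filters on
# 'com.docker.swarm.service.name=elk_elasticsearch'.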
# Return the IP address at which a service can be reached.
# In Compose mode, returns the container's IP.
# In Swarm mode, returns the IP of the node to ensure traffic enters the routing mesh (ingress).
function service_ip {
    local svc=$1

    local ip

    if [[ "$MODE" == "swarm" ]]; then
        #ingress_net="$(docker network inspect ingress --format '{{ .Id }}')"
        #ip="$(docker service inspect elk_"$svc" --format "{{ range .Endpoint.VirtualIPs }}{{ if eq .NetworkID \"${ingress_net}\" }}{{ .Addr }}{{ end }}{{ end }}" | cut -d/ -f1)"
        node="$(docker node ls --format '{{ .ID }}')"
        ip="$(docker node inspect "$node" --format '{{ .Status.Addr }}')"
        if [ -z "${ip:-}" ]; then
            err "Node ${node} has no IP address"
            return 1
        fi

        echo "$ip"
        return
    fi

    local cid
    cid="$(container_id "$svc")"

    ip="$(docker container inspect "$cid" --format '{{ (index .NetworkSettings.Networks "docker-elk_elk").IPAddress }}')"
    if [ -z "${ip:-}" ]; then
        err "Container ${cid} has no IP address"
        return 1
    fi

    echo "$ip"
}
# Poll the given service at the given port:/path until it responds with HTTP code 200.
function poll_ready {
    local cid=$1
    local url=$2

    local -a args=( '-s' '-D-' '-m3' '-w' '%{http_code}' "$url" )
    if [ "$#" -ge 3 ]; then
        args+=( '-u' "$3" )
    fi

    echo "curl arguments: ${args[*]}"

    local -i result=1
    local output

    # retry for max 180s (36*5s)
    for _ in $(seq 1 36); do
        if [[ $(docker container inspect "$cid" --format '{{ .State.Status}}') == 'exited' ]]; then
            err "Container exited ($(docker container inspect "$cid" --format '{{ .Name }}'))"
            return 1
        fi

        output="$(curl "${args[@]}" || true)"
        if [ "${output: -3}" -eq 200 ]; then
            result=0
            break
        fi

        echo -n 'x' >&2
        sleep 5
    done
    echo -e '\n' >&2

    echo -e "\n${output::-3}"

    return $result
}
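
The helpers above are combined by the test scripts whose diffs follow. A minimal usage sketch, illustrative only and mirroring how run-tests-core.sh calls them once the stack is up and the built-in users' passwords are set to 'testpasswd':

source .github/workflows/scripts/lib/testing.sh   # defines log, err, container_id, service_ip, poll_ready

cid="$(container_id elasticsearch)"   # container ID resolved via Compose/Swarm labels
ip="$(service_ip elasticsearch)"      # container IP (Compose) or node IP (Swarm)

log 'Waiting for readiness of Elasticsearch'
poll_ready "$cid" "http://${ip}:9200/" 'elastic:testpasswd'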
@@ -9,5 +9,9 @@ source "$(dirname ${BASH_SOURCE[0]})/lib/testing.sh"
 declare MODE=""
 
+cid="$(container_id apm-server)"
+ip="$(service_ip apm-server)"
+
 log 'Waiting for readiness of APM Server'
-poll_ready apm-server 'http://localhost:8200/'
+poll_ready "$cid" "http://${ip}:8200/"
@@ -12,18 +12,26 @@ if [ "$#" -ge 1 ]; then
     MODE=$1
 fi
 
-log 'Waiting for readiness of Elasticsearch'
-poll_ready elasticsearch 'http://localhost:9200/' 'elastic:testpasswd'
+cid_es="$(container_id elasticsearch)"
+cid_ls="$(container_id logstash)"
+cid_kb="$(container_id kibana)"
 
-log 'Waiting for readiness of Kibana'
-poll_ready kibana 'http://localhost:5601/api/status' 'kibana_system:testpasswd'
+ip_es="$(service_ip elasticsearch)"
+ip_ls="$(service_ip logstash)"
+ip_kb="$(service_ip kibana)"
+
+log 'Waiting for readiness of Elasticsearch'
+poll_ready "$cid_es" "http://${ip_es}:9200/" 'elastic:testpasswd'
 
 log 'Waiting for readiness of Logstash'
-poll_ready logstash 'http://localhost:9600/_node/pipelines/main?pretty'
+poll_ready "$cid_ls" "http://${ip_ls}:9600/_node/pipelines/main?pretty"
+
+log 'Waiting for readiness of Kibana'
+poll_ready "$cid_kb" "http://${ip_kb}:5601/api/status" 'kibana_system:testpasswd'
 
 log 'Creating Logstash index pattern in Kibana'
 source .env
-curl -X POST -D- 'http://localhost:5601/api/saved_objects/index-pattern' \
+curl -X POST -D- "http://${ip_kb}:5601/api/saved_objects/index-pattern" \
     -s -w '\n' \
     -H 'Content-Type: application/json' \
     -H "kbn-version: ${ELK_VERSION}" \
@@ -31,7 +39,7 @@ curl -X POST -D- 'http://localhost:5601/api/saved_objects/index-pattern' \
     -d '{"attributes":{"title":"logstash-*","timeFieldName":"@timestamp"}}'
 
 log 'Searching index pattern via Kibana API'
-response="$(curl 'http://localhost:5601/api/saved_objects/_find?type=index-pattern' -s -u elastic:testpasswd)"
+response="$(curl "http://${ip_kb}:5601/api/saved_objects/_find?type=index-pattern" -s -u elastic:testpasswd)"
 echo "$response"
 count="$(jq -rn --argjson data "${response}" '$data.total')"
 if [[ $count -ne 1 ]]; then
@@ -40,14 +48,14 @@ if [[ $count -ne 1 ]]; then
 fi
 
 log 'Sending message to Logstash TCP input'
-echo 'dockerelk' | nc -q0 localhost 5000
+echo 'dockerelk' | nc -q0 "$ip_ls" 5000
 sleep 1
 
-curl -X POST 'http://localhost:9200/_refresh' -u elastic:testpasswd \
+curl -X POST "http://${ip_es}:9200/_refresh" -u elastic:testpasswd \
     -s -w '\n'
 
 log 'Searching message in Elasticsearch'
-response="$(curl 'http://localhost:9200/_count?q=message:dockerelk&pretty' -s -u elastic:testpasswd)"
+response="$(curl "http://${ip_es}:9200/_count?q=message:dockerelk&pretty" -s -u elastic:testpasswd)"
 echo "$response"
 count="$(jq -rn --argjson data "${response}" '$data.count')"
 if [[ $count -ne 1 ]]; then
......
@@ -9,14 +9,21 @@ source "$(dirname ${BASH_SOURCE[0]})/lib/testing.sh"
 declare MODE=""
 
+cid_es="$(container_id elasticsearch)"
+cid_en="$(container_id enterprise-search)"
+
+ip_es="$(service_ip elasticsearch)"
+ip_en="$(service_ip enterprise-search)"
+
 log 'Waiting for readiness of Elasticsearch'
-poll_ready elasticsearch 'http://localhost:9200/' 'elastic:testpasswd'
+poll_ready "$cid_es" "http://${ip_es}:9200/" 'elastic:testpasswd'
 
 log 'Waiting for readiness of Enterprise Search'
-poll_ready enterprise-search 'http://localhost:3002/api/ent/v1/internal/health' 'elastic:testpasswd'
+poll_ready "$cid_en" "http://${ip_en}:3002/api/ent/v1/internal/health" 'elastic:testpasswd'
 
 log 'Retrieving private key from Elasticsearch'
-response="$(curl 'http://localhost:9200/.ent-search-actastic-app_search_api_tokens_v2/_search?q=name:private-key' -s -u elastic:testpasswd)"
+response="$(curl "http://${ip_es}:9200/.ent-search-actastic-app_search_api_tokens_v2/_search?q=name:private-key" -s -u elastic:testpasswd)"
 hits="$(jq -rn --argjson data "${response}" '$data.hits.hits')"
 echo "$hits"
 count="$(jq -rn --argjson data "${response}" '$data.hits.total.value')"
@@ -27,7 +34,7 @@ fi
 key="$(jq -rn --argjson data "${hits}" '$data[0]._source.authentication_token')"
 
 log 'Creating App Search engine'
-response="$(curl 'http://localhost:3002/api/as/v1/engines' -s -d '{"name": "dockerelk"}' -H "Authorization: Bearer ${key}")"
+response="$(curl "http://${ip_en}:3002/api/as/v1/engines" -s -d '{"name": "dockerelk"}' -H "Authorization: Bearer ${key}")"
 echo "$response"
 name="$(jq -rn --argjson data "${response}" '$data.name')"
 if [[ $name != 'dockerelk' ]]; then
......
language: minimal

services: docker

env:
  - DOCKER_COMPOSE_VERSION=1.20.1

before_install:
  - sudo apt-get update
  - sudo apt-get install -y expect jq

install:
  # Install Docker Compose
  - curl -L "https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-$(uname -s)-$(uname -m)" -o "$HOME/bin/docker-compose"
  - chmod +x "$HOME/bin/docker-compose"
  - docker-compose --version

before_script:
  # Build images
  - docker-compose build
  # Use built-in users with passwords set by 'elasticsearch-setup-passwords.exp'
  - sed -i -e 's/\(elasticsearch.username:\) elastic/\1 kibana_system/g' -e 's/\(elasticsearch.password:\) changeme/\1 testpasswd/g' kibana/config/kibana.yml
  - sed -i -e 's/\(xpack.monitoring.elasticsearch.username:\) elastic/\1 logstash_system/g' -e 's/\(xpack.monitoring.elasticsearch.password:\) changeme/\1 testpasswd/g' logstash/config/logstash.yml
  - sed -i 's/\(password =>\) "changeme"/\1 "testpasswd"/g' logstash/pipeline/logstash.conf
  - sed -i -e 's/\(elasticsearch.password:\) changeme/\1 testpasswd/g' -e 's/\(secret_management.encryption_keys:\)/\1 [test-encrypt]/g' extensions/enterprise-search/config/enterprise-search.yml
  - sed -i 's/\(password:\) changeme/\1 testpasswd/g' extensions/apm-server/config/apm-server.yml

script:
  # Core Elastic Stack
  - docker-compose up -d elasticsearch
  - sleep 30
  - .travis/elasticsearch-setup-passwords.exp
  - docker-compose up -d
  - .travis/run-tests-core.sh
  - docker-compose ps
  - docker-compose logs elasticsearch
  - docker-compose logs kibana
  - docker-compose logs logstash
  - docker-compose stop logstash kibana

  # Extensions
  # Enterprise Search
  - "sed -i '$ a xpack.security.authc.api_key.enabled: true' elasticsearch/config/elasticsearch.yml"
  - docker-compose restart elasticsearch
  - docker-compose -f docker-compose.yml -f extensions/enterprise-search/enterprise-search-compose.yml up -d enterprise-search
  - sleep 30
  - .travis/run-tests-enterprise-search.sh
  - docker-compose -f docker-compose.yml -f extensions/enterprise-search/enterprise-search-compose.yml ps
  - docker-compose -f docker-compose.yml -f extensions/enterprise-search/enterprise-search-compose.yml logs enterprise-search
  - docker-compose -f docker-compose.yml -f extensions/enterprise-search/enterprise-search-compose.yml stop enterprise-search
  - "sed -i '/xpack.security.authc.api_key.enabled: true/d' elasticsearch/config/elasticsearch.yml"
  - docker-compose restart elasticsearch

  # APM Server
  - docker-compose -f docker-compose.yml -f extensions/apm-server/apm-server-compose.yml up -d apm-server
  - .travis/run-tests-apm-server.sh
  - docker-compose -f docker-compose.yml -f extensions/apm-server/apm-server-compose.yml ps
  - docker-compose -f docker-compose.yml -f extensions/apm-server/apm-server-compose.yml logs apm-server
  - docker-compose -f docker-compose.yml -f extensions/apm-server/apm-server-compose.yml stop apm-server

  # Tear down
  - >-
    docker-compose
    -f docker-compose.yml
    -f extensions/enterprise-search/enterprise-search-compose.yml
    -f extensions/apm-server/apm-server-compose.yml
    down -v

  # Swarm
  - docker swarm init
  - docker stack deploy -c ./docker-stack.yml elk
  - docker service scale elk_kibana=0 --detach=false
  - docker service scale elk_logstash=0 --detach=false
  - sleep 40
  - .travis/elasticsearch-setup-passwords.exp swarm
  - docker service scale elk_kibana=1 --detach=false
  - docker service scale elk_logstash=1 --detach=false
  - .travis/run-tests-core.sh swarm
  - docker stack services elk
  - docker service logs elk_elasticsearch
  - docker service logs elk_kibana
  - docker service logs elk_logstash
  - docker stack rm elk
#!/usr/bin/env bash

function log {
    echo -e "\n[+] $1\n"
}

function poll_ready {
    local svc=$1
    local url=$2

    local -a args=( '-s' '-D-' '-w' '%{http_code}' "$url" )
    if [ "$#" -ge 3 ]; then
        args+=( '-u' "$3" )
    fi

    local label
    if [ "$MODE" == "swarm" ]; then
        label="com.docker.swarm.service.name=elk_${svc}"
    else
        label="com.docker.compose.service=${svc}"
    fi

    local -i result=1
    local cid
    local output

    # retry for max 180s (36*5s)
    for _ in $(seq 1 36); do
        cid="$(docker ps -q -f label="$label")"
        if [ -z "${cid:-}" ]; then
            echo "Container exited"
            return 1
        fi

        set +e
        output="$(curl "${args[@]}")"
        set -e
        if [ "${output: -3}" -eq 200 ]; then
            result=0
            break
        fi

        echo -n '.'
        sleep 5
    done

    echo -e "\n${output::-3}"

    return $result
}
@@ -2,7 +2,7 @@
 [![Join the chat at https://gitter.im/deviantony/docker-elk](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/deviantony/docker-elk?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
 [![Elastic Stack version](https://img.shields.io/badge/ELK-7.9.2-blue.svg?style=flat)](https://github.com/deviantony/docker-elk/issues/539)
-[![Build Status](https://api.travis-ci.org/deviantony/docker-elk.svg?branch=master)](https://travis-ci.org/deviantony/docker-elk)
+[![Build Status](https://github.com/deviantony/docker-elk/workflows/CI/badge.svg)](https://github.com/deviantony/docker-elk/actions?query=workflow%3ACI)
 
 Run the latest version of the [Elastic stack][elk-stack] with Docker and Docker Compose.
......
@@ -20,8 +20,8 @@ services:
     environment:
       ES_JAVA_OPTS: "-Xmx256m -Xms256m"
       ELASTIC_PASSWORD: changeme
-      # Use single node discovery in order to disable production mode and avoid bootstrap checks
-      # see https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks.html
+      # Use single node discovery in order to disable production mode and avoid bootstrap checks.
+      # see: https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks.html
       discovery.type: single-node
     networks:
       - elk
......
@@ -13,9 +13,11 @@ services:
     environment:
       ES_JAVA_OPTS: "-Xmx256m -Xms256m"
       ELASTIC_PASSWORD: changeme
-      # Use single node discovery in order to disable production mode and avoid bootstrap checks
-      # see https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks.html
+      # Use single node discovery in order to disable production mode and avoid bootstrap checks.
+      # see: https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks.html
       discovery.type: single-node
+      # Force publishing on the 'elk' overlay.
+      network.publish_host: _eth0_
     networks:
       - elk
     deploy:
......
@@ -3,7 +3,7 @@
 ## https://github.com/elastic/elasticsearch/blob/master/distribution/docker/src/docker/config/elasticsearch.yml
 #
 cluster.name: "docker-cluster"
-network.host: 0.0.0.0
+network.host: _site_
 
 ## X-Pack settings
 ## see https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-xpack.html
......