-
Notifications
You must be signed in to change notification settings - Fork 5
194 lines (167 loc) · 6.38 KB
/
deploy-docker.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
---
# Workflow: build the docker-based SRAM deployment, run it twice to check
# idempotency, then run a Selenium login test against SBS.
name: "deploy-docker"

# Controls when the action will run.
on:
  # Triggers the workflow on push or pull request events
  push:
  pull_request:
  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:

# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
  deploy_docker:
    # The type of runner that the job will run on
    runs-on: "ubuntu-24.04"
    # Steps represent a sequence of tasks that will be executed as part of the job
    steps:
      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
      - name: Checkout $GITHUB_WORKSPACE
        uses: actions/checkout@v4

      - name: Update and install stuff
        run: |
          sudo rm -f /opt/pipx_bin/ansible*
          sudo apt -q update
          sudo apt -y install ansible bridge-utils apparmor-utils wget \
              python3-pip python3-setuptools python3-wheel \
              python3-venv yamllint python3-jinja2 python3-yaml \
              python3-selenium python3-requests python3-bs4
          sudo pip3 install --break-system-packages webdriver-manager
          git clone https://github.com/dw/mitogen.git ~/mitogen
          wget --quiet https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb
          # -y added: without it apt may prompt for confirmation and hang the runner
          sudo apt -y install ./google-chrome-stable_current_amd64.deb

      # Move docker's data root onto /mnt (more free space on hosted runners)
      - name: Fix docker crap
        run: |
          cat docker/hosts | sudo tee -a /etc/hosts
          #sudo docker system prune -f
          sudo systemctl stop docker
          sudo mv /var/lib/docker /var/lib/docker.old # faster than removing
          sudo mkdir -p /mnt/docker
          sudo ln -s /mnt/docker /var/lib
          sudo systemctl start docker

      # See https://github.com/actions/cache/blob/main/tips-and-workarounds.md#update-a-cache
      - name: Setup cache
        id: cache-docker
        uses: actions/cache@v4
        with:
          path: ~/docker-save
          key: docker-save-${{ github.run_id }}
          restore-keys: docker-save-

      # Useful for debugging
      - name: Show versions
        run: |
          python3 --version
          ansible --version

      # apparmor is by default enabled for binaries _in_ the containers and is restricting
      # mysql from reading tls keys
      - name: Disable apparmor
        run: |
          sudo aa-status --json | jq '
            .profiles | to_entries
            | map( select( (.key | startswith("/") ) and .value=="enforce") | .key )
            | .[]
          ' | grep -vE 'totem|pidgin|passt' | \
          while read -r profile; do
            sudo aa-complain "$profile" || true
          done

      # Install Ansible modules
      - name: Install Ansible modules
        run: |
          ansible-galaxy role install -r requirements.yml
          ansible-galaxy collection install -r requirements.yml

      # Restore docker cache (best-effort: the tarball is absent on a cold cache)
      - name: Restore docker cache
        run: docker load -i ~/docker-save/scz-base-cache.tar || true

      # Get scz-base-cache ImageID (empty string when the cache was cold)
      - name: Get scz-base-cache ImageID
        id: scz-base-cache-id
        # FIX: this was a folded scalar (>) with no command separator, which
        # collapsed both lines into a single command — `$ID` was expanded
        # before the env-prefixed echo ran, so the output was always empty and
        # the "Save docker cache" condition below never compared real IDs.
        run: |
          ID=$(docker image list scz-base-cache -q)
          echo "ID=$ID" >> $GITHUB_OUTPUT

      # Start containers without deploy (builds the scz-base image)
      - name: Start containers
        shell: bash
        env:
          SKIP_ANSIBLE: 1
        run: "./start-vm --container"

      # Get scz-base ImageID (folded scalar is safe here: lines joined by &&)
      - name: Get scz-base ImageID
        id: scz-base-id
        run: >
          ID=$(docker image list scz-base -q) &&
          echo "ID=$ID" >> $GITHUB_OUTPUT

      # Save newly created containers to docker cache, but only when the base
      # image actually changed; `|| true` keeps this step best-effort.
      - name: Save docker cache
        run: >
          mkdir -p ~/docker-save &&
          docker tag scz-base scz-base-cache &&
          docker save scz-base-cache -o ~/docker-save/scz-base-cache.tar &&
          ls -lh ~/docker-save || true
        if: steps.scz-base-cache-id.outputs.ID != steps.scz-base-id.outputs.ID

      # Deploy components
      - name: Run start-vm
        shell: bash
        run: "./start-vm --container"

      # Deploy components again for idempotency
      # (renamed: previously duplicated the step name above, which made the
      # two runs indistinguishable in the Actions UI and logs)
      - name: Run start-vm (idempotency pass)
        env:
          # NOTE(review): spelling "REEANTRANT" kept byte-for-byte — it is
          # whatever ./start-vm consumes; confirm against the script before
          # renaming to REENTRANT.
          REEANTRANT: 1
        run: "./start-vm --container --diff --tags=common"

      - name: Run idempotency check...
        run: /usr/bin/python3 ./scripts/check-idempotency-status

      # Run SBS logintest
      - name: Run SBS logintest
        run: /usr/bin/python3 ./scripts/sbs-login.py

      # The remaining steps only run on failure, to aid debugging.
      - name: Save screenshot of error
        uses: actions/upload-artifact@v4
        with:
          name: "sbs-logintest"
          path: |
            screenshot.png
            page.html
            console.txt
        if: failure()

      - name: Show docker status
        run: |
          ansible -v -i environments/docker/inventory --become "docker" \
            -m command -a "/usr/bin/docker ps -a"
        if: failure()

      # Dump the last log lines of every container inside both docker-in-docker hosts
      - name: Show docker logs
        run: |
          docker exec -ti docker-docker1-1 /bin/sh -c '
            docker ps -q | while read c; do
              name=$(docker inspect --format "{{.Name}} ({{.Id}})" $c);
              echo -e "\n\n========\n== $name\n==========\n";
              docker logs -n 10 $c 2>/dev/null;
            done
          '
          docker exec -ti docker-docker2-1 /bin/sh -c '
            docker ps -q | while read c; do
              name=$(docker inspect --format "{{.Name}} ({{.Id}})" $c);
              echo -e "\n\n========\n== $name\n==========\n";
              docker logs -n 10 $c 2>/dev/null;
            done
          '
        if: failure()

      - name: Show sbs logs
        run: |
          ansible -v -i environments/docker/inventory --become "docker" \
            -m command -a "/bin/sh -c '
              if test -e /opt/sram/sbs/log/sbs.log; then
                echo ===sbs log===;
                cat /opt/sram/sbs/log/sbs.log;
              else
                echo ===no sbs log===;
              fi
            '"
        if: failure()

      # Setup tmate session for interactive debugging of failed runs.
      # timeout-minutes is a step-level key (GitHub's own step timeout), not
      # an input of action-tmate — it must sit outside `with:`.
      - name: Setup tmate session
        uses: mxschmitt/action-tmate@v3
        timeout-minutes: 60
        with:
          limit-access-to-actor: true
        if: failure()
      #   env:
      #     ACTIONS_STEP_DEBUG: ${{ secrets.ACTIONS_STEP_DEBUG }}
      #   if: ${{ failure() && env.ACTIONS_STEP_DEBUG == 'true' }}