Skip to content

Commit

Permalink
Add ability for parallel Jenkins multi-arch builds
Browse files Browse the repository at this point in the history
Trigger all Jenkins builds (adding them to the Jenkins queue) and then wait on them in parallel. We control the concurrency via the number of executors per arch (or by adding multiple machines that match the build label).
  • Loading branch information
yosifkit committed Nov 25, 2024
1 parent d7eadbd commit 33386da
Show file tree
Hide file tree
Showing 2 changed files with 82 additions and 43 deletions.
14 changes: 12 additions & 2 deletions Jenkinsfile.build
Original file line number Diff line number Diff line change
@@ -1,15 +1,25 @@
// one job per arch (for now) that just builds "the top thing" (triggered by the meta-update job)
// any number of jobs per arch that build the specified buildId (triggered by the respective trigger job)
properties([
disableConcurrentBuilds(),
// limited by one job per buildId so that the same build cannot run concurrently
throttleJobProperty(
limitOneJobWithMatchingParams: true,
paramsToUseForLimit: 'buildId',
throttleEnabled: true,
throttleOption: 'project',
),
disableResume(),
durabilityHint('PERFORMANCE_OPTIMIZED'),
parameters([
string(name: 'identifier', trim: true),
string(name: 'buildId', trim: true),
]),
])

env.BASHBREW_ARCH = env.JOB_NAME.minus('/build').split('/')[-1] // "windows-amd64", "arm64v8", etc
env.BUILD_ID = params.buildId
if (params.identifier) {
currentBuild.displayName = params.identifier + ' (#' + currentBuild.number + ')'
}

node('multiarch-' + env.BASHBREW_ARCH) { ansiColor('xterm') {
stage('Checkout') {
Expand Down
111 changes: 70 additions & 41 deletions Jenkinsfile.trigger
Original file line number Diff line number Diff line change
Expand Up @@ -102,69 +102,98 @@ if (breakEarly) { return } // thanks Jenkins...

// new data to be added to the past-jobs.json
// { lastTime: unixTimestamp, url: "" }
def buildCompletionData = [:]
buildCompletionData = [:]

for (buildObj in queue) {
stage(buildObj.identifier) {
//def json = writeJSON(json: buildObj, returnText: true)
//echo(json) // for debugging/data purposes
// list of closures that we can use to wait for the jobs on.
def waitQueue = [:]
def addToWait(identifier, buildId, externalizableId) {
return {
// "catchError" to set "stageResult" :(
catchError(message: 'Build of "' + identifier + '" failed', buildResult: 'UNSTABLE', stageResult: 'FAILURE') {
stage(identifier) {
def res = waitForBuild(
runId: externalizableId,
propagateAbort: true, // allow cancelling this job to cancel all the triggered jobs
)
buildCompletionData[buildId] = [
lastTime: (res.startTimeInMillis + res.duration) / 1000, // convert to seconds
url: res.absoluteUrl,
]
if (res.result != 'SUCCESS') {
// set stage result via catchError
error(res.result)
}
}
}
}
}

// stage to wrap up all the build job triggers that get waited on later
stage('trigger') {
for (int i = 0; i < queue.size(); i++) {
def buildObj = queue[i]

// "catchError" to set "stageResult" :(
catchError(message: 'Build of "' + buildObj.identifier + '" failed', buildResult: 'UNSTABLE', stageResult: 'FAILURE') {
if (buildObj.gha_payload) {
node {
withEnv([
'payload=' + buildObj.gha_payload,
]) {
withCredentials([
string(
variable: 'GH_TOKEN',
credentialsId: 'github-access-token-docker-library-bot-meta',
),
stage(buildObj.identifier) {
node {
withEnv([
'payload=' + buildObj.gha_payload,
]) {
sh '''
set -u +x
# https://docs.github.com/en/free-pro-team@latest/rest/actions/workflows?apiVersion=2022-11-28#create-a-workflow-dispatch-event
curl -fL \
-X POST \
-H 'Accept: application/vnd.github+json' \
-H "Authorization: Bearer $GH_TOKEN" \
-H 'X-GitHub-Api-Version: 2022-11-28' \
https://api.github.com/repos/docker-library/meta/actions/workflows/build.yml/dispatches \
-d "$payload"
'''
withCredentials([
string(
variable: 'GH_TOKEN',
credentialsId: 'github-access-token-docker-library-bot-meta',
),
]) {
sh '''
set -u +x
# https://docs.github.com/en/free-pro-team@latest/rest/actions/workflows?apiVersion=2022-11-28#create-a-workflow-dispatch-event
curl -fL \
-X POST \
-H 'Accept: application/vnd.github+json' \
-H "Authorization: Bearer $GH_TOKEN" \
-H 'X-GitHub-Api-Version: 2022-11-28' \
https://api.github.com/repos/docker-library/meta/actions/workflows/build.yml/dispatches \
-d "$payload"
'''
}
}
// record that GHA was triggered (for tracking continued triggers that fail to push an image)
buildCompletionData[buildObj.buildId] = [
lastTime: System.currentTimeMillis() / 1000, // convert to seconds
url: currentBuild.absoluteUrl,
]
}
// record that GHA was triggered (for tracking continued triggers that fail to push an image)
buildCompletionData[buildObj.buildId] = [
lastTime: System.currentTimeMillis() / 1000, // convert to seconds
url: currentBuild.absoluteUrl,
]
}
} else {
// why not parallel these build() invocations?
// jenkins parallel closures get started in a randomish order, ruining our sorted queue
def res = build(
job: 'build',
parameters: [
string(name: 'identifier', value: buildObj.identifier),
string(name: 'buildId', value: buildObj.buildId),
],
propagate: false,
quietPeriod: 5, // seconds
// trigger these quickly so they all get added to Jenkins queue in "queue" order
quietPeriod: 0, // seconds
// we'll wait on them after they are all queued
waitForStart: true,
)
// record the job failure
buildCompletionData[buildObj.buildId] = [
lastTime: (res.startTimeInMillis + res.duration) / 1000, // convert to seconds
url: res.absoluteUrl,
]
if (res.result != 'SUCCESS') {
// set stage result via catchError
error(res.result)
}
waitQueue[buildObj.identifier] = addToWait(buildObj.identifier, buildObj.buildId, res.externalizableId)
}
}
}
}

// wait on all the 'build' jobs that were queued
if (waitQueue.size() > 0) {
parallel waitQueue
}

// save currentJobs so we can use it next run as pastJobs
node {
def buildCompletionDataJson = writeJSON(json: buildCompletionData, returnText: true)
Expand Down

0 comments on commit 33386da

Please sign in to comment.