diff --git a/Jenkinsfile.build b/Jenkinsfile.build
index e708904..686f928 100644
--- a/Jenkinsfile.build
+++ b/Jenkinsfile.build
@@ -1,15 +1,25 @@
-// one job per arch (for now) that just builds "the top thing" (triggered by the meta-update job)
+// any number of jobs per arch that build the specified buildId (triggered by the respective trigger job)
 properties([
-	disableConcurrentBuilds(),
+	// limited to one job per buildId so that the same build cannot run concurrently
+	throttleJobProperty(
+		limitOneJobWithMatchingParams: true,
+		paramsToUseForLimit: 'buildId',
+		throttleEnabled: true,
+		throttleOption: 'project',
+	),
 	disableResume(),
 	durabilityHint('PERFORMANCE_OPTIMIZED'),
 	parameters([
 		string(name: 'buildId', trim: true),
+		string(name: 'identifier', trim: true, description: '(optional) used to set currentBuild.displayName to a meaningful value earlier'),
 	]),
 ])
 
 env.BASHBREW_ARCH = env.JOB_NAME.minus('/build').split('/')[-1] // "windows-amd64", "arm64v8", etc
 env.BUILD_ID = params.buildId
+if (params.identifier) {
+	currentBuild.displayName = params.identifier + ' (#' + currentBuild.number + ')'
+}
 
 node('multiarch-' + env.BASHBREW_ARCH) { ansiColor('xterm') {
 	stage('Checkout') {
diff --git a/Jenkinsfile.trigger b/Jenkinsfile.trigger
index 5467a77..3b8b634 100644
--- a/Jenkinsfile.trigger
+++ b/Jenkinsfile.trigger
@@ -102,69 +102,100 @@ if (breakEarly) { return } // thanks Jenkins...
 
 // new data to be added to the past-jobs.json
 // { lastTime: unixTimestamp, url: "" }
-def buildCompletionData = [:]
-
-for (buildObj in queue) {
-	stage(buildObj.identifier) {
-		//def json = writeJSON(json: buildObj, returnText: true)
-		//echo(json) // for debugging/data purposes
-
-		// "catchError" to set "stageResult" :(
-		catchError(message: 'Build of "' + buildObj.identifier + '" failed', buildResult: 'UNSTABLE', stageResult: 'FAILURE') {
-			if (buildObj.gha_payload) {
-				node {
-					withEnv([
-						'payload=' + buildObj.gha_payload,
-					]) {
-						withCredentials([
-							string(
-								variable: 'GH_TOKEN',
-								credentialsId: 'github-access-token-docker-library-bot-meta',
-							),
+buildCompletionData = [:]
+
+// list of closures that we can use to wait on the jobs
+def waitQueue = [:]
+def waitQueueClosure(identifier, buildId, externalizableId) {
+	return {
+		stage(identifier) {
+			// "catchError" to set "stageResult" :(
+			catchError(message: 'Build of "' + identifier + '" failed', buildResult: 'UNSTABLE', stageResult: 'FAILURE') {
+				def res = waitForBuild(
+					runId: externalizableId,
+					propagateAbort: true, // allow cancelling this job to cancel all the triggered jobs
+				)
+				buildCompletionData[buildId] = [
+					lastTime: (res.startTimeInMillis + res.duration) / 1000, // convert to seconds
+					url: res.absoluteUrl,
+				]
+				if (res.result != 'SUCCESS') {
+					// set stage result via catchError
+					error(res.result)
+				}
+			}
+		}
+	}
+}
+
+// stage to wrap up all the build job triggers that get waited on later
+stage('trigger') {
+	for (buildObj in queue) {
+		if (buildObj.gha_payload) {
+			stage(buildObj.identifier) {
+				// "catchError" to set "stageResult" :(
+				catchError(message: 'Build of "' + buildObj.identifier + '" failed', buildResult: 'UNSTABLE', stageResult: 'FAILURE') {
+					node {
+						withEnv([
+							'payload=' + buildObj.gha_payload,
 						]) {
-							sh '''
-								set -u +x
-
-								# https://docs.github.com/en/free-pro-team@latest/rest/actions/workflows?apiVersion=2022-11-28#create-a-workflow-dispatch-event
-								curl -fL \
-									-X POST \
-									-H 'Accept: application/vnd.github+json' \
-									-H "Authorization: Bearer $GH_TOKEN" \
-									-H 'X-GitHub-Api-Version: 2022-11-28' \
-									https://api.github.com/repos/docker-library/meta/actions/workflows/build.yml/dispatches \
-									-d "$payload"
-							'''
+							withCredentials([
+								string(
+									variable: 'GH_TOKEN',
+									credentialsId: 'github-access-token-docker-library-bot-meta',
+								),
+							]) {
+								sh '''
+									set -u +x
+
+									# https://docs.github.com/en/free-pro-team@latest/rest/actions/workflows?apiVersion=2022-11-28#create-a-workflow-dispatch-event
+									curl -fL \
+										-X POST \
+										-H 'Accept: application/vnd.github+json' \
+										-H "Authorization: Bearer $GH_TOKEN" \
+										-H 'X-GitHub-Api-Version: 2022-11-28' \
+										https://api.github.com/repos/docker-library/meta/actions/workflows/build.yml/dispatches \
+										-d "$payload"
+								'''
+							}
 						}
+						// record that GHA was triggered (for tracking continued triggers that fail to push an image)
+						buildCompletionData[buildObj.buildId] = [
+							lastTime: System.currentTimeMillis() / 1000, // convert to seconds
+							url: currentBuild.absoluteUrl,
+						]
 					}
-					// record that GHA was triggered (for tracking continued triggers that fail to push an image)
-					buildCompletionData[buildObj.buildId] = [
-						lastTime: System.currentTimeMillis() / 1000, // convert to seconds
-						url: currentBuild.absoluteUrl,
-					]
 				}
-			} else {
+			}
+		} else {
+			// "catchError" to set "stageResult" :(
+			catchError(message: 'Build of "' + buildObj.identifier + '" failed', buildResult: 'UNSTABLE', stageResult: 'FAILURE') {
+
+				// why not parallel these build() invocations?
+				// jenkins parallel closures get started in a randomish order, ruining our sorted queue
 				def res = build(
 					job: 'build',
 					parameters: [
 						string(name: 'buildId', value: buildObj.buildId),
+						string(name: 'identifier', value: buildObj.identifier),
 					],
 					propagate: false,
-					quietPeriod: 5, // seconds
+					// trigger these quickly so they all get added to the Jenkins queue in "queue" order (also, using "waitForStart" means we would have to wait out the entire "quietPeriod" before we could move on and schedule more)
+					quietPeriod: 0, // seconds
+					// we'll wait on the builds in parallel after they are all queued (so our sorted order is the queue order)
+					waitForStart: true,
 				)
-				// record the job failure
-				buildCompletionData[buildObj.buildId] = [
-					lastTime: (res.startTimeInMillis + res.duration) / 1000, // convert to seconds
-					url: res.absoluteUrl,
-				]
-				if (res.result != 'SUCCESS') {
-					// set stage result via catchError
-					error(res.result)
-				}
+				waitQueue[buildObj.identifier] = waitQueueClosure(buildObj.identifier, buildObj.buildId, res.externalizableId)
 			}
 		}
 	}
 }
 
+// wait on all the 'build' jobs that were queued
+if (waitQueue.size() > 0) {
+	parallel waitQueue
+}
+
 // save currentJobs so we can use it next run as pastJobs
 node {
 	def buildCompletionDataJson = writeJSON(json: buildCompletionData, returnText: true)
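Reviewer note: the heart of this change is the split between *triggering* builds in a deterministic order and *waiting* on them in parallel. The sketch below distills that flow under a couple of assumptions: it relies on the Pipeline: Build Step plugin (which provides `build`'s `waitForStart: true` mode and the `waitForBuild` step, both used in the patch), and `items`/`makeWaiter` are illustrative names, not identifiers from this repo.

```groovy
// minimal sketch of the trigger-then-wait pattern; "items" and "makeWaiter" are illustrative
def makeWaiter(String runId) {
	return {
		// block until the previously started downstream run finishes
		def res = waitForBuild(runId: runId, propagateAbort: true)
		if (res.result != 'SUCCESS') {
			error(res.result) // propagate the downstream result to this parallel branch
		}
	}
}

def waits = [:]
for (item in items) {
	// quietPeriod: 0 enqueues immediately, and waitForStart: true returns as soon as the
	// downstream build starts, so builds enter the Jenkins queue in our sorted loop order
	def started = build(
		job: 'build',
		parameters: [string(name: 'buildId', value: item.buildId)],
		quietPeriod: 0,
		waitForStart: true,
	)
	waits[item.identifier] = makeWaiter(started.externalizableId)
}
parallel waits // the waits can overlap freely; the start order was already fixed above
```

Running the `build()` calls themselves under `parallel` would start branches in an arbitrary order (as the patch comment notes), which is why ordering is pinned during the serial loop and only the waiting is parallelized.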
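One non-obvious detail: `waitQueueClosure` is a separate method rather than a closure built inline in the loop. My reading (an assumption; the patch does not state it) is that this guards against Groovy's loop-variable capture behavior, where closures created in a `for (x in ...)` loop share a single binding and all see its final value. A contrived, self-contained demo in plain Groovy:

```groovy
// contrived demo of the capture pitfall; names here are made up for illustration
def broken = [:]
for (name in ['a', 'b', 'c']) {
	broken[name] = { println name } // all three closures share the one "name" binding
}
broken.each { k, c -> c() } // prints "c" three times

// passing the value through a function parameter gives each closure its own binding
def capture(value) {
	return { println value }
}
def fixed = [:]
for (name in ['a', 'b', 'c']) {
	fixed[name] = capture(name)
}
fixed.each { k, c -> c() } // prints "a", "b", "c"
```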