Streamline notebook tests
kbattocchi committed Apr 5, 2022
1 parent 02618a0 commit b311a14
Showing 24 changed files with 8 additions and 23,875 deletions.
249 changes: 8 additions & 241 deletions azure-pipelines.yml
@@ -6,100 +6,28 @@
trigger:
- main

jobs:
- job: 'EvalChanges'
displayName: 'Analyze changed files to determine which job to run'
pool:
vmImage: 'macOS-10.15'
steps:
# We want to enforce the following rules for PRs:
# * if all modifications are to README.md
# no testing is needed
# * if there are modifications to docs/* or to any code
# then docs need to be built to verify consistency
# * if there are modifications to notebooks/* or to any code
# then notebooks need to be run to verify consistency
# * for any code changes (or changes to metadata files)
# linting and testing should be run
# For a PR build, HEAD will be the merge commit, and we want to diff against the base branch,
# which will be the first parent: HEAD^
# (For non-PR changes, we will always perform all CI tasks)
- powershell: |
if ($env:BUILD_REASON -eq 'PullRequest') {
$editedFiles = git diff HEAD^ --name-only
$editedFiles # echo edited files to enable easier debugging
$codeChanges = $false
$docChanges = $false
$nbChanges = $false
$changeType = "none"
foreach ($file in $editedFiles) {
switch -Wildcard ($file) {
"README.md" { Continue }
"econml/_version.py" { Continue }
"prototypes/*" { Continue }
"images/*" { Continue }
"doc/*" { $docChanges = $true; Continue }
"notebooks/*" { $nbChanges = $true; Continue }
default { $codeChanges = $true; Continue }
}
}
}
Write-Host "##vso[task.setvariable variable=buildDocs;isOutput=true]$(($env:BUILD_REASON -ne 'PullRequest') -or ($docChanges -or $codeChanges))"
Write-Host "##vso[task.setvariable variable=buildNbs;isOutput=true]$(($env:BUILD_REASON -ne 'PullRequest') -or ($nbChanges -or $codeChanges))"
Write-Host "##vso[task.setvariable variable=testCode;isOutput=true]$(($env:BUILD_REASON -ne 'PullRequest') -or $codeChanges)"
name: output
displayName: 'Determine type of code change'
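
Note: the three variables set above with isOutput=true are read by the later jobs in this file as dependencies.EvalChanges.outputs['output.buildDocs'] and so on; the 'output.' prefix comes from the step's name. For reference, a rough Python rendering of the same change-classification rules — illustrative only (the function and constant names here are invented), the pipeline itself runs the PowerShell step shown above:

    # Illustrative sketch of the classification rules above; the real
    # pipeline runs the PowerShell step, not this code.
    from fnmatch import fnmatch

    IGNORED = ["README.md", "econml/_version.py", "prototypes/*", "images/*"]

    def classify(edited_files):
        doc_changes = nb_changes = code_changes = False
        for f in edited_files:
            if any(fnmatch(f, pat) for pat in IGNORED):
                continue  # these paths trigger no CI work
            if fnmatch(f, "doc/*"):
                doc_changes = True
            elif fnmatch(f, "notebooks/*"):
                nb_changes = True
            else:
                code_changes = True
        return {"buildDocs": doc_changes or code_changes,
                "buildNbs": nb_changes or code_changes,
                "testCode": code_changes}
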
- template: azure-pipelines-steps.yml
parameters:
versions: ['3.6']
images: ['ubuntu-18.04']
package: '-e .[all]'
job:
job: 'Docs'
displayName: 'Build documentation'
dependsOn: 'EvalChanges'
condition: eq(dependencies.EvalChanges.outputs['output.buildDocs'], 'True')
steps:
- script: 'sudo apt-get -yq install graphviz'
displayName: 'Install graphviz'

- script: 'pip install sklearn-contrib-lightning'
displayName: 'Install lightning'

- script: 'pip install git+https://github.com/slundberg/shap.git@d1d2700acc0259f211934373826d5ff71ad514de'
displayName: 'Install specific version of shap'

- script: 'pip install sphinx sphinx_rtd_theme'
displayName: 'Install sphinx'

- script: 'python setup.py build_sphinx -W'
displayName: 'Build documentation'

- publish: 'build/sphinx/html'
artifact: 'Documentation'
displayName: 'Publish documentation as artifact'

- script: 'python setup.py build_sphinx -b doctest'
displayName: 'Run doctests'


jobs:
- template: azure-pipelines-steps.yml
parameters:
versions: ['3.8']
images: ['ubuntu-18.04']
package: '-e .[tf,plt]'
job:
job: 'Notebooks_cust'
dependsOn: 'EvalChanges'
condition: eq(dependencies.EvalChanges.outputs['output.buildNbs'], 'True')
displayName: 'Notebooks (Customer Solutions)'
steps:
# Work around https://github.com/pypa/pip/issues/9542
- script: 'pip install -U numpy~=1.21.0'
displayName: 'Upgrade numpy'

- script: 'pip install pytest pytest-runner jupyter jupyter-client nbconvert nbformat seaborn xgboost tqdm && pip list && python setup.py pytest'
# shap 0.39 and sklearn 1.0 interact badly in these notebooks
# shap 0.40 has a bug in waterfall (https://github.com/slundberg/shap/issues/2283) that breaks our main tests
# but fixes the interaction here...
- script: 'pip install -U shap~=0.40.0'
displayName: 'Upgrade shap'

- script: 'pip install pytest pytest-runner jupyter jupyter-client nbconvert nbformat seaborn xgboost tqdm && pip freeze && python setup.py pytest'
displayName: 'Unit tests'
env:
PYTEST_ADDOPTS: '-m "notebook"'
@@ -119,8 +47,6 @@ jobs:
package: '-e .[tf,plt]'
job:
job: 'Notebooks_noncust'
dependsOn: 'EvalChanges'
condition: eq(dependencies.EvalChanges.outputs['output.buildNbs'], 'True')
displayName: 'Notebooks (except Customer Solutions)'
steps:
# Work around https://github.com/pypa/pip/issues/9542
@@ -145,162 +71,3 @@ jobs:
testResultsFiles: '**/test-results.xml'
testRunTitle: 'Notebooks'
condition: succeededOrFailed()


# - job: 'AutoML'
# dependsOn: 'EvalChanges'
# condition: eq(dependencies.EvalChanges.outputs['output.testCode'], 'True')
# variables:
# python.version: '3.6'
# pool:
# vmImage: 'ubuntu-18.04'
# steps:
# - template: azure-pipelines-steps.yml
# parameters:
# body:
# - task: AzureCLI@2
# displayName: 'AutoML tests'
# inputs:
# azureSubscription: 'automl'
# scriptLocation: 'inlineScript'
# scriptType: 'pscore'
# powerShellIgnoreLASTEXITCODE: '' # string for now due to https://github.com/microsoft/azure-pipelines-tasks/issues/12266
# inlineScript: |
# $env:SUBSCRIPTION_ID = az account show --query id -o tsv
# python setup.py pytest
# env:
# WORKSPACE_NAME: 'testWorkspace'
# RESOURCE_GROUP: 'testingAutoMLEconML'
# PYTEST_ADDOPTS: '-m "automl" -n 0'
# COVERAGE_PROCESS_START: 'setup.cfg'

# - task: PublishTestResults@2
# displayName: 'Publish Test Results **/test-results.xml'
# inputs:
# testResultsFiles: '**/test-results.xml'
# testRunTitle: 'AutoML'
# condition: succeededOrFailed()
# package: '.[automl]'

- template: azure-pipelines-steps.yml
parameters:
versions: ['3.8']
images: ['macOS-10.15']
job:
job: 'Linting'
dependsOn: 'EvalChanges'
condition: eq(dependencies.EvalChanges.outputs['output.testCode'], 'True')
steps:
- script: 'pip install pycodestyle && pycodestyle econml'
failOnStderr: true
displayName: Linting

- template: azure-pipelines-steps.yml
parameters:
package: '-e .[tf,plt]'
job:
job: Tests_main
dependsOn: 'EvalChanges'
condition: eq(dependencies.EvalChanges.outputs['output.testCode'], 'True')
displayName: 'Run tests (main)'
steps:
- script: 'pip install pytest pytest-runner && python setup.py pytest'
displayName: 'Unit tests'
env:
PYTEST_ADDOPTS: '-m "not (notebook or automl or dml or serial or cate_api)" -n 2'
COVERAGE_PROCESS_START: 'setup.cfg'
- task: PublishTestResults@2
displayName: 'Publish Test Results **/test-results.xml'
inputs:
testResultsFiles: '**/test-results.xml'
testRunTitle: 'Python $(python.version), image $(imageName)'
condition: succeededOrFailed()

- task: PublishCodeCoverageResults@1
displayName: 'Publish Code Coverage Results'
inputs:
codeCoverageTool: Cobertura
summaryFileLocation: '$(System.DefaultWorkingDirectory)/**/coverage.xml'

- template: azure-pipelines-steps.yml
parameters:
package: '-e .[tf,plt]'
job:
job: Tests_dml
dependsOn: 'EvalChanges'
condition: eq(dependencies.EvalChanges.outputs['output.testCode'], 'True')
displayName: 'Run tests (DML)'
steps:
- script: 'pip install pytest pytest-runner && python setup.py pytest'
displayName: 'Unit tests'
env:
PYTEST_ADDOPTS: '-m "dml"'
COVERAGE_PROCESS_START: 'setup.cfg'
- task: PublishTestResults@2
displayName: 'Publish Test Results **/test-results.xml'
inputs:
testResultsFiles: '**/test-results.xml'
testRunTitle: 'Python $(python.version), image $(imageName)'
condition: succeededOrFailed()

- task: PublishCodeCoverageResults@1
displayName: 'Publish Code Coverage Results'
inputs:
codeCoverageTool: Cobertura
summaryFileLocation: '$(System.DefaultWorkingDirectory)/**/coverage.xml'

- template: azure-pipelines-steps.yml
parameters:
package: '-e .[tf,plt]'
job:
job: Tests_serial
dependsOn: 'EvalChanges'
condition: eq(dependencies.EvalChanges.outputs['output.testCode'], 'True')
displayName: 'Run tests (Serial)'
steps:
- script: 'pip install pytest pytest-runner && python setup.py pytest'
displayName: 'Unit tests'
env:
PYTEST_ADDOPTS: '-m "serial" -n 1'
COVERAGE_PROCESS_START: 'setup.cfg'
- task: PublishTestResults@2
displayName: 'Publish Test Results **/test-results.xml'
inputs:
testResultsFiles: '**/test-results.xml'
testRunTitle: 'Python $(python.version), image $(imageName)'
condition: succeededOrFailed()

- task: PublishCodeCoverageResults@1
displayName: 'Publish Code Coverage Results'
inputs:
codeCoverageTool: Cobertura
summaryFileLocation: '$(System.DefaultWorkingDirectory)/**/coverage.xml'

- template: azure-pipelines-steps.yml
parameters:
package: '-e .[tf,plt]'
job:
job: Tests_CATE_API
dependsOn: 'EvalChanges'
condition: eq(dependencies.EvalChanges.outputs['output.testCode'], 'True')
displayName: 'Run tests (Other)'
steps:
- script: 'pip install pytest pytest-runner'
displayName: 'Install pytest'
- script: 'python setup.py pytest'
displayName: 'CATE Unit tests'
env:
PYTEST_ADDOPTS: '-m "cate_api" -n auto'
COVERAGE_PROCESS_START: 'setup.cfg'
- task: PublishTestResults@2
displayName: 'Publish Test Results **/test-results.xml'
inputs:
testResultsFiles: '**/test-results.xml'
testRunTitle: 'Python $(python.version), image $(imageName)'
condition: succeededOrFailed()

- task: PublishCodeCoverageResults@1
displayName: 'Publish Code Coverage Results'
inputs:
codeCoverageTool: Cobertura
summaryFileLocation: '$(System.DefaultWorkingDirectory)/**/coverage.xml'
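
Note: the -m expressions in the jobs above partition the suite by marker (notebook, automl, dml, serial, cate_api), and -n is a pytest-xdist option controlling worker count (-n 0 disables distribution, -n auto matches CPU count). Current pytest versions warn about unregistered marks; a hypothetical conftest.py registration — the real project may declare these markers in setup.cfg instead:

    # Hypothetical conftest.py registering the markers used to partition
    # the test suites above; econml's actual registration may differ.
    def pytest_configure(config):
        for marker in ("notebook", "automl", "dml", "serial", "cate_api"):
            config.addinivalue_line("markers", marker + ": suite partition marker")
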
