diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 3ee504d..e6a4ffb 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -56,5 +56,5 @@ jobs:
     with:
       stack-name: ${{ matrix.stack-name }}
       python-version: ${{ matrix.python-version }}
-      ref-zenml: ${{ inputs.ref-zenml || 'feature/followup-run-metadata' }}
+      ref-zenml: ${{ inputs.ref-zenml || 'develop' }}
       ref-template: ${{ inputs.ref-template || github.ref }}
diff --git a/README.md b/README.md
index fada305..66bf521 100644
--- a/README.md
+++ b/README.md
@@ -14,14 +14,14 @@ Once you have ZenML installed, you can initialize a new project using this templ
 ```bash
 zenml init --template <short name of template>
-# example: zenml init --template template-nlp
+# example: zenml init --template nlp
 ```
 
 Running the command above will result in input prompts being shown to you. If you would like to rely on default values for the ZenML project template - you can add --template-with-defaults to the same command, like this:
 
 ```bash
 zenml init --template <short name of template> --template-with-defaults
-# example: zenml init --template template-nlp --template-with-defaults
+# example: zenml init --template nlp --template-with-defaults
 ```
diff --git a/requirements.txt b/requirements.txt
index ea98812..b25fbbb 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,3 +2,5 @@ copier
 jinja2-time
 pyyaml-include<2.0
 datasets>=2.12.0,<3.0.0
+# Starting with version 1.6.0 this is incompatible with the metrics computation of the datasets library
+scikit-learn<1.6.0
diff --git a/template/requirements.txt b/template/requirements.txt
index e79245c..e05cb52 100644
--- a/template/requirements.txt
+++ b/template/requirements.txt
@@ -2,3 +2,4 @@ torchvision
 gradio
 zenml[server]>=0.56.3
 datasets>=2.12.0,<3.0.0
+scikit-learn<1.6.0
\ No newline at end of file
diff --git a/template/utils/misc.py b/template/utils/misc.py
index a133a73..96760cd 100644
--- a/template/utils/misc.py
+++ b/template/utils/misc.py
@@ -17,7 +17,7 @@ def compute_metrics(eval_pred: Tuple[np.ndarray, np.ndarray]) -> Dict[str, float
     """
     logits, labels = eval_pred
     predictions = np.argmax(logits, axis=-1)
-    # calculate the mertic using the predicted and true value
+    # calculate the metric using the predicted and true value
     accuracy = load_metric("accuracy", trust_remote_code=True).compute(
         predictions=predictions, references=labels
     )
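
For context on the new `scikit-learn<1.6.0` pin: the template's `compute_metrics` goes through `datasets.load_metric("accuracy")`, whose bundled metric script computes the score with scikit-learn, which is the code path the diff's comment says breaks with scikit-learn 1.6.0. Below is a minimal sketch of that path; the dummy logits/labels and the standalone invocation are illustrative only and not part of this diff.

```python
# Minimal sketch (illustrative, not part of this diff): the code path in
# template/utils/misc.py that the scikit-learn<1.6.0 pin keeps working.
import numpy as np
from datasets import load_metric  # available in datasets<3.0.0, per the existing pin


def compute_metrics(eval_pred):
    logits, labels = eval_pred
    predictions = np.argmax(logits, axis=-1)
    # the "accuracy" metric script computes the score via scikit-learn under the hood,
    # which is what the new scikit-learn<1.6.0 requirement is guarding
    return load_metric("accuracy", trust_remote_code=True).compute(
        predictions=predictions, references=labels
    )


# hypothetical dummy inputs, just to show the expected shapes
dummy_logits = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
dummy_labels = np.array([1, 0, 0])
print(compute_metrics((dummy_logits, dummy_labels)))  # e.g. {'accuracy': 0.666...}
```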