From 31503a9c3b4cf12158c1508cf9ab90c954b3f2ff Mon Sep 17 00:00:00 2001 From: mcollardanuy Date: Mon, 31 Jul 2023 13:04:35 +0000 Subject: [PATCH] Update example notebooks --- examples/load_use_ner_model.ipynb | 2 +- examples/run_pipeline_basic.ipynb | 78 +++++++++++++++++-- examples/run_pipeline_deezy_mostpopular.ipynb | 22 +----- .../run_pipeline_deezy_reldisamb+wmtops.ipynb | 20 +---- ...ipeline_deezy_reldisamb+wpubl+wmtops.ipynb | 25 +----- .../run_pipeline_deezy_reldisamb+wpubl.ipynb | 27 +------ examples/run_pipeline_modular.ipynb | 10 ++- .../run_pipeline_perfect_mostpopular.ipynb | 16 ---- examples/train_use_deezy_model_1.ipynb | 7 +- examples/train_use_deezy_model_2.ipynb | 14 +--- examples/train_use_deezy_model_3.ipynb | 16 +--- examples/train_use_ner_model.ipynb | 4 +- 12 files changed, 100 insertions(+), 141 deletions(-) diff --git a/examples/load_use_ner_model.ipynb b/examples/load_use_ner_model.ipynb index 750dd06d..6890cfd9 100644 --- a/examples/load_use_ner_model.ipynb +++ b/examples/load_use_ner_model.ipynb @@ -7,7 +7,7 @@ "source": [ "# Loading and using a NER model\n", "\n", - "This notebook shows how to load an existing named entity recognition (NER) model from the HuggingFace hub.\n", + "This notebook shows how to load an existing named entity recognition (NER) model from the HuggingFace hub, using T-Res.\n", "\n", "We start by importing some libraries, and the `recogniser` script from the `geoparser` folder:" ] diff --git a/examples/run_pipeline_basic.ipynb b/examples/run_pipeline_basic.ipynb index fe25430a..17417872 100644 --- a/examples/run_pipeline_basic.ipynb +++ b/examples/run_pipeline_basic.ipynb @@ -28,7 +28,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Once the `pipeline` script has been imported (in the previous cell), we create a new object of the `Pipeline` class. 
Since we don't pass any parameters, it will take all the default values: it will detect toponyms using the fine-grained tagset, it will find candidates using the perfect match approach, and will disambiguate them using the most popular approach. You can see the default `Pipeline` values [here](https://github.com/Living-with-machines/toponym-resolution/blob/main/geoparser/pipeline.py)." + "Once the `pipeline` script has been imported (in the previous cell), we create a new object of the `Pipeline` class. Since we don't pass any parameters, it will take all the default values: it will detect toponyms using `Livingwithmachines/toponym-19thC-en` NER model, it will find candidates using the perfect match approach, and will disambiguate them using the most popular approach. You can see the default `Pipeline` values [here](https://living-with-machines.github.io/T-Res/reference/geoparser/pipeline.html)." ] }, { @@ -40,6 +40,13 @@ "geoparser = pipeline.Pipeline()" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Using the pipeline: end-to-end" + ] + }, { "attachments": {}, "cell_type": "markdown", @@ -54,10 +61,8 @@ "metadata": {}, "outputs": [], "source": [ - "resolved = geoparser.run_text(\"A remarkable case of rattening has just occurred in the building trade at Shefrield, but also in Lancaster. Not in Nottingham though. 
Not in Ashton either, nor in Salop!\")\n", - " \n", - "for r in resolved:\n", - " print(r)" + "resolved = geoparser.run_text(\"A remarkable case of rattening has just occurred in the building trade at Sheffield.\")\n", + "print(resolved)" ] }, { @@ -67,8 +72,67 @@ "outputs": [], "source": [ "resolved = geoparser.run_sentence(\"A remarkable case of rattening has just occurred in the building trade at Sheffield.\")\n", - "for r in resolved:\n", - " print(r)" + "print(resolved)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Using the pipeline: step-wise" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Instead of using the end-to-end pipeline, the pipeline can be used step-wise.\n", + "\n", + "Therefore, it can be used to just perform toponym recognition (i.e. NER):" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mentions = geoparser.run_text_recognition(\"A remarkable case of rattening has just occurred in the building trade at Sheffield.\")\n", + "print(mentions)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The pipeline can then be used to just perform candidate selection given the output of NER:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "candidates = geoparser.run_candidate_selection(mentions)\n", + "print(candidates)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And finally, the pipeline can be used to perform entity disambiguation, given the output from the previous two steps:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "disamb_output = geoparser.run_disambiguation(mentions, candidates)\n", + "print(disamb_output)" ] } ], diff --git a/examples/run_pipeline_deezy_mostpopular.ipynb b/examples/run_pipeline_deezy_mostpopular.ipynb index 
4c7f9f5a..8392dd90 100644 --- a/examples/run_pipeline_deezy_mostpopular.ipynb +++ b/examples/run_pipeline_deezy_mostpopular.ipynb @@ -32,8 +32,6 @@ "myranker = ranking.Ranker(\n", " method=\"deezymatch\",\n", " resources_path=\"../resources/wikidata/\",\n", - " mentions_to_wikidata=dict(),\n", - " wikidata_to_mentions=dict(),\n", " strvar_parameters={\n", " # Parameters to create the string pair dataset:\n", " \"ocr_threshold\": 60,\n", @@ -52,9 +50,8 @@ " \"dm_output\": \"deezymatch_on_the_fly\",\n", " # Ranking measures:\n", " \"ranking_metric\": \"faiss\",\n", - " \"selection_threshold\": 25,\n", - " \"num_candidates\": 3,\n", - " \"search_size\": 3,\n", + " \"selection_threshold\": 50,\n", + " \"num_candidates\": 1,\n", " \"verbose\": False,\n", " # DeezyMatch training:\n", " \"overwrite_training\": False,\n", @@ -72,9 +69,6 @@ "mylinker = linking.Linker(\n", " method=\"mostpopular\",\n", " resources_path=\"../resources/\",\n", - " linking_resources=dict(),\n", - " rel_params=dict(),\n", - " overwrite_training=False,\n", ")" ] }, @@ -87,18 +81,6 @@ "geoparser = pipeline.Pipeline(myranker=myranker, mylinker=mylinker)" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "resolved = geoparser.run_text(\"A remarkable case of rattening has just occurred in the building trade at Shefrield, but also in Lancaster. Not in Nottingham though. 
Not in Ashton either, nor in Salop!\")\n", - " \n", - "for r in resolved:\n", - " print(r)" - ] - }, { "cell_type": "code", "execution_count": null, diff --git a/examples/run_pipeline_deezy_reldisamb+wmtops.ipynb b/examples/run_pipeline_deezy_reldisamb+wmtops.ipynb index 6df6aaa7..b79ec83d 100644 --- a/examples/run_pipeline_deezy_reldisamb+wmtops.ipynb +++ b/examples/run_pipeline_deezy_reldisamb+wmtops.ipynb @@ -35,18 +35,7 @@ "myranker = ranking.Ranker(\n", " method=\"deezymatch\",\n", " resources_path=\"../resources/wikidata/\",\n", - " mentions_to_wikidata=dict(),\n", - " wikidata_to_mentions=dict(),\n", - " strvar_parameters={\n", - " # Parameters to create the string pair dataset:\n", - " \"ocr_threshold\": 60,\n", - " \"top_threshold\": 85,\n", - " \"min_len\": 5,\n", - " \"max_len\": 15,\n", - " \"w2v_ocr_path\": str(Path(\"../resources/models/w2v/\").resolve()),\n", - " \"w2v_ocr_model\": \"w2v_*_news\",\n", - " \"overwrite_dataset\": False,\n", - " },\n", + " strvar_parameters=dict(),\n", " deezy_parameters={\n", " # Paths and filenames of DeezyMatch models and data:\n", " \"dm_path\": str(Path(\"../resources/deezymatch/\").resolve()),\n", @@ -55,9 +44,8 @@ " \"dm_output\": \"deezymatch_on_the_fly\",\n", " # Ranking measures:\n", " \"ranking_metric\": \"faiss\",\n", - " \"selection_threshold\": 25,\n", - " \"num_candidates\": 3,\n", - " \"search_size\": 3,\n", + " \"selection_threshold\": 50,\n", + " \"num_candidates\": 1,\n", " \"verbose\": False,\n", " # DeezyMatch training:\n", " \"overwrite_training\": False,\n", @@ -77,12 +65,10 @@ " mylinker = linking.Linker(\n", " method=\"reldisamb\",\n", " resources_path=\"../resources/\",\n", - " linking_resources=dict(),\n", " rel_params={\n", " \"model_path\": \"../resources/models/disambiguation/\",\n", " \"data_path\": \"../experiments/outputs/data/lwm/\",\n", " \"training_split\": \"originalsplit\",\n", - " \"context_length\": 100,\n", " \"db_embeddings\": cursor,\n", " \"with_publication\": False,\n", " 
\"without_microtoponyms\": True,\n", diff --git a/examples/run_pipeline_deezy_reldisamb+wpubl+wmtops.ipynb b/examples/run_pipeline_deezy_reldisamb+wpubl+wmtops.ipynb index f2f4cff2..f7b9ec99 100644 --- a/examples/run_pipeline_deezy_reldisamb+wpubl+wmtops.ipynb +++ b/examples/run_pipeline_deezy_reldisamb+wpubl+wmtops.ipynb @@ -35,8 +35,6 @@ "myranker = ranking.Ranker(\n", " method=\"deezymatch\",\n", " resources_path=\"../resources/wikidata/\",\n", - " mentions_to_wikidata=dict(),\n", - " wikidata_to_mentions=dict(),\n", " strvar_parameters={\n", " # Parameters to create the string pair dataset:\n", " \"ocr_threshold\": 60,\n", @@ -55,9 +53,8 @@ " \"dm_output\": \"deezymatch_on_the_fly\",\n", " # Ranking measures:\n", " \"ranking_metric\": \"faiss\",\n", - " \"selection_threshold\": 25,\n", - " \"num_candidates\": 3,\n", - " \"search_size\": 3,\n", + " \"selection_threshold\": 50,\n", + " \"num_candidates\": 1,\n", " \"verbose\": False,\n", " # DeezyMatch training:\n", " \"overwrite_training\": False,\n", @@ -77,12 +74,10 @@ " mylinker = linking.Linker(\n", " method=\"reldisamb\",\n", " resources_path=\"../resources/\",\n", - " linking_resources=dict(),\n", " rel_params={\n", " \"model_path\": \"../resources/models/disambiguation/\",\n", " \"data_path\": \"../experiments/outputs/data/lwm/\",\n", " \"training_split\": \"originalsplit\",\n", - " \"context_length\": 100,\n", " \"db_embeddings\": cursor,\n", " \"with_publication\": True,\n", " \"without_microtoponyms\": True,\n", @@ -103,22 +98,6 @@ "geoparser = pipeline.Pipeline(myranker=myranker, mylinker=mylinker)" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "resolved = geoparser.run_text(\n", - " \"A remarkable case of rattening has just occurred in the building trade next to the Market-street of Shefrield, but also in Lancaster. Not in Nottingham though. 
Not in Ashton either, nor in Salop!\", \n", - " place=\"Manchester\", \n", - " place_wqid=\"Q18125\"\n", - ")\n", - " \n", - "for r in resolved:\n", - " print(r)" - ] - }, { "cell_type": "code", "execution_count": null, diff --git a/examples/run_pipeline_deezy_reldisamb+wpubl.ipynb b/examples/run_pipeline_deezy_reldisamb+wpubl.ipynb index ad475493..3375ce41 100644 --- a/examples/run_pipeline_deezy_reldisamb+wpubl.ipynb +++ b/examples/run_pipeline_deezy_reldisamb+wpubl.ipynb @@ -35,18 +35,7 @@ "myranker = ranking.Ranker(\n", " method=\"deezymatch\",\n", " resources_path=\"../resources/wikidata/\",\n", - " mentions_to_wikidata=dict(),\n", - " wikidata_to_mentions=dict(),\n", - " strvar_parameters={\n", - " # Parameters to create the string pair dataset:\n", - " \"ocr_threshold\": 60,\n", - " \"top_threshold\": 85,\n", - " \"min_len\": 5,\n", - " \"max_len\": 15,\n", - " \"w2v_ocr_path\": str(Path(\"../resources/models/w2v/\").resolve()),\n", - " \"w2v_ocr_model\": \"w2v_*_news\",\n", - " \"overwrite_dataset\": False,\n", - " },\n", + " strvar_parameters=dict(),\n", " deezy_parameters={\n", " # Paths and filenames of DeezyMatch models and data:\n", " \"dm_path\": str(Path(\"../resources/deezymatch/\").resolve()),\n", @@ -55,9 +44,8 @@ " \"dm_output\": \"deezymatch_on_the_fly\",\n", " # Ranking measures:\n", " \"ranking_metric\": \"faiss\",\n", - " \"selection_threshold\": 25,\n", - " \"num_candidates\": 3,\n", - " \"search_size\": 3,\n", + " \"selection_threshold\": 50,\n", + " \"num_candidates\": 1,\n", " \"verbose\": False,\n", " # DeezyMatch training:\n", " \"overwrite_training\": False,\n", @@ -77,12 +65,10 @@ " mylinker = linking.Linker(\n", " method=\"reldisamb\",\n", " resources_path=\"../resources/\",\n", - " linking_resources=dict(),\n", " rel_params={\n", " \"model_path\": \"../resources/models/disambiguation/\",\n", " \"data_path\": \"../experiments/outputs/data/lwm/\",\n", " \"training_split\": \"originalsplit\",\n", - " \"context_length\": 100,\n", " 
\"db_embeddings\": cursor,\n", " \"with_publication\": True,\n", " \"without_microtoponyms\": False,\n", @@ -133,13 +119,6 @@ "for r in resolved:\n", " print(r)" ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { diff --git a/examples/run_pipeline_modular.ipynb b/examples/run_pipeline_modular.ipynb index a3d03797..091422b9 100644 --- a/examples/run_pipeline_modular.ipynb +++ b/examples/run_pipeline_modular.ipynb @@ -64,7 +64,6 @@ " mylinker = linking.Linker(\n", " method=\"reldisamb\",\n", " resources_path=\"../resources/\",\n", - " linking_resources=dict(),\n", " rel_params={\n", " \"model_path\": \"../resources/models/disambiguation/\",\n", " \"data_path\": \"../experiments/outputs/data/lwm/\",\n", @@ -127,6 +126,15 @@ "source": [ "output_disamb = geoparser.run_disambiguation(output, cands)" ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "output_disamb" + ] } ], "metadata": { diff --git a/examples/run_pipeline_perfect_mostpopular.ipynb b/examples/run_pipeline_perfect_mostpopular.ipynb index 3a60edf4..7f6cae50 100644 --- a/examples/run_pipeline_perfect_mostpopular.ipynb +++ b/examples/run_pipeline_perfect_mostpopular.ipynb @@ -30,8 +30,6 @@ "myranker = ranking.Ranker(\n", " method=\"perfectmatch\",\n", " resources_path=\"../resources/wikidata/\",\n", - " mentions_to_wikidata=dict(),\n", - " wikidata_to_mentions=dict(),\n", ")\n" ] }, @@ -44,8 +42,6 @@ "mylinker = linking.Linker(\n", " method=\"mostpopular\",\n", " resources_path=\"../resources/\",\n", - " linking_resources=dict(),\n", - " overwrite_training=False,\n", ")" ] }, @@ -58,18 +54,6 @@ "geoparser = pipeline.Pipeline(myranker=myranker, mylinker=mylinker)" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "resolved = geoparser.run_text(\"A remarkable case of rattening has just occurred in the building 
trade at Shefrield, but also in Lancaster. Not in Nottingham though. Not in Ashton either, nor in Salop!\")\n", - " \n", - "for r in resolved:\n", - " print(r)" - ] - }, { "cell_type": "code", "execution_count": null, diff --git a/examples/train_use_deezy_model_1.ipynb b/examples/train_use_deezy_model_1.ipynb index 95ffb5e4..e2ce98f9 100644 --- a/examples/train_use_deezy_model_1.ipynb +++ b/examples/train_use_deezy_model_1.ipynb @@ -73,8 +73,6 @@ "myranker = ranking.Ranker(\n", " method=\"deezymatch\", # Here we're telling the ranker to use DeezyMatch.\n", " resources_path=\"../resources/wikidata/\", # Here, the path to the Wikidata resources.\n", - " mentions_to_wikidata=dict(), # We'll load the mentions-to-wikidata model here, leave it empty.\n", - " wikidata_to_mentions=dict(), # We'll load the wikidata-to-mentions model here, leave it empty.\n", " # Parameters to create the string pair dataset:\n", " strvar_parameters={\n", " \"ocr_threshold\": 60,\n", @@ -94,9 +92,8 @@ " \"dm_output\": \"deezymatch_on_the_fly\", # Name of the file where the output of DeezyMatch will be stored. Feel free to change that.\n", " # Ranking measures:\n", " \"ranking_metric\": \"faiss\", # Metric used by DeezyMatch to rank the candidates.\n", - " \"selection_threshold\": 25, # Threshold for that metric.\n", - " \"num_candidates\": 3, # Number of name variations for a string (e.g. \"London\", \"Londra\", and \"Londres\" are three different variations in our gazetteer of \"Londcn\").\n", - " \"search_size\": 3, # That should be the same as `num_candidates`.\n", + " \"selection_threshold\": 50, # Threshold for that metric.\n", + " \"num_candidates\": 1, # Number of name variations for a string (e.g. 
\"London\", \"Londra\", and \"Londres\" are three different variations in our gazetteer of \"Londcn\").\n", " \"verbose\": False, # Whether to see the DeezyMatch progress or not.\n", " # DeezyMatch training:\n", " \"overwrite_training\": True, # You can choose to overwrite the model if it exists: in this case we're training a model, regardless of whether it already exists.\n", diff --git a/examples/train_use_deezy_model_2.ipynb b/examples/train_use_deezy_model_2.ipynb index 1e49cc9f..5045329e 100644 --- a/examples/train_use_deezy_model_2.ipynb +++ b/examples/train_use_deezy_model_2.ipynb @@ -68,8 +68,6 @@ "myranker = ranking.Ranker(\n", " method=\"deezymatch\", # Here we're telling the ranker to use DeezyMatch.\n", " resources_path=\"../resources/wikidata/\", # Here, the path to the Wikidata resources.\n", - " mentions_to_wikidata=dict(), # We'll load the mentions-to-wikidata model here, leave it empty.\n", - " wikidata_to_mentions=dict(), # We'll load the wikidata-to-mentions model here, leave it empty.\n", " # Parameters to create the string pair dataset:\n", " strvar_parameters={\n", " \"overwrite_dataset\": False,\n", @@ -83,9 +81,8 @@ " \"dm_output\": \"deezymatch_on_the_fly\", # Name of the file where the output of DeezyMatch will be stored. Feel free to change that.\n", " # Ranking measures:\n", " \"ranking_metric\": \"faiss\", # Metric used by DeezyMatch to rank the candidates.\n", - " \"selection_threshold\": 25, # Threshold for that metric.\n", - " \"num_candidates\": 3, # Number of name variations for a string (e.g. \"London\", \"Londra\", and \"Londres\" are three different variations in our gazetteer of \"Londcn\").\n", - " \"search_size\": 3, # That should be the same as `num_candidates`.\n", + " \"selection_threshold\": 50, # Threshold for that metric.\n", + " \"num_candidates\": 1, # Number of name variations for a string (e.g. 
\"London\", \"Londra\", and \"Londres\" are three different variations in our gazetteer of \"Londcn\").\n", " \"verbose\": False, # Whether to see the DeezyMatch progress or not.\n", " # DeezyMatch training:\n", " \"overwrite_training\": True, # You can choose to overwrite the model if it exists: in this case we're loading an existing model, so that should be False.\n", @@ -148,13 +145,6 @@ "toponym = \"Manchefter\"\n", "print(myranker.find_candidates([{\"mention\": toponym}])[0][toponym])" ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { diff --git a/examples/train_use_deezy_model_3.ipynb b/examples/train_use_deezy_model_3.ipynb index f2b2f226..3b9a0c35 100644 --- a/examples/train_use_deezy_model_3.ipynb +++ b/examples/train_use_deezy_model_3.ipynb @@ -70,8 +70,6 @@ "myranker = ranking.Ranker(\n", " method=\"deezymatch\", # Here we're telling the ranker to use DeezyMatch.\n", " resources_path=\"../resources/wikidata/\", # Here, the path to the Wikidata resources.\n", - " mentions_to_wikidata=dict(), # We'll load the mentions-to-wikidata model here, leave it empty.\n", - " wikidata_to_mentions=dict(), # We'll load the wikidata-to-mentions model here, leave it empty.\n", " # Parameters to create the string pair dataset:\n", " strvar_parameters={\n", " \"overwrite_dataset\": False,\n", @@ -85,9 +83,8 @@ " \"dm_output\": \"deezymatch_on_the_fly\", # Name of the file where the output of DeezyMatch will be stored. Feel free to change that.\n", " # Ranking measures:\n", " \"ranking_metric\": \"faiss\", # Metric used by DeezyMatch to rank the candidates.\n", - " \"selection_threshold\": 25, # Threshold for that metric.\n", - " \"num_candidates\": 3, # Number of name variations for a string (e.g. 
\"London\", \"Londra\", and \"Londres\" are three different variations in our gazetteer of \"Londcn\").\n", - " \"search_size\": 3, # That should be the same as `num_candidates`.\n", + " \"selection_threshold\": 50, # Threshold for that metric.\n", + " \"num_candidates\": 1, # Number of name variations for a string (e.g. \"London\", \"Londra\", and \"Londres\" are three different variations in our gazetteer of \"Londcn\").\n", " \"verbose\": False, # Whether to see the DeezyMatch progress or not.\n", " # DeezyMatch training:\n", " \"overwrite_training\": False, # You can choose to overwrite the model if it exists: in this case we're loading an existing model, so that should be False.\n", @@ -140,16 +137,9 @@ "outputs": [], "source": [ "# Find candidates given a toponym:\n", - "toponym = \"Sheftield\"\n", + "toponym = \"Shefiield\"\n", "print(myranker.find_candidates([{\"mention\": toponym}])[0][toponym])" ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { diff --git a/examples/train_use_ner_model.ipynb b/examples/train_use_ner_model.ipynb index 8bc8ea09..c0e3542e 100644 --- a/examples/train_use_ner_model.ipynb +++ b/examples/train_use_ner_model.ipynb @@ -7,7 +7,7 @@ "source": [ "# Training and using a NER model\n", "\n", - "This notebook shows how to train a new toponym recognition (NER) model. 
You can see how to use a model from the HuggingFace hub in the `load_use_ner_model.ipynb` notebook.\n", + "This notebook shows how to train a new toponym recognition (NER) model via the `transformers` library.\n", "\n", "We start by importing some libraries, and the `recogniser` script from the `geoparser` folder:" ] @@ -65,7 +65,7 @@ " }, # Training arguments: you can change them.\n", " overwrite_training=False, # Set to True if you want to overwrite an existing model with the same name.\n", " do_test=True, # Set to True if you want to perform the training on test mode (the string \"_test\" will be appended to your model name).\n", - " load_from_hub=False, # Whether the model should be loaded from the HuggingFace hub\n", + " load_from_hub=False, # Whether the final model should be loaded from the HuggingFace hub\n", ")" ] },