diff --git a/docs/conf.py b/docs/conf.py
index 0e4d19d8..c7dde9e5 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -16,7 +16,7 @@
 import sys
 import datetime

-import sphinx_rtd_theme
+# import sphinx_rtd_theme

 sys.path.append("..")

@@ -67,13 +67,13 @@
 # a list of builtin themes.
 #

-html_theme = "sphinx_rtd_theme"
-html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
+# html_theme = "sphinx_rtd_theme"
+# html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

 # import karma_sphinx_theme
 # html_theme = "karma_sphinx_theme"

-html_theme = "faculty_sphinx_theme"
+html_theme = "sphinx_book_theme"

 # import catalyst_sphinx_theme
 # html_theme = "catalyst_sphinx_theme"
diff --git a/docs/encoders.rst b/docs/encoders.rst
index d64607b8..652745b7 100644
--- a/docs/encoders.rst
+++ b/docs/encoders.rst
@@ -1,4 +1,4 @@
-πŸ” Available Encoders
+πŸ” Available Encoders
 =====================

 ResNet
diff --git a/docs/encoders_timm.rst b/docs/encoders_timm.rst
index 3441fdca..26a18a64 100644
--- a/docs/encoders_timm.rst
+++ b/docs/encoders_timm.rst
@@ -1,4 +1,4 @@
-πŸͺ Timm Encoders
+🎯 Timm Encoders
 ~~~~~~~~~~~~~~~~

 Pytorch Image Models (a.k.a. timm) has a lot of pretrained models and interface which allows using these models as encoders in smp,
diff --git a/docs/index.rst b/docs/index.rst
index bb75d6e6..54bdea00 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -17,6 +17,7 @@ Welcome to Segmentation Models's documentation!
    encoders_timm
    losses
    metrics
+   save_load
    insights
diff --git a/docs/insights.rst b/docs/insights.rst
index 6489dfd1..ad5355b9 100644
--- a/docs/insights.rst
+++ b/docs/insights.rst
@@ -1,4 +1,4 @@
-πŸ”§ Insights
+πŸ’‘ Insights
 ===========

 1. Models architecture
diff --git a/docs/install.rst b/docs/install.rst
index 583a2527..e43c0f38 100644
--- a/docs/install.rst
+++ b/docs/install.rst
@@ -1,4 +1,4 @@
-πŸ›  Installation
+βš™οΈ Installation
 ===============

 PyPI version:
diff --git a/docs/metrics.rst b/docs/metrics.rst
index db5ec581..fc06c131 100644
--- a/docs/metrics.rst
+++ b/docs/metrics.rst
@@ -1,4 +1,4 @@
-πŸ“ˆ Metrics
+πŸ“ Metrics
 ==========

 Functional metrics
diff --git a/docs/models.rst b/docs/models.rst
index 47de61ee..003908a0 100644
--- a/docs/models.rst
+++ b/docs/models.rst
@@ -1,40 +1,68 @@
-πŸ“¦ Segmentation Models
+πŸ•ΈοΈ Segmentation Models
 ==============================
+
+.. contents::
+   :local:
+
+.. _unet:
+
 Unet
 ~~~~
 .. autoclass:: segmentation_models_pytorch.Unet

+
+.. _unetplusplus:
+
 Unet++
 ~~~~~~
 .. autoclass:: segmentation_models_pytorch.UnetPlusPlus

-MAnet
-~~~~~~
-.. autoclass:: segmentation_models_pytorch.MAnet

-Linknet
-~~~~~~~
-.. autoclass:: segmentation_models_pytorch.Linknet
+.. _fpn:

 FPN
 ~~~
 .. autoclass:: segmentation_models_pytorch.FPN

+
+.. _pspnet:
+
 PSPNet
 ~~~~~~
 .. autoclass:: segmentation_models_pytorch.PSPNet

-PAN
-~~~
-.. autoclass:: segmentation_models_pytorch.PAN
+
+.. _deeplabv3:

 DeepLabV3
 ~~~~~~~~~
 .. autoclass:: segmentation_models_pytorch.DeepLabV3

+
+.. _deeplabv3plus:
+
 DeepLabV3+
 ~~~~~~~~~~
 .. autoclass:: segmentation_models_pytorch.DeepLabV3Plus

+.. _linknet:
+
+Linknet
+~~~~~~~
+.. autoclass:: segmentation_models_pytorch.Linknet
+
+
+.. _manet:
+
+MAnet
+~~~~~~
+.. autoclass:: segmentation_models_pytorch.MAnet
+
+
+.. _pan:
+
+PAN
+~~~
+.. autoclass:: segmentation_models_pytorch.PAN
diff --git a/docs/quickstart.rst b/docs/quickstart.rst
index 60f4f287..0c27ab4a 100644
--- a/docs/quickstart.rst
+++ b/docs/quickstart.rst
@@ -1,4 +1,4 @@
-⏳ Quick Start
+πŸš€ Quick Start
 ==============

 **1. Create segmentation model**
@@ -16,8 +16,9 @@ Segmentation model is just a PyTorch nn.Module, which can be created as easy as:
     classes=3,                      # model output channels (number of classes in your dataset)
 )

-- see table with available model architectures
-- see table with avaliable encoders and its corresponding weights
+- Check the page with available :doc:`model architectures <models>`.
+- Check the table with :doc:`available ported encoders and their corresponding weights <encoders>`.
+- `Pytorch Image Models (timm) `_ encoders are also supported, check them :doc:`here <encoders_timm>`.

 **2. Configure data preprocessing**

@@ -33,4 +34,23 @@ All encoders have pretrained weights. Preparing your data the same way as during

 **3. Congratulations!** πŸŽ‰

-You are done! Now you can train your model with your favorite framework!
+You are done! Now you can train your model with your favorite framework, or as simply as:
+
+.. code-block:: python
+
+    for images, gt_masks in dataloader:
+
+        predicted_mask = model(images)
+        loss = loss_fn(predicted_mask, gt_masks)
+
+        loss.backward()
+        optimizer.step()
+
+Check the following examples:
+
+.. |colab-badge| image:: https://colab.research.google.com/assets/colab-badge.svg
+    :target: https://colab.research.google.com/github/qubvel/segmentation_models.pytorch/blob/master/examples/binary_segmentation_intro.ipynb
+    :alt: Open In Colab
+
+- Finetuning notebook on Oxford Pet dataset with `PyTorch Lightning `_ |colab-badge|
+- Finetuning script for cloth segmentation with `PyTorch Lightning `_
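Step 2 of the quickstart ("Configure data preprocessing") appears only as unchanged context in the hunk above, so for reference: every ported encoder exposes the normalization it was pretrained with through `get_preprocessing_fn`. A minimal sketch of that step (the random image is a stand-in for real data; this snippet is illustrative and not part of the patch):

.. code:: python

    import numpy as np
    from segmentation_models_pytorch.encoders import get_preprocessing_fn

    # Returns a callable that applies the same normalization the encoder saw during pretraining
    preprocess_input = get_preprocessing_fn("resnet34", pretrained="imagenet")

    image = np.random.randint(0, 255, size=(256, 256, 3)).astype("float32")  # HWC stand-in image
    image = preprocess_input(image)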
diff --git a/docs/requirements.txt b/docs/requirements.txt
index 4d7d4875..7e5b1eff 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -1,3 +1,5 @@
-faculty-sphinx-theme==0.2.2
+sphinx<7
+sphinx-book-theme==1.1.2
 six==1.15.0
-autodocsumm
\ No newline at end of file
+autodocsumm
+huggingface_hub
\ No newline at end of file
diff --git a/docs/save_load.rst b/docs/save_load.rst
new file mode 100644
index 00000000..0aec7d50
--- /dev/null
+++ b/docs/save_load.rst
@@ -0,0 +1,74 @@
+πŸ“‚ Saving and Loading
+=====================
+
+In this section, we will discuss how to save a trained model, push it to the Hugging Face Hub, and load it back for later use.
+
+Saving and Sharing a Model
+--------------------------
+
+Once you have trained your model, you can save it using the `.save_pretrained` method. This method saves the model configuration and weights to a directory of your choice.
+Optionally, you can push the model to the Hugging Face Hub by setting the `push_to_hub` parameter to `True`.
+
+For example:
+
+.. code:: python
+
+    import segmentation_models_pytorch as smp
+
+    model = smp.Unet('resnet34', encoder_weights='imagenet')
+
+    # After training your model, save it to a directory
+    model.save_pretrained('./my_model')
+
+    # Or save it and push it to the Hub at the same time
+    model.save_pretrained('username/my-model', push_to_hub=True)
+
+Loading a Trained Model
+-----------------------
+
+Once your model is saved, and optionally pushed to the Hub, you can load it back using the `smp.from_pretrained` method. This method allows you to load the model weights and configuration from a local directory or directly from the Hub.
+
+For example:
+
+.. code:: python
+
+    import segmentation_models_pytorch as smp
+
+    # Load the model from a local directory
+    model = smp.from_pretrained('./my_model')
+
+    # Alternatively, load the model directly from the Hugging Face Hub
+    model = smp.from_pretrained('username/my-model')
+
+Saving Model Metrics and Dataset Name
+-------------------------------------
+
+You can pass the `metrics` and `dataset` parameters to the `save_pretrained` method to record the model metrics and the dataset name in the model card along with the model configuration and weights.
+
+For example:
+
+.. code:: python
+
+    import segmentation_models_pytorch as smp
+
+    model = smp.Unet('resnet34', encoder_weights='imagenet')
+
+    # After training your model, save it to a directory
+    model.save_pretrained('./my_model', metrics={'accuracy': 0.95}, dataset='my_dataset')
+
+    # Or save it and push it to the Hub at the same time
+    model.save_pretrained('username/my-model', push_to_hub=True, metrics={'accuracy': 0.95}, dataset='my_dataset')
+
+
+Conclusion
+----------
+
+By following these steps, you can easily save, share, and load your models, facilitating collaboration and reproducibility in your projects. Don't forget to replace the placeholders with your actual model paths and names.
+
+|colab-badge|
+
+.. |colab-badge| image:: https://colab.research.google.com/assets/colab-badge.svg
+    :target: https://colab.research.google.com/github/qubvel/segmentation_models.pytorch/blob/master/examples/binary_segmentation_intro.ipynb
+    :alt: Open In Colab
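To tie the new quickstart training loop and the save/load page together, here is a minimal end-to-end sketch: train briefly, save with `save_pretrained` (optionally recording `metrics` and `dataset` for the model card, as documented above), reload with `smp.from_pretrained`, and run inference. The synthetic data, loss, optimizer, metric value, and paths are placeholders rather than part of the patch:

.. code:: python

    import torch
    from torch.utils.data import DataLoader, TensorDataset
    import segmentation_models_pytorch as smp

    # encoder_weights=None keeps the example offline; use "imagenet" for pretrained weights
    model = smp.Unet("resnet34", encoder_weights=None, in_channels=3, classes=3)

    # Tiny synthetic dataset standing in for real images and masks
    all_images = torch.rand(8, 3, 256, 256)
    all_masks = (torch.rand(8, 3, 256, 256) > 0.5).float()
    dataloader = DataLoader(TensorDataset(all_images, all_masks), batch_size=2)

    loss_fn = torch.nn.BCEWithLogitsLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)

    for images, gt_masks in dataloader:
        optimizer.zero_grad()
        predicted_mask = model(images)
        loss = loss_fn(predicted_mask, gt_masks)
        loss.backward()
        optimizer.step()

    # Save locally; add push_to_hub=True to also upload the weights and model card to the Hub
    model.save_pretrained("./my_model", metrics={"iou": 0.85}, dataset="my_dataset")

    # Reload and run inference (input height/width should be divisible by 32 for most encoders)
    model = smp.from_pretrained("./my_model").eval()
    with torch.no_grad():
        mask = model(torch.rand(1, 3, 256, 256))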