From 5c97507a8f10ce039fb2c1bfc4fe7635ce35f2a9 Mon Sep 17 00:00:00 2001
From: Tanmay Verma
Date: Wed, 24 Apr 2024 12:06:59 -0700
Subject: [PATCH] Document the thread count options (#126)

* Document the thread count options

* Format fix

* Apply suggestions from code review

Co-authored-by: Jacky <18255193+kthui@users.noreply.github.com>

---------

Co-authored-by: Jacky <18255193+kthui@users.noreply.github.com>
---
 README.md       | 41 +++++++++++++++++++++++++++++++++++++++++
 src/libtorch.cc |  8 ++++----
 2 files changed, 45 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index b82c774..106eb13 100644
--- a/README.md
+++ b/README.md
@@ -176,6 +176,47 @@ key: "ENABLE_CACHE_CLEANING"
 }
 ```
 
+* `INTER_OP_THREAD_COUNT`:
+
+PyTorch allows using multiple CPU threads during TorchScript model inference.
+One or more inference threads execute a model’s forward pass on the given
+inputs. Each inference thread invokes a JIT interpreter that executes the ops
+of a model inline, one by one. This parameter sets the size of this thread
+pool. The default value of this setting is the number of CPU cores. Please refer
+to [this](https://pytorch.org/docs/stable/notes/cpu_threading_torchscript_inference.html)
+document for details on how to set this parameter properly.
+
+The section of the model config file specifying this parameter will look like:
+
+```
+parameters: {
+key: "INTER_OP_THREAD_COUNT"
+    value: {
+    string_value:"1"
+    }
+}
+```
+
+* `INTRA_OP_THREAD_COUNT`:
+
+In addition to the inter-op parallelism, PyTorch can also utilize multiple threads
+within the ops (intra-op parallelism). This can be useful in many cases, including
+element-wise ops on large tensors, convolutions, GEMMs, embedding lookups, and
+others. The default value for this setting is the number of CPU cores. Please refer
+to [this](https://pytorch.org/docs/stable/notes/cpu_threading_torchscript_inference.html)
+document for details on how to set this parameter properly.
+
+The section of the model config file specifying this parameter will look like:
+
+```
+parameters: {
+key: "INTRA_OP_THREAD_COUNT"
+    value: {
+    string_value:"1"
+    }
+}
+```
+
 * Additional Optimizations: Three additional boolean parameters are available to disable
 certain Torch optimizations that can sometimes cause latency regressions in models with
 complex execution modes and dynamic shapes. If not specified, all are enabled by default.
diff --git a/src/libtorch.cc b/src/libtorch.cc
index 8809206..c6d0b5a 100644
--- a/src/libtorch.cc
+++ b/src/libtorch.cc
@@ -476,8 +476,8 @@ ModelState::ParseParameters()
   // is made to 'intra_op_thread_count', which by default will take all
   // threads
   int intra_op_thread_count = -1;
-  err = ParseParameter(
-      params, "INTRA_OP_THREAD_COUNT", &intra_op_thread_count);
+  err =
+      ParseParameter(params, "INTRA_OP_THREAD_COUNT", &intra_op_thread_count);
   if (err != nullptr) {
     if (TRITONSERVER_ErrorCode(err) != TRITONSERVER_ERROR_NOT_FOUND) {
       return err;
@@ -500,8 +500,8 @@ ModelState::ParseParameters()
   // is made to 'inter_op_thread_count', which by default will take all
   // threads
   int inter_op_thread_count = -1;
-  err = ParseParameter(
-      params, "INTER_OP_THREAD_COUNT", &inter_op_thread_count);
+  err =
+      ParseParameter(params, "INTER_OP_THREAD_COUNT", &inter_op_thread_count);
   if (err != nullptr) {
     if (TRITONSERVER_ErrorCode(err) != TRITONSERVER_ERROR_NOT_FOUND) {
       return err;
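
As a companion to the README text above, the following standalone sketch shows what the two parameters ultimately control at the LibTorch level. It is not part of the patch and not the backend's code: `at::set_num_interop_threads()`, `at::set_num_threads()`, and the `torch::jit::load()` flow are the real PyTorch C++ APIs, while `model.pt`, the thread counts, and the input shape are illustrative assumptions.

```cpp
// Sketch: what INTER_OP_THREAD_COUNT / INTRA_OP_THREAD_COUNT configure in
// LibTorch. "model.pt" and the input shape are placeholders.
#include <torch/script.h>  // torch::jit::load; also pulls in ATen/Parallel.h

#include <iostream>
#include <vector>

int main() {
  // Both setters must run before any parallel work starts, which is why the
  // backend applies them while parsing the model configuration.
  at::set_num_interop_threads(1);  // size of the inter-op inference-thread pool
  at::set_num_threads(4);          // threads usable inside a single op

  std::cout << "inter-op threads: " << at::get_num_interop_threads() << "\n"
            << "intra-op threads: " << at::get_num_threads() << "\n";

  // Each forward pass runs on one inference thread from the inter-op pool;
  // heavy ops such as GEMMs and convolutions can fan out over the intra-op
  // pool.
  torch::jit::script::Module module = torch::jit::load("model.pt");
  std::vector<torch::jit::IValue> inputs{torch::ones({1, 3, 224, 224})};
  at::Tensor output = module.forward(inputs).toTensor();
  std::cout << "output sizes: " << output.sizes() << "\n";
  return 0;
}
```

Note that PyTorch allows the inter-op thread count to be set only once, before the first inter-op task runs, which is consistent with applying these parameters at model load time.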
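
Along the same lines, here is a hedged sketch of the parse-then-apply flow that `ModelState::ParseParameters()` implements in the libtorch.cc hunks above. The real backend goes through Triton's `ParseParameter()` and `TRITONSERVER_Error` handling; this simplified version substitutes a plain string map for the config parameters and a hypothetical `GetIntParam()` helper.

```cpp
// Simplified stand-in for the backend's ParseParameters() flow: the map
// mimics the "parameters" section of config.pbtxt, and GetIntParam() is a
// hypothetical helper playing the role of ParseParameter().
#include <ATen/Parallel.h>

#include <map>
#include <optional>
#include <string>

std::optional<int> GetIntParam(
    const std::map<std::string, std::string>& params, const std::string& key)
{
  auto it = params.find(key);
  if (it == params.end()) {
    return std::nullopt;  // akin to tolerating TRITONSERVER_ERROR_NOT_FOUND
  }
  return std::stoi(it->second);
}

void ApplyThreadCounts(const std::map<std::string, std::string>& params)
{
  // -1 mirrors the backend's sentinel: leave PyTorch's defaults alone, which
  // by default use all CPU cores.
  int intra_op_thread_count =
      GetIntParam(params, "INTRA_OP_THREAD_COUNT").value_or(-1);
  if (intra_op_thread_count > 0) {
    at::set_num_threads(intra_op_thread_count);
  }

  int inter_op_thread_count =
      GetIntParam(params, "INTER_OP_THREAD_COUNT").value_or(-1);
  if (inter_op_thread_count > 0) {
    at::set_num_interop_threads(inter_op_thread_count);
  }
}

int main() {
  ApplyThreadCounts(
      {{"INTER_OP_THREAD_COUNT", "2"}, {"INTRA_OP_THREAD_COUNT", "4"}});
  return 0;
}
```

Applying a setting only when its parameter is present keeps PyTorch's defaults untouched for models that do not opt in, matching the "no update is made" comments in the diff.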