This repository has been archived by the owner on Dec 16, 2022. It is now read-only.

Don't cache reinit_modules #5543

Open · wants to merge 6 commits into main
8 changes: 4 additions & 4 deletions allennlp/common/cached_transformers.py
@@ -14,7 +14,6 @@ class TransformerSpec(NamedTuple):
     model_name: str
     override_weights_file: Optional[str] = None
     override_weights_strip_prefix: Optional[str] = None
-    reinit_modules: Optional[Union[int, Tuple[int, ...], Tuple[str, ...]]] = None
 
 
 _model_cache: Dict[TransformerSpec, transformers.PreTrainedModel] = {}
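This first hunk drops `reinit_modules` from `TransformerSpec`, the NamedTuple used as the key of `_model_cache`. The field is no longer needed in the key because, with the later hunks, models built with `reinit_modules` are never stored in the cache at all. A minimal sketch (not the library's full code) of how the NamedTuple acts as a hashable cache key, with an illustrative model name:

```python
from typing import Dict, NamedTuple, Optional

# Simplified sketch: specs with equal field values hash equally,
# so they map to the same cache entry.
class TransformerSpec(NamedTuple):
    model_name: str
    override_weights_file: Optional[str] = None
    override_weights_strip_prefix: Optional[str] = None

_model_cache: Dict[TransformerSpec, object] = {}

spec_a = TransformerSpec("bert-base-uncased")  # illustrative model name
spec_b = TransformerSpec("bert-base-uncased")
assert spec_a == spec_b and hash(spec_a) == hash(spec_b)  # same cache slot
```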
@@ -66,9 +65,8 @@ def get(
         model_name,
         override_weights_file,
         override_weights_strip_prefix,
-        reinit_modules,
     )
-    transformer = _model_cache.get(spec, None)
+    transformer = None if reinit_modules is not None else _model_cache.get(spec, None)
     if transformer is None:
         if not load_weights:
             if override_weights_file is not None:
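Here the cache lookup itself is bypassed whenever `reinit_modules` is given: `transformer` starts out as `None`, so the call always falls through to the loading branch and reinitializes the requested modules from scratch instead of reusing a previously reinitialized model. A hedged sketch of that pattern in isolation (the function and loader names are illustrative, not the library's code):

```python
from typing import Any, Dict, Optional, Tuple, Union

_model_cache: Dict[Any, Any] = {}

def load_and_maybe_reinit(spec: Any, reinit_modules) -> object:
    # Hypothetical stand-in for the real loading + reinitialization logic.
    return object()

def get_sketch(spec: Any, reinit_modules: Optional[Union[int, Tuple[int, ...], Tuple[str, ...]]] = None):
    # New behavior: requesting reinitialized modules skips the cache lookup,
    # so the model is always rebuilt (and freshly re-randomized) for this call.
    transformer = None if reinit_modules is not None else _model_cache.get(spec, None)
    if transformer is None:
        transformer = load_and_maybe_reinit(spec, reinit_modules)
    return transformer
```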
@@ -181,7 +179,9 @@ def strip_prefix(s):
                 model_name,
                 **kwargs,
             )
-        _model_cache[spec] = transformer
+        # Don't cache transformers with reinitialized weights.
+        if reinit_modules is None:
+            _model_cache[spec] = transformer
     if make_copy:
         import copy
 
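Taken together, the lookup and store changes mean that a model requested with `reinit_modules` is loaded and reinitialized on every call, while plain requests keep the old caching behavior. A hedged usage sketch, assuming `cached_transformers.get` accepts `make_copy` and `reinit_modules` as keyword arguments; the model name and argument values are illustrative only:

```python
from allennlp.common import cached_transformers

# With this PR, reinitialized models are neither looked up nor stored in the
# cache, so each call below re-randomizes the requested modules independently.
a = cached_transformers.get("bert-base-uncased", make_copy=False, reinit_modules=2)
b = cached_transformers.get("bert-base-uncased", make_copy=False, reinit_modules=2)
# Previously the second call could return the first (cached) reinitialized model;
# now `a` and `b` are distinct objects with independently re-randomized weights.

# Requests without reinit_modules still hit the cache as before.
c = cached_transformers.get("bert-base-uncased", make_copy=False)
d = cached_transformers.get("bert-base-uncased", make_copy=False)
assert c is d
```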