Avoid repeated calculation of vocab
Signed-off-by: DarkLight1337 <[email protected]>
DarkLight1337 committed Jan 22, 2025
1 parent 611f5aa commit cb6a912
Showing 1 changed file with 5 additions and 1 deletion.
6 changes: 5 additions & 1 deletion vllm/transformers_utils/tokenizer.py
@@ -67,9 +67,10 @@ def get_cached_tokenizer(tokenizer: AnyTokenizer) -> AnyTokenizer:
     tokenizer_all_special_tokens_extended = (
         tokenizer.all_special_tokens_extended)
     tokenizer_all_special_tokens = set(tokenizer.all_special_tokens)
+    tokenizer_vocab = tokenizer.get_vocab()
     tokenizer_len = len(tokenizer)
 
-    max_token_id = max(tokenizer.get_vocab().values())
+    max_token_id = max(tokenizer_vocab.values())
     # Some tokenizers (e.g., QwenTokenizer) have special tokens that
     # are added and included in the implementation of the vocab_size
     # property, but not in get_vocab(); if there is an implementation
@@ -96,6 +97,9 @@ def all_special_tokens_extended(self):
         def max_token_id(self):
             return max_token_id
 
+        def get_vocab(self):
+            return tokenizer_vocab
+
         def __len__(self):
             return tokenizer_len
 
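For readers without the surrounding file, the change builds on the caching pattern already used by get_cached_tokenizer: expensive tokenizer lookups are computed once and then served from closure variables by a dynamically created subclass that patches the tokenizer in place. The sketch below is a hypothetical standalone illustration of that pattern, not the exact vLLM code; the function name cached_tokenizer_sketch and the use of AutoTokenizer/"gpt2" are illustrative assumptions.

# Hypothetical sketch (not the exact vLLM implementation) of the caching
# pattern this commit extends.
from transformers import AutoTokenizer, PreTrainedTokenizerBase


def cached_tokenizer_sketch(
        tokenizer: PreTrainedTokenizerBase) -> PreTrainedTokenizerBase:
    # Compute once; on some tokenizers these calls rebuild the underlying
    # data structures every time they are invoked.
    tokenizer_vocab = tokenizer.get_vocab()
    tokenizer_len = len(tokenizer)
    max_token_id = max(tokenizer_vocab.values())

    class CachedTokenizer(tokenizer.__class__):  # type: ignore

        @property
        def max_token_id(self):
            return max_token_id

        def get_vocab(self):
            # Return the precomputed dict instead of rebuilding it.
            return tokenizer_vocab

        def __len__(self):
            return tokenizer_len

    CachedTokenizer.__name__ = f"Cached{tokenizer.__class__.__name__}"
    tokenizer.__class__ = CachedTokenizer
    return tokenizer


if __name__ == "__main__":
    tok = cached_tokenizer_sketch(AutoTokenizer.from_pretrained("gpt2"))
    print(len(tok), tok.max_token_id, len(tok.get_vocab()))

The commit itself simply hoists tokenizer.get_vocab() into one such closure variable (tokenizer_vocab), reuses it when computing max_token_id, and adds a get_vocab() override so later callers also hit the cached dict.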
