Fix handling pre-normalized tokens
mokeddembillel committed Dec 22, 2024
1 parent 92e41ec commit a1f146d
Showing 1 changed file with 5 additions and 4 deletions.
convert_hf_to_gguf.py (9 changes: 5 additions & 4 deletions)
@@ -525,10 +525,9 @@ def get_vocab_base(self) -> tuple[list[str], list[int], str]:
             else:
                 token: str = reverse_vocab[i]
                 if token in added_vocab:
-                    # We need to manually encode and decode the added tokens in case special characters
-                    # used for `\n` / `\t` have been manually added in the added tokens
-                    # To avoid unexpected issues - we make sure to encode single-char tokens
-                    if len(token) == 1:
+                    # The tokenizer in llama.cpp assumes the CONTROL and USER_DEFINED tokens are pre-normalized.
+                    # To avoid unexpected issues - we make sure to normalize non-normalized tokens
+                    if not tokenizer.added_tokens_decoder[i].normalized:
                         previous_token = token
                         token = tokenizer.decode(tokenizer.encode(token, add_special_tokens=False))
                         if previous_token != token:
@@ -537,6 +536,8 @@ def get_vocab_base(self) -> tuple[list[str], list[int], str]:
                     if tokenizer.added_tokens_decoder[i].special or self.does_token_look_special(token):
                         toktypes.append(gguf.TokenType.CONTROL)
                     else:
+                        # NOTE: this was added for Gemma.
+                        # Encoding and decoding the tokens above isn't sufficient for this case.
                         token = token.replace(b"\xe2\x96\x81".decode("utf-8"), " ")  # pre-normalize user-defined spaces
                         toktypes.append(gguf.TokenType.USER_DEFINED)
                 else:
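
For illustration, here is a minimal standalone sketch (not part of the commit) of the normalization this change relies on, written against the HuggingFace transformers API; the model id is a placeholder, and the loop only approximates the conversion logic above.

    # Hypothetical, self-contained illustration of the normalization step;
    # the model id below is a placeholder, not taken from the commit.
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b")

    for token_id, added_token in tokenizer.added_tokens_decoder.items():
        token = added_token.content
        if not added_token.normalized:
            # Round-trip through the tokenizer so the exported entry is
            # pre-normalized (e.g. a literal "\n" escape becomes a newline),
            # matching what llama.cpp expects for CONTROL/USER_DEFINED tokens.
            token = tokenizer.decode(tokenizer.encode(token, add_special_tokens=False))
        if not added_token.special:
            # SentencePiece-style vocabularies mark a leading space with
            # U+2581 ("▁"); user-defined tokens are exported with it replaced.
            token = token.replace("\u2581", " ")
        print(token_id, repr(token))

Compared to the previous `len(token) == 1` heuristic, keying on the `AddedToken.normalized` flag also covers multi-character added tokens (escaped newlines, tabs, and Gemma's space marker) that the old check did not round-trip.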
