From 8672cad2cbe9b31d82e54223d0f4815ffe426cee Mon Sep 17 00:00:00 2001
From: Vincent Brouwers
Date: Tue, 26 Sep 2023 16:16:43 +0200
Subject: [PATCH] Fix top_n_tokens returning non-log probs for some models
 (#1023)

# What does this PR do?

I made an embarrassing mistake where I accidentally passed normal softmax
probabilities into `batch_top_tokens` for `CausalLM` and `Seq2SeqLM`.

## Before submitting
- [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case).
- [ ] Did you read the [contributor guideline](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md#start-contributing-pull-requests), Pull Request section?
- [ ] Was this discussed/approved via a Github issue or the [forum](https://discuss.huggingface.co/)? Please add a link to it if that's the case.
- [ ] Did you make sure to update the documentation with your changes? Here are the [documentation guidelines](https://github.com/huggingface/transformers/tree/main/docs), and [here are tips on formatting docstrings](https://github.com/huggingface/transformers/tree/main/docs#writing-source-documentation).
- [ ] Did you write any new necessary tests?

## Who can review?

Anyone in the community is free to review the PR once the tests have passed.
Feel free to tag members/contributors who may be interested in your PR.

@Narsil
---
 server/text_generation_server/models/causal_lm.py  | 2 +-
 server/text_generation_server/models/seq2seq_lm.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/server/text_generation_server/models/causal_lm.py b/server/text_generation_server/models/causal_lm.py
index cec9ae55..696f0fb2 100644
--- a/server/text_generation_server/models/causal_lm.py
+++ b/server/text_generation_server/models/causal_lm.py
@@ -579,7 +579,7 @@ class CausalLM(Model):
         batch_top_token_ids, batch_top_token_logprobs = batch_top_tokens(
             batch.top_n_tokens,
             batch.top_n_tokens_tensor,
-            torch.softmax(logits[:, -1], -1),
+            torch.log_softmax(logits[:, -1], -1),
         )
 
         # Zipped iterator
diff --git a/server/text_generation_server/models/seq2seq_lm.py b/server/text_generation_server/models/seq2seq_lm.py
index 1a7911ac..34932c0b 100644
--- a/server/text_generation_server/models/seq2seq_lm.py
+++ b/server/text_generation_server/models/seq2seq_lm.py
@@ -642,7 +642,7 @@ class Seq2SeqLM(Model):
         batch_top_token_ids, batch_top_token_logprobs = batch_top_tokens(
             batch.top_n_tokens,
             batch.top_n_tokens_tensor,
-            torch.softmax(logits[:, -1], -1),
+            torch.log_softmax(logits[:, -1], -1),
        )
 
         # Finished requests
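
For context on why the one-character fix above matters: `torch.softmax` returns probabilities in (0, 1), while `torch.log_softmax` returns log-probabilities, which are always <= 0. If plain softmax output is passed where `batch_top_tokens` expects log-probabilities, clients silently receive positive "logprobs". A minimal standalone sketch with toy values (not taken from the repository):

```python
import torch

# Toy logits over a 4-token vocabulary for a single position.
logits = torch.tensor([2.0, 1.0, 0.5, -1.0])

probs = torch.softmax(logits, -1)         # probabilities in (0, 1), sum to 1
logprobs = torch.log_softmax(logits, -1)  # log-probabilities, always <= 0

print(probs)     # ~[ 0.6095,  0.2242,  0.1360,  0.0303]
print(logprobs)  # ~[-0.4952, -1.4952, -1.9952, -3.4952]

# log_softmax is the log of softmax (computed in a numerically stabler way),
# so the two are related but not interchangeable: returning `probs` where
# log-probabilities are expected reports ~0.61 instead of ~-0.50 for the
# top token.
assert torch.allclose(logprobs, probs.log())
```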