fix: prefer inplace softmax to avoid copy (#2661)
* fix: prefer inplace softmax to avoid copy

* Update server/text_generation_server/models/flash_causal_lm.py

Co-authored-by: Nicolas Patry <patry.nicolas@protonmail.com>

---------

Co-authored-by: Nicolas Patry <patry.nicolas@protonmail.com>
parent 1b97e084bf
commit 5f32dea1e2
server/text_generation_server/models/flash_causal_lm.py

@@ -1922,8 +1922,9 @@ class FlashCausalLM(Model):
         batch.adapter_meta.adapter_indices = next_adapter_indices

         if prefill and prefill_logprobs:
-            # Get prefill logprobs
-            prefill_logprobs_tensor = torch.log_softmax(out, -1)
+            # Get prefill logprobs with inplace softmax (avoid copying the `out` tensor (max_batch_prefill_tokens * vocab_size))
+            torch.log_softmax(out, -1, out=out)
+            prefill_logprobs_tensor = out
             prefill_logprobs = torch.gather(
                 prefill_logprobs_tensor, 1, prefill_tokens_indices.view(-1, 1)
             )
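Why the change helps, as a minimal standalone sketch (not TGI code): the out-of-place call allocates a second tensor the size of the logits, roughly (max_batch_prefill_tokens, vocab_size), while the out= form writes the log-probabilities back into the same buffer. The num_tokens and vocab_size values below are illustrative assumptions, and the sketch presumes a PyTorch build where torch.log_softmax accepts out= (as the patched code itself does).

import torch

# Illustrative shapes only; in TGI the logits buffer is roughly
# (max_batch_prefill_tokens, vocab_size).
num_tokens, vocab_size = 4096, 32000
out = torch.randn(num_tokens, vocab_size)

# Before the patch: allocates a second num_tokens x vocab_size tensor,
# roughly doubling peak memory for this step.
# prefill_logprobs_tensor = torch.log_softmax(out, -1)

# After the patch: the log-probabilities overwrite the raw logits in
# place via the out= keyword, so no extra buffer is allocated. This is
# safe only because the raw logits are not read again afterwards.
torch.log_softmax(out, -1, out=out)
prefill_logprobs_tensor = out

# The subsequent gather reads from the same buffer, exactly as before;
# the indices here are random stand-ins for prefill_tokens_indices.
prefill_tokens_indices = torch.randint(0, vocab_size, (num_tokens,))
prefill_logprobs = torch.gather(
    prefill_logprobs_tensor, 1, prefill_tokens_indices.view(-1, 1)
)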