From e152cb022b4de8257547ad5dc66bd707eae4471c Mon Sep 17 00:00:00 2001
From: drbh
Date: Thu, 22 Aug 2024 17:57:51 +0000
Subject: [PATCH] fix: also show total memory after full warmup

---
 server/text_generation_server/models/flash_causal_lm.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/server/text_generation_server/models/flash_causal_lm.py b/server/text_generation_server/models/flash_causal_lm.py
index 2cdc49a0..e4b060ca 100644
--- a/server/text_generation_server/models/flash_causal_lm.py
+++ b/server/text_generation_server/models/flash_causal_lm.py
@@ -1386,7 +1386,8 @@ class FlashCausalLM(Model):
                 total_cuda_graph_memory = free_memory_post_alloc - last_available_memory
                 log_master(
                     logger.info,
-                    f"Total memory used for CUDA graphs: {total_cuda_graph_memory/1024/1024:.2f} MB",
+                    f"Total memory used for CUDA graphs: {total_cuda_graph_memory/1024/1024:.2f} MB"
+                    f"\nTotal memory available: {last_available_memory/1024/1024:.2f} MB",
                 )
             except torch.cuda.OutOfMemoryError:
                 logger.exception("Decode cuda graph warmup failed")
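
For reference, the patched call relies on Python's implicit concatenation of
adjacent string literals, so the two f-string lines above form a single
two-line log message. A minimal standalone sketch of the resulting output,
using hypothetical byte counts (in the real code these come from the free
memory measured before and after CUDA graph allocation):

    # Hypothetical values for illustration; not taken from the patch.
    free_memory_post_alloc = 18_594_398_208  # bytes free after CUDA graph allocation
    last_available_memory = 18_253_611_008   # bytes free after the full warmup

    # Memory consumed by the CUDA graphs is the difference of the two readings.
    total_cuda_graph_memory = free_memory_post_alloc - last_available_memory

    # Adjacent f-strings concatenate into one message, as in the patched call.
    message = (
        f"Total memory used for CUDA graphs: {total_cuda_graph_memory/1024/1024:.2f} MB"
        f"\nTotal memory available: {last_available_memory/1024/1024:.2f} MB"
    )
    print(message)
    # Total memory used for CUDA graphs: 325.00 MB
    # Total memory available: 17408.00 MB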