local-llm-server/llm_server/llm/vllm/info.py

import requests
from llm_server import opts
vllm_info = """<p><strong>Important:</strong> This endpoint is running <a href="https://github.com/vllm-project/vllm" target="_blank">vllm</a> and not all Oobabooga parameters are supported.</p>
<strong>Supported Parameters:</strong>
<ul>
<li><kbd>temperature</kbd></li>
<li><kbd>top_p</kbd></li>
<li><kbd>top_k</kbd></li>
<li><kbd>max_new_tokens</kbd></li>
<li><kbd>num_beams</kbd> <span style="font-size:9pt">(setting to greater than 1 enables beam search)</span></li>
<li><kbd>ban_eos_token</kbd></li>
</ul>"""