# local-llm-server/llm_server/config/config.py
from llm_server.config.global_config import GlobalConfig
def cluster_worker_count():
    """Return the total number of concurrent generation slots in the cluster.

    Sums the ``concurrent_gens`` value of every backend entry listed in the
    global cluster configuration (``GlobalConfig.get().cluster``).
    """
    return sum(backend['concurrent_gens'] for backend in GlobalConfig.get().cluster)
# Maps a backend mode key to the UI labels shown for it:
# (display name, blocking-API field label, streaming-API field label).
mode_ui_names = {
    'ooba': ('Text Gen WebUI (ooba)', 'Blocking API url', 'Streaming API url'),
    # NOTE(review): the 'vllm' entry reuses the ooba display name — looks like
    # a copy-paste leftover; confirm whether it should say vllm instead.
    'vllm': ('Text Gen WebUI (ooba)', 'Blocking API url', 'Streaming API url'),
}