# local-llm-server/llm_server/config/config.py
# NOTE: upstream repository was archived on 2024-10-27 (read-only).
from pydantic import BaseModel
from llm_server.config.global_config import GlobalConfig
def cluster_worker_count() -> int:
    """Return the total number of concurrent generation workers in the cluster.

    Sums the ``concurrent_gens`` value of every backend entry listed in the
    global cluster configuration.

    Returns:
        The cluster-wide concurrent-generation capacity.
    """
    # sum() over a generator replaces the manual accumulator loop.
    return sum(item.concurrent_gens for item in GlobalConfig.get().cluster)
class ModeUINameStr(BaseModel):
    """Display strings for one backend mode, shown in the web UI."""
    # Human-readable backend name, e.g. "Text Gen WebUI (ooba)".
    name: str
    # Label for the blocking (non-streaming) API URL field.
    api_name: str
    # Label for the streaming API URL field.
    streaming_name: str
# UI label sets keyed by backend mode identifier.
# NOTE(review): the 'vllm' entry reuses the ooba display strings verbatim —
# presumably intentional (vllm exposes an ooba-compatible API), but confirm
# the name string should not read "vllm" instead.
MODE_UI_NAMES = {
'ooba': ModeUINameStr(name='Text Gen WebUI (ooba)', api_name='Blocking API url', streaming_name='Streaming API url'),
'vllm': ModeUINameStr(name='Text Gen WebUI (ooba)', api_name='Blocking API url', streaming_name='Streaming API url'),
}