local-llm-server/llm_server/llm/__init__.py

from llm_server.llm import oobabooga, vllm
from llm_server.routes.cache import redis


def get_token_count(prompt):
    # The active backend is stored in the Redis-backed cache; the wrapper's
    # second argument is the type to cast the stored value to.
    backend_mode = redis.get('backend_mode', str)
    if backend_mode == 'vllm':
        return vllm.tokenize(prompt)
    elif backend_mode == 'ooba':
        return oobabooga.tokenize(prompt)
    else:
        raise Exception(f'Unknown backend mode: {backend_mode}')
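
# A minimal usage sketch (assumptions: the cache wrapper exposes a matching
# .set() method, and the prompt string here is purely illustrative):
#
#   from llm_server.llm import get_token_count
#   from llm_server.routes.cache import redis
#
#   redis.set('backend_mode', 'vllm')
#   token_count = get_token_count('Hello, world!')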