from llm_server.llm import oobabooga, vllm
from llm_server.custom_redis import redis


def get_token_count(prompt: str, backend_url: str):
    # Dispatch tokenization to whichever backend is active; the current
    # mode is tracked in Redis so all workers stay in sync.
    backend_mode = redis.get('backend_mode', dtype=str)
    if backend_mode == 'vllm':
        return vllm.tokenize(prompt, backend_url)
    elif backend_mode == 'ooba':
        return oobabooga.tokenize(prompt)
    else:
        raise Exception(f'Unknown backend mode: {backend_mode}')
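

# A minimal usage sketch (assumptions: the custom Redis wrapper exposes a
# standard set(), 'backend_mode' has been set to 'vllm', and the backend URL
# below is hypothetical):
#
#   redis.set('backend_mode', 'vllm')
#   n_tokens = get_token_count('Hello, world!', 'http://localhost:8000')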