local-llm-server/llm_server/llm/generator.py

15 lines
392 B
Python

from llm_server import opts
def generator(request_json_body):
    """Dispatch a generation request to the backend selected by ``opts.mode``.

    :param request_json_body: parsed JSON payload of the incoming request,
        forwarded unchanged to the selected backend's ``generate`` function.
    :returns: whatever the backend's ``generate`` returns (vllm path).
    :raises NotImplementedError: for the ``'oobabooga'`` mode — the backend
        exists but is deliberately not wired up yet (see commented import).
    :raises Exception: when ``opts.mode`` names no known backend.
    """
    if opts.mode == 'oobabooga':
        # Intentionally disabled for now; re-enable by restoring these lines:
        # from .oobabooga.generate import generate
        # return generate(request_json_body)
        raise NotImplementedError
    elif opts.mode == 'vllm':
        # Imported lazily so other modes don't pay the vllm import cost.
        from .vllm.generate import generate
        return generate(request_json_body)
    else:
        # Keep the bare Exception type for backward compatibility with any
        # broad `except Exception` in callers, but include the bad mode so
        # a misconfiguration is actually debuggable.
        raise Exception(f'unknown opts.mode: {opts.mode!r}')