local-llm-server/llm_server/llm/oobabooga/generate.py

"""
This file is used by the worker that processes requests.
"""
import traceback

import requests

from llm_server.config.global_config import GlobalConfig
# def generate(json_data: dict):
#     try:
#         r = requests.post(f'{GlobalConfig.get().backend_url}/api/v1/generate', json=json_data, verify=GlobalConfig.get().verify_ssl, timeout=GlobalConfig.get().backend_generate_request_timeout)
#     except requests.exceptions.ReadTimeout:
#         return False, None, 'Request to backend timed out'
#     except Exception as e:
#         traceback.print_exc()
#         return False, None, 'Request to backend encountered error'
#     if r.status_code != 200:
#         return False, r, f'Backend returned {r.status_code}'
#     return True, r, None
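

# ---------------------------------------------------------------------------
# A minimal working sketch of the request pattern the commented-out generate()
# above describes. This is illustrative, not the project's canonical
# implementation: it assumes GlobalConfig.get() exposes backend_url,
# verify_ssl, and backend_generate_request_timeout exactly as the commented
# code reads, and the name generate_sketch is hypothetical.
# ---------------------------------------------------------------------------
def generate_sketch(json_data: dict):
    """POST json_data to the backend's /api/v1/generate endpoint.

    Returns a (success, response, error_message) tuple, mirroring the
    commented-out generate() above.
    """
    try:
        r = requests.post(
            f'{GlobalConfig.get().backend_url}/api/v1/generate',
            json=json_data,
            verify=GlobalConfig.get().verify_ssl,
            timeout=GlobalConfig.get().backend_generate_request_timeout,
        )
    except requests.exceptions.ReadTimeout:
        # The backend did not respond within the configured timeout.
        return False, None, 'Request to backend timed out'
    except Exception:
        # Any other failure (connection refused, bad URL, etc.) is logged
        # and reported generically.
        traceback.print_exc()
        return False, None, 'Request to backend encountered error'
    if r.status_code != 200:
        return False, r, f'Backend returned {r.status_code}'
    return True, r, None


# Example call (hypothetical payload shape for an oobabooga-style API):
# ok, resp, err = generate_sketch({'prompt': 'Hello', 'max_new_tokens': 16})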