reorganize to api v2

This commit is contained in:
Cyberes 2023-09-30 19:42:41 -06:00
parent 114f36e709
commit e0f86d053a
9 changed files with 4 additions and 4 deletions

View File

@@ -10,7 +10,7 @@ from llm_server.config.load import load_config, parse_backends
 from llm_server.custom_redis import redis
 from llm_server.database.create import create_db
 from llm_server.routes.queue import priority_queue
-from llm_server.routes.v1.generate_stats import generate_stats
+from llm_server.routes.v2.generate_stats import generate_stats
 from llm_server.workers.threader import start_background
 script_path = os.path.dirname(os.path.realpath(__file__))

View File

@@ -4,7 +4,7 @@ from threading import Thread
 from llm_server import opts
 from llm_server.cluster.stores import redis_running_models
 from llm_server.cluster.worker import cluster_worker
-from llm_server.routes.v1.generate_stats import generate_stats
+from llm_server.routes.v2.generate_stats import generate_stats
 from llm_server.workers.inferencer import start_workers
 from llm_server.workers.mainer import main_background_thread
 from llm_server.workers.moderator import start_moderation_workers

View File

@@ -21,7 +21,7 @@ from llm_server.database.create import create_db
 from llm_server.pre_fork import server_startup
 from llm_server.routes.openai import openai_bp
 from llm_server.routes.server_error import handle_server_error
-from llm_server.routes.v1 import bp
+from llm_server.routes.v2 import bp
 from llm_server.sock import init_socketio
 # TODO: per-backend workers
@@ -65,7 +65,7 @@ from llm_server.helpers import auto_set_base_client_api
 from llm_server.llm.vllm.info import vllm_info
 from llm_server.custom_redis import flask_cache
 from llm_server.llm import redis
-from llm_server.routes.v1.generate_stats import generate_stats
+from llm_server.routes.v2.generate_stats import generate_stats
 app = Flask(__name__)
 init_socketio(app)