local-llm-server/other/vllm/vllm_api_server.py

import argparse
import json
import subprocess
import sys
import time
from pathlib import Path
from typing import AsyncGenerator
import uvicorn
from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse, Response, StreamingResponse
from vllm.engine.arg_utils import AsyncEngineArgs
from vllm.engine.async_llm_engine import AsyncLLMEngine
from vllm.sampling_params import SamplingParams
from vllm.transformers_utils.tokenizer import get_tokenizer
from vllm.utils import random_uuid

TIMEOUT_KEEP_ALIVE = 5  # seconds.
app = FastAPI()
engine = None
tokenizer = None
served_model = None
model_config = {}
startup_time = int(time.time())
# TODO: figure out ROPE scaling
# TODO: make sure error messages are returned in the response

def get_gpu_pstate():
    cmd = ['nvidia-smi', '--query-gpu=pstate', '--format=csv']
    output = subprocess.check_output(cmd).decode('utf-8')
    lines = output.strip().split('\n')
    if len(lines) > 1:
        return int(lines[1].strip('P'))
    else:
        return None
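
# Example (illustrative): `nvidia-smi --query-gpu=pstate --format=csv` normally
# prints a "pstate" header row followed by one row per GPU such as "P0", so
# get_gpu_pstate() parses only the first GPU's row and returns 0 here, or None
# when no GPU row is present.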
@app.get("/health")
async def health() -> Response:
"""Health check."""
return Response(status_code=200)
@app.get("/info")
2024-01-10 15:46:04 -07:00
async def info(request: Request) -> Response:
2023-09-29 22:36:03 -06:00
return JSONResponse({
'uptime': int(time.time() - startup_time),
'startup_time': startup_time,
'model': served_model,
'model_config': model_config,
'nvidia': {
'pstate': get_gpu_pstate()
}
})
@app.get("/model")
2024-01-10 15:46:04 -07:00
async def model(request: Request) -> Response:
return JSONResponse({'model': served_model, 'timestamp': int(time.time())})
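
# Example (illustrative): GET /model returns {"model": "<model directory name>",
# "timestamp": ...}; GET /info additionally reports uptime, the contents of the
# model's config.json, and the current GPU P-state.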
@app.post("/tokenize")
2024-01-10 15:46:04 -07:00
async def tokenize(request: Request) -> Response:
request_dict = await request.json()
to_tokenize = request_dict.get("input")
if not to_tokenize:
JSONResponse({'error': 'must have input field'}, status_code=400)
tokens = tokenizer.tokenize(to_tokenize)
response = {}
if request_dict.get("return", False):
response['tokens'] = tokens
response['length'] = len(tokens)
return JSONResponse(response)
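
# Example (illustrative): POST /tokenize with {"input": "Hello world", "return": true}
# responds with {"tokens": [...], "length": <token count>}; without "return" only
# the length is sent back, and a missing "input" field yields a 400 error response.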
@app.post("/generate")
async def generate(request: Request) -> Response:
"""Generate completion for the request.
The request should be a JSON object with the following fields:
- prompt: the prompt to use for the generation.
- stream: whether to stream the results or not.
- other fields: the sampling parameters (See `SamplingParams` for details).
"""
request_dict = await request.json()
prompt = request_dict.pop("prompt")
stream = request_dict.pop("stream", False)
sampling_params = SamplingParams(**request_dict)
request_id = random_uuid()
2024-01-10 15:46:04 -07:00
results_generator = engine.generate(prompt, sampling_params, request_id)
# Streaming case
async def stream_results() -> AsyncGenerator[bytes, None]:
async for request_output in results_generator:
prompt = request_output.prompt
text_outputs = [
prompt + output.text for output in request_output.outputs
]
ret = {"text": text_outputs}
yield (json.dumps(ret) + "\0").encode("utf-8")
if stream:
2024-01-10 15:46:04 -07:00
return StreamingResponse(stream_results())
# Non-streaming case
final_output = None
async for request_output in results_generator:
if await request.is_disconnected():
# Abort the request if the client disconnects.
await engine.abort(request_id)
return Response(status_code=499)
final_output = request_output
assert final_output is not None
prompt = final_output.prompt
text_outputs = [prompt + output.text for output in final_output.outputs]
ret = {"text": text_outputs}
return JSONResponse(ret)
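
# Example (illustrative): POST /generate with
#   {"prompt": "Hello", "max_tokens": 16, "temperature": 0.0, "stream": false}
# returns {"text": ["Hello ..."]}, where each entry is the prompt followed by a
# completion. Any extra JSON fields are forwarded to SamplingParams unchanged.
# With "stream": true, the response is a stream of JSON objects of the same
# shape, each terminated by a NUL byte ("\0") rather than a newline.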

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--host", type=str, default=None)
    parser.add_argument("--port", type=int, default=8000)
    parser = AsyncEngineArgs.add_cli_args(parser)
    args = parser.parse_args()
    engine_args = AsyncEngineArgs.from_cli_args(args)

    # Do some init setup before loading the model.
    model_path = Path(args.model)
    served_model = model_path.name
    try:
        model_config = json.loads((model_path / 'config.json').read_text())
    except Exception as e:
        print(f"Failed to load the model's config - {e.__class__.__name__}: {e}")
        sys.exit(1)

    engine = AsyncLLMEngine.from_engine_args(engine_args)
    tokenizer = get_tokenizer(engine_args.tokenizer,
                              tokenizer_mode=args.tokenizer_mode,
                              trust_remote_code=args.trust_remote_code)

    uvicorn.run(app,
                host=args.host,
                port=args.port,
                log_level="debug",
                timeout_keep_alive=TIMEOUT_KEEP_ALIVE)
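
# Example launch (illustrative; the model path is a placeholder): the parser also
# accepts all AsyncEngineArgs flags, so a typical invocation might be
#   python vllm_api_server.py --model /models/my-model --host 0.0.0.0 --port 8000
# --model must point at a local directory containing config.json, because the
# script reads <model>/config.json at startup and exits if it cannot.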