local-llm-server/llm_server/routes/v1/info.py

import time
from flask import jsonify, request
from . import bp
from ..cache import cache
from ...llm.info import get_running_model
# cache = Cache(bp, config={'CACHE_TYPE': 'simple'})

# @bp.route('/info', methods=['GET'])
# # @cache.cached(timeout=3600, query_string=True)
# def get_info():
#     # requests.get()
#     return 'yes'


@bp.route('/model', methods=['GET'])
def get_model():
    # We will manage caching ourselves since we don't want to cache
    # when the backend is down. Also, Cloudflare won't cache 500 errors.
    cache_key = 'model_cache::' + request.url
    cached_response = cache.get(cache_key)
    if cached_response:
        return cached_response

    model, error = get_running_model()
    if not model:
        response = jsonify({
            'code': 502,
            'msg': 'failed to reach backend',
            'type': error.__class__.__name__
        }), 500  # return 500 so Cloudflare doesn't intercept us
    else:
        response = jsonify({
            'result': model,
            'timestamp': int(time.time())
        }), 200
        # Only cache successful lookups, per the comment above:
        # a downed backend should never be served from cache.
        cache.set(cache_key, response, timeout=60)

    return response
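
For reference, a client might query this endpoint roughly as follows. This is a minimal sketch, not part of the module: the host/port and the "/api/v1" prefix are assumptions about how the v1 blueprint is mounted, not something defined in this file.

import requests

# Hypothetical client call for GET /model; adjust the base URL to your deployment.
resp = requests.get('http://localhost:5000/api/v1/model')
payload = resp.json()
if resp.status_code == 200:
    # Success: the payload carries the running model name and a timestamp.
    print('running model:', payload['result'], '@', payload['timestamp'])
else:
    # A 500 means the LLM backend could not be reached.
    print('backend unreachable:', payload.get('msg'), payload.get('type'))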