From 1f5e2da637331a43618b272158257fd9732375ca Mon Sep 17 00:00:00 2001
From: Cyberes
Date: Wed, 23 Aug 2023 16:02:57 -0600
Subject: [PATCH] print fetch model error message

---
 llm_server/llm/info.py       | 8 ++++----
 llm_server/routes/v1/info.py | 5 +++--
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/llm_server/llm/info.py b/llm_server/llm/info.py
index 907e10e..f9a5369 100644
--- a/llm_server/llm/info.py
+++ b/llm_server/llm/info.py
@@ -8,21 +8,21 @@ def get_running_model():
         try:
             backend_response = requests.get(f'{opts.backend_url}/api/v1/model')
         except Exception as e:
-            return False
+            return False, e
         try:
             r_json = backend_response.json()
             return r_json['result']
         except Exception as e:
-            return False
+            return False, e
     elif opts.mode == 'hf-textgen':
         try:
             backend_response = requests.get(f'{opts.backend_url}/info')
         except Exception as e:
-            return False
+            return False, e
         try:
             r_json = backend_response.json()
             return r_json['model_id'].replace('/', '_')
         except Exception as e:
-            return False
+            return False, e
     else:
         raise Exception

diff --git a/llm_server/routes/v1/info.py b/llm_server/routes/v1/info.py
index 3ffea16..ce15154 100644
--- a/llm_server/routes/v1/info.py
+++ b/llm_server/routes/v1/info.py
@@ -22,11 +22,12 @@ from ..cache import cache
 @bp.route('/model', methods=['GET'])
 @cache.cached(timeout=60, query_string=True)
 def get_model():
-    model = get_running_model()
+    model, error = get_running_model()
    if not model:
         return jsonify({
             'code': 500,
-            'error': 'failed to reach backend'
+            'error': 'failed to reach backend',
+            'msg': error
         }), 500
     else:
         return jsonify({
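
A note on the tuple contract this patch introduces: the error paths of
get_running_model() now return (False, e), but both success paths still return a
bare model-name string, so the new unpacking in get_model() (model, error =
get_running_model()) raises ValueError once the backend actually responds. Below is a
minimal sketch of a variant where every path returns a (model, error) tuple. It is not
part of this commit; the "if opts.mode == 'oobabooga':" branch condition, the
"from llm_server import opts" import path, and the merged try blocks are all assumptions,
since none of them appear in the hunks above.

    import requests

    from llm_server import opts  # assumed import path; the patch only shows opts.* usage

    def get_running_model():
        # Every path returns a (model, error) tuple so the caller can
        # always unpack the result safely.
        if opts.mode == 'oobabooga':  # branch condition assumed, not shown in the hunk
            try:
                backend_response = requests.get(f'{opts.backend_url}/api/v1/model')
                return backend_response.json()['result'], None
            except Exception as e:
                return False, e
        elif opts.mode == 'hf-textgen':
            try:
                backend_response = requests.get(f'{opts.backend_url}/info')
                return backend_response.json()['model_id'].replace('/', '_'), None
            except Exception as e:
                return False, e
        else:
            return False, Exception(f'unknown mode: {opts.mode}')

With that shape, model, error = get_running_model() in llm_server/routes/v1/info.py
works for both the success and failure cases, and error is None whenever model is
truthy.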