From b10d22ca0db53f9df69bfb76430a77d3e5569e5a Mon Sep 17 00:00:00 2001
From: Cyberes
Date: Sat, 30 Sep 2023 23:03:42 -0600
Subject: [PATCH] cache the home page in the background

---
 llm_server/config/config.py      |  3 ++-
 llm_server/config/load.py        |  1 +
 llm_server/opts.py               |  1 +
 llm_server/routes/v1/__init__.py |  2 +-
 llm_server/workers/mainer.py     | 14 ++++++++++++--
 templates/home.html              |  9 +--------
 6 files changed, 18 insertions(+), 12 deletions(-)

diff --git a/llm_server/config/config.py b/llm_server/config/config.py
index 645e81e..5308827 100644
--- a/llm_server/config/config.py
+++ b/llm_server/config/config.py
@@ -34,7 +34,8 @@ config_default_vars = {
     'openai_moderation_enabled': True,
     'netdata_root': None,
     'show_backends': True,
-    'cluster_workers': 30
+    'cluster_workers': 30,
+    'background_homepage_cacher': True
 }
 
 config_required_vars = ['cluster', 'mode', 'llm_middleware_name']
diff --git a/llm_server/config/load.py b/llm_server/config/load.py
index 9c2e7f3..edc5991 100644
--- a/llm_server/config/load.py
+++ b/llm_server/config/load.py
@@ -53,6 +53,7 @@ def load_config(config_path):
     opts.openai_moderation_enabled = config['openai_moderation_enabled']
     opts.show_backends = config['show_backends']
     opts.cluster_workers = config['cluster_workers']
+    opts.background_homepage_cacher = config['background_homepage_cacher']
 
     if opts.openai_expose_our_model and not opts.openai_api_key:
         print('If you set openai_expose_our_model to false, you must set your OpenAI key in openai_api_key.')
diff --git a/llm_server/opts.py b/llm_server/opts.py
index bbd6201..ae07ca4 100644
--- a/llm_server/opts.py
+++ b/llm_server/opts.py
@@ -38,3 +38,4 @@ openai_moderation_enabled = True
 cluster = {}
 show_backends = True
 cluster_workers = 30
+background_homepage_cacher = True
diff --git a/llm_server/routes/v1/__init__.py b/llm_server/routes/v1/__init__.py
index c492726..a52cb2e 100644
--- a/llm_server/routes/v1/__init__.py
+++ b/llm_server/routes/v1/__init__.py
@@ -14,5 +14,5 @@ def fallback(path):
     response_msg = format_sillytavern_err(error_msg, error_type='API')
     return jsonify({
         'results': [{'text': response_msg}],
-        'result': f'Wrong API path, visit {base_client_api} for more info'
+        'result': f'Wrong API path, visit {base_client_api} for more info.'
     }), 200  # return 200 so we don't trigger an error message in the client's ST
diff --git a/llm_server/workers/mainer.py b/llm_server/workers/mainer.py
index ca82d60..580060d 100644
--- a/llm_server/workers/mainer.py
+++ b/llm_server/workers/mainer.py
@@ -1,11 +1,13 @@
 import time
 
+import requests
+
 from llm_server import opts
-from llm_server.cluster.backend import get_a_cluster_backend, get_backends
+from llm_server.cluster.backend import get_backends
 from llm_server.cluster.cluster_config import cluster_config
 from llm_server.custom_redis import redis
 from llm_server.database.database import weighted_average_column_for_model
-from llm_server.llm.info import get_info, get_running_model
+from llm_server.llm.info import get_info
 
 
 def main_background_thread():
@@ -26,6 +28,14 @@
             cluster_config.set_backend_value(backend_url, 'average_output_tokens', average_output_tokens)
         if average_generation_elapsed_sec and average_output_tokens:
             cluster_config.set_backend_value(backend_url, 'estimated_avg_tps', estimated_avg_tps)
+
+        if opts.background_homepage_cacher:
+            try:
+                base_client_api = redis.get('base_client_api', dtype=str)
+                r = requests.get('https://' + base_client_api, timeout=5)
+            except Exception as e:
+                print(f'Failed to fetch the homepage - {e.__class__.__name__}: {e}')
+
         time.sleep(30)
diff --git a/templates/home.html b/templates/home.html
index d599aea..3a020a4 100644
--- a/templates/home.html
+++ b/templates/home.html
@@ -111,7 +111,7 @@
 
                 Instructions
 
-                In Settings > Power User Options, enable Relaxed API URLS
+                In Settings > Power User Options, enable Relaxed API URLS.
                 Set your API type to {{ mode_name }}
                 Enter {{ client_api }} in the {{ api_input_textbox }} textbox.
                 {% if enable_streaming %}
@@ -167,17 +167,10 @@
 
     {% endfor %}
-
-
-
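
Taken together, the patch threads a new background_homepage_cacher option (default True) through config.py, load.py, and opts.py, then uses it in the main background thread: every 30 seconds the server fetches its own homepage, so the page's server-side cache is rebuilt before a real visitor hits it cold. As a rough, standalone sketch of that pattern only — warm_homepage_cache and its get_base_client_api parameter are hypothetical names standing in for the patch's inline loop and its Redis lookup of base_client_api, and HTTPS is assumed as in the patch — the worker amounts to:

    import time

    import requests


    def warm_homepage_cache(get_base_client_api, interval=30):
        # Periodically fetch our own homepage so its cached copy stays fresh.
        while True:
            base_client_api = get_base_client_api()
            if base_client_api:
                try:
                    # Requesting the page makes the server regenerate and
                    # cache it, so the next visitor gets a warm response.
                    requests.get('https://' + base_client_api, timeout=5)
                except Exception as e:
                    print(f'Failed to fetch the homepage - {e.__class__.__name__}: {e}')
            time.sleep(interval)

In the patch itself this logic runs inline at the end of each pass of main_background_thread()'s existing 30-second loop rather than in its own thread, and the opts.background_homepage_cacher guard lets a deployment switch the extra self-request off in its config.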