<!-- local-llm-server/templates/home.html -->

<!DOCTYPE html>
<html lang="en">
<head>
<title>{{ llm_middleware_name }}</title>
<meta charset="utf-8"/>
<meta name="viewport" content="width=device-width, initial-scale=1"/>
{{ analytics_tracking_code|safe }}
<style>
.container {
padding: 1em 3em;
}
#json {
background-color: rgb(229 231 235);
padding: 1em;
display: inline-block;
margin: auto;
max-width: 95%;
}
body {
background-color: #f3f4f6;
}
.info-box {
background-color: white;
padding: 1em;
margin: auto;
border-radius: 10px;
max-width: 95%;
}
pre code.hljs {
padding: 0 !important;
}
a, a:visited {
color: blue;
}
.footer {
font-size: 7pt;
text-align: center;
}
pre {
white-space: pre-wrap;
word-wrap: break-word;
text-align: justify;
}
@media only screen and (max-width: 600px) {
.container {
padding: 1em;
}
h1 {
font-size: 1.5em;
}
}
.hidden {
display: none;
}
.header-workers {
font-weight: normal;
font-size: 14pt;
}
h3 {
font-size: 16pt;
}
.no-marker {
list-style: none;
}
</style>
</head>
<body>
<script>
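// Rendered server-side: true unless the current model is reported as 'offline'.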
const backend_online = {% if current_model != 'offline' %}true{% else %}false{% endif %};
</script>
<div class="container">
<h1 style="text-align: center;margin-top: 0;">{{ llm_middleware_name }}</h1>
<div class="info-box">
<p><strong>Current Model:</strong> <span id="model">{{ default_model }}</span></p>
<p>
<strong>Estimated Wait Time:</strong> <span id="estimatedWait">{{ default_estimated_wait }}</span><br>
Processing: {{ default_active_gen_workers }}<br>
Queued: {{ default_proompters_in_queue }}
</p>
<br>
<p><strong>Client API URL:</strong> {{ client_api }}</p>
<p><strong>Streaming API URL:</strong> {{ ws_client_api if enable_streaming else 'Disabled' }}</p>
<p><strong>OpenAI-Compatible API URL:</strong> {{ openai_client_api }}</p>
{% if info_html|length > 1 %}
<br>
{{ info_html|safe }}
{% endif %}
</div>
<br>
<div class="info-box">
<h3>Instructions</h3>
<div id="instructions">
<ol>
<li>In Settings > Power User Options, enable <kbd>Relaxed API URLS</kbd>.</li>
<li>Set your API type to <kbd>{{ mode_name }}</kbd>.</li>
<li>Enter <kbd>{{ client_api }}</kbd> in the <kbd>{{ api_input_textbox }}</kbd> textbox.</li>
{% if enable_streaming %}
<li>Enter <kbd>{{ ws_client_api }}</kbd> in the <kbd>{{ streaming_input_textbox }}</kbd> textbox.</li>
{% endif %}
<li>If you have a token, check the <kbd>Mancer AI</kbd> checkbox and enter your token in the <kbd>Mancer API key</kbd> textbox.</li>
<li>Click <kbd>Connect</kbd> to test the connection.</li>
<li>Open your preset config and set <kbd>Context Size</kbd> to {{ default_context_size }}.</li>
<li>Follow this guide to get set up: <a href="https://rentry.org/freellamas" target="_blank">rentry.org/freellamas</a>
</li>
</ol>
</div>
{% if openai_client_api != 'disabled' and expose_openai_system_prompt %}
<br>
<div id="openai">
<strong>OpenAI-Compatible API</strong>
<p>The OpenAI-compatible API adds a system prompt to set the AI's behavior to a "helpful assistant". You can view this prompt <a href="/api/openai/v1/prompt">here</a>.</p>
</div>
{% endif %}
<br>
<div id="extra-info">{{ extra_info|safe }}</div>
</div>
<br>
<div class="info-box">
<h3>Statistics</h3>
Proompters:
<ul style="margin-top: 5px;">
<li class="no-marker">5 minutes: {{ proompters_5_min }}</li>
<li class="no-marker">24 hours: {{ proompters_24_hrs }}</li>
</ul>
</div>
<br>
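{# One info box per entry in model_choices, showing that backend's queue stats and API endpoints. #}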
{% for key, value in model_choices.items() %}
<div class="info-box">
<h3>{{ key }} <span class="header-workers">- {{ value.backend_count }} {% if value.backend_count == 1 %}worker{% else %}workers{% endif %}</span></h3>
{% if value.estimated_wait == 0 and value.processing >= value.concurrent_gens %}
{# The queue is empty but every worker is busy, so there will be a short wait we can't measure exactly; bound it by the average generation time. #}
{% set estimated_wait_sec = "less than " + value.avg_generation_time|int|string + " seconds" %}
{% else %}
{% set estimated_wait_sec = value.estimated_wait|int|string + " seconds" %}
{% endif %}
<p>
<strong>Estimated Wait Time:</strong> {{ estimated_wait_sec }}<br>
Processing: {{ value.processing }}<br>
Queued: {{ value.queued }}<br>
</p>
<p>
<strong>Client API URL:</strong> {{ value.client_api }}<br>
<strong>Streaming API URL:</strong> {{ value.ws_client_api }}<br>
<strong>OpenAI-Compatible API URL:</strong> {{ value.openai_client_api }}
</p>
<p><strong>Context Size:</strong> {{ value.context_size }}</p>
<p><strong>Average Generation Time:</strong> {{ value.avg_generation_time | int }} seconds</p>
</div>
<br>
{% endfor %}
</div>
<div class="footer">
<a href="https://git.evulid.cc/cyberes/local-llm-server" target="_blank">git.evulid.cc/cyberes/local-llm-server</a>
</div>
</body>
</html>