option to show SYSTEM tokens in stats

This commit is contained in:
Cyberes 2023-09-25 23:39:50 -06:00
parent e37cde5d48
commit b44dda7a3a
5 changed files with 12 additions and 4 deletions

View File

@@ -26,6 +26,7 @@ config_default_vars = {
'admin_token': None, 'admin_token': None,
'openai_epose_our_model': False, 'openai_epose_our_model': False,
'openai_force_no_hashes': True, 'openai_force_no_hashes': True,
'include_system_tokens_in_stats': True
} }
config_required_vars = ['token_limit', 'concurrent_gens', 'mode', 'llm_middleware_name'] config_required_vars = ['token_limit', 'concurrent_gens', 'mode', 'llm_middleware_name']

View File

@@ -97,12 +97,17 @@ def average_column_for_model(table_name, column_name, model_name):
cursor.close() cursor.close()
def weighted_average_column_for_model(table_name, column_name, model_name, backend_name, backend_url, exclude_zeros: bool = False): def weighted_average_column_for_model(table_name, column_name, model_name, backend_name, backend_url, exclude_zeros: bool = False, include_system_tokens: bool = True):
if include_system_tokens:
sql = f"SELECT {column_name}, id FROM {table_name} WHERE model = %s AND backend_mode = %s AND backend_url = %s ORDER BY id DESC"
else:
sql = f"SELECT {column_name}, id FROM {table_name} WHERE model = %s AND backend_mode = %s AND backend_url = %s AND (token NOT LIKE 'SYSTEM__%%' OR token IS NULL) ORDER BY id DESC"
conn = db_pool.connection() conn = db_pool.connection()
cursor = conn.cursor() cursor = conn.cursor()
try: try:
try: try:
cursor.execute(f"SELECT {column_name}, id FROM {table_name} WHERE model = %s AND backend_mode = %s AND backend_url = %s AND (token NOT LIKE 'SYSTEM__%%' OR token IS NULL) ORDER BY id DESC", (model_name, backend_name, backend_url,)) cursor.execute(sql, (model_name, backend_name, backend_url,))
results = cursor.fetchall() results = cursor.fetchall()
except Exception: except Exception:
traceback.print_exc() traceback.print_exc()

View File

@@ -31,3 +31,4 @@ backend_generate_request_timeout = 95
admin_token = None admin_token = None
openai_expose_our_model = False openai_expose_our_model = False
openai_force_no_hashes = True openai_force_no_hashes = True
include_system_tokens_in_stats = True

View File

@@ -46,14 +46,14 @@ class MainBackgroundThread(Thread):
# exclude_zeros=True filters out rows where an error message was returned. Previously, if there was an error, 0 # exclude_zeros=True filters out rows where an error message was returned. Previously, if there was an error, 0
# was entered into the column. The new code enters null instead but we need to be backwards compatible for now. # was entered into the column. The new code enters null instead but we need to be backwards compatible for now.
average_generation_elapsed_sec = weighted_average_column_for_model('prompts', 'generation_time', opts.running_model, opts.mode, opts.backend_url, exclude_zeros=True) or 0 average_generation_elapsed_sec = weighted_average_column_for_model('prompts', 'generation_time', opts.running_model, opts.mode, opts.backend_url, exclude_zeros=True, include_system_tokens=opts.include_system_tokens_in_stats) or 0
if average_generation_elapsed_sec: # returns None on exception if average_generation_elapsed_sec: # returns None on exception
redis.set('average_generation_elapsed_sec', average_generation_elapsed_sec) redis.set('average_generation_elapsed_sec', average_generation_elapsed_sec)
# overall = average_column_for_model('prompts', 'generation_time', opts.running_model) # overall = average_column_for_model('prompts', 'generation_time', opts.running_model)
# print(f'Weighted: {average_generation_elapsed_sec}, overall: {overall}') # print(f'Weighted: {average_generation_elapsed_sec}, overall: {overall}')
average_output_tokens = weighted_average_column_for_model('prompts', 'response_tokens', opts.running_model, opts.mode, opts.backend_url, exclude_zeros=True) or 0 average_output_tokens = weighted_average_column_for_model('prompts', 'response_tokens', opts.running_model, opts.mode, opts.backend_url, exclude_zeros=True, include_system_tokens=opts.include_system_tokens_in_stats) or 0
if average_generation_elapsed_sec: if average_generation_elapsed_sec:
redis.set('average_output_tokens', average_output_tokens) redis.set('average_output_tokens', average_output_tokens)

View File

@@ -102,6 +102,7 @@ openai.api_key = opts.openai_api_key
opts.admin_token = config['admin_token'] opts.admin_token = config['admin_token']
opts.openai_expose_our_model = config['openai_epose_our_model'] opts.openai_expose_our_model = config['openai_epose_our_model']
opts.openai_force_no_hashes = config['openai_force_no_hashes'] opts.openai_force_no_hashes = config['openai_force_no_hashes']
opts.include_system_tokens_in_stats = config['include_system_tokens_in_stats']
if opts.openai_expose_our_model and not opts.openai_api_key: if opts.openai_expose_our_model and not opts.openai_api_key:
print('If you set openai_epose_our_model to false, you must set your OpenAI key in openai_api_key.') print('If you set openai_epose_our_model to false, you must set your OpenAI key in openai_api_key.')