local-llm-server/llm_server/llm/vllm/tokenize.py

import requests
import tiktoken

from llm_server import opts
def tokenize(prompt: str) -> int:
    tokenizer = tiktoken.get_encoding("cl100k_base")
    if not prompt:
        # The tokenizers have issues when the prompt is None.
        return 0
    try:
        # Ask the vLLM backend for the exact token count of the prompt.
        r = requests.post(f'{opts.backend_url}/tokenize', json={'input': prompt}, verify=opts.verify_ssl, timeout=opts.backend_generate_request_timeout)
        j = r.json()
        return j['length']
    except Exception as e:
        # Fall back to a local tiktoken estimate, padded to stay on the safe side.
        print(f'Failed to tokenize using VLLM - {e.__class__.__name__}: {e}')
        return len(tokenizer.encode(prompt)) + 10
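

# Minimal usage sketch, assuming `opts.backend_url`, `opts.verify_ssl`, and
# `opts.backend_generate_request_timeout` are already configured and a vLLM
# backend exposing the /tokenize endpoint is reachable; if the request fails,
# the padded tiktoken estimate above is returned instead.
if __name__ == '__main__':
    print(tokenize('Hello, world!'))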