local-llm-server/llm_server/llm/vllm/tokenize.py

import traceback

import requests
import tiktoken

from llm_server import opts

# Local fallback tokenizer, used when the vLLM backend can't be reached.
tokenizer = tiktoken.get_encoding("cl100k_base")


def tokenize(prompt: str) -> int:
    try:
        # Ask the vLLM backend to tokenize the prompt and report the token count.
        r = requests.post(f'{opts.backend_url}/tokenize', json={'input': prompt}, verify=opts.verify_ssl, timeout=opts.backend_generate_request_timeout)
        j = r.json()
        return j['length']
    except Exception:
        print(traceback.format_exc())
        # Fall back to a local tiktoken estimate, plus a 10-token margin since the
        # local encoding may not match the backend's tokenizer exactly.
        return len(tokenizer.encode(prompt)) + 10
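A minimal usage sketch (not part of the original file; it assumes the llm_server.opts module has backend_url, verify_ssl, and backend_generate_request_timeout configured, as the function above expects):

from llm_server.llm.vllm.tokenize import tokenize

# Returns the backend's token count, or the local tiktoken estimate on failure.
print(tokenize("Hello, world!"))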