import asyncio

import aiohttp
import tiktoken

from llm_server import opts


def tokenize(prompt: str, backend_url: str) -> int:
    """Count the tokens in `prompt` by querying the backend's /tokenize
    endpoint, falling back to a local tiktoken estimate on failure."""
    assert backend_url
    if not prompt:
        return 0

    async def run():
        # Local fallback tokenizer. cl100k_base only approximates the
        # backend's real vocabulary, so fallback counts are estimates.
        tokenizer = tiktoken.get_encoding("cl100k_base")

        async def send_chunk(session, chunk):
            try:
                async with session.post(f'{backend_url}/tokenize', json={'input': chunk}, verify_ssl=opts.verify_ssl, timeout=opts.backend_generate_request_timeout) as response:
                    j = await response.json()
                    return j['length']
            except Exception as e:
                print(f'Failed to tokenize using VLLM - {e.__class__.__name__}: {e}')
                # Estimate locally and pad by 10 tokens to err on the high side.
                return len(tokenizer.encode(chunk)) + 10

        # Split the prompt into fixed-size chunks so they can be tokenized
        # concurrently rather than in one large request.
        chunk_size = 300
        chunks = [prompt[i:i + chunk_size] for i in range(0, len(prompt), chunk_size)]

        async with aiohttp.ClientSession() as session:
            tasks = [send_chunk(session, chunk) for chunk in chunks]
            lengths = await asyncio.gather(*tasks)

        return sum(lengths)

    return asyncio.run(run())
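

# Usage sketch, assuming a reachable VLLM-compatible backend and a configured
# llm_server.opts; the URL below is a placeholder, not part of this module:
#
#   count = tokenize("An example prompt to count.", "http://127.0.0.1:8000")
#   print(count)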