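"""Benchmark a text-generation-inference (TGI) release.

For each model configured in main(), start TGI in a Docker container, run
text-generation-inference-benchmark against it over a range of request rates,
and aggregate the per-run JSON results into a single parquet file.
"""
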
import argparse
import datetime
import json
import os
import traceback
from typing import Dict, Tuple, List

import GPUtil
import docker
from docker.models.containers import Container
from loguru import logger
import pandas as pd


class InferenceEngineRunner:
    def __init__(self, model: str):
        self.model = model

    def run(self, parameters: list[tuple], gpus: int = 0):
        raise NotImplementedError("This method should be implemented by the subclass")

    def stop(self):
        raise NotImplementedError("This method should be implemented by the subclass")


class TGIDockerRunner(InferenceEngineRunner):
    def __init__(
        self,
        model: str,
        image: str = "ghcr.io/huggingface/text-generation-inference:latest",
        volumes=None,
    ):
        super().__init__(model)
        if volumes is None:
            volumes = []
        self.container = None
        self.image = image
        self.volumes = volumes

    def run(self, parameters: list[tuple], gpus: int = 0):
        params = f"--model-id {self.model} --port 8080"
        for p in parameters:
            params += f" --{p[0]} {str(p[1])}"
        logger.info(f"Running TGI with parameters: {params}")
        volumes = {}
        for v in self.volumes:
            volumes[v[0]] = {"bind": v[1], "mode": "rw"}
        self.container = run_docker(
            self.image,
            params,
            "Connected",
            "ERROR",
            volumes=volumes,
            gpus=gpus,
            ports={"8080/tcp": 8080},
        )

    def stop(self):
        if self.container:
            self.container.stop()


class BenchmarkRunner:
    def __init__(
        self,
        image: str = "ghcr.io/huggingface/text-generation-inference-benchmark:latest",
        volumes: List[Tuple[str, str]] = None,
    ):
        if volumes is None:
            volumes = []
        self.container = None
        self.image = image
        self.volumes = volumes

    def run(self, parameters: list[tuple], network_mode):
        params = "text-generation-inference-benchmark"
        for p in parameters:
            params += f" --{p[0]} {str(p[1])}" if p[1] is not None else f" --{p[0]}"
        logger.info(
            f"Running text-generation-inference-benchmark with parameters: {params}"
        )
        volumes = {}
        for v in self.volumes:
            volumes[v[0]] = {"bind": v[1], "mode": "rw"}
        self.container = run_docker(
            self.image,
            params,
            "Benchmark finished",
            "Fatal:",
            volumes=volumes,
            extra_env={
                "RUST_LOG": "text_generation_inference_benchmark=info",
                "RUST_BACKTRACE": "full",
            },
            network_mode=network_mode,
        )

    def stop(self):
        if self.container:
            self.container.stop()


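# How the two runners are combined (a sketch of what main() below does, using only
# values that already appear in this file; the host results path is a placeholder):
#
#   tgi = TGIDockerRunner("meta-llama/Llama-3.1-8B-Instruct")
#   tgi.run([("max-concurrent-requests", 512)], gpus=1)
#   bench = BenchmarkRunner(
#       volumes=[("<abs results dir>", "/opt/text-generation-inference-benchmark/results")]
#   )
#   # network_mode="container:<tgi container id>" shares the TGI container's network
#   # namespace, which is why the benchmark can reach http://localhost:8080.
#   bench.run([("url", "http://localhost:8080"), ("tokenizer-name", tgi.model)],
#             f"container:{tgi.container.id}")
#   bench.stop()
#   tgi.stop()

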
def run_docker(
    image: str,
    args: str,
    success_sentinel: str,
    error_sentinel: str,
    ports: Dict[str, int] = None,
    volumes=None,
    network_mode: str = "bridge",
    gpus: int = 0,
    extra_env: Dict[str, str] = None,
) -> Container:
    if ports is None:
        ports = {}
    if volumes is None:
        volumes = {}
    if extra_env is None:
        extra_env = {}
    client = docker.from_env(timeout=300)
    # expose the first `gpus` GPUs reported by GPUtil to the container
    devices = [f"{i}" for i in range(get_num_gpus())][:gpus]
    environment = {"HF_TOKEN": os.environ.get("HF_TOKEN")}
    environment.update(extra_env)
    container = client.containers.run(
        image,
        args,
        detach=True,
        device_requests=(
            [docker.types.DeviceRequest(device_ids=devices, capabilities=[["gpu"]])]
            if gpus > 0
            else None
        ),
        volumes=volumes,
        shm_size="1g",
        ports=ports,
        network_mode=network_mode,
        environment=environment,
    )
    # stream the container logs until either sentinel shows up
    for line in container.logs(stream=True):
        print(line.decode("utf-8"), end="")
        if success_sentinel.encode("utf-8") in line:
            break
        if error_sentinel.encode("utf-8") in line:
            container.stop()
            raise Exception(f"Error starting container: {line}")
    return container


def get_gpu_names() -> str:
    gpus = GPUtil.getGPUs()
    if len(gpus) == 0:
        return ""
    return f"{len(gpus)}x{gpus[0].name}"


def get_gpu_name() -> str:
    gpus = GPUtil.getGPUs()
    if len(gpus) == 0:
        return ""
    return gpus[0].name


def get_num_gpus() -> int:
    return len(GPUtil.getGPUs())


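# Expected shape of each benchmark JSON file consumed below (only the keys that
# build_df actually reads are shown; every other field of a result is kept as-is
# and becomes a column):
#
#   {
#     "config": {"meta": {"engine": "...", "tp": ..., "version": "..."}},
#     "results": [{"config": {...}, "...": "one entry per benchmark run"}]
#   }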
def build_df(model: str, data_files: dict[str, str]) -> pd.DataFrame:
    df = pd.DataFrame()
    now = datetime.datetime.now(datetime.timezone.utc)
    created_at = now.isoformat()  # '2024-10-02T11:53:17.026215+00:00'
    # Load the results
    for key, filename in data_files.items():
        with open(filename, "r") as f:
            data = json.load(f)
            for result in data["results"]:
                entry = result
                [config] = pd.json_normalize(result["config"]).to_dict(orient="records")
                entry.update(config)
                entry["engine"] = data["config"]["meta"]["engine"]
                entry["tp"] = data["config"]["meta"]["tp"]
                entry["version"] = data["config"]["meta"]["version"]
                entry["model"] = model
                entry["created_at"] = created_at
                del entry["config"]
                df = pd.concat([df, pd.DataFrame(entry, index=[0])])
    return df


def main(sha, results_file):
    results_dir = "results"
    # get absolute path
    results_dir = os.path.join(os.path.dirname(__file__), results_dir)
    logger.info("Starting benchmark")
    models = [
        ("meta-llama/Llama-3.1-8B-Instruct", 1),
        # ('meta-llama/Llama-3.1-70B-Instruct', 4),
        # ('mistralai/Mixtral-8x7B-Instruct-v0.1', 2),
    ]
    success = True
    for model in models:
        tgi_runner = TGIDockerRunner(model[0])
        # create the per-model results directory
        model_dir = os.path.join(
            results_dir, f'{model[0].replace("/", "_").replace(".", "_")}'
        )
        os.makedirs(model_dir, exist_ok=True)
        runner = BenchmarkRunner(
            volumes=[(model_dir, "/opt/text-generation-inference-benchmark/results")]
        )
        try:
            tgi_runner.run([("max-concurrent-requests", 512)], gpus=model[1])
            logger.info(f"TGI started for model {model[0]}")
            parameters = [
                ("tokenizer-name", model[0]),
                ("max-vus", 800),
                ("url", "http://localhost:8080"),
                ("duration", "120s"),
                ("warmup", "30s"),
                ("benchmark-kind", "rate"),
                (
                    "prompt-options",
                    "num_tokens=200,max_tokens=220,min_tokens=180,variance=10",
                ),
                (
                    "decode-options",
                    "num_tokens=200,max_tokens=220,min_tokens=180,variance=10",
                ),
                (
                    "extra-meta",
                    f'"engine=TGI,tp={model[1]},version={sha},gpu={get_gpu_name()}"',
                ),
                ("no-console", None),
            ]
            # benchmark request rates from 0.8 to 24.0 req/s in steps of 0.8
            rates = [("rates", f"{r / 10.}") for r in list(range(8, 248, 8))]
            parameters.extend(rates)
            runner.run(parameters, f"container:{tgi_runner.container.id}")
        except Exception as e:
            logger.error(f"Error running benchmark for model {model[0]}: {e}")
            # print the stack trace
            print(traceback.format_exc())
            success = False
        finally:
            tgi_runner.stop()
            runner.stop()
    if not success:
        logger.error("Some benchmarks failed")
        exit(1)

    df = pd.DataFrame()
    # list the per-model result directories
    directories = [
        f"{results_dir}/{d}"
        for d in os.listdir(results_dir)
        if os.path.isdir(f"{results_dir}/{d}")
    ]
    logger.info(f"Found result directories: {directories}")
    for directory in directories:
        data_files = {}
        for filename in os.listdir(directory):
            if filename.endswith(".json"):
                data_files[filename.split(".")[-2]] = f"{directory}/{filename}"
        logger.info(f"Processing directory {directory}")
        df = pd.concat([df, build_df(directory.split("/")[-1], data_files)])
    df["device"] = get_gpu_name()
    df["error_rate"] = (
        df["failed_requests"]
        / (df["failed_requests"] + df["successful_requests"])
        * 100.0
    )
    df.to_parquet(results_file)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--sha", help="SHA of the commit to add to the results", required=True
    )
    parser.add_argument(
        "--results-file",
        help="The file where to store the results, can be a local file or an s3 path",
    )
    args = parser.parse_args()
    if args.results_file is None:
        results_file = f"{args.sha}.parquet"
    else:
        results_file = args.results_file

    main(args.sha, results_file)
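
# Example invocation (script name and SHA are placeholders; HF_TOKEN is forwarded to
# the containers, and writing straight to S3 typically requires s3fs to be installed):
#
#   HF_TOKEN=<token> python benchmark.py --sha <commit-sha> --results-file results.parquet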