/// Text Generation Inference benchmarking tool
///
/// Inspired by the great Oha app: https://github.com/hatoo/oha
/// and: https://github.com/orhun/rust-tui-template
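///
/// Example invocation (illustrative only; the binary name `text-generation-benchmark`
/// and the model id are assumptions, and a running `text-generation-server` shard is
/// expected on the default UDS path configured below):
///
/// `text-generation-benchmark --tokenizer-name bigscience/bloom-560m`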
use clap::Parser;
use std::path::Path;
use text_generation_client::v3::ShardedClient;
use tokenizers::{FromPretrainedParameters, Tokenizer};
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;
use tracing_subscriber::EnvFilter;

/// App Configuration
#[derive(Parser, Debug)]
#[clap(author, version, about, long_about = None)]
struct Args {
    /// The name of the tokenizer (as in model_id on the huggingface hub, or local path).
    #[clap(short, long, env)]
    tokenizer_name: String,

    /// The revision to use for the tokenizer if on the hub.
    #[clap(default_value = "main", long, env)]
    revision: String,

    /// The various batch sizes to benchmark. The idea is to use enough
    /// batching to start seeing increased latency; this usually means you're
    /// moving from memory bound (usually the case at BS=1) to compute bound,
    /// which is a sweet spot for the maximum batch size of the model under test
    /// (see the example invocation after this struct).
    #[clap(short, long)]
    batch_size: Option<Vec<u32>>,

    /// The length, in tokens, of the initial prompt sent to the
    /// text-generation-server. A longer prompt will slow down the benchmark. Usually the
    /// latency grows somewhat linearly with this for the prefill step.
    ///
    /// Most importantly, the prefill step is usually not the one dominating
    /// your runtime, so it's ok to keep it short.
    #[clap(default_value = "10", short, long, env)]
    sequence_length: u32,

    /// This is how many tokens will be generated by the server and averaged out
    /// to give the `decode` latency. This is the *critical* number you want to optimize for,
    /// as LLMs spend most of their time decoding.
    ///
    /// Decode latency is usually quite stable.
    #[clap(default_value = "8", short, long, env)]
    decode_length: u32,

    /// How many runs to average over
    #[clap(default_value = "10", short, long, env)]
    runs: usize,

    /// Number of warmup cycles
    #[clap(default_value = "1", short, long, env)]
    warmups: usize,

    /// The location of the gRPC socket. This benchmark tool bypasses the router
    /// completely and talks directly to the gRPC processes.
    #[clap(default_value = "/tmp/text-generation-server-0", short, long, env)]
    master_shard_uds_path: String,

    /// Generation parameter in case you want to specifically test/debug particular
    /// decoding strategies; for the full documentation refer to `text-generation-server`.
    #[clap(long, env)]
    temperature: Option<f32>,

    /// Generation parameter in case you want to specifically test/debug particular
    /// decoding strategies; for the full documentation refer to `text-generation-server`.
    #[clap(long, env)]
    top_k: Option<u32>,

    /// Generation parameter in case you want to specifically test/debug particular
    /// decoding strategies; for the full documentation refer to `text-generation-server`.
    #[clap(long, env)]
    top_p: Option<f32>,

    /// Generation parameter in case you want to specifically test/debug particular
    /// decoding strategies; for the full documentation refer to `text-generation-server`.
    #[clap(long, env)]
    typical_p: Option<f32>,

    /// Generation parameter in case you want to specifically test/debug particular
    /// decoding strategies; for the full documentation refer to `text-generation-server`.
    #[clap(long, env)]
    repetition_penalty: Option<f32>,

    /// Generation parameter in case you want to specifically test/debug particular
    /// decoding strategies; for the full documentation refer to `text-generation-server`.
    #[clap(long, env)]
    frequency_penalty: Option<f32>,

    /// Generation parameter in case you want to specifically test/debug particular
    /// decoding strategies; for the full documentation refer to `text-generation-server`.
    #[clap(long, env)]
    watermark: bool,

    /// Generation parameter in case you want to specifically test/debug particular
    /// decoding strategies; for the full documentation refer to `text-generation-server`.
    #[clap(long, env)]
    do_sample: bool,

    /// Generation parameter in case you want to specifically test/debug particular
    /// decoding strategies; for the full documentation refer to `text-generation-server`.
    #[clap(long, env)]
    top_n_tokens: Option<u32>,
}
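
// Example of a fuller benchmark sweep (illustrative only; the flag names follow the
// clap definitions above, the model id is an assumption, and each repeated
// --batch-size adds one batch size to the sweep):
//
//   text-generation-benchmark \
//       --tokenizer-name bigscience/bloom-560m \
//       --batch-size 1 --batch-size 8 --batch-size 32 \
//       --sequence-length 10 --decode-length 8 \
//       --temperature 1.0 --top-k 50 --do-sample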

fn main() -> Result<(), Box<dyn std::error::Error>> {
    init_logging();

    // Get args
    let args = Args::parse();
    // Pattern match configuration
    let Args {
        tokenizer_name,
        revision,
        batch_size,
        sequence_length,
        decode_length,
        runs,
        warmups,
        temperature,
        top_k,
        top_p,
        typical_p,
        repetition_penalty,
        frequency_penalty,
        watermark,
        do_sample,
        master_shard_uds_path,
        top_n_tokens,
    } = args;

    let batch_size = batch_size.unwrap_or(vec![1, 2, 4, 8, 16, 32]);

    // Tokenizer instance
    // This will only be used to validate payloads
    tracing::info!("Loading tokenizer");
    let local_path = Path::new(&tokenizer_name);
    let tokenizer =
        if local_path.exists() && local_path.is_dir() && local_path.join("tokenizer.json").exists()
        {
            // Load local tokenizer
            tracing::info!("Found local tokenizer");
            Tokenizer::from_file(local_path.join("tokenizer.json")).unwrap()
        } else {
            tracing::info!("Downloading tokenizer");

            // Parse Huggingface hub token
            let auth_token = std::env::var("HF_TOKEN")
                .or_else(|_| std::env::var("HUGGING_FACE_HUB_TOKEN"))
                .ok();

            // Download and instantiate tokenizer
            // We need to download it outside of the Tokio runtime
            let params = FromPretrainedParameters {
                revision,
                auth_token,
                ..Default::default()
            };
            Tokenizer::from_pretrained(tokenizer_name.clone(), Some(params)).unwrap()
        };
    tracing::info!("Tokenizer loaded");

    // Launch Tokio runtime
    tokio::runtime::Builder::new_multi_thread()
        .enable_all()
        .build()
        .unwrap()
        .block_on(async {
            // Instantiate sharded client from the master unix socket
            tracing::info!("Connect to model server");
            let mut sharded_client = ShardedClient::connect_uds(master_shard_uds_path)
                .await
                .expect("Could not connect to server");
            // Clear the cache; useful if the webserver rebooted
            sharded_client
                .clear_cache(None)
                .await
                .expect("Unable to clear cache");
            tracing::info!("Connected");

            // Run app
            text_generation_benchmark::run(
                tokenizer_name,
                tokenizer,
                batch_size,
                sequence_length,
                decode_length,
                top_n_tokens,
                runs,
                warmups,
                temperature,
                top_k,
                top_p,
                typical_p,
                repetition_penalty,
                frequency_penalty,
                watermark,
                do_sample,
                sharded_client,
            )
            .await
            .unwrap();
        });
    Ok(())
}

/// Init logging using LOG_LEVEL
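///
/// For example (illustrative values; `LOG_LEVEL` accepts standard
/// `tracing_subscriber::EnvFilter` directives, and the crate target name
/// `text_generation_benchmark` used below is an assumption):
/// `LOG_LEVEL=debug` or `LOG_LEVEL=info,text_generation_benchmark=trace`.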
fn init_logging() {
    // STDOUT/STDERR layer
    let fmt_layer = tracing_subscriber::fmt::layer()
        .with_file(true)
        .with_line_number(true);

    // Filter events with LOG_LEVEL
    let env_filter =
        EnvFilter::try_from_env("LOG_LEVEL").unwrap_or_else(|_| EnvFilter::new("info"));

    tracing_subscriber::registry()
        .with(env_filter)
        .with(fmt_layer)
        .init();
}