Damn inflated sparse tflops.
parent 7a2a219aaf
commit 15ad790a52
@@ -33,7 +33,7 @@ fn compute_optimal(config: Option<&Config>, compute: Option<&ComputeType>) -> Op
     if let (Some(config), Some(compute)) = (config, compute) {
         if let (Some(f16_max_compute), Some(model_compute)) = (compute.f16_flop(), config.flop()) {
             tracing::debug!("Max compute {f16_max_compute} model compute {model_compute}");
-            let optimal_size = (f16_max_compute / model_compute / 2) as usize;
+            let optimal_size = (f16_max_compute / model_compute) as usize;
             if optimal_size > 100 {
                 // Ignore calculations that are too low
                 // Most likely an error
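
For context, a minimal standalone sketch of what this hunk changes, with illustrative numbers not taken from the commit: the old code stored the inflated sparse datasheet TFLOPS and halved the ratio back down, while the new code stores dense TFLOPS and uses the ratio directly. The per-token model cost here is a hypothetical placeholder.

// Minimal sketch, not part of the commit.
fn main() {
    // Sparse datasheet figure the code previously stored for an H100.
    let sparse_flops: u64 = 1900 * 10u64.pow(12);
    // Dense figure stored after this commit.
    let dense_flops: u64 = 900 * 10u64.pow(12);
    // Hypothetical per-token cost for a ~7B-parameter model
    // (roughly 2 * parameter count flops per token).
    let model_compute: u64 = 14 * 10u64.pow(9);

    // Old formula: the inflated sparse number had to be halved.
    let old_optimal = (sparse_flops / model_compute / 2) as usize;
    // New formula: the dense number is used as-is.
    let new_optimal = (dense_flops / model_compute) as usize;
    println!("old: {old_optimal}, new: {new_optimal}");
}

The two paths agree only approximately, because the rounded sparse figure (1900) is not exactly twice the rounded dense one (900).
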
@@ -1492,7 +1492,8 @@ impl ComputeType {
             // https://www.nvidia.com/en-us/data-center/products/a10-gpu/
             "nvidia-a10g" => Some(125 * 10u64.pow(12)),
             // https://www.nvidia.com/en-us/data-center/h100/
-            "nvidia-h100-80gb-hbm3" => Some(1900 * 10u64.pow(12)),
+            // https://www.techpowerup.com/gpu-specs/docs/nvidia-gh100-architecture.pdf
+            "nvidia-h100-80gb-hbm3" => Some(900 * 10u64.pow(12)),
             // https://www.nvidia.com/content/dam/en-zz/Solutions/Data-Center/a100/pdf/nvidia-a100-datasheet-us-nvidia-1758950-r4-web.pdf
             "nvidia-a100" => Some(312 * 10u64.pow(12)),
             card => {
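The root cause the commit title points at: NVIDIA's headline tensor-core figures for Ampere and Hopper are measured with 2:4 structured sparsity, which is exactly double the dense throughput. A hedged sketch of that relationship, using a hypothetical helper name that is not in the codebase:

// Hypothetical helper, not part of the commit: convert a headline
// sparse TFLOPS datasheet figure into dense flops.
fn dense_flops_from_sparse_tflops(sparse_tflops: u64) -> u64 {
    // 2:4 structured sparsity doubles the dense figure, so halve it.
    (sparse_tflops / 2) * 10u64.pow(12)
}

fn main() {
    // A100 datasheet: 624 sparse f16 tensor TFLOPS -> 312 dense,
    // matching the value kept in the match arm above.
    assert_eq!(dense_flops_from_sparse_tflops(624), 312 * 10u64.pow(12));
    // H100 SXM datasheet: ~1979 sparse f16 TFLOPS -> ~989 dense,
    // which the commit stores conservatively as 900.
    assert_eq!(dense_flops_from_sparse_tflops(1979), 989 * 10u64.pow(12));
}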