Damn inflated sparse tflops.
parent 748dce60cd
commit 3a53e8c288
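Background for the change below: NVIDIA datasheets headline FP16 tensor throughput with 2:4 structured sparsity, which is exactly double the dense figure (H100 SXM: ~990 TFLOPS dense, ~1979 TFLOPS sparse). The old 1900e12 table entry was that inflated sparse number; the table now carries dense figures and the compensating division by 2 in the batch estimate is gone.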
@@ -33,7 +33,7 @@ fn compute_optimal(config: Option<&Config>, compute: Option<&ComputeType>) -> Op
     if let (Some(config), Some(compute)) = (config, compute) {
         if let (Some(f16_max_compute), Some(model_compute)) = (compute.f16_flop(), config.flop()) {
             tracing::debug!("Max compute {f16_max_compute} model compute {model_compute}");
-            let optimal_size = (f16_max_compute / model_compute / 2) as usize;
+            let optimal_size = (f16_max_compute / model_compute) as usize;
             if optimal_size > 100 {
                 // Ignore calculations that are too low
                 // Most likely an error
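A minimal standalone sketch (not the repository's code) of what that division by 2 did once the table holds dense numbers: it roughly matched the H100's inflated sparse entry, but silently halved cards whose entries were already dense, like the A100. The model cost of 9e12 FLOPs is a made-up placeholder.

fn main() {
    // Hypothetical per-forward-pass model cost, in FLOPs.
    let model_compute: u64 = 9 * 10u64.pow(12);

    // H100: the old table held the sparse 1900e12, halved back down by the `/ 2`.
    let old_h100 = (1900 * 10u64.pow(12) / model_compute / 2) as usize; // 105
    // New: dense 900e12, no compensation needed.
    let new_h100 = (900 * 10u64.pow(12) / model_compute) as usize; // 100

    // A100: the table already held the dense 312e12, so the `/ 2`
    // wrongly halved a correct number.
    let old_a100 = (312 * 10u64.pow(12) / model_compute / 2) as usize; // 17
    let new_a100 = (312 * 10u64.pow(12) / model_compute) as usize; // 34

    println!("h100: {old_h100} -> {new_h100}, a100: {old_a100} -> {new_a100}");
}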
@@ -1508,7 +1508,8 @@ impl ComputeType {
             // https://www.nvidia.com/en-us/data-center/products/a10-gpu/
             "nvidia-a10g" => Some(125 * 10u64.pow(12)),
             // https://www.nvidia.com/en-us/data-center/h100/
-            "nvidia-h100-80gb-hb3" => Some(1900 * 10u64.pow(12)),
+            // https://www.techpowerup.com/gpu-specs/docs/nvidia-gh100-architecture.pdf
+            "nvidia-h100-80gb-hb3" => Some(900 * 10u64.pow(12)),
             // https://www.nvidia.com/content/dam/en-zz/Solutions/Data-Center/a100/pdf/nvidia-a100-datasheet-us-nvidia-1758950-r4-web.pdf
             "nvidia-a100" => Some(312 * 10u64.pow(12)),
             card => {