import dataclasses

import torch
import k_diffusion
import numpy as np

from modules import shared


def to_d(x, sigma, denoised):
    """Converts a denoiser output to a Karras ODE derivative."""
    return (x - denoised) / sigma


# Override k_diffusion's own to_d so every sampler in the library uses the version above.
k_diffusion.sampling.to_d = to_d


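# For orientation: to_d() is the derivative an Euler-type sampler integrates.
# A minimal sketch of one step, assuming x, denoised and a decreasing sigma
# schedule are already in hand (illustrative, not code from this module):
#
#     d = to_d(x, sigmas[i], denoised)           # dx/dsigma at the current noise level
#     x = x + d * (sigmas[i + 1] - sigmas[i])    # explicit Euler step to the next level

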
@dataclasses.dataclass
class Scheduler:
    name: str
    label: str
    function: any  # callable that builds the sigma schedule; None for 'automatic'

    default_rho: float = -1  # rho passed to schedules that take one; -1 when not applicable
    need_inner_model: bool = False  # True if `function` expects an `inner_model` argument
    aliases: list = None


def uniform(n, sigma_min, sigma_max, inner_model, device):
    """The model's own sigma schedule, as returned by inner_model.get_sigmas(n)."""
    return inner_model.get_sigmas(n).to(device)


def sgm_uniform(n, sigma_min, sigma_max, inner_model, device):
    """Spaces n + 1 points uniformly in timestep space, drops the last, and appends sigma=0."""
    start = inner_model.sigma_to_t(torch.tensor(sigma_max))
    end = inner_model.sigma_to_t(torch.tensor(sigma_min))
    sigs = [
        inner_model.t_to_sigma(ts)
        for ts in torch.linspace(start, end, n + 1)[:-1]
    ]
    sigs += [0.0]
    return torch.FloatTensor(sigs).to(device)


def get_align_your_steps_sigmas(n, sigma_min, sigma_max, device):
    # https://research.nvidia.com/labs/toronto-ai/AlignYourSteps/howto.html
    def loglinear_interp(t_steps, num_steps):
        """Performs log-linear interpolation of a given array of decreasing numbers."""
        xs = np.linspace(0, 1, len(t_steps))
        ys = np.log(t_steps[::-1])

        new_xs = np.linspace(0, 1, num_steps)
        new_ys = np.interp(new_xs, xs, ys)

        interped_ys = np.exp(new_ys)[::-1].copy()
        return interped_ys

    # NVIDIA's hand-tuned 10-step schedules; resampled log-linearly below when a
    # different step count is requested.
    if shared.sd_model.is_sdxl:
        sigmas = [14.615, 6.315, 3.771, 2.181, 1.342, 0.862, 0.555, 0.380, 0.234, 0.113, 0.029]
    else:
        # Default to SD 1.5 sigmas.
        sigmas = [14.615, 6.475, 3.861, 2.697, 1.886, 1.396, 0.963, 0.652, 0.399, 0.152, 0.029]

    if n != len(sigmas):
        sigmas = np.append(loglinear_interp(sigmas, n), [0.0])
    else:
        sigmas.append(0.0)

    return torch.FloatTensor(sigmas).to(device)


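# Sanity check for the helper above: log-linear resampling keeps geometric
# spacing geometric. Resampling [10.0, 1.0, 0.1] from 3 points to 5 gives
# approximately [10.0, 3.1623, 1.0, 0.3162, 0.1].

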
def kl_optimal(n, sigma_min, sigma_max, device):
    """Interpolates linearly between arctan(sigma_max) and arctan(sigma_min), then maps back through tan."""
    alpha_min = torch.arctan(torch.tensor(sigma_min, device=device))
    alpha_max = torch.arctan(torch.tensor(sigma_max, device=device))
    step_indices = torch.arange(n + 1, device=device)
    sigmas = torch.tan(step_indices / n * alpha_min + (1.0 - step_indices / n) * alpha_max)
    return sigmas


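# Example (values are the usual SD 1.x sigma bounds, shown for illustration):
# kl_optimal(4, 0.0292, 14.6146, 'cpu') returns five sigmas descending from
# 14.6146 to 0.0292. Unlike the other schedulers here, it does not append a
# trailing 0.0.

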
def simple_scheduler(n, sigma_min, sigma_max, inner_model, device):
    """Picks n evenly spaced sigmas from the model's sigma table, highest first, then appends 0.0."""
    sigs = []
    ss = len(inner_model.sigmas) / n
    for x in range(n):
        sigs += [float(inner_model.sigmas[-(1 + int(x * ss))])]
    sigs += [0.0]
    return torch.FloatTensor(sigs).to(device)


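# Illustration: with a typical 1000-entry sigma table and n=10, ss == 100.0 and
# the loop reads indices -1, -101, ..., -901, i.e. ten sigmas from the top of
# the table downward before the terminal 0.0 is appended. (The 1000-entry table
# is an assumption about the model, not something this module guarantees.)

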
def normal_scheduler(n, sigma_min, sigma_max, inner_model, device, sgm=False, floor=False):
    """Spaces steps uniformly in the model's timestep space; `floor` is currently unused."""
    start = inner_model.sigma_to_t(torch.tensor(sigma_max))
    end = inner_model.sigma_to_t(torch.tensor(sigma_min))

    if sgm:
        timesteps = torch.linspace(start, end, n + 1)[:-1]
    else:
        timesteps = torch.linspace(start, end, n)

    sigs = []
    for ts in timesteps:
        sigs.append(inner_model.t_to_sigma(ts))
    sigs += [0.0]
    return torch.FloatTensor(sigs).to(device)


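# Note: normal_scheduler(..., sgm=True) reproduces sgm_uniform above, since both
# take n + 1 uniform timesteps, drop the last, and append 0.0; with sgm=False the
# final nonzero sigma lands exactly on sigma_min instead.

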
def ddim_scheduler(n, sigma_min, sigma_max, inner_model, device):
    """Strides through the model's sigma table with a fixed integer step, mirroring DDIM's uniform timestep spacing."""
    sigs = []
    ss = max(len(inner_model.sigmas) // n, 1)
    x = 1
    while x < len(inner_model.sigmas):
        sigs += [float(inner_model.sigmas[x])]
        x += ss
    sigs = sigs[::-1]
    sigs += [0.0]
    return torch.FloatTensor(sigs).to(device)


schedulers = [
    Scheduler('automatic', 'Automatic', None),
    Scheduler('uniform', 'Uniform', uniform, need_inner_model=True),
    Scheduler('karras', 'Karras', k_diffusion.sampling.get_sigmas_karras, default_rho=7.0),
    Scheduler('exponential', 'Exponential', k_diffusion.sampling.get_sigmas_exponential),
    Scheduler('polyexponential', 'Polyexponential', k_diffusion.sampling.get_sigmas_polyexponential, default_rho=1.0),
    Scheduler('sgm_uniform', 'SGM Uniform', sgm_uniform, need_inner_model=True, aliases=["SGMUniform"]),
    Scheduler('kl_optimal', 'KL Optimal', kl_optimal),
    Scheduler('align_your_steps', 'Align Your Steps', get_align_your_steps_sigmas),
    Scheduler('simple', 'Simple', simple_scheduler, need_inner_model=True),
    Scheduler('normal', 'Normal', normal_scheduler, need_inner_model=True),
    Scheduler('ddim', 'DDIM', ddim_scheduler, need_inner_model=True),
]

# Look up schedulers by either their internal name or their UI label.
schedulers_map = {**{x.name: x for x in schedulers}, **{x.label: x for x in schedulers}}
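
# Usage sketch (illustrative; `denoiser` stands for the k-diffusion model wrapper
# the webui passes in, and the numeric bounds are example values):
#
#     scheduler = schedulers_map['Karras']  # lookup by UI label; 'karras' works too
#     sigmas = scheduler.function(n=20, sigma_min=0.0292, sigma_max=14.6146,
#                                 rho=scheduler.default_rho, device='cpu')
#
#     # Schedulers flagged need_inner_model=True take the wrapper as well:
#     sigmas = schedulers_map['simple'].function(20, 0.0292, 14.6146, denoiser, 'cpu')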