from collections import namedtuple
import numpy as np
import torch
from PIL import Image
import torchsde._brownian.brownian_interval
from modules import devices, processing, images, sd_vae_approx

from modules.shared import opts, state
import modules.shared as shared

SamplerData = namedtuple('SamplerData', ['name', 'constructor', 'aliases', 'options'])
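
# A sampler registration might look like the hypothetical entry below; the real
# entries are built in the sampler modules (e.g. sd_samplers_kdiffusion), so the
# constructor and options here are illustrative only:
#
#   SamplerData('Euler a', lambda model: KDiffusionSampler('sample_euler_ancestral', model),
#               ['k_euler_a'], {})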


def setup_img2img_steps(p, steps=None):
    if opts.img2img_fix_steps or steps is not None:
        # With "fix steps" (or an explicit override) the requested count is the number
        # of steps that should actually run, so scale the total up by 1/denoising_strength.
        requested_steps = (steps or p.steps)
        steps = int(requested_steps / min(p.denoising_strength, 0.999)) if p.denoising_strength > 0 else 0
        t_enc = requested_steps - 1
    else:
        # Otherwise only a denoising_strength fraction of p.steps actually runs.
        steps = p.steps
        t_enc = int(min(p.denoising_strength, 0.999) * steps)

    return steps, t_enc
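
# Worked example (hypothetical values): with p.steps=20 and p.denoising_strength=0.75,
# the default branch gives steps=20 and t_enc=int(0.75*20)=15, while the
# img2img_fix_steps branch gives steps=int(20/0.75)=26 and t_enc=20-1=19.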


# Live-preview decode modes, keyed by the opts.show_progress_type setting:
# 0 = full VAE decode, 1 = small learned approximation network, 2 = cheap linear approximation.
approximation_indexes = {"Full": 0, "Approx NN": 1, "Approx cheap": 2}


def single_sample_to_image(sample, approximation=None):
    if approximation is None:
        approximation = approximation_indexes.get(opts.show_progress_type, 0)

    if approximation == 2:
        x_sample = sd_vae_approx.cheap_approximation(sample)
    elif approximation == 1:
        x_sample = sd_vae_approx.model()(sample.to(devices.device, devices.dtype).unsqueeze(0))[0].detach()
    else:
        x_sample = processing.decode_first_stage(shared.sd_model, sample.unsqueeze(0))[0]

    # Map the decoded image from [-1, 1] to an 8-bit HWC array and wrap it in a PIL image.
    x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0)
    x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
    x_sample = x_sample.astype(np.uint8)
    return Image.fromarray(x_sample)


def sample_to_image(samples, index=0, approximation=None):
    return single_sample_to_image(samples[index], approximation)


def samples_to_image_grid(samples, approximation=None):
    return images.image_grid([single_sample_to_image(sample, approximation) for sample in samples])
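
# Sketch of how a caller might build a preview grid from a batch of latents
# (hypothetical variable names; the real callers live in the sampler/processing code):
#
#   grid = samples_to_image_grid(latents, approximation=approximation_indexes["Approx cheap"])
#   grid.save("preview.png")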


def store_latent(decoded):
    state.current_latent = decoded

    # Decode a preview only on the configured step interval; when parallel processing
    # is allowed, the stored latent is decoded later when the preview is requested.
    if opts.live_previews_enable and opts.show_progress_every_n_steps > 0 and shared.state.sampling_step % opts.show_progress_every_n_steps == 0:
        if not shared.parallel_processing_allowed:
            shared.state.assign_current_image(sample_to_image(decoded))


class InterruptedException(BaseException):
    # Raised from sampler callbacks to abort generation; inherits from BaseException
    # so that ordinary `except Exception` handlers do not swallow it.
    pass


# MPS fix for randn in torchsde
# XXX move this to separate file for MPS
def torchsde_randn(size, dtype, device, seed):
    if device.type == 'mps':
        # Work around missing MPS support: draw the seeded noise on the CPU and
        # move it to the requested device.
        generator = torch.Generator(devices.cpu).manual_seed(int(seed))
        return torch.randn(size, dtype=dtype, device=devices.cpu, generator=generator).to(device)
    else:
        generator = torch.Generator(device).manual_seed(int(seed))
        return torch.randn(size, dtype=dtype, device=device, generator=generator)


# Replace torchsde's internal randn so SDE-based samplers also work on MPS.
torchsde._brownian.brownian_interval._randn = torchsde_randn
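
# A minimal check of the patched behaviour (illustrative, not run at import time):
#
#   a = torchsde_randn((1, 4), torch.float32, torch.device('cpu'), seed=42)
#   b = torchsde_randn((1, 4), torch.float32, torch.device('cpu'), seed=42)
#   assert torch.equal(a, b)  # the same seed always yields the same noise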