Merge pull request #1371 from d8ahazard/master
Fix LDSR, BSRGAN, recursive SD checkpoint Loading
commit f253790b40
@@ -69,7 +69,7 @@ class UpscalerBSRGAN(modules.upscaler.Upscaler):
         if not os.path.exists(filename) or filename is None:
             print(f"BSRGAN: Unable to load model from {filename}", file=sys.stderr)
             return None
-        model = RRDBNet(in_nc=3, out_nc=3, nf=64, nb=23, gc=32, sf=2)  # define network
+        model = RRDBNet(in_nc=3, out_nc=3, nf=64, nb=23, gc=32, sf=4)  # define network
         model.load_state_dict(torch.load(filename), strict=True)
         model.eval()
         for k, v in model.named_parameters():
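The only functional change in this hunk is the RRDBNet scale factor: the BSRGAN checkpoint loaded here is a 4x model, and with strict=True a network defined for a different scale typically refuses the checkpoint (extra or missing upsampling parameters). A minimal sketch of that failure mode, using a stand-in nn.Sequential rather than the real RRDBNet:

# Sketch only (not the webui code): a stand-in network whose layer count depends
# on the scale factor, showing why strict=True rejects weights built for a
# different sf.
import torch.nn as nn

def build(sf: int) -> nn.Module:
    layers = [nn.Conv2d(3, 64, 3, padding=1)]
    for _ in range(sf // 2):              # pretend each 2x step adds one layer
        layers.append(nn.Conv2d(64, 64, 3, padding=1))
    layers.append(nn.Conv2d(64, 3, 3, padding=1))
    return nn.Sequential(*layers)

state = build(4).state_dict()             # stand-in for the x4 BSRGAN weights
try:
    build(2).load_state_dict(state, strict=True)   # wrong sf -> RuntimeError
except RuntimeError as err:
    print("mismatch:", str(err).splitlines()[0])
build(4).load_state_dict(state, strict=True)       # matching sf loads fine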
@@ -22,8 +22,20 @@ class UpscalerLDSR(Upscaler):
         self.scalers = [scaler_data]
 
     def load_model(self, path: str):
+        # Remove incorrect project.yaml file if too big
+        yaml_path = os.path.join(self.model_path, "project.yaml")
+        old_model_path = os.path.join(self.model_path, "model.pth")
+        new_model_path = os.path.join(self.model_path, "model.ckpt")
+        if os.path.exists(yaml_path):
+            statinfo = os.stat(yaml_path)
+            if statinfo.st_size >= 10485760:
+                print("Removing invalid LDSR YAML file.")
+                os.remove(yaml_path)
+        if os.path.exists(old_model_path):
+            print("Renaming model from model.pth to model.ckpt")
+            os.rename(old_model_path, new_model_path)
         model = load_file_from_url(url=self.model_url, model_dir=self.model_path,
-                                   file_name="model.pth", progress=True)
+                                   file_name="model.ckpt", progress=True)
         yaml = load_file_from_url(url=self.yaml_url, model_dir=self.model_path,
                                   file_name="project.yaml", progress=True)
 
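For context, the added block migrates files left behind by earlier versions: a project.yaml of 10 MiB or more (10485760 bytes) is treated as a bad download and deleted so it can be re-fetched, and an already-downloaded model.pth is renamed to the model.ckpt filename the loader now requests. A standalone sketch of the same cleanup (the directory argument is only an example):

# Sketch of the migration in isolation; hypothetical directory path.
import os

def migrate_ldsr_files(model_dir: str) -> None:
    yaml_path = os.path.join(model_dir, "project.yaml")
    old_model_path = os.path.join(model_dir, "model.pth")
    new_model_path = os.path.join(model_dir, "model.ckpt")

    if os.path.exists(yaml_path) and os.stat(yaml_path).st_size >= 10485760:
        print("Removing invalid LDSR YAML file.")
        os.remove(yaml_path)               # forces a clean re-download of the YAML

    if os.path.exists(old_model_path):
        print("Renaming model from model.pth to model.ckpt")
        os.rename(old_model_path, new_model_path)   # keep cached weights, new name

migrate_ldsr_files(os.path.join("models", "LDSR"))  # example path only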
@@ -41,5 +53,4 @@ class UpscalerLDSR(Upscaler):
             print("NO LDSR!")
             return img
         ddim_steps = shared.opts.ldsr_steps
-        pre_scale = shared.opts.ldsr_pre_down
         return ldsr.super_resolution(img, ddim_steps, self.scale)
@@ -98,9 +98,7 @@ class LDSR:
         im_og = image
         width_og, height_og = im_og.size
         # If we can adjust the max upscale size, then the 4 below should be our variable
-        print("Foo")
         down_sample_rate = target_scale / 4
-        print(f"Downsample rate is {down_sample_rate}")
         wd = width_og * down_sample_rate
         hd = height_og * down_sample_rate
         width_downsampled_pre = int(wd)
@@ -111,7 +109,7 @@ class LDSR:
                 f'Downsampling from [{width_og}, {height_og}] to [{width_downsampled_pre}, {height_downsampled_pre}]')
             im_og = im_og.resize((width_downsampled_pre, height_downsampled_pre), Image.LANCZOS)
         else:
-            print(f"Down sample rate is 1 from {target_scale} / 4")
+            print(f"Down sample rate is 1 from {target_scale} / 4 (Not downsampling)")
         logs = self.run(model["model"], im_og, diffusion_steps, eta)
 
         sample = logs["sample"]
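The surrounding logic pre-shrinks the input because the diffusion pass itself upscales by a fixed 4x (the "4" the existing comment says could become a variable): for a smaller requested scale, the image is first reduced by target_scale / 4 so the 4x pass lands on the requested output size. A small sketch of that arithmetic with illustrative sizes:

# Standalone sketch of the pre-downsampling arithmetic; sizes are examples only.
def pre_downsample_size(width: int, height: int, target_scale: float):
    down_sample_rate = target_scale / 4
    return int(width * down_sample_rate), int(height * down_sample_rate)

print(pre_downsample_size(512, 512, 2))   # (256, 256): halve first, then 4x -> 2x overall
print(pre_downsample_size(512, 512, 4))   # (512, 512): rate 1, "Not downsampling" branch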
@@ -1,3 +1,4 @@
+import glob
 import os
 import shutil
 import importlib
@@ -41,7 +42,7 @@ def load_models(model_path: str, model_url: str = None, command_path: str = None
 
     for place in places:
         if os.path.exists(place):
-            for file in os.listdir(place):
+            for file in glob.iglob(place + '**/**', recursive=True):
                 full_path = os.path.join(place, file)
                 if os.path.isdir(full_path):
                     continue
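This is the "recursive SD checkpoint Loading" part of the commit message: os.listdir only sees the top level of each model directory, while glob.iglob with recursive=True also walks subfolders. A self-contained sketch of the difference (the directory layout below is hypothetical; note that iglob yields paths that already include the search root, unlike the bare names from os.listdir):

# Sketch of the listdir -> iglob difference, using a temporary directory.
import glob
import os
import tempfile

with tempfile.TemporaryDirectory() as place:
    os.makedirs(os.path.join(place, "subfolder"))
    for name in ("top.ckpt", os.path.join("subfolder", "nested.ckpt")):
        open(os.path.join(place, name), "w").close()

    print(sorted(os.listdir(place)))          # ['subfolder', 'top.ckpt'] -- nested file not seen
    files = [p for p in glob.iglob(os.path.join(place, "**"), recursive=True)
             if os.path.isfile(p)]
    print(sorted(os.path.relpath(p, place) for p in files))
    # ['subfolder/nested.ckpt', 'top.ckpt'] (path separator varies by OS)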