parent d6e8d85e30
commit 1a1f7e85c7
.gitignore
@@ -18,3 +18,4 @@ __pycache__
 /webui-user.sh
 /interrogate
 /user.css
+/.idea
launch.py
@@ -22,6 +22,7 @@ stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "6
 taming_transformers_commit_hash = os.environ.get('TAMING_TRANSFORMERS_COMMIT_HASH', "24268930bf1dce879235a7fddd0b2355b84d7ea6")
 codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af")
 blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9")
+ldsr_commit_hash = os.environ.get('LDSR_COMMIT_HASH',"e1a84a89fcbb49881546cf2acf1e7e250923dba0")

 args = shlex.split(commandline_args)
@@ -121,6 +122,8 @@ git_clone("https://github.com/CompVis/stable-diffusion.git", repo_dir('stable-di
 git_clone("https://github.com/CompVis/taming-transformers.git", repo_dir('taming-transformers'), "Taming Transformers", taming_transformers_commit_hash)
 git_clone("https://github.com/sczhou/CodeFormer.git", repo_dir('CodeFormer'), "CodeFormer", codeformer_commit_hash)
 git_clone("https://github.com/salesforce/BLIP.git", repo_dir('BLIP'), "BLIP", blip_commit_hash)
+# Using my repo until my changes are merged, as this makes interfacing with our version of SD-web a lot easier
+git_clone("https://github.com/Hafiidz/latent-diffusion", repo_dir('latent-diffusion'), "LDSR", ldsr_commit_hash)

 if not is_installed("lpips"):
     run_pip(f"install -r {os.path.join(repo_dir('CodeFormer'), 'requirements.txt')}", "requirements for CodeFormer")
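For context on the launch.py changes above: each bundled repository is pinned to a known commit, and the new LDSR clone follows the same pattern, with LDSR_COMMIT_HASH available as an environment override. A minimal, self-contained sketch of that pattern follows; the git_clone helper here is a simplified stand-in, not launch.py's real implementation.

import os
import subprocess

# Same override pattern as launch.py: an env var takes precedence over the pinned default.
ldsr_commit_hash = os.environ.get('LDSR_COMMIT_HASH', "e1a84a89fcbb49881546cf2acf1e7e250923dba0")

def git_clone(url, target_dir, name, commit_hash=None):
    # Simplified stand-in: clone if the directory is missing, then check out the pinned commit.
    if not os.path.isdir(target_dir):
        print(f"Cloning {name} into {target_dir}...")
        subprocess.run(["git", "clone", url, target_dir], check=True)
    if commit_hash is not None:
        subprocess.run(["git", "-C", target_dir, "checkout", commit_hash], check=True)

# Mirrors the call added in the diff (commented out so the sketch has no side effects):
# git_clone("https://github.com/Hafiidz/latent-diffusion",
#           os.path.join("repositories", "latent-diffusion"), "LDSR", ldsr_commit_hash)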
modules/ldsr_model.py
@@ -0,0 +1,67 @@
+import os
+import sys
+import traceback
+from collections import namedtuple
+
+from basicsr.utils.download_util import load_file_from_url
+
+import modules.images
+from modules import shared
+from modules.paths import script_path
+
+LDSRModelInfo = namedtuple("LDSRModelInfo", ["name", "location", "model", "netscale"])
+
+ldsr_models = []
+have_ldsr = False
+LDSR_obj = None
+
+
+class UpscalerLDSR(modules.images.Upscaler):
+    def __init__(self, steps):
+        self.steps = steps
+        self.name = "LDSR"
+
+    def do_upscale(self, img):
+        return upscale_with_ldsr(img)
+
+
+def add_lsdr():
+    modules.shared.sd_upscalers.append(UpscalerLDSR(100))
+
+
+def setup_ldsr():
+    path = modules.paths.paths.get("LDSR", None)
+    if path is None:
+        return
+    global have_ldsr
+    global LDSR_obj
+    try:
+        from LDSR import LDSR
+        model_url = "https://heibox.uni-heidelberg.de/f/578df07c8fc04ffbadf3/?dl=1"
+        yaml_url = "https://heibox.uni-heidelberg.de/f/31a76b13ea27482981b4/?dl=1"
+        repo_path = 'latent-diffusion/experiments/pretrained_models/'
+        model_path = load_file_from_url(url=model_url, model_dir=os.path.join("repositories", repo_path),
+                                        progress=True, file_name="model.chkpt")
+        yaml_path = load_file_from_url(url=yaml_url, model_dir=os.path.join("repositories", repo_path),
+                                       progress=True, file_name="project.yaml")
+        have_ldsr = True
+        LDSR_obj = LDSR(model_path, yaml_path)
+
+
+    except Exception:
+        print("Error importing LDSR:", file=sys.stderr)
+        print(traceback.format_exc(), file=sys.stderr)
+        have_ldsr = False
+
+
+def upscale_with_ldsr(image):
+    setup_ldsr()
+    if not have_ldsr or LDSR_obj is None:
+        return image
+
+    ddim_steps = shared.opts.ldsr_steps
+    pre_scale = shared.opts.ldsr_pre_down
+    post_scale = shared.opts.ldsr_post_down
+
+    image = LDSR_obj.super_resolution(image, ddim_steps, pre_scale, post_scale)
+    return image
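The new modules/ldsr_model.py above plugs into the webui by subclassing modules.images.Upscaler and appending an instance to the shared upscaler list, while the heavy setup (importing LDSR from the cloned repo and downloading weights) is deferred to setup_ldsr() on first use. Below is a condensed, self-contained sketch of that registration pattern; the Upscaler base class and sd_upscalers list are simplified stand-ins, not the webui's actual definitions.

from PIL import Image

sd_upscalers = []  # stand-in for modules.shared.sd_upscalers

class Upscaler:
    # Stand-in for modules.images.Upscaler: the UI only needs a name and do_upscale().
    name = "base"

    def do_upscale(self, img):
        return img

class UpscalerLDSR(Upscaler):
    def __init__(self, steps):
        self.steps = steps  # default DDIM step count; the real value comes from the settings slider
        self.name = "LDSR"

    def do_upscale(self, img):
        # The real method calls upscale_with_ldsr(), which returns `img` unchanged
        # when the LDSR repo or its weights are unavailable.
        return img

def add_lsdr():  # same (typo'd) name the commit registers from webui.py
    sd_upscalers.append(UpscalerLDSR(100))

add_lsdr()
img = Image.new("RGB", (64, 64))
print([(u.name, u.do_upscale(img).size) for u in sd_upscalers])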
modules/paths.py
@@ -19,6 +19,7 @@ path_dirs = [
     (os.path.join(sd_path, '../taming-transformers'), 'taming', 'Taming Transformers'),
     (os.path.join(sd_path, '../CodeFormer'), 'inference_codeformer.py', 'CodeFormer'),
     (os.path.join(sd_path, '../BLIP'), 'models/blip.py', 'BLIP'),
+    (os.path.join(sd_path, '../latent-diffusion'), 'LDSR.py', 'LDSR'),
 ]

 paths = {}
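The modules/paths.py entry above is what makes `from LDSR import LDSR` importable at all: each path_dirs tuple is (directory, a file that must exist there, a name), and only directories whose marker file exists get added to sys.path and recorded in the paths dict, which is why setup_ldsr() can bail out early when paths.get("LDSR") is None. The loop below paraphrases that behavior; it is a sketch rather than a copy of the webui's code, and sd_path is hard-coded here only for illustration.

import os
import sys

sd_path = os.path.join("repositories", "stable-diffusion")  # illustration only

path_dirs = [
    (os.path.join(sd_path, '../latent-diffusion'), 'LDSR.py', 'LDSR'),
]

paths = {}

for directory, must_exist, name in path_dirs:
    must_exist_path = os.path.abspath(os.path.join(directory, must_exist))
    if not os.path.exists(must_exist_path):
        # Optional repos (like LDSR) may simply be absent; warn and move on.
        print(f"Warning: {name} not found at path {must_exist_path}", file=sys.stderr)
    else:
        d = os.path.abspath(directory)
        sys.path.append(d)
        paths[name] = d

print(paths.get("LDSR"))  # None if latent-diffusion has not been cloned yet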
modules/shared.py
@@ -144,6 +144,12 @@ class Options:
     "ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for ESRGAN upscalers. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}),
     "SWIN_tile": OptionInfo(192, "Tile size for all SwinIR.", gr.Slider, {"minimum": 16, "maximum": 512, "step": 16}),
     "SWIN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for SwinIR. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}),
+    "ldsr_steps": OptionInfo(100, "LDSR processing steps. Lower = faster",
+                             gr.Slider, {"minimum": 1, "maximum": 200, "step": 1}),
+    "ldsr_pre_down":OptionInfo(1, "LDSR Pre-process downssample scale. 1 = no down-sampling, 4 = 1/4 scale.",
+                               gr.Slider, {"minimum": 1, "maximum": 4, "step": 1}),
+    "ldsr_post_down":OptionInfo(1, "LDSR Post-process down-sample scale. 1 = no down-sampling, 4 = 1/4 scale.",
+                                gr.Slider, {"minimum": 1, "maximum": 4, "step": 1}),
     "random_artist_categories": OptionInfo([], "Allowed categories for random artists selection when using the Roll button", gr.CheckboxGroup, {"choices": artist_db.categories()}),
     "upscale_at_full_resolution_padding": OptionInfo(16, "Inpainting at full resolution: padding, in pixels, for the masked region.", gr.Slider, {"minimum": 0, "maximum": 128, "step": 4}),
     "upscaler_for_hires_fix": OptionInfo(None, "Upscaler for highres. fix", gr.Radio, lambda: {"choices": [x.name for x in sd_upscalers]}),
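These three new options surface as sliders on the settings tab and are read back at upscale time as shared.opts.ldsr_steps, shared.opts.ldsr_pre_down, and shared.opts.ldsr_post_down. The toy stand-in below shows how attribute-style access over a dict of OptionInfo defaults can work; it is not the webui's actual Options class, and the Gradio component arguments are omitted.

from dataclasses import dataclass
from typing import Any

@dataclass
class OptionInfo:
    # Toy stand-in: the real OptionInfo also carries the Gradio component and its kwargs.
    default: Any
    label: str = ""

class Options:
    data_labels = {
        "ldsr_steps": OptionInfo(100, "LDSR processing steps. Lower = faster"),
        "ldsr_pre_down": OptionInfo(1, "LDSR Pre-process downsample scale."),
        "ldsr_post_down": OptionInfo(1, "LDSR Post-process down-sample scale."),
    }

    def __init__(self):
        # Current values start out as the defaults; the UI would overwrite entries here.
        self.data = {k: v.default for k, v in self.data_labels.items()}

    def __getattr__(self, item):
        # Attribute access falls back to the stored option values.
        data = self.__dict__.get("data", {})
        if item in data:
            return data[item]
        raise AttributeError(item)

opts = Options()
print(opts.ldsr_steps, opts.ldsr_pre_down, opts.ldsr_post_down)  # -> 100 1 1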
modules/ui.py
@@ -23,6 +23,7 @@ from modules.shared import opts, cmd_opts
 import modules.shared as shared
 from modules.sd_samplers import samplers, samplers_for_img2img
 import modules.realesrgan_model as realesrgan
+import modules.ldsr_model
 import modules.scripts
 import modules.gfpgan_model
 import modules.codeformer_model
webui.py
@@ -15,6 +15,7 @@ import modules.gfpgan_model
 import modules.face_restoration
 import modules.realesrgan_model as realesrgan
 import modules.esrgan_model as esrgan
+import modules.ldsr_model as ldsr
 import modules.extras
 import modules.lowvram
 import modules.txt2img
@@ -30,7 +31,7 @@ shared.face_restorers.append(modules.face_restoration.FaceRestoration())
 esrgan.load_models(cmd_opts.esrgan_models_path)
 swinir.load_models(cmd_opts.swinir_models_path)
 realesrgan.setup_realesrgan()
-
+ldsr.add_lsdr()
 queue_lock = threading.Lock()
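A side effect of deferring setup_ldsr() to the first upscale is that the ldsr.add_lsdr() call added to webui.py is essentially free at startup: it only appends a small object, and any clone or weight-download problem surfaces on first use rather than blocking launch. A toy illustration of that deferred-initialization trade-off follows; every name in it is invented for the sketch.

import time

class LazyUpscaler:
    """Invented example: defer expensive setup until the first upscale call."""
    def __init__(self):
        self._ready = False

    def _setup(self):
        time.sleep(0.2)  # stands in for downloading weights / building the model
        self._ready = True

    def do_upscale(self, img):
        if not self._ready:
            self._setup()
        return img

upscalers = []

t0 = time.perf_counter()
upscalers.append(LazyUpscaler())   # registration at startup: effectively free
t1 = time.perf_counter()
upscalers[0].do_upscale("image")   # first use: pays the setup cost
t2 = time.perf_counter()

print(f"register: {t1 - t0:.3f}s, first upscale: {t2 - t1:.3f}s")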