Merge branch 'AUTOMATIC1111:master' into js

papuSpartan 2022-11-01 14:05:57 -05:00, committed by GitHub
commit d0d74e459d
23 changed files with 1454 additions and 1121 deletions

View File

@ -21,4 +21,15 @@ function extensions_check(){
})
return []
}
}
function install_extension_from_index(button, url){
button.disabled = "disabled"
button.value = "Installing..."
textarea = gradioApp().querySelector('#extension_to_install textarea')
textarea.value = url
textarea.dispatchEvent(new Event("input", { bubbles: true }))
gradioApp().querySelector('#install_extension_button').click()
}
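
This helper drives a hidden Gradio Text/Button pair: it writes the URL into the hidden textarea, fires a bubbling "input" event so Gradio registers the new value, then clicks the hidden button to invoke the Python callback. The server side of that bridge appears in ui_extensions.py further down in this diff; a minimal sketch of the wiring, assuming gradio 3.x:

import gradio as gr

def install(url):
    # stand-in for the real install handler in ui_extensions.py
    return f"would install {url}"

with gr.Blocks() as demo:
    extension_to_install = gr.Text(elem_id="extension_to_install", visible=False)
    install_extension_button = gr.Button(elem_id="install_extension_button", visible=False)
    install_result = gr.HTML()
    # the hidden button's click handler reads whatever JS put in the hidden textbox
    install_extension_button.click(fn=install, inputs=[extension_to_install], outputs=[install_result])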

View File

@ -7,6 +7,7 @@ import shlex
import platform
dir_repos = "repositories"
dir_extensions = "extensions"
python = sys.executable
git = os.environ.get('GIT', "git")
index_url = os.environ.get('INDEX_URL', "")
@ -16,11 +17,11 @@ def extract_arg(args, name):
return [x for x in args if x != name], name in args
def run(command, desc=None, errdesc=None):
def run(command, desc=None, errdesc=None, custom_env=None):
if desc is not None:
print(desc)
result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=os.environ if custom_env is None else custom_env)
if result.returncode != 0:
@ -101,9 +102,27 @@ def version_check(commit):
else:
print("Not a git clone, can't perform version check.")
except Exception as e:
print("versipm check failed",e)
print("version check failed", e)
def run_extensions_installers():
if not os.path.isdir(dir_extensions):
return
for dirname_extension in os.listdir(dir_extensions):
path_installer = os.path.join(dir_extensions, dirname_extension, "install.py")
if not os.path.isfile(path_installer):
continue
try:
env = os.environ.copy()
env['PYTHONPATH'] = os.path.abspath(".")
print(run(f'"{python}" "{path_installer}"', errdesc=f"Error running install.py for extension {dirname_extension}", custom_env=env))
except Exception as e:
print(e, file=sys.stderr)
def prepare_enviroment():
torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113")
requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt")
@ -189,6 +208,8 @@ def prepare_enviroment():
run_pip(f"install -r {requirements_file}", "requirements for Web UI")
run_extensions_installers()
if update_check:
version_check(commit)
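
The new custom_env parameter exists so run_extensions_installers() can execute each extension's install.py with the webui root on PYTHONPATH, letting installers import webui modules. A condensed sketch of the pattern (the extension path is illustrative):

import os, subprocess, sys

def run(command, custom_env=None):
    # same idea as the run() above: inherit the environment unless overridden
    result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                            shell=True, env=os.environ if custom_env is None else custom_env)
    return result.stdout.decode(errors="ignore")

env = os.environ.copy()
env['PYTHONPATH'] = os.path.abspath(".")  # lets install.py import webui modules
print(run(f'"{sys.executable}" "extensions/example/install.py"', custom_env=env))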

File diff suppressed because it is too large

View File

@ -15,6 +15,7 @@
"A directory on the same machine where the server is running.": "WebUI 서버가 돌아가고 있는 디바이스에 존재하는 디렉토리를 선택해 주세요.",
"A merger of the two checkpoints will be generated in your": "체크포인트들이 병합된 결과물이 당신의",
"A value that determines the output of random number generator - if you create an image with same parameters and seed as another image, you'll get the same result": "난수 생성기의 결과물을 지정하는 값 - 동일한 설정값과 동일한 시드를 적용 시, 완전히 똑같은 결과물을 얻게 됩니다.",
"Action": "작업",
"Add a random artist to the prompt.": "프롬프트에 랜덤한 작가 추가",
"Add a second progress bar to the console that shows progress for an entire job.": "콘솔에 전체 작업의 진행도를 보여주는 2번째 프로그레스 바 추가하기",
"Add difference": "차이점 추가",
@ -23,6 +24,8 @@
"Add model hash to generation information": "생성 정보에 모델 해시 추가",
"Add model name to generation information": "생성 정보에 모델 이름 추가",
"Add number to filename when saving": "이미지를 저장할 때 파일명에 숫자 추가하기",
"Aesthetic Gradients": "스타일 그라디언트",
"Aesthetic Image Scorer": "스타일 이미지 스코어러",
"Aesthetic imgs embedding": "스타일 이미지 임베딩",
"Aesthetic learning rate": "스타일 학습 수",
"Aesthetic steps": "스타일 스텝 수",
@ -39,8 +42,10 @@
"Apply color correction to img2img results to match original colors.": "이미지→이미지 결과물이 기존 색상과 일치하도록 색상 보정 적용하기",
"Apply selected styles to current prompt": "현재 프롬프트에 선택된 스타일 적용",
"Apply settings": "설정 적용하기",
"Artists to study": "연구할만한 작가들",
"Auto focal point crop": "초점 기준 크롭(자동 감지)",
"Autocomplete options": "자동완성 설정",
"Available": "지원되는 확장기능 목록",
"Batch count": "배치 수",
"Batch from Directory": "저장 경로로부터 여러장 처리",
"Batch img2img": "이미지→이미지 배치",
@ -49,6 +54,7 @@
"behind": "최신 아님",
"BSRGAN 4x": "BSRGAN 4x",
"built with gradio": "gradio로 제작되었습니다",
"Calculates aesthetic score for generated images using CLIP+MLP Aesthetic Score Predictor based on Chad Scorer": "Chad 스코어러를 기반으로 한 CLIP+MLP 스타일 점수 예측기를 이용해 생성된 이미지의 스타일 점수를 계산합니다.",
"Cancel generate forever": "반복 생성 취소",
"cfg cnt": "CFG 변화 횟수",
"cfg count": "CFG 변화 횟수",
@ -78,6 +84,7 @@
"Create a grid where images will have different parameters. Use inputs below to specify which parameters will be shared by columns and rows": "서로 다른 설정값으로 생성된 이미지의 그리드를 만듭니다. 아래의 설정으로 가로/세로에 어떤 설정값을 적용할지 선택하세요.",
"Create a text file next to every image with generation parameters.": "생성된 이미지마다 생성 설정값을 담은 텍스트 파일 생성하기",
"Create aesthetic images embedding": "스타일 이미지 임베딩 생성하기",
"Create an embedding from one or few pictures and use it to apply their style to generated images.": "하나 혹은 그 이상의 이미지들로부터 임베딩을 생성해, 그 이미지들의 스타일을 다른 이미지 생성 시 적용할 수 있게 해줍니다.",
"Create debug image": "디버그 이미지 생성",
"Create embedding": "임베딩 생성",
"Create flipped copies": "좌우로 뒤집은 복사본 생성",
@ -88,6 +95,7 @@
"custom fold": "커스텀 경로",
"Custom Name (Optional)": "병합 모델 이름 (선택사항)",
"Dataset directory": "데이터셋 경로",
"Dataset Tag Editor": "데이터셋 태그 편집기",
"date": "생성 일자",
"DDIM": "DDIM",
"Decode CFG scale": "디코딩 CFG 스케일",
@ -98,6 +106,7 @@
"Denoising Diffusion Implicit Models - best at inpainting": "Denoising Diffusion Implicit Models - 인페이팅에 뛰어남",
"Denoising strength": "디노이즈 강도",
"Denoising strength change factor": "디노이즈 강도 변경 배수",
"Description": "설명",
"Destination directory": "결과물 저장 경로",
"Determines how little respect the algorithm should have for image's content. At 0, nothing will change, and at 1 you'll get an unrelated image. With values below 1.0, processing will take less steps than the Sampling Steps slider specifies.": "알고리즘이 얼마나 원본 이미지를 반영할지를 결정하는 수치입니다. 0일 경우 아무것도 바뀌지 않고, 1일 경우 원본 이미지와 전혀 관련없는 결과물을 얻게 됩니다. 1.0 아래의 값일 경우, 설정된 샘플링 스텝 수보다 적은 스텝 수를 거치게 됩니다.",
"Directory for saving images using the Save button": "저장 버튼을 이용해 저장하는 이미지들의 저장 경로",
@ -121,6 +130,7 @@
"Drop File Here": "파일을 끌어 놓으세요",
"Drop Image Here": "이미지를 끌어 놓으세요",
"Dropdown": "드롭다운",
"Dynamic Prompts": "다이나믹 프롬프트",
"Embedding": "임베딩",
"Embedding Learning rate": "임베딩 학습률",
"Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention": "강조 : (텍스트)를 이용해 모델의 텍스트에 대한 가중치를 더 강하게 주고 [텍스트]를 이용해 더 약하게 줍니다.",
@ -141,6 +151,7 @@
"Euler Ancestral - very creative, each can get a completely different picture depending on step count, setting steps to higher than 30-40 does not help": "Euler Ancestral - 매우 창의적, 스텝 수에 따라 완전히 다른 결과물이 나올 수 있음. 30~40보다 높은 스텝 수는 효과가 미미함",
"Existing Caption txt Action": "이미 존재하는 캡션 텍스트 처리",
"Extension": "확장기능",
"Extension index URL": "확장기능 목록 URL",
"Extensions": "확장기능",
"Extra": "고급",
"Extras": "부가기능",
@ -200,6 +211,7 @@
"ignore": "무시",
"Image": "이미지",
"Image Browser": "이미지 브라우저",
"Image browser": "이미지 브라우저",
"Image for img2img": "Image for img2img",
"Image for inpainting with mask": "마스크로 인페인팅할 이미지",
"Image not found (may have been already moved)": "이미지를 찾을 수 없습니다 (이미 옮겨졌을 수 있음)",
@ -210,6 +222,7 @@
"img2img alternative test": "이미지→이미지 대체버전 테스트",
"img2img DDIM discretize": "이미지→이미지 DDIM 이산화",
"img2img history": "이미지→이미지 기록",
"Implements an expressive template language for random or combinatorial prompt generation along with features to support deep wildcard directory structures.": "무작위/조합 프롬프트 생성을 위한 문법과 복잡한 와일드카드 구조를 지원합니다.",
"In loopback mode, on each loop the denoising strength is multiplied by this value. <1 means decreasing variety so your sequence will converge on a fixed picture. >1 means increasing variety so your sequence will become more and more chaotic.": "루프백 모드에서는 매 루프마다 디노이즈 강도에 이 값이 곱해집니다. 1보다 작을 경우 다양성이 낮아져 결과 이미지들이 고정된 형태로 모일 겁니다. 1보다 클 경우 다양성이 높아져 결과 이미지들이 갈수록 혼란스러워지겠죠.",
"Include Separate Images": "분리된 이미지 포함하기",
"Increase coherency by padding from the last comma within n tokens when using more than 75 tokens": "75개보다 많은 토큰을 사용시 마지막 쉼표로부터 N개의 토큰 이내에 패딩을 추가해 통일성 증가시키기",
@ -222,6 +235,7 @@
"Inpainting conditioning mask strength": "인페인팅 조절 마스크 강도",
"Input directory": "인풋 이미지 경로",
"Input images directory": "이미지 경로 입력",
"Inspiration": "\"영감\"",
"Install": "설치",
"Install from URL": "URL로부터 확장기능 설치",
"Installed": "설치된 확장기능",
@ -259,12 +273,14 @@
"Leave blank to save images to the default path.": "기존 저장 경로에 이미지들을 저장하려면 비워두세요.",
"Leave empty for auto": "자동 설정하려면 비워두십시오",
"left": "왼쪽",
"Lets you edit captions in training datasets.": "훈련에 사용되는 데이터셋의 캡션을 수정할 수 있게 해줍니다.",
"linear": "linear",
"List of prompt inputs": "프롬프트 입력 리스트",
"List of setting names, separated by commas, for settings that should go to the quick access bar at the top, rather than the usual setting tab. See modules/shared.py for setting names. Requires restarting to apply.": "설정 탭이 아니라 상단의 빠른 설정 바에 위치시킬 설정 이름을 쉼표로 분리해서 입력하십시오. 설정 이름은 modules/shared.py에서 찾을 수 있습니다. 재시작이 필요합니다.",
"LMS": "LMS",
"LMS Karras": "LMS Karras",
"Load": "불러오기",
"Load from:": "URL로부터 불러오기",
"Loading...": "로딩 중...",
"Local directory name": "로컬 경로 이름",
"Localization (requires restart)": "현지화 (재시작 필요)",
@ -360,6 +376,7 @@
"Prompt template file": "프롬프트 템플릿 파일 경로",
"Prompts": "프롬프트",
"Prompts from file or textbox": "파일이나 텍스트박스로부터 프롬프트 불러오기",
"Provides an interface to browse created images in the web browser.": "생성된 이미지를 브라우저 내에서 볼 수 있는 인터페이스를 추가합니다.",
"Put variable parts at start of prompt": "변경되는 프롬프트를 앞에 위치시키기",
"quad": "quad",
"Quality for saved jpeg images": "저장된 jpeg 이미지들의 품질",
@ -367,11 +384,13 @@
"R-ESRGAN 4x+ Anime6B": "R-ESRGAN 4x+ Anime6B",
"Random": "랜덤",
"Random grid": "랜덤 그리드",
"Randomly display the pictures of the artist's or artistic genres typical style, more pictures of this artist or genre is displayed after selecting. So you don't have to worry about how hard it is to choose the right style of art when you create.": "특정 작가 또는 스타일의 이미지들 중 하나를 무작위로 보여줍니다. 선택 후 선택한 작가 또는 스타일의 이미지들이 더 나타나게 됩니다. 고르기 어려워도 걱정하실 필요 없어요!",
"Randomness": "랜덤성",
"Read generation parameters from prompt or last generation if prompt is empty into user interface.": "클립보드에 복사된 정보로부터 설정값 읽어오기/프롬프트창이 비어있을경우 제일 최근 설정값 불러오기",
"Read parameters (prompt, etc...) from txt2img tab when making previews": "프리뷰 이미지 생성 시 텍스트→이미지 탭에서 설정값(프롬프트 등) 읽어오기",
"Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8": "추천 설정값 - 샘플링 스텝 수 : 80-100 , 샘플러 : Euler a, 디노이즈 강도 : 0.8",
"Reload custom script bodies (No ui updates, No restart)": "커스텀 스크립트 리로드하기(UI 업데이트 없음, 재시작 없음)",
"Reloading...": "재시작 중...",
"relu": "relu",
"Renew Page": "Renew Page",
"Request browser notifications": "브라우저 알림 권한 요청",
@ -391,6 +410,7 @@
"Reuse seed from last generation, mostly useful if it was randomed": "이전 생성에서 사용된 시드를 불러옵니다. 랜덤하게 생성했을 시 도움됨",
"right": "오른쪽",
"Run": "가동",
"Sample extension. Allows you to use __name__ syntax in your prompt to get a random line from a file named name.txt in the wildcards directory. Also see Dynamic Prompts for similar functionality.": "샘플 확장기능입니다. __이름__형식의 문법을 사용해 와일드카드 경로 내의 이름.txt파일로부터 무작위 프롬프트를 적용할 수 있게 해줍니다. 유사한 확장기능으로 다이나믹 프롬프트가 있습니다.",
"Sampler": "샘플러",
"Sampler parameters": "샘플러 설정값",
"Sampling method": "샘플링 방법",
@ -442,6 +462,7 @@
"Show progressbar": "프로그레스 바 보이기",
"Show result images": "이미지 결과 보이기",
"Show Textbox": "텍스트박스 보이기",
"Shows a gallery of generated pictures by artists separated into categories.": "생성된 이미지들을 작가별로 분류해 보여줍니다. 원본 - https://artiststostudy.pages.dev",
"Sigma adjustment for finding noise for image": "이미지 노이즈를 찾기 위해 시그마 조정",
"Sigma Churn": "시그마 섞기",
"sigma churn": "시그마 섞기",
@ -479,6 +500,7 @@
"System": "시스템",
"Tertiary model (C)": "3차 모델 (C)",
"Textbox": "텍스트박스",
"The official port of Deforum, an extensive script for 2D and 3D animations, supporting keyframable sequences, dynamic math parameters (even inside the prompts), dynamic masking, depth estimation and warping.": "Deforum의 공식 포팅 버전입니다. 2D와 3D 애니메이션, 키프레임 시퀀스, 수학적 매개변수, 다이나믹 마스킹 등을 지원합니다.",
"This regular expression will be used extract words from filename, and they will be joined using the option below into label text used for training. Leave empty to keep filename text as it is.": "이 정규표현식은 파일명으로부터 단어를 추출하는 데 사용됩니다. 추출된 단어들은 하단의 설정을 이용해 라벨 텍스트로 변환되어 훈련에 사용됩니다. 파일명 텍스트를 유지하려면 비워두십시오.",
"This string will be used to join split words into a single line if the option above is enabled.": "이 문자열은 상단 설정이 활성화되어있을 때 분리된 단어들을 한 줄로 합치는 데 사용됩니다.",
"This text is used to rotate the feature space of the imgs embs": "이 텍스트는 이미지 임베딩의 특징 공간을 회전하는 데 사용됩니다.",

View File

@ -1,6 +1,8 @@
import base64
import io
import time
import uvicorn
from gradio.processing_utils import encode_pil_to_base64, decode_base64_to_file, decode_base64_to_image
from gradio.processing_utils import decode_base64_to_file, decode_base64_to_image
from fastapi import APIRouter, Depends, HTTPException
import modules.shared as shared
from modules import devices
@ -29,6 +31,12 @@ def setUpscalers(req: dict):
return reqDict
def encode_pil_to_base64(image):
buffer = io.BytesIO()
image.save(buffer, format="png")
return base64.b64encode(buffer.getvalue())
class Api:
def __init__(self, app, queue_lock):
self.router = APIRouter()
@ -40,6 +48,7 @@ class Api:
self.app.add_api_route("/sdapi/v1/extra-batch-images", self.extras_batch_images_api, methods=["POST"], response_model=ExtrasBatchImagesResponse)
self.app.add_api_route("/sdapi/v1/png-info", self.pnginfoapi, methods=["POST"], response_model=PNGInfoResponse)
self.app.add_api_route("/sdapi/v1/progress", self.progressapi, methods=["GET"], response_model=ProgressResponse)
self.app.add_api_route("/sdapi/v1/interrupt", self.interruptapi, methods=["POST"])
def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI):
sampler_index = sampler_to_index(txt2imgreq.sampler_index)
@ -176,6 +185,11 @@ class Api:
return ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.dict(), current_image=current_image)
def interruptapi(self):
shared.state.interrupt()
return {}
def launch(self, server_name, port):
self.app.include_router(self.router)
uvicorn.run(self.app, host=server_name, port=port)
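
Two API changes here: encode_pil_to_base64 is replaced with a local version returning bare base64-encoded PNG bytes rather than gradio's data-URI format, and a POST /sdapi/v1/interrupt route exposes shared.state.interrupt(). A hedged client-side sketch, assuming a local instance started with --api:

import base64, io, requests
from PIL import Image

# cancel a running generation; the handler above just returns {}
requests.post("http://127.0.0.1:7860/sdapi/v1/interrupt")

# images now come back as bare base64 PNG (see encode_pil_to_base64 above),
# so decoding is the symmetric operation:
def decode_base64_to_image(b64):
    return Image.open(io.BytesIO(base64.b64decode(b64)))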

View File

@ -141,7 +141,7 @@ def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_
upscaling_resize_w, upscaling_resize_h, upscaling_crop)
cache_key = LruCache.Key(image_hash=hash(np.array(image.getdata()).tobytes()),
info_hash=hash(info),
args_hash=hash(upscale_args))
args_hash=hash((upscale_args, upscale_first)))
cached_entry = cached_images.get(cache_key)
if cached_entry is None:
res = upscale(image, *upscale_args)
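
Folding upscale_first into args_hash fixes a cache-poisoning bug: two requests with identical upscaler arguments but different processing order previously collided on the same LruCache entry. The point in isolation (values are made up; this is not the webui Key class):

upscale_args = (0, 512, 512, True)   # made-up upscaler arguments
key_a = hash((upscale_args, True))   # upscale_first=True
key_b = hash((upscale_args, False))  # upscale_first=False
assert key_a != key_b                # the two orderings no longer share a cache slot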

View File

@ -510,8 +510,9 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
if extension.lower() == '.png':
pnginfo_data = PngImagePlugin.PngInfo()
for k, v in params.pnginfo.items():
pnginfo_data.add_text(k, str(v))
if opts.enable_pnginfo:
for k, v in params.pnginfo.items():
pnginfo_data.add_text(k, str(v))
image.save(fullfn, quality=opts.jpeg_quality, pnginfo=pnginfo_data)
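
The reordering moves the enable_pnginfo check so that disabling the option actually suppresses embedded generation parameters; the PngInfo container is still always created, keeping the save call unconditional. The resulting logic, reduced to a self-contained PIL example:

from PIL import Image, PngImagePlugin

enable_pnginfo = False                                # stand-in for opts.enable_pnginfo
params_pnginfo = {"parameters": "prompt, seed, ..."}  # stand-in metadata

pnginfo_data = PngImagePlugin.PngInfo()  # always created...
if enable_pnginfo:                       # ...but only populated when allowed
    for k, v in params_pnginfo.items():
        pnginfo_data.add_text(k, str(v))

Image.new("RGB", (64, 64)).save("out.png", pnginfo=pnginfo_data)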

View File

@ -55,6 +55,7 @@ def process_batch(p, input_dir, output_dir, args):
filename = f"{left}-{n}{right}"
if not save_normally:
os.makedirs(output_dir, exist_ok=True)
processed_image.save(os.path.join(output_dir, filename))

View File

@ -56,9 +56,9 @@ class InterrogateModels:
import clip
if self.running_on_cpu:
model, preprocess = clip.load(clip_model_name, device="cpu")
model, preprocess = clip.load(clip_model_name, device="cpu", download_root=shared.cmd_opts.clip_models_path)
else:
model, preprocess = clip.load(clip_model_name)
model, preprocess = clip.load(clip_model_name, download_root=shared.cmd_opts.clip_models_path)
model.eval()
model = model.to(devices.device_interrogate)
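
Passing download_root redirects CLIP's model download away from its default cache (~/.cache/clip) to a user-chosen directory, matching the new --clip-models-path flag added below in shared.py. Assuming the openai/CLIP package and its clip.load signature:

import clip

# "ViT-L/14" is the interrogator's usual model; the directory is illustrative
model, preprocess = clip.load("ViT-L/14", device="cpu",
                              download_root="models/CLIP")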

View File

@ -38,13 +38,18 @@ def setup_for_low_vram(sd_model, use_medvram):
# see below for register_forward_pre_hook;
# first_stage_model does not use forward(), it uses encode/decode, so register_forward_pre_hook is
# useless here, and we just replace those methods
def first_stage_model_encode_wrap(self, encoder, x):
send_me_to_gpu(self, None)
return encoder(x)
def first_stage_model_decode_wrap(self, decoder, z):
send_me_to_gpu(self, None)
return decoder(z)
first_stage_model = sd_model.first_stage_model
first_stage_model_encode = sd_model.first_stage_model.encode
first_stage_model_decode = sd_model.first_stage_model.decode
def first_stage_model_encode_wrap(x):
send_me_to_gpu(first_stage_model, None)
return first_stage_model_encode(x)
def first_stage_model_decode_wrap(z):
send_me_to_gpu(first_stage_model, None)
return first_stage_model_decode(z)
# remove three big modules, cond, first_stage, and unet from the model and then
# send the model to GPU. Then put modules back. the modules will be in CPU.
@ -56,8 +61,8 @@ def setup_for_low_vram(sd_model, use_medvram):
# register hooks for those the first two models
sd_model.cond_stage_model.transformer.register_forward_pre_hook(send_me_to_gpu)
sd_model.first_stage_model.register_forward_pre_hook(send_me_to_gpu)
sd_model.first_stage_model.encode = lambda x, en=sd_model.first_stage_model.encode: first_stage_model_encode_wrap(sd_model.first_stage_model, en, x)
sd_model.first_stage_model.decode = lambda z, de=sd_model.first_stage_model.decode: first_stage_model_decode_wrap(sd_model.first_stage_model, de, z)
sd_model.first_stage_model.encode = first_stage_model_encode_wrap
sd_model.first_stage_model.decode = first_stage_model_decode_wrap
parents[sd_model.cond_stage_model.transformer] = sd_model.cond_stage_model
if use_medvram:
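
As the surrounding comments note, first_stage_model is driven through encode()/decode() rather than forward(), so register_forward_pre_hook never fires for it and the methods must be wrapped by hand. The rewrite replaces the old lambdas, which passed the module as a spurious self argument, with plain closures over references captured before replacement. The trick in isolation, with a stand-in paging hook:

import torch.nn as nn

def send_me_to_gpu(module, _input):
    # stand-in: the real hook pages `module` onto the GPU and evicts others
    pass

class FakeVAE(nn.Module):
    def encode(self, x): return x
    def decode(self, z): return z

vae = FakeVAE()
orig_encode, orig_decode = vae.encode, vae.decode  # capture before replacing

def encode_wrap(x):
    send_me_to_gpu(vae, None)
    return orig_encode(x)

def decode_wrap(z):
    send_me_to_gpu(vae, None)
    return orig_decode(z)

# instance attributes shadow the class methods; callers are unaffected
vae.encode, vae.decode = encode_wrap, decode_wrap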

View File

@ -597,6 +597,9 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
if p.scripts is not None:
p.scripts.postprocess(p, res)
p.sd_model = None
p.sampler = None
return res

View File

@ -32,7 +32,7 @@ class RestrictedUnpickler(pickle.Unpickler):
return getattr(collections, name)
if module == 'torch._utils' and name in ['_rebuild_tensor_v2', '_rebuild_parameter']:
return getattr(torch._utils, name)
if module == 'torch' and name in ['FloatStorage', 'HalfStorage', 'IntStorage', 'LongStorage', 'DoubleStorage']:
if module == 'torch' and name in ['FloatStorage', 'HalfStorage', 'IntStorage', 'LongStorage', 'DoubleStorage', 'ByteStorage']:
return getattr(torch, name)
if module == 'torch.nn.modules.container' and name in ['ParameterDict']:
return getattr(torch.nn.modules.container, name)
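
RestrictedUnpickler is the checkpoint-loading safety net: find_class() resolves only an explicit allow-list of globals, so a malicious .ckpt cannot smuggle in arbitrary callables; this change just adds torch.ByteStorage to that list. The pattern reduced to a sketch:

import pickle
import torch

class RestrictedUnpickler(pickle.Unpickler):
    ALLOWED_TORCH = {'FloatStorage', 'HalfStorage', 'IntStorage',
                     'LongStorage', 'DoubleStorage', 'ByteStorage'}

    def find_class(self, module, name):
        # resolve only allow-listed globals; refuse everything else
        if module == 'torch' and name in self.ALLOWED_TORCH:
            return getattr(torch, name)
        raise pickle.UnpicklingError(f"global '{module}/{name}' is forbidden")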

View File

@ -3,6 +3,8 @@ import traceback
from collections import namedtuple
import inspect
from fastapi import FastAPI
from gradio import Blocks
def report_exception(c, job):
print(f"Error executing callback {job} for {c.script}", file=sys.stderr)
@ -25,6 +27,7 @@ class ImageSaveParams:
ScriptCallback = namedtuple("ScriptCallback", ["script", "callback"])
callbacks_app_started = []
callbacks_model_loaded = []
callbacks_ui_tabs = []
callbacks_ui_settings = []
@ -40,6 +43,14 @@ def clear_callbacks():
callbacks_image_saved.clear()
def app_started_callback(demo: Blocks, app: FastAPI):
for c in callbacks_app_started:
try:
c.callback(demo, app)
except Exception:
report_exception(c, 'app_started_callback')
def model_loaded_callback(sd_model):
for c in callbacks_model_loaded:
try:
@ -69,7 +80,7 @@ def ui_settings_callback():
def before_image_saved_callback(params: ImageSaveParams):
for c in callbacks_image_saved:
for c in callbacks_before_image_saved:
try:
c.callback(params)
except Exception:
@ -91,6 +102,12 @@ def add_callback(callbacks, fun):
callbacks.append(ScriptCallback(filename, fun))
def on_app_started(callback):
"""register a function to be called when the webui started, the gradio `Block` component and
fastapi `FastAPI` object are passed as the arguments"""
add_callback(callbacks_app_started, callback)
def on_model_loaded(callback):
"""register a function to be called when the stable diffusion model is created; the model is
passed as an argument"""
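
The new on_app_started hook hands extensions both the gradio Blocks and the FastAPI app once the server is up, which is what lets them mount their own API routes. Roughly how an extension script would use it (the route itself is hypothetical; this only runs inside the webui process):

from modules import script_callbacks

def my_app_started(demo, app):
    @app.get("/my-extension/ping")  # hypothetical route
    def ping():
        return {"ok": True}

script_callbacks.on_app_started(my_app_started)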

View File

@ -94,6 +94,10 @@ class StableDiffusionModelHijack:
if type(model_embeddings.token_embedding) == EmbeddingsWithFixes:
model_embeddings.token_embedding = model_embeddings.token_embedding.wrapped
self.layers = None
self.circular_enabled = False
self.clip = None
def apply_circular(self, enable):
if self.circular_enabled == enable:
return

View File

@ -1,6 +1,7 @@
import collections
import os.path
import sys
import gc
from collections import namedtuple
import torch
import re
@ -220,6 +221,12 @@ def load_model(checkpoint_info=None):
if checkpoint_info.config != shared.cmd_opts.config:
print(f"Loading config from: {checkpoint_info.config}")
if shared.sd_model:
sd_hijack.model_hijack.undo_hijack(shared.sd_model)
shared.sd_model = None
gc.collect()
devices.torch_gc()
sd_config = OmegaConf.load(checkpoint_info.config)
if should_hijack_inpainting(checkpoint_info):
@ -233,6 +240,7 @@ def load_model(checkpoint_info=None):
checkpoint_info = checkpoint_info._replace(config=checkpoint_info.config.replace(".yaml", "-inpainting.yaml"))
do_inpainting_hijack()
sd_model = instantiate_from_config(sd_config.model)
load_model_weights(sd_model, checkpoint_info)
@ -252,14 +260,18 @@ def load_model(checkpoint_info=None):
return sd_model
def reload_model_weights(sd_model, info=None):
def reload_model_weights(sd_model=None, info=None):
from modules import lowvram, devices, sd_hijack
checkpoint_info = info or select_checkpoint()
if not sd_model:
sd_model = shared.sd_model
if sd_model.sd_model_checkpoint == checkpoint_info.filename:
return
if sd_model.sd_checkpoint_info.config != checkpoint_info.config or should_hijack_inpainting(checkpoint_info) != should_hijack_inpainting(sd_model.sd_checkpoint_info):
del sd_model
checkpoints_loaded.clear()
load_model(checkpoint_info)
return shared.sd_model
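
Before loading a new checkpoint, the old one is now explicitly torn down: the hijack is undone, the global reference is cleared, Python GC runs, and CUDA's allocator cache is released. The memory-release sequence in isolation (devices.torch_gc wraps roughly the CUDA call below):

import gc
import torch

shared_sd_model = object()    # stand-in for shared.sd_model
shared_sd_model = None        # clear the last strong reference
gc.collect()                  # reclaim the model's Python objects
if torch.cuda.is_available():
    torch.cuda.empty_cache()  # release cached CUDA blocks for the next model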

View File

@ -1,5 +1,6 @@
from collections import namedtuple
import numpy as np
from math import floor
import torch
import tqdm
from PIL import Image
@ -205,17 +206,22 @@ class VanillaStableDiffusionSampler:
self.mask = p.mask if hasattr(p, 'mask') else None
self.nmask = p.nmask if hasattr(p, 'nmask') else None
def adjust_steps_if_invalid(self, p, num_steps):
if (self.config.name == 'DDIM' and p.ddim_discretize == 'uniform') or (self.config.name == 'PLMS'):
valid_step = 999 / (1000 // num_steps)
if valid_step == floor(valid_step):
return int(valid_step) + 1
return num_steps
def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
steps, t_enc = setup_img2img_steps(p, steps)
steps = self.adjust_steps_if_invalid(p, steps)
self.initialize(p)
# existing code fails with certain step counts, like 9
try:
self.sampler.make_schedule(ddim_num_steps=steps, ddim_eta=self.eta, ddim_discretize=p.ddim_discretize, verbose=False)
except Exception:
self.sampler.make_schedule(ddim_num_steps=steps+1, ddim_eta=self.eta, ddim_discretize=p.ddim_discretize, verbose=False)
self.sampler.make_schedule(ddim_num_steps=steps, ddim_eta=self.eta, ddim_discretize=p.ddim_discretize, verbose=False)
x1 = self.sampler.stochastic_encode(x, torch.tensor([t_enc] * int(x.shape[0])).to(shared.device), noise=noise)
self.init_latent = x
@ -239,18 +245,14 @@ class VanillaStableDiffusionSampler:
self.last_latent = x
self.step = 0
steps = steps or p.steps
steps = self.adjust_steps_if_invalid(p, steps or p.steps)
# Wrap the conditioning models with additional image conditioning for inpainting model
if image_conditioning is not None:
conditioning = {"c_concat": [image_conditioning], "c_crossattn": [conditioning]}
unconditional_conditioning = {"c_concat": [image_conditioning], "c_crossattn": [unconditional_conditioning]}
# existing code fails with certain step counts, like 9
try:
samples_ddim = self.launch_sampling(steps, lambda: self.sampler.sample(S=steps, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)[0])
except Exception:
samples_ddim = self.launch_sampling(steps, lambda: self.sampler.sample(S=steps+1, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)[0])
samples_ddim = self.launch_sampling(steps, lambda: self.sampler.sample(S=steps, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)[0])
return samples_ddim
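
adjust_steps_if_invalid replaces the old try/except retry: with uniform discretization, the bundled DDIM code builds its timestep table as range(0, 1000, 1000 // steps) shifted by one, so whenever 999 is divisible by the stride the last timestep becomes 1000 and indexes out of range. The function detects exactly that case and requests one extra step up front. A worked example:

from math import floor

def adjust(num_steps):
    valid_step = 999 / (1000 // num_steps)
    if valid_step == floor(valid_step):
        return int(valid_step) + 1
    return num_steps

print(adjust(9))   # 1000 // 9 = 111, 999 / 111 = 9.0  -> bumps to 10
print(adjust(20))  # 1000 // 20 = 50, 999 / 50 = 19.98 -> 20 stays valid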

View File

@ -51,6 +51,7 @@ parser.add_argument("--realesrgan-models-path", type=str, help="Path to director
parser.add_argument("--scunet-models-path", type=str, help="Path to directory with ScuNET model file(s).", default=os.path.join(models_path, 'ScuNET'))
parser.add_argument("--swinir-models-path", type=str, help="Path to directory with SwinIR model file(s).", default=os.path.join(models_path, 'SwinIR'))
parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with LDSR model file(s).", default=os.path.join(models_path, 'LDSR'))
parser.add_argument("--clip-models-path", type=str, help="Path to directory with CLIP model file(s).", default=None)
parser.add_argument("--xformers", action='store_true', help="enable xformers for cross attention layers")
parser.add_argument("--force-enable-xformers", action='store_true', help="enable xformers for cross attention layers regardless of whether the checking code thinks you can run it; do not make bug reports if this fails to work")
parser.add_argument("--deepdanbooru", action='store_true', help="enable deepdanbooru interrogator")
@ -288,11 +289,12 @@ options_templates.update(options_section(('system', "System"), {
}))
options_templates.update(options_section(('training', "Training"), {
"unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training hypernetwork. Saves VRAM."),
"unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."),
"dataset_filename_word_regex": OptionInfo("", "Filename word regex"),
"dataset_filename_join_string": OptionInfo(" ", "Filename join string"),
"training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}),
"training_write_csv_every": OptionInfo(500, "Save an csv containing the loss to log directory every N steps, 0 to disable"),
"training_xattention_optimizations": OptionInfo(False, "Use cross attention optimizations while training"),
}))
options_templates.update(options_section(('sd', "Stable Diffusion"), {

View File

@ -235,6 +235,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding_name}.pt')
log_directory = os.path.join(log_directory, datetime.datetime.now().strftime("%Y-%m-%d"), embedding_name)
unload = shared.opts.unload_models_when_training
if save_embedding_every > 0:
embedding_dir = os.path.join(log_directory, "embeddings")
@ -272,6 +273,8 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..."
with torch.autocast("cuda"):
ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, device=devices.device, template_file=template_file, batch_size=batch_size)
if unload:
shared.sd_model.first_stage_model.to(devices.cpu)
embedding.vec.requires_grad = True
optimizer = torch.optim.AdamW([embedding.vec], lr=scheduler.learn_rate)
@ -328,6 +331,9 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
if images_dir is not None and steps_done % create_image_every == 0:
forced_filename = f'{embedding_name}-{steps_done}'
last_saved_image = os.path.join(images_dir, forced_filename)
shared.sd_model.first_stage_model.to(devices.device)
p = processing.StableDiffusionProcessingTxt2Img(
sd_model=shared.sd_model,
do_not_save_grid=True,
@ -355,6 +361,9 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
processed = processing.process_images(p)
image = processed.images[0]
if unload:
shared.sd_model.first_stage_model.to(devices.cpu)
shared.state.current_image = image
if save_image_with_stored_embedding and os.path.exists(last_saved_file) and embedding_yet_to_be_embedded:
@ -400,6 +409,7 @@ Last saved image: {html.escape(last_saved_image)}<br/>
filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding_name}.pt')
save_embedding(embedding, checkpoint, embedding_name, filename, remove_cached_checksum=True)
shared.sd_model.first_stage_model.to(devices.device)
return embedding, filename
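
With unload_models_when_training enabled, the VAE now lives in RAM for the whole training loop and is paged onto the GPU only when a preview image is generated, then unconditionally restored at the end. The skeleton of that juggling, with stand-ins for shared.sd_model.first_stage_model and devices.device:

import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
vae = torch.nn.Linear(4, 4)  # stand-in for the first-stage VAE

vae.to("cpu")                # while training: VAE idle, VRAM freed
# ... training steps ...
vae.to(device)               # preview needs VAE encode/decode on GPU
# ... generate preview image ...
vae.to("cpu")                # back to RAM until the next preview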

View File

@ -25,8 +25,10 @@ def train_embedding(*args):
assert not shared.cmd_opts.lowvram, 'Training models with lowvram not possible'
apply_optimizations = shared.opts.training_xattention_optimizations
try:
sd_hijack.undo_optimizations()
if not apply_optimizations:
sd_hijack.undo_optimizations()
embedding, filename = modules.textual_inversion.textual_inversion.train_embedding(*args)
@ -38,5 +40,6 @@ Embedding saved to {html.escape(filename)}
except Exception:
raise
finally:
sd_hijack.apply_optimizations()
if not apply_optimizations:
sd_hijack.apply_optimizations()

View File

@ -13,6 +13,9 @@ import html
from modules import extensions, shared, paths
available_extensions = {"extensions": []}
def check_access():
assert not shared.cmd_opts.disable_extension_access, "extension access disabed because of commandline flags"
@ -96,6 +99,14 @@ def extension_table():
return code
def normalize_git_url(url):
if url is None:
return ""
url = url.replace(".git", "")
return url
def install_extension_from_url(dirname, url):
check_access()
@ -103,14 +114,15 @@ def install_extension_from_url(dirname, url):
if dirname is None or dirname == "":
*parts, last_part = url.split('/')
last_part = last_part.replace(".git", "")
last_part = normalize_git_url(last_part)
dirname = last_part
target_dir = os.path.join(extensions.extensions_dir, dirname)
assert not os.path.exists(target_dir), f'Extension directory already exists: {target_dir}'
assert len([x for x in extensions.extensions if x.remote == url]) == 0, 'Extension with this URL is already installed'
normalized_url = normalize_git_url(url)
assert len([x for x in extensions.extensions if normalize_git_url(x.remote) == normalized_url]) == 0, 'Extension with this URL is already installed'
tmpdir = os.path.join(paths.script_path, "tmp", dirname)
@ -128,18 +140,80 @@ def install_extension_from_url(dirname, url):
shutil.rmtree(tmpdir, True)
def install_extension_from_index(url):
ext_table, message = install_extension_from_url(None, url)
return refresh_available_extensions_from_data(), ext_table, message
def refresh_available_extensions(url):
global available_extensions
import urllib.request
with urllib.request.urlopen(url) as response:
text = response.read()
available_extensions = json.loads(text)
return url, refresh_available_extensions_from_data(), ''
def refresh_available_extensions_from_data():
extlist = available_extensions["extensions"]
installed_extension_urls = {normalize_git_url(extension.remote): extension.name for extension in extensions.extensions}
code = f"""<!-- {time.time()} -->
<table id="available_extensions">
<thead>
<tr>
<th>Extension</th>
<th>Description</th>
<th>Action</th>
</tr>
</thead>
<tbody>
"""
for ext in extlist:
name = ext.get("name", "noname")
url = ext.get("url", None)
description = ext.get("description", "")
if url is None:
continue
existing = installed_extension_urls.get(normalize_git_url(url), None)
install_code = f"""<input onclick="install_extension_from_index(this, '{html.escape(url)}')" type="button" value="{"Install" if not existing else "Installed"}" {"disabled=disabled" if existing else ""} class="gr-button gr-button-lg gr-button-secondary">"""
code += f"""
<tr>
<td><a href="{html.escape(url)}">{html.escape(name)}</a></td>
<td>{html.escape(description)}</td>
<td>{install_code}</td>
</tr>
"""
code += """
</tbody>
</table>
"""
return code
def create_ui():
import modules.ui
with gr.Blocks(analytics_enabled=False) as ui:
with gr.Tabs(elem_id="tabs_extensions") as tabs:
with gr.TabItem("Installed"):
extensions_disabled_list = gr.Text(elem_id="extensions_disabled_list", visible=False)
extensions_update_list = gr.Text(elem_id="extensions_update_list", visible=False)
with gr.Row():
apply = gr.Button(value="Apply and restart UI", variant="primary")
check = gr.Button(value="Check for updates")
extensions_disabled_list = gr.Text(elem_id="extensions_disabled_list", visible=False).style(container=False)
extensions_update_list = gr.Text(elem_id="extensions_update_list", visible=False).style(container=False)
extensions_table = gr.HTML(lambda: extension_table())
@ -157,16 +231,38 @@ def create_ui():
outputs=[extensions_table],
)
with gr.TabItem("Available"):
with gr.Row():
refresh_available_extensions_button = gr.Button(value="Load from:", variant="primary")
available_extensions_index = gr.Text(value="https://raw.githubusercontent.com/wiki/AUTOMATIC1111/stable-diffusion-webui/Extensions-index.md", label="Extension index URL").style(container=False)
extension_to_install = gr.Text(elem_id="extension_to_install", visible=False)
install_extension_button = gr.Button(elem_id="install_extension_button", visible=False)
install_result = gr.HTML()
available_extensions_table = gr.HTML()
refresh_available_extensions_button.click(
fn=modules.ui.wrap_gradio_call(refresh_available_extensions, extra_outputs=[gr.update(), gr.update()]),
inputs=[available_extensions_index],
outputs=[available_extensions_index, available_extensions_table, install_result],
)
install_extension_button.click(
fn=modules.ui.wrap_gradio_call(install_extension_from_index, extra_outputs=[gr.update(), gr.update()]),
inputs=[extension_to_install],
outputs=[available_extensions_table, extensions_table, install_result],
)
with gr.TabItem("Install from URL"):
install_url = gr.Text(label="URL for extension's git repository")
install_dirname = gr.Text(label="Local directory name", placeholder="Leave empty for auto")
intall_button = gr.Button(value="Install", variant="primary")
intall_result = gr.HTML(elem_id="extension_install_result")
install_button = gr.Button(value="Install", variant="primary")
install_result = gr.HTML(elem_id="extension_install_result")
intall_button.click(
install_button.click(
fn=modules.ui.wrap_gradio_call(install_extension_from_url, extra_outputs=[gr.update()]),
inputs=[install_dirname, install_url],
outputs=[extensions_table, intall_result],
outputs=[extensions_table, install_result],
)
return ui
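
normalize_git_url makes the duplicate-installed check tolerant of the optional .git suffix, so the same repository can no longer be installed twice under two spellings. The check in miniature (note it strips any ".git" occurrence, mirroring the diff):

def normalize_git_url(url):
    if url is None:
        return ""
    return url.replace(".git", "")

installed = {"https://github.com/user/repo.git"}  # hypothetical installed remote
normalized = {normalize_git_url(u) for u in installed}
assert normalize_git_url("https://github.com/user/repo") in normalized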

View File

@ -12,7 +12,7 @@ opencv-python
requests
piexif
Pillow
pytorch_lightning
pytorch_lightning==1.7.7
realesrgan
scikit-image>=0.19
timm==0.4.12

View File

@ -532,16 +532,16 @@ img2maskimg, #img2maskimg > .h-60, #img2maskimg > .h-60 > div, #img2maskimg > .h
/* Extensions */
#extensions{
#tab_extensions table{
border-collapse: collapse;
}
#extensions td, #extensions th{
#tab_extensions table td, #tab_extensions table th{
border: 1px solid #ccc;
padding: 0.25em 0.5em;
}
#extensions input[type="checkbox"]{
#tab_extensions table input[type="checkbox"]{
margin-right: 0.5em;
}
@ -549,6 +549,9 @@ img2maskimg, #img2maskimg > .h-60, #img2maskimg > .h-60 > div, #img2maskimg > .h
max-width: 16em;
}
#tab_extensions input[disabled="disabled"]{
opacity: 0.5;
}
/* The following handles localization for right-to-left (RTL) languages like Arabic.
The rtl media type will only be activated by the logic in javascript/localization.js.

View File

@ -23,6 +23,7 @@ import modules.sd_hijack
import modules.sd_models
import modules.shared as shared
import modules.txt2img
import modules.script_callbacks
import modules.ui
from modules import devices
@ -77,7 +78,7 @@ def initialize():
modules.scripts.load_scripts()
modules.sd_models.load_model()
shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: modules.sd_models.reload_model_weights(shared.sd_model)))
shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: modules.sd_models.reload_model_weights()))
shared.opts.onchange("sd_hypernetwork", wrap_queued_call(lambda: modules.hypernetworks.hypernetwork.load_hypernetwork(shared.opts.sd_hypernetwork)))
shared.opts.onchange("sd_hypernetwork_strength", modules.hypernetworks.hypernetwork.apply_strength)
@ -140,6 +141,8 @@ def webui():
if launch_api:
create_api(app)
modules.script_callbacks.app_started_callback(demo, app)
wait_on_server(demo)
sd_samplers.set_samplers()