Merge branch 'AUTOMATIC1111:master' into master

This commit is contained in:
random-thoughtss 2022-10-27 11:19:12 -07:00 committed by GitHub
commit f3f2ffd448
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
20 changed files with 1942 additions and 95 deletions

View File

@ -44,7 +44,7 @@ body:
id: commit id: commit
attributes: attributes:
label: Commit where the problem happens label: Commit where the problem happens
description: Which commit are you running ? (copy the **Commit hash** shown in the cmd/terminal when you launch the UI) description: Which commit are you running ? (Do not write *Latest version/repo/commit*, as this means nothing and will have changed by the time we read your issue. Rather, copy the **Commit hash** shown in the cmd/terminal when you launch the UI)
validations: validations:
required: true required: true
- type: dropdown - type: dropdown

415
localizations/fr-FR.json Normal file
View File

@ -0,0 +1,415 @@
{
"⤡": "⤡",
"⊞": "⊞",
"×": "×",
"": "",
"": "",
"Loading...": "Chargement...",
"view": "vue",
"api": "api",
"•": "•",
"built with gradio": "Construit avec Gradio",
"Stable Diffusion checkpoint": "checkpoint Stable Diffusion",
"txt2img": "txt2img",
"img2img": "img2img",
"Extras": "Extras",
"PNG Info": "Infos PNG",
"History": "Historique",
"Checkpoint Merger": "Fusion de checkpoints",
"Train": "Entrainer",
"Settings": "Paramètres",
"Prompt": "Requête",
"Negative prompt": "Requête négative",
"Run": "Lancer",
"Skip": "Passer",
"Interrupt": "Interrompre",
"Generate": "Générer",
"Style 1": "Style 1",
"Style 2": "Style 2",
"Label": "Etiquette",
"File": "Fichier",
"Drop File Here": "Déposer votre fichier ici",
"-": "-",
"or": "ou",
"Click to Upload": "Cliquer pour uploader",
"Image": "Image",
"Check progress": "Voir l'avancement",
"Check progress (first)": "Voir l'avancement (1er)",
"Sampling Steps": "Étapes d'échantillonnage",
"Sampling method": "Méthode d'échantillonnage",
"Euler a": "Euler a",
"Euler": "Euler",
"LMS": "LMS",
"Heun": "Heun",
"DPM2": "DPM2",
"DPM2 a": "DPM2 a",
"DPM fast": "DPM fast",
"DPM adaptive": "DPM adaptive",
"LMS Karras": "LMS Karras",
"DPM2 Karras": "DPM2 Karras",
"DPM2 a Karras": "DPM2 a Karras",
"DDIM": "DDIM",
"PLMS": "PLMS",
"Width": "Largeur",
"Height": "Hauteur",
"Restore faces": "Restaurer les visages",
"Tiling": "Mode Tuile",
"Highres. fix": "Correction haute résolution",
"Firstpass width": "Largeur première passe",
"Firstpass height": "Hauteur première passe",
"Denoising strength": "Puissance de réduction du bruit",
"Batch count": "Nombre de lots",
"Batch size": "Taille de lots",
"CFG Scale": "Echelle CFG",
"Seed": "Valeur aléatoire",
"Extra": "Extra",
"Variation seed": "Variation de la valeur aléatoire",
"Variation strength": "Puissance de variation",
"Resize seed from width": "Largeur de redimensionnement de la valeur aléatoire",
"Resize seed from height": "Hauteur de redimensionnement de la valeur aléatoire",
"Script": "Script",
"None": "Aucun",
"Prompt matrix": "Matrice de requête",
"Prompts from file or textbox": "Requêtes depuis un fichier ou une boîte de dialogue",
"X/Y plot": "graphe X/Y",
"Put variable parts at start of prompt": "Mettre les mots clés variable au début de la requête",
"Show Textbox": "Afficher le champs texte",
"File with inputs": "Fichier d'entrée",
"Prompts": "Requêtes",
"X type": "Paramètre axe X",
"Nothing": "Rien",
"Var. seed": "Valeur aléatoire variable",
"Var. strength": "Puissance variable",
"Steps": "Étapes",
"Prompt S/R": "Cherche et remplace dans la requête",
"Prompt order": "Ordre de la requête",
"Sampler": "Echantilloneur",
"Checkpoint name": "Nom du checkpoint",
"Hypernetwork": "Hypernetwork",
"Hypernet str.": "Force de l'Hypernetwork",
"Sigma Churn": "Sigma Churn",
"Sigma min": "Sigma min.",
"Sigma max": "Sigma max.",
"Sigma noise": "Bruit Sigma",
"Eta": "Temps estimé",
"Clip skip": "Passer Clip",
"Denoising": "Réduction du bruit",
"X values": "Valeurs X",
"Y type": "Paramètre axe Y",
"Y values": "Valeurs Y",
"Draw legend": "Afficher la légende",
"Include Separate Images": "Inclure les images séparées",
"Keep -1 for seeds": "Conserver -1 pour la valeur aléatoire",
"Drop Image Here": "Déposer l'image ici",
"Save": "Enregistrer",
"Send to img2img": "Envoyer vers img2img",
"Send to inpaint": "Envoyer vers inpaint",
"Send to extras": "Envoyer vers extras",
"Make Zip when Save?": "Créer un zip lors de l'enregistrement?",
"Textbox": "Champ texte",
"Interrogate\nCLIP": "Interroger\nCLIP",
"Interrogate\nDeepBooru": "Interroger\nDeepBooru",
"Inpaint": "Inpaint",
"Batch img2img": "Lot img2img",
"Image for img2img": "Image pour img2img",
"Image for inpainting with mask": "Image pour inpainting avec masque",
"Mask": "Masque",
"Mask blur": "Flou masque",
"Mask mode": "Mode masque",
"Draw mask": "Dessiner masque",
"Upload mask": "Uploader masque",
"Masking mode": "Mode de masquage",
"Inpaint masked": "Inpaint masqué",
"Inpaint not masked": "Inpaint non masqué",
"Masked content": "Contenu masqué",
"fill": "remplir",
"original": "original",
"latent noise": "bruit latent",
"latent nothing": "latent vide",
"Inpaint at full resolution": "Inpaint en pleine résolution",
"Inpaint at full resolution padding, pixels": "Padding de l'inpaint en pleine résolution, en pixels",
"Process images in a directory on the same machine where the server is running.": "Traite les images dans un dossier sur la même machine où le serveur tourne",
"Use an empty output directory to save pictures normally instead of writing to the output directory.": "Utiliser un dossier de sortie vide pour enregistrer les images normalement plutôt que d'écrire dans le dossier de sortie",
"Input directory": "Dossier d'entrée",
"Output directory": "Dossier de sortie",
"Resize mode": "Mode redimensionnement",
"Just resize": "Redimensionner uniquement",
"Crop and resize": "Recadrer et redimensionner",
"Resize and fill": "Redimensionner et remplir",
"img2img alternative test": "Test alternatif img2img",
"Loopback": "Bouclage",
"Outpainting mk2": "Outpainting v2",
"Poor man's outpainting": "Outpainting du pauvre",
"SD upscale": "Agrandissement SD",
"should be 2 or lower.": "doit être inférieur ou égal à 2",
"Override `Sampling method` to Euler?(this method is built for it)": "Forcer `Méthode d'échantillonnage` à Euler ? (cette méthode est dédiée à cela)",
"Override `prompt` to the same value as `original prompt`?(and `negative prompt`)": "Forcer la `requête` au contenu de la `requête d'origine` ? (de même pour la `requête négative`)",
"Original prompt": "Requête d'origine",
"Original negative prompt": "Requête négative d'origine",
"Override `Sampling Steps` to the same value as `Decode steps`?": "Forcer le valeur d'`Étapes d'échantillonnage` à la même valeur qu'`Étapes de décodage` ?",
"Decode steps": "Étapes de décodage",
"Override `Denoising strength` to 1?": "Forcer `Puissance de réduction du bruit` à 1 ?",
"Decode CFG scale": "Echelle CFG de décodage",
"Randomness": "Aléatoire",
"Sigma adjustment for finding noise for image": "Ajustement Sigma lors de la recherche du bruit dans l'image",
"Loops": "Boucles",
"Denoising strength change factor": "Facteur de changement de la puissance de réduction du bruit",
"Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8": "Paramètres recommandés : Étapes d'échantillonnage : 80-100, Echantillonneur : Euler a, Puissance de réduction du bruit : 0.8",
"Pixels to expand": "Pixels à étendre",
"Outpainting direction": "Direction de l'outpainting",
"left": "gauche",
"right": "droite",
"up": "haut",
"down": "bas",
"Fall-off exponent (lower=higher detail)": "Exposant de diminution (plus petit = plus de détails)",
"Color variation": "Variation de couleur",
"Will upscale the image to twice the dimensions; use width and height sliders to set tile size": "Agrandira l'image à deux fois sa taille; utilisez les glissières largeur et hauteur afin de choisir la taille de tuile",
"Tile overlap": "Chevauchement de tuile",
"Upscaler": "Agrandisseur",
"Lanczos": "Lanczos",
"LDSR": "LDSR",
"BSRGAN 4x": "BSRGAN 4x",
"ESRGAN_4x": "ESRGAN_4x",
"R-ESRGAN 4x+ Anime6B": "R-ESRGAN 4x+ Anime6B",
"ScuNET GAN": "ScuNET GAN",
"ScuNET PSNR": "ScuNET PSNR",
"SwinIR 4x": "SwinIR 4x",
"Single Image": "Image unique",
"Batch Process": "Traitement par lot",
"Batch from Directory": "Lot depuis un dossier",
"Source": "Source",
"Show result images": "Montrez les images résultantes",
"Scale by": "Mise à l'échelle de",
"Scale to": "Mise à l'échelle à",
"Resize": "Redimensionner",
"Crop to fit": "Recadrer à la taille",
"Upscaler 2": "Agrandisseur 2",
"Upscaler 2 visibility": "Visibilité de l'agrandisseur 2",
"GFPGAN visibility": "Visibilité GFPGAN",
"CodeFormer visibility": "Visibilité CodeFormer",
"CodeFormer weight (0 = maximum effect, 1 = minimum effect)": "Poids CodeFormer (0 = effet maximum, 1 = effet minimum)",
"Open output directory": "Ouvrir le dossier de sortie",
"Send to txt2img": "Envoyer vers txt2img",
"txt2img history": "historique txt2img",
"img2img history": "historique img2img",
"extras history": "historique extras",
"Renew Page": "Rafraîchir la page",
"First Page": "Première page",
"Prev Page": "Page précédente",
"Page Index": "Index des pages",
"Next Page": "Page suivante",
"End Page": "Page de fin",
"number of images to delete consecutively next": "nombre d'images à supprimer consécutivement ensuite",
"Delete": "Supprimer",
"Generate Info": "Générer les informations",
"File Name": "Nom de fichier",
"set_index": "set_index",
"A merger of the two checkpoints will be generated in your": "Une fusion des deux checkpoints sera générée dans votre",
"checkpoint": "checkpoint",
"directory.": "dossier",
"Primary model (A)": "Modèle primaire (A)",
"Secondary model (B)": "Modèle secondaire (B)",
"Tertiary model (C)": "Modèle tertiaire (C)",
"Custom Name (Optional)": "Nom personnalisé (Optionnel)",
"Multiplier (M) - set to 0 to get model A": "Multiplieur (M) - utiliser 0 pour le modèle A",
"Interpolation Method": "Méthode d'interpolation",
"Weighted sum": "Somme pondérée",
"Add difference": "Ajouter différence",
"Save as float16": "Enregistrer en tant que float16",
"See": "Voir",
"wiki": "wiki",
"for detailed explanation.": "pour une explication détaillée.",
"Create embedding": "Créer un embedding",
"Create hypernetwork": "Créer un hypernetwork",
"Preprocess images": "Pré-traite les images",
"Name": "Nom",
"Initialization text": "Texte d'initialisation",
"Number of vectors per token": "Nombre de vecteurs par jeton",
"Modules": "Modules",
"Source directory": "Dossier source",
"Destination directory": "Dossier destination",
"Create flipped copies": "Créer des copies en miroir",
"Split oversized images into two": "Couper les images trop grandes en deux",
"Use BLIP for caption": "Utiliser BLIP pour les descriptions",
"Use deepbooru for caption": "Utiliser deepbooru pour les descriptions",
"Preprocess": "Pré-traite",
"Train an embedding; must specify a directory with a set of 1:1 ratio images": "Entrainer un embedding ; spécifiez un dossier contenant un ensemble d'images avec un ratio de 1:1",
"Embedding": "Embedding",
"Learning rate": "Vitesse d'apprentissage",
"Dataset directory": "Dossier des images d'entrée",
"Log directory": "Dossier de journalisation",
"Prompt template file": "Fichier modèle de requêtes",
"Max steps": "Étapes max.",
"Save an image to log directory every N steps, 0 to disable": "Enregistrer une image dans le dossier de journalisation toutes les N étapes, 0 pour désactiver",
"Save a copy of embedding to log directory every N steps, 0 to disable": "Enregistrer une copie de l'embedding dans le dossier de journalisation toutes les N étapes, 0 pour désactiver",
"Save images with embedding in PNG chunks": "Sauvegarder les images incluant l'embedding dans leur blocs PNG",
"Read parameters (prompt, etc...) from txt2img tab when making previews": "Lire les paramètres (requête, etc.) depuis l'onglet txt2img lors de la génération des previews",
"Train Hypernetwork": "Entrainer un Hypernetwork",
"Train Embedding": "Entrainer un Embedding",
"Apply settings": "Appliquer les paramètres",
"Saving images/grids": "Enregistrer les images/grilles",
"Always save all generated images": "Toujours enregistrer toutes les images",
"File format for images": "Format de fichier pour les images",
"Images filename pattern": "Motif pour le nom de fichier des images",
"Always save all generated image grids": "Toujours enregistrer toutes les grilles d'images générées",
"File format for grids": "Format de fichier pour les grilles",
"Add extended info (seed, prompt) to filename when saving grid": "Ajouter les informations étendues (valeur aléatoire, requête) aux noms de fichiers lors de l'enregistrement d'une grille",
"Do not save grids consisting of one picture": "Ne pas enregistrer les grilles contenant une seule image",
"Prevent empty spots in grid (when set to autodetect)": "Eviter les vides dans la grille (quand autodétection est choisie)",
"Grid row count; use -1 for autodetect and 0 for it to be same as batch size": "Nombre de lignes de la grille; utilisez -1 pour autodétection et 0 pour qu'il soit égal à la taille du lot",
"Save text information about generation parameters as chunks to png files": "Enregistrer l'information du text des paramètres de génération en tant que blocs dans les fichiers PNG",
"Create a text file next to every image with generation parameters.": "Créer un fichier texte contenant les paramètres de génération à côté de chaque image",
"Save a copy of image before doing face restoration.": "Enregistrer une copie de l'image avant de lancer la restauration de visage",
"Quality for saved jpeg images": "Qualité pour les images jpeg enregistrées",
"If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG": "Si l'image PNG est plus grande que 4MB ou l'une de ses dimensions supérieure à 4000, réduire sa taille et enregistrer une copie en JPG",
"Use original name for output filename during batch process in extras tab": "Utiliser un nom de fichier original pour les fichiers de sortie durant le traitement par lot dans l'onglet Extras",
"When using 'Save' button, only save a single selected image": "A l'utilisation du bouton `Enregistrer`, n'enregistrer que l'image séléctionnée",
"Do not add watermark to images": "Ne pas ajouter de filigrane aux images",
"Paths for saving": "Chemins pour l'enregistrement",
"Output directory for images; if empty, defaults to three directories below": "Dossier de sortie pour les images; si non spécifié, le chemin par défaut sera trois niveaux en dessous",
"Output directory for txt2img images": "Dossier de sortie pour les images txt2img",
"Output directory for img2img images": "Dossier de sortie pour les images img2img",
"Output directory for images from extras tab": "Dossier de sortie pour les images de l'onglet Extras",
"Output directory for grids; if empty, defaults to two directories below": "Dossier de sortie pour les grilles; si non spécifié, le chemin par défaut sera deux niveaux en dessous",
"Output directory for txt2img grids": "Dossier de sortie pour les grilles txt2img",
"Output directory for img2img grids": "Dossier de sortie pour les grilles img2img",
"Directory for saving images using the Save button": "Dossier de sauvegarde des images pour le bouton `Enregistrer`",
"Saving to a directory": "Enregistrer dans un dossier",
"Save images to a subdirectory": "Enregistrer les images dans un sous dossier",
"Save grids to a subdirectory": "Enregistrer les grilles dans un sous dossier",
"When using \"Save\" button, save images to a subdirectory": "Lors de l'utilisation du bouton \"Enregistrer\", sauvegarder les images dans un sous dossier",
"Directory name pattern": "Motif pour le nom des dossiers",
"Max prompt words for [prompt_words] pattern": "Maximum de mots pour le motif [prompt_words]",
"Upscaling": "Agrandissement",
"Tile size for ESRGAN upscalers. 0 = no tiling.": "Taille des tuiles pour les agrandisseurs ESRGAN. 0 = mode tuile désactivé.",
"Tile overlap, in pixels for ESRGAN upscalers. Low values = visible seam.": "Chevauchement des tuiles, en pixel pour l'agrandisseur ESRGAN. Valeur faible = couture visible",
"Tile size for all SwinIR.": "Taille de la tuile pour tous les agrandisseur SwinIR.",
"Tile overlap, in pixels for SwinIR. Low values = visible seam.": "Chevauchement de tuile, en pixels pour SwinIR. Valeur faible = couture visible",
"LDSR processing steps. Lower = faster": "Echantillon du traitement LDSR. Valeur faible = plus rapide",
"Upscaler for img2img": "Agrandisseur pour img2img",
"Upscale latent space image when doing hires. fix": "Agrandir l'image de l'espace latent lors de la correction haute résolution",
"Face restoration": "Restauration de visage",
"CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect": "Paramètre de poids pour CodeFormer; 0 = effet maximum 1 = effet minimum",
"Move face restoration model from VRAM into RAM after processing": "Déplacer le modèle de restauration de visage de la VRAM vers la RAM après traitement",
"System": "Système",
"VRAM usage polls per second during generation. Set to 0 to disable.": "Fréquence d'interrogation par seconde pendant la génération. Mettez la valeur à 0 pour désactiver.",
"Always print all generation info to standard output": "Toujours afficher toutes les informations de génération dans la sortie standard",
"Add a second progress bar to the console that shows progress for an entire job.": "Ajouter un seconde barre de progression dans la console montrant l'avancement pour un tâche complète.",
"Training": "Entrainement",
"Unload VAE and CLIP from VRAM when training": "Décharger VAE et CLIP de la VRAM pendant l'entrainement",
"Filename word regex": "Regex de mot",
"Filename join string": "Chaine de caractère pour lier les noms de fichier",
"Number of repeats for a single input image per epoch; used only for displaying epoch number": "Nombre de répétition pour une image unique par époque; utilisé seulement pour afficher le nombre d'époques",
"Save an csv containing the loss to log directory every N steps, 0 to disable": "Enregistrer un csv contenant la perte dans le dossier de journalisation toutes les N étapes, 0 pour désactiver",
"Stable Diffusion": "Stable Diffusion",
"Checkpoints to cache in RAM": "Checkpoint à mettre en cache dans la RAM",
"Hypernetwork strength": "Force de l'Hypernetwork",
"Apply color correction to img2img results to match original colors.": "Appliquer une correction de couleur aux résultats img2img afin de conserver les couleurs d'origine",
"Save a copy of image before applying color correction to img2img results": "Enregistrer une copie de l'image avant d'appliquer les résultats de la correction de couleur img2img",
"With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising).": "Avec img2img, executer exactement le nombre d'étapes spécifiées par la glissière (normalement moins d'étapes sont executées quand la réduction du bruit est plus faible).",
"Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply.": "Activer la quantisation des échantillionneurs K pour des résultats plus nets et plus propres. Cela peut modifier les valeurs aléatoires existantes. Requiert un redémarrage pour être actif.",
"Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention": "Emphase : utilisez (texte) afin de forcer le modèle à porter plus d'attention au texte et [texte] afin qu'il y porte moins attention",
"Use old emphasis implementation. Can be useful to reproduce old seeds.": "Utilisez l'ancienne méthode d'emphase. Peut être utile afin de reproduire d'anciennes valeurs aléatoires.",
"Make K-diffusion samplers produce same images in a batch as when making a single image": "Demander aux échantillionneurs K-diffusion de produire les mêmes images dans un lot que lors de la génération d'une image unique",
"Increase coherency by padding from the last comma within n tokens when using more than 75 tokens": "Améliorer la cohérence en remplissant (padding) à partir de la dernière virgule dans les X jetons quand on en utilise plus de 75",
"Filter NSFW content": "Filtrer le contenu +18 (NSFW)",
"Stop At last layers of CLIP model": "S'arrêter aux derniers niveaux du modèle CLIP",
"Interrogate Options": "Options d'interrogation",
"Interrogate: keep models in VRAM": "Interroger : conserver les modèles en VRAM",
"Interrogate: use artists from artists.csv": "Interroger : utiliser les artistes dans artists.csv",
"Interrogate: include ranks of model tags matches in results (Has no effect on caption-based interrogators).": "Interroger : inclure la correspondance du classement des labels de modèle dans les résultats (N'a pas d'effet sur les interrogateurs basés sur des descriptions) ",
"Interrogate: num_beams for BLIP": "Interroger : num_beams pour BLIP",
"Interrogate: minimum description length (excluding artists, etc..)": "Interroger : longueur minimale de la description (excluant les artistes, etc.)",
"Interrogate: maximum description length": "Interroger : longueur maximale de la description",
"CLIP: maximum number of lines in text file (0 = No limit)": "CLIP : nombre maximum de lignes dans le fichier texte (0 = pas de limite)",
"Interrogate: deepbooru score threshold": "Interroger : seuil du score deepbooru",
"Interrogate: deepbooru sort alphabetically": "Interroger : classement alphabétique deepbooru",
"use spaces for tags in deepbooru": "Utiliser des espaces pour les étiquettes dans deepbooru",
"escape (\\) brackets in deepbooru (so they are used as literal brackets and not for emphasis)": "échapper (\\) les crochets dans deepbooru (afin qu'ils puissent être utilisés littéralement et non pour mettre en emphase)",
"User interface": "Interface utilisateur",
"Show progressbar": "Afficher la barre de progression",
"Show image creation progress every N sampling steps. Set 0 to disable.": "Afficher l'état d'avancement de la création d'image toutes les X étapes d'échantillionnage. Utiliser 0 pour désactiver.",
"Show grid in results for web": "Afficher la grille dans les résultats web",
"Do not show any images in results for web": "N'afficher aucune image dans les résultats web",
"Add model hash to generation information": "Ajouter le hash du modèle dans l'information de génération",
"Add model name to generation information": "Ajouter le nom du modèle dans l'information de génération",
"Font for image grids that have text": "Police pour les grilles d'images contenant du texte",
"Enable full page image viewer": "Activer l'affichage des images en plein écran",
"Show images zoomed in by default in full page image viewer": "Afficher les images zoomées par défaut lors de l'affichage en plein écran",
"Show generation progress in window title.": "Afficher l'avancement de la génération dans le titre de la fenêtre.",
"Quicksettings list": "Liste de réglages rapides",
"Localization (requires restart)": "Localisation (requiert un redémarrage)",
"Sampler parameters": "Paramètres de l'échantillionneur",
"Hide samplers in user interface (requires restart)": "Cacher les échantillonneurs dans l'interface utilisateur (requiert un redémarrage)",
"eta (noise multiplier) for DDIM": "eta (multiplicateur de bruit) pour DDIM",
"eta (noise multiplier) for ancestral samplers": "eta (multiplicateur de bruit) pour les échantillionneurs de type 'ancestral'",
"img2img DDIM discretize": "Discrétisation DDIM pour img2img",
"uniform": "uniforme",
"quad": "quad",
"sigma churn": "sigma churn",
"sigma tmin": "sigma tmin",
"sigma noise": "sigma noise",
"Eta noise seed delta": "Eta noise seed delta",
"Request browser notifications": "Demander les notifications au navigateur",
"Download localization template": "Télécharger le modèle de localisation",
"Reload custom script bodies (No ui updates, No restart)": "Recharger le contenu des scripts personnalisés (Pas de mise à jour de l'interface, Pas de redémarrage)",
"Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)": "Redémarrer Gradio et rafraichir les composants (Scripts personnalisés, ui.py, js et css uniquement)",
"Prompt (press Ctrl+Enter or Alt+Enter to generate)": "Requête (Ctrl + Entrée ou Alt + Entrée pour générer)",
"Negative prompt (press Ctrl+Enter or Alt+Enter to generate)": "Requête négative (Ctrl + Entrée ou Alt + Entrée pour générer)",
"Add a random artist to the prompt.": "Ajouter un artiste aléatoire à la requête",
"Read generation parameters from prompt or last generation if prompt is empty into user interface.": "Lire les paramètres de génération depuis la requête, ou depuis la dernière génération si la requête est vide dans l'interface utilisateur.",
"Save style": "Sauvegarder le style",
"Apply selected styles to current prompt": "Appliquer les styles séléctionnés à la requête actuelle",
"Stop processing current image and continue processing.": "Arrêter le traitement de l'image actuelle et continuer le traitement.",
"Stop processing images and return any results accumulated so far.": "Arrêter le traitement des images et retourne les résultats accumulés depuis le début.",
"Style to apply; styles have components for both positive and negative prompts and apply to both": "Style à appliquer ; les styles sont composés de requêtes positives et négatives et s'appliquent au deux",
"Do not do anything special": "Ne rien faire de particulier",
"Which algorithm to use to produce the image": "Quel algorithme utiliser pour produire l'image",
"Euler Ancestral - very creative, each can get a completely different picture depending on step count, setting steps to higher than 30-40 does not help": "Euler Ancestral - très créatif, peut générer des images complètement différentes en fonction du nombre d'étapes, utiliser plus de 30 à 40 étapes n'améliore pas le résultat",
"Denoising Diffusion Implicit Models - best at inpainting": "Modèles implicite de réduction du bruit à diffusion - utile pour l'inpainting",
"Produce an image that can be tiled.": "Produit une image qui peut être bouclée (tuile).",
"Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition": "Utilise un processus en deux étapes afin de créer partiellement une image dans une résolution plus faible, l'agrandir et améliorer ses détails sans modifier la composition",
"Determines how little respect the algorithm should have for image's content. At 0, nothing will change, and at 1 you'll get an unrelated image. With values below 1.0, processing will take less steps than the Sampling Steps slider specifies.": "Détermine à quel point l'algorithme doit respecter le contenu de l'image. A 0 rien ne changera, à 1 l'image sera entièrement différente. Avec des valeurs inférieures à 1.0 le traitement utilisera moins d'étapes que ce que la glissière Étapes d'échantillionnage spécifie. ",
"How many batches of images to create": "Combien de lots d'images créer",
"How many image to create in a single batch": "Combien d'images créer par lot",
"Classifier Free Guidance Scale - how strongly the image should conform to prompt - lower values produce more creative results": "Classifier Free Guidance Scale - spécifie à quel point l'image doit se conformer à la requête - des valeurs plus faibles produisent des résultats plus créatifs",
"A value that determines the output of random number generator - if you create an image with same parameters and seed as another image, you'll get the same result": "Une valeur qui détermine la sortie du générateur de nombres aléatoires - si vous créez une image avec les mêmes paramètres et valeur aléatoire qu'une autre, le résultat sera identique",
"Set seed to -1, which will cause a new random number to be used every time": "Passer la valeur aléatoire à -1, cela causera qu'un nombre aléatoire différent sera utilisé à chaque fois",
"Reuse seed from last generation, mostly useful if it was randomed": "Réutiliser la valeur aléatoire de la dernière génération, généralement utile uniquement si elle était randomisée",
"Seed of a different picture to be mixed into the generation.": "Valeur aléatoire d'une image différente à mélanger dans la génération",
"How strong of a variation to produce. At 0, there will be no effect. At 1, you will get the complete picture with variation seed (except for ancestral samplers, where you will just get something).": "Force de la variation à produire. A 0 il n'y aura pas d'effet. A 1 l'image sera composée uniquement de la valeur aléatoire variable spécifiée (à l'exception des échantillionneurs `ancestral`)",
"Make an attempt to produce a picture similar to what would have been produced with same seed at specified resolution": "Essayer de produire une image similaire à ce qu'elle aurait été avec la même valeur aléatoire, mais dans la résolution spécifiée",
"Separate values for X axis using commas.": "Séparer les valeurs pour l'axe X par des virgules",
"Separate values for Y axis using commas.": "Séparer les valeurs pour l'axe Y par des virgules",
"Write image to a directory (default - log/images) and generation parameters into csv file.": "Ecrire l'image dans un dossier (par défaut - log/images) et les paramètres de génération dans un fichier csv.",
"Open images output directory": "Ouvrir le dossier de sortie des images",
"How much to blur the mask before processing, in pixels.": "Quantité de flou à appliquer au masque avant traitement, en pixels",
"What to put inside the masked area before processing it with Stable Diffusion.": "Avec quoi remplir la zone masquée avant traitement par Stable Diffusion.",
"fill it with colors of the image": "remplir avec les couleurs de l'image",
"keep whatever was there originally": "conserver ce qui était présent à l'origine",
"fill it with latent space noise": "remplir avec le bruit de l'espace latent",
"fill it with latent space zeroes": "remplir avec des zéros dans l'espace latent",
"Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "Agrandir la région masquées à la résolution cible, exécuter l'inpainting, réduire à nouveau puis coller dans l'image originale",
"Resize image to target resolution. Unless height and width match, you will get incorrect aspect ratio.": "Redimensionner l'image dans la résolution cible. A moins que la hauteur et la largeur coincident le ratio de l'image sera incorrect.",
"Resize the image so that entirety of target resolution is filled with the image. Crop parts that stick out.": "Redimensionner l'image afin que l'entièreté de la résolution cible soit remplie par l'image. Recadrer les parties qui dépassent.",
"Resize the image so that entirety of image is inside target resolution. Fill empty space with image's colors.": "Redimensionner l'image afin que l'entièreté de l'image soit contenue dans la résolution cible. Remplir l'espace vide avec les couleurs de l'image.",
"How many times to repeat processing an image and using it as input for the next iteration": "Combien de fois répéter le traitement d'une image et l'utiliser comme entrée pour la prochaine itération",
"In loopback mode, on each loop the denoising strength is multiplied by this value. <1 means decreasing variety so your sequence will converge on a fixed picture. >1 means increasing variety so your sequence will become more and more chaotic.": "En mode bouclage (Loopback), à chaque tour de la boucle la force du réducteur de bruit est multipliée par cette valeur. <1 signifie réduire la variation donc votre séquence convergera vers une image fixe. >1 signifie augmenter la variation donc votre séquence deviendra de plus en plus chaotique. ",
"For SD upscale, how much overlap in pixels should there be between tiles. Tiles overlap so that when they are merged back into one picture, there is no clearly visible seam.": "Pour l'agrandissement SD, de combien les tuiles doivent se chevaucher, en pixels. Les tuiles se chevauchent de manière à ce qu'il n'y ait pas de couture visible une fois fusionnées en une image. ",
"A directory on the same machine where the server is running.": "Un dossier sur la même machine où le serveur tourne.",
"Leave blank to save images to the default path.": "Laisser vide pour sauvegarder les images dans le chemin par défaut.",
"Result = A * (1 - M) + B * M": "Résultat = A * (1 - M) + B * M",
"Result = A + (B - C) * M": "Résultat = A + (B - C) * M",
"Path to directory with input images": "Chemin vers le dossier contenant les images d'entrée",
"Path to directory where to write outputs": "Chemin vers le dossier où écrire les sorties",
"Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; leave empty for default.": "Utiliser les étiquettes suivantes pour définir le nom des images : [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp] ; laisser vide pour le nom par défaut.",
"If this option is enabled, watermark will not be added to created images. Warning: if you do not add watermark, you may be behaving in an unethical manner.": "Si cette option est activée le filigrane ne sera pas ajouté au images crées. Attention : si vous n'ajoutez pas de filigrane vous pourriez vous comporter de manière non éthique.",
"Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; leave empty for default.": "Utiliser les étiquettes suivantes pour définir le nom des sous dossiers pour les images et les grilles : [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp] ; laisser vide pour le nom par défaut.",
"Restore low quality faces using GFPGAN neural network": "Restaurer les visages de basse qualité en utilisant le réseau neuronal GFPGAN",
"This regular expression will be used extract words from filename, and they will be joined using the option below into label text used for training. Leave empty to keep filename text as it is.": "Cette expression régulière sera utilisée pour extraire les mots depuis le nom de fichier ; ils seront joints en utilisant l'option ci dessous en une étiquette utilisée pour l'entrainement. Laisser vide pour conserver le texte du nom de fichier tel quel.",
"This string will be used to join split words into a single line if the option above is enabled.": "Cette chaine de caractères sera utilisée pour joindre les mots séparés en une ligne unique si l'option ci dessus est activée.",
"List of setting names, separated by commas, for settings that should go to the quick access bar at the top, rather than the usual setting tab. See modules/shared.py for setting names. Requires restarting to apply.": "Liste des noms de paramètres, séparés par des virgules, pour les paramètres de la barre d'accès rapide en haut de page, plutôt que dans la page habituelle des paramètres. Voir modules/shared.py pour définir les noms. Requiert un redémarrage pour s'appliquer.",
"If this values is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.": "Si cette valeur est différente de zéro elle sera ajoutée à la valeur aléatoire et utilisée pour initialiser le générateur de nombres aléatoires du bruit lors de l'utilisation des échantillonneurs supportants Eta. Vous pouvez l'utiliser pour produire encore plus de variation dans les images, ou vous pouvez utiliser ceci pour faire correspondre les images avec d'autres logiciels si vous savez ce que vous faites.",
"Enable Autocomplete": "Activer l'autocomplétion",
"/0.0": "/0.0"
}

View File

@ -21,6 +21,7 @@
"Add layer normalization": "레이어 정규화(normalization) 추가", "Add layer normalization": "레이어 정규화(normalization) 추가",
"Add model hash to generation information": "생성 정보에 모델 해시 추가", "Add model hash to generation information": "생성 정보에 모델 해시 추가",
"Add model name to generation information": "생성 정보에 모델 이름 추가", "Add model name to generation information": "생성 정보에 모델 이름 추가",
"Add number to filename when saving": "이미지를 저장할 때 파일명에 숫자 추가하기",
"Aesthetic imgs embedding": "스타일 이미지 임베딩", "Aesthetic imgs embedding": "스타일 이미지 임베딩",
"Aesthetic learning rate": "스타일 학습 수", "Aesthetic learning rate": "스타일 학습 수",
"Aesthetic steps": "스타일 스텝 수", "Aesthetic steps": "스타일 스텝 수",
@ -35,6 +36,7 @@
"Apply color correction to img2img results to match original colors.": "이미지→이미지 결과물이 기존 색상과 일치하도록 색상 보정 적용하기", "Apply color correction to img2img results to match original colors.": "이미지→이미지 결과물이 기존 색상과 일치하도록 색상 보정 적용하기",
"Apply selected styles to current prompt": "현재 프롬프트에 선택된 스타일 적용", "Apply selected styles to current prompt": "현재 프롬프트에 선택된 스타일 적용",
"Apply settings": "설정 적용하기", "Apply settings": "설정 적용하기",
"Auto focal point crop": "초점 기준 크롭(자동 감지)",
"Batch count": "배치 수", "Batch count": "배치 수",
"Batch from Directory": "저장 경로로부터 여러장 처리", "Batch from Directory": "저장 경로로부터 여러장 처리",
"Batch img2img": "이미지→이미지 배치", "Batch img2img": "이미지→이미지 배치",
@ -66,12 +68,14 @@
"Create a grid where images will have different parameters. Use inputs below to specify which parameters will be shared by columns and rows": "서로 다른 설정값으로 생성된 이미지의 그리드를 만듭니다. 아래의 설정으로 가로/세로에 어떤 설정값을 적용할지 선택하세요.", "Create a grid where images will have different parameters. Use inputs below to specify which parameters will be shared by columns and rows": "서로 다른 설정값으로 생성된 이미지의 그리드를 만듭니다. 아래의 설정으로 가로/세로에 어떤 설정값을 적용할지 선택하세요.",
"Create a text file next to every image with generation parameters.": "생성된 이미지마다 생성 설정값을 담은 텍스트 파일 생성하기", "Create a text file next to every image with generation parameters.": "생성된 이미지마다 생성 설정값을 담은 텍스트 파일 생성하기",
"Create aesthetic images embedding": "스타일 이미지 임베딩 생성하기", "Create aesthetic images embedding": "스타일 이미지 임베딩 생성하기",
"Create debug image": "디버그 이미지 생성",
"Create embedding": "임베딩 생성", "Create embedding": "임베딩 생성",
"Create flipped copies": "좌우로 뒤집은 복사본 생성", "Create flipped copies": "좌우로 뒤집은 복사본 생성",
"Create hypernetwork": "하이퍼네트워크 생성", "Create hypernetwork": "하이퍼네트워크 생성",
"Create images embedding": "이미지 임베딩 생성하기", "Create images embedding": "이미지 임베딩 생성하기",
"Crop and resize": "잘라낸 후 리사이징", "Crop and resize": "잘라낸 후 리사이징",
"Crop to fit": "잘라내서 맞추기", "Crop to fit": "잘라내서 맞추기",
"custom fold": "커스텀 경로",
"Custom Name (Optional)": "병합 모델 이름 (선택사항)", "Custom Name (Optional)": "병합 모델 이름 (선택사항)",
"Dataset directory": "데이터셋 경로", "Dataset directory": "데이터셋 경로",
"DDIM": "DDIM", "DDIM": "DDIM",
@ -107,6 +111,7 @@
"Embedding": "임베딩", "Embedding": "임베딩",
"Embedding Learning rate": "임베딩 학습률", "Embedding Learning rate": "임베딩 학습률",
"Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention": "강조 : (텍스트)를 이용해 모델의 텍스트에 대한 가중치를 더 강하게 주고 [텍스트]를 이용해 더 약하게 줍니다.", "Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention": "강조 : (텍스트)를 이용해 모델의 텍스트에 대한 가중치를 더 강하게 주고 [텍스트]를 이용해 더 약하게 줍니다.",
"Enable Autocomplete": "태그 자동완성 사용",
"Enable full page image viewer": "전체 페이지 이미지 뷰어 활성화", "Enable full page image viewer": "전체 페이지 이미지 뷰어 활성화",
"Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply.": "더 예리하고 깔끔한 결과물을 위해 K 샘플러들에 양자화를 적용합니다. 존재하는 시드가 변경될 수 있습니다. 재시작이 필요합니다.", "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply.": "더 예리하고 깔끔한 결과물을 위해 K 샘플러들에 양자화를 적용합니다. 존재하는 시드가 변경될 수 있습니다. 재시작이 필요합니다.",
"End Page": "마지막 페이지", "End Page": "마지막 페이지",
@ -145,6 +150,9 @@
"First Page": "처음 페이지", "First Page": "처음 페이지",
"Firstpass height": "초기 세로길이", "Firstpass height": "초기 세로길이",
"Firstpass width": "초기 가로길이", "Firstpass width": "초기 가로길이",
"Focal point edges weight": "경계면 가중치",
"Focal point entropy weight": "엔트로피 가중치",
"Focal point face weight": "얼굴 가중치",
"Font for image grids that have text": "텍스트가 존재하는 그리드 이미지의 폰트", "Font for image grids that have text": "텍스트가 존재하는 그리드 이미지의 폰트",
"for detailed explanation.": "를 참조하십시오.", "for detailed explanation.": "를 참조하십시오.",
"For SD upscale, how much overlap in pixels should there be between tiles. Tiles overlap so that when they are merged back into one picture, there is no clearly visible seam.": "SD 업스케일링에서 타일 간 몇 픽셀을 겹치게 할지 결정하는 설정값입니다. 타일들이 다시 한 이미지로 합쳐질 때, 눈에 띄는 이음매가 없도록 서로 겹치게 됩니다.", "For SD upscale, how much overlap in pixels should there be between tiles. Tiles overlap so that when they are merged back into one picture, there is no clearly visible seam.": "SD 업스케일링에서 타일 간 몇 픽셀을 겹치게 할지 결정하는 설정값입니다. 타일들이 다시 한 이미지로 합쳐질 때, 눈에 띄는 이음매가 없도록 서로 겹치게 됩니다.",
@ -195,6 +203,7 @@
"Inpaint masked": "마스크만 처리", "Inpaint masked": "마스크만 처리",
"Inpaint not masked": "마스크 이외만 처리", "Inpaint not masked": "마스크 이외만 처리",
"Input directory": "인풋 이미지 경로", "Input directory": "인풋 이미지 경로",
"Input images directory": "이미지 경로 입력",
"Interpolation Method": "보간 방법", "Interpolation Method": "보간 방법",
"Interrogate\nCLIP": "CLIP\n분석", "Interrogate\nCLIP": "CLIP\n분석",
"Interrogate\nDeepBooru": "DeepBooru\n분석", "Interrogate\nDeepBooru": "DeepBooru\n분석",
@ -258,10 +267,12 @@
"None": "없음", "None": "없음",
"Nothing": "없음", "Nothing": "없음",
"Nothing found in the image.": "Nothing found in the image.", "Nothing found in the image.": "Nothing found in the image.",
"Number of columns on the page": "각 페이지마다 표시할 가로줄 수",
"Number of grids in each row": "각 세로줄마다 표시될 그리드 수", "Number of grids in each row": "각 세로줄마다 표시될 그리드 수",
"number of images to delete consecutively next": "연속적으로 삭제할 이미지 수", "number of images to delete consecutively next": "연속적으로 삭제할 이미지 수",
"Number of pictures displayed on each page": "각 페이지에 표시될 이미지 수", "Number of pictures displayed on each page": "각 페이지에 표시될 이미지 수",
"Number of repeats for a single input image per epoch; used only for displaying epoch number": "세대(Epoch)당 단일 인풋 이미지의 반복 횟수 - 세대(Epoch) 숫자를 표시하는 데에만 사용됩니다. ", "Number of repeats for a single input image per epoch; used only for displaying epoch number": "세대(Epoch)당 단일 인풋 이미지의 반복 횟수 - 세대(Epoch) 숫자를 표시하는 데에만 사용됩니다. ",
"Number of rows on the page": "각 페이지마다 표시할 세로줄 수",
"Number of vectors per token": "토큰별 벡터 수", "Number of vectors per token": "토큰별 벡터 수",
"Open for Clip Aesthetic!": "클립 스타일 기능을 활성화하려면 클릭!", "Open for Clip Aesthetic!": "클립 스타일 기능을 활성화하려면 클릭!",
"Open images output directory": "이미지 저장 경로 열기", "Open images output directory": "이미지 저장 경로 열기",
@ -375,6 +386,7 @@
"Seed": "시드", "Seed": "시드",
"Seed of a different picture to be mixed into the generation.": "결과물에 섞일 다른 그림의 시드", "Seed of a different picture to be mixed into the generation.": "결과물에 섞일 다른 그림의 시드",
"Select activation function of hypernetwork": "하이퍼네트워크 활성화 함수 선택", "Select activation function of hypernetwork": "하이퍼네트워크 활성화 함수 선택",
"Select Layer weights initialization. relu-like - Kaiming, sigmoid-like - Xavier is recommended": "레이어 가중치 초기화 방식 선택 - relu류 : Kaiming 추천, sigmoid류 : Xavier 추천",
"Select which Real-ESRGAN models to show in the web UI. (Requires restart)": "WebUI에 표시할 Real-ESRGAN 모델을 선택하십시오. (재시작 필요)", "Select which Real-ESRGAN models to show in the web UI. (Requires restart)": "WebUI에 표시할 Real-ESRGAN 모델을 선택하십시오. (재시작 필요)",
"Send to extras": "부가기능으로 전송", "Send to extras": "부가기능으로 전송",
"Send to img2img": "이미지→이미지로 전송", "Send to img2img": "이미지→이미지로 전송",
@ -465,10 +477,11 @@
"Use BLIP for caption": "캡션에 BLIP 사용", "Use BLIP for caption": "캡션에 BLIP 사용",
"Use deepbooru for caption": "캡션에 deepbooru 사용", "Use deepbooru for caption": "캡션에 deepbooru 사용",
"Use dropout": "드롭아웃 사용", "Use dropout": "드롭아웃 사용",
"Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; leave empty for default.": "다음 태그들을 사용해 이미지 파일명 형식을 결정하세요 : [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]. 비워두면 기본값으로 설정됩니다.", "Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.": "다음 태그들을 사용해 이미지 파일명 형식을 결정하세요 : [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]. 비워두면 기본값으로 설정됩니다.",
"Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; leave empty for default.": "다음 태그들을 사용해 이미지와 그리드의 하위 디렉토리명의 형식을 결정하세요 : [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]. 비워두면 기본값으로 설정됩니다.", "Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.": "다음 태그들을 사용해 이미지와 그리드의 하위 디렉토리명의 형식을 결정하세요 : [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]. 비워두면 기본값으로 설정됩니다.",
"Use old emphasis implementation. Can be useful to reproduce old seeds.": "옛 방식의 강조 구현을 사용합니다. 옛 시드를 재현하는 데 효과적일 수 있습니다.", "Use old emphasis implementation. Can be useful to reproduce old seeds.": "옛 방식의 강조 구현을 사용합니다. 옛 시드를 재현하는 데 효과적일 수 있습니다.",
"Use original name for output filename during batch process in extras tab": "부가기능 탭에서 이미지를 여러장 처리 시 결과물 파일명에 기존 파일명 사용하기", "Use original name for output filename during batch process in extras tab": "부가기능 탭에서 이미지를 여러장 처리 시 결과물 파일명에 기존 파일명 사용하기",
"Use same seed for each image": "각 이미지에 동일한 시드 사용",
"use spaces for tags in deepbooru": "deepbooru에서 태그에 공백 사용", "use spaces for tags in deepbooru": "deepbooru에서 태그에 공백 사용",
"User interface": "사용자 인터페이스", "User interface": "사용자 인터페이스",
"Var. seed": "바리에이션 시드", "Var. seed": "바리에이션 시드",
@ -485,6 +498,7 @@
"Which algorithm to use to produce the image": "이미지를 생성할 때 사용할 알고리즘", "Which algorithm to use to produce the image": "이미지를 생성할 때 사용할 알고리즘",
"Width": "가로", "Width": "가로",
"wiki": " 위키", "wiki": " 위키",
"Wildcards": "와일드카드",
"Will upscale the image to twice the dimensions; use width and height sliders to set tile size": "이미지를 설정된 사이즈의 2배로 업스케일합니다. 상단의 가로와 세로 슬라이더를 이용해 타일 사이즈를 지정하세요.", "Will upscale the image to twice the dimensions; use width and height sliders to set tile size": "이미지를 설정된 사이즈의 2배로 업스케일합니다. 상단의 가로와 세로 슬라이더를 이용해 타일 사이즈를 지정하세요.",
"With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising).": "이미지→이미지 진행 시, 슬라이더로 설정한 스텝 수를 정확히 실행하기 (일반적으로 디노이즈 강도가 낮을수록 실제 설정된 스텝 수보다 적게 진행됨)", "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising).": "이미지→이미지 진행 시, 슬라이더로 설정한 스텝 수를 정확히 실행하기 (일반적으로 디노이즈 강도가 낮을수록 실제 설정된 스텝 수보다 적게 진행됨)",
"Write image to a directory (default - log/images) and generation parameters into csv file.": "이미지를 경로에 저장하고, 설정값들을 csv 파일로 저장합니다. (기본 경로 - log/images)", "Write image to a directory (default - log/images) and generation parameters into csv file.": "이미지를 경로에 저장하고, 설정값들을 csv 파일로 저장합니다. (기본 경로 - log/images)",

423
localizations/tr_TR.json Normal file
View File

@ -0,0 +1,423 @@
{
"⤡": "⤡",
"⊞": "⊞",
"×": "×",
"": "",
"": "",
"Loading...": "Yükleniyor...",
"view": "arayüz",
"api": "",
"•": "-",
"built with gradio": "gradio ile inşa edildi",
"Stable Diffusion checkpoint": "Kararlı Difüzyon kontrol noktası",
"txt2img": "txt2img",
"img2img": "img2img",
"Extras": "Ekstralar",
"PNG Info": "PNG Bilgisi",
"Checkpoint Merger": "Checkpoint Birleştir",
"Train": "Eğitim",
"Settings": "Ayarlar",
"Prompt": "İstem",
"Negative prompt": "Negatif istem",
"Run": "Koşmak",
"Skip": "Atla",
"Interrupt": "Durdur",
"Generate": "Oluştur",
"Style 1": "Stil 1",
"Style 2": "Stil 2",
"Label": "Etiket",
"File": "Dosya",
"Drop File Here": "Dosyayı Buraya Bırakın",
"-": "-",
"or": "veya",
"Click to Upload": "Yüklemek için Tıklayınız",
"Image": "Resim",
"Check progress": "İlerlemeyi kontrol edin",
"Check progress (first)": "Önce ilerlemeyi kontrol edin",
"Sampling Steps": "Örnekleme Adımları",
"Sampling method": "Örnekleme yöntemi",
"Euler a": "Euler a",
"Euler": "Euler",
"LMS": "LMS",
"Heun": "Heun",
"DPM2": "DPM2",
"DPM2 a": "DPM2 a",
"DPM fast": "DPM hızlı",
"DPM adaptive": "DPM uyarlanabilir",
"LMS Karras": "LMS Karras",
"DPM2 Karras": "DPM2 Karras",
"DPM2 a Karras": "DPM2 a Karras",
"DDIM": "DDIM",
"PLMS": "PLMS",
"Width": "Genişlik",
"Height": "Yükseklik",
"Restore faces": "Yüzleri düzeltme",
"Tiling": "Döşeme Oluştur",
"Highres. fix": "Highres. düzeltme",
"Firstpass width": "İlk geçiş genişliği",
"Firstpass height": "İlk geçiş yüksekliği",
"Denoising strength": "Gürültü arındırma gücü",
"Batch count": "Grup sayısı",
"Batch size": "Grup büyüklüğü",
"CFG Scale": "CFG Ölçeği",
"Seed": "Tohum",
"Extra": "Ekstra",
"Variation seed": "Varyasyon tohumu",
"Variation strength": "Varyasyon gücü",
"Resize seed from width": "Tohumu genişlik ile yeniden boyutlandırma",
"Resize seed from height": "Tohumu yükseklik ile yeniden boyutlandırma",
"Script": "Scriptler",
"None": "Hiçbiri",
"Prompt matrix": "İstem matrisi",
"Prompts from file or textbox": "Dosyadan veya metin kutusundan istemler",
"X/Y plot": "X/Y grafiği",
"Put variable parts at start of prompt": "Değişken parçaları komut isteminin başına koyun",
"Show Textbox": "Metin Kutusunu Göster",
"File with inputs": "Girdileri içeren dosya",
"Prompts": "İpuçları",
"X type": "X tipi",
"Nothing": "Hiçbir şey",
"Var. seed": "Var. tohum",
"Var. strength": "Var. güç",
"Steps": "Adımlar",
"Prompt S/R": "İstem S/R",
"Prompt order": "İstem sırası",
"Sampler": "Örnekleyici",
"Checkpoint name": "Kontrol noktası adı",
"Hypernetwork": "Hipernetwork",
"Hypernet str.": "Hypernet str.",
"Sigma Churn": "Sigma Churn",
"Sigma min": "Sigma dakika",
"Sigma max": "Sigma maksimum",
"Sigma noise": "Sigma gürültüsü",
"Eta": "Eta",
"Clip skip": "Klip atlama",
"Denoising": "Denoising",
"X values": "X değerleri",
"Y type": "Y tipi",
"Y values": "Y değerleri",
"Draw legend": "Gösterge çizin",
"Include Separate Images": "Ayrı Görseller Ekleyin",
"Keep -1 for seeds": "Tohumlar için -1'i saklayın",
"Drop Image Here": "Resmi Buraya Bırakın",
"Save": "Kaydet",
"Send to img2img": "img2img'ye gönder",
"Send to inpaint": "Inpaint'e gönder",
"Send to extras": "Ekstralara gönder",
"Make Zip when Save?": "Kaydederken Zip Yap?",
"Textbox": "Metin Kutusu",
"Interrogate\nCLIP": "Sorgula\nCLIP",
"Inpaint": "Inpaint",
"Batch img2img": "Toplu img2img",
"Image for img2img": "img2img için resim",
"Image for inpainting with mask": "Maske ile inpainting için görüntü",
"Mask": "Maske",
"Mask blur": "Maske bulanıklığı",
"Mask mode": "Maske modu",
"Draw mask": "Maske çizin",
"Upload mask": "Maske yükle",
"Masking mode": "Maskeleme modu",
"Inpaint masked": "Maskeli inpaint",
"Inpaint not masked": "Boya maskelenmemiş",
"Masked content": "Maskelenmiş içerik",
"fill": "doldurun",
"original": "orijinal",
"latent noise": "gizli gürültü",
"latent nothing": "gizli hiçbir şey",
"Inpaint at full resolution": "Tam çözünürlükte inpaint",
"Inpaint at full resolution padding, pixels": "Tam çözünürlükte inpaint dolgu, piksel",
"Process images in a directory on the same machine where the server is running.": "Görüntüleri sunucunun çalıştığı makinedeki bir dizinde işleyin.",
"Use an empty output directory to save pictures normally instead of writing to the output directory.": "Resimleri çıktı dizinine yazmak yerine normal şekilde kaydetmek için boş bir çıktı dizini kullanın.",
"Input directory": "Girdi dizini",
"Output directory": ıktı dizini",
"Resize mode": "Yeniden boyutlandırma modu",
"Just resize": "Sadece yeniden boyutlandır",
"Crop and resize": "Kırpma ve yeniden boyutlandırma",
"Resize and fill": "Yeniden boyutlandırın ve doldurun",
"img2img alternative test": "img2img alternatif test",
"Loopback": "Geri Döngü",
"Outpainting mk2": "Outpainting mk2",
"Poor man's outpainting": "Zavallı adamın dış boyaması",
"SD upscale": "SD lüks",
"should be 2 or lower.": "2 veya daha düşük olmalıdır.",
"Override `Sampling method` to Euler?(this method is built for it)": "Euler için `Örnekleme yöntemini` geçersiz kılın (bu yöntem bunun için oluşturulmuştur)",
"Override `prompt` to the same value as `original prompt`?(and `negative prompt`)": "Prompt` değerini `orijinal prompt` ile aynı değere geçersiz kılma (ve `negatif prompt`)",
"Original prompt": "Orijinal bilgi istemi",
"Original negative prompt": "Orijinal negatif istem",
"Override `Sampling Steps` to the same value as `Decode steps`?": "Örnekleme Adımlarını `Kod çözme adımları` ile aynı değere mi geçersiz kılıyorsunuz?",
"Decode steps": "Kod çözme adımları",
"Override `Denoising strength` to 1?": "`Denoising strength` değerini 1 olarak geçersiz kıl?",
"Decode CFG scale": "CFG ölçeğinin kodunu çöz",
"Randomness": "Rastgelelik",
"Sigma adjustment for finding noise for image": "Görüntü için gürültü bulmaya yönelik Sigma ayarı",
"Loops": "Döngüler",
"Denoising strength change factor": "Denoising gücü değişim faktörü",
"Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8": "Önerilen ayarlar: Örnekleme Adımları: 80-100, Örnekleyici: Euler a, Denoising gücü: 0.8",
"Pixels to expand": "Genişletilecek pikseller",
"Outpainting direction": "Dış boyama yönü",
"left": "sol",
"right": "doğru",
"up": "yukarı",
"down": "aşağı",
"Fall-off exponent (lower=higher detail)": "Düşme üssü (düşük=daha yüksek detay)",
"Color variation": "Renk çeşitliliği",
"Will upscale the image to twice the dimensions; use width and height sliders to set tile size": "Görüntüyü boyutlarının iki katına yükseltir; döşeme boyutunu ayarlamak için genişlik ve yükseklik kaydırıcılarını kullanın",
"Tile overlap": "Karo örtüşmesi",
"Upscaler": "Upscaler",
"Lanczos": "Lanczos",
"LDSR": "LDSR",
"SwinIR 4x": "SwinIR 4x",
"ScuNET GAN": "ScuNET GAN",
"ScuNET PSNR": "ScuNET PSNR",
"ESRGAN_4x": "ESRGAN_4x",
"Single Image": "Tek Resim",
"Batch Process": "Toplu İşlem",
"Batch from Directory": "Dizinden Toplu İş",
"Source": "Kaynak",
"Show result images": "Sonuç resimlerini göster",
"Scale by": "Ölçek tarafından",
"Scale to": "Ölçeklendir",
"Resize": "Yeniden Boyutlandır",
"Crop to fit": "Sığdırmak için kırpın",
"Upscaler 2 visibility": "Upscaler 2 görünürlüğü",
"GFPGAN visibility": "GFPGAN görünürlüğü",
"CodeFormer visibility": "CodeFormer görünürlüğü",
"CodeFormer weight (0 = maximum effect, 1 = minimum effect)": "CodeFormer ağırlığı (0 = maksimum etki, 1 = minimum etki)",
"Open output directory": ıktı dizinini aç",
"Send to txt2img": "txt2img'ye gönder",
"A merger of the two checkpoints will be generated in your": "İki kontrol noktasının bir birleşimi sizin kontrol noktanızda oluşturulacaktır.",
"checkpoint": "kontrol noktası",
"directory.": "dizin.",
"Primary model (A)": "Birincil model (A)",
"Secondary model (B)": "İkincil model (B)",
"Tertiary model (C)": "Üçüncü model (C)",
"Custom Name (Optional)": "Özel Ad (İsteğe Bağlı)",
"Multiplier (M) - set to 0 to get model A": "Çarpan (M) - A modelini elde etmek için 0'a ayarlayın",
"Interpolation Method": "İnterpolasyon Yöntemi",
"Weighted sum": "Ağırlıklı toplam",
"Add difference": "Farklılık ekleyin",
"Save as float16": "float16 olarak kaydet",
"See": "Bkz. ",
"wiki": "wiki",
"for detailed explanation.": " ayrıntılııklama için.",
"Create embedding": "Yerleştirme oluşturma",
"Create hypernetwork": "Hipernet oluşturun",
"Preprocess images": "Görüntüleri ön işleme",
"Name": "İsim",
"Initialization text": "Başlatma metni",
"Number of vectors per token": "Belirteç başına vektör sayısı",
"Overwrite Old Embedding": "Eski Yerleştirmenin Üzerine Yaz",
"Modules": "Modüller",
"Enter hypernetwork layer structure": "Hipernetwork katman yapısına girin",
"Select activation function of hypernetwork": "Hipernetwork'ün aktivasyon fonksiyonunu seçin",
"linear": "doğrusal",
"relu": "relu",
"leakyrelu": "leakyrelu",
"elu": "elu",
"swish": "swish",
"Add layer normalization": "Katman normalizasyonu ekleyin",
"Use dropout": "Bırakmayı kullanın",
"Overwrite Old Hypernetwork": "Eski Hipernetwork'ün Üzerine Yazma",
"Source directory": "Kaynak dizini",
"Destination directory": "Hedef dizini",
"Existing Caption txt Action": "Mevcut Başlık txt Eylem",
"ignore": "görmezden gel",
"copy": "kopya",
"prepend": "prepend",
"append": "ekle",
"Create flipped copies": "Ters çevrilmiş kopyalar oluşturun",
"Split oversized images": "Büyük boyutlu görüntüleri bölme",
"Use BLIP for caption": "Başlık için BLIP kullanın",
"Use deepbooru for caption": "Başlık için deepbooru kullanın",
"Split image threshold": "Bölünmüş görüntü eşiği",
"Split image overlap ratio": "Bölünmüş görüntü örtüşme oranı",
"Preprocess": "Ön işlem",
"Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images": "Bir gömme veya Hipernetwork eğitin; 1:1 oranlı görüntülerin bulunduğu bir dizin belirtmelisiniz",
"[wiki]": "[wiki]",
"Embedding": "Yerleştirme",
"Embedding Learning rate": "Gömme Öğrenme oranı",
"Hypernetwork Learning rate": "Hypernetwork Öğrenme oranı",
"Dataset directory": "Veri seti dizini",
"Log directory": "Günlük dizini",
"Prompt template file": "Komut istemi şablon dosyası",
"Max steps": "Maksimum adım",
"Save an image to log directory every N steps, 0 to disable": "Her N adımda bir görüntüyü günlük dizinine kaydet, 0 devre dışı bırakmak için",
"Save a copy of embedding to log directory every N steps, 0 to disable": "Katıştırmanın bir kopyasını her N adımda bir günlük dizinine kaydedin, devre dışı bırakmak için 0",
"Save images with embedding in PNG chunks": "Görüntüleri PNG parçalarına yerleştirerek kaydedin",
"Read parameters (prompt, etc...) from txt2img tab when making previews": "Önizleme yaparken txt2img sekmesinden parametreleri (istem, vb...) okuma",
"Train Hypernetwork": "Tren Hipernetwork",
"Train Embedding": "Tren Gömme",
"Apply settings": "Ayarları uygula",
"Saving images/grids": "Görüntüleri/gridleri kaydetme",
"Always save all generated images": "Oluşturulan tüm görüntüleri her zaman kaydedin",
"File format for images": "Görüntüler için dosya formatı",
"Images filename pattern": "Görüntü dosya adı deseni",
"Add number to filename when saving": "Kaydederken dosya adına numara ekle",
"Always save all generated image grids": "Oluşturulan tüm görüntü ızgaralarını her zaman kaydedin",
"File format for grids": "Izgaralar için dosya formatı",
"Add extended info (seed, prompt) to filename when saving grid": "Izgarayı kaydederken dosya adına genişletilmiş bilgi (tohum, istem) ekleyin",
"Do not save grids consisting of one picture": "Tek resimden oluşan ızgaraları kaydetmeyin",
"Prevent empty spots in grid (when set to autodetect)": "Izgaradaki boş noktaları önleme (otomatik algılamaya ayarlandığında)",
"Grid row count; use -1 for autodetect and 0 for it to be same as batch size": "Izgara satır sayısı; otomatik algılama için -1, yığın boyutuyla aynı olması için 0 kullanın",
"Save text information about generation parameters as chunks to png files": "Üretim parametreleri hakkındaki metin bilgilerini png dosyalarına parçalar halinde kaydedin",
"Create a text file next to every image with generation parameters.": "Oluşturma parametreleri ile her görüntünün yanında bir metin dosyası oluşturun.",
"Save a copy of image before doing face restoration.": "Yüz restorasyonu yapmadan önce görüntünün bir kopyasını kaydedin.",
"Quality for saved jpeg images": "Kaydedilen jpeg görüntüleri için kalite",
"If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG": "PNG görüntüsü 4MB'den büyükse veya herhangi bir boyut 4000'den büyükse, ölçeği küçültün ve kopyayı JPG olarak kaydedin",
"Use original name for output filename during batch process in extras tab": "Ekstralar sekmesinde toplu işlem sırasında çıktı dosya adı için orijinal adı kullan",
"When using 'Save' button, only save a single selected image": "'Kaydet' düğmesini kullanırken, yalnızca seçilen tek bir resmi kaydedin",
"Do not add watermark to images": "Görüntülere filigran eklemeyin",
"Paths for saving": "Tasarruf için yollar",
"Output directory for images; if empty, defaults to three directories below": "Görüntüler için çıktı dizini; boşsa, varsayılan olarak aşağıdaki üç dizine gider",
"Output directory for txt2img images": "txt2img görüntüleri için çıktı dizini",
"Output directory for img2img images": "img2img görüntüleri için çıktı dizini",
"Output directory for images from extras tab": "Ekstralar sekmesindeki görüntüler için çıktı dizini",
"Output directory for grids; if empty, defaults to two directories below": "Izgaralar için çıktı dizini; boşsa, varsayılan olarak aşağıdaki iki dizine gider",
"Output directory for txt2img grids": "txt2img ızgaraları için çıktı dizini",
"Output directory for img2img grids": "img2img ızgaraları için çıktı dizini",
"Directory for saving images using the Save button": "Kaydet düğmesini kullanarak görüntüleri kaydetmek için dizin",
"Saving to a directory": "Bir dizine kaydetme",
"Save images to a subdirectory": "Görüntüleri bir alt dizine kaydetme",
"Save grids to a subdirectory": "Izgaraları bir alt dizine kaydetme",
"When using \"Save\" button, save images to a subdirectory": "\"Kaydet\" düğmesini kullanırken, görüntüleri bir alt dizine kaydedin",
"Directory name pattern": "Dizin adı kalıbı",
"Max prompt words for [prompt_words] pattern": "prompt_words] kalıbı için maksimum istem sözcükleri",
"Upscaling": "Yükseltme",
"Tile size for ESRGAN upscalers. 0 = no tiling.": "ESRGAN yükselticileri için döşeme boyutu. 0 = döşeme yok.",
"Tile overlap, in pixels for ESRGAN upscalers. Low values = visible seam.": "ESRGAN yükselticileri için piksel cinsinden döşeme örtüşmesi. Düşük değerler = görünür bağlantı hattı.",
"Tile size for all SwinIR.": "Tüm SwinIR için döşeme boyutu.",
"Tile overlap, in pixels for SwinIR. Low values = visible seam.": "SwinIR için piksel cinsinden döşeme örtüşmesi. Düşük değerler = görünür dikiş.",
"LDSR processing steps. Lower = faster": "LDSR işleme adımları. Düşük = daha hızlı",
"Upscaler for img2img": "img2img için üst ölçekleyici",
"Upscale latent space image when doing hires. fix": "İşe alım yaparken gizli alan görüntüsünü yükselt. düzelt",
"Face restoration": "Yüz restorasyonu",
"CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect": "CodeFormer ağırlık parametresi; 0 = maksimum etki; 1 = minimum etki",
"Move face restoration model from VRAM into RAM after processing": "İşlemden sonra yüz restorasyon modelini VRAM'den RAM'e taşıma",
"System": "Sistem",
"VRAM usage polls per second during generation. Set to 0 to disable.": "Üretim sırasında saniye başına VRAM kullanım yoklamaları. Devre dışı bırakmak için 0 olarak ayarlayın.",
"Always print all generation info to standard output": "Tüm üretim bilgilerini her zaman standart çıktıya yazdır",
"Add a second progress bar to the console that shows progress for an entire job.": "Konsola tüm iş için ilerlemeyi gösteren ikinci bir ilerleme çubuğu ekleyin.",
"Training": "Eğitim",
"Move VAE and CLIP to RAM when training hypernetwork. Saves VRAM.": "Hiperneti eğitirken VAE ve CLIP'i RAM'e taşıyın. VRAM'den tasarruf sağlar.",
"Filename word regex": "Dosya adı kelime regex",
"Filename join string": "Dosya adı birleştirme dizesi",
"Number of repeats for a single input image per epoch; used only for displaying epoch number": "Epok başına tek bir girdi görüntüsü için tekrar sayısı; yalnızca epok numarasını görüntülemek için kullanılır",
"Save an csv containing the loss to log directory every N steps, 0 to disable": "Her N adımda bir günlük dizinine kaybı içeren bir csv kaydedin, devre dışı bırakmak için 0",
"Stable Diffusion": "Kararlı Difüzyon",
"Checkpoints to cache in RAM": "RAM'de önbelleğe alınacak kontrol noktaları",
"Hypernetwork strength": "Hipernetwork gücü",
"Apply color correction to img2img results to match original colors.": "Orijinal renklerle eşleştirmek için img2img sonuçlarına renk düzeltmesi uygulayın.",
"Save a copy of image before applying color correction to img2img results": "img2img sonuçlarına renk düzeltmesi uygulamadan önce görüntünün bir kopyasını kaydedin",
"With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising).": "img2img ile, kaydırıcının belirttiği adım miktarını tam olarak yapın (normalde daha az denoising ile daha az yaparsınız).",
"Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply.": "Daha keskin ve temiz sonuçlar için K örnekleyicilerinde nicelemeyi etkinleştirin. Bu, mevcut tohumları değiştirebilir. Uygulamak için yeniden başlatma gerektirir.",
"Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention": "Vurgu: modelin metne daha fazla dikkat etmesini sağlamak için (metin) ve daha az dikkat etmesini sağlamak için [metin] kullanın",
"Use old emphasis implementation. Can be useful to reproduce old seeds.": "Eski vurgu uygulamasını kullanın. Eski tohumları yeniden üretmek faydalı olabilir.",
"Make K-diffusion samplers produce same images in a batch as when making a single image": "K-difüzyon örnekleyicilerinin tek bir görüntü oluştururken olduğu gibi toplu halde aynı görüntüleri üretmesini sağlayın",
"Increase coherency by padding from the last comma within n tokens when using more than 75 tokens": "75'ten fazla belirteç kullanıldığında n belirteç içindeki son virgülden itibaren dolgu yaparak tutarlılığı artırın",
"Filter NSFW content": "NSFW içeriği filtreleme",
"Stop At last layers of CLIP model": "Durdur CLIP modelinin son katmanlarında",
"Interrogate Options": "Sorgulama Seçenekleri",
"Interrogate: keep models in VRAM": "Sorgula: modelleri VRAM'de tut",
"Interrogate: use artists from artists.csv": "Sorgula: artists.csv dosyasındaki sanatçıları kullan",
"Interrogate: include ranks of model tags matches in results (Has no effect on caption-based interrogators).": "Interrogate: sonuçlara eşleşen model etiketlerinin sıralarını dahil et (Başlık tabanlı sorgulayıcılar üzerinde etkisi yoktur).",
"Interrogate: num_beams for BLIP": "Sorgula: BLIP için num_beams",
"Interrogate: minimum description length (excluding artists, etc..)": "Sorgula: minimum açıklama uzunluğu (sanatçılar vb. hariç)",
"Interrogate: maximum description length": "Sorgula: maksimum açıklama uzunluğu",
"CLIP: maximum number of lines in text file (0 = No limit)": "CLIP: metin dosyasındaki maksimum satır sayısı (0 = Sınır yok)",
"Interrogate: deepbooru score threshold": "Sorgula: deepbooru puan eşiği",
"Interrogate: deepbooru sort alphabetically": "Sorgula: deepbooru alfabetik olarak sırala",
"use spaces for tags in deepbooru": "deepbooru'da etiketler için boşluk kullanın",
"escape (\\) brackets in deepbooru (so they are used as literal brackets and not for emphasis)": "deepbooru'da kaçış (\\) parantezleri (böylece vurgu için değil, gerçek parantez olarak kullanılırlar)",
"User interface": "Kullanıcı arayüzü",
"Show progressbar": "İlerleme çubuğunu göster",
"Show image creation progress every N sampling steps. Set 0 to disable.": "Her N örnekleme adımında görüntü oluşturma ilerlemesini gösterir. Devre dışı bırakmak için 0 olarak ayarlayın.",
"Show previews of all images generated in a batch as a grid": "Bir toplu işte oluşturulan tüm görüntülerin önizlemelerini ızgara olarak göster",
"Show grid in results for web": "Web için sonuçlarda ızgarayı göster",
"Do not show any images in results for web": "Web için sonuçlarda herhangi bir resim gösterme",
"Add model hash to generation information": "Üretim bilgilerine model karması ekleyin",
"Add model name to generation information": "Üretim bilgilerine model adı ekleme",
"When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint.": "Üretim parametrelerini metinden kullanıcı arayüzüne okurken (PNG bilgisinden veya yapıştırılan metinden), seçilen modeli/denetim noktasını değiştirmeyin.",
"Font for image grids that have text": "Metin içeren görüntü ızgaraları için yazı tipi",
"Enable full page image viewer": "Tam sayfa resim görüntüleyiciyi etkinleştir",
"Show images zoomed in by default in full page image viewer": "Tam sayfa resim görüntüleyicide resimleri varsayılan olarak yakınlaştırılmış olarak gösterme",
"Show generation progress in window title.": "Pencere başlığında üretim ilerlemesini göster.",
"Quicksettings list": "Hızlı ayarlar listesi",
"Localization (requires restart)": "Yerelleştirme (yeniden başlatma gerektirir)",
"ko_KR": "ko_KR",
"ru_RU": "ru_RU",
"es_ES": "es_ES",
"ja_JP": "ja_JP",
"ar_AR": "ar_AR",
"Sampler parameters": "Örnekleyici parametreleri",
"Hide samplers in user interface (requires restart)": "Kullanıcı arayüzünde örnekleyicileri gizle (yeniden başlatma gerektirir)",
"eta (noise multiplier) for DDIM": "DDIM için eta (gürültü çarpanı)",
"eta (noise multiplier) for ancestral samplers": "eta örnekleyiciler için eta (gürültü çarpanı)",
"img2img DDIM discretize": "img2img DDIM discretize",
"uniform": "üniforma",
"quad": "dörtlü",
"sigma churn": "sigma churn",
"sigma tmin": "sigma tmin",
"sigma noise": "sigma gürültüsü",
"Eta noise seed delta": "Eta gürültü tohum deltası",
"Request browser notifications": "Tarayıcı bildirimleri isteyin",
"Download localization template": "Dil şablonunu indirin",
"Reload custom script bodies (No ui updates, No restart)": "Kişisel komut dosyası gövdelerini yeniden yükle (Kullanıcı arayüzü güncellemesi yok, yeniden başlatma yok)",
"Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)": "Gradio'yu yeniden başlatın ve bileşenleri yenileyin (yalnızca Özel Komut Dosyaları, ui.py, js ve css)",
"Prompt (press Ctrl+Enter or Alt+Enter to generate)": "İstem (oluşturmak için Ctrl+Enter veya Alt+Enter tuşlarına basın)",
"Negative prompt (press Ctrl+Enter or Alt+Enter to generate)": "Negatif istem (oluşturmak için Ctrl+Enter veya Alt+Enter tuşlarına basın)",
"Add a random artist to the prompt.": "Komut istemine rastgele bir sanatçı ekleyin.",
"Read generation parameters from prompt or last generation if prompt is empty into user interface.": "Kullanıcı arayüzüne istemden veya istem boşsa son üretimden üretim parametrelerini okuyun.",
"Save style": "Stil kaydet",
"Apply selected styles to current prompt": "Seçilen stilleri geçerli komut istemine uygulama",
"Stop processing current image and continue processing.": "Geçerli görüntüyü işlemeyi durdurun ve işlemeye devam edin.",
"Stop processing images and return any results accumulated so far.": "Görüntüleri işlemeyi durdurun ve o ana kadar biriken tüm sonuçları döndürün.",
"Style to apply; styles have components for both positive and negative prompts and apply to both": "Uygulanacak stil; stillerin hem pozitif hem de negatif istemler için bileşenleri vardır ve her ikisine de uygulanır",
"Do not do anything special": "Özel bir şey yapmayın",
"Which algorithm to use to produce the image": "Görüntüyü üretmek için hangi algoritmanın kullanılacağı",
"Euler Ancestral - very creative, each can get a completely different picture depending on step count, setting steps to higher than 30-40 does not help": "Euler Ancestral - çok yaratıcı, adım sayısına bağlı olarak her biri tamamen farklı bir resim elde edebilir, adımları 30-40'tan daha yükseğe ayarlamak yardımcı olmaz",
"Denoising Diffusion Implicit Models - best at inpainting": "Denoising Difüzyon Örtük Modelleri - en iyi inpainting",
"Produce an image that can be tiled.": "Döşenebilen bir görüntü üretin.",
"Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition": "Bir görüntüyü kısmen daha düşük çözünürlükte oluşturmak, büyütmek ve ardından kompozisyonu değiştirmeden ayrıntıları iyileştirmek için iki adımlı bir işlem kullanın",
"Determines how little respect the algorithm should have for image's content. At 0, nothing will change, and at 1 you'll get an unrelated image. With values below 1.0, processing will take less steps than the Sampling Steps slider specifies.": "Algoritmanın resmin içeriğine ne kadar az saygı göstermesi gerektiğini belirler. 0'da hiçbir şey değişmez ve 1'de ilgisiz bir görüntü elde edersiniz. 1,0'ın altındaki değerlerde işleme, Örnekleme Adımları kaydırıcısının belirttiğinden daha az adım atacaktır.",
"How many batches of images to create": "Kaç görüntü grubu oluşturulacağı",
"How many image to create in a single batch": "Tek bir partide kaç görüntü oluşturulacağı",
"Classifier Free Guidance Scale - how strongly the image should conform to prompt - lower values produce more creative results": "Sınıflandırıcı Serbest Rehberlik Ölçeği - görüntünün istemle ne kadar uyumlu olması gerektiği - düşük değerler daha yaratıcı sonuçlar üretir",
"A value that determines the output of random number generator - if you create an image with same parameters and seed as another image, you'll get the same result": "Rastgele sayı üretecinin çıktısını belirleyen bir değer - başka bir resimle aynı parametrelere ve tohuma sahip bir resim oluşturursanız, aynı sonucu alırsınız",
"Set seed to -1, which will cause a new random number to be used every time": "Tohum değerini -1 olarak ayarlayın, bu her seferinde yeni bir rastgele sayı kullanılmasına neden olacaktır",
"Reuse seed from last generation, mostly useful if it was randomed": "Son nesilden tohumu yeniden kullanın, çoğunlukla rastgele ise kullanışlıdır",
"Seed of a different picture to be mixed into the generation.": "Nesle karıştırılacak farklı bir resmin tohumu.",
"How strong of a variation to produce. At 0, there will be no effect. At 1, you will get the complete picture with variation seed (except for ancestral samplers, where you will just get something).": "Ne kadar güçlü bir varyasyon üretileceği. 0'da hiçbir etki olmayacaktır. 1'de, varyasyon tohumu ile tam bir resim elde edersiniz (sadece bir şey alacağınız atasal örnekleyiciler hariç).",
"Make an attempt to produce a picture similar to what would have been produced with same seed at specified resolution": "Belirtilen çözünürlükte aynı tohumla üretilecek olana benzer bir resim üretme girişiminde bulunun",
"Separate values for X axis using commas.": "X ekseni için değerleri virgül kullanarak ayırın.",
"Separate values for Y axis using commas.": "Y ekseni için değerleri virgül kullanarak ayırın.",
"Write image to a directory (default - log/images) and generation parameters into csv file.": "Görüntüyü bir dizine (varsayılan - log/images) ve üretim parametrelerini csv dosyasına yazın.",
"Open images output directory": "Görüntü çıktı dizinini açın",
"How much to blur the mask before processing, in pixels.": "İşlemeden önce maskenin piksel cinsinden ne kadar bulanıklaştırılacağı.",
"What to put inside the masked area before processing it with Stable Diffusion.": "Kararlı Difüzyon ile işlemeden önce maskelenmiş alanın içine ne konulacağı.",
"fill it with colors of the image": "Görüntünün renkleriyle doldurun",
"keep whatever was there originally": "başlangıçta orada ne varsa saklayın",
"fill it with latent space noise": "gizli alan gürültüsü ile doldurun",
"fill it with latent space zeroes": "gizli uzay sıfırları ile doldurun",
"Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "Maskelenmiş bölgeyi hedef çözünürlüğe yükseltme, inpainting yapma, ölçeği küçültme ve orijinal görüntüye yapıştırma",
"Resize image to target resolution. Unless height and width match, you will get incorrect aspect ratio.": "Görüntüyü hedef çözünürlüğe göre yeniden boyutlandırın. Yükseklik ve genişlik eşleşmediği sürece, yanlış en boy oranı elde edersiniz.",
"Resize the image so that entirety of target resolution is filled with the image. Crop parts that stick out.": "Görüntüyü, hedef çözünürlüğün tamamı görüntüyle dolacak şekilde yeniden boyutlandırın. Dışarıda kalan kısımları kırpın.",
"Resize the image so that entirety of image is inside target resolution. Fill empty space with image's colors.": "Görüntünün tamamı hedef çözünürlüğün içinde olacak şekilde görüntüyü yeniden boyutlandırın. Boş alanı görüntünün renkleriyle doldurun.",
"How many times to repeat processing an image and using it as input for the next iteration": "Bir görüntüyü işlemeyi kaç kez tekrarlamak ve bir sonraki yineleme için girdi olarak kullanmak",
"In loopback mode, on each loop the denoising strength is multiplied by this value. <1 means decreasing variety so your sequence will converge on a fixed picture. >1 means increasing variety so your sequence will become more and more chaotic.": "Geri döngü modunda, her döngüde denoising gücü bu değerle çarpılır. <1 çeşitliliğin azalması anlamına gelir, böylece diziniz sabit bir resme yakınsayacaktır. >1'den büyük olması çeşitliliğin artması anlamına gelir, böylece sekansınız gittikçe daha kaotik hale gelecektir.",
"For SD upscale, how much overlap in pixels should there be between tiles. Tiles overlap so that when they are merged back into one picture, there is no clearly visible seam.": "SD yükseltme için karolar arasında piksel olarak ne kadar örtüşme olmalıdır. Döşemeler, tekrar tek bir resimde birleştirildiklerinde açıkça görülebilen bir dikiş olmayacak şekilde üst üste biner.",
"A directory on the same machine where the server is running.": "Sunucunun çalıştığı makinedeki bir dizin.",
"Leave blank to save images to the default path.": "Görüntüleri varsayılan yola kaydetmek için boş bırakın.",
"Result = A * (1 - M) + B * M": "Sonuç = A * (1 - M) + B * M",
"Result = A + (B - C) * M": "Sonuç = A + (B - C) * M",
"1st and last digit must be 1. ex:'1, 2, 1'": "1. ve son rakam 1 olmalıdır. örn:'1, 2, 1'",
"Path to directory with input images": "Girdi resimlerinin bulunduğu dizinin yolu",
"Path to directory where to write outputs": ıktıların yazılacağı dizinin yolu",
"Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.": "Görüntülerin dosya adlarının nasıl seçileceğini tanımlamak için aşağıdaki etiketleri kullanın: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; varsayılan için boş bırakın.",
"If this option is enabled, watermark will not be added to created images. Warning: if you do not add watermark, you may be behaving in an unethical manner.": "Bu seçenek etkinleştirilirse, oluşturulan görüntülere filigran eklenmeyecektir. Uyarı: filigran eklemezseniz, etik olmayan bir şekilde davranıyor olabilirsiniz.",
"Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.": "Görüntüler ve ızgaralar için alt dizinlerin nasıl seçileceğini tanımlamak için aşağıdaki etiketleri kullanın: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; varsayılan için boş bırakın.",
"Restore low quality faces using GFPGAN neural network": "GFPGAN sinir ağını kullanarak düşük kaliteli yüzleri geri yükleme",
"This regular expression will be used extract words from filename, and they will be joined using the option below into label text used for training. Leave empty to keep filename text as it is.": "Bu düzenli ifade, dosya adından sözcükleri ayıklamak için kullanılır ve bunlar aşağıdaki seçenek kullanılarak eğitim için kullanılan etiket metnine birleştirilir. Dosya adı metnini olduğu gibi tutmak için boş bırakın.",
"This string will be used to join split words into a single line if the option above is enabled.": "Bu dize, yukarıdaki seçenek etkinleştirilirse bölünmüş kelimeleri tek bir satırda birleştirmek için kullanılacaktır.",
"List of setting names, separated by commas, for settings that should go to the quick access bar at the top, rather than the usual setting tab. See modules/shared.py for setting names. Requires restarting to apply.": "Normal ayar sekmesi yerine üstteki hızlı erişim çubuğuna gitmesi gereken ayarlar için virgülle ayrılmış ayar adlarının listesi. Ayar adları için modules/shared.py dosyasına bakın. Uygulanması için yeniden başlatma gerekir.",
"If this values is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.": "Bu değer sıfır değilse, tohuma eklenecek ve Eta ile örnekleyiciler kullanılırken gürültüler için RNG'yi başlatmak için kullanılacaktır. Bunu daha fazla görüntü çeşitliliği üretmek için kullanabilir veya ne yaptığınızı biliyorsanız diğer yazılımların görüntülerini eşleştirmek için kullanabilirsiniz.."
}

488
localizations/zh_CN.json Normal file
View File

@ -0,0 +1,488 @@
{
"⤡": "⤡",
"⊞": "⊞",
"×": "×",
"": "",
"": "",
"Loading...": "载入中...",
"view": "查看",
"api": "api",
"•": "•",
"built with gradio": "基于 Gradio 构建",
"Stable Diffusion checkpoint": "Stable Diffusion 模型(ckpt)",
"txt2img": "文生图",
"img2img": "图生图",
"Extras": "后处理",
"PNG Info": "PNG 信息",
"Checkpoint Merger": "模型(ckpt)合并工具",
"Train": "训练",
"Create aesthetic embedding": "生成美术风格 embedding",
"Image Browser": "图库浏览器",
"History": "历史记录",
"Settings": "设置",
"Prompt": "提示词",
"Negative prompt": "反向提示词",
"Run": "运行",
"Skip": "跳过",
"Interrupt": "中止",
"Generate": "生成",
"Style 1": "模版风格 1",
"Style 2": "模版风格 2",
"Label": "标签",
"File": "文件",
"Drop File Here": "拖拽文件到此",
"-": "-",
"or": "或",
"Click to Upload": "点击上传",
"Image": "图像",
"Check progress": "查看进度",
"Check progress (first)": "(首次)查看进度",
"Sampling Steps": "采样迭代步数",
"Sampling method": "采样方法",
"Euler a": "Euler a",
"Euler": "Euler",
"LMS": "LMS",
"Heun": "Heun",
"DPM2": "DPM2",
"DPM2 a": "DPM2 a",
"DPM fast": "DPM fast",
"DPM adaptive": "DPM adaptive",
"LMS Karras": "LMS Karras",
"DPM2 Karras": "DPM2 Karras",
"DPM2 a Karras": "DPM2 a Karras",
"DDIM": "DDIM",
"PLMS": "PLMS",
"Width": "宽度",
"Height": "高度",
"Restore faces": "面部修复",
"Tiling": "可平铺(Tiling)",
"Highres. fix": "高分辨率修复",
"Firstpass width": "第一遍的宽度",
"Firstpass height": "第一遍的高度",
"Denoising strength": "去噪强度",
"Batch count": "批次",
"Batch size": "批量",
"CFG Scale": "提示词相关性(CFG Scale)",
"Seed": "随机种子",
"Extra": "额外参数",
"Variation seed": "差异随机种子",
"Variation strength": "差异强度",
"Resize seed from width": "自宽度缩放随机种子",
"Resize seed from height": "自高度缩放随机种子",
"Open for Clip Aesthetic!": "打开美术风格 Clip!",
"▼": "▼",
"Aesthetic weight": "美术风格权重",
"Aesthetic steps": "美术风格迭代步数",
"Aesthetic learning rate": "美术风格学习率",
"Slerp interpolation": "Slerp 插值",
"Aesthetic imgs embedding": "美术风格图集 embedding",
"None": "无",
"Aesthetic text for imgs": "该图集的美术风格描述",
"Slerp angle": "Slerp 角度",
"Is negative text": "是反向提示词",
"Script": "脚本",
"Embedding to Shareable PNG": "将 Embedding 转换为可分享的 PNG",
"Prompt matrix": "提示词矩阵",
"Prompts from file or textbox": "从文本框或文件载入提示词",
"X/Y plot": "X/Y 图表",
"Source embedding to convert": "用于转换的源 Embedding",
"Embedding token": "Embedding 的 token (关键词)",
"Put variable parts at start of prompt": "把变量部分放在提示词文本的开头",
"Show Textbox": "显示文本框",
"File with inputs": "含输入内容的文件",
"Prompts": "提示词",
"X type": "X轴类型",
"Nothing": "无",
"Var. seed": "差异随机种子",
"Var. strength": "差异强度",
"Steps": "迭代步数",
"Prompt S/R": "提示词替换",
"Prompt order": "提示词顺序",
"Sampler": "采样器",
"Checkpoint name": "模型(ckpt)名",
"Hypernetwork": "Hypernetwork",
"Hypernet str.": "Hypernetwork 强度",
"Sigma Churn": "Sigma Churn",
"Sigma min": "最小 Sigma",
"Sigma max": "最大 Sigma",
"Sigma noise": "Sigma noise",
"Eta": "Eta",
"Clip skip": "Clip 跳过",
"Denoising": "去噪",
"X values": "X轴数值",
"Y type": "Y轴类型",
"Y values": "Y轴数值",
"Draw legend": "在图表中包括轴标题",
"Include Separate Images": "包括独立的图像",
"Keep -1 for seeds": "保持随机种子为-1",
"Drop Image Here": "拖拽图像到此",
"Save": "保存",
"Send to img2img": ">> 图生图",
"Send to inpaint": ">> 内补绘制",
"Send to extras": ">> 后处理",
"Make Zip when Save?": "保存时生成zip压缩文件?",
"Textbox": "文本框",
"Interrogate\nCLIP": "CLIP\n反推提示词",
"Interrogate\nDeepBooru": "DeepBooru\n反推提示词",
"Inpaint": "内补绘制",
"Batch img2img": "批量图生图",
"Image for img2img": "图生图的图像",
"Image for inpainting with mask": "用于内补绘制蒙版内容的图像",
"Mask": "蒙版",
"Mask blur": "蒙版模糊",
"Mask mode": "蒙版模式",
"Draw mask": "绘制蒙版",
"Upload mask": "上传蒙版",
"Masking mode": "蒙版模式",
"Inpaint masked": "内补绘制蒙版内容",
"Inpaint not masked": "内补绘制非蒙版内容",
"Masked content": "蒙版蒙住的内容",
"fill": "填充",
"original": "原图",
"latent noise": "潜空间噪声",
"latent nothing": "潜空间数值零",
"Inpaint at full resolution": "以完整分辨率进行内补绘制",
"Inpaint at full resolution padding, pixels": "以完整分辨率进行内补绘制 - 填补像素",
"Process images in a directory on the same machine where the server is running.": "在服务器主机上的目录中处理图像",
"Use an empty output directory to save pictures normally instead of writing to the output directory.": "指定一个空的文件夹为输出目录而非默认的 output 文件夹为输出目录",
"Disabled when launched with --hide-ui-dir-config.": "启动 --hide-ui-dir-config 时禁用",
"Input directory": "输入目录",
"Output directory": "输出目录",
"Resize mode": "缩放模式",
"Just resize": "只缩放",
"Crop and resize": "缩放并剪裁",
"Resize and fill": "缩放并填充",
"img2img alternative test": "图生图的另一种测试",
"Loopback": "回送",
"Outpainting mk2": "外补绘制第二版",
"Poor man's outpainting": "效果稍差的外补绘制",
"SD upscale": "使用 SD 放大(SD upscale)",
"should be 2 or lower.": "必须小于等于2",
"Override `Sampling method` to Euler?(this method is built for it)": "覆写 `采样方法` 为 Euler?(这个方法就是为这样做设计的)",
"Override `prompt` to the same value as `original prompt`?(and `negative prompt`)": "覆写 `提示词` 为 `初始提示词`?(包括`反向提示词`)",
"Original prompt": "初始提示词",
"Original negative prompt": "初始反向提示词",
"Override `Sampling Steps` to the same value as `Decode steps`?": "覆写 `采样迭代步数` 为 `解码迭代步数`?",
"Decode steps": "解码迭代步数",
"Override `Denoising strength` to 1?": "覆写 `去噪强度` 为 1?",
"Decode CFG scale": "解码提示词相关性(CFG scale)",
"Randomness": "随机度",
"Sigma adjustment for finding noise for image": "为寻找图中噪点的 Sigma 调整",
"Loops": "迭代次数",
"Denoising strength change factor": "去噪强度的调整系数",
"Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8": "推荐设置采样迭代步数80-100采样器Euler a去噪强度0.8",
"Pixels to expand": "拓展的像素数",
"Outpainting direction": "外补绘制的方向",
"left": "左",
"right": "右",
"up": "上",
"down": "下",
"Fall-off exponent (lower=higher detail)": "衰减指数(越低细节越好)",
"Color variation": "色彩变化",
"Will upscale the image to twice the dimensions; use width and height sliders to set tile size": "将图像放大到两倍尺寸; 使用宽度和高度滑块设置图块尺寸(tile size)",
"Tile overlap": "图块重叠的像素(Tile overlap)",
"Upscaler": "放大算法",
"Lanczos": "Lanczos",
"LDSR": "LDSR",
"BSRGAN 4x": "BSRGAN 4x",
"ESRGAN_4x": "ESRGAN_4x",
"R-ESRGAN 4x+ Anime6B": "R-ESRGAN 4x+ Anime6B",
"ScuNET GAN": "ScuNET GAN",
"ScuNET PSNR": "ScuNET PSNR",
"SwinIR_4x": "SwinIR 4x",
"Single Image": "单个图像",
"Batch Process": "批量处理",
"Batch from Directory": "从目录进行批量处理",
"Source": "来源",
"Show result images": "显示输出图像",
"Scale by": "等比缩放",
"Scale to": "指定尺寸缩放",
"Resize": "缩放",
"Crop to fit": "裁剪以适应",
"Upscaler 2": "放大算法 2",
"Upscaler 2 visibility": "放大算法 2 可见度",
"GFPGAN visibility": "GFPGAN 可见度",
"CodeFormer visibility": "CodeFormer 可见度",
"CodeFormer weight (0 = maximum effect, 1 = minimum effect)": "CodeFormer 权重 (0 = 最大效果, 1 = 最小效果)",
"Open output directory": "打开输出目录",
"Send to txt2img": ">> 文生图",
"A merger of the two checkpoints will be generated in your": "合并后的模型(ckpt)会生成在你的",
"checkpoint": "模型(ckpt)",
"directory.": "目录",
"Primary model (A)": "主要模型 (A)",
"Secondary model (B)": "第二模型 (B)",
"Tertiary model (C)": "第三模型 (C)",
"Custom Name (Optional)": "自定义名称 (可选)",
"Multiplier (M) - set to 0 to get model A": "倍率 (M) - 设为 0 等价于模型 A",
"Interpolation Method": "插值方法",
"Weighted sum": "加权和",
"Add difference": "添加差分",
"Save as float16": "以 float16 储存",
"See": "查看",
"wiki": "wiki",
"for detailed explanation.": "以了解详细说明",
"Create embedding": "生成 embedding",
"Create aesthetic images embedding": "生成美术风格图集 embedding",
"Create hypernetwork": "生成 hypernetwork",
"Preprocess images": "图像预处理",
"Name": "名称",
"Initialization text": "初始化文字",
"Number of vectors per token": "每个 token 的向量数",
"Overwrite Old Embedding": "覆写旧的 Embedding",
"Modules": "模块",
"Enter hypernetwork layer structure": "输入 hypernetwork 层结构",
"Select activation function of hypernetwork": "选择 hypernetwork 的激活函数",
"linear": "linear",
"relu": "relu",
"leakyrelu": "leakyrelu",
"elu": "elu",
"swish": "swish",
"Add layer normalization": "添加层标准化",
"Use dropout": "采用 dropout 防止过拟合",
"Overwrite Old Hypernetwork": "覆写旧的 Hypernetwork",
"Source directory": "源目录",
"Destination directory": "目标目录",
"Existing Caption txt Action": "对已有的 txt 说明文字的行为",
"ignore": "无视",
"copy": "复制",
"prepend": "放前面",
"append": "放后面",
"Create flipped copies": "生成镜像副本",
"Split oversized images into two": "将过大的图像分为两份",
"Split oversized images": "分割过大的图像",
"Use BLIP for caption": "使用 BLIP 生成说明文字(自然语言描述)",
"Use deepbooru for caption": "使用 deepbooru 生成说明文字(tags)",
"Split image threshold": "图像分割阈值",
"Split image overlap ratio": "分割图像重叠的比率",
"Preprocess": "预处理",
"Train an embedding; must specify a directory with a set of 1:1 ratio images": "训练 embedding 必须指定一组具有 1:1 比例图像的目录",
"Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images": "训练 embedding 或者 hypernetwork 必须指定一组具有 1:1 比例图像的目录",
"[wiki]": "[wiki]",
"Embedding": "Embedding",
"Embedding Learning rate": "Embedding 学习率",
"Hypernetwork Learning rate": "Hypernetwork 学习率",
"Learning rate": "学习率",
"Dataset directory": "数据集目录",
"Log directory": "日志目录",
"Prompt template file": "提示词模版文件",
"Max steps": "最大迭代步数",
"Save an image to log directory every N steps, 0 to disable": "每 N 步保存一个图像到日志目录0 表示禁用",
"Save a copy of embedding to log directory every N steps, 0 to disable": "每 N 步将 embedding 的副本保存到日志目录0 表示禁用",
"Save images with embedding in PNG chunks": "保存图像并在 PNG 文件中嵌入 embedding 文件",
"Read parameters (prompt, etc...) from txt2img tab when making previews": "进行预览时从文生图选项卡中读取参数(提示词等)",
"Train Hypernetwork": "训练 Hypernetwork",
"Train Embedding": "训练 Embedding",
"Create an aesthetic embedding out of any number of images": "从任意数量的图像中创建美术风格 embedding",
"Create images embedding": "生成图集 embedding",
"txt2img history": "文生图历史记录",
"img2img history": "图生图历史记录",
"extras history": "后处理历史记录",
"Renew Page": "刷新页面",
"extras": "后处理",
"favorites": "收藏夹",
"custom fold": "自定义文件夹",
"Load": "载入",
"Images directory": "图像目录",
"Prev batch": "上一批",
"Next batch": "下一批",
"First Page": "首页",
"Prev Page": "上一页",
"Page Index": "页数",
"Next Page": "下一页",
"End Page": "尾页",
"number of images to delete consecutively next": "接下来要连续删除的图像数",
"Delete": "删除",
"Generate Info": "生成信息",
"File Name": "文件名",
"Collect": "收藏",
"Refresh page": "刷新页面",
"Date to": "日期至",
"Number": "数量",
"set_index": "设置索引",
"Checkbox": "勾选框",
"Apply settings": "保存设置",
"Saving images/grids": "保存图像/概览图",
"Always save all generated images": "始终保存所有生成的图像",
"File format for images": "图像的文件格式",
"Images filename pattern": "图像文件名格式",
"Always save all generated image grids": "始终保存所有生成的概览图",
"File format for grids": "概览图的文件格式",
"Add extended info (seed, prompt) to filename when saving grid": "保存概览时将扩展信息(随机种子、提示词)添加到文件名",
"Do not save grids consisting of one picture": "只有一张图片时不要保存概览图",
"Prevent empty spots in grid (when set to autodetect)": "(在自动检测时)防止概览图中出现空位",
"Grid row count; use -1 for autodetect and 0 for it to be same as batch size": "概览行数; 使用 -1 进行自动检测,使用 0 使其与批量大小相同",
"Save text information about generation parameters as chunks to png files": "将有关生成参数的文本信息作为块保存到 png 文件中",
"Create a text file next to every image with generation parameters.": "保存图像时在每个图像旁边创建一个文本文件储存生成参数",
"Save a copy of image before doing face restoration.": "在进行面部修复之前保存图像副本",
"Quality for saved jpeg images": "保存的 jpeg 图像的质量",
"If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG": "如果 PNG 图像大于 4MB 或宽高大于 4000则缩小并保存副本为 JPG",
"Use original name for output filename during batch process in extras tab": "在后处理选项卡中的批量处理过程中使用原始名称作为输出文件名",
"When using 'Save' button, only save a single selected image": "使用“保存”按钮时,只保存一个选定的图像",
"Do not add watermark to images": "不要给图像加水印",
"Paths for saving": "保存路径",
"Output directory for images; if empty, defaults to three directories below": "图像的输出目录; 如果为空,则默认为以下三个目录",
"Output directory for txt2img images": "文生图的输出目录",
"Output directory for img2img images": "图生图的输出目录",
"Output directory for images from extras tab": "后处理的输出目录",
"Output directory for grids; if empty, defaults to two directories below": "概览图的输出目录; 如果为空,则默认为以下两个目录",
"Output directory for txt2img grids": "文生图概览的输出目录",
"Output directory for img2img grids": "图生图概览的输出目录",
"Directory for saving images using the Save button": "使用“保存”按钮保存图像的目录",
"Saving to a directory": "保存到目录",
"Save images to a subdirectory": "将图像保存到子目录",
"Save grids to a subdirectory": "将概览图保存到子目录",
"When using \"Save\" button, save images to a subdirectory": "使用“保存”按钮时,将图像保存到子目录",
"Directory name pattern": "目录名称格式",
"Max prompt words for [prompt_words] pattern": "[prompt_words] 格式的最大提示词数量",
"Upscaling": "放大",
"Tile size for ESRGAN upscalers. 0 = no tiling.": "ESRGAN 的图块尺寸(Tile size)。0 = 不分块(no tiling)",
"Tile overlap, in pixels for ESRGAN upscalers. Low values = visible seam.": "ESRGAN 的图块重叠(Tile overlap)像素。低值 = 可见接缝",
"Tile size for all SwinIR.": "适用所有 SwinIR 系算法的图块尺寸(Tile size)",
"Tile overlap, in pixels for SwinIR. Low values = visible seam.": "SwinIR 的图块重叠(Tile overlap)像素。低值 = 可见接缝",
"LDSR processing steps. Lower = faster": "LDSR 处理迭代步数。更低 = 更快",
"Upscaler for img2img": "图生图的放大算法",
"Upscale latent space image when doing hires. fix": "做高分辨率修复时也放大潜空间图像",
"Face restoration": "面部修复",
"CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect": "CodeFormer 权重参数; 0 = 最大效果; 1 = 最小效果",
"Move face restoration model from VRAM into RAM after processing": "面部修复处理完成后将面部修复模型从显存(VRAM)移至内存(RAM)",
"System": "系统",
"VRAM usage polls per second during generation. Set to 0 to disable.": "生成图像时每秒轮询显存(VRAM)使用情况的次数。设置为 0 以禁用",
"Always print all generation info to standard output": "始终将所有生成信息输出到 standard output (一般为控制台)",
"Add a second progress bar to the console that shows progress for an entire job.": "向控制台添加第二个进度条,显示整个作业的进度",
"Training": "训练",
"Unload VAE and CLIP from VRAM when training": "训练时从显存(VRAM)中取消 VAE 和 CLIP 的加载",
"Move VAE and CLIP to RAM when training hypernetwork. Saves VRAM.": "训练时将 VAE 和 CLIP 从显存(VRAM)移放到内存(RAM),节省显存(VRAM)",
"Filename word regex": "文件名用词的正则表达式",
"Filename join string": "文件名连接用字符串",
"Number of repeats for a single input image per epoch; used only for displaying epoch number": "每个 epoch 中单个输入图像的重复次数; 仅用于显示 epoch 数",
"Save an csv containing the loss to log directory every N steps, 0 to disable": "每 N 步保存一个包含 loss 的 csv 到日志目录0 表示禁用",
"Stable Diffusion": "Stable Diffusion",
"Checkpoints to cache in RAM": "缓存在内存(RAM)中的模型(ckpt)",
"Hypernetwork strength": "Hypernetwork 强度",
"Apply color correction to img2img results to match original colors.": "对图生图结果应用颜色校正以匹配原始颜色",
"Save a copy of image before applying color correction to img2img results": "在对图生图结果应用颜色校正之前保存图像副本",
"With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising).": "在进行图生图的时候,确切地执行滑块指定的迭代步数(正常情况下更弱的去噪需要更少的迭代步数)",
"Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply.": "在 K 采样器中启用量化以获得更锐利、更干净的结果。这可能会改变现有的随机种子。需要重新启动才能应用",
"Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention": "强调符:使用 (文字) 使模型更关注该文本,使用 [文字] 使其减少关注",
"Use old emphasis implementation. Can be useful to reproduce old seeds.": "使用旧的强调符实现。可用于复现旧随机种子",
"Make K-diffusion samplers produce same images in a batch as when making a single image": "使 K-diffusion 采样器批量生成与生成单个图像时产出相同的图像",
"Increase coherency by padding from the last comma within n tokens when using more than 75 tokens": "当使用超过 75 个 token 时,通过从 n 个 token 中的最后一个逗号填补来提高一致性",
"Filter NSFW content": "过滤成人内容",
"Stop At last layers of CLIP model": "在 CLIP 模型的最后哪一层停下",
"Interrogate Options": "反推提示词选项",
"Interrogate: keep models in VRAM": "反推: 将模型保存在显存(VRAM)中",
"Interrogate: use artists from artists.csv": "反推: 使用 artists.csv 中的艺术家",
"Interrogate: include ranks of model tags matches in results (Has no effect on caption-based interrogators).": "反推: 在生成结果中包含与模型标签(tags)相匹配的等级(对基于生成自然语言描述的反推没有影响)",
"Interrogate: num_beams for BLIP": "反推: BLIP 的 num_beams",
"Interrogate: minimum description length (excluding artists, etc..)": "反推: 最小描述长度(不包括艺术家, 等…)",
"Interrogate: maximum description length": "反推: 最大描述长度",
"CLIP: maximum number of lines in text file (0 = No limit)": "CLIP: 文本文件中的最大行数0 = 无限制)",
"Interrogate: deepbooru score threshold": "反推: deepbooru 分数阈值",
"Interrogate: deepbooru sort alphabetically": "反推: deepbooru 按字母顺序排序",
"use spaces for tags in deepbooru": "在 deepbooru 中为标签使用空格",
"escape (\\) brackets in deepbooru (so they are used as literal brackets and not for emphasis)": "在 deepbooru 中使用转义 (\\) 括号(因此它们用作文字括号而不是强调符号)",
"User interface": "用户界面",
"Show progressbar": "显示进度条",
"Show image creation progress every N sampling steps. Set 0 to disable.": "每 N 个采样迭代步数显示图像生成进度。设置 0 禁用",
"Show previews of all images generated in a batch as a grid": "以网格的形式预览所有批量生成出来的图像",
"Show grid in results for web": "在网页的结果中显示概览图",
"Do not show any images in results for web": "不在网页的结果中显示任何图像",
"Add model hash to generation information": "将模型的哈希值添加到生成信息",
"Add model name to generation information": "将模型名称添加到生成信息",
"When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint.": "当从文本读取生成参数到 UI从 PNG 信息或粘贴文本)时,不要更改选定的模型(ckpt)",
"Font for image grids that have text": "有文字的概览图使用的字体",
"Enable full page image viewer": "启用整页图像查看器",
"Show images zoomed in by default in full page image viewer": "在整页图像查看器中默认放大显示图像",
"Show generation progress in window title.": "在窗口标题中显示生成进度",
"Quicksettings list": "快速设置列表",
"Localization (requires restart)": "本地化(需要重新启动)",
"Sampler parameters": "采样器参数",
"Hide samplers in user interface (requires restart)": "在用户界面中隐藏采样器(需要重新启动)",
"eta (noise multiplier) for DDIM": "DDIM 的 eta (噪声乘数) ",
"eta (noise multiplier) for ancestral samplers": "ancestral 采样器的 eta (噪声乘数)",
"img2img DDIM discretize": "图生图 DDIM 离散化",
"uniform": "均匀",
"quad": "二阶",
"sigma churn": "sigma churn",
"sigma tmin": "最小(tmin) sigma",
"sigma noise": "sigma 噪声",
"Eta noise seed delta": "Eta 噪声种子偏移(noise seed delta)",
"Images Browser": "图库浏览器",
"Preload images at startup": "在启动时预载图像",
"Number of columns on the page": "每页列数",
"Number of rows on the page": "每页行数",
"Number of pictures displayed on each page": "每页显示的图像数量",
"Minimum number of pages per load": "每次加载的最小页数",
"Number of grids in each row": "每行显示多少格",
"Wildcards": "通配符",
"Use same seed for all images": "为所有图像使用同一个随机种子",
"Request browser notifications": "请求浏览器通知",
"Download localization template": "下载本地化模板",
"Reload custom script bodies (No ui updates, No restart)": "重新加载自定义脚本主体(无 ui 更新,无重启)",
"Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)": "重启 Gradio 及刷新组件仅限自定义脚本、ui.py、js 和 css",
"Prompt (press Ctrl+Enter or Alt+Enter to generate)": "提示词(按 Ctrl+Enter 或 Alt+Enter 生成)",
"Negative prompt (press Ctrl+Enter or Alt+Enter to generate)": "反向提示词(按 Ctrl+Enter 或 Alt+Enter 生成)",
"Add a random artist to the prompt.": "随机添加一个艺术家到提示词中",
"Read generation parameters from prompt or last generation if prompt is empty into user interface.": "从提示词中读取生成参数,如果提示词为空,则读取上一次的生成参数到用户界面",
"Save style": "储存为模版风格",
"Apply selected styles to current prompt": "将所选的模版风格应用于当前提示词",
"Stop processing current image and continue processing.": "停止处理当前图像并继续处理下一个",
"Stop processing images and return any results accumulated so far.": "停止处理图像并返回迄今为止累积的任何结果",
"Style to apply; styles have components for both positive and negative prompts and apply to both": "要应用的模版风格; 模版风格包含正向和反向提示词,并应用于两者",
"Do not do anything special": "什么都不做",
"Which algorithm to use to produce the image": "使用哪种算法生成图像",
"Euler Ancestral - very creative, each can get a completely different picture depending on step count, setting steps to higher than 30-40 does not help": "Euler Ancestral - 非常有创意,可以根据迭代步数获得完全不同的图像,将迭代步数设置为高于 30-40 不会有正面作用",
"Denoising Diffusion Implicit Models - best at inpainting": "Denoising Diffusion Implicit models - 最擅长内补绘制",
"Produce an image that can be tiled.": "生成可用于平铺(tiled)的图像",
"Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition": "使用两步处理的时候以较小的分辨率生成初步图像、接着放大图像,然后在不更改构图的情况下改进其中的细节",
"Determines how little respect the algorithm should have for image's content. At 0, nothing will change, and at 1 you'll get an unrelated image. With values below 1.0, processing will take less steps than the Sampling Steps slider specifies.": "决定算法对图像内容的影响程度。设置 0 时,什么都不会改变,而在 1 时,你将获得不相关的图像。值低于 1.0 时,处理的迭代步数将少于“采样迭代步数”滑块指定的步数",
"How many batches of images to create": "创建多少批次的图像",
"How many image to create in a single batch": "每批创建多少图像",
"Classifier Free Guidance Scale - how strongly the image should conform to prompt - lower values produce more creative results": "Classifier Free Guidance Scale - 图像应在多大程度上服从提示词 - 较低的值会产生更有创意的结果",
"A value that determines the output of random number generator - if you create an image with same parameters and seed as another image, you'll get the same result": "一个固定随机数生成器输出的值 - 以相同参数和随机种子生成的图像会得到相同的结果",
"Set seed to -1, which will cause a new random number to be used every time": "将随机种子设置为-1则每次都会使用一个新的随机数",
"Reuse seed from last generation, mostly useful if it was randomed": "重用上一次使用的随机种子,如果想要固定结果就会很有用",
"Seed of a different picture to be mixed into the generation.": "将要参与生成的另一张图的随机种子",
"How strong of a variation to produce. At 0, there will be no effect. At 1, you will get the complete picture with variation seed (except for ancestral samplers, where you will just get something).": "想要产生多强烈的变化。设为 0 时,将没有效果。设为 1 时你将获得完全产自差异随机种子的图像ancestral 采样器除外,你只是单纯地生成了一些东西)",
"Make an attempt to produce a picture similar to what would have been produced with same seed at specified resolution": "尝试生成与在指定分辨率下使用相同随机种子生成的图像相似的图片",
"This text is used to rotate the feature space of the imgs embs": "此文本用于旋转图集 embeddings 的特征空间",
"Separate values for X axis using commas.": "使用逗号分隔 X 轴的值",
"Separate values for Y axis using commas.": "使用逗号分隔 Y 轴的值",
"Write image to a directory (default - log/images) and generation parameters into csv file.": "将图像写入目录(默认 - log/images并将生成参数写入 csv 文件",
"Open images output directory": "打开图像输出目录",
"How much to blur the mask before processing, in pixels.": "处理前要对蒙版进行多强的模糊,以像素为单位",
"What to put inside the masked area before processing it with Stable Diffusion.": "在使用 Stable Diffusion 处理蒙版区域之前要在蒙版区域内放置什么",
"fill it with colors of the image": "用图像的颜色填充它",
"keep whatever was there originally": "保留原来的东西",
"fill it with latent space noise": "用潜空间的噪声填充它",
"fill it with latent space zeroes": "用潜空间的零填充它",
"Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "将蒙版区域放大到目标分辨率,做内补绘制,缩小后粘贴到原始图像中",
"Resize image to target resolution. Unless height and width match, you will get incorrect aspect ratio.": "将图像大小调整为目标分辨率。除非高度和宽度匹配,否则你将获得不正确的纵横比",
"Resize the image so that entirety of target resolution is filled with the image. Crop parts that stick out.": "调整图像大小,使整个目标分辨率都被图像填充。裁剪多出来的部分",
"Resize the image so that entirety of image is inside target resolution. Fill empty space with image's colors.": "调整图像大小,使整个图像在目标分辨率内。用图像的颜色填充空白区域",
"How many times to repeat processing an image and using it as input for the next iteration": "重复处理图像并用作下次迭代输入的次数",
"In loopback mode, on each loop the denoising strength is multiplied by this value. <1 means decreasing variety so your sequence will converge on a fixed picture. >1 means increasing variety so your sequence will become more and more chaotic.": "在回送模式下,在每个循环中,去噪强度都会乘以该值。<1 表示减少多样性,因此你的这一组图将集中在固定的图像上。>1 意味着增加多样性,因此你的这一组图将变得越来越混乱",
"For SD upscale, how much overlap in pixels should there be between tiles. Tiles overlap so that when they are merged back into one picture, there is no clearly visible seam.": "使用 SD 放大(SD upscale)时,图块(Tiles)之间应该有多少像素重叠。图块(Tiles)之间需要重叠才可以让它们在合并回一张图像时,没有清晰可见的接缝",
"A directory on the same machine where the server is running.": "服务器所在主机上的目录",
"Leave blank to save images to the default path.": "留空以将图像保存到默认路径",
"Result = A * (1 - M) + B * M": "结果 = A * (1 - M) + B * M",
"Result = A + (B - C) * M": "结果 = A + (B - C) * M",
"1st and last digit must be 1. ex:'1, 2, 1'": "第一个和最后一个数字必须是 1。例:'1, 2, 1'",
"how fast should the training go. Low values will take longer to train, high values may fail to converge (not generate accurate results) and/or may break the embedding (This has happened if you see Loss: nan in the training info textbox. If this happens, you need to manually restore your embedding from an older not-broken backup).\n\nYou can set a single numeric value, or multiple learning rates using the syntax:\n\n rate_1:max_steps_1, rate_2:max_steps_2, ...\n\nEG: 0.005:100, 1e-3:1000, 1e-5\n\nWill train with rate of 0.005 for first 100 steps, then 1e-3 until 1000 steps, then 1e-5 for all remaining steps.": "训练应该多快。低值将需要更长的时间来训练,高值可能无法收敛(无法产生准确的结果)以及/也许可能会破坏 embedding如果你在训练信息文本框中看到 Loss: nan 就会发生这种情况。如果发生这种情况,你需要从较旧的未损坏的备份手动恢复 embedding\n\n你可以使用以下语法设置单个数值或多个学习率\n\n 率1:步限1, 率2:步限2, ...\n\n如: 0.005:100, 1e-3:1000, 1e-5\n\n即前 100 步将以 0.005 的速率训练,接着直到 1000 步为止以 1e-3 训练,然后剩余所有步以 1e-5 训练",
"Path to directory with input images": "带有输入图像的路径",
"Path to directory where to write outputs": "进行输出的路径",
"Input images directory": "输入图像目录",
"Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; leave empty for default.": "使用以下标签定义如何选择图像的文件名: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; 默认请留空",
"If this option is enabled, watermark will not be added to created images. Warning: if you do not add watermark, you may be behaving in an unethical manner.": "如果启用此选项,水印将不会添加到生成出来的图像中。警告:如果你不添加水印,你的行为可能是不符合专业操守的",
"Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; leave empty for default.": "使用以下标签定义如何选择图像和概览图的子目录: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; 默认请留空",
"Restore low quality faces using GFPGAN neural network": "使用 GFPGAN 神经网络修复低质量面部",
"This regular expression will be used extract words from filename, and they will be joined using the option below into label text used for training. Leave empty to keep filename text as it is.": "此正则表达式将用于从文件名中提取单词,并将使用以下选项将它们接合到用于训练的标签文本中。留空以保持文件名文本不变",
"This string will be used to join split words into a single line if the option above is enabled.": "如果启用了上述选项,则此处的字符会用于将拆分的单词接合为同一行",
"List of setting names, separated by commas, for settings that should go to the quick access bar at the top, rather than the usual setting tab. See modules/shared.py for setting names. Requires restarting to apply.": "设置名称列表,以逗号分隔,设置应转到顶部的快速访问栏,而不是通常的设置选项卡。有关设置名称,请参见 modules/shared.py。需要重新启动才能应用",
"If this values is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.": "如果这个值不为零,它将被添加到随机种子中,并在使用带有 Eta 的采样器时用于初始化随机噪声。你可以使用它来产生更多的图像变化,或者你可以使用它来模仿其他软件生成的图像,如果你知道你在做什么",
"Enable Autocomplete": "开启Tag补全",
"Allowed categories for random artists selection when using the Roll button": "使用抽选艺术家按钮时将会随机的艺术家类别",
"Roll three": "抽三位出来",
"Generate forever": "不停地生成",
"Cancel generate forever": "取消不停地生成"
}

View File

@ -7,6 +7,7 @@ import uvicorn
from fastapi import Body, APIRouter, HTTPException from fastapi import Body, APIRouter, HTTPException
from fastapi.responses import JSONResponse from fastapi.responses import JSONResponse
from pydantic import BaseModel, Field, Json from pydantic import BaseModel, Field, Json
from typing import List
import json import json
import io import io
import base64 import base64
@ -15,12 +16,12 @@ from PIL import Image
sampler_to_index = lambda name: next(filter(lambda row: name.lower() == row[1].name.lower(), enumerate(all_samplers)), None) sampler_to_index = lambda name: next(filter(lambda row: name.lower() == row[1].name.lower(), enumerate(all_samplers)), None)
class TextToImageResponse(BaseModel): class TextToImageResponse(BaseModel):
images: list[str] = Field(default=None, title="Image", description="The generated image in base64 format.") images: List[str] = Field(default=None, title="Image", description="The generated image in base64 format.")
parameters: Json parameters: Json
info: Json info: Json
class ImageToImageResponse(BaseModel): class ImageToImageResponse(BaseModel):
images: list[str] = Field(default=None, title="Image", description="The generated image in base64 format.") images: List[str] = Field(default=None, title="Image", description="The generated image in base64 format.")
parameters: Json parameters: Json
info: Json info: Json
@ -65,7 +66,7 @@ class Api:
i.save(buffer, format="png") i.save(buffer, format="png")
b64images.append(base64.b64encode(buffer.getvalue())) b64images.append(base64.b64encode(buffer.getvalue()))
return TextToImageResponse(images=b64images, parameters=json.dumps(vars(txt2imgreq)), info=json.dumps(processed.info)) return TextToImageResponse(images=b64images, parameters=json.dumps(vars(txt2imgreq)), info=processed.js())
@ -111,7 +112,11 @@ class Api:
i.save(buffer, format="png") i.save(buffer, format="png")
b64images.append(base64.b64encode(buffer.getvalue())) b64images.append(base64.b64encode(buffer.getvalue()))
return ImageToImageResponse(images=b64images, parameters=json.dumps(vars(img2imgreq)), info=json.dumps(processed.info)) if (not img2imgreq.include_init_images):
img2imgreq.init_images = None
img2imgreq.mask = None
return ImageToImageResponse(images=b64images, parameters=json.dumps(vars(img2imgreq)), info=processed.js())
def extrasapi(self): def extrasapi(self):
raise NotImplementedError raise NotImplementedError

View File

@ -31,6 +31,7 @@ class ModelDef(BaseModel):
field_alias: str field_alias: str
field_type: Any field_type: Any
field_value: Any field_value: Any
field_exclude: bool = False
class PydanticModelGenerator: class PydanticModelGenerator:
@ -78,7 +79,8 @@ class PydanticModelGenerator:
field=underscore(fields["key"]), field=underscore(fields["key"]),
field_alias=fields["key"], field_alias=fields["key"],
field_type=fields["type"], field_type=fields["type"],
field_value=fields["default"])) field_value=fields["default"],
field_exclude=fields["exclude"] if "exclude" in fields else False))
def generate_model(self): def generate_model(self):
""" """
@ -86,7 +88,7 @@ class PydanticModelGenerator:
from the json and overrides provided at initialization from the json and overrides provided at initialization
""" """
fields = { fields = {
d.field: (d.field_type, Field(default=d.field_value, alias=d.field_alias)) for d in self._model_def d.field: (d.field_type, Field(default=d.field_value, alias=d.field_alias, exclude=d.field_exclude)) for d in self._model_def
} }
DynamicModel = create_model(self._model_name, **fields) DynamicModel = create_model(self._model_name, **fields)
DynamicModel.__config__.allow_population_by_field_name = True DynamicModel.__config__.allow_population_by_field_name = True
@ -102,5 +104,5 @@ StableDiffusionTxt2ImgProcessingAPI = PydanticModelGenerator(
StableDiffusionImg2ImgProcessingAPI = PydanticModelGenerator( StableDiffusionImg2ImgProcessingAPI = PydanticModelGenerator(
"StableDiffusionProcessingImg2Img", "StableDiffusionProcessingImg2Img",
StableDiffusionProcessingImg2Img, StableDiffusionProcessingImg2Img,
[{"key": "sampler_index", "type": str, "default": "Euler"}, {"key": "init_images", "type": list, "default": None}, {"key": "denoising_strength", "type": float, "default": 0.75}, {"key": "mask", "type": str, "default": None}] [{"key": "sampler_index", "type": str, "default": "Euler"}, {"key": "init_images", "type": list, "default": None}, {"key": "denoising_strength", "type": float, "default": 0.75}, {"key": "mask", "type": str, "default": None}, {"key": "include_init_images", "type": bool, "default": False, "exclude" : True}]
).generate_model() ).generate_model()

View File

@ -5,6 +5,7 @@ import html
import os import os
import sys import sys
import traceback import traceback
import inspect
import modules.textual_inversion.dataset import modules.textual_inversion.dataset
import torch import torch
@ -15,10 +16,12 @@ from modules import devices, processing, sd_models, shared
from modules.textual_inversion import textual_inversion from modules.textual_inversion import textual_inversion
from modules.textual_inversion.learn_schedule import LearnRateScheduler from modules.textual_inversion.learn_schedule import LearnRateScheduler
from torch import einsum from torch import einsum
from torch.nn.init import normal_, xavier_normal_, xavier_uniform_, kaiming_normal_, kaiming_uniform_, zeros_
from collections import defaultdict, deque from collections import defaultdict, deque
from statistics import stdev, mean from statistics import stdev, mean
class HypernetworkModule(torch.nn.Module): class HypernetworkModule(torch.nn.Module):
multiplier = 1.0 multiplier = 1.0
activation_dict = { activation_dict = {
@ -26,9 +29,12 @@ class HypernetworkModule(torch.nn.Module):
"leakyrelu": torch.nn.LeakyReLU, "leakyrelu": torch.nn.LeakyReLU,
"elu": torch.nn.ELU, "elu": torch.nn.ELU,
"swish": torch.nn.Hardswish, "swish": torch.nn.Hardswish,
"tanh": torch.nn.Tanh,
"sigmoid": torch.nn.Sigmoid,
} }
activation_dict.update({cls_name.lower(): cls_obj for cls_name, cls_obj in inspect.getmembers(torch.nn.modules.activation) if inspect.isclass(cls_obj) and cls_obj.__module__ == 'torch.nn.modules.activation'})
def __init__(self, dim, state_dict=None, layer_structure=None, activation_func=None, add_layer_norm=False, use_dropout=False): def __init__(self, dim, state_dict=None, layer_structure=None, activation_func=None, weight_init='Normal', add_layer_norm=False, use_dropout=False):
super().__init__() super().__init__()
assert layer_structure is not None, "layer_structure must not be None" assert layer_structure is not None, "layer_structure must not be None"
@ -65,9 +71,24 @@ class HypernetworkModule(torch.nn.Module):
else: else:
for layer in self.linear: for layer in self.linear:
if type(layer) == torch.nn.Linear or type(layer) == torch.nn.LayerNorm: if type(layer) == torch.nn.Linear or type(layer) == torch.nn.LayerNorm:
layer.weight.data.normal_(mean=0.0, std=0.01) w, b = layer.weight.data, layer.bias.data
layer.bias.data.zero_() if weight_init == "Normal" or type(layer) == torch.nn.LayerNorm:
normal_(w, mean=0.0, std=0.01)
normal_(b, mean=0.0, std=0.005)
elif weight_init == 'XavierUniform':
xavier_uniform_(w)
zeros_(b)
elif weight_init == 'XavierNormal':
xavier_normal_(w)
zeros_(b)
elif weight_init == 'KaimingUniform':
kaiming_uniform_(w, nonlinearity='leaky_relu' if 'leakyrelu' == activation_func else 'relu')
zeros_(b)
elif weight_init == 'KaimingNormal':
kaiming_normal_(w, nonlinearity='leaky_relu' if 'leakyrelu' == activation_func else 'relu')
zeros_(b)
else:
raise KeyError(f"Key {weight_init} is not defined as initialization!")
self.to(devices.device) self.to(devices.device)
def fix_old_state_dict(self, state_dict): def fix_old_state_dict(self, state_dict):
@ -105,7 +126,7 @@ class Hypernetwork:
filename = None filename = None
name = None name = None
def __init__(self, name=None, enable_sizes=None, layer_structure=None, activation_func=None, add_layer_norm=False, use_dropout=False): def __init__(self, name=None, enable_sizes=None, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False):
self.filename = None self.filename = None
self.name = name self.name = name
self.layers = {} self.layers = {}
@ -114,13 +135,14 @@ class Hypernetwork:
self.sd_checkpoint_name = None self.sd_checkpoint_name = None
self.layer_structure = layer_structure self.layer_structure = layer_structure
self.activation_func = activation_func self.activation_func = activation_func
self.weight_init = weight_init
self.add_layer_norm = add_layer_norm self.add_layer_norm = add_layer_norm
self.use_dropout = use_dropout self.use_dropout = use_dropout
for size in enable_sizes or []: for size in enable_sizes or []:
self.layers[size] = ( self.layers[size] = (
HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.add_layer_norm, self.use_dropout), HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init, self.add_layer_norm, self.use_dropout),
HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.add_layer_norm, self.use_dropout), HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init, self.add_layer_norm, self.use_dropout),
) )
def weights(self): def weights(self):
@ -144,6 +166,7 @@ class Hypernetwork:
state_dict['layer_structure'] = self.layer_structure state_dict['layer_structure'] = self.layer_structure
state_dict['activation_func'] = self.activation_func state_dict['activation_func'] = self.activation_func
state_dict['is_layer_norm'] = self.add_layer_norm state_dict['is_layer_norm'] = self.add_layer_norm
state_dict['weight_initialization'] = self.weight_init
state_dict['use_dropout'] = self.use_dropout state_dict['use_dropout'] = self.use_dropout
state_dict['sd_checkpoint'] = self.sd_checkpoint state_dict['sd_checkpoint'] = self.sd_checkpoint
state_dict['sd_checkpoint_name'] = self.sd_checkpoint_name state_dict['sd_checkpoint_name'] = self.sd_checkpoint_name
@ -158,15 +181,21 @@ class Hypernetwork:
state_dict = torch.load(filename, map_location='cpu') state_dict = torch.load(filename, map_location='cpu')
self.layer_structure = state_dict.get('layer_structure', [1, 2, 1]) self.layer_structure = state_dict.get('layer_structure', [1, 2, 1])
print(self.layer_structure)
self.activation_func = state_dict.get('activation_func', None) self.activation_func = state_dict.get('activation_func', None)
print(f"Activation function is {self.activation_func}")
self.weight_init = state_dict.get('weight_initialization', 'Normal')
print(f"Weight initialization is {self.weight_init}")
self.add_layer_norm = state_dict.get('is_layer_norm', False) self.add_layer_norm = state_dict.get('is_layer_norm', False)
print(f"Layer norm is set to {self.add_layer_norm}")
self.use_dropout = state_dict.get('use_dropout', False) self.use_dropout = state_dict.get('use_dropout', False)
print(f"Dropout usage is set to {self.use_dropout}" )
for size, sd in state_dict.items(): for size, sd in state_dict.items():
if type(size) == int: if type(size) == int:
self.layers[size] = ( self.layers[size] = (
HypernetworkModule(size, sd[0], self.layer_structure, self.activation_func, self.add_layer_norm, self.use_dropout), HypernetworkModule(size, sd[0], self.layer_structure, self.activation_func, self.weight_init, self.add_layer_norm, self.use_dropout),
HypernetworkModule(size, sd[1], self.layer_structure, self.activation_func, self.add_layer_norm, self.use_dropout), HypernetworkModule(size, sd[1], self.layer_structure, self.activation_func, self.weight_init, self.add_layer_norm, self.use_dropout),
) )
self.name = state_dict.get('name', self.name) self.name = state_dict.get('name', self.name)
@ -458,7 +487,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
if image is not None: if image is not None:
shared.state.current_image = image shared.state.current_image = image
last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename) last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False)
last_saved_image += f", prompt: {preview_text}" last_saved_image += f", prompt: {preview_text}"
shared.state.job_no = hypernetwork.step shared.state.job_no = hypernetwork.step

View File

@ -8,8 +8,9 @@ import modules.textual_inversion.textual_inversion
from modules import devices, sd_hijack, shared from modules import devices, sd_hijack, shared
from modules.hypernetworks import hypernetwork from modules.hypernetworks import hypernetwork
keys = list(hypernetwork.HypernetworkModule.activation_dict.keys())
def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, add_layer_norm=False, use_dropout=False): def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False):
# Remove illegal characters from name. # Remove illegal characters from name.
name = "".join( x for x in name if (x.isalnum() or x in "._- ")) name = "".join( x for x in name if (x.isalnum() or x in "._- "))
@ -25,6 +26,7 @@ def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None,
enable_sizes=[int(x) for x in enable_sizes], enable_sizes=[int(x) for x in enable_sizes],
layer_structure=layer_structure, layer_structure=layer_structure,
activation_func=activation_func, activation_func=activation_func,
weight_init=weight_init,
add_layer_norm=add_layer_norm, add_layer_norm=add_layer_norm,
use_dropout=use_dropout, use_dropout=use_dropout,
) )

View File

@ -277,7 +277,7 @@ invalid_filename_chars = '<>:"/\\|?*\n'
invalid_filename_prefix = ' ' invalid_filename_prefix = ' '
invalid_filename_postfix = ' .' invalid_filename_postfix = ' .'
re_nonletters = re.compile(r'[\s' + string.punctuation + ']+') re_nonletters = re.compile(r'[\s' + string.punctuation + ']+')
re_pattern = re.compile(r"([^\[\]]+|\[([^]]+)]|[\[\]]*)") re_pattern = re.compile(r"(.*?)(?:\[([^\[\]]+)\]|$)")
re_pattern_arg = re.compile(r"(.*)<([^>]*)>$") re_pattern_arg = re.compile(r"(.*)<([^>]*)>$")
max_filename_part_length = 128 max_filename_part_length = 128
@ -343,7 +343,7 @@ class FilenameGenerator:
def datetime(self, *args): def datetime(self, *args):
time_datetime = datetime.datetime.now() time_datetime = datetime.datetime.now()
time_format = args[0] if len(args) > 0 else self.default_time_format time_format = args[0] if len(args) > 0 and args[0] != "" else self.default_time_format
try: try:
time_zone = pytz.timezone(args[1]) if len(args) > 1 else None time_zone = pytz.timezone(args[1]) if len(args) > 1 else None
except pytz.exceptions.UnknownTimeZoneError as _: except pytz.exceptions.UnknownTimeZoneError as _:
@ -362,9 +362,9 @@ class FilenameGenerator:
for m in re_pattern.finditer(x): for m in re_pattern.finditer(x):
text, pattern = m.groups() text, pattern = m.groups()
res += text
if pattern is None: if pattern is None:
res += text
continue continue
pattern_args = [] pattern_args = []
@ -385,12 +385,9 @@ class FilenameGenerator:
print(f"Error adding [{pattern}] to filename", file=sys.stderr) print(f"Error adding [{pattern}] to filename", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr) print(traceback.format_exc(), file=sys.stderr)
if replacement is None: if replacement is not None:
res += f'[{pattern}]'
else:
res += str(replacement) res += str(replacement)
continue
continue
res += f'[{pattern}]' res += f'[{pattern}]'
@ -454,17 +451,6 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
""" """
namegen = FilenameGenerator(p, seed, prompt) namegen = FilenameGenerator(p, seed, prompt)
if extension == 'png' and opts.enable_pnginfo and info is not None:
pnginfo = PngImagePlugin.PngInfo()
if existing_info is not None:
for k, v in existing_info.items():
pnginfo.add_text(k, str(v))
pnginfo.add_text(pnginfo_section_name, info)
else:
pnginfo = None
if save_to_dirs is None: if save_to_dirs is None:
save_to_dirs = (grid and opts.grid_save_to_dirs) or (not grid and opts.save_to_dirs and not no_prompt) save_to_dirs = (grid and opts.grid_save_to_dirs) or (not grid and opts.save_to_dirs and not no_prompt)
@ -492,19 +478,27 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
if add_number: if add_number:
basecount = get_next_sequence_number(path, basename) basecount = get_next_sequence_number(path, basename)
fullfn = None fullfn = None
fullfn_without_extension = None
for i in range(500): for i in range(500):
fn = f"{basecount + i:05}" if basename == '' else f"{basename}-{basecount + i:04}" fn = f"{basecount + i:05}" if basename == '' else f"{basename}-{basecount + i:04}"
fullfn = os.path.join(path, f"{fn}{file_decoration}.{extension}") fullfn = os.path.join(path, f"{fn}{file_decoration}.{extension}")
fullfn_without_extension = os.path.join(path, f"{fn}{file_decoration}")
if not os.path.exists(fullfn): if not os.path.exists(fullfn):
break break
else: else:
fullfn = os.path.join(path, f"{file_decoration}.{extension}") fullfn = os.path.join(path, f"{file_decoration}.{extension}")
fullfn_without_extension = os.path.join(path, file_decoration)
else: else:
fullfn = os.path.join(path, f"{forced_filename}.{extension}") fullfn = os.path.join(path, f"{forced_filename}.{extension}")
fullfn_without_extension = os.path.join(path, forced_filename)
pnginfo = existing_info or {}
if info is not None:
pnginfo[pnginfo_section_name] = info
params = script_callbacks.ImageSaveParams(image, p, fullfn, pnginfo)
script_callbacks.before_image_saved_callback(params)
image = params.image
fullfn = params.filename
info = params.pnginfo.get(pnginfo_section_name, None)
fullfn_without_extension, extension = os.path.splitext(params.filename)
def exif_bytes(): def exif_bytes():
return piexif.dump({ return piexif.dump({
@ -513,12 +507,20 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
}, },
}) })
if extension.lower() in ("jpg", "jpeg", "webp"): if extension.lower() == '.png':
pnginfo_data = PngImagePlugin.PngInfo()
for k, v in params.pnginfo.items():
pnginfo_data.add_text(k, str(v))
image.save(fullfn, quality=opts.jpeg_quality, pnginfo=pnginfo_data)
elif extension.lower() in (".jpg", ".jpeg", ".webp"):
image.save(fullfn, quality=opts.jpeg_quality) image.save(fullfn, quality=opts.jpeg_quality)
if opts.enable_pnginfo and info is not None: if opts.enable_pnginfo and info is not None:
piexif.insert(exif_bytes(), fullfn) piexif.insert(exif_bytes(), fullfn)
else: else:
image.save(fullfn, quality=opts.jpeg_quality, pnginfo=pnginfo) image.save(fullfn, quality=opts.jpeg_quality)
target_side_length = 4000 target_side_length = 4000
oversize = image.width > target_side_length or image.height > target_side_length oversize = image.width > target_side_length or image.height > target_side_length
@ -541,7 +543,8 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
else: else:
txt_fullfn = None txt_fullfn = None
script_callbacks.image_saved_callback(image, p, fullfn, txt_fullfn) script_callbacks.image_saved_callback(params)
return fullfn, txt_fullfn return fullfn, txt_fullfn

View File

@ -39,6 +39,8 @@ def process_batch(p, input_dir, output_dir, args):
break break
img = Image.open(image) img = Image.open(image)
# Use the EXIF orientation of photos taken by smartphones.
img = ImageOps.exif_transpose(img)
p.init_images = [img] * p.batch_size p.init_images = [img] * p.batch_size
proc = modules.scripts.scripts_img2img.run(p, *args) proc = modules.scripts.scripts_img2img.run(p, *args)
@ -61,19 +63,25 @@ def img2img(mode: int, prompt: str, negative_prompt: str, prompt_style: str, pro
is_batch = mode == 2 is_batch = mode == 2
if is_inpaint: if is_inpaint:
# Drawn mask
if mask_mode == 0: if mask_mode == 0:
image = init_img_with_mask['image'] image = init_img_with_mask['image']
mask = init_img_with_mask['mask'] mask = init_img_with_mask['mask']
alpha_mask = ImageOps.invert(image.split()[-1]).convert('L').point(lambda x: 255 if x > 0 else 0, mode='1') alpha_mask = ImageOps.invert(image.split()[-1]).convert('L').point(lambda x: 255 if x > 0 else 0, mode='1')
mask = ImageChops.lighter(alpha_mask, mask.convert('L')).convert('L') mask = ImageChops.lighter(alpha_mask, mask.convert('L')).convert('L')
image = image.convert('RGB') image = image.convert('RGB')
# Uploaded mask
else: else:
image = init_img_inpaint image = init_img_inpaint
mask = init_mask_inpaint mask = init_mask_inpaint
# No mask
else: else:
image = init_img image = init_img
mask = None mask = None
# Use the EXIF orientation of photos taken by smartphones.
image = ImageOps.exif_transpose(image)
assert 0. <= denoising_strength <= 1., 'can only work with strength in [0.0, 1.0]' assert 0. <= denoising_strength <= 1., 'can only work with strength in [0.0, 1.0]'
p = StableDiffusionProcessingImg2Img( p = StableDiffusionProcessingImg2Img(

View File

@ -77,9 +77,8 @@ def get_correct_sampler(p):
class StableDiffusionProcessing(): class StableDiffusionProcessing():
""" """
The first set of paramaters: sd_models -> do_not_reload_embeddings represent the minimum required to create a StableDiffusionProcessing The first set of paramaters: sd_models -> do_not_reload_embeddings represent the minimum required to create a StableDiffusionProcessing
""" """
def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str="", styles: List[str]=None, seed: int=-1, subseed: int=-1, subseed_strength: float=0, seed_resize_from_h: int=-1, seed_resize_from_w: int=-1, seed_enable_extras: bool=True, sampler_index: int=0, batch_size: int=1, n_iter: int=1, steps:int =50, cfg_scale:float=7.0, width:int=512, height:int=512, restore_faces:bool=False, tiling:bool=False, do_not_save_samples:bool=False, do_not_save_grid:bool=False, extra_generation_params: Dict[Any,Any]=None, overlay_images: Any=None, negative_prompt: str=None, eta: float =None, do_not_reload_embeddings: bool=False, denoising_strength: float = 0, ddim_discretize: str = "uniform", s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0): def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_index: int = 0, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None):
self.sd_model = sd_model self.sd_model = sd_model
self.outpath_samples: str = outpath_samples self.outpath_samples: str = outpath_samples
self.outpath_grids: str = outpath_grids self.outpath_grids: str = outpath_grids
@ -109,13 +108,14 @@ class StableDiffusionProcessing():
self.do_not_reload_embeddings = do_not_reload_embeddings self.do_not_reload_embeddings = do_not_reload_embeddings
self.paste_to = None self.paste_to = None
self.color_corrections = None self.color_corrections = None
self.denoising_strength: float = 0 self.denoising_strength: float = denoising_strength
self.sampler_noise_scheduler_override = None self.sampler_noise_scheduler_override = None
self.ddim_discretize = opts.ddim_discretize self.ddim_discretize = ddim_discretize or opts.ddim_discretize
self.s_churn = s_churn or opts.s_churn self.s_churn = s_churn or opts.s_churn
self.s_tmin = s_tmin or opts.s_tmin self.s_tmin = s_tmin or opts.s_tmin
self.s_tmax = s_tmax or float('inf') # not representable as a standard ui option self.s_tmax = s_tmax or float('inf') # not representable as a standard ui option
self.s_noise = s_noise or opts.s_noise self.s_noise = s_noise or opts.s_noise
self.override_settings = {k: v for k, v in (override_settings or {}).items() if k not in shared.restricted_opts}
if not seed_enable_extras: if not seed_enable_extras:
self.subseed = -1 self.subseed = -1
@ -129,7 +129,6 @@ class StableDiffusionProcessing():
self.all_seeds = None self.all_seeds = None
self.all_subseeds = None self.all_subseeds = None
def init(self, all_prompts, all_seeds, all_subseeds): def init(self, all_prompts, all_seeds, all_subseeds):
pass pass
@ -351,6 +350,22 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration
def process_images(p: StableDiffusionProcessing) -> Processed: def process_images(p: StableDiffusionProcessing) -> Processed:
stored_opts = {k: opts.data[k] for k in p.override_settings.keys()}
try:
for k, v in p.override_settings.items():
opts.data[k] = v # we don't call onchange for simplicity which makes changing model, hypernet impossible
res = process_images_inner(p)
finally:
for k, v in stored_opts.items():
opts.data[k] = v
return res
def process_images_inner(p: StableDiffusionProcessing) -> Processed:
"""this is the main loop that both txt2img and img2img use; it calls func_init once inside all the scopes and func_sample once per batch""" """this is the main loop that both txt2img and img2img use; it calls func_init once inside all the scopes and func_sample once per batch"""
if type(p.prompt) == list: if type(p.prompt) == list:

View File

@ -9,15 +9,34 @@ def report_exception(c, job):
print(traceback.format_exc(), file=sys.stderr) print(traceback.format_exc(), file=sys.stderr)
class ImageSaveParams:
def __init__(self, image, p, filename, pnginfo):
self.image = image
"""the PIL image itself"""
self.p = p
"""p object with processing parameters; either StableDiffusionProcessing or an object with same fields"""
self.filename = filename
"""name of file that the image would be saved to"""
self.pnginfo = pnginfo
"""dictionary with parameters for image's PNG info data; infotext will have the key 'parameters'"""
ScriptCallback = namedtuple("ScriptCallback", ["script", "callback"]) ScriptCallback = namedtuple("ScriptCallback", ["script", "callback"])
callbacks_model_loaded = [] callbacks_model_loaded = []
callbacks_ui_tabs = [] callbacks_ui_tabs = []
callbacks_ui_settings = [] callbacks_ui_settings = []
callbacks_before_image_saved = []
callbacks_image_saved = [] callbacks_image_saved = []
def clear_callbacks(): def clear_callbacks():
callbacks_model_loaded.clear() callbacks_model_loaded.clear()
callbacks_ui_tabs.clear() callbacks_ui_tabs.clear()
callbacks_ui_settings.clear()
callbacks_before_image_saved.clear()
callbacks_image_saved.clear() callbacks_image_saved.clear()
@ -49,10 +68,18 @@ def ui_settings_callback():
report_exception(c, 'ui_settings_callback') report_exception(c, 'ui_settings_callback')
def image_saved_callback(image, p, fullfn, txt_fullfn): def before_image_saved_callback(params: ImageSaveParams):
for c in callbacks_image_saved: for c in callbacks_image_saved:
try: try:
c.callback(image, p, fullfn, txt_fullfn) c.callback(params)
except Exception:
report_exception(c, 'before_image_saved_callback')
def image_saved_callback(params: ImageSaveParams):
for c in callbacks_image_saved:
try:
c.callback(params)
except Exception: except Exception:
report_exception(c, 'image_saved_callback') report_exception(c, 'image_saved_callback')
@ -64,7 +91,6 @@ def add_callback(callbacks, fun):
callbacks.append(ScriptCallback(filename, fun)) callbacks.append(ScriptCallback(filename, fun))
def on_model_loaded(callback): def on_model_loaded(callback):
"""register a function to be called when the stable diffusion model is created; the model is """register a function to be called when the stable diffusion model is created; the model is
passed as an argument""" passed as an argument"""
@ -90,11 +116,17 @@ def on_ui_settings(callback):
add_callback(callbacks_ui_settings, callback) add_callback(callbacks_ui_settings, callback)
def on_save_imaged(callback): def on_before_image_saved(callback):
"""register a function to be called after modules.images.save_image is called. """register a function to be called before an image is saved to a file.
The callback is called with three arguments: The callback is called with one argument:
- p - procesing object (or a dummy object with same fields if the image is saved using save button) - params: ImageSaveParams - parameters the image is to be saved with. You can change fields in this object.
- fullfn - image filename """
- txt_fullfn - text file with parameters; may be None add_callback(callbacks_before_image_saved, callback)
def on_image_saved(callback):
"""register a function to be called after an image is saved to a file.
The callback is called with one argument:
- params: ImageSaveParams - parameters the image was saved with. Changing fields in this object does nothing.
""" """
add_callback(callbacks_image_saved, callback) add_callback(callbacks_image_saved, callback)

View File

@ -84,7 +84,7 @@ parser.add_argument("--ui-debug-mode", action='store_true', help="Don't load mod
parser.add_argument("--device-id", type=str, help="Select the default CUDA device to use (export CUDA_VISIBLE_DEVICES=0,1,etc might be needed before)", default=None) parser.add_argument("--device-id", type=str, help="Select the default CUDA device to use (export CUDA_VISIBLE_DEVICES=0,1,etc might be needed before)", default=None)
cmd_opts = parser.parse_args() cmd_opts = parser.parse_args()
restricted_opts = [ restricted_opts = {
"samples_filename_pattern", "samples_filename_pattern",
"directories_filename_pattern", "directories_filename_pattern",
"outdir_samples", "outdir_samples",
@ -94,7 +94,7 @@ restricted_opts = [
"outdir_grids", "outdir_grids",
"outdir_txt2img_grids", "outdir_txt2img_grids",
"outdir_save", "outdir_save",
] }
devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_swinir, devices.device_esrgan, devices.device_scunet, devices.device_codeformer = \ devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_swinir, devices.device_esrgan, devices.device_scunet, devices.device_codeformer = \
(devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'swinir', 'esrgan', 'scunet', 'codeformer']) (devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'swinir', 'esrgan', 'scunet', 'codeformer'])

View File

@ -0,0 +1,341 @@
import cv2
import requests
import os
from collections import defaultdict
from math import log, sqrt
import numpy as np
from PIL import Image, ImageDraw
# Annotation colors used when Settings.annotate_image is enabled.
GREEN = "#0F0"  # chosen crop rectangle and the final averaged focal point
BLUE = "#00F"   # centroid of corner/edge feature points
RED = "#F00"    # centroid of detected face points
def crop_image(im, settings):
    """Intelligently crop an image to the subject matter.

    The image is first scaled so that the requested crop rectangle
    (settings.crop_width x settings.crop_height) fits along the image's
    shorter dimension, then a focal point is computed from the enabled
    detectors and the crop window is centered on it, clamped back into
    the frame.

    Returns a list: the cropped image, followed by an annotated debug
    copy when settings.annotate_image is set.
    """
    scale_by = 1
    # Pick the scale factor so the crop rectangle fits inside the resized image.
    if is_landscape(im.width, im.height):
        scale_by = settings.crop_height / im.height
    elif is_portrait(im.width, im.height):
        scale_by = settings.crop_width / im.width
    elif is_square(im.width, im.height):
        # Square source: scale relative to whichever crop dimension dominates.
        if is_square(settings.crop_width, settings.crop_height):
            scale_by = settings.crop_width / im.width
        elif is_landscape(settings.crop_width, settings.crop_height):
            scale_by = settings.crop_width / im.width
        elif is_portrait(settings.crop_width, settings.crop_height):
            scale_by = settings.crop_height / im.height

    im = im.resize((int(im.width * scale_by), int(im.height * scale_by)))
    # focal_point may draw annotations, so give it the debug copy.
    im_debug = im.copy()

    focus = focal_point(im_debug, settings)

    # take the focal point and turn it into crop coordinates that try to center over the focal
    # point but then get adjusted back into the frame
    y_half = int(settings.crop_height / 2)
    x_half = int(settings.crop_width / 2)

    x1 = focus.x - x_half
    if x1 < 0:
        x1 = 0
    elif x1 + settings.crop_width > im.width:
        x1 = im.width - settings.crop_width

    y1 = focus.y - y_half
    if y1 < 0:
        y1 = 0
    elif y1 + settings.crop_height > im.height:
        y1 = im.height - settings.crop_height

    x2 = x1 + settings.crop_width
    y2 = y1 + settings.crop_height

    crop = [x1, y1, x2, y2]

    results = []

    results.append(im.crop(tuple(crop)))

    if settings.annotate_image:
        d = ImageDraw.Draw(im_debug)
        rect = list(crop)
        # Shrink right/bottom by one pixel so the outline lies inside the crop.
        rect[2] -= 1
        rect[3] -= 1
        d.rectangle(rect, outline=GREEN)
        results.append(im_debug)

    # NOTE(review): "destop" looks like a typo for "desktop"; the attribute
    # name is defined that way on Settings, so it is read as-is here.
    if settings.destop_view_image:
        im_debug.show()

    return results
def focal_point(im, settings):
    """Combine corner, entropy and face detections into a single focal point.

    Each detector whose weight is > 0 is run; every detector that found
    anything contributes its centroid, weighted by the corresponding
    settings weight normalized over the detectors that fired. When
    settings.annotate_image is set, the candidates and the result are
    drawn onto *im*.

    Returns a PointOfInterest (falls back to the image center when no
    detector produced any points).
    """
    corner_points = image_corner_points(im, settings) if settings.corner_points_weight > 0 else []
    entropy_points = image_entropy_points(im, settings) if settings.entropy_points_weight > 0 else []
    face_points = image_face_points(im, settings) if settings.face_points_weight > 0 else []

    pois = []

    # Normalize weights over only the detectors that actually found points.
    weight_pref_total = 0
    if len(corner_points) > 0:
        weight_pref_total += settings.corner_points_weight
    if len(entropy_points) > 0:
        weight_pref_total += settings.entropy_points_weight
    if len(face_points) > 0:
        weight_pref_total += settings.face_points_weight

    corner_centroid = None
    if len(corner_points) > 0:
        corner_centroid = centroid(corner_points)
        corner_centroid.weight = settings.corner_points_weight / weight_pref_total
        pois.append(corner_centroid)

    entropy_centroid = None
    if len(entropy_points) > 0:
        entropy_centroid = centroid(entropy_points)
        entropy_centroid.weight = settings.entropy_points_weight / weight_pref_total
        pois.append(entropy_centroid)

    face_centroid = None
    if len(face_points) > 0:
        face_centroid = centroid(face_points)
        face_centroid.weight = settings.face_points_weight / weight_pref_total
        pois.append(face_centroid)

    # Fix: if no detector found anything, poi_average would divide by a total
    # weight of zero. Fall back to the geometric center of the image.
    if len(pois) == 0:
        pois.append(PointOfInterest(im.width / 2, im.height / 2, weight=1.0))

    average_point = poi_average(pois, settings)

    if settings.annotate_image:
        d = ImageDraw.Draw(im)
        max_size = min(im.width, im.height) * 0.07
        if corner_centroid is not None:
            color = BLUE
            box = corner_centroid.bounding(max_size * corner_centroid.weight)
            d.text((box[0], box[1]-15), "Edge: %.02f" % corner_centroid.weight, fill=color)
            d.ellipse(box, outline=color)
            if len(corner_points) > 1:
                for f in corner_points:
                    d.rectangle(f.bounding(4), outline=color)
        if entropy_centroid is not None:
            color = "#ff0"
            box = entropy_centroid.bounding(max_size * entropy_centroid.weight)
            d.text((box[0], box[1]-15), "Entropy: %.02f" % entropy_centroid.weight, fill=color)
            d.ellipse(box, outline=color)
            if len(entropy_points) > 1:
                for f in entropy_points:
                    d.rectangle(f.bounding(4), outline=color)
        if face_centroid is not None:
            color = RED
            box = face_centroid.bounding(max_size * face_centroid.weight)
            d.text((box[0], box[1]-15), "Face: %.02f" % face_centroid.weight, fill=color)
            d.ellipse(box, outline=color)
            if len(face_points) > 1:
                for f in face_points:
                    d.rectangle(f.bounding(4), outline=color)

        d.ellipse(average_point.bounding(max_size), outline=GREEN)

    return average_point
def image_face_points(im, settings):
    """Detect faces in *im* and return them as PointOfInterest candidates.

    Uses the YuNet DNN detector when settings.dnn_model_path is set;
    otherwise falls back to trying a sequence of OpenCV haar cascades
    and returns the hits of the first cascade that matches anything.
    """
    if settings.dnn_model_path is not None:
        detector = cv2.FaceDetectorYN.create(
            settings.dnn_model_path,
            "",
            (im.width, im.height),
            0.9,  # score threshold
            0.3,  # nms threshold
            5000  # keep top k before nms
        )
        # NOTE(review): np.array(im) yields RGB channel order while OpenCV
        # models conventionally expect BGR — confirm whether an explicit
        # cv2.COLOR_RGB2BGR conversion improves detection accuracy here.
        faces = detector.detect(np.array(im))
        results = []
        if faces[1] is not None:
            for face in faces[1]:
                x = face[0]
                y = face[1]
                w = face[2]
                h = face[3]
                results.append(
                    PointOfInterest(
                        int(x + (w * 0.5)),   # face focus left/right is center
                        int(y + (h * 0.33)),  # face focus up/down is close to the top of the head
                        size=w,
                        weight=1/len(faces[1])
                    )
                )
        return results
    else:
        np_im = np.array(im)
        # Fix: PIL arrays are RGB, not BGR; COLOR_BGR2GRAY would apply the
        # red/blue luminance weights to the wrong channels.
        gray = cv2.cvtColor(np_im, cv2.COLOR_RGB2GRAY)

        tries = [
            [f'{cv2.data.haarcascades}haarcascade_eye.xml', 0.01],
            [f'{cv2.data.haarcascades}haarcascade_frontalface_default.xml', 0.05],
            [f'{cv2.data.haarcascades}haarcascade_profileface.xml', 0.05],
            [f'{cv2.data.haarcascades}haarcascade_frontalface_alt.xml', 0.05],
            [f'{cv2.data.haarcascades}haarcascade_frontalface_alt2.xml', 0.05],
            [f'{cv2.data.haarcascades}haarcascade_frontalface_alt_tree.xml', 0.05],
            [f'{cv2.data.haarcascades}haarcascade_eye_tree_eyeglasses.xml', 0.05],
            [f'{cv2.data.haarcascades}haarcascade_upperbody.xml', 0.05]
        ]

        for t in tries:
            classifier = cv2.CascadeClassifier(t[0])
            minsize = int(min(im.width, im.height) * t[1])  # at least N percent of the smallest side
            try:
                faces = classifier.detectMultiScale(gray, scaleFactor=1.1,
                                                    minNeighbors=7, minSize=(minsize, minsize),
                                                    flags=cv2.CASCADE_SCALE_IMAGE)
            except cv2.error:
                # Fix: was a bare `except:` that swallowed everything,
                # including KeyboardInterrupt and programming errors.
                continue

            if len(faces) > 0:
                rects = [[f[0], f[1], f[0] + f[2], f[1] + f[3]] for f in faces]
                return [PointOfInterest((r[0] + r[2]) // 2, (r[1] + r[3]) // 2, size=abs(r[0] - r[2]), weight=1/len(rects)) for r in rects]
    return []
def image_corner_points(im, settings):
    """Return strong corner features of *im* as PointOfInterest candidates.

    The bottom 10% of the image is flattened to gray first, as a naive
    attempt at preventing focal points from collecting at watermarks
    near the bottom. Each point gets equal weight 1/N.
    """
    grayscale = im.convert("L")

    draw = ImageDraw.Draw(grayscale)
    draw.rectangle([0, im.height*.9, im.width, im.height], fill="#999")

    points = cv2.goodFeaturesToTrack(
        np.array(grayscale),
        maxCorners=100,
        qualityLevel=0.04,
        minDistance=min(grayscale.width, grayscale.height)*0.06,
        useHarrisDetector=False,
    )

    if points is None:
        return []

    shared_weight = 1/len(points)
    return [
        PointOfInterest(px, py, size=4, weight=shared_weight)
        for px, py in (pt.ravel() for pt in points)
    ]
def image_entropy_points(im, settings):
    """Slide a crop-sized window along the image's long axis and return the
    center of the highest-entropy position as a single PointOfInterest.

    Square images get no entropy point (returns an empty list).
    """
    if im.height < im.width:
        # landscape: slide the window horizontally (indices 0/2 of the box)
        lo_idx, hi_idx = 0, 2
        limit = im.size[0]
    elif im.height > im.width:
        # portrait: slide the window vertically (indices 1/3 of the box)
        lo_idx, hi_idx = 1, 3
        limit = im.size[1]
    else:
        return []

    window = [0, 0, settings.crop_width, settings.crop_height]
    # Deliberately an alias: if no window ever scores > 0, `best` tracks the
    # mutated window and ends at the final position (matches prior behavior).
    best = window
    best_score = 0
    while window[hi_idx] < limit:
        score = image_entropy(im.crop(tuple(window)))
        if score > best_score:
            best_score = score
            best = list(window)

        window[lo_idx] += 4
        window[hi_idx] += 4

    center_x = int(best[0] + settings.crop_width/2)
    center_y = int(best[1] + settings.crop_height/2)
    return [PointOfInterest(center_x, center_y, size=25, weight=1.0)]
def image_entropy(im):
    """Score how 'busy' *im* is from its 1-bit histogram.

    Computes the sum of -log2(p) over occupied histogram bins. Note this
    is not Shannon entropy (terms are not probability-weighted); it only
    needs to rank windows relative to each other.
    """
    levels = np.asarray(im.convert("1"), dtype=np.uint8)
    counts, _ = np.histogram(levels, bins=range(0, 256))
    occupied = counts[counts > 0]
    return -np.log2(occupied / occupied.sum()).sum()
def centroid(pois):
    """Return a PointOfInterest at the arithmetic mean position of *pois*."""
    count = len(pois)
    mean_x = sum(poi.x for poi in pois) / count
    mean_y = sum(poi.y for poi in pois) / count
    return PointOfInterest(mean_x, mean_y)
def poi_average(pois, settings):
    """Return the weight-averaged position of *pois*, rounded to integers.

    *settings* is accepted for call-site symmetry with the other point
    functions but is not read here.
    """
    total_weight = sum(poi.weight for poi in pois)
    weighted_x = sum(poi.x * poi.weight for poi in pois)
    weighted_y = sum(poi.y * poi.weight for poi in pois)
    return PointOfInterest(round(weighted_x / total_weight), round(weighted_y / total_weight))
def is_landscape(w, h):
    """True when width exceeds height."""
    return h < w
def is_portrait(w, h):
    """True when height exceeds width."""
    return w < h
def is_square(w, h):
    """True when width and height are equal."""
    return h == w
def download_and_cache_models(dirname):
    """Download the YuNet face-detection ONNX model into *dirname* (once).

    Returns the path to the cached model file, or None when the file is
    missing. Raises requests.HTTPError on a failed download (callers wrap
    this in try/except and fall back to haar cascades).
    """
    download_url = 'https://github.com/opencv/opencv_zoo/blob/91fb0290f50896f38a0ab1e558b74b16bc009428/models/face_detection_yunet/face_detection_yunet_2022mar.onnx?raw=true'
    model_file_name = 'face_detection_yunet.onnx'

    # Fix: exist_ok avoids the exists()/makedirs() race between processes.
    os.makedirs(dirname, exist_ok=True)

    cache_file = os.path.join(dirname, model_file_name)
    if not os.path.exists(cache_file):
        print(f"downloading face detection model from '{download_url}' to '{cache_file}'")
        response = requests.get(download_url)
        # Fix: previously a failed response (e.g. an HTML error page) was
        # written to disk and cached forever as the "model". Fail loudly.
        response.raise_for_status()
        with open(cache_file, "wb") as f:
            f.write(response.content)

    if os.path.exists(cache_file):
        return cache_file
    return None
class PointOfInterest:
    """A weighted 2D point with a nominal size, used as a focal-point candidate."""

    def __init__(self, x, y, weight=1.0, size=10):
        self.x = x
        self.y = y
        self.weight = weight
        self.size = size

    def bounding(self, size):
        """Return [left, top, right, bottom] of a box of side *size* centered here."""
        half = size // 2
        return [self.x - half, self.y - half, self.x + half, self.y + half]
class Settings:
    # Tunable parameters for crop_image() / focal_point().
    def __init__(self, crop_width=512, crop_height=512, corner_points_weight=0.5, entropy_points_weight=0.5, face_points_weight=0.5, annotate_image=False, dnn_model_path=None):
        self.crop_width = crop_width    # target crop width in pixels
        self.crop_height = crop_height  # target crop height in pixels
        self.corner_points_weight = corner_points_weight    # relative weight of the corner-feature centroid
        self.entropy_points_weight = entropy_points_weight  # relative weight of the entropy-window centroid
        self.face_points_weight = face_points_weight        # relative weight of the face centroid
        self.annotate_image = annotate_image  # if True, crop_image also returns an annotated debug copy
        # NOTE(review): "destop" looks like a typo for "desktop"; kept as-is
        # because crop_image reads settings.destop_view_image by this name.
        self.destop_view_image = False  # if True, crop_image shows the debug image on screen
        self.dnn_model_path = dnn_model_path  # path to YuNet ONNX model; None falls back to haar cascades

View File

@ -7,12 +7,14 @@ import tqdm
import time import time
from modules import shared, images from modules import shared, images
from modules.paths import models_path
from modules.shared import opts, cmd_opts from modules.shared import opts, cmd_opts
from modules.textual_inversion import autocrop
if cmd_opts.deepdanbooru: if cmd_opts.deepdanbooru:
import modules.deepbooru as deepbooru import modules.deepbooru as deepbooru
def preprocess(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2): def preprocess(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2, process_focal_crop=False, process_focal_crop_face_weight=0.9, process_focal_crop_entropy_weight=0.3, process_focal_crop_edges_weight=0.5, process_focal_crop_debug=False):
try: try:
if process_caption: if process_caption:
shared.interrogator.load() shared.interrogator.load()
@ -22,7 +24,7 @@ def preprocess(process_src, process_dst, process_width, process_height, preproce
db_opts[deepbooru.OPT_INCLUDE_RANKS] = False db_opts[deepbooru.OPT_INCLUDE_RANKS] = False
deepbooru.create_deepbooru_process(opts.interrogate_deepbooru_score_threshold, db_opts) deepbooru.create_deepbooru_process(opts.interrogate_deepbooru_score_threshold, db_opts)
preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_flip, process_split, process_caption, process_caption_deepbooru, split_threshold, overlap_ratio) preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_flip, process_split, process_caption, process_caption_deepbooru, split_threshold, overlap_ratio, process_focal_crop, process_focal_crop_face_weight, process_focal_crop_entropy_weight, process_focal_crop_edges_weight, process_focal_crop_debug)
finally: finally:
@ -34,7 +36,7 @@ def preprocess(process_src, process_dst, process_width, process_height, preproce
def preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2): def preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2, process_focal_crop=False, process_focal_crop_face_weight=0.9, process_focal_crop_entropy_weight=0.3, process_focal_crop_edges_weight=0.5, process_focal_crop_debug=False):
width = process_width width = process_width
height = process_height height = process_height
src = os.path.abspath(process_src) src = os.path.abspath(process_src)
@ -113,6 +115,7 @@ def preprocess_work(process_src, process_dst, process_width, process_height, pre
splitted = image.crop((0, y, to_w, y + to_h)) splitted = image.crop((0, y, to_w, y + to_h))
yield splitted yield splitted
for index, imagefile in enumerate(tqdm.tqdm(files)): for index, imagefile in enumerate(tqdm.tqdm(files)):
subindex = [0] subindex = [0]
filename = os.path.join(src, imagefile) filename = os.path.join(src, imagefile)
@ -137,11 +140,36 @@ def preprocess_work(process_src, process_dst, process_width, process_height, pre
ratio = (img.height * width) / (img.width * height) ratio = (img.height * width) / (img.width * height)
inverse_xy = True inverse_xy = True
process_default_resize = True
if process_split and ratio < 1.0 and ratio <= split_threshold: if process_split and ratio < 1.0 and ratio <= split_threshold:
for splitted in split_pic(img, inverse_xy): for splitted in split_pic(img, inverse_xy):
save_pic(splitted, index, existing_caption=existing_caption) save_pic(splitted, index, existing_caption=existing_caption)
else: process_default_resize = False
if process_focal_crop and img.height != img.width:
dnn_model_path = None
try:
dnn_model_path = autocrop.download_and_cache_models(os.path.join(models_path, "opencv"))
except Exception as e:
print("Unable to load face detection model for auto crop selection. Falling back to lower quality haar method.", e)
autocrop_settings = autocrop.Settings(
crop_width = width,
crop_height = height,
face_points_weight = process_focal_crop_face_weight,
entropy_points_weight = process_focal_crop_entropy_weight,
corner_points_weight = process_focal_crop_edges_weight,
annotate_image = process_focal_crop_debug,
dnn_model_path = dnn_model_path,
)
for focal in autocrop.crop_image(img, autocrop_settings):
save_pic(focal, index, existing_caption=existing_caption)
process_default_resize = False
if process_default_resize:
img = images.resize_image(1, img, width, height) img = images.resize_image(1, img, width, height)
save_pic(img, index, existing_caption=existing_caption) save_pic(img, index, existing_caption=existing_caption)
shared.state.nextjob() shared.state.nextjob()

View File

@ -10,7 +10,7 @@ import csv
from PIL import Image, PngImagePlugin from PIL import Image, PngImagePlugin
from modules import shared, devices, sd_hijack, processing, sd_models from modules import shared, devices, sd_hijack, processing, sd_models, images
import modules.textual_inversion.dataset import modules.textual_inversion.dataset
from modules.textual_inversion.learn_schedule import LearnRateScheduler from modules.textual_inversion.learn_schedule import LearnRateScheduler
@ -157,6 +157,9 @@ def create_embedding(name, num_vectors_per_token, overwrite_old, init_text='*'):
cond_model = shared.sd_model.cond_stage_model cond_model = shared.sd_model.cond_stage_model
embedding_layer = cond_model.wrapped.transformer.text_model.embeddings embedding_layer = cond_model.wrapped.transformer.text_model.embeddings
with devices.autocast():
cond_model([""]) # will send cond model to GPU if lowvram/medvram is active
ids = cond_model.tokenizer(init_text, max_length=num_vectors_per_token, return_tensors="pt", add_special_tokens=False)["input_ids"] ids = cond_model.tokenizer(init_text, max_length=num_vectors_per_token, return_tensors="pt", add_special_tokens=False)["input_ids"]
embedded = embedding_layer.token_embedding.wrapped(ids.to(devices.device)).squeeze(0) embedded = embedding_layer.token_embedding.wrapped(ids.to(devices.device)).squeeze(0)
vec = torch.zeros((num_vectors_per_token, embedded.shape[1]), device=devices.device) vec = torch.zeros((num_vectors_per_token, embedded.shape[1]), device=devices.device)
@ -164,6 +167,8 @@ def create_embedding(name, num_vectors_per_token, overwrite_old, init_text='*'):
for i in range(num_vectors_per_token): for i in range(num_vectors_per_token):
vec[i] = embedded[i * int(embedded.shape[0]) // num_vectors_per_token] vec[i] = embedded[i * int(embedded.shape[0]) // num_vectors_per_token]
# Remove illegal characters from name.
name = "".join( x for x in name if (x.isalnum() or x in "._- "))
fn = os.path.join(shared.cmd_opts.embeddings_dir, f"{name}.pt") fn = os.path.join(shared.cmd_opts.embeddings_dir, f"{name}.pt")
if not overwrite_old: if not overwrite_old:
assert not os.path.exists(fn), f"file {fn} already exists" assert not os.path.exists(fn), f"file {fn} already exists"
@ -244,6 +249,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
last_saved_file = "<none>" last_saved_file = "<none>"
last_saved_image = "<none>" last_saved_image = "<none>"
forced_filename = "<none>"
embedding_yet_to_be_embedded = False embedding_yet_to_be_embedded = False
ititial_step = embedding.step or 0 ititial_step = embedding.step or 0
@ -283,7 +289,9 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
pbar.set_description(f"[Epoch {epoch_num}: {epoch_step}/{len(ds)}]loss: {losses.mean():.7f}") pbar.set_description(f"[Epoch {epoch_num}: {epoch_step}/{len(ds)}]loss: {losses.mean():.7f}")
if embedding.step > 0 and embedding_dir is not None and embedding.step % save_embedding_every == 0: if embedding.step > 0 and embedding_dir is not None and embedding.step % save_embedding_every == 0:
last_saved_file = os.path.join(embedding_dir, f'{embedding_name}-{embedding.step}.pt') # Before saving, change name to match current checkpoint.
embedding.name = f'{embedding_name}-{embedding.step}'
last_saved_file = os.path.join(embedding_dir, f'{embedding.name}.pt')
embedding.save(last_saved_file) embedding.save(last_saved_file)
embedding_yet_to_be_embedded = True embedding_yet_to_be_embedded = True
@ -293,8 +301,8 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
}) })
if embedding.step > 0 and images_dir is not None and embedding.step % create_image_every == 0: if embedding.step > 0 and images_dir is not None and embedding.step % create_image_every == 0:
last_saved_image = os.path.join(images_dir, f'{embedding_name}-{embedding.step}.png') forced_filename = f'{embedding_name}-{embedding.step}'
last_saved_image = os.path.join(images_dir, forced_filename)
p = processing.StableDiffusionProcessingTxt2Img( p = processing.StableDiffusionProcessingTxt2Img(
sd_model=shared.sd_model, sd_model=shared.sd_model,
do_not_save_grid=True, do_not_save_grid=True,
@ -350,8 +358,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
captioned_image.save(last_saved_image_chunks, "PNG", pnginfo=info) captioned_image.save(last_saved_image_chunks, "PNG", pnginfo=info)
embedding_yet_to_be_embedded = False embedding_yet_to_be_embedded = False
image.save(last_saved_image) last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False)
last_saved_image += f", prompt: {preview_text}" last_saved_image += f", prompt: {preview_text}"
shared.state.job_no = embedding.step shared.state.job_no = embedding.step
@ -371,6 +378,9 @@ Last saved image: {html.escape(last_saved_image)}<br/>
embedding.sd_checkpoint = checkpoint.hash embedding.sd_checkpoint = checkpoint.hash
embedding.sd_checkpoint_name = checkpoint.model_name embedding.sd_checkpoint_name = checkpoint.model_name
embedding.cached_checksum = None embedding.cached_checksum = None
# Before saving for the last time, change name back to base name (as opposed to the save_embedding_every step-suffixed naming convention).
embedding.name = embedding_name
filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding.name}.pt')
embedding.save(filename) embedding.save(filename)
return embedding, filename return embedding, filename

View File

@ -1238,7 +1238,8 @@ def create_ui(wrap_gradio_gpu_call):
new_hypernetwork_name = gr.Textbox(label="Name") new_hypernetwork_name = gr.Textbox(label="Name")
new_hypernetwork_sizes = gr.CheckboxGroup(label="Modules", value=["768", "320", "640", "1280"], choices=["768", "320", "640", "1280"]) new_hypernetwork_sizes = gr.CheckboxGroup(label="Modules", value=["768", "320", "640", "1280"], choices=["768", "320", "640", "1280"])
new_hypernetwork_layer_structure = gr.Textbox("1, 2, 1", label="Enter hypernetwork layer structure", placeholder="1st and last digit must be 1. ex:'1, 2, 1'") new_hypernetwork_layer_structure = gr.Textbox("1, 2, 1", label="Enter hypernetwork layer structure", placeholder="1st and last digit must be 1. ex:'1, 2, 1'")
new_hypernetwork_activation_func = gr.Dropdown(value="relu", label="Select activation function of hypernetwork", choices=["linear", "relu", "leakyrelu", "elu", "swish"]) new_hypernetwork_activation_func = gr.Dropdown(value="relu", label="Select activation function of hypernetwork", choices=modules.hypernetworks.ui.keys)
new_hypernetwork_initialization_option = gr.Dropdown(value = "Normal", label="Select Layer weights initialization. relu-like - Kaiming, sigmoid-like - Xavier is recommended", choices=["Normal", "KaimingUniform", "KaimingNormal", "XavierUniform", "XavierNormal"])
new_hypernetwork_add_layer_norm = gr.Checkbox(label="Add layer normalization") new_hypernetwork_add_layer_norm = gr.Checkbox(label="Add layer normalization")
new_hypernetwork_use_dropout = gr.Checkbox(label="Use dropout") new_hypernetwork_use_dropout = gr.Checkbox(label="Use dropout")
overwrite_old_hypernetwork = gr.Checkbox(value=False, label="Overwrite Old Hypernetwork") overwrite_old_hypernetwork = gr.Checkbox(value=False, label="Overwrite Old Hypernetwork")
@ -1260,6 +1261,7 @@ def create_ui(wrap_gradio_gpu_call):
with gr.Row(): with gr.Row():
process_flip = gr.Checkbox(label='Create flipped copies') process_flip = gr.Checkbox(label='Create flipped copies')
process_split = gr.Checkbox(label='Split oversized images') process_split = gr.Checkbox(label='Split oversized images')
process_focal_crop = gr.Checkbox(label='Auto focal point crop')
process_caption = gr.Checkbox(label='Use BLIP for caption') process_caption = gr.Checkbox(label='Use BLIP for caption')
process_caption_deepbooru = gr.Checkbox(label='Use deepbooru for caption', visible=True if cmd_opts.deepdanbooru else False) process_caption_deepbooru = gr.Checkbox(label='Use deepbooru for caption', visible=True if cmd_opts.deepdanbooru else False)
@ -1267,6 +1269,12 @@ def create_ui(wrap_gradio_gpu_call):
process_split_threshold = gr.Slider(label='Split image threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05) process_split_threshold = gr.Slider(label='Split image threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05)
process_overlap_ratio = gr.Slider(label='Split image overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05) process_overlap_ratio = gr.Slider(label='Split image overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05)
with gr.Row(visible=False) as process_focal_crop_row:
process_focal_crop_face_weight = gr.Slider(label='Focal point face weight', value=0.9, minimum=0.0, maximum=1.0, step=0.05)
process_focal_crop_entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05)
process_focal_crop_edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05)
process_focal_crop_debug = gr.Checkbox(label='Create debug image')
with gr.Row(): with gr.Row():
with gr.Column(scale=3): with gr.Column(scale=3):
gr.HTML(value="") gr.HTML(value="")
@ -1280,6 +1288,12 @@ def create_ui(wrap_gradio_gpu_call):
outputs=[process_split_extra_row], outputs=[process_split_extra_row],
) )
process_focal_crop.change(
fn=lambda show: gr_show(show),
inputs=[process_focal_crop],
outputs=[process_focal_crop_row],
)
with gr.Tab(label="Train"): with gr.Tab(label="Train"):
gr.HTML(value="<p style='margin-bottom: 0.7em'>Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images <a href=\"https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Textual-Inversion\" style=\"font-weight:bold;\">[wiki]</a></p>") gr.HTML(value="<p style='margin-bottom: 0.7em'>Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images <a href=\"https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Textual-Inversion\" style=\"font-weight:bold;\">[wiki]</a></p>")
with gr.Row(): with gr.Row():
@ -1342,6 +1356,7 @@ def create_ui(wrap_gradio_gpu_call):
overwrite_old_hypernetwork, overwrite_old_hypernetwork,
new_hypernetwork_layer_structure, new_hypernetwork_layer_structure,
new_hypernetwork_activation_func, new_hypernetwork_activation_func,
new_hypernetwork_initialization_option,
new_hypernetwork_add_layer_norm, new_hypernetwork_add_layer_norm,
new_hypernetwork_use_dropout new_hypernetwork_use_dropout
], ],
@ -1367,6 +1382,11 @@ def create_ui(wrap_gradio_gpu_call):
process_caption_deepbooru, process_caption_deepbooru,
process_split_threshold, process_split_threshold,
process_overlap_ratio, process_overlap_ratio,
process_focal_crop,
process_focal_crop_face_weight,
process_focal_crop_entropy_weight,
process_focal_crop_edges_weight,
process_focal_crop_debug,
], ],
outputs=[ outputs=[
ti_output, ti_output,

View File

@ -8,6 +8,8 @@ gradio==3.5
invisible-watermark invisible-watermark
numpy numpy
omegaconf omegaconf
opencv-python
requests
piexif piexif
Pillow Pillow
pytorch_lightning pytorch_lightning

View File

@ -1,6 +1,7 @@
import copy import copy
import math import math
import os import os
import random
import sys import sys
import traceback import traceback
import shlex import shlex
@ -81,32 +82,34 @@ def cmdargs(line):
return res return res
def load_prompt_file(file):
if (file is None):
lines = []
else:
lines = [x.strip() for x in file.decode('utf8', errors='ignore').split("\n")]
return None, "\n".join(lines), gr.update(lines=7)
class Script(scripts.Script): class Script(scripts.Script):
def title(self): def title(self):
return "Prompts from file or textbox" return "Prompts from file or textbox"
def ui(self, is_img2img): def ui(self, is_img2img):
# This checkbox would look nicer as two tabs, but there are two problems: checkbox_iterate = gr.Checkbox(label="Iterate seed every line", value=False)
# 1) There is a bug in Gradio 3.3 that prevents visibility from working on Tabs
# 2) Even with Gradio 3.3.1, returning a control (like Tabs) that can't be used as input
# causes a AttributeError: 'Tabs' object has no attribute 'preprocess' assert,
# due to the way Script assumes all controls returned can be used as inputs.
# Therefore, there's no good way to use grouping components right now,
# so we will use a checkbox! :)
checkbox_txt = gr.Checkbox(label="Show Textbox", value=False)
file = gr.File(label="File with inputs", type='bytes')
prompt_txt = gr.TextArea(label="Prompts")
checkbox_txt.change(fn=lambda x: [gr.File.update(visible = not x), gr.TextArea.update(visible = x)], inputs=[checkbox_txt], outputs=[file, prompt_txt])
return [checkbox_txt, file, prompt_txt]
def on_show(self, checkbox_txt, file, prompt_txt): prompt_txt = gr.Textbox(label="List of prompt inputs", lines=1)
return [ gr.Checkbox.update(visible = True), gr.File.update(visible = not checkbox_txt), gr.TextArea.update(visible = checkbox_txt) ] file = gr.File(label="Upload prompt inputs", type='bytes')
def run(self, p, checkbox_txt, data: bytes, prompt_txt: str): file.change(fn=load_prompt_file, inputs=[file], outputs=[file, prompt_txt, prompt_txt])
if checkbox_txt:
lines = [x.strip() for x in prompt_txt.splitlines()] # We start at one line. When the text changes, we jump to seven lines, or two lines if no \n.
else: # We don't shrink back to 1, because that causes the control to ignore [enter], and it may
lines = [x.strip() for x in data.decode('utf8', errors='ignore').split("\n")] # be unclear to the user that shift-enter is needed.
prompt_txt.change(lambda tb: gr.update(lines=7) if ("\n" in tb) else gr.update(lines=2), inputs=[prompt_txt], outputs=[prompt_txt])
return [checkbox_iterate, file, prompt_txt]
def run(self, p, checkbox_iterate, file, prompt_txt: str):
lines = [x.strip() for x in prompt_txt.splitlines()]
lines = [x for x in lines if len(x) > 0] lines = [x for x in lines if len(x) > 0]
p.do_not_save_grid = True p.do_not_save_grid = True
@ -134,6 +137,9 @@ class Script(scripts.Script):
jobs.append(args) jobs.append(args)
print(f"Will process {len(lines)} lines in {job_count} jobs.") print(f"Will process {len(lines)} lines in {job_count} jobs.")
if (checkbox_iterate and p.seed == -1):
p.seed = int(random.randrange(4294967294))
state.job_count = job_count state.job_count = job_count
images = [] images = []
@ -146,5 +152,9 @@ class Script(scripts.Script):
proc = process_images(copy_p) proc = process_images(copy_p)
images += proc.images images += proc.images
if (checkbox_iterate):
p.seed = p.seed + (p.batch_size * p.n_iter)
return Processed(p, images, p.seed, "")
return Processed(p, images, p.seed, "")