Merge pull request #15632 from brendanhoar/bgh-handle-metadata-issues-more-cleanly
QOL Items - handle metadata issues more cleanly for SD models, Loras and embeddings
This commit is contained in:
commit 6d8d2723a0
@@ -21,10 +21,12 @@ re_comma = re.compile(r" *, *")
 def build_tags(metadata):
     tags = {}
 
-    for _, tags_dict in metadata.get("ss_tag_frequency", {}).items():
-        for tag, tag_count in tags_dict.items():
-            tag = tag.strip()
-            tags[tag] = tags.get(tag, 0) + int(tag_count)
+    ss_tag_frequency = metadata.get("ss_tag_frequency", {})
+    if ss_tag_frequency is not None and hasattr(ss_tag_frequency, 'items'):
+        for _, tags_dict in ss_tag_frequency.items():
+            for tag, tag_count in tags_dict.items():
+                tag = tag.strip()
+                tags[tag] = tags.get(tag, 0) + int(tag_count)
 
     if tags and is_non_comma_tagset(tags):
         new_tags = {}
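The guard above covers Lora files whose header declares ss_tag_frequency but stores null (or a non-mapping value) in it; previously metadata.get("ss_tag_frequency", {}).items() raised AttributeError and the tag list could not be built. A minimal stand-alone sketch of the same pattern (count_tags is an illustrative name, not the extension's build_tags):

import json  # only so the sketch mirrors metadata that came from a JSON header

def count_tags(metadata):
    tags = {}
    ss_tag_frequency = metadata.get("ss_tag_frequency", {})
    # a header can carry ss_tag_frequency as null (None here) or as a non-mapping
    # value; checking for .items() skips both instead of raising AttributeError
    if ss_tag_frequency is not None and hasattr(ss_tag_frequency, 'items'):
        for _, tags_dict in ss_tag_frequency.items():
            for tag, tag_count in tags_dict.items():
                tag = tag.strip()
                tags[tag] = tags.get(tag, 0) + int(tag_count)
    return tags

print(count_tags({"ss_tag_frequency": None}))                      # {} instead of a crash
print(count_tags({"ss_tag_frequency": {"set": {"1girl": "3"}}}))   # {'1girl': 3}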
@@ -282,17 +282,21 @@ def read_metadata_from_safetensors(filename):
         json_start = file.read(2)
 
         assert metadata_len > 2 and json_start in (b'{"', b"{'"), f"{filename} is not a safetensors file"
-        json_data = json_start + file.read(metadata_len-2)
-        json_obj = json.loads(json_data)
 
         res = {}
-        for k, v in json_obj.get("__metadata__", {}).items():
-            res[k] = v
-            if isinstance(v, str) and v[0:1] == '{':
-                try:
-                    res[k] = json.loads(v)
-                except Exception:
-                    pass
+
+        try:
+            json_data = json_start + file.read(metadata_len-2)
+            json_obj = json.loads(json_data)
+            for k, v in json_obj.get("__metadata__", {}).items():
+                res[k] = v
+                if isinstance(v, str) and v[0:1] == '{':
+                    try:
+                        res[k] = json.loads(v)
+                    except Exception:
+                        pass
+        except Exception:
+            errors.report(f"Error reading metadata from file: {filename}", exc_info=True)
 
         return res
 
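With the parse wrapped in try/except, a model whose safetensors header is present but malformed now yields an empty metadata dict plus an error report instead of an exception propagating to whatever was listing the model. A self-contained sketch of that behaviour under an assumed truncated header (read_header and the print call are illustrative; the webui reports through errors.report, and keeps the per-key JSON decoding shown in the diff):

import io
import json

def read_header(file):
    metadata_len = int.from_bytes(file.read(8), "little")
    json_start = file.read(2)
    assert metadata_len > 2 and json_start in (b'{"', b"{'"), "not a safetensors file"

    res = {}
    try:
        json_obj = json.loads(json_start + file.read(metadata_len - 2))
        res = json_obj.get("__metadata__", {})
    except Exception:
        print("Error reading metadata")  # the webui calls errors.report(..., exc_info=True)
    return res

good = b'{"__metadata__": {"ss_network_dim": "32"}}'
bad = good[:-5]  # same declared layout, but the JSON is cut short

for payload in (good, bad):
    header = len(payload).to_bytes(8, "little") + payload
    print(read_header(io.BytesIO(header)))
# -> {'ss_network_dim': '32'} for the good header, {} plus a log line for the bad one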
@@ -181,12 +181,16 @@ class EmbeddingDatabase:
         else:
             return
 
-        embedding = create_embedding_from_data(data, name, filename=filename, filepath=path)
+        if data is not None:
+            embedding = create_embedding_from_data(data, name, filename=filename, filepath=path)
 
-        if self.expected_shape == -1 or self.expected_shape == embedding.shape:
-            self.register_embedding(embedding, shared.sd_model)
-        else:
-            self.skipped_embeddings[name] = embedding
+            if self.expected_shape == -1 or self.expected_shape == embedding.shape:
+                self.register_embedding(embedding, shared.sd_model)
+            else:
+                self.skipped_embeddings[name] = embedding
+        else:
+            print(f"Unable to load Textual inversion embedding due to data issue: '{name}'.")
+
 
     def load_from_dir(self, embdir):
         if not os.path.isdir(embdir.path):
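Here load_from_file only builds the embedding when the loaders actually produced data; a file that parses to None is skipped with a console message instead of falling through to create_embedding_from_data with data=None. An illustrative stand-alone sketch of the new control flow (Embedding, make_embedding and the dict-based data format are simplified stand-ins, not the webui's API):

class Embedding:
    def __init__(self, vec, name):
        self.shape = len(vec[0])
        self.name = name

def make_embedding(data, name):
    return Embedding(data["vec"], name)

def load(data, name, expected_shape, registered, skipped):
    if data is not None:
        embedding = make_embedding(data, name)
        if expected_shape == -1 or expected_shape == embedding.shape:
            registered[name] = embedding      # stands in for self.register_embedding(...)
        else:
            skipped[name] = embedding         # stands in for self.skipped_embeddings[name] = ...
    else:
        # the old code passed data straight into create_embedding_from_data here
        print(f"Unable to load Textual inversion embedding due to data issue: '{name}'.")

registered, skipped = {}, {}
load({"vec": [[0.1, 0.2, 0.3]]}, "good-embedding", -1, registered, skipped)
load(None, "corrupt-embedding", -1, registered, skipped)
print(sorted(registered), sorted(skipped))    # ['good-embedding'] []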