Fix bug in optimizer decay/warmup step defaulting
parent e95a8861e9
commit 20a9b3254f
@@ -257,8 +257,8 @@ class EveryDreamOptimizer():
             lr_scheduler = get_scheduler(
                 te_config.get("lr_scheduler", args.lr_scheduler),
                 optimizer=self.optimizer_te,
-                num_warmup_steps=int(te_config.get("lr_warmup_steps", None)) or unet_config["lr_warmup_steps"],
-                num_training_steps=int(te_config.get("lr_decay_steps", None)) or unet_config["lr_decay_steps"]
+                num_warmup_steps=int(te_config.get("lr_warmup_steps", None) or unet_config["lr_warmup_steps"]),
+                num_training_steps=int(te_config.get("lr_decay_steps", None) or unet_config["lr_decay_steps"])
             )
             ret_val.append(lr_scheduler)
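The fix above moves the `or` fallback inside the `int()` call. In the old form, te_config.get("lr_warmup_steps", None) returns None whenever the text-encoder config has no override, and int(None) raises TypeError before the `or unet_config[...]` fallback is ever evaluated; resolving the fallback first and converting afterwards makes the defaulting actually work. A minimal sketch of the difference, using stand-in dicts in place of the real config objects:

    te_config = {}                          # text-encoder config: no override set
    unet_config = {"lr_warmup_steps": 200}  # unet config supplies the fallback

    # Old form: int(None) raises before the fallback can apply.
    try:
        warmup = int(te_config.get("lr_warmup_steps", None)) or unet_config["lr_warmup_steps"]
    except TypeError as err:
        print(f"old form fails: {err}")

    # New form: resolve the fallback first, then convert.
    warmup = int(te_config.get("lr_warmup_steps", None) or unet_config["lr_warmup_steps"])
    print(f"new form: {warmup}")  # -> 200

One caveat the fix inherits: `or` tests truthiness, not key presence, so an explicit override of 0 in te_config would still fall through to the unet value.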
@@ -453,10 +453,6 @@ class EveryDreamOptimizer():
                 growth_rate=growth_rate,
             )

-        else:
-            import bitsandbytes as bnb
-            opt_class = bnb.optim.AdamW8bit
-
         if not optimizer:
             optimizer = opt_class(
                 itertools.chain(parameters),
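The second hunk deletes an implicit fallback: when no explicit optimizer branch had matched, the code silently imported bitsandbytes and defaulted opt_class to AdamW8bit. For reference, a sketch of what the removed branch amounted to, assuming torch and bitsandbytes are installed (the Linear module is a hypothetical stand-in for the real trained parameters):

    import itertools

    import bitsandbytes as bnb  # the removed branch imported this lazily
    import torch

    model = torch.nn.Linear(16, 16)  # stand-in; the real code passes unet/te params

    # Equivalent of the removed else-branch: silently default to 8-bit AdamW.
    opt_class = bnb.optim.AdamW8bit
    optimizer = opt_class(
        itertools.chain(model.parameters()),  # chain can join several param iterables
        lr=1e-6,
    )

With the fallback gone, opt_class must be set by one of the explicit branches before the `if not optimizer:` construction runs, so an unrecognized optimizer name should now fail loudly instead of silently training with AdamW8bit.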