`trained_betas` ignored in some schedulers (#635)

* correcting the beta value assignment

* updating DDIM and LMSDiscreteFlax schedulers

* bringing back the changes that were lost as part of the main branch merge
commit 3dacbb94ca (parent f10576ad5c)
Author: V Vishnu Anirudh
Date: 2022-09-29 18:21:04 +01:00 (committed by GitHub)
5 changed files with 5 additions and 5 deletions
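
The bug is a control-flow fall-through: each scheduler first assigned `self.betas` from `trained_betas`, then re-tested `beta_schedule` (which defaults to `"linear"`) in an independent `if` and overwrote the assignment. Below is a minimal, self-contained sketch of the before/after behavior; `betas_buggy`, `betas_fixed`, and `steps` are illustrative names for this sketch, not code from the repository:

```python
import numpy as np
import torch

def betas_buggy(beta_schedule="linear", trained_betas=None,
                beta_start=1e-4, beta_end=2e-2, steps=10):
    # Pre-fix control flow: two independent `if` statements.
    if trained_betas is not None:
        betas = torch.from_numpy(trained_betas)
    if beta_schedule == "linear":  # runs even when trained_betas was given...
        betas = torch.linspace(beta_start, beta_end, steps, dtype=torch.float32)
    return betas  # ...so the user-supplied betas were silently clobbered

def betas_fixed(beta_schedule="linear", trained_betas=None,
                beta_start=1e-4, beta_end=2e-2, steps=10):
    # Post-fix control flow: `elif` makes the branches mutually exclusive,
    # so user-supplied betas take precedence over the named schedule.
    if trained_betas is not None:
        betas = torch.from_numpy(trained_betas)
    elif beta_schedule == "linear":
        betas = torch.linspace(beta_start, beta_end, steps, dtype=torch.float32)
    return betas

custom = np.full(10, 0.01, dtype=np.float32)
print(betas_buggy(trained_betas=custom)[0].item())  # 0.0001: custom betas ignored
print(betas_fixed(trained_betas=custom)[0].item())  # 0.01:   custom betas respected
```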


```diff
@@ -131,7 +131,7 @@ class DDIMScheduler(SchedulerMixin, ConfigMixin):
         if trained_betas is not None:
             self.betas = torch.from_numpy(trained_betas)
-        if beta_schedule == "linear":
+        elif beta_schedule == "linear":
             self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
         elif beta_schedule == "scaled_linear":
             # this schedule is very specific to the latent diffusion model.
```


```diff
@@ -86,7 +86,7 @@ class LMSDiscreteScheduler(SchedulerMixin, ConfigMixin):
         if trained_betas is not None:
             self.betas = torch.from_numpy(trained_betas)
-        if beta_schedule == "linear":
+        elif beta_schedule == "linear":
             self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
         elif beta_schedule == "scaled_linear":
             # this schedule is very specific to the latent diffusion model.
```


```diff
@@ -74,7 +74,7 @@ class FlaxLMSDiscreteScheduler(SchedulerMixin, ConfigMixin):
     ):
         if trained_betas is not None:
             self.betas = jnp.asarray(trained_betas)
-        if beta_schedule == "linear":
+        elif beta_schedule == "linear":
             self.betas = jnp.linspace(beta_start, beta_end, num_train_timesteps, dtype=jnp.float32)
         elif beta_schedule == "scaled_linear":
             # this schedule is very specific to the latent diffusion model.
```


```diff
@@ -111,7 +111,7 @@ class PNDMScheduler(SchedulerMixin, ConfigMixin):
         if trained_betas is not None:
             self.betas = torch.from_numpy(trained_betas)
-        if beta_schedule == "linear":
+        elif beta_schedule == "linear":
             self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
         elif beta_schedule == "scaled_linear":
             # this schedule is very specific to the latent diffusion model.
```


```diff
@@ -132,7 +132,7 @@ class FlaxPNDMScheduler(SchedulerMixin, ConfigMixin):
     ):
         if trained_betas is not None:
            self.betas = jnp.asarray(trained_betas)
-        if beta_schedule == "linear":
+        elif beta_schedule == "linear":
            self.betas = jnp.linspace(beta_start, beta_end, num_train_timesteps, dtype=jnp.float32)
         elif beta_schedule == "scaled_linear":
             # this schedule is very specific to the latent diffusion model.
```
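
With the `elif` in place, a user-supplied beta array takes precedence over the named schedule in all five schedulers. A usage sketch against the PyTorch DDIM scheduler, assuming the constructor keywords shown in the hunks above (`trained_betas` passed as a NumPy array):

```python
import numpy as np
import torch
from diffusers import DDIMScheduler

betas = np.linspace(1e-4, 2e-2, 1000, dtype=np.float32)
scheduler = DDIMScheduler(num_train_timesteps=1000, trained_betas=betas)

# Before this fix, the default "linear" schedule silently overwrote the
# array passed in, so this assertion would fail.
assert torch.equal(scheduler.betas, torch.from_numpy(betas))
```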