def reset_parameters(self, root=None):
    # Re-initialize every resettable submodule. The spectral norm hook owns
    # the underlying weight (it re-parameterizes it as weight_orig), so it
    # must be removed before reset_parameters() and re-applied afterwards.
    if root is None:
        root = self
    for m in root.modules():
        is_sn = check_spectral_norm(m)
        if is_sn:
            remove_spectral_norm(m)
        if isinstance(m, (nn.Conv2d, nn.Linear, nn.Embedding, nn.BatchNorm2d)):
            m.reset_parameters()
        if is_sn:
            spectral_norm(m)
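# check_spectral_norm is not part of PyTorch, so the helper above assumes a
# user-defined function. A minimal sketch of one plausible implementation,
# plus the remove/reset/re-apply round-trip it enables (spectral_norm and
# remove_spectral_norm are the real torch.nn.utils functions):
import torch.nn as nn
from torch.nn.utils import spectral_norm, remove_spectral_norm
from torch.nn.utils.spectral_norm import SpectralNorm

def check_spectral_norm(m):
    # True if any forward pre-hook on this module is a SpectralNorm hook,
    # i.e. spectral_norm() was applied and not yet removed.
    return any(isinstance(h, SpectralNorm) for h in m._forward_pre_hooks.values())

layer = spectral_norm(nn.Linear(8, 8))
assert check_spectral_norm(layer)
remove_spectral_norm(layer)   # hand the weight back to the module...
layer.reset_parameters()      # ...so it can be re-initialized in place
spectral_norm(layer)          # then re-wrap it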
def rm_spectral_norm(module):
    # Recursively strip spectral norm from every linear / conv layer in the
    # module tree. Assumes spectral norm was applied to all of them:
    # remove_spectral_norm raises ValueError on a layer it was never applied to.
    for name, child in module.named_children():
        if isinstance(child, (nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d,
                              nn.ConvTranspose1d, nn.ConvTranspose2d,
                              nn.ConvTranspose3d)):
            setattr(module, name, remove_spectral_norm(child))
        else:
            rm_spectral_norm(child)
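# A short usage sketch for rm_spectral_norm, assuming (as the helper does)
# that spectral norm was applied to every linear / conv layer beforehand:
import torch.nn as nn
from torch.nn.utils import spectral_norm

net = nn.Sequential(
    spectral_norm(nn.Conv2d(3, 16, 3)),
    nn.Sequential(nn.ReLU(), spectral_norm(nn.Linear(16, 4))),
)
rm_spectral_norm(net)  # recurses through both levels, stripping in place
assert not any('weight_orig' in n for n, _ in net.named_parameters())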
def turn_off_spectral_norm(self):
    """
    Private helper for turning off the spectral normalization.
    :return: None (has side effect)
    """
    from torch.nn.utils import remove_spectral_norm

    if self.spectral_norm_mode is not None:
        assert self.spectral_norm_mode is True, \
            "can't remove spectral_norm. It is not applied"

    # remove the applied spectral norm
    for module in self.layers:
        remove_spectral_norm(module.conv_1)
        remove_spectral_norm(module.conv_2)

    # toggle the state variable:
    self.spectral_norm_mode = False
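# A self-contained sketch of the toggle pattern, using hypothetical Block /
# Net classes; spectral_norm_mode is None until spectral norm is first
# applied, True while it is active, and False after this helper runs:
import torch.nn as nn
from torch.nn.utils import spectral_norm

class Block(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv_1 = spectral_norm(nn.Conv2d(4, 4, 3, padding=1))
        self.conv_2 = spectral_norm(nn.Conv2d(4, 4, 3, padding=1))

class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.layers = nn.ModuleList([Block(), Block()])
        self.spectral_norm_mode = True  # applied in __init__

Net.turn_off_spectral_norm = turn_off_spectral_norm  # attach the helper above

net = Net()
net.turn_off_spectral_norm()
assert net.spectral_norm_mode is False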
def remove_spectral_norm(self):
    # Note: the bare name remove_spectral_norm below resolves to the
    # module-level torch.nn.utils function, not to this method, because
    # Python skips the class scope when resolving names inside a method body.
    self.conv_0 = remove_spectral_norm(self.conv_0)
    self.conv_1 = remove_spectral_norm(self.conv_1)
    if self.learned_shortcut:
        self.conv_s = remove_spectral_norm(self.conv_s)
def turn_off_spectral_norm(self):
    # The first layer is a transposed-conv block (tr_conv + conv); the
    # remaining layers share the conv_1 / conv_2 structure.
    for module in self.layers[1:]:
        module.conv_1 = remove_spectral_norm(module.conv_1)
        module.conv_2 = remove_spectral_norm(module.conv_2)
    self.layers[0].tr_conv = remove_spectral_norm(self.layers[0].tr_conv)
    self.layers[0].conv = remove_spectral_norm(self.layers[0].conv)
def turn_off_spectral_norm(self):
    # Same conv_1 / conv_2 pattern as above, plus a final block.
    for module in self.layers:
        module.conv_1 = remove_spectral_norm(module.conv_1)
        module.conv_2 = remove_spectral_norm(module.conv_2)
    self.final_block.conv_1 = remove_spectral_norm(self.final_block.conv_1)
    self.final_block.conv_2 = remove_spectral_norm(self.final_block.conv_2)
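# For reference, a quick demonstration of what remove_spectral_norm does to a
# wrapped module in all of the helpers above: the weight_orig parameter and
# the power-iteration buffers disappear, and weight is a plain Parameter again:
import torch.nn as nn
from torch.nn.utils import spectral_norm, remove_spectral_norm

conv = spectral_norm(nn.Conv2d(3, 3, 3))
print('weight_orig' in dict(conv.named_parameters()))  # True
conv = remove_spectral_norm(conv)
print('weight_orig' in dict(conv.named_parameters()))  # False
print(isinstance(conv.weight, nn.Parameter))           # True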