def __init__(self, num_input_channels, num_latent_dims, num_classes, arch_key, arch_depth, train_all):
    """Build a classifier head on top of the encoder from BaseModel.

    Args:
        num_input_channels: number of channels in the input images.
        num_latent_dims: dimensionality of the latent space fed to the classifier.
        num_classes: number of output classes.
        arch_key: key selecting the encoder architecture (forwarded to BaseModel).
        arch_depth: depth variant of the chosen architecture (forwarded to BaseModel).
        train_all: when False, the encoder weights are frozen and only the
            classifier head receives gradients.
    """
    # Explicit two-base initialization: nn.Module bookkeeping first, then the
    # shared encoder setup from BaseModel.
    Module.__init__(self)
    BaseModel.__init__(self, num_input_channels, num_latent_dims, arch_key, arch_depth)

    self.classifier = LatentClassifier(num_latent_dims, num_classes)

    # Freeze the (presumably pre-trained) encoder unless full fine-tuning
    # was requested. NOTE(review): assumes BaseModel.__init__ sets
    # self.encoder — confirm against BaseModel.
    if not train_all:
        for p in self.encoder.parameters():
            p.requires_grad = False
def __init__(self, num_input_channels, num_latent_dims, arch_key, arch_depth):
    """Build the VAE: shared encoder (via BaseModel), bottleneck, and decoder.

    Args:
        num_input_channels: number of channels in the input images.
        num_latent_dims: dimensionality of the VAE latent space.
        arch_key: key into _ARCH_DICT_DEC selecting the decoder family.
        arch_depth: depth variant of the decoder; must be 9 for "dlenet",
            or one of 18/34/50/101/152 for "dresnet".

    Raises:
        ValueError: if arch_depth is invalid for the selected decoder family.
            (Previously an `assert`, which is silently stripped under
            `python -O`; an explicit raise keeps validation active.)
        NotImplementedError: if _ARCH_DICT_DEC maps arch_key to an
            unsupported decoder family.
    """
    Module.__init__(self)
    BaseModel.__init__(self, num_input_channels, num_latent_dims, arch_key, arch_depth)

    # VaeNet attributes
    self.arch_dec = _ARCH_DICT_DEC[arch_key]
    self.bottleneck = VAEBottleneck(num_latent_dims)

    # Resolve the module providing the decoder factory; the factory name is
    # the family name plus the depth (e.g. "dresnet50").
    if self.arch_dec == "dlenet":
        if arch_depth != 9:
            raise ValueError(f"dlenet decoder supports only depth 9, got {arch_depth}")
        decoder_module = edlenet
    elif self.arch_dec == "dresnet":
        if arch_depth not in (18, 34, 50, 101, 152):
            raise ValueError(
                f"dresnet decoder depth must be one of 18, 34, 50, 101, 152, got {arch_depth}"
            )
        decoder_module = edresnet
    else:
        raise NotImplementedError(f"unsupported decoder architecture: {self.arch_dec!r}")

    # Single construction site instead of duplicating the getattr call per branch.
    self.decoder = getattr(decoder_module, self.arch_dec + str(arch_depth))(
        num_input_channels=num_input_channels, num_latent_dims=num_latent_dims
    )