def __init__(self, input_dim=1, output_dim=1, input_size=32):
    """Build a small convolutional discriminator.

    Args:
        input_dim: number of channels of the input image.
        output_dim: size of the final linear output (1 for a real/fake score).
        input_size: spatial side length of the (square) input image.
    """
    super(Discriminator, self).__init__()
    self.input_dim = input_dim
    self.output_dim = output_dim
    self.input_size = input_size

    # Two stride-2 convolutions shrink each spatial side by a factor of 4,
    # so the flattened feature size entering the fc head is 128 * (s/4)^2.
    feat_side = self.input_size // 4
    flat_features = 128 * feat_side * feat_side

    conv_layers = [
        nn.Conv2d(self.input_dim, 64, 4, 2, 1),
        nn.LeakyReLU(0.2),
        nn.Conv2d(64, 128, 4, 2, 1),
        nn.BatchNorm2d(128),
        nn.LeakyReLU(0.2),
    ]
    self.conv = nn.Sequential(*conv_layers)

    fc_layers = [
        nn.Linear(flat_features, 1024),
        nn.BatchNorm1d(1024),
        nn.LeakyReLU(0.2),
        nn.Linear(1024, self.output_dim),
        # nn.Sigmoid(),  # intentionally disabled: the head emits raw scores
    ]
    self.fc = nn.Sequential(*fc_layers)

    utils.initialize_weights(self)
def __init__(self, input_dim=1, output_dim=1, input_size=32, cuda=False):
    """Build a Lipschitz-constrained discriminator using Bjorck-orthonormalized layers.

    Args:
        input_dim: number of channels of the input image.
        output_dim: size of the final linear output (1 for a real/fake score).
        input_size: spatial side length of the (square) input image.
        cuda: forwarded into the Bjorck layer config.
    """
    super(Discriminator, self).__init__()
    self.input_dim = input_dim
    self.output_dim = output_dim
    self.input_size = input_size

    # Shared configuration for every Bjorck layer (orthonormalization
    # iteration settings and device flag).
    config = Munch.fromDict({
        "cuda": cuda,
        "model": {
            "linear": {
                "bjorck_beta": 0.5,
                "bjorck_iter": 12,
                "bjorck_order": 1,
                "safe_scaling": True,
            },
        },
    })

    # Two stride-2 convolutions shrink each spatial side by a factor of 4.
    feat_side = self.input_size // 4
    flat_features = 128 * feat_side * feat_side

    # MaxMin is a gradient-norm-preserving activation; num_units is half the
    # channel count since it pairs channels along the given axis.
    self.conv = nn.Sequential(
        BjorckConv2d(self.input_dim, 64, 4, 2, 1, config=config),
        MaxMin(num_units=32, axis=1),
        BjorckConv2d(64, 128, 4, 2, 1, config=config),
        MaxMin(num_units=64, axis=1),
    )

    self.fc = nn.Sequential(
        BjorckLinear(flat_features, 1024, config=config),
        MaxMin(num_units=512),
        BjorckLinear(1024, self.output_dim, config=config),
        # nn.Sigmoid(),  # intentionally disabled: the head emits raw scores
    )

    utils.initialize_weights(self)
def __init__(self, input_dim=100, output_dim=1, input_size=32):
    """Build a DCGAN-style generator: fc projection followed by two deconvolutions.

    Args:
        input_dim: dimensionality of the latent noise vector.
        output_dim: number of channels of the generated image.
        input_size: spatial side length of the (square) output image.
    """
    super(Generator, self).__init__()
    self.input_dim = input_dim
    self.output_dim = output_dim
    self.input_size = input_size

    # The fc head projects the latent vector up to a 128-channel feature map
    # of side input_size/4; two stride-2 deconvolutions then upsample x4.
    feat_side = self.input_size // 4
    flat_features = 128 * feat_side * feat_side

    self.fc = nn.Sequential(
        nn.Linear(self.input_dim, 1024),
        nn.BatchNorm1d(1024),
        nn.ReLU(),
        nn.Linear(1024, flat_features),
        nn.BatchNorm1d(flat_features),
        nn.ReLU(),
    )

    self.deconv = nn.Sequential(
        nn.ConvTranspose2d(128, 64, 4, 2, 1),
        nn.BatchNorm2d(64),
        nn.ReLU(),
        nn.ConvTranspose2d(64, self.output_dim, 4, 2, 1),
        nn.Tanh(),  # squash output pixels into [-1, 1]
    )

    utils.initialize_weights(self)