def __init__(self, step=1, in_channel=256):
    """GRU-style recurrent unit over 1-D features.

    When ``step == 1`` the unit never recurs, so no gate convolutions are
    built at all — the instance only stores ``self.step``.
    """
    super(Unit, self).__init__()
    self.step = step
    if step == 1:
        return  # single-step: passthrough unit, no gates needed

    def _gate_conv():
        # Sigmoid-activated gate conv over concatenated (input, state).
        return Conv1d(in_channel * 2, in_channel,
                      if_bn=True, activation_fn=nn.Sigmoid())

    self.conv_z = _gate_conv()  # update gate
    self.conv_r = _gate_conv()  # reset gate
    # Candidate-state conv uses ReLU instead of a sigmoid gate.
    self.conv_h = Conv1d(in_channel * 2, in_channel,
                         if_bn=True, activation_fn=nn.Relu())
def __init__(self, latent_dim, n_c, x_shape, verbose=False):
    """CNN generator mapping (latent z, one-hot c) to an image.

    Args:
        latent_dim: size of the latent noise vector z.
        n_c: number of categorical codes concatenated to z.
        x_shape: target image shape (stored for reference).
        verbose: if True, print a summary of the sub-models at construction.
    """
    super(Generator_CNN, self).__init__()
    self.name = 'generator'
    self.latent_dim = latent_dim
    self.n_c = n_c
    self.x_shape = x_shape
    self.ishape = (128, 7, 7)              # feature map fed to the deconv stack
    self.iels = int(np.prod(self.ishape))  # flattened size of ishape
    self.verbose = verbose
    self.model0 = nn.Sequential(
        nn.Linear((self.latent_dim + self.n_c), 1024))
    self.model1 = nn.Sequential(BatchNorm1d(1024), nn.Leaky_relu(0.2))
    self.model2 = nn.Sequential(nn.Linear(1024, self.iels),
                                BatchNorm1d(self.iels), nn.Leaky_relu(0.2))
    self.model3 = nn.Sequential(
        Reshape(self.ishape),
        nn.ConvTranspose(128, 64, 4, stride=2, padding=1, bias=True),
        nn.BatchNorm(64), nn.Leaky_relu(0.2))
    self.model4 = nn.Sequential(
        nn.ConvTranspose(64, 1, 4, stride=2, padding=1, bias=True))
    self.sigmoid = nn.Sigmoid()
    initialize_weights(self)
    if self.verbose:
        print('Setting up {}...\n'.format(self.name))
        # BUG FIX: this class has no ``self.model`` attribute (the network is
        # split into model0..model4), so the original ``print(self.model)``
        # raised AttributeError whenever verbose=True. Print the real parts.
        for part in (self.model0, self.model1, self.model2,
                     self.model3, self.model4):
            print(part)
def __init__(self, in_planes, planes, stride=1):
    """Pre-activation residual block with a squeeze-and-excitation branch."""
    super(PreActBlock, self).__init__()
    self.bn1 = nn.BatchNorm(in_planes)
    self.conv1 = nn.Conv(in_planes, planes, kernel_size=3,
                         stride=stride, padding=1, bias=False)
    self.bn2 = nn.BatchNorm(planes)
    self.conv2 = nn.Conv(planes, planes, kernel_size=3,
                         stride=1, padding=1, bias=False)
    # Projection shortcut only when spatial size or channel count changes;
    # otherwise no ``shortcut`` attribute is created at all.
    needs_projection = stride != 1 or in_planes != planes
    if needs_projection:
        self.shortcut = nn.Sequential(
            nn.Conv(in_planes, planes, kernel_size=1,
                    stride=stride, bias=False))
    # SE layers: squeeze to planes//16 channels, then excite back to planes.
    se_mid = planes // 16
    self.fc1 = nn.Conv(planes, se_mid, kernel_size=1)
    self.fc2 = nn.Conv(se_mid, planes, kernel_size=1)
    self.act = nn.Sigmoid()
def __init__(self, in_channels=3, out_channels=1):
    """PatchGAN-style discriminator over concatenated (input, target) images."""
    super(Discriminator, self).__init__()

    def down_block(n_in, n_out, stride=2, normalization=True):
        """One downsampling stage: 4x4 Conv (+optional BatchNorm) + LeakyReLU."""
        stage = [nn.Conv(n_in, n_out, 4, stride=stride, padding=1)]
        if normalization:
            stage.append(nn.BatchNorm2d(n_out))
        stage.append(nn.LeakyReLU(scale=0.2))
        return stage

    self.model = nn.Sequential(
        *down_block(in_channels + out_channels, 64, normalization=False),
        *down_block(64, 128),
        *down_block(128, 256),
        *down_block(256, 512, stride=1),
        nn.Conv(512, 1, 4, stride=1, padding=1),
        nn.Sigmoid())
    # Apply the project's normal-weight initializer to every submodule.
    for m in self.modules():
        weights_init_normal(m)
def __init__(self):
    """MLP discriminator: flattened image -> real/fake probability in [0, 1]."""
    super(Discriminator, self).__init__()
    # ``img_shape`` is expected to be a module-level global — TODO confirm.
    in_features = int(np.prod(img_shape))
    layers = [
        nn.Linear(in_features, 512),
        nn.LeakyReLU(scale=0.2),
        nn.Linear(512, 256),
        nn.LeakyReLU(scale=0.2),
        nn.Linear(256, 1),
        nn.Sigmoid(),
    ]
    self.model = nn.Sequential(*layers)
def __init__(self, kernel_size=3):
    """Spatial attention: 2-channel pooled map -> 1-channel sigmoid gate."""
    super(SpatialAttention, self).__init__()
    assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
    # 'same' padding for the two supported kernel sizes (3 -> 1, 7 -> 3).
    pad = (kernel_size - 1) // 2
    self.conv = nn.Conv(2, 1, kernel_size, padding=pad, bias=False)
    self.sigmoid = nn.Sigmoid()
def __init__(self, kernel_size=7):
    """Spatial-attention layer: conv over 2 pooled channels + sigmoid gate."""
    super(SALayer, self).__init__()
    assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
    # 'same' padding for the two supported kernel sizes (3 -> 1, 7 -> 3).
    pad = (kernel_size - 1) // 2
    self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=pad, bias=False)
    self.sigmoid = nn.Sigmoid()
def __init__(self, C):
    """Squeeze-and-Excitation over C channels.

    ``self.reduction`` is read here but not set — presumably a class
    attribute defined on SEModule; verify against the class body.
    """
    super(SEModule, self).__init__()
    # Bottleneck width: reduce by self.reduction but never below 8 channels.
    mid = max(C // self.reduction, 8)
    self.op = nn.Sequential(
        nn.AdaptiveAvgPool2d(1),
        Conv2d(C, mid, 1, 1, 0),
        nn.ReLU(),
        Conv2d(mid, C, 1, 1, 0),
        nn.Sigmoid(),
    )
def __init__(self, channel, reduction=16):
    """SE layer: global average pool, then a bottleneck MLP of channel gates."""
    super(SELayer, self).__init__()
    self.avg_pool = nn.AdaptiveAvgPool2d(1)
    hidden = channel // reduction
    self.fc = nn.Sequential(
        nn.Linear(channel, hidden, bias=False),
        nn.Relu(),
        nn.Linear(hidden, channel, bias=False),
        nn.Sigmoid(),
    )
def test_bce_loss(self):
    """Jittor BCELoss must match PyTorch BCELoss on the same random data."""
    jt_loss = jnn.BCELoss()
    tc_loss = tnn.BCELoss()
    jt_sig = jnn.Sigmoid()
    tc_sig = tnn.Sigmoid()
    # Shared NumPy inputs so both frameworks see identical values.
    logits = np.random.randn(100).astype(np.float32)
    labels = np.random.randint(2, size=(100)).astype(np.float32)
    jt_y = jt_loss(jt_sig(jt.array(logits)), jt.array(labels))
    tc_y = tc_loss(tc_sig(torch.from_numpy(logits)),
                   torch.from_numpy(labels))
    assert np.allclose(jt_y.numpy(), tc_y.numpy())
def __init__(self):
    """DCGAN-style convolutional discriminator -> real/fake probability."""
    super(Discriminator, self).__init__()

    def down_block(n_in, n_out, bn=True):
        """Stride-2 conv + LeakyReLU + Dropout, optionally BatchNorm."""
        layers = [nn.Conv(n_in, n_out, 3, stride=2, padding=1),
                  nn.LeakyReLU(scale=0.2),
                  nn.Dropout(p=0.25)]
        if bn:
            # NOTE(review): eps=0.8 is unusually large for BatchNorm (0.8 is
            # commonly a momentum value) — kept as-is to preserve behavior.
            layers.append(nn.BatchNorm(n_out, eps=0.8))
        for layer in layers:
            weights_init_normal(layer)
        return layers

    self.model = nn.Sequential(*down_block(opt.channels, 16, bn=False),
                               *down_block(16, 32),
                               *down_block(32, 64),
                               *down_block(64, 128))
    # Four stride-2 convs halve the spatial size 4 times.
    ds_size = opt.img_size // (2 ** 4)
    self.adv_layer = nn.Sequential(nn.Linear((128 * (ds_size ** 2)), 1),
                                   nn.Sigmoid())
def execute(self, x):
    """Residual forward pass with squeeze-and-excitation channel gating."""
    out = nn.relu(self.bn1(self.conv1(x)))
    out = self.bn2(self.conv2(out))
    # Squeeze: global max-pool down to 1x1 per channel
    w = nn.pool(out, out.shape[2], 'maximum', 0)
    w = nn.relu(self.fc1(w))
    w = nn.Sigmoid()(self.fc2(w))
    # Excitation
    out = out * w  # New broadcasting feature from v0.2!
    # BUG FIX: ``self.shortcut`` is only created when the block changes shape
    # (stride != 1 or in_planes != planes — see __init__); calling it
    # unconditionally raised AttributeError for identity blocks. Fall back to
    # the identity shortcut when no projection exists.
    out += self.shortcut(x) if hasattr(self, 'shortcut') else x
    out = nn.relu(out)
    return out
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.InstanceNorm2d,
             use_sigmoid=False, getIntermFeat=False):
    """Multi-layer PatchGAN discriminator (pix2pixHD style).

    Args:
        input_nc: number of input channels.
        ndf: channel width of the first conv.
        n_layers: number of additional stride-2 downsampling stages.
        norm_layer: normalization layer type (currently unused — the norm
            calls below are commented out).
        use_sigmoid: append a Sigmoid head (LSGAN variants omit it).
        getIntermFeat: if True, expose each stage as ``model0..modelN`` so
            intermediate features can be read; otherwise build one Sequential.
    """
    super(NLayerDiscriminator, self).__init__()
    self.getIntermFeat = getIntermFeat
    self.n_layers = n_layers
    kw = 4
    # FIX (dead store): padw was computed as int(np.ceil((kw - 1.0) / 2)) and
    # then immediately overwritten with 1; only the final value is kept.
    padw = 1
    sequence = [[
        nn.Conv(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
        nn.LeakyReLU(0.2)
    ]]
    nf = ndf
    for n in range(1, n_layers):
        nf_prev = nf
        nf = min(nf * 2, 512)  # cap channel growth at 512
        sequence += [[
            nn.Conv(nf_prev, nf, kernel_size=kw, stride=2, padding=padw),
            # norm_layer(nf),
            nn.LeakyReLU(0.2)
        ]]
    nf_prev = nf
    nf = min(nf * 2, 512)
    sequence += [[
        nn.Conv(nf_prev, nf, kernel_size=kw, stride=1, padding=1),
        # norm_layer(nf),
        nn.LeakyReLU(0.2)
    ]]
    # NOTE(review): padding=2 here differs from padw=1 used elsewhere —
    # kept as-is to preserve output spatial size.
    sequence += [[nn.Conv(nf, 1, kernel_size=kw, stride=1, padding=2)]]
    if use_sigmoid:
        sequence += [[nn.Sigmoid()]]
    if getIntermFeat:
        for n in range(len(sequence)):
            setattr(self, 'model' + str(n), nn.Sequential(*sequence[n]))
    else:
        sequence_stream = []
        for n in range(len(sequence)):
            sequence_stream += sequence[n]
        self.model = nn.Sequential(*sequence_stream)
def __init__(self, hidden_unit=[8, 8]):
    """1x1-conv MLP mapping a 1-channel density signal back to 1 channel.

    NOTE(review): the mutable default list is shared across calls; it is only
    read here, so behavior is unaffected.
    """
    super(DensityNet, self).__init__()
    self.mlp_convs = nn.ModuleList()
    self.mlp_bns = nn.ModuleList()
    # Channel sequence: 1 -> hidden_unit[0] -> ... -> hidden_unit[-1] -> 1,
    # each stage a 1x1 Conv1d paired with a BatchNorm1d.
    widths = [1] + list(hidden_unit) + [1]
    for c_in, c_out in zip(widths[:-1], widths[1:]):
        self.mlp_convs.append(nn.Conv1d(c_in, c_out, 1))
        self.mlp_bns.append(nn.BatchNorm1d(c_out))
    self.sigmoid = nn.Sigmoid()
    self.relu = nn.ReLU()
def __init__(self, wass_metric=False, verbose=False):
    """CNN discriminator; omits the final Sigmoid under a Wasserstein metric.

    Args:
        wass_metric: if True, output raw scores (no Sigmoid head).
        verbose: if True, print a summary of the model at construction.
    """
    super(Discriminator_CNN, self).__init__()
    self.name = 'discriminator'
    self.channels = 1
    self.cshape = (128, 5, 5)              # conv feature map before flatten
    self.iels = int(np.prod(self.cshape))  # flattened size of cshape
    self.lshape = (self.iels, )
    self.wass = wass_metric
    self.verbose = verbose
    stages = [
        nn.Conv(self.channels, 64, 4, stride=2, bias=True),
        nn.Leaky_relu(0.2),
        nn.Conv(64, 128, 4, stride=2, bias=True),
        nn.Leaky_relu(0.2),
        Reshape(self.lshape),
        nn.Linear(self.iels, 1024),
        nn.Leaky_relu(0.2),
        nn.Linear(1024, 1),
    ]
    self.model = nn.Sequential(*stages)
    if not self.wass:
        # Vanilla GAN: squash the logit into a probability.
        self.model = nn.Sequential(self.model, nn.Sigmoid())
    initialize_weights(self)
    if self.verbose:
        print('Setting up {}...\n'.format(self.name))
        print(self.model)
def __init__(self):
    """MLP discriminator over latent codes: latent_dim -> probability."""
    super(Discriminator, self).__init__()
    # ``opt`` is presumably a module-level argparse namespace — TODO confirm.
    layers = [
        nn.Linear(opt.latent_dim, 512),
        nn.Leaky_relu(0.2),
        nn.Linear(512, 256),
        nn.Leaky_relu(0.2),
        nn.Linear(256, 1),
        nn.Sigmoid(),
    ]
    self.model = nn.Sequential(*layers)