def __init__(self):
    """Build a two-stage conv feature extractor plus a 10-way linear head.

    Layer sizes imply a single-channel 28x28 input (MNIST-like): two
    conv+pool stages halve the spatial size twice (28 -> 14 -> 7), which
    matches the 32 * 7 * 7 flattened size consumed by ``self.out``.
    (TODO confirm input size against the caller.)
    """
    super(CNN, self).__init__()
    self.conv1 = nn.Sequential(
        # BUG FIX: nn.Conv2 does not exist in torch.nn -> nn.Conv2d.
        nn.Conv2d(
            in_channels=1,
            out_channels=16,
            kernel_size=5,
            stride=1,
            padding=2,          # "same" padding for a 5x5 kernel
        ),
        nn.ReLU(),
        nn.MaxPool2d(kernel_size=2),
    )
    self.conv2 = nn.Sequential(
        nn.Conv2d(16, 32, 5, 1, 2),
        nn.ReLU(),
        # BUG FIX: MaxPool3d expects 5-D (N, C, D, H, W) input and would
        # crash on 4-D conv activations; 2-D pooling also matches the
        # 32 * 7 * 7 Linear input below.
        nn.MaxPool2d(2),
    )
    self.out = nn.Linear(32 * 7 * 7, 10)

def forward(self, x):
    """Return (class logits, flattened conv features) for a batch of images."""
    x = self.conv1(x)
    x = self.conv2(x)
    x = x.view(x.size(0), -1)  # flatten everything but the batch dimension
    output = self.out(x)
    return output, x
def __init__(self, z_dim=100, cond_dim=18, mode='gp'):
    """Conditional GAN discriminator/critic.

    Args:
        z_dim: dimension of z latent space (noise), default=100
        cond_dim: dimension of conditional input, default=18
        mode: 'gp' (gradient penalty) or 'wc' (weight clipping)
    """
    super(Discriminator, self).__init__()
    self.z_dim = z_dim
    self.cond_dim = cond_dim
    self.projected_cond_dim = cond_dim // 2
    # BUG FIX: was `self.mode = model` -- `model` is undefined (NameError
    # at construction time); the constructor parameter is `mode`.
    self.mode = mode

    # Main conv trunk: each stage is a stride-2 4x4 conv that halves the
    # spatial size. c64/c32/c16/c8 and output_channels are module-level
    # constants defined elsewhere in this file.
    channels = [c64, c32, c16, c8]
    netD_list = [nn.Conv2d(output_channels, c64, kernel_size=4, stride=2, padding=1),
                 nn.LeakyReLU(negative_slope=0.2, inplace=True)]
    for i in range(len(channels) - 1):
        # BUG FIX: nn.Conv2 does not exist in torch.nn -> nn.Conv2d.
        netD_list += [nn.Conv2d(channels[i], channels[i + 1],
                                kernel_size=4, stride=2, padding=1)]
        if self.mode == 'gp':
            netD_list += [nn.BatchNorm2d(channels[i + 1])]
        netD_list += [nn.LeakyReLU(negative_slope=0.2, inplace=True)]
    self.netD_1 = nn.Sequential(*netD_list)

    # Projects the conditional vector so it can be merged with the conv
    # feature map (Concat_Embedded is defined elsewhere in the project).
    self.projector = Concat_Embedded(self.cond_dim, 8)

    if self.mode == 'gp':
        # Gradient-penalty mode: unbounded critic score from one Linear
        # over the flattened (c8 + projected_cond) x 8 x 8 features.
        self.netD_2 = nn.Sequential(
            nn.Linear((c8 + self.projected_cond_dim) * 8 * 8, 1)
        )
    else:
        # Weight-clipping mode: an 8x8 conv collapses the feature map to
        # a single value, squashed to a probability by Sigmoid.
        self.netD_2 = nn.Sequential(
            nn.Conv2d(c8 + self.projected_cond_dim, 1,
                      kernel_size=8, stride=1, padding=0),
            nn.Sigmoid()
        )
def __init__(self):
    """Three conv+batchnorm stages feeding a 2-action linear head."""
    super(DQN, self).__init__()
    # BUG FIX: the Conv2d keyword is `stride`, not `strides`
    # (the original raised TypeError at construction time).
    self.conv1 = nn.Conv2d(3, 16, kernel_size=5, stride=2)
    self.bn1 = nn.BatchNorm2d(16)
    # BUG FIX: nn.Conv2 does not exist in torch.nn -> nn.Conv2d.
    self.conv2 = nn.Conv2d(16, 32, kernel_size=5, stride=2)
    self.bn2 = nn.BatchNorm2d(32)
    self.conv3 = nn.Conv2d(32, 32, kernel_size=5, stride=2)
    self.bn3 = nn.BatchNorm2d(32)
    # 448 must equal the flattened conv3 output for the expected screen
    # size (32 channels * H * W) -- TODO confirm against the caller.
    self.head = nn.Linear(448, 2)
def __init__(self):
    """LeNet-style net: two 3x3 convs then three fully connected layers."""
    super(Net, self).__init__()
    # convolution layers (single-channel input, e.g. grayscale images)
    self.conv1 = nn.Conv2d(1, 6, 3)
    # BUG FIX: nn.Conv2 does not exist in torch.nn -> nn.Conv2d.
    self.conv2 = nn.Conv2d(6, 16, 3)
    # fully connected layers; 16 * 6 * 6 is the flattened conv output
    # expected by the forward pass -- TODO confirm input size.
    self.fc1 = nn.Linear(16 * 6 * 6, 120)
    self.fc2 = nn.Linear(120, 84)
    self.fc3 = nn.Linear(84, 10)
def __init__(self, input_ch, output_ch, num_filters=64, norm_layer=nn.InstanceNorm2d,
             num_blocks=9, padding_type='reflect'):
    """Build a ResNet-based generator: stem conv, two downsampling stages,
    ``num_blocks`` residual blocks, two upsampling stages, output conv + Tanh.

    Parameters:
        input_ch    : number of channels in input images
        output_ch   : number of channels in output images
        num_filters : number of filters in the last convolution layer
        norm_layer  : type of normalization layer
        num_blocks  : the number of ResnetBlocks
        padding_type: padding mode forwarded to each ResnetBlock
    """
    assert(num_blocks >= 0)
    super(ResnetGenerator, self).__init__()
    self.input_ch = input_ch
    self.output_ch = output_ch
    self.num_filters = num_filters

    # Stem: reflection-pad + 7x7 conv keeps spatial size, lifts to num_filters.
    # BUG FIX: nn.Conv2 does not exist in torch.nn -> nn.Conv2d.
    model = [nn.ReflectionPad2d(3),
             nn.Conv2d(input_ch, num_filters, kernel_size=7, padding=0, bias=True),
             norm_layer(num_filters),
             nn.ReLU(True)]

    num_downsampling = 2
    # Downsampling: each stage halves spatial size and doubles channels.
    for idx in range(num_downsampling):
        mult = 2 ** idx
        model += [nn.Conv2d(num_filters * mult, num_filters * mult * 2,
                            kernel_size=3, stride=2, padding=1, bias=True),
                  norm_layer(num_filters * mult * 2),
                  nn.ReLU(True)]

    # Residual core at the bottleneck resolution
    # (ResnetBlock is defined elsewhere in this file).
    mult = 2 ** num_downsampling
    for idx in range(num_blocks):
        model += [ResnetBlock(num_filters * mult,
                              padding_type=padding_type, norm_layer=norm_layer)]

    # Upsampling: mirror of the downsampling path.
    for idx in range(num_downsampling):
        mult = 2 ** (num_downsampling - idx)
        model += [nn.ConvTranspose2d(num_filters * mult, int(num_filters * mult / 2),
                                     kernel_size=3, stride=2,
                                     padding=1, output_padding=1, bias=True),
                  norm_layer(int(num_filters * mult / 2)),
                  nn.ReLU(True)]

    # BUG FIX: `model += module` raises TypeError (list += requires an
    # iterable; nn.Module is not one) -- wrap each module in a list.
    model += [nn.ReflectionPad2d(3)]
    model += [nn.Conv2d(num_filters, output_ch, kernel_size=7, padding=0)]
    model += [nn.Tanh()]

    self.resnet_generator = nn.Sequential(*model)
def __init__(self, inp, oup, stride, expand_ratio, onnx_compatible=False):
    """MobileNetV2-style inverted residual block.

    params:
        inp: input channel count
        oup: output channel count
        stride: stride of the depthwise conv (must be 1 or 2)
        expand_ratio: channel expansion factor for the hidden layer
        onnx_compatible: use ReLU instead of ReLU6 (ReLU6 exports poorly to ONNX)
    """
    super(InvertedResidual, self).__init__()
    ReLU = nn.ReLU if onnx_compatible else nn.ReLU6
    self.stride = stride
    # Stride check
    assert stride in [1, 2]

    hidden_dim = round(inp * expand_ratio)
    # Residual shortcut only when shapes match (stride 1, same channel count).
    self.use_res_connect = self.stride == 1 and inp == oup

    if expand_ratio == 1:
        # No pointwise expansion: depthwise 3x3 then linear pointwise 1x1.
        self.conv = nn.Sequential(
            nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1,
                      groups=hidden_dim, bias=False),
            nn.BatchNorm2d(hidden_dim),
            ReLU(inplace=True),
            nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
            nn.BatchNorm2d(oup),
        )
    else:
        # Expand -> depthwise -> project.
        self.conv = nn.Sequential(
            # BUG FIX: nn.Conv2 does not exist in torch.nn -> nn.Conv2d.
            nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
            nn.BatchNorm2d(hidden_dim),
            ReLU(inplace=True),
            nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1,
                      groups=hidden_dim, bias=False),
            nn.BatchNorm2d(hidden_dim),
            ReLU(inplace=True),
            nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
            nn.BatchNorm2d(oup),
        )
def __init__(self):
    """Build the enhancement network: a stack of 3x3 convs with skip-style
    double-width inputs (e_conv5/6/7 take concatenated features), plus
    pooling/upsampling helpers and three LayerNorm modules.
    """
    super(enhance_net_nopool, self).__init__()
    self.relu = nn.ReLU(inplace=True)
    number_f = 32 * 2  # base feature width (64)
    self.e_conv1 = nn.Conv2d(3, number_f, 3, 1, 1, bias=True)
    self.e_conv2 = nn.Conv2d(number_f, number_f, 3, 1, 1, bias=True)
    self.e_conv3 = nn.Conv2d(number_f, number_f, 3, 1, 1, bias=True)
    self.e_conv4 = nn.Conv2d(number_f, number_f, 3, 1, 1, bias=True)
    # Convs below taking number_f * 2 inputs presumably consume
    # concatenated feature pairs -- confirm against forward().
    self.e_conv5 = nn.Conv2d(number_f * 2, number_f, 3, 1, 1, bias=True)
    # BUG FIX: nn.Conv2 does not exist in torch.nn -> nn.Conv2d.
    self.e_conv5_1 = nn.Conv2d(number_f, number_f, 3, 1, 1, bias=True)
    self.e_conv6 = nn.Conv2d(number_f * 2, number_f, 3, 1, 1, bias=True)
    self.e_conv7 = nn.Conv2d(number_f * 2, 24, 3, 1, 1, bias=True)
    self.maxpool = nn.MaxPool2d(2, stride=2, return_indices=False, ceil_mode=False)
    self.upsample = nn.UpsamplingBilinear2d(scale_factor=2)
    # LayerNorm here is the project's own module (defined elsewhere in
    # this file), not torch.nn.LayerNorm.
    self.Minseock_ffc_01 = LayerNorm(number_f)
    self.Minseock_ffc_02 = LayerNorm(number_f)
    self.Minseock_ffc_03 = LayerNorm(number_f)
def __init__(self, channels): super(ResidualBlock, self).__init__(): self.conv1 = nn.Conv2(channels, channels, kernel_size=3, )