def __init__(self, input_dim, dis_dims, loss, pack):
    super(Discriminator, self).__init__()
    torch.cuda.manual_seed(0)
    torch.manual_seed(0)
    dim = input_dim * pack  # `pack` samples are concatenated along the feature axis
    self.pack = pack
    self.packdim = dim
    seq = []
    for item in list(dis_dims):
        seq += [Linear(dim, item), LeakyReLU(0.2), Dropout(0.5)]
        dim = item
    seq += [Linear(dim, 1)]
    if loss == "cross_entropy":
        seq += [Sigmoid()]
    self.seq = Sequential(*seq)
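# A minimal shape sketch for the packed discriminator above (an assumption:
# a typical forward reshapes the batch to (-1, self.packdim), so the batch
# size must be divisible by `pack`; the class is assumed to be in scope).
import torch

disc = Discriminator(input_dim=30, dis_dims=(256, 256), loss="cross_entropy", pack=10)
fake = torch.randn(40, 30)          # 40 samples, divisible by pack=10
packed = fake.view(-1, 30 * 10)     # 4 packed rows of 300 features each
out = disc.seq(packed)              # -> shape (4, 1), in (0, 1) due to Sigmoid
print(out.shape)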
def __init__(self, in_channels, mid_channels, kernel_size, stride=1,
             padding=0, dilation=1, groups=1, bias=True):
    super(ConvUnit, self).__init__()
    self.add_module(
        "conv1",
        Conv2d(in_channels, mid_channels, kernel_size, stride, padding,
               dilation, groups, bias))
    self.add_module("act", LeakyReLU(0.1))
    # note: dilation/groups are forwarded to this 1-channel conv too; with
    # groups > 1 it would be invalid (compare build_convunit, which pins
    # them to 1)
    self.add_module(
        "conv2",
        Conv2d(mid_channels, 1, 1, 1, 0, dilation, groups, bias))
def __init__(self, in_channels, out_channels=64, kernel_size=3, padding=1,
             embeding_size=128):
    super(RCBBlock, self).__init__()
    self.relu = LeakyReLU(0.2)
    self.pad = ReflectionPad2d(padding=padding)
    # the reflection pad above does the padding, so the stride-2 conv
    # itself uses padding=0
    self.conv = Conv2d(in_channels=in_channels, out_channels=out_channels,
                       kernel_size=kernel_size, stride=2, padding=0)
    self.bn = AdaptiveInstanceNorm(embeding_size, out_channels)
def parse_activation(name):
    if name in relu_names:
        return ReLU()
    if name in leaky_relu_names:
        return LeakyReLU()
    if name in sigmoid_names:
        return Sigmoid()
    if name in log_sigmoid_name:
        return LogSigmoid()
    if name in p_relu_names:
        return PReLU()
    if name in tanh_names:
        return Tanh()
    if name in softmax_names:
        return Softmax(dim=1)
    if name in log_softmax_names:
        return LogSoftmax(dim=1)
    # unrecognized names fall through and return an implicit None
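# The *_names lookups above are module-level collections that are not shown;
# a hypothetical set-up and usage, for illustration only:
relu_names = {"relu"}
leaky_relu_names = {"leaky_relu", "lrelu"}
sigmoid_names = {"sigmoid"}
log_sigmoid_name = {"log_sigmoid"}
p_relu_names = {"prelu"}
tanh_names = {"tanh"}
softmax_names = {"softmax"}
log_softmax_names = {"log_softmax"}

act = parse_activation("leaky_relu")   # -> LeakyReLU(negative_slope=0.01)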
def __init__(self, cfg):
    super(NodeEConvModel, self).__init__()
    if 'modules' in cfg:
        self.model_config = cfg['modules']['node_econv']
    else:
        self.model_config = cfg
    self.node_in = self.model_config.get('node_feats', 16)
    self.edge_in = self.model_config.get('edge_feats', 10)

    # first EdgeConv: 2 * node_in concatenated features in (32 by default),
    # 16 node features out
    self.econv_mlp1 = Seq(Lin(2 * self.node_in, 32), LeakyReLU(0.1),
                          Lin(32, 16), LeakyReLU(0.1))
    self.econv1 = EdgeConv(self.econv_mlp1, aggr='max')
    # second EdgeConv: 16 -> 32 node features
    self.econv_mlp2 = Seq(Lin(32, 64), LeakyReLU(0.1),
                          Lin(64, 32), LeakyReLU(0.1))
    self.econv2 = EdgeConv(self.econv_mlp2, aggr='max')
    # third EdgeConv: 32 -> 64 node features
    self.econv_mlp3 = Seq(Lin(64, 128), LeakyReLU(0.1),
                          Lin(128, 64), LeakyReLU(0.1))
    self.econv3 = EdgeConv(self.econv_mlp3, aggr='max')

    # final prediction layer
    class EdgeModel(torch.nn.Module):
        def __init__(self):
            super(EdgeModel, self).__init__()
            # input is cat([src, dest]): 64 + 64 = 128 features
            self.edge_mlp = Seq(Lin(128, 64), LeakyReLU(0.12), Lin(64, 16))

        def forward(self, src, dest, edge_attr, u, batch):
            return self.edge_mlp(torch.cat([src, dest], dim=1))

    class NodeModel(torch.nn.Module):
        def __init__(self):
            super(NodeModel, self).__init__()
            # input is cat([x[col], edge_attr]): 64 + 16 = 80 features
            self.node_mlp_1 = Seq(Lin(80, 64), LeakyReLU(0.12), Lin(64, 32))
            self.node_mlp_2 = Seq(Lin(32, 16), LeakyReLU(0.12), Lin(16, 2))
            # self.node_mlp = Seq(Lin(64, 32), LeakyReLU(0.12), Lin(32, 16), LeakyReLU(0.12), Lin(32, 2))

        def forward(self, x, edge_index, edge_attr, u, batch):
            row, col = edge_index
            out = torch.cat([x[col], edge_attr], dim=1)
            out = self.node_mlp_1(out)
            out = scatter_mean(out, row, dim=0, dim_size=x.size(0))
            return self.node_mlp_2(out)

    self.predictor = MetaLayer(EdgeModel(), NodeModel())
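# Feature-size walkthrough for the three EdgeConv stages (a sketch; assumes
# torch_geometric is installed and the model above is importable):
import torch

model = NodeEConvModel({'node_feats': 16, 'edge_feats': 10})
x = torch.randn(5, 16)                                # 5 nodes, 16 features
edge_index = torch.tensor([[0, 1, 2, 3], [1, 2, 3, 4]])
h = model.econv1(x, edge_index)                       # -> (5, 16)
h = model.econv2(h, edge_index)                       # -> (5, 32)
h = model.econv3(h, edge_index)                       # -> (5, 64)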
def create(self, architecture: Architecture, metadata: Metadata,
           arguments: Configuration) -> Any:
    # create the input layer
    input_layer_configuration = {}
    if self.code:
        input_layer_configuration["input_size"] = architecture.arguments.code_size
    else:
        input_layer_configuration["input_size"] = metadata.get_num_features()
    input_layer = self.create_other("SingleInputLayer", architecture, metadata,
                                    Configuration(input_layer_configuration))

    # conditional: wrap the input layer with a conditional layer
    if "conditional" in architecture.arguments:
        input_layer = ConditionalLayer(input_layer, metadata,
                                       **architecture.arguments.conditional)

    # mini-batch averaging
    if arguments.get("mini_batch_averaging", False):
        input_layer = MiniBatchAveraging(input_layer)

    # create the hidden layers factory
    hidden_layers_factory = self.create_other("HiddenLayers", architecture,
                                              metadata,
                                              arguments.get("hidden_layers", {}))

    # create the output activation: a critic outputs a raw score, a
    # discriminator outputs a probability
    if self.critic:
        output_activation = View(-1)
    else:
        output_activation = Sequential(Sigmoid(), View(-1))

    # create the output layer factory
    output_layer_factory = SingleOutputLayerFactory(1, activation=output_activation)

    # create the discriminator
    return FeedForward(input_layer, hidden_layers_factory, output_layer_factory,
                       default_hidden_activation=LeakyReLU(0.2))
def __init__(self):
    print("\ninitializing \"decoder\"")
    super(decoder, self).__init__()
    self.n_deconvfilter = [128, 128, 128, 64, 32, 2]

    # five 3x3x3 convolutions walking the filter counts down from 128 to 2;
    # the padding keeps the spatial size unchanged for an odd kernel size
    kernel_size = 3
    padding = (kernel_size - 1) // 2
    self.conv1 = Conv3d(self.n_deconvfilter[0], self.n_deconvfilter[1],
                        kernel_size, padding=padding)
    self.conv2 = Conv3d(self.n_deconvfilter[1], self.n_deconvfilter[2],
                        kernel_size, padding=padding)
    self.conv3 = Conv3d(self.n_deconvfilter[2], self.n_deconvfilter[3],
                        kernel_size, padding=padding)
    self.conv4 = Conv3d(self.n_deconvfilter[3], self.n_deconvfilter[4],
                        kernel_size, padding=padding)
    self.conv5 = Conv3d(self.n_deconvfilter[4], self.n_deconvfilter[5],
                        kernel_size, padding=padding)

    # unpooling layer
    self.unpool3d = Unpool3DLayer(unpool_size=2)

    # nonlinearities of the network
    self.leaky_relu = LeakyReLU(negative_slope=0.01)
    # an explicit dim avoids the implicit-dim deprecation warning; dim=1
    # (the channel axis) is an assumption consistent with per-voxel scores
    self.log_softmax = nn.LogSoftmax(dim=1)
def __init__(self, embedding_dim, max_log_var=0.1):
    super(Encoder, self).__init__()
    self.embedding_dim = embedding_dim
    self.max_log_var = max_log_var
    self.emb_range_limit = Tanh()
    # nine Conv3d/BatchNorm3d/LeakyReLU stages; all convs use kernel 3 with
    # no padding, so each stride-1 stage trims 2 voxels per spatial dim
    self.net = Sequential(
        Conv3d(1, 8, kernel_size=3, stride=1), BatchNorm3d(8), LeakyReLU(),
        Conv3d(8, 16, kernel_size=3, stride=1), BatchNorm3d(16), LeakyReLU(),
        Conv3d(16, 32, kernel_size=3, stride=1), BatchNorm3d(32), LeakyReLU(),
        Conv3d(32, 64, kernel_size=3, stride=1), BatchNorm3d(64), LeakyReLU(),
        Conv3d(64, 64, kernel_size=3, stride=2), BatchNorm3d(64), LeakyReLU(),
        Conv3d(64, 32, kernel_size=3, stride=1), BatchNorm3d(32), LeakyReLU(),
        Conv3d(32, 16, kernel_size=3, stride=1), BatchNorm3d(16), LeakyReLU(),
        Conv3d(16, 8, kernel_size=3, stride=1), BatchNorm3d(8), LeakyReLU(),
        Conv3d(8, 4, kernel_size=3, stride=1), BatchNorm3d(4), LeakyReLU(),
        Flatten())
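# A quick shape check (a sketch; assumes the Encoder class above is in
# scope). With a 32^3 input: four stride-1 convs give 24^3, the stride-2
# conv gives 11^3, four more stride-1 convs give 3^3, so Flatten yields
# 4 * 3**3 = 108 features.
import torch

enc = Encoder(embedding_dim=108)
x = torch.randn(2, 1, 32, 32, 32)   # (batch, channels, D, H, W)
print(enc.net(x).shape)             # -> torch.Size([2, 108])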
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
             relu=True, same_padding=False):
    super(ConvLayer, self).__init__()
    # "same" padding only holds exactly for stride 1 and odd kernel sizes
    padding = kernel_size // 2 if same_padding else 0
    self.conv = Conv2d(in_channels, out_channels, kernel_size, stride,
                       padding=padding, bias=False)
    self.bn = BatchNorm2d(out_channels)
    self.relu = LeakyReLU(0.1, inplace=True) if relu else None
def __init__(self, n_emb):
    super(Discriminator, self).__init__()
    self.emb_dim = n_emb
    self.dis_layers = 2
    self.dis_hid_dim = n_emb
    self.dis_dropout = 0.1
    self.dis_input_dropout = 0.5

    layers = [Dropout(self.dis_input_dropout)]
    for i in range(self.dis_layers + 1):
        input_dim = self.emb_dim if i == 0 else self.dis_hid_dim
        output_dim = 1 if i == self.dis_layers else self.dis_hid_dim
        layers.append(Linear(input_dim, output_dim))
        if i < self.dis_layers:
            layers.append(LeakyReLU(0.2))
            layers.append(Dropout(self.dis_dropout))
    layers.append(Sigmoid())
    self.layers = Sequential(*layers)
def __init__(self):
    super(Classifier, self).__init__()
    self.darknet53 = Darknet53(5)
    self.avgpool = AdaptiveAvgPool2d(output_size=(6, 6))
    # three parallel heads over the pooled features flattened to 2304 dims
    # (presumably 64 channels x 6 x 6)
    self.linear1 = nn.Sequential(Dropout(0.5), Linear(2304, 1024, bias=True))
    self.linear2 = nn.Sequential(Dropout(0.5), Linear(2304, 1024, bias=True))
    self.linear3 = nn.Sequential(Dropout(0.5), Linear(2304, 1024, bias=True))
    # the classifier presumably consumes the concatenated head outputs
    # (3 x 1024 = 3072 features)
    self.classifier = nn.Sequential(
        LeakyReLU(0.01, inplace=True),
        Dropout(p=0.5),
        Linear(in_features=3072, out_features=3, bias=True),
    )
def build_convunit(self, in_channels, mid_channels, kernel_size, stride=1,
                   padding=0, dilation=1, groups=1, bias=True):
    module_list = ModuleList([])
    module_list.append(
        Conv2d(in_channels, mid_channels, kernel_size, stride, padding,
               dilation, groups, bias))
    module_list.append(LeakyReLU(0.1))
    module_list.append(
        Conv2d(mid_channels, 1, 1, 1, 0, dilation=1, groups=1, bias=True))
    # a lowercase local name avoids shadowing the ConvUnit class
    conv_unit = Sequential(*module_list)
    return conv_unit
def __init__(self,
             i_dim: int,
             o_dim: int,
             h_dims: tuple = (128,),
             activation=None,
             dropout=0.0,
             residual=False):
    super(GraphConvolutionLayer, self).__init__()
    # a tuple default avoids the shared-mutable-default pitfall of the
    # original list((128,)) default
    in_dims = [i_dim] + list(h_dims)
    out_dims = list(h_dims) + [o_dim]
    self.linears = ModuleList([
        Linear(in_dim, out_dim, bias=True)
        for in_dim, out_dim in zip(in_dims, out_dims)
    ])
    self.relu = LeakyReLU()
    self.activation = activation
    self.dropout = Dropout(dropout)
    self.residual = residual
def __init__(self, in_channel, out_channel, kernel_size=4, strides=2,
             padding=1, activation=True, batch_norm=True):
    super(ConvBlock, self).__init__()
    self.conv = Conv2d(in_channels=in_channel, out_channels=out_channel,
                       kernel_size=kernel_size, stride=strides,
                       padding=padding)
    self.activation = activation
    self.lrelu = LeakyReLU(negative_slope=0.2, inplace=True)
    self.batch_norm = batch_norm
    self.bn = BatchNorm2d(out_channel)
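# The constructor above only stores the `activation` and `batch_norm` flags;
# a plausible forward that gates the stored modules on them (an assumption,
# including the bn-before-activation order, since the original forward is
# not shown):
def forward(self, x):
    x = self.conv(x)
    if self.batch_norm:
        x = self.bn(x)
    if self.activation:
        x = self.lrelu(x)
    return x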
def __init__(self, cfg):
    super(EdgeConvModel, self).__init__()
    if 'modules' in cfg:
        self.model_config = cfg['modules']['attention_gnn']
    else:
        self.model_config = cfg
    self.aggr = self.model_config.get('aggr', 'max')
    self.leak = self.model_config.get('leak', 0.1)
    self.node_in = self.model_config.get('node_feats', 16)
    self.edge_in = self.model_config.get('edge_feats', 10)

    # batch normalization of the raw node and edge features
    self.bn_node = BatchNorm1d(self.node_in)
    self.bn_edge = BatchNorm1d(self.edge_in)

    # go from node_in (default 16) to 24 node features
    ninput = self.node_in
    noutput = 24
    self.nn0 = Seq(Lin(2 * ninput, 2 * noutput), LeakyReLU(self.leak),
                   Lin(2 * noutput, noutput), LeakyReLU(self.leak),
                   Lin(noutput, noutput))
    self.layer0 = EdgeConv(self.nn0, aggr=self.aggr)

    # go from 24 to 32 node features
    ninput, noutput = 24, 32
    self.nn1 = Seq(Lin(2 * ninput, 2 * noutput), LeakyReLU(self.leak),
                   Lin(2 * noutput, noutput), LeakyReLU(self.leak),
                   Lin(noutput, noutput))
    self.layer1 = EdgeConv(self.nn1, aggr=self.aggr)

    # go from 32 to 64 node features
    ninput, noutput = 32, 64
    self.nn2 = Seq(Lin(2 * ninput, 2 * noutput), LeakyReLU(self.leak),
                   Lin(2 * noutput, noutput), LeakyReLU(self.leak),
                   Lin(noutput, noutput))
    self.layer2 = EdgeConv(self.nn2, aggr=self.aggr)

    # final prediction layer
    pred_cfg = self.model_config.get('pred_model', 'basic')
    if pred_cfg == 'basic':
        self.edge_predictor = MetaLayer(
            EdgeModel(noutput, self.edge_in, self.leak))
    elif pred_cfg == 'bilin':
        self.edge_predictor = MetaLayer(
            BilinEdgeModel(noutput, self.edge_in, self.leak))
    else:
        raise ValueError('unrecognized prediction model: ' + pred_cfg)
def __init__(self, input_dim, dis_dims, pack=10):
    super(Discriminator, self).__init__()
    dim = input_dim * pack
    self.pack = pack
    self.packdim = dim
    seq = []
    for item in list(dis_dims):
        seq += [
            Linear(dim, item),
            BatchNorm1d(item),
            LeakyReLU(0.2),
            Dropout(0.5)
        ]
        # seq += [Linear(dim, item), Tanh()]
        dim = item
    seq += [Linear(dim, 1)]
    self.seq = Sequential(*seq)
def __init__(self, in_channels, out_channels, use_eql=True, add_noise=False):
    """
    constructor of the class
    :param in_channels: number of input channels
    :param out_channels: number of output channels
    :param use_eql: whether to use equalized learning rate
    :param add_noise: whether to apply gaussian noise
    """
    from torch.nn import AvgPool2d, LeakyReLU
    from torch.nn import Conv2d

    super().__init__()

    if use_eql:
        self.conv_1 = _equalized_conv2d(in_channels, in_channels, (3, 3),
                                        pad=1, bias=True)
        self.conv_2 = _equalized_conv2d(in_channels, out_channels, (3, 3),
                                        pad=1, bias=True)
    else:
        # convolutional modules
        self.conv_1 = Conv2d(in_channels, in_channels, (3, 3),
                             padding=1, bias=True)
        self.conv_2 = Conv2d(in_channels, out_channels, (3, 3),
                             padding=1, bias=True)

    self.downSampler = AvgPool2d(2)  # downsampler

    # leaky_relu:
    self.lrelu = LeakyReLU(0.2)

    # gaussian noise
    self.add_noise = add_noise
    self.gaussian_noise = GaussianNoise()
def __init__(self, nef=64, out_channels=3, in_channels=3, useNorm='BN'):
    super(Pix2pix256, self).__init__()
    # encoder; size comments are given in terms of nef, since the hard-coded
    # channel counts in the original comments only matched nef=32
    # 256x256x(in) -> 256x256x(nef)
    self.pad1 = ReflectionPad2d(padding=1)
    self.conv1 = Conv2d(in_channels, nef, 3, 1, 0)
    # 256x256x(nef) -> 128x128x(2nef)
    self.rcb0 = RCBBlock(nef, nef * 2, useNorm=useNorm)
    # 128x128x(2nef) -> 64x64x(4nef)
    self.rcb1 = RCBBlock(nef * 2, nef * 4, useNorm=useNorm)
    # 64x64x(4nef) -> 32x32x(8nef)
    self.rcb2 = RCBBlock(nef * 4, nef * 8, useNorm=useNorm)
    # 32x32x(8nef) -> 16x16x(8nef)
    self.rcb3 = RCBBlock(nef * 8, nef * 8, useNorm=useNorm)
    # 16x16x(8nef) -> 8x8x(8nef)
    self.rcb4 = RCBBlock(nef * 8, nef * 8, useNorm=useNorm)
    # 8x8x(8nef) -> 4x4x(8nef)
    self.rcb5 = RCBBlock(nef * 8, nef * 8, useNorm=useNorm)
    # 4x4x(8nef) -> 2x2x(8nef)
    self.rcb6 = RCBBlock(nef * 8, nef * 8, useNorm=useNorm)
    # 2x2x(8nef) -> 1x1x(8nef)
    self.relu = LeakyReLU(0.2)
    self.pad2 = ReflectionPad2d(padding=1)
    self.conv7 = Conv2d(nef * 8, nef * 8, 4, 2, 0)
    # decoder with U-Net style skip concatenations, hence the doubled
    # input channels below
    # 1x1x(8nef) -> 2x2x(8nef)
    self.rdcb7 = RDCBBlock(nef * 8, nef * 8, useNorm=useNorm, up=True,
                           padding='repeat')
    # 2x2x(16nef) -> 4x4x(8nef)
    self.rdcb6 = RDCBBlock(nef * 16, nef * 8, useNorm=useNorm, up=True)
    # 4x4x(16nef) -> 8x8x(8nef)
    self.rdcb5 = RDCBBlock(nef * 16, nef * 8, useNorm=useNorm, up=True)
    # 8x8x(16nef) -> 16x16x(8nef)
    self.rdcb4 = RDCBBlock(nef * 16, nef * 8, useNorm=useNorm, up=True)
    # 16x16x(16nef) -> 32x32x(8nef)
    self.rdcb3 = RDCBBlock(nef * 16, nef * 8, useNorm=useNorm, up=True)
    # 32x32x(16nef) -> 64x64x(4nef)
    self.rdcb2 = RDCBBlock(nef * 16, nef * 4, useNorm=useNorm, up=True)
    # 64x64x(8nef) -> 128x128x(2nef)
    self.rdcb1 = RDCBBlock(nef * 8, nef * 2, useNorm=useNorm, up=True)
    # 128x128x(4nef) -> 256x256x(nef)
    self.rdcb0 = RDCBBlock(nef * 4, nef, useNorm=useNorm, up=True)
    # 256x256x(2nef) -> 256x256x(out)
    self.pad3 = ReflectionPad2d(padding=1)
    self.dconv1 = Conv2d(nef * 2, out_channels, 3, 1, 0)
    self.tanh = Tanh()
def __init__(self,
             in_channels,
             out_channels,
             activation=None,
             attn_heads=8,
             alpha=0.2,
             reduction='concat',
             dropout=0.6,
             use_bias=False):
    super().__init__()
    if reduction not in {'concat', 'average'}:
        raise ValueError('Possible reduction methods: concat, average')

    self.in_channels = in_channels
    self.out_channels = out_channels
    self.activation = get_activation(activation)
    self.dropout = Dropout(dropout)
    self.attn_heads = attn_heads
    self.reduction = reduction

    self.kernels = ParameterList()
    self.att_kernels = ParameterList()
    self.biases = ParameterList()

    self.use_bias = use_bias
    if not use_bias:
        self.register_parameter('bias', None)

    # initialize weights for each attention head
    for head in range(self.attn_heads):
        W = Parameter(torch.Tensor(in_channels, out_channels))
        self.kernels.append(W)
        a = Parameter(torch.Tensor(1, 2 * out_channels))
        self.att_kernels.append(a)
        if use_bias:
            bias = Parameter(torch.Tensor(out_channels))
            self.biases.append(bias)

    self.leakyrelu = LeakyReLU(alpha)
    self.special_spmm = SpecialSpmm()
    self.reset_parameters()
def __init__(self, in_dim):
    super(discriminator, self).__init__()
    self.dis = nn.Sequential(
        Conv2d(in_channels=in_dim, out_channels=16, kernel_size=3,
               stride=1, padding=1, bias=False),
        BatchNorm2d(16), LeakyReLU(0.2, True), nn.Dropout2d(0.5),
        Conv2d(in_channels=16, out_channels=32, kernel_size=3,
               stride=2, padding=1, bias=False),
        BatchNorm2d(32), LeakyReLU(0.2, True), nn.Dropout2d(0.5),
        Conv2d(in_channels=32, out_channels=64, kernel_size=3,
               stride=1, padding=1, bias=False),
        BatchNorm2d(64), LeakyReLU(0.2, True), nn.Dropout2d(0.5),
        Conv2d(in_channels=64, out_channels=64, kernel_size=3,
               stride=2, padding=1, bias=False),
        LeakyReLU(0.2, True), nn.Dropout2d(0.5),
        Conv2d(in_channels=64, out_channels=128, kernel_size=3,
               stride=1, padding=1, bias=False),
        BatchNorm2d(128), LeakyReLU(0.2, True), nn.Dropout2d(0.5),
        Conv2d(in_channels=128, out_channels=128, kernel_size=3,
               stride=1, padding=1, bias=False),
        BatchNorm2d(128), LeakyReLU(0.2, True), nn.Dropout2d(0.5),
        Conv2d(in_channels=128, out_channels=1, kernel_size=3,
               stride=1, padding=1, bias=False),
        nn.Sigmoid())
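# A shape sketch (assumes the class above is in scope): only two convs use
# stride 2, so the output is a 1-channel probability map at 1/4 resolution,
# i.e. a PatchGAN-style per-patch decision rather than a single scalar.
import torch

d = discriminator(in_dim=3)
x = torch.randn(1, 3, 64, 64)
print(d.dis(x).shape)   # -> torch.Size([1, 1, 16, 16])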
def __init__(self, n_deconvfilter, h_shape):
    print("\ninitializing \"decoder\"")
    super(decoder, self).__init__()

    # 3d conv7
    conv7_kernel_size = 3
    self.conv7 = Conv3d(in_channels=n_deconvfilter[0],
                        out_channels=n_deconvfilter[1],
                        kernel_size=conv7_kernel_size,
                        padding=(conv7_kernel_size - 1) // 2)
    # 3d conv8
    conv8_kernel_size = 3
    self.conv8 = Conv3d(in_channels=n_deconvfilter[1],
                        out_channels=n_deconvfilter[2],
                        kernel_size=conv8_kernel_size,
                        padding=(conv8_kernel_size - 1) // 2)
    # 3d conv9
    conv9_kernel_size = 3
    self.conv9 = Conv3d(in_channels=n_deconvfilter[2],
                        out_channels=n_deconvfilter[3],
                        kernel_size=conv9_kernel_size,
                        padding=(conv9_kernel_size - 1) // 2)
    # 3d conv10
    conv10_kernel_size = 3
    self.conv10 = Conv3d(in_channels=n_deconvfilter[3],
                         out_channels=n_deconvfilter[4],
                         kernel_size=conv10_kernel_size,
                         padding=(conv10_kernel_size - 1) // 2)
    # 3d conv11
    conv11_kernel_size = 3
    self.conv11 = Conv3d(in_channels=n_deconvfilter[4],
                         out_channels=n_deconvfilter[5],
                         kernel_size=conv11_kernel_size,
                         padding=(conv11_kernel_size - 1) // 2)

    # unpooling layer
    self.unpool3d = Unpool3DLayer(unpool_size=2)

    # nonlinearities of the network
    self.leaky_relu = LeakyReLU(negative_slope=0.01)
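# The forward pass is not shown; a plausible sketch under the assumption of
# the usual unpool -> conv -> LeakyReLU chain in this kind of voxel decoder
# (the last two convs typically run at full resolution, and conv11 produces
# the raw per-voxel scores):
def forward(self, h):
    h = self.leaky_relu(self.conv7(self.unpool3d(h)))
    h = self.leaky_relu(self.conv8(self.unpool3d(h)))
    h = self.leaky_relu(self.conv9(self.unpool3d(h)))
    h = self.leaky_relu(self.conv10(h))
    return self.conv11(h)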
def __init__(self, in_channels: int, out_channels: int, use_eql: bool) -> None:
    super(GenGeneralConvBlock, self).__init__()

    self.in_channels = in_channels
    self.out_channels = out_channels
    self.use_eql = use_eql

    ConvBlock = EqualizedConv2d if use_eql else Conv2d
    self.conv_1 = ConvBlock(in_channels, out_channels, (3, 3),
                            padding=1, bias=True)
    self.conv_2 = ConvBlock(out_channels, out_channels, (3, 3),
                            padding=1, bias=True)

    self.pixNorm = PixelwiseNorm()
    self.lrelu = LeakyReLU(0.2)
def __init__(self, in_channels: int, out_channels: int, use_eql: bool) -> None:
    super(DisGeneralConvBlock, self).__init__()

    self.in_channels = in_channels
    self.out_channels = out_channels
    self.use_eql = use_eql

    ConvBlock = EqualizedConv2d if use_eql else Conv2d
    self.conv_1 = ConvBlock(in_channels, in_channels, (3, 3),
                            padding=1, bias=True)
    self.conv_2 = ConvBlock(in_channels, out_channels, (3, 3),
                            padding=1, bias=True)

    self.downSampler = AvgPool2d(2)
    self.lrelu = LeakyReLU(0.2)
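# A plausible forward for this discriminator block (an assumption; the
# original is not shown): two 3x3 convs with LeakyReLU, then 2x average-pool
# downsampling.
def forward(self, x):
    y = self.lrelu(self.conv_1(x))
    y = self.lrelu(self.conv_2(y))
    return self.downSampler(y)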
def __init__(self, input_dim, discriminator_dim, loss, pac=1):
    super(Discriminator, self).__init__()
    torch.cuda.manual_seed(0)
    torch.manual_seed(0)
    dim = input_dim * pac
    self.pac = pac
    self.pacdim = dim
    seq = []
    for item in list(discriminator_dim):
        seq += [Linear(dim, item), LeakyReLU(0.2), Dropout(0.5)]
        dim = item
    seq += [Linear(dim, 1)]
    if loss == "cross_entropy":
        seq += [Sigmoid()]
    self.seq = Sequential(*seq)
def create_activ():
    """Creates an activation layer.

    Note:
        The type and parameters are configured in
        :attr:`pytorch_layers.Config.activ_mode` and
        :attr:`pytorch_layers.Config.activ_kwargs`.

    Returns:
        torch.nn.Module: The created activation layer.

    """
    config = Config()
    if config.activ_mode is ActivMode.RELU:
        from torch.nn import ReLU
        return ReLU()
    elif config.activ_mode is ActivMode.LEAKY_RELU:
        from torch.nn import LeakyReLU
        return LeakyReLU(**config.activ_kwargs)
    # other ActivMode values fall through and return an implicit None
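# Hypothetical usage, assuming Config behaves like a singleton whose
# attributes can be set before calling the factory (the attribute assignments
# below are illustrative, not part of the shown API):
config = Config()
config.activ_mode = ActivMode.LEAKY_RELU
config.activ_kwargs = {'negative_slope': 0.1}
activ = create_activ()   # -> LeakyReLU(negative_slope=0.1)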
def __init__(self, channels, activation=LeakyReLU(0.2),
             squeeze_factor=8, bias=True):
    """ constructor for the layer """
    from torch.nn import Conv2d, Parameter, Softmax

    # base constructor call
    super().__init__()

    # state of the layer
    self.activation = activation
    self.gamma = Parameter(th.zeros(1))  # learned residual weight, starts at 0

    # modules required for computations: 1x1 convs project the input into
    # query / key / value spaces; query and key are squeezed by squeeze_factor
    self.query_conv = Conv2d(in_channels=channels,
                             out_channels=channels // squeeze_factor,
                             kernel_size=(1, 1), stride=1, padding=0, bias=bias)
    self.key_conv = Conv2d(in_channels=channels,
                           out_channels=channels // squeeze_factor,
                           kernel_size=(1, 1), stride=1, padding=0, bias=bias)
    self.value_conv = Conv2d(in_channels=channels, out_channels=channels,
                             kernel_size=(1, 1), stride=1, padding=0, bias=bias)

    # softmax module for applying attention
    self.softmax = Softmax(dim=-1)
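# The query/key/value convs and the zero-initialized gamma match the
# SAGAN-style self-attention pattern; a sketch of that forward pass (an
# assumption, since the original forward is not shown):
def forward(self, x):
    b, c, h, w = x.size()
    q = self.query_conv(x).view(b, -1, h * w).permute(0, 2, 1)  # B x N x C'
    k = self.key_conv(x).view(b, -1, h * w)                     # B x C' x N
    attn = self.softmax(th.bmm(q, k))                           # B x N x N
    v = self.value_conv(x).view(b, -1, h * w)                   # B x C x N
    out = th.bmm(v, attn.permute(0, 2, 1)).view(b, c, h, w)
    return self.gamma * out + x   # residual connection; gamma starts at 0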
def __init__(self, in_channels, use_eql=True, add_noise=False):
    """
    constructor of the class
    :param in_channels: number of input channels
    :param use_eql: whether to use equalized learning rate
    :param add_noise: whether to apply gaussian noise
    """
    from torch.nn import LeakyReLU
    from torch.nn import Conv2d

    super().__init__()

    # declare the required modules for forward pass
    self.batch_discriminator = MinibatchStdDev()
    if use_eql:
        self.conv_1 = _equalized_conv2d(in_channels + 1, in_channels, (3, 3),
                                        pad=1, bias=True)
        self.conv_2 = _equalized_conv2d(in_channels, in_channels, (4, 4),
                                        bias=True)
        # final layer emulates the fully connected layer
        self.conv_3 = _equalized_conv2d(in_channels, 1, (1, 1), bias=True)
    else:
        # modules required:
        self.conv_1 = Conv2d(in_channels + 1, in_channels, (3, 3),
                             padding=1, bias=True)
        self.conv_2 = Conv2d(in_channels, in_channels, (4, 4), bias=True)
        # final conv layer emulates a fully connected layer
        self.conv_3 = Conv2d(in_channels, 1, (1, 1), bias=True)

    # leaky_relu:
    self.lrelu = LeakyReLU(0.2)

    # gaussian noise
    self.add_noise = add_noise
    self.gaussian_noise = GaussianNoise()
def __init__(self, in_channels):
    """
    constructor for the inner class
    :param in_channels: number of input channels to the block
    """
    from torch.nn import LeakyReLU
    from torch.nn import Conv2d, ConvTranspose2d

    super().__init__()

    # 4x4 transposed conv, e.g. growing a 1x1 input into a 4x4 map
    self.conv_1 = ConvTranspose2d(in_channels, in_channels, (4, 4), bias=True)
    self.conv_2 = Conv2d(in_channels, in_channels, (3, 3),
                         padding=1, bias=True)

    # leaky_relu:
    self.lrelu = LeakyReLU(0.2)
def _create_backbone(self):
    backbone = Sequential()

    section1 = Sequential()
    section1.add_module('Conv1', Conv2d(in_channels=3, out_channels=32,
                                        kernel_size=3, stride=1, padding=1))
    section1.add_module('BatchNorm1', BatchNorm2d(num_features=32))
    section1.add_module('Activation1',
                        LeakyReLU(negative_slope=.1, inplace=True))
    backbone.add_module('Section1', section1)

    backbone.add_module('Section2', YOLOv3Block(index=2, in_channels=32,
                                                out_channels=64,
                                                convs_residual_count=1))
    backbone.add_module('Section3', YOLOv3Block(index=5, in_channels=64,
                                                out_channels=128,
                                                convs_residual_count=2))
    backbone.add_module('Section4', YOLOv3Block(index=10, in_channels=128,
                                                out_channels=256,
                                                convs_residual_count=8))
    backbone.add_module('Section5', YOLOv3Block(index=27, in_channels=256,
                                                out_channels=512,
                                                convs_residual_count=8))
    backbone.add_module('Section6', YOLOv3Block(index=44, in_channels=512,
                                                out_channels=1024,
                                                convs_residual_count=4))
    return backbone
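# The residual counts (1/2/8/8/4) and channel doubling up to 1024 match
# Darknet-53. A shape expectation sketch, assuming each YOLOv3Block opens
# with a stride-2 conv as Darknet-53 does, giving an overall downsampling
# factor of 2**5 = 32 (`model` is a hypothetical instance of the owning
# class):
import torch

backbone = model._create_backbone()
x = torch.randn(1, 3, 416, 416)
print(backbone(x).shape)   # expected: torch.Size([1, 1024, 13, 13])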
def __init__(self, in_channels, out_channels, use_eql):
    """
    constructor for the class
    :param in_channels: number of input channels to the block
    :param out_channels: number of output channels required
    :param use_eql: whether to use equalized learning rate
    """
    super().__init__()

    self.upsample = lambda x: interpolate(x, scale_factor=2)

    if use_eql:
        self.conv_1 = _equalized_conv2d(in_channels, out_channels, (3, 3),
                                        pad=1, bias=True)
        self.conv_2 = _equalized_conv2d(out_channels, out_channels, (3, 3),
                                        pad=1, bias=True)
    else:
        self.conv_1 = Conv2d(in_channels, out_channels, (3, 3),
                             padding=1, bias=True)
        self.conv_2 = Conv2d(out_channels, out_channels, (3, 3),
                             padding=1, bias=True)

    # pixelwise feature vector normalization operation
    self.pixNorm = PixelwiseNorm()

    # leaky_relu:
    self.lrelu = LeakyReLU(0.2)
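# A plausible forward in the progressive-GAN style (an assumption; the
# original is not shown): upsample, then conv -> LeakyReLU -> pixelwise
# norm, twice.
def forward(self, x):
    y = self.upsample(x)
    y = self.pixNorm(self.lrelu(self.conv_1(y)))
    y = self.pixNorm(self.lrelu(self.conv_2(y)))
    return y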