Code example #1
import numpy as np
import torch.nn as nn


class discriminator_model(nn.Module):
    def __init__(self, h, n, input_dims=(3, 64, 64)):
        super(discriminator_model, self).__init__()

        self.n = n
        self.h = h
        channel, width, height = input_dims

        encoder = []

        self.blocks = int(np.log2(width) - 2)

        # the input tensor has `channel` channels, so the stem conv maps channel -> n
        encoder.append(nn.Conv2d(channel, n, kernel_size=3, padding=1))
        prev_channel_size = n

        for i in range(self.blocks):
            channel_size = (i+1) * n
            encoder.append(nn.Conv2d(prev_channel_size, channel_size, kernel_size=3, padding=1))
            encoder.append(nn.ELU())
            encoder.append(nn.Conv2d(channel_size, channel_size, kernel_size=3, padding=1))
            encoder.append(nn.ELU())
            

            if i < self.blocks - 1:
                # downsample with stride 2 between blocks so the final feature map is 8x8,
                # matching fc1's input size of 8 * 8 * blocks * n below
                encoder.append(nn.Conv2d(channel_size, channel_size, kernel_size=3, stride=2, padding=1))
                encoder.append(nn.ELU())


            prev_channel_size = channel_size

        self.encoder_model = nn.Sequential(*encoder)

        self.fc1 = nn.Linear(8 * 8 * self.blocks * n, h)

        decoder = []

        for i in range(self.blocks):
            decoder.append(nn.Conv2d(n, n, kernel_size=3, padding=1))
            decoder.append(nn.ELU())
            decoder.append(nn.Conv2d(n, n, kernel_size=3, padding=1))
            decoder.append(nn.ELU())
            
            if i < self.blocks - 1:
                decoder.append(nn.UpsamplingNearest2d(scale_factor=2))

        
        
        decoder.append(nn.Conv2d(n, channel, kernel_size=3, padding=1))
        self.decoder_model = nn.Sequential(*decoder)  # must be an attribute: forward() uses it

        self.fc2 = nn.Linear(h, 8 * 8 * n)

    def forward(self, x):
        x = self.encoder_model(x).view(x.size(0), -1)
        x = self.fc1(x)

        x = self.fc2(x).view(-1, self.n, 8, 8)
        x = self.decoder_model(x)

        return x
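
A quick smoke test of the module as reconstructed here (a minimal sketch; the class name, the stride-2 downsampling, and the decoder_model attribute fix above are reconstructions, not from the original snippet):

import torch

model = discriminator_model(h=64, n=64)        # blocks = log2(64) - 2 = 4 for 64x64 input
out = model(torch.randn(2, 3, 64, 64))
print(out.shape)                               # torch.Size([2, 3, 64, 64])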
Code example #2
File: ModelComp.py Project: pan-webis-de/gagala18
    def __init__(self,
                 embedding=128,
                 nb_words=0,
                 frag_size=0,
                 channels=0,
                 kernel_size=0,
                 padding=0,
                 stride=0,
                 dilation=0):
        super(Encoder, self).__init__()

        self.channels = channels
        self.kernel_size = kernel_size
        self.padding = padding
        self.stride = stride
        self.dilation = dilation
        self.embedding = embedding
        self.nb_words = nb_words
        self.frag_size = frag_size

        self.channel = 300
        self.len_encoding = 500

        self.embedder_words = nn.Embedding(self.nb_words,
                                           self.embedding,
                                           max_norm=5.0,
                                           norm_type=2.0)
        #self.embedder_words.weight.requires_grad=False

        self.lstm = nn.LSTM(self.embedding,
                            self.embedding,
                            num_layers=3,
                            dropout=0.5,
                            bidirectional=True)

        self.sru1 = SRU(
            self.embedding,
            self.embedding,
            num_layers=4,  # number of stacking RNN layers
            dropout=0.2,  # dropout applied between RNN layers
            rnn_dropout=0.2,  # variational dropout applied on linear transformation
            use_tanh=1,  # use tanh?
            use_relu=0,  # use ReLU?
            #use_selu = 0,            # use SeLU?
            bidirectional=True  # bidirectional RNN ?
        )
        self.sru2 = SRU(
            self.embedding * 2 * 4,
            self.embedding,
            num_layers=4,  # number of stacking RNN layers
            dropout=0.2,  # dropout applied between RNN layers
            rnn_dropout=0.2,  # variational dropout applied on linear transformation
            use_tanh=1,  # use tanh?
            use_relu=0,  # use ReLU?
            #use_selu = 0,            # use SeLU?
            bidirectional=True  # bidirectional RNN ?
        )

        self.att1 = nn.Parameter(torch.randn(self.embedding * 2 * 4) / 100)
        self.att2 = nn.Parameter(torch.randn(self.embedding * 2 * 4) / 100)
        self.att3 = nn.Parameter(torch.randn(self.len_encoding) / 100)

        #self.channel = 256

        self.fc1 = nn.Linear(1502, 300)
        self.fc2 = nn.Linear(300, 300)
        self.fc3 = nn.Linear(300, 300)
        self.fc4 = nn.Linear(1000, 500)
        #self.fc2 = nn.Linear(128, 64)
        #self.fc1 = nn.Linear(20,1)
        #self.fc2 = nn.Linear(128, 64)

        #self.fc3 = nn.Linear(self.embedding*2 ,self.embedding)

        self.dropout1 = nn.Dropout(0.5)
        self.dropout2 = nn.Dropout(0.2)
        self.sigmoid = nn.Sigmoid()
        self.relu = nn.ReLU()
        self.tanh = nn.Tanh()
        self.elu = nn.ELU()
        self.leaky = nn.LeakyReLU()
        self.drop1 = nn.Dropout2d(0.5)
        self.softmax = nn.Softmax(dim=0)
        #for encoding

        self.conv_1 = nn.Conv2d(1,
                                self.channel,
                                kernel_size=(3, self.embedding),
                                padding=(1, 0),
                                stride=2,
                                dilation=1,
                                groups=1)
        output_size1 = ((self.frag_size + (2 * 0) - 1 * (3 - 1) - 1) / 2) + 1
        output_size2 = ((self.embedding + (2 * 0) - 1 *
                         (self.embedding - 1) - 1) / 2) + 1
        #print(output_size1,output_size2,'after conv1')

        self.conv_2 = nn.Conv2d(self.channel,
                                self.channel,
                                kernel_size=(500, 1),
                                padding=(0, 0),
                                stride=2,
                                dilation=1,
                                groups=1)
        output_size1 = ((output_size1 + (2 * 0) - 1 * (3 - 1) - 1) / 2) + 1
        output_size2 = ((output_size2 + (2 * 0) - 1 * (1 - 1) - 1) / 2) + 1

        #print(output_size1,output_size2,'after conv2')

        self.conv_3 = nn.Conv2d(self.channel,
                                self.len_encoding,
                                kernel_size=(1, 1),
                                padding=(0, 0),
                                stride=2,
                                dilation=1,
                                groups=1)

        kernel_value_reversed = self.dilation * (
            self.kernel_size - 1) + 2  # from the ConvTranspose2d output-size formula in the PyTorch docs

        self.conv_4 = nn.ConvTranspose2d(100,
                                         self.channel,
                                         kernel_size=(10, 1),
                                         padding=(0, 0),
                                         stride=10,
                                         dilation=1,
                                         groups=1)
        output_size1 = ((output_size1 - 1) * (self.stride) -
                        (2 * self.padding) + (kernel_value_reversed))
        output_size2 = ((output_size2 - 1) * (self.stride) -
                        (2 * self.padding) + (kernel_value_reversed))
        #print(output_size1,output_size2,'after deconvpierwszy')

        self.conv_5 = nn.ConvTranspose2d(self.channel,
                                         self.channel,
                                         kernel_size=(10, 1),
                                         padding=(0, 0),
                                         stride=10,
                                         dilation=1,
                                         groups=1)

        #print(output_size1, output_size2,
        #      output_size1*output_size2*(self.channels),self.embedding,'after maxpool2')

        self.conv_6 = nn.ConvTranspose2d(self.channel,
                                         1,
                                         kernel_size=(10, self.embedding),
                                         padding=(0, 0),
                                         stride=10,
                                         dilation=1,
                                         groups=1)
        output_size1 = ((output_size1 - 1) * (self.stride) -
                        (2 * self.padding) + (kernel_value_reversed))
        output_size2 = ((output_size2 - 1) * (self.stride) -
                        (2 * self.padding) + (kernel_value_reversed))
        #print(output_size1,output_size2,'after decconv1drugi')

        output_size1 = ((output_size1 - 1) * (self.stride) -
                        (2 * self.padding) + (kernel_value_reversed))
        output_size2 = ((output_size2 - 1) * (self.stride) -
                        (2 * self.padding) + (kernel_value_reversed))

        #print(output_size1,output_size2,'after decconv1dtrzeci')

        #print(output_size1,output_size2,'last deconv')
        self.splitter = nn.Linear(1, 50)
        self.merger = nn.Linear(50, 1)

        self.final_layer_words = torch.nn.Linear(self.embedding, self.nb_words)

        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
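
The repeated output_size bookkeeping above follows the Conv2d output-length formula from the PyTorch docs. A small helper (not in the original file) that also applies the floor PyTorch actually uses, whereas the snippet keeps float division:

def conv_out_len(l_in, kernel, stride=1, padding=0, dilation=1):
    # L_out = floor((L_in + 2*padding - dilation*(kernel - 1) - 1) / stride) + 1
    return (l_in + 2 * padding - dilation * (kernel - 1) - 1) // stride + 1

# e.g. the first computation above (kernel 3 along the fragment axis, stride 2):
# conv_out_len(frag_size, 3, stride=2) == (frag_size - 3) // 2 + 1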
Code example #3
File: train.py Project: zengyi-li/MDSM
def main():

    args = cfg.parse_args()
    torch.cuda.manual_seed(args.rand_seed)

    #switch datasets and models

    if args.dataset == 'cifar':
        from data.cifar import inf_train_gen
        itr = inf_train_gen(args.batch_size, flip=False)
        netE = Res18_Quadratic(3,
                               args.n_chan,
                               32,
                               normalize=False,
                               AF=nn.ELU())
        #netE = SE_Res18_Quadratic(3,args.n_chan,32,normalize=False,AF=Swish())

    elif args.dataset == 'mnist':
        from data.mnist_32 import inf_train_gen
        itr = inf_train_gen(args.batch_size)
        netE = Res12_Quadratic(1,
                               args.n_chan,
                               32,
                               normalize=False,
                               AF=nn.ELU())

    elif args.dataset == 'fmnist':
        #print(dataset+str(args.n_chan))
        from data.fashion_mnist_32 import inf_train_gen
        itr = inf_train_gen(args.batch_size)
        netE = Res12_Quadratic(1,
                               args.n_chan,
                               32,
                               normalize=False,
                               AF=nn.ELU())

    else:
        raise NotImplementedError('{} unknown dataset'.format(args.dataset))

    #setup gpu
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    netE = netE.to(device)
    if args.n_gpus > 1:
        netE = nn.DataParallel(netE)

    #setup path

    now = datetime.now()
    timestamp = now.strftime('%Y_%m_%d_%H_%M_%S')
    #pdb.set_trace()
    print(str(args.cont))
    #print(str(args.time))
    if args.cont:
        root = 'logs/' + args.log + '_' + args.time  #compose string for loading
        #load network
        file_name = 'netE_' + str(args.net_indx) + '.pt'
        netE.load_state_dict(torch.load(root + '/models/' + file_name))
    else:  # start new will create logging folder
        root = 'logs/' + args.log + '_' + timestamp  # add timestamp
        # overwrite if the folder already exists; unlikely, since a timestamp is used
        if os.path.isdir(root):
            shutil.rmtree(root)
        os.makedirs(root)
        os.makedirs(root + '/models')
        os.makedirs(root + '/samples')

    writer = SummaryWriter(root)

    # setup optimizer and lr scheduler
    params = {'lr': args.max_lr, 'betas': (0.9, 0.95)}
    optimizerE = torch.optim.Adam(netE.parameters(), **params)
    if args.lr_schedule == 'exp':
        scheduler = torch.optim.lr_scheduler.StepLR(optimizerE,
                                                    int(args.n_iter / 6))

    elif args.lr_schedule == 'cosine':
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizerE,
                                                               args.n_iter,
                                                               eta_min=1e-6,
                                                               last_epoch=-1)

    elif args.lr_schedule == 'const':
        scheduler = torch.optim.lr_scheduler.StepLR(optimizerE,
                                                    int(args.n_iter))

    #train
    print_interval = 50
    max_iter = args.n_iter + args.net_indx
    batchSize = args.batch_size
    sigma0 = 0.1
    sigma02 = sigma0**2

    if args.noise_distribution == 'exp':
        sigmas_np = np.logspace(np.log10(args.min_noise),
                                np.log10(args.max_noise), batchSize)
    elif args.noise_distribution == 'lin':
        sigmas_np = np.linspace(args.min_noise, args.max_noise, batchSize)

    sigmas = torch.Tensor(sigmas_np).view((batchSize, 1, 1, 1)).to(device)

    start_time = time.time()

    for i in range(args.net_indx, args.net_indx + args.n_iter):
        x_real = next(itr).to(device)
        x_noisy = x_real + sigmas * torch.randn_like(x_real)

        x_noisy = x_noisy.requires_grad_()
        E = netE(x_noisy).sum()
        grad_x = torch.autograd.grad(E, x_noisy, create_graph=True)[0]
        x_noisy = x_noisy.detach()  # detach() is not in-place; rebind so the loss below treats x_noisy as constant

        optimizerE.zero_grad()

        LS_loss = (((x_real - x_noisy) / sigmas / sigma02 + grad_x / sigmas) ** 2 / batchSize).sum()

        LS_loss.backward()
        optimizerE.step()
        scheduler.step()

        if (i + 1) % print_interval == 0:
            time_spent = time.time() - start_time
            start_time = time.time()
            netE.eval()
            E_real = netE(x_real).mean()
            E_noise = netE(torch.rand_like(x_real)).mean()
            netE.train()

            print(
                'Iteration {}/{} ({:.0f}%), E_real {:e}, E_noise {:e}, Normalized Loss {:e}, time {:4.1f}'
                .format(i + 1, max_iter, 100 * ((i + 1) / max_iter),
                        E_real.item(), E_noise.item(),
                        (sigma02**2) * (LS_loss.item()), time_spent))

            writer.add_scalar('E_real', E_real.item(), i + 1)
            writer.add_scalar('E_noise', E_noise.item(), i + 1)
            writer.add_scalar('loss', (sigma02**2) * LS_loss.item(), i + 1)
            del E_real, E_noise, x_real, x_noisy

        if (i + 1) % args.save_every == 0:
            print("-" * 50)
            file_name = args.file_name + str(i + 1) + '.pt'
            torch.save(netE.state_dict(), root + '/models/' + file_name)
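
Read directly from the training loop, the objective is a multiscale denoising score matching loss: with $\tilde{x}_i = x_i + \sigma_i \varepsilon_i$, $\varepsilon_i \sim \mathcal{N}(0, I)$, and $\sigma_0 = 0.1$,

$$\mathcal{L} = \frac{1}{B} \sum_{i=1}^{B} \left\| \frac{x_i - \tilde{x}_i}{\sigma_i \sigma_0^2} + \frac{\nabla_{\tilde{x}} E_\theta(\tilde{x}_i)}{\sigma_i} \right\|^2,$$

where each batch element gets its own noise scale $\sigma_i$ from the logspace/linspace grid above.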
Code example #4
File: embed_dnn.py Project: sheilsarda/banditml
    def forward(
        self,
        X_float: torch.FloatTensor,
        X_id_list: torch.LongTensor = None,
        X_id_list_idxs: torch.LongTensor = None,
    ) -> torch.FloatTensor:
        """ Forward pass for generic feed-forward DNNs. Assumes activation names
        are valid pytorch activation names.
        """

        id_feature_col_idx = 0
        # concat embedded feature to the float feature tensor
        for feature_name in self.id_feature_order:
            embedding_table_id = self.feature_specs[feature_name]["product_set_id"]
            embedding_table = self.embeddings[
                self.embeddings_idx_map[embedding_table_id]
            ]
            col_start = X_id_list_idxs[0][id_feature_col_idx]
            col_end = X_id_list_idxs[0][id_feature_col_idx + 1]
            embeddings = embedding_table(X_id_list[:, col_start:col_end])

            # https://datascience.stackexchange.com/questions/44635/does-sum-of-embeddings-make-sense
            # average the embeddings, but first drop the padded 0 embeddings
            # using a shape that maintains the tensor dimensions
            valid_row_mask = (embeddings.sum(dim=2, keepdim=True) != 0).float()
            denom = valid_row_mask.sum(dim=(1, 2)).unsqueeze(dim=1)
            embedding_sum = embeddings.sum(dim=1)
            avg_embeddings = embedding_sum / denom
            # set Nan values to zero (case where there is an empty id list)
            avg_embeddings[avg_embeddings != avg_embeddings] = 0

            # concatenate the float tensor and embedded tensor
            X_float = torch.cat((X_float, avg_embeddings), dim=1)
            id_feature_col_idx += 2

        x = X_float
        for i, activation in enumerate(self.activations):
            if self.use_batch_norm:
                x = self.batch_norm_ops[i](x)
            x = self.layers[i](x)
            if self.use_layer_norm and i < len(self.layer_norm_ops):
                x = self.layer_norm_ops[i](x)
            if activation == "linear":
                pass
            elif activation == "tanh":
                x = torch.tanh(x)
            else:
                x = getattr(F, activation)(x)
            if self.use_dropout and i < len(self.dropout_layers):
                x = self.dropout_layers[i](x)
            if self.is_mdn and i == (len(self.activations)-2):
                sigma = self.mdn_layer[0](x)
                m = nn.ELU()
                sigma = m(sigma) + 1.0


        if self.is_classification:
            x = F.softmax(x, dim=1)

        if self.is_mdn:
            x = torch.cat([x, sigma], dim=0)

        return x
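
The masked averaging above, isolated as a minimal sketch (hypothetical batch of one id list whose last row is padding):

import torch

embeddings = torch.tensor([[[1., 2.], [3., 4.], [0., 0.]]])   # padded row embeds to zeros
valid_row_mask = (embeddings.sum(dim=2, keepdim=True) != 0).float()
denom = valid_row_mask.sum(dim=(1, 2)).unsqueeze(dim=1)       # tensor([[2.]])
avg = embeddings.sum(dim=1) / denom                           # tensor([[2., 3.]])
avg[avg != avg] = 0                                           # NaN -> 0 for an empty id list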
Code example #5
    def __init__(self, nhidden):
        super().__init__()
        self.nhidden = nhidden
        self.elu = nn.ELU()
        self.fc1 = nn.Linear(1, nhidden)
        self.fc2 = nn.Linear(nhidden, nhidden)
Code example #6
File: buildingblocks.py Project: tzofi/ssl-for-axons
def create_conv(in_channels,
                out_channels,
                kernel_size,
                order,
                num_groups,
                padding=1):
    """
    Create a list of modules which together constitute a single conv layer with non-linearity
    and optional batchnorm/groupnorm.

    Args:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
        order (string): order of things, e.g.
            'cr' -> conv + ReLU
            'crg' -> conv + ReLU + groupnorm
            'cl' -> conv + LeakyReLU
            'ce' -> conv + ELU
        num_groups (int): number of groups for the GroupNorm
        padding (int): add zero-padding to the input

    Return:
        list of tuple (name, module)
    """
    assert 'c' in order, "Conv layer MUST be present"
    assert order[0] not in 'rle', 'Non-linearity cannot be the first operation in the layer'

    modules = []
    for i, char in enumerate(order):
        if char == 'r':
            modules.append(('ReLU', nn.ReLU(inplace=True)))
        elif char == 'l':
            modules.append(
                ('LeakyReLU', nn.LeakyReLU(negative_slope=0.1, inplace=True)))
        elif char == 'e':
            modules.append(('ELU', nn.ELU(inplace=True)))
        elif char == 'c':
            # add learnable bias only in the absence of batchnorm/groupnorm
            bias = not ('g' in order or 'b' in order)
            modules.append(('conv',
                            conv3d(in_channels,
                                   out_channels,
                                   kernel_size,
                                   bias,
                                   padding=padding)))
        elif char == 'g':
            is_before_conv = i < order.index('c')
            assert not is_before_conv, 'GroupNorm MUST go after the Conv3d'
            # number of groups must be less or equal the number of channels
            if out_channels < num_groups:
                num_groups = out_channels
            modules.append(('groupnorm',
                            nn.GroupNorm(num_groups=num_groups,
                                         num_channels=out_channels)))
        elif char == 'b':
            is_before_conv = i < order.index('c')
            if is_before_conv:
                modules.append(('batchnorm', nn.BatchNorm3d(in_channels)))
            else:
                modules.append(('batchnorm', nn.BatchNorm3d(out_channels)))
        else:
            raise ValueError(
                f"Unsupported layer type '{char}'. MUST be one of ['b', 'g', 'r', 'l', 'e', 'c']"
            )

    return modules
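
A hedged usage sketch (conv3d is a helper defined elsewhere in buildingblocks.py): the returned (name, module) pairs drop straight into an OrderedDict-backed Sequential.

from collections import OrderedDict
import torch.nn as nn

layers = create_conv(in_channels=32, out_channels=64, kernel_size=3,
                     order='crg', num_groups=8)
block = nn.Sequential(OrderedDict(layers))    # conv3d -> ReLU -> GroupNorm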
Code example #7
    def __init__(self,
                 input_dim,
                 output_dim,
                 kernel_size,
                 stride,
                 padding=0,
                 conv_padding=0,
                 dilation=1,
                 weight_norm='none',
                 norm='none',
                 activation='relu',
                 pad_type='zero',
                 transpose=False):
        super(Conv2dBlock, self).__init__()
        self.use_bias = True
        # initialize padding
        if pad_type == 'reflect':
            self.pad = nn.ReflectionPad2d(padding)
        elif pad_type == 'replicate':
            self.pad = nn.ReplicationPad2d(padding)
        elif pad_type == 'zero':
            self.pad = nn.ZeroPad2d(padding)
        elif pad_type == 'none':
            self.pad = None
        else:
            assert 0, "Unsupported padding type: {}".format(pad_type)

        # initialize normalization
        norm_dim = output_dim
        if norm == 'bn':
            self.norm = nn.BatchNorm2d(norm_dim)
        elif norm == 'in':
            self.norm = nn.InstanceNorm2d(norm_dim)
        elif norm == 'none':
            self.norm = None
        else:
            assert 0, "Unsupported normalization: {}".format(norm)

        if weight_norm == 'sn':
            self.weight_norm = spectral_norm_fn
        elif weight_norm == 'wn':
            self.weight_norm = weight_norm_fn
        elif weight_norm == 'none':
            self.weight_norm = None
        else:
            assert 0, "Unsupported normalization: {}".format(weight_norm)

        # initialize activation
        if activation == 'relu':
            self.activation = nn.ReLU(inplace=True)
        elif activation == 'elu':
            self.activation = nn.ELU(inplace=True)
        elif activation == 'lrelu':
            self.activation = nn.LeakyReLU(0.2, inplace=True)
        elif activation == 'prelu':
            self.activation = nn.PReLU()
        elif activation == 'selu':
            self.activation = nn.SELU(inplace=True)
        elif activation == 'tanh':
            self.activation = nn.Tanh()
        elif activation == 'none':
            self.activation = None
        else:
            assert 0, "Unsupported activation: {}".format(activation)

        # initialize convolution
        if transpose:
            self.conv = nn.ConvTranspose2d(input_dim,
                                           output_dim,
                                           kernel_size,
                                           stride,
                                           padding=conv_padding,
                                           output_padding=conv_padding,
                                           dilation=dilation,
                                           bias=self.use_bias)
        else:
            self.conv = nn.Conv2d(input_dim,
                                  output_dim,
                                  kernel_size,
                                  stride,
                                  padding=conv_padding,
                                  dilation=dilation,
                                  bias=self.use_bias)

        if self.weight_norm:
            self.conv = self.weight_norm(self.conv)
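
A minimal construction sketch (the forward pass is not part of this snippet; presumably pad -> conv -> norm -> activation):

block = Conv2dBlock(input_dim=64, output_dim=128, kernel_size=3, stride=1,
                    padding=1, pad_type='reflect', norm='in', activation='lrelu')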
Code example #8
    def __init__(self, resolution_inp=256, resolution_op=256, channel=6):
        super(AntiSpoofing, self).__init__()

        self.resolution_inp = resolution_inp
        self.resolution_op = resolution_op
        self.channel = channel
        # define layers
        self.zeropad2d = nn.ZeroPad2d((1, 1, 1, 1))
        self.elu = nn.ELU()
        self.conv1 = nn.Conv2d(in_channels=6,
                               out_channels=64,
                               kernel_size=3,
                               stride=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(num_features=64)
        self.conv2 = nn.Conv2d(in_channels=64,
                               out_channels=128,
                               kernel_size=3,
                               stride=1,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(num_features=128)
        self.conv3 = nn.Conv2d(in_channels=128,
                               out_channels=196,
                               kernel_size=3,
                               stride=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(num_features=196)
        self.conv4 = nn.Conv2d(in_channels=196,
                               out_channels=128,
                               kernel_size=3,
                               stride=1,
                               bias=False)
        self.bn4 = nn.BatchNorm2d(num_features=128)
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.conv5 = nn.Conv2d(in_channels=128,
                               out_channels=128,
                               kernel_size=3,
                               stride=1,
                               bias=False)
        self.bn5 = nn.BatchNorm2d(num_features=128)
        self.conv6 = nn.Conv2d(in_channels=128,
                               out_channels=196,
                               kernel_size=3,
                               stride=1,
                               bias=False)
        self.bn6 = nn.BatchNorm2d(num_features=196)
        self.conv7 = nn.Conv2d(in_channels=196,
                               out_channels=128,
                               kernel_size=3,
                               stride=1,
                               bias=False)
        self.bn7 = nn.BatchNorm2d(num_features=128)
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.conv8 = nn.Conv2d(in_channels=128,
                               out_channels=128,
                               kernel_size=3,
                               stride=1,
                               bias=False)
        self.bn8 = nn.BatchNorm2d(num_features=128)
        self.conv9 = nn.Conv2d(in_channels=128,
                               out_channels=196,
                               kernel_size=3,
                               stride=1,
                               bias=False)
        self.bn9 = nn.BatchNorm2d(num_features=196)
        self.conv10 = nn.Conv2d(in_channels=196,
                                out_channels=128,
                                kernel_size=3,
                                stride=1,
                                bias=False)
        self.bn10 = nn.BatchNorm2d(num_features=128)
        self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.conv11 = nn.Conv2d(in_channels=384,
                                out_channels=128,
                                kernel_size=3,
                                stride=1,
                                bias=False)
        self.bn11 = nn.BatchNorm2d(num_features=128)
        self.conv12 = nn.Conv2d(in_channels=128,
                                out_channels=3,
                                kernel_size=3,
                                stride=1,
                                bias=False)
        self.bn12 = nn.BatchNorm2d(num_features=3)
        self.conv13 = nn.Conv2d(in_channels=3,
                                out_channels=1,
                                kernel_size=3,
                                stride=1,
                                bias=False)
        self.bn13 = nn.BatchNorm2d(num_features=1)
Code example #9
File: assoc.py Project: zhmd/salad
def conv2d(m, n, k, act=True):
    layers = [nn.Conv2d(m, n, k, padding=1)]

    if act: layers += [nn.ELU()]

    return nn.Sequential(*layers)
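
A quick usage sketch of this helper:

import torch

features = conv2d(3, 64, 3)                   # Conv2d(3, 64, 3, padding=1) + ELU
out = features(torch.randn(8, 3, 32, 32))     # -> (8, 64, 32, 32)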
Code example #10
    def __init__(self,
                 in_dim,
                 num_labels,
                 layers,
                 out_dim,
                 input_kw=2,
                 input_dil=1,
                 positions=False,
                 softmax=False,
                 causal=False,
                 batch_norm=False):
        """
        Constructor for RawCTCNet.

        Args:
        * in_dim: python int; the number of channels in the featurized input sequence.
        * num_labels: the number of labels in the softmax distribution at the output.
        * layers: list of (non-causal) convolutional layers to stack. Each entry is of the form
          (in_channels, out_channels, kernel_size, dilation).
        * out_dim: the final dimension of the output before the dimensionality reduction to logits over labels.
        * input_kw: size of the internal kernel of the conv1d going from input to conv-stack.
        * input_dil: dilation for conv block going from input to conv-stack.
        * positions: if True, use conv1x1 to mix position information to features.
        * softmax: if True, softmax the output layer before returning. If False, return un-normalized sequence.
        * causal: if True, use causal convolutions; otherwise use standard convolutions.
        * batch_norm: if True, apply a batch-norm before each residual block.
        """
        ### parent constructor
        super(RawCTCNet, self).__init__()

        ### attributes
        self.in_dim = in_dim
        self.num_labels = num_labels
        self.layers = layers
        self.num_layers = len(layers)
        self.out_dim = out_dim
        # input 1x1Conv layer:
        self.input_kw = input_kw
        self.input_dil = input_dil
        # position-mixing on/off:
        self.positions = positions
        # softmax on/off:
        self.softmax = softmax
        # causal vs. standard convolutions:
        self.causal = causal
        # batch-norm on/off:
        self.batch_norm = batch_norm

        ### submodules
        # convolutional featurization layer:
        self.feature_layer = nn.Sequential(
            nn.Conv1d(in_dim, in_dim, kernel_size=1, padding=0, dilation=1),
            nn.ELU(),
            nn.Conv1d(in_dim, in_dim, kernel_size=1, padding=0, dilation=1),
            nn.ELU())

        # position-mixing bilinear layer:
        if self.positions:
            self.positions_conv1x1 = nn.Sequential(
                nn.Conv1d(1, in_dim, kernel_size=1, padding=0, dilation=1),
                nn.Hardtanh())

        # input layer:
        self.input_block = ResidualBlock(in_dim,
                                         layers[0][0],
                                         input_kw,
                                         input_dil,
                                         causal=self.causal)
        self.input_skip_bottleneck = nn.Conv1d(layers[0][0],
                                               out_dim,
                                               kernel_size=1,
                                               padding=0,
                                               dilation=1)

        # stack of residual convolutions and their bottlenecks for skip connections:
        convolutions = []
        skip_conn_bottlenecks = []
        for (c_in, c_out, k, d) in layers:
            convolutions.append(
                ResidualBlock(c_in, c_out, k, d, causal=self.causal))
            skip_conn_bottlenecks.append(
                nn.Conv1d(c_out, out_dim, kernel_size=1, padding=0,
                          dilation=1))
        self.convolutions = nn.ModuleList(convolutions)
        self.bottlenecks = nn.ModuleList(skip_conn_bottlenecks)

        # optional batch norms:
        if self.batch_norm:
            batch_norms = []
            for (c_in, _, _, _) in layers:
                batch_norms.append(nn.BatchNorm1d(c_in, affine=True))
            self.batch_norms = nn.ModuleList(batch_norms)

        # (1x1 Conv + ReLU + 1x1 Conv) stack, going from output dimension to logits over labels:
        self.output_block = nn.Sequential(
            nn.LeakyReLU(0.01),
            nn.Conv1d(out_dim, out_dim, kernel_size=1, dilation=1),
            nn.LeakyReLU(0.01),
            nn.Conv1d(out_dim, num_labels, kernel_size=1, dilation=1))

        ### sensible initializations for parameters:
        eps = 0.0001
        if self.positions:
            for p in self.positions_conv1x1.parameters():
                if len(p.size()) > 1:
                    nn_init.eye(p.view(p.size(0), p.size(1)))
                    p.data.add_(torch.randn(p.size()).mul_(eps))
                if len(p.size()) == 1:
                    p.data.zero_().add_(torch.randn(p.size()).mul_(eps))
        for p in self.feature_layer.parameters():
            if len(p.size()) > 1: nn_init.kaiming_uniform(p)
            if len(p.size()) == 1:
                p.data.zero_().add_(torch.randn(p.size()).mul_(eps))
        for p in self.input_block.parameters():
            if len(p.size()) > 1: nn_init.kaiming_uniform(p)
            if len(p.size()) == 1:
                p.data.zero_().add_(torch.randn(p.size()).mul_(eps))
        for p in self.convolutions.parameters():
            if len(p.size()) > 1: nn_init.kaiming_uniform(p)
            if len(p.size()) == 1:
                p.data.zero_().add_(torch.randn(p.size()).mul_(eps))
        for p in self.bottlenecks.parameters():
            if len(p.size()) > 1:
                nn_init.eye(p.view(p.size(0), p.size(1)))
                p.data.add_(torch.randn(p.size()).mul_(eps))
            if len(p.size()) == 1:
                p.data.zero_().add_(torch.randn(p.size()).mul_(eps))
        for p in self.output_block.parameters():
            if len(p.size()) > 1: nn_init.kaiming_uniform(p)
            if len(p.size()) == 1:
                p.data.zero_().add_(torch.randn(p.size()).mul_(eps))
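
A hypothetical instantiation following the documented layer format, where each entry is (in_channels, out_channels, kernel_size, dilation); all values are illustrative:

layers = [(256, 256, 3, 2 ** i) for i in range(4)]    # doubling dilations
model = RawCTCNet(in_dim=256, num_labels=5, layers=layers, out_dim=256,
                  input_kw=2, input_dil=1, positions=True, softmax=False,
                  causal=False, batch_norm=True)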
Code example #11
    def __init__(self, input_dim, output_dim):
        super(_Aggregation, self).__init__()
        self.aggre = nn.Sequential(
            nn.Linear(input_dim, output_dim, bias=True),
            nn.ELU(),
        )
Code example #12
class Conv2dExtractor(nn.Module):
    """
    Provides interface of convolutional extractor.

    Note:
        Do not use this class directly, use one of the sub classes.
        Define the 'self.conv' class variable.

    Inputs: inputs, input_lengths
        - **inputs** (batch, time, dim): Tensor containing input vectors
        - **input_lengths**: Tensor containing sequence lengths

    Returns: outputs, output_lengths
        - **outputs**: Tensor produced by the convolution
        - **output_lengths**: Tensor containing sequence lengths produced by the convolution
    """
    supported_activations = {
        'hardtanh': nn.Hardtanh(0, 20, inplace=True),
        'relu': nn.ReLU(inplace=True),
        'elu': nn.ELU(inplace=True),
        'leaky_relu': nn.LeakyReLU(inplace=True),
        'gelu': nn.GELU(),
        'swish': Swish(),
    }

    def __init__(self, input_dim: int, activation: str = 'hardtanh') -> None:
        super(Conv2dExtractor, self).__init__()
        self.input_dim = input_dim
        self.activation = Conv2dExtractor.supported_activations[activation]
        self.conv = None

    def get_output_lengths(self, seq_lengths: Tensor):
        assert self.conv is not None, "self.conv should be defined"

        for module in self.conv:
            if isinstance(module, nn.Conv2d):
                numerator = (seq_lengths + 2 * module.padding[1]
                             - module.dilation[1] * (module.kernel_size[1] - 1) - 1)
                seq_lengths = numerator.float() / float(module.stride[1])
                seq_lengths = seq_lengths.int() + 1

            elif isinstance(module, nn.MaxPool2d):
                seq_lengths >>= 1

        return seq_lengths.int()

    def get_output_dim(self):
        if isinstance(self, VGGExtractor):
            output_dim = ((self.input_dim - 1) << 5) if self.input_dim % 2 else (self.input_dim << 5)

        elif isinstance(self, DeepSpeech2Extractor):
            output_dim = int(math.floor(self.input_dim + 2 * 20 - 41) / 2 + 1)
            output_dim = int(math.floor(output_dim + 2 * 10 - 21) / 2 + 1)
            output_dim <<= 5

        elif isinstance(self, Conv2dSubsampling):
            factor = ((self.input_dim - 1) // 2 - 1) // 2
            output_dim = self.out_channels * factor

        else:
            raise ValueError(f"Unsupported Extractor : {self.extractor}")

        return output_dim

    def forward(self, inputs: Tensor,
                input_lengths: Tensor) -> Tuple[Tensor, Tensor]:
        """
        inputs: torch.FloatTensor (batch, time, dimension)
        input_lengths: torch.IntTensor (batch)
        """
        outputs, output_lengths = self.conv(
            inputs.unsqueeze(1).transpose(2, 3), input_lengths)

        batch_size, channels, dimension, seq_lengths = outputs.size()
        outputs = outputs.permute(0, 3, 1, 2)
        outputs = outputs.view(batch_size, seq_lengths, channels * dimension)

        return outputs, output_lengths
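
The per-module length update in get_output_lengths is the standard Conv2d output-size formula applied to the time axis (index 1 of each attribute tuple); isolated as a sketch:

import torch
import torch.nn as nn

def conv2d_time_length(seq_lengths: torch.Tensor, m: nn.Conv2d) -> torch.Tensor:
    numerator = (seq_lengths + 2 * m.padding[1]
                 - m.dilation[1] * (m.kernel_size[1] - 1) - 1)
    return (numerator.float() / float(m.stride[1])).int() + 1

m = nn.Conv2d(1, 32, kernel_size=3, stride=2, padding=1)
print(conv2d_time_length(torch.tensor([100]), m))     # tensor([50], dtype=torch.int32)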
Code example #13
File: eegnet.py Project: poplar17/eeg_thesis
    def __init__(self, in_chans,
                 n_classes,
                 final_conv_length='auto',
                 input_time_length=None,
                 pool_mode='mean',
                 f1=8,
                 d=2,
                 f2=16,  # usually set to F1*D (?)
                 kernel_length=64,
                 third_kernel_size=(8, 4),
                 drop_prob=0.25,
                 siamese=False,
                 i_feature_alignment_layer=None  # 0-based index modules
                 ):
        super(EEGNet, self).__init__()

        if i_feature_alignment_layer is None:
            i_feature_alignment_layer = 2  # default alignment layer
        if final_conv_length == 'auto':
            assert input_time_length is not None

        # Assigns all parameters in init to self.param_name
        self.__dict__.update(locals())
        del self.self

        # Define kind of pooling used:
        pool_class = dict(max=nn.MaxPool2d, mean=nn.AvgPool2d)[self.pool_mode]

        # Convolution across the temporal axis
        self.temporal_conv = nn.Sequential(
            # Rearrange dimensions, dimshuffle,
            #   transform to shape required by pytorch:
            Expression(_transpose_to_b_1_c_0),
            # Temporal conv layer:
            nn.Conv2d(in_channels=1, out_channels=self.f1,
                      kernel_size=(1, self.kernel_length),
                      stride=1,
                      bias=False,
                      padding=(0, self.kernel_length // 2)),
            nn.BatchNorm2d(self.f1, momentum=0.01, affine=True, eps=1e-3)
        )

        self.spatial_conv = nn.Sequential(
            # Spatial conv layer:
            Conv2dWithConstraint(self.f1, self.f1 * self.d, (self.in_chans, 1),
                                 max_norm=1, stride=1, bias=False,
                                 groups=self.f1, padding=(0, 0)),
            nn.BatchNorm2d(self.f1 * self.d, momentum=0.01, affine=True,
                           eps=1e-3),
            nn.ELU(),
            pool_class(kernel_size=(1, 4), stride=(1, 4))
        )

        self.separable_conv = nn.Sequential(
            nn.Dropout(p=self.drop_prob),
            # Separable conv layer:
            nn.Conv2d(self.f1 * self.d, self.f1 * self.d, (1, 16), stride=1,
                      bias=False, groups=self.f1 * self.d,
                      padding=(0, 16 // 2)),
            nn.Conv2d(self.f1 * self.d, self.f2, (1, 1), stride=1, bias=False,
                      padding=(0, 0)),
            nn.BatchNorm2d(self.f2, momentum=0.01, affine=True, eps=1e-3),
            nn.ELU(),
            pool_class(kernel_size=(1, 8), stride=(1, 8))
        )

        out = np_to_var(
            np.ones((1, self.in_chans, self.input_time_length, 1),
                    dtype=np.float32))
        out = self.forward_init(out)
        # out = self.separable_conv(self.spatial_conv(self.temporal_conv(out)))
        n_out_virtual_chans = out.cpu().data.numpy().shape[2]

        if self.final_conv_length == 'auto':
            n_out_time = out.cpu().data.numpy().shape[3]
            self.final_conv_length = n_out_time

        # Classifier part:
        self.cls = nn.Sequential(
            nn.Dropout(p=self.drop_prob),
            nn.Conv2d(self.f2, self.n_classes,
                      (n_out_virtual_chans, self.final_conv_length),
                      bias=True),
            nn.LogSoftmax(dim=1),
            # Transpose back to the logic of braindecode,
            #   so time in third dimension (axis=2)
            # Transform back to original shape and
            #   squeeze to (batch_size, n_classes) size
            Expression(_transpose_1_0),
            Expression(_squeeze_final_output)
        )

        # Initialize weights of the network
        self.apply(glorot_weight_zero_bias)

        # Set feature space alignment layer, used in siamese training/testing
        assert 0 <= self.i_feature_alignment_layer < len(self._modules), \
            "Given feature space alignment layer does not " \
            "exist for current model"
        self.feature_alignment_layer = \
            list(self._modules.items())[self.i_feature_alignment_layer][0]
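
The `self.__dict__.update(locals())` shortcut above turns every constructor argument into an attribute in one line; in isolation:

class Config:
    def __init__(self, lr=1e-3, epochs=10):
        self.__dict__.update(locals())    # lr, epochs (and 'self') become attributes
        del self.self                     # drop the spurious self-reference

c = Config()
print(c.lr, c.epochs)                     # 0.001 10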
Code example #14
    def __init__(self, config, corpus_target, embReader):
        super().__init__(config)

        ####
        # init parameters
        self.max_num_sents = config.max_num_sents  # document length, in terms of the number of sentences
        self.max_len_sent = config.max_len_sent  # sentence length, in terms of words
        self.max_len_doc = config.max_len_doc  # document length, in terms of words
        self.batch_size = config.batch_size

        self.size_avg_pool_sent = config.size_avg_pool_sent

        self.corpus_target = config.corpus_target
        self.vocab = corpus_target.vocab  # word2id
        self.rev_vocab = corpus_target.rev_vocab  # id2word
        self.pad_id = corpus_target.pad_id
        self.num_special_vocab = corpus_target.num_special_vocab

        self.dropout_rate = config.dropout
        self.rnn_cell_size = config.rnn_cell_size
        self.output_size = config.output_size  # the number of final output class
        self.pad_level = config.pad_level

        self.use_gpu = config.use_gpu
        self.gen_logs = config.gen_logs

        if not hasattr(config, "freeze_step"):
            config.freeze_step = 5000

        ########
        #
        # self.base_encoder = Encoder_Coh(config, embReader)
        self.base_encoder = Encoder_Main(config, embReader)

        #
        self.sim_cosine = torch.nn.CosineSimilarity(dim=2)

        self.conv_sent = nn.Conv1d(in_channels=1,
                                   out_channels=1,
                                   kernel_size=3,
                                   stride=2,
                                   padding=1,
                                   dilation=1,
                                   groups=1,
                                   bias=True)
        # bias=False)

        self.max_adapt_pool1_sent = nn.AdaptiveMaxPool1d(
            self.size_avg_pool_sent)

        #
        fc_in_size = self.base_encoder.encoder_out_size + self.size_avg_pool_sent

        linear_1_out = fc_in_size // 2
        linear_2_out = linear_1_out // 2

        self.linear_1 = nn.Linear(fc_in_size, linear_1_out)
        nn.init.xavier_normal_(self.linear_1.weight)

        self.linear_2 = nn.Linear(linear_1_out, linear_2_out)
        nn.init.xavier_normal_(self.linear_2.weight)

        self.linear_out = nn.Linear(linear_2_out, self.output_size)
        if corpus_target.output_bias is not None:  # bias
            init_mean_val = np.expand_dims(corpus_target.output_bias, axis=1)
            bias_val = (np.log(init_mean_val) - np.log(1 - init_mean_val))
            self.linear_out.bias.data = torch.from_numpy(bias_val).type(
                torch.FloatTensor)
        # nn.init.xavier_uniform_(self.linear_out.weight)
        nn.init.xavier_normal_(self.linear_out.weight)

        #
        self.selu = nn.SELU()
        self.elu = nn.ELU()
        self.leak_relu = nn.LeakyReLU()
        self.relu = nn.ReLU()
        self.tanh = nn.Tanh()
        self.sigmoid = nn.Sigmoid()
        self.dropout_layer = nn.Dropout(self.dropout_rate)

        self.dropout_01 = nn.Dropout(0.1)
        self.dropout_02 = nn.Dropout(0.2)

        self.softmax = nn.Softmax(dim=1)

        return
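
The output-bias initialization above sets the final layer's bias to the logit of the empirical class prior, so that (assuming a sigmoid head, as the nn.Sigmoid() above suggests) an untrained network outputs the data mean:

$$b = \log \bar{p} - \log(1 - \bar{p}) = \operatorname{logit}(\bar{p}) \quad\Longrightarrow\quad \sigma(b) = \bar{p}$$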
Code example #15
    def __init__(self, input_nc, output_nc, kernel_size, stride, padding, activation_func=nn.ELU()):
        super(Conv, self).__init__()
        self.conv = nn.Conv2d(in_channels=input_nc,
                              out_channels=output_nc,
                              kernel_size=kernel_size,
                              stride=stride,
                              padding=0,
                              bias=True)
        self.activation_fn = activation_func
        self.pad_fn = nn.ReplicationPad2d(padding)
Code example #16
    def __init__(self, setting: dict):
        super(PointCNN, self).__init__()
        self.setting = setting
        self.fts_is_None = self.setting['fts_is_None']
        self.xconv_params = self.setting['xconv_params']
        if not self.fts_is_None:
            self.features_hd_generator = nn.Sequential(
                nn.Linear(self.setting['data_dim'] - 3, self.setting['xconv_params'][0]['C']),
                nn.ELU(),
                View((-1, self.setting['xconv_params'][0]['C'])),
                nn.BatchNorm1d(num_features=self.setting["xconv_params"][0]['C']),
                View((-1, setting["sample_num"], self.setting['xconv_params'][0]['C'])))
        self.net = nn.Sequential()
        self.with_X_transformation = setting['with_X_transformation']
        self.fc_params = setting['fc_params']
        self.links = []

        self.sampling = setting['sampling']
        prev_P = setting["sample_num"]

        self.xconv_output_dim = []
        param_dict = {
            "xconv_output_dim": self.xconv_output_dim,
            "xconv_params": self.setting["xconv_params"]
        }
        if "xdconv_params" in setting:
            param_dict["xdconv_params"] = self.setting["xdconv_params"]
        if self.fts_is_None:
            self.xconv_output_dim.append(0)
        else:
            self.xconv_output_dim.append(self.setting['xconv_params'][0]['C'])

        for layer_idx, layer_param in enumerate(self.setting['xconv_params']):
            # K: for inverse density sampling, each point's probability density is estimated
            # from the average distance to its K nearest neighbors; this K is also the K used
            # inside the XConv operator
            K = layer_param['K']
            D = layer_param['D']  # D: dilation
            if layer_param['P'] > 0:
                P = layer_param['P']  # P: the number of sample points to keep for the next layer
            elif layer_param["P"] == -1:
                P = prev_P
            else:
                print("P should either be positive integer or eqaul to -1!")
                #exit()()
            prev_P = P
            C = layer_param['C']
            with_global = (setting["with_global"]
                           and layer_idx == len(self.xconv_params) - 1)
            self.links.append(layer_param['links'])
            if layer_idx == 0:
                C_pts_fts = C // 2 if self.fts_is_None else C // 4  # // is floor division
                depth_multiplier = 4
            else:
                C_prev = self.xconv_params[layer_idx - 1]['C']
                C_pts_fts = C_prev // 4
                depth_multiplier = math.ceil(C / C_prev)
            last_layer_flag = (layer_idx == (
                len(self.setting['xconv_params']) - 1))
            self.net.add_module(
                "layer{}".format(layer_idx),
                module=XConv(setting=param_dict,
                             idx=layer_idx,
                             K=K,
                             dilation=D,
                             P=P,
                             C=C,
                             C_pts_fts=C_pts_fts,
                             depth_multiplier=depth_multiplier,
                             with_X_transfomation=self.with_X_transformation,
                             with_global=with_global,
                             sorting_method=None).to(config.device))
            if last_layer_flag and with_global:
                self.xconv_output_dim.append(C + C // 4)
            else:
                self.xconv_output_dim.append(C)

        if "xdconv_params" in setting:
            for layer_idx, layer_param in enumerate(setting["xdconv_params"]):
                K = layer_param['K']
                D = layer_param['D']
                pts_layer_idx = layer_param['pts_layer_idx']
                qrs_layer_idx = layer_param['qrs_layer_idx']
                P = setting["xconv_params"][qrs_layer_idx]['P']
                C = setting["xconv_params"][qrs_layer_idx]['C']
                C_prev = setting["xconv_params"][pts_layer_idx]['C']
                C_pts_fts = C_prev // 4
                depth_multiplier = 1
                with_global = False
                last_layer_flag = (layer_idx == (
                    len(self.setting['xdconv_params']) - 1))
                self.net.add_module(
                    "xdconv_layer{}".format(layer_idx),
                    module=XConv(
                        setting=param_dict,
                        idx=len(self.setting['xconv_params']) + layer_idx,
                        K=K,
                        dilation=D,
                        P=P,
                        C=C,
                        C_pts_fts=C_pts_fts,
                        depth_multiplier=depth_multiplier,
                        with_X_transfomation=self.with_X_transformation,
                        with_global=with_global,
                        sorting_method=None).to(config.device))

                layer_name = "xdconv_dense{}".format(layer_idx)
                self.net.add_module(
                    name=layer_name,
                    module=nn.Sequential(
                        nn.Linear(self.xconv_output_dim[qrs_layer_idx + 1] + C,
                                  C).to(config.device), View((-1, C, P)),
                        nn.BatchNorm1d(num_features=C).to(config.device),
                        View((-1, P, C))).to(config.device))
                self.xconv_output_dim.append(C)

        for layer_idx, layer_param in enumerate(self.fc_params):
            C = layer_param['C']
            if layer_idx > 0:
                if "xdconv_params" in setting:
                    P = setting["xconv_params"][setting["xdconv_params"][-1]
                                                ["qrs_layer_idx"]]["P"]
                else:
                    P = setting["xconv_params"][-1]["P"]
                self.net.add_module(name="dense{}".format(layer_idx),
                                    module=nn.Sequential(
                                        nn.Linear(prev_C, C), View((-1, C)),
                                        nn.BatchNorm1d(num_features=C),
                                        View((-1, P, C))).to(config.device))
            else:
                if "xdconv_params" in setting:
                    prev_C = setting["xconv_params"][
                        setting["xdconv_params"][-1]["qrs_layer_idx"]]["C"]
                    P = setting["xconv_params"][setting["xdconv_params"][-1]
                                                ["qrs_layer_idx"]]["P"]
                else:
                    prev_C = self.setting["xconv_params"][-1]['C']
                    P = setting["xconv_params"][-1]["P"]
                    if self.setting["with_global"]:
                        prev_C += (prev_C // 4)
                self.net.add_module(name="dense{}".format(layer_idx),
                                    module=nn.Sequential(
                                        nn.Linear(prev_C, C), View((-1, C)),
                                        nn.BatchNorm1d(num_features=C),
                                        View((-1, P, C))).to(config.device))
            self.net.add_module(name="fc_dropout_{}".format(layer_idx),
                                module=nn.Dropout(
                                    layer_param['dropout_rate']).to(
                                        config.device))
            prev_C = layer_param['C']

        self.net.add_module(name="logits",
                            module=nn.Linear(self.fc_params[-1]["C"],
                                             self.setting["num_classes"],
                                             bias=True).to(config.device))
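
A hypothetical `setting` dict covering the keys this constructor reads (all values are illustrative, not taken from the source project):

setting = {
    'fts_is_None': True,
    'data_dim': 3,
    'sample_num': 1024,
    'with_X_transformation': True,
    'with_global': True,
    'sampling': 'random',
    'num_classes': 40,
    'xconv_params': [
        {'K': 8,  'D': 1, 'P': 1024, 'C': 48,  'links': []},
        {'K': 12, 'D': 2, 'P': 384,  'C': 96,  'links': []},
        {'K': 16, 'D': 2, 'P': 128,  'C': 192, 'links': []},
    ],
    'fc_params': [
        {'C': 128, 'dropout_rate': 0.0},
        {'C': 64,  'dropout_rate': 0.5},
    ],
}
model = PointCNN(setting)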
Code example #17
    def __init__(self, input_nc, output_nc, scale, kernel_size, padding, activation_func=nn.ELU()):
        super().__init__()
        self.up = nn.Upsample(scale_factor=scale)
        self.conv1 = Conv(input_nc, output_nc, kernel_size, 1, padding, activation_func)
Code example #18
    def __init__(self, xs=(3, 32, 32), nz=1, zchannels=16, nprocessing=1, kernel_size=3, resdepth=2,
                 reswidth=256, dropout_p=0., tag='', root_process=True):
        super().__init__()
        # default: disable compressing mode
        # if activated, tensors will be flattened
        self.compressing = False

        # hyperparameters
        self.xs = xs
        self.nz = nz
        self.zchannels = zchannels
        self.nprocessing = nprocessing
        # latent height/width is always 16,
        # the number of channels depends on the dataset
        self.zdim = (self.zchannels, 16, 16)
        self.resdepth = resdepth
        self.reswidth = reswidth
        self.kernel_size = kernel_size

        # applying these two factors to the ELBO (in sequence) yields "bits/dim"
        # factor to convert "nats" to bits
        self.bitsscale = np.log2(np.e)
        # factor to divide by the data dimension
        self.perdimsscale = 1. / np.prod(self.xs)

        # calculate processing layers convolutions options
        # kernel/filter is 5, so in order to ensure same-size outputs, we have to pad by 2
        padding_proc = (5 - 1) / 2
        assert padding_proc.is_integer()
        padding_proc = int(padding_proc)

        # calculate other convolutions options
        padding = (self.kernel_size - 1) / 2
        assert padding.is_integer()
        padding = int(padding)

        # create loggers
        self.tag = tag
        if root_process:
            current_time = datetime.now().strftime('%b%d_%H-%M-%S')
            log_dir = os.path.join(
                'runs/imagenet/', current_time + '_' + socket.gethostname() + tag)
            self.log_dir = log_dir
            self.logger = SummaryWriter(log_dir=self.log_dir)

        # set-up current "best elbo"
        self.best_elbo = np.inf

        # distribute ResNet blocks over latent layers
        resdepth = [0] * (self.nz)
        i = 0
        for _ in range(self.resdepth):
            i = 0 if i == (self.nz) else i
            resdepth[i] += 1
            i += 1

        # reduce initial variance of distributions corresponding
        # to latent layers if latent nz increases
        scale = 1.0 / (self.nz ** 0.5)

        # activations
        self.softplus = nn.Softplus()
        self.sigmoid = nn.Sigmoid()
        self.act = nn.ELU()
        self.actresnet = nn.ELU()

        # Below we build up the main model architecture of the inference and generative models.
        # All the architecture components are built from custom or existing PyTorch modules.

        # <===== INFERENCE MODEL =====>
        # the bottom (zi=1) inference model
        self.infer_in = nn.Sequential(
            # shape: [1,32,32] -> [4,16,16]
            modules.Squeeze2d(factor=2),

            # shape: [4,16,16] -> [32,16,16]
            modules.WnConv2d(4 * xs[0],
                             self.reswidth,
                             5,
                             1,
                             padding_proc,
                             init_scale=1.0,
                             loggain=True),
            self.act
        )
        self.infer_res0 = nn.Sequential(
            # shape: [32,16,16] -> [32,16,16]
            modules.ResNetBlock(self.reswidth,
                                self.reswidth,
                                5,
                                1,
                                padding_proc,
                                self.nprocessing,
                                dropout_p,
                                self.actresnet),
            self.act
        ) if self.nprocessing > 0 else modules.Pass()

        self.infer_res1 = nn.Sequential(
            # shape: [32,16,16] -> [32,16,16]
            modules.ResNetBlock(self.reswidth,
                                self.reswidth,
                                self.kernel_size,
                                1,
                                padding,
                                resdepth[0],
                                dropout_p,
                                self.actresnet),
            self.act
        ) if resdepth[0] > 0 else modules.Pass()

        # shape: [32,16,16] -> [1,16,16]
        self.infer_mu = modules.WnConv2d(self.reswidth,
                                         self.zchannels,
                                         self.kernel_size,
                                         1,
                                         padding,
                                         init_scale=scale if self.nz > 1 else 2 ** 0.5 * scale)

        # shape: [32,16,16] -> [1,16,16]
        self.infer_std = modules.WnConv2d(self.reswidth,
                                          self.zchannels,
                                          self.kernel_size,
                                          1,
                                          padding,
                                          init_scale=scale if self.nz > 1 else 2 ** 0.5 * scale)

        # <===== DEEP INFERENCE MODEL =====>
        # the deeper (zi > 1) inference models
        self.deepinfer_in = nn.ModuleList([
            # shape: [1,16,16] -> [32,16,16]
            nn.Sequential(
                modules.WnConv2d(self.zchannels,
                                 self.reswidth,
                                 self.kernel_size,
                                 1,
                                 padding,
                                 init_scale=1.0,
                                 loggain=True),
                self.act
            )
            for _ in range(self.nz - 1)])

        self.deepinfer_res = nn.ModuleList([
            # shape: [32,16,16] -> [32,16,16]
            nn.Sequential(
                modules.ResNetBlock(self.reswidth,
                                    self.reswidth,
                                    self.kernel_size,
                                    1,
                                    padding,
                                    resdepth[i + 1],
                                    dropout_p,
                                    self.actresnet),
                self.act
            ) if resdepth[i + 1] > 0 else modules.Pass()
            for i in range(self.nz - 1)])

        self.deepinfer_mu = nn.ModuleList([
            # shape: [32,16,16] -> [1,16,16]
            nn.Sequential(
                modules.WnConv2d(self.reswidth,
                                 self.zchannels,
                                 self.kernel_size,
                                 1,
                                 padding,
                                 init_scale=scale if i < self.nz - 2 else 2 ** 0.5 * scale)
            )
            for i in range(self.nz - 1)])

        self.deepinfer_std = nn.ModuleList([
            # shape: [32,16,16] -> [1,16,16]
            nn.Sequential(
                modules.WnConv2d(self.reswidth,
                                 self.zchannels,
                                 self.kernel_size,
                                 1,
                                 padding,
                                 init_scale=scale if i < self.nz - 2 else 2 ** 0.5 * scale)
            )
            for i in range(self.nz - 1)])

        # <===== DEEP GENERATIVE MODEL =====>
        # the deeper (zi > 1) generative models
        self.deepgen_in = nn.ModuleList([
            # shape: [1,16,16] -> [32,16,16]
            nn.Sequential(
                modules.WnConv2d(self.zchannels,
                                 self.reswidth,
                                 self.kernel_size,
                                 1,
                                 padding,
                                 init_scale=1.0,
                                 loggain=True),
                self.act
            )
            for _ in range(self.nz - 1)])

        self.deepgen_res = nn.ModuleList([
            # shape: [32,16,16] -> [32,16,16]
            nn.Sequential(
                modules.ResNetBlock(self.reswidth,
                                    self.reswidth,
                                    self.kernel_size,
                                    1,
                                    padding,
                                    resdepth[i + 1],
                                    dropout_p,
                                    self.actresnet),
                self.act
            ) if resdepth[i + 1] > 0 else modules.Pass()
            for i in range(self.nz - 1)])

        self.deepgen_mu = nn.ModuleList([
            # shape: [32,16,16] -> [1,16,16]
            nn.Sequential(
                modules.WnConv2d(self.reswidth,
                                 self.zchannels,
                                 self.kernel_size,
                                 1,
                                 padding,
                                 init_scale=scale)
            )
            for _ in range(self.nz - 1)])

        self.deepgen_std = nn.ModuleList([
            # shape: [32,16,16] -> [1,16,16]
            nn.Sequential(
                modules.WnConv2d(self.reswidth,
                                 self.zchannels,
                                 self.kernel_size,
                                 1,
                                 padding, init_scale=scale)
            )
            for _ in range(self.nz - 1)])

        # <===== GENERATIVE MODEL =====>
        # the bottom (zi = 1) generative model
        self.gen_in = nn.Sequential(
            # shape: [1,16,16] -> [32,16,16]
            modules.WnConv2d(self.zchannels,
                             self.reswidth,
                             self.kernel_size,
                             1,
                             padding,
                             init_scale=1.0,
                             loggain=True),
            self.act
        )

        self.gen_res1 = nn.Sequential(
            # shape: [32,16,16] -> [32,16,16]
            modules.ResNetBlock(self.reswidth,
                                self.reswidth,
                                self.kernel_size,
                                1,
                                padding,
                                resdepth[0],
                                dropout_p,
                                self.actresnet),
            self.act
        ) if resdepth[0] > 0 else modules.Pass()

        self.gen_res0 = nn.Sequential(
            # shape: [32,16,16] -> [32,16,16]
            modules.ResNetBlock(self.reswidth,
                                self.reswidth,
                                5,
                                1,
                                padding_proc,
                                self.nprocessing,
                                dropout_p,
                                self.actresnet),
            self.act
        ) if self.nprocessing > 0 else modules.Pass()

        self.gen_mu = nn.Sequential(
            # shape: [32,16,16] -> [4,16,16]
            modules.WnConv2d(self.reswidth,
                             4 * xs[0],
                             self.kernel_size,
                             1,
                             padding,
                             init_scale=0.1),
            # shape: [4,16,16] -> [1,32,32]
            modules.UnSqueeze2d(factor=2)
        )

        # the scale parameter of the bottom (zi = 1) generative model is modelled unconditionally
        self.gen_std = nn.Parameter(torch.Tensor(*self.xs))
        nn.init.zeros_(self.gen_std)
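
Note: the two factors defined near the top of this example (bitsscale and perdimsscale) turn a per-image ELBO measured in nats into the "bits per dimension" metric used to report compression results. A minimal sketch of the conversion, assuming a hypothetical ELBO value and a 3x32x32 image:

import numpy as np

xs = (3, 32, 32)                  # data shape, assumed for illustration
bitsscale = np.log2(np.e)         # nats -> bits
perdimsscale = 1. / np.prod(xs)   # per-image -> per-dimension

elbo_nats = 5000.                 # made-up per-image ELBO in nats
print(elbo_nats * bitsscale * perdimsscale)  # ~2.35 bits/dim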
Code example #19
File: ofos.py  Project: wwpww/ICASSP19
    def __init__(self, config):
        super().__init__()

        self.config = config
        self.batch_norm = nn.BatchNorm2d(1)
        self.conv_stem = nn.Sequential(
            nn.Conv2d(1, 30, (3, 3), bias=True),
            nn.ELU(),
            GaussianDropout(0.1),
            GaussianNoise(0.1),

            nn.Conv2d(30, 30, (1, 35), bias=True),
            nn.ELU(),
            GaussianDropout(0.1),
            GaussianNoise(0.1),

            nn.Conv2d(30, 30, (7, 1), bias=True),
            nn.ELU(),
            GaussianDropout(0.1),
            GaussianNoise(0.1)
        )

        self.note_frames = nn.Sequential(
            nn.Conv2d(30, 10, (3, 3), bias=True),
            nn.ELU(),
            GaussianDropout(0.5),
            GaussianNoise(0.1),

            Flatten(),

            nn.Linear(1060, 88)
        )

        self.note_onsets = nn.Sequential(
            nn.Conv2d(30, 10, (3, 3), bias=True),
            nn.ELU(),
            GaussianDropout(0.5),
            GaussianNoise(0.1),

            Flatten(),

            nn.Linear(1060, 88)
        )

        self.note_offsets = nn.Sequential(
            nn.Conv2d(30, 10, (3, 3), bias=True),
            nn.ELU(),
            GaussianDropout(0.5),
            GaussianNoise(0.1),

            Flatten(),

            nn.Linear(1060, 88)
        )

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.xavier_uniform_(m.weight)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                init.xavier_uniform_(m.weight)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

        self.best = dict(
            f=0,
            loss=10.
        )
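
Note: the in_features value of 1060 in each nn.Linear head above depends on the exact input spectrogram size, which this excerpt does not show. A safe way to derive it is a dummy forward pass through the convolutional layers; a sketch, assuming a hypothetical input of 1x63x40 (chosen because it happens to reproduce 1060; the true input size is not given here):

import torch
import torch.nn as nn

# same conv pattern as conv_stem plus one head, minus the custom noise/dropout layers
stem = nn.Sequential(
    nn.Conv2d(1, 30, (3, 3)), nn.ELU(),
    nn.Conv2d(30, 30, (1, 35)), nn.ELU(),
    nn.Conv2d(30, 30, (7, 1)), nn.ELU(),
    nn.Conv2d(30, 10, (3, 3)), nn.ELU(),
)

with torch.no_grad():
    flat = stem(torch.zeros(1, 1, 63, 40)).flatten(1)
print(flat.size(1))  # 1060 for this assumed input; use as Linear in_features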
Code example #20
File: vnet3d.py  Project: yunshangyue71/mycodes
def ELUCons(elu, nchan):
    if elu:
        return nn.ELU(inplace=True)
    else:
        return nn.PReLU(nchan)
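
Note: ELUCons is a small activation factory common in V-Net style code: PReLU needs the channel count for its per-channel slopes, while ELU ignores it. Usage:

act1 = ELUCons(elu=True, nchan=16)   # -> nn.ELU(inplace=True), nchan unused
act2 = ELUCons(elu=False, nchan=16)  # -> nn.PReLU(16), one learned slope per channel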
Code example #21
File: layers.py  Project: fogfog2/fbnet
    def __init__(self, in_channels, out_channels):
        super(ConvBlock, self).__init__()

        self.conv = Conv3x3(in_channels, out_channels)
        self.nonlin = nn.ELU(inplace=True)
Code example #22
    def __init__(
        self,
        shape,
        n_class,
        channel,
        kernel_size,
        n_block,
        n_res_block,
        res_channel,
        attention=True,
        dropout=0.1,
        n_cond_res_block=0,
        cond_res_channel=0,
        cond_res_kernel=3,
        n_out_res_block=0,
    ):
        super().__init__()

        height, width = shape

        self.n_class = n_class

        if kernel_size % 2 == 0:
            kernel = kernel_size + 1

        else:
            kernel = kernel_size

        self.horizontal = CausalConv2d(n_class,
                                       channel, [kernel // 2, kernel],
                                       padding='down')
        self.vertical = CausalConv2d(n_class,
                                     channel, [(kernel + 1) // 2, kernel // 2],
                                     padding='downright')

        coord_x = (torch.arange(height).float() - height / 2) / height
        coord_x = coord_x.view(1, 1, height, 1).expand(1, 1, height, width)
        coord_y = (torch.arange(width).float() - width / 2) / width
        coord_y = coord_y.view(1, 1, 1, width).expand(1, 1, height, width)
        self.register_buffer('background', torch.cat([coord_x, coord_y], 1))

        self.blocks = nn.ModuleList()

        for _ in range(n_block):
            self.blocks.append(
                PixelBlock(
                    channel,
                    res_channel,
                    kernel_size,
                    n_res_block,
                    attention=attention,
                    dropout=dropout,
                    condition_dim=cond_res_channel,
                ))

        if n_cond_res_block > 0:
            self.cond_resnet = CondResNet(n_class, cond_res_channel,
                                          cond_res_kernel, n_cond_res_block)

        out = []

        for _ in range(n_out_res_block):
            out.append(GatedResBlock(channel, res_channel, 1))

        out.extend([nn.ELU(inplace=True), WNConv2d(channel, n_class, 1)])

        self.out = nn.Sequential(*out)
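
Note: the "background" buffer registered above attaches a normalized (row, column) coordinate pair to every pixel, centred on the image midpoint; register_buffer keeps it on the module's device without making it trainable. A standalone check of the construction for a 4x4 grid:

import torch

height, width = 4, 4
coord_x = (torch.arange(height).float() - height / 2) / height
coord_x = coord_x.view(1, 1, height, 1).expand(1, 1, height, width)
coord_y = (torch.arange(width).float() - width / 2) / width
coord_y = coord_y.view(1, 1, 1, width).expand(1, 1, height, width)
background = torch.cat([coord_x, coord_y], 1)

print(background.shape)        # torch.Size([1, 2, 4, 4])
print(background[0, 0, :, 0])  # tensor([-0.5000, -0.2500,  0.0000,  0.2500])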
Code example #23
File: clevr_base.py  Project: nirbhayjm/temp-1
    def __init__(
        self,
        obs_spaces: collections.OrderedDict,
        recurrent: bool = False,
        hidden_size: int = 64,
        use_critic: bool = True,
        critic_detach: bool = True,
    ):
        num_inputs = 0
        self.obs_keys = obs_spaces.keys()
        self.image_space = obs_spaces['image']
        mlp_obs_spaces = obs_spaces.copy()
        mlp_obs_spaces.update({
            'image':
            spaces.Box(
                low=0.0,  # Arbitrary value
                high=1.0,  # Arbitrary value
                shape=(hidden_size, ),
                dtype='float',
            )
        })
        self.mlp_obs_keys = mlp_obs_spaces.keys()

        super().__init__(
            obs_spaces=mlp_obs_spaces,
            recurrent=recurrent,
            hidden_size=hidden_size,
            use_critic=use_critic,
            critic_detach=critic_detach,
        )

        _neg_slope = 0.1
        init_ = lambda m: init(
            m, nn.init.orthogonal_, lambda x: nn.init.constant_(x, 0),
            nn.init.calculate_gain('leaky_relu', param=_neg_slope))

        NEW_CNN = True

        H, W, num_channels = self.image_space.shape
        if NEW_CNN:
            self.cnn = nn.Sequential(
                init_(nn.Conv2d(num_channels, 16, (3, 3), padding=1)),
                nn.ReLU(),
                # nn.MaxPool2d((2, 2)),
                init_(nn.Conv2d(16, 32, (2, 2))),
                nn.ReLU(),
                init_(nn.Conv2d(32, 64, (2, 2))),
                nn.ReLU(),
                Flatten(),
            )
        else:
            self.cnn = nn.Sequential(
                init_(nn.Conv2d(num_channels, 16, 1, stride=1)),
                # nn.LeakyReLU(_neg_slope),
                nn.ELU(),
                init_(nn.Conv2d(16, 8, 3, stride=1, padding=2)),
                # nn.LeakyReLU(_neg_slope),
                nn.ELU(),
                # init_(nn.Conv2d(64, 64, 5, stride=1, padding=2)),
                # nn.LeakyReLU(_neg_slope),
                Flatten(),
            )
        output_h_w, out_channels = utils.conv_sequential_output_shape((H, W),
                                                                      self.cnn)
        h_w_prod = output_h_w[0] * output_h_w[1]
        self.fc = nn.Sequential(
            init_(nn.Linear(out_channels * h_w_prod, hidden_size)),
            # nn.LeakyReLU(_neg_slope),
            # nn.ELU(),
        )
        self.apply(initialize_parameters)
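
Note: utils.conv_sequential_output_shape is project-specific and not shown in this excerpt. Below is a hedged stand-in that recovers the same information ((H_out, W_out) and C_out) via a dummy forward pass; the extra in_channels argument and the Flatten filtering are assumptions about the missing utility:

import torch
import torch.nn as nn

def conv_sequential_output_shape(hw, cnn, in_channels):
    # skip any trailing Flatten so the 4D activation shape stays visible
    layers = [m for m in cnn if m.__class__.__name__ != 'Flatten']
    x = torch.zeros(1, in_channels, hw[0], hw[1])
    with torch.no_grad():
        y = nn.Sequential(*layers)(x)
    return (y.shape[2], y.shape[3]), y.shape[1]

# usage sketch: output_h_w, out_channels = conv_sequential_output_shape((H, W), cnn, num_channels)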
Code example #24
    def __init__(self,
                 model,
                 image_size=(340, 480),
                 finetune=False,
                 restore=RESTORE_FROM,
                 n_layers=2,
                 n_heads=NUM_CLASSES,
                 softmax=True):
        super(SegAttention, self).__init__()
        self.image_h, self.image_w = image_size
        self.seg_net = Seg_Model(num_classes=NUM_CLASSES)
        self.seg_net.eval()
        self.softmax = nn.Softmax(dim=1) if softmax else None
        self.semantic_drop = nn.Dropout2d(0.3)
        try:
            self.cnn = model(pretrained=True).features
        except AttributeError:
            self.cnn = nn.Sequential(*list(model(
                pretrained=True).children())[:-3])
        if not finetune:
            for param in self.cnn.parameters():  # freeze cnn params
                param.requires_grad = False

        self.n_layers = n_layers
        self.n_heads = n_heads
        self.hidden_dim = NUM_CLASSES
        if restore is not None:
            self.seg_net.load_state_dict(
                torch.load(restore, map_location=device))

        for param in self.seg_net.parameters():  # freeze segnet params
            param.requires_grad = False

        sample = torch.randn([3, self.image_h, self.image_w]).unsqueeze(0)

        self.seg_dims = self.seg_net(
            sample)[0].size()  # used for layer size definitions below
        self.cnn_size = self.cnn(sample).size()

        # self.upsample = nn.Upsample((self.seg_dims[2], self.seg_dims[3]))
        self.upsample = nn.ConvTranspose2d(self.cnn_size[1],
                                           self.cnn_size[1],
                                           kernel_size=3,
                                           stride=2,
                                           padding=1,
                                           dilation=1)
        self.cnn_size = self.upsample(self.cnn(sample)).size()
        # nn.MultiheadAttention has no qdim argument; the query width is
        # fixed to embed_dim, so no qdim kwarg is passed here
        self.attentions = nn.ModuleList([
            nn.MultiheadAttention(embed_dim=self.hidden_dim,
                                  num_heads=self.n_heads,
                                  dropout=0.1,
                                  kdim=NUM_CLASSES,
                                  vdim=self.cnn_size[1])
            for _ in range(self.n_layers)
        ])
        self.fc = nn.Linear(
            self.hidden_dim * self.seg_dims[2] * self.seg_dims[3], 256)
        self.activation = nn.ELU()
        self.dropout = nn.Dropout(0.1)
        self.output = nn.Linear(256, 1)
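
Note on the attention stack above: in nn.MultiheadAttention the query is always projected from embed_dim; only the key and value widths may differ, via kdim and vdim (there is no qdim argument). A minimal sketch with made-up sizes:

import torch
import torch.nn as nn

embed_dim, kdim, vdim, heads = 32, 16, 64, 4
attn = nn.MultiheadAttention(embed_dim, heads, kdim=kdim, vdim=vdim)

L, S, B = 10, 20, 2               # target length, source length, batch
q = torch.randn(L, B, embed_dim)  # queries must have embed_dim features
k = torch.randn(S, B, kdim)
v = torch.randn(S, B, vdim)
out, weights = attn(q, k, v)
print(out.shape)                  # torch.Size([10, 2, 32])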
Code example #25
    def __init__(self,
                 input_dim,
                 output_dim,
                 resample=None,
                 act=nn.ELU(),
                 normalization=nn.BatchNorm2d,
                 adjust_padding=False,
                 dilation=None,
                 spec_norm=False):
        super().__init__()
        self.non_linearity = act
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.resample = resample
        self.normalization = normalization
        if resample == 'down':
            if dilation is not None:
                self.conv1 = dilated_conv3x3(input_dim,
                                             input_dim,
                                             dilation=dilation,
                                             spec_norm=spec_norm)
                self.normalize2 = normalization(input_dim)
                self.conv2 = dilated_conv3x3(input_dim,
                                             output_dim,
                                             dilation=dilation,
                                             spec_norm=spec_norm)
                conv_shortcut = partial(dilated_conv3x3,
                                        dilation=dilation,
                                        spec_norm=spec_norm)
            else:
                self.conv1 = conv3x3(input_dim, input_dim, spec_norm=spec_norm)
                self.normalize2 = normalization(input_dim)
                self.conv2 = ConvMeanPool(input_dim,
                                          output_dim,
                                          3,
                                          adjust_padding=adjust_padding,
                                          spec_norm=spec_norm)
                conv_shortcut = partial(ConvMeanPool,
                                        kernel_size=1,
                                        adjust_padding=adjust_padding,
                                        spec_norm=spec_norm)

        elif resample is None:
            if dilation is not None:
                conv_shortcut = partial(dilated_conv3x3,
                                        dilation=dilation,
                                        spec_norm=spec_norm)
                self.conv1 = dilated_conv3x3(input_dim,
                                             output_dim,
                                             dilation=dilation,
                                             spec_norm=spec_norm)
                self.normalize2 = normalization(output_dim)
                self.conv2 = dilated_conv3x3(output_dim,
                                             output_dim,
                                             dilation=dilation,
                                             spec_norm=spec_norm)
            else:
                # conv_shortcut = nn.Conv2d ### Something weird here.
                conv_shortcut = partial(conv1x1, spec_norm=spec_norm)
                self.conv1 = conv3x3(input_dim,
                                     output_dim,
                                     spec_norm=spec_norm)
                self.normalize2 = normalization(output_dim)
                self.conv2 = conv3x3(output_dim,
                                     output_dim,
                                     spec_norm=spec_norm)
        else:
            raise Exception('invalid resample value')

        if output_dim != input_dim or resample is not None:
            self.shortcut = conv_shortcut(input_dim, output_dim)

        self.normalize1 = normalization(input_dim)
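
Note: only __init__ is shown for this residual block. In score-network (NCSN) style code this layout is typically paired with a pre-activation forward pass along the lines of the sketch below; this is an assumption about the missing method, not verbatim source:

def forward(self, x):
    # pre-activation residual branch: norm -> act -> conv, twice
    h = self.non_linearity(self.normalize1(x))
    h = self.conv1(h)
    h = self.non_linearity(self.normalize2(h))
    h = self.conv2(h)

    # identity shortcut unless the shape changes (see __init__)
    if self.output_dim == self.input_dim and self.resample is None:
        shortcut = x
    else:
        shortcut = self.shortcut(x)
    return h + shortcut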
Code example #26
File: test_operators.py  Project: zshihang/pytorch
    def test_elu(self):
        x = Variable(torch.randn(1, 2, 3, 4), requires_grad=True)
        self.assertONNX(nn.ELU(), x)
Code example #27
File: ModelComp.py  Project: pan-webis-de/gagala18
    def __init__(self,
                 nb_words=[],
                 nb_authors=0,
                 freq_feature=[],
                 nb_fcat=0,
                 freq_masking=[]):
        super(BinaryMeasurer, self).__init__()

        self.freq_feature = freq_feature
        self.nb_fcat = nb_fcat
        self.freq_masking = freq_masking

        # combined feature width consumed by the fc3/fc4 heads below
        nw = sum(x // 10 for x in nb_words)
        self.layer1 = nn.ModuleList(
            [nn.Linear(nb_words[r], nb_words[r]) for r in range(nb_fcat)])
        self.layer2 = nn.ModuleList(
            [nn.Linear(nb_words[r], nb_words[r]) for r in range(nb_fcat)])
        self.layer3 = nn.ModuleList(
            [nn.Linear(nb_words[r], nb_words[r]) for r in range(nb_fcat)])

        self.par = nn.Parameter(torch.FloatTensor([1.0]))
        self.fc2 = nn.Linear(nb_authors * 2, nb_authors)
        self.fc3 = nn.Linear(nw, nb_authors)
        self.fc4 = nn.Linear(nw, nb_authors)

        self.dropout1 = nn.Dropout(0.5)
        self.dropout1_2 = nn.Dropout(0.0)
        self.dropout2 = nn.Dropout(0.5)
        self.relu = nn.ReLU()
        self.elu = nn.ELU()
        self.tanh = nn.Tanh()
        self.sigmoid = nn.Sigmoid()
        self.softmax_ = nn.Softmax(dim=0)
        self.softmax = nn.LogSoftmax(dim=0)
Code example #28
    def __init__(self, in_ch, out_ch, dim):
        super(ConvBlock, self).__init__()
        self.conv = getattr(nn, 'Conv{}d'.format(dim))(in_ch, out_ch, K_D, stride=dim, padding=K_D // 2)
        self.bnrm = getattr(nn, 'BatchNorm{}d'.format(dim))(out_ch)
        self.drop = nn.Sequential(perm(), nn.Dropout2d(DR_S), perm()) if dim == 1 else nn.Dropout2d(DR_S)
        self.block = nn.Sequential(self.conv, nn.ELU(), self.bnrm, self.drop)
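
Note: perm, K_D and DR_S are module-level definitions not included in this excerpt (perm presumably transposes axes so nn.Dropout2d drops along the intended dimension for 1-D data). The getattr(nn, 'Conv{}d'.format(dim)) idiom itself is worth highlighting: it selects nn.Conv1d/2d/3d and the matching BatchNorm from a single integer, keeping one block definition dimension-agnostic. For example:

import torch.nn as nn

dim = 2
Conv = getattr(nn, 'Conv{}d'.format(dim))       # nn.Conv2d
Norm = getattr(nn, 'BatchNorm{}d'.format(dim))  # nn.BatchNorm2d
block = nn.Sequential(Conv(8, 16, 3, padding=1), nn.ELU(), Norm(16))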
Code example #29
def deconv_block(in_dim, out_dim):
    return nn.Sequential(
        nn.Conv2d(in_dim, out_dim, kernel_size=3, stride=1, padding=1),
        nn.ELU(True),
        nn.Conv2d(out_dim, out_dim, kernel_size=3, stride=1, padding=1),
        nn.ELU(True), nn.UpsamplingNearest2d(scale_factor=2))
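
Note: despite its name, deconv_block upsamples with nearest-neighbour interpolation after plain convolutions, a common alternative to transposed convolution that avoids checkerboard artifacts. Stacking three blocks takes an 8x8 feature map to 64x64:

import torch
import torch.nn as nn

decoder = nn.Sequential(
    deconv_block(128, 64),  # 8x8   -> 16x16
    deconv_block(64, 32),   # 16x16 -> 32x32
    deconv_block(32, 16),   # 32x32 -> 64x64
)
print(decoder(torch.randn(1, 128, 8, 8)).shape)  # torch.Size([1, 16, 64, 64])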
Code example #30
    def __init__(self, num_classes, encoder=None):
        super(Net, self).__init__()

        self.num_classes = num_classes

        self.conv1 = nn.Conv2d(3, 96, kernel_size=3, stride=2, padding=1)  # 32
        # self.bn1 = nn.BatchNorm2d(96)
        self.relu1 = nn.ELU(inplace=True)
        self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)  # 16
        self.fire1_1 = Fire(96, 16, 64)
        self.fire1_2 = Fire(128, 16, 64)
        self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)  # 8
        self.fire2_1 = Fire(128, 32, 128)
        self.fire2_2 = Fire(256, 32, 128)
        self.maxpool3 = nn.MaxPool2d(kernel_size=2, stride=2)  # 4
        self.fire3_1 = Fire(256, 64, 256)
        self.fire3_2 = Fire(512, 64, 256)
        self.fire3_3 = Fire(512, 64, 256)
        self.parallel = ParallelDilatedConv(512, 512)
        self.deconv1 = nn.ConvTranspose2d(512,
                                          256,
                                          3,
                                          stride=2,
                                          padding=1,
                                          output_padding=1)
        # self.bn2 = nn.BatchNorm2d(256)
        self.relu2 = nn.ELU(inplace=True)
        self.deconv2 = nn.ConvTranspose2d(512,
                                          128,
                                          3,
                                          stride=2,
                                          padding=1,
                                          output_padding=1)
        # self.bn3 = nn.BatchNorm2d(128)
        self.relu3 = nn.ELU(inplace=True)
        self.deconv3 = nn.ConvTranspose2d(256,
                                          96,
                                          3,
                                          stride=2,
                                          padding=1,
                                          output_padding=1)
        # self.bn4 = nn.BatchNorm2d(96)
        self.relu4 = nn.ELU(inplace=True)
        self.deconv4 = nn.ConvTranspose2d(192,
                                          self.num_classes,
                                          3,
                                          stride=2,
                                          padding=1,
                                          output_padding=1)

        self.conv3_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1,
                                 padding=1)  # 32
        self.conv3_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1,
                                 padding=1)  # 32
        self.conv2_1 = nn.Conv2d(128, 128, kernel_size=3, stride=1,
                                 padding=1)  # 32
        self.conv2_2 = nn.Conv2d(256, 256, kernel_size=3, stride=1,
                                 padding=1)  # 32
        self.conv1_1 = nn.Conv2d(96, 96, kernel_size=3, stride=1,
                                 padding=1)  # 32
        self.conv1_2 = nn.Conv2d(192, 192, kernel_size=3, stride=1,
                                 padding=1)  # 32

        self.relu1_1 = nn.ELU(inplace=True)
        self.relu1_2 = nn.ELU(inplace=True)
        self.relu2_1 = nn.ELU(inplace=True)
        self.relu2_2 = nn.ELU(inplace=True)
        self.relu3_1 = nn.ELU(inplace=True)
        self.relu3_2 = nn.ELU(inplace=True)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.in_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
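
Note: the manual initialization loop above is He (Kaiming) normal initialization written out by hand: fan_in = kernel_h * kernel_w * in_channels and std = sqrt(2 / fan_in). Assuming the same fan-in convention, a built-in equivalent would be:

import torch.nn as nn

net = Net(num_classes=19)  # hypothetical instantiation of the class above
for m in net.modules():
    if isinstance(m, nn.Conv2d):
        nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='relu')
    elif isinstance(m, nn.BatchNorm2d):
        nn.init.ones_(m.weight)
        nn.init.zeros_(m.bias)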