Example #1
 def __init__(self, channels, reduction):
     super(SEModule, self).__init__()
     self.avg_pool = AdaptiveAvgPool2d(1)
     self.fc1 = Conv2d(
         channels, channels // reduction, kernel_size=1, padding=0, bias=False)
     self.relu = ReLU(inplace=True)
     self.fc2 = Conv2d(
         channels // reduction, channels, kernel_size=1, padding=0, bias=False)
     self.sigmoid = Sigmoid()
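Only the constructor is shown above; a plausible forward pass for this squeeze-and-excitation block, sketched here for context (not the original author's code):

 def forward(self, x):
     module_input = x
     # squeeze: one value per channel
     x = self.avg_pool(x)
     # excitation: bottleneck, then a sigmoid gate in [0, 1]
     x = self.fc2(self.relu(self.fc1(x)))
     x = self.sigmoid(x)
     # rescale the input channels by their gates
     return module_input * x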
Example #2
def get_activation_module(output_activation: str = "sigmoid"):
    if output_activation == "sigmoid":
        return Sigmoid()
    elif output_activation == "softmax":
        return Softmax()
    elif output_activation == "relu":
        return ReLU()
    else:
        raise RuntimeError("Unknown activation function {}".format(output_activation))
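A minimal usage sketch (the variable name is illustrative):

out_act = get_activation_module("sigmoid")  # returns a Sigmoid() module
# Note: Softmax() without an explicit dim triggers a deprecation warning
# in recent PyTorch releases.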
Example #3
    def __init__(self):
        super(Discriminator, self).__init__()
        d_input = 28 * 28
        d_output = 1

        self.input = Sequential(Linear(d_input, 256), ReLU())
        self.hidden1 = Sequential(Linear(256, 128), ReLU())
        self.hidden2 = Sequential(Linear(128, 64), ReLU())
        self.output = Sequential(Linear(64, d_output), Sigmoid())
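The matching forward pass is not shown; a sketch, assuming x arrives as 28x28 images that must be flattened first:

    def forward(self, x):
        x = x.view(x.size(0), -1)  # flatten 28x28 images
        x = self.input(x)
        x = self.hidden1(x)
        x = self.hidden2(x)
        return self.output(x)      # probability in [0, 1]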
Example #4
    def __init__(self):
        super(Discriminator, self).__init__()
        d_input = 28 * 28
        d_output = 1

        self.input = Sequential(Linear(d_input, 1024), ReLU(), Dropout(0.2))
        self.hidden1 = Sequential(Linear(1024, 512), ReLU(), Dropout(0.2))
        self.hidden2 = Sequential(Linear(512, 256), ReLU(), Dropout(0.2))
        self.output = Sequential(Linear(256, d_output), Sigmoid())
Example #5
 def __init__(self, img_dim=4096, label_dim=114, latent_dim=200):
     super(Decoder, self).__init__()
     self.img_dim = img_dim
     self.label_dim = label_dim
     self.latent_dim = latent_dim
     self.fc1 = Linear(latent_dim + label_dim, 1000)
     self.fc2 = Linear(1000, img_dim)
     self.softplus = Softplus()
     self.sigmoid = Sigmoid()
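A forward pass consistent with this conditional decoder (a sketch; assumes torch is imported and the argument names are illustrative):

 def forward(self, z, y):
     # condition the latent code on the label vector
     h = self.softplus(self.fc1(torch.cat([z, y], dim=1)))
     # sigmoid keeps the reconstructed pixel intensities in [0, 1]
     return self.sigmoid(self.fc2(h))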
Example #6
  def __init__(self, input_size, hidden_size, num_layers, l):
      super(modrnn, self).__init__()
      self.rnnlist = ModuleList()
      for i in range(l):
          self.rnnlist.append(
              period(input_size=input_size, hidden_size=hidden_size,
                     num_layers=num_layers))
      self.l = l
      self.fc = Sequential(BatchNorm1d(hidden_size * 2 * l),
                           Linear(hidden_size * 2 * l, l), ReLU(),
                           BatchNorm1d(l), Linear(l, 1), Sigmoid())
Example #7
    def __init__(self, config: BertMultipleLabelConfig):
        super(BertNerMultipleTypePredictionHead, self).__init__()
        self.type_decoder = nn.Linear(config.lstm_hidden_size * 2,
                                      config.reference_size * 2,
                                      bias=False)
        self.bias = nn.Parameter(torch.zeros(config.reference_size * 2))
        self.sig = Sigmoid()
        # TODO: activation function
Example #8
 def __init__(self, config):
     super().__init__(config)
     self.roberta = RobertaModel(config, add_pooling_layer=False)
     self.dense = Linear(config.hidden_size, config.hidden_size)
     self.dropout = Dropout(config.dropout_rate)
     self.classifier = Linear(config.hidden_size, 1)
     self.loss_fct = BCEWithLogitsLoss(reduction="none")
     self.sigmoid = Sigmoid()
     self.tanh = Tanh()
     self.init_weights()
Example #9
    def __init__(self, in_filters, bottleneck_filters):
        super(RatLesNetv2_SE1, self).__init__()

        self.seq = nn.Sequential(
                ReLU(),
                Conv3d(in_filters, bottleneck_filters, 1),
                ReLU(),
                Conv3d(bottleneck_filters, in_filters, 1),
                Sigmoid()
            )
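The forward pass is omitted; given the sigmoid-terminated bottleneck, it plausibly gates the input volume element-wise (a sketch):

    def forward(self, x):
        # gate in [0, 1] applied to the input volume
        return x * self.seq(x)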
Example #10
    def __init__(self):
        super(DeepPix, self).__init__()

        self.conv0 = DenseNet.features.conv0
        self.norm0 = DenseNet.features.norm0
        self.relu0 = DenseNet.features.relu0
        self.pool0 = DenseNet.features.pool0
        self.denseblock1 = DenseNet.features.denseblock1
        self.transition1 = DenseNet.features.transition1
        self.denseblock2 = DenseNet.features.denseblock2
        self.transition2 = DenseNet.features.transition2

        # keep all reused DenseNet modules trainable
        for module in (self.conv0, self.norm0, self.relu0, self.pool0,
                       self.denseblock1, self.transition1,
                       self.denseblock2, self.transition2):
            for param in module.parameters():
                param.requires_grad = True

        self.conv1x1 = Conv2d(256, 1, kernel_size=1, stride=1)
        self.sigmoid1 = Sigmoid()
        self.linear1 = Linear(196, 1)
        self.sigmoid2 = Sigmoid()
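No forward is shown; one consistent reading (a sketch, assuming 224x224 inputs so the final feature map is 14x14 = 196 values, matching Linear(196, 1)):

    def forward(self, x):
        x = self.pool0(self.relu0(self.norm0(self.conv0(x))))
        x = self.transition1(self.denseblock1(x))
        x = self.transition2(self.denseblock2(x))        # 256 x 14 x 14
        map_out = self.sigmoid1(self.conv1x1(x))         # 1 x 14 x 14 map
        score = self.sigmoid2(self.linear1(map_out.view(map_out.size(0), -1)))
        return map_out, score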
Example #11
 def __init__(self):
     super(encoder, self).__init__()
     self.input = Sequential(Linear(in_features=784, out_features=128),
                             ReLU())
     self.encoder_m = Sequential(Linear(in_features=128, out_features=64),
                                 ReLU())
     self.hidden = Sequential(Linear(in_features=64, out_features=128),
                              ReLU())
     self.output = Sequential(Linear(in_features=128, out_features=784),
                              Sigmoid())
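A forward pass consistent with these blocks (a sketch):

 def forward(self, x):
     x = self.input(x)
     code = self.encoder_m(x)   # 64-dimensional bottleneck
     x = self.hidden(code)
     return self.output(x)      # 784 outputs squashed to [0, 1]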
Example #12
 def __init__(self):
     super(model, self).__init__()
     self.fc = Linear(1000, 10000 * 3)
     self.conv = nn.Sequential(
         UpsamplingBilinear2d(scale_factor=2),
         Conv2d(3, 16, 5, 1, 2),
         UpsamplingBilinear2d(scale_factor=2),
         Conv2d(16, 3, 5, 1, 2),
         Sigmoid()
     )
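The fc output size (10000 * 3 = 30000) suggests a 3 x 100 x 100 image before the conv stack; a hedged forward pass under that assumption:

 def forward(self, z):
     x = self.fc(z).view(-1, 3, 100, 100)
     # two 2x bilinear upsamplings -> 400 x 400; sigmoid keeps values in [0, 1]
     return self.conv(x)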
Example #13
def determine_layers(side, random_dim, num_channels):
    assert side >= 4 and side <= 32

    layer_dims = [(1, side), (num_channels, side // 2)]

    while layer_dims[-1][1] > 3 and len(layer_dims) < 4:
        layer_dims.append((layer_dims[-1][0] * 2, layer_dims[-1][1] // 2))

    layers_D = []
    for prev, curr in zip(layer_dims, layer_dims[1:]):
        layers_D += [
            Conv2d(prev[0], curr[0], 4, 2, 1, bias=False),
            BatchNorm2d(curr[0]),
            LeakyReLU(0.2, inplace=True)
        ]
    layers_D += [
        Conv2d(layer_dims[-1][0], 1, layer_dims[-1][1], 1, 0),
        Sigmoid()
    ]

    layers_G = [
        ConvTranspose2d(random_dim,
                        layer_dims[-1][0],
                        layer_dims[-1][1],
                        1,
                        0,
                        output_padding=0,
                        bias=False)
    ]

    for prev, curr in zip(reversed(layer_dims), reversed(layer_dims[:-1])):
        layers_G += [
            BatchNorm2d(prev[0]),
            ReLU(True),
            ConvTranspose2d(prev[0],
                            curr[0],
                            4,
                            2,
                            1,
                            output_padding=0,
                            bias=True)
        ]
    layers_G += [Tanh()]

    layers_C = []
    for prev, curr in zip(layer_dims, layer_dims[1:]):
        layers_C += [
            Conv2d(prev[0], curr[0], 4, 2, 1, bias=False),
            BatchNorm2d(curr[0]),
            LeakyReLU(0.2, inplace=True)
        ]

    layers_C += [Conv2d(layer_dims[-1][0], 1, layer_dims[-1][1], 1, 0)]

    return layers_D, layers_G, layers_C
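Illustrative use of the returned layer lists (the argument values are hypothetical):

layers_D, layers_G, layers_C = determine_layers(side=32, random_dim=100, num_channels=64)
D = Sequential(*layers_D)   # discriminator, ends in Sigmoid()
G = Sequential(*layers_G)   # generator, ends in Tanh()
C = Sequential(*layers_C)   # classifier head, no output activation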
Example #14
    def __init__(self, input_shape, n_convfilter, \
                 n_fc_filters, h_shape, conv3d_filter_shape):
        print("\ninitializing \"encoder\"")
        #input_shape = (self.batch_size, 3, img_w, img_h)
        super(encoder, self).__init__()
        #conv1
        self.conv1a = Conv2d(input_shape[1], n_convfilter[0], 7, padding=3)
        self.conv1b = Conv2d(n_convfilter[0], n_convfilter[0], 3, padding=1)

        #conv2
        self.conv2a = Conv2d(n_convfilter[0], n_convfilter[1], 3, padding=1)
        self.conv2b = Conv2d(n_convfilter[1], n_convfilter[1], 3, padding=1)
        self.conv2c = Conv2d(n_convfilter[0], n_convfilter[1], 1)

        #conv3
        self.conv3a = Conv2d(n_convfilter[1], n_convfilter[2], 3, padding=1)
        self.conv3b = Conv2d(n_convfilter[2], n_convfilter[2], 3, padding=1)
        self.conv3c = Conv2d(n_convfilter[1], n_convfilter[2], 1)

        #conv4
        self.conv4a = Conv2d(n_convfilter[2], n_convfilter[3], 3, padding=1)
        self.conv4b = Conv2d(n_convfilter[3], n_convfilter[3], 3, padding=1)

        #conv5
        self.conv5a = Conv2d(n_convfilter[3], n_convfilter[4], 3, padding=1)
        self.conv5b = Conv2d(n_convfilter[4], n_convfilter[4], 3, padding=1)
        self.conv5c = Conv2d(n_convfilter[3], n_convfilter[4], 1)

        #conv6
        self.conv6a = Conv2d(n_convfilter[4], n_convfilter[5], 3, padding=1)
        self.conv6b = Conv2d(n_convfilter[5], n_convfilter[5], 3, padding=1)

        #pooling layer
        self.pool = MaxPool2d(kernel_size=2, padding=1)

        #nonlinearities of the network
        self.leaky_relu = LeakyReLU(negative_slope=0.01)
        self.sigmoid = Sigmoid()
        self.tanh = Tanh()

        #find the input feature map size of the fully connected layer
        fc7_feat_w, fc7_feat_h = self.fc_in_featmap_size(input_shape,
                                                         num_pooling=6)
        #define the fully connected layer
        self.fc7 = Linear(int(n_convfilter[5] * fc7_feat_w * fc7_feat_h),
                          n_fc_filters[0])

        #define the FCConv3DLayers in 3d convolutional gru unit
        self.t_x_s_update = BN_FCConv3DLayer_torch(n_fc_filters[0],
                                                   conv3d_filter_shape,
                                                   h_shape)
        self.t_x_s_reset = BN_FCConv3DLayer_torch(n_fc_filters[0],
                                                  conv3d_filter_shape, h_shape)
        self.t_x_rs = BN_FCConv3DLayer_torch(n_fc_filters[0],
                                             conv3d_filter_shape, h_shape)
Example #15
def n_layer_nn(optimiser_function, layer_dims=[28*28 + 1, 128, 10], learning_rate=0.1, epochs=100):
    layers = len(layer_dims)
    assert layers >= 3, "Please give at least 3 dimensions"

    modules = [Linear(layer_dims[0], layer_dims[1]), ReLU()]
    for i in range(1, layers - 2):
        modules.append(Linear(layer_dims[i], layer_dims[i+1]))
        modules.append(ReLU())

    modules.append(Linear(layer_dims[layers-2], layer_dims[layers-1]))
    modules.append(Sigmoid())
    print(modules)
    model = Sequential(*modules).cuda('cuda:0')

    loss_function = CrossEntropyLoss()

    optimiser = optimiser_function(model.parameters(), lr=learning_rate)

    stopper = EarlyStop(patience=3)
    train_losses=[]
    val_losses=[]
    accuracy=[]

    for epoch in range(epochs):
        losses=[]
        for i,(X, y) in enumerate(get_minibatches(train_loader, device)):
            optimiser.zero_grad()
            yhat = model(X)
            loss = loss_function(yhat, y.argmax(1))
            losses.append(loss.item())
            loss.backward()
            optimiser.step()

        train_losses.append(np.mean(losses))

        if epoch % 3 == 0:
            with torch.no_grad():
                losses = []
                corrects = 0
                for i,(X, y) in enumerate(get_minibatches(val_loader, device)):
                    y = y.argmax(1)
                    yhat = model(X)
                    losses.append(loss_function(yhat, y).item())
                    ypred = yhat.argmax(1)
                    corrects += (ypred == y).sum()
                val_loss = np.mean(losses)
                val_losses.append(val_loss)
                acc = corrects.cpu().numpy() / val_size
                #print("Accuracy {}".format(acc))
                accuracy.append(acc)
                if not stopper.continue_still(val_loss):
                    print("Early stop at epoch {}".format(epoch))
                    break
    return val_losses, accuracy
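A hypothetical invocation; train_loader, val_loader, device and val_size must already exist in the enclosing scope, since the function reads them directly:

val_losses, accuracy = n_layer_nn(torch.optim.Adam,
                                  layer_dims=[28*28 + 1, 128, 10],
                                  learning_rate=0.01, epochs=30)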
Example #16
    def __init__(self, input_size, hidden_size=30, num_labels=21):
        super(ClassifierExtension, self).__init__()
        self.name = "cls"
        self.linear1 = Linear(input_size, hidden_size)
        self.hidden_activation = Tanh()
        self.linear2 = Linear(hidden_size, num_labels)

        # sigmoid for a single output, softmax across multiple labels
        if num_labels == 1:
            self.final_activation = Sigmoid()
        else:
            self.final_activation = Softmax(dim=-1)
Example #17
    def __init__(self, num_channels=1, feat_channels=[4, 8, 16, 32, 64], residual='conv'):

        # residual: 'conv' passes the input x through a 1x1 conv at every layer
        # (for downsampling); None removes the residual connections

        super(UNet3D, self).__init__()

        layers = [2, 2, 2, 2]
        block = BasicBlock
        self.inplanes = 16
        self.dilation = 1
        self.groups = 1
        self.base_width = 64
        self.conv1 = nn.Conv2d(9, self.inplanes, kernel_size=3, stride=2, padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
        self.layer1 = self._make_layer(block, 32, layers[0])
        self.layer2 = self._make_layer(block, 64, layers[1], stride=2,
                                       dilate=False)
        self.layer3 = self._make_layer(block, 128, layers[2], stride=2,
                                       dilate=False)

        # Encoder downsamplers
        self.pool1 = MaxPool3d(kernel_size=3, stride=2, padding=1)
        self.pool2 = MaxPool3d(kernel_size=3, stride=2, padding=1)
        self.pool3 = MaxPool3d(kernel_size=3, stride=2, padding=1)
        self.pool4 = MaxPool3d(kernel_size=3, stride=2, padding=1)

        # Encoder convolutions
        self.conv_blk1 = Conv3D_Block(num_channels, feat_channels[0], residual=residual)
        self.conv_blk2 = Conv3D_Block(feat_channels[0], feat_channels[1], residual=residual)
        self.conv_blk3 = Conv3D_Block(feat_channels[1], feat_channels[2], residual=residual)
        self.conv_blk4 = Conv3D_Block(feat_channels[2], feat_channels[3], residual=residual)
        self.conv_blk5 = Conv3D_Block(feat_channels[3], feat_channels[4], residual=residual)

        # Decoder convolutions
        self.dec_conv_blk4 = Conv3D_Block(2*feat_channels[3], feat_channels[3], residual=residual)
        self.dec_conv_blk3 = Conv3D_Block(2*feat_channels[2], feat_channels[2], residual=residual)
        self.dec_conv_blk2 = Conv3D_Block(2*feat_channels[1], feat_channels[1], residual=residual)
        self.dec_conv_blk1 = Conv3D_Block(2*feat_channels[0], feat_channels[0], residual=residual)

        # Decoder upsamplers
        self.deconv_blk4 = Deconv3D_Block(feat_channels[4], feat_channels[3])
        self.deconv_blk3 = Deconv3D_Block(feat_channels[3], feat_channels[2])
        self.deconv_blk2 = Deconv3D_Block(feat_channels[2], feat_channels[1])
        self.deconv_blk1 = Deconv3D_Block(feat_channels[1], feat_channels[0])

        # Final 1x1 conv segmentation map
        self.one_conv     = Conv3d(feat_channels[0], num_channels, kernel_size=1, stride=1, padding=0, bias=True)
        self.one_one_conv = Conv3d(8, num_channels, kernel_size=1, stride=1, padding=0, bias=True)

        # Activation function
        self.activation = Sigmoid()
Example #18
    def __init__(self):
        super(MNISTConvDecoder, self).__init__()

        self.fc1 = Linear(15, 256)
        self.fc2 = Linear(256, 64 * 4 * 4)

        self.deconv1 = ConvTranspose2d(64, 32, (4, 4), stride=2, padding=1)
        self.deconv2 = ConvTranspose2d(32, 32, (4, 4), stride=2, padding=1)
        self.deconv3 = ConvTranspose2d(32, 1, (4, 4), stride=2, padding=1)

        self.sig = Sigmoid()
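A forward pass consistent with these shapes (a sketch, assuming torch.nn.functional is imported as F):

    def forward(self, z):
        x = F.relu(self.fc1(z))
        x = F.relu(self.fc2(x)).view(-1, 64, 4, 4)
        # three stride-2 deconvolutions: 4 -> 8 -> 16 -> 32
        x = F.relu(self.deconv1(x))
        x = F.relu(self.deconv2(x))
        return self.sig(self.deconv3(x))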
Example #19
    def __init__(self, channel, reduction=4):
        super(CALayer, self).__init__()

        # feature channel downscale and upscale --> channel weight
        self.layers = nn.Sequential(
            AdaptiveAvgPool2d(1),
            Conv2d(channel, channel // reduction, 1, padding=0, bias=True),
            ReLU(inplace=True),
            Conv2d(channel // reduction, channel, 1, padding=0, bias=True),
            Sigmoid()
        )
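The usual forward for this channel-attention pattern multiplies the input by the learned per-channel weights (a sketch):

    def forward(self, x):
        return x * self.layers(x)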
Example #20
    def __init__(self,
                 in_channels,
                 out_channels,
                 input_spatial_dim,
                 p=1,
                 t=2,
                 r=1,
                 net_mode='ir'):
        super(AttentionModule, self).__init__()
        self.func = {
            'ir': BottleneckIR,
            'irse': BottleneckIRSE,
            'irse_v2': BottleneckIRSE_V2,
            'basic': BasicResBlock
        }

        # start branch
        self.start_branch = ModuleList()
        self.start_branch.append(self.func[net_mode](in_channels, out_channels,
                                                     1))
        for i in range(p - 1):
            self.start_branch.append(self.func[net_mode](out_channels,
                                                         out_channels, 1))

        # trunk branch
        self.trunk_branch = ModuleList()
        for i in range(t):
            self.trunk_branch.append(self.func[net_mode](out_channels,
                                                         out_channels, 1))

        # mask branch
        # 1st, determine how many down-sample operations should be executed.
        num_down_sample_times = 0
        resolution = input_spatial_dim
        while resolution > 4 and resolution not in (8, 7, 6, 5):
            num_down_sample_times += 1
            resolution = (resolution - 2) / 2 + 1
        self.num_down_sample_times = min(num_down_sample_times, 100)
        self.mask_branch = MaskModule(num_down_sample_times, out_channels, r,
                                      net_mode)

        self.mask_helper = Sequential(
            Conv2d(out_channels, out_channels, 1, 1, 0, bias=False),
            BatchNorm2d(out_channels), ReLU(inplace=True),
            Conv2d(out_channels, out_channels, 1, 1, 0, bias=False),
            BatchNorm2d(out_channels), Sigmoid())
        # output branch
        self.out_branch = ModuleList()
        for i in range(p):
            self.out_branch.append(self.func[net_mode](out_channels,
                                                       out_channels, 1))
        self.p = p
        self.t = t
        self.r = r
Example #21
 def forward(self, x_int, x_float):
     embedded = embedding(x_int, self.grid_size, self.embedding_layers)
     x = torch.cat((embedded, x_float), 1)
     features = self.features(x)
     out = F.relu(features, inplace=True)
     out = F.adaptive_avg_pool2d(out, (1, 1))
     out = torch.flatten(out, 1)
     out = self.classifier(out)
     if self.prob_type == "classification":
         out = Sigmoid()(out)
     return out
Example #22
 def __init__(self,inputs):
     super().__init__()
     self.hidden1 = Linear(inputs,10) # layer 1 - 10 units
     self.hidden2 = Linear(10,8) # layer 2 - 8 units
     self.output  = Linear(8,1) # layer 3 (output) - 1 unit
     self.relu    = ReLU() # no learnable parameters here
     self.sigmoid = Sigmoid() # no learnable parameters here
     # He Kaiming initialization
     kaiming_uniform_(self.hidden1.weight, nonlinearity='relu')
     kaiming_uniform_(self.hidden2.weight, nonlinearity='relu')
     kaiming_uniform_(self.output.weight, nonlinearity='sigmoid')
Example #23
 def __init__(self, n_inputs):
     super(MLP, self).__init__()
     self.hidden1 = Linear(n_inputs, 16)
     kaiming_uniform_(self.hidden1.weight, nonlinearity='relu')
     self.act1 = ReLU()
     self.hidden2 = Linear(16, 8)
     kaiming_uniform_(self.hidden2.weight, nonlinearity='relu')
     self.act2 = ReLU()
     self.hidden3 = Linear(8, 1)
     xavier_uniform_(self.hidden3.weight)
     self.act3 = Sigmoid()
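The corresponding forward pass would plausibly be:

 def forward(self, X):
     X = self.act1(self.hidden1(X))
     X = self.act2(self.hidden2(X))
     return self.act3(self.hidden3(X))   # probability in [0, 1]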
Example #24
 def __init__(self):
     super().__init__()
     self.seq = Sequential(
         OrderedDict([
             ("fc1", Linear(8, 16, bias=True)),
             ("act1", ReLU()),
             ("fc2", Linear(16, 32, bias=True)),
             ("act2", ReLU()),
             ("fc3", Linear(32, 64, bias=True)),
             ("sig", Sigmoid()),
         ]))
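Passing an OrderedDict gives each submodule a name, so layers are addressable as attributes (the class name below is hypothetical):

net = SigmoidNet()                 # hypothetical instance of the class above
print(net.seq.fc1.weight.shape)    # named access, same as net.seq[0].weight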
Example #25
    def __init__(self,
                 dataset,
                 num_layers,
                 hidden,
                 weight_conv='WeightConv1',
                 multi_channel='False'):
        super(SMG_2h_JK, self).__init__()
        self.lin0 = Linear(dataset.num_features, hidden)
        self.convs = torch.nn.ModuleList()
        for i in range(num_layers):
            self.convs.append(SparseConv(hidden, hidden))

        self.ma1hs = torch.nn.ModuleList()
        self.ma2hs = torch.nn.ModuleList()
        if multi_channel == 'True':
            out_channel = hidden
        else:
            out_channel = 1
        if weight_conv != 'WeightConv2':
            for i in range(num_layers):
                self.ma1hs.append(
                    WeightConv1(hidden, hidden, hidden, aggr='mean'))
                self.ma2hs.append(
                    WeightConv1(hidden, hidden, out_channel, aggr='mean'))
        else:
            for i in range(num_layers):
                self.ma1hs.append(
                    WeightConv2(Sequential(Linear(hidden * 2, hidden), ReLU(),
                                           Linear(hidden, hidden), ReLU(),
                                           Linear(hidden, hidden), Sigmoid()),
                                aggr='mean'))
                self.ma2hs.append(
                    WeightConv2(Sequential(Linear(hidden * 2, hidden), ReLU(),
                                           Linear(hidden, hidden), ReLU(),
                                           Linear(hidden, out_channel),
                                           Sigmoid()),
                                aggr='mean'))

        self.jump = JumpingKnowledge(mode='cat')
        self.lin1 = Linear(num_layers * hidden, hidden)
        self.lin2 = Linear(hidden, dataset.num_classes)
Example #26
    def __init__(self):
        super(CNN, self).__init__()

        self.cnn_layers = Sequential(
            # first convolutional layer
            Conv2d(1, 10, kernel_size=3, stride=1, padding=1),
            BatchNorm2d(10),
            ReLU(inplace=True),
            MaxPool2d(kernel_size=2, stride=2),

            # Defining another 2D convolution layer
            Conv2d(10, 5, kernel_size=3, stride=1, padding=1),
            BatchNorm2d(5),
            ReLU(inplace=True),
            MaxPool2d(kernel_size=2, stride=2),

            # Defining another 2D convolution layer
            Conv2d(5, 4, kernel_size=3, stride=1, padding=1),
            BatchNorm2d(4),
            ReLU(inplace=True),
            MaxPool2d(kernel_size=2, stride=2),
        )

        self.linear_layers = Sequential(
            # feature computation
            # layer 1
            Linear(1 * 40 * 40, 20),
            ReLU(),
            # layer 2
            Linear(20, 10),
            ReLU(),
            # layer 3
            Linear(10, 5),
            ReLU(),
            # layer 4
            Linear(5, 4),
            Sigmoid(),
            # layer 5
            Linear(4, 2),
            Sigmoid(),
        )
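A forward pass consistent with the two stacks (a sketch; the Linear(1 * 40 * 40, 20) input size implies a specific input resolution):

    def forward(self, x):
        x = self.cnn_layers(x)
        x = x.view(x.size(0), -1)   # flatten before the fully connected stack
        return self.linear_layers(x)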
Example #27
 def __init__(self, n_inputs=DDM_NUM, n_outputs=DDM_NUM + DIFFERECE_COL):
     super(DDM, self).__init__()
     # # input to very beginning hidden layer
     self.hidden = Linear(n_inputs, 256)
     xavier_uniform_(self.hidden.weight)
     self.act = Sigmoid()
     # input to beginning hidden layer
     self.hidden0 = Linear(256, 256)
     xavier_uniform_(self.hidden0.weight)
     self.act0 = Sigmoid()
     # self.act0 = ReLU()
     # input to first hidden layer
     self.hidden1 = Linear(256, 256)
     xavier_uniform_(self.hidden1.weight)
     self.act1 = Sigmoid()
     # self.act1 = ReLU()
     # second hidden layer
     self.hidden2 = Linear(256, 128)
     xavier_uniform_(self.hidden2.weight)
     self.act2 = Sigmoid()
     # self.act2 = ReLU()
     # third hidden layer
     self.hidden3 = Linear(128, 64)
     xavier_uniform_(self.hidden3.weight)
     self.act3 = Sigmoid()
     # self.act3 = ReLU()
     # 4th hidden layer
     self.hidden4 = Linear(64, n_outputs)
     xavier_uniform_(self.hidden4.weight)
     # self.act4 = Sigmoid()
     # # third hidden layer and output
     # self.hidden3 = Linear(64, 6)
     # xavier_uniform_(self.hidden3.weight)
     # self.act3 = Softmax(dim=1)
     self.dropout = Dropout(p=0.5)
     self.batchnorm = BatchNorm1d(256)
     self.batchnorm0 = BatchNorm1d(256)
     self.batchnorm1 = BatchNorm1d(256)
     self.batchnorm2 = BatchNorm1d(128)
     self.batchnorm3 = BatchNorm1d(64)
     self.batchnorm4 = BatchNorm1d(n_outputs)
Example #28
def run_dis(x):
    out = []
    out_dis, hid = Net_D(x)
    out += [out_dis]
    if c1_len:
        out += [Softmax()(Q_cat(hid))]
    if c2_len:
        out += [Q_con(hid)]
    if c3_len:
        out += [Sigmoid()(Q_bin(hid))]

    return out
Example #29
    def __init__(self, in_channels: int, classes: int, class_type: str):
        super().__init__()
        self.avgpool = AdaptiveAvgPool2d(1)
        self.fc = Linear(in_channels, classes)

        if class_type == "single":
            self.softmax = Softmax(dim=1)
        elif class_type == "multi":
            self.softmax = Sigmoid()
        else:
            raise ValueError(
                "unknown class_type given of {}".format(class_type))
Example #30
  def __init__(self, adapterClassifier, id2label, lr) -> None:
      super().__init__()

      self.classifier = adapterClassifier
      self.id2label = id2label

      self.lr = lr
      self.criterion = BCEWithLogitsLoss(pos_weight=torch.full((len(id2label),), 1.))

      self.sig = Sigmoid()

      self.declare_metrics(self.id2label)