Example no. 1
 def forward(self, x):
     x = self.linear1(x)
     x = torch.sigmoid(x)
     x = self.linear2(x)
     x = torch.sigmoid(x)
     x = self.linear3(x)
     x = torch.sigmoid(x)
     return x
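For context, a minimal sketch of the constructor this forward pass presupposes; the class name and layer sizes are illustrative assumptions, not taken from the snippet:

import torch.nn as nn

class ThreeLayerMLP(nn.Module):
    def __init__(self, in_dim, hidden_dim, out_dim):
        super().__init__()
        # Three linear layers matching self.linear1/2/3 used in the forward pass above.
        self.linear1 = nn.Linear(in_dim, hidden_dim)
        self.linear2 = nn.Linear(hidden_dim, hidden_dim)
        self.linear3 = nn.Linear(hidden_dim, out_dim)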
Example no. 2
 def __init__(self, token_dim, sense_dim, hidden_dim):
     super(sense_for_token, self).__init__()
     self.mlp = nn.Sequential(nn.Linear(token_dim,
                                        hidden_dim), nn.Sigmoid(),
                              nn.Linear(hidden_dim,
                                        hidden_dim), nn.Sigmoid(),
                              nn.Linear(hidden_dim, sense_dim),
                              nn.Sigmoid()
                              #nn.BatchNorm1d(sense_dim)
                              )
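The forward pass for this module is presumably a single application of the sequential stack; a minimal hedged sketch:

 def forward(self, token_vec):
     # token_vec: (batch, token_dim) -> (batch, sense_dim)
     return self.mlp(token_vec)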
Example no. 3
    def __init__(self, num_classes=1, num_channels=3, pretrained=True):
        super().__init__()
        assert num_channels == 3
        self.num_classes = num_classes
        filters = [64, 128, 256, 512]
        resnet = models.resnet34(pretrained=pretrained)

        self.firstconv = resnet.conv1
        self.firstbn = resnet.bn1
        self.firstrelu = resnet.relu
        self.firstmaxpool = resnet.maxpool
        self.encoder1 = resnet.layer1
        self.encoder2 = resnet.layer2
        self.encoder3 = resnet.layer3
        self.encoder4 = resnet.layer4

        # Decoder
        self.decoder4 = DecoderBlockLinkNet(filters[3], filters[2])
        self.decoder3 = DecoderBlockLinkNet(filters[2], filters[1])
        self.decoder2 = DecoderBlockLinkNet(filters[1], filters[0])
        self.decoder1 = DecoderBlockLinkNet(filters[0], filters[0])

        # Final Classifier
        self.finaldeconv1 = nn.ConvTranspose2d(filters[0], 32, 3, stride=2)
        self.finalrelu1 = nn.ReLU(inplace=True)
        self.finalconv2 = nn.Conv2d(32, 32, 3)
        self.finalrelu2 = nn.ReLU(inplace=True)
        self.finalconv3 = nn.Conv2d(32, num_classes, 2, padding=1)
        self.sig = nn.Sigmoid()
Example no. 4
 def __init__(self, img_shape):
     super(D_mlp, self).__init__()
     self.img_shape = img_shape
     self.model = nn.Sequential(
         nn.Linear(int(np.prod(self.img_shape)), 512),
         nn.LeakyReLU(0.2, inplace=True), nn.Linear(512, 256),
         nn.LeakyReLU(0.2, inplace=True), nn.Linear(256, 1), nn.Sigmoid())
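A hedged sketch of the forward pass such an MLP discriminator typically pairs with; the flattening step is an assumption, since the original snippet only shows the constructor:

 def forward(self, img):
     # Flatten (batch, *img_shape) to (batch, prod(img_shape)) before the MLP.
     img_flat = img.view(img.size(0), -1)
     return self.model(img_flat)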
Example no. 5
    def forward(self, x):

        # pass x through the first convolutional layer
        x = self.conv1(x)

        # find the max of the matrix (2x2 max pooling)
        x = F.max_pool2d(x, 2, stride=None, padding=0)

        # apply the activation to x again
        x = F.relu(x, inplace=False)

        x = self.conv2(x)
        x = F.max_pool2d(x, 2, stride=None, padding=0)
        x = F.relu(x, inplace=False)
        x = self.conv3(x)
        x = F.max_pool2d(x, 2, stride=None, padding=0)
        x = F.relu(x, inplace=False)
        x = self.conv4(x)
        x = F.max_pool2d(x, 2, stride=None, padding=0)
        x = F.relu(x, inplace=False)

        # ignore the first dimension (flatten for the fully connected layers)
        x = x.view(-1, 3456)
        x = F.relu(self.fcl1(x))
        x = F.relu(self.fcl2(x))
        x = self.fcl2(x)
        return torch.sigmoid(x)
Example no. 6
    def predict(self, Xtest):
        self.net.eval()

        # Processing data
        self.batches = CraterDataset(Xtest, BATCH_SIZE)

        # Raw prediction (using sigmoid activation since not in forward)
        to_proba = nn.Sigmoid()
Example no. 7
 def forward(self, x, phase='train'):
     for layer in self.model:
         x = layer(x)
     
     if phase == 'train':
         return x
     elif phase == 'test':
         return torch.sigmoid(x)
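Returning raw outputs during training and sigmoid-activated outputs only at test time usually pairs with nn.BCEWithLogitsLoss, which applies the sigmoid internally in a numerically stable way. A minimal usage sketch under that assumption (model, inputs and targets are placeholder names):

import torch.nn as nn

criterion = nn.BCEWithLogitsLoss()       # expects raw logits
logits = model(inputs, phase='train')
loss = criterion(logits, targets.float())

probs = model(inputs, phase='test')      # already passed through the sigmoid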
Example no. 8
 def forward(self, x_in):
     """
     x_in:an input tensor
     x_in.shape:(batch, num_features)
     return:
         (batch,)
     """
     return torch.sigmoid(self.fc1(x_in)).squeeze()
Example no. 9
def eucl_non_lin(eucl_h, non_lin):
    if non_lin == 'id':
        return eucl_h
    elif non_lin == 'relu':
        return torch.relu(eucl_h)
    elif non_lin == 'tanh':
        return tanh(eucl_h)
    elif non_lin == 'sigmoid':
        return torch.sigmoid(eucl_h)
    return eucl_h
Example no. 10
    def __init__(self):
        super(D, self).__init__()

        self.main = nn.Sequential(
            nn.Conv2d(1024, 1, 1),
            nn.Sigmoid()
        )

    def forward(self, x):
        output = self.main(x).view(-1, 1)
        return output
Example no. 11
    def forward(self, x):
        # the input x goes through conv1, then the ReLU activation, then 2x2 max pooling
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))
        # view reshapes x into a flat vector (total feature count unchanged) to prepare for the fully connected layers
        x = x.view(-1, 30 * 30)

        # pass x through fully connected layer 1 and then ReLU, updating x
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = torch.sigmoid(self.fc3(x))
        return x
Example no. 12
 def __init__(self):
     super(SAE, self).__init__()
     self.fc1 = nn.Linear(
         nb_movies, 20
     )  # first Full connection between first encoded vector and first input vector features
     # no_input_features is equal to no_of_movies
     # 20 neurons in first hidden layers
     self.fc2 = nn.Linear(20, 10)  # Second hidden layer with 10 neurons
     self.fc3 = nn.Linear(10, 20)  # Now in Decoding
     self.fc4 = nn.Linear(20, nb_movies)
     self.activation = nn.Sigmoid()  # the sigmoid activation is doing better than the rectifier function here
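A plausible forward pass for this autoencoder, sketched under the assumption that the activation follows every layer except the final reconstruction:

 def forward(self, x):
     x = self.activation(self.fc1(x))  # encode: nb_movies -> 20
     x = self.activation(self.fc2(x))  # encode: 20 -> 10
     x = self.activation(self.fc3(x))  # decode: 10 -> 20
     x = self.fc4(x)                   # reconstruct: 20 -> nb_movies
     return x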
Example no. 13
    def forward(self, input):
        conv1 = self.conv1(input)
        conv2 = self.conv2(conv1)
        conv3 = self.conv3(conv2)
        conv4 = self.conv4(conv3)
        conv5 = self.conv5(conv4)

        output = conv5
        if self.use_sigmoid:
            output = torch.sigmoid(conv5)

        return output, [conv1, conv2, conv3, conv4, conv5]
Example no. 14
    def __init__(self,input_size,num_heads,num_hidden,dropout):
        super().__init__()      # Initialize the nn.Module base class

        self.input_size = input_size
        self.num_heads = num_heads
        self.num_hidden = num_hidden
        self.device = device

        self.fc1 = nn.Linear(input_size,input_size) 
        self.softmax = nn.Softmax(dim=1) # Per row
        self.activation = nn.SELU()
        self.sigmoid = nn.Sigmoid()
        self.dropout = nn.Dropout(p = dropout)
        self.fc2 = nn.Linear(num_heads,num_hidden)
        self.fc3 = 
Example no. 15
 def __init__(self):
     super(Network, self).__init__()
     self.block = nn.Sequential()
     for i in range(len(R_variable['full_net'])-2):
         self.block.add_module('linear'+str(i), nn.Linear(R_variable['full_net'][i],R_variable['full_net'][i+1]))
         if R_variable['ActFuc']==1:
             self.block.add_module('tanh'+str(i), nn.Tanh())
         elif R_variable['ActFuc']==3:
             self.block.add_module('sin'+str(i), nn.sin())
         elif R_variable['ActFuc']==0:
             self.block.add_module('relu'+str(i), nn.ReLU())
         elif R_variable['ActFuc']==4:
             self.block.add_module('**50'+str(i), Act_op())
         elif R_variable['ActFuc']==5:
             self.block.add_module('sigmoid'+str(i), nn.Sigmoid())
     i = len(R_variable['full_net'])-2
     self.block.add_module('linear'+str(i), nn.Linear(R_variable['full_net'][i],R_variable['full_net'][i+1]))
Example no. 16
    def forward(self, users: torch.tensor,
                items: torch.tensor) -> torch.tensor:
        """
        Forward pass through the model.

        Parameters
        ----------
        users: tensor, 1-d
            Array of user indices
        items: tensor, 1-d
            Array of item indices

        Returns
        -------
        preds: tensor, 1-d
            Predicted ratings or rankings

        """
        user_embedding_cf = self.user_embeddings_cf(users)
        item_embedding_cf = self.item_embeddings_cf(items)
        output_cf = user_embedding_cf * item_embedding_cf

        user_embedding_mlp = self.user_embeddings_mlp(users)
        item_embedding_mlp = self.item_embeddings_mlp(items)
        interaction = torch.cat((user_embedding_mlp, item_embedding_mlp), -1)
        output_mlp = self.mlp_layers(interaction)

        concat = torch.cat((output_cf, output_mlp), -1)

        prediction = self.predict_layer(concat)

        if callable(self.hparams.final_layer):
            prediction = self.hparams.final_layer(prediction)
        elif self.hparams.final_layer == 'sigmoid':
            prediction = torch.sigmoid(prediction)
        elif self.hparams.final_layer == 'relu':
            prediction = F.relu(prediction)
        elif self.hparams.final_layer == 'leaky_relu':
            prediction = F.leaky_relu(prediction)
        elif self.hparams.final_layer is not None:
            raise ValueError(
                f'{self.hparams.final_layer} not valid final layer value!')

        return prediction.view(-1)
Example no. 17
 def __init__(self):  # constructor of the SAE class; 'self' refers to the autoencoder instance being created
     super(SAE, self).__init__()  # super() gives access to the inherited nn.Module class; calling its __init__ here initializes the parent
     self.fc1 = nn.Linear(
         nb_movies, 20
     )  # first full-connection layer: nb_movies input nodes (one per movie) to 20 hidden nodes (an experimented value that can be tuned)
     self.fc2 = nn.Linear(20, 10)  # second hidden layer: 20 nodes in, 10 nodes out
     self.fc3 = nn.Linear(10, 20)  # third hidden layer, start of the decoding: 10 nodes in, 20 nodes out
     self.fc4 = nn.Linear(
         20, nb_movies
     )  # output layer: the reconstruction has as many nodes as the input layer
     self.activation = nn.Sigmoid()  # sigmoid activation for the hidden layers (doing better than the rectifier here)
Example no. 18
 def __init__(self, ngpu):
     super(_netD, self).__init__()
     self.ngpu = ngpu
     self.main = nn.Sequential(
         # input is (nc) x 64 x 64
         nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
         nn.LeakyReLU(0.2, inplace=True),
         # state size. (ndf) x 32 x 32
         nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
         nn.BatchNorm2d(ndf * 2),
         nn.LeakyReLU(0.2, inplace=True),
         # state size. (ndf*2) x 16 x 16
         nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
         nn.BatchNorm2d(ndf * 4),
         nn.LeakyReLU(0.2, inplace=True),
         # state size. (ndf*4) x 8 x 8
         nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
         nn.BatchNorm2d(ndf * 8),
         nn.LeakyReLU(0.2, inplace=True),
         # state size. (ndf*8) x 4 x 4
         nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
         nn.Sigmoid()
     )
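For completeness, a hedged sketch of the forward pass a DCGAN-style discriminator like this one usually has (not shown in the original snippet):

 def forward(self, input):
     output = self.main(input)
     # The final conv produces a 1x1 map; flatten it to one score per sample.
     return output.view(-1, 1).squeeze(1)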
Example no. 19
        self.gama = positive_scaling_factor

        self.Z = [[[0]*10 for i in range(10)] for j in range(10)]
        for i in range(10):
            for j in range(10):
                if i==j:
                    self.Z[0][i][j] = self.rp
                else:
                    self.Z[0][i][j] = 0

net = nn.Sequential(
    nn.Linear(1, 100),
    nn.Linear(100, 100),
    nn.ReLU(),
    nn.Linear(100, 1),
    nn.Sigmoid()
    )  
                  
T = 10
K = 10
U = []
for t in range(T-1):   #run t times
    MAX = 0
    for a in range(K-1):    #has a arms
          
        X = [[0]*10 for i in range(10)]
        for i in range(10):
            for j in range(10):
                k1 = np.random.normal(0, 0.1, 1)
                k2 = np.random.normal(0, 0.1, 1)
                k1 = list(k1)
Example no. 20
    T_sample = T(torch.cat([X, z_sample], 1))

    disc = torch.mean(-T_sample)
    loglike = -F.binary_cross_entropy(X_sample, X, size_average=False) / mb_size

    elbo = -(disc + loglike)

    elbo.backward()
    Q_solver.step()
    P_solver.step()
    reset_grad()

    # Discriminator T(X, z)
    z_sample = Q(torch.cat([X, eps], 1))
    T_q = torch.sigmoid(T(torch.cat([X, z_sample], 1)))
    T_prior = torch.sigmoid(T(torch.cat([X, z], 1)))

    T_loss = -torch.mean(log(T_q) + log(1. - T_prior))

    T_loss.backward()
    T_solver.step()
    reset_grad()

    # Print and plot every now and then
    if it % 1000 == 0:
        print('Iter-{}; ELBO: {:.4}; T_loss: {:.4}'.format(
            it, -elbo.data[0], -T_loss.data[0]))

        samples = P(z).data.numpy()[:16]
Example no. 21
 def __init__(self, input_size, output_size):
     super(LinearRegression, self).__init__()
     self.input_size, self.output_size = input_size, output_size
     self.fc1 = nn.Linear(self.input_size, self.output_size)
     self.sigmoid = nn.Sigmoid()
 def forward(self, x):
     y_pred = self.sigmoid(self.fc1(x))
     return y_pred
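Despite the class name, the sigmoid output makes this a logistic-regression style model. A minimal usage sketch with placeholder dimensions:

import torch
import torch.nn as nn

model = LinearRegression(input_size=10, output_size=1)
x = torch.randn(4, 10)
probs = model(x)                              # shape (4, 1), values in (0, 1)
loss = nn.BCELoss()(probs, torch.ones(4, 1))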
Example no. 23
    def __init__(self, args, num_node_features, num_edge_features):
        super(GNN, self).__init__()
        self.args = args
        if args.attn_type == 'tang' and args.heads != 1:
          raise RuntimeError('tang attention must have heads = 1.')
        self.attn_type = args.attn_type

        if not self.attn_type in SUPPORTED_ATTN_TYPE:
          raise RuntimeError(f'Attention type {self.attn_type} not supported')
        self.depth = args.depth
        self.hidden_size = args.hidden_size
        self.dropout = args.dropout
        self.gnn_type = args.gnn_type
        self.graph_pool = args.graph_pool
        self.tetra = args.tetra
        self.task = args.task
        self.n_layers = args.n_layers
        self.skip_coef = args.skip_coef
        self.gat_act = args.gat_act

        # attention layer
        if self.attn_type == 'gat':
          self.gat_depth = args.gat_depth
          self.heads = args.heads
          self.attn_dropout = args.attn_dropout
          self.gat = torch.nn.ModuleList()
          # Set activation function
          if self.gat_act == 'relu':
            self.gat_act_func = nn.ReLU()
          elif self.gat_act == 'leakyrelu':
            if args.alpha is None:
              raise RuntimeError('LeakyRelu activation is used but alpha is not specified')
            self.gat_act_func = nn.LeakyReLU(negative_slope=args.alpha)
          elif self.gat_act == 'sigmoid':
            self.gat_act_func = nn.Sigmoid()
          else:
            raise RuntimeError('activation function not supported')

          if 'concat' in dir(args):
            self.concat = args.concat
            if self.concat and self.gat_depth == 1:
                raise RuntimeError('Cannot set concatenation when there is only 1 attention layer')
            if not self.concat:
                for d in range(self.gat_depth):
                    self.gat.append(GATConv(self.hidden_size, self.hidden_size, heads=self.heads, dropout=self.attn_dropout, concat=False))
            else:
                self.gat.append(GATConv(self.hidden_size, self.hidden_size, heads=self.heads, dropout=self.attn_dropout, concat=True))
                for d in range(self.gat_depth-1):
                    if d == self.gat_depth-2: # last layer can't use concat
                        self.gat.append(GATConv(self.heads*self.hidden_size, self.hidden_size, heads=self.heads, dropout=self.attn_dropout, concat=False))
                    else:
                        self.gat.append(GATConv(self.heads*self.hidden_size, self.hidden_size, heads=self.heads, dropout=self.attn_dropout, concat=True))
          elif 'concat' not in dir(args):
            # self.concat = args.concat
            for _ in range(self.gat_depth):
                # self.gat.append(GATLayer(in_dim=self.hidden_size, out_dim=self.hidden_size))
                self.gat.append(GATConv(self.hidden_size, self.hidden_size, heads=self.heads, dropout=self.attn_dropout, concat=False))
            # self.gat.append(GATConv(self.hidden_size, self.hidden_size, heads=self.heads, dropout=self.gat_dropout, concat=False))

        elif self.attn_type == 'tang':
          self.heads = args.heads
          self.attn_dropout = args.attn_dropout

        if self.gnn_type == 'dmpnn':
            self.edge_init = nn.Linear(num_node_features + num_edge_features, self.hidden_size)
            self.edge_to_node = DMPNNConv(args)
        elif self.gnn_type == 'orig_dmpnn':
            self.edge_init = nn.Linear(num_node_features + num_edge_features, self.hidden_size)
            self.edge_to_node = OrigDMPNNConv(args, node_agg=True)
        else:
            self.node_init = nn.Linear(num_node_features, self.hidden_size)
            self.edge_init = nn.Linear(num_edge_features, self.hidden_size)

        # layers
        self.convs = torch.nn.ModuleList()

        for d in range(self.depth):
            if args.ft_boost and d>0:
                custom_hidden_size=args.hidden_size+3
            else:
                custom_hidden_size = None
            if self.gnn_type == 'gin':
                self.convs.append(GINEConv(args, custom_hidden_size=custom_hidden_size))
            elif self.gnn_type == 'gcn':
                self.convs.append(GCNConv(args, custom_hidden_size=custom_hidden_size))
            elif self.gnn_type == 'dmpnn':
                self.convs.append(DMPNNConv(args, custom_hidden_size=custom_hidden_size))
            elif self.gnn_type == 'orig_dmpnn':
                self.convs.append(OrigDMPNNConv(args, custom_hidden_size=custom_hidden_size))
            else:
                raise ValueError('Undefined GNN type called {}'.format(self.gnn_type))

        # graph pooling
        if self.tetra:
            self.tetra_update = get_tetra_update(args)

        if self.graph_pool == "sum":
            self.pool = global_add_pool
        elif self.graph_pool == "mean":
            self.pool  = global_mean_pool
        elif self.graph_pool == "max":
            self.pool = global_max_pool
        elif self.graph_pool == "attn":
            self.pool = GlobalAttention(
                gate_nn=torch.nn.Sequential(torch.nn.Linear(self.hidden_size, 2 * self.hidden_size),
                                           torch.nn.BatchNorm1d(2 * self.hidden_size),
                                            torch.nn.ReLU(),
                                            torch.nn.Linear(2 * self.hidden_size, 1)))
        elif self.graph_pool == "set2set":
            self.pool = Set2Set(self.hidden_size, processing_steps=2)
        else:
            raise ValueError("Invalid graph pooling type.")

        # ffn
        self.ffn = torch.nn.ModuleList()
        self.mult = 2 if self.graph_pool == "set2set" else 1
        for n in range(self.n_layers):
          if n != self.n_layers - 1:
            self.ffn.append(nn.Linear(self.mult * self.hidden_size, self.mult * self.hidden_size))
          else:
            self.ffn.append(nn.Linear(self.mult * self.hidden_size, 1))


        #### added ####
        self.cached_zero_vector = nn.Parameter(torch.zeros(self.hidden_size), requires_grad=False)
        self.W_a = nn.Linear(self.hidden_size, self.hidden_size)
        self.W_b = nn.Linear(self.hidden_size, self.hidden_size)
        self.act_func = nn.ReLU()
        self.dropout_layer = nn.Dropout(p=self.attn_dropout)

        self.use_input_features = False
        if self.use_input_features:
            features_batch = torch.from_numpy(np.stack(features_batch)).float()
Example no. 24
 def forward(self, x1, x2):
     # Concatenate the two inputs along the channel dimension before upsampling.
     x = torch.cat([x1, x2], dim=1)
     x = self.convTrans(x)
     x = torch.sigmoid(x)
     return x
Example no. 25
 def __init__(self, inplace=False):
     super(Swish, self).__init__()
     self.inplace = inplace
     self.sigmoid = nn.Sigmoid()
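A sketch of the forward pass this Swish module implies, x * sigmoid(x); honoring the inplace flag with an in-place multiply is an assumption:

 def forward(self, x):
     # Swish activation: x * sigmoid(x)
     if self.inplace:
         return x.mul_(torch.sigmoid(x))
     return x * torch.sigmoid(x)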