Example #1
    def forward(self, f_embs, s_embs):

        if self.mode == 'all':
            x = torch.cat([f_embs, s_embs], dim=1)
            x = F.rrelu(self.Linear1(x))
            x = F.rrelu(self.Linear2(x))
            x = F.rrelu(self.Linear3(x))
            cos_x = self.cos(f_embs, s_embs).unsqueeze(1)
            dot_x = torch.mul(f_embs, s_embs).sum(dim=1, keepdim=True)
            pdist_x = self.pdist(f_embs, s_embs)
            x = torch.cat([x, cos_x, dot_x, pdist_x], dim=1)
        elif self.mode == 'cos':
            x = self.cos(f_embs, s_embs).unsqueeze(1)
        elif self.mode == 'dot':
            x = torch.mul(f_embs, s_embs).sum(dim=1, keepdim=True)
        elif self.mode == 'pdist':
            x = self.pdist(f_embs, s_embs)

        if self.BCE_mode:
            return x.squeeze()
            # return (x/x.max()).squeeze()
        else:
            x = self.linear_output(x)
            x = F.rrelu(x)
            # x = torch.cat((x,-x),dim=1)
            return x
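The layer attributes this forward relies on are defined elsewhere in the class. A minimal constructor sketch that makes it type-check, assuming an embedding width emb_dim and hidden width hidden_dim (both hypothetical names, not taken from the original repository):

import torch
import torch.nn as nn
import torch.nn.functional as F

class PairScorer(nn.Module):
    # Hypothetical constructor: emb_dim, hidden_dim and the class name are assumptions,
    # chosen only so that the forward pass above runs.
    def __init__(self, emb_dim, hidden_dim=128, mode='all', BCE_mode=False):
        super().__init__()
        self.mode = mode
        self.BCE_mode = BCE_mode
        self.Linear1 = nn.Linear(2 * emb_dim, hidden_dim)    # consumes the concatenated pair
        self.Linear2 = nn.Linear(hidden_dim, hidden_dim)
        self.Linear3 = nn.Linear(hidden_dim, hidden_dim)
        self.cos = nn.CosineSimilarity(dim=1)
        self.pdist = nn.PairwiseDistance(p=2, keepdim=True)  # keepdim so the dim=1 cat works
        # 'all' mode concatenates hidden_dim features with the cos/dot/pdist scalars
        out_in = hidden_dim + 3 if mode == 'all' else 1
        self.linear_output = nn.Linear(out_in, 1)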
Example #2
    def forward(self, x):
        out = F.rrelu(self.fc1(x))  # randomized leaky ReLU activation
        out = F.rrelu(self.fc2(out))  # randomized leaky ReLU activation

        #  out = F.rrelu(self.fc3(out))

        return out
Example #3
 def forward(self, x):
     x = F.rrelu(F.max_pool2d(self.conv1(x), 2))
     x = F.rrelu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
     x = x.view(-1, 320)
     x = F.rrelu(self.fc1(x))
     x = F.dropout(x, training=self.training)
     x = self.fc2(x)
     return F.log_softmax(x, dim=1)
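This is the layer layout of the standard PyTorch MNIST example; a matching constructor would be the following sketch (assuming 1-channel 28x28 inputs, which is what makes the flattened size 20 * 4 * 4 = 320):

import torch.nn as nn

class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)   # 28x28 -> 24x24, max-pooled to 12x12
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)  # 12x12 -> 8x8, max-pooled to 4x4
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)                  # 20 channels * 4 * 4
        self.fc2 = nn.Linear(50, 10)                   # 10 digit classes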
Example #4
    def forward(self, x):
        x = self.fc1(x)
        x = F.rrelu(x)
        x = self.fc2(x)
        x = F.rrelu(x)
        x = self.fc3(x)

        return x
Example #5
 def forward(self, state, actor):
     x = torch.cat([state, actor], 1)
     x = self.fc_1(x)
     x = F.rrelu(x)
     x = self.fc_2(x)
     x = F.rrelu(x)
     x = self.fc_3(x)
     return x
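This follows the usual actor-critic critic pattern: state and action are concatenated and mapped to a single Q-value. A constructor sketch under that assumption (state_dim, action_dim and hidden are hypothetical names):

import torch.nn as nn

class Critic(nn.Module):
    def __init__(self, state_dim, action_dim, hidden=256):
        super().__init__()
        self.fc_1 = nn.Linear(state_dim + action_dim, hidden)  # consumes the concatenated (state, action)
        self.fc_2 = nn.Linear(hidden, hidden)
        self.fc_3 = nn.Linear(hidden, 1)                       # scalar Q(s, a)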
Example #6
 def forward(self, x):
     skip = self.conv3(self.UpNN(x))
     
     x = F.rrelu(self.BN1(self.conv1(x)))
     x = self.UpNN(x)
     x = self.BN2(self.conv2(x))
     
     x = F.rrelu(x + skip)
     return x
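The block doubles the spatial resolution and adds a projected skip connection. A constructor sketch consistent with this forward (the channel counts, kernel sizes and upsampling mode are assumptions):

import torch.nn as nn

class UpBlock(nn.Module):
    def __init__(self, in_ch, out_ch):
        super().__init__()
        self.conv1 = nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1)
        self.BN1 = nn.BatchNorm2d(out_ch)
        self.UpNN = nn.Upsample(scale_factor=2, mode='nearest')
        self.conv2 = nn.Conv2d(out_ch, out_ch, kernel_size=3, padding=1)
        self.BN2 = nn.BatchNorm2d(out_ch)
        self.conv3 = nn.Conv2d(in_ch, out_ch, kernel_size=1)  # 1x1 projection on the upsampled skip path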
Example #7
    def forward(self, x):
        skip = self.conv3(self.UpNN(x))

        x = F.rrelu(self.BatchNorm_Layer1(self.conv1_layer(x)))
        x = self.UpNN(x)
        x = self.BatchNorm_Layer2(self.conv2_layer(x))

        x = F.rrelu(x + skip)
        return x
Example #8
 def forward(self, text_vec):
     hidden = text_vec
     hidden = torch.cat([hidden, F.rrelu(self.conv_1(hidden))], dim=1)
     hidden = torch.cat([hidden, F.rrelu(self.conv_2(hidden))], dim=1)
     hidden = torch.cat([hidden, F.rrelu(self.conv_3(hidden))], dim=1)
     hidden = torch.cat([hidden, F.rrelu(self.conv_4(hidden))], dim=1)
     hidden = hidden[:, self.input_size:, :]  # drop raw input
     hidden = self.norm_1(hidden) if hidden.size(2) > 1 else hidden
     hidden = torch.max(hidden, dim=2)[0]
     hidden = self.linear_2(F.rrelu(self.linear_1(hidden)))
     return hidden
Example #9
 def forward(self, x):
     #forward
     y1 = self.pool1(F.rrelu(self.conv1(x)))
     #print(y1.shape)
     y2 = self.pool2(self.bn1(F.rrelu(self.conv2(y1))))
     #print(y2.shape)
     y3 = self.bn2(F.rrelu(self.conv3(y2)))
     #print(y3.shape)
     y4 = self.bn3(F.rrelu(self.h4(y3.squeeze(3).squeeze(2))))
     #print(y4.shape)
     output_prob = F.softmax(self.h5(y4), dim=1)
     return output_prob
Example #10
    def forward(self, data,IsTrain=False):
        out = F.rrelu(self.lin_node(self.norm_x(data.x)))
        edge_attr = F.rrelu(self.lin_edge_attr(data.edge_attr))
        h = out.unsqueeze(0)
        # edge_*3 stores each undirected edge only once, so append the reversed (j, i) pairs to the (i, j) edges
        edge_index3 = torch.cat([data.edge_index3,data.edge_index3[[1,0]]],1)
        edge_attr3 = torch.cat([data.edge_attr3,data.edge_attr3],0)
        
        for i in range(2):
            # using bonding as edge
            m = F.rrelu(self.conv1(out, data.edge_index, edge_attr))
            out, h = self.gru1(m.unsqueeze(0), h)
            out = out.squeeze(0)
        
        out = self.lin_covert(out)
        h = out.unsqueeze(0)
        for i in range(2):
            # using coupling as edge
            m = F.rrelu(self.conv2(out, edge_index3, edge_attr3))
            out, h = self.gru2(m.unsqueeze(0), h)
            out = out.squeeze(0)  
            
        n = data.edge_index3.shape[1]
        range_ = torch.arange(n)
        batch_index = torch.cat([range_,range_]).to('cuda:0')
        temp = out[data.edge_index3].reshape(2*n,-1) # (2*n_target,d)

        coupling_batch_index = data.batch[data.edge_index3[0]]
        pool = self.pool(out, data.batch) # (m,d)
        pool = pool[coupling_batch_index] # (n_target,d)        
        
        h0,h1 = self.h_lin(data.edge_attr3).split(self.num_layers*self.dim*2,1)
        h = (h0.reshape(-1,self.num_layers,self.dim*2).transpose(0,1).contiguous(),\
             h1.reshape(-1,self.num_layers,self.dim*2).transpose(0,1).contiguous())
        q_star = self.q_star_lin(pool)
            
        yhat = self.head(temp,batch_index,h,q_star)  
        yhat = self.norm(yhat)
        
        weight = self.lin_weight(data.edge_attr3)
        bias = self.lin_bias(data.edge_attr3)
        yhat = torch.sum(yhat * weight,1,keepdim=True) + bias
        yhat = yhat.squeeze(1)
        
        if IsTrain:
            k = torch.sum(data.edge_attr3,0)
            nonzeroIndex = torch.nonzero(k).squeeze(1)
            abs_ = torch.abs(data.y-yhat).unsqueeze(1)
            loss = torch.sum(torch.log(torch.sum(abs_ * data.edge_attr3[:,nonzeroIndex],0)/k[nonzeroIndex]))/nonzeroIndex.shape[0]
            return loss
        else:
            return yhat       
Example #11
    def forward(self, batch):
        """Pass the batch of images through each layer of the network, applying
        non-linearities after each layer.

        Note that this function *needs* to be called "forward" for PyTorch to
        automagically perform the forward pass.

        Params:
        -------
        - batch: (Tensor) An input batch of images

        Returns:
        --------
        - logits: (Tensor) The output of the network
        """

        # Apply the first convolution, then batch-normalization,
        # then the RReLU non-linearity on its outputs
        batch = func.rrelu(self.conv1_normed(self.conv1(batch)))

        batch = self.pool1(batch)
        # Apply conv2 and conv3 similarly
        batch = func.rrelu(self.conv2_normed(self.conv2(batch)))
        batch = self.pool2(batch)

        batch = func.rrelu(self.conv3_normed(self.conv3(batch)))
        batch = func.rrelu(self.conv4_normed(self.conv4(batch)))
        batch = self.pool3(batch)
        batch = func.rrelu(self.conv5_normed(self.conv5(batch)))
        batch = func.rrelu(self.conv6_normed(self.conv6(batch)))
        # Pass the output of conv6 to the pooling layer
        batch = self.pool4(batch)
        batch = func.rrelu(self.conv7_normed(self.conv7(batch)))
        batch = func.rrelu(self.conv8_normed(self.conv8(batch)))
        # Pass the output of conv8 to the pooling layer
        batch = self.pool5(batch)

        # Reshape the pooled feature maps to pass to the fully-connected layers
        batch = batch.view(-1, self.num_flat_features(batch))

        # Connect the reshaped features to fc1 and fc2
        batch = func.rrelu(self.fc1_normed(self.fc1(batch)))
        batch = func.rrelu(self.fc2_normed(self.fc2(batch)))

        # Connect fc2 to fc3 - this layer is slightly different than the rest (why?)
        batch = self.fc3(batch)


        # Return the class predictions
        # TODO: apply an activation function to 'batch'
        #batch = func.sigmoid(batch)
        return batch
Example #12
 def forward(self, X, *args):
     # X = F.relu(self.conv1(X))
     # X = self.dropout(X)
     # X = F.relu(self.conv2(X))
     # X = self.max_pool(X)
     # X = self.dropout(X)
     # X = F.relu(self.de_conv1(X))
     # X = self.de_conv2(X)
     X = F.rrelu(self.inp_layer(X))
     X = F.rrelu(self.middle_layer(X))
     X = self.out_layer(X)
     # X = F.sigmoid(X)
     return X
Example #13
    def forward(self, seq0, seq1):
        out0 = self.embed_0(seq0)
        out1 = self.embed_1(seq1)
        out0 = out0.view(out0.size()[0],self.embedding_size)
        out1 = out1.view(out1.size()[0],self.embedding_size)
        out0 = functional.rrelu(out0)# + p
        out1 = functional.rrelu(out1)

        bias_0 = self.embed_0_0(seq0)
        bias_1 = self.embed_1_0(seq1)
        out = bias_0 + bias_1 #+ p.view(mean.size()[0],1,1)
        out = out.view(out.size()[0],1)
        ma = (out0 * out1).sum(1)
        ma = ma.view(ma.size()[0],1)
        return ma+out
Example #14
    def forward(self, g):
        # input from BiDAF modeling layer w/ shape:  (batch_size, context_len, 2 * hid_size)

        # g size torch.Size([64, 307, 800])
        # batch_size      64
        # context_len     307

        # Multiplicative self-attention
        Wh = self.WP(g)
        # print("W:", W.size())    # [64, 307, 800]
        s_t = torch.bmm(Wh, g.permute([0, 2, 1]))
        # print("s:", s_t.size())  # [64, 307, 307]
        a_t = F.softmax(s_t, 1).squeeze()
        # print("a:", a_t.size())  # [64, 307, 307]
        c_t = torch.bmm(a_t, g).squeeze()
        # print("c:", c_t.size())  # [64, 307, 800]

        # torch.cat(tensors, dim=0, out=None)
        out = torch.cat((g, c_t, g * c_t), dim=2)
        out = self.linear2(out)
        out = F.rrelu(out)

        # print("out size: ", out.size())
        # out = F.dropout(out, self.training)

        return out
Example #15
    def matching_layer(self, title, intention, context_mf, grid):
        # Highway network for mf
        concat_seq = list()
        if not self.config.no_context:
            concat_seq.append(grid.to(self.device))
            concat_seq.insert(0, context_mf)
        if not self.config.no_intention:
            concat_seq.insert(0, intention)
        else:
            if not self.config.no_title:
                concat_seq.insert(0, title)
        assert len(concat_seq) > 0

        if len(concat_seq) > 1:
            concat = torch.cat(concat_seq, 1)
        else:
            concat = concat_seq[0]

        nonl = F.rrelu(self.mt_nonl(concat))
        gate = torch.sigmoid(self.mt_gate(concat))
        output = torch.mul(gate, nonl) + torch.mul(1 - gate, concat)
        output = F.dropout(output, p=self.config.output_dr,
                           training=self.training)

        return self.output_fc1(output)
Example #16
 def test_rrelu(self):
     inp = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
     output = F.rrelu(inp,
                      lower=1. / 8,
                      upper=1. / 3,
                      training=False,
                      inplace=False)
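For reference, F.rrelu(input, lower, upper, training, inplace) multiplies negative inputs by a slope drawn uniformly from [lower, upper] when training=True, and by the fixed midpoint (lower + upper) / 2 when training=False, so the call above is deterministic. A minimal CPU check of both modes:

import torch
import torch.nn.functional as F

x = torch.randn(4, 8)
y_eval = F.rrelu(x, lower=1. / 8, upper=1. / 3, training=False)  # negative slope fixed at (1/8 + 1/3) / 2
y_train = F.rrelu(x, lower=1. / 8, upper=1. / 3, training=True)  # negative slope sampled randomly
assert torch.equal(y_eval[x >= 0], x[x >= 0])                    # positive inputs pass through unchanged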
Example #17
 def evaluate(self, f_embs, s_embs):
     if self.mode == 'all':
         x = torch.cat([f_embs, s_embs], dim=1)
         x = F.rrelu(self.Linear1(x))
         x = F.rrelu(self.Linear2(x))
         x = F.rrelu(self.Linear3(x))
         cos_x = self.cos(f_embs, s_embs).unsqueeze(1)
         dot_x = torch.mul(f_embs, s_embs).sum(dim=1, keepdim=True)
         pdist_x = self.pdist(f_embs, s_embs)
         x = torch.cat([x, cos_x, dot_x, pdist_x], dim=1)
     elif self.mode == 'cos':
         x = self.cos(f_embs, s_embs)
     elif self.mode == 'dot':
         x = torch.mul(f_embs, s_embs).sum(dim=1)
     elif self.mode == 'pdist':
         x = -self.pdist(f_embs, s_embs).squeeze()
     return x
Example #18
 def intention_layer(self, user, dur, title):
     # Highway network on concat
     if not self.config.no_title:
         concat = torch.cat((user, dur, title), 1)
     else:
         concat = torch.cat((user, dur), 1)
     nonl = F.rrelu(self.it_nonl(concat))
     gate = torch.sigmoid(self.it_gate(concat))
     return torch.mul(gate, nonl) + torch.mul(1 - gate, concat)
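it_nonl and it_gate must map the concatenated input back to the same width for the gated sum to be well defined. A constructor sketch (the dimension arguments and use_title flag are hypothetical stand-ins for the original config):

import torch.nn as nn

class IntentionLayer(nn.Module):
    def __init__(self, user_dim, dur_dim, title_dim, use_title=True):
        super().__init__()
        in_dim = user_dim + dur_dim + (title_dim if use_title else 0)
        # Highway layer: both transforms keep the concatenated width so that
        # gate * nonl + (1 - gate) * concat type-checks.
        self.it_nonl = nn.Linear(in_dim, in_dim)
        self.it_gate = nn.Linear(in_dim, in_dim)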
Example #19
 def activation_function(self, x, activation):
     if activation == 'linear': x = x
     elif activation == 'sigmoid': x = torch.sigmoid(x)
     elif activation == 'relu': x = F.relu(x)
     elif activation == 'rrelu': x = F.rrelu(x)
     elif activation == 'tanh': x = torch.tanh(x)
     elif activation == 'elu': x = F.elu(x)
     else: raise NotImplementedError
     return x
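An equivalent table-driven variant, written as a free function, can make the dispatch a little easier to extend; this is a sketch, not taken from the original code:

import torch
import torch.nn.functional as F

def activation_function(x, activation):
    # standalone version of the dispatcher above
    table = {
        'linear': lambda t: t,
        'sigmoid': torch.sigmoid,
        'relu': F.relu,
        'rrelu': F.rrelu,
        'tanh': torch.tanh,
        'elu': F.elu,
    }
    if activation not in table:
        raise NotImplementedError(activation)
    return table[activation](x)

h = activation_function(torch.randn(16, 64), 'rrelu')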
Example #20
 def forward(self, x):
     # print(x.shape)  # (8, 32) --> 1D vector of 8*32 = 256
     x = x.view(-1, 256)                                                     # this number here is [D] and should be equal to [C]
     x = F.leaky_relu(self.fc1(x))
     x = F.relu_(self.fc2(x))
     x = F.relu6(self.fc3(x))
     x = F.rrelu(self.fc4(x))
     x = torch.tanh(self.fc5(x)) #self.fc6(x)
     return x
Example #21
 def forward(self, data,IsTrain=False):
     out = F.rrelu(self.lin_node(self.norm_x(data.x)))
     edge_attr = F.rrelu(self.lin_edge_attr(data.edge_attr))
     h = out.unsqueeze(0)
     # edge_*3 stores each undirected edge only once, so append the reversed (j, i) pairs to the (i, j) edges
     edge_index3 = torch.cat([data.edge_index3,data.edge_index3[[1,0]]],1)
     edge_attr3 = torch.cat([data.edge_attr3,data.edge_attr3],0)
     
     for i in range(2):
         # using bonding as edge
         m = F.rrelu(self.conv1(out, data.edge_index, edge_attr))
         out, h = self.gru1(m.unsqueeze(0), h)
         out = out.squeeze(0)
     
     out = self.lin_covert(out)
     h = out.unsqueeze(0)
     for i in range(2):
         # using coupling as edge
         m = F.rrelu(self.conv2(out, edge_index3, edge_attr3))
         out, h = self.gru2(m.unsqueeze(0), h)
         out = out.squeeze(0)  
         
     n = data.edge_index3.shape[1]
     range_ = torch.arange(n)
     batch_index = torch.cat([range_,range_]).to('cuda:0')
     temp = out[data.edge_index3].reshape(2*n,-1) # (2*n_target,d)
     yhat = self.set2set(temp,batch_index)  
     yhat = self.norm(yhat)
     
     weight = self.lin_weight(data.edge_attr3)
     bias = self.lin_bias(data.edge_attr3)
     yhat = torch.sum(yhat * weight,1,keepdim=True) + bias
     yhat = yhat.squeeze(1)
     
     if IsTrain:
         k = torch.sum(data.edge_attr3,0)
         nonzeroIndex = torch.nonzero(k).squeeze(1)
         abs_ = torch.abs(data.y-yhat).unsqueeze(1)
         loss = torch.sum(torch.log(torch.sum(abs_ * data.edge_attr3[:,nonzeroIndex],0)/k[nonzeroIndex]))/nonzeroIndex.shape[0]
         return loss
     else:
         return yhat
Example #22
 def forward(self, data,IsTrain=False):
     out = F.rrelu(self.lin_node(self.norm_x(data.x)))
     edge_attr = F.rrelu(self.lin_edge_attr(data.edge_attr))
     h = out.unsqueeze(0)
     # edge_*3 stores each undirected edge only once, so append the reversed (j, i) pairs to the (i, j) edges
     edge_index3 = torch.cat([data.edge_index3,data.edge_index3[[1,0]]],1)
     edge_attr3 = torch.cat([data.edge_attr3,data.edge_attr3],0)
     
     for i in range(2):
         # using bonding as edge
         m = F.rrelu(self.conv1(out, data.edge_index, edge_attr))
         out, h = self.gru1(m.unsqueeze(0), h)
         out = out.squeeze(0)
     
     out = self.lin_covert(out)
     h = out.unsqueeze(0)
     for i in range(2):
         # using coupling as edge
         m = F.rrelu(self.conv2(out, edge_index3, edge_attr3))
         out, h = self.gru2(m.unsqueeze(0), h)
         out = out.squeeze(0)  
         
     coupling_batch_index = data.batch[data.edge_index3[0]]
     pool = self.set2set(out, data.batch) # (m,d)
     pool = pool[coupling_batch_index] # (n_target,d)
     temp = out[data.edge_index3] # (2,n_target,d)
     yhat = torch.cat([temp.mean(0),temp[0]*temp[1],(temp[0]-temp[1])**2],1)  
     
     yhat = self.norm(yhat)
     yhat = self.head(torch.cat([data.edge_attr3,pool],1),yhat)
     yhat = yhat.squeeze(1)
     
     if IsTrain:
         k = torch.sum(data.edge_attr3,0)
         nonzeroIndex = torch.nonzero(k).squeeze(1)
         abs_ = torch.abs(data.y-yhat).unsqueeze(1)
         loss = torch.sum(torch.log(torch.sum(abs_ * data.edge_attr3[:,nonzeroIndex],0)/k[nonzeroIndex]))/nonzeroIndex.shape[0]
         return loss
     else:
         return yhat
Example #23
	def forward_once(self, x):
		x = F.rrelu(self.fc1(x))
		x = F.rrelu(self.drp(x))
		x = F.rrelu(self.fc2(x))
		x = F.rrelu(self.fcc(x))
		x = F.rrelu(self.fc3(x))
		x = F.rrelu(self.fc4(x))
		return self.fc5(x)
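forward_once is the shared branch of a siamese network; the model's full forward presumably runs both inputs through the same weights, roughly like this sketch:

    def forward(self, x1, x2):
        # run both inputs through the shared branch defined above
        out1 = self.forward_once(x1)
        out2 = self.forward_once(x2)
        return out1, out2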
Example #24
    def forward(self, data,IsTrain=False):
        out = F.rrelu(self.lin_node(self.norm_x(data.x)))
        # edge_*3 stores each undirected edge only once, so append the reversed (j, i) pairs to the (i, j) edges
        edge_index3 = torch.cat([data.edge_index3,data.edge_index3[[1,0]]],1)
        
        m = F.rrelu(self.conv1(out, data.edge_index))
        out = out + m
        out = self.lin_covert1(out)
        
        m = F.rrelu(self.conv2(out, data.edge_index))
        out = out + m
        out = self.lin_covert2(out)        
        
        m = F.rrelu(self.conv3(out, edge_index3))
        out = out + m
        out = self.lin_covert3(out)          

        m = F.rrelu(self.conv4(out, edge_index3))
        out = out + m
        out = self.lin_covert4(out)
            
        temp = out[data.edge_index3] # (2,N,d)
        yhat = torch.cat([temp.mean(0),temp[0]*temp[1],(temp[0]-temp[1])**2],1)
        yhat = self.norm(yhat)
        weight = self.lin_weight(data.edge_attr3)
        bias = self.lin_bias(data.edge_attr3)
        yhat = torch.sum(yhat * weight,1,keepdim=True) + bias
        yhat = yhat.squeeze(1)
        
        if IsTrain:
            k = torch.sum(data.edge_attr3,0)
            nonzeroIndex = torch.nonzero(k).squeeze(1)
            abs_ = torch.abs(data.y-yhat).unsqueeze(1)
            loss = torch.sum(torch.log(torch.sum(abs_ * data.edge_attr3[:,nonzeroIndex],0)/k[nonzeroIndex]))/nonzeroIndex.shape[0]
            return loss
        else:
            return yhat                    
Example #25
    def forward(self, nodes):
        """
        Generates embeddings for a batch of nodes for the next iteration.

        Args:
            nodes (Tensor): list of nodes
        """

        # collect aggregated neighborhood vector for each node
        feat_agg_neighbour = self.aggregator.forward(
            nodes, [self.adj_lists[node] for node in nodes])

        # apply nonlinear activation function with added randomness
        feat_self = F.rrelu(self.weight.mm(feat_agg_neighbour.t()))

        return feat_self
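Here self.weight is a plain parameter matrix rather than an nn.Linear, since forward multiplies it with the transposed aggregated features. A constructor sketch under that assumption (the size and aggregator/adjacency arguments are hypothetical):

import torch
import torch.nn as nn

class SageLayer(nn.Module):
    def __init__(self, agg_feat_dim, embed_dim, aggregator, adj_lists):
        super().__init__()
        self.aggregator = aggregator   # returns a (num_nodes, agg_feat_dim) matrix
        self.adj_lists = adj_lists     # maps node id -> iterable of neighbour ids
        # weight.mm(feats.t()) in forward yields an (embed_dim, num_nodes) output
        self.weight = nn.Parameter(torch.empty(embed_dim, agg_feat_dim))
        nn.init.xavier_uniform_(self.weight)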
Example #26
 def forward(self, A_list, node_embs_list, mask_list=None):
     GCN_weights = self.GCN_init_weights
     out_seq = []
     for t, Ahat in enumerate(A_list):
         node_embs = node_embs_list[t]
         # first evolve the weights from the initial and use the new weights with the node_embs
         if self.egcn_type == 'EGCNO':
             GCN_weights = self.evolve_weights(GCN_weights)
         else:  # 'EGCNH'
             if mask_list is not None:
                 GCN_weights = self.evolve_weights(GCN_weights, node_embs,
                                                   mask_list[t])
             else:
                 GCN_weights = self.evolve_weights(GCN_weights, node_embs)
         node_embs = F.rrelu(Ahat.matmul(node_embs.matmul(GCN_weights)))
         # node_embs = torch.sigmoid(Ahat.matmul(node_embs.matmul(GCN_weights)))
         out_seq.append(node_embs)
     return out_seq
Example #27
 def forward(self, x):
     x = F.max_pool2d(F.rrelu(self.conv1(x)), (2, 2))
     x = F.max_pool2d(F.rrelu(self.conv2(x)), (2, 2))
     x = F.rrelu(self.conv3(x))
     x = F.max_pool2d(F.rrelu(self.conv4(x)), (2, 2), padding=1)
     x = x.view(-1, self.num_flat_features(x))
     x = F.rrelu(self.fc1(x))
     x = self.drop1(x)
     x = F.rrelu(self.fc2(x))
     x = self.drop2(x)
     x = self.bn(x)
     x = F.softmax(self.fc3(x), dim=1)
     return x
Example #28
    def forward(self,
                x):  #use randomized relu (rrelu) or else this won't work!
        x = self.pool(F.rrelu(self.conv1_bn(self.conv1(x))))  #N 32 x 32
        x = self.pool(F.rrelu(self.conv2_bn(self.conv2(x))))  #2*N 16 x 16
        x = self.pool(F.rrelu(self.conv3_bn(self.conv3(x))))  #4*N 8 x 8
        x = self.pool(F.rrelu(self.conv4_bn(self.conv4(x))))  #4*N 3 x 3
        x = x.view(self.batch_num, -1)  #straighten the images

        x = F.rrelu(self.fc1_bn(self.fc1(x)))  # 1000
        x = F.dropout(x, p=0.25, training=self.training)
        x = F.rrelu(self.fc2_bn(self.fc2(x)))  # 1000
        x = F.dropout(x, p=0.25, training=self.training)

        x_cat = F.softmax(self.fc3_cat(x), dim=1)  # z_dim_cat

        return x_cat
Example #29
    def forward(self, input, L, M, dt):
        x = input
        z = input.unsqueeze(2)
        idx = 0
        for layer in self.fc_hidden_layers:
            if self.activation_vec[idx] == 'linear': x = layer(x)
            elif self.activation_vec[idx] == 'sigmoid':
                x = torch.sigmoid(layer(x))
            elif self.activation_vec[idx] == 'relu':
                x = F.relu(layer(x))
            elif self.activation_vec[idx] == 'rrelu':
                x = F.rrelu(layer(x))
            elif self.activation_vec[idx] == 'tanh':
                x = torch.tanh(layer(x))
            elif self.activation_vec[idx] == 'sin':
                x = torch.sin(layer(x))
            elif self.activation_vec[idx] == 'elu':
                x = F.elu(layer(x))
            else:
                raise NotImplementedError
            idx += 1
        A_out, B_out = x[:, 0:self.dim_in * self.dim_in], x[:, self.dim_in *
                                                            self.dim_in:]
        A_out, B_out = A_out.view(-1, self.dim_in, self.dim_in), B_out.view(
            -1, self.dim_in, self.dim_in)

        DE = torch.bmm(A_out, z)
        DS = torch.bmm(B_out, z)
        L_batch = L.expand(z.size(0), z.size(1), z.size(1))
        M_batch = M.expand(z.size(0), z.size(1), z.size(1))
        z1_out = z + dt * (torch.bmm(L_batch, DE) + torch.bmm(M_batch, DS))

        deg_E = torch.bmm(M_batch, DE)
        deg_S = torch.bmm(L_batch, DS)

        return z1_out.view(-1, self.dim_in), deg_E.view(
            -1, self.dim_in), deg_S.view(-1, self.dim_in)
Example #30
 def forward(self, x):
     y1 = F.rrelu(self.h1_layer(x))
     y2 = F.rrelu(self.h2_layer(y1))
     y3 = self.o_layer(y2)
     rezultat = F.rrelu(y3)
     return rezultat