Example #1
    def forward(self, batched_data):
        """"""
        x, edge_index, edge_attr, batch = batched_data.x, batched_data.edge_index, batched_data.edge_attr, batched_data.batch
        # Atom Embedding:
        x = F.leaky_relu_(self.atom_encoder(x))
        edge_attr = self.bond_encoder(edge_attr)

        h = F.elu_(self.atom_convs[0](x, edge_index, edge_attr))
        h = F.dropout(h, p=self.drop_ratio, training=self.training)
        x = self.atom_grus[0](h, x).relu_()

        for conv, gru in zip(self.atom_convs[1:], self.atom_grus[1:]):
            h = F.elu_(conv(x, edge_index))
            h = F.dropout(h, p=self.drop_ratio, training=self.training)
            x = gru(h, x).relu_()

        # Molecule Embedding: connect every atom to a molecule-level
        # "super node" so mol_conv can attend over each molecule's atoms.
        row = torch.arange(batch.size(0), device=batch.device)
        edge_index = torch.stack([row, batch], dim=0)

        out = global_add_pool(x, batch).relu_()
        for _ in range(self.num_timesteps):
            h = F.elu_(self.mol_conv((x, out), edge_index))
            h = F.dropout(h, p=self.drop_ratio, training=self.training)
            out = self.mol_gru(h, out).relu_()

        # Predictor:
        out = F.dropout(out, p=self.drop_ratio, training=self.training)
        return self.graph_pred_linear(out)
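Example #1 above (like most snippets on this page) uses the in-place variant F.elu_, which overwrites its input tensor instead of allocating a new one. A minimal sketch of the difference, independent of the model above:

import torch
import torch.nn.functional as F

x = torch.randn(4)
y = F.elu(x)   # out-of-place: x is left unchanged
z = F.elu_(x)  # in-place: x itself is overwritten and returned
assert z.data_ptr() == x.data_ptr()  # same underlying storage

In-place activations save memory, but should not be applied to tensors that are still needed elsewhere (for example, the identity branch of a residual block, as in Example #4 below).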
Example #2

    def forward(self, x, edge_index, edge_attr, batch):
        """Embed atoms, pool to a molecule embedding, and return predictions."""
        # Atom Embedding:
        x = F.leaky_relu_(self.lin1(x))

        h = F.elu_(self.atom_convs[0](x, edge_index, edge_attr))
        h = F.dropout(h, p=self.dropout, training=self.training)
        x = self.atom_grus[0](h, x).relu_()

        for conv, gru in zip(self.atom_convs[1:], self.atom_grus[1:]):
            h = F.elu_(conv(x, edge_index))
            h = F.dropout(h, p=self.dropout, training=self.training)
            x = gru(h, x).relu_()

        # Molecule Embedding:
        row = torch.arange(batch.size(0), device=batch.device)
        edge_index = torch.stack([row, batch], dim=0)

        out = global_add_pool(x, batch).relu_()
        for _ in range(self.num_timesteps):
            h = F.elu_(self.mol_conv((x, out), edge_index))
            h = F.dropout(h, p=self.dropout, training=self.training)
            out = self.mol_gru(h, out).relu_()

        # Predictor:
        out = F.dropout(out, p=self.dropout, training=self.training)
        return self.lin2(out)
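In both Example #1 and Example #2, x = gru(h, x) treats each node embedding as a GRU hidden state that is refined by the output of the preceding message-passing step. In isolation, with hypothetical sizes (64-dimensional embeddings, 10 nodes):

import torch
import torch.nn as nn
import torch.nn.functional as F

gru = nn.GRUCell(64, 64)         # input size, hidden size
x = torch.randn(10, 64)          # current node states
h = F.elu_(torch.randn(10, 64))  # stand-in for a conv layer's output
x = gru(h, x).relu_()            # GRU update, then in-place ReLU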
Example #3
 def forward(self, inputs):
   # `func` is assumed to be torch.nn.functional imported under that alias.
   out = inputs
   for block in self.blocks:
     out = func.elu_(block(out))  # in-place ELU after each block
     out = func.max_pool1d(out, 2)  # halve the temporal length
   out = func.adaptive_max_pool1d(out, 1)  # global max pool to length 1
   return out.reshape(out.size(0), -1)  # flatten to (batch, channels)
Example #4
    def forward(self, x):
        # Pre-activation bottleneck; input is (128, N, N)
        identity = x

        out = self.bn1(x)
        out = F.elu_(out)
        out = self.proj_down(out)  # (64, N, N)
        out = self.bn2(out)
        out = F.elu_(out)
        out = self.dilation(out)
        out = self.bn3(out)
        out = F.elu_(out)
        out = self.proj_up(out)  # (128, N, N)

        return out + identity
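Example #4 is a pre-activation residual bottleneck (norm, ELU, conv) that projects 128 channels down to 64, applies a dilated convolution, and projects back up to 128 before the skip connection. The layer definitions are not shown, so the following layout is an assumption consistent with the shape comments:

import torch.nn as nn

bn1 = nn.BatchNorm2d(128)
proj_down = nn.Conv2d(128, 64, kernel_size=1)  # (128, N, N) -> (64, N, N)
bn2 = nn.BatchNorm2d(64)
dilation = nn.Conv2d(64, 64, kernel_size=3, padding=2, dilation=2)  # shape-preserving
bn3 = nn.BatchNorm2d(64)
proj_up = nn.Conv2d(64, 128, kernel_size=1)    # (64, N, N) -> (128, N, N)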
Example #5

 def forward(self, x):
     # self.layers is assumed to hold (name, module) pairs.
     for name, layer in self.layers[:-1]:
         x = layer(x)
         if self.activation_type == 'relu':
             x = F.relu_(x)
         elif self.activation_type == 'leaky_relu':
             x = F.leaky_relu_(x)
         elif self.activation_type == 'elu':
             x = F.elu_(x)
         elif self.activation_type == 'selu':
             x = F.selu_(x)
         elif self.activation_type == 'tanh':
             x = torch.tanh_(x)
         elif self.activation_type == 'sigmoid':
             x = torch.sigmoid_(x)
         elif self.activation_type == 'none':
             pass
         else:
             raise ValueError('Unknown activation function "%s"' % self.activation_type)
     x = self.layers[-1][1](x)  # No activation on output of last layer
     x = F.normalize(x, dim=-1)  # Normalize
     return x
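The loop above unpacks self.layers as (name, module) pairs and skips the activation on the final layer before L2-normalizing the output. A minimal layer list that would satisfy this forward pass (hypothetical names and sizes; the original __init__ is not shown):

import torch.nn as nn

layers = [
    ('fc1', nn.Linear(128, 64)),
    ('fc2', nn.Linear(64, 64)),
    ('out', nn.Linear(64, 32)),  # last pair: applied without activation
]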
Example #6
 def test_elu_(self):
     # Smoke test: verify the in-place ELU runs on CUDA with this dtype.
     inp = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
     output = F.elu_(inp, alpha=1.0)
Example #7

 'tanh':
 dict(func=lambda x, **_: torch.tanh_(x),
      alpha=None,
      gain=1.0,
      cuda_idx=4,
      ref='y',
      zero_2nd_grad=False),
 'sigmoid':
 dict(func=lambda x, **_: torch.sigmoid_(x),
      alpha=None,
      gain=1.0,
      cuda_idx=5,
      ref='y',
      zero_2nd_grad=False),
 'elu':
 dict(func=lambda x, **_: F.elu_(x),
      alpha=None,
      gain=1.0,
      cuda_idx=6,
      ref='y',
      zero_2nd_grad=False),
 'selu':
 dict(func=lambda x, **_: torch.selu_(x),
      alpha=None,
      gain=1.0,
      cuda_idx=7,
      ref='y',
      zero_2nd_grad=False),
 'softplus':
 dict(func=lambda x, **_: F.softplus(x),
      alpha=None,
      gain=1.0,
      cuda_idx=8,
      ref='y',
      zero_2nd_grad=False),
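Tables like Example #7 map an activation name to its (usually in-place) function plus metadata such as a gain and an index into a fused CUDA kernel. A small lookup sketch, assuming the table is bound to a name like activation_funcs (the enclosing assignment is not shown in the snippet):

import torch
import torch.nn.functional as F

activation_funcs = {
    'elu': dict(func=lambda x, **_: F.elu_(x), gain=1.0),
}

spec = activation_funcs['elu']
x = torch.randn(4)
y = spec['func'](x) * spec['gain']  # apply the activation, then scale by its gain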
Example #8
 def forward(self, embeds):
     x = F.elu_(self.fc1(embeds))
     x = F.elu_(self.fc2(x))
     logits = torch.log_softmax(x, dim=1)  # log-probabilities over classes
     return logits
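Because this classifier returns log-probabilities rather than raw logits, it pairs with nn.NLLLoss during training (equivalent to nn.CrossEntropyLoss on the raw scores). A quick illustration with made-up shapes:

import torch
import torch.nn as nn

log_probs = torch.log_softmax(torch.randn(8, 5), dim=1)  # (batch, classes)
target = torch.randint(0, 5, (8,))
loss = nn.NLLLoss()(log_probs, target)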