Example #1
    def forward(self, x):
        x = F.celu(self.input(x))
        x = F.celu(self.hidden_1(x))
        x = F.celu(self.hidden_2(x))
        x = F.celu(self.hidden_3(x))
        x = self.output(x)
        return x
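All of these snippets use F.celu, which computes celu(x, α) = max(0, x) + min(0, α·(exp(x/α) − 1)). A quick sanity check of that formula (a sketch, not part of the original examples):

import torch
import torch.nn.functional as F

x = torch.linspace(-3, 3, 7)
alpha = 1.0
manual = torch.clamp(x, min=0) + torch.clamp(alpha * (torch.exp(x / alpha) - 1), max=0)
assert torch.allclose(F.celu(x, alpha=alpha), manual)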
Example #2
    def forward(self, x):
        x = self.pool(F.celu(self.conv1(x)))
        x = self.pool(F.celu(self.conv2(x)))
        x = x.view(-1, 32 * 38 * 23)
        x = F.relu(self.fc1(x))
        x = self.fc3(x)
        return x
Example #3
    def forward(self, x):
        x_short = x
        x = F.celu(self.bn1(self.conv1(x)), alpha=0.075)
        x = F.celu(self.bn2(self.conv2(x)), alpha=0.075)
        x = self.bn3(self.conv3(x))
        x += self.shortcut(x_short)  # residual connection
        x = F.celu(x, alpha=0.075)
        return x
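A module definition the residual forward above would fit; the channel counts and the 1×1-conv shortcut below are assumptions, not taken from the original source:

import torch.nn as nn
import torch.nn.functional as F

class ResidualBlock(nn.Module):
    def __init__(self, in_ch=64, out_ch=64, stride=1):
        super().__init__()
        self.conv1 = nn.Conv2d(in_ch, out_ch, 3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(out_ch)
        self.conv2 = nn.Conv2d(out_ch, out_ch, 3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_ch)
        self.conv3 = nn.Conv2d(out_ch, out_ch, 3, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(out_ch)
        # project the shortcut when the shape changes, else pass through
        self.shortcut = (nn.Conv2d(in_ch, out_ch, 1, stride=stride, bias=False)
                         if stride != 1 or in_ch != out_ch else nn.Identity())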
Example #4
    def forward(self, x):
        """
        Applies the forward pass.
        """
        x = self.pool(F.celu(self.conv1(x)))
        x = self.pool(F.celu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)
        x = F.celu(self.fc1(x))
        x = F.celu(self.fc2(x))
        x = F.celu(self.fc3(x))
        return x
Example #5
    def forward(self, x):
        """
        Applies the forward pass.
        """
        x = torch.unsqueeze(x[:, 0, :, :], 1)  # restore a channel dim for conv1
        x = self.pool1(F.celu(self.conv1(x)))
        x = self.pool2(F.celu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)
        x = F.celu(self.fc1(x))
        x = F.celu(self.fc2(x))
        x = F.celu(self.fc3(x))
        return x
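Examples #4 and #5 follow the classic LeNet-style layout from the PyTorch tutorials (Example #4 uses a single shared self.pool). One definition both forwards would fit; the single-channel 32×32 input and the layer widths are assumptions:

import torch.nn as nn

class LeNetStyle(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 6, 5)   # 1x32x32 -> 6x28x28
        self.pool1 = nn.MaxPool2d(2, 2)   # -> 6x14x14
        self.conv2 = nn.Conv2d(6, 16, 5)  # -> 16x10x10
        self.pool2 = nn.MaxPool2d(2, 2)   # -> 16x5x5, matching view(-1, 16 * 5 * 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)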
Example #6
    def decode(self, z):
        x = F.celu(self.recons(z))  # [batch, 256]

        x = x.view(-1, 256, 1, 1)

        x = F.celu(self.t_conv1(x))
        x = F.celu(self.t_conv2(x))
        x = F.celu(self.t_conv3(x))
        x = F.celu(self.t_conv4(x))
        x = torch.clamp(torch.sigmoid(self.t_conv5(x)), 0, 1)  # sigmoid already maps to [0, 1]; the clamp is a redundant safeguard

        return x
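The t_conv layers are presumably transposed convolutions upsampling from the 1×1 map; a definition the decode above would fit, with the latent size and kernel/stride choices as assumptions:

import torch.nn as nn

class Decoder(nn.Module):
    def __init__(self, z_dim=32):
        super().__init__()
        self.recons = nn.Linear(z_dim, 256)
        self.t_conv1 = nn.ConvTranspose2d(256, 128, 4)                      # 1x1 -> 4x4
        self.t_conv2 = nn.ConvTranspose2d(128, 64, 4, stride=2, padding=1)  # -> 8x8
        self.t_conv3 = nn.ConvTranspose2d(64, 32, 4, stride=2, padding=1)   # -> 16x16
        self.t_conv4 = nn.ConvTranspose2d(32, 16, 4, stride=2, padding=1)   # -> 32x32
        self.t_conv5 = nn.ConvTranspose2d(16, 3, 4, stride=2, padding=1)    # -> 64x64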
Example #7
    def forward(self, x):
        # shape(x) = N, L, C
        # conv1d needs N,C,L
        x = x.transpose(1, 2)
        x = self.conv_blocks(x)

        x = x.flatten(start_dim=1)
        h_phase = F.celu(self.fc_hp(x))
        h_dm = F.celu(self.fc_hdm(x))
        ## add output activations if desired, e.g.:
        # phase = F.relu(self.out_p(h_phase)) or torch.sigmoid(self.out_p(h_phase))
        phase = self.out_p(h_phase)
        dm = self.out_dm(h_dm)

        return phase, dm
Example #8
    def encode(self, x):
        assert len(x.shape) == 4

        x = F.celu(self.conv1(x))
        x = F.celu(self.conv2(x))
        x = F.celu(self.conv3(x))
        x = F.celu(self.conv4(x))
        x = F.celu(self.conv5(x))

        x = x.view(-1, 256 * 4)

        mu = self.mu(x)
        sigma = torch.exp(torch.clamp(self.log_std(x), -3, 3))

        return mu, sigma
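Downstream, an encoder returning (mu, sigma) is typically paired with the reparameterization trick; a sketch of the sampling step, which is not part of the snippet above:

import torch

mu, sigma = torch.zeros(2, 1024), torch.ones(2, 1024)  # stand-ins for encode(x)
z = mu + sigma * torch.randn_like(sigma)  # differentiable sample from N(mu, sigma^2)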
Example #9
    def forward(self, x):
        #x = self.batch_norm1(x)
        x = self.dropout1(x)
        x = F.celu(self.dense1(x), alpha=0.06)
        x = x.reshape(x.shape[0], self.cha_1, self.cha_1_reshape)
        #x = self.batch_norm_c1(x)
        x = self.dropout_c1(x)
        x = F.relu(self.conv1(x))
        x = self.ave_po_c1(x)
        x = self.batch_norm_c2(x)
        x = self.dropout_c2(x)
        x = F.relu(self.conv2(x))
        x_s = x
        x = self.batch_norm_c2_1(x)
        x = self.dropout_c2_1(x)
        x = F.relu(self.conv2_1(x))
        x = self.batch_norm_c2_2(x)
        x = self.dropout_c2_2(x)
        x = F.relu(self.conv2_2(x))
        x = x * x_s
        x = self.max_po_c2(x)
        x = self.flt(x)
        x = self.batch_norm3(x)
        x = self.dropout3(x)
        x = self.dense3(x)
        return x
Example #10
    def forward(self, x, edge_index, edge_attr):
        h = x.unsqueeze(0)
        for i in range(self.time_step):
            m = F.celu(self.conv(x, edge_index, edge_attr))
            x, h = self.gru(m.unsqueeze(0), h)
            x = self.ln(x.squeeze(0))
        return x
Example #11
    def forward(self, data):
        x = F.celu(self.lin0(data.x))
        for conv in self.convs:
            x = x + conv(x, data.edge_index, data.edge_attr)
        x = self.set2set(x, data.batch)
        x = self.lin1(x)
        return x.view(-1)
Example #12
def celu(input, *args, **kwargs):
    # Apply F.celu to the feature matrix of the sparse tensor, then rewrap the
    # result so it shares the input's coordinate map.
    output = F.celu(input.F, *args, **kwargs)
    return SparseTensor(
        output,
        coordinate_map_key=input.coordinate_map_key,
        coordinate_manager=input.coordinate_manager,
    )
Example #13
    def forward(self, x):
        # Straight-through trick: the forward value equals ReLU (x_celu plus the
        # detached gap to ReLU), while gradients flow only through the CELU term.
        x_celu = F.celu(x)

        x_gap = F.relu(x) - x_celu

        x_out = x_celu + x_gap.data

        return x_out
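A quick check of what this trick buys (a sketch, not part of the original example): the forward value matches ReLU, while gradients follow CELU, because the gap term is detached (.data above, equivalently .detach()):

import torch
import torch.nn.functional as F

x = torch.randn(5, requires_grad=True)
x_celu = F.celu(x)
out = x_celu + (F.relu(x) - x_celu).detach()

assert torch.allclose(out, F.relu(x))  # forward equals ReLU
out.sum().backward()
with torch.no_grad():
    # d/dx celu(x) with alpha=1: 1 for x > 0, exp(x) otherwise
    expected = torch.where(x > 0, torch.ones_like(x), x.exp())
assert torch.allclose(x.grad, expected)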
Example #14
    def forward(self, data):
        x = F.celu(self.lin0(data.x))
        for conv in self.convs:
            x = x + F.dropout(conv(x, data.edge_index, data.edge_attr),
                              p=self.dropout,
                              training=self.training)
        x = self.set2set(x, data.batch)
        x = self.out(F.dropout(x, p=self.dropout, training=self.training))
        return x
Example #15
    def forward_batch(self, data, edge_index, batch, alpha=None):
        x1 = F.celu(self.conv1(data, edge_index))
        x1 = self.bn1(x1)

        x2 = F.celu(self.conv2(x1, edge_index))
        x2 = self.bn2(x2)

        x3 = F.celu(self.conv3(x2, edge_index))
        x3 = self.bn3(x3)

        x_embedding = gmp(x3, batch)
        x_embedding_mean = F.celu(self.linear_before(x_embedding))
        x_embedding_drop = F.dropout(x_embedding_mean,
                                     p=0.1,
                                     training=self.training)
        mean = self.linear_mean(x_embedding_drop)
        mean = self.out_layer(mean)
        return mean
Example #16
    def forward(self, inputs, T):
        # inputs: B x T x nf_in
        inputs = inputs.reshape(-1, T, self.nf_in)
        inputs = inputs.transpose(1, 2)

        # inputs: B x nf_in x T
        x = F.celu(self.conv1(inputs))
        x = self.bn1(x)
        x = F.dropout(x, self.dropout_prob, training=self.training)
        x = self.pool(x)
        # x = F.celu(self.conv2(x))
        # x = self.bn2(x)
        # x = F.dropout(x, self.dropout_prob, training=self.training)
        # x = self.pool(x)
        x = F.celu(self.conv3(x))
        x = self.bn3(x)
        pred = self.conv_predict(x)
        # ret: B x nf_out
        ret = pred.max(dim=2)[0]
        return ret
Example #17
    def forward(self, x):
        assert len(x.shape) == 2

        x1 = F.celu(self.q1_fc1(x))
        x1_V = F.celu(self.q1_fc2_V(x1))
        x1_A = F.celu(self.q1_fc2_A(x1))

        x1_V = self.q1_V(x1_V)
        x1_A = self.q1_A(x1_A)

        x1_A_mean = torch.mean(x1_A, dim=1, keepdim=True)
        x1_A_mean = torch.cat([x1_A_mean] * self.action_dim, dim=1)

        x1_A = x1_A - x1_A_mean

        x1 = x1_V + x1_A

        x2 = F.celu(self.q2_fc1(x))
        x2_V = F.celu(self.q2_fc2_V(x2))
        x2_A = F.celu(self.q2_fc2_A(x2))

        x2_V = self.q2_V(x2_V)
        x2_A = self.q2_A(x2_A)

        x2_A_mean = torch.mean(x2_A, dim=1, keepdim=True)
        x2_A_mean = torch.cat([x2_A_mean] * self.action_dim, dim=1)

        x2_A = x2_A - x2_A_mean

        x2 = x2_V + x2_A

        return x1, x2
Example #18
    def forward(self, x):
        x = x.view(-1, self.input_size)
        for lay_id, lay in enumerate(self.fc):
            x = lay(x)
            if self.nonlin == 'relu':
                x = F.relu(x)
            elif self.nonlin == 'leakyrelu':
                x = F.leaky_relu(x, negative_slope=0.03)
            elif self.nonlin == 'tanh':
                x = torch.tanh(x)
            elif self.nonlin == 'celu':
                x = F.celu(x)
        return x
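The if-chain above can also be written as a dict lookup, in the spirit of Example #29 below; a sketch using the same nonlin names:

import torch
import torch.nn.functional as F

NONLIN = {
    'relu': F.relu,
    'leakyrelu': lambda x: F.leaky_relu(x, negative_slope=0.03),
    'tanh': torch.tanh,
    'celu': F.celu,
}

x = torch.randn(8, 16)
x = NONLIN['celu'](x)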
Example #19
    def forward(self,
                image_tensor: torch.Tensor,
                agent_state_vector: torch.Tensor,
                noise: torch.Tensor = None) -> torch.Tensor:
        """
        Forward pass of the model.
        :param image_tensor: Tensor of images shape [batch_size, n_channels, length, width].
        :param agent_state_vector: Tensor of floats representing the agent state,
            shape [batch_size, 3].
        :param noise: Optional noise tensor; when provided it is concatenated to the features.
        :returns: Tensor of dimension [batch_size, number_of_modes * number_of_predictions_per_mode + number_of_modes]
            storing the predicted trajectory and mode probabilities. Mode probabilities are normalized to sum
            to 1 during inference.
        """

        backbone_features = self.backbone(input_tensor=image_tensor)

        features = torch.cat([backbone_features, agent_state_vector], dim=1)
        self.env_feature = features
        if noise is not None:
            features = torch.cat([features, noise], dim=1)

        if self.output_activation == 'sigmoid':
            predictions = f.sigmoid(self.fc2(f.celu(self.fc1(features))))
        elif self.output_activation == 'linear':
            predictions = self.fc2(f.celu(self.fc1(features)))
        elif self.output_activation == 'tanh':
            predictions = torch.tanh(self.fc2(f.celu(self.fc1(features))))
        else:
            raise ValueError(f"activation {self.output_activation} is not supported")
            
        # Normalize the probabilities to sum to 1 for inference.
        mode_probabilities = predictions[:, -self.num_modes:].clone()
        if not self.training:
            mode_probabilities = f.softmax(mode_probabilities, dim=-1)
            
        predictions = predictions[:, :-self.num_modes]

        return torch.cat((predictions, mode_probabilities), 1)
Example #20
    def forward(self, x, dropout_rate=0.5, dropout_mask=None):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = F.celu(self.fc(x))
        if dropout_mask is not None:
            x = x * dropout_mask(x, dropout_rate, 0)
        else:
            x = F.dropout(x, self.dropout_rate)
        x = F.celu(self.fc2(x))
        x = F.dropout(x, self.dropout_rate)
        x = self.fc3(x)

        return x
Example #21
    def forward(self, x):
        """
        Applies the forward pass.
        """
        x0 = torch.unsqueeze(x[:, 0, :, :], 1)  # restore a channel dim for conv1
        x0 = self.pool1(F.celu(self.conv1(x0)))
        x0 = self.pool2(F.celu(self.conv2(x0)))
        x0 = x0.view(-1, 16 * 5 * 5)
        x0 = F.celu(self.fc1(x0))
        x0 = F.celu(self.fc2(x0))
        x0 = F.celu(self.fc3(x0))

        x1 = torch.unsqueeze(x[:, 1, :, :], 1)
        x1 = self.pool1(F.celu(self.conv1(x1)))
        x1 = self.pool2(F.celu(self.conv2(x1)))
        x1 = x1.view(-1, 16 * 5 * 5)
        x1 = F.celu(self.fc1(x1))
        x1 = F.celu(self.fc2(x1))
        x1 = F.celu(self.fc3(x1))

        x = torch.cat((x0, x1), 1)
        x = self.fc4(x)
        return x
Example #22
    def forward(self, species_aev: Tuple[Tensor, Tensor]) -> SpeciesEnergies:

        species, aev = species_aev

        # Reshape: [num_mols, num_atoms, num_features] --> [num_mols, num_atoms, 1, num_features, 1]
        vectors = aev.unsqueeze(-2).unsqueeze(-1)

        vectors = batchedLinear(vectors, self.layer0_weights,
                                self.layer0_biases)  # Linear 0
        vectors = F.celu(vectors, alpha=0.1)  # CELU   1
        vectors = batchedLinear(vectors, self.layer2_weights,
                                self.layer2_biases)  # Linear 2
        vectors = F.celu(vectors, alpha=0.1)  # CELU   3
        vectors = batchedLinear(vectors, self.layer4_weights,
                                self.layer4_biases)  # Linear 4
        vectors = F.celu(vectors, alpha=0.1)  # CELU   5
        vectors = batchedLinear(vectors, self.layer6_weights,
                                self.layer6_biases)  # Linear 6

        # Sum: [num_mols, num_atoms, num_models, 1, 1] --> [num_mols, num_models]
        # Mean: [num_mols, num_models] --> [num_mols]
        energies = torch.mean(torch.sum(vectors, (1, 3, 4)), 1)

        return SpeciesEnergies(species, energies)
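batchedLinear is not shown on this page; a plausible definition consistent with the shape comments above (an assumption, not the original implementation): with x of shape [..., 1, in_features, 1], weights [num_models, out_features, in_features], and biases [num_models, out_features, 1], broadcasting applies every model's linear layer at once:

import torch
from torch import Tensor

def batchedLinear(x: Tensor, weights: Tensor, biases: Tensor) -> Tensor:
    # [..., 1, in, 1] @ [models, out, in] broadcasts to [..., models, out, 1]
    return torch.matmul(weights, x) + biases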
Example #23
    def forward(self, inputs, T):
        # inputs: B x T x nf_in
        # inputs = inputs.reshape(-1, T, self.nf_in)
        # inputs = inputs.transpose(1, 2)

        x = self.cnn_enc(inputs)
        # x: [BS, T//4, 32, 32]
        x = x.max(dim=1)[0]  # Max pooling [BS, 1, 32, 32]
        x = self.cnn_dec(x)
        # inputs: B x nf_in x T
        x = F.celu(self.conv1(inputs))  # note: restarts from `inputs`, discarding the cnn_dec output
        x = self.bn1(x)
        x = F.dropout(x, self.dropout_prob, training=self.training)
        x = self.pool(x)
        # x = F.celu(self.conv2(x))
        # x = self.bn2(x)
        # x = F.dropout(x, self.dropout_prob, training=self.training)
        # x = self.pool(x)
        x = F.celu(self.conv3(x))
        x = self.bn3(x)
        pred = self.conv_predict(x)
        # ret: B x nf_out
        ret = pred.max(dim=2)[0]
        return ret
Example #24
    def forward(self, x):  # (B, n_in, N)
        if self.conv is not None:
            x = self.conv(x)  # (B, C, N)
        x = torch.transpose(x, 1, 2)  # (B, N, C)

        if self.lstm is not None:
            x_a, _ = self.lstm(x)
            x_a = self.lstm_ln(x_a)
            x_a = self.dropout(F.celu(x_a))  # (B, N, H*2)
            x = x + x_a if self.resnet and x.shape[2] == x_a.shape[2] else x_a

        if self.att is not None:
            x = torch.transpose(x, 0, 1)
            x_a, _ = self.att(x, x, x)
            x = x + x_a
            x = torch.transpose(x, 0, 1)

        return x
Example #25
    def forward(self, z_what):
        """
        
        Args:
            z_what: (B, D)

        Returns:
            glimpse: (B, 3, H, W)
        """
        B, D = z_what.size()
        x = F.celu(self.fc(z_what))
        # (B, 128, E, E)
        x = x.view(B, 128, self.embed_size, self.embed_size)
        x = self.net(x)
        x = torch.sigmoid(x)
        # (B, 3, H, W), (B, 1, H, W)
        o_att, alpha_att = x.split([3, 1], dim=1)

        return o_att, alpha_att
Example #26
    def forward_batch(self, data, edge_index, batch, alpha=None):
        x1 = F.celu(self.conv1(data, edge_index))
        x1 = self.bn1(x1)

        x2 = F.celu(self.conv2(x1, edge_index))
        x2 = self.bn2(x2)

        x3 = F.celu(self.conv3(x2, edge_index))
        x3 = self.bn3(x3)

        x_embedding = gmp(x3, batch)
        x_embedding_mean = F.celu(self.linear_before(x_embedding))
        x_embedding_drop = F.dropout(x_embedding_mean, p=0.1, training=self.training)
        mean = self.linear_mean(x_embedding_drop)

        x_embedding_std = F.celu(self.linear_before_std(x_embedding))
        std = F.celu(self.linear_std(x_embedding_std))

        std = torch.exp(std / 2)  # treat the linear output as a log-variance
        eps = torch.randn_like(std)
        x_sample = gaussian_layer(mean, std, eps)

        return x_sample, mean, std
Example #27
    def test_celu(self):
        inp = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
        output = F.celu(inp, alpha=1.0, inplace=False)
Example #28
    def forward(self, x):
        assert len(x.shape) == 4

        x1 = F.celu(self.q1_conv1(x))
        x1 = F.celu(self.q1_conv2(x1))
        x1 = F.celu(self.q1_conv3(x1))
        #x1 = F.celu(self.q1_conv4(x1))

        x1 = x1.view(-1, self.fc_input_size)

        x1 = F.celu(self.q1_fc1(x1))
        x1_V = F.celu(self.q1_fc2_V(x1))
        x1_A = F.celu(self.q1_fc2_A(x1))

        x1_V = self.q1_V(x1_V)
        x1_A = self.q1_A(x1_A)

        x1_A_mean = torch.mean(x1_A, dim=1, keepdim=True)
        x1_A_mean = torch.cat([x1_A_mean] * self.action_dim, dim=1)

        x1_A = x1_A - x1_A_mean

        x1 = x1_V + x1_A

        x2 = F.celu(self.q2_conv1(x))
        x2 = F.celu(self.q2_conv2(x2))
        x2 = F.celu(self.q2_conv3(x2))
        #x2 = F.celu(self.q2_conv4(x2))

        x2 = x2.view(-1, self.fc_input_size)

        x2 = F.celu(self.q2_fc1(x2))
        x2_V = F.celu(self.q2_fc2_V(x2))
        x2_A = F.celu(self.q2_fc2_A(x2))

        x2_V = self.q2_V(x2_V)
        x2_A = self.q2_A(x2_A)

        x2_A_mean = torch.mean(x2_A, dim=1, keepdim=True)
        x2_A_mean = torch.cat([x2_A_mean] * self.action_dim, dim=1)

        x2_A = x2_A - x2_A_mean

        x2 = x2_V + x2_A

        return x1, x2
Example #29
import torch
import torch.nn.functional as F
activations = {
    "id": (lambda x: x),
    "relu": F.relu_,
    "hardtanh": F.hardtanh_,
    "relu6": (lambda x: F.relu6_(x, inplace=True)),
    "elu": F.elu_,
    "selu": (lambda x: F.selu(x, inplace=True)),
    "celu": (lambda x: F.celu(x, inplace=True)),
    "leaky_relu": F.leaky_relu_,
    "rrelu": F.rrelu_,
    "gelu": F.gelu,
    "logsigmoid": F.logsigmoid,
    "hardshrink": F.hardshrink,
    "tanhshrink": F.tanhshrink,
    "softsign": F.softsign,
    "softplus": F.softplus,
    "softmin": (lambda x: F.softmin(x, 1)),
    "softmax": (lambda x: F.softmax(x, 1)),
    "softshrink": F.softshrink,
    "gumbel_softmax": F.gumbel_softmax,
    "log_softmax": (lambda x: F.log_softmax(x, 1)),
    "tanh": F.tanh,
    "sigmoid": F.sigmoid,
}
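Usage sketch (not from the original source); note that several entries, including "celu" here, run in place, so clone any input you still need:

import torch

x = torch.randn(4, 8)
y = activations["celu"](x.clone())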
Example #30
    def forward(self, x):
        for layer, layer_norm in zip(self.feature_layers, self.layer_norms):
            x = layer_norm(F.celu(layer(x)))

        return x
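A definition the zip loop above would fit; the layer widths below are assumptions:

import torch.nn as nn

class FeatureNet(nn.Module):
    def __init__(self, sizes=(32, 64, 64)):
        super().__init__()
        self.feature_layers = nn.ModuleList(
            nn.Linear(a, b) for a, b in zip(sizes[:-1], sizes[1:]))
        self.layer_norms = nn.ModuleList(nn.LayerNorm(b) for b in sizes[1:])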