Example No. 1
    def __init__(self, in_channels=8, out_channels=8):
        super(SimpleConv, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.activation = nn.ReLU()
        # Four Conv1d blocks with ReLU activations; kernel_size=5 with
        # stride=1 and padding=2 preserves the sequence length at every layer.
        self.conv_net = nn.Sequential(
            nn.Conv1d(in_channels=self.in_channels,
                      out_channels=16,
                      kernel_size=5,
                      stride=1,
                      padding=2),
            nn.ReLU(),
            nn.Conv1d(in_channels=16,
                      out_channels=32,
                      kernel_size=5,
                      stride=1,
                      padding=2),
            nn.ReLU(),
            nn.Conv1d(in_channels=32,
                      out_channels=16,
                      kernel_size=5,
                      stride=1,
                      padding=2),
            nn.ReLU(),
            nn.Conv1d(in_channels=16,
                      out_channels=self.out_channels,
                      kernel_size=5,
                      stride=1,
                      padding=2),
            nn.ReLU(),
        )
        init_sequential_weights(self.conv_net)
        self.num_halving_layers = 0
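A quick shape check (not part of the original example; sizes chosen for illustration): every Conv1d above uses kernel_size=5 with stride=1 and padding=2, so the sequence length is preserved through the whole stack.

import torch
import torch.nn as nn

# Rebuild the same stack stand-alone to verify shapes (hypothetical driver,
# not part of the original class).
conv_net = nn.Sequential(
    nn.Conv1d(8, 16, kernel_size=5, stride=1, padding=2), nn.ReLU(),
    nn.Conv1d(16, 32, kernel_size=5, stride=1, padding=2), nn.ReLU(),
    nn.Conv1d(32, 16, kernel_size=5, stride=1, padding=2), nn.ReLU(),
    nn.Conv1d(16, 8, kernel_size=5, stride=1, padding=2), nn.ReLU(),
)

x = torch.randn(4, 8, 128)   # (batch, in_channels, length)
print(conv_net(x).shape)     # torch.Size([4, 8, 128]) -- length preserved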
Example No. 2
    def build_weight_model(self):
        """Returns a point-wise function that transforms the
        (in_channels + 1)-dimensional representation to dimensionality
        out_channels.

        Returns:
            torch.nn.Module: Linear layer applied point-wise to channels.
        """
        model = nn.Sequential(nn.Linear(self.in_channels, self.out_channels))
        init_sequential_weights(model)
        return model
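A hedged illustration of "applied point-wise to channels" (not part of the original snippet): nn.Linear acts on the last dimension, so each point's channel vector is transformed independently; the sizes below are assumptions for illustration only.

import torch
import torch.nn as nn

in_channels, out_channels = 9, 8      # illustrative; 9 matches an (8 + 1)-dimensional representation
model = nn.Sequential(nn.Linear(in_channels, out_channels))

r = torch.randn(4, 50, in_channels)   # (batch, num_points, in_channels)
print(model(r).shape)                 # torch.Size([4, 50, 8]) -- applied per point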
Example No. 3
    def __init__(
            self,
            in_channels=8,
            conv_channels=64,  # MGM addition: renamed "hidden_channels" to "conv_channels"
            out_channels=8,
            num_layers=7,
            kernel_size=15,
            separable=True):
        super(DepthSepConv1d, self).__init__()  # MGM addition: replaced "Conv1d" with "DepthSepConv1d" on this line
        self.in_channels = in_channels
        self.conv_channels = conv_channels
        self.out_channels = out_channels  # MGM addition
        self.num_halving_layers = 0  # MGM addition

        # Switch between depthwise separable and standard convolutions
        layer = SeparableConv1d if separable else Conv1d
        self.activation = nn.ReLU()

        # Initialize operations with single hidden layer
        operations = nn.ModuleList([
            layer(in_channels=in_channels,
                  out_channels=conv_channels,
                  kernel_size=kernel_size)
        ])
        operations.append(nn.ReLU())

        # Add hidden layers as required
        for _ in range(1, num_layers - 1):
            operations.append(
                layer(in_channels=conv_channels,
                      out_channels=conv_channels,
                      kernel_size=kernel_size))
            operations.append(nn.ReLU())

        # Add final convolution layer
        operations.append(
            layer(in_channels=conv_channels,
                  out_channels=out_channels,
                  kernel_size=kernel_size))

        # Initialize network
        self.conv_net = nn.Sequential(*operations)
        init_sequential_weights(self.conv_net)
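SeparableConv1d is referenced above but not defined in this snippet; the minimal sketch below assumes the usual depthwise-then-pointwise factorisation with padding that preserves the sequence length (the real class may differ in details such as bias or padding).

import torch
import torch.nn as nn

class SeparableConv1d(nn.Module):
    """Hypothetical depthwise-separable Conv1d: a per-channel (depthwise)
    convolution followed by a 1x1 (pointwise) convolution."""

    def __init__(self, in_channels, out_channels, kernel_size):
        super().__init__()
        self.depthwise = nn.Conv1d(in_channels, in_channels,
                                   kernel_size=kernel_size,
                                   padding=kernel_size // 2,
                                   groups=in_channels)
        self.pointwise = nn.Conv1d(in_channels, out_channels, kernel_size=1)

    def forward(self, x):
        return self.pointwise(self.depthwise(x))

x = torch.randn(2, 8, 64)
print(SeparableConv1d(8, 64, kernel_size=15)(x).shape)  # torch.Size([2, 64, 64])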
Example No. 4
    def __init__(self, input_dim, latent_dim, output_dim):
        super(StandardDecoder, self).__init__()

        self.input_dim = input_dim
        self.latent_dim = latent_dim
        self.output_dim = output_dim

        post_pooling_fn = nn.Sequential(
            BatchLinear(self.latent_dim + self.input_dim, self.latent_dim),
            nn.ReLU(),
            BatchLinear(self.latent_dim, self.latent_dim),
            nn.ReLU(),
            BatchLinear(self.latent_dim, 2 * self.output_dim),
        )
        self.post_pooling_fn = init_sequential_weights(post_pooling_fn)
        self.sigma_fn = nn.functional.softplus
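The decoder's forward pass is not shown; the sketch below is an assumption of how a 2 * output_dim head with a softplus sigma_fn is typically used, with nn.Linear standing in for the undefined BatchLinear and all shapes chosen for illustration.

import torch
import torch.nn as nn

input_dim, latent_dim, output_dim = 1, 128, 1
post_pooling_fn = nn.Sequential(
    nn.Linear(latent_dim + input_dim, latent_dim), nn.ReLU(),
    nn.Linear(latent_dim, latent_dim), nn.ReLU(),
    nn.Linear(latent_dim, 2 * output_dim),
)

r = torch.randn(4, 50, latent_dim)         # pooled representation, repeated per target point
x_target = torch.randn(4, 50, input_dim)   # target inputs
z = post_pooling_fn(torch.cat([r, x_target], dim=-1))
mean, pre_sigma = z.split(output_dim, dim=-1)
sigma = nn.functional.softplus(pre_sigma)  # matches self.sigma_fn above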
Example No. 5
    def __init__(self, input_dim, latent_dim, use_attention=False):
        super(StandardEncoder, self).__init__()

        self.latent_dim = latent_dim
        self.input_dim = input_dim
        self.use_attention = use_attention

        pre_pooling_fn = nn.Sequential(
            BatchLinear(self.input_dim, self.latent_dim),
            nn.ReLU(),
            BatchLinear(self.latent_dim, self.latent_dim),
            nn.ReLU(),
            BatchLinear(self.latent_dim, self.latent_dim),
        )
        self.pre_pooling_fn = init_sequential_weights(pre_pooling_fn)
        if self.use_attention:
            self.pooling_fn = CrossAttention()
        else:
            self.pooling_fn = MeanPooling(pooling_dim=1)
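Again the forward pass is not shown; the sketch below assumes the usual CNP-style encoding (concatenate context inputs and outputs, embed point-wise, then pool over the points dimension), with nn.Linear standing in for BatchLinear and a plain mean standing in for MeanPooling(pooling_dim=1).

import torch
import torch.nn as nn

input_dim, latent_dim = 2, 128    # e.g. 1-d x and 1-d y concatenated
pre_pooling_fn = nn.Sequential(
    nn.Linear(input_dim, latent_dim), nn.ReLU(),
    nn.Linear(latent_dim, latent_dim), nn.ReLU(),
    nn.Linear(latent_dim, latent_dim),
)

x_context = torch.randn(4, 30, 1)
y_context = torch.randn(4, 30, 1)
h = pre_pooling_fn(torch.cat([x_context, y_context], dim=-1))
r = h.mean(dim=1, keepdim=True)   # rough equivalent of MeanPooling(pooling_dim=1)
print(r.shape)                    # torch.Size([4, 1, 128])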