Example #1
    def test_conv_output_shape(self):
        from molecules.ml.unsupervised.vae.utils import conv_output_shape

        # Optimal fs-peptide
        assert conv_output_shape(input_dim=22,
                                 kernel_size=5,
                                 stride=1,
                                 padding=2,
                                 num_filters=100) == (100, 22, 22)
        assert conv_output_shape(input_dim=22,
                                 kernel_size=5,
                                 stride=2,
                                 padding=1,
                                 num_filters=100) == (100, 10, 10)
        # Test fs-peptide
        assert conv_output_shape(input_dim=22,
                                 kernel_size=3,
                                 stride=1,
                                 padding=1,
                                 num_filters=64) == (64, 22, 22)
        assert conv_output_shape(input_dim=22,
                                 kernel_size=3,
                                 stride=2,
                                 padding=1,
                                 num_filters=64) == (64, 11, 11)
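
The expected values are consistent with the standard convolution arithmetic out = floor((in + 2*padding - kernel) / stride) + 1 applied to each spatial dimension of a square input. Below is a minimal sketch with that behaviour; the actual molecules.ml.unsupervised.vae.utils.conv_output_shape also accepts tuple input dims (see Example #3) and may differ in other details.

    def conv_output_shape_sketch(input_dim, kernel_size, stride, padding,
                                 num_filters, dim=2):
        # Standard convolution arithmetic (no dilation):
        #   out = (input + 2*padding - kernel) // stride + 1
        out = (input_dim + 2 * padding - kernel_size) // stride + 1
        # dim=2 -> (channels, height, width); dim=1 -> (channels, length)
        return (num_filters,) + (out,) * dim

    assert conv_output_shape_sketch(22, 5, 1, 2, 100) == (100, 22, 22)
    assert conv_output_shape_sketch(22, 3, 2, 1, 64) == (64, 11, 11)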
Example #2
    def _encoder_layers(self):
        layers = []

        padding = same_padding(self.input_shape[1], kernel_size=5, stride=1)

        layers.append(nn.Conv1d(in_channels=self.input_shape[0], # should be num_residues
                                out_channels=self.hparams.enc_filters,
                                kernel_size=5,
                                stride=1,
                                padding=padding))

        layers.append(get_activation(self.hparams.activation))

        res_input_shape = conv_output_shape(self.input_shape[1],
                                            kernel_size=5,
                                            stride=1,
                                            padding=padding,
                                            num_filters=self.hparams.enc_filters,
                                            dim=1)

        # Add residual layers
        for lidx in range(self.hparams.enc_reslayers):

            filters = self.hparams.enc_filters * self.hparams.enc_filter_growth_fac**lidx
            filters = round(filters) # To nearest int

            layers.append(ResidualConv1d(res_input_shape,
                                         filters,
                                         self.hparams.enc_kernel_size,
                                         self.hparams.activation,
                                         shrink=True))

            res_input_shape = layers[-1].output_shape

        # Flattened feature count = prod of the final (channels, length) shape,
        # presumably used to size the layer that follows the encoder
        return nn.Sequential(*layers, nn.Flatten()), prod(res_input_shape)
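
Here same_padding is called with stride 1 so that the first convolution preserves the sequence length before the residual stack. For the stride-1 case the returned value is pinned down by Example #1 (kernel_size=5 gives padding=2), i.e. the usual (kernel_size - 1) // 2; a sketch of that case is below. How the library helper handles strided or tuple inputs is not shown in these snippets.

    def same_padding_stride1(input_dim, kernel_size):
        # With stride 1, symmetric padding of (kernel_size - 1) // 2 keeps the
        # output length equal to input_dim:
        #   (input_dim + 2*padding - kernel_size) // 1 + 1 == input_dim
        # input_dim is unused for stride 1; kept only to mirror same_padding's signature.
        return (kernel_size - 1) // 2

    assert same_padding_stride1(22, 5) == 2   # matches the first assert in Example #1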
Example #3
    def _conv_layers(self):
        """
        Compose convolution layers.

        Returns
        -------
        layers : list
            Convolution layers
        """

        layers = []

        act = get_activation(self.hparams.activation)

        for filter_, kernel, stride in zip(self.hparams.filters,
                                           self.hparams.kernels,
                                           self.hparams.strides):

            padding = same_padding(self.shapes[-1][1:], kernel, stride)

            layers.append(nn.Conv2d(in_channels=self.shapes[-1][0],
                                    out_channels=filter_,
                                    kernel_size=kernel,
                                    stride=stride,
                                    padding=padding))

            layers.append(act)

            # Output shape is (channels, height, width)
            self.shapes.append(conv_output_shape(self.shapes[-1][1:], kernel,
                                                 stride, padding, filter_))

        return layers
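
Since _conv_layers both reads and appends to self.shapes, the list is presumably seeded with the input shape before this method runs. The standalone snippet below reproduces the same shape bookkeeping for an assumed 1x22x22 contact-map input with illustrative filters, kernels, and strides (these hyperparameter values are not taken from the library).

    # Shape bookkeeping equivalent to _conv_layers, using the arithmetic from Example #1
    shapes = [(1, 22, 22)]
    for filter_, kernel, stride in zip((64, 64), (3, 3), (1, 2)):
        padding = 1   # as in Example #1's kernel_size=3 asserts; the real code calls same_padding
        out = (shapes[-1][1] + 2 * padding - kernel) // stride + 1
        shapes.append((filter_, out, out))
    assert shapes == [(1, 22, 22), (64, 22, 22), (64, 11, 11)]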
Example #4
    def __init__(self, input_shape, filters, kernel_size,
                 activation='ReLU', shrink=False, kfac=2):
        super(ResidualConv1d, self).__init__()

        self.input_shape = input_shape
        self.output_shape = input_shape
        self.filters = filters
        self.kernel_size = kernel_size
        self.activation = activation
        self.shrink = shrink
        self.kfac = kfac

        # Residual branch (see Example #6): bottleneck 1x1 conv, BatchNorm/activation/conv
        # blocks, then a 1x1 projection back to the input channel count
        self.residual = self._residual_layers()

        shape = self.input_shape
        if shape[1] == 1:
            shape = (shape[1], shape[0])
        padding = same_padding(shape[1], 1, 1)
        # 1x1 convolution mapping the input channels to `filters`
        self.conv = nn.Conv1d(shape[0],
                              self.filters,
                              kernel_size=1,
                              stride=1,
                              padding=padding)

        self.output_shape = conv_output_shape(input_dim=shape[1],
                                               kernel_size=1,
                                               stride=1,
                                               padding=padding,
                                               num_filters=self.filters,
                                               dim=1)

        self.activation_fnc = get_activation(self.activation)

        if self.shrink:
            self.shrink_layer, self.output_shape = self._shrink_layer()
Example #5
    def _shrink_layer(self):

        # TODO: if this layer is added, there are 2 conv layers back to back
        #       without activation. The input to this layer is x + residual.
        #       Consider if it should be wrapped activation(x + residual).
        #       See forward function.

        padding = same_padding(self.output_shape[1], self.kfac, self.kfac)

        conv = nn.Conv1d(in_channels=self.output_shape[0],
                         out_channels=self.filters,
                         kernel_size=self.kfac,
                         stride=self.kfac,
                         padding=padding)

        #act = get_activation(self.activation)

        shape = conv_output_shape(input_dim=self.output_shape[1],
                                  kernel_size=self.kfac,
                                  stride=self.kfac,
                                  padding=padding,
                                  num_filters=self.filters,
                                  dim=1)

        # print('\nResidualConv1d::_shrink_layer\n',
        #       f'\t input_shape: {self.input_shape}\n',
        #       f'\t out_shape: {shape}\n',
        #       f'\t filters: {self.filters}\n',
        #       f'\t kernel_size: {self.kfac}\n',
        #       f'\t stride: {self.kfac}\n',
        #       f'\t padding: {padding}\n\n')

        return nn.Sequential(conv), shape
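
The forward pass of ResidualConv1d is not included in these examples, so the sketch below is a hypothetical reconstruction inferred from the shapes above and the TODO comment: the residual branch returns input_shape[0] channels (Example #6), the sum with the input is projected by the 1x1 self.conv, and with shrink=True the strided convolution follows it directly, giving the "2 conv layers back to back without activation" the TODO describes. Where self.activation_fnc is applied cannot be determined from these snippets, and the real implementation may differ.

    def forward(self, x):
        # Hypothetical reconstruction -- not necessarily the library's actual forward()
        out = x + self.residual(x)        # residual branch keeps input_shape[0] channels
        out = self.conv(out)              # 1x1 projection to `filters` channels
        if self.shrink:
            out = self.shrink_layer(out)  # conv directly after conv, no activation between
        return out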
Example #6
    def _residual_layers(self):
        # TODO: check out SyncBatchNorm
        # TODO: could add activation for bottleneck layers
        # TODO: prefer wide layers and shallower autoencoder
        #       see https://arxiv.org/pdf/1605.07146.pdf

        layers = []

        # First add bottleneck layer

        bottleneck_padding = same_padding(self.input_shape[1],
                                          kernel_size=1,
                                          stride=1)

        layers.append(
            nn.Conv1d(in_channels=self.input_shape[0],
                      out_channels=self.filters,
                      kernel_size=1,
                      stride=1,
                      padding=bottleneck_padding))

        shape = (self.filters, self.input_shape[1])

        # Now add residual layers
        for _ in range(self.depth):
            layers.append(nn.BatchNorm1d(num_features=self.filters))

            layers.append(get_activation(self.activation))

            padding = same_padding(shape[1], self.kernel_size, stride=1)

            layers.append(
                nn.Conv1d(in_channels=shape[0],
                          out_channels=self.filters,
                          kernel_size=self.kernel_size,
                          stride=1,
                          padding=padding))

            shape = conv_output_shape(input_dim=shape[1],
                                      kernel_size=self.kernel_size,
                                      stride=1,
                                      padding=padding,
                                      num_filters=self.filters,
                                      dim=1)

        # Project back up (undo bottleneck)
        layers.append(nn.BatchNorm1d(num_features=self.filters))

        layers.append(get_activation(self.activation))

        # TODO: this does not appear to be in keras code (it uses self.kernel_size)
        layers.append(
            nn.Conv1d(in_channels=self.filters,
                      out_channels=self.input_shape[0],
                      kernel_size=1,
                      stride=1,
                      padding=bottleneck_padding))

        return nn.Sequential(*layers)
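
Note that _residual_layers iterates over self.depth, which is not set in the __init__ shown in Example #4, so these snippets are presumably from slightly different revisions of the class. Assuming a revision where the constructor is complete, usage would mirror the call in Example #2; the shapes in the comments below additionally assume TensorFlow-style "same" padding for the strided shrink convolution.

    # Hypothetical usage; input_shape is (channels, length) as in Example #2
    block = ResidualConv1d(input_shape=(64, 22),
                           filters=64,
                           kernel_size=3,
                           activation='ReLU',
                           shrink=True)          # default kfac=2 halves the length
    print(block.output_shape)                    # expected (64, 11) under the assumptions above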