Example #1
    def _residual_layers(self):
        # TODO: check out SyncBatchNorm
        # TODO: could add activation for bottleneck layers
        # TODO: prefer wide layers and shallower autoencoder
        #       see https://arxiv.org/pdf/1605.07146.pdf

        layers = []

        # First add bottleneck layer

        bottleneck_padding = same_padding(self.input_shape[1],
                                          kernel_size=1,
                                          stride=1)

        layers.append(
            nn.Conv1d(in_channels=self.input_shape[0],
                      out_channels=self.filters,
                      kernel_size=1,
                      stride=1,
                      padding=bottleneck_padding))

        shape = (self.filters, self.input_shape[1])

        # Now add residual layers
        for _ in range(self.depth):
            layers.append(nn.BatchNorm1d(num_features=self.filters))

            layers.append(get_activation(self.activation))

            padding = same_padding(shape[1], self.kernel_size, stride=1)

            layers.append(
                nn.Conv1d(in_channels=shape[0],
                          out_channels=self.filters,
                          kernel_size=self.kernel_size,
                          stride=1,
                          padding=padding))

            shape = conv_output_shape(input_dim=shape[1],
                                      kernel_size=self.kernel_size,
                                      stride=1,
                                      padding=padding,
                                      num_filters=self.filters,
                                      dim=1)

        # Project back up (undo bottleneck)
        layers.append(nn.BatchNorm1d(num_features=self.filters))

        layers.append(get_activation(self.activation))

        # TODO: this does not appear to be in the Keras code (it uses self.kernel_size)
        layers.append(
            nn.Conv1d(in_channels=self.filters,
                      out_channels=self.input_shape[0],
                      kernel_size=1,
                      stride=1,
                      padding=bottleneck_padding))

        return nn.Sequential(*layers)
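
Note: same_padding and conv_output_shape are helpers from the surrounding project and are not shown in these examples. As a reading aid only, minimal 1-D stand-ins consistent with how they are called above could look like the following (the _1d names are hypothetical; the real helpers also take tuples and a dim argument).

import math

def same_padding_1d(input_dim, kernel_size, stride):
    # Symmetric padding aiming at an output length of ceil(input_dim / stride).
    # Hypothetical stand-in: when the required total padding is odd, a single
    # symmetric int cannot hit "same" exactly, and the real helper may round differently.
    out_dim = math.ceil(input_dim / stride)
    return max((out_dim - 1) * stride + kernel_size - input_dim, 0) // 2

def conv_output_dim_1d(input_dim, kernel_size, stride, padding):
    # Standard PyTorch Conv1d output length (dilation = 1).
    return (input_dim + 2 * padding - kernel_size) // stride + 1

def conv_output_shape_1d(input_dim, kernel_size, stride, padding, num_filters):
    # Mirrors how the examples consume conv_output_shape: a (channels, length) tuple.
    return (num_filters, conv_output_dim_1d(input_dim, kernel_size, stride, padding))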
Example #2
    def _conv_layers(self):
        """
        Compose convolution layers.

        Returns
        -------
        layers : list
            Convolution layers
        """
        layers = []

        in_channels = self.hparams.filters[-1]

        # Dimension of square matrix
        input_dim = self.output_shape[1]

        # Set last filter to be the number of channels in the reconstructed image.
        tmp = self.hparams.filters[0]
        self.hparams.filters[0] = self.output_shape[0]

        for filter_, kernel, stride in reversedzip(self.hparams.filters,
                                                   self.hparams.kernels,
                                                   self.hparams.strides):

            padding = same_padding(input_dim, kernel, stride)

            layers.append(
                nn.ConvTranspose2d(in_channels=in_channels,
                                   out_channels=filter_,
                                   kernel_size=kernel,
                                   stride=stride,
                                   padding=padding,
                                   output_padding=1 if stride != 1 else 0))

            # TODO: revisit output_padding, see the GitHub issue.
            #       This code may not generalize to other examples. Needs testing.

            layers.append(get_activation(self.hparams.activation))

            # Subsequent layers' in_channels is the current layer's number of filters,
            # except for the last layer, which is 1 (or output_shape channels)
            in_channels = filter_

            # Compute non-channel dimension given to next layer
            input_dim = conv_output_dim(input_dim,
                                        kernel,
                                        stride,
                                        padding,
                                        transpose=True)

        # Overwrite output activation
        layers[-1] = get_activation(self.hparams.output_activation)

        # Restore invariant state
        self.hparams.filters[0] = tmp

        return layers
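
reversedzip is another project helper. From its use here (walking filters, kernels and strides from last to first so the decoder mirrors the encoder), a minimal hypothetical stand-in would be:

def reversedzip(*iterables):
    # Hypothetical stand-in: zip the iterables, then yield the tuples last-to-first.
    yield from reversed(list(zip(*iterables)))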
Example #3
    def _conv_layers(self):
        """
        Compose convolution layers.

        Returns
        -------
        layers : list
            Convolution layers
        activations : list
            Activation functions
        """
        layers, activations = [], []

        act = get_activation(self.hparams.activation)

        # The first out_channels should be the second-to-last filter size
        tmp = self.hparams.filters.pop()

        # self.output_shape[0] needs to be the last out_channels to match the input matrix
        for i, (filter_, kernel, stride) in enumerate(reversedzip((self.output_shape[0],
                                                                  *self.hparams.filters),
                                                                  self.hparams.kernels,
                                                                  self.hparams.strides)):
            shape = self.encoder_shapes[-1 * i - 1]

            # TODO: this is a quick fix but might not generalize to some architectures
            if stride == 1:
                padding = same_padding(shape[1:], kernel, stride)
            else:
                padding = tuple(int(dim % 2 == 0) for dim in self.encoder_shapes[-1 * i - 2][1:])

            layers.append(nn.ConvTranspose2d(in_channels=shape[0],
                                             out_channels=filter_,
                                             kernel_size=kernel,
                                             stride=stride,
                                             padding=padding))

            # TODO: revisit padding, output_padding, see the GitHub issue.
            #       This code may not generalize to other examples. Needs testing.
            #       This also needs to be addressed in conv_output_dim.

            activations.append(act)

        # Overwrite output activation
        activations[-1] = get_activation(self.hparams.output_activation)

        # Restore invariant state
        self.hparams.filters.append(tmp)

        return nn.ModuleList(layers), activations
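
A quick way to sanity-check the stride branch above is the ConvTranspose2d output formula, out = (in - 1) * stride - 2 * padding + dilation * (kernel - 1) + output_padding + 1. The numbers below are illustrative only, not taken from the project:

import torch
import torch.nn as nn

# With stride 2, kernel 4 and padding 1, a transposed conv exactly doubles a 7x7
# feature map back to 14x14, mirroring an encoder layer that halved a 14x14 input.
x = torch.zeros(1, 8, 7, 7)
y = nn.ConvTranspose2d(in_channels=8, out_channels=4, kernel_size=4, stride=2, padding=1)(x)
print(y.shape)  # torch.Size([1, 4, 14, 14])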
Example #4
    def _encoder_layers(self):
        layers = []

        padding = same_padding(self.input_shape[1], kernel_size=5, stride=1)

        layers.append(nn.Conv1d(in_channels=self.input_shape[0], # should be num_residues
                                out_channels=self.hparams.enc_filters,
                                kernel_size=5,
                                stride=1,
                                padding=padding))

        layers.append(get_activation(self.hparams.activation))

        res_input_shape = conv_output_shape(self.input_shape[1],
                                            kernel_size=5,
                                            stride=1,
                                            padding=padding,
                                            num_filters=self.hparams.enc_filters,
                                            dim=1)

        # Add residual layers
        for lidx in range(self.hparams.enc_reslayers):

            filters = self.hparams.enc_filters * self.hparams.enc_filter_growth_fac**lidx
            filters = round(filters) # To nearest int

            layers.append(ResidualConv1d(res_input_shape,
                                         filters,
                                         self.hparams.enc_kernel_size,
                                         self.hparams.activation,
                                         shrink=True))

            res_input_shape = layers[-1].output_shape

        return nn.Sequential(*layers, nn.Flatten()), prod(res_input_shape)
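
_encoder_layers returns both the nn.Sequential and the flattened feature size. A hypothetical caller (the attribute names below are assumptions, not from the source) would typically use the latter to size the projection into the latent space:

        # Hypothetical wiring inside the model's __init__ (illustration only;
        # assumes the usual "import torch.nn as nn" and the hparams used above).
        encoder, flat_dim = self._encoder_layers()
        self.encoder = encoder
        self.embed = nn.Linear(flat_dim, self.hparams.latent_dim)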
Example #5
    def _affine_layers(self):
        """
        Compose affine layers.

        Returns
        -------
        layers : list
            Linear layers
        """

        layers = []

        # First layer gets flattened convolutional output
        in_features = prod(self.shapes[-1])

        for width, dropout in zip(self.hparams.affine_widths,
                                  self.hparams.affine_dropouts):

            layers.append(nn.Linear(in_features=in_features,
                                    out_features=width))

            layers.append(get_activation(self.hparams.activation))

            if not isclose(dropout, 0):
                layers.append(nn.Dropout(p=dropout))

            # Subsequent layers' in_features is the current layer's width
            in_features = width

        return layers
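
get_activation is used throughout these examples but never defined in them. A minimal stand-in consistent with how it is called (a name string such as 'ReLU' in, an nn module instance out) might look like this; the project's real helper may support more names or extra arguments:

import torch.nn as nn

def get_activation(name):
    # Hypothetical stand-in: look the class up on torch.nn and instantiate it,
    # e.g. get_activation('ReLU') -> nn.ReLU().
    return getattr(nn, name)()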
Example #6
    def _conv_layers(self):
        """
        Compose convolution layers.

        Returns
        -------
        layers : list
            Convolution layers
        """

        layers = []

        act = get_activation(self.hparams.activation)

        for filter_, kernel, stride in zip(self.hparams.filters,
                                           self.hparams.kernels,
                                           self.hparams.strides):

            padding = same_padding(self.shapes[-1][1:], kernel, stride)

            layers.append(nn.Conv2d(in_channels=self.shapes[-1][0],
                                    out_channels=filter_,
                                    kernel_size=kernel,
                                    stride=stride,
                                    padding=padding))

            layers.append(act)

            # Output shape is (channels, height, width)
            self.shapes.append(conv_output_shape(self.shapes[-1][1:], kernel,
                                                 stride, padding, filter_))

        return layers
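
To make the self.shapes bookkeeping concrete, here is a worked pass with a made-up configuration (these filters, kernels and strides are not the project's defaults), using half padding for odd kernels and the standard Conv2d output formula:

filters, kernels, strides = [32, 64], [5, 3], [2, 2]  # hypothetical configuration

shape = (1, 28, 28)                        # (channels, height, width)
for f, k, s in zip(filters, kernels, strides):
    p = k // 2                             # "same"-style padding for odd kernels
    out = (shape[1] + 2 * p - k) // s + 1  # standard Conv2d output size, dilation = 1
    shape = (f, out, out)
    print(shape)                           # (32, 14, 14), then (64, 7, 7)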
Example #7
    def __init__(self, input_shape, filters, kernel_size,
                 activation='ReLU', shrink=False, kfac=2):
        super(ResidualConv1d, self).__init__()

        self.input_shape = input_shape
        self.output_shape = input_shape
        self.filters = filters
        self.kernel_size = kernel_size
        self.activation = activation
        self.shrink = shrink
        self.kfac = kfac

        self.residual = self._residual_layers()

        shape = self.input_shape
        if shape[1] == 1:
            shape = (shape[1], shape[0])
        padding = same_padding(shape[1], 1, 1)
        self.conv = nn.Conv1d(shape[0],
                              self.filters,
                              kernel_size=1,
                              stride=1,
                              padding=padding)

        self.output_shape = conv_output_shape(input_dim=shape[1],
                                              kernel_size=1,
                                              stride=1,
                                              padding=padding,
                                              num_filters=self.filters,
                                              dim=1)

        self.activation_fnc = get_activation(self.activation)

        if self.shrink:
            self.shrink_layer, self.output_shape = self._shrink_layer()
Example #8
    def _conv_layers(self):
        """
        Compose convolution layers.

        Returns
        -------
        layers : list
            Convolution layers
        """

        layers = []

        # Contact matrices have one channel
        in_channels = self.input_shape[0]

        for filter_, kernel, stride in zip(self.hparams.filters,
                                           self.hparams.kernels,
                                           self.hparams.strides):

            padding = same_padding(self.encoder_dim, kernel, stride)

            layers.append(nn.Conv2d(in_channels=in_channels,
                                    out_channels=filter_,
                                    kernel_size=kernel,
                                    stride=stride,
                                    padding=padding))

            layers.append(get_activation(self.hparams.activation))

            # Subsequent layers' in_channels is the current layer's number of filters
            in_channels = filter_

            self.encoder_dim = conv_output_dim(self.encoder_dim, kernel, stride, padding)

        return layers
Example #9
    def __init__(self,
                 input_shape,
                 filters,
                 kernel_size,
                 activation='ReLU',
                 shrink=False,
                 kfac=2,
                 depth=1):
        super(ResidualConv1d, self).__init__()

        self.input_shape = input_shape
        self.output_shape = input_shape
        self.filters = filters
        self.kernel_size = kernel_size
        self.activation = activation
        self.shrink = shrink
        self.kfac = kfac
        # Depth of residual module
        self.depth = depth

        self.residual = self._residual_layers()

        self.activation_fnc = get_activation(self.activation)

        if self.shrink:
            self.shrink_layer, self.output_shape = self._shrink_layer()
Example #10
    def _shrink_layer(self):

        # TODO: if this layer is added, there are 2 conv layers back to back
        #       without activation. The input to this layer is x + residual.
        #       Consider if it should be wrapped activation(x + residual).
        #       See forward function.

        padding = same_padding(self.input_shape[1], self.kfac, self.kfac)

        conv = nn.Conv1d(in_channels=self.input_shape[0],
                         out_channels=self.filters,
                         kernel_size=self.kfac,
                         stride=self.kfac,
                         padding=padding)

        act = get_activation(self.activation)

        shape = conv_output_shape(input_dim=self.input_shape[1],
                                  kernel_size=self.kfac,
                                  stride=self.kfac,
                                  padding=padding,
                                  num_filters=self.filters,
                                  dim=1)

        # print('\nResidualConv1d::_shrink_layer\n',
        #       f'\t input_shape: {self.input_shape}\n',
        #       f'\t out_shape: {shape}\n',
        #       f'\t filters: {self.filters}\n',
        #       f'\t kernel_size: {self.kfac}\n',
        #       f'\t stride: {self.kfac}\n',
        #       f'\t padding: {padding}\n\n')

        return nn.Sequential(conv, act), shape
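
The forward method of ResidualConv1d is referenced by the TODO above but not included in these snippets. Purely as a reading aid, one plausible wiring implied by the attributes built in Examples #7 and #9, and by the remark that the shrink layer receives x + residual with no activation in between, is sketched below; the project's actual forward may differ (the Example #7 variant additionally projects the skip path through self.conv):

    def forward(self, x):
        # Hypothetical sketch, not the project's actual implementation.
        x = x + self.residual(x)  # skip connection; the residual stack ends at the input channel count (Example #1)
        if self.shrink:
            # Per the TODO: the strided shrink conv sees x + residual with no activation in between.
            return self.shrink_layer(x)
        return self.activation_fnc(x)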
Example #11
    def _affine_layers(self):
        """
        Compose affine layers.

        Returns
        -------
        layers : list
            Linear layers
        """

        layers = []

        in_features = self.hparams.latent_dim

        for width, dropout in reversedzip(self.hparams.affine_widths,
                                          self.hparams.affine_dropouts):

            layers.append(
                nn.Linear(in_features=in_features, out_features=width))

            layers.append(get_activation(self.hparams.activation))

            if not isclose(dropout, 0):
                layers.append(nn.Dropout(p=dropout))

            # Subsequent layers' in_features is the current layer's width
            in_features = width

        # Add last layer with dims to connect the last linear layer to
        # the first convolutional decoder layer
        layers.append(
            nn.Linear(in_features=self.hparams.affine_widths[0],
                      out_features=self.hparams.filters[-1] *
                      self.encoder_dim**2))
        layers.append(get_activation(self.hparams.activation))

        return layers
Example #12
    def _decoder_layers(self):

        layers = []

        res_input_shape = (1, self.hparams.latent_dim)

        for lidx in range(self.hparams.dec_reslayers):

            filters = self.hparams.dec_filters * self.hparams.dec_filter_growth_rate**lidx
            filters = round(filters)

            if self.hparams.shrink_rounds:
                self.hparams.shrink_rounds -= 1

            layers.append(ResidualConv1d(res_input_shape,
                                         filters,
                                         self.hparams.dec_kernel_size,
                                         self.hparams.activation,
                                         shrink=self.hparams.shrink_rounds))

            res_input_shape = layers[-1].output_shape

            if self.hparams.upsample_rounds:
                # TODO: consider upsample modes, e.g. nearest neighbor.
                #       https://pytorch.org/docs/master/generated/torch.nn.Upsample.html
                scale_factor = 2
                layers.append(nn.Upsample(scale_factor=scale_factor))
                self.hparams.upsample_rounds -= 1
                res_input_shape = (res_input_shape[0], res_input_shape[1] * scale_factor)

        padding = same_padding(res_input_shape[1],
                               self.hparams.dec_kernel_size,
                               stride=1)

        layers.append(nn.Conv1d(in_channels=res_input_shape[0],
                                out_channels=self.output_shape[1], # should be num_residues i.e. nchars
                                kernel_size=self.hparams.dec_kernel_size,
                                stride=1,
                                padding=padding))

        layers.append(get_activation(self.hparams.output_activation))

        return nn.Sequential(*layers)