Example #1
    def _configure_test_13(self):
        # === Layer 1 ===
        input_size = self.input_size
        hidden_size = self.hidden_size
        output_size = hidden_size

        self.layer_1 = SequentialMod(
            PairwiseSeqConv(
                input_size,
                output_size,
                self.passthrough_fraction,
                normalize=True,
                add_counts=True,
                bias=False,
            ),
            nn.ReLU(),
        )

        # === Layer N ===
        input_size = output_size
        hidden_size = int(input_size * 2)
        output_size = 1

        self.layer_n = nn.Sequential(
            nn.Conv1d(
                input_size,
                hidden_size,
                kernel_size=self.kernel_size,
                stride=self.stride,
                padding=self.padding,
                bias=True,
            ),
            FinalLayer(hidden_size, output_size, bias=True),
        )
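
The custom modules here (SequentialMod, PairwiseSeqConv, FinalLayer) are project code, but layer_n opens with a standard strided nn.Conv1d. A minimal sketch, assuming the defaults used elsewhere in this file (kernel_size=3, stride=2, padding=1, cf. Example #7), showing that this configuration halves the sequence length (all sizes below are illustrative):

import torch
from torch import nn

# L_out = floor((L_in + 2 * padding - kernel_size) / stride) + 1
conv = nn.Conv1d(in_channels=64, out_channels=128,
                 kernel_size=3, stride=2, padding=1, bias=True)

x = torch.randn(8, 64, 100)  # (batch, channels, sequence length)
print(conv(x).shape)         # torch.Size([8, 128, 50])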
Example #2
    def _configure_two_layer_seqadj(self):
        # === Layer 1 ===
        input_size = self.input_size
        hidden_size = self.hidden_size
        output_size = hidden_size

        self.layer_1_pre = nn.Conv1d(input_size,
                                     hidden_size,
                                     kernel_size=1,
                                     stride=1,
                                     padding=0)
        num_seq_features = int(hidden_size * self.passthrough_fraction)
        self.layer_1_seq = nn.Sequential()  # identity passthrough
        self.layer_1_adj = SequentialMod(
            PairwiseConv(int(hidden_size - num_seq_features),
                         int(hidden_size - num_seq_features)))
        self.layer_1_post = nn.Sequential(
            nn.ReLU(), nn.Dropout(p=self.dropout_probability))

        # === Layer 2 ===
        input_size = output_size
        hidden_size = int(input_size * 2)
        output_size = hidden_size

        self.layer_2_pre = nn.Sequential(
            nn.Conv1d(
                input_size,
                output_size,
                kernel_size=self.kernel_size,
                stride=self.stride,
                padding=self.padding,
            ))
        num_seq_features = int(hidden_size * self.passthrough_fraction)
        self.layer_2_seq = nn.Sequential()  # identity passthrough
        self.layer_2_adj = SequentialMod(
            PairwiseConv(int(hidden_size - num_seq_features),
                         int(hidden_size - num_seq_features)))
        self.layer_2_post = nn.Sequential(nn.ReLU())

        # === Layer N ===
        input_size = output_size
        output_size = 1

        self.layer_n = FinalLayer(input_size, output_size, bias=True)
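
layer_1_seq and layer_2_seq are empty nn.Sequential modules, so the num_seq_features channels carved out by passthrough_fraction bypass the pairwise branch unchanged. The split itself happens in the model's forward pass, which is not shown; the torch.split below is an assumption about how that pass partitions the channels, kept only to make the arithmetic concrete:

import torch

hidden_size = 64
passthrough_fraction = 0.25

num_seq_features = int(hidden_size * passthrough_fraction)  # 16 passthrough
num_adj_features = hidden_size - num_seq_features           # 48 pairwise

x = torch.randn(8, hidden_size, 100)
x_seq, x_adj = torch.split(x, [num_seq_features, num_adj_features], dim=1)
print(x_seq.shape, x_adj.shape)  # [8, 16, 100] and [8, 48, 100]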
Example #3
    def _configure_single_pairwise(self):
        # === Layer 1 ===
        input_size = self.input_size
        hidden_size = self.hidden_size
        output_size = hidden_size

        self.layer_1 = SequentialMod(
            PairwiseConv(
                input_size,
                output_size,
                normalize=False,
                add_counts=False,
                bias=False,
                wself=True,
                barcode_method="combined.pretrained",
                max_distance=3,
            ),
            nn.ReLU(),
        )

        # === Layer N ===
        input_size = output_size
        hidden_size = int(input_size * 2)
        output_size = 1

        self.layer_n = nn.Sequential(
            nn.Conv1d(
                input_size,
                hidden_size,
                kernel_size=self.kernel_size,
                stride=self.stride,
                padding=self.padding,
                bias=True,
            ),
            RepeatPad(self.max_pool_kernel_size - 1),
            nn.MaxPool1d(self.max_pool_kernel_size),
            nn.Conv1d(hidden_size, output_size, kernel_size=1, bias=True),
        )
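
RepeatPad(self.max_pool_kernel_size - 1) in front of nn.MaxPool1d makes the pooling keep every trailing position: the output length becomes ceil(L / k) rather than floor(L / k). RepeatPad itself is project code that is not shown; the stand-in below, built on F.pad with mode="replicate", is a sketch of the semantics those sizes require, not the actual implementation:

import torch
from torch import nn
import torch.nn.functional as F

class RepeatPad(nn.Module):
    """Stand-in sketch: pad the right edge by repeating the last value."""

    def __init__(self, amount):
        super().__init__()
        self.amount = amount

    def forward(self, x):
        return F.pad(x, (0, self.amount), mode="replicate")

k = 4
pool = nn.Sequential(RepeatPad(k - 1), nn.MaxPool1d(k))
x = torch.randn(2, 8, 10)
print(pool(x).shape)  # torch.Size([2, 8, 3]), i.e. ceil(10 / 4)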
Example #4
    def _configure_single_graphconv(self):
        input_size = self.input_size
        hidden_size = self.hidden_size
        output_size = int(hidden_size * 2)

        self.layer_1 = SequentialMod(
            GraphConv(input_size, hidden_size),
            nn.ReLU(inplace=True),
        )
        self.linear_n = nn.Sequential(
            nn.Conv1d(
                hidden_size,
                output_size,
                kernel_size=self.kernel_size,
                stride=self.stride,
                padding=self.padding,
            ),
            FinalLayer(output_size, 1, bias=True),
        )
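
GraphConv is likewise project code. For intuition only, a stand-in sketch of a graph convolution over a (batch, channels, length) sequence with a per-batch adjacency matrix; the real module's aggregation and signature may differ:

import torch
from torch import nn

class GraphConv(nn.Module):
    """Stand-in sketch: mix positions via an adjacency matrix, then apply
    a learned 1x1 convolution."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.linear = nn.Conv1d(in_channels, out_channels, kernel_size=1)

    def forward(self, x, adj):
        # x: (batch, channels, length); adj: (batch, length, length)
        x = torch.bmm(x, adj)  # aggregate features from adjacent positions
        return self.linear(x)

conv = GraphConv(20, 64)
x = torch.randn(2, 20, 50)
adj = torch.eye(50).expand(2, -1, -1)  # identity adjacency: no mixing
print(conv(x, adj).shape)              # torch.Size([2, 64, 50])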

    def _configure_encoder(self):
        conv_kwargs = dict(kernel_size=self.kernel_size,
                           stride=self.stride,
                           padding=self.padding,
                           bias=self.bias)
        input_channels = self.input_size
        for i in range(self.n_layers):
            output_channels = (int(input_channels * 2)
                               if i > 0 else self.hidden_size)
            # Steeper LeakyReLU slope on the first layer only.
            negative_slope = 0.2 if i == 0 else 0.01
            # Input
            if i == 0:
                setattr(
                    self,
                    f"encoder_pre_{i}",
                    nn.Sequential(
                        nn.Conv1d(input_channels,
                                  output_channels // 2,
                                  kernel_size=1,
                                  stride=1,
                                  padding=0)),
                )
            else:
                setattr(self, f"encoder_pre_{i}", nn.Sequential())
            # Adjacency Conv
            if i < self.n_convs:
                setattr(
                    self,
                    f"encoder_0_{i}",
                    SequentialMod(
                        AdjacencyConv(output_channels // 4,
                                      output_channels // 4),
                        nn.LeakyReLU(negative_slope, inplace=True),
                        nn.InstanceNorm1d(
                            output_channels // 4,
                            momentum=0.01,
                            affine=True,
                            track_running_stats=True,
                        ),
                    ),
                )
            else:
                setattr(self, f"encoder_0_{i}", SequentialMod())
            # Sequence Conv
            setattr(
                self,
                f"encoder_1_{i}",
                SequentialMod(
                    SequenceConv(output_channels // 2, output_channels,
                                 **conv_kwargs)),
            )
            if i < (self.n_layers - 1):
                setattr(
                    self,
                    f"encoder_post_{i}",
                    nn.Sequential(
                        nn.LeakyReLU(negative_slope, inplace=True),
                        nn.InstanceNorm1d(output_channels,
                                          momentum=0.01,
                                          affine=True,
                                          track_running_stats=True),
                    ),
                )
            else:
                setattr(self, f"encoder_post_{i}", nn.Sequential())
            input_channels = output_channels

        if self.bottleneck_size > 0:
            # Assumes the encoder bottoms out at 512 channels x 4 positions
            # (512 * 4 = 2048 features once flattened).
            self.linear_in = nn.Linear(2048, self.bottleneck_size, bias=True)
            self.conv_in = nn.Conv1d(512,
                                     self.bottleneck_size,
                                     kernel_size=4,
                                     stride=4,
                                     padding=0,
                                     bias=True)

        return input_channels
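
The encoder registers its stages as dynamically named attributes (encoder_pre_0, encoder_0_0, ...). The matching forward pass is not part of this excerpt; a minimal sketch of how such setattr-registered stages are typically iterated with getattr, ignoring the adjacency inputs that SequentialMod threads through:

import torch
from torch import nn

class TinyEncoder(nn.Module):
    def __init__(self, n_layers=3):
        super().__init__()
        self.n_layers = n_layers
        channels = 8
        for i in range(n_layers):
            # setattr on an nn.Module registers the layer as a submodule,
            # so its parameters are tracked like any named attribute.
            setattr(self, f"encoder_pre_{i}",
                    nn.Conv1d(channels, channels * 2, kernel_size=1))
            channels *= 2

    def forward(self, x):
        for i in range(self.n_layers):
            x = getattr(self, f"encoder_pre_{i}")(x)
        return x

net = TinyEncoder()
print(net(torch.randn(2, 8, 16)).shape)  # torch.Size([2, 64, 16])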

    def _configure_decoder(self, encoder_net=None):
        if encoder_net is None:
            encoder_net = self

        input_channels = self.bottleneck_features

        if self.bottleneck_size > 0:
            # Mirrors the encoder bottleneck: expand back to 512 channels x 4
            # positions (2048 features).
            self.linear_out = nn.Linear(self.bottleneck_size, 2048, bias=True)
            self.conv_out = nn.Conv1d(self.bottleneck_size,
                                      512 * 4,
                                      kernel_size=1,
                                      stride=1,
                                      padding=0,
                                      bias=True)

        convt_kwargs = dict(
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding - 1,
            bias=self.bias,
        )
        for i in range(self.n_layers - 1, -1, -1):
            output_channels = input_channels // 2 if i > 0 else self.input_size
            if i < (self.n_layers - 1):
                setattr(
                    self,
                    f"decoder_pre_{i}",
                    nn.Sequential(
                        nn.ReLU(inplace=True),
                        nn.InstanceNorm1d(input_channels,
                                          momentum=0.01,
                                          affine=True,
                                          track_running_stats=True),
                    ),
                )
            else:
                setattr(self, f"decoder_pre_{i}", nn.Sequential())
            # Sequence Conv
            setattr(
                self,
                f"decoder_0_{i}",
                SequentialMod(
                    SequenceConvTranspose(input_channels, input_channels // 2,
                                          **convt_kwargs)),
            )
            # Adjacency Conv
            if i < self.n_convs:
                setattr(
                    self,
                    f"decoder_1_{i}",
                    SequentialMod(
                        nn.ReLU(inplace=True),
                        nn.InstanceNorm1d(
                            input_channels // 4,
                            momentum=0.01,
                            affine=True,
                            track_running_stats=True,
                        ),
                        AdjacencyConvTranspose(
                            getattr(encoder_net,
                                    f"encoder_0_{i}")[0].spatial_conv),
                    ),
                )
            else:
                setattr(self, f"decoder_1_{i}", SequentialMod())
            # Output
            if i == 0:
                setattr(
                    self,
                    f"decoder_post_{i}",
                    nn.Sequential(
                        nn.Conv1d(input_channels // 2,
                                  output_channels,
                                  kernel_size=1,
                                  stride=1,
                                  padding=0)),
                )
            else:
                setattr(self, f"decoder_post_{i}", nn.Sequential())
            input_channels = output_channels
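
Note that AdjacencyConvTranspose is handed the encoder's spatial_conv module, i.e. the decoder ties its adjacency weights to the encoder's. A minimal sketch of the same weight-tying idea in plain PyTorch, running a transposed convolution with the forward convolution's own weight tensor (all sizes illustrative):

import torch
from torch import nn
import torch.nn.functional as F

conv = nn.Conv1d(16, 32, kernel_size=3, stride=2, padding=1, bias=False)

x = torch.randn(4, 16, 64)
y = conv(x)                                 # torch.Size([4, 32, 32])
x_hat = F.conv_transpose1d(y, conv.weight,  # same weights, transposed op
                           stride=2, padding=1, output_padding=1)
print(x_hat.shape)                          # torch.Size([4, 16, 64])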
Example #7
    def __init__(
        self,
        n_layers,
        bottleneck_size,
        input_size=20,
        hidden_size=64,
        kernel_size=3,
        stride=2,
        padding=1,
        bias=False,
    ):
        super().__init__()

        self.n_layers = n_layers
        self.n_convs = 3
        self.bottleneck_size = bottleneck_size

        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding

        print("---")

        # === Encoder ===
        conv_kwargs = dict(kernel_size=kernel_size,
                           stride=stride,
                           padding=padding,
                           bias=bias)
        input_channels = input_size
        for i in range(n_layers):
            output_channels = int(input_channels * 2) if i > 0 else hidden_size
            negative_slope = 0.2 if i == 0 else 0.01
            # Input
            setattr(
                self,
                f"encoder_pre_{i}",
                nn.Sequential(
                    nn.Conv1d(input_channels,
                              output_channels // 2,
                              kernel_size=1,
                              stride=1,
                              padding=0)),
            )
            # Downsample
            setattr(
                self,
                f"encoder_downsample_seq_{i}",
                SequentialMod(
                    SequenceConv(output_channels // 4, output_channels // 2,
                                 **conv_kwargs)),
            )
            setattr(
                self,
                f"encoder_downsample_adj_{i}",
                SequentialMod(
                    AdjacencyConv(output_channels // 4, output_channels // 2)),
            )
            # Output
            if i < (n_layers - 1):
                setattr(
                    self,
                    f"encoder_post_{i}",
                    nn.Sequential(
                        nn.LeakyReLU(negative_slope, inplace=True),
                        nn.InstanceNorm1d(output_channels,
                                          momentum=0.01,
                                          affine=True,
                                          track_running_stats=True),
                    ),
                )
            else:
                setattr(self, f"encoder_post_{i}", nn.Sequential())
            input_channels = output_channels

        # === Linear ===
        if self.bottleneck_size > 0:
            # Assumes the encoder bottoms out at 512 channels x 4 positions
            # (512 * 4 = 2048 features once flattened).
            self.linear_in = nn.Linear(2048, self.bottleneck_size, bias=True)
            self.linear_out = nn.Linear(self.bottleneck_size, 2048, bias=True)
            self.conv_in = nn.Conv1d(512,
                                     self.bottleneck_size,
                                     kernel_size=4,
                                     stride=4,
                                     padding=0,
                                     bias=True)
            self.conv_out = nn.Conv1d(self.bottleneck_size,
                                      512 * 4,
                                      kernel_size=1,
                                      stride=1,
                                      padding=0,
                                      bias=True)

        # === Decoder ===
        convt_kwargs = dict(kernel_size=kernel_size,
                            stride=stride,
                            padding=padding - 1,
                            bias=bias)
        for i in range(n_layers - 1, -1, -1):
            output_channels = input_channels // 2 if i > 0 else input_size
            # Upsample
            setattr(
                self,
                f"decoder_upsample_seq_{i}",
                SequentialMod(
                    SequenceConvTranspose(input_channels // 2,
                                          input_channels // 4,
                                          **convt_kwargs)),
            )
            setattr(
                self,
                f"decoder_upsample_adj_{i}",
                SequentialMod(
                    AdjacencyConvTranspose(
                        getattr(
                            self,
                            f"encoder_downsample_adj_{i}")[0].spatial_conv)),
            )
            # Output (identical for every layer; the final layer could
            # optionally end with nn.Softmax(1))
            setattr(
                self,
                f"decoder_post_{i}",
                nn.Sequential(
                    nn.ReLU(inplace=True),
                    nn.InstanceNorm1d(
                        input_channels // 2,
                        momentum=0.01,
                        affine=True,
                        track_running_stats=True,
                    ),
                    nn.Conv1d(input_channels // 2,
                              output_channels,
                              kernel_size=1,
                              stride=1,
                              padding=0),
                ),
            )
            input_channels = output_channels
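
linear_in/conv_in and linear_out/conv_out are two routes into and out of the same bottleneck, and both hard-code the assumption that the encoder bottoms out at (batch, 512, 4), i.e. 512 * 4 = 2048 flattened features. A shape sketch (bottleneck_size=128 is an arbitrary illustrative choice):

import torch
from torch import nn

bottleneck_size = 128
linear_in = nn.Linear(2048, bottleneck_size, bias=True)
conv_in = nn.Conv1d(512, bottleneck_size, kernel_size=4, stride=4,
                    padding=0, bias=True)

x = torch.randn(8, 512, 4)            # assumed encoder output shape
print(linear_in(x.flatten(1)).shape)  # torch.Size([8, 128])
print(conv_in(x).shape)               # torch.Size([8, 128, 1])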
Example #8
    def _configure_encoder(self):
        conv_kwargs = dict(kernel_size=self.kernel_size,
                           stride=self.stride,
                           padding=self.padding,
                           bias=self.bias)

        input_channels = self.input_size
        for i in range(self.n_layers):
            output_channels = (int(input_channels * 2)
                               if i > 0 else self.hidden_size)
            negative_slope = 0.2 if i == 0 else 0.01

            # Input
            if i == 0:
                encoder_pre = nn.Sequential(
                    nn.Conv1d(input_channels,
                              output_channels // 2,
                              kernel_size=1,
                              stride=1,
                              padding=0))
            elif i % 2 == 1:
                # Odd layers use a plain strided convolution instead of the
                # sequence/adjacency pair below.
                encoder_pre = nn.Sequential(
                    nn.Conv1d(input_channels, output_channels, **conv_kwargs))
            else:
                encoder_pre = nn.Sequential()
            setattr(self, f"encoder_pre_{i}", encoder_pre)

            # Sequence Conv
            if i % 2 == 0:
                encoder_seq = SequentialMod(
                    SequenceConv(output_channels // 4, output_channels // 2,
                                 **conv_kwargs))
            else:
                encoder_seq = SequentialMod()
            setattr(self, f"encoder_seq_{i}", encoder_seq)

            # Adjacency Conv
            if i % 2 == 0:
                encoder_adj = SequentialMod(
                    AdjacencyConv(output_channels // 4, output_channels // 2),
                    nn.Conv1d(output_channels // 2, output_channels // 2,
                              **conv_kwargs),
                    # nn.LeakyReLU(negative_slope, inplace=True),
                    # nn.InstanceNorm1d(
                    #     output_channels // 4,
                    #     momentum=0.01,
                    #     affine=True,
                    #     track_running_stats=True,
                    # ),
                )
            else:
                encoder_adj = SequentialMod()
            setattr(self, f"encoder_adj_{i}", encoder_adj)

            # Output
            if i < (self.n_layers - 1):
                encoder_post = nn.Sequential(
                    nn.LeakyReLU(negative_slope, inplace=True),
                    nn.BatchNorm1d(output_channels,
                                   momentum=0.01,
                                   affine=True,
                                   track_running_stats=True),
                )
            else:
                encoder_post = nn.Sequential()
            setattr(self, f"encoder_post_{i}", encoder_post)

            input_channels = output_channels

        logger.info("Final output_channels: %s", output_channels)

        if self.bottleneck_size == 0:
            self.linear_in = nn.Linear(output_channels, 1, bias=True)
            self.conv_in = nn.Conv1d(output_channels,
                                     1,
                                     kernel_size=output_channels,
                                     bias=True)
        else:
            raise NotImplementedError
            # Unreachable reference code (kept from an earlier revision):
            # self.linear_in = nn.Linear(2048, self.bottleneck_size, bias=True)
            # self.conv_in = nn.Conv1d(512,
            #                          self.bottleneck_size,
            #                          kernel_size=4,
            #                          stride=4,
            #                          padding=0,
            #                          bias=True)

        return input_channels
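
With bottleneck_size == 0 this encoder ends in two scoring heads instead of a bottleneck: linear_in scores a single pooled feature vector, while conv_in slides a window of output_channels positions across the sequence. A shape sketch (the 64-channel width and the mean pooling are assumptions for illustration):

import torch
from torch import nn

output_channels = 64
linear_in = nn.Linear(output_channels, 1, bias=True)
conv_in = nn.Conv1d(output_channels, 1, kernel_size=output_channels, bias=True)

x = torch.randn(2, output_channels, 100)
pooled = x.mean(dim=2)          # (2, 64); the pooling choice is an assumption
print(linear_in(pooled).shape)  # torch.Size([2, 1])
print(conv_in(x).shape)         # torch.Size([2, 1, 37]); window spans 64 positions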