Example #1
    def _make_shortcut_layer(self) -> Union[nn.Module, None]:
        """ finished, NOT checked,

        Build the shortcut (skip) connection of the block:
        None (a plain identity) when neither down sampling nor channel
        expansion is needed, otherwise a module whose output matches
        the shape of the main stream's output.
        """
        if self.__DEBUG__:
            print(
                f"down_scale = {self.__down_scale}, increase_channels = {self.__increase_channels}"
            )
        if self.__down_scale > 1 or self.__increase_channels:
            if self.config.increase_channels_method.lower() == 'conv':
                # increase channels with a (grouped) convolution
                shortcut = DownSample(
                    down_scale=self.__down_scale,
                    in_channels=self.__in_channels,
                    out_channels=self.__out_channels,
                    groups=self.__groups,
                    batch_norm=True,
                    mode=self.config.subsample_mode,
                )
            elif self.config.increase_channels_method.lower() == 'zero_padding':
                # batch norm is only applied when the subsampling is itself a conv
                batch_norm = self.config.subsample_mode.lower() == 'conv'
                shortcut = nn.Sequential(
                    DownSample(
                        down_scale=self.__down_scale,
                        in_channels=self.__in_channels,
                        out_channels=self.__in_channels,
                        batch_norm=batch_norm,
                        mode=self.config.subsample_mode,
                    ),
                    ZeroPadding(self.__in_channels, self.__out_channels),
                )
            else:
                raise NotImplementedError(
                    f"increase_channels_method `{self.config.increase_channels_method}` is not supported"
                )
        else:
            shortcut = None
        return shortcut
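
`DownSample` and `ZeroPadding` are helpers from the enclosing repo. For orientation, here is a minimal self-contained sketch of how such a shortcut is typically consumed by a residual block, using only plain `torch.nn`; all names below are illustrative, not the repo's API:

import torch
from torch import nn

class ToyResBlock(nn.Module):
    """Toy residual block illustrating the shortcut logic above."""
    def __init__(self, in_channels: int, out_channels: int, down_scale: int = 1):
        super().__init__()
        self.main_stream = nn.Conv1d(
            in_channels, out_channels, kernel_size=3, stride=down_scale, padding=1
        )
        if down_scale > 1 or out_channels != in_channels:
            # analogue of the 'conv' branch: a 1x1 conv matches both the
            # channel count and the sequence length of the main stream
            self.shortcut = nn.Conv1d(
                in_channels, out_channels, kernel_size=1, stride=down_scale
            )
        else:
            self.shortcut = None  # plain identity shortcut

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        identity = x if self.shortcut is None else self.shortcut(x)
        return self.main_stream(x) + identity

x = torch.randn(2, 12, 100)             # (batch_size, channels, seq_len)
print(ToyResBlock(12, 24, 2)(x).shape)  # torch.Size([2, 24, 50])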
Example #2
    def __init__(self,
                 down_scale: int,
                 in_channels: int,
                 out_channels: Sequence[Sequence[int]],
                 filter_lengths: Union[Sequence[Sequence[int]], Sequence[int], int],
                 dilations: Union[Sequence[Sequence[int]], Sequence[int], int] = 1,
                 groups: int = 1,
                 dropouts: Union[Sequence[float], float] = 0.0,
                 mode: str = "max",
                 **config) -> None:
        """ finished, NOT checked,

        Parameters
        ----------
        down_scale: int,
            down sampling scale
        in_channels: int,
            number of channels in the input
        out_channels: sequence of sequence of int,
            number of channels produced by the (last) convolutional layer(s)
        filter_lengths: int or sequence of int or sequence of sequences of int,
            length(s) of the filters (kernel size)
        dilations: int or sequence of int or sequence of sequences of int, default 1,
            dilation(s) of the convolutions
        groups: int, default 1,
            connection pattern (of channels) of the inputs and outputs
        dropouts: float or sequence of float, default 0.0,
            dropout ratio after each `Conv_Bn_Activation`
        mode: str, default "max",
            mode for down sampling,
            can be one of `DownSample.__MODES__`
        config: dict,
            other parameters, including
            activation choices, weight initializer, batch normalization choices, etc.
            for the convolutional layers
        """
        super().__init__()
        self.__mode = mode.lower()
        assert self.__mode in self.__MODES__
        self.__down_scale = down_scale
        self.__in_channels = in_channels
        self.__out_channels = out_channels
        self.config = ED(deepcopy(config))
        if self.__DEBUG__:
            print(f"configuration of {self.__name__} is as follows\n{dict_to_str(self.config)}")

        self.down_sample = DownSample(
            down_scale=self.__down_scale,
            in_channels=self.__in_channels,
            batch_norm=False,
            mode=mode,
        )
        self.branched_conv = BranchedConv(
            in_channels=self.__in_channels,
            out_channels=self.__out_channels,
            filter_lengths=filter_lengths,
            subsample_lengths=1,
            dilations=dilations,
            groups=groups,
            dropouts=dropouts,
            **(self.config)
        )
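
`BranchedConv` and `DownSample` are repo-specific layers; judging from the annotation `out_channels: Sequence[Sequence[int]]`, each sub-sequence lists the channel counts of one parallel branch (this `__init__` appears to be the one of `DownBranchedDoubleConv`, shown in full in Example #4). A self-contained toy stand-in under that assumption, with illustrative names only:

import torch
from torch import nn
from typing import List, Sequence

class ToyBranchedConv(nn.Module):
    """Toy stand-in for `BranchedConv`: parallel conv stacks over one input."""
    def __init__(self, in_channels: int, out_channels: Sequence[Sequence[int]]):
        super().__init__()
        self.branches = nn.ModuleList()
        for branch_channels in out_channels:    # one sub-sequence per branch
            layers: List[nn.Module] = []
            prev = in_channels
            for oc in branch_channels:          # one conv per listed channel count
                layers += [nn.Conv1d(prev, oc, kernel_size=3, padding=1), nn.ReLU()]
                prev = oc
            self.branches.append(nn.Sequential(*layers))

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        # each branch sees the same (already down-sampled) input
        return [branch(x) for branch in self.branches]

x = torch.randn(2, 32, 100)
outs = ToyBranchedConv(32, [[48, 64], [48, 64]])(x)
print([tuple(o.shape) for o in outs])  # [(2, 64, 100), (2, 64, 100)]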
Example #3
    def __init__(self,
                 down_scale: int,
                 in_channels: int,
                 out_channels: int,
                 filter_lengths: Union[Sequence[int], int],
                 groups: int = 1,
                 dropouts: Union[Sequence[float], float] = 0.0,
                 mid_channels: Optional[int] = None,
                 mode: str = "max",
                 **config) -> None:
        """ finished, checked,

        Parameters
        ----------
        down_scale: int,
            down sampling scale
        in_channels: int,
            number of channels in the input
        out_channels: int,
            number of channels produced by the last convolutional layer
        filter_lengths: int or sequence of int,
            length(s) of the filters (kernel size)
        groups: int, default 1,
            connection pattern (of channels) of the inputs and outputs
        dropouts: float or sequence of float, default 0.0,
            dropout ratio after each `Conv_Bn_Activation`
        mid_channels: int, optional,
            number of channels produced by the first convolutional layer,
            defaults to `out_channels`
        mode: str, default "max",
            mode for down sampling,
            can be one of `DownSample.__MODES__`
        config: dict,
            other parameters, including
            activation choices, weight initializer, batch normalization choices, etc.
            for the convolutional layers
        """
        super().__init__()
        self.__mode = mode.lower()
        assert self.__mode in self.__MODES__
        self.__down_scale = down_scale
        self.__in_channels = in_channels
        self.__mid_channels = mid_channels if mid_channels is not None else out_channels
        self.__out_channels = out_channels
        self.config = ED(deepcopy(config))
        if self.__DEBUG__:
            print(
                f"configuration of {self.__name__} is as follows\n{dict_to_str(self.config)}"
            )

        self.add_module(
            "down_sample",
            DownSample(
                down_scale=self.__down_scale,
                in_channels=self.__in_channels,
                batch_norm=False,
                mode=mode,
            ))
        self.add_module(
            "double_conv",
            DoubleConv(in_channels=self.__in_channels,
                       out_channels=self.__out_channels,
                       filter_lengths=filter_lengths,
                       subsample_lengths=1,
                       groups=groups,
                       dropouts=dropouts,
                       mid_channels=self.__mid_channels,
                       **(self.config)),
        )
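
The `add_module` calls register children by name; this pattern only yields a working `forward` if the enclosing class inherits one that runs its children in registration order, as `nn.Sequential` does. A minimal self-contained sketch of that pattern, with toy layers standing in for the repo's `DownSample` and `DoubleConv`:

import torch
from torch import nn

class ToyDownDoubleConv(nn.Sequential):
    """Down-sample, then two convolutions; forward() is inherited from
    nn.Sequential, so the children run in the order they are registered."""
    def __init__(self, in_channels, mid_channels, out_channels, down_scale=2):
        super().__init__()
        self.add_module("down_sample", nn.MaxPool1d(kernel_size=down_scale))
        self.add_module("double_conv", nn.Sequential(
            nn.Conv1d(in_channels, mid_channels, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.Conv1d(mid_channels, out_channels, kernel_size=3, padding=1),
            nn.ReLU(),
        ))

x = torch.randn(2, 16, 100)
print(ToyDownDoubleConv(16, 32, 32)(x).shape)  # torch.Size([2, 32, 50])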
Example #4
class DownBranchedDoubleConv(nn.Module):
    """
    the bottom block of the `subtract_unet`
    """
    __DEBUG__ = True
    __name__ = "DownBranchedDoubleConv"
    __MODES__ = deepcopy(DownSample.__MODES__)

    def __init__(self,
                 down_scale: int,
                 in_channels: int,
                 out_channels: Sequence[Sequence[int]],
                 filter_lengths: Union[Sequence[Sequence[int]], Sequence[int], int],
                 dilations: Union[Sequence[Sequence[int]], Sequence[int], int] = 1,
                 groups: int = 1,
                 dropouts: Union[Sequence[float], float] = 0.0,
                 mode: str = "max",
                 **config) -> None:
        """ finished, NOT checked,

        Parameters
        ----------
        down_scale: int,
            down sampling scale
        in_channels: int,
            number of channels in the input
        out_channels: sequence of sequence of int,
            number of channels produced by the (last) convolutional layer(s)
        filter_lengths: int or sequence of int or sequence of sequences of int,
            length(s) of the filters (kernel size)
        dilations: int or sequence of int or sequence of sequences of int, default 1,
            dilation(s) of the convolutions
        groups: int, default 1,
            connection pattern (of channels) of the inputs and outputs
        dropouts: float or sequence of float, default 0.0,
            dropout ratio after each `Conv_Bn_Activation`
        mode: str, default "max",
            mode for down sampling,
            can be one of `DownSample.__MODES__`
        config: dict,
            other parameters, including
            activation choices, weight initializer, batch normalization choices, etc.
            for the convolutional layers
        """
        super().__init__()
        self.__mode = mode.lower()
        assert self.__mode in self.__MODES__
        self.__down_scale = down_scale
        self.__in_channels = in_channels
        self.__out_channels = out_channels
        self.config = ED(deepcopy(config))
        if self.__DEBUG__:
            print(
                f"configuration of {self.__name__} is as follows\n{dict_to_str(self.config)}"
            )

        self.down_sample = DownSample(
            down_scale=self.__down_scale,
            in_channels=self.__in_channels,
            batch_norm=False,
            mode=mode,
        )
        self.branched_conv = BranchedConv(in_channels=self.__in_channels,
                                          out_channels=self.__out_channels,
                                          filter_lengths=filter_lengths,
                                          subsample_lengths=1,
                                          dilations=dilations,
                                          groups=groups,
                                          dropouts=dropouts,
                                          **(self.config))

    def forward(self, input: Tensor) -> Tensor:
        """
        input: of shape (batch_size, in_channels, seq_len)
        out: of shape (batch_size, (n_branches + 1) * branch_channels, seq_len // down_scale),
            the outputs of all branches, together with their difference,
            concatenated along the channel axis
        """
        out = self.down_sample(input)
        out = self.branched_conv(out)
        # SUBTRACT
        # currently (micro scope) - (macro scope)
        # TODO: consider (macro scope) - (micro scope)
        out.append(out[0] - out[1])
        out = torch.cat(out, dim=1)  # concatenate along the channel axis
        return out

    def compute_output_shape(
            self,
            seq_len: int,
            batch_size: Optional[int] = None) -> Sequence[Union[int, None]]:
        """ finished, checked,

        Parameters
        ----------
        seq_len: int,
            length of the 1d sequence
        batch_size: int, optional,
            the batch size, can be None

        Returns
        -------
        output_shape: sequence,
            the output shape of this `DownDoubleConv` layer, given `seq_len` and `batch_size`
        """
        _seq_len = seq_len
        output_shape = self.down_sample.compute_output_shape(seq_len=_seq_len)
        _, _, _seq_len = output_shape
        output_shapes = self.branched_conv.compute_output_shape(seq_len=_seq_len)
        # output_shape = output_shapes[0][0], sum([s[1] for s in output_shapes]), output_shapes[0][-1]
        n_branches = len(output_shapes)
        # all branches share the same channel count; the extra `+ 1` accounts
        # for the difference branch appended in `forward`
        output_shape = (
            output_shapes[0][0],
            (n_branches + 1) * output_shapes[0][1],
            output_shapes[0][-1],
        )
        return output_shape
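
A quick numeric sanity check of the subtract-and-concatenate step in `forward` against `compute_output_shape`, using plain tensors in place of real branch outputs:

import torch

# two equal-shape branch outputs, as `branched_conv` would return
micro = torch.randn(2, 64, 50)     # branch 0: "micro scope" features
macro = torch.randn(2, 64, 50)     # branch 1: "macro scope" features
outs = [micro, macro]
outs.append(outs[0] - outs[1])     # the SUBTRACT step: (micro scope) - (macro scope)
out = torch.cat(outs, dim=1)       # concatenate along the channel axis
print(out.shape)                   # torch.Size([2, 192, 50])
# matches compute_output_shape: (n_branches + 1) * 64 = 3 * 64 = 192 channels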