Example 1
    def __init__(
        self,
        in_channels: int,
        growth_rate: int,
        bn_size: int,
        dropout_rate: float,
        activation: str = "relu",
    ) -> None:
        super().__init__()
        activation_fn = activation_function(activation)
        self.dense_layer = [
            nn.BatchNorm2d(in_channels),
            activation_fn,
            nn.Conv2d(
                in_channels=in_channels,
                out_channels=bn_size * growth_rate,
                kernel_size=1,
                stride=1,
                bias=False,
            ),
            nn.BatchNorm2d(bn_size * growth_rate),
            activation_fn,
            nn.Conv2d(
                in_channels=bn_size * growth_rate,
                out_channels=growth_rate,
                kernel_size=3,
                stride=1,
                padding=1,
                bias=False,
            ),
        ]
        if dropout_rate:
            self.dense_layer.append(nn.Dropout(p=dropout_rate))

        self.dense_layer = nn.Sequential(*self.dense_layer)
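
The layer's forward pass is not included in this snippet. In a standard DenseNet bottleneck layer the `growth_rate` new feature maps produced by `dense_layer` are concatenated with the input, which is why the number of input channels grows by `growth_rate` at every layer. A minimal, self-contained sketch of that pattern (with `nn.ReLU` standing in for the project's `activation_function` helper):

import torch
from torch import nn


class DenseLayerSketch(nn.Module):
    """Hypothetical stand-in: BN -> act -> 1x1 conv -> BN -> act -> 3x3 conv, then concat."""

    def __init__(self, in_channels: int, growth_rate: int, bn_size: int) -> None:
        super().__init__()
        self.dense_layer = nn.Sequential(
            nn.BatchNorm2d(in_channels),
            nn.ReLU(),
            nn.Conv2d(in_channels, bn_size * growth_rate, kernel_size=1, bias=False),
            nn.BatchNorm2d(bn_size * growth_rate),
            nn.ReLU(),
            nn.Conv2d(bn_size * growth_rate, growth_rate, kernel_size=3, padding=1, bias=False),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Append the new feature maps to the input along the channel dimension.
        return torch.cat([x, self.dense_layer(x)], dim=1)


layer = DenseLayerSketch(in_channels=64, growth_rate=32, bn_size=4)
print(layer(torch.randn(2, 64, 8, 8)).shape)  # torch.Size([2, 96, 8, 8]): 64 + growth_rate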
Example 2
    def __init__(self,
                 in_channels: int = 1,
                 block_sizes: Union[int, List[int]] = (32, 64),
                 depths: Union[int, List[int]] = (2, 2),
                 activation: str = "relu",
                 block: Type[nn.Module] = BasicBlock,
                 levels: int = 1,
                 *args,
                 **kwargs) -> None:
        super().__init__()
        # The defaults are tuples, so accept tuples as well as lists here.
        self.block_sizes = (list(block_sizes) if isinstance(block_sizes, (list, tuple))
                            else [block_sizes] * levels)
        self.depths = (list(depths) if isinstance(depths, (list, tuple))
                       else [depths] * levels)
        self.activation = activation
        self.gate = nn.Sequential(
            nn.Conv2d(
                in_channels=in_channels,
                out_channels=self.block_sizes[0],
                kernel_size=7,
                stride=2,
                padding=1,
                bias=False,
            ),
            nn.BatchNorm2d(self.block_sizes[0]),
            activation_function(self.activation),
            # nn.MaxPool2d(kernel_size=2, stride=2, padding=1),
        )

        self.blocks = self._configure_blocks(block)
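
As a quick sanity check of the gate above, here is what the 7x7, stride-2 convolution does to the spatial size, assuming a single-channel 28x28 input and substituting `nn.ReLU` for `activation_function`:

import torch
from torch import nn

gate = nn.Sequential(
    nn.Conv2d(1, 32, kernel_size=7, stride=2, padding=1, bias=False),
    nn.BatchNorm2d(32),
    nn.ReLU(),
)
# floor((28 + 2 * 1 - 7) / 2) + 1 = 12
print(gate(torch.randn(1, 1, 28, 28)).shape)  # torch.Size([1, 32, 12, 12])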
Example 3
 def __init__(
     self,
     channels: List[int],
     activation: str,
     num_groups: int,
     dropout_rate: float = 0.1,
     kernel_size: int = 3,
     dilation: int = 1,
     padding: int = 0,
 ) -> None:
     super().__init__()
     self.channels = channels
     self.dropout_rate = dropout_rate
     self.kernel_size = kernel_size
     self.dilation = dilation
     self.padding = padding
     self.num_groups = num_groups
     self.activation = activation_function(activation)
     self.block = self._configure_block()
     self.residual_conv = nn.Sequential(
         nn.Conv2d(self.channels[0],
                   self.channels[-1],
                   kernel_size=3,
                   stride=1,
                   padding=1),
         self.activation,
     )
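
`_configure_block` and the forward pass are not shown. A block with a parallel `residual_conv` of this shape typically computes `block(x) + residual_conv(x)`; a hedged, self-contained sketch with a plain convolution standing in for the configured block:

import torch
from torch import nn


class ResidualSketch(nn.Module):
    """Hypothetical residual pattern: block(x) + residual_conv(x)."""

    def __init__(self, in_channels: int, out_channels: int) -> None:
        super().__init__()
        # Stand-in for the configured block.
        self.block = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
            nn.ReLU(),
        )
        # Skip path projects the input to the output channel count, as above.
        self.residual_conv = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.block(x) + self.residual_conv(x)


print(ResidualSketch(32, 64)(torch.randn(1, 32, 8, 8)).shape)  # torch.Size([1, 64, 8, 8])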
Example 4
    def _configure_densenet(
        self,
        in_channels: int,
        base_channels: int,
        num_classes: int,
        growth_rate: int,
        block_config: List[int],
        bn_size: int,
        dropout_rate: float,
        classifier: bool,
        activation: str,
    ) -> nn.Sequential:
        activation_fn = activation_function(activation)
        densenet = [
            nn.Conv2d(
                in_channels=in_channels,
                out_channels=base_channels,
                kernel_size=3,
                stride=1,
                padding=1,
                bias=False,
            ),
            nn.BatchNorm2d(base_channels),
            activation_fn,
        ]

        num_features = base_channels

        for i, num_layers in enumerate(block_config):
            densenet.append(
                _DenseBlock(
                    num_layers=num_layers,
                    in_channels=num_features,
                    bn_size=bn_size,
                    growth_rate=growth_rate,
                    dropout_rate=dropout_rate,
                    activation=activation,
                ))
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                densenet.append(
                    _Transition(
                        in_channels=num_features,
                        out_channels=num_features // 2,
                        activation=activation,
                    ))
                num_features = num_features // 2

        densenet.append(activation_fn)

        if classifier:
            densenet.append(nn.AdaptiveAvgPool2d((1, 1)))
            densenet.append(Rearrange("b c h w -> b (c h w)"))
            densenet.append(
                nn.Linear(in_features=num_features, out_features=num_classes))

        return nn.Sequential(*densenet)
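
A worked example of how `num_features` evolves in `_configure_densenet`, using hypothetical values `base_channels=64`, `growth_rate=32`, `block_config=[6, 12]`:

base_channels, growth_rate = 64, 32
block_config = [6, 12]

num_features = base_channels
for i, num_layers in enumerate(block_config):
    num_features += num_layers * growth_rate  # each dense block adds growth_rate channels per layer
    if i != len(block_config) - 1:
        num_features //= 2                    # transitions halve the channel count
# 64 + 6 * 32 = 256 -> transition -> 128; 128 + 12 * 32 = 512 (no transition after the last block)
print(num_features)  # 512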
Example 5
 def __init__(self,
              in_channels: int,
              out_channels: int,
              activation: str = "relu") -> None:
     super().__init__()
     self.in_channels = in_channels
     self.out_channels = out_channels
     self.blocks = nn.Identity()
     self.activation_fn = activation_function(activation)
     self.shortcut = nn.Identity()
Example 6
    def _build_network(
        self,
        channels: Tuple[int, ...],
        kernel_sizes: Tuple[int, ...],
        strides: Tuple[int, ...],
        max_pool_kernel: int,
        dropout_rate: float,
        activation: str,
    ) -> nn.Sequential:
        # Load activation function.
        activation_fn = activation_function(activation)

        channels = list(channels)
        in_channels = channels.pop(0)
        configuration = zip(channels, kernel_sizes, strides)

        modules = nn.ModuleList([])

        for i, (out_channels, kernel_size, stride) in enumerate(configuration):
            # Add max pool to reduce output size.
            if i == len(channels) // 2:
                modules.append(nn.MaxPool2d(max_pool_kernel))
            if i == 0:
                modules.append(
                    nn.Conv2d(in_channels,
                              out_channels,
                              kernel_size,
                              stride=stride,
                              padding=1))
            else:
                modules.append(
                    nn.Sequential(
                        activation_fn,
                        nn.BatchNorm2d(in_channels),
                        nn.Conv2d(
                            in_channels,
                            out_channels,
                            kernel_size,
                            stride=stride,
                            padding=1,
                        ),
                    ))

            if dropout_rate:
                modules.append(nn.Dropout2d(p=dropout_rate))

            in_channels = out_channels

        return nn.Sequential(*modules)
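
For illustration only, with hypothetical arguments `channels=(1, 32, 64, 128, 256)`: after `channels.pop(0)` there are four convolution stages, and the max pool is inserted before the stage at index `len(channels) // 2 == 2`:

channels = [1, 32, 64, 128, 256]
in_channels = channels.pop(0)
for i, out_channels in enumerate(channels):
    pooled = "  <- MaxPool2d inserted before this conv" if i == len(channels) // 2 else ""
    print(f"conv {i}: {in_channels} -> {out_channels}{pooled}")
    in_channels = out_channels
# conv 0: 1 -> 32
# conv 1: 32 -> 64
# conv 2: 64 -> 128  <- MaxPool2d inserted before this conv
# conv 3: 128 -> 256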
Example 7
    def __init__(
        self,
        channels: Tuple[int, ...] = (1, 32, 64),
        kernel_sizes: Tuple[int, ...] = (3, 3, 2),
        hidden_size: Tuple[int, ...] = (9216, 128),
        dropout_rate: float = 0.2,
        num_classes: int = 10,
        activation_fn: Optional[str] = "relu",
    ) -> None:
        """Initialization of the LeNet network.

        Args:
            channels (Tuple[int, ...]): Channels in the convolutional layers. Defaults to (1, 32, 64).
            kernel_sizes (Tuple[int, ...]): Kernel sizes in the convolutional layers. Defaults to (3, 3, 2).
            hidden_size (Tuple[int, ...]): Size of the flattened output from the convolutional layers.
                Defaults to (9216, 128).
            dropout_rate (float): The dropout rate. Defaults to 0.2.
            num_classes (int): Number of classes. Defaults to 10.
            activation_fn (Optional[str]): The name of the non-linear activation function. Defaults to relu.

        """
        super().__init__()

        activation_fn = activation_function(activation_fn)

        self.layers = [
            nn.Conv2d(
                in_channels=channels[0],
                out_channels=channels[1],
                kernel_size=kernel_sizes[0],
            ),
            activation_fn,
            nn.Conv2d(
                in_channels=channels[1],
                out_channels=channels[2],
                kernel_size=kernel_sizes[1],
            ),
            activation_fn,
            nn.MaxPool2d(kernel_sizes[2]),
            nn.Dropout(p=dropout_rate),
            Rearrange("b c h w -> b (c h w)"),
            nn.Linear(in_features=hidden_size[0], out_features=hidden_size[1]),
            activation_fn,
            nn.Dropout(p=dropout_rate),
            nn.Linear(in_features=hidden_size[1], out_features=num_classes),
        ]

        self.layers = nn.Sequential(*self.layers)
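
The default `hidden_size[0] = 9216` follows from a 28x28 input (e.g. MNIST, an assumption here): the two unpadded 3x3 convolutions shrink 28 -> 26 -> 24, the 2x2 max pool halves that to 12, and 64 channels give 64 * 12 * 12 = 9216. A quick check with `nn.ReLU` in place of the activation helper:

import torch
from torch import nn
from einops.layers.torch import Rearrange

features = nn.Sequential(
    nn.Conv2d(1, 32, kernel_size=3), nn.ReLU(),
    nn.Conv2d(32, 64, kernel_size=3), nn.ReLU(),
    nn.MaxPool2d(2),
    Rearrange("b c h w -> b (c h w)"),
)
print(features(torch.randn(1, 1, 28, 28)).shape)  # torch.Size([1, 9216])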
Example 8
    def __init__(
        self,
        input_size: int = 784,
        num_classes: int = 10,
        hidden_size: Union[int, List] = 128,
        num_layers: int = 3,
        dropout_rate: float = 0.2,
        activation_fn: str = "relu",
    ) -> None:
        """Initialization of the MLP network.

        Args:
            input_size (int): The input shape of the network. Defaults to 784.
            num_classes (int): Number of classes in the dataset. Defaults to 10.
            hidden_size (Union[int, List]): The number of `neurons` in each hidden layer. Defaults to 128.
            num_layers (int): The number of hidden layers. Defaults to 3.
            dropout_rate (float): The dropout rate at each layer. Defaults to 0.2.
            activation_fn (str): Name of the activation function in the hidden layers. Defaults to
                relu.

        """
        super().__init__()

        activation_fn = activation_function(activation_fn)

        if isinstance(hidden_size, int):
            hidden_size = [hidden_size] * num_layers

        self.layers = [
            Rearrange("b c h w -> b (c h w)"),
            nn.Linear(in_features=input_size, out_features=hidden_size[0]),
            activation_fn,
        ]

        for i in range(num_layers - 1):
            self.layers += [
                nn.Linear(in_features=hidden_size[i],
                          out_features=hidden_size[i + 1]),
                activation_fn,
            ]

            if dropout_rate:
                self.layers.append(nn.Dropout(p=dropout_rate))

        self.layers.append(
            nn.Linear(in_features=hidden_size[-1], out_features=num_classes))

        self.layers = nn.Sequential(*self.layers)
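
With the defaults (`input_size=784`, `hidden_size=128`, `num_layers=3`, `num_classes=10`) the loop builds a 784 -> 128 -> 128 -> 128 -> 10 stack, with dropout after every hidden layer except the first. A standalone reproduction using `nn.ReLU` for the activation:

import torch
from torch import nn
from einops.layers.torch import Rearrange

hidden_size = [128] * 3
layers = [Rearrange("b c h w -> b (c h w)"), nn.Linear(784, hidden_size[0]), nn.ReLU()]
for i in range(2):  # num_layers - 1
    layers += [nn.Linear(hidden_size[i], hidden_size[i + 1]), nn.ReLU(), nn.Dropout(p=0.2)]
layers.append(nn.Linear(hidden_size[-1], 10))

mlp = nn.Sequential(*layers)
print(mlp(torch.randn(8, 1, 28, 28)).shape)  # torch.Size([8, 10])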
Example 9
    def __init__(
        self,
        hidden_dim: int,
        expansion_dim: int,
        dropout_rate: float,
        activation: str = "relu",
    ) -> None:
        super().__init__()

        in_projection = (nn.Sequential(nn.Linear(hidden_dim, expansion_dim),
                                       activation_function(activation))
                         if activation != "glu" else GEGLU(
                             hidden_dim, expansion_dim))

        self.layer = nn.Sequential(
            in_projection,
            nn.Dropout(p=dropout_rate),
            nn.Linear(in_features=expansion_dim, out_features=hidden_dim),
        )
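
`GEGLU` is referenced but not defined in this snippet. A common formulation (the GELU-gated linear unit from Shazeer's "GLU Variants Improve Transformer", 2020) projects to twice the expansion width and gates one half with the GELU of the other; a hedged sketch:

import torch
import torch.nn.functional as F
from torch import nn


class GEGLUSketch(nn.Module):
    """Hypothetical GEGLU: one projection to 2 * expansion_dim, one half gating the other."""

    def __init__(self, hidden_dim: int, expansion_dim: int) -> None:
        super().__init__()
        self.proj = nn.Linear(hidden_dim, 2 * expansion_dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x, gate = self.proj(x).chunk(2, dim=-1)
        return x * F.gelu(gate)


print(GEGLUSketch(256, 1024)(torch.randn(2, 10, 256)).shape)  # torch.Size([2, 10, 1024])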
Example 10
    def __init__(
        self,
        in_channels: int,
        channels: List[int],
        kernel_sizes: List[int],
        strides: List[int],
        num_residual_layers: int,
        embedding_dim: int,
        num_embeddings: int,
        beta: float = 0.25,
        activation: str = "leaky_relu",
        dropout_rate: float = 0.0,
    ) -> None:
        super().__init__()

        if dropout_rate:
            if activation == "selu":
                dropout = nn.AlphaDropout(p=dropout_rate)
            else:
                dropout = nn.Dropout(p=dropout_rate)
        else:
            dropout = None

        self.embedding_dim = embedding_dim
        self.num_embeddings = num_embeddings
        self.beta = beta
        activation = activation_function(activation)

        # Configure encoder.
        self.encoder = self._build_encoder(
            in_channels,
            channels,
            kernel_sizes,
            strides,
            num_residual_layers,
            activation,
            dropout,
        )

        # Configure Vector Quantizer.
        self.vector_quantizer = VectorQuantizer(
            self.num_embeddings, self.embedding_dim, self.beta
        )
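
`VectorQuantizer` is not shown here. In the original VQ-VAE formulation (van den Oord et al., 2017) it snaps each latent vector to its nearest codebook entry, copies gradients straight through, and scales the commitment term by `beta`. A compact, hedged sketch of that quantization step:

import torch
import torch.nn.functional as F
from torch import nn


class VectorQuantizerSketch(nn.Module):
    """Hypothetical nearest-neighbour quantizer with straight-through gradients."""

    def __init__(self, num_embeddings: int, embedding_dim: int, beta: float = 0.25) -> None:
        super().__init__()
        self.codebook = nn.Embedding(num_embeddings, embedding_dim)
        self.beta = beta

    def forward(self, z):
        # z: (n, embedding_dim) flattened latent vectors.
        distances = torch.cdist(z, self.codebook.weight)  # (n, num_embeddings)
        z_q = self.codebook(distances.argmin(dim=-1))
        # Codebook loss plus beta-weighted commitment loss.
        loss = F.mse_loss(z_q, z.detach()) + self.beta * F.mse_loss(z, z_q.detach())
        # Straight-through estimator: gradients flow from z_q back to z.
        z_q = z + (z_q - z).detach()
        return z_q, loss


z_q, loss = VectorQuantizerSketch(num_embeddings=512, embedding_dim=64)(torch.randn(196, 64))
print(z_q.shape, loss.item())  # torch.Size([196, 64]) and a scalar loss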
Example 11
 def __init__(
     self,
     in_channels: int,
     out_channels: int,
     activation: str = "relu",
 ) -> None:
     super().__init__()
     activation_fn = activation_function(activation)
     self.transition = nn.Sequential(
         nn.BatchNorm2d(in_channels),
         activation_fn,
         nn.Conv2d(
             in_channels=in_channels,
             out_channels=out_channels,
             kernel_size=1,
             stride=1,
             bias=False,
         ),
         nn.AvgPool2d(kernel_size=2, stride=2),
     )
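
A quick check of what this transition does: the 1x1 convolution changes the channel count (in `_configure_densenet` above it is called with `num_features // 2`) and the stride-2 average pool halves the spatial size. With `nn.ReLU` standing in for the activation helper:

import torch
from torch import nn

transition = nn.Sequential(
    nn.BatchNorm2d(256),
    nn.ReLU(),
    nn.Conv2d(256, 128, kernel_size=1, stride=1, bias=False),
    nn.AvgPool2d(kernel_size=2, stride=2),
)
print(transition(torch.randn(1, 256, 16, 16)).shape)  # torch.Size([1, 128, 8, 8])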
Example 12
    def __init__(
        self,
        channels: List[int],
        kernel_sizes: List[int],
        strides: List[int],
        num_residual_layers: int,
        embedding_dim: int,
        upsampling: Optional[List[List[int]]] = None,
        activation: str = "leaky_relu",
        dropout_rate: float = 0.0,
    ) -> None:
        super().__init__()

        if dropout_rate:
            if activation == "selu":
                dropout = nn.AlphaDropout(p=dropout_rate)
            else:
                dropout = nn.Dropout(p=dropout_rate)
        else:
            dropout = None

        self.upsampling = upsampling

        self.res_block = nn.ModuleList([])
        self.upsampling_block = nn.ModuleList([])

        self.embedding_dim = embedding_dim
        activation = activation_function(activation)

        # Configure decoder.
        self.decoder = self._build_decoder(
            channels,
            kernel_sizes,
            strides,
            num_residual_layers,
            activation,
            dropout,
        )
Example 13
    def __init__(
        self,
        in_planes: int,
        out_planes: int,
        dropout_rate: float,
        stride: int = 1,
        activation: str = "relu",
    ) -> None:
        super().__init__()
        self.in_planes = in_planes
        self.out_planes = out_planes
        self.dropout_rate = dropout_rate
        self.stride = stride
        self.activation = activation_function(activation)

        # Build blocks.
        self.blocks = nn.Sequential(
            nn.BatchNorm2d(self.in_planes),
            self.activation,
            conv3x3(in_planes=self.in_planes, out_planes=self.out_planes),
            nn.Dropout(p=self.dropout_rate),
            nn.BatchNorm2d(self.out_planes),
            self.activation,
            conv3x3(
                in_planes=self.out_planes,
                out_planes=self.out_planes,
                stride=self.stride,
            ),
        )

        self.shortcut = (nn.Sequential(
            nn.Conv2d(
                in_channels=self.in_planes,
                out_channels=self.out_planes,
                kernel_size=1,
                stride=self.stride,
                bias=False,
            ), ) if self._apply_shortcut else None)
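
`_apply_shortcut` and `forward` are not shown. In a pre-activation wide-residual block of this shape the usual pattern is `blocks(x) + shortcut(x)`, falling back to the identity when no projection is needed; a hedged sketch with plain convolutions in place of `conv3x3`:

import torch
from torch import nn


def wide_block_forward(blocks: nn.Module, shortcut: nn.Module, x: torch.Tensor) -> torch.Tensor:
    """Hypothetical forward: residual add, with an optional 1x1 projection on the skip path."""
    residual = shortcut(x) if shortcut is not None else x
    return blocks(x) + residual


blocks = nn.Sequential(
    nn.BatchNorm2d(16), nn.ReLU(),
    nn.Conv2d(16, 32, kernel_size=3, padding=1, bias=False),
    nn.BatchNorm2d(32), nn.ReLU(),
    nn.Conv2d(32, 32, kernel_size=3, stride=2, padding=1, bias=False),
)
shortcut = nn.Conv2d(16, 32, kernel_size=1, stride=2, bias=False)
print(wide_block_forward(blocks, shortcut, torch.randn(1, 16, 8, 8)).shape)  # torch.Size([1, 32, 4, 4])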
Example 14
    def __init__(
        self,
        in_channels: int = 1,
        in_planes: int = 16,
        num_classes: int = 80,
        depth: int = 16,
        width_factor: int = 10,
        dropout_rate: float = 0.0,
        num_layers: int = 3,
        block: Type[nn.Module] = WideBlock,
        num_stages: Optional[List[int]] = None,
        activation: str = "relu",
        use_decoder: bool = True,
    ) -> None:
        """The initialization of the WideResNet.

        Args:
            in_channels (int): Number of input channels. Defaults to 1.
            in_planes (int): Number of channels to use in the first output kernel. Defaults to 16.
            num_classes (int): Number of classes. Defaults to 80.
            depth (int): Set the number of blocks to use. Defaults to 16.
            width_factor (int): Factor for scaling the number of channels in the network. Defaults to 10.
            dropout_rate (float): The dropout rate. Defaults to 0.0.
            num_layers (int): Number of layers of blocks. Defaults to 3.
            block (Type[nn.Module]): The default block is WideBlock. Defaults to WideBlock.
            num_stages (List[int]): If given, will use these channel values. Defaults to None.
            activation (str): Name of the activation to use. Defaults to "relu".
            use_decoder (bool): If True, the network outputs character predictions; if False, it outputs a
                latent vector. Defaults to True.

        Raises:
            RuntimeError: If the depth is not of the size `6n+4`.

        """

        super().__init__()
        if (depth - 4) % 6 != 0:
            raise RuntimeError("Wide-resnet depth should be 6n+4")
        self.in_channels = in_channels
        self.in_planes = in_planes
        self.num_classes = num_classes
        self.num_blocks = (depth - 4) // 6
        self.width_factor = width_factor
        self.num_layers = num_layers
        self.block = block
        self.dropout_rate = dropout_rate
        self.activation = activation_function(activation)

        if num_stages is None:
            self.num_stages = [self.in_planes] + [
                self.in_planes * 2**n * self.width_factor
                for n in range(self.num_layers)
            ]
        else:
            self.num_stages = [self.in_planes] + num_stages

        self.num_stages = list(zip(self.num_stages, self.num_stages[1:]))
        self.strides = [1] + [2] * (self.num_layers - 1)

        self.encoder = nn.Sequential(
            conv3x3(in_planes=self.in_channels, out_planes=self.in_planes),
            *[
                self._configure_wide_layer(
                    in_planes=in_planes,
                    out_planes=out_planes,
                    stride=stride,
                    activation=activation,
                )
                for (in_planes,
                     out_planes), stride in zip(self.num_stages, self.strides)
            ],
        )

        self.decoder = (nn.Sequential(
            nn.BatchNorm2d(self.num_stages[-1][-1], momentum=0.8),
            self.activation,
            Reduce("b c h w -> b c", "mean"),
            nn.Linear(in_features=self.num_stages[-1][-1],
                      out_features=self.num_classes),
        ) if use_decoder else None)
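
With the defaults (`in_planes=16`, `width_factor=10`, `num_layers=3`, `depth=16`), `num_blocks = (16 - 4) // 6 = 2` and the stage widths come out as 16 -> 160 -> 320 -> 640, paired with strides (1, 2, 2). The arithmetic, reproduced standalone:

in_planes, width_factor, num_layers, depth = 16, 10, 3, 16

num_blocks = (depth - 4) // 6
num_stages = [in_planes] + [in_planes * 2**n * width_factor for n in range(num_layers)]
stages = list(zip(num_stages, num_stages[1:]))
strides = [1] + [2] * (num_layers - 1)

print(num_blocks)  # 2
print(stages)      # [(16, 160), (160, 320), (320, 640)]
print(strides)     # [1, 2, 2]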