Example #1
    def __init__(self, height: int, width: int, initial_channels: int,
                 output_size: int):
        super().__init__()
        self.h_size = output_size
        # Track the spatial shape after each conv so the flattened size
        # feeding the dense layer is known up front.
        conv_1_hw = conv_output_shape((height, width), 8, 4)
        conv_2_hw = conv_output_shape(conv_1_hw, 4, 2)
        conv_3_hw = conv_output_shape(conv_2_hw, 3, 1)
        self.final_flat = conv_3_hw[0] * conv_3_hw[1] * 64

        # Three convolutions (8x8/stride 4, 4x4/stride 2, 3x3/stride 1),
        # the layout of the DQN "Nature CNN".
        self.conv_layers = nn.Sequential(
            nn.Conv2d(initial_channels, 32, [8, 8], [4, 4]),
            nn.LeakyReLU(),
            nn.Conv2d(32, 64, [4, 4], [2, 2]),
            nn.LeakyReLU(),
            nn.Conv2d(64, 64, [3, 3], [1, 1]),
            nn.LeakyReLU(),
        )
        self.dense = nn.Sequential(
            linear_layer(
                self.final_flat,
                self.h_size,
                kernel_init=Initialization.KaimingHeNormal,
                kernel_gain=1.41,  # Use ReLU gain
            ),
            nn.LeakyReLU(),
        )
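
The conv_output_shape helper is referenced but not defined in these excerpts. A minimal sketch of what it has to compute, assuming the standard convolution arithmetic and this signature (both are assumptions, not part of the excerpt):

from typing import Tuple

def conv_output_shape(h_w: Tuple[int, int], kernel_size: int = 1,
                      stride: int = 1, pad: int = 0) -> Tuple[int, int]:
    # floor((size + 2 * pad - kernel) / stride) + 1, per spatial dimension.
    height = (h_w[0] + 2 * pad - kernel_size) // stride + 1
    width = (h_w[1] + 2 * pad - kernel_size) // stride + 1
    return height, width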
Example #2
    def __init__(self, channel: int):
        """
        Creates a ResNet block.
        :param channel: The number of channels in the input (and output)
        tensors of the convolutions.
        """
        super().__init__()
        self.layers = nn.Sequential(
            Swish(),
            nn.Conv2d(channel, channel, [3, 3], [1, 1], padding=1),
            Swish(),
            nn.Conv2d(channel, channel, [3, 3], [1, 1], padding=1),
        )
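
The constructor above only builds the layers; the residual skip itself would live in forward, which is not part of the excerpt. A minimal sketch of the usual pattern, assuming torch is imported:

    def forward(self, input_tensor: torch.Tensor) -> torch.Tensor:
        # Residual connection: add the block's input to its output.
        # padding=1 on the 3x3 convs keeps the spatial shape, so the
        # shapes match.
        return input_tensor + self.layers(input_tensor)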
Example #3
    def __init__(self, height: int, width: int, initial_channels: int,
                 output_size: int):
        super().__init__()
        self.output_size = output_size
        self.input_size = height * width * initial_channels
        self.dense = nn.Sequential(
            linear_layer(
                self.input_size,
                self.output_size,
                kernel_init=Initialization.KaimingHeNormal,
                kernel_gain=1.41,  # Use ReLU gain
            ),
            nn.LeakyReLU(),
        )
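
Since this encoder has no convolutions, its forward pass only needs to flatten the image before the dense layer. A hedged sketch (the forward method is an assumption), taking the observation as a (batch, channels, height, width) tensor:

    def forward(self, visual_obs: torch.Tensor) -> torch.Tensor:
        # Collapse each observation to a flat vector of length input_size.
        hidden = visual_obs.reshape(-1, self.input_size)
        return self.dense(hidden)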
Example #4
    def __init__(
        self, height: int, width: int, initial_channels: int, output_size: int
    ):
        super().__init__()
        self.h_size = output_size
        conv_1_hw = conv_output_shape((height, width), 3, 1)
        conv_2_hw = conv_output_shape(conv_1_hw, 3, 1)
        self.final_flat = conv_2_hw[0] * conv_2_hw[1] * 144

        # Two unpadded 3x3, stride-1 convolutions.
        self.conv_layers = nn.Sequential(
            nn.Conv2d(initial_channels, 35, [3, 3], [1, 1]),
            nn.LeakyReLU(),
            nn.Conv2d(35, 144, [3, 3], [1, 1]),
            nn.LeakyReLU(),
        )
        self.dense = nn.Sequential(
            linear_layer(
                self.final_flat,
                self.h_size,
                kernel_init=Initialization.KaimingHeNormal,
                kernel_gain=1.0,
            ),
            nn.LeakyReLU(),
        )
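
To make the shape bookkeeping concrete: each unpadded 3x3, stride-1 convolution trims one pixel from every border, so for a hypothetical 20x20 input the flattened size works out as:

conv_1_hw = conv_output_shape((20, 20), 3, 1)   # (18, 18)
conv_2_hw = conv_output_shape(conv_1_hw, 3, 1)  # (16, 16)
final_flat = conv_2_hw[0] * conv_2_hw[1] * 144  # 16 * 16 * 144 = 36864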
Example #5
    def __init__(self, height: int, width: int, initial_channels: int,
                 output_size: int):
        super().__init__()
        n_channels = [16, 32, 32]  # channels for each stack
        n_blocks = 2  # number of residual blocks per stack
        layers = []
        last_channel = initial_channels
        for channel in n_channels:
            # Each stack: conv -> max-pool (downsample) -> residual blocks.
            layers.append(
                nn.Conv2d(last_channel, channel, [3, 3], [1, 1], padding=1))
            layers.append(nn.MaxPool2d([3, 3], [2, 2]))
            height, width = pool_out_shape((height, width), 3)
            for _ in range(n_blocks):
                layers.append(ResNetBlock(channel))
            last_channel = channel
        layers.append(Swish())
        self.dense = linear_layer(
            n_channels[-1] * height * width,
            output_size,
            kernel_init=Initialization.KaimingHeNormal,
            kernel_gain=1.0,
        )
        self.sequential = nn.Sequential(*layers)
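
pool_out_shape is likewise referenced but not defined here. Given the MaxPool2d([3, 3], [2, 2]) call it accompanies, a plausible sketch hardcodes stride 2 (both the signature and that choice are assumptions):

from typing import Tuple

def pool_out_shape(h_w: Tuple[int, int], kernel_size: int) -> Tuple[int, int]:
    # Unpadded max-pool with stride 2: floor((size - kernel) / 2) + 1.
    height = (h_w[0] - kernel_size) // 2 + 1
    width = (h_w[1] - kernel_size) // 2 + 1
    return height, width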