Example #1
        def __init__(self,
                     input_shape,
                     z_size,
                     channels=3,
                     first_kernel=5,
                     first_stride=2,
                     second_kernel=5,
                     second_stride=2):
            nn.Module.__init__(self)
            # batchnorm in autoencoding is a thing
            # https://arxiv.org/pdf/1602.02282.pdf

            from mentalitystorm.util import conv_output_shape

            s1 = 32
            s2 = 64
            # encoder
            self.e_conv1 = nn.Conv2d(channels,
                                     s1,
                                     kernel_size=first_kernel,
                                     stride=first_stride)
            self.e_bn1 = nn.BatchNorm2d(s1)
            output_shape = conv_output_shape(input_shape,
                                             kernel_size=first_kernel,
                                             stride=first_stride)

            self.e_conv2 = nn.Conv2d(s1,
                                     s2,
                                     kernel_size=second_kernel,
                                     stride=second_stride)
            self.e_bn2 = nn.BatchNorm2d(s2)
            output_shape = conv_output_shape(output_shape,
                                             kernel_size=second_kernel,
                                             stride=second_stride)

            self.e_conv3 = nn.Conv2d(s2,
                                     s2,
                                     kernel_size=second_kernel,
                                     stride=second_stride)
            self.e_bn3 = nn.BatchNorm2d(s2)
            output_shape = conv_output_shape(output_shape,
                                             kernel_size=second_kernel,
                                             stride=second_stride)

            self.e_conv4 = nn.Conv2d(s2, s2, kernel_size=2, stride=2)
            self.e_bn4 = nn.BatchNorm2d(s2)
            output_shape = conv_output_shape(output_shape,
                                             kernel_size=2,
                                             stride=2)

            self.e_mean = nn.Conv2d(s2, z_size, kernel_size=2, stride=2)
            self.e_logvar = nn.Conv2d(s2, z_size, kernel_size=2, stride=2)
            self.z_shape = conv_output_shape(output_shape,
                                             kernel_size=2,
                                             stride=2)
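
All of the excerpts on this page are constructor bodies only; they assume that torch and torch.nn (as nn) are imported at module level, and Examples #3 and #4 additionally assume conv_output_shape is already in scope. The body of conv_output_shape from mentalitystorm.util is not shown. Judging only by how it is called here (a height/width input, kernel_size, stride, and an indexable (h, w) result), a minimal sketch using the standard Conv2d output-size formula (no padding, dilation 1 by default) could look like this; it is an assumption, not the library source:

import math

def conv_output_shape(h_w, kernel_size=1, stride=1, padding=0, dilation=1):
    # Hypothetical re-implementation (assumption): applies
    # floor((x + 2*padding - dilation*(kernel - 1) - 1) / stride + 1)
    # to the height and width of the input.
    def pair(v):
        return (v, v) if isinstance(v, int) else v
    h_w, kernel_size, stride = pair(h_w), pair(kernel_size), pair(stride)
    h = math.floor((h_w[0] + 2 * padding - dilation * (kernel_size[0] - 1) - 1) / stride[0] + 1)
    w = math.floor((h_w[1] + 2 * padding - dilation * (kernel_size[1] - 1) - 1) / stride[1] + 1)
    return h, w

For instance, an input of (210, 160) with kernel 5 and stride 2 would give (103, 78), which is then fed into the next call in the chain above.
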
Example #2
        def __init__(self,
                     input_shape,
                     z_size,
                     first_kernel=5,
                     first_stride=2,
                     second_kernel=5,
                     second_stride=2):
            nn.Module.__init__(self)
            # batchnorm in autoencoding is a thing
            # https://arxiv.org/pdf/1602.02282.pdf

            from mentalitystorm.util import conv_output_shape

            # encoder
            self.e_conv1 = nn.Conv2d(3,
                                     32,
                                     kernel_size=first_kernel,
                                     stride=first_stride)
            self.e_bn1 = nn.BatchNorm2d(32)
            output_shape = conv_output_shape(input_shape,
                                             kernel_size=first_kernel,
                                             stride=first_stride)

            self.e_conv2 = nn.Conv2d(32,
                                     128,
                                     kernel_size=second_kernel,
                                     stride=second_stride)
            self.e_bn2 = nn.BatchNorm2d(128)
            output_shape = conv_output_shape(output_shape,
                                             kernel_size=second_kernel,
                                             stride=second_stride)

            self.e_conv3 = nn.Conv2d(128,
                                     128,
                                     kernel_size=second_kernel,
                                     stride=second_stride)
            self.e_bn3 = nn.BatchNorm2d(128)
            self.z_shape = conv_output_shape(output_shape,
                                             kernel_size=second_kernel,
                                             stride=second_stride)

            self.e_mean = nn.Conv2d(128,
                                    z_size,
                                    kernel_size=self.z_shape,
                                    stride=1)
            self.e_logvar = nn.Conv2d(128,
                                      z_size,
                                      kernel_size=self.z_shape,
                                      stride=1)
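
The excerpt above stops at the two convolutions that produce the latent mean and log-variance; the forward and sampling steps are not part of the excerpt. Under the standard VAE reparameterization, and assuming a ReLU activation (which the excerpt does not specify), companion methods might look like this sketch:

    def encode(self, x):
        # Hypothetical forward pass (not in the excerpt): follows the layer
        # order defined in __init__ and returns mean and log-variance maps.
        h = torch.relu(self.e_bn1(self.e_conv1(x)))
        h = torch.relu(self.e_bn2(self.e_conv2(h)))
        h = torch.relu(self.e_bn3(self.e_conv3(h)))
        return self.e_mean(h), self.e_logvar(h)

    def reparameterize(self, mean, logvar):
        # Standard VAE sampling: z = mean + sigma * eps, with sigma = exp(0.5 * logvar).
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        return mean + eps * std
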
Example #3
    def __init__(self, input_size, input_channels, actions):
        nn.Module.__init__(self)
        self.num_actions = actions

        self.conv = nn.Conv2d(input_channels, 1, kernel_size=2, stride=2)
        output_shape = conv_output_shape(input_size, kernel_size=2, stride=2)
        self.output_len = output_shape[0] * output_shape[1]

        self.lin = nn.Linear(self.output_len, actions)
        self.action = nn.Softmax(dim=1)
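
Example #3 again shows only the constructor. A forward pass consistent with these layers (a sketch; the original forward is not in the excerpt) would flatten the single-channel conv output into output_len features and project them to a distribution over actions:

    def forward(self, observation):
        # Hypothetical forward (assumption): conv -> flatten -> linear -> softmax.
        features = self.conv(observation)
        features = features.view(features.size(0), -1)  # (batch, output_len)
        return self.action(self.lin(features))
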
Example #4
    def __init__(self, input_size, input_channels):
        nn.Module.__init__(self)

        self.conv = nn.Conv2d(input_channels, 2, kernel_size=2, stride=2)
        output_shape = conv_output_shape(input_size, kernel_size=2, stride=2)
        self.output_len = output_shape[0] * output_shape[1]

        # NOP(0), RIGHT(3), LEFT(4)
        self.move = nn.Linear(self.output_len, 3)

        # FIRE(1)
        self.fire = nn.Linear(self.output_len, 1)

        # UP(1)
        self.up = nn.Parameter(torch.Tensor([0]).unsqueeze(0))

        self.permute = torch.Tensor([0, 3, 1, 2]).long()

        self.action = nn.Softmax(dim=1)