Example #1
0
    def __init__(self, params):
        """Build the WGAN discriminator: three conv + max-pool stages and a
        single-logit fully-connected head, with optional batch norm.

        params: dict of hyper-parameters; missing keys fall back to defaults.
        """
        super(Discriminator, self).__init__()
        self.model_str = 'WDisc'

        # Hyper-parameters (with defaults) pulled from the params dict.
        self.latent_dim = params.get('latent_dim', 32)
        self.batchnorm = params.get('batchnorm', False)
        self.n_filters = params.get('n_filters', 32)
        self.kernel_size = params.get('kernel_size', 5)
        self.padding = params.get('padding', 2)

        # Short local aliases to keep the layer definitions readable.
        nf, ks, pad = self.n_filters, self.kernel_size, self.padding

        # Conv stack: 1 -> nf -> 2*nf -> 4*nf channels; each stage is
        # followed by a 2x2 max pool.
        self.conv1 = conv(1, nf, ks, padding=pad, stride=1)
        self.maxpool1 = maxpool(2, 2)
        self.conv2 = conv(nf, nf * 2, ks, padding=pad, stride=1)
        self.maxpool2 = maxpool(2, 2)
        self.conv3 = conv(nf * 2, nf * 4, ks, padding=pad, stride=1)
        self.maxpool3 = maxpool(2, 2)
        # Final feature map (4*nf x 3 x 3) flattened into one score.
        self.fc1 = fc(nf * 4 * 3 * 3, 1)
        if self.batchnorm:
            self.bn_conv1 = BN2d(nf, eps=1e-5, momentum=.9)
            self.bn_conv2 = BN2d(2 * nf, eps=1e-5, momentum=.9)
            self.bn_conv3 = BN2d(4 * nf, eps=1e-5, momentum=.9)
            self.bn_fc1 = BN1d(1, eps=1e-5, momentum=.9)
Example #2
0
    def __init__(self, latentVectorSize, classVectorSize):
        """Conditional DCGAN-style generator: five transposed-conv layers
        that upsample a (latent + class) vector to a 3-channel image
        squashed to [-1, 1] by a final Tanh."""
        super(MyConGANGen, self).__init__()

        # Affine placeholders applied to both vector sizes (identity
        # with the defaults below).
        self.weight = 1
        self.bias = 0
        self.lD = latentVectorSize * self.weight + self.bias
        self.cD = classVectorSize * self.weight + self.bias

        # Latent and class vectors are concatenated before the first
        # layer, hence the summed input channel count.
        in_ch = self.lD + self.cD

        self.PackedLayersofGen = nn.Sequential(
            # layer 1
            CT2d(in_channels=in_ch,
                 out_channels=1024,
                 kernel_size=4,
                 stride=1,
                 bias=False),
            BN2d(1024),
            nn.ReLU(inplace=True),
            # layer 2
            CT2d(in_channels=1024, out_channels=512, kernel_size=4,
                 stride=2, padding=1, bias=False),
            BN2d(512),
            nn.ReLU(inplace=True),
            # layer 3
            CT2d(in_channels=512, out_channels=256, kernel_size=4,
                 stride=2, padding=1, bias=False),
            BN2d(256),
            nn.ReLU(inplace=True),
            # layer 4
            CT2d(in_channels=256, out_channels=128, kernel_size=4,
                 stride=2, padding=1, bias=False),
            BN2d(128),
            nn.ReLU(inplace=True),
            # layer 5: down to 3 output channels, then Tanh
            CT2d(in_channels=128, out_channels=3, kernel_size=4,
                 stride=2, padding=1),
            nn.Tanh())
Example #3
0
    def __init__(self, countOfClasses):
        """Conditional discriminator: a shared stride-2 conv feature
        stack feeding a real/fake binary head and, via an extra
        bottleneck conv, a multi-label class head."""
        super(MyConDisc, self).__init__()

        print("total number of classes:")
        print(countOfClasses)

        self.countOfClasses = countOfClasses

        # Shared feature extractor: 3 -> 128 -> 256 -> 512 -> 1024
        # channels; the first stage has no batch norm.
        feature_layers = [
            C2d(in_channels=3,
                out_channels=128,
                kernel_size=4,
                stride=2,
                padding=1,
                bias=False),
            nn.LeakyReLU(0.2, inplace=True),
        ]
        for c_in, c_out in ((128, 256), (256, 512), (512, 1024)):
            feature_layers += [
                C2d(in_channels=c_in,
                    out_channels=c_out,
                    kernel_size=4,
                    stride=2,
                    padding=1,
                    bias=False),
                BN2d(c_out),
                nn.LeakyReLU(0.2, inplace=True),
            ]
        self.PackedLayersOfDisc = nn.Sequential(*feature_layers)
        print("packing discriminator dense layers finished..")

        # Real/fake head: 1024 -> 1 channel, sigmoid probability.
        self.bin_classifier = nn.Sequential(
            C2d(in_channels=1024, out_channels=1, kernel_size=4, stride=1),
            nn.Sigmoid())

        print("Binary classifier layer initialized..")

        # Bottleneck reducing feature width before the class head.
        self.extraBotNeck = nn.Sequential(
            C2d(in_channels=1024, out_channels=512, kernel_size=4, stride=1),
            BN2d(512), nn.LeakyReLU(0.2))
        print(
            "added an extra layer to compensate for size of input to ouput channel"
        )

        # Per-class sigmoid outputs (multi-label, not softmax).
        self.multilableClassificationLayer = nn.Sequential(
            nn.Linear(512, self.countOfClasses), nn.Sigmoid())
        print("multilable classifier layer created..")
    def __init__(self, params):
        """Configure a PixelCNN: per layer, a pair of masked convolutions
        ('a'/'b') each with a latent-conditioning FC path and optional
        batch norm, followed by a final 'B'-masked output conv.

        params: dict of hyper-parameters; missing keys fall back to defaults.
        """
        super(PixelCNN, self).__init__()
        # Hyper-parameters with defaults.
        self.n_layers = params.get('n_layers', 2)
        self.batchnorm = params.get('batchnorm', True)
        self.latent_dim = params.get('latent_dim', 2)
        self.filters = params.get('n_filters', 10)
        self.kernel_size = params.get('kernel_size', 3)
        self.padding = params.get('padding', 1)

        def make_masked(k):
            # Layer 1 sees the raw 1-channel image and uses mask 'A';
            # later layers see feature maps and use mask 'B'.
            return MaskedConv2d('A' if k == 1 else 'B',
                                in_channels=1 if k == 1 else self.filters,
                                out_channels=self.filters,
                                kernel_size=self.kernel_size,
                                stride=1,
                                padding=self.padding,
                                dilation=1)

        # NOTE(review): range(1, n_layers) builds only n_layers - 1 layer
        # pairs (one pair for the default n_layers=2). Confirm against
        # forward() whether range(1, n_layers + 1) was intended.
        for k in range(1, self.n_layers):
            setattr(self, 'conv%da' % k, make_masked(k))
            setattr(self, 'conv%db' % k, make_masked(k))

            # Latent vector projected to a 784-unit map (presumably
            # 28x28 MNIST-sized — TODO confirm).
            setattr(self, 'fc%da' % k, fc(self.latent_dim, 784))
            setattr(self, 'fc%db' % k, fc(self.latent_dim, 784))
            if self.batchnorm:
                setattr(self, 'bn_conv%da' % k,
                        BN2d(self.filters, eps=1e-5, momentum=.9))
                setattr(self, 'bn_conv%db' % k,
                        BN2d(self.filters, eps=1e-5, momentum=.9))
                setattr(self, 'bn_fc%da' % k, BN1d(784, eps=1e-5, momentum=.9))
                setattr(self, 'bn_fc%db' % k, BN1d(784, eps=1e-5, momentum=.9))

        # Output conv maps features back to a single channel.
        self.final_conv = MaskedConv2d('B',
                                       in_channels=self.filters,
                                       out_channels=1,
                                       kernel_size=self.kernel_size,
                                       padding=self.padding,
                                       dilation=1)
        if self.batchnorm:
            self.bn_final_conv = BN2d(1, eps=1e-5, momentum=.9)
    def __init__(self,
                 input_size,
                 output_size,
                 kernel,
                 bn=True,
                 downsample=False):
        """Seven same-padded conv layers (l1..l7) with matching batch
        norms (bn1..bn7 — identity functions when bn=False) and an
        optional trailing Downsample stage (identity when disabled)."""
        super(Conv, self).__init__()

        # Padding that preserves spatial size for odd kernels.
        same_pad = (kernel - 1) // 2
        identity = lambda x: x

        # l1 maps input_size -> output_size; l2..l7 keep output_size.
        for idx in range(1, 8):
            in_ch = input_size if idx == 1 else output_size
            setattr(self, 'l%d' % idx,
                    nn.Conv2d(in_ch, output_size, kernel, padding=same_pad))

        # Strided downsampling stage, or a no-op pass-through.
        if downsample:
            self.downsample = Downsample(output_size, output_size, 3, True, 2)
        else:
            self.downsample = identity

        # Batch norms registered after the downsample stage, matching
        # the original parameter-registration order.
        if bn:
            for idx in range(1, 8):
                setattr(self, 'bn%d' % idx, BN2d(output_size))
        else:
            for idx in range(1, 8):
                setattr(self, 'bn%d' % idx, identity)
Example #6
0
    def __init__(self, params):
        """Build the WGAN generator: an FC projection of the latent code
        followed by three transposed convolutions down to a 1-channel
        image, with optional batch norm.

        params: dict of hyper-parameters; missing keys fall back to defaults.
        """
        super(Generator, self).__init__()
        self.model_str = 'WGen'

        # Hyper-parameters (with defaults) from the params dict.
        self.latent_dim = params.get('latent_dim', 32)
        self.batchnorm = params.get('batchnorm', False)
        self.n_filters = params.get('n_filters', 32)
        self.kernel_size = params.get('kernel_size', 5)
        self.padding = params.get('padding', 2)

        # Short local aliases to keep the layer definitions readable.
        nf, ks, pad = self.n_filters, self.kernel_size, self.padding

        # Project the latent vector to a 4*nf x 7 x 7 feature volume.
        self.fc1 = fc(self.latent_dim, nf * 4 * 7 * 7)
        # Two stride-2 deconvs upsample (7x7 -> 14x14 -> 28x28 with the
        # default kernel/padding); the last stride-1 deconv maps to a
        # single output channel.
        self.deconv1 = deconv(4 * nf, 2 * nf, ks, stride=2, padding=pad,
                              output_padding=1)
        self.deconv2 = deconv(2 * nf, nf, ks, stride=2, padding=pad,
                              output_padding=1)
        self.deconv3 = deconv(nf, 1, ks, stride=1, padding=pad,
                              output_padding=0)
        if self.batchnorm:
            self.bn_fc1 = BN1d(nf * 4 * 7 * 7, eps=1e-5, momentum=.9)
            self.bn_deconv1 = BN2d(2 * nf, eps=1e-5, momentum=.9)
            self.bn_deconv2 = BN2d(nf, eps=1e-5, momentum=.9)
            self.bn_deconv3 = BN2d(1, eps=1e-5, momentum=.9)
    def __init__(self, input_size, output_size, kernel, bn=False, stride=2):
        """Transposed-conv upsampling layer with optional batch norm.

        TODO: padding is hard-coded to 1 — derive it from `kernel` instead.
        """
        super(Upsample, self).__init__()

        self.conv2dt = nn.ConvTranspose2d(input_size,
                                          output_size,
                                          kernel,
                                          stride=stride,
                                          padding=1,
                                          output_padding=1)

        # No-op pass-through when batch norm is disabled.
        self.bn = BN2d(output_size) if bn else (lambda x: x)
    def __init__(self, input_size, output_size, kernel, bn=True, stride=2):
        """Strided conv + 2x2 max-pool downsampling layer with optional
        batch norm.

        TODO: padding is hard-coded to 1 — derive it from `kernel` instead.
        """
        super(Downsample, self).__init__()

        self.conv2d = nn.Conv2d(input_size,
                                output_size,
                                kernel,
                                padding=1,
                                stride=stride)
        self.pool = nn.MaxPool2d(2, stride=2)

        # No-op pass-through when batch norm is disabled.
        self.bn = BN2d(output_size) if bn else (lambda x: x)
Example #9
0
    def __init__(self, params):
        """Convolutional VAE: a three-conv encoder producing mu/logvar
        over the latent space, and a mirrored transposed-conv decoder.

        params: dict of hyper-parameters; missing keys fall back to defaults.
        """
        super(ConVAE, self).__init__()
        self.model_str = 'ConVAE'
        self.is_cuda = False

        # Hyper-parameters (with defaults) from the params dict.
        self.latent_dim = latent_dim = params.get('latent_dim', 2)
        self.hdim = hdim = params.get('hdim', 400)
        self.batchnorm = params.get('batchnorm', True)
        self.kernel_size = params.get('kernel_size', 3)
        self.padding = params.get('padding', 1)
        self.n_filters = params.get('n_filters', 64)

        # Short local aliases to keep the layer definitions readable.
        nf, ks, pad = self.n_filters, self.kernel_size, self.padding

        # --- encoder: 1 -> nf -> 2*nf -> 4*nf channels; the two
        # stride-2 convs quarter the spatial size (the 7*7 FC sizing
        # below implies 28x28 inputs — TODO confirm against callers).
        self.conv1 = conv(1, nf, ks, padding=pad)
        self.conv2 = conv(nf, nf * 2, ks, padding=pad, stride=2)
        self.conv3 = conv(nf * 2, nf * 4, ks, padding=pad, stride=2)
        self.fc1 = fc(7 * 7 * nf * 4, hdim)

        self.fc_mu = fc(hdim, latent_dim)  # mean of z
        self.fc_logvar = fc(hdim, latent_dim)  # log-variance of z

        if self.batchnorm:
            self.bn_conv1 = BN2d(nf, momentum=.1)
            self.bn_conv2 = BN2d(nf * 2, momentum=.1)
            self.bn_conv3 = BN2d(nf * 4, momentum=.1)
            self.bn_fc1 = BN1d(self.hdim, momentum=.1)
            self.bn_mu = BN1d(latent_dim, momentum=.1)
            self.bn_logvar = BN1d(latent_dim, momentum=.1)

        # --- decoder: mirrors the encoder with transposed convolutions.
        self.fc2 = fc(latent_dim, 7 * 7 * nf * 4)
        self.deconv1 = deconv(nf * 4, nf * 2, ks, stride=2, padding=pad,
                              output_padding=1)
        self.deconv2 = deconv(nf * 2, nf, ks, stride=2, padding=pad,
                              output_padding=1)
        self.deconv3 = deconv(nf, 1, ks, stride=1, padding=pad,
                              output_padding=0)
        if self.batchnorm:
            self.bn_fc2 = BN1d(7 * 7 * nf * 4, momentum=.1)
            self.bn_deconv1 = BN2d(nf * 2, momentum=.1)
            self.bn_deconv2 = BN2d(nf, momentum=.1)
            self.bn_deconv3 = BN2d(1, momentum=.1)
    def __init__(self, num_players, window_size):
        """Construct the Tower's layers: a 1x1 Conv1d feature branch, a
        dense feature branch, an entry 1x1 Conv2d, six stride-2 3x3 conv
        stages, a 1x1 bottleneck conv whose input width also covers both
        feature branches, six stride-2 transposed-conv stages, and
        per-player Linear / 1x1-conv output heads.

        SIZE, F_SIZE and L_SIZE are module-level channel-width constants
        defined elsewhere in this file.
        """
        super(Tower, self).__init__()

        # 1x1 temporal conv over 3*window_size channels.
        self.conv1d_op1 = nn.Conv1d(3 * window_size, F_SIZE, 1)
        self.bn_op1 = BN1d(F_SIZE)

        # Dense branch over 12*window_size inputs.
        self.dense_p1 = nn.Linear(12 * window_size, F_SIZE)
        self.bn_p1 = BN1d(F_SIZE)

        # Entry 1x1 conv over the stacked input planes.
        self.conv2d_1a = nn.Conv2d(7 * window_size + 6 * (window_size - 1),
                                   SIZE, 1)
        self.bn_1a = BN2d(SIZE)

        # Six stride-2 3x3 conv stages (conv2d_1d..conv2d_6d), each with
        # its own batch norm (bn_1d..bn_6d), registered in order.
        for k in range(1, 7):
            setattr(self, 'conv2d_%dd' % k,
                    nn.Conv2d(SIZE, SIZE, 3, padding=1, stride=2))
            setattr(self, 'bn_%dd' % k, BN2d(SIZE))

        # Bottleneck: input width accommodates the conv features plus
        # both feature branches (hence SIZE + F_SIZE * 2).
        self.conv2d_l1 = nn.Conv2d(SIZE + F_SIZE * 2, L_SIZE, 1)
        self.bn_l1 = BN2d(L_SIZE)

        # Six stride-2 transposed-conv stages back up (conv2dt_2..7);
        # only the first maps from the bottleneck width L_SIZE.
        for k in range(2, 8):
            in_ch = L_SIZE if k == 2 else SIZE
            setattr(self, 'conv2dt_%d' % k,
                    torch.nn.ConvTranspose2d(in_ch, SIZE, 3, stride=2,
                                             padding=1, output_padding=1))

        # Per-player scalar head off the bottleneck features.
        self.gen_l3s = nn.ModuleList(
            [nn.Linear(L_SIZE, 1) for _ in range(num_players)])

        # Per-player 6-channel 1x1-conv head.
        self.move_l2s = nn.ModuleList(
            [nn.Conv2d(SIZE, 6, 1) for _ in range(num_players)])