Example no. 1
    def __init__(self, n_in_planes, n_out_planes, stride=1):
        super().__init__()
        assert stride in (1, 2)

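        # main path: conv3x3 -> BN -> ReLU -> conv3x3 -> BN; the final ReLU
        # presumably follows the shortcut addition in forward()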
        self.block = nn.Sequential(
            nn_ops.conv3x3(n_in_planes, n_out_planes, stride),
            nn.BatchNorm2d(n_out_planes), nn.ReLU(inplace=True),
            nn_ops.conv3x3(n_out_planes, n_out_planes),
            nn.BatchNorm2d(n_out_planes))

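        # shortcut branch: identity or a projection matching the main path's output shape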
        self.identity = identity_func(n_in_planes, n_out_planes, stride)
Example no. 2
    def __init__(self, d_latent, device='cuda', log_dir=''):
        super().__init__()
        self.d_latent = d_latent
        self.device = device

        n_blocks = [1, 1, 1, 1]
        mult = 8
        n_output_planes = [16 * mult, 32 * mult, 64 * mult, 128 * mult]
        self.n_in_planes = n_output_planes[0]

        # stem conv followed by four downsampling residual stages
        self.layer0 = nn.Sequential(
            nn_ops.conv3x3(3, self.n_in_planes, 1),
            nn.BatchNorm2d(self.n_in_planes),
            nn.ReLU(inplace=True)
        )
        self.layer1 = self._make_layer(BasicBlock, n_blocks[0], n_output_planes[0], 2)
        self.layer2 = self._make_layer(BasicBlock, n_blocks[1], n_output_planes[1], 2)
        self.layer3 = self._make_layer(BasicBlock, n_blocks[2], n_output_planes[2], 2)
        self.layer4 = self._make_layer(BasicBlock, n_blocks[3], n_output_planes[3], 2)
        # map encoder features to the latent space, squashed to (-1, 1) by Tanh
        self.latent_mapping = nn.Sequential(
            nn.Linear(n_output_planes[3] * BasicBlock.expansion, d_latent, True),
            nn.BatchNorm1d(d_latent),
            nn.Tanh()
        )

        self.apply(nn_ops.variable_init)
        self.to(device)
        utils.model_info(self, 'celebA_encoder', log_dir)
Example no. 3
    def __init__(self, n_in_planes, n_out_planes, stride=1):
        super().__init__()

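        # bottleneck residual block: 1x1 reduce -> 3x3 (optionally strided) -> 1x1 expand by 4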
        self.conv1 = nn.Conv2d(n_in_planes, n_out_planes, kernel_size=1)
        self.bn1 = nn.BatchNorm2d(n_out_planes)

        self.conv2 = nn_ops.conv3x3(n_out_planes, n_out_planes, stride)
        self.bn2 = nn.BatchNorm2d(n_out_planes)

        self.conv3 = nn.Conv2d(n_out_planes, n_out_planes * 4, kernel_size=1)
        self.bn3 = nn.BatchNorm2d(n_out_planes * 4)

        self.relu = nn.ReLU(inplace=True)
        self.identity = identity_func(n_in_planes, n_out_planes * 4, stride)
Example no. 4
    def __init__(self, n_in_planes, n_out_planes):
        super().__init__()
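        # main path: 4x4 transposed conv upsamples, then a 3x3 conv refines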
        self.block = nn.Sequential(
            nn_ops.deconv4x4(n_in_planes, n_out_planes, True),
            nn.BatchNorm2d(n_out_planes), nn.ReLU(inplace=True),
            nn_ops.conv3x3(n_out_planes, n_out_planes, 1, True),
            nn.BatchNorm2d(n_out_planes))

        # nn.functional.upsample is deprecated; interpolate is its drop-in replacement
        self.upsample = lambda x: nn.functional.interpolate(
            x, scale_factor=4, mode='nearest')
        self.shortcut_conv = nn.Sequential()
        if n_in_planes != n_out_planes:
            self.shortcut_conv = nn.Sequential(
                nn.Conv2d(n_in_planes, n_out_planes, kernel_size=1),
                nn.BatchNorm2d(n_out_planes))
Example no. 5
    def __init__(self,
                 d_latent,
                 encoder_only=False,
                 device='cuda',
                 no_classes=5,
                 log_dir=''):
        super().__init__()

        self.d_latent = d_latent
        self.device = device
        self.mult = 8
        self.encoder_only = encoder_only

        if not self.encoder_only:
            self.latent_mappingDec = nn.Sequential(
                nn.Linear(self.d_latent, 4 * 4 * 128 * self.mult),
                nn.BatchNorm1d(4 * 4 * 128 * self.mult), nn.ReLU())
            self.block1 = DecoderBlock(128 * self.mult, 64 * self.mult)

        # self.output_conv = nn_ops.conv3x3(64 * self.mult, 3, 1, True)
        # self.final_act = nn.Sigmoid()

        n_blocks = [1, 1, 1, 1]
        n_output_planes = [
            16 * self.mult, 32 * self.mult, 64 * self.mult, 128 * self.mult
        ]
        self.layer0 = nn.Sequential(
            nn_ops.conv3x3(n_output_planes[2], n_output_planes[0], 1),
            nn.BatchNorm2d(n_output_planes[0]), nn.ReLU(inplace=True))
        self.n_in_planes = n_output_planes[0]
        # self.layer1 = self._make_layer(BasicBlock, n_blocks[0], n_output_planes[0], 2)
        # encoder head: project features to half the latent size
        self.latent_mappingEnc = nn.Sequential(
            nn.Linear(n_output_planes[0] * BasicBlock.expansion,
                      d_latent // 2, True),
            nn.BatchNorm1d(d_latent // 2), nn.Tanh())

        self.linear = nn.Sequential(
            nn.Linear(d_latent // 2, no_classes, True), nn.Sigmoid())
        # self.linear = nn.Sequential(nn.Linear(d_latent, no_classes, True),nn.BatchNorm1d(d_latent),nn.Tanh())

        self.apply(nn_ops.variable_init)
        self.to(device)
Example no. 6
    def __init__(self, block, n_blocks, n_output_planes, n_classes):
        super().__init__()
        assert len(n_blocks) == 4
        assert len(n_output_planes) == 4

        self.n_in_planes = n_output_planes[0]

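        # stem conv followed by four residual stages; stages 2-4 use stride 2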
        self.layer0 = nn.Sequential(nn_ops.conv3x3(3, self.n_in_planes),
                                    nn.BatchNorm2d(self.n_in_planes),
                                    nn.ReLU(inplace=True))
        self.layer1 = self._make_layer(block, n_blocks[0], n_output_planes[0])
        self.layer2 = self._make_layer(block, n_blocks[1], n_output_planes[1],
                                       2)
        self.layer3 = self._make_layer(block, n_blocks[2], n_output_planes[2],
                                       2)
        self.layer4 = self._make_layer(block, n_blocks[3], n_output_planes[3],
                                       2)
        self.fc = nn.Linear(n_output_planes[3] * block.expansion, n_classes,
                            False)

        self.apply(nn_ops.variable_init)
Example no. 7
    def __init__(self, d_latent, device='cuda', log_dir=''):
        super().__init__()

        self.d_latent = d_latent
        self.device = device

        self.mult = 8
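        # project the latent vector to a 4x4 feature map, then upsample
        # through six decoder blocks to an RGB image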
        self.latent_mapping = nn.Sequential(
            nn.Linear(self.d_latent, 4 * 4 * 128 * self.mult),
            nn.BatchNorm1d(4 * 4 * 128 * self.mult), nn.ReLU())
        self.block1 = DecoderBlock(128 * self.mult, 64 * self.mult)
        self.block2 = DecoderBlock(64 * self.mult, 32 * self.mult)
        self.block3 = DecoderBlock(32 * self.mult, 16 * self.mult)
        self.block4 = DecoderBlock(16 * self.mult, 8 * self.mult)
        self.block5 = DecoderBlock(8 * self.mult, 4 * self.mult)
        self.block6 = DecoderBlock(4 * self.mult, 2 * self.mult)
        self.output_conv = nn_ops.conv3x3(2 * self.mult, 3, 1, True)
        self.final_act = nn.Sigmoid()

        self.apply(nn_ops.variable_init)
        self.to(device)
        utils.model_info(self, 'celebA_decoder', log_dir)
Example no. 8
    def __init__(self,
                 d_latent,
                 no_classes=None,
                 device='cuda',
                 log_dir='',
                 encoder_only=False,
                 Tone=3):
        super().__init__()

        self.d_latent = d_latent
        self.no_classes = no_classes
        self.device = device
        self.encoder_only = encoder_only
        self.Tone = Tone

        self.mult = 8
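        # decoder trunk: latent vector -> 4x4 feature map -> five upsampling blocks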
        self.latent_mapping = nn.Sequential(
            nn.Linear(self.d_latent, 4 * 4 * 128 * self.mult),
            nn.BatchNorm1d(4 * 4 * 128 * self.mult), nn.ReLU())
        self.block1 = DecoderBlock(128 * self.mult, 64 * self.mult)
        self.block2 = DecoderBlock(64 * self.mult, 32 * self.mult)
        self.block3 = DecoderBlock(32 * self.mult, 16 * self.mult)
        self.block4 = DecoderBlock(16 * self.mult, 8 * self.mult)
        self.block5 = DecoderBlock(8 * self.mult, 4 * self.mult)

        n_blocks = [1, 1, 1, 1]
        # n_output_planes = [32 * self.mult, 64 * self.mult, 128 * self.mult, 256 * self.mult]
        # n_output_planes = [16 * self.mult, 32 * self.mult, 64 * self.mult, 128 * self.mult]
        n_output_planes = [
            8 * self.mult, 16 * self.mult, 32 * self.mult, 64 * self.mult
        ]
        n_last_plane = self.no_classes
        self.layer0 = nn.Sequential(
            nn_ops.conv3x3(n_output_planes[0], n_output_planes[1], 1),
            nn.BatchNorm2d(n_output_planes[1]), nn.ReLU(inplace=True))
        self.layer1 = nn.Sequential(
            nn_ops.conv3x3(n_output_planes[1], n_output_planes[0], 1),
            nn.BatchNorm2d(n_output_planes[0]), nn.ReLU(inplace=True))
        self.layer2 = nn.Sequential(
            nn_ops.conv3x3(n_output_planes[0], n_last_plane * 3, 1),
            nn.BatchNorm2d(n_last_plane * 3), nn.ReLU(inplace=True))

        # self.latent_mappingDev = nn.Sequential(
        #       nn.Linear(n_output_planes[3] * BasicBlock.expansion, d_latent, True),
        #       nn.BatchNorm1d(d_latent),
        #       nn.Tanh()
        #     )
        # self.layer1 = self._make_layer(BasicBlock, 1, n_output_planes[2], 2)#self._make_layer(BasicBlock, n_blocks[1], n_output_planes[1], 2)
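        # two output heads: RGB (3 channels per class) and grayscale (1 channel per class)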
        self.last_layerRGB = nn_ops.conv3x3(n_output_planes[0],
                                            n_last_plane * 3, 1, True)
        self.final_actRGB = nn.Sigmoid()

        self.last_layerBW = nn_ops.conv3x3(n_output_planes[0], n_last_plane, 1,
                                           True)
        self.final_actBW = nn.Sigmoid()

        # self.layer2 = self._make_layer(BasicBlock, 1, n_output_planes[3], 1)#self._make_layer(BasicBlock, n_blocks[1], n_output_planes[1], 2)
        # # self.layer3 = self._make_layer(BasicBlock, n_blocks[2], n_output_planes[2], 2)
        # self.block3 = DecoderBlock(n_output_planes[2], 32 * self.mult)
        # # self.output_conv = nn_ops.conv3x3(2 * self.mult, 3, 1, True)

        # self.linear = nn.Sequential(nn.Linear(int(d_latent/2), no_classes, True),nn.Sigmoid())
        # self.linear = nn.Sequential(nn.Linear(d_latent, no_classes, True),nn.BatchNorm1d(d_latent),nn.Tanh())

        self.apply(nn_ops.variable_init)
        self.to(device)
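
Note: every snippet above assumes a shared nn_ops helper module and an identity_func shortcut builder. The sketch below gives plausible minimal definitions for the three shape-related helpers, inferred only from the call sites; it is an assumption, not the original code. In particular, deconv4x4 may use stride 4 rather than 2, since Example no. 4 pairs it with a scale_factor=4 shortcut.

import torch.nn as nn

def conv3x3(n_in_planes, n_out_planes, stride=1, bias=False):
    # 3x3 convolution with padding=1: spatial size changes only via stride
    return nn.Conv2d(n_in_planes, n_out_planes, kernel_size=3,
                     stride=stride, padding=1, bias=bias)

def deconv4x4(n_in_planes, n_out_planes, bias=False):
    # 4x4 transposed convolution, stride 2: doubles the spatial size
    # (hypothetical default; see the note above about stride 4)
    return nn.ConvTranspose2d(n_in_planes, n_out_planes, kernel_size=4,
                              stride=2, padding=1, bias=bias)

def identity_func(n_in_planes, n_out_planes, stride):
    # shortcut builder: identity when shapes match, else a strided 1x1 projection
    if stride == 1 and n_in_planes == n_out_planes:
        return nn.Sequential()
    return nn.Sequential(
        nn.Conv2d(n_in_planes, n_out_planes, kernel_size=1,
                  stride=stride, bias=False),
        nn.BatchNorm2d(n_out_planes))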