# map_location that keeps loaded tensors on the CPU, regardless of the device the checkpoint was saved from
map_location = lambda storage, loc: storage

optimizer_generator = Adam(netG.parameters(), 2e-4, betas=(0.5, 0.999))
optimizer_discriminator = Adam(netD.parameters(), 2e-4, betas=(0.5, 0.999))

criterion = BCELoss()

true_labels = Variable(t.ones(CONFIG["BATCH_SIZE"]))
fake_labels = Variable(t.zeros(CONFIG["BATCH_SIZE"]))
fix_noises = Variable(t.randn(CONFIG["BATCH_SIZE"], CONFIG["NOISE_DIM"], 1, 1))
noises = Variable(t.randn(CONFIG["BATCH_SIZE"], CONFIG["NOISE_DIM"], 1, 1))

if CONFIG["GPU_NUMS"] > 0:
    netD.cuda()
    netG.cuda()
    criterion.cuda()
    true_labels, fake_labels = true_labels.cuda(), fake_labels.cuda()
    fix_noises, noises = fix_noises.cuda(), noises.cuda()

proBar = ProgressBar(CONFIG["EPOCHS"], len(dataLoader),
                     "D Loss:%.3f;G Loss:%.3f")
for epoch in range(1, CONFIG["EPOCHS"] + 1):
    if epoch % 30 == 0:
        optimizer_discriminator.param_groups[0]['lr'] /= 10
        optimizer_generator.param_groups[0]['lr'] /= 10

    for ii, (img, _) in enumerate(dataLoader):
        real_img = Variable(img.cuda() if CONFIG["GPU_NUMS"] > 0 else img)

        if ii % 1 == 0:
            # Train the discriminator
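            # The original snippet is truncated here. A minimal sketch of the
            # discriminator / generator updates that typically follow in a DCGAN
            # loop, using the objects defined above; the detach() call and the
            # noise resampling are assumptions, not taken from the original.
            netD.zero_grad()
            loss_d_real = criterion(netD(real_img).view(-1), true_labels)
            noises.data.normal_(0, 1)
            fake_img = netG(noises).detach()  # keep the generator fixed for this step
            loss_d_fake = criterion(netD(fake_img).view(-1), fake_labels)
            (loss_d_real + loss_d_fake).backward()
            optimizer_discriminator.step()

            netG.zero_grad()
            noises.data.normal_(0, 1)
            loss_g = criterion(netD(netG(noises)).view(-1), true_labels)  # generator wants "real" labels
            loss_g.backward()
            optimizer_generator.step()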
Example #2

NetG = Generator()
NetD = Discriminator()
BCE_LOSS = BCELoss()
G_optimizer = Adam(NetG.parameters(),
                   lr=CONFIG["LEARNING_RATE"],
                   betas=(0.5, 0.999))
D_optimizer = Adam(NetD.parameters(),
                   lr=CONFIG["LEARNING_RATE"],
                   betas=(0.5, 0.999))

if CONFIG["GPU_NUMS"] > 0:
    NetG = NetG.cuda()
    NetD = NetD.cuda()
    BCE_LOSS = BCE_LOSS.cuda()

transform = Compose([
    Resize((CONFIG["IMAGE_SIZE"], CONFIG["IMAGE_SIZE"])),
    ToTensor(),
    Normalize(mean=[0.5] * 3, std=[0.5] * 3)  # maps pixel values from [0, 1] to [-1, 1]
])
train_loader = torch.utils.data.DataLoader(
    torchvision.datasets.ImageFolder(root=CONFIG["DATA_PATH"], transform=transform),
    batch_size=CONFIG["BATCH_SIZE"],
    shuffle=True)


def one_hot(target):
    y = torch.zeros(target.size()[0], 10)
    # place a 1 at each sample's class index
    y.scatter_(1, target.view(-1, 1), 1)
    return y
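A quick check of the helper above, with hypothetical integer labels:

labels = torch.tensor([3, 0, 9])
print(one_hot(labels))  # each row holds a single 1, at columns 3, 0 and 9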
Example #3

class trainDeepPix(object):
    r'''
    Object to train DeepPix Network

    @params:: 
        `model`:: Initialized DeepPix Model
        `lr`:: Learning Rate for Adam Optimizer
        `weight_decay` :: L2 Regularization parameter
    '''
    def __init__(self, model, lr, weight_decay):

        super(trainDeepPix, self).__init__()

        self.model = model
        self.lossC = BCELoss()
        self.lossS = BCELoss()

        if CUDA:
            self.model = self.model.cuda()
            self.lossC = self.lossC.cuda()
            self.lossS = self.lossS.cuda()

        self.optimizer = Adam(self.model.parameters(),
                              lr=lr,
                              weight_decay=weight_decay)

    @staticmethod
    def _convertYLabel(y):
        # expand each scalar label into a 196-dim (14x14) map target for the pixel-wise loss
        returnY = torch.ones((y.shape[0], 196)).type(torch.FloatTensor)

        for i in range(y.shape[0]):
            returnY[i] = returnY[i] * y[i]

        return returnY.cuda() if CUDA else returnY

    @staticmethod
    def _calcAccuracy(yPred, yTrue):

        yPred = yPred.view(-1)
        yTrue = yTrue.view(-1)

        if CUDA:
            yPred = (yPred > 0.5).type(torch.cuda.LongTensor)
        else:
            yPred = (yPred > 0.5).type(torch.LongTensor)

        return torch.sum((yTrue == yPred).type(torch.LongTensor),
                         dim=0) / float(yTrue.shape[0])

    def train(self,
              ImgList,
              LabelList,
              mtcnn,
              batch_size=32,
              epochs=50,
              detectFace=True):
        r'''
        Utility to train DeepPix Model,
        @params:: 
            `ImgList`:: List of Image Paths
            `LabelList`:: List of Labels correspoding to images
                            [should be 0 or 1]
        '''

        ImgArray = []

        widgets = [
            f"Cropping faces: " if detectFace else f"Preprocessing Data: ",
            progressbar.Percentage(), " ",
            progressbar.Bar(), " ",
            progressbar.ETA()
        ]

        pbar = progressbar.ProgressBar(maxval=len(ImgList),
                                       widgets=widgets).start()

        __ctr = 0

        __removed = 0

        for img in ImgList:
            res = cropFace(mtcnn, img, detectFace=detectFace)
            if res is not None:
                ImgArray.append(res.unsqueeze(0))
            else:
                # offset by earlier deletions so the index still matches this image's label
                del LabelList[__ctr - __removed]
                __removed += 1

            pbar.update(__ctr)
            __ctr += 1

        pbar.finish()

        for epoch in range(epochs):

            widgets = [
                f"Epoch {epoch+1}/{epochs} ",
                progressbar.Percentage(), " ",
                progressbar.Bar(), " ",
                progressbar.ETA()
            ]

            pbar = progressbar.ProgressBar(
                maxval=np.arange(0, len(ImgArray), batch_size).shape[0],
                widgets=widgets).start()

            __ctr = 0

            batch_loss = []
            batch_accClass = []
            batch_accSeg = []

            for item in np.arange(0, len(ImgArray), batch_size):

                trainX = torch.cat(ImgArray[item:item + batch_size], dim=0)
                trainY = torch.tensor(LabelList[item:item + batch_size]).type(
                    torch.FloatTensor)

                if CUDA:
                    trainX = trainX.cuda()
                    trainY = trainY.cuda()

                self.model.train()
                self.optimizer.zero_grad()

                classPred, segPred = self.model(trainX)

                segPred = segPred.view(trainX.shape[0], -1)

                train_loss = self.lossC(
                    classPred.squeeze(), trainY) + self.lossS(
                        segPred.squeeze(), self._convertYLabel(trainY))

                train_loss.backward()
                self.optimizer.step()

                classAcc = self._calcAccuracy(classPred, trainY)
                SegAcc = self._calcAccuracy(segPred,
                                            self._convertYLabel(trainY))

                batch_loss.append(train_loss.item())
                batch_accClass.append(classAcc.item())
                batch_accSeg.append(SegAcc.item())
                pbar.update(__ctr)
                __ctr += 1

            pbar.finish()

            print(
                f'Summary -> train_loss:: {mean(batch_loss)}, class_acc:: {mean(batch_accClass)}, seg_acc:: {mean(batch_accSeg)}'
            )

    def predict(self,
                ImgList,
                mtcnn,
                batch_size=16,
                thresh=0.5,
                testLabel=None,
                detectFace=True):
        r'''
        Utility to predict `spoof/bonafide` viz `0/1` given list
        of test image Path

        @params:: 
            `ImgList`:: Test Image Path List
            `mtcnn`:: Face Cropping Module
            `batch size`:: Batch Size for testing
            `thresh`:: Threshold to classify an image as spoof or bonafide
        '''

        self.model.eval()

        ImgArray = []

        widgets = [
            f"Cropping faces: " if detectFace else f"Preprocessing Data: ",
            progressbar.Percentage(), " ",
            progressbar.Bar(), " ",
            progressbar.ETA()
        ]

        pbar = progressbar.ProgressBar(maxval=len(ImgList),
                                       widgets=widgets).start()

        __ctr = 0

        __removed = 0

        for img in ImgList:

            res = cropFace(mtcnn, img, detectFace=detectFace)
            if res is not None:
                ImgArray.append(res.unsqueeze(0))
            else:
                if testLabel is not None:
                    # offset by earlier deletions so the index still matches this image's label
                    del testLabel[__ctr - __removed]
                __removed += 1

            pbar.update(__ctr)
            __ctr += 1

        # ImgArray = torch.cat(ImgArray, dim=0)

        pbar.finish()

        returnY = np.zeros((len(ImgArray)), dtype="uint8")

        widgets = [
            f"Predicting ",
            progressbar.Percentage(), " ",
            progressbar.Bar(), " ",
            progressbar.ETA()
        ]

        pbar = progressbar.ProgressBar(
            maxval=np.arange(0, len(ImgArray), batch_size).shape[0],
            widgets=widgets).start()

        __ctr = 0

        for item in np.arange(0, len(ImgArray), batch_size):

            batchX = torch.cat(ImgArray[item:item + batch_size], dim=0)
            if CUDA:
                batchX = batchX.cuda()

            _, segPred = self.model(batchX)

            segPred = segPred.view(segPred.shape[0], -1)

            # score each image by the mean of its segmentation map, then threshold it
            segPred = torch.mean(segPred, dim=1)
            segPred = (segPred > thresh).type(
                torch.LongTensor).cpu().detach().numpy()

            returnY[item:item + batch_size] = segPred
            pbar.update(__ctr)
            __ctr += 1

        pbar.finish()

        return returnY

    def saveModel(self, path):
        r'''
        Saves current model state to the path given
        '''

        torch.save(self.model.state_dict(), path)
        print("[INFO] Model Saved")

    def loadModel(self, path):
        r'''
        Loads model state from the path given
        and maps to available/given device
        '''

        self.model.load_state_dict(
            torch.load(path,
                       map_location=DEVICE if CUDA else torch.device("cpu")))
        print("[INFO] Model Loaded..")
Example #4

NetG = Generator()
NetD = Discriminator()

optimizerD = Adam(NetD.parameters(), lr=CONFIG["LEARNING_RATE"], betas=(CONFIG["BETA1"], 0.999))
optimizerG = Adam(NetG.parameters(), lr=CONFIG["LEARNING_RATE"], betas=(CONFIG["BETA1"], 0.999))
criterion = BCELoss()

fix_noise = Variable(torch.FloatTensor(CONFIG["BATCH_SIZE"], CONFIG["NOISE_DIM"], 1, 1).normal_(0, 1))
if CONFIG["GPU_NUMS"] > 0:
    NetD = NetD.cuda()
    NetG = NetG.cuda()
    fix_noise = fix_noise.cuda()
    criterion.cuda() # it's a good habit

transform = Compose([
    ToTensor()
])

dataset = MNISTDataSetForPytorch(root=CONFIG["DATA_PATH"], train=True, transform=transform)
dataloader = torch.utils.data.DataLoader(dataset, CONFIG["BATCH_SIZE"], shuffle=True)

bar = ProgressBar(CONFIG["EPOCHS"], len(dataloader), "D Loss:%.3f;G Loss:%.3f")
for epoch in range(1, CONFIG["EPOCHS"] + 1):
    if epoch % 30 == 0:
        optimizerD.param_groups[0]['lr'] /= 10
        optimizerG.param_groups[0]['lr'] /= 10

    for ii, data in enumerate(dataloader, 0):
Example #5
Net_G = Generator()
Net_D = Discriminator()
Net_G.normal_weight_init(mean=0.0, std=0.02)
Net_D.normal_weight_init(mean=0.0, std=0.02)

Net_G = DataParallel(Net_G)
Net_D = DataParallel(Net_D)

BCE_loss = BCELoss()
L1_loss = L1Loss()

if GPU_NUMS > 1:
    Net_D.cuda()
    Net_G.cuda()
    BCE_loss = BCE_loss.cuda()
    L1_loss = L1_loss.cuda()
G_optimizer = Adam(Net_G.parameters(), lr=0.0002, betas=(0.5, 0.999))
D_optimizer = Adam(Net_D.parameters(), lr=0.0002, betas=(0.5, 0.999))
'''
Load the data
'''
if not os.path.exists("output"):
    os.mkdir("output")

train_set = DataSetFromFolderForPicTransfer(
    os.path.join("/data/facades_fixed", "train"))
test_set = DataSetFromFolderForPicTransfer(
    os.path.join("/data/facades_fixed", "test"))
train_data_loader = DataLoader(dataset=train_set,
                               num_workers=2,
Example #6

class YOLOLayer(Module):
    """Detection layer"""
    def __init__(self, anchors, num_classes, img_dim):
        super(YOLOLayer, self).__init__()
        self.anchors = anchors
        self.num_anchors = len(anchors)
        self.num_classes = num_classes
        self.bbox_attrs = 5 + num_classes
        self.img_dim = img_dim
        self.ignore_thres = 0.5
        self.lambda_coord = 1

        self.mse_loss = MSELoss()
        self.bce_loss = BCELoss()

    def forward(self, x, targets=None):
        bs = x.size(0)
        g_dim = x.size(2)
        stride = self.img_dim / g_dim
        # Tensors for cuda support
        FloatTensor = torch.cuda.FloatTensor if x.is_cuda else torch.FloatTensor
        LongTensor = torch.cuda.LongTensor if x.is_cuda else torch.LongTensor

        prediction = x.view(bs,  self.num_anchors, self.bbox_attrs, g_dim, g_dim).permute(0, 1, 3, 4, 2).contiguous()

        # Get outputs
        x = torch.sigmoid(prediction[..., 0])          # Center x
        y = torch.sigmoid(prediction[..., 1])          # Center y
        w = prediction[..., 2]                         # Width
        h = prediction[..., 3]                         # Height
        conf = torch.sigmoid(prediction[..., 4])       # Conf
        pred_cls = torch.sigmoid(prediction[..., 5:])  # Cls pred.

        # Calculate offsets for each grid
        grid_x = torch.linspace(0, g_dim-1, g_dim).repeat(g_dim,1).repeat(bs*self.num_anchors, 1, 1).view(x.shape).type(FloatTensor)
        grid_y = torch.linspace(0, g_dim-1, g_dim).repeat(g_dim,1).t().repeat(bs*self.num_anchors, 1, 1).view(y.shape).type(FloatTensor)
        scaled_anchors = [(a_w / stride, a_h / stride) for a_w, a_h in self.anchors]
        anchor_w = FloatTensor(scaled_anchors).index_select(1, LongTensor([0]))
        anchor_h = FloatTensor(scaled_anchors).index_select(1, LongTensor([1]))
        anchor_w = anchor_w.repeat(bs, 1).repeat(1, 1, g_dim*g_dim).view(w.shape)
        anchor_h = anchor_h.repeat(bs, 1).repeat(1, 1, g_dim*g_dim).view(h.shape)

        # Add offset and scale with anchors
        pred_boxes = FloatTensor(prediction[..., :4].shape)
        pred_boxes[..., 0] = x.data + grid_x
        pred_boxes[..., 1] = y.data + grid_y
        pred_boxes[..., 2] = torch.exp(w.data) * anchor_w
        pred_boxes[..., 3] = torch.exp(h.data) * anchor_h
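        # Worked example (illustrative numbers, not from the original): with
        # img_dim=416 and g_dim=13 the stride is 32; a box predicted in grid cell
        # (grid_x=4, grid_y=7) with sigmoid offsets x=0.3, y=0.6 gets a centre of
        # (4.3, 7.6) in grid units, i.e. (137.6, 243.2) pixels once the output
        # below is scaled by the stride.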

        # Training
        if targets is not None:

            if x.is_cuda:
                self.mse_loss = self.mse_loss.cuda()
                self.bce_loss = self.bce_loss.cuda()

            nGT, nCorrect, mask, conf_mask, tx, ty, tw, th, tconf, tcls = build_targets(pred_boxes.cpu().data,
                                                                                        targets.cpu().data,
                                                                                        scaled_anchors,
                                                                                        self.num_anchors,
                                                                                        self.num_classes,
                                                                                        g_dim,
                                                                                        self.ignore_thres,
                                                                                        self.img_dim)

            nProposals = int((conf > 0.25).sum().item())
            recall = float(nCorrect / nGT) if nGT else 1

            # Handle masks
            mask = Variable(mask.type(FloatTensor))
            cls_mask = Variable(mask.unsqueeze(-1).repeat(1, 1, 1, 1, self.num_classes).type(FloatTensor))
            conf_mask = Variable(conf_mask.type(FloatTensor))

            # Handle target variables
            tx    = Variable(tx.type(FloatTensor), requires_grad=False)
            ty    = Variable(ty.type(FloatTensor), requires_grad=False)
            tw    = Variable(tw.type(FloatTensor), requires_grad=False)
            th    = Variable(th.type(FloatTensor), requires_grad=False)
            tconf = Variable(tconf.type(FloatTensor), requires_grad=False)
            tcls  = Variable(tcls.type(FloatTensor), requires_grad=False)

            # Mask outputs to ignore non-existing objects
            loss_x = self.lambda_coord * self.bce_loss(x * mask, tx * mask)
            loss_y = self.lambda_coord * self.bce_loss(y * mask, ty * mask)
            loss_w = self.lambda_coord * self.mse_loss(w * mask, tw * mask) / 2
            loss_h = self.lambda_coord * self.mse_loss(h * mask, th * mask) / 2
            loss_conf = self.bce_loss(conf * conf_mask, tconf * conf_mask)
            loss_cls = self.bce_loss(pred_cls * cls_mask, tcls * cls_mask)
            loss = loss_x + loss_y + loss_w + loss_h + loss_conf + loss_cls

            return loss, loss_x.item(), loss_y.item(), loss_w.item(), loss_h.item(), loss_conf.item(), loss_cls.item(), recall

        else:
            # If not in training phase return predictions
            output = torch.cat((pred_boxes.view(bs, -1, 4) * stride, conf.view(bs, -1, 1), pred_cls.view(bs, -1, self.num_classes)), -1)
            return output.data
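A minimal smoke test of the layer above in inference mode; the anchor values and feature-map shape are illustrative assumptions, not taken from the original:

layer = YOLOLayer(anchors=[(116, 90), (156, 198), (373, 326)], num_classes=80, img_dim=416)
feature_map = torch.randn(1, 3 * 85, 13, 13)  # (batch, num_anchors * bbox_attrs, g_dim, g_dim)
detections = layer(feature_map)               # no targets, so the layer returns a (1, 3*13*13, 85) tensor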
Example #7

def main_train():
    discriminator = PatchGANDiscriminator()
    generator = Generator()

    learning_rate = 0.005
    updateable_params = filter(lambda p: p.requires_grad,
                               discriminator.parameters())
    discriminator_optimizer = optim.Adam(updateable_params, lr=learning_rate)
    updateable_params = filter(lambda p: p.requires_grad,
                               generator.parameters())
    generator_optimizer = optim.Adam(updateable_params, lr=learning_rate)
    criterion = BCELoss()

    if use_gpu:
        discriminator = discriminator.cuda()
        generator = generator.cuda()
        criterion = criterion.cuda()

    train_loader, gan_loader = load_loaders()

    for i in range(1, 10):
        print(f"GAN EPOCH: {i}")
        # Train discriminator on color images
        train(discriminator,
              None,
              discriminator_optimizer,
              criterion,
              train_loader,
              use_gpu,
              epochs=1,
              total_iter=0,
              epoch_start=0)
        # Train discriminator on fakes
        train(discriminator,
              generator,
              generator_optimizer,
              criterion,
              gan_loader,
              use_gpu,
              epochs=10,
              total_iter=0,
              epoch_start=0)
        real_score, fake_score = score_it(discriminator, generator)
        print("REAL SCORE:", real_score)
        print("FAKE SCORE:", fake_score)

    print("These should all be 1")
    debug_dat_model(discriminator, None, train_loader)
    print("These should all be 0")
    debug_dat_model(discriminator, generator, gan_loader)

    now_time = time.strftime("%H-%M-%S")
    os.makedirs("./pickles", exist_ok=True)

    disc_file = f"./pickles/discriminator-{now_time}.p"
    gen_file = f"./pickles/generator-{now_time}.p"
    print("Saving to: ")
    print(disc_file)
    print(gen_file)
    torch.save(discriminator, disc_file)
    torch.save(generator, gen_file)
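torch.save on a whole module pickles the object itself; a short sketch of loading these files back later, assuming the discriminator and generator class definitions are importable at load time:

discriminator = torch.load(disc_file, map_location="cpu")  # disc_file as printed by main_train()
generator = torch.load(gen_file, map_location="cpu")
discriminator.eval()
generator.eval()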