Example #1
0
def eval():
    """Evaluate a pretrained SE-EDSR super-resolution network together
    with the SFace recognition backbone on the down-sampled CelebA set.

    Loads frozen pretrained weights for both networks, then runs
    ``val.val_raw_se`` for a series of down-sampling indices.
    """
    dataloader = celeba_loader.get_loader_downsample(args)
    # Face-recognition backbone with fixed pretrained weights.
    fnet = sface.sface()
    fnet.load_state_dict(torch.load('../../pretrained/sface.pth'))
    fnet.to(args.device)
    # Super-resolution network under evaluation.
    srnet = edsr_se.Edsr()
    srnet.load_state_dict(torch.load(args.model_file)['net'])
    srnet.to(args.device)

    # NOTE(review): index 4 is evaluated twice, exactly as in the original
    # code — presumably a leftover (5 expected?); confirm with the author.
    for scale_index in (8, 7, 6, 4, 4):
        val.val_raw_se("sface", -1, scale_index, 96, 112, 32, args.device,
                       fnet, srnet)
Example #2
0
def main():
    """Train the SE-SFace student network with a learn-to-guide loss.

    A frozen teacher SphereFace provides target features; the student
    ``net`` is trained on faces down-sampled by a random factor, validated
    once per epoch, and the best-accuracy checkpoint is backed up.
    Reads hyper-parameters from the module-level ``args``.
    """
    dataloader = celeba_loader.get_loader_downsample(args)
    ## Setup FNet (frozen teacher network).
    fnet = SphereFace(type='teacher',
                      pretrain=torch.load('../../pretrained/sface.pth'))
    fnet.to(args.device)
    common.freeze(fnet)
    if args.Continue:
        net, optimizer, last_epoch, scheduler = backup_init(args)
    else:
        net, optimizer, last_epoch, scheduler = common_init(args)
    best_acc = 0.0
    epochs = args.epoch
    criterion = LearnGuideLoss()
    for epoch_id in range(last_epoch + 1, epochs):
        bar = tqdm(dataloader, total=len(dataloader), ncols=0)
        # Per-index running sums; slot i corresponds to down-factor 2**i.
        loss = [0.0, 0.0, 0.0, 0.0, 0.0]
        loss_class = [0.0, 0.0, 0.0, 0.0, 0.0]
        loss_feature = [0.0, 0.0, 0.0, 0.0, 0.0]
        count = [0, 0, 0, 0, 0]
        net.train()
        for batch_id, inputs in enumerate(bar):
            lr = optimizer.param_groups[0]['lr']
            # Randomly pick a down-sample factor; index 1 (factor 2) is
            # folded into the full-resolution bucket (index 0).
            index = np.random.randint(1, 4 + 1)
            if index == 1:
                index = 0
            lr_face = inputs['down{}'.format(2**index)].to(args.device)
            hr_face = inputs['down1'].to(args.device)
            target = inputs['id'].to(args.device)
            lr_face = nn.functional.interpolate(lr_face,
                                                size=(112, 96),
                                                mode='bilinear',
                                                align_corners=False)
            # BUGFIX: was hard-coded to 'cuda:0'; use args.device so the
            # conditioning tensor lives on the same device as the model
            # (the rest of this function consistently uses args.device).
            down_factor = torch.ones(size=(args.bs, 1, 1, 1)).to(args.device)
            down_factor *= (2**index) / 16
            down_factor2 = 1 / down_factor / 16
            down_factor = torch.cat([down_factor, down_factor2], dim=1)
            lr_classes = net(tensor2SFTensor(lr_face), down_factor, target)
            fnet(tensor2SFTensor(hr_face))
            lossd, lossd_class, lossd_feature = criterion(
                lr_classes, target, net.getFeature(), fnet.getFeature())
            loss[index] += lossd.item()
            # NOTE(review): lossd_class / lossd_feature are accumulated
            # without .item(); if the criterion returns tensors this keeps
            # autograd graphs alive across batches — confirm their types.
            loss_class[index] += lossd_class
            loss_feature[index] += lossd_feature
            count[index] += 1
            optimizer.zero_grad()
            lossd.backward()
            optimizer.step()
            scheduler.step()  # update learning rate once per batch
            # Progress-bar display of running averages for this index.
            description = "epoch {} : ".format(epoch_id)
            description += 'loss: {:.4f} '.format(loss[index] / count[index])
            description += 'loss_class: {:.4f} '.format(loss_class[index] /
                                                        count[index])
            description += 'loss_feature: {:.4f} '.format(loss_feature[index] /
                                                          count[index])
            description += 'lr: {:.3e} '.format(lr)
            description += 'index: {:.0f} '.format(index)
            bar.set_description(desc=description)

        # Validate with the network's val branch enabled.
        net.setVal(True)
        acc = val.val_sesface(-1, 96, 112, 32, args.device, fnet, net, index=7)
        net.setVal(False)
        if acc > best_acc:
            best_acc = acc
            save_network_for_backup(args, net, optimizer, scheduler, epoch_id)

    # Save the final SR model
    save_network(args, net, epochs)
Example #3
0
def train():
    """Train the conditional super-resolution network on CelebA.

    Optimizes an L1 pixel loss against the 4x-larger reference face plus
    an optional identity loss (cosine distance of frozen-SFace features,
    weighted by ``args.lamb_id``). Validates each epoch and backs up
    checkpoints on improvement or every third epoch.
    """
    dataloader = celeba_loader.get_loader_downsample(args)
    ## Setup FNet (frozen feature extractor for the identity loss).
    fnet = sface.sface()

    fnet.load_state_dict(torch.load('../../pretrained/sface.pth'))
    common.freeze(fnet)
    fnet.to(args.device)

    if args.Continue:
        srnet, optimizer, last_epoch, scheduler = backup_init(args)
    else:
        srnet, optimizer, last_epoch, scheduler = common_init(args)
    criterion_pixel = nn.L1Loss()

    epochs = args.epoch
    best_acc = 0.0
    for epoch_id in range(last_epoch + 1, epochs):
        bar = tqdm(dataloader, total=len(dataloader), ncols=0)
        # Per-index running sums; slot i corresponds to down-factor 2**i.
        loss = [0.0, 0.0, 0.0, 0.0, 0.0]
        loss_pixel = [0.0, 0.0, 0.0, 0.0, 0.0]
        loss_feature = [0.0, 0.0, 0.0, 0.0, 0.0]
        count = [0, 0, 0, 0, 0]
        srnet.train()
        for _, inputs in enumerate(bar):
            lr = optimizer.param_groups[0]['lr']
            # Down-sample factor 2**index with index drawn from {2, 3, 4}.
            index = np.random.randint(2, 4 + 1)
            lr_face = inputs['down{}'.format(2**index)].to(args.device)
            mr_face = inputs['down{}'.format(2**(index - 2))].to(args.device)
            if index == 2:
                hr_face = mr_face
            else:
                hr_face = inputs['down1'].to(args.device)

            # BUGFIX: was hard-coded to 'cuda:0'; use args.device so the
            # conditioning tensor matches the model's device.
            down_factor = torch.ones(size=(args.bs, 1, 1, 1)).to(args.device)
            down_factor *= (2**index) / 16
            down_factor2 = 1 / down_factor / 16
            down_factor = torch.cat([down_factor, down_factor2], dim=1)
            sr_face = srnet(lr_face, down_factor)
            lossd_pixel = criterion_pixel(sr_face, mr_face.detach())
            loss_pixel[index] += lossd_pixel.item()
            lossd = lossd_pixel
            # Feature (identity) loss on the SR output upscaled to 112x96.
            sr_face_up = nn.functional.interpolate(sr_face,
                                                   size=(112, 96),
                                                   mode='bilinear',
                                                   align_corners=False)
            if args.lamb_id > 0:
                sr_face_feature = fnet(common.tensor2SFTensor(sr_face_up))
                hr_face_feature = fnet(
                    common.tensor2SFTensor(hr_face)).detach()
                lossd_feature = 1 - torch.nn.CosineSimilarity()(
                    sr_face_feature, hr_face_feature)
                lossd_feature = lossd_feature.mean()
                loss_feature[index] += lossd_feature.item()
                # BUGFIX: this addition used to sit outside the guard,
                # raising UnboundLocalError on the first batch whenever
                # args.lamb_id <= 0 (lossd_feature never assigned). When
                # lamb_id > 0 the result is identical.
                lossd += args.lamb_id * lossd_feature
            loss[index] += lossd.item()
            count[index] += 1
            optimizer.zero_grad()
            lossd.backward()
            optimizer.step()
            scheduler.step()  # update learning rate once per batch
            # Progress-bar display of running averages for this index.
            description = "epoch {} :".format(epoch_id)
            description += 'loss: {:.4f} '.format(loss[index] / count[index])
            description += 'loss_pixel: {:.4f} '.format(loss_pixel[index] /
                                                        count[index])
            description += 'loss_feature: {:.4f} '.format(loss_feature[index] /
                                                          count[index])
            description += 'lr: {:.3e} '.format(lr)
            description += 'index: {:.0f} '.format(index)
            bar.set_description(desc=description)
        print('16 loss:{:.4f} , 8 loss:{:.4f}, 4 loss:{:.4f}'.format(
            loss[4] / count[4], loss[3] / count[3], loss[2] / count[2]))

        acc = val.val_raw_se("sface", -1, 8, 96, 112, 32, args.device, fnet,
                             srnet)
        # BUGFIX: best_acc was overwritten (possibly lowered) whenever the
        # periodic every-3-epochs backup fired; now it only ever improves.
        should_save = acc >= best_acc or epoch_id % 3 == 0
        best_acc = max(best_acc, acc)
        if should_save:
            save_network_for_backup(args, srnet, optimizer, scheduler,
                                    epoch_id)

    # Save the final SR model.
    # NOTE(review): uses args.iterations here while main() saves with
    # `epochs` — looks inconsistent; confirm which label is intended.
    save_network(args, srnet, args.iterations)
Example #4
0
    common.freeze(srnet)
    lr_fnet = sface.SeSface()
    lr_fnet.load_state_dict(torch.load('/content/drive/MyDrive/app/sesface_1/sesface_backup_epoch13.pth')['net'])
    lr_fnet.to(args.device)
    lr_fnet.setVal(True)
    common.freeze(lr_fnet)
    fnet.eval()
    srnet.eval()
    lr_fnet.eval()
    return fnet, srnet, lr_fnet


if __name__ == '__main__':
    args = train_args.get_args()
    fnet, srnet, lr_fnet = initModels()
    dataloader = celeba_loader.get_loader_downsample(args)

    bar = tqdm(dataloader, total=len(dataloader), ncols=0)
    all_data = None
    count = 0
    for batch_id, inputs in enumerate(bar):
        target = inputs['id'].to(args.device)
        data = torch.reshape(target, [-1, 1])
        hr_face = inputs['down1'].to(args.device)
        down_factor = torch.ones(size=(args.bs, 1, 1, 1)).to('cuda:0')
        down_factor *= 1 / 16
        down_factor2 = 1 / down_factor / 16
        down_factor = torch.cat([down_factor, down_factor2], dim=1)
        feature1 = fnet(hr_face)
        feature2 = lr_fnet(hr_face, down_factor)
        data = torch.cat([data, feature1], dim=1)