Example #1
def main(args):
    print(args)
    MULTI_GPU = False
    DEVICE = torch.device("cuda:0")
    DATA_ROOT = '/raid/Data/ms1m-retinaface-t1/'
    with open(os.path.join(DATA_ROOT, 'property'), 'r') as f:
        NUM_CLASS, h, w = [int(i) for i in f.read().split(',')]

    if args.network == 'VIT':
        model = ViT_face(image_size=112,
                         patch_size=8,
                         loss_type='CosFace',
                         GPU_ID=DEVICE,
                         num_class=NUM_CLASS,
                         dim=512,
                         depth=20,
                         heads=8,
                         mlp_dim=2048,
                         dropout=0.1,
                         emb_dropout=0.1)
    elif args.network == 'VITs':
        model = ViTs_face(loss_type='CosFace',
                          GPU_ID=DEVICE,
                          num_class=NUM_CLASS,
                          image_size=112,
                          patch_size=8,
                          ac_patch_size=12,
                          pad=4,
                          dim=512,
                          depth=20,
                          heads=8,
                          mlp_dim=2048,
                          dropout=0.1,
                          emb_dropout=0.1)

    model_root = args.model
    state_dict = torch.load(model_root)
    model.load_state_dict(state_dict)

    # debug: dump checkpoint parameter names and shapes
    for key in state_dict:
        print(key, state_dict[key].shape)
    TARGET = args.target.split(',')
    vers = get_val_data('./eval/', TARGET)
    acc = []

    for ver in vers:
        name, data_set, issame = ver
        accuracy, std, xnorm, best_threshold, roc_curve = perform_val(
            MULTI_GPU, DEVICE, 512, args.batch_size, model, data_set, issame)
        print('[%s]XNorm: %1.5f' % (name, xnorm))
        print('[%s]Accuracy-Flip: %1.5f+-%1.5f' % (name, accuracy, std))
        print('[%s]Best-Threshold: %1.5f' % (name, best_threshold))
        acc.append(accuracy)
    print('Average-Accuracy: %1.5f' % (np.mean(acc)))
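
For context, a minimal command-line driver for this evaluation entry point could look like the sketch below. The flag names (--model, --network, --target, --batch_size) are inferred from the attributes that main(args) reads; the defaults are assumptions, not values taken from the original repository.

import argparse

if __name__ == '__main__':
    # Hypothetical CLI wrapper inferred from the attributes used in main(args) above.
    parser = argparse.ArgumentParser(description='Evaluate a ViT face model on verification sets')
    parser.add_argument('--model', required=True, help='path to the trained checkpoint (.pth)')
    parser.add_argument('--network', default='VIT', choices=['VIT', 'VITs'], help='backbone variant')
    parser.add_argument('--target', default='lfw', help='comma-separated verification sets under ./eval/')
    parser.add_argument('--batch_size', type=int, default=20, help='evaluation batch size')
    main(parser.parse_args())
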
Example #2
def OneEpoch(epoch, train_loader, OPTIMIZER, DISP_FREQ, NUM_EPOCH_WARM_UP, NUM_BATCH_WARM_UP):
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    batch = 0
    start = time.time()
    for inputs, labels in train_loader:
        if (epoch + 1 <= NUM_EPOCH_WARM_UP) and (batch + 1 <= NUM_BATCH_WARM_UP): # adjust LR for each training batch during warm up
            warm_up_lr(batch + 1, NUM_BATCH_WARM_UP, LR, OPTIMIZER)

        # compute output
        inputs = inputs.to(DEVICE, non_blocking=True)
        labels = labels.to(DEVICE, non_blocking=True).long()
        features = BACKBONE(inputs)
        outputs = HEAD(features, labels)
        loss = LOSS(outputs, labels)
    
        # measure accuracy and record loss
        prec1, prec5 = accuracy(outputs.data, labels, topk = (1, 5))
        losses.update(loss.data.item(), inputs.size(0))
        top1.update(prec1.data.item(), inputs.size(0))
        top5.update(prec5.data.item(), inputs.size(0))
    
        # compute gradient and do SGD step
        OPTIMIZER.zero_grad()
        loss.backward()
        OPTIMIZER.step()
                
        # display training loss & acc every DISP_FREQ batches
        if ((batch + 1) % DISP_FREQ == 0) and batch != 0:
            print("=" * 60)
            print('Epoch {}/{} Batch {}/{}\t'
                 'Training Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                 'Training Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                 'Training Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                 epoch + 1, NUM_EPOCH, batch + 1, len(train_loader) * NUM_EPOCH, loss = losses, top1 = top1, top5 = top5))
            print("Running speed in the last 100 batches: {:.3f} iter/s.".format(DISP_FREQ / (time.time() - start)))
            start = time.time()
            print("=" * 60)
        batch += 1

    epoch_loss = losses.avg
    epoch_acc = top1.avg
    writer.add_scalar("Training_Loss", epoch_loss, epoch + 1)
    writer.add_scalar("Training_Accuracy", epoch_acc, epoch + 1)
    print("=" * 60)
    print('Epoch: {}/{}\t'
        'Training Loss {loss.val:.4f} ({loss.avg:.4f})\t'
        'Training Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
        'Training Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
        epoch + 1, NUM_EPOCH, loss = losses, top1 = top1, top5 = top5))
    print("=" * 60)
    # perform validation & save checkpoints per epoch
    # validation statistics per epoch (buffer for visualization)
    print("=" * 60)
    print("Perform Evaluation on LFW, CFP_FF, CFP_FP, AgeDB, CALFW, CPLFW and VGG2_FP, and Save Checkpoints...")
    accuracy_lfw, best_threshold_lfw, roc_curve_lfw = perform_val(MULTI_GPU, DEVICE, EMBEDDING_SIZE, BATCH_SIZE, BACKBONE, lfw, lfw_issame)
    buffer_val(writer, "LFW", accuracy_lfw, best_threshold_lfw, roc_curve_lfw, epoch + 1)
    # accuracy_cfp_ff, best_threshold_cfp_ff, roc_curve_cfp_ff = perform_val(MULTI_GPU, DEVICE, EMBEDDING_SIZE, BATCH_SIZE, BACKBONE, cfp_ff, cfp_ff_issame)
    # buffer_val(writer, "CFP_FF", accuracy_cfp_ff, best_threshold_cfp_ff, roc_curve_cfp_ff, epoch + 1)
    # accuracy_cfp_fp, best_threshold_cfp_fp, roc_curve_cfp_fp = perform_val(MULTI_GPU, DEVICE, EMBEDDING_SIZE, BATCH_SIZE, BACKBONE, cfp_fp, cfp_fp_issame)
    # buffer_val(writer, "CFP_FP", accuracy_cfp_fp, best_threshold_cfp_fp, roc_curve_cfp_fp, epoch + 1)
    # accuracy_agedb, best_threshold_agedb, roc_curve_agedb = perform_val(MULTI_GPU, DEVICE, EMBEDDING_SIZE, BATCH_SIZE, BACKBONE, agedb, agedb_issame)
    # buffer_val(writer, "AgeDB", accuracy_agedb, best_threshold_agedb, roc_curve_agedb, epoch + 1)
    # accuracy_calfw, best_threshold_calfw, roc_curve_calfw = perform_val(MULTI_GPU, DEVICE, EMBEDDING_SIZE, BATCH_SIZE, BACKBONE, calfw, calfw_issame)
    # buffer_val(writer, "CALFW", accuracy_calfw, best_threshold_calfw, roc_curve_calfw, epoch + 1)
    # accuracy_cplfw, best_threshold_cplfw, roc_curve_cplfw = perform_val(MULTI_GPU, DEVICE, EMBEDDING_SIZE, BATCH_SIZE, BACKBONE, cplfw, cplfw_issame)
    # buffer_val(writer, "CPLFW", accuracy_cplfw, best_threshold_cplfw, roc_curve_cplfw, epoch + 1)
    accuracy_vgg2_fp, best_threshold_vgg2_fp, roc_curve_vgg2_fp = perform_val(MULTI_GPU, DEVICE, EMBEDDING_SIZE, BATCH_SIZE, BACKBONE, vgg2_fp, vgg2_fp_issame)
    buffer_val(writer, "VGGFace2_FP", accuracy_vgg2_fp, best_threshold_vgg2_fp, roc_curve_vgg2_fp, epoch + 1)
    print("=" * 60)

    # save checkpoints per epoch
    if MULTI_GPU:
        torch.save(BACKBONE.module.state_dict(), os.path.join(MODEL_ROOT, "Backbone_{}_Epoch_{}_Batch_{}_Time_{}_checkpoint.pth".format(BACKBONE_NAME, epoch + 1, batch, get_time())))
        torch.save(HEAD.state_dict(), os.path.join(MODEL_ROOT, "Head_{}_Epoch_{}_Batch_{}_Time_{}_checkpoint.pth".format(HEAD_NAME, epoch + 1, batch, get_time())))
    else:
        torch.save(BACKBONE.state_dict(), os.path.join(MODEL_ROOT, "Backbone_{}_Epoch_{}_Batch_{}_Time_{}_checkpoint.pth".format(BACKBONE_NAME, epoch + 1, batch, get_time())))
        torch.save(HEAD.state_dict(), os.path.join(MODEL_ROOT, "Head_{}_Epoch_{}_Batch_{}_Time_{}_checkpoint.pth".format(HEAD_NAME, epoch + 1, batch, get_time())))
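
The loop above relies on an AverageMeter helper for running statistics (it reads .val and .avg and calls .update(value, n)). The class itself is not shown in the snippet; a minimal sketch consistent with that usage:

class AverageMeter(object):
    """Tracks the most recent value and a running (weighted) average.

    Sketch inferred from how the meter is used above; the original class may differ in detail.
    """
    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0.0    # most recent value
        self.avg = 0.0    # running average
        self.sum = 0.0    # weighted sum of values
        self.count = 0    # total weight (e.g. number of samples)

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
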
Example #3
                          NUM_EPOCH,
                          batch + 1,
                          len(train_loader) * NUM_EPOCH,
                          loss=losses,
                          top1=top1,
                          top5=top5))
                print("=" * 60)

                # perform validation & save checkpoints per epoch
                # validation statistics per epoch (buffer for visualization)
                print("=" * 60)
                print(
                    "Perform Evaluation on LFW, CFP_FF, CFP_FP, AgeDB, CALFW, CPLFW and VGG2_FP, and Save Checkpoints..."
                )
                accuracy_lfw, best_threshold_lfw, roc_curve_lfw = perform_val(
                    MULTI_GPU, DEVICE, EMBEDDING_SIZE, BATCH_SIZE, BACKBONE,
                    lfw, lfw_issame)
                buffer_val(writer, "LFW", accuracy_lfw, best_threshold_lfw,
                           roc_curve_lfw, batch + 1)
                accuracy_cfp_ff, best_threshold_cfp_ff, roc_curve_cfp_ff = perform_val(
                    MULTI_GPU, DEVICE, EMBEDDING_SIZE, BATCH_SIZE, BACKBONE,
                    cfp_ff, cfp_ff_issame)
                buffer_val(writer, "CFP_FF", accuracy_cfp_ff,
                           best_threshold_cfp_ff, roc_curve_cfp_ff, batch + 1)
                accuracy_cfp_fp, best_threshold_cfp_fp, roc_curve_cfp_fp = perform_val(
                    MULTI_GPU, DEVICE, EMBEDDING_SIZE, BATCH_SIZE, BACKBONE,
                    cfp_fp, cfp_fp_issame)
                buffer_val(writer, "CFP_FP", accuracy_cfp_fp,
                           best_threshold_cfp_fp, roc_curve_cfp_fp, batch + 1)
                accuracy_agedb, best_threshold_agedb, roc_curve_agedb = perform_val(
                    MULTI_GPU, DEVICE, EMBEDDING_SIZE, BATCH_SIZE, BACKBONE,
Example #4
                top1 = AverageMeter()

            if (
                (batch + 1) % VER_FREQ == 0
            ) and batch != 0:  #perform validation & save checkpoints (buffer for visualization)
                for params in OPTIMIZER.param_groups:
                    lr = params['lr']
                    break
                print("Learning rate %f" % lr)
                print("Perform Evaluation on", TARGET,
                      ", and Save Checkpoints...")
                acc = []
                for ver in vers:
                    name, data_set, issame = ver
                    accuracy, std, xnorm, best_threshold, roc_curve = perform_val(
                        MULTI_GPU, DEVICE, EMBEDDING_SIZE, BATCH_SIZE,
                        BACKBONE, data_set, issame)
                    buffer_val(writer, name, accuracy, std, xnorm,
                               best_threshold, roc_curve, batch + 1)
                    print('[%s][%d]XNorm: %1.5f' % (name, batch + 1, xnorm))
                    print('[%s][%d]Accuracy-Flip: %1.5f+-%1.5f' %
                          (name, batch + 1, accuracy, std))
                    print('[%s][%d]Best-Threshold: %1.5f' %
                          (name, batch + 1, best_threshold))
                    acc.append(accuracy)

                # save checkpoints when validation accuracy improves
                if need_save(acc, highest_acc):
                    if MULTI_GPU:
                        torch.save(
                            BACKBONE.module.state_dict(),
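
The fragment above (truncated by the snippet aggregator) gates checkpointing on need_save(acc, highest_acc), whose body is not shown. One plausible sketch, assuming highest_acc is a list tracking the best accuracy seen so far for each verification set; the actual criterion in the source repository may be stricter:

def need_save(acc, highest_acc):
    # Sketch only: save when any verification set improves on its best accuracy so far,
    # updating highest_acc in place.
    do_save = False
    for i, accuracy in enumerate(acc):
        if accuracy > highest_acc[i]:
            highest_acc[i] = accuracy
            do_save = True
    return do_save
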
Example #5
    BACKBONE = BACKBONE_DICT[BACKBONE_NAME](INPUT_SIZE)
    print("=" * 60)
    print("{} Backbone Generated".format(BACKBONE_NAME))
    print("=" * 60)
    if BACKBONE_RESUME_ROOT:
        print("=" * 60)
        if os.path.isfile(BACKBONE_RESUME_ROOT):
            print("Loading Backbone Checkpoint '{}'".format(
                BACKBONE_RESUME_ROOT))
            BACKBONE.load_state_dict(torch.load(BACKBONE_RESUME_ROOT))
        else:
            print("No Checkpoint Found at '{}'".format(BACKBONE_RESUME_ROOT))
            exit()
        print("=" * 60)

    BACKBONE.cuda()

    if len(GPU_ID) > 1:
        # multi-GPU setting
        BACKBONE = nn.DataParallel(BACKBONE, device_ids=GPU_ID)

    accuracy_lfw, best_threshold_lfw, roc_curve_lfw = perform_val(
        EMBEDDING_SIZE, BATCH_SIZE, BACKBONE, lfw, lfw_issame)
    accuracy_cfp_fp, best_threshold_cfp_fp, roc_curve_cfp_fp = perform_val(
        EMBEDDING_SIZE, BATCH_SIZE, BACKBONE, cfp_fp, cfp_fp_issame)
    accuracy_agedb_30, best_threshold_agedb, roc_curve_agedb = perform_val(
        EMBEDDING_SIZE, BATCH_SIZE, BACKBONE, agedb_30, agedb_30_issame)
    print("Evaluation: LFW Acc: {}, CFP_FP Acc: {}, AgeDB Acc: {}".format(
        accuracy_lfw, accuracy_cfp_fp, accuracy_agedb_30))
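
Here BACKBONE_DICT maps a backbone name to its constructor; its contents are not part of the snippet. A hypothetical registry built from the model types listed in Example #7 (the import paths follow a face.evoLVe-style layout and are an assumption):

# NOTE: import paths and dictionary contents are assumptions, not taken from the snippet itself.
from backbone.model_resnet import ResNet_50, ResNet_101, ResNet_152
from backbone.model_irse import IR_50, IR_101, IR_152, IR_SE_50, IR_SE_101, IR_SE_152

BACKBONE_DICT = {
    'ResNet_50': ResNet_50, 'ResNet_101': ResNet_101, 'ResNet_152': ResNet_152,
    'IR_50': IR_50, 'IR_101': IR_101, 'IR_152': IR_152,
    'IR_SE_50': IR_SE_50, 'IR_SE_101': IR_SE_101, 'IR_SE_152': IR_SE_152,
}
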
Example #6
 # perform validation & save checkpoints per epoch
 # validation statistics per epoch (buffer for visualization)
 print("=" * 60)
 print(
     "Perform Evaluation on LFW, CFP_FF, CFP_FP, AgeDB, CALFW, CPLFW and VGG2_FP, and Save Checkpoints..."
 )
 # accuracy_lfw, best_threshold_lfw, roc_curve_lfw = perform_val(MULTI_GPU, DEVICE, EMBEDDING_SIZE, BATCH_SIZE, BACKBONE, lfw, lfw_issame)
 # buffer_val(writer, "LFW", accuracy_lfw, best_threshold_lfw, roc_curve_lfw, epoch + 1)
 # accuracy_cfp_ff, best_threshold_cfp_ff, roc_curve_cfp_ff = perform_val(MULTI_GPU, DEVICE, EMBEDDING_SIZE, BATCH_SIZE, BACKBONE, cfp_ff, cfp_ff_issame)
 # buffer_val(writer, "CFP_FF", accuracy_cfp_ff, best_threshold_cfp_ff, roc_curve_cfp_ff, epoch + 1)
 # accuracy_cfp_fp, best_threshold_cfp_fp, roc_curve_cfp_fp = perform_val(MULTI_GPU, DEVICE, EMBEDDING_SIZE, BATCH_SIZE, BACKBONE, cfp_fp, cfp_fp_issame)
 # buffer_val(writer, "CFP_FP", accuracy_cfp_fp, best_threshold_cfp_fp, roc_curve_cfp_fp, epoch + 1)
 # accuracy_agedb, best_threshold_agedb, roc_curve_agedb = perform_val(MULTI_GPU, DEVICE, EMBEDDING_SIZE, BATCH_SIZE, BACKBONE, agedb, agedb_issame)
 # buffer_val(writer, "AgeDB", accuracy_agedb, best_threshold_agedb, roc_curve_agedb, epoch + 1)
 accuracy_calfw, best_threshold_calfw, roc_curve_calfw = perform_val(
     MULTI_GPU, DEVICE, EMBEDDING_SIZE, BATCH_SIZE, BACKBONE, calfw,
     calfw_issame)
 buffer_val(writer, "CALFW", accuracy_calfw, best_threshold_calfw,
            roc_curve_calfw, epoch + 1)
 accuracy_cplfw, best_threshold_cplfw, roc_curve_cplfw = perform_val(
     MULTI_GPU, DEVICE, EMBEDDING_SIZE, BATCH_SIZE, BACKBONE, cplfw,
     cplfw_issame)
 buffer_val(writer, "CPLFW", accuracy_cplfw, best_threshold_cplfw,
            roc_curve_cplfw, epoch + 1)
 # accuracy_vgg2_fp, best_threshold_vgg2_fp, roc_curve_vgg2_fp = perform_val(MULTI_GPU, DEVICE, EMBEDDING_SIZE, BATCH_SIZE, BACKBONE, vgg2_fp, vgg2_fp_issame)
 # buffer_val(writer, "VGGFace2_FP", accuracy_vgg2_fp, best_threshold_vgg2_fp, roc_curve_vgg2_fp, epoch + 1)
 # print("Epoch {}/{}, Evaluation: LFW Acc: {}, CFP_FF Acc: {}, CFP_FP Acc: {}, AgeDB Acc: {}, CALFW Acc: {}, CPLFW Acc: {}, VGG2_FP Acc: {}".format(epoch + 1, NUM_EPOCH, accuracy_lfw, accuracy_cfp_ff, accuracy_cfp_fp, accuracy_agedb, accuracy_calfw, accuracy_cplfw, accuracy_vgg2_fp))
 # print("=" * 60)
 print("Epoch {}/{}, Evaluation:  CALFW Acc: {}, CPLFW Acc: {}".format(
     epoch + 1, NUM_EPOCH, accuracy_calfw, accuracy_cplfw))
 print("=" * 60)
Example #7
def main(ARGS):

    if ARGS.model_path is None:
        raise AssertionError("Path should not be None")

    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda:0" if use_cuda else "cpu")

    ####### Model setup
    print('Model type: %s' % ARGS.model_type)
    if ARGS.model_type == 'ResNet_50':
        model = ResNet_50(ARGS.input_size)
    elif ARGS.model_type == 'ResNet_101':
        model = ResNet_101(ARGS.input_size)
    elif ARGS.model_type == 'ResNet_152':
        model = ResNet_152(ARGS.input_size)
    elif ARGS.model_type == 'IR_50':
        model = IR_50(ARGS.input_size)
    elif ARGS.model_type == 'IR_101':
        model = IR_101(ARGS.input_size)
    elif ARGS.model_type == 'IR_152':
        model = IR_152(ARGS.input_size)
    elif ARGS.model_type == 'IR_SE_50':
        model = IR_SE_50(ARGS.input_size)
    elif ARGS.model_type == 'IR_SE_101':
        model = IR_SE_101(ARGS.input_size)
    elif ARGS.model_type == 'IR_SE_152':
        model = IR_SE_152(ARGS.input_size)
    else:
        raise AssertionError(
            'Unsupported model_type {}. We only support: [\'ResNet_50\', \'ResNet_101\', \'ResNet_152\', \'IR_50\', \'IR_101\', \'IR_152\', \'IR_SE_50\', \'IR_SE_101\', \'IR_SE_152\']'
            .format(ARGS.model_type))

    if use_cuda:
        model.load_state_dict(torch.load(ARGS.model_path))
    else:
        model.load_state_dict(torch.load(ARGS.model_path, map_location='cpu'))

    model.to(device)
    # embedding_size = 512
    model.eval()

    # DATA_ROOT = './../evoLVe_data/data' # the parent root where your train/val/test data are stored
    # INPUT_SIZE = [112, 112] # support: [112, 112] and [224, 224]
    # BACKBONE_RESUME_ROOT = './../evoLVe_data/pth/backbone_ir50_ms1m_epoch120.pth' # the root to resume training from a saved checkpoint
    # BACKBONE_RESUME_ROOT = './../pytorch-face/pth/IR_50_MODEL_arcface_casia_epoch56_lfw9925.pth'

    MULTI_GPU = False  # flag to use multiple GPUs; for a single-GPU run, first do "export CUDA_VISIBLE_DEVICES=<device_id>" to select the GPU card you want to use
    # DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # EMBEDDING_SIZE = 512 # feature dimension
    # BATCH_SIZE = 512

    # BACKBONE = IR_50(INPUT_SIZE)

    # if os.path.isfile(BACKBONE_RESUME_ROOT):
    #     print("Loading Backbone Checkpoint '{}'".format(BACKBONE_RESUME_ROOT))
    #     BACKBONE.load_state_dict(torch.load(BACKBONE_RESUME_ROOT, map_location='cpu'))
    # else:
    #     print("No Checkpoint Found at '{}'.".format(BACKBONE_RESUME_ROOT))
    #     sys.exit()

    print("=" * 60)
    print(
        "Performing Evaluation on LFW, CFP_FF, CFP_FP, AgeDB, CALFW, CPLFW and VGG2_FP..."
    )

    #### LFW
    print("Performing Evaluation on LFW...")
    lfw, lfw_issame = get_val_pair(ARGS.data_root, 'lfw')
    accuracy_lfw, best_threshold_lfw, roc_curve_lfw = perform_val(
        MULTI_GPU, device, ARGS.embedding_size, ARGS.batch_size, model, lfw,
        lfw_issame)
    print("Evaluation: LFW Acc: {}".format(accuracy_lfw))

    #### CALFW WORKS
    print("Performing Evaluation on CALFW...")
    calfw, calfw_issame = get_val_pair(ARGS.data_root, 'calfw')
    accuracy_calfw, best_threshold_calfw, roc_curve_calfw = perform_val(
        MULTI_GPU, device, ARGS.embedding_size, ARGS.batch_size, model, calfw,
        calfw_issame)
    print("Evaluation: CALFW Acc: {}".format(accuracy_calfw))

    #### CPLFW
    print("Performing Evaluation on CPLFW...")
    cplfw, cplfw_issame = get_val_pair(ARGS.data_root, 'cplfw')
    accuracy_cplfw, best_threshold_cplfw, roc_curve_cplfw = perform_val(
        MULTI_GPU, device, ARGS.embedding_size, ARGS.batch_size, model, cplfw,
        cplfw_issame)
    print("Evaluation: CPLFW Acc: {}".format(accuracy_cplfw))

    #### CFP-FF
    print("Performing Evaluation on CFP-FF...")
    cfp_ff, cfp_ff_issame = get_val_pair(ARGS.data_root, 'cfp_ff')
    accuracy_cfp_ff, best_threshold_cfp_ff, roc_curve_cfp_ff = perform_val(
        MULTI_GPU, device, ARGS.embedding_size, ARGS.batch_size, model, cfp_ff,
        cfp_ff_issame)
    print("Evaluation: CFP-FF Acc: {}".format(accuracy_cfp_ff))

    #### CFP-FP
    print("Performing Evaluation on CFP-FP...")
    cfp_fp, cfp_fp_issame = get_val_pair(ARGS.data_root, 'cfp_fp')
    accuracy_cfp_fp, best_threshold_cfp_fp, roc_curve_cfp_fp = perform_val(
        MULTI_GPU, device, ARGS.embedding_size, ARGS.batch_size, model, cfp_fp,
        cfp_fp_issame)
    print("Evaluation: CFP-FP Acc: {}".format(accuracy_cfp_fp))

    #### AgeDB_30
    print("Performing Evaluation on AgeDB_30...")
    agedb_30, agedb_30_issame = get_val_pair(ARGS.data_root, 'agedb_30')
    accuracy_agedb, best_threshold_agedb, roc_curve_agedb = perform_val(
        MULTI_GPU, device, ARGS.embedding_size, ARGS.batch_size, model,
        agedb_30, agedb_30_issame)
    print("Evaluation: AgeDB_30 Acc: {}".format(accuracy_agedb))

    #### VggFace2_FP
    print("Performing Evaluation on VggFace2_FP...")
    vgg2_fp, vgg2_fp_issame = get_val_pair(ARGS.data_root, 'vgg2_fp')
    accuracy_vgg2_fp, best_threshold_vgg2_fp, roc_curve_vgg2_fp = perform_val(
        MULTI_GPU, device, ARGS.embedding_size, ARGS.batch_size, model,
        vgg2_fp, vgg2_fp_issame)
    print("Evaluation: VggFace2_FP Acc: {}".format(accuracy_vgg2_fp))

    print("=" * 60)
    print("FINAL RESULTS:")
    print(
        "Evaluation: LFW Acc: {}, CFP_FF Acc: {}, CFP_FP Acc: {}, AgeDB Acc: {}, CALFW Acc: {}, CPLFW Acc: {}, VGG2_FP Acc: {}"
        .format(accuracy_lfw, accuracy_cfp_ff, accuracy_cfp_fp, accuracy_agedb,
                accuracy_calfw, accuracy_cplfw, accuracy_vgg2_fp))
    print("=" * 60)
Example #8
            # display training loss & acc every 2000 batches
            if batch % 2000 == 0 and batch != 0:
                print("=" * 60)
                print('Epoch {}/{} Batch {}/{}\t'
                      'Training Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Training Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'Training Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                    epoch + 1, NUM_EPOCH, batch + 1, len(train_loader) * NUM_EPOCH, loss=losses, top1=top1, top5=top5))
                print("=" * 60)

                # perform validation & save checkpoints per epoch
                # validation statistics per epoch (buffer for visualization)
                print("=" * 60)
                print("Perform Evaluation on LFW, CFP_FF, CFP_FP, AgeDB, CALFW, CPLFW and VGG2_FP, and Save Checkpoints...")
                accuracy_lfw, best_threshold_lfw, roc_curve_lfw = perform_val(MULTI_GPU, DEVICE, EMBEDDING_SIZE, BATCH_SIZE, BACKBONE, lfw, lfw_issame)
                buffer_val(writer, "LFW", accuracy_lfw, best_threshold_lfw, roc_curve_lfw, batch + 1)
                accuracy_cfp_ff, best_threshold_cfp_ff, roc_curve_cfp_ff = perform_val(MULTI_GPU, DEVICE, EMBEDDING_SIZE, BATCH_SIZE, BACKBONE, cfp_ff, cfp_ff_issame)
                buffer_val(writer, "CFP_FF", accuracy_cfp_ff, best_threshold_cfp_ff, roc_curve_cfp_ff, batch + 1)
                accuracy_cfp_fp, best_threshold_cfp_fp, roc_curve_cfp_fp = perform_val(MULTI_GPU, DEVICE, EMBEDDING_SIZE, BATCH_SIZE, BACKBONE, cfp_fp, cfp_fp_issame)
                buffer_val(writer, "CFP_FP", accuracy_cfp_fp, best_threshold_cfp_fp, roc_curve_cfp_fp, batch + 1)
                accuracy_agedb, best_threshold_agedb, roc_curve_agedb = perform_val(MULTI_GPU, DEVICE, EMBEDDING_SIZE, BATCH_SIZE, BACKBONE, agedb, agedb_issame)
                buffer_val(writer, "AgeDB", accuracy_agedb, best_threshold_agedb, roc_curve_agedb, batch + 1)
                accuracy_calfw, best_threshold_calfw, roc_curve_calfw = perform_val(MULTI_GPU, DEVICE, EMBEDDING_SIZE, BATCH_SIZE, BACKBONE, calfw, calfw_issame)
                buffer_val(writer, "CALFW", accuracy_calfw, best_threshold_calfw, roc_curve_calfw, batch + 1)
                accuracy_cplfw, best_threshold_cplfw, roc_curve_cplfw = perform_val(MULTI_GPU, DEVICE, EMBEDDING_SIZE, BATCH_SIZE, BACKBONE, cplfw, cplfw_issame)
                buffer_val(writer, "CPLFW", accuracy_cplfw, best_threshold_cplfw, roc_curve_cplfw, batch + 1)
                accuracy_vgg2_fp, best_threshold_vgg2_fp, roc_curve_vgg2_fp = perform_val(MULTI_GPU, DEVICE, EMBEDDING_SIZE, BATCH_SIZE, BACKBONE, vgg2_fp, vgg2_fp_issame)
                buffer_val(writer, "VGGFace2_FP", accuracy_vgg2_fp, best_threshold_vgg2_fp, roc_curve_vgg2_fp, batch + 1)
                print("Batch {}/{}, Evaluation: LFW Acc: {}, CFP_FF Acc: {}, CFP_FP Acc: {}, AgeDB Acc: {}, CALFW Acc: {}, CPLFW Acc: {}, VGG2_FP Acc: {}".format(batch + 1, len(train_loader) * NUM_EPOCH, accuracy_lfw, accuracy_cfp_ff, accuracy_cfp_fp, accuracy_agedb, accuracy_calfw, accuracy_cplfw, accuracy_vgg2_fp))
                print("=" * 60)