# Esempio n. 1 (Example 1)
# 0
def demo(data_root,
         train_list,
         validate_list,
         test_list,
         save,
         n_epochs=1,
         batch_size=64,
         lr=0.01,
         wd=0.0005,
         momentum=0.9,
         seed=None):
    """
    Train, validate and test a DLDL ResNet-18 age estimator, logging
    losses, MAEs and cumulative scores (CS) to a CSV file in `save`.

    Records are produced on the MORPH2 dataset under the S1-S2-S3 protocol.

    Args:
        data_root (str): path to the directory where the data exist.
        train_list (str): path to the train data list file.
        validate_list (str): path to the validation data list file.
        test_list (str): path to the test data list file.
        save (str): directory to save the model and results to.

        n_epochs (int): number of epochs for training (default 1).
        batch_size (int): size of minibatch (default 64).
        lr (float): base learning rate (default 0.01).
        wd (float): weight decay (default 0.0005).
        momentum (float): SGD momentum (default 0.9).
        seed (int): manually set the random seed (default None).
    """

    def _cumulative_scores(ae_list):
        """Return [CS(1), ..., CS(10)], where CS(k) is the fraction of test
        samples whose absolute age error is <= k years."""
        total = len(ae_list)
        return [sum(1 for ae in ae_list if ae <= k) / total
                for k in range(1, 11)]

    def _append_cs_row(csv_path, cs):
        """Append the cumulative-score header and values to the CSV."""
        with open(csv_path, 'a') as f:
            f.write('CS_1, CS_2, CS_3, CS_4, CS_5, CS_6, CS_7, CS_8, '
                    'CS_9, CS_10:\n')
            f.write(', '.join('%0.3f' % v for v in cs) + '\n')
            f.write('\n')

    # Create the save directory before anything is written into it (the
    # original wrote the results CSV first, which fails for a fresh path).
    if not os.path.exists(save):
        os.makedirs(save)
    if not os.path.isdir(save):
        raise Exception('%s is not a dir' % save)

    results_csv = os.path.join(save, 'DLDL_ResNet18_nesterov_results_1_1.csv')

    # Mean and std values from ImageNet.
    mean = [0.485, 0.456, 0.406]
    stdv = [0.229, 0.224, 0.225]
    train_transforms = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(5),
        transforms.ColorJitter(0.05, 0.05, 0.05, 0.05),
        transforms.ToTensor(),
        transforms.Normalize(mean=mean, std=stdv),
    ])
    test_transforms = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=mean, std=stdv),
    ])

    # Mode 'w' truncates an existing file, so no explicit remove is needed.
    with open(results_csv, 'w') as f:
        f.write(
            '******************************************************************\n'
        )
        f.write('records on MORPH2 dataset under S1-S2-S3 protocol\n')
        f.write(
            '******************************************************************\n'
        )
        f.write('\n')
        f.write('\n')

    train_set = data_prepare(data_root=data_root,
                             data_list=train_list,
                             transform=train_transforms)
    valid_set = data_prepare(data_root=data_root,
                             data_list=validate_list,
                             transform=test_transforms)
    test_set = data_prepare(data_root=data_root,
                            data_list=test_list,
                            transform=test_transforms)

    # ImageNet-pretrained ResNet-18 backbone with the classifier head
    # replaced by a 101-way output (one logit per age 0-100).
    model = models.resnet18(pretrained=True)
    model.fc = nn.Linear(model.fc.in_features, 101)

    # Model on CUDA, if available.
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")

    if seed is not None:
        torch.manual_seed(seed)
        if use_cuda:
            torch.cuda.manual_seed_all(seed)

    train_loader = torch.utils.data.DataLoader(train_set,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               pin_memory=use_cuda,
                                               num_workers=4)
    valid_loader = torch.utils.data.DataLoader(valid_set,
                                               batch_size=batch_size,
                                               shuffle=False,
                                               pin_memory=use_cuda,
                                               num_workers=4)
    test_loader = torch.utils.data.DataLoader(test_set,
                                              batch_size=batch_size,
                                              shuffle=False,
                                              pin_memory=use_cuda,
                                              num_workers=4)

    # Wrap model for multi-GPU training, if necessary.
    if use_cuda and torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)
    model_wrapper = model.to(device)

    # Optimizer and stepwise LR schedule.
    optimizer = torch.optim.SGD(model_wrapper.parameters(),
                                lr=lr,
                                momentum=momentum,
                                nesterov=True,
                                weight_decay=wd)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                     milestones=[30, 50, 60],
                                                     gamma=0.1)

    # Train and validate, checkpointing the best model for each metric.
    best_argmax_MAE = 100
    best_exp_MAE = 100
    model_state_dir_1 = os.path.join(save,
                                     'DLDL_resnet18_nesterov_model_1_1_1.dat')
    model_state_dir_2 = os.path.join(save,
                                     'DLDL_resnet18_nesterov_model_1_1_2.dat')

    with open(results_csv, 'a') as f:
        f.write(
            'epoch, train_loss, train_argmax_MAE, train_exp_MAE, valid_loss, valid_argmax_MAE, valid_exp_MAE\n'
        )

    for epoch in range(n_epochs):
        _, train_loss, train_argmax_MAE, train_exp_MAE = train(
            model=model_wrapper,
            loader=train_loader,
            optimizer=optimizer,
            epoch=epoch,
            n_epochs=n_epochs,
            device=device)
        _, valid_loss, valid_argmax_MAE, valid_exp_MAE = validate(
            model=model_wrapper,
            loader=valid_loader,
            epoch=epoch,
            n_epochs=n_epochs,
            device=device)
        # Step the LR schedule after this epoch's optimizer updates
        # (required ordering since PyTorch 1.1; the original stepped
        # before training, which skips the first LR value).
        scheduler.step()

        # Checkpoint whenever a validation metric improves
        # (torch.save overwrites an existing file in place).
        if valid_argmax_MAE < best_argmax_MAE:
            best_argmax_MAE = valid_argmax_MAE
            torch.save(model_wrapper.state_dict(), model_state_dir_1)
        if valid_exp_MAE < best_exp_MAE:
            best_exp_MAE = valid_exp_MAE
            torch.save(model_wrapper.state_dict(), model_state_dir_2)

        with open(results_csv, 'a') as f:
            f.write('%03d, %0.3f, %0.3f, %0.3f, %0.3f, %0.3f, %0.3f\n' %
                    ((epoch + 1), train_loss, train_argmax_MAE, train_exp_MAE,
                     valid_loss, valid_argmax_MAE, valid_exp_MAE))
        # Stop early if training has diverged.
        if math.isnan(float(train_argmax_MAE)) or math.isnan(
                float(train_exp_MAE)):
            break

    # Test the best checkpoint under the argmax decoding.
    if os.path.exists(model_state_dir_1):
        _, test_loss, test_argmax_MAE, AE_list, predict_age_list, real_age_list = test(
            model=model_wrapper,
            loader=test_loader,
            device=device,
            MAE_mode='argmax',
            model_state_dir=model_state_dir_1)
        os.remove(model_state_dir_1)
        with open(results_csv, 'a') as f:
            f.write('test_loss, test_argmax_MAE:\n')
            f.write('%0.3f, %0.3f\n' % (test_loss, test_argmax_MAE))
        _append_cs_row(results_csv, _cumulative_scores(AE_list))

    # Test the best checkpoint under the expectation decoding.
    if os.path.exists(model_state_dir_2):
        _, test_loss, test_exp_MAE, AE_list, predict_age_list, real_age_list = test(
            model=model_wrapper,
            loader=test_loader,
            device=device,
            MAE_mode='exp',
            model_state_dir=model_state_dir_2)
        os.remove(model_state_dir_2)
        with open(results_csv, 'a') as f:
            f.write('test_loss, test_exp_MAE:\n')
            f.write('%0.3f, %0.3f\n' % (test_loss, test_exp_MAE))
        _append_cs_row(results_csv, _cumulative_scores(AE_list))
def demo(data_root, test_list, save, batch_size=64, seed=None):
    """
    Evaluate a trained two-group ensemble-learning (EL) ResNet-18 age
    estimator on a test list, first with the plain ordinal-regression
    decoding and then with the proposed EL-2group decoding, recording
    losses, MAEs, accuracies, cumulative scores and the rank-consistency
    violation counts to a CSV file in `save`.

    Records are produced on the AgeDB dataset under the five-fold
    cross-validation protocol.

    Args:
        data_root (str): path to the directory where the data exist.
        test_list (str): path to the test data list file.
        save (str): directory holding the trained model checkpoint;
            results are written here as well.
        batch_size (int): size of minibatch (default 64).
        seed (int): manually set the random seed (default None).
    """

    def _cumulative_scores(ae_list):
        """Return [CS(1), ..., CS(10)], where CS(k) is the fraction of test
        samples whose absolute age error is <= k years."""
        total = len(ae_list)
        return [sum(1 for ae in ae_list if ae <= k) / total
                for k in range(1, 11)]

    def _append_cs_row(csv_path, cs):
        """Append the cumulative-score header and values to the CSV."""
        with open(csv_path, 'a') as f:
            f.write('CS_1, CS_2, CS_3, CS_4, CS_5, CS_6, CS_7, CS_8, '
                    'CS_9, CS_10:\n')
            f.write(', '.join('%0.3f' % v for v in cs) + '\n')

    # Create the save directory before anything is written into it (the
    # original wrote the results CSV first, which fails for a fresh path).
    if not os.path.exists(save):
        os.makedirs(save)
    if not os.path.isdir(save):
        raise Exception('%s is not a dir' % save)

    results_csv = os.path.join(
        save,
        'ordinal_regression_resnet18_nesterov_results_consistency_broken_counting_1_5.csv'
    )

    # Mean and std values from ImageNet.
    mean = [0.485, 0.456, 0.406]
    stdv = [0.229, 0.224, 0.225]
    test_transforms = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=mean, std=stdv),
    ])

    # Mode 'w' truncates an existing file, so no explicit remove is needed.
    with open(results_csv, 'w') as f:
        f.write(
            '******************************************************************\n'
        )
        f.write('records on AgeDB dataset under '
                'five-fold cross-validation'
                ' protocol\n')
        f.write(
            '******************************************************************\n'
        )
        f.write('\n')
        f.write('\n')

    test_set = data_prepare(data_root=data_root,
                            data_list=test_list,
                            transform=test_transforms)

    model = el_resnet18(num_classes=2)

    # Model on CUDA, if available.
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")

    if seed is not None:
        torch.manual_seed(seed)
        if use_cuda:
            torch.cuda.manual_seed_all(seed)

    test_loader = torch.utils.data.DataLoader(test_set,
                                              batch_size=batch_size,
                                              shuffle=False,
                                              pin_memory=use_cuda,
                                              num_workers=4)

    # Wrap model for multi-GPU inference, if necessary.
    if use_cuda and torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)
    model_wrapper = model.to(device)

    ###################### fold one #######################################
    model_state_dir_1 = os.path.join(
        save, 'el_resnet18_nesterov_two_group_model_1_5_1.dat')

    # Nothing to evaluate without a trained checkpoint.
    if not os.path.exists(model_state_dir_1):
        return

    # ---- Test with the original ordinal-regression decoding -------------
    _, test_total_loss, test_argmax_MAE, test_accuracy, AE_list, \
        predict_age_list, real_age_list, total_consistency_broken_count, \
        total_test_images_count = test(
            model=model_wrapper,
            loader=test_loader,
            device=device,
            model_state_dir=model_state_dir_1,
            mode='ordinal_regression')
    with open(results_csv, 'a') as f:
        # Banner now ends with a newline (the original ran it into the
        # next header on a single CSV line).
        f.write(
            '####test with original ordinal regression method################\n'
        )
        f.write('test_total_loss, test_argmax_MAE, test_accuracy:\n')
        f.write('%0.4f, %0.4f, %0.4f\n' %
                (test_total_loss, test_argmax_MAE, test_accuracy))

    _append_cs_row(results_csv, _cumulative_scores(AE_list))
    with open(results_csv, 'a') as f:
        # Label typo fixed ('totoal' -> 'total').
        f.write('total_consistency_broken_count: %d\n' %
                total_consistency_broken_count)
        f.write('total_test_images_count: %d\n' % total_test_images_count)
        f.write('\n')
        f.write('\n')

    # ---- Test with the proposed EL-2group decoding -----------------------
    _, test_total_loss, test_argmax_MAE, test_accuracy, AE_list, \
        predict_age_list, real_age_list = test(
            model=model_wrapper,
            loader=test_loader,
            device=device,
            model_state_dir=model_state_dir_1,
            mode='EL_2group')
    with open(results_csv, 'a') as f:
        f.write('####test with the proposed EL-2group method################\n')
        f.write('test_total_loss, test_argmax_MAE, test_accuracy:\n')
        f.write('%0.4f, %0.4f, %0.4f\n' %
                (test_total_loss, test_argmax_MAE, test_accuracy))
    _append_cs_row(results_csv, _cumulative_scores(AE_list))
def demo(data_root, test_list, save, batch_size=64, seed=None):
    """
    Evaluate a trained three-group ensemble-learning (EL) ResNet-101 age
    estimator on a test list, recording losses, MAE, accuracy, cumulative
    scores and a per-image prediction table to a CSV file in `save`.

    Records are produced on the AgeDB dataset under the 80%-20% protocol.

    Args:
        data_root (str): path to the directory where the data exist.
        test_list (str): path to the test data list file (one image name
            per line, first whitespace-separated token is the file name).
        save (str): directory holding the trained model checkpoint;
            results are written here as well.
        batch_size (int): size of minibatch (default 64).
        seed (int): manually set the random seed (default None).
    """

    def _cumulative_scores(ae_list):
        """Return [CS(1), ..., CS(10)], where CS(k) is the fraction of test
        samples whose absolute age error is <= k years."""
        total = len(ae_list)
        return [sum(1 for ae in ae_list if ae <= k) / total
                for k in range(1, 11)]

    # Create the save directory before anything is written into it (the
    # original wrote the results CSV first, which fails for a fresh path).
    if not os.path.exists(save):
        os.makedirs(save)
    if not os.path.isdir(save):
        raise Exception('%s is not a dir' % save)

    results_csv = os.path.join(
        save, 'el_resnet101_nesterov_three_group_test_record_1_1.csv')

    # Mean and std values from ImageNet.
    mean = [0.485, 0.456, 0.406]
    stdv = [0.229, 0.224, 0.225]
    test_transforms = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=mean, std=stdv),
    ])

    # Mode 'w' truncates an existing file, so no explicit remove is needed.
    with open(results_csv, 'w') as f:
        f.write(
            '******************************************************************\n'
        )
        f.write('records on AgeDB dataset under ' '80%-20%' ' protocol\n')
        f.write(
            '******************************************************************\n'
        )
        f.write('\n')
        f.write('\n')

    test_set = data_prepare(data_root=data_root,
                            data_list=test_list,
                            transform=test_transforms)

    # Build the three-group EL ResNet-101 and copy in every ImageNet
    # pretrained ResNet-101 weight whose key the EL model also has; the
    # remaining (EL-specific) parameters keep their fresh initialisation.
    model = el_resnet101(num_classes=3)
    pretrained_dict = model_zoo.load_url(model_urls['resnet101'])
    model_dict = model.state_dict()
    pretrained_dict = {
        k: v for k, v in pretrained_dict.items() if k in model_dict
    }
    model_dict.update(pretrained_dict)
    model.load_state_dict(model_dict)

    # Model on CUDA, if available.
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")

    if seed is not None:
        torch.manual_seed(seed)
        if use_cuda:
            torch.cuda.manual_seed_all(seed)

    test_loader = torch.utils.data.DataLoader(test_set,
                                              batch_size=batch_size,
                                              shuffle=False,
                                              pin_memory=use_cuda,
                                              num_workers=4)

    # Wrap model for multi-GPU inference, if necessary.
    if use_cuda and torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)
    model_wrapper = model.to(device)

    model_state_dir_1 = os.path.join(
        save, 'el_resnet101_nesterov_three_group_model_1_1_1.dat')

    # Nothing to evaluate without a trained checkpoint.
    if not os.path.exists(model_state_dir_1):
        return

    _, test_total_loss, test_cross_entropy_loss, test_kl_loss, \
        test_argmax_MAE, test_accuracy, AE_list, predict_age_list, \
        real_age_list = test(
            model=model_wrapper,
            loader=test_loader,
            device=device,
            model_state_dir=model_state_dir_1,
        )
    os.remove(model_state_dir_1)
    with open(results_csv, 'a') as f:
        f.write(
            'test_total_loss, test_cross_entropy_loss, test_kl_loss, test_argmax_MAE, test_accuracy:\n'
        )
        f.write('%0.4f, %0.4f, %0.4f, %0.4f, %0.4f\n' %
                (test_total_loss, test_cross_entropy_loss, test_kl_loss,
                 test_argmax_MAE, test_accuracy))

    cs = _cumulative_scores(AE_list)
    with open(results_csv, 'a') as f:
        f.write('CS_1, CS_2, CS_3, CS_4, CS_5, CS_6, CS_7, CS_8, '
                'CS_9, CS_10:\n')
        f.write(', '.join('%0.3f' % v for v in cs) + '\n')
        f.write('\n')

    # Per-image table: image name, ground-truth age, prediction, abs error.
    with open(test_list) as f:
        test_lists_record = f.readlines()
    # Keep the CSV open for the whole table (the original reopened the
    # file once per record).
    with open(results_csv, 'a') as f:
        f.write('image_name, true_age, predict_age, AE:\n')
        for i in range(len(AE_list)):
            img_name = test_lists_record[i].split()[0]
            f.write('%s, %d, %d, %d\n' %
                    (img_name, real_age_list[i], predict_age_list[i],
                     AE_list[i]))
# Esempio n. 4 (Example 4)
# 0
def demo(data_root,
         train_list,
         test_list,
         save,
         n_epochs=1,
         batch_size=64,
         lr=0.01,
         wd=0.0005,
         momentum=0.9,
         lemta1=0.1,
         lemta2=0.004,
         seed=None):
    """
    Train and test a conditional multitask VGG16 facial age estimator.

    Fine-tunes ``conditional_VGG16`` (initialised from ImageNet VGG16
    weights) with SGD + Nesterov momentum, checkpoints the epoch with the
    best *training* MAE, then evaluates that checkpoint on the test split.
    Per-epoch losses, per-image predictions and the CS(1..10) curve are
    appended to ``conditional_VGG16_nesterov_results_3.csv`` under ``save``.

    Args:
        data_root (str): directory containing the image data.
        train_list (str): path to the training list file.
        test_list (str): path to the test list file; each line starts with
            the image name.
        save (str): directory where the model and results are written.
        n_epochs (int): number of training epochs (default 1).
        batch_size (int): minibatch size (default 64).
        lr (float): base learning rate (default 0.01).
        wd (float): weight decay (default 0.0005).
        momentum (float): SGD momentum (default 0.9).
        lemta1 (float): loss-weight hyperparameter forwarded to
            ``train``/``test`` (default 0.1).
        lemta2 (float): loss-weight hyperparameter forwarded to
            ``train``/``test`` (default 0.004).
        seed (int or None): manual random seed (default None).
    """
    # ImageNet channel statistics -- must match the pretrained VGG16.
    mean = [0.485, 0.456, 0.406]
    stdv = [0.229, 0.224, 0.225]
    train_transforms = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(5),
        transforms.ColorJitter(0.05, 0.05, 0.05, 0.05),
        transforms.ToTensor(),
        transforms.Normalize(mean=mean, std=stdv),
    ])
    # NOTE(review): unlike the sibling demos there is no Resize(256) before
    # CenterCrop(224); images smaller than 224px would break here -- confirm
    # the test images are pre-sized.
    test_transforms = transforms.Compose([
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=mean, std=stdv),
    ])

    # Datasets
    train_set = data_prepare(data_root=data_root,
                             data_list=train_list,
                             transform=train_transforms)
    test_set = data_prepare(data_root=data_root,
                            data_list=test_list,
                            transform=test_transforms)

    # Initialise the model and copy over every pretrained VGG16 tensor whose
    # name also exists in conditional_VGG16; the extra task-specific heads
    # keep their random initialisation.
    pretrained_dict = model_zoo.load_url(model_urls['vgg16'])
    model = conditional_VGG16()
    model_dict = model.state_dict()
    pretrained_dict = {
        k: v
        for k, v in pretrained_dict.items() if k in model_dict
    }
    model_dict.update(pretrained_dict)
    model.load_state_dict(model_dict)

    # Make save directory
    if not os.path.exists(save):
        os.makedirs(save)
    if not os.path.isdir(save):
        raise Exception('%s is not a dir' % save)

    # The results CSV path is used many times below; build it once.
    results_path = os.path.join(save,
                                'conditional_VGG16_nesterov_results_3.csv')

    # Model on cuda
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")

    # Reproducibility (best effort: DataLoader workers are not seeded).
    if seed is not None:
        torch.manual_seed(seed)
        if use_cuda:
            torch.cuda.manual_seed_all(seed)

    train_loader = torch.utils.data.DataLoader(
        train_set,
        batch_size=batch_size,
        shuffle=True,
        pin_memory=use_cuda,
        num_workers=4)
    test_loader = torch.utils.data.DataLoader(
        test_set,
        batch_size=batch_size,
        shuffle=False,
        pin_memory=use_cuda,
        num_workers=4)

    # Wrap the (large) feature extractor for multi-GPUs, if available.
    if use_cuda and torch.cuda.device_count() > 1:
        model.features = torch.nn.DataParallel(model.features)
    model_wrapper = model.to(device)

    # Optimizer and LR schedule: decay the LR by 10x at epochs 50/70/90.
    optimizer = torch.optim.SGD(model_wrapper.parameters(),
                                lr=lr,
                                momentum=momentum,
                                nesterov=True,
                                weight_decay=wd)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                     milestones=[50, 70, 90],
                                                     gamma=0.1)

    # Start a fresh log file (remove any run left over from before).
    if os.path.exists(results_path):
        os.remove(results_path)
    with open(results_path, 'w') as f:
        f.write(
            'epoch, '
            'train_softmax_loss_male_black, train_mean_loss_male_black, train_deviation_loss_male_black, '
            'train_softmax_loss_male_white, train_mean_loss_male_white, train_deviation_loss_male_white, '
            'train_softmax_loss_female_black, train_mean_loss_female_black, train_deviation_loss_female_black, '
            'train_softmax_loss_female_white, train_mean_loss_female_white, train_deviation_loss_female_white, '
            'train_age_loss, train_race_loss, train_gender_loss, train_total_loss, train_race_accuracy, train_gender_accuracy, train_MAE\n'
        )

    # Train, checkpointing whenever the training MAE improves.
    best_MAE = 100
    model_state_dir = os.path.join(save,
                                   'conditional_VGG16_nesterov_model_3.dat')
    for epoch in range(n_epochs):
        # NOTE(review): scheduler.step() before the epoch's optimizer steps
        # is the pre-1.1 PyTorch convention; on modern PyTorch this should
        # move to the end of the epoch.
        scheduler.step()
        _, tra_slmb, tra_mlmb, tra_dlmb, tra_slmw, tra_mlmw, tra_dlmw, tra_slfb, tra_mlfb, tra_dlvb, tra_slfw, tra_mlfw, tra_dlfw, tra_al, tra_rl, tra_gl, tra_tl, tra_ra, tra_ga, train_MAE = train(
            model=model_wrapper,
            loader=train_loader,
            optimizer=optimizer,
            epoch=epoch,
            n_epochs=n_epochs,
            device=device,
            lemta1=lemta1,
            lemta2=lemta2)
        print('*********************************')

        # Keep only the best-so-far weights on disk.
        if train_MAE < best_MAE:
            best_MAE = train_MAE
            print('New best MAE: %.4f' % best_MAE)
            if os.path.exists(model_state_dir):
                os.remove(model_state_dir)
            torch.save(model_wrapper.state_dict(), model_state_dir)

        with open(results_path, 'a') as f:
            f.write('%03d, '
                    '%0.3f, %0.3f, %0.3f, '
                    '%0.3f, %0.3f, %0.3f, '
                    '%0.3f, %0.3f, %0.3f, '
                    '%0.3f, %0.3f, %0.3f, '
                    '%0.3f, %0.3f, %0.3f, %0.3f, %0.3f, %0.3f, %0.3f\n' %
                    ((epoch + 1), tra_slmb, tra_mlmb, tra_dlmb, tra_slmw,
                     tra_mlmw, tra_dlmw, tra_slfb, tra_mlfb, tra_dlvb,
                     tra_slfw, tra_mlfw, tra_dlfw, tra_al, tra_rl, tra_gl,
                     tra_tl, tra_ra, tra_ga, train_MAE))

    # Test the best checkpoint.
    _, tes_ra, tes_ga, test_MAE, AE_list, predict_age_list, real_age_list = test(
        model=model_wrapper,
        loader=test_loader,
        device=device,
        model_state_dir=model_state_dir,
        lemta1=lemta1,
        lemta2=lemta2)

    # CS(k): cumulative score, the fraction of test images whose absolute
    # age error is at most k years, for k = 1..10.
    n_test = len(AE_list)
    CS = [sum(1 for ae in AE_list if ae <= k) / n_test
          for k in range(1, 11)]

    with open(test_list) as f:
        test_lines = f.readlines()

    # Per-image record: name, real age, predicted age, absolute error.
    # (Open the CSV once instead of once per line.)
    with open(results_path, 'a') as f:
        f.write('******************************\n')
        f.write('\n')
        f.write('******************************\n')
        f.write('test_img_name, real_age, predict_age, absolute_error(AE):\n')
        for index, test_line in enumerate(test_lines):
            img_name = test_line.split()[0]
            img_predict_age = predict_age_list[index].item()
            img_real_age = real_age_list[index].item()
            img_AE = AE_list[index].item()
            f.write(img_name + '  ' + str(round(
                img_real_age, 2)) + '  ' + str(round(
                    img_predict_age, 2)) + '  ' + str(round(img_AE, 2)) + '\n')

    # Final summary: accuracies, MAE and the CS curve.
    with open(results_path, 'a') as f:
        f.write('******************************\n')
        f.write('test_race_accuracy, test_gender_accuracy, test_MAE\n')
        f.write('%0.3f, %0.3f, %0.3f\n' % (tes_ra, tes_ga, test_MAE))
        f.write(
            'CS_1, CS_2, CS_3, CS_4, CS_5, CS_6, CS_7, CS_8, CS_9, CS_10:\n')
        f.write(
            '%0.3f, %0.3f, %0.3f, %0.3f, %0.3f, %0.3f, %0.3f, %0.3f, %0.3f, %0.3f\n'
            % tuple(CS))
def demo(data_root,
         train_list,
         validate_list,
         save,
         n_epochs=1,
         batch_size=64,
         lr=0.01,
         wd=0.0005,
         momentum=0.9,
         seed=None):
    """
    Train and validate the two-group ensemble-learning ResNet-101 age model
    on the IMDB-WIKI dataset.

    Fine-tunes ``el_resnet101(num_classes=2)`` (initialised from ImageNet
    ResNet-101 weights) with SGD + Nesterov momentum, and keeps two
    checkpoints: one for the best validation argmax-MAE and one for the
    best validation accuracy.  Per-epoch metrics are appended to
    ``el_resnet101_2groups_nesterov_results_1.csv`` under ``save``.

    Args:
        data_root (str): directory containing the image data.
        train_list (str): path to the training list file.
        validate_list (str): path to the validation list file.
        save (str): directory where models and results are written.
        n_epochs (int): number of training epochs (default 1).
        batch_size (int): minibatch size (default 64).
        lr (float): base learning rate (default 0.01).
        wd (float): weight decay (default 0.0005).
        momentum (float): SGD momentum (default 0.9).
        seed (int or None): manual random seed (default None).
    """

    # ImageNet channel statistics -- must match the pretrained ResNet-101.
    mean = [0.485, 0.456, 0.406]
    stdv = [0.229, 0.224, 0.225]
    train_transforms = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(5),
        transforms.ColorJitter(0.05, 0.05, 0.05, 0.05),
        transforms.ToTensor(),
        transforms.Normalize(mean=mean, std=stdv),
    ])
    test_transforms = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=mean, std=stdv),
    ])

    # Start a fresh log file (remove any run left over from before).
    if os.path.exists(
            os.path.join(save, 'el_resnet101_2groups_nesterov_results_1.csv')):
        os.remove(
            os.path.join(save, 'el_resnet101_2groups_nesterov_results_1.csv'))
    with open(
            os.path.join(save, 'el_resnet101_2groups_nesterov_results_1.csv'),
            'w') as f:
        f.write(
            '******************************************************************\n'
        )
        f.write('records on IMDB-WIKI dataset\n')
        f.write(
            '******************************************************************\n'
        )
        f.write('\n')
        f.write('\n')

    # Datasets: validation reuses the deterministic test-time transforms.
    train_set = data_prepare(data_root=data_root,
                             data_list=train_list,
                             transform=train_transforms)
    valid_set = data_prepare(data_root=data_root,
                             data_list=validate_list,
                             transform=test_transforms)

    # Initialise the model and copy over every pretrained ResNet-101 tensor
    # whose name also exists in el_resnet101; the remaining (new) layers
    # keep their random initialisation.
    ensemble_learning_model = el_resnet101(num_classes=2)
    pretrained_dict = model_zoo.load_url(model_urls['resnet101'])
    model_dict = ensemble_learning_model.state_dict()
    pretrained_dict = {
        k: v
        for k, v in pretrained_dict.items() if k in model_dict
    }
    model_dict.update(pretrained_dict)
    ensemble_learning_model.load_state_dict(model_dict)

    model = ensemble_learning_model

    # Make save directory
    if not os.path.exists(save):
        os.makedirs(save)
    if not os.path.isdir(save):
        raise Exception('%s is not a dir' % save)

    # Model on cuda
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")

    # Reproducibility (best effort: DataLoader workers are not seeded).
    if seed is not None:
        torch.manual_seed(seed)
        if use_cuda:
            torch.cuda.manual_seed_all(seed)

    train_loader = torch.utils.data.DataLoader(
        train_set,
        batch_size=batch_size,
        shuffle=True,
        pin_memory=(torch.cuda.is_available()),
        num_workers=4)
    valid_loader = torch.utils.data.DataLoader(
        valid_set,
        batch_size=batch_size,
        shuffle=False,
        pin_memory=(torch.cuda.is_available()),
        num_workers=4)

    # Wrap model for multi-GPUs, if necessary
    if torch.cuda.is_available() and torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)
    model_wrapper = model.to(device)
    # Optimizer and LR schedule: decay the LR by 10x at epochs 30/50/60.
    optimizer = torch.optim.SGD(model_wrapper.parameters(),
                                lr=lr,
                                momentum=momentum,
                                nesterov=True,
                                weight_decay=wd)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                     milestones=[30, 50, 60],
                                                     gamma=0.1)

    # Track the two best checkpoints independently: lowest validation
    # argmax-MAE (model 1) and highest validation accuracy (model 2).
    best_argmax_MAE = 100
    best_accuracy = 0
    model_state_name_1 = 'el_resnet101_nesterov_2groups_imdb_wiki_model_1_1.dat'
    model_state_dir_1 = os.path.join(save, model_state_name_1)
    model_state_name_2 = 'el_resnet101_nesterov_2groups_imdb_wiki_model_1_2.dat'
    model_state_dir_2 = os.path.join(save, model_state_name_2)

    with open(
            os.path.join(save, 'el_resnet101_2groups_nesterov_results_1.csv'),
            'a') as f:
        f.write('epoch, train_total_loss, train_argmax_MAE, train_accuracy, '
                'valid_total_loss, valid_argmax_MAE, valid_accuracy\n')

    start = time.time()

    for epoch in range(n_epochs):
        # NOTE(review): scheduler.step() before the epoch's optimizer steps
        # is the pre-1.1 PyTorch convention; on modern PyTorch this should
        # move to the end of the epoch.
        scheduler.step()
        _, train_total_loss, train_argmax_MAE, train_accuracy = train(
            model=model_wrapper,
            loader=train_loader,
            optimizer=optimizer,
            epoch=epoch,
            n_epochs=n_epochs,
            device=device)
        _, valid_total_loss, valid_argmax_MAE, valid_accuracy = validate(
            model=model_wrapper,
            loader=valid_loader,
            epoch=epoch,
            n_epochs=n_epochs,
            device=device)

        # Keep only the best-so-far weights on disk for each criterion.
        if valid_argmax_MAE < best_argmax_MAE:
            best_argmax_MAE = valid_argmax_MAE
            if os.path.exists(model_state_dir_1):
                os.remove(model_state_dir_1)
            torch.save(model_wrapper.state_dict(), model_state_dir_1)
        if valid_accuracy > best_accuracy:
            best_accuracy = valid_accuracy
            if os.path.exists(model_state_dir_2):
                os.remove(model_state_dir_2)
            torch.save(model_wrapper.state_dict(), model_state_dir_2)

        with open(
                os.path.join(save,
                             'el_resnet101_2groups_nesterov_results_1.csv'),
                'a') as f:
            f.write('%03d, %0.4f, %0.4f, %0.4f, %0.4f, %0.4f, %0.4f\n' %
                    ((epoch + 1), train_total_loss, train_argmax_MAE,
                     train_accuracy, valid_total_loss, valid_argmax_MAE,
                     valid_accuracy))
        # Abort training early if the loss has diverged to NaN.
        if math.isnan(float(train_argmax_MAE)):
            break

    elapsed = (time.time() - start)
    print('time cost:', elapsed)