Example #1
def main():

    args = parse_inputs()

    # Load Model
    net = GazeNet()
    net.cuda()

    resume_path = args.resume_path
    net, optimizer, start_epoch = resume_checkpoint(net, None, resume_path)

    # Prepare dataloaders
    test_images_dir = args.test_dir
    test_pickle_path = args.test_annotation

    # For GOO
    val_set = GooDataset(test_images_dir,
                         test_pickle_path,
                         'test',
                         use_bboxes=True)

    # Optional: evaluate over the whole test set; disabled here, and
    # test_data_loader is not constructed in this example.
    test_only = True
    if not test_only:
        test_and_save(net, test_data_loader)

    # Get a random sample image from the dataset
    idx = np.random.randint(len(val_set))

    image_path = val_set[idx]['image_path']
    eyes = val_set[idx]['eye_position']
    bboxes = val_set[idx]['gt_bboxes']

    heatmap, x, y = test_on_image(net, image_path, eyes)
    draw_results(image_path, eyes, (x, y), bboxes)
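
None of these examples define parse_inputs(); it presumably wraps argparse and exposes the attributes the scripts read (args.resume_path, args.test_dir, and so on). A minimal sketch covering only the flags Example #1 uses, with assumed option names and help strings:

import argparse

def parse_inputs():
    # Sketch only: the real parser likely defines more options
    # (train_dir, batch_size, init_lr, ...) used by the later examples.
    parser = argparse.ArgumentParser(description='Gaze target inference')
    parser.add_argument('--resume_path', type=str, required=True,
                        help='path to a saved model checkpoint')
    parser.add_argument('--test_dir', type=str, required=True,
                        help='directory containing the test images')
    parser.add_argument('--test_annotation', type=str, required=True,
                        help='pickle file with the test annotations')
    return parser.parse_args()
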
Example #2
def main():

    args = parse_inputs()

    # Load Model
    net = ModelSpatial()
    net.cuda()

    resume_path = args.resume_path
    net, optimizer, start_epoch = resume_checkpoint(net, None, resume_path)

    # Prepare dataloaders
    test_images_dir = args.test_dir
    test_pickle_path = args.test_annotation

    # For GOO
    val_set = GazeDataset(test_images_dir, test_pickle_path, 'test')
    test_set = GazeDataset(test_images_dir, test_pickle_path, 'test')
    test_data_loader = torch.utils.data.DataLoader(test_set,
                                                   batch_size=16,
                                                   num_workers=8,
                                                   shuffle=False)

    predictions_npz = args.predictions_npz
    if predictions_npz is None:
        print(
            '==> No npzfile provided. Inference will be done on the test dataset and will be saved to predictions.npz'
        )
        test_and_save(net, test_data_loader)
        predictions_npz = './predictions.npz'

    print('==> Calculating eval metrics...')
    metrics = calculate_metrics(predictions_npz, val_set)

    PA_count = metrics['pa']
    # CPA_count = metrics['cpa']
    print('AUC:', metrics['auc'])
    print('L2 Distance: ', metrics['l2'])
    print('Angular Error:', metrics['angular'])
    print("Percentage Distances: ",
          [0.01, 0.03, 0.05, 0.10, 0.15, 0.20, 0.25, 0.30])
    print(
        "Proximate Accuracy: \t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t"
        % (
            PA_count[0],
            PA_count[1],
            PA_count[2],
            PA_count[3],
            PA_count[4],
            PA_count[5],
            PA_count[6],
            PA_count[7],
        ))
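
calculate_metrics() is likewise not shown. In gaze-following evaluation, the L2 distance is typically the Euclidean distance between the predicted and ground-truth gaze points in normalized image coordinates, and the angular error is the angle between the two gaze directions measured from the eye position. A per-sample sketch under those assumptions (the helper name is hypothetical):

import numpy as np

def l2_and_angular_error(eye, gt_point, pred_point):
    # All points are assumed to be (x, y) in normalized image coordinates.
    eye, gt_point, pred_point = (np.asarray(p, dtype=float)
                                 for p in (eye, gt_point, pred_point))
    # Euclidean distance between predicted and ground-truth gaze points.
    l2 = np.linalg.norm(pred_point - gt_point)
    # Angle between the gaze directions originating at the eye position.
    v_gt, v_pred = gt_point - eye, pred_point - eye
    cos_sim = np.dot(v_gt, v_pred) / (
        np.linalg.norm(v_gt) * np.linalg.norm(v_pred) + 1e-8)
    angular = np.degrees(np.arccos(np.clip(cos_sim, -1.0, 1.0)))
    return l2, angular

The "Proximate Accuracy" row presumably reports, for each of the listed distance thresholds, the fraction of samples whose L2 distance falls below that threshold.
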
Example #3
def main():

    # args and logger are assumed to be defined at module level
    # (e.g., via parse_inputs() and a logging utility).

    # Dataloaders for GOO
    batch_size = args.batch_size

    print('==> Loading Train Dataset')
    train_set = GooDataset(args.train_dir, args.train_annotation, 'train', use_gazemask=args.gazemask)
    train_data_loader = DataLoader(train_set, batch_size=batch_size,
                                   shuffle=True, num_workers=16)
    
    if args.test_dir is not None:
        print('==> Loading Test Dataset')
        test_set = GooDataset(args.test_dir, args.test_annotation, 'test')
        test_data_loader = DataLoader(test_set, batch_size=batch_size//2,
                                      shuffle=False, num_workers=8)

    # Loads model
    net = GazeNet()
    net.cuda()

    # Hyperparameters
    start_epoch = 0
    max_epoch = 25
    learning_rate = args.init_lr

    # Initializes Optimizer
    gaze_opt = GazeOptimizer(net, learning_rate)
    optimizer = gaze_opt.getOptimizer(start_epoch)

    # Resuming Training
    resume_training = args.resume_training
    if resume_training:
        net, optimizer, start_epoch = resume_checkpoint(net, optimizer, args.resume_path)       
        if args.test_dir is not None: 
            test(net, test_data_loader, logger)

    for epoch in range(start_epoch, max_epoch):
        
        # Update optimizer
        optimizer = gaze_opt.getOptimizer(epoch)

        # Train model
        train(net, train_data_loader, optimizer, epoch, logger)

        # Save model and optimizer during the last four epochs
        if epoch > max_epoch-5:
            save_path = args.save_model_dir
            save_checkpoint(net, optimizer, epoch+1, save_path)
        
        # Evaluate model
        if args.test_dir is not None:
            test(net, test_data_loader, logger)
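
save_checkpoint() and resume_checkpoint() are not defined in these snippets either; they appear to follow the usual PyTorch pattern of serializing a dict of state_dicts with torch.save and restoring it with torch.load. A sketch consistent with the call sites above (the dict keys and file naming are assumptions):

import os
import torch

def save_checkpoint(net, optimizer, epoch, save_path):
    # Bundle everything needed to resume training into one file.
    state = {
        'epoch': epoch,
        'state_dict': net.state_dict(),
        'optimizer': optimizer.state_dict(),
    }
    os.makedirs(save_path, exist_ok=True)
    torch.save(state, os.path.join(save_path, 'model_epoch_%d.pth.tar' % epoch))

def resume_checkpoint(net, optimizer, resume_path):
    checkpoint = torch.load(resume_path)
    net.load_state_dict(checkpoint['state_dict'])
    if optimizer is not None:  # Examples #1 and #2 pass optimizer=None
        optimizer.load_state_dict(checkpoint['optimizer'])
    return net, optimizer, checkpoint['epoch']
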
Example #4
def main():
    # args and logger are assumed to be defined at module level.
    # transform = _get_transform(args.input_resolution)

    # Prepare data
    print("Loading Data")

    batch_size = args.batch_size
    train_set = GazeDataset(args.train_dir, args.train_annotation, 'train')
    train_data_loader = DataLoader(dataset=train_set,
                                   batch_size=batch_size,
                                   shuffle=False,
                                   num_workers=8)

    if args.test_dir is not None:
        print('==> Loading Test Dataset')
        test_set = GazeDataset(args.test_dir, args.test_annotation, 'test')
        test_data_loader = DataLoader(test_set,
                                      batch_size=1,
                                      shuffle=False,
                                      num_workers=0)

    # Loads model
    print("Constructing model")
    net = ModelSpatial()
    net.cuda()
    # net.cuda().to(device)

    # Hyperparameters
    start_epoch = 0
    # max_epoch = 25
    max_epoch = 45

    learning_rate = args.init_lr

    # Initialize with pretrained weights (Chong et al. model)
    if args.init_weights:
        model_dict = net.state_dict()
        pretrained_dict = torch.load(args.init_weights)
        pretrained_dict = pretrained_dict['model']
        model_dict.update(pretrained_dict)
        net.load_state_dict(model_dict)

    # Initializes Optimizer
    gaze_opt = GazeOptimizer(net, learning_rate)
    optimizer = gaze_opt.getOptimizer(start_epoch)

    # Resuming Training
    resume_training = args.resume_training
    print(resume_training)
    if resume_training:
        net, optimizer, start_epoch = resume_checkpoint(
            net, optimizer, args.resume_path)
        if args.test_dir is not None:
            test(net, test_data_loader, logger)

    for epoch in range(start_epoch, max_epoch):

        # Update optimizer
        optimizer = gaze_opt.getOptimizer(epoch)

        # Train model
        train(net, train_data_loader, optimizer, epoch, logger)

        # Save model and optimizer every four epochs
        if epoch % 4 == 0:
            save_path = './saved_models_gazefollow/temp_chong/'
            save_checkpoint(net, optimizer, epoch + 1, save_path)
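
GazeOptimizer is also external to these snippets. Since getOptimizer(epoch) is called once per epoch, it most likely keeps a single optimizer and adjusts its learning rate on a step schedule. A sketch under that assumption (the optimizer choice, decay factor, and step size are guesses):

import torch.optim as optim

class GazeOptimizer:
    # Hypothetical sketch: Adam with step learning-rate decay;
    # the real class may use a different optimizer or schedule.
    def __init__(self, net, initial_lr):
        self.initial_lr = initial_lr
        self.optimizer = optim.Adam(net.parameters(), lr=initial_lr)

    def getOptimizer(self, epoch, decay=0.1, step_size=15):
        # Reduce the learning rate by a factor of `decay` every `step_size` epochs.
        lr = self.initial_lr * (decay ** (epoch // step_size))
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = lr
        return self.optimizer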