Example #1
    # prepare directory structure
    Path(args.out).mkdir(parents=True, exist_ok=True)

    # prepare dataset
    n_workers = 1
    seq_len = 6
    overlap = seq_len - 1
    print('seq_len = {},  overlap = {}'.format(seq_len, overlap))

    # loop over sequences
    for seq in args.sequences:
        # create ds to iterate
        df = get_data_info(image_dir,
                           pose_dir,
                           folder_list=[seq],
                           seq_len_range=[seq_len, seq_len],
                           overlap=overlap,
                           sample_times=1,
                           shuffle=False,
                           sort=False)
        df = df.loc[df.seq_len == seq_len]  # keep only full-length subsequences (drop the shorter tail)
        dataset = PoseSequenceDataset(df)
        dataloader = DataLoader(dataset,
                                batch_size=args.batch_size,
                                shuffle=False,
                                num_workers=n_workers)

        # loop over sequence
        answer = [
            [0.0] * 6,  # start from the all-zero 6-DoF pose of the first frame
        ]
        n_batch = len(dataloader)
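
The overlap of seq_len - 1 set above makes the window over each sequence advance one frame at a time, so a sequence of n_frames images yields n_frames - seq_len + 1 subsequences. A minimal, self-contained sketch of that segmentation (the helper below is hypothetical, added here for illustration and not part of the original script):

def window_starts(n_frames, seq_len, overlap):
    # step between consecutive windows; 1 when overlap == seq_len - 1
    step = seq_len - overlap
    return list(range(0, n_frames - seq_len + 1, step))

# 10 frames with seq_len = 6 and overlap = 5 give 5 subsequences
print(window_starts(10, 6, 5))  # [0, 1, 2, 3, 4]
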
Example #2
else:
    print('Create new data info')
    if par.partition is not None:
        partition = par.partition
        train_df, valid_df = get_partition_data_info(
            partition,
            par.train_video,
            par.seq_len,
            overlap=1,
            sample_times=par.sample_times,
            shuffle=True,
            sort=True)
    else:
        print("going into get_data_info")
        train_df = get_data_info(folder_list=par.train_video,
                                 seq_len_range=par.seq_len,
                                 overlap=1,
                                 sample_times=par.sample_times)
        valid_df = get_data_info(folder_list=par.valid_video,
                                 seq_len_range=par.seq_len,
                                 overlap=1,
                                 sample_times=par.sample_times)
    # save the data info
    train_df.to_pickle(par.train_data_info_path)
    valid_df.to_pickle(par.valid_data_info_path)

train_sampler = SortedRandomBatchSampler(train_df,
                                         par.batch_size,
                                         drop_last=True)
train_dataset = ImageSequenceDataset(train_df, par.resize_mode,
                                     (par.img_w, par.img_h), par.img_means,
                                     par.img_stds, par.minus_point_5)
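
SortedRandomBatchSampler is a class from this repository; judging from the call above, it appears to build batches of samples with equal seq_len (so every batch can be stacked into one tensor) and then shuffle the batch order. A rough, self-contained sketch of that idea (the helper name is hypothetical and this is an assumption, not the repository's actual implementation):

import random

def sorted_random_batches(seq_lens, batch_size, drop_last=True):
    # group sample indices by sequence length
    by_len = {}
    for idx, n in enumerate(seq_lens):
        by_len.setdefault(n, []).append(idx)
    batches = []
    for idxs in by_len.values():
        random.shuffle(idxs)
        for i in range(0, len(idxs), batch_size):
            batch = idxs[i:i + batch_size]
            if len(batch) == batch_size or not drop_last:
                batches.append(batch)
    random.shuffle(batches)  # random batch order, uniform seq_len within each batch
    return batches

print(sorted_random_batches([5, 5, 7, 7, 7, 5, 7], batch_size=2))
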
Example #3
    print('Load model from: ', load_model_path)

    # Data
    n_workers = 6  # pmedina (original = 1)
    seq_len = int((par.seq_len[0] + par.seq_len[1]) / 2)
    overlap = seq_len - 1
    print('seq_len = {},  overlap = {}'.format(seq_len, overlap))
    batch_size = par.batch_size

    fd = open('test_dump.txt', 'w')
    fd.write('\n' + '=' * 50 + '\n')

    for test_video in videos_to_test:
        df = get_data_info(folder_list=[test_video],
                           seq_len_range=[seq_len, seq_len],
                           overlap=overlap,
                           sample_times=1,
                           shuffle=False,
                           sort=False)
        df = df.loc[df.seq_len == seq_len]  # keep only full-length subsequences (drop the shorter tail)
        dataset = ImageSequenceDataset(df, par.resize_mode,
                                       (par.img_w, par.img_h), par.img_means,
                                       par.img_stds, par.minus_point_5)
        df.to_csv('test_df.csv')
        dataloader = DataLoader(dataset,
                                batch_size=batch_size,
                                shuffle=False,
                                num_workers=n_workers)

        gt_pose = np.load('{}{}.npy'.format(par.pose_dir,
                                            test_video))  # (n_images, 6)
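
gt_pose holds one 6-DoF pose per image, while each window of seq_len frames yields seq_len - 1 relative motions. With overlap = seq_len - 1, stitching the motions of the first window plus one new motion per later window gives a pose for every image, matching the (n_images, 6) shape. A small self-contained check of that bookkeeping (illustrative, not taken from the source):

n_images, seq_len = 100, 6
n_windows = n_images - seq_len + 1            # windows advancing one frame at a time
n_motions = (seq_len - 1) + (n_windows - 1)   # first window, then one new motion each
assert n_motions + 1 == n_images              # absolute poses = relative motions + start pose
print(n_windows, n_motions)                   # 95 99
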
Example #4
        format(args.partition))
    train_df, valid_df = get_partition_data_info(image_dir,
                                                 pose_dir,
                                                 args.partition,
                                                 args.train_sequences,
                                                 par.seq_len,
                                                 overlap=1,
                                                 sample_times=par.sample_times,
                                                 max_step=par.max_step)
else:  # case: create training and validation datasets from the given lists of sequences
    print('make train data from sequences: {} (dataset: {})'.format(
        args.train_sequences, args.dataset))
    train_df = get_data_info(image_dir,
                             pose_dir,
                             folder_list=args.train_sequences,
                             seq_len_range=par.seq_len,
                             overlap=1,
                             sample_times=par.sample_times,
                             max_step=par.max_step)
    print('make validation data from sequences: {} (dataset: {})'.format(
        args.valid_sequences, args.dataset))
    valid_df = get_data_info(image_dir,
                             pose_dir,
                             folder_list=args.valid_sequences,
                             seq_len_range=par.seq_len,
                             overlap=1,
                             sample_times=par.sample_times,
                             max_step=par.max_step)

print('Create Dataset Loaders')
train_sampler = SortedRandomBatchSampler(train_df,