Example #1
def main():
    args = parse_args()

    ## MOTION FILTER + SKELETON NORM (SAVE NUMPY)
    videos_dirs = get_videos_path(args.dataset_path)

    for video_dir in tqdm(videos_dirs, desc='Processing videos...'):
        ## Create folder for .npy files
        os.makedirs(video_dir + '/data/', exist_ok=True)

        ## Get json files
        jsons_path = video_dir + '/openpose/json/'
        jsons_files = os.listdir(jsons_path)
        jsons_files = sorted(jsons_files, key=sort_jsons)

        frames = load_video_json(jsons_files, jsons_path)
        frames = treat_missing_joints(frames)

        frames = np.array([normalize_kp(frames[frame, :, :]) for frame in range(frames.shape[0])])

        frames = motion_filter(frames)

        n_samples, _, _ = frames.shape
        for sample in range(n_samples):
            np.save(video_dir + '/data/' + jsons_files[sample].split('.json')[0] + '.npy', frames[sample, :, :])
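
The json files are sorted with a numeric key rather than a plain string sort. The helper sort_jsons is not part of this example; a minimal sketch of what it might look like, assuming OpenPose's default '<video>_<frame>_keypoints.json' naming:

import re

def sort_jsons(filename):
    # Hypothetical sort key: extract the zero-padded frame index from
    # OpenPose's '<video>_<frame>_keypoints.json' naming so that frame 2
    # sorts before frame 10 regardless of the filename prefix.
    match = re.search(r'_(\d+)_keypoints\.json$', filename)
    return int(match.group(1)) if match else -1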
Example #2
def main():
    args = parse_args()

    videos_dir = get_videos_path(args.dataset_path)
    videos_path = [video_dir + '/' + video_dir.split('/')[-1] + '.mp4' for video_dir in videos_dir]

    for idx, video_path in enumerate(tqdm(videos_path, desc="Rendering debug videos...")):
        video_name = video_path.split('/')[-1]

        filter_video(video_name, videos_dir[idx])
Example #3
def main():
    args = parse_args()

    videos_path = get_videos_path(args.dataset_path)
    videos_path = [
        video_path + '/' + video_path.split('/')[-1] + '.mp4'
        for video_path in videos_path
    ]

    for video in tqdm(videos_path, desc="Standardizing videos..."):
        standardize_data(video, args.fps, args.sample_rate)
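
standardize_data is expected to bring every clip to the fps and audio sample rate given on the command line. The helper itself is not shown; a rough sketch of one way to do it with ffmpeg (output naming and flags are assumptions, not the original implementation):

import subprocess

def standardize_data(video_path, fps, sample_rate):
    # Hypothetical re-encode with ffmpeg: force a common video frame rate
    # and audio sample rate, writing the result next to the input file.
    out_path = video_path.replace('.mp4', '_std.mp4')
    subprocess.run([
        'ffmpeg', '-y', '-i', video_path,
        '-r', str(fps),           # target frame rate
        '-ar', str(sample_rate),  # target audio sample rate
        out_path
    ], check=True)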
Example #4
def main():
    args = parse_args()

    videos_path = get_videos_path(args.dataset_path)
    videos_path = [
        video_path + '/' + video_path.split('/')[-1] + '.mp4'
        for video_path in videos_path
    ]

    for video in tqdm(videos_path, desc="Extracting audio from videos..."):
        extract_audio(video)
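
extract_audio is not shown either. A minimal sketch, assuming the audio track is saved as a .wav with the same basename as the video (which matches the .wav paths built in the metadata example further down):

import subprocess

def extract_audio(video_path):
    # Hypothetical extraction: drop the video stream (-vn) and store the
    # audio track as 16-bit PCM WAV next to the input .mp4.
    wav_path = video_path.replace('.mp4', '.wav')
    subprocess.run([
        'ffmpeg', '-y', '-i', video_path,
        '-vn', '-acodec', 'pcm_s16le', wav_path
    ], check=True)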
Example #5
def main():
    args = parse_args()

    videos_dirs = get_videos_path(args.dataset_path)

    videos_paths = [video_dir + '/' + video_dir.split('/')[-1] + '.mp4' for video_dir in videos_dirs]
    txts_paths =   [video_dir + '/' + video_dir.split('/')[-1] + '.txt' for video_dir in videos_dirs]

    for idx, video_path in enumerate(tqdm(videos_paths, desc="Cutting videos...")):
        with open(txts_paths[idx], 'r') as f:
            video_intervals = f.read().splitlines()
        video_intervals = [
            video_interval.replace(' ', '')
            for video_interval in video_intervals if video_interval != ''
        ]
        video_intervals = [
            (int(interval.split(',')[0]), int(interval.split(',')[1]))
            for interval in video_intervals
        ]

        style_dir = videos_dirs[idx][:videos_dirs[idx].rfind('/')] + '/'

        split_and_organize_data(video_intervals, video_path, style_dir)
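
From the parsing above, each .txt file is expected to hold one start,end pair per line; blank lines are skipped and stray spaces are stripped. A hypothetical file could look like:

120, 480
515,  900

1000, 1320

which the three list comprehensions turn into [(120, 480), (515, 900), (1000, 1320)]. Whether these numbers are frame indices or timestamps depends on split_and_organize_data, which is not shown here.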
Example #6
def main():
    args = parse_args()

    videos_dir = get_videos_path(args.dataset_path)
    videos_path = [
        video_dir + '/' + video_dir.split('/')[-1] + '.mp4'
        for video_dir in videos_dir
    ]

    for idx, video_path in enumerate(
            tqdm(videos_path, desc="Filtering openpose predictions...")):
        video_name = video_path.split('/')[-1]
        json_files = os.listdir(videos_dir[idx] + '/openpose/json/')
        json_files = sorted(json_files, key=sort_openpose_jsons)

        filter_video(video_name, json_files, videos_dir[idx])
Example #7
def main():
    args = parse_args()
    # Each params tuple:
    # (joints_to_aug, noise_x, noise_y, sigma, scale, flip, noise)

    params = [  # scale = 30 for the first ten configs, 40 for the last ten
        ([3, 4], True, True, 9.765625, 30, False, True),
        ([6, 7], True, True, 9.765625, 30, False, True),
        ([10, 11], True, True, 9.765625, 30, False, True),
        ([13, 14], True, True, 9.765625, 30, False, True),
        ([3, 4, 6, 7], True, True, 9.765625, 30, False, True),
        ([10, 11, 13, 14], True, True, 9.765625, 30, False, True),
        ([3, 4, 10, 11], True, True, 9.765625, 30, False, True),
        ([6, 7, 13, 14], True, True, 9.765625, 30, False, True),
        ([3, 4, 13, 14], True, True, 9.765625, 30, False, True),
        ([6, 7, 10, 11], True, True, 9.765625, 30, False, True),
        ([3, 4], True, True, 9.765625, 40, False, True),
        ([6, 7], True, True, 9.765625, 40, False, True),
        ([10, 11], True, True, 9.765625, 40, False, True),
        ([13, 14], True, True, 9.765625, 40, False, True),
        ([3, 4, 6, 7], True, True, 9.765625, 40, False, True),
        ([10, 11, 13, 14], True, True, 9.765625, 40, False, True),
        ([3, 4, 10, 11], True, True, 9.765625, 40, False, True),
        ([6, 7, 13, 14], True, True, 9.765625, 40, False, True),
        ([3, 4, 13, 14], True, True, 9.765625, 40, False, True),
        ([6, 7, 10, 11], True, True, 9.765625, 40, False, True)
    ]

    videos_dir = get_videos_path(args.dataset_path)
    videos_dir = [
        video_dir for video_dir in videos_dir
        if len(video_dir.split('/')[-1].split('_')) == 2
    ]

    if args.pose:
        for i, param in enumerate(params):
            print("Processing dataset with params config " + str(i + 1) + "/" +
                  str(len(params)))
            augment_data(videos_dir, args, param, i)

    if args.audio:
        print("Augmenting audio data...")
        augment_audio(videos_dir, args, params)
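
augment_data and augment_audio are not shown in this example. As a rough illustration of how one of the tuples above could be consumed, the sketch below jitters the selected joints of a (frames, joints, 2) keypoint array with Gaussian noise of the given sigma; the roles of scale, flip and noise in the real helper are not visible here, so treat this only as an interpretation of the tuple layout:

import numpy as np

def jitter_joints(frames, param, rng=None):
    # frames: array of shape (n_frames, n_joints, 2) holding (x, y) keypoints.
    # param:  (joints_to_aug, noise_x, noise_y, sigma, scale, flip, noise)
    joints_to_aug, noise_x, noise_y, sigma, _scale, _flip, _noise = param
    rng = rng or np.random.default_rng()

    out = frames.copy()
    for joint in joints_to_aug:
        if noise_x:
            out[:, joint, 0] += rng.normal(0.0, sigma, size=frames.shape[0])
        if noise_y:
            out[:, joint, 1] += rng.normal(0.0, sigma, size=frames.shape[0])
    return out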
Example #8
def main():
    args = parse_args()

    videos_dir = get_videos_path(args.dataset_path)

    if args.data_aug:
        wavs_paths = [
            video_dir + '/' + video_dir.split('/')[-1] + '.wav'
            for video_dir in videos_dir
        ]
        #wavs_paths =   [re.sub(r'(I_\d+)_\d+', r'\1', wav_path) for wav_path in wavs_paths] # Uncomment this line if you used audio data augmentation

        videos_paths = [
            video_dir + '/' + video_dir.split('/')[-1] + '.mp4'
            for video_dir in videos_dir
        ]
        videos_paths = [
            re.sub(r'(I_\d+)_\d+', r'\1', video_path)
            for video_path in videos_paths
        ]

    else:
        ## Remove data augmentation dirs
        videos_dir = [
            video_dir for video_dir in videos_dir
            if len(video_dir.split('/')[-1].split('_')) == 2
        ]
        wavs_paths = [
            video_dir + '/' + video_dir.split('/')[-1] + '.wav'
            for video_dir in videos_dir
        ]
        videos_paths = [
            video_dir + '/' + video_dir.split('/')[-1] + '.mp4'
            for video_dir in videos_dir
        ]

    videos_style = [video_dir.split('/')[-2] for video_dir in videos_dir]

    samples_dict = {}
    global_sample_idx = 0

    for idx, video_dir in enumerate(
            tqdm(videos_dir, desc='Processing videos...')):
        data_path = video_dir + '/data/'
        data_files = os.listdir(data_path)
        data_files = sorted(data_files, key=sort_npy)

        skeletons_dict, indices_dict = get_skeletons(data_files, data_path,
                                                     args.stride,
                                                     args.sample_size)

        ## Get audio sample rate
        wav_path = wavs_paths[idx]
        sample_rate = librosa.get_samplerate(wav_path)

        ## Get video fps
        video_path = videos_paths[idx]
        cap = cv2.VideoCapture(video_path)
        fps = round(cap.get(cv2.CAP_PROP_FPS))
        cap.release()

        ## Map npy files to audio intervals/sequences
        audio_dict = map_video_to_audio(indices_dict, fps, sample_rate)

        ## Saving info
        for sample_idx, skeletons_sample in skeletons_dict.items():
            audio_sample = audio_dict[sample_idx]

            metadata = {}
            metadata['samples_frames'] = skeletons_sample
            metadata['samples_audio'] = audio_sample
            metadata['audio_path'] = wav_path
            metadata['style'] = videos_style[idx]
            metadata['video_id'] = idx

            samples_dict[global_sample_idx] = metadata

            global_sample_idx += 1

    ## Saving pickle file
    with open(
            args.dataset_path + 'metadata_' + str(args.sample_size) + '_' +
            str(args.stride) + '_' + str(args.data_aug) + '.pickle',
            'wb') as f:
        pickle.dump(samples_dict, f, protocol=pickle.HIGHEST_PROTOCOL)
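
For reference, the resulting pickle is a plain dict keyed by the global sample index, with the metadata fields filled in above. The file name below is only an example instantiation of the naming pattern (sample_size=64, stride=10, data_aug=False are made-up values):

import pickle

with open('dataset/metadata_64_10_False.pickle', 'rb') as f:
    samples_dict = pickle.load(f)

# Each entry holds the frame files, the audio interval, the wav path,
# the style label and the source video id.
sample = samples_dict[0]
print(sample['style'], sample['audio_path'])
print(len(sample['samples_frames']), 'frames in this sample')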