Example 1
    def predict(self, images):
        """
        Runs forward pass of model.

        Args:
            images (BxTxHxWx3): Images to predict.

        Returns:
            dict.
        """
        feed_dict = {
            self.images_pl: images,
        }
        fetch_dict = self.make_fetch_dict(self.omegas_pred[0])

        fetch_dict_deltas = {}
        for delta_t, omega_delta in sorted(self.omegas_pred.items()):
            if delta_t == 0:
                continue
            update_dict_entries(accumulator=fetch_dict_deltas,
                                appender=self.make_fetch_dict(omega_delta,
                                                              suffix='_delta'))
        # DxBxTx... --> BxTxDx...
        for k in fetch_dict_deltas:
            fetch_dict_deltas[k] = tf.stack(fetch_dict_deltas[k], axis=2)
        fetch_dict.update(fetch_dict_deltas)

        results = self.sess.run(fetch_dict, feed_dict)
        return results
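
The snippet relies on an update_dict_entries helper that the listing does not show. A minimal sketch of what such an accumulator could look like, assuming it simply appends each value from appender onto a per-key list in accumulator (the name matches the call above, but this body is an illustration, not the repository's actual implementation):

def update_dict_entries(accumulator, appender):
    # Hypothetical reimplementation for illustration: keep a list per key and
    # append the newest value, so callers can later stack or extend the lists.
    for key, value in appender.items():
        accumulator.setdefault(key, []).append(value)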
Example 2
    def predict_all_images(self, all_images):
        """
        Wrapper to predict entire sequence.

        Because of edge padding, images at edges will have low quality
        predictions since they don't have full field-of-view. Thus, we slide
        a window of size T across the images and only keep the predictions
        with full fov.

        Args:
            all_images (NxHxWx3): Images in sequence.

        Returns:
            dict
        """
        B = self.batch_size
        T = self.sequence_length
        N = len(all_images)
        H, W = self.img_size, self.img_size

        # Need margin on both sides. Num good frames = T - 2 * margin.
        margin = (self.fov - 1) // 2
        g = T - 2 * margin  # Number of frames per window with a full field of view.
        count = np.ceil(N / (g * B)).astype(int)
        num_fill = count * B * g + T - N
        images_padded = np.concatenate(
            (
                np.zeros((margin, H, W, 3)),  # Front padding.
                all_images,
                np.zeros((num_fill, H, W, 3)),  # Back padding.
            ),
            axis=0)
        images_batched = []
        # [ m ][    g    ][ m ]             Slide over by g every time.
        #            [ m ][    g    ][ m ]
        for i in range(count * B):
            images_batched.append(images_padded[i * g:i * g + T])
        images_batched = np.reshape(images_batched, (count, B, T, H, W, 3))

        results = {}
        # `results` accumulates every window; `pred` holds one batch of windows.
        for images in tqdm(images_batched):
            pred = self.predict(images)
            update_dict_entries(results, pred)

        # Results are now (CxBxTx...). Should be (Nx...).
        new_results = {}
        # Crop the padded margins and flatten (C, B, T) into the frame dimension.
        for k, v in results.items():
            v = np.array(v)[:, :, margin:-margin]
            old_shape = v.shape[3:]
            new_v = v.reshape((-1, ) + old_shape)[:N]
            new_results[k] = new_v
        return new_results
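
To make the window arithmetic above concrete, here is a small sanity check of the padding sizes. The values of B, T, fov, and N below are made up for illustration; the real ones come from the model configuration:

import numpy as np

# Illustrative values only; the real B, T and fov come from the model config.
B, T, fov, N = 2, 20, 13, 100
margin = (fov - 1) // 2            # 6: frames on each side without full context.
g = T - 2 * margin                 # 8: "good" frames kept from every window.
count = int(np.ceil(N / (g * B)))  # 7: batches needed to cover all N frames.
num_fill = count * B * g + T - N   # 32: zero frames appended at the back.

assert count * B * g >= N                                 # Good frames cover every image.
assert margin + N + num_fill >= (count * B - 1) * g + T   # Last window still has T frames.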
Example 3
def main(config):
    t0 = time()
    config = restore_config(config)
    print('-' * 20)
    print('Evaluating {}'.format(config.load_path))

    json_path = get_result_path_name(
        split=config.split,
        load_path=config.load_path,
        pred_mode=config.pred_mode,
        datasets=config.test_datasets,
        pred_dir=config.pred_dir,
    )

    if os.path.exists(json_path):
        print(json_path, 'already exists!')
        with open(json_path, 'r') as f:
            all_dataset_results = json.load(f)
        save_results(config, all_dataset_results)
        print('Total time:', time() - t0)
        print('-' * 20)
        exit(0)

    resnet_path = config.resnet_path if config.precomputed_phi else ''
    model = Tester(
        config,
        pretrained_resnet_path=resnet_path,
        sequence_length=config.T
    )

    all_dataset_results = {}
    # Sub-dicts used when evaluating in 'const' prediction mode.
    const_keys = ('past', 'past_const', 'present', 'future', 'future_const')
    if config.pred_mode == 'const':
        all_dataset_results.update({k: {} for k in const_keys})
    for dataset in config.test_datasets:
        print('Evaluating dataset:', dataset)
        dataset_result = {}
        if config.pred_mode == 'const':
            dataset_result.update({k: {} for k in const_keys})
        # Human3.6M: evaluate only the cam03 recordings.
        pattern = '*cam03*.tfrecord' if dataset == 'h36m' else '*.tfrecord'
        tf_paths = sorted(glob(os.path.join(
            config.tf_dir,
            dataset,
            config.split,
            pattern,
        )))
        if config.reverse:
            tf_paths = tf_paths[::-1]
        for i, fname in enumerate(tf_paths):
            print('\n', '*' * 10)
            print(dataset, '{}/{}'.format(i, len(tf_paths)))
            print('Running on', os.path.basename(fname))
            path_result = {}
            if config.pred_mode == 'const':
                path_result.update({k: {} for k in const_keys})
            for p_id, s_ex in enumerate(tf.python_io.tf_record_iterator(fname)):
                data = read_from_example(s_ex)
                images = data['images']

                preds = get_predictions(
                    model=model,
                    images=images,
                    load_path=config.load_path,
                    tf_path=fname,
                    p_id=p_id,
                    pred_dir=config.pred_dir,
                )
                eval_path = get_eval_path_name(
                    load_path=config.load_path,
                    pred_mode=config.pred_mode,
                    tf_path=fname,
                    p_id=p_id,
                    pred_dir=config.pred_dir,
                    min_visible=config.min_visible,
                )

                if config.pred_mode == 'const':
                    errors_dict = test_sequence_const(
                        data=data,
                        preds=preds,
                        eval_path=eval_path,
                        has_3d=(dataset in DATASETS_3D),
                        min_visible=config.min_visible,
                    )
                    for k in errors_dict.keys():
                        extend_dict_entries(path_result[k], errors_dict[k])
                else:
                    compute_mesh = config.split == 'test' and dataset == '3dpw'
                    errors = test_sequence(
                        data=data,
                        preds=preds,
                        eval_path=eval_path,
                        pred_mode=config.pred_mode,
                        has_3d=(dataset in DATASETS_3D),
                        min_visible=config.min_visible,
                        compute_mesh=compute_mesh,
                    )
                    extend_dict_entries(path_result, errors)
            if config.pred_mode == 'const':
                for k in path_result.keys():
                    update_dict_entries(dataset_result[k], path_result[k])
            else:
                update_dict_entries(dataset_result, path_result)

        if config.pred_mode == 'const':
            for pred_type, result in dataset_result.items():
                mean_of_dict_values(result)
                all_dataset_results[pred_type][dataset] = result
        else:
            mean_of_dict_values(dataset_result)
            all_dataset_results[dataset] = dataset_result

    save_results(config, all_dataset_results, json_path)

    print('Total time:', time() - t0)
    print('-' * 20)
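
main() also depends on extend_dict_entries and mean_of_dict_values, which the listing does not show. A minimal sketch of how such aggregation helpers could behave, assuming per-frame errors arrive as lists and the final report wants their means (both bodies are assumptions for illustration, not the repository's code):

import numpy as np

def extend_dict_entries(accumulator, new_entries):
    # Hypothetical helper: extend the per-key list with new per-frame values.
    for key, values in new_entries.items():
        accumulator.setdefault(key, []).extend(values)

def mean_of_dict_values(result):
    # Hypothetical helper: collapse each accumulated list to its mean, in
    # place, so the result dict can be dumped to JSON.
    for key, values in result.items():
        result[key] = float(np.mean(np.hstack(values)))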