Example #1
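    # Requires: numpy as np, torch, and VCLA_METADATA (from datasets.VCLA_GAZE.metadata, as in Example #3)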
    def collate_fn(batch):
        metadata = VCLA_METADATA()
        affordance = False
        features, labels, seg_lengths, total_length, activity, sequence_id, additional = batch[0]
        feature_dim = list(features.shape)
        if len(feature_dim) > 2:
            affordance = True
        max_seq_length = np.max(
            np.array([total_length for (features, labels, seg_lengths, total_length, activity, sequence_id, additional) in batch]))
        feature_dim[0] = max_seq_length
        feature_dim.insert(1, len(batch))  # max_length * batch * (obj_num) * feature_size
        obj_nums = np.zeros(len(batch))
        if affordance:
            max_obj_num = metadata.MAXIMUM_OBJ_VIDEO
            feature_dim[-2] = max_obj_num
            total_lengths = np.zeros(len(batch) * max_obj_num)
        else:
            total_lengths = np.zeros(len(batch))
        features_batch = np.zeros(feature_dim)
        labels_batch = np.zeros(feature_dim[: -1])
        probs_batch = np.zeros(feature_dim[: 2] + [len(metadata.subactivities)])

        activities = list()
        sequence_ids = list()
        ctc_labels = list()
        ctc_lengths = list()
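        # Fill each sample into the padded batch arrays and build its CTC target sequence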
        for batch_i, (features, labels, seg_lengths, total_length, activity, sequence_id, additional) in enumerate(batch):
            for frame in range(features.shape[0]):
                probs_batch[frame, batch_i, int(labels[frame])] = 1.0
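            # Collapse runs of identical frame labels into a CTC-style target sequence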
            merged_labels = list()
            current_label = -1
            for label in labels:
                if label != current_label:
                    current_label = label
                    merged_labels.append(current_label)
            ctc_labels.append(merged_labels)
            ctc_lengths.append(len(merged_labels))

            if affordance:
                obj_num = labels.shape[1]
                features_batch[:total_length, batch_i, :obj_num, :] = np.nan_to_num(features)
                labels_batch[:total_length, batch_i, :obj_num] = labels
                # record this clip's length once per object slot (the stride of 3 assumes max_obj_num == 3)
                for rel_idx in range(3):
                    total_lengths[batch_i * 3 + rel_idx] = total_length
                obj_nums[batch_i] = obj_num
            else:
                features_batch[:total_length, batch_i, :] = np.nan_to_num(features)
                labels_batch[:total_length, batch_i] = labels
                total_lengths[batch_i] = total_length
            activities.append(activity)
            sequence_ids.append(sequence_id)

        features_batch = torch.FloatTensor(features_batch)
        labels_batch = torch.LongTensor(labels_batch)
        total_lengths = torch.IntTensor(total_lengths)
        obj_nums = torch.IntTensor(obj_nums)
        ctc_lengths = torch.IntTensor(ctc_lengths)

        # Returned as: features_batch, labels_batch, activities, sequence_ids, total_lengths, obj_nums, ctc_labels, ctc_lengths, probs_batch (None here), all_labels (None here)
        return features_batch, labels_batch, activities, sequence_ids, total_lengths, obj_nums, ctc_labels, ctc_lengths, None, None
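
A minimal usage sketch of how this collate function would typically be wired into a PyTorch DataLoader; train_dataset and the batch size are placeholders, not part of the original code, and collate_fn is assumed to be in scope:

    from torch.utils.data import DataLoader

    # "train_dataset" is a placeholder for any dataset whose items are the 7-tuple
    # (features, labels, seg_lengths, total_length, activity, sequence_id, additional)
    loader = DataLoader(train_dataset, batch_size=4, shuffle=True, collate_fn=collate_fn)
    (features, labels, activities, sequence_ids, total_lengths,
     obj_nums, ctc_labels, ctc_lengths, _, _) = next(iter(loader))
    # features is padded to shape max_length x batch (x max_obj_num) x feature_size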
Example #2
 def __init__(self, dataset='VCLA_GAZE'):
     self.paths_dict = {
         'WNP': wnp_config.Paths(),
         'VCLA_GAZE': vcla_gaze_config.Paths(),
         'CAD': cad_config.Paths(),
         'Breakfast': breakfast_config.Paths()
     }
     self.metadata_dict = {
         'WNP': WNP_METADATA(),
         'VCLA_GAZE': VCLA_METADATA(),
         'CAD': CAD_METADATA(),
         'Breakfast': BREAKFAST_METADATA()
     }
     self.dataset_dict = {
         'WNP':
         lambda path, mode, task, subsample: wnp.WNP(
             path, mode, task, subsample),
         'VCLA_GAZE':
         lambda path, mode, task, subsample: vcla_gaze.VCLA_GAZE(
             path, mode, task, subsample),
         'CAD':
         lambda path, mode, task, subsample: cad.CAD(
             path, mode, task, subsample),
         'Breakfast':
         lambda path, mode, task, subsample: breakfast.Breakfast(
             path, mode, task, subsample)
     }
     self.dataset = self.dataset_dict[dataset]
     self.paths = self.paths_dict[dataset]
     self.metadata = self.metadata_dict[dataset]
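
A brief usage sketch, assuming this __init__ belongs to a wrapper class named DatasetFactory; the class name and the mode/task/subsample arguments below are illustrative, not taken from the original code:

    factory = DatasetFactory(dataset='VCLA_GAZE')      # hypothetical wrapper class name
    paths, metadata = factory.paths, factory.metadata
    # factory.dataset holds the lambda that builds the concrete dataset object
    train_set = factory.dataset(paths, 'train', 'affordance', None)   # placeholder arguments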
Example #3
Description: Prior calculation for the VCLA_GAZE dataset.
                The activity_corpus.p file must first be generated with dataparser.py.

"""

import os
import sys
import pickle
import json

import numpy as np
import scipy.stats

import datasets.VCLA_GAZE.vcla_gaze_config as config
from datasets.VCLA_GAZE.metadata import VCLA_METADATA
metadata = VCLA_METADATA()

def learn_prior(paths):
    def normalize_prob(cpt):
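        # Normalize each row of the conditional probability table so it sums to one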
        for s in range(cpt.shape[0]):
            cpt[s, :] = cpt[s, :]/np.sum(cpt[s, :])

        return cpt

    if not os.path.exists(os.path.join(paths.tmp_root, 'activity_corpus.p')):
        sys.exit('Ground truth pickle file not found.')
    with open(os.path.join(paths.tmp_root, 'activity_corpus.p'), 'rb') as f:
        activity_corpus = pickle.load(f)

    # initialize the conditional probability tables with small constant pseudo-counts
    action_cpt = np.ones((len(metadata.subactivities), len(metadata.actions))) * 0.3
    affordance_cpt = np.ones((len(metadata.subactivities), len(metadata.affordances))) * 0.1