def forward(self,
                slow_features,
                fast_features,
                proposals,
                objects=None,
                extras={},
                part_forward=-1):
        ia_active = hasattr(self, "ia_structure")
        if part_forward == 1:
            # Second-stage call: reuse the pooled features that an earlier
            # first-stage call (part_forward == 0) cached on the boxes.
            person_pooled = cat(
                [box.get_field("pooled_feature") for box in proposals])
            if objects is None:
                object_pooled = None
            else:
                object_pooled = cat(
                    [box.get_field("pooled_feature") for box in objects])
        else:
            x = self.roi_pooling(slow_features, fast_features, proposals)

            person_pooled = self.max_pooler(x)

            if has_object(self.config.MODEL.IA_STRUCTURE):
                object_pooled = self.roi_pooling(slow_features, fast_features,
                                                 objects)
                object_pooled = self.max_pooling_zero_safe(object_pooled)
            else:
                object_pooled = None

        if part_forward == 0:
            # First-stage call: return only the pooled features so the caller
            # can cache them (e.g. in the person/object memory pools).
            return None, person_pooled, object_pooled

        x_after = person_pooled

        if ia_active:
            tsfmr = self.ia_structure
            mem_len = self.config.MODEL.IA_STRUCTURE.LENGTH
            mem_rate = self.config.MODEL.IA_STRUCTURE.MEMORY_RATE
            use_penalty = self.config.MODEL.IA_STRUCTURE.PENALTY
            memory_person, memory_person_boxes = self.get_memory_feature(
                extras["person_pool"], extras, mem_len, mem_rate,
                self.max_feature_len_per_sec, tsfmr.dim_others, person_pooled,
                proposals, use_penalty)

            ia_feature = self.ia_structure(
                person_pooled,
                proposals,
                object_pooled,
                objects,
                memory_person,
            )
            x_after = self.fusion(x_after, ia_feature,
                                  self.config.MODEL.IA_STRUCTURE.FUSION)

        x_after = x_after.view(x_after.size(0), -1)

        x_after = F.relu(self.fc1(x_after))
        x_after = F.relu(self.fc2(x_after))

        return x_after, person_pooled, object_pooled
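
The `part_forward` flag splits this forward pass into two stages. A minimal call-pattern sketch, assuming `head` is an instance of this module and that the caller caches the pooled features on the boxes between the two calls (variable names here are illustrative, not from the snippet):

# Stage 1: pool features only; returns (None, person_pooled, object_pooled).
_, person_pooled, object_pooled = head(
    slow_features, fast_features, proposals, objects, part_forward=0)

# ... the caller is assumed to attach the pooled features to the boxes,
# e.g. box.add_field("pooled_feature", feat), and to fill extras["person_pool"] ...

# Stage 2: skip pooling and run the IA structure + classifier on cached features.
x_after, _, _ = head(
    None, None, proposals, objects, extras=extras, part_forward=1)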
Example 2
def build_dataset(cfg,
                  dataset_list,
                  transforms,
                  dataset_catalog,
                  is_train=True,
                  object_transforms=None):
    """
    Arguments:
        cfg: config object for the experiment.
        dataset_list (list[str]): Contains the names of the datasets, e.g.,
            ava_video_train_v2.2, ava_video_val_v2.2, etc.
        transforms (callable): transforms to apply to each (clip, target) sample.
        dataset_catalog (DatasetCatalog): contains the information on how to
            construct a dataset.
        is_train (bool): whether to set up the dataset for training or testing.
        object_transforms: transforms to apply to object boxes.
    """
    if not isinstance(dataset_list, (list, tuple)):
        raise RuntimeError(
            "dataset_list should be a list of strings, got {}".format(
                dataset_list))
    datasets = []
    for dataset_name in dataset_list:
        data = dataset_catalog.get(dataset_name)
        factory = getattr(D, data["factory"])
        args = data["args"]
        if data["factory"] == "AVAVideoDataset":
            # for AVA, we want to remove clips without annotations
            # during training
            args["remove_clips_without_annotations"] = is_train
            args["frame_span"] = cfg.INPUT.FRAME_NUM * cfg.INPUT.FRAME_SAMPLE_RATE
            if not is_train:
                args["box_thresh"] = cfg.TEST.BOX_THRESH
                args["action_thresh"] = cfg.TEST.ACTION_THRESH
            else:
                # disable box_file during training; train on ground-truth boxes only
                args["box_file"] = None
            if has_object(cfg.MODEL.IA_STRUCTURE):
                args["object_transforms"] = object_transforms
            else:
                args["object_file"] = None

        args["transforms"] = transforms
        # make dataset from factory
        dataset = factory(**args)
        datasets.append(dataset)

    # for testing, return a list of datasets
    if not is_train:
        return datasets

    # for training, concatenate all datasets into a single one
    dataset = datasets[0]
    if len(datasets) > 1:
        dataset = D.ConcatDataset(datasets)

    return [dataset]
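
For reference, the return convention differs between the two modes; a small sketch, assuming `cfg`, `transforms`, `object_transforms`, and `DatasetCatalog` are already built (as in Example 5 below):

# training: a single-element list holding one (possibly concatenated) dataset
train_sets = build_dataset(cfg, cfg.DATASETS.TRAIN, transforms, DatasetCatalog,
                           is_train=True, object_transforms=object_transforms)
assert len(train_sets) == 1

# testing: one dataset per name in cfg.DATASETS.TEST
test_sets = build_dataset(cfg, cfg.DATASETS.TEST, transforms, DatasetCatalog,
                          is_train=False, object_transforms=object_transforms)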
Example 3
    def __init__(
        self,
        cfg_file_path,
        model_weight_url,
        detect_rate,
        common_cate,
        device,
        exclude_class=None,
    ):
        # TODO: add exclude class
        cfg = base_cfg.clone()
        cfg.merge_from_file(cfg_file_path)
        cfg.MODEL.WEIGHT = model_weight_url
        cfg.MODEL.IA_STRUCTURE.MEMORY_RATE *= detect_rate
        if common_cate:
            cfg.MODEL.ROI_ACTION_HEAD.NUM_CLASSES = 15
            cfg.MODEL.ROI_ACTION_HEAD.NUM_PERSON_MOVEMENT_CLASSES = 6
            cfg.MODEL.ROI_ACTION_HEAD.NUM_OBJECT_MANIPULATION_CLASSES = 5
            cfg.MODEL.ROI_ACTION_HEAD.NUM_PERSON_INTERACTION_CLASSES = 4
        cfg.freeze()
        self.cfg = cfg

        self.model = build_detection_model(cfg)
        self.model.eval()
        self.model.to(device)
        self.has_memory = has_memory(cfg.MODEL.IA_STRUCTURE)
        self.mem_len = cfg.MODEL.IA_STRUCTURE.LENGTH
        self.mem_rate = cfg.MODEL.IA_STRUCTURE.MEMORY_RATE
        self.has_object = has_object(cfg.MODEL.IA_STRUCTURE)

        checkpointer = ActionCheckpointer(cfg, self.model)
        self.mem_pool = MemoryPool()
        self.object_pool = MemoryPool()
        self.mem_timestamps = []
        self.obj_timestamps = []
        self.pred_pos = 0
        print("Loading action model weight from {}.".format(cfg.MODEL.WEIGHT))
        _ = checkpointer.load(cfg.MODEL.WEIGHT)
        print("Action model weight successfully loaded.")

        self.transforms, self.person_transforms, self.object_transforms = \
            self.build_transform()

        self.device = device
        self.cpu_device = torch.device("cpu")
        self.exclude_class = exclude_class if exclude_class is not None else []
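
A hypothetical construction sketch; the class name `ActionRecognizer` and the file paths are placeholders, only the parameter list comes from the snippet above:

import torch

recognizer = ActionRecognizer(
    cfg_file_path="path/to/config.yaml",      # placeholder path
    model_weight_url="path/to/weights.pth",   # placeholder path
    detect_rate=1.0,                          # scales IA_STRUCTURE.MEMORY_RATE
    common_cate=False,                        # True -> reduced 15-class head
    device=torch.device("cuda:0"),
)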
Example 4
    def __init__(self, dim_person, dim_mem, dim_out, structure_cfg):
        super(IAStructure, self).__init__()
        self.dim_person = dim_person
        self.dim_others = dim_mem
        self.dim_inner = structure_cfg.DIM_INNER
        self.dim_out = dim_out

        self.max_person = structure_cfg.MAX_PERSON
        self.max_object = structure_cfg.MAX_OBJECT
        self.mem_len = structure_cfg.LENGTH[0] + structure_cfg.LENGTH[1] + 1
        self.mem_feature_len = self.mem_len * structure_cfg.MAX_PER_SEC

        self.I_block_list = structure_cfg.I_BLOCK_LIST

        bias = not structure_cfg.NO_BIAS
        conv_init_std = structure_cfg.CONV_INIT_STD

        self.has_P = has_person(structure_cfg)
        self.has_O = has_object(structure_cfg)
        self.has_M = has_memory(structure_cfg)

        self.person_dim_reduce = nn.Conv3d(dim_person, self.dim_inner, 1,
                                           bias=bias)  # reduce person query
        init_layer(self.person_dim_reduce, conv_init_std, bias)
        self.reduce_dropout = nn.Dropout(structure_cfg.DROPOUT)

        if self.has_M:
            self.mem_dim_reduce = nn.Conv3d(dim_mem, self.dim_inner, 1, bias=bias)
            init_layer(self.mem_dim_reduce, conv_init_std, bias)
        if self.has_P:
            self.person_key_dim_reduce = nn.Conv3d(dim_person, self.dim_inner,
                                                   1, bias=bias)  # reduce person key
            init_layer(self.person_key_dim_reduce, conv_init_std, bias)
        if self.has_O:
            self.object_dim_reduce = nn.Conv3d(dim_person, self.dim_inner, 1,
                                               bias=bias)
            init_layer(self.object_dim_reduce, conv_init_std, bias)
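
To make the memory sizing concrete, a worked example with illustrative config values (not necessarily the project defaults):

# LENGTH = (30, 30)  -> mem_len = 30 + 30 + 1 = 61 clips
#                       (memory window before and after the current clip, plus the clip itself)
# MAX_PER_SEC = 5    -> mem_feature_len = 61 * 5 = 305 memory feature slots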
Example 5
def make_data_loader(cfg, is_train=True, is_distributed=False, start_iter=0):
    num_gpus = get_world_size()
    if is_train:
        # for training
        videos_per_batch = cfg.SOLVER.VIDEOS_PER_BATCH
        assert videos_per_batch % num_gpus == 0, (
            "SOLVER.VIDEOS_PER_BATCH ({}) must be divisible by the number "
            "of GPUs ({}) used.".format(videos_per_batch, num_gpus))
        videos_per_gpu = videos_per_batch // num_gpus
        shuffle = True
        drop_last = True
        num_iters = cfg.SOLVER.MAX_ITER
    else:
        # for testing
        videos_per_batch = cfg.TEST.VIDEOS_PER_BATCH
        assert videos_per_batch % num_gpus == 0, (
            "TEST.VIDEOS_PER_BATCH ({}) must be divisible by the number "
            "of GPUs ({}) used.".format(videos_per_batch, num_gpus))
        videos_per_gpu = videos_per_batch // num_gpus
        shuffle = is_distributed
        drop_last = False
        num_iters = None
        start_iter = 0

    # group videos that have similar aspect ratios. In this case, we only
    # group into two bins: those with width / height > 1, and the other way
    # around, but the code supports a more general grouping strategy
    aspect_grouping = [1] if cfg.DATALOADER.ASPECT_RATIO_GROUPING else []

    DatasetCatalog = paths_catalog.DatasetCatalog
    dataset_list = cfg.DATASETS.TRAIN if is_train else cfg.DATASETS.TEST

    # build dataset
    transforms = build_transforms(cfg, is_train)
    if has_object(cfg.MODEL.IA_STRUCTURE):
        object_transforms = build_object_transforms(cfg, is_train=is_train)
    else:
        object_transforms = None
    datasets = build_dataset(cfg, dataset_list, transforms, DatasetCatalog,
                             is_train, object_transforms)

    # build sampler and dataloader
    data_loaders = []
    for dataset in datasets:
        sampler = make_data_sampler(dataset, shuffle, is_distributed)
        batch_sampler = make_batch_data_sampler(dataset, sampler,
                                                aspect_grouping,
                                                videos_per_gpu, num_iters,
                                                start_iter, drop_last)
        collator = BatchCollator(cfg.DATALOADER.SIZE_DIVISIBILITY)
        num_workers = cfg.DATALOADER.NUM_WORKERS
        data_loader = torch.utils.data.DataLoader(
            dataset,
            num_workers=num_workers,
            batch_sampler=batch_sampler,
            collate_fn=collator,
        )
        data_loaders.append(data_loader)
    if is_train:
        # during training, a single (possibly concatenated) data_loader is returned
        assert len(data_loaders) == 1
        return data_loaders[0]
    return data_loaders
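
A minimal call-site sketch, assuming a frozen `cfg` (e.g. built as in Example 3) and single-process execution; in the distributed case `is_distributed=True` would be passed on every rank:

# training: a single data loader over the (possibly concatenated) training set
train_loader = make_data_loader(cfg, is_train=True, is_distributed=False, start_iter=0)

# testing: a list of data loaders, one per test dataset
test_loaders = make_data_loader(cfg, is_train=False, is_distributed=False)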