Example #1
def run_test(cfg, model, distributed):
    if distributed:
        model = model.module
    torch.cuda.empty_cache()

    output_folders = [None] * len(cfg.DATASETS.TEST)
    dataset_names = cfg.DATASETS.TEST
    if cfg.OUTPUT_DIR:
        for idx, dataset_name in enumerate(dataset_names):
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference",
                                         dataset_name)
            os.makedirs(output_folder, exist_ok=True)
            output_folders[idx] = output_folder
    # make test dataloader.
    data_loaders_test = make_data_loader(cfg,
                                         is_train=False,
                                         is_distributed=distributed)
    # test for each dataset.
    for output_folder, dataset_name, data_loader_test in zip(
            output_folders, dataset_names, data_loaders_test):
        inference(
            model,
            data_loader_test,
            dataset_name,
            mem_active=has_memory(cfg.MODEL.IA_STRUCTURE),
            output_folder=output_folder,
        )
        synchronize()
Example #2
    def __init__(
        self,
        cfg_file_path,
        model_weight_url,
        detect_rate,
        common_cate,
        device,
        exclude_class=None,  # avoid a shared mutable default; resolved to [] below
    ):
        # TODO: add exclude class
        cfg = base_cfg.clone()
        cfg.merge_from_file(cfg_file_path)
        cfg.MODEL.WEIGHT = model_weight_url
        cfg.MODEL.IA_STRUCTURE.MEMORY_RATE *= detect_rate
        if common_cate:
            cfg.MODEL.ROI_ACTION_HEAD.NUM_CLASSES = 15
            cfg.MODEL.ROI_ACTION_HEAD.NUM_PERSON_MOVEMENT_CLASSES = 6
            cfg.MODEL.ROI_ACTION_HEAD.NUM_OBJECT_MANIPULATION_CLASSES = 5
            cfg.MODEL.ROI_ACTION_HEAD.NUM_PERSON_INTERACTION_CLASSES = 4
        cfg.freeze()
        self.cfg = cfg

        self.model = build_detection_model(cfg)
        self.model.eval()
        self.model.to(device)
        self.has_memory = has_memory(cfg.MODEL.IA_STRUCTURE)
        self.mem_len = cfg.MODEL.IA_STRUCTURE.LENGTH
        self.mem_rate = cfg.MODEL.IA_STRUCTURE.MEMORY_RATE
        self.has_object = has_object(cfg.MODEL.IA_STRUCTURE)

        checkpointer = ActionCheckpointer(cfg, self.model)
        self.mem_pool = MemoryPool()
        self.object_pool = MemoryPool()
        self.mem_timestamps = []
        self.obj_timestamps = []
        self.pred_pos = 0
        print("Loading action model weight from {}.".format(cfg.MODEL.WEIGHT))
        _ = checkpointer.load(cfg.MODEL.WEIGHT)
        print("Action model weight successfully loaded.")

        self.transforms, self.person_transforms, self.object_transforms = \
            self.build_transform()

        self.device = device
        self.cpu_device = torch.device("cpu")
        self.exclude_class = exclude_class if exclude_class is not None else []
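As a usage illustration for the constructor above: a minimal sketch of instantiating the wrapper, assuming a class name of ActionPredictor and placeholder config/weight paths (none of these names are given by the snippet itself).

import torch

# Hypothetical class name and file paths -- substitute the actual demo class and files.
predictor = ActionPredictor(
    cfg_file_path="config_files/some_config.yaml",    # placeholder config file
    model_weight_url="data/models/some_weights.pth",  # placeholder checkpoint
    detect_rate=1.0,      # scales cfg.MODEL.IA_STRUCTURE.MEMORY_RATE
    common_cate=False,    # True switches to the reduced 15-class action head
    device=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
)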
Example #3
    def __init__(self, dim_person, dim_mem, dim_out, structure_cfg):
        super(IAStructure, self).__init__()
        self.dim_person = dim_person
        self.dim_others = dim_mem
        self.dim_inner = structure_cfg.DIM_INNER
        self.dim_out = dim_out

        self.max_person = structure_cfg.MAX_PERSON
        self.max_object = structure_cfg.MAX_OBJECT
        self.mem_len = structure_cfg.LENGTH[0] + structure_cfg.LENGTH[1] + 1
        self.mem_feature_len = self.mem_len * structure_cfg.MAX_PER_SEC

        self.I_block_list = structure_cfg.I_BLOCK_LIST

        bias = not structure_cfg.NO_BIAS
        conv_init_std = structure_cfg.CONV_INIT_STD

        self.has_P = has_person(structure_cfg)
        self.has_O = has_object(structure_cfg)
        self.has_M = has_memory(structure_cfg)

        # bias is passed by keyword so it is not mistaken for Conv3d's stride argument
        self.person_dim_reduce = nn.Conv3d(dim_person, self.dim_inner, 1,
                                           bias=bias)  # reduce person query
        init_layer(self.person_dim_reduce, conv_init_std, bias)
        self.reduce_dropout = nn.Dropout(structure_cfg.DROPOUT)

        if self.has_M:
            self.mem_dim_reduce = nn.Conv3d(dim_mem, self.dim_inner, 1,
                                            bias=bias)
            init_layer(self.mem_dim_reduce, conv_init_std, bias)
        if self.has_P:
            self.person_key_dim_reduce = nn.Conv3d(dim_person, self.dim_inner,
                                                   1,
                                                   bias=bias)  # reduce person key
            init_layer(self.person_key_dim_reduce, conv_init_std, bias)
        if self.has_O:
            self.object_dim_reduce = nn.Conv3d(dim_person, self.dim_inner, 1,
                                               bias=bias)
            init_layer(self.object_dim_reduce, conv_init_std, bias)
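For reference, a small self-contained sketch of what the 1x1x1 Conv3d reductions above do: they only remap the channel dimension of a 5-D feature tensor, leaving the temporal and spatial sizes untouched (the concrete sizes below are illustrative, not taken from any config).

import torch
import torch.nn as nn

dim_person, dim_inner = 2048, 512                  # illustrative channel sizes
reduce = nn.Conv3d(dim_person, dim_inner, 1, bias=False)

person_feat = torch.randn(4, dim_person, 1, 1, 1)  # (batch, channels, T, H, W)
print(reduce(person_feat).shape)                   # torch.Size([4, 512, 1, 1, 1])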
Example #4
def main():
    parser = argparse.ArgumentParser(
        description="PyTorch Object Detection Inference")
    parser.add_argument(
        "--config-file",
        default="",
        metavar="FILE",
        help="path to config file",
    )
    parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )

    args = parser.parse_args()

    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    distributed = num_gpus > 1

    if distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend="nccl",
                                             init_method="env://")

    # Merge config file.
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    # Print experimental infos.
    save_dir = ""
    logger = setup_logger("alphaction", save_dir, get_rank())
    logger.info("Using {} GPUs".format(num_gpus))
    logger.info(cfg)

    logger.info("Collecting env info (might take some time)")
    logger.info("\n" + get_pretty_env_info())

    # Build the model.
    model = build_detection_model(cfg)
    model.to("cuda")

    # load weight.
    output_dir = cfg.OUTPUT_DIR
    checkpointer = ActionCheckpointer(cfg, model, save_dir=output_dir)
    checkpointer.load(cfg.MODEL.WEIGHT)

    output_folders = [None] * len(cfg.DATASETS.TEST)
    dataset_names = cfg.DATASETS.TEST
    mem_active = has_memory(cfg.MODEL.IA_STRUCTURE)
    if cfg.OUTPUT_DIR:
        for idx, dataset_name in enumerate(dataset_names):
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference",
                                         dataset_name)
            os.makedirs(output_folder, exist_ok=True)
            output_folders[idx] = output_folder

    # Do inference.
    data_loaders_test = make_data_loader(cfg,
                                         is_train=False,
                                         is_distributed=distributed)
    for output_folder, dataset_name, data_loader_test in zip(
            output_folders, dataset_names, data_loaders_test):
        inference(
            model,
            data_loader_test,
            dataset_name,
            mem_active=mem_active,
            output_folder=output_folder,
        )
        synchronize()
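Because the distributed branch uses init_method="env://" and a --local_rank argument, this main() is intended to be started by a multi-process launcher that sets WORLD_SIZE. A minimal sketch of the usual entry point follows; the script name test_net.py in the comment is an assumption, not given by the snippet.

if __name__ == "__main__":
    # e.g. python -m torch.distributed.launch --nproc_per_node=8 test_net.py --config-file <cfg>
    # The launcher sets WORLD_SIZE and passes --local_rank to every process.
    main()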
Example #5
def train(cfg,
          local_rank,
          distributed,
          tblogger=None,
          transfer_weight=False,
          adjust_lr=False,
          skip_val=False,
          no_head=False):
    # build the model.
    model = build_detection_model(cfg)

    device = torch.device("cuda")
    model.to(device)

    # make solver.
    optimizer = make_optimizer(cfg, model)
    scheduler = make_lr_scheduler(cfg, optimizer)

    if distributed:
        model = torch.nn.parallel.DistributedDataParallel(
            model,
            device_ids=[local_rank],
            output_device=local_rank,
            # this should be removed if we update BatchNorm stats
            broadcast_buffers=False,
        )

    arguments = {}
    arguments["iteration"] = 0
    arguments["person_pool"] = MemoryPool()

    output_dir = cfg.OUTPUT_DIR

    # load weight.
    save_to_disk = get_rank() == 0
    checkpointer = ActionCheckpointer(cfg, model, optimizer, scheduler,
                                      output_dir, save_to_disk)
    extra_checkpoint_data = checkpointer.load(
        cfg.MODEL.WEIGHT,
        model_weight_only=transfer_weight,
        adjust_scheduler=adjust_lr,
        no_head=no_head)

    arguments.update(extra_checkpoint_data)

    # make dataloader.
    data_loader = make_data_loader(
        cfg,
        is_train=True,
        is_distributed=distributed,
        start_iter=arguments['iteration'],
    )

    checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD
    val_period = cfg.SOLVER.EVAL_PERIOD

    mem_active = has_memory(cfg.MODEL.IA_STRUCTURE)

    # make validation dataloader if necessary
    if not skip_val:
        dataset_names_val = cfg.DATASETS.TEST
        data_loaders_val = make_data_loader(cfg,
                                            is_train=False,
                                            is_distributed=distributed)
    else:
        dataset_names_val = []
        data_loaders_val = []
    # training
    do_train(
        model,
        data_loader,
        optimizer,
        scheduler,
        checkpointer,
        device,
        checkpoint_period,
        arguments,
        tblogger,
        val_period,
        dataset_names_val,
        data_loaders_val,
        distributed,
        mem_active,
    )

    return model
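
The train() example here and the run_test() example at the top are typically chained by a small driver. Below is a hedged sketch of that pattern, reusing the functions defined above; the argument names local_rank, distributed, and skip_test are assumptions, not taken from the original script.

def train_and_test(cfg, local_rank, distributed, skip_test=False):
    # Train first, then evaluate the returned model on cfg.DATASETS.TEST.
    model = train(cfg, local_rank, distributed)
    if not skip_test:
        run_test(cfg, model, distributed)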