Code Example #1
def build(target_assigner_cfg):
    class_name = target_assigner_cfg["type"]
    if class_name == "TaskAssignerV1":
        builder = TaskAssignerV1
        params = {proc_param(k): v
                  for k, v in target_assigner_cfg.items() if is_param(k)}
        # build box_coder
        box_coder = build_box_coder(target_assigner_cfg["box_coder"])
        params["box_coder"] = box_coder
        # build anchors & region similarity calculators
        classsettings_cfgs = [v
            for k, v in target_assigner_cfg.items() if "class_settings" in k]
        anchor_generators = []
        similarity_calculators = []
        classes = params["classes"]
        for cls in classes:
            classsetting_cfg = [itm for itm in classsettings_cfgs
                if itm["AnchorGenerator"]["@class_name"] == cls][0]
            anchor_generator_cfg = classsetting_cfg["AnchorGenerator"]
            anchor_generator = build_anchor_generator(anchor_generator_cfg)
            anchor_generators.append(anchor_generator)
            similarity_calculator_cfg = classsetting_cfg["SimilarityCalculator"]
            similarity_calculator = build_similarity_calculator(similarity_calculator_cfg)
            similarity_calculators.append(similarity_calculator)
        params["anchor_generators"] = anchor_generators
        params["region_similarity_calculators"] = similarity_calculators
    else:
        raise NotImplementedError
    return builder(**params)
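Every builder in this section filters its config dict through two helpers, is_param and proc_param, which are defined elsewhere in the codebase. Based on the "@"-prefixed keys visible in the configs (see the docstring in Code Example #8), here is a minimal sketch of what they likely do; this is an inference from usage, not the repository's actual implementation:

# Hypothetical helpers, inferred from usage throughout this section:
# keys beginning with "@" are constructor parameters, and proc_param
# strips that prefix to obtain the argument name.
def is_param(key):
    return key.startswith("@")

def proc_param(key):
    return key[1:]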
Code Example #2
def build_box_coder(box_coder_cfg):
    class_name = box_coder_cfg["type"]
    if class_name == "BoxCoderV1":
        builder = BoxCoderV1
    else:
        raise NotImplementedError
    box_coder_params = {proc_param(k): v
        for k, v in box_coder_cfg.items() if is_param(k)}
    return builder(**box_coder_params)
Code Example #3
def build_similarity_calculator(similarity_calculator_cfg):
    class_name = similarity_calculator_cfg["type"]
    if class_name == "NearestIoUSimilarity":
        builder = NearestIoUSimilarity
    else:
        raise NotImplementedError
    params = {proc_param(k): v
              for k, v in similarity_calculator_cfg.items() if is_param(k)}
    return builder(**params)
Code Example #4
def build_anchor_generator(anchor_generator_cfg):
    class_name = anchor_generator_cfg["type"]
    if class_name == "AnchorGeneratorBEV":
        builder = AnchorGeneratorBEV
    else:
        raise NotImplementedError
    params = {proc_param(k): v
              for k, v in anchor_generator_cfg.items() if is_param(k)}
    return builder(**params)
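Code Examples #2, #3, #4 (and #8 below) share the same dispatch-by-type structure: look up a class from the "type" field, then forward the "@"-prefixed entries as constructor arguments. The following registry-based sketch shows that shared pattern in one place; it is offered as an alternative formulation, not code from the repository:

# Hypothetical registry collapsing the duplicated if/else chains.
_BUILDERS = {
    "BoxCoderV1": BoxCoderV1,
    "NearestIoUSimilarity": NearestIoUSimilarity,
    "AnchorGeneratorBEV": AnchorGeneratorBEV,
    "VoxelizerV1": VoxelizerV1,
}

def build_generic(cfg):
    # Dispatch on the "type" field, then forward the "@"-prefixed params.
    if cfg["type"] not in _BUILDERS:
        raise NotImplementedError(cfg["type"])
    params = {proc_param(k): v for k, v in cfg.items() if is_param(k)}
    return _BUILDERS[cfg["type"]](**params)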
Code Example #5
def build_prep_info_func(prep_info_cfg, dataset_name):
    params = {
        proc_param(k): v
        for k, v in prep_info_cfg.items() if is_param(k)
    }
    if dataset_name == "carla":
        prep_info_func = partial(prep_info_func_carla, **params)
    elif dataset_name == "nusc":
        # nusc reuses the carla prep-info function.
        prep_info_func = partial(prep_info_func_carla, **params)
    elif dataset_name in ["kitti", "nusc-kitti"]:
        prep_info_func = partial(prep_info_func_kitti, **params)
    else:
        raise NotImplementedError
    return prep_info_func
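This builder (and build_prep_func below) returns a callable with the config parameters pre-bound via functools.partial; the import is not shown in these excerpts. A usage sketch follows, where the config keys and the signature of prep_info_func_kitti are assumed for illustration only:

from functools import partial

# Hypothetical config: "@drop_empty" would be forwarded as drop_empty=True.
prep_info_cfg = {"type": "PrepInfoV1", "@drop_empty": True}
f = build_prep_info_func(prep_info_cfg, "kitti")
# f(info) is now equivalent to prep_info_func_kitti(info, drop_empty=True).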
Code Example #6
def build_prep_func(voxelizer, target_assigner, anchor_cache, prep_cfg,
                    dataset_name):
    params = {proc_param(k): v for k, v in prep_cfg.items() if is_param(k)}
    params["voxelizer"] = voxelizer
    params["target_assigner"] = target_assigner
    params["anchor_cache"] = anchor_cache
    if dataset_name == "carla":
        prep_func = partial(prep_func_carla, **params)
    elif dataset_name == "nusc":
        # nusc reuses the carla prep function.
        prep_func = partial(prep_func_carla, **params)
    elif dataset_name in ["kitti", "nusc-kitti"]:
        prep_func = partial(prep_func_kitti, **params)
    else:
        raise NotImplementedError
    return prep_func
Code Example #7
def build_dataset(data_cfg, prep_func, prep_info_func, dataset_name):
    params = {proc_param(k): v for k, v in data_cfg.items() if is_param(k)}
    params["prep_func"] = prep_func
    if dataset_name == "carla":
        params["prep_info_func"] = prep_info_func
        dataset = CarlaDataset(**params)
    elif dataset_name == "nusc":
        dataset = NuScenesDataset(**params)
    elif dataset_name == "kitti":
        params["prep_info_func"] = prep_info_func
        dataset = KittiDataset(**params)
    elif dataset_name == "nusc-kitti":
        params["prep_info_func"] = prep_info_func
        dataset = NuscenesKittiDataset(**params)
    else:
        raise NotImplementedError
    return dataset
Code Example #8
def build(voxelizer_cfg):
    '''
    @voxelizer_cfg: dict
        e.g. voxelizer_cfg = {
            "type": "VoxelizerV1",
            "@voxel_size": [0.05, 0.05, 0.1],
            "@point_cloud_range": [0, -40, -3, 70.4, 40, 1],
            "@max_num_points": 5,
            "@max_voxels": 20000
        }
    '''
    class_name = voxelizer_cfg["type"]
    if class_name == "VoxelizerV1":
        builder = VoxelizerV1
    else:
        raise NotImplementedError
    params = {proc_param(k): v
              for k, v in voxelizer_cfg.items() if is_param(k)}
    voxelizer = builder(**params)
    return voxelizer
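A usage sketch driven by the example config from the docstring above, assuming VoxelizerV1's constructor accepts the de-prefixed keyword arguments:

voxelizer_cfg = {
    "type": "VoxelizerV1",
    "@voxel_size": [0.05, 0.05, 0.1],
    "@point_cloud_range": [0, -40, -3, 70.4, 40, 1],
    "@max_num_points": 5,
    "@max_voxels": 20000
}
voxelizer = build(voxelizer_cfg)
# Equivalent to VoxelizerV1(voxel_size=[0.05, 0.05, 0.1],
#     point_cloud_range=[0, -40, -3, 70.4, 40, 1],
#     max_num_points=5, max_voxels=20000)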
Code Example #9
def compute_mas_weights(cfg):
    global g_log_dir, g_save_dir
    cores = setup_cores(cfg, mode="compute_mas_weights")
    model = cores["model"]
    dataloader_train = cores["dataloader_train"]
    if "@num_of_datasamples" in cfg.MAS.keys():
        num_of_datasamples = cfg.MAS["@num_of_datasamples"]
    else:
        num_of_datasamples = len(dataloader_train.dataset)
    params = {proc_param(k): v for k, v in cfg.MAS.items() if is_param(k)}
    params["num_of_datasamples"] = num_of_datasamples
    params["dataloader"] = dataloader_train
    print(params)
    mas_weights_dict = model.compute_mas_weights(**params)
    write_pkl(
        {k: v.cpu().numpy()
         for k, v in mas_weights_dict["new_omega"].items()},
        os.path.join(g_save_dir,
                     f"mas_newomega-{model.get_global_step()}.pkl"))
    write_pkl(
        {k: v.cpu().numpy()
         for k, v in mas_weights_dict["omega"].items()},
        os.path.join(g_save_dir, f"mas_omega-{model.get_global_step()}.pkl"))
    write_pkl(
        {
            k: v.cpu().numpy()
            for k, v in mas_weights_dict["new_clsterm"].items()
        },
        os.path.join(g_save_dir,
                     f"mas_newclsterm-{model.get_global_step()}.pkl"))
    write_pkl(
        {
            k: v.cpu().numpy()
            for k, v in mas_weights_dict["new_regterm"].items()
        },
        os.path.join(g_save_dir,
                     f"mas_newregterm-{model.get_global_step()}.pkl"))
Code Example #10
def compute_ewc_weights(cfg):
    global g_log_dir, g_save_dir
    cores = setup_cores(cfg, mode="compute_ewc_weights")
    model = cores["model"]
    dataloader_train = cores["dataloader_train"]
    if "@num_of_datasamples" in cfg.EWC.keys():
        num_of_datasamples = cfg.EWC["@num_of_datasamples"]
    else:
        num_of_datasamples = len(dataloader_train.dataset)
    params = {proc_param(k): v for k, v in cfg.EWC.items() if is_param(k)}
    params["num_of_datasamples"] = num_of_datasamples
    params["dataloader"] = dataloader_train
    ewc_weights_dict = model.compute_ewc_weights_v2(**params)
    write_pkl(
        {
            k: v.cpu().numpy()
            for k, v in ewc_weights_dict["newtask_FIM"].items()
        },
        os.path.join(g_save_dir,
                     f"ewc_newtaskFIM-{model.get_global_step()}.pkl"))
    write_pkl({k: v.cpu().numpy()
               for k, v in ewc_weights_dict["FIM"].items()},
              os.path.join(g_save_dir,
                           f"ewc_weights-{model.get_global_step()}.pkl"))
Code Example #11
def generate_pseudo_annotation(cfg):
    if "pseudo_annotation" not in cfg.TRAINDATA.keys():
        return
    Logger.log_txt("==========Generate Pseudo Annotations START=========")
    # build voxelizer and target_assigner
    voxelizer = build_voxelizer(cfg.VOXELIZER)
    param = deepcopy(cfg.TARGETASSIGNER)
    param["@classes"] = cfg.NETWORK["@classes_source"]
    target_assigner = build_target_assigner(param)
    # create pseudo dataset
    ## build the network in evaluation mode;
    ## work on a copy so cfg.NETWORK is not changed
    param = deepcopy(cfg.NETWORK)
    ## modify the network._model config to match network._sub_model
    param["@classes_target"] = param["@classes_source"]
    param["@model_resume_dict"] = param["@sub_model_resume_dict"]
    param["@is_training"] = False
    param["@box_coder"] = target_assigner.box_coder
    param["@middle_layer_dict"]["@output_shape"] = [
        1
    ] + voxelizer.grid_size[::-1].tolist() + [16]
    param = {proc_param(k): v for k, v in param.items() if is_param(k)}
    network = Network(**param).cuda()
    ## build dataloader without data augmentation, without shuffle, batch_size 1
    param = deepcopy(cfg.TRAINDATA)
    param["prep"]["@augment_dict"] = None
    param["training"] = False
    param["prep"]["@training"] = False
    param["batch_size"] = 1
    param["num_of_workers"] = 1
    param["@class_names"] = cfg.NETWORK["@classes_source"]
    # The following line actually has no effect here.
    param["prep"]["@filter_label_dict"]["keep_classes"] = cfg.NETWORK[
        "@classes_source"]
    dataloader_train = build_dataloader(
        data_cfg=param,
        ext_dict={
            "voxelizer": voxelizer,
            "target_assigner": target_assigner,
            "feature_map_size": param["feature_map_size"]
        })
    ## create new labels
    ### setup tmp dirs: tmp_root
    data_root_path = cfg.TRAINDATA["@root_path"]
    data_pc_path = os.path.join(data_root_path, "velodyne")
    data_calib_path = os.path.join(data_root_path, "calib")
    data_label_path = os.path.join(data_root_path, "label_2")
    tmp_root_path = f"/tmp/incdet3-{time.time()}/training"
    tmp_pc_path = os.path.join(tmp_root_path, "velodyne")
    tmp_calib_path = os.path.join(tmp_root_path, "calib")
    tmp_det_path = os.path.join(tmp_root_path, "detections")
    tmp_label_path = os.path.join(tmp_root_path, "label_2")
    tmp_splitidx_path = os.path.join(os.path.dirname(tmp_root_path),
                                     "split_index")
    os.makedirs(tmp_root_path, exist_ok=False)
    os.makedirs(tmp_label_path, exist_ok=False)
    os.makedirs(tmp_splitidx_path, exist_ok=False)
    ### soft link lidar dir, calib dir to tmp_root dir
    os.symlink(data_pc_path, tmp_pc_path)
    os.symlink(data_calib_path, tmp_calib_path)
    ### forward model on dataloader and save detections in tmp_root dir
    network.eval()
    detections = []
    tags = [itm["tag"] for itm in dataloader_train.dataset._kitti_infos]
    calibs = [itm["calib"] for itm in dataloader_train.dataset._kitti_infos]
    write_txt(sorted(tags), os.path.join(tmp_splitidx_path, "train.txt"))
    for data in tqdm(dataloader_train):
        data = example_convert_to_torch(data)
        detection = network(data)
        detections.append(detection[0])
    dataset_type = str(type(dataloader_train.dataset))
    dataloader_train.dataset.save_detections(detections, tags, calibs,
                                             tmp_det_path)
    ### ensemble the detections and gt labels
    ensemble_pseudo_anno_and_gt(gt_label_dir=data_label_path,
                                detection_dir=tmp_det_path,
                                old_classes=cfg.NETWORK["@classes_source"],
                                pseudo_anno_dir=tmp_label_path)
    ## create new info pkls
    ### create info pkl by system call
    assert cfg.TRAINDATA["dataset"] == "kitti", \
        "We currently only support the kitti dataset for pseudo annotation."
    cmd = "python3 tools/create_data_pseudo_anno.py "
    cmd += "--dataset kitti "
    cmd += f"--data-dir {os.path.dirname(tmp_root_path)}"
    os.system(cmd)
    # update cfg.TRAINDATA
    ### update @root_path, @info_path
    cfg.TRAINDATA["@root_path"] = tmp_root_path
    cfg.TRAINDATA["@info_path"] = os.path.join(os.path.dirname(tmp_root_path),
                                               "KITTI_infos_train.pkl")
    ### update classes_to_exclude
    cfg.TRAINDATA["prep"]["@classes_to_exclude"] = []
    Logger.log_txt("==========Generate Pseudo Annotations END=========")
Code Example #12
def setup_cores(cfg, mode):
    global g_use_fp16
    if mode == "train":
        # build dataloader_train
        generate_pseudo_annotation(cfg)
        Logger.log_txt(
            "After generating the pseudo annotation, the cfg.TRAINDATA is ")
        Logger.log_txt(cfg.TRAINDATA)
        Logger.log_txt(
            "After generating the pseudo annotation, the cfg.NETWORK is ")
        Logger.log_txt(cfg.NETWORK)
        voxelizer = build_voxelizer(cfg.VOXELIZER)
        target_assigner = build_target_assigner(cfg.TARGETASSIGNER)
        dataloader_train = build_dataloader(
            data_cfg=cfg.TRAINDATA,
            ext_dict={
                "voxelizer": voxelizer,
                "target_assigner": target_assigner,
                "feature_map_size": cfg.TRAINDATA["feature_map_size"]
            })
        # build dataloader_val
        dataloader_val = build_dataloader(
            data_cfg=cfg.VALDATA,
            ext_dict={
                "voxelizer": voxelizer,
                "target_assigner": target_assigner,
                "feature_map_size": cfg.VALDATA["feature_map_size"]
            })
        # no dataloader_test in train mode
        dataloader_test = None
        # build model (note: cfg.NETWORK is modified in place here,
        # unlike the deepcopy in generate_pseudo_annotation)
        param = cfg.NETWORK
        param["@middle_layer_dict"]["@output_shape"] = [
            1
        ] + voxelizer.grid_size[::-1].tolist() + [16]
        param["@is_training"] = True
        param["@box_coder"] = target_assigner.box_coder
        param = {proc_param(k): v for k, v in param.items() if is_param(k)}
        network = Network(**param).cuda()
        # build optimizer & lr_scheduler
        optimizer, lr_scheduler = build_optimizer_and_lr_scheduler(
            net=network,
            optimizer_cfg=cfg.TRAIN["optimizer_dict"],
            lr_scheduler_cfg=cfg.TRAIN["lr_scheduler_dict"],
            start_iter=network.get_global_step())
        # handle fp16 training
        use_fp16 = (cfg.TASK["use_fp16"]
                    if "use_fp16" in cfg.TASK.keys() else False)
        if use_fp16:
            network, optimizer = amp.initialize(network,
                                                optimizer,
                                                opt_level="O2")
        g_use_fp16 = use_fp16
    elif mode == "test":
        # build dataloader_train
        voxelizer = build_voxelizer(cfg.VOXELIZER)
        target_assigner = build_target_assigner(cfg.TARGETASSIGNER)
        dataloader_train = None
        # build dataloader_val
        dataloader_val = None
        # build dataloader_test
        dataloader_test = build_dataloader(
            data_cfg=cfg.TESTDATA,
            ext_dict={
                "voxelizer": voxelizer,
                "target_assigner": target_assigner,
                "feature_map_size": cfg.TESTDATA["feature_map_size"]
            })
        # build model
        param = cfg.NETWORK
        param["@is_training"] = False
        param["@box_coder"] = target_assigner.box_coder
        param["@middle_layer_dict"]["@output_shape"] = [
            1
        ] + voxelizer.grid_size[::-1].tolist() + [16]
        param = {proc_param(k): v for k, v in param.items() if is_param(k)}
        network = Network(**param).cuda()
        # no optimizer / lr_scheduler in test mode
        optimizer, lr_scheduler = None, None
    elif mode in ["compute_ewc_weights", "compute_mas_weights"]:
        voxelizer = build_voxelizer(cfg.VOXELIZER)
        target_assigner = build_target_assigner(cfg.TARGETASSIGNER)
        dataloader_train = build_dataloader(
            data_cfg=cfg.TRAINDATA,
            ext_dict={
                "voxelizer": voxelizer,
                "target_assigner": target_assigner,
                "feature_map_size": cfg.TRAINDATA["feature_map_size"]
            })
        dataloader_val, dataloader_test = None, None
        # build model
        param = cfg.NETWORK
        param["@middle_layer_dict"]["@output_shape"] = [
            1
        ] + voxelizer.grid_size[::-1].tolist() + [16]
        param["@is_training"] = True
        param["@box_coder"] = target_assigner.box_coder
        param = {proc_param(k): v for k, v in param.items() if is_param(k)}
        network = Network(**param).cuda()
        # no optimizer / lr_scheduler in this mode
        optimizer, lr_scheduler = None, None
    else:
        raise NotImplementedError
    cores = {
        "dataloader_train": dataloader_train,
        "dataloader_val": dataloader_val,
        "dataloader_test": dataloader_test,
        "model": network,
        "optimizer": optimizer,
        "lr_scheduler": lr_scheduler
    }
    return cores
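A minimal calling sketch, assuming a fully populated cfg object as referenced throughout this section:

cores = setup_cores(cfg, mode="train")
network = cores["model"]          # Network instance, already on the GPU
optimizer = cores["optimizer"]    # None unless mode == "train"
for data in cores["dataloader_train"]:
    ...                           # training loop goes here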