def _create_fsdp_model_config(with_fsdp: bool):
    with initialize_config_module(config_module="vissl.config"):
        cfg = compose(
            "defaults",
            overrides=[
                "config=test/integration_test/quick_swav",
                "+config/pretrain/swav/models=regnet16Gf",
                "config.SEED_VALUE=0",
                "config.MODEL.SYNC_BN_CONFIG.CONVERT_BN_TO_SYNC_BN=True",
                "config.MODEL.SYNC_BN_CONFIG.SYNC_BN_TYPE=pytorch",
                "config.LOSS.swav_loss.epsilon=0.03",
                "config.MODEL.FSDP_CONFIG.flatten_parameters=True",
                "config.MODEL.FSDP_CONFIG.mixed_precision=False",
                "config.MODEL.FSDP_CONFIG.fp32_reduce_scatter=False",
                "config.MODEL.FSDP_CONFIG.compute_dtype=float32",
                "config.OPTIMIZER.construct_single_param_group_only=True",
            ],
        )
    args, config = convert_to_attrdict(cfg)
    if with_fsdp:
        config["MODEL"]["TRUNK"]["NAME"] = "regnet_fsdp"
        config["MODEL"]["HEAD"]["PARAMS"][0][0] = "swav_head_fsdp"
        config.TRAINER.TASK_NAME = "self_supervision_fsdp_task"
    else:
        config["MODEL"]["TRUNK"]["NAME"] = "regnet_v2"
        config["MODEL"]["HEAD"]["PARAMS"][0][0] = "swav_head"
    return config
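Factories like this are typically consumed in pairs to compare an FSDP run against its DDP baseline. A minimal usage sketch, assuming a hypothetical run_training helper (not a VISSL API) that trains with a given config and returns per-step losses:

def compare_fsdp_vs_ddp_losses():
    fsdp_config = _create_fsdp_model_config(with_fsdp=True)
    ddp_config = _create_fsdp_model_config(with_fsdp=False)
    # run_training is an assumed helper, not part of VISSL
    fsdp_losses = run_training(fsdp_config)
    ddp_losses = run_training(ddp_config)
    assert len(fsdp_losses) == len(ddp_losses)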
Example n. 2
def _create_pretraining_config(with_fsdp: bool, num_gpu: int = 2):
    with initialize_config_module(config_module="vissl.config"):
        cfg = compose(
            "defaults",
            overrides=[
                "config=test/integration_test/quick_swav",
                "+config/pretrain/swav/models=regnet16Gf",
                "config.DATA.TRAIN.DATA_SOURCES=[synthetic]",
                "config.DATA.TRAIN.DATA_LIMIT=40",
                "config.SEED_VALUE=0",
                "config.MODEL.AMP_PARAMS.USE_AMP=False",
                "config.MODEL.SYNC_BN_CONFIG.CONVERT_BN_TO_SYNC_BN=True",
                "config.MODEL.SYNC_BN_CONFIG.SYNC_BN_TYPE=pytorch",
                "config.MODEL.AMP_PARAMS.AMP_TYPE=pytorch",
                "config.LOSS.swav_loss.epsilon=0.03",
                "config.MODEL.FSDP_CONFIG.flatten_parameters=True",
                "config.MODEL.FSDP_CONFIG.mixed_precision=False",
                "config.MODEL.FSDP_CONFIG.fp32_reduce_scatter=False",
                "config.MODEL.FSDP_CONFIG.compute_dtype=float32",
                f"config.DISTRIBUTED.NUM_PROC_PER_NODE={num_gpu}",
                "config.LOG_FREQUENCY=1",
                "config.OPTIMIZER.construct_single_param_group_only=True",
                "config.DATA.TRAIN.BATCHSIZE_PER_REPLICA=4",
                "config.OPTIMIZER.use_larc=False",
            ],
        )
    args, config = convert_to_attrdict(cfg)
    if with_fsdp:
        config["MODEL"]["TRUNK"]["NAME"] = "regnet_fsdp"
        config["MODEL"]["HEAD"]["PARAMS"][0][0] = "swav_head_fsdp"
        config.TRAINER.TASK_NAME = "self_supervision_fsdp_task"
    else:
        config["MODEL"]["TRUNK"]["NAME"] = "regnet_v2"
        config["MODEL"]["HEAD"]["PARAMS"][0][0] = "swav_head"
    return config
Example n. 3

def run_scene_optimizer() -> None:
    """Run GTSFM on the Lund door dataset (set 1)."""
    with initialize_config_module(config_module="gtsfm.configs"):
        # config is relative to the gtsfm module
        cfg = compose(config_name="default_lund_door_set1_config.yaml")
        scene_optimizer: SceneOptimizer = instantiate(cfg.SceneOptimizer)

        loader = OlssonLoader(os.path.join(DATA_ROOT, "set1_lund_door"),
                              image_extension="JPG")

        sfm_result_graph = scene_optimizer.create_computation_graph(
            num_images=len(loader),
            image_pair_indices=loader.get_valid_pairs(),
            image_graph=loader.create_computation_graph_for_images(),
            camera_intrinsics_graph=loader.create_computation_graph_for_intrinsics(),
            gt_pose_graph=loader.create_computation_graph_for_poses(),
        )

        # create dask client
        cluster = LocalCluster(n_workers=2, threads_per_worker=4)

        with Client(cluster), performance_report(filename="dask-report.html"):
            sfm_result = sfm_result_graph.compute()

        assert isinstance(sfm_result, GtsfmData)
Example n. 4
def hydra_main(overrides: List[str]):
    print(f"####### overrides: {overrides}")
    with initialize_config_module(config_module="vissl.config"):
        cfg = compose("defaults", overrides=overrides)
    setup_logging(__name__)
    args, config = convert_to_attrdict(cfg)
    benchmark_data(config)
Example n. 5
def get_config(config_name="classification", **kwargs) -> DictConfig:
    """
    Get a copy of the default config.
    """
    with initialize_config_module("gale.conf"):
        cfg = compose(config_name, **kwargs)
    return cfg.copy()
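A hedged usage sketch for get_config; the override key below is illustrative, not taken from gale's actual config schema:

# Illustrative only: the override key is an assumption, not a documented gale option.
cfg = get_config("classification", overrides=["training.seed=42"])
print(cfg)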
Example n. 6
def _create_config(with_fsdp: bool):
    with initialize_config_module(config_module="vissl.config"):
        cfg = compose(
            "defaults",
            overrides=[
                "config=pretrain/swav/swav_8node_resnet",
                "+config/pretrain/swav/models=regnet16Gf",
                "config.SEED_VALUE=2",
                "config.MODEL.AMP_PARAMS.USE_AMP=True",
                "config.MODEL.AMP_PARAMS.AMP_TYPE=pytorch",
                "config.MODEL.SYNC_BN_CONFIG.CONVERT_BN_TO_SYNC_BN=True",
                "config.MODEL.SYNC_BN_CONFIG.SYNC_BN_TYPE=pytorch",
                "config.OPTIMIZER.num_epochs=1",
                "config.OPTIMIZER.use_larc=False",
                "config.LOSS.swav_loss.epsilon=0.03",
                "config.DATA.TRAIN.DATA_SOURCES=[synthetic]",
                "config.DATA.TRAIN.BATCHSIZE_PER_REPLICA=16",
                "config.DISTRIBUTED.NCCL_DEBUG=False",
                "config.DISTRIBUTED.NUM_NODES=1",
                "config.MODEL.FSDP_CONFIG.flatten_parameters=True",
                "config.MODEL.FSDP_CONFIG.mixed_precision=False",
                "config.MODEL.FSDP_CONFIG.fp32_reduce_scatter=False",
            ],
        )
    args, config = convert_to_attrdict(cfg)
    if with_fsdp:
        config["MODEL"]["TRUNK"]["NAME"] = "regnet_fsdp"
        config["MODEL"]["HEAD"]["PARAMS"][0][0] = "swav_head_fsdp"
    else:
        config["MODEL"]["TRUNK"]["NAME"] = "regnet_v2"
        config["MODEL"]["HEAD"]["PARAMS"][0][0] = "swav_head"
    return config
Example n. 7
    def test_create_computation_graph(self):
        """Will test Dask multi-processing capabilities and ability to serialize all objects."""
        use_intrinsics_in_verification = False

        with initialize_config_module(config_module="gtsfm.configs"):

            # config is relative to the gtsfm module
            cfg = compose(config_name="scene_optimizer_unit_test_config.yaml")
            self.obj: SceneOptimizer = instantiate(cfg.SceneOptimizer)

            # generate the dask computation graph
            sfm_result_graph = self.obj.create_computation_graph(
                len(self.loader),
                self.loader.get_valid_pairs(),
                self.loader.create_computation_graph_for_images(),
                self.loader.create_computation_graph_for_intrinsics(),
                use_intrinsics_in_verification=use_intrinsics_in_verification,
            )

            # create dask client
            cluster = LocalCluster(n_workers=1, threads_per_worker=4)

            with Client(cluster):
                sfm_result = dask.compute(sfm_result_graph)[0]

            self.assertIsInstance(sfm_result, SfmResult)

            # compare the camera poses
            poses = sfm_result.get_camera_poses()

            expected_poses = [self.loader.get_camera_pose(i) for i in range(len(self.loader))]

            self.assertTrue(comp_utils.compare_global_poses(poses, expected_poses))
Example n. 8
def test_initialize_with_module(hydra_restore_singletons: Any) -> None:
    with initialize_config_module(
        config_module="tests.test_apps.app_with_cfg_groups.conf", job_name="my_pp"
    ):
        assert compose(config_name="config") == {
            "optimizer": {"type": "nesterov", "lr": 0.001}
        }
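The equality assertion above works because compose returns an omegaconf DictConfig, which compares equal to plain Python containers. To obtain an actual dict, OmegaConf.to_container can be used; a small sketch based on the same test config:

from omegaconf import OmegaConf

with initialize_config_module(
    config_module="tests.test_apps.app_with_cfg_groups.conf", job_name="my_pp"
):
    cfg = compose(config_name="config")
    plain = OmegaConf.to_container(cfg, resolve=True)  # plain dict, interpolations resolved
    assert plain == {"optimizer": {"type": "nesterov", "lr": 0.001}}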
Example n. 9

    def _create_pretraining_config(num_gpu: int = 2):
        with initialize_config_module(config_module="vissl.config"):
            cfg = compose(
                "defaults",
                overrides=[
                    "config=test/integration_test/quick_swav",
                    "config.DATA.TRAIN.DATA_SOURCES=[synthetic]",
                    "config.DATA.TRAIN.DATA_LIMIT=40",
                    "config.SEED_VALUE=0",
                    "config.MODEL.AMP_PARAMS.USE_AMP=False",
                    "config.MODEL.SYNC_BN_CONFIG.CONVERT_BN_TO_SYNC_BN=True",
                    "config.MODEL.SYNC_BN_CONFIG.SYNC_BN_TYPE=pytorch",
                    "config.MODEL.AMP_PARAMS.AMP_TYPE=pytorch",
                    "config.LOSS.swav_loss.epsilon=0.03",
                    "config.MODEL.FSDP_CONFIG.flatten_parameters=True",
                    "config.MODEL.FSDP_CONFIG.mixed_precision=False",
                    "config.MODEL.FSDP_CONFIG.fp32_reduce_scatter=False",
                    "config.MODEL.FSDP_CONFIG.compute_dtype=float32",
                    f"config.DISTRIBUTED.NUM_PROC_PER_NODE={num_gpu}",
                    "config.LOG_FREQUENCY=1",
                    "config.OPTIMIZER.construct_single_param_group_only=True",
                    "config.DATA.TRAIN.BATCHSIZE_PER_REPLICA=4",
                    "config.OPTIMIZER.use_larc=False",
                ],
            )

        args, config = convert_to_attrdict(cfg)
        return config
Example n. 10

def run_scene_optimizer(args) -> None:
    """Run GTSFM over images from an Argoverse vehicle log."""
    with initialize_config_module(config_module="gtsfm.configs"):
        # config is relative to the gtsfm module
        cfg = compose(config_name="default_lund_door_set1_config.yaml")
        scene_optimizer: SceneOptimizer = instantiate(cfg.SceneOptimizer)

        loader = ArgoverseDatasetLoader(
            dataset_dir=args.dataset_dir,
            log_id=args.log_id,
            stride=args.stride,
            max_num_imgs=args.max_num_imgs,
            max_lookahead_sec=args.max_lookahead_sec,
            camera_name=args.camera_name,
        )

        sfm_result_graph = scene_optimizer.create_computation_graph(
            len(loader),
            loader.get_valid_pairs(),
            loader.create_computation_graph_for_images(),
            loader.create_computation_graph_for_intrinsics(),
            use_intrinsics_in_verification=True,
            gt_pose_graph=loader.create_computation_graph_for_poses(),
        )

        # create dask client
        cluster = LocalCluster(n_workers=2, threads_per_worker=4)

        with Client(cluster), performance_report(filename="dask-report.html"):
            sfm_result = sfm_result_graph.compute()

        assert isinstance(sfm_result, GtsfmData)
        scene_avg_reproj_error = sfm_result.get_scene_avg_reprojection_error()
        logger.info('Scene avg reproj error: {}'.format(
            str(np.round(scene_avg_reproj_error, 3))))
Example n. 11
def _create_extract_features_config(checkpoint_path: str,
                                    num_gpu: int = 2):
    with initialize_config_module(config_module="vissl.config"):
        cfg = compose(
            "defaults",
            overrides=[
                "config=feature_extraction/extract_resnet_in1k_8gpu",
                "+config/feature_extraction/with_head=rn50_swav",
                f"config.MODEL.WEIGHTS_INIT.PARAMS_FILE={checkpoint_path}",
                "config.DATA.TRAIN.DATA_SOURCES=[synthetic]",
                "config.DATA.TRAIN.LABEL_SOURCES=[synthetic]",
                "config.DATA.TEST.DATA_SOURCES=[synthetic]",
                "config.DATA.TEST.LABEL_SOURCES=[synthetic]",
                "config.DATA.TRAIN.DATA_LIMIT=40",
                "config.DATA.TEST.DATA_LIMIT=20",
                "config.SEED_VALUE=0",
                "config.MODEL.AMP_PARAMS.USE_AMP=False",
                "config.MODEL.SYNC_BN_CONFIG.CONVERT_BN_TO_SYNC_BN=True",
                "config.MODEL.SYNC_BN_CONFIG.SYNC_BN_TYPE=pytorch",
                "config.MODEL.AMP_PARAMS.AMP_TYPE=pytorch",
                "config.LOSS.swav_loss.epsilon=0.03",
                "config.MODEL.FSDP_CONFIG.flatten_parameters=True",
                "config.MODEL.FSDP_CONFIG.mixed_precision=False",
                "config.MODEL.FSDP_CONFIG.fp32_reduce_scatter=False",
                "config.MODEL.FSDP_CONFIG.compute_dtype=float32",
                f"config.DISTRIBUTED.NUM_PROC_PER_NODE={num_gpu}",
                "config.LOG_FREQUENCY=1",
                "config.OPTIMIZER.construct_single_param_group_only=True",
                "config.DATA.TRAIN.BATCHSIZE_PER_REPLICA=4",
                "config.DATA.TEST.BATCHSIZE_PER_REPLICA=2",
                "config.OPTIMIZER.use_larc=False",
            ],
        )
    args, config = convert_to_attrdict(cfg)
    return config
Example n. 12
def test_generated_config(self) -> None:
    with initialize_config_module(config_module="hydra_app.conf"):
        cfg = compose(config_name="config", overrides=["app.user=test_user"])
        assert cfg == {
            "app": {"user": "test_user", "num1": 10, "num2": 20},
            "db": {"host": "localhost", "port": 3306},
        }
Example n. 13
    def _create_benchmark_config(
        checkpoint_path: str,
        with_fsdp: bool,
        with_eval_mlp: bool = True,
        num_gpu: int = 2,
    ):
        if with_eval_mlp:
            head_config = "+config/debugging/benchmark/linear_image_classification/models=regnet16Gf_eval_mlp"
        else:
            head_config = "+config/debugging/benchmark/linear_image_classification/models=regnet16Gf_mlp"

        with initialize_config_module(config_module="vissl.config"):
            cfg = compose(
                "defaults",
                overrides=[
                    "config=debugging/benchmark/linear_image_classification/eval_resnet_8gpu_transfer_imagenette_160",
                    head_config,
                    f"config.MODEL.WEIGHTS_INIT.PARAMS_FILE={checkpoint_path}",
                    "config.SEED_VALUE=2",
                    "config.MODEL.AMP_PARAMS.AMP_TYPE=pytorch",
                    "config.MODEL.SYNC_BN_CONFIG.SYNC_BN_TYPE=pytorch",
                    "config.OPTIMIZER.num_epochs=1",
                    "config.OPTIMIZER.param_schedulers.lr.lengths=[0.1, 0.9]",
                    "config.OPTIMIZER.param_schedulers.lr.name=cosine",
                    "config.LOSS.swav_loss.epsilon=0.03",
                    "config.DATA.TRAIN.DATA_SOURCES=[synthetic]",
                    "config.DATA.TRAIN.LABEL_SOURCES=[synthetic]",
                    "config.DATA.TEST.DATA_SOURCES=[synthetic]",
                    "config.DATA.TEST.LABEL_SOURCES=[synthetic]",
                    "config.DATA.TRAIN.DATA_LIMIT=40",
                    "config.DATA.TEST.DATA_LIMIT=16",
                    "config.DISTRIBUTED.NCCL_DEBUG=False",
                    "config.MODEL.AMP_PARAMS.USE_AMP=false",
                    "config.MODEL.FSDP_CONFIG.mixed_precision=false",
                    "config.OPTIMIZER.use_larc=false",
                    "config.MODEL.SYNC_BN_CONFIG.CONVERT_BN_TO_SYNC_BN=True",  # This is critical
                    "config.REPRODUCIBILITY.CUDDN_DETERMINISTIC=True",
                    "config.DATA.TRAIN.USE_DEBUGGING_SAMPLER=True",
                    "config.DATA.TEST.USE_DEBUGGING_SAMPLER=True",
                    "config.DATA.TRAIN.BATCHSIZE_PER_REPLICA=4",
                    "config.DATA.TEST.BATCHSIZE_PER_REPLICA=4",
                    "config.MODEL.FSDP_CONFIG.flatten_parameters=True",
                    "config.MODEL.FSDP_CONFIG.fp32_reduce_scatter=false",
                    "config.OPTIMIZER.construct_single_param_group_only=True",
                    "config.OPTIMIZER.num_epochs=2",
                    "config.DISTRIBUTED.NUM_NODES=1",
                    f"config.DISTRIBUTED.NUM_PROC_PER_NODE={num_gpu}",
                ],
            )
        args, config = convert_to_attrdict(cfg)
        if with_fsdp:
            config["MODEL"]["TRUNK"]["NAME"] = "regnet_fsdp"
            head_type = "eval_mlp_fsdp" if with_eval_mlp else "mlp_fsdp"
            config["MODEL"]["HEAD"]["PARAMS"][0][0] = head_type
            config.TRAINER.TASK_NAME = "self_supervision_fsdp_task"
        else:
            config["MODEL"]["TRUNK"]["NAME"] = "regnet_v2"
            head_type = "eval_mlp" if with_eval_mlp else "mlp"
            config["MODEL"]["HEAD"]["PARAMS"][0][0] = head_type
        return config
Example n. 14
def hydra_main(overrides: List[Any]):
    ######################################################################################
    # DO NOT MOVE THIS IMPORT TO TOP LEVEL: submitit processes will not be initialized
    # correctly (MKL_THREADING_LAYER will be set to INTEL instead of GNU)
    ######################################################################################
    from vissl.hooks import default_hook_generator

    ######################################################################################

    print(f"####### overrides: {overrides}")
    with initialize_config_module(config_module="vissl.config"):
        cfg = compose("defaults", overrides=overrides)
    args, config = convert_to_attrdict(cfg)

    if config.SLURM.USE_SLURM:
        assert (
            is_submitit_available()
        ), "Please 'pip install submitit' to schedule jobs on SLURM"
        launch_distributed_on_slurm(engine_name=args.engine_name, cfg=config)
    else:
        launch_distributed(
            cfg=config,
            node_id=args.node_id,
            engine_name=args.engine_name,
            hook_generator=default_hook_generator,
        )
Example n. 15
def test_with_initialize_config_module() -> None:
    with initialize_config_module(config_module="hydra_app.conf"):
        # config is relative to a module
        cfg = compose(config_name="config", overrides=["app.user=test_user"])
        assert cfg == {
            "app": {"user": "******", "num1": 10, "num2": 20},
            "db": {"host": "localhost", "port": 3306},
        }
Example n. 16
    def _generate_config(self, config):
        """
        Generate AttrDict config from a config YAML file and overrides.
        """
        with initialize_config_module(config_module="vissl.config"):
            config = compose("defaults", overrides=config)

        return convert_to_attrdict(config)
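A hedged usage sketch for _generate_config: it takes the same kind of override list seen throughout these examples, and convert_to_attrdict returns an (args, config) pair. The override values below are placeholders:

# Illustrative call inside the owning class; the override values are placeholders.
args, config = self._generate_config([
    "config=test/integration_test/quick_swav",
    "config.SEED_VALUE=0",
])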
Example n. 17
def test_initialize_config_module_ctx(hydra_restore_singletons: Any) -> None:
    with initialize_config_module(
            config_module="examples.jupyter_notebooks.cloud_app.conf"):
        ret = compose(return_hydra_config=True)
        assert ret.hydra.job.name == "app"

    with initialize_config_module(
            config_module="examples.jupyter_notebooks.cloud_app.conf",
            job_name="test_job"):
        ret = compose(return_hydra_config=True)
        assert ret.hydra.job.name == "test_job"

    with initialize_config_module(
            config_module="examples.jupyter_notebooks.cloud_app.conf",
            job_name="test_job"):
        ret = compose(return_hydra_config=True)
        assert ret.hydra.job.name == "test_job"
Example n. 18
def test_initialize_config_module_ctx(self, config_file: str,
                                      overrides: List[str],
                                      expected: Any) -> None:
    with initialize_config_module(
            config_module="examples.jupyter_notebooks.cloud_app.conf",
            job_name="job_name",
    ):
        ret = compose(config_file, overrides)
        assert ret == expected
Example n. 19
def test_missing_init_py_error(hydra_restore_singletons: Any) -> None:
    expected = (
        "Primary config module 'hydra.test_utils.configs.missing_init_py' not found."
        "\nCheck that it's correct and contains an __init__.py file")

    with pytest.raises(Exception, match=re.escape(expected)):
        with initialize_config_module(
                config_module="hydra.test_utils.configs.missing_init_py"):
            hydra = GlobalHydra.instance().hydra
            assert hydra is not None
            compose(config_name="test.yaml", overrides=[])
Example n. 20

def setup_pathmanager():
    """
    Set up PathManager. A bit hacky -- we use the set_env_vars method to set up PathManager,
    and as such we need to create a dummy config and dummy values for local_rank and node_id.
    """
    with initialize_config_module(config_module="vissl.config"):
        cfg = compose(
            "defaults",
            overrides=["config=test/integration_test/quick_swav"],
        )
    config = AttrDict(cfg).config
    set_env_vars(local_rank=0, node_id=0, cfg=config)
Example n. 21
def main():
    """
    Convert raw split data with class-sensitive annotations into
    a new dataset that only distinguishes fg and bg.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('tasks', type=str, nargs='+',
                        help="Single or multiple task identifiers to process consecutively",
                        )
    parser.add_argument('--overwrite', action='store_true')
    parser.add_argument('-o', '--overwrites', type=str, nargs='+',
                        help="overwrites for config file",
                        required=False)
    args = parser.parse_args()
    tasks = args.tasks
    ov = args.overwrites
    overwrite = args.overwrite
    initialize_config_module(config_module="nndet.conf")

    for task in tasks:
        convert_raw(task, overwrite, ov)
Example n. 22
def test_missing_init_py_error(hydra_restore_singletons: Any) -> None:
    with pytest.raises(
            Exception,
            match=re.escape(
                "Unexpected error checking content of 'hydra.test_utils.configs.missing_init_py', "
                "did you forget an __init__.py?"),
    ):
        with initialize_config_module(
                config_module="hydra.test_utils.configs.missing_init_py"):
            hydra = GlobalHydra.instance().hydra
            assert hydra is not None
            hydra.compose_config(config_name=None, overrides=[])
Example n. 23
def hydra_main(overrides: List[Any]):
    print(f"####### overrides: {overrides}")
    with initialize_config_module(config_module="vissl.config"):
        cfg = compose("defaults", overrides=overrides)
    setup_logging(__name__)
    args, config = convert_to_attrdict(cfg)
    launch_distributed(
        config,
        node_id=args.node_id,
        engine_name=args.engine_name,
        hook_generator=default_hook_generator,
    )
    # close the logging streams including the filehandlers
    shutdown_logging()
Example n. 24
def main() -> None:
    with initialize(config_path="conf"):
        cfg = compose(config_name="config", return_hydra_config=True)
        assert cfg.config == {"hello": "world"}
        assert cfg.hydra.job.name == "main"

    with initialize(config_path="conf", job_name="test_job"):
        cfg = compose(config_name="config", return_hydra_config=True)
        assert cfg.config == {"hello": "world"}
        assert cfg.hydra.job.name == "test_job"

    abs_config_dir = os.path.abspath("initialization_test_app/conf")
    with initialize_config_dir(config_dir=abs_config_dir):
        cfg = compose(config_name="config", return_hydra_config=True)
        assert cfg.config == {"hello": "world"}
        assert cfg.hydra.job.name == "app"

    with initialize_config_dir(config_dir=abs_config_dir, job_name="test_job"):
        cfg = compose(config_name="config", return_hydra_config=True)
        assert cfg.config == {"hello": "world"}
        assert cfg.hydra.job.name == "test_job"

    # Those tests can only work if the module is installed
    if len(sys.argv) > 1 and sys.argv[1] == "module_installed":
        with initialize_config_module(
                config_module="initialization_test_app.conf"):
            cfg = compose(config_name="config", return_hydra_config=True)
            assert cfg.config == {"hello": "world"}
            assert cfg.hydra.job.name == "app"

        with initialize_config_module(
                config_module="initialization_test_app.conf",
                job_name="test_job"):
            cfg = compose(config_name="config", return_hydra_config=True)
            assert cfg.config == {"hello": "world"}
            assert cfg.hydra.job.name == "test_job"
Example n. 25
def _create_linear_evaluation_config(self, with_fsdp: bool,
                                     with_mixed_precision: bool,
                                     auto_wrap_threshold: int):
    with initialize_config_module(config_module="vissl.config"):
        cfg = compose(
            "defaults",
            overrides=[
                "config=test/integration_test/quick_eval_in1k_linear",
                "+config/test/integration_test/models=eval_regnet_fsdp",
                "config.SEED_VALUE=0",
                "config.MODEL.SYNC_BN_CONFIG.CONVERT_BN_TO_SYNC_BN=True",
                "config.MODEL.SYNC_BN_CONFIG.SYNC_BN_TYPE=pytorch",
                "config.MODEL.AMP_PARAMS.AMP_TYPE=pytorch",
                "config.MODEL.FSDP_CONFIG.flatten_parameters=True",
                "config.DATA.TRAIN.DATA_SOURCES=[synthetic]",
                "config.DATA.TRAIN.LABEL_SOURCES=[synthetic]",
                "config.DATA.TRAIN.BATCHSIZE_PER_REPLICA=4",
                "config.DATA.TRAIN.DATA_LIMIT=32",
                "config.DATA.TEST.DATA_SOURCES=[synthetic]",
                "config.DATA.TEST.LABEL_SOURCES=[synthetic]",
                "config.DATA.TEST.BATCHSIZE_PER_REPLICA=4",
                "config.DATA.TEST.DATA_LIMIT=32",
                "config.DATA.TRAIN.USE_DEBUGGING_SAMPLER=True",
                "config.OPTIMIZER.use_larc=False",
                "config.OPTIMIZER.construct_single_param_group_only=True",
                "config.LOG_FREQUENCY=1",
                "config.REPRODUCIBILITY.CUDDN_DETERMINISTIC=True",
                "config.DISTRIBUTED.NUM_PROC_PER_NODE=2",
            ],
        )
    args, config = convert_to_attrdict(cfg)
    if with_fsdp:
        config["MODEL"]["TRUNK"]["NAME"] = "regnet_fsdp"
        config["MODEL"]["HEAD"]["PARAMS"][0][0] = "eval_mlp_fsdp"
        config.TRAINER.TASK_NAME = "self_supervision_fsdp_task"
        config.MODEL.FSDP_CONFIG.mixed_precision = with_mixed_precision
        config.MODEL.FSDP_CONFIG.fp32_reduce_scatter = with_mixed_precision
        config.MODEL.FSDP_CONFIG.compute_dtype = torch.float32
        config.MODEL.FSDP_CONFIG.AUTO_WRAP_THRESHOLD = auto_wrap_threshold
    else:
        config["MODEL"]["TRUNK"]["NAME"] = "regnet_v2"
        config["MODEL"]["HEAD"]["PARAMS"][0][0] = "eval_mlp"
    config.MODEL.AMP_PARAMS.USE_AMP = with_mixed_precision
    config.MODEL.TRUNK.REGNET.stage_checkpoints = [[2], [4], [6, 11], []]
    config.MODEL.ACTIVATION_CHECKPOINTING.USE_ACTIVATION_CHECKPOINTING = False
    return config
Example n. 26

    def _create_pretraining_config(
        with_fsdp: bool,
        with_activation_checkpointing: bool,
        with_mixed_precision: bool,
        auto_wrap_threshold: int,
    ):
        with initialize_config_module(config_module="vissl.config"):
            cfg = compose(
                "defaults",
                overrides=[
                    "config=test/integration_test/quick_swav",
                    "+config/pretrain/swav/models=regnet16Gf",
                    "config.DATA.TRAIN.DATA_SOURCES=[synthetic]",
                    "config.SEED_VALUE=0",
                    "config.MODEL.SYNC_BN_CONFIG.CONVERT_BN_TO_SYNC_BN=True",
                    "config.MODEL.SYNC_BN_CONFIG.SYNC_BN_TYPE=pytorch",
                    "config.MODEL.AMP_PARAMS.AMP_TYPE=pytorch",
                    "config.LOSS.swav_loss.epsilon=0.03",
                    "config.MODEL.FSDP_CONFIG.flatten_parameters=True",
                    "config.DISTRIBUTED.NUM_PROC_PER_NODE=2",
                    "config.LOG_FREQUENCY=1",
                    "config.OPTIMIZER.construct_single_param_group_only=True",
                    "config.DATA.TRAIN.BATCHSIZE_PER_REPLICA=4",
                    "config.OPTIMIZER.use_larc=False",
                    "config.REPRODUCIBILITY.CUDDN_DETERMINISTIC=True",
                    "config.DATA.TRAIN.USE_DEBUGGING_SAMPLER=True",
                ],
            )
        args, config = convert_to_attrdict(cfg)
        if with_fsdp:
            config["MODEL"]["TRUNK"]["NAME"] = "regnet_fsdp"
            config["MODEL"]["HEAD"]["PARAMS"][0][0] = "swav_head_fsdp"
            config.TRAINER.TASK_NAME = "self_supervision_fsdp_task"
            config.MODEL.FSDP_CONFIG.mixed_precision = with_mixed_precision
            config.MODEL.FSDP_CONFIG.fp32_reduce_scatter = with_mixed_precision
            config.MODEL.FSDP_CONFIG.compute_dtype = torch.float32
            config.MODEL.FSDP_CONFIG.AUTO_WRAP_THRESHOLD = auto_wrap_threshold
        else:
            config["MODEL"]["TRUNK"]["NAME"] = "regnet_v2"
            config["MODEL"]["HEAD"]["PARAMS"][0][0] = "swav_head"
        config.MODEL.AMP_PARAMS.USE_AMP = with_mixed_precision

        config.MODEL.ACTIVATION_CHECKPOINTING.USE_ACTIVATION_CHECKPOINTING = (
            with_activation_checkpointing)
        return config
Example n. 27
    def test_create_computation_graph(self):
        """Will test Dask multi-processing capabilities and ability to serialize all objects."""
        self.loader = OlssonLoader(str(DATA_ROOT_PATH / "set1_lund_door"),
                                   image_extension="JPG")

        with initialize_config_module(config_module="gtsfm.configs"):

            # config is relative to the gtsfm module
            cfg = compose(config_name="scene_optimizer_unit_test_config.yaml")
            obj: SceneOptimizer = instantiate(cfg.SceneOptimizer)

            # generate the dask computation graph
            sfm_result_graph = obj.create_computation_graph(
                len(self.loader),
                self.loader.get_valid_pairs(),
                self.loader.create_computation_graph_for_images(),
                self.loader.create_computation_graph_for_intrinsics(),
                gt_pose_graph=self.loader.create_computation_graph_for_poses(),
            )

            # create dask client
            cluster = LocalCluster(n_workers=1, threads_per_worker=4)

            with Client(cluster):
                sfm_result = dask.compute(sfm_result_graph)[0]

            self.assertIsInstance(sfm_result, GtsfmData)

            # compare the camera poses
            computed_poses = sfm_result.get_camera_poses()

            # get active cameras from largest connected component, may be <len(self.loader)
            connected_camera_idxs = sfm_result.get_valid_camera_indices()
            expected_poses = [
                self.loader.get_camera_pose(i) for i in connected_camera_idxs
            ]

            self.assertTrue(
                comp_utils.compare_global_poses(computed_poses,
                                                expected_poses))
Example n. 28
def _create_extract_features_config_head(checkpoint_path: str, num_gpu: int = 2):
    with initialize_config_module(config_module="vissl.config"):
        cfg = compose(
            "defaults",
            overrides=[
                "config=feature_extraction/extract_resnet_in1k_8gpu",
                "+config/feature_extraction/with_head=rn50_swav",
                f"config.MODEL.WEIGHTS_INIT.PARAMS_FILE={checkpoint_path}",
                "config.DATA.TRAIN.DATA_SOURCES=[synthetic]",
                "config.DATA.TRAIN.LABEL_SOURCES=[synthetic]",
                "config.DATA.TEST.DATA_SOURCES=[synthetic]",
                "config.DATA.TEST.LABEL_SOURCES=[synthetic]",
                "config.DATA.TRAIN.DATA_LIMIT=40",
                "config.DATA.TEST.DATA_LIMIT=20",
                "config.SEED_VALUE=0",
                f"config.DISTRIBUTED.NUM_PROC_PER_NODE={num_gpu}",
                "config.OPTIMIZER.construct_single_param_group_only=True",
                "config.DATA.TRAIN.BATCHSIZE_PER_REPLICA=4",
                "config.DATA.TEST.BATCHSIZE_PER_REPLICA=2",
            ],
        )
    args, config = convert_to_attrdict(cfg)
    return config
Example n. 29
    def _create_pretraining_config(with_fsdp: bool,
                                   with_activation_checkpointing: bool,
                                   with_larc: bool):
        with initialize_config_module(config_module="vissl.config"):
            cfg = compose(
                "defaults",
                overrides=[
                    "config=pretrain/swav/swav_8node_resnet",
                    "+config/pretrain/swav/models=regnet16Gf",
                    "config.SEED_VALUE=2",
                    "config.MODEL.AMP_PARAMS.USE_AMP=True",
                    "config.MODEL.AMP_PARAMS.AMP_TYPE=pytorch",
                    "config.MODEL.SYNC_BN_CONFIG.CONVERT_BN_TO_SYNC_BN=True",
                    "config.MODEL.SYNC_BN_CONFIG.SYNC_BN_TYPE=pytorch",
                    f"config.OPTIMIZER.use_larc={with_larc}",
                    "config.LOSS.swav_loss.epsilon=0.03",
                    "config.MODEL.FSDP_CONFIG.flatten_parameters=True",
                    "config.MODEL.FSDP_CONFIG.mixed_precision=False",
                    "config.MODEL.FSDP_CONFIG.fp32_reduce_scatter=False",
                ],
            )
        args, config = convert_to_attrdict(cfg)
        if with_fsdp:
            config["MODEL"]["TRUNK"]["NAME"] = "regnet_fsdp"
            config["MODEL"]["HEAD"]["PARAMS"][0][0] = "swav_head_fsdp"
        else:
            config["MODEL"]["TRUNK"]["NAME"] = "regnet_v2"
            config["MODEL"]["HEAD"]["PARAMS"][0][0] = "swav_head"

        if with_larc and with_fsdp:
            config.MODEL.FSDP_CONFIG.flatten_parameters = False
            config.OPTIMIZER.name = "sgd_fsdp"

        config["MODEL"]["ACTIVATION_CHECKPOINTING"][
            "USE_ACTIVATION_CHECKPOINTING"] = with_activation_checkpointing
        return config
Example n. 30
def hydra_main(overrides: List[Any]):
    with initialize_config_module(config_module="vissl.config"):
        cfg = compose("defaults", overrides=overrides)
    args, config = convert_to_attrdict(cfg)
    main(args, config)