def test_export_with_pretrained(tmp_path):
    """Run the classification sample in export mode with ``--pretrained``
    and verify that an ONNX model file is written to ``tmp_path``."""
    sample_config = SampleConfig()
    sample_config.update({
        "model": "resnet18",
        "dataset": "imagenet",
        "input_info": {
            "sample_size": [2, 3, 299, 299]
        },
        "num_classes": 1000,
        "compression": {
            "algorithm": "magnitude_sparsity"
        }
    })
    factory = ConfigFactory(sample_config, tmp_path / 'config.json')

    onnx_path = os.path.join(str(tmp_path), "model.onnx")
    cli_args = {
        "--mode": "export",
        "--config": factory.serialize(),
        "--pretrained": '',
        "--to-onnx": onnx_path
    }

    # Fall back to CPU execution when no CUDA device is available.
    if not torch.cuda.is_available():
        cli_args["--cpu-only"] = True

    Command(create_command_line(cli_args, "classification")).run()
    assert os.path.exists(onnx_path)
# Example #2
def configure_device(current_gpu, config: SampleConfig):
    """Record the assigned GPU on *config*, initialize distributed execution
    when the execution mode requires it, and select the torch device.

    For SINGLE_GPU mode the chosen GPU is also made the current CUDA device.
    """
    config.current_gpu = current_gpu
    distributed_modes = (ExecutionMode.DISTRIBUTED,
                         ExecutionMode.MULTIPROCESSING_DISTRIBUTED)
    config.distributed = config.execution_mode in distributed_modes
    if config.distributed:
        configure_distributed(config)

    config.device = get_device(config)

    if config.execution_mode == ExecutionMode.SINGLE_GPU:
        torch.cuda.set_device(config.current_gpu)
# Example #3
def start_worker(main_worker, config: SampleConfig):
    """Launch *main_worker* according to the configured execution mode.

    CPU-only, single-GPU and DataParallel modes call the worker directly in
    this process; multiprocessing-distributed mode spawns one worker process
    per local GPU via ``torch.multiprocessing``.
    """
    mode = config.execution_mode

    if mode == ExecutionMode.CPU_ONLY:
        main_worker(current_gpu=None, config=config)
    elif mode == ExecutionMode.SINGLE_GPU:
        main_worker(current_gpu=config.gpu_id, config=config)
    elif mode == ExecutionMode.GPU_DATAPARALLEL:
        main_worker(current_gpu=None, config=config)
    elif mode == ExecutionMode.MULTIPROCESSING_DISTRIBUTED:
        # With ngpus_per_node processes per node, the total world size
        # has to be scaled by the local process count.
        config.ngpus_per_node = torch.cuda.device_count()
        config.world_size = config.ngpus_per_node * config.world_size
        # torch.multiprocessing.spawn launches one main_worker per GPU;
        # the process index is passed as the first positional argument.
        mp.spawn(main_worker, nprocs=config.ngpus_per_node, args=(config, ))
def ssd_vgg_512_test():
    """Build a 21-class SSD-VGG model configured for 512x512 inputs."""
    params = SampleConfig({
        "steps": [8, 16, 32, 64, 128, 256, 512],
        "min_sizes": [35.84, 76.8, 153.6, 230.4, 307.2, 384.0, 460.8],
        "max_sizes": [76.8, 153.6, 230.4, 307.2, 384.0, 460.8, 537.6],
        "aspect_ratios": [[2], [2, 3], [2, 3], [2, 3], [2, 3], [2], [2]],
        "variance": [0.1, 0.1, 0.2, 0.2],
        "clip": False,
        "flip": True,
    })
    return SSD_VGG(cfg=params, size=512, num_classes=21)
# Example #5
def ssd_mobilenet():
    """Build a 21-class MobileNet-based SSD model with the sample's
    default prior-box parameters."""
    params = SampleConfig({
        "variance": [0.1, 0.1, 0.2, 0.2],
        "max_sizes": [60, 111, 162, 213, 264, 315],
        "min_sizes": [30, 60, 111, 162, 213, 264],
        "steps": [16, 32, 64, 100, 150, 300],
        "aspect_ratios": [[2], [2, 3], [2, 3], [2, 3], [2], [2]],
        "clip": False,
        "flip": True,
        "top_k": 200,
    })
    return MobileNetSSD(21, params)
# Example #6
def ssd_vgg300():
    """Build a 21-class SSD-VGG model configured for 300x300 inputs.

    The trailing ``True`` is forwarded positionally to ``SSD_VGG``;
    its meaning is defined by that constructor.
    """
    params = SampleConfig({
        "clip": False,
        "variance": [0.1, 0.1, 0.2, 0.2],
        "max_sizes": [60, 111, 162, 213, 264, 315],
        "min_sizes": [30, 60, 111, 162, 213, 264],
        "steps": [8, 16, 32, 64, 100, 300],
        "aspect_ratios": [[2], [2, 3], [2, 3], [2, 3], [2], [2]],
        "flip": True,
    })
    return SSD_VGG(params, 300, 21, True)
# Example #7
def test_model_can_be_loaded_with_resume(_params):
    """Verify that a sample checkpoint can be restored (``is_resume=True``)
    into a freshly built, NNCF-compressed model for the given execution mode."""
    params = _params
    config = SampleConfig.from_json(str(params['sample_config_path']))
    nncf_config = NNCFConfig.from_json(str(params['sample_config_path']))

    config.execution_mode = params['execution_mode']
    config.current_gpu = 0
    config.device = get_device(config)
    config.distributed = config.execution_mode in (
        ExecutionMode.DISTRIBUTED, ExecutionMode.MULTIPROCESSING_DISTRIBUTED)
    if config.distributed:
        # Single-process "distributed" setup over the loopback interface.
        config.dist_url = "tcp://127.0.0.1:9898"
        config.dist_backend = "nccl"
        config.rank = 0
        config.world_size = 1
        configure_distributed(config)

    model = load_model(config['model'],
                       pretrained=False,
                       num_classes=config.get('num_classes', 1000),
                       model_params=config.get('model_params'))
    nncf_config = register_default_init_args(
        nncf_config, train_loader=create_ones_mock_dataloader(nncf_config))

    model.to(config.device)
    model, compression_ctrl = create_compressed_model_and_algo_for_test(
        model, nncf_config)
    model, _ = prepare_model_for_execution(model, config)

    if config.distributed:
        compression_ctrl.distributed()

    checkpoint = torch.load(params['checkpoint_path'], map_location='cpu')
    load_state(model, checkpoint['state_dict'], is_resume=True)
# Example #8
def create_model(config: SampleConfig, resuming_checkpoint: dict = None):
    """Build the SSD network, optionally load initial weights, wrap it with
    NNCF compression, restore the resumed model state if one is given, and
    prepare the model for execution.

    :param config: sample configuration holding model/device/compression settings.
    :param resuming_checkpoint: optional checkpoint dict to resume from.
    :return: tuple ``(compression_ctrl, compressed_model)``; the model is
        left in train mode.
    """
    input_infos = create_input_infos(config.nncf_config)
    image_size = input_infos[0].shape[-1]
    ssd_net = build_ssd(config.model, config.ssd_params, image_size,
                        config.num_classes, config)

    weights_path = config.get('weights')
    if weights_path:
        # restricted_pickle_module limits what the checkpoint may unpickle.
        checkpoint = torch.load(weights_path,
                                map_location='cpu',
                                pickle_module=restricted_pickle_module)
        load_state(ssd_net, checkpoint["state_dict"])

    ssd_net.to(config.device)
    model_state_dict, compression_state = extract_model_and_compression_states(
        resuming_checkpoint)
    compression_ctrl, compressed_model = create_compressed_model(
        ssd_net, config.nncf_config, compression_state)
    if model_state_dict is not None:
        load_state(compressed_model, model_state_dict, is_resume=True)
    compressed_model, _ = prepare_model_for_execution(compressed_model, config)

    compressed_model.train()
    return compression_ctrl, compressed_model
# Example #9
def is_pretrained_model_requested(config: SampleConfig) -> bool:
    """Return whether pretrained weights should be used.

    Explicit ``weights`` in the config take precedence and disable the
    pretrained path; otherwise ``pretrained`` is consulted (default: True).
    """
    if config.get('weights') is not None:
        return False
    return config.get('pretrained', True)