Example #1
def export_check(model_name: str, model_arg: dict, export_arg: dict, suffix: str, predictor_args: dict = None, example_image=None):
    # avoid the shared-mutable-default pitfall: this dict is mutated below
    if predictor_args is None:
        predictor_args = {}
    output_directory = Path(test_output_directory)
    output_directory.mkdir(exist_ok=True, parents=True)
    model_config = create_model_cfg(model_name, model_arg)
    model_components = create_model(model_config)
    ## TODO : remove
    if model_name in tmp_output_format:
        predictor_args['metainfo'] = tmp_output_format[model_name]
    predictor = create_predictor(
        model_components=model_components,
        **predictor_args
    ).eval()
    experiment_name = get_test_experiment_name(model_name, model_arg.network_args.backbone, suffix)
    image_size = model_config.preprocess_args.input_size
    exporter = create_exporter(
        config=export_arg,
        experiment_name=experiment_name,
        image_size=image_size,
        output_directory=output_directory
    )
    ## TODO : read export error msg
    result = exporter(predictor, example_image_path=example_image)
    torch.save(predictor.model.state_dict(), output_directory / '{}.pth'.format(experiment_name))
    del predictor, model_components
    return result
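## For orientation: a hypothetical invocation of export_check. The model name,
## backbone, and ONNX exporter config below are illustrative assumptions, not
## values taken from this file.
from easydict import EasyDict

def example_export_check_usage():
    model_arg = EasyDict({'network_args': {'backbone': 'resnet18'}})
    result = export_check(
        model_name='detection_model',
        model_arg=model_arg,
        export_arg=EasyDict({'module': 'onnx'}),
        suffix='onnx',
    )
    assert result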
def test_exporter(model_name, image, backbone="resnet18", remove_output=True):
    args = model_argmap[model_name]
    args.network_args.backbone = backbone
    model = create_model_components(model_name,
                                    args['preprocess_args'],
                                    args['network_args'],
                                    loss_args=args['loss_args'],
                                    postprocess_args=args['postprocess_args'])
    predictor = create_predictor(model)
    input_size = args['preprocess_args']['input_size']
    output_path = os.path.join(output_dir, '{}_{}.pt'.format(model_name, backbone))
    output_format = predictor.output_format
    additional_inputs = None
    if hasattr(predictor.postprocess, 'additional_inputs'):
        additional_inputs = predictor.postprocess.additional_inputs

    print(" >> Exporting...")
    exporter = TorchScriptExporter(output_path, image_size=input_size, check_tolerance=1e-6)
    ok = exporter(predictor, example_image_path=image)
    assert ok and os.path.exists(output_path)
    del model, predictor, exporter

    model = torch.jit.load(output_path)
    for name, value in output_format.items():
        assert hasattr(model, name + '_axis')
        assert hasattr(model, name + '_indices')
        assert getattr(model, name + '_axis') == torch.tensor(value['axis'])
        assert all(getattr(model, name + '_indices') == torch.tensor(value['indices']))

    inputs = (('input', (1, input_size, input_size, 3)),)
    x = torch.randint(0, 256, (1, input_size, input_size, 3))
    run_inputs, run_inputs_kwargs = [x], {}
    if additional_inputs:
        inputs += additional_inputs
    for n, (name, shape) in enumerate(inputs):
        assert hasattr(model, name + '_input_shape')
        assert hasattr(model, name + '_input_pos')
        assert getattr(model, name + '_input_shape').equal(torch.tensor(shape))
        assert getattr(model, name + '_input_pos').equal(torch.tensor(n))
        if name != 'input':
            run_inputs.append(torch.zeros(*shape))
            run_inputs_kwargs[name] = torch.zeros(*shape)
    shape_name = [name.replace('_input_shape', '') for name, _ in model.named_buffers(recurse=False)
                  if name.endswith('_input_shape')]
    pos_name = [name.replace('_input_pos', '') for name, _ in model.named_buffers(recurse=False)
                if name.endswith('_input_pos')]
    assert sorted(shape_name) == sorted(pos_name)

    print(" >> Evaluating...")
    with torch.no_grad():
        out_eval = model(*run_inputs)
    assert len(out_eval) == 1 ## single batch
    assert out_eval[0].shape[-1] == sum(len(v['indices']) for v in output_format.values()) # number of elements

    ## TODO: check using keyword arguments, currently unsupported
    # out_eval_kwargs = model(x, **run_inputs_kwargs)
    # assert out_eval_kwargs.equal(out_eval)

    if remove_output:
        os.remove(output_path)
    del model, out_eval
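The assertions in test_exporter imply a convention: the exporter registers output
metadata as module buffers named `<field>_axis` / `<field>_indices`, and input
metadata as `<field>_input_shape` / `<field>_input_pos`. A minimal sketch of how
such buffers could be attached before scripting (an assumption about the
mechanism, not the exporter's actual code):

import torch
import torch.nn as nn

class MetadataWrapper(nn.Module):
    """Illustrative wrapper registering the buffers the test inspects."""
    def __init__(self, model, output_format, input_size):
        super().__init__()
        self.model = model
        # one axis/indices buffer pair per output field
        for name, fmt in output_format.items():
            self.register_buffer(name + '_axis', torch.tensor(fmt['axis']))
            self.register_buffer(name + '_indices', torch.tensor(fmt['indices']))
        # shape/position buffers for the primary image input
        self.register_buffer('input_input_shape',
                             torch.tensor((1, input_size, input_size, 3)))
        self.register_buffer('input_input_pos', torch.tensor(0))

    def forward(self, x):
        return self.model(x)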
Example #3
    def __init__(self,
                 config: EasyDict,
                 weights: Union[str, Path, None] = None):
        """Class initialization

        Args:
            config (EasyDict): dictionary parsed from Vortex experiment file
            weights (Union[str,Path,None], optional): path to selected Vortex model's weight. If set to None, it will \
                                                      assume that final model weights exist in **experiment directory**. \
                                                      Defaults to None.
        
        Example:
            ```python
            from vortex.utils.parser import load_config
            from vortex.core.pipelines import GraphExportPipeline
            
            # Parse config
            config = load_config('experiments/config/example.yml')
            graph_exporter = GraphExportPipeline(config=config,
                                                 weights='experiments/outputs/example/example.pth')
            ```
        """

        # Configure output directory
        self.experiment_directory, _ = check_and_create_output_dir(config)
        self.experiment_name = config.experiment_name

        # Initialize Pytorch model
        if weights is None:
            state_dict = self.experiment_directory / '{}.pth'.format(
                self.experiment_name)
        else:
            state_dict = weights
        model_components = create_model(config.model,
                                        state_dict=state_dict,
                                        stage='validate')
        model_components.network = model_components.network.eval()
        self.predictor = create_predictor(model_components).eval()
        self.image_size = config.model.preprocess_args.input_size

        # Initialize dataset train to get class_names
        dataset = create_dataset(
            config.dataset,
            preprocess_config=config.model.preprocess_args,
            stage='train')
        self.class_names = dataset.dataset.class_names if hasattr(
            dataset.dataset, 'class_names') else None

        # Initialize export config
        self.export_configs = (config.exporter if isinstance(config.exporter, list)
                               else [config.exporter])
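The last statement normalizes `config.exporter` so downstream code can always
iterate over a list. The same idiom, restated as a standalone sketch with
placeholder configs:

def normalize_exporter_config(exporter_cfg):
    # Accept either a single exporter config or a list of them,
    # always returning a list for uniform iteration downstream.
    return exporter_cfg if isinstance(exporter_cfg, list) else [exporter_cfg]

assert normalize_exporter_config({'module': 'onnx'}) == [{'module': 'onnx'}]
assert normalize_exporter_config([{'module': 'onnx'}]) == [{'module': 'onnx'}]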
Example #4
def test_predictor(task):
    config_path = os.path.join(proj_path, "tests", "config",
                               "test_" + task + ".yml")
    config = load_config(config_path)
    check_result = check_config(config, experiment_type='train')
    assert check_result.valid, "config file %s for task %s is not valid, "\
        "result:\n%s" % (config_path, task, str(check_result))

    args = {
        'model_name': config.model.name,
        'preprocess_args': config.model.preprocess_args,
        'network_args': config.model.network_args,
        'loss_args': config.model.loss_args,
        'postprocess_args': config.model.postprocess_args,
        'stage': 'train'
    }
    model_components = create_model_components(**args)
    predictor = create_predictor(model_components)
    assert not predictor.training
    assert hasattr(predictor, "output_format")
    assert predictor.model.task == task

    s = config.model.preprocess_args.input_size
    x = torch.randint(0, 256, size=(1, s, s, 3))
    args = {}
    if task == 'detection':
        args["score_threshold"] = torch.tensor(
            [config.trainer.validation.args["score_threshold"]],
            dtype=torch.float32)
        args["iou_threshold"] = torch.tensor(
            [config.trainer.validation.args["iou_threshold"]],
            dtype=torch.float32)

    with torch.no_grad():
        out = predictor(x, **args)
    out_np = np.asarray(out)
    if out_np.size != 0:
        result = get_prediction_results(out_np, predictor.output_format)[0]
        for name, val in result.items():
            indices = predictor.output_format[name]['indices']
            assert val.shape[-1] == len(indices), f"output_format for {name} could not be "\
                f"retrieved properly, size mismatch: expect {len(indices)} got {val.shape[-1]}"

    num_elem = sum(len(f["indices"]) for f in predictor.output_format.values())
    assert out[0].size(-1) == num_elem, "number of elements in output of predictor is not "\
        "the same as described in output_format; expect %s got %s" % (num_elem, out[0].size(-1))
def export_model(model_name):
    args = model_argmap[model_name]
    model = create_model_components(model_name,
                                    args['preprocess_args'],
                                    args['network_args'],
                                    loss_args=args['loss_args'],
                                    postprocess_args=args['postprocess_args'])
    predictor = create_predictor(model)
    input_size = args['preprocess_args']['input_size']

    output_path = os.path.join(output_dir, '{}.pt'.format(model_name))
    exporter = TorchScriptExporter(output_path,
                                   image_size=input_size,
                                   check_tolerance=1e-6)
    result = exporter(predictor)
    assert result and os.path.exists(output_path)
    return output_path
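A hedged follow-up: the TorchScript artifact returned by export_model can be
loaded and exercised the same way test_exporter does above. The model name and
input size here are assumptions, not values from this file:

import torch

def check_exported_artifact():
    path = export_model('softmax')   # model name is an assumption
    model = torch.jit.load(path)
    input_size = 224                 # assumed; read from model_argmap in practice
    x = torch.randint(0, 256, (1, input_size, input_size, 3))
    with torch.no_grad():
        out = model(x)
    return out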
Example #6
def _(model_components,
      dataset,
      validation_args,
      predictor_args=None,
      device='cpu'):
    if isinstance(device, str):
        device = torch.device(device)
    elif not isinstance(device, torch.device):
        raise RuntimeError("Unknown data type for device; expected `torch.device` " \
            "or `str`, got %s" % type(device))
    if predictor_args is None:
        predictor_args = {}
    predictor = create_predictor(model_components, **predictor_args).to(device)

    task = model_components.network.task
    return create_validator_instance(task=task,
                                     predictor=predictor,
                                     dataset=dataset,
                                     **validation_args)
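The device check above is self-contained enough to restate as a helper; this is
a sketch of the same validation, not code from the repository:

import torch

def as_device(device):
    # Same normalization as above: accept str or torch.device, reject the rest.
    if isinstance(device, str):
        return torch.device(device)
    if isinstance(device, torch.device):
        return device
    raise RuntimeError("Unknown data type for device; expected `torch.device` "
                       "or `str`, got %s" % type(device))

assert as_device('cpu') == torch.device('cpu')
assert as_device(torch.device('cpu')) == torch.device('cpu')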
Example #7
def eval_check(runtime: str, model_name: str, model_arg: dict, export_arg: dict, suffix: str, predictor_args: dict = None):
    # avoid the shared-mutable-default pitfall: this dict is mutated below
    if predictor_args is None:
        predictor_args = {}
    ## torch predictor
    output_directory = Path(test_output_directory)
    output_directory.mkdir(exist_ok=True, parents=True)
    model_config = create_model_cfg(model_name, model_arg)
    model_components = create_model(model_config)
    experiment_name = get_test_experiment_name(model_name, model_arg.network_args.backbone, suffix)
    onnx_model_path = output_directory / '{}.onnx'.format(experiment_name)
    pth_path = output_directory / '{}.pth'.format(experiment_name)
    if not (onnx_model_path.exists() and pth_path.exists()):
        return EvalResult(Status.ERROR, msg='file not found, export might have failed')
    model_components.network.load_state_dict(
        torch.load(pth_path)
    )
    if model_name in tmp_output_format:
        predictor_args['metainfo'] = tmp_output_format[model_name]
    predictor = create_predictor(
        model_components=model_components,
        **predictor_args
    ).eval()
    image_size = model_config.preprocess_args.input_size
    ## onnx model
    try:
        ## TODO : check for fallback
        onnx_model = create_runtime_model(str(onnx_model_path), runtime)
    except Exception as e:
        print(e)
        return EvalResult(Status.ERROR, msg='RuntimeError: {}'.format(str(e)))
    ## predict check
    input_test = (np.random.rand(1,image_size,image_size,3) * 255).astype(np.uint8)
    ## TODO : read additional input from predictor or onnx input spec
    additional_args = dict(
        score_threshold=0.0,
        iou_threshold=1.0,
    )
    torch_results = torch_predict(predictor, input_test, **additional_args)
    onnx_results = onnx_predict(onnx_model, input_test, **additional_args)
    ok = len(torch_results) == len(onnx_results)
    # print("len(torch_results) == len(onnx_results)", len(torch_results) == len(onnx_results))
    status = EvalResult(status=ok, msg="len(torch_results) != len(onnx_results)" if not ok else "")
    for torch_result, onnx_result in zip(torch_results, onnx_results):
        if not status :
            break
        ok = len(torch_result.keys()) == len(onnx_result.keys())
        # print("len(torch_result.keys()) == len(onnx_result.keys())", len(torch_result.keys()) == len(onnx_result.keys()))
        status = EvalResult(status=ok, msg="len(torch_result.keys()) != len(onnx_result.keys())" if not ok else "")
        if not status :
            break
        ok = all(key in onnx_result.keys() for key in torch_result.keys())
        # print("all(key in onnx_result.keys() for key in torch_result.keys())", all(key in onnx_result.keys() for key in torch_result.keys()))
        status = EvalResult(status=ok, msg="not all(key in onnx_result.keys() for key in torch_result.keys())" if not ok else "")
        if not status :
            break
        ok = all(
            isclose(onnx_value, torch_value, **isclose_config)
            for key in onnx_result.keys()
            for onnx_value, torch_value in zip(onnx_result[key].flatten(), torch_result[key].flatten())
        )
        ## TODO : collect diff across batch
        diff = sum(
            (abs(onnx_value - torch_value) / len(onnx_result[key].flatten()))
            if len(onnx_result[key].flatten()) else 0
            for key in onnx_result.keys()
            for onnx_value, torch_value in zip(onnx_result[key].flatten(), torch_result[key].flatten())
        )
        s = Status.SUCCESS if ok else Status.FAILED
        status = EvalResult(status=s, diff=diff, msg="isclose failed" if not ok else "")
        if not status :
            break
    del onnx_model, predictor, onnx_results, torch_results, input_test, model_components
    return status
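The TODO about collecting diffs hints at a vectorized alternative to the
per-scalar isclose loop. A minimal sketch, assuming both results are dicts of
equally shaped numpy arrays and a hypothetical isclose_config of tolerances:

import numpy as np

isclose_config = dict(rtol=1e-4, atol=1e-6)  # tolerance values are assumptions

def results_allclose(torch_result, onnx_result):
    # Field-by-field vectorized comparison of two {name: ndarray} dicts.
    if torch_result.keys() != onnx_result.keys():
        return False
    return all(
        np.allclose(onnx_result[key], torch_result[key], **isclose_config)
        for key in torch_result
    )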
Example #8
    def __init__(
        self,
        config: EasyDict,
        weights: Union[str, Path, None] = None,
        device: Union[str, None] = None,
    ):
        """Class initialization

        Args:
            config (EasyDict): dictionary parsed from Vortex experiment file
            weights (Union[str,Path,None], optional): path to selected Vortex model's weight. If set to None, it will \
                                                      assume that final model weights exist in **experiment directory**. \
                                                      Defaults to None.
            device (Union[str,None], optional): selected device for model's computation. If None, it will use the device \
                                                described in **experiment file**. Defaults to None.

        Raises:
            FileNotFoundError: raise error if selected 'weights' file is not found

        Example:
            ```python
            from vortex.core.pipelines import PytorchPredictionPipeline
            from vortex.utils.parser import load_config

            # Parse config
            config_path = 'experiments/config/example.yml'
            config = load_config(config_path)
            weights_file = 'experiments/outputs/example/example.pth'
            device = 'cuda'

            vortex_predictor = PytorchPredictionPipeline(config=config,
                                                         weights=weights_file,
                                                         device=device)
            ```
        """

        self.config = config
        self.output_file_prefix = 'prediction'

        # Configure experiment directory
        experiment_directory, _ = check_and_create_output_dir(config)

        # Initialize dataset to get class_names
        dataset = create_dataset(
            config.dataset,
            stage='train',
            preprocess_config=config.model.preprocess_args)
        self.class_names = dataset.dataset.class_names if hasattr(
            dataset.dataset, 'class_names') else None

        # Set compute device
        if device is None:
            device = config.trainer.device
        device = torch.device(device)

        # Initialize model
        if weights is None:
            filename = Path(experiment_directory) / ('%s.pth' %
                                                     config.experiment_name)
        else:
            filename = Path(weights)
            if not filename.exists():
                raise FileNotFoundError(
                    'Selected weights file {} is not found'.format(filename))

        model_components = create_model(config.model,
                                        state_dict=str(filename),
                                        stage='validate')
        model_components.network = model_components.network.to(device)
        self.predictor = create_predictor(model_components)
        self.predictor.to(device)

        # Configure input size for image
        self.input_size = config.model.preprocess_args.input_size
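The weight-resolution branch above can be read as a small pure function. A
restating sketch with placeholder paths, mirroring the None fallback and the
fail-fast FileNotFoundError:

from pathlib import Path

def resolve_weights(experiment_directory, experiment_name, weights=None):
    # None means "use the final checkpoint in the experiment directory";
    # an explicit path must exist, or we fail fast as __init__ does above.
    if weights is None:
        return Path(experiment_directory) / ('%s.pth' % experiment_name)
    filename = Path(weights)
    if not filename.exists():
        raise FileNotFoundError(
            'Selected weights file {} is not found'.format(filename))
    return filename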