Code example #1
File: __init__.py  Project: zhenbinwu/hls4ml
def convert_from_yaml_config(yamlConfig):
    model = None
    if 'OnnxModel' in yamlConfig:
        if __onnx_enabled__:
            model = onnx_to_hls(yamlConfig)
        else:
            raise Exception("ONNX not found. Please install ONNX.")
    elif 'PytorchModel' in yamlConfig:
        if __pytorch_enabled__:
            model = pytorch_to_hls(yamlConfig)
        else:
            raise Exception("PyTorch not found. Please install PyTorch.")
    elif 'TensorFlowModel' in yamlConfig:
        if __tensorflow_enabled__:
            model = tf_to_hls(yamlConfig)
        else:
            raise Exception("TensorFlow not found. Please install TensorFlow.")
    else:
        model = keras_to_hls(yamlConfig)

    return model
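
For context, flags such as __onnx_enabled__, __pytorch_enabled__, and __tensorflow_enabled__ are typically set once at import time with a guarded import. The following is a minimal sketch of that pattern, a hypothetical reconstruction rather than the exact hls4ml module code:

# Hypothetical sketch of the import-time availability flags used above.
try:
    import onnx  # noqa: F401  # only probing for availability
    __onnx_enabled__ = True
except ImportError:
    __onnx_enabled__ = False

try:
    import torch  # noqa: F401
    __pytorch_enabled__ = True
except ImportError:
    __pytorch_enabled__ = False

With flags like these, the dispatch above can fail fast with an informative message instead of raising an ImportError deep inside the converter.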
Code example #2
def convert_from_config(config):
    """Convert to hls4ml model based on the provided configuration.

    Arguments:
        config: A string containing the path to the YAML configuration file on
            the filesystem or a dict containig the parsed configuration.

    Returns:
        HLSModel: hls4ml model.
    """

    if isinstance(config, str):
        yamlConfig = parse_yaml_config(config)
    else:
        yamlConfig = config

    model = None
    if 'OnnxModel' in yamlConfig:
        if __onnx_enabled__:
            model = onnx_to_hls(yamlConfig)
        else:
            raise Exception("ONNX not found. Please install ONNX.")
    elif 'PytorchModel' in yamlConfig:
        if __pytorch_enabled__:
            model = pytorch_to_hls(yamlConfig)
        else:
            raise Exception("PyTorch not found. Please install PyTorch.")
    elif 'TensorFlowModel' in yamlConfig:
        if __tensorflow_enabled__:
            model = tf_to_hls(yamlConfig)
        else:
            raise Exception("TensorFlow not found. Please install TensorFlow.")
    else:
        model = keras_to_hls(yamlConfig)

    return model
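
A hedged usage sketch for convert_from_config follows. The dict mirrors the dispatch keys the function checks ('OnnxModel', 'PytorchModel', 'TensorFlowModel'); the KerasJson/KerasH5 entries, file paths, and HLSConfig values are illustrative assumptions about what the default keras_to_hls branch expects, not confirmed by this snippet:

import hls4ml

# Illustrative config dict; all key names other than the dispatch keys
# ('OnnxModel', 'PytorchModel', 'TensorFlowModel') are assumptions here.
config = {
    'KerasJson': 'model/keras_model.json',   # hypothetical paths
    'KerasH5': 'model/keras_weights.h5',
    'OutputDir': 'my-hls-test',
    'ProjectName': 'myproject',
    'HLSConfig': {'Model': {'Precision': 'ap_fixed<16,6>', 'ReuseFactor': 1}},
}

# No 'OnnxModel'/'PytorchModel'/'TensorFlowModel' key is present, so the
# dispatch falls through to the default keras_to_hls branch.
hls_model = hls4ml.converters.convert_from_config(config)

Passing a path string instead of a dict routes through parse_yaml_config first, so the same function covers both the YAML-file and programmatic workflows.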
Code example #3
File: __init__.py  Project: thaarres/hls4ml
def convert_from_onnx_model(model,
                            output_dir='my-hls-test',
                            project_name='myproject',
                            input_data_tb=None,
                            output_data_tb=None,
                            backend='Vivado',
                            board=None,
                            part=None,
                            clock_period=5,
                            io_type='io_parallel',
                            hls_config={},
                            **kwargs):
    """
    
    Convert an ONNX model to a hls model.
    
    Parameters
    ----------
    model : ONNX model object.
        Model to be converted to hls model object.
    output_dir (str, optional): Output directory of the generated HLS
        project. Defaults to 'my-hls-test'.
    project_name (str, optional): Name of the HLS project.
        Defaults to 'myproject'.
    input_data_tb (str, optional): String representing the path of input data in .npy or .dat format that will be
        used during csim and cosim.
    output_data_tb (str, optional): String representing the path of output data in .npy or .dat format that will be
        used during csim and cosim.
    backend (str, optional): Name of the backend to use, e.g., 'Vivado'
        or 'Quartus'.
    board (str, optional): One of target boards specified in `supported_board.json` file. If set to `None` a default
        device of a backend will be used. See documentation of the backend used.
    part (str, optional): The FPGA part. If set to `None` a default part of a backend will be used.
        See documentation of the backend used. Note that if `board` is specified, the part associated to that board
        will overwrite any part passed as a parameter.
    clock_period (int, optional): Clock period of the design.
        Defaults to 5.
    io_type (str, optional): Type of implementation used. One of
        'io_parallel' or 'io_serial'. Defaults to 'io_parallel'.
    hls_config (dict, optional): The HLS config.
    kwargs** (dict, optional): Additional parameters that will be used to create the config of the specified backend
        
    Returns
    -------
    hls_model : hls4ml model object.
        
    See Also
    --------
    hls4ml.convert_from_keras_model, hls4ml.convert_from_pytorch_model
    
    Examples
    --------
    >>> import hls4ml
    >>> config = hls4ml.utils.config_from_onnx_model(model, granularity='model')
    >>> hls_model = hls4ml.converters.convert_from_onnx_model(model, hls_config=config)
    """

    config = create_config(output_dir=output_dir,
                           project_name=project_name,
                           board=board,
                           part=part,
                           clock_period=clock_period,
                           io_type=io_type,
                           backend=backend,
                           **kwargs)

    config['OnnxModel'] = model
    config['InputData'] = input_data_tb
    config['OutputPredictions'] = output_data_tb
    config['HLSConfig'] = {}

    model_config = hls_config.get('Model', None)
    config['HLSConfig']['Model'] = _check_model_config(model_config)

    _check_hls_config(config, hls_config)

    return onnx_to_hls(config)
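
Extending the docstring's example, here is a hedged end-to-end sketch. The ONNX file name, the specific FPGA part string, and the commented-out compile/build calls are assumptions about the surrounding hls4ml API rather than anything shown in this snippet:

import onnx
import hls4ml

onnx_model = onnx.load('model.onnx')  # hypothetical model file
config = hls4ml.utils.config_from_onnx_model(onnx_model, granularity='model')

hls_model = hls4ml.converters.convert_from_onnx_model(
    onnx_model,
    output_dir='my-hls-test',
    backend='Vivado',
    part='xcku115-flvb2104-2-i',  # example Xilinx part; backend default if None
    clock_period=5,
    io_type='io_parallel',
    hls_config=config,
)

# Assumed downstream calls on the returned hls4ml model object:
# hls_model.compile()
# hls_model.build(csim=True)

Note that if both board and part were given, the part associated with the board would override the explicit part argument, per the docstring above.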