Example #1
# Hedged import sketch: onnx.optimizer was removed in onnx >= 1.9, so this
# code targets an older onnx; the hls4ml-internal helpers used below
# (ONNXDataReader, sanitize_layer_name, get_onnx_attribute, get_input_shape,
# compute_pads_1d/2d, HLSModel, optimize_model) come from hls4ml modules whose
# paths vary by version.
import math

import numpy as np
from onnx import ModelProto, optimizer, shape_inference


def onnx_to_hls(yamlConfig):

    ######################
    ##  Do translation
    ######################

    #This is a list of dictionaries to hold all the layer info we need to generate HLS
    layer_list = []

    #Extract model architecture
    model = ModelProto()
    with open(yamlConfig['OnnxModel'], 'rb') as fid:
        model.ParseFromString(fid.read())

    #Define supported layers
    core_operations = ['Gemm', 'BatchNormalization', 'Conv']
    transform_operations = [
        'Squeeze', 'Unsqueeze', 'Transpose', 'Flatten', 'Identity', 'Reshape'
    ]
    pool_operations = ['AveragePool', 'MaxPool']
    merge_operations = [
        'Add', 'Sub', 'Mul', 'Average', 'Max', 'Min', 'Concat', 'Sum'
    ]
    activation_operations = [
        'Relu', 'Tanh', 'Sigmoid', 'LeakyRelu', 'ThresholdedRelu',
        'HardSigmoid', 'Elu', 'Selu', 'PRelu', 'Softmax', 'Softsign',
        'Softplus'
    ]
    supported_operations = core_operations + transform_operations + pool_operations + merge_operations + activation_operations

    operation_map = {
        'Gemm': 'Dense',
        'Relu': 'Activation',
        'Tanh': 'Activation',
        'Sigmoid': 'Activation',
        'LeakyRelu': 'LeakyReLU',
        'ThresholdedRelu': 'ThresholdedReLU',
        'HardSigmoid': 'Activation',
        'Elu': 'ELU',
        'Selu': 'Activation',
        'PRelu': 'PReLU',
        'Softmax': 'Activation',
        'Softsign': 'Activation',
        'Softplus': 'Activation',
        'Sum': 'Add',
        'Sub': 'Subtract',
        'Max': 'Maximum',
        'Min': 'Minimum',
        'Mul': 'Multiply',
        'Concat': 'Concatenate'
    }

    #Define layers to skip for conversion to HLS
    skip_layers = [
        'Squeeze', 'Unsqueeze', 'Dropout', 'Identity', 'Flatten', 'Transpose',
        'Reshape'
    ]
    #Map inputs of skipped layers
    inputs_map = {}
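    # e.g. after skipping Flatten (input 'conv_out' -> output 'flat') and then
    # Dropout ('flat' -> 'drop'), inputs_map == {'flat': 'conv_out',
    # 'drop': 'conv_out'}, so the next real layer is rewired straight to
    # 'conv_out'. (Tensor names here are illustrative.)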

    passes = [
        'fuse_transpose_into_gemm', 'fuse_matmul_add_bias_into_gemm',
        'eliminate_nop_transpose', 'fuse_consecutive_transposes'
    ]
    model = shape_inference.infer_shapes(
        model)  # have to infer shapes before optimizing the model
    model = optimizer.optimize(model, passes)
    model = shape_inference.infer_shapes(
        model)  # re-infer shapes after optimizing, since the passes rewrite the graph

    reader = ONNXDataReader(model)

    #Loop through layers
    layer_counter = 0
    all_inputs = [x.name for x in model.graph.input]
    all_initializers = [x.name for x in model.graph.initializer]
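    # In some exported models, model.graph.input lists the weight initializers
    # alongside the true network inputs, so the real inputs are the graph
    # inputs that do not appear among the initializers.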
    input_layers = [x for x in all_inputs if x not in all_initializers]
    output_layers = [x.name for x in model.graph.output]

    for i, inp in enumerate(input_layers):
        input_layer = {}
        input_layer['name'] = inp
        input_layer['class_name'] = 'InputLayer'
        inp_shape = next((x.type.tensor_type.shape.dim
                          for x in model.graph.input if x.name == inp), None)
        input_layer['input_shape'] = [x.dim_value for x in inp_shape]
        if len(input_layer['input_shape']) > 1:
            input_layer['input_shape'][0] = None

        input_layer['outputs'] = [inp]

        sanitize_layer_name(input_layer)
        input_layers[i] = input_layer['name']
        layer_list.append(input_layer)

    # Check for unsupported layer type
    for operation in model.graph.node:
        if operation.op_type not in supported_operations:
            raise Exception('ERROR: Unsupported operation type: {}'.format(
                operation.op_type))

    # Get input shape
    current_shape = [
        d.dim_value for d in model.graph.input[0].type.tensor_type.shape.dim
    ]
    print('Input shape:', current_shape)

    print('Topology:')
    for operation in model.graph.node:
        if operation.op_type == 'Flatten':
            current_shape = [current_shape[0], np.prod(current_shape[1:])]
        if operation.op_type in skip_layers:
            #Currently supported skipped layers have only one input and output
            #Skipped layers can follow each other (e.g., Dropout -> Flatten)
            input_name = inputs_map.get(operation.input[0], operation.input[0])
            output_name = operation.output[0]
            inputs_map[output_name] = input_name
            continue

        if operation.op_type in supported_operations:
            layer_counter = layer_counter + 1

        #Dictionary to fill in and append to layer_list
        layer = {}

        #Extract name for finding weights and biases
        if operation.name:
            layer['name'] = operation.name
        else:
            layer['name'] = operation.op_type + str(layer_counter)
        layer['class_name'] = operation_map.get(operation.op_type,
                                                operation.op_type)
        layer['inputs'] = [
            inputs_map.get(operation.input[0], operation.input[0])
        ]
        layer['outputs'] = [x for x in operation.output]

        #Extract type of activation
        if operation.op_type in activation_operations:
            layer['activation'] = operation.op_type.lower()
            if layer_list[-1]['class_name'] != 'BatchNormalization':
                layer_list[-1]['activation'] = operation.op_type.lower()

        #Get number of inputs and outputs
        #(We take it from the weights to avoid dealing with InputLayer and Flatten details)
        if layer['class_name'] == 'Dense':
            current_shape = get_input_shape(model, operation)
            layer['n_in'] = next(
                (x.type.tensor_type.shape.dim[-1].dim_value
                 for x in model.graph.input if x.name == operation.input[0]),
                None)
            layer['n_out'] = next((x.type.tensor_type.shape.dim[-1].dim_value
                                   for x in model.graph.value_info
                                   if x.name == operation.output[0]), None)
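            # ONNX Gemm computes Y = alpha * A * B + beta * C; transB == 1
            # means the weight matrix B is stored transposed (as e.g. PyTorch
            # exports it), so the flag is forwarded to the data reader, which
            # can undo the transpose when loading the weights.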
            tran_weight = get_onnx_attribute(operation, 'transB', 0)
            reader.add_input(layer['name'], operation.input, tran_weight)

            current_shape = [current_shape[0], layer['n_out']]
        elif layer['class_name'] == 'Conv':
            current_shape = get_input_shape(model, operation)
            strides = get_onnx_attribute(operation, 'strides')
            kernel_shape = get_onnx_attribute(operation, 'kernel_shape')

            if len(current_shape) == 3:  # Conv1D
                layer['class_name'] = 'Conv1D'
                reader.add_input(layer['name'], operation.input)

                layer['n_in'] = current_shape[2]
                layer['filt_width'] = kernel_shape[0]
                layer['n_chan'] = current_shape[1]
                layer['n_filt'] = next(
                    (x.type.tensor_type.shape.dim[1].dim_value
                     for x in model.graph.value_info
                     if x.name == operation.output[0]), None)
                layer['stride'] = strides[0]
                pads = compute_pads_1d(operation, layer)

                layer['pad_left'] = pads[0]
                layer['pad_right'] = pads[1]
                if all(x == 0
                       for x in pads):  # No padding, i.e., 'VALID' padding
                    layer['n_out'] = int(
                        math.ceil(
                            float(layer['n_in'] - layer['filt_width'] + 1) /
                            float(layer['stride'])))
                else:
                    layer['n_out'] = int(
                        math.ceil(
                            float(layer['n_in']) / float(layer['stride'])))

                layer['data_format'] = 'channels_first'

                current_shape = [
                    current_shape[0], layer['n_filt'], layer['n_out']
                ]
            elif len(current_shape) == 4:  # Conv2D
                layer['class_name'] = 'Conv2D'
                reader.add_input(layer['name'],
                                 operation.input,
                                 transpose=True,
                                 perm=[2, 3, 1, 0])

                layer['in_height'] = current_shape[2]
                layer['in_width'] = current_shape[3]
                layer['filt_height'] = kernel_shape[0]
                layer['filt_width'] = kernel_shape[1]
                layer['n_chan'] = current_shape[1]
                layer['n_filt'] = next(
                    (x.type.tensor_type.shape.dim[1].dim_value
                     for x in model.graph.value_info
                     if x.name == operation.output[0]), None)
                layer['stride_height'] = strides[0]
                layer['stride_width'] = strides[1]
                pads = compute_pads_2d(operation, layer)

                layer['pad_top'] = pads[0]
                layer['pad_bottom'] = pads[2]
                layer['pad_left'] = pads[1]
                layer['pad_right'] = pads[3]

                if all(
                        x == 0 for x in pads
                ):  # No padding, i.e., 'VALID' padding in Keras/Tensorflow
                    layer['out_width'] = int(
                        math.ceil(
                            float(layer['in_width'] - layer['filt_width'] + 1)
                            / float(layer['stride_width'])))
                    layer['out_height'] = int(
                        math.ceil(
                            float(layer['in_height'] - layer['filt_height'] +
                                  1) / float(layer['stride_height'])))
                else:
                    layer['out_height'] = int(
                        math.ceil(
                            float(layer['in_height']) /
                            float(layer['stride_height'])))
                    layer['out_width'] = int(
                        math.ceil(
                            float(layer['in_width']) /
                            float(layer['stride_width'])))

                current_shape = [
                    current_shape[0], layer['n_filt'], layer['out_height'],
                    layer['out_width']
                ]
        elif layer['class_name'] == 'BatchNormalization':
            layer['epsilon'] = get_onnx_attribute(operation, 'epsilon')
            layer['momentum'] = get_onnx_attribute(operation, 'momentum')

            reader.add_input(layer['name'], operation.input)

            in_size = 1
            for dim in current_shape[1:]:
                in_size *= dim
            layer['n_in'] = in_size
            layer['n_out'] = layer['n_in']
            if len(current_shape) == 2:
                layer['n_filt'] = -1
            else:
                layer['n_filt'] = current_shape[1]
        elif layer['class_name'] in pool_operations:
            current_shape = get_input_shape(model, operation)
            info = layer['class_name'].replace('Pool', '')
            strides = get_onnx_attribute(operation, 'strides')
            kernel_shape = get_onnx_attribute(operation, 'kernel_shape')
            if len(current_shape) == 3:  # 1D
                layer['class_name'] = info + 'Pooling1D'
                #ONNX tensors are channels-first: (N, C, W)
                layer['n_filt'] = current_shape[1]
                layer['y_in'] = current_shape[2]
                layer['stride'] = strides[0]
                layer['pool_size'] = layer['y_filt'] = kernel_shape[0]
                pads = compute_pads_1d(operation, layer)
                layer['pad_left'] = pads[0]
                layer['pad_right'] = pads[1]

                if all(x == 0
                       for x in pads):  # No padding, i.e., 'VALID' padding
                    layer['n_out'] = int(
                        math.ceil(
                            float(layer['y_in'] - layer['y_filt'] + 1) /
                            float(layer['stride'])))
                else:
                    layer['n_out'] = int(
                        math.ceil(
                            float(layer['y_in']) / float(layer['stride'])))

                current_shape = [
                    current_shape[0], layer['n_filt'], layer['n_out']
                ]
            elif len(current_shape) == 4:  # 2D
                layer['class_name'] = info + 'Pooling2D'

                layer['n_filt'] = current_shape[1]
                layer['in_height'] = current_shape[2]
                layer['in_width'] = current_shape[3]

                layer['stride_height'] = strides[0]
                layer['stride_width'] = strides[1]
                layer['pool_height'] = layer['filt_height'] = kernel_shape[0]
                layer['pool_width'] = layer['filt_width'] = kernel_shape[1]

                pads = compute_pads_2d(operation, layer)
                layer['pad_top'] = pads[0]
                layer['pad_bottom'] = pads[2]
                layer['pad_left'] = pads[1]
                layer['pad_right'] = pads[3]

                if all(
                        x == 0 for x in pads
                ):  # No padding, i.e., 'VALID' padding in Keras/Tensorflow
                    layer['out_width'] = int(
                        math.ceil(
                            float(layer['in_width'] - layer['filt_width'] + 1)
                            / float(layer['stride_width'])))
                    layer['out_height'] = int(
                        math.ceil(
                            float(layer['in_height'] - layer['filt_height'] +
                                  1) / float(layer['stride_height'])))
                else:
                    layer['out_height'] = int(
                        math.ceil(
                            float(layer['in_height']) /
                            float(layer['stride_height'])))
                    layer['out_width'] = int(
                        math.ceil(
                            float(layer['in_width']) /
                            float(layer['stride_width'])))

                layer['n_out'] = layer['out_height'] * layer[
                    'out_width'] * layer['n_filt']
                current_shape = [
                    current_shape[0], layer['n_filt'], layer['out_height'],
                    layer['out_width']
                ]
        elif layer['class_name'] in ['ELU', 'LeakyReLU', 'ThresholdedReLU']:
            layer['activation'] = layer['class_name']
            layer['activ_param'] = get_onnx_attribute(operation, 'alpha', 0.01)
        elif layer['class_name'] == 'PReLU':
            layer['activation'] = layer['class_name']

        elif layer['class_name'] in [
                operation_map.get(op, op) for op in merge_operations
        ]:
            layer['op'] = layer['class_name'].lower()
            if layer['class_name'] == 'Concatenate':
                rank = len(current_shape[1:])
                if rank > 3:
                    raise Exception(
                        'ERROR: Concatenation of tensors with rank > 3 is not yet supported.'
                    )
                layer['op'] = layer['class_name'].lower() + '{}d'.format(rank)
                layer['axis'] = get_onnx_attribute(operation, 'axis')
            else:
                layer['class_name'] = 'Merge'
            layer['inputs'] = [inputs_map.get(x, x) for x in operation.input]
            if len(layer['inputs']) > 2:
                raise Exception(
                    'ERROR: Merging more than two tensors is not yet supported.'
                )

        sanitize_layer_name(layer)
        print('Layer name: {}, layer type: {}, current shape: {}'.format(
            layer['name'], layer['class_name'], current_shape))
        layer_list.append(layer)

    #################
    ## Generate HLS
    #################

    print('Creating HLS model')
    hls_model = HLSModel(yamlConfig, reader, layer_list, input_layers,
                         output_layers)
    optimizers = [
        'eliminate_linear_activation', 'merge_batch_norm_quantized_tanh',
        'quantize_dense_output'
    ]
    optimize_model(hls_model, optimizers)
    return hls_model
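
A minimal usage sketch for the converter above. 'OnnxModel' is the only config
key this snippet demonstrably reads; the other keys are illustrative project
settings whose exact names depend on the hls4ml version:

config = {
    'OnnxModel': 'model.onnx',   # path parsed into ModelProto above
    'OutputDir': 'my-hls-prj',   # illustrative; consumed by HLSModel
    'ProjectName': 'myproject',  # illustrative
}
hls_model = onnx_to_hls(config)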
Example #2
# Requires numpy plus hls4ml-internal helpers (PyTorchFileReader,
# PyTorchModelReader, get_supported_pytorch_layers, layer_handlers, HLSModel);
# module paths vary across hls4ml versions.
import numpy as np


def pytorch_to_hls(config):
    """ Convert Pytorch model to hls model from configuration.
    
    Parameters
    ----------
    config: dict
        pytorch configuration from yaml file or passed through API.
        
    Returns
    -------
    hls_model : hls4ml model object.
    
    Notes
    -----
    Only sequential pytorch models are supported for now.
    """

    #This is a list of dictionaries to hold all the layer info we need to generate HLS
    layer_list = []

    print('Interpreting Model ...')

    reader = PyTorchFileReader(config) if isinstance(
        config['PytorchModel'], str) else PyTorchModelReader(config)
    input_shapes = [list(reader.input_shape)]

    model = reader.torch_model

    #Define layers to skip for conversion to HLS
    skip_layers = ['Dropout', 'Flatten', 'Sequential']

    #All supported layers
    supported_layers = get_supported_pytorch_layers() + skip_layers

    #Map inputs of skipped and split (activation) layers
    inputs_map = {}

    input_layers = None
    output_layers = None

    layer_config = None

    #Output shape tracking
    output_shapes = {}
    output_shape = None

    #Loop through layers
    print('Topology:')
    layer_counter = 0

    #First add input layer
    input_layer = {}
    input_layer['name'] = 'input1'
    input_layer['class_name'] = 'InputLayer'
    input_layer['input_shape'] = input_shapes[0][1:]
    layer_list.insert(0, input_layer)
    print("Input Shape: ", input_shapes)

    for layer_name, pytorch_layer in model.named_modules():

        pytorch_class = pytorch_layer.__class__.__name__

        #First module is the whole model's class
        if pytorch_class == model.__class__.__name__:
            continue

        if pytorch_class not in supported_layers:
            raise Exception('Unsupported layer {}'.format(pytorch_class))

        #If not the first layer then input shape is taken from last layer's output
        if layer_counter != 0:
            input_shapes = [output_shape]  #In case there are multiple inputs

        #Handle skipped layers
        if pytorch_class in skip_layers:
            if pytorch_class == 'Sequential':  #Ignore the Sequential container itself
                continue

            if pytorch_class == 'Flatten':
                output_shapes[layer_name] = [
                    input_shapes[0][0],
                    np.prod(input_shapes[0][1:])
                ]
            else:
                output_shapes[layer_name] = input_shapes[0]
            continue  #Skipped layer adds nothing to layer_list; its output shape is recorded above

        #Increment the layer counter after initial screenings
        if pytorch_class in supported_layers:
            layer_counter += 1

        #Process the layer
        layer, output_shape = layer_handlers[pytorch_class](pytorch_layer,
                                                            layer_name,
                                                            input_shapes,
                                                            reader, config)

        print('Layer name: {}, layer type: {}, input shape: {}'.format(
            layer['name'], layer['class_name'], input_shapes))
        layer_list.append(layer)

        assert output_shape is not None
        output_shapes[layer['name']] = output_shape

    #################
    ## Generate HLS
    #################

    print('Creating HLS model')
    hls_model = HLSModel(config, reader, layer_list)
    return hls_model
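
A minimal usage sketch based on the docstring above: 'PytorchModel' may be a
file path (handled by PyTorchFileReader) or a model object
(PyTorchModelReader); 'InputShape' is an assumed key for how the reader
obtains reader.input_shape:

import torch.nn as nn

seq_model = nn.Sequential(nn.Linear(16, 64), nn.ReLU(), nn.Linear(64, 1))
config = {
    'PytorchModel': seq_model,  # or a path to a saved model file
    'InputShape': [None, 16],   # assumed key; see PyTorchModelReader
}
hls_model = pytorch_to_hls(config)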
Example #3
# Requires json, h5py and numpy plus hls4ml-internal helpers (KerasModelReader,
# KerasFileReader, get_supported_keras_layers, layer_handlers, HLSModel);
# module paths vary across hls4ml versions.
import json

import h5py
import numpy as np


def keras_to_hls(config):

    ######################
    ##  Do translation
    ######################

    #This is a list of dictionaries to hold all the layer info we need to generate HLS
    layer_list = []

    if 'KerasModel' in config:
        # Model instance passed in config from API
        keras_model = config['KerasModel']
        if isinstance(keras_model, str):
            from tensorflow.keras.models import load_model
            keras_model = load_model(keras_model)
        model_arch = json.loads(keras_model.to_json())
        reader = KerasModelReader(keras_model)
    elif 'KerasJson' in config:
        # Extract model architecture from json
        with open(config['KerasJson']) as json_file:
            model_arch = json.load(json_file)
        reader = KerasFileReader(config)
    elif 'KerasH5' in config:
        # Model arch and weights are in H5 file (from model.save() function)
        with h5py.File(config['KerasH5'], mode='r') as h5file:
            # Load the configuration from h5 using json's decode
            model_arch = h5file.attrs.get('model_config')
            if model_arch is None:
                raise ValueError('No model found in config file.')
            else:
                model_arch = json.loads(model_arch.decode('utf-8'))
        reader = KerasFileReader(config)
    else:
        raise ValueError('No model found in config file.')

    # print(model_arch)

    #Define layers to skip for conversion to HLS
    skip_layers = ['Dropout', 'Flatten']
    #All supported layers
    supported_layers = get_supported_keras_layers() + skip_layers

    #Map inputs of skipped and split (activation) layers
    inputs_map = {}

    #Loop through layers
    layer_counter = 0

    input_layers = None
    output_layers = None

    layer_config = None
    if model_arch['class_name'] == 'Sequential':
        print('Interpreting Sequential')
        layer_config = model_arch['config']
        if 'layers' in layer_config: # Newer Keras versions have 'layers' in 'config' key
            layer_config = layer_config['layers']
        # Sequential doesn't have InputLayer in TF < 2.3 (Keras 2.4.0)
        if layer_config[0]['class_name'] != 'InputLayer':
            input_layer = {}
            input_layer['name'] = 'input1'
            input_layer['class_name'] = 'InputLayer'
            input_layer['input_shape'] = layer_config[0]['config']['batch_input_shape'][1:]
            layer_list.append(input_layer)
            print('Input shape:', input_layer['input_shape'])
    elif model_arch['class_name'] in ['Model', 'Functional']: # TF >= 2.3 calls it the 'Functional' API
        print('Interpreting Model')
        layer_config = model_arch['config']['layers']
        input_layers = [ inp[0] for inp in model_arch['config']['input_layers'] ]
        output_layers = [ out[0] for out in model_arch['config']['output_layers'] ]

    # Get input shape and check for unsupported layer type
    for keras_layer in layer_config:
        if keras_layer['class_name'] not in supported_layers:
            raise Exception('ERROR: Unsupported layer type: {}'.format(keras_layer['class_name']))

    output_shapes = {}
    output_shape = None

    print('Topology:')
    for keras_layer in layer_config:
        if 'batch_input_shape' in keras_layer['config']:
            if 'inbound_nodes' in keras_layer and len(keras_layer['inbound_nodes']) > 0:
                input_shapes = [output_shapes[inbound_node[0][0]] for inbound_node in keras_layer['inbound_nodes']]
            else:
                input_shapes = [keras_layer['config']['batch_input_shape']]
        else:
            if 'inbound_nodes' in keras_layer:
                input_shapes = [output_shapes[inbound_node[0][0]] for inbound_node in keras_layer['inbound_nodes']]
            else:
                # Sequential model, so output_shape from the previous layer is still valid
                input_shapes = [output_shape]

        keras_class = keras_layer['class_name']

        if keras_class in skip_layers:
            if 'inbound_nodes' in keras_layer:
                name = keras_layer['config']['name']
                #Currently supported skipped layers have only one input
                parent_input = keras_layer['inbound_nodes'][0][0][0]
                #Skipped layers can follow each other (e.g., Dropout -> Flatten)
                inputs_map[name] = inputs_map.get(parent_input, parent_input)

            if keras_class == 'Flatten':
                output_shapes[keras_layer['config']['name']] = [input_shapes[0][0], np.prod(input_shapes[0][1:])]
            else:
                output_shapes[keras_layer['config']['name']] = input_shapes[0]

            continue

        if keras_class in supported_layers:
            layer_counter = layer_counter + 1

        #Extract inbound nodes
        if 'inbound_nodes' in keras_layer and len(keras_layer['inbound_nodes']) > 0:
            input_names = [ inputs_map.get(inp[0], inp[0]) for inp in keras_layer['inbound_nodes'][0] ]
        else:
            input_names = None

        layer, output_shape = layer_handlers[keras_class](keras_layer, input_names, input_shapes, reader, config)

        print('Layer name: {}, layer type: {}, current shape: {}'.format(layer['name'], layer['class_name'], input_shapes))
        layer_list.append( layer )
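        # Layers carrying a fused activation (e.g. Dense(..., activation='relu'))
        # are split in two below: a standalone activation layer is appended and
        # inputs_map reroutes downstream consumers to it.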
        if 'activation' in layer and layer['class_name'] not in ['Activation', 'LeakyReLU', 'ThresholdedReLU', 'ELU', 'PReLU', 'Softmax']:  # + qkeras_layers
            act_layer = {}
            act_layer['name'] = layer['name'] + '_' + layer['activation']
            act_layer['activation'] = layer['activation']
            if 'activ_param' in layer:
                act_layer['activ_param'] = layer['activ_param']
                act_layer['class_name'] = layer['activation']
            elif layer['activation'] == 'softmax':
                act_layer['class_name'] = 'Softmax'
            else:
                act_layer['class_name'] = 'Activation'
            inputs_map[layer['name']] = act_layer['name']
            if output_layers is not None and layer['name'] in output_layers:
                output_layers = [act_layer['name'] if name == layer['name'] else name for name in output_layers]
            layer_list.append(act_layer)

        assert output_shape is not None

        output_shapes[layer['name']] = output_shape

    #################
    ## Generate HLS
    #################

    print('Creating HLS model')
    hls_model = HLSModel(config, reader, layer_list, input_layers, output_layers)
    return hls_model
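
A minimal usage sketch exercising the three input modes handled above
('KerasModel', 'KerasJson' and 'KerasH5' are the keys the code checks;
project-level settings required by HLSModel are omitted here):

from tensorflow import keras

model = keras.Sequential([
    keras.layers.Dense(8, activation='relu', input_shape=(4,)),
    keras.layers.Dense(1),
])
hls_model = keras_to_hls({'KerasModel': model})
# or: keras_to_hls({'KerasJson': 'model.json'})
# or: keras_to_hls({'KerasH5': 'model.h5'})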
Example #4
# Requires os, numpy and tensorflow (tensor_util lives in
# tensorflow.python.framework), plus hls4ml-internal helpers (TFDataReader,
# HLSModel, optimize_model, _parse_tensor_names, _parse_data_format,
# _compute_pads_2d, _find_graph_outputs); paths vary by version.
import os

import numpy as np
import tensorflow as tf
from tensorflow.python.framework import tensor_util


def tf_to_hls(yamlConfig):

    ######################
    ##  Do translation
    ######################

    #This is a list of dictionaries to hold all the layer info we need to generate HLS
    layer_list = []

    if not os.path.exists(yamlConfig['TensorFlowModel']):
        raise Exception('The specified file does not exist: {}'.format(yamlConfig['TensorFlowModel']))

    graph_def = None
    graph = None

    #Extract model architecture from pb
    try:
        with tf.io.gfile.GFile(yamlConfig['TensorFlowModel'], "rb") as f:
            graph_def = tf.compat.v1.GraphDef()
            graph_def.ParseFromString(f.read())
    except BaseException as e:
        raise Exception('Error loading the graph definition: {}'.format(str(e)))

    try:
        assert graph_def is not None
        with tf.Graph().as_default() as graph:
            tf.import_graph_def(
                graph_def,
                input_map=None,
                return_elements=None,
                name='',
                producer_op_list=None
            )
    except BaseException as e:
        raise Exception('Error importing the graph: {}'.format(str(e)))

    #Define supported operations
    array_ops = ['ConcatV2', 'StridedSlice', 'Transpose']
    core_ops = ['Const', 'Identity', 'Placeholder']
    image_ops = ['ResizeNearestNeighbor']
    math_ops = ['Add', 'AddV2', 'MatMul', 'Mul', 'Sigmoid']
    nn_ops = ['AvgPool', 'BiasAdd', 'Conv2D', 'Elu', 'FusedBatchNorm', 'MaxPool', 'Relu', 'Selu', 'Softmax']
    supported_ops = array_ops + core_ops + image_ops + math_ops + nn_ops

    input_layers = []
    output_layers = _find_graph_outputs(graph)

    # Get input shape and check for unsupported layer type
    output_shape = None
    for tf_op in graph.get_operations():
        if tf_op.type not in supported_ops:
            raise Exception('ERROR: Unsupported layer type: {}'.format(tf_op.type))

    print('Topology:')
    for tf_op in graph.get_operations():
        handled = False

        layer = {}
        layer['name'] = tf_op.name

        if tf_op.type == 'Placeholder':
            if len(tf_op.inputs) == 0: # Input
                output_shape = tf_op.outputs[0].shape.as_list()
                layer['class_name'] = 'InputLayer'
                layer['input_shape'] = output_shape[1:]
                #layer['outputs'] = [tf_op.outputs[0].name for o in tf_op.outputs]
                layer['outputs'] = _parse_tensor_names(tf_op.outputs)
                input_layers.append(layer['name'])
                handled = True

        elif tf_op.type == 'Identity':
            # Hack/TODO: Some exported models have their outputs set as an Identity layer,
            #            if this layer is ignored by TFDataReader then the parsing will
            #            fail. This hack solves this problem but likely better solutions
            #            exist.
            output_name = _parse_tensor_names(tf_op.outputs[0])[0]
            if output_name == 'Identity':
                output_shape = tf_op.outputs[0].shape.as_list()
                layer['class_name'] = 'Identity'
                layer['shape'] = output_shape[1:]
                layer['inputs'] = _parse_tensor_names(tf_op.inputs[0])
                layer['outputs'] = _parse_tensor_names(tf_op.outputs[0])
                handled = True
            else:
                handled = True
                continue

        elif tf_op.type == 'Const':
            # Nothing to do here, TFDataReader handles these
            handled = True
            continue

        elif tf_op.type == 'MatMul':
            input_shape = tf_op.inputs[0].shape.as_list()
            output_shape = tf_op.outputs[0].shape.as_list()
            layer['class_name'] = 'Dense'
            layer['inputs'] = _parse_tensor_names(tf_op.inputs[0])
            layer['outputs'] = _parse_tensor_names(tf_op.outputs[0])
            layer['n_in'] = input_shape[-1]
            layer['n_out'] = output_shape[-1]
            handled = True

        elif tf_op.type == 'BiasAdd':
            input_shape = tf_op.inputs[0].shape.as_list()
            output_shape = tf_op.outputs[0].shape.as_list()
            layer['class_name'] = 'BiasAdd'
            layer['op'] = 'Add'
            layer['inputs'] = _parse_tensor_names(tf_op.inputs[0])
            layer['outputs'] = _parse_tensor_names(tf_op.outputs[0])
            handled = True

        elif tf_op.type in ['Elu', 'Relu', 'Selu', 'Sigmoid', 'Softmax']:
            output_shape = tf_op.outputs[0].shape.as_list()
            layer['class_name'] = 'Activation'
            layer['activation'] = tf_op.type
            layer['inputs'] = _parse_tensor_names(tf_op.inputs[0])
            layer['outputs'] = _parse_tensor_names(tf_op.outputs[0])
            handled = True

        elif tf_op.type == 'Conv2D':
            input_shape = tf_op.inputs[0].shape.as_list()
            weights_shape = tf_op.inputs[1].shape.as_list()
            output_shape = tf_op.outputs[0].shape.as_list()
            layer['data_format'], c_idx, h_idx, w_idx = _parse_data_format(tf_op.get_attr('data_format').decode())
            dilations = tf_op.get_attr('dilations')
            strides = tf_op.get_attr('strides')

            layer['class_name'] = 'Conv2D'
            layer['inputs'] = _parse_tensor_names(tf_op.inputs[0])
            layer['outputs'] = _parse_tensor_names(tf_op.outputs[0])

            layer['n_chan'] = input_shape[c_idx]
            layer['in_height'] = input_shape[h_idx]
            layer['in_width'] = input_shape[w_idx]

            # weights_shape = (filter_height, filter_width, n_channels, n_filters)
            layer['filt_height'] = weights_shape[0]
            layer['filt_width'] = weights_shape[1]
            layer['n_chan'] = weights_shape[2]
            layer['n_filt'] = weights_shape[3]

            layer['stride_height'] = strides[h_idx]
            layer['stride_width'] = strides[w_idx]
            layer['dilation_height'] = dilations[h_idx]
            layer['dilation_width'] = dilations[w_idx]

            layer['padding'] = tf_op.get_attr('padding').decode().lower()
            in_height = input_shape[h_idx]
            in_width = input_shape[w_idx]
            _compute_pads_2d(layer, in_height, in_width)

            handled = True

        elif tf_op.type == 'MaxPool':
            input_shape = tf_op.inputs[0].shape.as_list()
            output_shape = tf_op.outputs[0].shape.as_list()
            layer['data_format'], c_idx, h_idx, w_idx = _parse_data_format(tf_op.get_attr('data_format').decode())
            strides = tf_op.get_attr('strides')
            kernel_size = tf_op.get_attr('ksize')

            layer['class_name'] = 'MaxPooling2D'
            layer['inputs'] = _parse_tensor_names(tf_op.inputs[0])
            layer['outputs'] = _parse_tensor_names(tf_op.outputs[0])

            layer['padding'] = tf_op.get_attr('padding').decode().lower()

            layer['in_height'] = input_shape[h_idx]
            layer['in_width'] = input_shape[w_idx]
            layer['n_filt'] = input_shape[c_idx]

            layer['stride_height'] = strides[h_idx]
            layer['stride_width'] = strides[w_idx]
            layer['filt_height'] = layer['pool_height'] = kernel_size[h_idx]
            layer['filt_width'] = layer['pool_width'] = kernel_size[w_idx]

            layer['padding'] = tf_op.get_attr('padding').decode().lower()
            in_height = input_shape[h_idx]
            in_width = input_shape[w_idx]
            _compute_pads_2d(layer, in_height, in_width)

            handled = True

        elif tf_op.type == 'FusedBatchNorm':
            input_shape = tf_op.inputs[0].shape.as_list()
            output_shape = tf_op.outputs[0].shape.as_list()
            
            layer['class_name'] = 'BatchNormalization'
            layer['inputs'] = _parse_tensor_names(tf_op.inputs[0])
            layer['outputs'] = _parse_tensor_names(tf_op.outputs[0])
            layer['data_format'], c_idx, h_idx, w_idx = _parse_data_format(tf_op.get_attr('data_format').decode())
            layer['n_in'] = np.prod(input_shape[1:])
            layer['epsilon'] = tf_op.get_attr('epsilon')

            if len(input_shape) < 4:
                layer['n_filt'] = -1
            else:
                layer['n_filt'] = input_shape[c_idx]

            handled = True

        elif tf_op.type == 'ConcatV2':
            layer['class_name'] = 'Concatenate'
            layer['inputs'] = _parse_tensor_names(tf_op.inputs[:-1])
            layer['outputs'] = _parse_tensor_names(tf_op.outputs[0])
            output_shape = tf_op.outputs[0].shape.as_list()

            num_inputs = tf_op.get_attr('N')
            if num_inputs != 2:
                raise Exception('Unsupported number of inputs in Concat operation')

            rank = len(output_shape[1:])  # tensor rank without the batch dimension
            layer['op'] = layer['class_name'].lower() + '{}d'.format(rank)
            layer['axis'] = tf_op.inputs[2].op.node_def.attr['value'].tensor.int_val[0]  # axis is a Const input; read it off the NodeDef

            handled = True

        elif tf_op.type in ['Add', 'AddV2', 'Mul']:
            layer['class_name'] = 'Merge'
            layer['inputs'] = _parse_tensor_names(list(tf_op.inputs))
            layer['outputs'] = _parse_tensor_names(tf_op.outputs[0])
            output_shape = tf_op.outputs[0].shape.as_list()
            
            layer['op'] = tf_op.type.lower()
            if layer['op'] == 'mul':
                layer['op'] = 'multiply'
            
            handled = True

        elif tf_op.type == 'Transpose':
            layer['class_name'] = 'Transpose'
            layer['inputs'] = _parse_tensor_names(tf_op.inputs[0])
            layer['outputs'] = _parse_tensor_names(tf_op.outputs[0])
            layer['perm'] = tensor_util.MakeNdarray(tf_op.inputs[1].op.node_def.attr['value'].tensor).tolist()
            output_shape = tf_op.outputs[0].shape.as_list()

            handled = True

        elif tf_op.type == 'ResizeNearestNeighbor':
            layer['class_name'] = 'Resize'
            layer['algorithm'] = 'nearest'
            layer['inputs'] = _parse_tensor_names(tf_op.inputs[0])
            layer['outputs'] = _parse_tensor_names(tf_op.outputs[0])

            input_shape = tf_op.inputs[0].shape.as_list() # (B, H, W, C)
            output_shape = tf_op.outputs[0].shape.as_list()
            layer['height'] = input_shape[1]
            layer['width'] = input_shape[2]
            layer['n_chan'] = input_shape[3]
            layer['new_height'] = output_shape[1]
            layer['new_width'] = output_shape[2]

            # Check for currently unsupported operations
            align_corners = tf_op.get_attr('align_corners')
            if align_corners:
                raise NotImplementedError('Property "align_corners=True" is not supported.')
            half_pixel_centers = tf_op.get_attr('half_pixel_centers')
            if half_pixel_centers:
                raise NotImplementedError('Property "half_pixel_centers=True" is not supported.')

            handled = True

        if not handled:
            raise Exception('Unable to parse operation: {} - {}'.format(tf_op.type, tf_op.name))

        print('Layer name: {}, layer type: {}, current shape: {}'.format(layer['name'], layer['class_name'], output_shape))
        layer_list.append(layer)

    #################
    ## Generate HLS
    #################

    reader = TFDataReader(graph)
    print('Creating HLS model')
    hls_model = HLSModel(yamlConfig, reader, layer_list, input_layers, output_layers)
    optimizers = ['eliminate_linear_activation', 'merge_batch_norm_quantized_tanh', 'quantize_dense_output', 'fuse_biasadd', 'fuse_dense_batch_norm']
    optimize_model(hls_model, optimizers)
    return hls_model
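
A minimal usage sketch: 'TensorFlowModel' must point to a frozen GraphDef
(.pb), as the parsing above shows; the remaining keys are illustrative:

yamlConfig = {
    'TensorFlowModel': 'frozen_model.pb',  # frozen GraphDef parsed above
    'OutputDir': 'my-hls-prj',             # illustrative project setting
}
hls_model = tf_to_hls(yamlConfig)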
Example #5
# Requires re plus hls4ml-internal helpers (PyTorchDataReader, HLSModel,
# optimize_model); module paths vary across hls4ml versions.
import re


def pytorch_to_hls(yamlConfig):

    ######################
    ##  Do translation
    ######################

    print('Interpreting Model')
    reader = PyTorchDataReader(yamlConfig)

    core_layers = ['Linear']
    skip_layers = ['Dropout', 'Flatten']
    activation_layers = [
        'ReLU', 'Sigmoid', 'Tanh', 'SELU', 'LeakyReLU', 'Softmax', 'Softplus',
        'Softsign'
    ]
    supported_layers = core_layers + skip_layers + activation_layers

    #This is a list of dictionaries to hold all the layer info we need to generate HLS
    layer_list = []

    #Loop through layers
    print('Topology:')
    modelstr = repr(reader.torch_model).split('\n')
    for pytorch_layer in modelstr:
        layer_match = re.match(r'\((\d+)\): (\w+)\((.*)\)',
                               pytorch_layer.strip())
        if layer_match is None:
            continue

        layer_idx = layer_match.group(1)
        layer_type = layer_match.group(2)
        layer_spec = layer_match.group(3)

        #Dictionary to fill in and append to layer_list
        layer = {}

        if layer_type not in supported_layers:
            raise Exception('Unsupported layer {}'.format(layer_type))

        if layer_type in skip_layers:
            continue

        if layer_type == 'Linear':
            layer['class_name'] = 'Dense'
            layer['name'] = layer_idx

            dense_spec = re.match(r'in_features=(\d+), out_features=(\d+).*',
                                  layer_spec)
            if dense_spec is None:
                raise Exception(
                    'Unable to interpret Linear layer ({})'.format(layer_spec))

            #Get number of inputs and outputs
            layer['n_in'] = int(dense_spec.group(1))
            layer['n_out'] = int(dense_spec.group(2))

            current_shape = [layer['n_in'], layer['n_out']]
            print('Layer index: {}, layer type: {}, current shape: {}'.format(
                layer['name'], layer['class_name'], current_shape))
        elif layer_type in activation_layers:
            layer['class_name'] = 'Activation'
            layer['activation'] = layer_type.lower()
            layer['name'] = layer['activation'] + '_' + str(layer_idx)

        layer_list.append(layer)

    input_layer = {}
    input_layer['name'] = 'input1'
    input_layer['class_name'] = 'InputLayer'
    input_layer['input_shape'] = [layer_list[0]['n_in']]
    layer_list.insert(0, input_layer)

    #################
    ## Generate HLS
    #################

    print('Creating HLS model')
    hls_model = HLSModel(yamlConfig, reader, layer_list)
    optimizers = [
        'eliminate_linear_activation', 'merge_batch_norm_quantized_tanh',
        'quantize_dense_output', 'fuse_dense_batch_norm'
    ]
    optimize_model(hls_model, optimizers)
    return hls_model
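
A minimal usage sketch: this snippet does not show which keys
PyTorchDataReader expects, so 'PytorchModel' below is an assumption carried
over from the other converters:

yamlConfig = {
    'PytorchModel': 'model.pt',  # assumed key; see PyTorchDataReader
    'OutputDir': 'my-hls-prj',   # illustrative project setting
}
hls_model = pytorch_to_hls(yamlConfig)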
Example #6
# Requires json, math, h5py and numpy plus hls4ml-internal helpers
# (get_weights_shape, get_qkeras_quantization, HLSModel, optimize_model);
# module paths vary across hls4ml versions.
import json
import math

import h5py
import numpy as np


def keras_to_hls(yamlConfig):

    ######################
    ##  Do translation
    ######################

    #This is a list of dictionaries to hold all the layer info we need to generate HLS
    layer_list = []

    #If the json file is not provided, interpret this as the full model is saved in KerasH5 with model.save()
    if yamlConfig.get('KerasJson', None) is None:
        #Load the model's info and add them in a dict
        filepath = yamlConfig['KerasH5']

        #Open file
        opened_new_file = not isinstance(filepath, h5py.File)
        if opened_new_file:
            f = h5py.File(filepath, mode='r')
        else:
            f = filepath

        #Load the configuration from h5 using json's decode
        # instantiate model
        model_arch = f.attrs.get('model_config')
        if model_arch is None:
            raise ValueError('No model found in config file.')
        else:
            model_arch = json.loads(model_arch.decode('utf-8'))

    else:
        #Extract model architecture from json
        with open(yamlConfig['KerasJson']) as json_file:
            model_arch = json.load(json_file)

    #print(model_arch)

    #Define supported layers
    core_layers = [
        'InputLayer', 'Dropout', 'Flatten', 'Dense', 'BinaryDense',
        'TernaryDense', 'Reshape'
    ]
    conv_layers = ['Conv1D', 'Conv2D', 'BinaryConv2D']
    pooling_layers = [
        'MaxPooling1D', 'MaxPooling2D', 'AveragePooling1D', 'AveragePooling2D'
    ]
    norm_layers = ['BatchNormalization']
    activation_layers = [
        'Activation', 'LeakyReLU', 'ThresholdedReLU', 'ELU', 'PReLU'
    ]
    merge_layers = [
        'Add', 'Subtract', 'Multiply', 'Average', 'Maximum', 'Minimum',
        'Concatenate'
    ]
    qkeras_layers = ['QDense', 'QActivation', 'QConv1D', 'QConv2D']
    #Define layers to skip for conversion to HLS
    skip_layers = ['Dropout', 'Flatten']
    #All supported layers
    supported_layers = core_layers + conv_layers + pooling_layers + norm_layers + activation_layers + merge_layers + qkeras_layers + skip_layers

    #Map inputs of skipped and split (activation) layers
    inputs_map = {}

    #Loop through layers
    layer_counter = 0

    input_layers = None
    output_layers = None

    layer_config = None
    if model_arch['class_name'] == 'Sequential':
        print('Interpreting Sequential')
        layer_config = model_arch["config"]
        if 'layers' in layer_config:  # Newer Keras versions have 'layers' in 'config' key
            layer_config = layer_config['layers']
        # Sequential doesn't have InputLayer
        input_layer = {}
        input_layer['name'] = 'input1'
        input_layer['class_name'] = 'InputLayer'
        input_layer['input_shape'] = layer_config[0]['config'][
            'batch_input_shape'][1:]
        layer_list.append(input_layer)
        print('Input shape:', input_layer['input_shape'])
    elif model_arch['class_name'] == 'Model':
        print('Interpreting Model')
        layer_config = model_arch["config"]["layers"]
        input_layers = [inp[0] for inp in model_arch["config"]["input_layers"]]
        output_layers = [
            out[0] for out in model_arch["config"]["output_layers"]
        ]

    # Get input shape and check for unsupported layer type
    current_shape = None  # filled in from the first layer's batch_input_shape below
    for keras_layer in layer_config:
        if keras_layer["class_name"] not in supported_layers:
            raise Exception('ERROR: Unsupported layer type: {}'.format(
                keras_layer["class_name"]))
        if 'batch_input_shape' in keras_layer['config']:
            current_shape = keras_layer['config'][
                'batch_input_shape']  # [None, 100, 7]

    print('Topology:')
    for keras_layer in layer_config:
        if keras_layer["class_name"] == 'Flatten':
            print(current_shape)
            current_shape = [current_shape[0], np.prod(current_shape[1:])]
        if keras_layer["class_name"] in skip_layers:
            if 'inbound_nodes' in keras_layer:
                name = keras_layer['config']['name']
                #Currently supported skipped layers have only one input
                parent_input = keras_layer['inbound_nodes'][0][0][0]
                #Skipped layers can follow each other (e.g., Dropout -> Flatten)
                inputs_map[name] = inputs_map.get(parent_input, parent_input)
            continue

        if keras_layer["class_name"] in supported_layers:
            layer_counter = layer_counter + 1

        #Dictionary to fill in and append to layer_list
        layer = {}

        #Extract name for finding weights and biases
        layer['name'] = keras_layer['config']['name']
        layer['class_name'] = keras_layer['class_name']

        #Extract inbound nodes
        if 'inbound_nodes' in keras_layer and len(
                keras_layer['inbound_nodes']) > 0:
            layer['inputs'] = [
                inputs_map.get(inp[0], inp[0])
                for inp in keras_layer['inbound_nodes'][0]
            ]

        #Extract type of activation and number of nodes
        #Extract activation type and batch-norm epsilon, when present
        if 'activation' in keras_layer['config']:
            layer['activation'] = keras_layer['config']['activation']
        if 'epsilon' in keras_layer['config']:
            layer['epsilon'] = keras_layer['config']['epsilon']

        # Default one layer call
        if layer['class_name'] == 'InputLayer':
            layer['input_shape'] = keras_layer['config']['batch_input_shape'][
                1:]
        if keras_layer["class_name"] == 'Reshape':
            layer['target_shape'] = keras_layer['config']['target_shape']
            current_shape[1:] = keras_layer['config']['target_shape']
        if 'Dense' in layer['class_name']:
            weights_shape = get_weights_shape(yamlConfig['KerasH5'],
                                              layer['name'])
            layer['n_in'] = weights_shape[0]
            layer['n_out'] = weights_shape[1]
            if 'Binary' in layer['class_name']:
                layer['quantize'] = 2
            elif 'Ternary' in layer['class_name']:
                layer['quantize'] = 3
            elif layer['class_name'] == 'QDense':
                get_qkeras_quantization(layer, keras_layer)
            else:
                layer['quantize'] = 0
            current_shape = [current_shape[0], layer['n_out']]
        elif layer['class_name'] == 'Conv1D':
            # weights_shape = (filter_width, n_channels, n_filters)
            weights_shape = get_weights_shape(yamlConfig['KerasH5'],
                                              layer['name'])
            layer['n_in'] = current_shape[1]
            layer['filt_width'] = weights_shape[
                0]  # or keras_layer['config']['kernel_size']
            layer['n_chan'] = weights_shape[1]
            layer['n_filt'] = weights_shape[
                2]  # or keras_layer['config']['filters']
            layer['stride'] = keras_layer['config']['strides'][0]
            layer['padding'] = keras_layer['config']['padding']
            if layer['padding'] == 'same':
                in_width = current_shape[1]
                layer['n_out'] = int(
                    math.ceil(float(in_width) / float(layer['stride'])))
                if (in_width % layer['stride'] == 0):
                    pad_along_width = max(
                        layer['filt_width'] - layer['stride'], 0)
                else:
                    pad_along_width = max(
                        layer['filt_width'] - (in_width % layer['stride']), 0)
                layer['pad_left'] = pad_along_width // 2
                layer['pad_right'] = pad_along_width - layer['pad_left']
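                # This mirrors the TF/Keras 'same' rule: n_out = ceil(n_in / stride);
                # total padding is max(filt_width - stride, 0) when in_width divides
                # evenly by stride, else max(filt_width - in_width % stride, 0),
                # with the odd extra pixel going to the right.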
            elif layer['padding'] == 'valid':
                in_width = current_shape[1]
                layer['n_out'] = int(
                    math.ceil(
                        float(in_width - layer['filt_width'] + 1) /
                        float(layer['stride'])))
                layer['pad_left'] = 0
                layer['pad_right'] = 0
            layer['data_format'] = keras_layer['config'].get(
                'data_format', 'channels_last')
            get_qkeras_quantization(layer, keras_layer)
            current_shape = [current_shape[0], layer['n_out'], layer['n_filt']]
        elif 'Conv2D' in layer['class_name']:
            layer['data_format'] = keras_layer['config'].get(
                'data_format', 'channels_last')
            # weights_shape = (filter_height, filter_width, n_channels, n_filters)
            weights_shape = get_weights_shape(yamlConfig['KerasH5'],
                                              layer['name'])
            layer['in_height'] = current_shape[1]
            layer['in_width'] = current_shape[2]
            if layer['data_format'] == 'channels_first':
                layer['in_height'] = current_shape[2]
                layer['in_width'] = current_shape[3]
            layer['filt_height'] = weights_shape[0]
            layer['filt_width'] = weights_shape[1]
            layer['n_chan'] = weights_shape[2]
            layer['n_filt'] = weights_shape[3]
            layer['stride_height'] = keras_layer['config']['strides'][0]
            layer['stride_width'] = keras_layer['config']['strides'][1]
            layer['padding'] = keras_layer['config']['padding']
            if layer['padding'] == 'same':
                #Height
                in_height = current_shape[1]
                if layer['data_format'] == 'channels_first':
                    in_height = current_shape[2]
                layer['out_height'] = int(
                    math.ceil(
                        float(in_height) / float(layer['stride_height'])))
                if (in_height % layer['stride_height'] == 0):
                    pad_along_height = max(
                        layer['filt_height'] - layer['stride_height'], 0)
                else:
                    pad_along_height = max(
                        layer['filt_height'] -
                        (in_height % layer['stride_height']), 0)
                layer['pad_top'] = pad_along_height // 2
                layer['pad_bottom'] = pad_along_height - layer['pad_top']
                #Width
                in_width = current_shape[2]
                if layer['data_format'] == 'channels_first':
                    in_width = current_shape[3]
                layer['out_width'] = int(
                    math.ceil(float(in_width) / float(layer['stride_width'])))
                if (in_width % layer['stride_width'] == 0):
                    pad_along_width = max(
                        layer['filt_width'] - layer['stride_width'], 0)
                else:
                    pad_along_width = max(
                        layer['filt_width'] -
                        (in_width % layer['stride_width']), 0)
                layer['pad_left'] = pad_along_width // 2
                layer['pad_right'] = pad_along_width - layer['pad_left']
            elif layer['padding'] == 'valid':
                in_height = current_shape[1]
                in_width = current_shape[2]
                if layer['data_format'] == 'channels_first':
                    in_height = current_shape[2]
                    in_width = current_shape[3]
                layer['out_width'] = int(
                    math.ceil(
                        float(in_width - layer['filt_width'] + 1) /
                        float(layer['stride_width'])))
                layer['out_height'] = int(
                    math.ceil(
                        float(in_height - layer['filt_height'] + 1) /
                        float(layer['stride_height'])))
                layer['pad_top'] = 0
                layer['pad_bottom'] = 0
                layer['pad_left'] = 0
                layer['pad_right'] = 0
            get_qkeras_quantization(layer, keras_layer)
            if layer['data_format'] == 'channels_first':
                current_shape = [
                    current_shape[0], layer['n_filt'], layer['out_height'],
                    layer['out_width']
                ]
            else:
                current_shape = [
                    current_shape[0], layer['out_height'], layer['out_width'],
                    layer['n_filt']
                ]
        elif layer['class_name'] == 'BatchNormalization':
            in_size = 1
            for dim in current_shape[1:]:
                in_size *= dim
            layer['n_in'] = in_size
            layer['n_out'] = layer['n_in']
            if len(current_shape) == 2:
                layer['n_filt'] = -1
            elif len(current_shape) == 3:
                layer['n_filt'] = current_shape[2]
            elif len(current_shape) == 4:
                layer['n_filt'] = current_shape[3]
        elif 'Pooling' in layer['class_name']:
            if int(layer['class_name'][-2]) == 1:
                layer['n_in'] = current_shape[1]
                layer['n_filt'] = current_shape[2]
                layer['pool_size'] = keras_layer['config']['pool_size'][0]
                layer['stride'] = keras_layer['config']['strides'][0]
                layer['padding'] = keras_layer['config']['padding']
                if layer['padding'] == 'same':
                    in_width = current_shape[1]
                    layer['n_out'] = int(
                        math.ceil(float(in_width) / float(layer['stride'])))
                    if (in_width % layer['stride'] == 0):
                        pad_along_width = max(
                            layer['pool_size'] - layer['stride'], 0)
                    else:
                        pad_along_width = max(
                            layer['pool_size'] - (in_width % layer['stride']),
                            0)
                    layer['pad_left'] = pad_along_width // 2
                    layer['pad_right'] = pad_along_width - layer['pad_left']
                elif layer['padding'] == 'valid':
                    in_width = current_shape[1]
                    layer['n_out'] = int(
                        math.ceil(
                            float(in_width - layer['pool_size'] + 1) /
                            float(layer['stride'])))
                    layer['pad_left'] = 0
                    layer['pad_right'] = 0
                current_shape = [
                    current_shape[0], layer['n_out'], layer['n_filt']
                ]
            elif int(layer['class_name'][-2]) == 2:
                layer['data_format'] = keras_layer['config'].get(
                    'data_format', 'channels_last')
                layer['in_height'] = current_shape[1]
                layer['in_width'] = current_shape[2]
                layer['n_filt'] = current_shape[3]
                if layer['data_format'] == 'channels_first':
                    layer['in_height'] = current_shape[2]
                    layer['in_width'] = current_shape[3]
                    layer['n_filt'] = current_shape[1]
                layer['stride_height'] = keras_layer['config']['strides'][0]
                layer['stride_width'] = keras_layer['config']['strides'][1]
                layer['pool_height'] = keras_layer['config']['pool_size'][0]
                layer['pool_width'] = keras_layer['config']['pool_size'][1]
                layer['padding'] = keras_layer['config']['padding']
                if layer['padding'] == 'same':
                    #Height
                    in_height = current_shape[1]
                    if layer['data_format'] == 'channels_first':
                        in_height = current_shape[2]
                    layer['out_height'] = int(
                        math.ceil(
                            float(in_height) / float(layer['stride_height'])))
                    if (in_height % layer['stride_height'] == 0):
                        pad_along_height = max(
                            layer['pool_height'] - layer['stride_height'], 0)
                    else:
                        pad_along_height = max(
                            layer['pool_height'] -
                            (in_height % layer['stride_height']), 0)
                    layer['pad_top'] = pad_along_height // 2
                    layer['pad_bottom'] = pad_along_height - layer['pad_top']
                    #Width
                    in_width = current_shape[2]
                    if layer['data_format'] == 'channels_first':
                        in_width = current_shape[3]
                    layer['out_width'] = int(
                        math.ceil(
                            float(in_width) / float(layer['stride_width'])))
                    if (in_width % layer['stride_width'] == 0):
                        pad_along_width = max(
                            layer['pool_width'] - layer['stride_width'], 0)
                    else:
                        pad_along_width = max(
                            layer['pool_width'] -
                            (in_width % layer['stride_width']), 0)
                    layer['pad_left'] = pad_along_width // 2
                    layer['pad_right'] = pad_along_width - layer['pad_left']
                elif layer['padding'] == 'valid':
                    in_height = current_shape[1]
                    in_width = current_shape[2]
                    if layer['data_format'] == 'channels_first':
                        in_height = current_shape[2]
                        in_width = current_shape[3]
                    layer['out_width'] = int(
                        math.ceil(
                            float(in_width - layer['pool_width'] + 1) /
                            float(layer['stride_width'])))
                    layer['out_height'] = int(
                        math.ceil(
                            float(in_height - layer['pool_height'] + 1) /
                            float(layer['stride_height'])))
                    layer['pad_top'] = 0
                    layer['pad_bottom'] = 0
                    layer['pad_left'] = 0
                    layer['pad_right'] = 0
                if layer['data_format'] == 'channels_last':
                    current_shape = [
                        current_shape[0], layer['out_height'],
                        layer['out_width'], layer['n_filt']
                    ]
                elif layer['data_format'] == 'channels_first':
                    current_shape = [
                        current_shape[0], layer['n_filt'], layer['out_height'],
                        layer['out_width']
                    ]

        elif layer['class_name'] == 'LeakyReLU':
            layer['activation'] = layer['class_name']
            layer['activ_param'] = keras_layer['config'].get('alpha', 0.3)
        elif layer['class_name'] == 'ThresholdedReLU':
            layer['activation'] = layer['class_name']
            layer['activ_param'] = keras_layer['config'].get('theta', 1.)
        elif layer['class_name'] == 'ELU':
            layer['activation'] = layer['class_name']
            layer['activ_param'] = keras_layer['config'].get('alpha', 1.)
        elif layer['class_name'] == 'PReLU':
            layer['activation'] = layer['class_name']
        elif layer['class_name'] == 'QActivation':
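            # QKeras quantized activations are lowered to their plain Keras
            # counterparts here; anything else is rejected below.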
            if 'quantized_relu' in layer['activation']:
                layer['activation'] = 'relu'
            elif 'quantized_tanh' in layer['activation']:
                layer['activation'] = 'tanh'
            else:
                raise Exception('Unsupported activation {} in layer {}'.format(
                    layer['activation'], layer['name']))

        elif layer['class_name'] in merge_layers:
            layer['op'] = layer['class_name'].lower()
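            # Concatenate keeps its tensor rank in the op name (e.g.
            # 'concatenate2d'); the elementwise merges collapse onto a
            # generic 'Merge' layer keyed by 'op'.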
            if layer['class_name'] == 'Concatenate':
                rank = len(current_shape[1:])
                if rank > 3:
                    raise Exception(
                        'ERROR: Concatenation of tensors with rank > 3 is not yet supported.'
                    )
                layer['op'] = layer['class_name'].lower() + '{}d'.format(rank)
                layer['axis'] = keras_layer['config']['axis']
            else:
                layer['class_name'] = 'Merge'
            if len(layer['inputs']) > 2:
                raise Exception(
                    'ERROR: Merging more than two tensors is not yet supported.'
                )

        print('Layer name: {}, layer type: {}, current shape: {}'.format(
            layer['name'], layer['class_name'], current_shape))
        layer_list.append(layer)
        if 'activation' in layer and layer[
                'class_name'] not in activation_layers + qkeras_layers:
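            # Keras fuses activations into layers via the 'activation' kwarg;
            # split them out as standalone layers so each HLS node is explicit.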
            act_layer = {}
            act_layer['name'] = layer['name'] + '_' + layer['activation']
            act_layer['activation'] = layer['activation']
            if 'activ_param' in layer:
                act_layer['activ_param'] = layer['activ_param']
                act_layer['class_name'] = layer['activation']
            else:
                act_layer['class_name'] = 'Activation'
            inputs_map[layer['name']] = act_layer['name']
            if output_layers is not None and layer['name'] in output_layers:
                output_layers = [
                    act_layer['name'] if name == layer['name'] else name
                    for name in output_layers
                ]
            layer_list.append(act_layer)

    #################
    ## Generate HLS
    #################

    reader = KerasDataReader(yamlConfig)
    print('Creating HLS model')
    hls_model = HLSModel(yamlConfig, reader, layer_list, input_layers,
                         output_layers)
    optimizers = [
        'eliminate_linear_activation', 'merge_batch_norm_quantized_tanh',
        'quantize_dense_output', 'fuse_dense_batch_norm'
    ]
    optimize_model(hls_model, optimizers)
    return hls_model
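
The 'same'-padding arithmetic in the pooling branches above follows
TensorFlow's convention; a minimal standalone sketch of the 1D rule
(the helper name same_pad_1d is hypothetical, not part of the source):

import math

def same_pad_1d(in_width, pool_size, stride):
    # Output length under 'same' padding, then the total padding needed to
    # make that many windows fit; any odd pixel of padding goes on the right.
    n_out = int(math.ceil(float(in_width) / float(stride)))
    if in_width % stride == 0:
        pad = max(pool_size - stride, 0)
    else:
        pad = max(pool_size - (in_width % stride), 0)
    return n_out, pad // 2, pad - pad // 2

print(same_pad_1d(7, 3, 2))  # (4, 1, 1): 7 samples pool to 4, one pad per side
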
Example #7
0
def onnx_to_hls(config):
    """ Convert onnx model to hls model from configuration.
    
    Parameters
    ----------
    config: dict
        onnx configuration from yaml file or passed through API.
        
    Returns
    -------
    hls_model : hls4ml model object
        
    """

    #This is a list of dictionaries to hold all the layer info we need to generate HLS
    layer_list = []

    #Extract model architecture
    print('Interpreting Model ...')

    model = onnx.load(config['OnnxModel']) if isinstance(
        config['OnnxModel'], str) else config['OnnxModel']

    model = shape_inference.infer_shapes(model)
    graph = model.graph

    reader = ONNXDataReader(model)

    #Obtain list of input/output layers
    all_inputs = [x.name for x in model.graph.input]
    all_initializers = [x.name for x in model.graph.initializer]
    input_layers = [x for x in all_inputs if x not in all_initializers]
    output_layers = get_out_layer_name(graph)

    print("Output layers: ", output_layers)

    for i, inp in enumerate(input_layers):
        input_layer = {}
        input_layer['name'] = replace_char_inconsitency(inp)
        input_layer['class_name'] = 'InputLayer'
        inp_shape = next((x.type.tensor_type.shape.dim
                          for x in model.graph.input if x.name == inp), None)
        input_layer['input_shape'] = [x.dim_value for x in inp_shape]

        if len(input_layer['input_shape']) > 1:
            input_layer['input_shape'][0] = None  #First dim is batch

        #Clean the layer name for specific models
        sanitize_layer_name(input_layer)
        input_layers[i] = input_layer['name']

        layer_list.append(input_layer)

    # Define supported layers and check for unsupported layer types
    skip_layers = ['Dropout', 'Identity', 'Flatten']

    #Map inputs of skipped layers
    inputs_map = {}

    supported_layers = get_supported_onnx_layers() + skip_layers

    # Get input shape
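    # (only the last parsed input layer seeds the shape tracking, so this
    #  path effectively assumes a single graph input)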
    current_shape = [input_layer['input_shape']]
    print('Input shape:', current_shape[0])

    #Loop through layers
    layer_counter = 0

    #Output shape tracking
    output_shapes = {}
    output_shape = None

    print('Topology:')
    for node in graph.node:

        if node.op_type not in supported_layers:
            raise Exception('ERROR: Unsupported operation type: {}'.format(
                node.op_type))

        #If not the first layer then input shape is taken from last layer's output
        if layer_counter != 0:
            current_shape = [output_shape]

        if node.op_type in skip_layers:
            if node.op_type == 'Flatten':
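                # Flatten keeps the batch dimension and collapses the rest.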
                output_shape = [
                    current_shape[0][0],
                    np.prod(current_shape[0][1:])
                ]

            else:
                #Currently supported skipped layers have only one input and output
                #Skipped layers can follow each other (e.g., Dropout -> Flatten)

                #Mapping inputs
                input_name = inputs_map.get(node.input[0], node.input[0])
                output_name = node.output[0]
                inputs_map[output_name] = input_name

                output_shape = current_shape[0]
            continue

        if node.op_type in supported_layers:
            layer_counter = layer_counter + 1

        #Process the layer
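        # Dispatch to the handler registered for this op_type; each handler
        # returns the layer dict plus the shape of its output tensor.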
        layer, output_shape = layer_handlers[node.op_type](reader, node,
                                                           inputs_map,
                                                           current_shape,
                                                           graph, config)

        sanitize_layer_name(layer)
        print('Layer name: {}, layer type: {}, current shape: {}'.format(
            layer['name'], layer['class_name'], current_shape))
        layer_list.append(layer)

    #################
    ## Generate HLS
    #################

    print('Creating HLS model')
    hls_model = HLSModel(config, reader, layer_list, input_layers,
                         output_layers)
    return hls_model
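
A minimal usage sketch for the converter above (only the 'OnnxModel' key is
exercised by this listing; a full hls4ml configuration carries more fields,
which are omitted here since the source does not show them):

import onnx

# Either hand the converter a file path ...
hls_model = onnx_to_hls({'OnnxModel': 'model.onnx'})

# ... or an already-loaded ModelProto, as the isinstance check above allows.
model = onnx.load('model.onnx')
hls_model = onnx_to_hls({'OnnxModel': model})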