Example #1
import argparse
import os
import xml.etree.ElementTree as ET

# parse_config, ensembleToDict and bdt_writer are project-local helpers,
# assumed to be importable alongside this script.


def main():

    # Parse command line arguments
    parser = argparse.ArgumentParser(description='')
    parser.add_argument("-c",
                        action='store',
                        dest='config',
                        help="Configuration file.")
    args = parser.parse_args()
    if not args.config:
        parser.error('A configuration file needs to be specified.')
    configDir = os.path.abspath(os.path.dirname(args.config))
    yamlConfig = parse_config(args.config)
    if not os.path.isabs(yamlConfig['OutputDir']):
        yamlConfig['OutputDir'] = os.path.join(configDir,
                                               yamlConfig['OutputDir'])
    if not os.path.isabs(yamlConfig['TMVAxml']):
        yamlConfig['TMVAxml'] = os.path.join(configDir,
                                             yamlConfig['TMVAxml'])

    if not (yamlConfig["IOType"] == "io_parallel"):
        raise Exception('ERROR: Invalid IO type (serial not yet supported)')

    ######################
    ##  Do translation
    ######################
    if not os.path.isdir("{}/firmware".format(yamlConfig['OutputDir'])):
        os.makedirs("{}/firmware".format(yamlConfig['OutputDir']))

    xml = ET.parse(yamlConfig['TMVAxml'])
    ensembleDict = ensembleToDict(xml)
    bdt_writer(ensembleDict, yamlConfig)
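
parse_config is a project helper shared by all of these examples but never
shown. A minimal sketch, assuming the configuration file is plain YAML with
keys such as OutputDir, TMVAxml and IOType:

import yaml

def parse_config(config_file):
    # Load the YAML configuration into a plain dict
    with open(config_file, 'r') as f:
        return yaml.safe_load(f)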
Example #2
import argparse
import json
import math
import os

import h5py
import numpy as np

# parse_config, print_array_to_cpp and hls_writer are project-local
# helpers, assumed to be importable alongside this script.


def main():

    # Parse command line arguments
    parser = argparse.ArgumentParser(description='')
    parser.add_argument("-c",
                        action='store',
                        dest='config',
                        help="Configuration file.")
    args = parser.parse_args()
    if not args.config:
        parser.error('A configuration file needs to be specified.')

    configDir = os.path.abspath(os.path.dirname(args.config))
    yamlConfig = parse_config(args.config)
    if not os.path.isabs(yamlConfig['OutputDir']):
        yamlConfig['OutputDir'] = os.path.join(configDir,
                                               yamlConfig['OutputDir'])
    if not os.path.isabs(yamlConfig['KerasH5']):
        yamlConfig['KerasH5'] = os.path.join(configDir, yamlConfig['KerasH5'])
    if not os.path.isabs(yamlConfig['KerasJson']):
        yamlConfig['KerasJson'] = os.path.join(configDir,
                                               yamlConfig['KerasJson'])

    if not (yamlConfig["IOType"] == "io_parallel"
            or yamlConfig["IOType"] == "io_serial"):
        raise Exception('ERROR: Invalid IO type')

    ######################
    ##  Do translation
    ######################
    if not os.path.isdir("{}/firmware/weights".format(
            yamlConfig['OutputDir'])):
        os.makedirs("{}/firmware/weights".format(yamlConfig['OutputDir']))

    h5File = h5py.File(yamlConfig['KerasH5'], 'r')

    #This is a list of dictionaries to hold all the layer info we need to generate HLS
    layer_list = []

    #Extract model architecture from json
    with open(yamlConfig['KerasJson']) as json_file:
        model_arch = json.load(json_file)
    #print(model_arch)

    #Define supported layers
    supported_layers = [
        'InputLayer', 'Dropout', 'Flatten', 'Dense', 'Conv1D', 'Conv2D'
    ]

    #Define layers to skip for conversion to HLS
    skip_layers = ['InputLayer', 'Dropout', 'Flatten']

    #Loop through layers
    layer_counter = 0
    input_layer = {}

    layer_config = None
    if model_arch['class_name'] == 'Sequential':
        print('Interpreting Sequential')
        layer_config = model_arch["config"]
    elif model_arch['class_name'] == 'Model':
        print('Interpreting Model')
        layer_config = model_arch["config"]["layers"]

    # Get input shape and check for unsupported layer type
    current_shape = None
    for keras_layer in layer_config:
        if keras_layer["class_name"] not in supported_layers:
            raise Exception('ERROR: Unsupported layer type: %s' %
                            keras_layer["class_name"])
        if 'batch_input_shape' in keras_layer['config']:
            current_shape = keras_layer['config'][
                'batch_input_shape']  # [None, 100, 7]
    print('Input shape:', current_shape)

    print('Topology:')
    for keras_layer in layer_config:
        if keras_layer["class_name"] is 'Flatten':
            current_shape = [current_shape[0], np.prod(current_shape[1:])]
        if keras_layer["class_name"] in skip_layers:
            continue

        layer_counter = layer_counter + 1

        #Dictionary to fill in and append to layer_list
        layer = {}

        #Extract name for finding weights and biases
        layer['name'] = keras_layer['config']['name']
        layer['class_name'] = keras_layer['class_name']

        #Extract type of activation and number of nodes
        for config, config_value in keras_layer["config"].items():
            if (config == "activation"):
                layer['activation'] = config_value
            #if(config=="units"):
            #print("PARSED NUM OF NODES",config_value)

        #Translate weights and biases from h5 file
        weights = h5File['/{}/{}/kernel:0'.format(layer['name'],
                                                  layer['name'])][()]
        biases = h5File['/{}/{}/bias:0'.format(layer['name'],
                                               layer['name'])][()]
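        # (Keras HDF5 weight files nest each layer's parameters in a group
        # named after the layer, repeated once, hence the '{}/{}' path.)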
        cur_n_zeros = print_array_to_cpp("w{}".format(layer_counter), weights,
                                         yamlConfig['OutputDir'])
        print_array_to_cpp("b{}".format(layer_counter), biases,
                           yamlConfig['OutputDir'])
        layer['weights_n_zeros'] = cur_n_zeros

        #Get number of inputs and outputs
        #(We take it from the weights to avoid dealing with InputLayer and Flatten details)
        if layer['class_name'] == 'Dense':
            layer['n_in'] = weights.shape[0]
            layer['n_out'] = weights.shape[1]
            current_shape = [current_shape[0], layer['n_out']]
        elif layer['class_name'] == 'Conv1D':
            # weights.shape = (filter_width, n_channels, n_filters)
            layer['y_in'] = current_shape[1]
            layer['y_filt'] = weights.shape[
                0]  # or keras_layer['config']['kernel_size']
            layer['n_chan'] = weights.shape[1]
            layer['n_filt'] = weights.shape[
                2]  # or keras_layer['config']['filters']
            layer['stride'] = keras_layer['config']['strides'][0]
            layer['padding'] = keras_layer['config']['padding']
            if layer['padding'] == 'same':
                in_width = current_shape[1]
                layer['y_out'] = int(
                    math.ceil(float(in_width) / float(layer['stride'])))
                if (in_width % layer['stride'] == 0):
                    pad_along_width = max(layer['y_filt'] - layer['stride'], 0)
                else:
                    pad_along_width = max(
                        layer['y_filt'] - (in_width % layer['stride']), 0)
                layer['pad_left'] = pad_along_width // 2
                layer['pad_right'] = pad_along_width - layer['pad_left']
            elif layer['padding'] == 'valid':
                in_width = current_shape[1]
                layer['y_out'] = int(
                    math.ceil(
                        float(in_width - layer['y_filt'] + 1) /
                        float(layer['stride'])))
                layer['pad_left'] = 0
                layer['pad_right'] = 0
            current_shape = [current_shape[0], layer['y_out'], layer['n_filt']]
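        # Worked example of the 'same'-padding arithmetic above (following
        # the Keras/TensorFlow convention): with in_width = 100, y_filt = 4
        # and stride = 1, y_out = ceil(100 / 1) = 100; since 100 % 1 == 0,
        # pad_along_width = max(4 - 1, 0) = 3, so pad_left = 1 and
        # pad_right = 2.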
        elif layer['class_name'] == 'Conv2D':
            layer['in_height'] = current_shape[1]
            layer['in_width'] = current_shape[2]
            layer['filt_height'] = weights.shape[0]
            layer['filt_width'] = weights.shape[1]
            layer['n_chan'] = weights.shape[2]
            layer['n_filt'] = weights.shape[3]
            layer['stride_height'] = keras_layer['config']['strides'][0]
            layer['stride_width'] = keras_layer['config']['strides'][1]
            layer['padding'] = keras_layer['config']['padding']
            if layer['padding'] == 'same':
                #Height
                in_height = current_shape[1]
                layer['out_height'] = int(
                    math.ceil(
                        float(in_height) / float(layer['stride_height'])))
                if (in_height % layer['stride_height'] == 0):
                    pad_along_height = max(
                        layer['filt_height'] - layer['stride_height'], 0)
                else:
                    pad_along_height = max(
                        layer['filt_height'] -
                        (in_height % layer['stride_height']), 0)
                layer['pad_top'] = pad_along_height // 2
                layer['pad_bottom'] = pad_along_height - layer['pad_top']
                #Width
                in_width = current_shape[2]
                layer['out_width'] = int(
                    math.ceil(float(in_width) / float(layer['stride_width'])))
                if (in_width % layer['stride_width'] == 0):
                    pad_along_width = max(
                        layer['filt_width'] - layer['stride_width'], 0)
                else:
                    pad_along_width = max(
                        layer['filt_width'] -
                        (in_width % layer['stride_width']), 0)
                layer['pad_left'] = pad_along_width // 2
                layer['pad_right'] = pad_along_width - layer['pad_left']
            elif layer['padding'] == 'valid':
                in_height = current_shape[1]
                in_width = current_shape[2]
                layer['out_width'] = int(
                    math.ceil(
                        float(in_width - layer['filt_width'] + 1) /
                        float(layer['stride_width'])))
                layer['out_height'] = int(
                    math.ceil(
                        float(in_height - layer['filt_height'] + 1) /
                        float(layer['stride_height'])))
                layer['pad_top'] = 0
                layer['pad_bottom'] = 0
                layer['pad_left'] = 0
                layer['pad_right'] = 0
            current_shape = [
                current_shape[0], layer['out_height'], layer['out_width'],
                layer['n_filt']
            ]
        print('Layer name: %s, layer type: %s, current shape: %s, number of zeros: %s' % (
            layer['name'], layer['class_name'], current_shape, cur_n_zeros))
        layer_list.append(layer)

    #################
    ## Generate HLS
    #################

    #Weights and biases are already dumped to output directory
    #Now generate HLS from list of layer dictionaries
    hls_writer(layer_list, yamlConfig)
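
print_array_to_cpp is a project helper that is called throughout these
examples but never shown. A minimal sketch, assuming it simply dumps a NumPy
array as a C/C++ initializer under <OutputDir>/firmware/weights and returns
the number of zero-valued entries (later examples pass extra quantization
and sub-array slicing arguments that the real version also handles):

import os
import numpy as np

def print_array_to_cpp(name, array, outdir):
    # Count zero entries so the callers can report weight sparsity
    n_zeros = int(np.sum(array == 0))
    path = os.path.join(outdir, 'firmware', 'weights', '{}.h'.format(name))
    with open(path, 'w') as f:
        flat = array.flatten()
        # The C type name here is illustrative only
        f.write('weight_default_t {}[{}] = {{{}}};\n'.format(
            name, flat.size, ', '.join(str(x) for x in flat)))
    return n_zeros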
Example #3
import argparse
import json
import math
import os

import h5py
import numpy as np

# parse_config, print_array_to_cpp, hls_writer, MAXMULT and the various
# find_*_in_h5 visitor helpers are project-local definitions, assumed to
# be importable alongside this script.


def main():
    # Parse command line arguments
    parser = argparse.ArgumentParser(description='')
    parser.add_argument("-c", action='store', dest='config',
                        help="Configuration file.")
    args = parser.parse_args()
    if not args.config: parser.error('A configuration file needs to be specified.')

    configDir = os.path.abspath(os.path.dirname(args.config))
    yamlConfig = parse_config(args.config)
    if not os.path.isabs(yamlConfig['OutputDir']):
        yamlConfig['OutputDir'] = os.path.join(configDir, yamlConfig['OutputDir'])
    if not os.path.isabs(yamlConfig['KerasH5']):
        yamlConfig['KerasH5'] = os.path.join(configDir, yamlConfig['KerasH5'])
    if not os.path.isabs(yamlConfig['KerasJson']):
        yamlConfig['KerasJson'] = os.path.join(configDir, yamlConfig['KerasJson'])

    ######################
    ##  Do translation
    ######################
    if not os.path.isdir("{}/firmware/weights".format(yamlConfig['OutputDir'])):
        os.makedirs("{}/firmware/weights".format(yamlConfig['OutputDir']))

    h5File = h5py.File(yamlConfig['KerasH5'], 'r')

    # This is a list of dictionaries to hold all the layer info we need to generate HLS
    layer_list = []

    # Extract model architecture from json
    with open(yamlConfig['KerasJson']) as json_file:
        model_arch = json.load(json_file)
    # print(model_arch)

    # Define supported layers
    supported_layers = ['InputLayer', 'Dropout', 'Flatten', 'Dense', 'BinaryDense', 'TernaryDense', 'Conv1D', 'Conv2D',
                        'BatchNormalization', 'MaxPooling1D', 'MaxPooling2D', 'AveragePooling1D', 'AveragePooling2D']
    activation_layers = ['Activation', 'LeakyReLU', 'ThresholdedReLU', 'ELU', 'PReLU', 'Softmax']

    # Define layers to skip for conversion to HLS
    skip_layers = ['InputLayer', 'Dropout', 'Flatten']

    # Loop through layers
    layer_counter = 0
    input_layer = {}

    layer_config = None
    if model_arch['class_name'] == 'Sequential':
        print('Interpreting Sequential')
        layer_config = model_arch["config"]
    elif model_arch['class_name'] == 'Model':
        print('Interpreting Model')
        layer_config = model_arch["config"]["layers"]

    # Get input shape and check for unsupported layer type
    current_shape = None
    for keras_layer in layer_config:
        if keras_layer["class_name"] not in supported_layers + activation_layers:
            raise Exception('ERROR: Unsupported layer type: {}'.format(keras_layer["class_name"]))
        if 'batch_input_shape' in keras_layer['config']:
            current_shape = keras_layer['config']['batch_input_shape']  # [None, 100, 7]
    print('Input shape:', current_shape)

    # Set some variables to make the routine that follows a bit smoother
    is_conv2d = False
    is_dense = False
    quantize = 0
    for keras_layer in layer_config:
        if keras_layer["class_name"] == 'Conv2D':
            is_conv2d = True
            break
        if keras_layer["class_name"] == 'Dense' or keras_layer["class_name"] == 'BinaryDense' or keras_layer[
            "class_name"] == 'TernaryDense':
            is_dense = True
            if keras_layer["class_name"] == 'BinaryDense': quantize = 2
            if keras_layer["class_name"] == 'TernaryDense': quantize = 3
            break

    print('Topology:')
    for il, keras_layer in enumerate(layer_config):
        if keras_layer["class_name"] is 'Flatten':
            current_shape = [current_shape[0], np.prod(current_shape[1:])]
        if keras_layer["class_name"] in skip_layers:
            continue

        if keras_layer["class_name"] in supported_layers + activation_layers:
            layer_counter = layer_counter + 1

        # Dictionary to fill in and append to layer_list
        layer = {}

        # Extract name for finding weights and biases
        layer['name'] = keras_layer['config']['name']
        layer['class_name'] = keras_layer['class_name']

        # Extract type of activation and number of nodes
        for config, config_value in keras_layer["config"].items():
            if (config == "activation"):
                layer['activation'] = config_value
            if (config == "epsilon"):
                layer['epsilon'] = config_value
            # if(config=="units"):
            # print("PARSED NUM OF NODES",config_value)

        # Translate weights and biases from h5 file
        if (layer['class_name'] != 'BatchNormalization'
                and layer['class_name'] not in activation_layers
                and 'Pooling' not in layer['class_name']):
            found_weights = h5File[layer['name']].visit(find_kernel_in_h5)
            weights = h5File['/{}/{}'.format(layer['name'], found_weights)][()]
            cur_n_zeros = print_array_to_cpp("w{}".format(layer_counter), weights, yamlConfig['OutputDir'], quantize)
            found_bias = h5File[layer['name']].visit(find_bias_in_h5)
            if found_bias:
                biases = h5File['/{}/{}'.format(layer['name'], found_bias)][()]
            else:
                biases = np.zeros(weights.shape[1])
            print_array_to_cpp("b{}".format(layer_counter), biases, yamlConfig['OutputDir'])
            layer['weights_n_zeros'] = cur_n_zeros
        elif layer['class_name'] == 'BatchNormalization':
            cur_n_zeros = []
            layer['weights_n_zeros'] = cur_n_zeros
            found_beta = h5File[layer['name']].visit(find_beta_in_h5)
            beta = h5File['/{}/{}'.format(layer['name'], found_beta)][()]
            print_array_to_cpp("beta{}".format(layer_counter), beta, yamlConfig['OutputDir'])
            found_mean = h5File[layer['name']].visit(find_moving_mean_in_h5)
            mean = h5File['/{}/{}'.format(layer['name'], found_mean)][()]
            print_array_to_cpp("mean{}".format(layer_counter), mean, yamlConfig['OutputDir'])
            found_gamma = h5File[layer['name']].visit(find_gamma_in_h5)
            gamma = h5File['/{}/{}'.format(layer['name'], found_gamma)][()]
            found_var = h5File[layer['name']].visit(find_moving_variance_in_h5)
            var = h5File['/{}/{}'.format(layer['name'], found_var)][()]
            var = var + layer['epsilon']
            scale = gamma / np.sqrt(var)
            print_array_to_cpp("scale{}".format(layer_counter), scale, yamlConfig['OutputDir'])

        # Skip activation layers if possible
        skip_layer = False
        # Default one layer call
        layer['n_part'] = 1
        # Get number of inputs and outputs
        # (We take it from the weights to avoid dealing with InputLayer and Flatten details)
        if layer['class_name'] in ('Dense', 'BinaryDense', 'TernaryDense'):
            layer['n_in'] = weights.shape[0]
            layer['n_out'] = weights.shape[1]
            # if this layer is too big (more than MAXMULT multiplications);
            # break it out into chunks!
            layer['n_subout'] = [weights.shape[1]]
            if layer['n_in'] * layer['n_out'] > MAXMULT:
                n_subout = int(MAXMULT / layer['n_in'])
                n_totout = 0
                layer['n_subout'] = []
                layer['weights_n_subzeros'] = []
                layer['n_part'] = 0
                while n_totout < layer['n_out']:
                    if n_totout + n_subout <= layer['n_out']:
                        layer['n_subout'].append(n_subout)
                        n_totout += n_subout
                    else:
                        layer['n_subout'].append(layer['n_out'] - n_totout)
                        n_totout += layer['n_out'] - n_totout
                    layer['n_part'] += 1
                for i_part in range(0, layer['n_part']):
                    i_subout = 0
                    if i_part > 0:
                        i_subout = sum(layer['n_subout'][0:i_part])
                    cur_n_zeros = print_array_to_cpp("w{}".format(layer_counter), weights, yamlConfig['OutputDir'],
                                                     quantize, i_part, layer['n_part'], i_subout,
                                                     layer['n_subout'][i_part])
                    print_array_to_cpp("b{}".format(layer_counter), biases, yamlConfig['OutputDir'], i_part,
                                       layer['n_part'], i_subout, layer['n_subout'][i_part])
                    layer['weights_n_subzeros'].append(cur_n_zeros)
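                # Worked example (MAXMULT is a module-level constant not
                # shown here): if MAXMULT were 4096, a Dense layer with
                # n_in = 128 and n_out = 64 has 8192 multiplications, so
                # n_subout = 4096 // 128 = 32 and the layer is split into
                # n_part = 2 sublayer calls with n_subout = [32, 32].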

            current_shape = [current_shape[0], layer['n_out']]
        elif layer['class_name'] == 'Conv1D':
            # weights.shape = (filter_width, n_channels, n_filters)
            layer['y_in'] = current_shape[1]
            layer['y_filt'] = weights.shape[0]  # or keras_layer['config']['kernel_size']
            layer['n_chan'] = weights.shape[1]
            layer['n_filt'] = weights.shape[2]  # or keras_layer['config']['filters']
            layer['stride'] = keras_layer['config']['strides'][0]
            layer['padding'] = keras_layer['config']['padding']
            if layer['padding'] == 'same':
                in_width = current_shape[1]
                layer['y_out'] = int(math.ceil(float(in_width) / float(layer['stride'])))
                if (in_width % layer['stride'] == 0):
                    pad_along_width = max(layer['y_filt'] - layer['stride'], 0)
                else:
                    pad_along_width = max(layer['y_filt'] - (in_width % layer['stride']), 0)
                layer['pad_left'] = pad_along_width // 2
                layer['pad_right'] = pad_along_width - layer['pad_left']
            elif layer['padding'] == 'valid':
                in_width = current_shape[1]
                layer['y_out'] = int(math.ceil(float(in_width - layer['y_filt'] + 1) / float(layer['stride'])))
                layer['pad_left'] = 0
                layer['pad_right'] = 0
            current_shape = [current_shape[0], layer['y_out'], layer['n_filt']]
        elif layer['class_name'] == 'Conv2D':
            layer['in_height'] = current_shape[1]
            layer['in_width'] = current_shape[2]
            layer['filt_height'] = weights.shape[0]
            layer['filt_width'] = weights.shape[1]
            layer['n_chan'] = weights.shape[2]
            layer['n_filt'] = weights.shape[3]
            layer['stride_height'] = keras_layer['config']['strides'][0]
            layer['stride_width'] = keras_layer['config']['strides'][1]
            layer['padding'] = keras_layer['config']['padding']
            if layer['padding'] == 'same':
                # Height
                in_height = current_shape[1]
                layer['out_height'] = int(math.ceil(float(in_height) / float(layer['stride_height'])))
                if (in_height % layer['stride_height'] == 0):
                    pad_along_height = max(layer['filt_height'] - layer['stride_height'], 0)
                else:
                    pad_along_height = max(layer['filt_height'] - (in_height % layer['stride_height']), 0)
                layer['pad_top'] = pad_along_height // 2
                layer['pad_bottom'] = pad_along_height - layer['pad_top']
                # Width
                in_width = current_shape[2]
                layer['out_width'] = int(math.ceil(float(in_width) / float(layer['stride_width'])))
                if (in_width % layer['stride_width'] == 0):
                    pad_along_width = max(layer['filt_width'] - layer['stride_width'], 0)
                else:
                    pad_along_width = max(layer['filt_width'] - (in_width % layer['stride_width']), 0)
                layer['pad_left'] = pad_along_width // 2
                layer['pad_right'] = pad_along_width - layer['pad_left']
            elif layer['padding'] == 'valid':
                in_height = current_shape[1]
                in_width = current_shape[2]
                layer['out_width'] = int(
                    math.ceil(float(in_width - layer['filt_width'] + 1) / float(layer['stride_width'])))
                layer['out_height'] = int(
                    math.ceil(float(in_height - layer['filt_height'] + 1) / float(layer['stride_height'])))
                layer['pad_top'] = 0
                layer['pad_bottom'] = 0
                layer['pad_left'] = 0
                layer['pad_right'] = 0
            current_shape = [current_shape[0], layer['out_height'], layer['out_width'], layer['n_filt']]
        elif layer['class_name'] == 'BatchNormalization':
            if is_dense:
                layer['n_in'] = mean.shape[0]
                layer['n_out'] = mean.shape[0]
                layer['n_filt'] = -1
                current_shape = [current_shape[0], layer['n_out']]
            elif is_conv2d:
                layer['n_in'] = current_shape[1] * current_shape[2] * current_shape[3]
                layer['n_out'] = layer['n_in']
                layer['in_height'] = current_shape[1]
                layer['in_width'] = current_shape[2]
                layer['n_filt'] = current_shape[3]
                current_shape = [current_shape[0], layer['in_height'], layer['in_width'], layer['n_filt']]
        elif 'Pooling' in layer['class_name']:
            info = layer['class_name'].split('Pooling')
            d = int(info[1].split('D')[0])
            op = info[0]
            if d == 1:
                layer['pool_size'] = keras_layer['config']['pool_size']
                layer['stride'] = keras_layer['config']['strides']
            elif d == 2:
                layer['in_height'] = current_shape[1]
                layer['in_width'] = current_shape[2]
                layer['n_filt'] = layer_list[-1]['n_filt']
                layer['stride_height'] = keras_layer['config']['strides'][0]
                layer['stride_width'] = keras_layer['config']['strides'][1]
                layer['pool_height'] = keras_layer['config']['pool_size'][0]
                layer['pool_width'] = keras_layer['config']['pool_size'][1]
                layer['padding'] = keras_layer['config']['padding']
            if layer['padding'] == 'same':
                # Height
                in_height = current_shape[1]
                layer['out_height'] = int(math.ceil(float(in_height) / float(layer['stride_height'])))
                if (in_height % layer['stride_height'] == 0):
                    pad_along_height = max(layer['pool_height'] - layer['stride_height'], 0)
                else:
                    pad_along_height = max(layer['pool_height'] - (in_height % layer['stride_height']), 0)
                layer['pad_top'] = pad_along_height // 2
                layer['pad_bottom'] = pad_along_height - layer['pad_top']
                # Width
                in_width = current_shape[2]
                layer['out_width'] = int(math.ceil(float(in_width) / float(layer['stride_width'])))
                if (in_width % layer['stride_width'] == 0):
                    pad_along_width = max(layer['pool_width'] - layer['stride_width'], 0)
                else:
                    pad_along_width = max(layer['pool_width'] - (in_width % layer['stride_width']), 0)
                layer['pad_left'] = pad_along_width // 2
                layer['pad_right'] = pad_along_width - layer['pad_left']
                layer['n_out'] = layer['out_height'] * layer['out_width'] * layer['n_filt']
            elif layer['padding'] == 'valid':
                in_height = current_shape[1]
                in_width = current_shape[2]
                layer['out_width'] = int(
                    math.ceil(float(in_width - layer['pool_width'] + 1) / float(layer['stride_width'])))
                layer['out_height'] = int(
                    math.ceil(float(in_height - layer['pool_height'] + 1) / float(layer['stride_height'])))
                layer['pad_top'] = 0
                layer['pad_bottom'] = 0
                layer['pad_left'] = 0
                layer['pad_right'] = 0
                layer['n_out'] = layer['out_height'] * layer['out_width'] * layer['n_filt']
            current_shape = [current_shape[0], layer['out_height'], layer['out_width'], layer['n_filt']]

        elif layer['class_name'] == 'Activation':
            if layer_list[-1]['class_name'] != 'BatchNormalization':
                layer_list[-1]['activation'] = layer['activation']
                skip_layer = True
                layer_counter = layer_counter - 1
        elif layer['class_name'] == 'LeakyReLU':
            if layer_list[-1]['class_name'] != 'BatchNormalization':
                layer_list[-1]['activation'] = layer['class_name']
                layer_list[-1]['activ_param'] = keras_layer["config"].get('alpha', 0.3)
                skip_layer = True
                layer_counter = layer_counter - 1
            else:
                layer['activation'] = layer['class_name']
                layer['activ_param'] = keras_layer["config"].get('alpha', 0.3)
        elif layer['class_name'] == 'ThresholdedReLU':
            if layer_list[-1]['class_name'] != 'BatchNormalization':
                layer_list[-1]['activation'] = layer['class_name']
                layer_list[-1]['activ_param'] = keras_layer["config"].get('theta', 1.)
                skip_layer = True
                layer_counter = layer_counter - 1
            else:
                layer['activation'] = layer['class_name']
                layer['activ_param'] = keras_layer["config"].get('theta', 1.)

        elif layer['class_name'] == 'Softmax':
            if layer_list[-1]['class_name'] != 'BatchNormalization':
                layer_list[-1]['activation'] = layer['class_name']
                skip_layer = True
                layer_counter = layer_counter - 1
            else:
                layer['activation'] = layer['class_name']

        elif layer['class_name'] == 'ELU':
            if layer_list[-1]['class_name'] != 'BatchNormalization':
                layer_list[-1]['activation'] = layer['class_name']
                layer_list[-1]['activ_param'] = keras_layer["config"].get('alpha', 1.)
                skip_layer = True
                layer_counter = layer_counter - 1
            else:
                layer['activation'] = layer['class_name']
                layer['activ_param'] = keras_layer["config"].get('alpha', 1.)
        elif layer['class_name'] == 'PReLU':
            if layer_list[-1]['class_name'] != 'BatchNormalization':
                layer_list[-1]['activation'] = layer['class_name']
                skip_layer = True
                layer_counter = layer_counter - 1
            else:
                layer['activation'] = layer['class_name']

            # Translate learned alpha array from h5 file
            weights = h5File['/{}/{}/alpha:0'.format(layer['name'], layer['name'])][()]
            print_array_to_cpp("a{}".format(layer_counter), weights, yamlConfig['OutputDir'])

        if not skip_layer:
            print('Layer name: {}, layer type: {}, current shape: {}, number of zeros: {}'.format(layer['name'],
                                                                                                  layer['class_name'],
                                                                                                  current_shape,
                                                                                                  cur_n_zeros))
            if layer['n_part'] > 1:
                print(' -> layer will be divided into {} sublayer calls; output neurons: {} '.format(layer['n_part'],
                                                                                                     layer['n_subout']))
            layer_list.append(layer)

    #################
    ## Generate HLS
    #################

    # Weights and biases are already dumped to output directory
    # Now generate HLS from list of layer dictionaries
    hls_writer(layer_list, yamlConfig)
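
The find_*_in_h5 helpers used above are small visitor callbacks for h5py's
Group.visit, which calls the function on every member name and returns the
first non-None result. A minimal sketch, assuming the HDF5 dataset names
follow the usual Keras conventions ('kernel:0', 'bias:0', 'beta:0',
'gamma:0', 'moving_mean:0', 'moving_variance:0'); the remaining helpers
follow the same pattern:

def find_kernel_in_h5(name):
    # Group.visit stops and returns this name as soon as it is non-None
    if 'kernel' in name:
        return name

def find_bias_in_h5(name):
    if 'bias' in name:
        return name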
Example #4
import argparse
import json
import math
import os

import numpy as np

# parse_config, get_weights_shape, KerasDataReader, HLSModel and
# write_hls are project-local helpers, assumed to be importable alongside
# this script.


def main():

    # Parse command line arguments
    parser = argparse.ArgumentParser(description='')
    parser.add_argument("-c",
                        action='store',
                        dest='config',
                        help="Configuration file.")
    args = parser.parse_args()
    if not args.config:
        parser.error('A configuration file needs to be specified.')

    configDir = os.path.abspath(os.path.dirname(args.config))
    yamlConfig = parse_config(args.config)
    if not os.path.isabs(yamlConfig['OutputDir']):
        yamlConfig['OutputDir'] = os.path.join(configDir,
                                               yamlConfig['OutputDir'])
    if not os.path.isabs(yamlConfig['KerasH5']):
        yamlConfig['KerasH5'] = os.path.join(configDir, yamlConfig['KerasH5'])
    if not os.path.isabs(yamlConfig['KerasJson']):
        yamlConfig['KerasJson'] = os.path.join(configDir,
                                               yamlConfig['KerasJson'])

    if not (yamlConfig["IOType"] == "io_parallel"
            or yamlConfig["IOType"] == "io_serial"):
        raise Exception('ERROR: Invalid IO type')

    ######################
    ##  Do translation
    ######################
    if not os.path.isdir("{}/firmware/weights".format(
            yamlConfig['OutputDir'])):
        os.makedirs("{}/firmware/weights".format(yamlConfig['OutputDir']))

    #This is a list of dictionaries to hold all the layer info we need to generate HLS
    layer_list = []

    #Extract model architecture from json
    with open(yamlConfig['KerasJson']) as json_file:
        model_arch = json.load(json_file)
    #print(model_arch)

    #Define supported layers
    core_layers = [
        'InputLayer', 'Dropout', 'Flatten', 'Dense', 'BinaryDense',
        'TernaryDense'
    ]
    conv_layers = ['Conv1D', 'Conv2D']
    pooling_layers = [
        'MaxPooling1D', 'MaxPooling2D', 'AveragePooling1D', 'AveragePooling2D'
    ]
    norm_layers = ['BatchNormalization']
    activation_layers = [
        'Activation', 'LeakyReLU', 'ThresholdedReLU', 'ELU', 'PReLU'
    ]
    merge_layers = [
        'Add', 'Subtract', 'Multiply', 'Average', 'Maximum', 'Minimum',
        'Concatenate'
    ]
    supported_layers = core_layers + conv_layers + pooling_layers + norm_layers + activation_layers + merge_layers

    #Define layers to skip for conversion to HLS
    skip_layers = ['Dropout', 'Flatten']
    #Map inputs of skipped and split (activation) layers
    inputs_map = {}

    #Loop through layers
    layer_counter = 0

    input_layers = None
    output_layers = None

    layer_config = None
    if model_arch['class_name'] == 'Sequential':
        print('Interpreting Sequential')
        layer_config = model_arch["config"]
        # Sequential doesn't have InputLayer
        input_layer = {}
        input_layer['name'] = 'input1'
        input_layer['class_name'] = 'InputLayer'
        input_layer['input_shape'] = layer_config[0]['config'][
            'batch_input_shape'][1:]
        layer_list.append(input_layer)
        print('Input shape:', input_layer['input_shape'])
    elif model_arch['class_name'] == 'Model':
        print('Interpreting Model')
        layer_config = model_arch["config"]["layers"]
        input_layers = [inp[0] for inp in model_arch["config"]["input_layers"]]
        output_layers = [
            out[0] for out in model_arch["config"]["output_layers"]
        ]

    # Get input shape and check for unsupported layer type
    current_shape = None
    for keras_layer in layer_config:
        if keras_layer["class_name"] not in supported_layers:
            raise Exception('ERROR: Unsupported layer type: {}'.format(
                keras_layer["class_name"]))
        if 'batch_input_shape' in keras_layer['config']:
            current_shape = keras_layer['config'][
                'batch_input_shape']  # [None, 100, 7]

    print('Topology:')
    for keras_layer in layer_config:
        if keras_layer["class_name"] is 'Flatten':
            current_shape = [current_shape[0], np.prod(current_shape[1:])]
        if keras_layer["class_name"] in skip_layers:
            if 'inbound_nodes' in keras_layer:
                name = keras_layer['config']['name']
                #Currently supported skipped layers have only one input
                parent_input = keras_layer['inbound_nodes'][0][0][0]
                #Skipped layers can follow each other (e.g., Dropout -> Flatten)
                inputs_map[name] = inputs_map.get(parent_input, parent_input)
            continue

        if keras_layer["class_name"] in supported_layers:
            layer_counter = layer_counter + 1

        #Dictionary to fill in and append to layer_list
        layer = {}

        #Extract name for finding weights and biases
        layer['name'] = keras_layer['config']['name']
        layer['class_name'] = keras_layer['class_name']

        #Extract inbound nodes
        if 'inbound_nodes' in keras_layer and len(
                keras_layer['inbound_nodes']) > 0:
            layer['inputs'] = [
                inputs_map.get(inp[0], inp[0])
                for inp in keras_layer['inbound_nodes'][0]
            ]

        #Extract type of activation and number of nodes
        for config, config_value in keras_layer["config"].items():
            if (config == "activation"):
                layer['activation'] = config_value
            if (config == "epsilon"):
                layer['epsilon'] = config_value
            #if(config=="units"):
            #print("PARSED NUM OF NODES",config_value)

        # Default one layer call
        if layer['class_name'] == 'InputLayer':
            layer['input_shape'] = keras_layer['config']['batch_input_shape'][
                1:]
        if 'Dense' in layer['class_name']:
            weights_shape = get_weights_shape(yamlConfig['KerasH5'],
                                              layer['name'])
            layer['n_in'] = weights_shape[0]
            layer['n_out'] = weights_shape[1]
            if 'Binary' in layer['class_name']:
                layer['quantize'] = 2
            elif 'Ternary' in layer['class_name']:
                layer['quantize'] = 3
            else:
                layer['quantize'] = 0
            current_shape = [current_shape[0], layer['n_out']]
        elif layer['class_name'] == 'Conv1D':
            # weights_shape = (filter_width, n_channels, n_filters)
            weights_shape = get_weights_shape(yamlConfig['KerasH5'],
                                              layer['name'])
            layer['y_in'] = current_shape[1]
            layer['y_filt'] = weights_shape[
                0]  # or keras_layer['config']['kernel_size']
            layer['n_chan'] = weights_shape[1]
            layer['n_filt'] = weights_shape[
                2]  # or keras_layer['config']['filters']
            layer['stride'] = keras_layer['config']['strides'][0]
            layer['padding'] = keras_layer['config']['padding']
            if layer['padding'] == 'same':
                in_width = current_shape[1]
                layer['y_out'] = int(
                    math.ceil(float(in_width) / float(layer['stride'])))
                if (in_width % layer['stride'] == 0):
                    pad_along_width = max(layer['y_filt'] - layer['stride'], 0)
                else:
                    pad_along_width = max(
                        layer['y_filt'] - (in_width % layer['stride']), 0)
                layer['pad_left'] = pad_along_width // 2
                layer['pad_right'] = pad_along_width - layer['pad_left']
            elif layer['padding'] == 'valid':
                in_width = current_shape[1]
                layer['y_out'] = int(
                    math.ceil(
                        float(in_width - layer['y_filt'] + 1) /
                        float(layer['stride'])))
                layer['pad_left'] = 0
                layer['pad_right'] = 0
            current_shape = [current_shape[0], layer['y_out'], layer['n_filt']]
        elif layer['class_name'] == 'Conv2D':
            # weights_shape = (filter_height, filter_width, n_channels, n_filters)
            weights_shape = get_weights_shape(yamlConfig['KerasH5'],
                                              layer['name'])
            layer['in_height'] = current_shape[1]
            layer['in_width'] = current_shape[2]
            layer['filt_height'] = weights_shape[0]
            layer['filt_width'] = weights_shape[1]
            layer['n_chan'] = weights_shape[2]
            layer['n_filt'] = weights_shape[3]
            layer['stride_height'] = keras_layer['config']['strides'][0]
            layer['stride_width'] = keras_layer['config']['strides'][1]
            layer['padding'] = keras_layer['config']['padding']
            if layer['padding'] == 'same':
                #Height
                in_height = current_shape[1]
                layer['out_height'] = int(
                    math.ceil(
                        float(in_height) / float(layer['stride_height'])))
                if (in_height % layer['stride_height'] == 0):
                    pad_along_height = max(
                        layer['filt_height'] - layer['stride_height'], 0)
                else:
                    pad_along_height = max(
                        layer['filt_height'] -
                        (in_height % layer['stride_height']), 0)
                layer['pad_top'] = pad_along_height // 2
                layer['pad_bottom'] = pad_along_height - layer['pad_top']
                #Width
                in_width = current_shape[2]
                layer['out_width'] = int(
                    math.ceil(float(in_width) / float(layer['stride_width'])))
                if (in_width % layer['stride_width'] == 0):
                    pad_along_width = max(
                        layer['filt_width'] - layer['stride_width'], 0)
                else:
                    pad_along_width = max(
                        layer['filt_width'] -
                        (in_width % layer['stride_width']), 0)
                layer['pad_left'] = pad_along_width // 2
                layer['pad_right'] = pad_along_width - layer['pad_left']
            elif layer['padding'] == 'valid':
                in_height = current_shape[1]
                in_width = current_shape[2]
                layer['out_width'] = int(
                    math.ceil(
                        float(in_width - layer['filt_width'] + 1) /
                        float(layer['stride_width'])))
                layer['out_height'] = int(
                    math.ceil(
                        float(in_height - layer['filt_height'] + 1) /
                        float(layer['stride_height'])))
                layer['pad_top'] = 0
                layer['pad_bottom'] = 0
                layer['pad_left'] = 0
                layer['pad_right'] = 0
            current_shape = [
                current_shape[0], layer['out_height'], layer['out_width'],
                layer['n_filt']
            ]
        elif layer['class_name'] == 'BatchNormalization':
            in_size = 1
            for dim in current_shape[1:]:
                in_size *= dim
            layer['n_in'] = in_size
            layer['n_out'] = layer['n_in']
            if len(current_shape) == 2:
                layer['n_filt'] = -1
            elif len(current_shape) == 3:
                layer['n_filt'] = current_shape[2]
            elif len(current_shape) == 4:
                layer['n_filt'] = current_shape[3]
        elif 'Pooling' in layer['class_name']:
            info = layer['class_name'].split('Pooling')
            d = int(info[1].split('D')[0])
            op = info[0]
            if d == 1:
                layer['pool_size'] = keras_layer['config']['pool_size']
                layer['stride'] = keras_layer['config']['strides']
            elif d == 2:
                layer['in_height'] = current_shape[1]
                layer['in_width'] = current_shape[2]
                layer['n_filt'] = current_shape[3]
                layer['stride_height'] = keras_layer['config']['strides'][0]
                layer['stride_width'] = keras_layer['config']['strides'][1]
                layer['pool_height'] = keras_layer['config']['pool_size'][0]
                layer['pool_width'] = keras_layer['config']['pool_size'][1]
                layer['padding'] = keras_layer['config']['padding']
            if layer['padding'] == 'same':
                #Height
                in_height = current_shape[1]
                layer['out_height'] = int(
                    math.ceil(
                        float(in_height) / float(layer['stride_height'])))
                if (in_height % layer['stride_height'] == 0):
                    pad_along_height = max(
                        layer['pool_height'] - layer['stride_height'], 0)
                else:
                    pad_along_height = max(
                        layer['pool_height'] -
                        (in_height % layer['stride_height']), 0)
                layer['pad_top'] = pad_along_height // 2
                layer['pad_bottom'] = pad_along_height - layer['pad_top']
                #Width
                in_width = current_shape[2]
                layer['out_width'] = int(
                    math.ceil(float(in_width) / float(layer['stride_width'])))
                if (in_width % layer['stride_width'] == 0):
                    pad_along_width = max(
                        layer['pool_width'] - layer['stride_width'], 0)
                else:
                    pad_along_width = max(
                        layer['pool_width'] -
                        (in_width % layer['stride_width']), 0)
                layer['pad_left'] = pad_along_width // 2
                layer['pad_right'] = pad_along_width - layer['pad_left']
                layer['n_out'] = layer['out_height'] * layer[
                    'out_width'] * layer['n_filt']
            elif layer['padding'] == 'valid':
                in_height = current_shape[1]
                in_width = current_shape[2]
                layer['out_width'] = int(
                    math.ceil(
                        float(in_width - layer['pool_width'] + 1) /
                        float(layer['stride_width'])))
                layer['out_height'] = int(
                    math.ceil(
                        float(in_height - layer['pool_height'] + 1) /
                        float(layer['stride_height'])))
                layer['pad_top'] = 0
                layer['pad_bottom'] = 0
                layer['pad_left'] = 0
                layer['pad_right'] = 0
                layer['n_out'] = layer['out_height'] * layer[
                    'out_width'] * layer['n_filt']
            current_shape = [
                current_shape[0], layer['out_height'], layer['out_width'],
                layer['n_filt']
            ]

        elif layer['class_name'] == 'LeakyReLU':
            layer['activation'] = layer['class_name']
            layer['activ_param'] = keras_layer["config"].get('alpha', 0.3)
        elif layer['class_name'] == 'ThresholdedReLU':
            layer['activation'] = layer['class_name']
            layer['activ_param'] = keras_layer["config"].get('theta', 1.)
        elif layer['class_name'] == 'ELU':
            layer['activation'] = layer['class_name']
            layer['activ_param'] = keras_layer["config"].get('alpha', 1.)
        elif layer['class_name'] == 'PReLU':
            layer['activation'] = layer['class_name']

        elif layer['class_name'] in merge_layers:
            layer['op'] = layer['class_name'].lower()
            if layer['class_name'] == 'Concatenate':
                rank = len(current_shape[1:])
                if rank > 3:
                    raise Exception(
                        'ERROR: Concatenation of tensors with rank > 3 is not yet supported.'
                    )
                layer['op'] = layer['class_name'].lower() + '{}d'.format(rank)
                layer['axis'] = keras_layer['config']['axis']
            else:
                layer['class_name'] = 'Merge'
            if len(layer['inputs']) > 2:
                raise Exception(
                    'ERROR: Merging more than two tensors is not yet supported.'
                )

        print('Layer name: {}, layer type: {}, current shape: {}'.format(
            layer['name'], layer['class_name'], current_shape))
        layer_list.append(layer)
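        # If this layer has a fused activation (e.g. Dense(...,
        # activation='relu')), split it out into its own layer entry so
        # the HLS writer sees it as a separate node, and remap downstream
        # inputs (and model outputs) to the new activation layer.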
        if 'activation' in layer and layer[
                'class_name'] not in activation_layers:
            act_layer = {}
            act_layer['name'] = layer['name'] + '_' + layer['activation']
            act_layer['activation'] = layer['activation']
            if 'activ_param' in layer:
                act_layer['activ_param'] = layer['activ_param']
                act_layer['class_name'] = layer['activation']
            else:
                act_layer['class_name'] = 'Activation'
            inputs_map[layer['name']] = act_layer['name']
            if output_layers is not None and layer['name'] in output_layers:
                output_layers = [
                    act_layer['name'] if name == layer['name'] else name
                    for name in output_layers
                ]
            layer_list.append(act_layer)

    #################
    ## Generate HLS
    #################

    reader = KerasDataReader(yamlConfig)
    hls_model = HLSModel(yamlConfig, reader, layer_list, input_layers,
                         output_layers)
    write_hls(hls_model)
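
get_weights_shape is another project helper that is not shown. A minimal
sketch, assuming it reads the shape of a layer's kernel dataset straight
from the Keras HDF5 file without loading the values:

import h5py

def get_weights_shape(h5_path, layer_name):
    with h5py.File(h5_path, 'r') as f:
        # Find the kernel dataset nested under the layer's group
        kernel = f[layer_name].visit(
            lambda name: name if 'kernel' in name else None)
        return f['{}/{}'.format(layer_name, kernel)].shape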
Example #5
import argparse
import math
import os

import numpy as np
from onnx import ModelProto, optimizer, shape_inference

# parse_config, ONNXDataReader, sanitize_layer_name, get_input_shape,
# get_onnx_attribute, compute_pads_1d and compute_pads_2d are
# project-local helpers, assumed to be importable alongside this script.


def main():

    # Parse command line arguments
    parser = argparse.ArgumentParser(description='')
    parser.add_argument("-c", action='store', dest='config',
                        help="Configuration file.")
    args = parser.parse_args()
    if not args.config: parser.error('A configuration file needs to be specified.')

    configDir  = os.path.abspath(os.path.dirname(args.config))
    yamlConfig = parse_config(args.config)
    if not os.path.isabs(yamlConfig['OutputDir']):
        yamlConfig['OutputDir'] = os.path.join(configDir, yamlConfig['OutputDir'])
    if not os.path.isabs(yamlConfig['OnnxModel']):
        yamlConfig['OnnxModel'] = os.path.join(configDir, yamlConfig['OnnxModel'])

    if not (yamlConfig["IOType"] == "io_parallel" or yamlConfig["IOType"] == "io_serial"): 
        raise Exception('ERROR: Invalid IO type')

    ######################
    ##  Do translation
    ######################
    if not os.path.isdir("{}/firmware/weights".format(yamlConfig['OutputDir'])):
        os.makedirs("{}/firmware/weights".format(yamlConfig['OutputDir']))

    #This is a list of dictionaries to hold all the layer info we need to generate HLS
    layer_list = []

    #Extract model architecture
    model = ModelProto()
    with open(yamlConfig['OnnxModel'], 'rb') as fid:
        model.ParseFromString(fid.read())
    
    #Define supported layers
    core_operations = ['Gemm', 'BatchNormalization', 'Conv']
    transform_operations = ['Squeeze', 'Unsqueeze', 'Transpose', 'Flatten', 'Identity', 'Reshape']
    pool_operations = ['AveragePool', 'MaxPool']
    merge_operations = ['Add', 'Sub', 'Mul', 'Average', 'Max', 'Min', 'Concat', 'Sum']
    activation_operations = ['Relu', 'Tanh', 'Sigmoid', 'LeakyRelu', 'ThresholdedRelu', 'HardSigmoid', 'Elu', 'Selu', 'PRelu', 'Softmax', 'Softsign', 'Softplus']
    supported_operations = core_operations + transform_operations + pool_operations + merge_operations + activation_operations

    operation_map = {'Gemm': 'Dense', 'Relu': 'Activation', 'Tanh': 'Activation', 'Sigmoid': 'Activation',
                     'LeakyRelu': 'LeakyReLU', 'ThresholdedRelu': 'ThresholdedReLU', 'HardSigmoid': 'Activation',
                     'Elu': 'ELU', 'Selu': 'Activation', 'PRelu': 'PReLU', 'Softmax': 'Activation',
                     'Softsign': 'Activation', 'Softplus': 'Activation', 'Sum': 'Add', 'Sub': 'Subtract',
                     'Max': 'Maximum', 'Min': 'Minimum', 'Mul': 'Multiply', 'Concat': 'Concatenate'}
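    # operation_map translates ONNX operator names into the Keras-style
    # class names the downstream HLS writer already understands; anything
    # not listed keeps its ONNX name.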
    
    #Define layers to skip for conversion to HLS
    skip_layers = ['Squeeze', 'Unsqueeze', 'Dropout', 'Identity', 'Flatten', 'Transpose', 'Reshape'] 
    #Map inputs of skipped layers
    inputs_map = {}

    passes = ['fuse_transpose_into_gemm', 'fuse_matmul_add_bias_into_gemm', 'eliminate_nop_transpose', 'fuse_consecutive_transposes']
    model = shape_inference.infer_shapes(model) # have to infer shapes before optimizing the model
    model = optimizer.optimize(model, passes)
    model = shape_inference.infer_shapes(model) # have to infer shapes before optimizing the model
    
    reader = ONNXDataReader(model)

    #Loop through layers
    layer_counter = 0
    all_inputs = [x.name for x in model.graph.input]
    all_initializers = [x.name for x in model.graph.initializer]
    input_layers = [x for x in all_inputs if x not in all_initializers]
    output_layers = [x.name for x in model.graph.output]
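    # model.graph.input lists the weight initializers alongside the real
    # network inputs, so the true input tensors are the ones that do not
    # appear in graph.initializer.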

    for i, inp in enumerate(input_layers):
        input_layer = {}
        input_layer['name'] = inp
        input_layer['class_name'] = 'InputLayer'
        inp_shape = next((x.type.tensor_type.shape.dim for x in model.graph.input if x.name == inp), None)
        input_layer['input_shape'] = [x.dim_value for x in inp_shape]
        if len(input_layer['input_shape']) > 1:
            input_layer['input_shape'][0] = None

        input_layer['outputs'] = [inp]

        sanitize_layer_name(input_layer)
        input_layers[i] = input_layer['name']
        layer_list.append(input_layer)

    # Check for unsupported layer type
    for operation in model.graph.node:
        if operation.op_type not in supported_operations:
            raise Exception('ERROR: Unsupported operation type: {}'.format(operation.op_type))
    
    # Get input shape
    current_shape = [d.dim_value for d in model.graph.input[0].type.tensor_type.shape.dim]
    print('Input shape:', current_shape)

    print('Topology:')
    for operation in model.graph.node:
        if operation.op_type == 'Flatten':
            current_shape = [current_shape[0], np.prod(current_shape[1:])]
        if operation.op_type in skip_layers:
            #Currently supported skipped layers have only one input and output
            #Skipped layers can follow each other (e.g., Dropout -> Flatten)
            input_name = inputs_map.get(operation.input[0], operation.input[0])
            output_name = operation.output[0]
            inputs_map[output_name] = input_name
            continue 

        if operation.op_type in supported_operations:
            layer_counter = layer_counter + 1

        #Dictionary to fill in and append to layer_list
        layer = {}

        #Extract name for finding weights and biases
        if operation.name:
            layer['name'] = operation.name
        else:
            layer['name'] = operation.op_type + str(layer_counter)
        layer['class_name'] = operation_map.get(operation.op_type, operation.op_type)
        layer['inputs'] = [ inputs_map.get(operation.input[0], operation.input[0]) ]
        layer['outputs'] = [x for x in operation.output]

        #Extract type of activation
        if operation.op_type in activation_operations:
            layer['activation'] = operation.op_type.lower()
            if layer_list[-1]['class_name'] != 'BatchNormalization':
                layer_list[-1]['activation'] = operation.op_type.lower()
        
        #Get number of inputs and outputs
        #(We take it from the weights to avoid dealing with InputLayer and Flatten details)
        if layer['class_name'] == 'Dense':
            current_shape = get_input_shape(model, operation)
            layer['n_in'] = next((x.type.tensor_type.shape.dim[-1].dim_value for x in model.graph.input if x.name == operation.input[0]), None)
            layer['n_out'] = next((x.type.tensor_type.shape.dim[-1].dim_value for x in model.graph.value_info if x.name == operation.output[0]), None)
            tran_weight = get_onnx_attribute(operation, 'transB', 0)
            reader.add_input(layer['name'], operation.input, tran_weight)
            
            current_shape = [current_shape[0], layer['n_out']]
        elif layer['class_name']=='Conv':
            current_shape = get_input_shape(model, operation)
            strides = get_onnx_attribute(operation, 'strides')
            kernel_shape = get_onnx_attribute(operation, 'kernel_shape')

            if len(current_shape) == 3: # Conv1D
                layer['class_name'] = 'Conv1D'
                reader.add_input(layer['name'], operation.input)

                layer['y_in']=current_shape[2]
                layer['y_filt']=kernel_shape[0]
                layer['n_chan']=current_shape[1]
                layer['n_filt']=next((x.type.tensor_type.shape.dim[1].dim_value for x in model.graph.value_info if x.name == operation.output[0]), None)
                layer['stride']=strides[0]
                pads = compute_pads_1d(operation, layer)

                layer['pad_left'] = pads[0]
                layer['pad_right'] = pads[1]
                if all(x == 0 for x in pads): # No padding, i.e., 'VALID' padding
                    layer['y_out'] = int(math.ceil(float(layer['y_in'] - layer['y_filt'] + 1) / float(layer['stride'])))
                else:
                    layer['y_out'] = int(math.ceil(float(layer['y_in']) / float(layer['stride'])))

                current_shape=[current_shape[0], layer['n_filt'], layer['y_out']]
            elif len(current_shape) == 4: # Conv2D
                layer['class_name'] = 'Conv2D'
                reader.add_input(layer['name'], operation.input, transpose=True, perm=[2, 3, 1, 0])

                layer['in_height']=current_shape[2]
                layer['in_width']=current_shape[3]
                layer['filt_height']=kernel_shape[0]
                layer['filt_width']=kernel_shape[1]
                layer['n_chan']=current_shape[1]
                layer['n_filt']=next((x.type.tensor_type.shape.dim[1].dim_value for x in model.graph.value_info if x.name == operation.output[0]), None)
                layer['stride_height'] = strides[0]
                layer['stride_width'] = strides[1]
                pads = compute_pads_2d(operation, layer)
                
                layer['pad_top'] = pads[0]
                layer['pad_bottom'] = pads[2]
                layer['pad_left'] = pads[1]
                layer['pad_right'] = pads[3]

                if all(x == 0 for x in pads): # No padding, i.e., 'VALID' padding in Keras/Tensorflow
                    layer['out_width'] = int(math.ceil(float(layer['in_width'] - layer['filt_width'] + 1) / float(layer['stride_width'])))
                    layer['out_height'] = int(math.ceil(float(layer['in_height'] - layer['filt_height'] + 1) / float(layer['stride_height'])))
                else:
                    layer['out_height'] = int(math.ceil(float(layer['in_height']) / float(layer['stride_height'])))
                    layer['out_width'] = int(math.ceil(float(layer['in_width']) / float(layer['stride_width'])))
                
                current_shape=[current_shape[0], layer['n_filt'], layer['out_height'], layer['out_width']]
        elif layer['class_name']=='BatchNormalization':
            layer['epsilon'] = get_onnx_attribute(operation, 'epsilon')
            layer['momentum'] = get_onnx_attribute(operation, 'momentum')
            
            reader.add_input(layer['name'], operation.input)
            
            in_size = 1
            for dim in current_shape[1:]:
                in_size *= dim
            layer['n_in'] = in_size
            layer['n_out'] = layer['n_in']
            if len(current_shape) == 2:
                layer['n_filt'] = -1
            else:
                layer['n_filt']=current_shape[1]
        elif layer['class_name'] in pool_operations:
            current_shape = get_input_shape(model, operation)
            info = layer['class_name'].replace('Pool', '')
            strides = get_onnx_attribute(operation, 'strides')
            kernel_shape = get_onnx_attribute(operation, 'kernel_shape')
            if len(current_shape) == 3: # 1D
                layer['class_name'] = info + 'Pooling1D'
                layer['y_in'] = current_shape[2]
                layer['n_filt'] = current_shape[1]
                layer['stride'] = strides[0]
                layer['pool_size'] = layer['y_filt'] = kernel_shape[0]
                pads = compute_pads_1d(operation, layer)
                layer['pad_left'] = pads[0]
                layer['pad_right'] = pads[1]

                if all(x == 0 for x in pads): # No padding, i.e., 'VALID' padding
                    layer['n_out'] = int(math.ceil(float(layer['y_in'] - layer['y_filt'] + 1) / float(layer['stride'])))
                else:
                    layer['n_out'] = int(math.ceil(float(layer['y_in']) / float(layer['stride'])))

                current_shape=[current_shape[0], layer['n_filt'], layer['n_out']]
            elif len(current_shape) == 4: # 2D
                layer['class_name'] = info + 'Pooling2D'
                
                layer['n_filt'] = current_shape[1]
                layer['in_height'] = current_shape[2]
                layer['in_width'] = current_shape[3]
                
                layer['stride_height'] = strides[0]
                layer['stride_width'] = strides[1]
                layer['pool_height'] = layer['filt_height'] = kernel_shape[0]
                layer['pool_width'] = layer['filt_width'] = kernel_shape[1]
                
                pads = compute_pads_2d(operation, layer)
                layer['pad_top'] = pads[0]
                layer['pad_bottom'] = pads[2]
                layer['pad_left'] = pads[1]
                layer['pad_right'] = pads[3]

                if all(x == 0 for x in pads): # No padding, i.e., 'VALID' padding in Keras/Tensorflow
                    layer['out_width'] = int(math.ceil(float(layer['in_width'] - layer['filt_width'] + 1) / float(layer['stride_width'])))
                    layer['out_height'] = int(math.ceil(float(layer['in_height'] - layer['filt_height'] + 1) / float(layer['stride_height'])))
                else:
                    layer['out_height'] = int(math.ceil(float(layer['in_height']) / float(layer['stride_height'])))
                    layer['out_width'] = int(math.ceil(float(layer['in_width']) / float(layer['stride_width'])))

                layer['n_out'] = layer['out_height'] * layer['out_width'] * layer['n_filt']
                current_shape=[current_shape[0], layer['n_filt'], layer['out_height'], layer['out_width']]
        elif layer['class_name'] in ['ELU', 'LeakyReLU', 'ThresholdedReLU']:
            layer['activation'] = layer['class_name']
            layer['activ_param'] = get_onnx_attribute(operation, 'alpha', 0.01)
        elif layer['class_name']=='PReLU':
            layer['activation'] = layer['class_name']

        elif layer['class_name'] in [operation_map.get(op, op) for op in merge_operations]:
            layer['op'] = layer['class_name'].lower()
            if layer['class_name'] == 'Concatenate':
                rank = len(current_shape[1:])
                if rank > 3:
                    raise Exception('ERROR: Concatenation of tensors with rank > 3 is not yet supported.')
                layer['op'] = layer['class_name'].lower() + '{}d'.format(rank)
                layer['axis'] = get_onnx_attribute(operation, 'axis')
            else:
                layer['class_name'] = 'Merge'
            layer['inputs'] = [inputs_map.get(x, x) for x in operation.input]
            if len(layer['inputs']) > 2:
                raise Exception('ERROR: Merging more than two tensors is not yet supported.')

        sanitize_layer_name(layer)
        print('Layer name: {}, layer type: {}, current shape: {}'.format(layer['name'], layer['class_name'], current_shape))
        layer_list.append( layer )


    #################
    ## Generate HLS
    #################

    hls_model = HLSModel(yamlConfig, reader, layer_list, input_layers, output_layers)
    write_hls(hls_model)
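
The helpers compute_pads_1d / compute_pads_2d used above are not part of this excerpt. For context, here is a minimal sketch of what compute_pads_1d could look like under the TensorFlow-style SAME/VALID convention that the comments above reference; the auto_pad/pads attribute handling and the exact return format are assumptions, not the library's confirmed implementation:

def compute_pads_1d(operation, layer):
    # Illustrative sketch only. Assumes get_onnx_attribute() as used above,
    # and that layer already carries 'y_in', 'y_filt' and 'stride'.
    auto_pad = get_onnx_attribute(operation, 'auto_pad', 'NOTSET')
    if auto_pad in ('SAME_UPPER', 'SAME_LOWER'):
        # TensorFlow 'same' convention: pad so that out = ceil(in / stride)
        if layer['y_in'] % layer['stride'] == 0:
            pad_along = max(layer['y_filt'] - layer['stride'], 0)
        else:
            pad_along = max(layer['y_filt'] - (layer['y_in'] % layer['stride']), 0)
        pad_left = pad_along // 2
        pad_right = pad_along - pad_left
        if auto_pad == 'SAME_LOWER':
            # SAME_LOWER puts the extra padding at the beginning
            pad_left, pad_right = pad_right, pad_left
        return [pad_left, pad_right]
    # Explicit 'pads' attribute, or VALID / NOTSET without pads (no padding)
    return get_onnx_attribute(operation, 'pads', [0, 0])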
Example #6
def main():

    # Parse command line arguments
    parser = argparse.ArgumentParser(description='')
    parser.add_argument("-c",
                        action='store',
                        dest='config',
                        default="pytorch-config.yml",
                        help="Configuration file.")
    args = parser.parse_args()
    if not args.config:
        parser.error('A configuration file needs to be specified.')

    configDir = os.path.abspath(os.path.dirname(args.config))
    yamlConfig = parse_config(args.config)
    if not os.path.isabs(yamlConfig['OutputDir']):
        yamlConfig['OutputDir'] = os.path.join(configDir,
                                               yamlConfig['OutputDir'])
    if not os.path.isabs(yamlConfig['PytorchModel']):
        yamlConfig['PytorchModel'] = os.path.join(configDir,
                                                  yamlConfig['PytorchModel'])

    if not (yamlConfig["IOType"] == "io_parallel"
            or yamlConfig["IOType"] == "io_serial"):
        raise Exception('ERROR: Invalid IO type')

    ######################
    ##  Do translation
    ######################
    if not os.path.isdir("{}/firmware/weights".format(
            yamlConfig['OutputDir'])):
        os.makedirs("{}/firmware/weights".format(yamlConfig['OutputDir']))

    if not torch.cuda.is_available():
        t = torch.load(yamlConfig['PytorchModel'],
                       map_location=lambda storage, loc: storage)
    else:
        t = torch.load(yamlConfig['PytorchModel'])

    modelstr = repr(t).split('\n')
    modeldict = t.state_dict()

    #This is a list of dictionaries to hold all the layer info we need to generate HLS
    layer_list = []

    matchlayer = re.compile(r"^\s*(\d):\s\W*")
    #Loop through layers
    layer_counter = 1
    for i, pytorch_layer in enumerate(modelstr):
        Nlayer = -1
        NlayerMatch = re.search(r"\((\d)\):\s", pytorch_layer)
        if NlayerMatch is not None:
            print(pytorch_layer, NlayerMatch.group(1))
            Nlayer = NlayerMatch.group(1)

        layerFun = pytorch_layer.split(":")[-1]

        matchname = re.match(
            r"(\w+)\(in_features=(\d+), out_features=(\d+).*\)",
            layerFun.strip())
        if matchname is None:
            continue

        # #Dictionary to fill in and append to layer_list
        layer = {}

        ## Extract name for finding weights and biases
        ## Only support Dense networks for now. Will update this later for others
        layer['class_name'] = "Dense"

        # #Get number of inputs and outputs
        layer["n_in"] = int(matchname.group(2))
        layer["n_out"] = int(matchname.group(3))

        # number of sublayer calls
        layer["n_part"] = 1

        # #Extract type of activation (the [:-2] strips the trailing "()" from e.g. "ReLU()")
        layer["activation"] = modelstr[i + 1].split(":")[-1].strip().lower()[:-2]

        # Translate weights and biases from the state dict
        # (PyTorch Linear stores weights as (out_features, in_features), hence the transpose)
        weights = modeldict[Nlayer + ".weight"].numpy().transpose()
        biases = modeldict[Nlayer + ".bias"].numpy().transpose()
        cur_n_zeros = print_array_to_cpp("w{}".format(layer_counter), weights,
                                         yamlConfig['OutputDir'])
        print_array_to_cpp("b{}".format(layer_counter), biases,
                           yamlConfig['OutputDir'])
        layer['weights_n_zeros'] = cur_n_zeros

        layer_list.append(layer)


        layer_counter = layer_counter + 1

    #################
    ## Generate HLS
    #################

    #Weights and biases are already dumped to output directory
    #Now generate HLS from list of layer dictionaries
    hls_writer(layer_list, yamlConfig)
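
The parsing above works on the textual repr of the model rather than the module graph. For reference, a small Sequential model (shapes illustrative) prints as follows; this is what the `(\d):` and `in_features=..., out_features=...` patterns match, and its state_dict keys drive the weight lookup:

import torch.nn as nn

model = nn.Sequential(
    nn.Linear(in_features=16, out_features=64),
    nn.ReLU(),
    nn.Linear(in_features=64, out_features=1),
)
print(repr(model))
# Sequential(
#   (0): Linear(in_features=16, out_features=64, bias=True)
#   (1): ReLU()
#   (2): Linear(in_features=64, out_features=1, bias=True)
# )
print(list(model.state_dict().keys()))
# ['0.weight', '0.bias', '2.weight', '2.bias']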
Example #8
def main():

    # Parse command line arguments
    parser = argparse.ArgumentParser(description='')
    parser.add_argument("-c", action='store', dest='config',
                        help="Configuration file.")
    args = parser.parse_args()
    if not args.config: parser.error('A configuration file needs to be specified.')

    configDir  = os.path.abspath(os.path.dirname(args.config))
    yamlConfig = parse_config(args.config)
    if not os.path.isabs(yamlConfig['OutputDir']):
        yamlConfig['OutputDir'] = os.path.join(configDir, yamlConfig['OutputDir'])
    if not os.path.isabs(yamlConfig['KerasH5']):
        yamlConfig['KerasH5'] = os.path.join(configDir, yamlConfig['KerasH5'])
    if not os.path.isabs(yamlConfig['KerasJson']):
        yamlConfig['KerasJson'] = os.path.join(configDir, yamlConfig['KerasJson'])

    if not (yamlConfig["IOType"] == "io_parallel" or yamlConfig["IOType"] == "io_serial"):
        raise Exception('ERROR: Invalid IO type')

    ######################
    ##  Do translation
    ######################
    if not os.path.isdir("{}/firmware/weights".format(yamlConfig['OutputDir'])):
        os.makedirs("{}/firmware/weights".format(yamlConfig['OutputDir']))

    #This is a list of dictionaries to hold all the layer info we need to generate HLS
    layer_list = []

    #Extract model architecture from json
    with open( yamlConfig['KerasJson'] ) as json_file:
        model_arch = json.load(json_file)
    #print(model_arch)

    #Define supported layers
    core_layers = ['InputLayer', 'Dropout', 'Flatten', 'Dense', 'BinaryDense', 'TernaryDense']
    conv_layers = ['Conv1D', 'Conv2D']
    pooling_layers = ['MaxPooling1D', 'MaxPooling2D', 'AveragePooling1D', 'AveragePooling2D']
    norm_layers = ['BatchNormalization']
    activation_layers = ['Activation', 'LeakyReLU', 'ThresholdedReLU', 'ELU', 'PReLU']
    merge_layers = ['Add', 'Subtract', 'Multiply', 'Average', 'Maximum', 'Minimum', 'Concatenate']
    supported_layers = core_layers + conv_layers + pooling_layers + norm_layers + activation_layers + merge_layers

    #Define layers to skip for conversion to HLS
    skip_layers = ['Dropout', 'Flatten']
    #Map inputs of skipped and split (activation) layers
    inputs_map = {}

    #Loop through layers
    layer_counter = 0

    input_layers = None
    output_layers = None

    layer_config = None
    if model_arch['class_name'] == 'Sequential':
        print('Interpreting Sequential')
        layer_config = model_arch["config"]
        if 'layers' in layer_config: # Newer Keras versions have 'layers' in 'config' key
            layer_config = layer_config['layers']
        # Sequential doesn't have InputLayer
        input_layer = {}
        input_layer['name'] = 'input1'
        input_layer['class_name'] = 'InputLayer'
        input_layer['input_shape'] = layer_config[0]['config']['batch_input_shape'][1:]
        layer_list.append(input_layer)
        print('Input shape:', input_layer['input_shape'])
    elif model_arch['class_name'] == 'Model':
        print('Interpreting Model')
        layer_config = model_arch["config"]["layers"]
        input_layers = [ inp[0] for inp in model_arch["config"]["input_layers"] ]
        output_layers = [ out[0] for out in model_arch["config"]["output_layers"] ]

    # Get input shape and check for unsupported layer type
    current_shape = None
    for keras_layer in layer_config:
        if keras_layer["class_name"] not in supported_layers:
            raise Exception('ERROR: Unsupported layer type: {}'.format(keras_layer["class_name"]))
        if 'batch_input_shape' in keras_layer['config']:
            current_shape = keras_layer['config']['batch_input_shape'] # [None, 100, 7]

    print('Topology:')
    for keras_layer in layer_config:
        if keras_layer["class_name"] is 'Flatten':
            current_shape = [current_shape[0], np.prod(current_shape[1:])]
        if keras_layer["class_name"] in skip_layers:
            if 'inbound_nodes' in keras_layer:
                name = keras_layer['config']['name']
                #Currently supported skipped layers have only one input
                parent_input = keras_layer['inbound_nodes'][0][0][0]
                #Skipped layers can follow each other (e.g., Dropout -> Flatten)
                inputs_map[name] = inputs_map.get(parent_input, parent_input)
            continue

        if keras_layer["class_name"] in supported_layers:
            layer_counter = layer_counter + 1

        #Dictionary to fill in and append to layer_list
        layer = {}

        #Extract name for finding weights and biases
        layer['name']=keras_layer['config']['name']
        layer['class_name']=keras_layer['class_name']

        #Extract inbound nodes
        if 'inbound_nodes' in keras_layer and len(keras_layer['inbound_nodes']) > 0:
            layer['inputs'] = [ inputs_map.get(inp[0], inp[0]) for inp in keras_layer['inbound_nodes'][0] ]

        #Extract type of activation and epsilon (if present)
        if 'activation' in keras_layer['config']:
            layer['activation'] = keras_layer['config']['activation']
        if 'epsilon' in keras_layer['config']:
            layer['epsilon'] = keras_layer['config']['epsilon']

        # Default one layer call
        if layer['class_name'] == 'InputLayer':
            layer['input_shape'] = keras_layer['config']['batch_input_shape'][1:]
        if 'Dense' in layer['class_name']:
            weights_shape = get_weights_shape(yamlConfig['KerasH5'], layer['name'])
            layer['n_in'] = weights_shape[0]
            layer['n_out'] = weights_shape[1]
            if 'Binary' in layer['class_name']:
                layer['quantize'] = 2
            elif 'Ternary' in layer['class_name']:
                layer['quantize'] = 3
            else:
                layer['quantize'] = 0
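            # (assumption: 'quantize' encodes the number of weight levels,
            #  2 = binary {-1, +1}, 3 = ternary {-1, 0, +1}, 0 = no quantization)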
            current_shape = [current_shape[0], layer['n_out']]
        elif layer['class_name']=='Conv1D':
            # weights_shape = (filter_width, n_channels, n_filters)
            weights_shape = get_weights_shape(yamlConfig['KerasH5'], layer['name'])
            layer['y_in']=current_shape[1]
            layer['y_filt']=weights_shape[0] # or keras_layer['config']['kernel_size']
            layer['n_chan']=weights_shape[1]
            layer['n_filt']=weights_shape[2] # or keras_layer['config']['filters']
            layer['stride']=keras_layer['config']['strides'][0]
            layer['padding']=keras_layer['config']['padding']
            if layer['padding']=='same':
                in_width = current_shape[1]
                layer['y_out'] = int(math.ceil(float(in_width) / float(layer['stride'])))
                if (in_width % layer['stride'] == 0):
                    pad_along_width = max(layer['y_filt'] - layer['stride'], 0)
                else:
                    pad_along_width = max(layer['y_filt'] - (in_width % layer['stride']), 0)
                layer['pad_left']  = pad_along_width // 2
                layer['pad_right']  = pad_along_width - layer['pad_left']
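                # Worked example (illustrative): in_width=100, y_filt=3, stride=1
                #   y_out = ceil(100/1) = 100; pad_along_width = max(3-1, 0) = 2
                #   -> pad_left = 1, pad_right = 1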
            elif layer['padding']=='valid':
                in_width = current_shape[1]
                layer['y_out'] = int(math.ceil(float(in_width - layer['y_filt'] + 1) / float(layer['stride'])))
                layer['pad_left'] = 0
                layer['pad_right'] = 0
            current_shape=[current_shape[0], layer['y_out'], layer['n_filt']]
        elif layer['class_name']=='Conv2D':
            # weights_shape = (filter_height, filter_width, n_channels, n_filters)
            weights_shape = get_weights_shape(yamlConfig['KerasH5'], layer['name'])
            layer['in_height']=current_shape[1]
            layer['in_width']=current_shape[2]
            layer['filt_height']=weights_shape[0]
            layer['filt_width']=weights_shape[1]
            layer['n_chan']=weights_shape[2]
            layer['n_filt']=weights_shape[3]
            layer['stride_height']=keras_layer['config']['strides'][0]
            layer['stride_width']=keras_layer['config']['strides'][1]
            layer['padding']=keras_layer['config']['padding']
            if layer['padding']=='same':
                #Height
                in_height = current_shape[1]
                layer['out_height'] = int(math.ceil(float(in_height) / float(layer['stride_height'])))
                if (in_height % layer['stride_height'] == 0):
                    pad_along_height = max(layer['filt_height'] - layer['stride_height'], 0)
                else:
                    pad_along_height = max(layer['filt_height'] - (in_height % layer['stride_height']), 0)
                layer['pad_top']  = pad_along_height // 2
                layer['pad_bottom']  = pad_along_height - layer['pad_top']
                #Width
                in_width = current_shape[2]
                layer['out_width'] = int(math.ceil(float(in_width) / float(layer['stride_width'])))
                if (in_width % layer['stride_width'] == 0):
                    pad_along_width = max(layer['filt_width'] - layer['stride_width'], 0)
                else:
                    pad_along_width = max(layer['filt_width'] - (in_width % layer['stride_width']), 0)
                layer['pad_left']  = pad_along_width // 2
                layer['pad_right']  = pad_along_width - layer['pad_left']
            elif layer['padding']=='valid':
                in_height = current_shape[1]
                in_width = current_shape[2]
                layer['out_width'] = int(math.ceil(float(in_width - layer['filt_width'] + 1) / float(layer['stride_width'])))
                layer['out_height'] = int(math.ceil(float(in_height - layer['filt_height'] + 1) / float(layer['stride_height'])))
                layer['pad_top'] = 0
                layer['pad_bottom'] = 0
                layer['pad_left'] = 0
                layer['pad_right'] = 0
            current_shape=[current_shape[0], layer['out_height'], layer['out_width'], layer['n_filt']]
        elif layer['class_name']=='BatchNormalization':
            in_size = 1
            for dim in current_shape[1:]:
                in_size *= dim
            layer['n_in'] = in_size
            layer['n_out'] = layer['n_in']
            if len(current_shape) == 2:
                layer['n_filt'] = -1
            elif len(current_shape) == 3:
                layer['n_filt']=current_shape[2]
            elif len(current_shape) == 4:
                layer['n_filt']=current_shape[3]
        elif 'Pooling' in layer['class_name']:
            info = layer['class_name'].split('Pooling')
            d = int(info[1].split('D')[0])
            op = info[0]
            if d == 1:
                layer['pool_size']=keras_layer['config']['pool_size']
                layer['stride']=keras_layer['config']['strides']
                # Note: the padding/output-size computation below only covers
                # the 2D case; the 1D pooling path is incomplete here
            elif d == 2:
                layer['in_height']=current_shape[1]
                layer['in_width']=current_shape[2]
                layer['n_filt']=current_shape[3]
                layer['stride_height']=keras_layer['config']['strides'][0]
                layer['stride_width']=keras_layer['config']['strides'][1]
                layer['pool_height']=keras_layer['config']['pool_size'][0]
                layer['pool_width']=keras_layer['config']['pool_size'][1]
                layer['padding']=keras_layer['config']['padding']
            if layer['padding']=='same':
                #Height
                in_height = current_shape[1]
                layer['out_height'] = int(math.ceil(float(in_height) / float(layer['stride_height'])))
                if (in_height % layer['stride_height'] == 0):
                    pad_along_height = max(layer['pool_height'] - layer['stride_height'], 0)
                else:
                    pad_along_height = max(layer['pool_height'] - (in_height % layer['stride_height']), 0)
                layer['pad_top']  = pad_along_height // 2
                layer['pad_bottom']  = pad_along_height - layer['pad_top']
                #Width
                in_width = current_shape[2]
                layer['out_width'] = int(math.ceil(float(in_width) / float(layer['stride_width'])))
                if (in_width % layer['stride_width'] == 0):
                    pad_along_width = max(layer['pool_width'] - layer['stride_width'], 0)
                else:
                    pad_along_width = max(layer['pool_width'] - (in_width % layer['stride_width']), 0)
                layer['pad_left']  = pad_along_width // 2
                layer['pad_right']  = pad_along_width - layer['pad_left']
                layer['n_out'] = layer['out_height'] * layer['out_width'] * layer['n_filt']
            elif layer['padding']=='valid':
                in_height = current_shape[1]
                in_width = current_shape[2]
                layer['out_width'] = int(math.ceil(float(in_width - layer['pool_width'] + 1) / float(layer['stride_width'])))
                layer['out_height'] = int(math.ceil(float(in_height - layer['pool_height'] + 1) / float(layer['stride_height'])))
                layer['pad_top'] = 0
                layer['pad_bottom'] = 0
                layer['pad_left'] = 0
                layer['pad_right'] = 0
                layer['n_out'] = layer['out_height'] * layer['out_width'] * layer['n_filt']
            current_shape=[current_shape[0], layer['out_height'], layer['out_width'], layer['n_filt']]

        elif layer['class_name']=='LeakyReLU':
            layer['activation'] = layer['class_name']
            layer['activ_param'] = keras_layer["config"].get('alpha', 0.3)
        elif layer['class_name']=='ThresholdedReLU':
            layer['activation'] = layer['class_name']
            layer['activ_param'] = keras_layer["config"].get('theta', 1.)
        elif layer['class_name']=='ELU':
            layer['activation'] = layer['class_name']
            layer['activ_param'] = keras_layer["config"].get('alpha', 1.)
        elif layer['class_name']=='PReLU':
            layer['activation'] = layer['class_name']

        elif layer['class_name'] in merge_layers:
            layer['op'] = layer['class_name'].lower()
            if layer['class_name'] == 'Concatenate':
                rank = len(current_shape[1:])
                if rank > 3:
                    raise Exception('ERROR: Concatenation of tensors with rank > 3 is not yet supported.')
                layer['op'] = layer['class_name'].lower() + '{}d'.format(rank)
                layer['axis'] = keras_layer['config']['axis']
            else:
                layer['class_name'] = 'Merge'
            if len(layer['inputs']) > 2:
                raise Exception('ERROR: Merging more than two tensors is not yet supported.')

        print('Layer name: {}, layer type: {}, current shape: {}'.format(layer['name'], layer['class_name'], current_shape))
        layer_list.append( layer )
        if 'activation' in layer and layer['class_name'] not in activation_layers:
            act_layer = {}
            act_layer['name'] = layer['name'] + '_' + layer['activation']
            act_layer['activation'] = layer['activation']
            if 'activ_param' in layer:
                act_layer['activ_param'] = layer['activ_param']
                act_layer['class_name'] = layer['activation']
            else:
                act_layer['class_name'] = 'Activation'
            inputs_map[layer['name']] = act_layer['name']
            if output_layers is not None and layer['name'] in output_layers:
                output_layers = [act_layer['name'] if name == layer['name'] else name for name in output_layers]
            layer_list.append(act_layer)


    #################
    ## Generate HLS
    #################

    reader = KerasDataReader(yamlConfig)
    hls_model = HLSModel(yamlConfig, reader, layer_list, input_layers, output_layers)
    write_hls(hls_model)
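
For context, the YAML configuration these main() functions consume might look like the following; only the keys actually read above are shown, and the file names are illustrative assumptions. A minimal sketch that writes such a config:

import yaml

# Minimal sketch of a config for the Keras converter above (values illustrative)
config = {
    'KerasJson': 'model/KERAS_model.json',      # architecture JSON (assumed name)
    'KerasH5': 'model/KERAS_model_weights.h5',  # weights file (assumed name)
    'OutputDir': 'my-hls-test',                 # firmware/weights is created under this
    'IOType': 'io_parallel',                    # or 'io_serial', per the check above
}

with open('keras-config.yml', 'w') as f:
    yaml.dump(config, f, default_flow_style=False)

The converter would then be invoked with -c keras-config.yml (the script name itself is not part of this excerpt).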