Example #1
def parse_zeropadding1d_layer(keras_layer, input_names, input_shapes,
                              data_reader, config):
    assert (keras_layer['class_name'] == 'ZeroPadding1D')

    layer = parse_default_keras_layer(keras_layer, input_names)

    padding = keras_layer['config']['padding']
    if isinstance(padding, int):
        layer['pad_left'] = padding
        layer['pad_right'] = padding
    elif isinstance(padding, collections.abc.Sequence):
        layer['pad_left'] = padding[0]
        layer['pad_right'] = padding[1]

    if layer['data_format'] == 'channels_first':
        output_shape = [
            input_shapes[0][0],  # Batch
            input_shapes[0][1],  # Channels
            layer['pad_left'] + input_shapes[0][2] +
            layer['pad_right']  # Width
        ]
        layer['out_width'] = output_shape[2]
        layer['n_chan'] = output_shape[1]
    else:
        output_shape = [
            input_shapes[0][0],  # Batch
            layer['pad_left'] + input_shapes[0][1] +
            layer['pad_right'],  # Width
            input_shapes[0][2]  # Channels
        ]
        layer['out_width'] = output_shape[1]
        layer['n_chan'] = output_shape[2]

    return layer, output_shape
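
To see how this parser behaves, here is a minimal sketch that exercises it with a mock ZeroPadding1D config. The parse_default_keras_layer stub, the layer name 'zp1', and the shapes are illustrative assumptions for this sketch only; the real helper lives elsewhere in the hls4ml Keras reader and copies more fields.

import collections.abc

# Hypothetical stand-in for hls4ml's parse_default_keras_layer; the real
# helper also resolves the layer's inputs/outputs from the model graph.
def parse_default_keras_layer(keras_layer, input_names):
    return {
        'name': keras_layer['config']['name'],
        'class_name': keras_layer['class_name'],
        'inputs': input_names,
        'data_format': keras_layer['config'].get('data_format', 'channels_last'),
    }

mock_layer = {'class_name': 'ZeroPadding1D',
              'config': {'name': 'zp1', 'padding': (2, 3)}}
layer, shape = parse_zeropadding1d_layer(mock_layer, ['x'], [[None, 8, 4]], None, None)
print(shape)  # [None, 13, 4]: width 8 grows by pad_left=2 + pad_right=3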
Example #2
def parse_upsampling2d_layer(keras_layer, input_names, input_shapes,
                             data_reader, config):
    assert ('UpSampling2D' in keras_layer['class_name'])

    layer = parse_default_keras_layer(keras_layer, input_names)

    (layer['in_height'], layer['in_width'],
     layer['n_chan']) = parse_data_format(input_shapes[0],
                                          layer['data_format'])

    layer['algorithm'] = keras_layer['config']['interpolation']

    layer['height_factor'] = keras_layer['config']['size'][0]
    layer['width_factor'] = keras_layer['config']['size'][1]

    layer['out_height'] = layer['in_height'] * layer['height_factor']
    layer['out_width'] = layer['in_width'] * layer['width_factor']

    if layer['data_format'] == 'channels_first':
        output_shape = [
            input_shapes[0][0], layer['n_chan'], layer['out_height'],
            layer['out_width']
        ]
    else:
        output_shape = [
            input_shapes[0][0], layer['out_height'], layer['out_width'],
            layer['n_chan']
        ]

    return layer, output_shape
Example #3
def parse_merge_layer(keras_layer, input_names, input_shapes, data_reader,
                      config):
    assert (keras_layer['class_name'] in merge_layers)

    layer = parse_default_keras_layer(keras_layer, input_names)

    layer['op'] = layer['class_name'].lower()

    if layer['class_name'] == 'Concatenate':
        rank = len(input_shapes[0][1:])
        if rank > 3:
            raise Exception(
                'ERROR: Concatenation of tensors with rank > 3 is not yet supported.'
            )
        layer['op'] = layer['class_name'].lower() + '{}d'.format(rank)
        layer['axis'] = keras_layer['config']['axis']
        #TODO handle output shape
    elif layer['class_name'] == 'Dot':
        rank = len(input_shapes[0][1:])
        if rank > 1:
            raise Exception(
                'ERROR: Dot of tensors with rank > 1 is not yet supported.')
        layer['op'] = layer['class_name'].lower() + '{}d'.format(rank)
    else:
        layer['class_name'] = 'Merge'
    if len(layer['inputs']) > 2:
        raise Exception(
            'ERROR: Merging more than two tensors is not yet supported.')

    return layer, input_shapes[0]
Example #4
def parse_rnn_layer(keras_layer, input_names, input_shapes, data_reader, config):
    assert(keras_layer['class_name'] in rnn_layers)

    if keras_layer['class_name'] == 'LSTM':
        div_factor = 4
    elif keras_layer['class_name'] == 'GRU':
        div_factor = 3
    else:
        div_factor = 1

    layer = parse_default_keras_layer(keras_layer, input_names)

    return_sequences = keras_layer['config']['return_sequences']
    layer['recurrent_activation'] = keras_layer['config']['recurrent_activation']
    # Keras packs the gate kernels side by side: shape (n_in, div_factor * n_out)
    weights_shape = data_reader.get_weights_shape(layer['name'], 'kernel')
    recurrent_weights_shape = data_reader.get_weights_shape(layer['name'], 'recurrent_kernel')
    layer['n_sequence'] = input_shapes[0][1]
    layer['n_sequence_out'] = layer['n_sequence'] if return_sequences else 1
    layer['n_in'] = weights_shape[0]
    layer['n_out'] = weights_shape[1] // div_factor
    layer['recurr_n_in'] = recurrent_weights_shape[0]
    layer['recurr_n_out'] = recurrent_weights_shape[1]

    if return_sequences:
        output_shape = [input_shapes[0][0], layer['n_sequence_out'], layer['n_out']]
    else:
        output_shape = [input_shapes[0][0], layer['n_out']]

    return layer, output_shape
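
The div_factor bookkeeping reflects how Keras stores gate weights: an LSTM kernel has shape (n_in, 4 * units) and a GRU kernel (n_in, 3 * units), so integer-dividing the second dimension recovers the unit count. A small illustration with made-up shapes:

# Keras concatenates the gate kernels along the output axis, so the stored
# kernel is (n_in, gates * units); e.g. for a hypothetical LSTM with 16 units:
weights_shape = (8, 64)   # (n_in, 4 * units)
div_factor = 4            # LSTM has 4 gates (i, f, c, o); GRU has 3
n_in = weights_shape[0]
n_out = weights_shape[1] // div_factor
print(n_in, n_out)        # 8 16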
Example #5
def parse_conv1d_layer(keras_layer, input_names, input_shapes, data_reader,
                       config):
    assert ('Conv1D' in keras_layer['class_name'])

    layer = parse_default_keras_layer(keras_layer, input_names)

    (layer['in_width'],
     layer['n_chan']) = parse_data_format(input_shapes[0],
                                          layer['data_format'])

    layer['n_filt'] = keras_layer['config']['filters']
    layer['filt_width'] = keras_layer['config']['kernel_size'][0]
    layer['stride_width'] = keras_layer['config']['strides'][0]
    layer['padding'] = keras_layer['config']['padding']

    (layer['out_width'], layer['pad_left'],
     layer['pad_right']) = compute_padding_1d(layer['padding'],
                                              layer['in_width'],
                                              layer['stride_width'],
                                              layer['filt_width'])

    if layer['data_format'] == 'channels_last':
        output_shape = [
            input_shapes[0][0], layer['out_width'], layer['n_filt']
        ]
    elif layer['data_format'] == 'channels_first':
        output_shape = [
            input_shapes[0][0], layer['n_filt'], layer['out_width']
        ]

    return layer, output_shape
Example #6
def parse_pooling_layer(keras_layer, input_names, input_shapes, data_reader,
                        config):
    assert ('Pooling' in keras_layer['class_name'])

    layer = parse_default_keras_layer(keras_layer, input_names)

    if int(layer['class_name'][-2]) == 1:
        (layer['n_in'],
         layer['n_filt']) = parse_data_format(input_shapes[0],
                                              layer['data_format'])

        layer['pool_width'] = keras_layer['config']['pool_size'][0]
        layer['stride_width'] = keras_layer['config']['strides'][0]
        layer['padding'] = keras_layer['config']['padding']

        (layer['n_out'], layer['pad_left'],
         layer['pad_right']) = compute_padding_1d(layer['padding'],
                                                  layer['n_in'],
                                                  layer['stride_width'],
                                                  layer['pool_width'])

        if layer['data_format'] == 'channels_last':
            output_shape = [
                input_shapes[0][0], layer['n_out'], layer['n_filt']
            ]
        elif layer['data_format'] == 'channels_first':
            output_shape = [
                input_shapes[0][0], layer['n_filt'], layer['n_out']
            ]
    elif int(layer['class_name'][-2]) == 2:
        (layer['in_height'], layer['in_width'],
         layer['n_filt']) = parse_data_format(input_shapes[0],
                                              layer['data_format'])

        layer['stride_height'] = keras_layer['config']['strides'][0]
        layer['stride_width'] = keras_layer['config']['strides'][1]
        layer['pool_height'] = keras_layer['config']['pool_size'][0]
        layer['pool_width'] = keras_layer['config']['pool_size'][1]
        layer['padding'] = keras_layer['config']['padding']

        (layer['out_height'], layer['out_width'], layer['pad_top'],
         layer['pad_bottom'],
         layer['pad_left'], layer['pad_right']) = compute_padding_2d(
             layer['padding'], layer['in_height'], layer['in_width'],
             layer['stride_height'], layer['stride_width'],
             layer['pool_height'], layer['pool_width'])

        if layer['data_format'] == 'channels_last':
            output_shape = [
                input_shapes[0][0], layer['out_height'], layer['out_width'],
                layer['n_filt']
            ]
        elif layer['data_format'] == 'channels_first':
            output_shape = [
                input_shapes[0][0], layer['n_filt'], layer['out_height'],
                layer['out_width']
            ]

    return layer, output_shape
Example #7
def parse_reshape_layer(keras_layer, input_names, input_shapes, data_reader, config):
    assert(keras_layer["class_name"] == 'Reshape')

    layer = parse_default_keras_layer(keras_layer, input_names)
    
    layer['target_shape'] = keras_layer['config']['target_shape']
    output_shape = input_shapes[0][:1] + list(keras_layer['config']['target_shape'])
    
    return layer, output_shape
Example #8
def parse_flatten_layer(keras_layer, input_names, input_shapes, data_reader,
                        config):
    assert (keras_layer["class_name"] == 'Flatten')

    layer = parse_default_keras_layer(keras_layer, input_names)

    layer['class_name'] = 'Reshape'
    layer['target_shape'] = [input_shapes[0][0], np.prod(input_shapes[0][1:])]
    output_shape = layer['target_shape']

    return layer, output_shape
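
Flatten is rewritten as a Reshape whose target collapses everything but the batch dimension; np.prod over the non-batch dims gives the flattened size. For instance, with a hypothetical input shape:

import numpy as np

input_shape = [None, 4, 4, 3]          # (batch, height, width, channels)
flat = int(np.prod(input_shape[1:]))   # 4 * 4 * 3 = 48
print([input_shape[0], flat])          # [None, 48]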
Example #9
def parse_input_layer(keras_layer, input_names, input_shapes, data_reader, config):
    assert(keras_layer['class_name'] == 'InputLayer')

    layer = parse_default_keras_layer(keras_layer, input_names)

    layer['input_shape'] = keras_layer['config']['batch_input_shape'][1:]
    if keras_layer['config']['dtype'] == 'int32':
        layer['type_name'] = 'integer_input_t'
        layer['precision'] = IntegerPrecisionType(width=32)
    output_shape = keras_layer['config']['batch_input_shape']
    
    return layer, output_shape
Example #10
def parse_qactivation_layer(keras_layer, input_names, input_shapes,
                            data_reader, config):
    assert (keras_layer['class_name'] == 'QActivation')
    supported_activations = [
        'quantized_relu', 'quantized_tanh', 'binary_tanh', 'ternary_tanh',
        'quantized_bits', 'binary', 'ternary'
    ]

    layer = parse_default_keras_layer(keras_layer, input_names)

    activation_config = keras_layer['config']['activation']
    quantizer_obj = get_quantizer(activation_config)
    activation_config = {}
    # some activations are classes
    if hasattr(quantizer_obj, 'get_config'):
        activation_config['class_name'] = quantizer_obj.__class__.__name__
        if activation_config['class_name'] in ('ternary', 'binary'):
            activation_config['class_name'] += '_tanh'
        activation_config['config'] = quantizer_obj.get_config()
    # some activation quantizers are just functions with no config
    else:
        activation_config['config'] = {}
        if 'binary' in quantizer_obj.__name__:
            activation_config['class_name'] = 'binary_tanh'
            activation_config['config']['bits'] = 1
            activation_config['config']['integer'] = 1
        elif 'ternary' in quantizer_obj.__name__:
            activation_config['class_name'] = 'ternary_tanh'
            activation_config['config']['bits'] = 2
            activation_config['config']['integer'] = 2
        else:
            activation_config['class_name'] = 'unknown'

    if activation_config['class_name'] not in supported_activations:
        raise Exception('Unsupported QKeras activation: {}'.format(
            activation_config['class_name']))

    if activation_config['class_name'] == 'ternary_tanh':
        layer['class_name'] = 'TernaryTanh'
        layer['threshold'] = activation_config.get('config',
                                                   {}).get('threshold', 0.33)
        if layer['threshold'] is None:
            layer['threshold'] = 0.33  # the default ternary tanh threshold for QKeras
    else:
        layer['class_name'] = 'Activation'
    if activation_config['class_name'] == 'quantized_bits':
        activation_config['class_name'] = 'linear'
    layer['activation'] = activation_config['class_name'].replace(
        'quantized_', '')
    return layer, [shape for shape in input_shapes[0]]
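
The final activation name is derived from the quantizer's class name: bare binary/ternary are promoted to their _tanh variants, quantized_bits maps to linear, and the quantized_ prefix is stripped (quantized_relu becomes relu). A small sketch replicating just that name mangling (not hls4ml API, and it omits the TernaryTanh special case):

def hls_activation_name(qkeras_name):
    # mirrors the mapping in the parser above, for illustration only
    if qkeras_name in ('binary', 'ternary'):
        qkeras_name += '_tanh'
    if qkeras_name == 'quantized_bits':
        qkeras_name = 'linear'
    return qkeras_name.replace('quantized_', '')

for name in ('quantized_relu', 'binary', 'quantized_bits'):
    print(name, '->', hls_activation_name(name))
# quantized_relu -> relu, binary -> binary_tanh, quantized_bits -> linear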
Example #11
def parse_permute_layer(keras_layer, input_names, input_shapes, data_reader,
                        config):
    assert (keras_layer['class_name'] == 'Permute')

    layer = parse_default_keras_layer(keras_layer, input_names)

    layer['class_name'] = 'Transpose'
    dims = keras_layer['config']['dims']
    layer['perm'] = [dim - 1 for dim in dims]  # Keras dims are 1-indexed (batch excluded)

    output_shape = [input_shapes[0][0]] + [input_shapes[0][s] for s in dims]

    return layer, output_shape
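
Keras Permute dims are 1-indexed over the non-batch axes, while the hls4ml Transpose node expects a 0-indexed perm; the dim - 1 shift and the shape reindexing are easiest to see on a concrete, made-up shape:

dims = (2, 1)                   # Keras Permute: swap the two non-batch axes
perm = [d - 1 for d in dims]    # 0-indexed for the Transpose node -> [1, 0]
in_shape = [None, 8, 16]        # hypothetical (batch, axis1, axis2)
out_shape = [in_shape[0]] + [in_shape[d] for d in dims]
print(perm, out_shape)          # [1, 0] [None, 16, 8]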
Example #12
def parse_embedding_layer(keras_layer, input_names, input_shapes, data_reader,
                          config):
    assert ('Embedding' in keras_layer['class_name'])

    layer = parse_default_keras_layer(keras_layer, input_names)

    layer['n_in'] = input_shapes[0][1]
    layer['vocab_size'] = keras_layer['config']['input_dim']
    layer['n_out'] = keras_layer['config']['output_dim']

    output_shape = input_shapes[0] + [layer['n_out']]

    return layer, output_shape
Example #13
def parse_garnet_layer(keras_layer, input_names, input_shapes, data_reader,
                       config):
    assert (keras_layer['class_name'] in ['GarNet', 'GarNetStack'])

    if not keras_layer['config']['simplified']:
        raise Exception(
            'HLS GarNet is compatible only with keras GarNet with simplified=True'
        )
    if keras_layer['config']['output_activation'] is not None:
        raise Exception('HLS GarNet cannot have output activation')

    layer = parse_default_keras_layer(keras_layer, input_names)

    layer['input_format'] = keras_layer['config']['input_format']
    if layer['input_format'] != 'xn':
        raise NotImplementedError(
            'HLS GarNet currently only implements signed inputs (input_format="xn")'
        )

    layer['n_vertices'] = input_shapes[0][1]
    layer['collapse'] = keras_layer['config']['collapse']
    layer['mean_by_nvert'] = keras_layer['config']['mean_by_nvert']
    if keras_layer['config']['quantize_transforms']:
        layer['quantizer'] = TernaryQuantizer()

    layer['n_aggregators'] = keras_layer['config']['n_aggregators']
    layer['n_out_features'] = keras_layer['config']['n_filters']  # number of output features
    layer['n_propagate'] = keras_layer['config']['n_propagate']  # number of latent features

    if layer['class_name'] == 'GarNet':
        layer['n_in_features'] = input_shapes[0][2]
        n_out_features = layer['n_out_features']

    elif layer['class_name'] == 'GarNetStack':
        layer['n_sublayers'] = keras_layer['config']['n_sublayers']
        layer['n_in_features'] = [input_shapes[0][2]]

        for il in range(1, layer['n_sublayers']):
            layer['n_in_features'].append(layer['n_out_features'][il - 1])

        n_out_features = layer['n_out_features'][-1]

    if layer['collapse'] in ['mean', 'sum', 'max']:
        output_shape = [input_shapes[0][0], n_out_features]
    else:
        output_shape = input_shapes[0][:2] + [n_out_features]

    return layer, output_shape
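
For GarNetStack, each sublayer's input feature count is chained from the previous sublayer's output feature count; only the first sublayer sees the raw input features. With hypothetical per-sublayer filter counts:

n_sublayers = 3
n_out_features = [8, 8, 16]   # hypothetical 'n_filters' per sublayer
n_in_features = [4]           # first sublayer reads the raw input features
for il in range(1, n_sublayers):
    n_in_features.append(n_out_features[il - 1])
print(n_in_features)          # [4, 8, 8]; the stack's final output width is 16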
Example #14
def parse_lstm_layer(keras_layer, input_names, input_shapes, data_reader,
                     config):
    assert (keras_layer['class_name'] == 'LSTM')

    layer = parse_default_keras_layer(keras_layer, input_names)

    layer['input_shape'] = keras_layer['config']['batch_input_shape'][1:]
    layer['n_timestamp'] = keras_layer['config']['batch_input_shape'][1]
    layer['n_in'] = keras_layer['config']['units']
    #if keras_layer['config']['dtype'] == 'int32':
    #    layer['type_name'] = 'integer_input_t'
    #    layer['precision'] = IntegerPrecisionType(width=32)
    output_shape = keras_layer['config']['batch_input_shape']

    return layer, output_shape
Example #15
def parse_qactivation_layer(keras_layer, input_names, input_shapes,
                            data_reader, config):
    assert (keras_layer['class_name'] == 'QActivation')
    supported_activations = [
        'quantized_relu', 'quantized_tanh', 'binary_tanh', 'ternary_tanh',
        'quantized_bits'
    ]

    layer = parse_default_keras_layer(keras_layer, input_names)

    activation_config = keras_layer['config']['activation']
    if isinstance(activation_config, str):
        quantizer_obj = get_quantizer(activation_config)
        activation_config = {}
        # some activations are classes
        if hasattr(quantizer_obj, 'get_config'):
            activation_config['class_name'] = quantizer_obj.__class__.__name__
            activation_config['config'] = quantizer_obj.get_config()
        # some activation quantizers are just functions with no config
        else:
            activation_config['config'] = {}
            if quantizer_obj.__name__ == 'binary_tanh':
                activation_config['class_name'] = 'binary_tanh'
                activation_config['config']['bits'] = 1
                activation_config['config']['integer'] = 1
            elif quantizer_obj.__name__ == 'ternary_tanh':
                activation_config['class_name'] = 'ternary_tanh'
                activation_config['config']['bits'] = 2
                activation_config['config']['integer'] = 2
            else:
                activation_config['class_name'] = 'unknown'

    if activation_config['class_name'] not in supported_activations:
        raise Exception('Unsupported QKeras activation: {}'.format(
            activation_config['class_name']))

    layer['class_name'] = 'Activation'
    if activation_config['class_name'] == 'quantized_bits':
        activation_config['class_name'] = 'linear'
    layer['activation'] = activation_config['class_name'].replace(
        'quantized_', '')
    return layer, [shape for shape in input_shapes[0]]
Example #16
def parse_input_layer(keras_layer, input_names, input_shapes, data_reader, config):
    assert(keras_layer['class_name'] == 'InputLayer')

    layer = parse_default_keras_layer(keras_layer, input_names)

    layer['input_shape'] = keras_layer['config']['batch_input_shape'][1:]

    dtype = keras_layer['config']['dtype']
    if dtype.startswith('int') or dtype.startswith('uint'):
        layer['type_name'] = 'integer_input_t'
        width = int(dtype[dtype.index('int') + 3:])
        signed = (not dtype.startswith('u'))
        layer['precision'] = IntegerPrecisionType(width=width, signed=signed)
    # elif bool, q[u]int, ...

    output_shape = keras_layer['config']['batch_input_shape']
    
    return layer, output_shape
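
The dtype handling slices the bit width out of the dtype string and treats a leading u as unsigned, so it covers any int*/uint* input without enumerating them. For example:

# How the width/signedness extraction above behaves on typical dtypes
for dtype in ('int8', 'uint16', 'int32'):
    width = int(dtype[dtype.index('int') + 3:])
    signed = not dtype.startswith('u')
    print(dtype, width, signed)
# int8 8 True / uint16 16 False / int32 32 True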
Example #17
def parse_batchnorm_layer(keras_layer, input_names, input_shapes, data_reader, config):
    assert('BatchNormalization' in keras_layer['class_name'])

    layer = parse_default_keras_layer(keras_layer, input_names)

    in_size = 1
    for dim in input_shapes[0][1:]:
        in_size *= dim
    layer['n_in'] = in_size
    layer['n_out'] = layer['n_in']
    if len(input_shapes[0]) == 2:
        layer['n_filt'] = -1
    elif len(input_shapes[0]) == 3:
        layer['n_filt'] = input_shapes[0][2]
    elif len(input_shapes[0]) == 4:
        layer['n_filt'] = input_shapes[0][3]

    return layer, [shape for shape in input_shapes[0]]
Example #18
File: core.py Project: zzulb/hls4ml
def parse_activation_layer(keras_layer, input_names, input_shapes, data_reader,
                           config):
    assert (keras_layer['class_name'] in activation_layers)

    layer = parse_default_keras_layer(keras_layer, input_names)

    if layer['class_name'] != 'Activation':
        layer['activation'] = layer['class_name']
    if layer['class_name'] == 'LeakyReLU':
        layer['activ_param'] = keras_layer["config"].get('alpha', 0.3)
    elif layer['class_name'] == 'ThresholdedReLU':
        layer['activ_param'] = keras_layer["config"].get('theta', 1.)
    elif layer['class_name'] == 'ELU':
        layer['activ_param'] = keras_layer["config"].get('alpha', 1.)

    if layer['class_name'] == 'Activation' and layer['activation'] == 'softmax':
        layer['class_name'] = 'Softmax'

    return layer, [shape for shape in input_shapes[0]]
Example #19
def parse_rnn_layer(keras_layer, input_names, input_shapes, data_reader,
                    config):
    assert (keras_layer['class_name'] in rnn_layers)

    layer = parse_default_keras_layer(keras_layer, input_names)

    layer['return_sequences'] = keras_layer['config']['return_sequences']
    layer['return_state'] = keras_layer['config']['return_state']

    if layer['class_name'] != 'SimpleRNN':
        layer['recurrent_activation'] = keras_layer['config'][
            'recurrent_activation']

    layer['time_major'] = keras_layer['config'].get('time_major', False)

    # TODO Should we handle time_major?
    if layer['time_major']:
        raise Exception('Time-major format is not supported by hls4ml')

    layer['n_timesteps'] = input_shapes[0][1]
    layer['n_in'] = input_shapes[0][2]

    layer['n_out'] = keras_layer['config']['units']

    if layer['class_name'] == 'GRU':
        layer['apply_reset_gate'] = 'after' if keras_layer['config'][
            'reset_after'] else 'before'

    if layer['return_sequences']:
        output_shape = [
            input_shapes[0][0], layer['n_timesteps'], layer['n_out']
        ]
    else:
        output_shape = [input_shapes[0][0], layer['n_out']]

    if layer['return_state']:
        raise Exception(
            '"return_state" of {} layer is not yet supported.'.format(
                layer['class_name']))

    return layer, output_shape
Example #20
def parse_global_pooling_layer(keras_layer, input_names, input_shapes,
                               data_reader, config):
    assert ('Pooling' in keras_layer['class_name'])

    layer = parse_default_keras_layer(keras_layer, input_names)

    if int(layer['class_name'][-2]) == 1:
        (layer['n_in'],
         layer['n_filt']) = parse_data_format(input_shapes[0],
                                              layer['data_format'])

        output_shape = [input_shapes[0][0], layer['n_filt']]
    elif int(layer['class_name'][-2]) == 2:
        (layer['in_height'], layer['in_width'],
         layer['n_filt']) = parse_data_format(input_shapes[0],
                                              layer['data_format'])

        output_shape = [input_shapes[0][0], layer['n_filt']]

    return layer, output_shape
Example #21
def parse_dense_layer(keras_layer, input_names, input_shapes, data_reader, config):
    assert('Dense' in keras_layer['class_name'])

    layer = parse_default_keras_layer(keras_layer, input_names)
    
    weights_shape = data_reader.get_weights_shape(layer['name'], 'kernel')
    layer['n_in'] = weights_shape[0]
    layer['n_out'] = weights_shape[1]
    if 'Binary' in layer['class_name']:
        layer['weight_quantizer'] = BinaryQuantizer(bits=2)
        layer['bias_quantizer'] = BinaryQuantizer(bits=2)
    elif 'Ternary' in layer['class_name']:
        layer['weight_quantizer'] = TernaryQuantizer()
        layer['bias_quantizer'] = TernaryQuantizer()
    else:
        layer['weight_quantizer'] = None
        layer['bias_quantizer'] = None
    output_shape = [input_shapes[0][0], layer['n_out']]

    return layer, output_shape
Example #22
def parse_conv1d_layer(keras_layer, input_names, input_shapes, data_reader,
                       config):
    assert ('Conv1D' in keras_layer['class_name'])

    layer = parse_default_keras_layer(keras_layer, input_names)

    # weights_shape = (filter_width, n_channels, n_filters)
    weights_shape = data_reader.get_weights_shape(layer['name'], 'kernel')
    layer['n_in'] = input_shapes[0][1]
    layer['filt_width'] = weights_shape[0]  # or keras_layer['config']['kernel_size']
    layer['n_chan'] = weights_shape[1]
    layer['n_filt'] = weights_shape[2]  # or keras_layer['config']['filters']
    layer['stride'] = keras_layer['config']['strides'][0]
    layer['padding'] = keras_layer['config']['padding']
    if layer['padding'] == 'same':
        in_width = input_shapes[0][1]
        layer['n_out'] = int(
            math.ceil(float(in_width) / float(layer['stride'])))
        if (in_width % layer['stride'] == 0):
            pad_along_width = max(layer['filt_width'] - layer['stride'], 0)
        else:
            pad_along_width = max(
                layer['filt_width'] - (in_width % layer['stride']), 0)
        layer['pad_left'] = pad_along_width // 2
        layer['pad_right'] = pad_along_width - layer['pad_left']
    elif layer['padding'] == 'valid':
        in_width = input_shapes[0][1]
        layer['n_out'] = int(
            math.ceil(
                float(in_width - layer['filt_width'] + 1) /
                float(layer['stride'])))
        layer['pad_left'] = 0
        layer['pad_right'] = 0
    layer['data_format'] = keras_layer['config'].get('data_format',
                                                     'channels_last')
    output_shape = [input_shapes[0][0], layer['n_out'], layer['n_filt']]

    return layer, output_shape
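
The 'same' branch reproduces TensorFlow's padding arithmetic: the output width is ceil(in / stride), and the total padding depends on whether the stride divides the input width. Plugging in hypothetical numbers:

import math

in_width, filt_width, stride = 10, 3, 2     # made-up layer geometry
n_out = int(math.ceil(in_width / stride))   # ceil(10 / 2) = 5
if in_width % stride == 0:
    pad = max(filt_width - stride, 0)       # 10 % 2 == 0 -> pad = 1
else:
    pad = max(filt_width - in_width % stride, 0)
print(n_out, pad // 2, pad - pad // 2)      # 5 0 1 (n_out, pad_left, pad_right)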
Example #23
def parse_conv2d_layer(keras_layer, input_names, input_shapes, data_reader,
                       config):
    assert ('Conv2D' in keras_layer['class_name'])

    layer = parse_default_keras_layer(keras_layer, input_names)

    layer['data_format'] = keras_layer['config'].get('data_format',
                                                     'channels_last')
    # weights_shape = (filter_height, filter_width, n_channels, n_filters)
    weights_shape = data_reader.get_weights_shape(layer['name'], 'kernel')
    layer['in_height'] = input_shapes[0][1]
    layer['in_width'] = input_shapes[0][2]
    if layer['data_format'] == 'channels_first':
        layer['in_height'] = input_shapes[0][2]
        layer['in_width'] = input_shapes[0][3]
    layer['filt_height'] = weights_shape[0]
    layer['filt_width'] = weights_shape[1]
    layer['n_chan'] = weights_shape[2]
    layer['n_filt'] = weights_shape[3]
    layer['stride_height'] = keras_layer['config']['strides'][0]
    layer['stride_width'] = keras_layer['config']['strides'][1]
    layer['padding'] = keras_layer['config']['padding']
    if layer['padding'] == 'same':
        #Height
        in_height = input_shapes[0][1]
        if layer['data_format'] == 'channels_first':
            in_height = input_shapes[0][2]
        layer['out_height'] = int(
            math.ceil(float(in_height) / float(layer['stride_height'])))
        if (in_height % layer['stride_height'] == 0):
            pad_along_height = max(
                layer['filt_height'] - layer['stride_height'], 0)
        else:
            pad_along_height = max(
                layer['filt_height'] - (in_height % layer['stride_height']), 0)
        layer['pad_top'] = pad_along_height // 2
        layer['pad_bottom'] = pad_along_height - layer['pad_top']
        #Width
        in_width = input_shapes[0][2]
        if layer['data_format'] == 'channels_first':
            in_width = input_shapes[0][3]
        layer['out_width'] = int(
            math.ceil(float(in_width) / float(layer['stride_width'])))
        if (in_width % layer['stride_width'] == 0):
            pad_along_width = max(layer['filt_width'] - layer['stride_width'],
                                  0)
        else:
            pad_along_width = max(
                layer['filt_width'] - (in_width % layer['stride_width']), 0)
        layer['pad_left'] = pad_along_width // 2
        layer['pad_right'] = pad_along_width - layer['pad_left']
    elif layer['padding'] == 'valid':
        in_height = input_shapes[0][1]
        in_width = input_shapes[0][2]
        if layer['data_format'] == 'channels_first':
            in_height = input_shapes[0][2]
            in_width = input_shapes[0][3]
        layer['out_width'] = int(
            math.ceil(
                float(in_width - layer['filt_width'] + 1) /
                float(layer['stride_width'])))
        layer['out_height'] = int(
            math.ceil(
                float(in_height - layer['filt_height'] + 1) /
                float(layer['stride_height'])))
        layer['pad_top'] = 0
        layer['pad_bottom'] = 0
        layer['pad_left'] = 0
        layer['pad_right'] = 0
    if layer['data_format'] == 'channels_first':
        output_shape = [
            input_shapes[0][0], layer['n_filt'], layer['out_height'],
            layer['out_width']
        ]
    else:
        output_shape = [
            input_shapes[0][0], layer['out_height'], layer['out_width'],
            layer['n_filt']
        ]

    return layer, output_shape
Example #24
def parse_pooling_layer(keras_layer, input_names, input_shapes, data_reader, config):
    assert('Pooling' in keras_layer['class_name'])

    layer = parse_default_keras_layer(keras_layer, input_names)

    if int(layer['class_name'][-2]) == 1:
        layer['n_in'] = input_shapes[0][1]
        layer['n_filt'] = input_shapes[0][2]
        layer['pool_size'] = keras_layer['config']['pool_size'][0]
        layer['stride'] = keras_layer['config']['strides'][0]
        layer['padding'] = keras_layer['config']['padding']
        if layer['padding'] == 'same':
            in_width = input_shapes[0][1]
            layer['n_out'] = int(math.ceil(float(in_width) / float(layer['stride'])))
            if in_width % layer['stride'] == 0:
                pad_along_width = max(layer['pool_size'] - layer['stride'], 0)
            else:
                pad_along_width = max(layer['pool_size'] - (in_width % layer['stride']), 0)
            layer['pad_left'] = pad_along_width // 2
            layer['pad_right'] = pad_along_width - layer['pad_left']
        elif layer['padding'] == 'valid':
            in_width = input_shapes[0][1]
            layer['n_out'] = int(math.ceil(float(in_width - layer['pool_size'] + 1) / float(layer['stride'])))
            layer['pad_left'] = 0
            layer['pad_right'] = 0
        output_shape = [input_shapes[0][0], layer['n_out'], layer['n_filt']]
    elif int(layer['class_name'][-2]) == 2:
        layer['data_format'] = keras_layer['config'].get('data_format', 'channels_last')
        layer['in_height'] = input_shapes[0][1]
        layer['in_width'] = input_shapes[0][2]
        layer['n_filt'] = input_shapes[0][3]
        if layer['data_format'] == 'channels_first':
            layer['in_height'] = input_shapes[0][2]
            layer['in_width'] = input_shapes[0][3]
            layer['n_filt'] = input_shapes[0][1]
        layer['stride_height'] = keras_layer['config']['strides'][0]
        layer['stride_width'] = keras_layer['config']['strides'][1]
        layer['pool_height'] = keras_layer['config']['pool_size'][0]
        layer['pool_width'] = keras_layer['config']['pool_size'][1]
        layer['padding'] = keras_layer['config']['padding']
        if layer['padding'] == 'same':
            # Height
            in_height = input_shapes[0][1]
            if layer['data_format'] == 'channels_first':
                in_height = input_shapes[0][2]
            layer['out_height'] = int(math.ceil(float(in_height) / float(layer['stride_height'])))
            if in_height % layer['stride_height'] == 0:
                pad_along_height = max(layer['pool_height'] - layer['stride_height'], 0)
            else:
                pad_along_height = max(layer['pool_height'] - (in_height % layer['stride_height']), 0)
            layer['pad_top'] = pad_along_height // 2
            layer['pad_bottom'] = pad_along_height - layer['pad_top']
            # Width
            in_width = input_shapes[0][2]
            if layer['data_format'] == 'channels_first':
                in_width = input_shapes[0][3]
            layer['out_width'] = int(math.ceil(float(in_width) / float(layer['stride_width'])))
            if in_width % layer['stride_width'] == 0:
                pad_along_width = max(layer['pool_width'] - layer['stride_width'], 0)
            else:
                pad_along_width = max(layer['pool_width'] - (in_width % layer['stride_width']), 0)
            layer['pad_left'] = pad_along_width // 2
            layer['pad_right'] = pad_along_width - layer['pad_left']
        elif layer['padding'] == 'valid':
            in_height = input_shapes[0][1]
            in_width = input_shapes[0][2]
            if layer['data_format'] == 'channels_first':
                in_height = input_shapes[0][2]
                in_width = input_shapes[0][3]
            layer['out_width'] = int(math.ceil(float(in_width - layer['pool_width'] + 1) / float(layer['stride_width'])))
            layer['out_height'] = int(math.ceil(float(in_height - layer['pool_height'] + 1) / float(layer['stride_height'])))
            layer['pad_top'] = 0
            layer['pad_bottom'] = 0
            layer['pad_left'] = 0
            layer['pad_right'] = 0
        if layer['data_format'] == 'channels_last':
            output_shape = [input_shapes[0][0], layer['out_height'], layer['out_width'], layer['n_filt']]
        elif layer['data_format'] == 'channels_first':
            output_shape = [input_shapes[0][0], layer['n_filt'], layer['out_height'], layer['out_width']]
    
    return layer, output_shape