Example #1
def set_inplace_node(node, config):
    # Map a Keras activation name to an in-place activation node chained
    # after `node`. Return None for unrecognized activations so callers'
    # `if inplace_node:` checks can skip them (the original fell through
    # with node_type unbound).
    activation = config['activation']
    if activation == 'relu':
        node_type = NodeType.ReLU
    elif activation == 'tanh':
        node_type = NodeType.TanH
    elif activation == 'sigmoid':
        node_type = NodeType.Sigmoid
    elif activation == 'elu':
        node_type = NodeType.ELU
    elif activation == 'softmax':
        node_type = NodeType.SoftMax
    else:
        return None
    in_node = cnn_layer.LayerNode(node.name + '_' + activation, node_type,
                                  node)
    return in_node
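
A minimal usage sketch for the helper above (the layer name and config dict are illustrative; NodeType and cnn_layer come from the surrounding package):

conv = cnn_layer.LayerNode('conv1', NodeType.Convolution, None)
act = set_inplace_node(conv, {'activation': 'relu'})
if act is not None:
    print(act.name)  # 'conv1_relu', chained after conv1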
Example #2
def parse_caffe_def2(netdef: str):
    type_map = {
        'Convolution': NodeType.Convolution,
        'Deconvolution': NodeType.Convolution,
        'InnerProduct': NodeType.InnerProduct,
        'Scale': NodeType.Scale,
        'BatchNorm': NodeType.BatchNorm,
        'Concat': NodeType.Concat,
        'Eltwise': NodeType.Eltwise,
        'Pooling': NodeType.Pooling,
        'Upsample': NodeType.UpSampling,
        'Power': NodeType.Power,
        'ReLU': NodeType.ReLU,
        'PReLU': NodeType.PReLU,
        'TanH': NodeType.TanH,
        'ELU': NodeType.ELU,
        'Sigmoid': NodeType.Sigmoid,
        'Input': NodeType.Input,
        'Data': NodeType.Data,
        'Dropout': NodeType.DropOut,
        'Softmax': NodeType.SoftMax,
        'Flatten': NodeType.Flatten,
        'Reshape': NodeType.Reshape,
    }

    caffe_net = caffe_pb2.NetParameter()
    try:
        text_format.Parse(netdef, caffe_net)
    except Exception:
        logging.exception("Exception occurred while parsing the input "
                          "network definition")
        raise

    top_map = {}
    global_input_nodes = []
    network_argdict = {}
    # Handle fixed size input
    if len(caffe_net.input) == 1:
        node = cnn_layer.LayerNode(caffe_net.input[0], NodeType.Input)
        global_input_nodes.append(node)
        # Caffe blob shapes are NCHW; store input_dim as (W, H, C)
        if len(caffe_net.input_dim) == 4:
            dim = (caffe_net.input_dim[3], caffe_net.input_dim[2],
                   caffe_net.input_dim[1])
        elif len(caffe_net.input_shape) == 1:
            dim = (caffe_net.input_shape[0].dim[3],
                   caffe_net.input_shape[0].dim[2],
                   caffe_net.input_shape[0].dim[1])
        else:
            raise cnn_exception.ParseError(
                'Input is declared without a recognizable shape')
        node.input_dim = dim
        network_argdict["debug_node"] = caffe_net
        top_map[caffe_net.input[0]] = node
    # Handle each layer node
    parsed_nodes = []
    for i, layer in enumerate(caffe_net.layer):
        logging.debug('Handling layer %d, Name: %s, Type %s', i, layer.name,
                      layer.type)
        if layer.type not in type_map:
            logging.error('Encountered unsupported layer type %s.',
                          layer.type)
            raise cnn_exception.ParseError('Unsupported type: ' + layer.type)
        node_type = type_map[layer.type]

        # search for existing input and output nodes
        input_nodes = []
        for label in layer.bottom:
            if label in top_map:
                input_nodes.append(top_map[label])

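        # Caffe activation/normalization layers often run "in place"
        # (bottom == top); such nodes replace their producer in top_map
        # and are not appended to parsed_nodes.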
        if node_type in (NodeType.ReLU, NodeType.TanH, NodeType.ELU,
                         NodeType.Sigmoid, NodeType.BatchNorm, NodeType.Scale,
                         NodeType.PReLU, NodeType.DropOut):
            if layer.bottom[0] == layer.top[0]:
                node = cnn_layer.LayerNode(layer.name, node_type, input_nodes)
                if node_type == NodeType.ReLU:
                    param = cnn_layer.NodeParam()
                    param.relu_param = layer.relu_param.negative_slope
                    node.param = param
                elif node_type == NodeType.BatchNorm:
                    param = cnn_layer.NodeParam()
                    param.epsilon = layer.batch_norm_param.eps
                    node.param = param
                top_map[layer.top[0]] = node
                continue

        node = cnn_layer.LayerNode(layer.name, node_type, input_nodes)
        parsed_nodes.append(node)
        # add this node to top_map and bottom_map
        for label in layer.top:
            if label in top_map:
                raise cnn_exception.ParseError("Ill-formed layer. name: %s" %
                                               layer.name)
            top_map[label] = node

        if node_type == NodeType.Input:
            global_input_nodes.append(node)
            dim = (layer.input_param.shape[0].dim[3],
                   layer.input_param.shape[0].dim[2],
                   layer.input_param.shape[0].dim[1])
            node.input_dim = dim
            network_argdict["debug_node"] = caffe_net
        elif node_type == NodeType.Convolution:
            param = cnn_layer.NodeParam()
            param.is_deconv = (layer.type == "Deconvolution")
            param.num_output = int(layer.convolution_param.num_output)
            param.kernel_size = get_tuple(layer.convolution_param.kernel_size)
            param.pad_lrtb = get_pad(layer.convolution_param.pad)
            param.stride = get_tuple(layer.convolution_param.stride)
            param.group = int(layer.convolution_param.group)
            node.param = param
        elif node_type == NodeType.Pooling:
            param = cnn_layer.NodeParam()
            param.pool = int(layer.pooling_param.pool)
            param.kernel_size = get_tuple(layer.pooling_param.kernel_size)
            param.pad_lrtb = get_pad(layer.pooling_param.pad)
            param.stride = get_tuple(layer.pooling_param.stride)
            param.is_global = layer.pooling_param.global_pooling
            node.param = param
        elif node_type == NodeType.UpSampling:
            param = cnn_layer.NodeParam()
            param.kernel_size = (2, 2)
            node.param = param
        elif node_type == NodeType.Power:
            if layer.power_param.power != 1 or layer.power_param.shift != 0:
                raise ValueError(
                    "Power layer is supported only with "
                    "power = 1 and shift = 0, got %s and %s" %
                    (layer.power_param.power, layer.power_param.shift))
            param = cnn_layer.NodeParam()
            param.scale = float(layer.power_param.scale)
            node.param = param
        elif node_type == NodeType.InnerProduct:
            param = cnn_layer.NodeParam()
            param.num_output = int(layer.inner_product_param.num_output)
            node.param = param
        elif node_type == NodeType.Reshape:
            param = cnn_layer.NodeParam()
            dims = layer.reshape_param.shape.dim
            param.reshape_param = (dims[3], dims[2], dims[1])
            node.param = param
        elif node_type == NodeType.ReLU:
            param = cnn_layer.NodeParam()
            param.relu_param = layer.relu_param.negative_slope
            node.param = param

    _set_node_output(top_map)
    global_output_nodes = []
    for node in parsed_nodes:
        if len(node.output_nodes) == 0:
            global_output_nodes.append(node)

    return global_input_nodes, global_output_nodes, network_argdict
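
A hedged usage sketch ('deploy.prototxt' is a placeholder path; the function takes the prototxt text itself, not a path):

with open('deploy.prototxt') as f:
    inputs, outputs, argdict = parse_caffe_def2(f.read())
for node in inputs:
    print(node.name, node.input_dim)  # input_dim is stored as (W, H, C)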
Example #3
def parse_keras_network2(net_def, netweight, custom_layer, need_flip=False):
    type_map = {
        'Conv1D': NodeType.Convolution,
        'Conv2D': NodeType.Convolution,
        'DepthwiseConv1D': NodeType.Convolution,
        'DepthwiseConv2D': NodeType.Convolution,
        'SeparableConv1D': NodeType.Convolution,
        'SeparableConv2D': NodeType.Convolution,
        'Conv2DTranspose': NodeType.Convolution,
        'Conv1DTranspose': NodeType.Convolution,
        'Dense': NodeType.InnerProduct,
        'Concatenate': NodeType.Concat,
        'Add': NodeType.Eltwise,
        'MaxPooling1D': NodeType.Pooling,
        'MaxPooling2D': NodeType.Pooling,
        'AveragePooling1D': NodeType.Pooling,
        'AveragePooling2D': NodeType.Pooling,
        'GlobalMaxPooling1D': NodeType.Pooling,
        'GlobalAveragePooling1D': NodeType.Pooling,
        'GlobalMaxPooling2D': NodeType.Pooling,
        'GlobalAveragePooling2D': NodeType.Pooling,
        'UpSampling1D': NodeType.UpSampling,
        'UpSampling2D': NodeType.UpSampling,
        'InputLayer': NodeType.Input,
        'Softmax': NodeType.SoftMax,
        'Flatten': NodeType.Flatten,
        'Reshape': NodeType.Reshape,
        'Dropout': NodeType.DropOut,
        'ZeroPadding1D': NodeType.Padding,
        'ZeroPadding2D': NodeType.Padding,
    }
    netarg_dict = {}
    global_input_nodes = []

    netdef = json.loads(net_def)
    is_sequential = (netdef['class_name'] == 'Sequential')
    if type(netdef['config']) is list:
        layers = netdef['config']
    else:
        layers = netdef['config']['layers']
    netarg_dict["debug_node"] = netweight

    # get the data_format (channel ordering) from the first conv/pool layer
    is_channel_first = True
    for layer in layers:
        layer_type = layer['class_name']
        if layer_type in type_map:
            if type_map[layer_type] in (NodeType.Convolution,
                                        NodeType.Pooling):
                if 'data_format' in layer['config']:
                    if layer['config']['data_format'] == 'channels_first':
                        is_channel_first = True
                    else:
                        is_channel_first = False
                else:
                    is_channel_first = False
                break

    node_map = OrderedDict()
    # Handle each layer node
    for i, layer in enumerate(layers):
        layer_type = layer['class_name']
        config = layer['config']
        layer_name = config['name']
        logging.debug('Handling layer %d, Name: %s, Type %s', i, layer_name,
                      layer_type)

        # if the first node is not an input node, create a dummy input node
        if i == 0 and layer_type not in ['InputLayer', 'Layer']:
            node = cnn_layer.LayerNode('Input', NodeType.Input, None)
            global_input_nodes.append(node)
            shape = config['batch_input_shape']
            # handle an FC-only model
            if len(shape) == 2:
                dim = (shape[1], )
            # handle 1D input dimensions
            elif len(shape) == 3:
                if is_channel_first:
                    dim = (shape[2], 1, shape[1])
                else:
                    dim = (shape[1], 1, shape[2])
            else:
                if is_channel_first:
                    dim = (shape[3], shape[2], shape[1])
                else:
                    dim = (shape[2], shape[1], shape[3])
            node.input_dim = dim
            node_map[""] = node

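        # Sequential models chain layers implicitly, so the most recently
        # parsed node feeds this one; functional models name their inputs
        # explicitly in 'inbound_nodes'.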
        if is_sequential:
            if len(node_map) > 0:
                input_nodes = list(node_map.values())[-1]
            else:
                input_nodes = []
        else:
            # search for existing input and output nodes
            input_nodes = []
            if len(layer['inbound_nodes']) > 0:
                inbound_nodes = [x[0] for x in layer['inbound_nodes'][0]]
                for label in inbound_nodes:
                    if label in node_map:
                        input_nodes.append(node_map[label])

        if layer_type == 'Dropout':
            node_map[layer_name] = cnn_layer.LayerNode(layer_name,
                                                       NodeType.DropOut,
                                                       input_nodes)
            continue
        elif layer_type == 'Activation':
            activation = config['activation']
            if activation == 'softmax':
                node_type = NodeType.SoftMax
            else:
                if activation == 'relu':
                    node_type = NodeType.ReLU
                elif activation == 'tanh':
                    node_type = NodeType.TanH
                elif activation == 'sigmoid':
                    node_type = NodeType.Sigmoid
                elif activation == "elu":
                    node_type = NodeType.ELU
                elif activation == 'relu6':
                    node_type = NodeType.ReLU6
                else:
                    raise cnn_exception.ParseError(
                        'Unsupported activation: ' + activation)
                node = cnn_layer.LayerNode(layer_name, node_type, input_nodes)
                node_map[layer_name] = node
                continue
        elif layer_type == 'ReLU':
            if 'threshold' in config and config['threshold'] != 0.0:
                logging.error('ReLU layer cannot support \'threshold\' != 0.')
                raise cnn_exception.ParseError('Unsupported Layer')
            if ('max_value' in config and config['max_value'] == 6
                    and ('negative_slope' not in config
                         or config['negative_slope'] == 0)):
                node_type = NodeType.ReLU6
            elif 'max_value' not in config or config['max_value'] is None:
                node_type = NodeType.ReLU
            else:
                logging.error('ReLU layer with unsupported parameters.')
                raise cnn_exception.ParseError('Unsupported Layer')
            node = cnn_layer.LayerNode(layer_name, node_type, input_nodes)
            if 'negative_slope' in config:
                param = cnn_layer.NodeParam()
                param.relu_param = config['negative_slope']
                node.param = param
            node_map[layer_name] = node
            continue
        elif layer_type == 'LeakyReLU':
            node = cnn_layer.LayerNode(layer_name, NodeType.ReLU, input_nodes)
            param = cnn_layer.NodeParam()
            param.relu_param = config['alpha']
            node.param = param
            node_map[layer_name] = node
            continue
        elif layer_type == 'PReLU':
            # check whether shared_axes covers the width and height axes
            if 'shared_axes' in config:
                shared_axes = config['shared_axes']
            else:
                shared_axes = None
            if (is_channel_first and shared_axes == [2, 3]
                    or not is_channel_first and shared_axes == [1, 2]):
                node = cnn_layer.LayerNode(layer_name, NodeType.PReLU,
                                           input_nodes)
                if netweight is not None:
                    weights = get_weights(netweight, layer_name, need_flip,
                                          ['alpha'])
                    node.set_weight_bias(weights[0], None)
                node_map[layer_name] = node
            else:
                logging.error('PReLU layer must set its shared_axes to '
                              'width and height axes.')
                raise cnn_exception.ParseError('Unsupported Layer')
            continue
        elif layer_type == 'BatchNormalization':
            node = cnn_layer.LayerNode(layer_name, NodeType.BatchNorm,
                                       input_nodes)
            param = cnn_layer.NodeParam()
            param.epsilon = config['epsilon']
            node.param = param
            if netweight is not None:
                weights = get_weights(
                    netweight, layer_name, need_flip,
                    ['gamma', 'beta', 'moving_mean', 'moving_variance'])
                node.set_mean_var(weights[2], weights[3])
                node.set_weight_bias(weights[0], weights[1])
            node_map[layer_name] = node
            continue
        elif layer_type == 'ZeroPadding1D':
            # there is no padding layer in Caffe, so just extract the padding info
            pad = config['padding']
            try:
                if len(pad) == 2:
                    if not all(int(x) == x for x in pad):
                        raise ValueError(
                            "Unsupported Keras ZeroPadding1D: %s" % pad)
                    padding = [int(pad[0]), int(pad[1]), 0, 0]
                elif len(pad) == 1:
                    if int(pad[0]) != pad[0]:
                        raise ValueError(
                            "Unsupported Keras ZeroPadding1D: %s" % pad)
                    padding = [int(pad[0])] * 4
                elif len(pad) == 0:
                    padding = [0, 0, 0, 0]
                else:
                    raise ValueError("Unsupported Keras ZeroPadding1D: %s" %
                                     pad)
            except TypeError:
                if int(pad) != pad:
                    raise ValueError("Unsupported Keras ZeroPadding1D: %s" %
                                     pad)
                padding = [int(pad)] * 4
            node = cnn_layer.LayerNode(layer_name, NodeType.Padding,
                                       input_nodes)
            param = cnn_layer.NodeParam()
            param.pad_lrtb = padding
            node.param = param
            node_map[layer_name] = node
            continue
        elif layer_type == 'ZeroPadding2D':
            # there is no padding layer in Caffe, so just extract the padding info
            pad = config['padding']
            try:
                if len(pad) == 4:
                    if not all(int(x) == x for x in pad):
                        raise ValueError(
                            "Unsupported Keras ZeroPadding2D: %s" % pad)
                    padding = list(int(x) for x in pad)
                elif len(pad) == 2:
                    padding = [0, 0, 0, 0]
                    try:
                        if len(pad[0]) == 2:
                            padding[0] = pad[0][0]
                            padding[1] = pad[0][1]
                        elif len(pad[0]) == 1:
                            padding[0] = pad[0][0]
                            padding[1] = pad[0][0]
                        elif len(pad[0]) == 0:
                            padding[0] = 0
                            padding[1] = 0
                        else:
                            raise ValueError(
                                "Unsupported Keras ZeroPadding2D: %s" % pad)
                    except TypeError:
                        if int(pad[0]) != pad[0]:
                            raise ValueError(
                                "Unsupported Keras ZeroPadding2D: %s" % pad)
                        padding[0] = int(pad[0])
                        padding[1] = int(pad[0])
                    try:
                        if len(pad[1]) == 2:
                            padding[2] = pad[1][0]
                            padding[3] = pad[1][1]
                        elif len(pad[1]) == 1:
                            padding[2] = pad[1][0]
                            padding[3] = pad[1][0]
                        elif len(pad[1]) == 0:
                            padding[2] = 0
                            padding[3] = 0
                        else:
                            raise ValueError(
                                "Unsupported Keras ZeroPadding2D: %s" % pad)
                    except TypeError:
                        if int(pad[1]) != pad[1]:
                            raise ValueError(
                                "Unsupported Keras ZeroPadding2D: %s" % pad)
                        padding[2] = int(pad[1])
                        padding[3] = int(pad[1])
                elif len(pad) == 1:
                    if int(pad[0]) != pad[0]:
                        raise ValueError(
                            "Unsupported Keras ZeroPadding2D: %s" % pad)
                    padding = [int(pad[0])] * 4
                elif len(pad) == 0:
                    padding = [0, 0, 0, 0]
                else:
                    raise ValueError("Unsupported Keras ZeroPadding2D: %s" %
                                     pad)
            except TypeError:
                if int(pad) != pad:
                    raise ValueError("Unsupported Keras ZeroPadding2D: %s" %
                                     pad)
                padding = [int(pad), int(pad), int(pad), int(pad)]
            node = cnn_layer.LayerNode(layer_name, NodeType.Padding,
                                       input_nodes)
            param = cnn_layer.NodeParam()
            param.pad_lrtb = padding
            node.param = param
            node_map[layer_name] = node
            continue
        elif layer_type == 'Merge':
            mode = config['mode']
            if mode == 'concat':
                node_type = NodeType.Concat
            elif mode == 'add':
                node_type = NodeType.Eltwise
        elif layer_type == 'Layer' and 'batch_input_shape' in config:
            node_type = NodeType.Input
        elif layer_type in custom_layer:
            custom_config = custom_layer[layer_type]
            # build the C parameter-type list the first time this custom
            # layer type is encountered
            if type(custom_config[0]) is list:
                c_type_map = {int: 'int', bool: 'bool', float: 'float'}
                param_list = OrderedDict()
                for param_name in custom_config[0]:
                    type_0 = type(config[param_name])
                    if type_0 is list:
                        type_1 = type(config[param_name][0])
                        list_size = len(config[param_name])
                        c_type = '{:s}[{:d}]'.format(c_type_map[type_1],
                                                     list_size)
                    elif type_0 in c_type_map:
                        c_type = c_type_map[type_0]
                    else:
                        raise cnn_exception.ParseError(
                            'Unsupported custom layer parameter type: ' +
                            type_0.__name__)
                    param_list[param_name] = c_type
                custom_config = (param_list, custom_config[1])
                custom_layer[layer_type] = custom_config
            node_type = NodeType.Custom
        else:
            if layer_type not in type_map:
                raise cnn_exception.ParseError(
                    'Unknown layer, Name: {}, Type: {}'.format(
                        layer_name, layer_type))

            node_type = type_map[layer_type]
        node = cnn_layer.LayerNode(layer_name, node_type, input_nodes)

        # add this node to node_map
        if layer_name in node_map:
            raise cnn_exception.ParseError('Ill-formed layer. name:' +
                                           layer_name)
        node_map[layer_name] = node

        if node_type == NodeType.Input:
            global_input_nodes.append(node)
            shape = config['batch_input_shape']
            # handle an FC-only model
            if len(shape) == 2:
                dim = (1, 1, shape[1])
            # handle 1D input dimensions
            elif len(shape) == 3:
                if is_channel_first:
                    dim = (shape[2], 1, shape[1])
                else:
                    dim = (shape[1], 1, shape[2])
            else:
                if is_channel_first:
                    dim = (shape[3], shape[2], shape[1])
                else:
                    dim = (shape[2], shape[1], shape[3])
            node.input_dim = dim
        elif node_type == NodeType.Convolution:
            is_1D = (layer_type[-2] == '1')
            param = cnn_layer.NodeParam()
            # For Keras, 'filters' is not set for depthwise convolution, so
            # skip setting num_output here and set it when calculating the
            # in/out sizes
            if (layer_type[:-2] != 'DepthwiseConv'
                    and layer_type[:-2] != 'SeparableConv'):
                param.num_output = config['filters']
            if is_1D:
                param.kernel_size = (config['kernel_size'][0], 1)
            else:
                param.kernel_size = (config['kernel_size'][1],
                                     config['kernel_size'][0])
            param.keras_padding = config['padding']
            param.dilation = config['dilation_rate']
            param.is_deconv = (layer_type[6:] == "Transpose")
            param.deconv_output_padding = config.get("output_padding")
            if is_1D:
                param.stride = (config['strides'][0], 1)
            else:
                param.stride = (config['strides'][1], config['strides'][0])
            if (layer_type[:-2] == 'DepthwiseConv'
                    or layer_type[:-2] == 'SeparableConv'):
                param.group = config['depth_multiplier']
                if param.group > 1:
                    logging.error('Depthwise/Separable Convolution with '
                                  '\'depth_multiplier\' > 1 is not '
                                  'supported.')
                    raise cnn_exception.ParseError('Unsupported param')
            node.param = param

            if layer_type[:-2] == 'SeparableConv':
                point_node = cnn_layer.LayerNode(layer_name + '_point',
                                                 node_type, node)
                node_map[layer_name] = point_node
                param = cnn_layer.NodeParam()
                param.num_output = config['filters']
                point_node.param = param

            if config['activation'] != 'linear':
                inplace_node = set_inplace_node(
                    list(node_map.values())[-1], config)
                if inplace_node:
                    node_map[layer_name] = inplace_node
            if netweight is not None:
                if layer_type[:-2] == 'SeparableConv':
                    weights = get_weights(
                        netweight, layer_name, need_flip,
                        ['depthwise_kernel', 'pointwise_kernel', 'bias'])
                    node.set_weight_bias(weights[0], None)
                    point_node.set_weight_bias(weights[1], weights[2])
                else:
                    weights = get_weights(netweight, layer_name, need_flip,
                                          ['kernel', 'bias'])
                    node.set_weight_bias(weights[0], weights[1])
        elif node_type == NodeType.Pooling:
            param = cnn_layer.NodeParam()
            if layer_type in ('MaxPooling1D', 'MaxPooling2D',
                              'GlobalMaxPooling1D', 'GlobalMaxPooling2D'):
                param.pool = 0  # max pooling
            else:
                param.pool = 1  # average pooling
            if layer_type in ('MaxPooling1D', 'AveragePooling1D'):
                param.kernel_size = (config['pool_size'][0], 1)
                param.keras_padding = config['padding']
                param.stride = (config['strides'][0], 1)
            elif layer_type in ('MaxPooling2D', 'AveragePooling2D'):
                param.kernel_size = (config['pool_size'][1],
                                     config['pool_size'][0])
                param.keras_padding = config['padding']
                param.stride = (config['strides'][1], config['strides'][0])
            else:
                param.is_global = True
            node.param = param
        elif node_type == NodeType.UpSampling:
            is_1D = (layer_type[-2] == '1')
            param = cnn_layer.NodeParam()
            if is_1D:
                param.kernel_size = (config['size'][0], 1)
            else:
                param.kernel_size = (config['size'][1], config['size'][0])
            node.param = param
        elif node_type == NodeType.InnerProduct:
            param = cnn_layer.NodeParam()
            param.num_output = config['units']
            node.param = param
            if config['activation'] != 'linear':
                inplace_node = set_inplace_node(node, config)
                if inplace_node:
                    node_map[layer_name] = inplace_node
            if netweight is not None:
                weights = get_weights(netweight, layer_name, need_flip,
                                      ['kernel', 'bias'])
                node.set_weight_bias(weights[0], weights[1])
        elif node_type == NodeType.Reshape:
            param = cnn_layer.NodeParam()
            param.reshape_param = tuple(config['target_shape'])
            node.param = param
        elif node_type == NodeType.Concat:
            param = cnn_layer.NodeParam()
            if 'axis' in config:
                param.axis = config['axis']
            elif 'concat_axis' in config:
                param.axis = config['concat_axis']
            if param.axis > 0:
                param.axis -= 1
            if is_channel_first and param.axis >= 0:
                param.axis = 2 - param.axis
            node.param = param
        elif node_type == NodeType.SoftMax:
            param = cnn_layer.NodeParam()
            if 'axis' in config:
                param.axis = config['axis']
            else:
                param.axis = -1
            node.param = param
        elif node_type == NodeType.Custom:
            param = cnn_layer.NodeParam()
            custom_config = custom_layer[layer_type]
            custom_param = (OrderedDict(
                {x: config[x]
                 for x in custom_config[0]}), custom_config[1], layer_type)
            param.custom_param = custom_param
            node.param = param

    _set_node_output(node_map)
    global_output_nodes = []
    for node in node_map.values():
        if len(node.output_nodes) == 0:
            global_output_nodes.append(node)
    return global_input_nodes, global_output_nodes, netarg_dict
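
A hedged usage sketch, assuming the definition came from Keras's model.to_json() and that get_weights reads from an opened HDF5 weight file (the paths and the empty custom_layer dict are illustrative):

import h5py

with open('model.json') as f:
    net_def = f.read()
with h5py.File('model_weights.h5', 'r') as weights:
    inputs, outputs, netarg_dict = parse_keras_network2(
        net_def, weights, custom_layer={}, need_flip=False)
print([n.name for n in outputs])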