Example #1
    def insert_pre_processing(graph: Graph, input_node: Node, node_mean_scale_values: dict,
                              preprocessing_name: str):
        assert preprocessing_name in ['scale', 'mean']
        if node_mean_scale_values.get(preprocessing_name) is None:
            return
        user_value = node_mean_scale_values[preprocessing_name]
        value = 1 / user_value if preprocessing_name == 'scale' else user_value * (-1)
        optimize_value = int(preprocessing_name == 'scale')
        op = Mul if preprocessing_name == 'scale' else Add

        if all([x == optimize_value for x in value]):
            return
        assert input_node.has_valid('shape')
        in_name = input_node.soft_get('name', input_node.id)
        features_dim_idx, has_layout = get_dim_from_layout(input_node, 'C')
        if features_dim_idx is None:
            if has_layout:
                log.warning('Layout for input {} doesn\'t have channel ("C") dimension to apply {} preprocessing. '
                            'Skipping this input.'.format(in_name, preprocessing_name))
                return
            features_dim_idx = get_features_dim(graph.graph['layout'], len(input_node.shape))
        assert compatible_dims(value.size, input_node.shape[features_dim_idx]) or value.size == 1, \
            "Incompatible layout, please specify correct layout for the node"

        shape = np.ones(len(input_node.shape), dtype=np.int64)
        shape[features_dim_idx] = value.size
        value = value.reshape(shape)

        name = in_name + '/' + preprocessing_name
        preprocessing = create_op_with_const_inputs(graph, op=op, port_value_dict={1: value}, op_attrs={'name': name})

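        # Cast the mean/scale value to the Parameter's floating-point data type, if one is set.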
        if input_node.op == 'Parameter' and input_node.has_and_set('data_type'):
            dtype = input_node.data_type
            if np.issubdtype(dtype, np.floating):
                value = value.astype(dtype)

        if input_node.is_out_port_connected(0) and len(input_node.out_port(0).get_destinations()) == 1:
            # There are models with the pattern Parameter(uint8) -> Convert(float).
            # Adding mean/scale there would produce
            # Parameter(uint8) -> Mean/Scale -> Convert(float), which is incorrect.
            # To fix this, the mean/scale preprocessing node is inserted after the Convert(float) node.
            out_node = input_node.out_port(0).get_destination().node
            convert_type = out_node.soft_get('dst_type')
            if out_node.soft_get('type') == "Convert" and (convert_type in [np.float32, np.float16]):
                input_node = out_node
                if convert_type != value.dtype:
                    new_value = value.astype(convert_type)
                    const_node = preprocessing.in_port(1).get_connection().get_source().node
                    const_node['value'] = new_value

        for dst in input_node.out_port(0).get_destinations():
            if dst.node.soft_get('type') != 'ShapeOf':
                # After inserting the additional operations, the Model Optimizer
                # should keep the link to the input layer: the Parameter node in the
                # framework model should map to the Parameter node in the IR.
                # For this reason 'fw_tensor_debug_info' should be kept in the data node.
                dst.get_connection().set_source(preprocessing.out_port(0), "source")

        input_node.out_port(0).connect(preprocessing.in_port(0))
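
The arithmetic this transform encodes is easy to verify outside of the graph machinery: for 'scale' the constant fed into Mul is 1/scale, for 'mean' the constant fed into Add is -mean, and either constant is reshaped so that it broadcasts along the channel dimension only. Below is a minimal NumPy sketch of that math, with made-up per-channel values and an NCHW input; everything in it is illustrative and not part of the transform itself.

import numpy as np

mean = np.array([123.675, 116.28, 103.53])    # hypothetical per-channel mean
scale = np.array([58.395, 57.12, 57.375])     # hypothetical per-channel scale

image = np.random.rand(1, 3, 224, 224).astype(np.float32) * 255.0

features_dim_idx = 1                          # 'C' in NCHW
broadcast_shape = np.ones(image.ndim, dtype=np.int64)
broadcast_shape[features_dim_idx] = mean.size

add_const = (mean * -1).reshape(broadcast_shape)   # what the Add node receives
mul_const = (1 / scale).reshape(broadcast_shape)   # what the Mul node receives
result = (image + add_const) * mul_const

# Same as the conventional (x - mean) / scale normalization.
expected = (image - mean.reshape(broadcast_shape)) / scale.reshape(broadcast_shape)
assert np.allclose(result, expected)
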
Example #2
def get_channel_index(node: Node) -> int:
    guessed_layout = 'NCHW'
    if node.has_valid('rt_info'):
        rt_info = node.rt_info
        if rt_info.contains('old_api_map_order'):
            old_api_map_version = rt_info.get_attribute_version('old_api_map_order')
            old_api_map = rt_info.info['old_api_map_order', old_api_map_version]
            if 'inverse_order' in old_api_map.info:
                order = old_api_map.info['inverse_order']
                assert len(order) == len(guessed_layout)
                guessed_layout = np.array(list(guessed_layout))[order]
                guessed_layout = ''.join(guessed_layout)
    idx, has_layout = get_dim_from_layout(node, 'C')
    if has_layout:
        return idx
    else:
        return get_features_dim(guessed_layout, len(node.shape))
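
The layout guess in this helper is just a permutation of the string 'NCHW' by the stored inverse_order, followed by finding where 'C' ends up. That permutation step can be reproduced standalone; the order value below is an assumption chosen for illustration, not something read from a real model.

import numpy as np

guessed_layout = 'NCHW'
order = [0, 2, 3, 1]                  # hypothetical inverse_order from old_api_map_order

permuted = ''.join(np.array(list(guessed_layout))[order])
print(permuted)                       # NHWC
print(permuted.index('C'))            # 3 -- the channel position the fallback branch would use
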
Example #3
    def get_suitable_channel_index(node: Node, shape):
        if len(shape) != 4:
            return None

        guessed_layout = 'NCHW'
        if node.has_valid('rt_info'):
            rt_info = node.rt_info
            if rt_info.contains('old_api_map_order'):
                old_api_map_version = rt_info.get_attribute_version('old_api_map_order')
                old_api_map = rt_info.info['old_api_map_order', old_api_map_version]
                if 'inverse_order' in old_api_map.info:
                    order = old_api_map.info['inverse_order']
                    assert len(order) == len(guessed_layout)
                    guessed_layout = np.array(list(guessed_layout))[order]
                    guessed_layout = ''.join(guessed_layout)
        idx, has_layout = get_dim_from_layout(node, 'C')
        if not has_layout:
            idx = get_features_dim(guessed_layout, len(node.shape))
        if compatible_dims(shape[idx], 3):
            return idx
        else:
            return None
Example #4
def override_batch(graph: Graph, batch: int):
    """
    Overrides batch for nodes with 'op' param set to 'Parameter'
    Parameters
    ----------
    graph: graph to operate on
    batch: user defined integer value to override batch
    """
    if batch is not None:
        in_nodes = graph.get_op_nodes(op='Parameter')
        for node in in_nodes:
            if not node.soft_get('fixed_batch', False):
                name = node.soft_get('name', node.id)
                idx, has_layout = get_dim_from_layout(node, 'N')
                if has_layout:
                    if idx is not None:
                        node['shape'][idx] = batch
                    else:
                        log.warning(
                            'Layout for input {} doesn\'t have batch dimension. Skipping this input.'.format(name))
                else:
                    validate_batch_in_shape(node['shape'], name)
                    node['shape'][0] = batch
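
The batch override itself is a single assignment into the Parameter shape; which position gets assigned depends on whether the input has an explicit layout with an 'N' dimension. A small sketch with made-up shapes (plain NumPy, no Graph or Node objects involved):

import numpy as np

batch = 4

# Input with an explicit layout: the batch goes wherever 'N' sits.
layout = 'NHWC'
shape_with_layout = np.array([1, 224, 224, 3])
shape_with_layout[layout.index('N')] = batch
print(shape_with_layout)      # -> [4 224 224 3]

# Input without layout information: dimension 0 is assumed to be the batch.
shape_no_layout = np.array([1, 3, 224, 224])
shape_no_layout[0] = batch
print(shape_no_layout)        # -> [4 3 224 224]
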