Example #1
def read_learning_info(pb: io.BufferedReader):
    while True:
        read_placeholder(pb, 1)
        first_char = pb.read(1)
        pb.seek(-2, os.SEEK_CUR)
        position = pb.tell()
        if first_char == b'L':
            cur_pos = pb.tell()
            token = find_next_tag(pb)
            pb.seek(cur_pos)
            if token in ['<LearnRateCoef>', '<LearningRate>']:
                token = bytes(token, 'ascii')
            else:
                log.debug('Unexpected tag: {}'.format(token))
                break
        elif first_char == b'B':
            token = b'<BiasLearnRateCoef>'
        elif first_char == b'M':
            token = b'<MaxNorm>'
        elif first_char == b'!':  # token = b'<EndOfComponent>'
            break
        else:
            break
        try:
            read_token_value(pb, token)
        except Error:
            pb.seek(position)
            break
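
This function relies on low-level helpers from the Model Optimizer's Kaldi loader utilities (read_placeholder, find_next_tag, read_token_value). As a rough illustration of the binary layout they parse, here is a minimal hypothetical token-value reader; the name and body are assumptions, not the real helper:

def read_token_value_sketch(pb, token: bytes):
    # Kaldi binary files store b'<Tag> ' followed by a one-byte size
    # marker (4 for int32) and the little-endian payload itself
    tag = b''
    while True:
        ch = pb.read(1)
        if ch in (b' ', b''):
            break
        tag += ch
    if tag != token:
        raise ValueError('expected {}, found {}'.format(token, tag))
    size = ord(pb.read(1))  # size marker: number of payload bytes
    return int.from_bytes(pb.read(size), 'little', signed=True)

For example, read_token_value_sketch(io.BytesIO(b'<PatchDim> \x04\x08\x00\x00\x00'), b'<PatchDim>') would return 8.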
Example #2

    def extract(cls, node: Node) -> bool:
        """
        Extract convolution parameters from node.parameters.
        node.parameters is a file-descriptor-like object.
        :param node: Convolution node
        :return: whether the extractor is enabled
        """
        pb = node.parameters
        kernel = read_token_value(pb, b'<PatchDim>')
        stride = read_token_value(pb, b'<PatchStep>')
        patch_stride = read_token_value(pb, b'<PatchStride>')

        read_learning_info(pb)

        collect_until_whitespace(pb)
        weights, weights_shape = read_binary_matrix(pb)

        collect_until_whitespace(pb)
        biases = read_binary_vector(pb)

        if (patch_stride - kernel) % stride != 0:
            raise Error(
                'Kernel size and stride do not correspond to the `patch_stride` attribute of the Convolution layer. '
                + refer_to_faq_msg(93))

        output = biases.shape[0]
        if weights_shape[0] != output:
            raise Error(
                'The weights shape does not correspond to the `output` attribute of the Convolution layer. '
                + refer_to_faq_msg(93))

        mapping_rule = {
            'output': output,
            'patch_stride': patch_stride,
            'bias_term': None,
            'pad': np.array([[0, 0], [0, 0], [0, 0], [0, 0]], dtype=np.int64),
            'pad_spatial_shape': np.array([[0, 0], [0, 0]], dtype=np.int64),
            'dilation': np.array([1, 1, 1, 1], dtype=np.int64),
            'kernel': np.array([1, 1, 1, kernel], dtype=np.int64),
            'stride': np.array([1, 1, 1, stride], dtype=np.int64),
            'kernel_spatial': np.array([1, kernel], dtype=np.int64),
            'input_feature_channel': 1,
            'output_feature_channel': 0,
            'kernel_spatial_idx': [2, 3],
            'group': 1,
            'reshape_kernel': True,
        }

        mapping_rule.update(layout_attrs())
        embed_input(mapping_rule, 1, 'weights', weights)
        embed_input(mapping_rule, 2, 'biases', biases)

        mapping_rule['bias_addable'] = len(biases) > 0

        Convolution.update_node_stat(node, mapping_rule)
        return cls.enabled
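
In the Model Optimizer sources this extract is a classmethod of a front-extractor class that registers itself for a Kaldi component; the class wrapper is omitted from the snippet. A hypothetical skeleton of that wrapper (the base class is stubbed here and the op string is an assumption):

class FrontExtractorOp:  # stub; the real base class lives in the Model Optimizer
    pass


class ConvolutionFrontExtractor(FrontExtractorOp):
    op = 'convolutionalcomponent'  # Kaldi component name; an assumption
    enabled = True

    @classmethod
    def extract(cls, node):
        # body as in the snippet above: read tokens, build mapping_rule,
        # then call Convolution.update_node_stat(node, mapping_rule)
        return cls.enabled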
Example #3

    def extract(cls, node):
        pb = node.parameters
        collect_until_token(pb, b'<ConvolutionModel>')
        in_shape = read_token_value(pb, b'<NumFiltersIn>')
        out_shape = read_token_value(pb, b'<NumFiltersOut>')
        height_in = read_token_value(pb, b'<HeightIn>')
        height_out = read_token_value(pb, b'<HeightOut>')
        height_subsample = read_token_value(pb, b'<HeightSubsampleOut>')
        collect_until_token(pb, b'<Offsets>')
        offsets = read_binary_vector_of_pairs(pb,
                                              read_token=False,
                                              dtype=np.int32)
        collect_until_token(pb, b'<RequiredTimeOffsets>')
        time_offsets = read_binary_vector(pb, read_token=False, dtype=np.int32)
        collect_until_token(pb, b'<LinearParams>')
        weights, _ = read_binary_matrix(pb)
        collect_until_token(pb, b'<BiasParams>')
        biases = read_binary_vector(pb)

        offsets = offsets.reshape([len(offsets) // 2, 2])
        mapping_rule = {
            # stride for the h axis
            'height_subsample': height_subsample,
            # input dimension for the h axis
            'height_in': height_in,
            # output dimension for the h axis
            'height_out': height_out,
            # input dimension for the channel axis
            'in_channels': in_shape,
            # output dimension for the channel axis
            'out_channels': out_shape,
            # array of (time, height) pairs such as
            # [ (-1, -1) (-1, 0) (-1, 1)
            #   (0, -1)  (0, 0)  (0, 1)
            #   (1, -1)  (1, 0)  (1, 1)]
            # meaning that a 3x3 kernel is applied to compute each output value
            'offsets': offsets,
            # time offsets required to compute the current convolution;
            # for the example above, time_offsets = [-1, 0, 1] means no padding
            # along the time axis and 3 frames must be prepared, while
            # time_offsets = [0] means zero padding [1, 1] along the time axis
            'time_offsets': time_offsets,
            'out-size': out_shape * height_out
        }

        embed_input(mapping_rule, 1, 'weights', weights)
        embed_input(mapping_rule, 2, 'biases', biases)

        TimeHeightConvolutionComponent.update_node_stat(node, mapping_rule)
        return cls.enabled
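
The flat <Offsets> vector read above is reshaped into (time, height) pairs. A small self-contained example of what that reshape produces for the 3x3 kernel described in the comments:

import numpy as np

# 18 flat values become nine (time, height) pairs, i.e. a 3x3 kernel footprint
flat = np.array([-1, -1, -1, 0, -1, 1,
                 0, -1, 0, 0, 0, 1,
                 1, -1, 1, 0, 1, 1], dtype=np.int32)
pairs = flat.reshape([len(flat) // 2, 2])  # shape (9, 2)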
Example #4
    def extract(cls, node):
        pb = node.parameters
        collect_until_token(pb, b'<PoolSize>')
        kernel = read_binary_integer32_token(pb)
        tag = find_next_tag(pb)
        if tag == '<PoolStep>':
            read_placeholder(pb, 1)
            stride = read_binary_integer32_token(pb)
            pool_step = stride
            pool_stride = read_token_value(pb, b'<PoolStride>')
        elif tag == '<PoolStride>':
            stride = 1
            pool_step = None
            read_placeholder(pb, 1)
            pool_stride = read_binary_integer32_token(pb)
        else:
            raise Error('Cannot extract parameters for {}'.format(node))

        mapping_rule = {
            'window': np.array([1, 1, 1, kernel], dtype=np.int64),
            'stride': np.array([1, 1, stride, stride], dtype=np.int64),
            'pool_stride': pool_stride,
            'pool_step': pool_step,
            'pad': np.array([[0, 0], [0, 0], [0, 0], [0, 0]], dtype=np.int64),
            'pad_spatial_shape': np.array([[0, 0], [0, 0]], dtype=np.int64),
            'pool_method': 'max',
        }
        mapping_rule.update(layout_attrs())
        Pooling.update_node_stat(node, mapping_rule)
        return cls.enabled
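
This extractor, like example #1, branches on find_next_tag to peek at the next '<...>' tag in the stream. A minimal sketch of such a scanner, written as an assumption about its behavior rather than a copy of the real helper:

def find_next_tag_sketch(pb) -> str:
    # skip bytes until '<', then accumulate characters up to the closing '>'
    tag = b''
    while True:
        ch = pb.read(1)
        if ch == b'':
            raise EOFError('unexpected end of file while looking for a tag')
        if not tag:
            if ch == b'<':
                tag = ch
            continue
        tag += ch
        if ch == b'>':
            return tag.decode('ascii')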
Example #5
def load_kalid_nnet2_model(graph, file_descr, nnet_name):
    input_name = 'Input'
    graph.add_node(input_name, name=input_name, kind='op', op='Parameter', parameters=None, shape=None)

    prev_layer_id = input_name

    all_components = load_components(file_descr, graph)

    used_layers = set()
    for layer_id in all_components:
        prev_node = Node(graph, prev_layer_id)
        if prev_node.op == 'Parameter':
            parameters = Node(graph, layer_id).parameters
            input_dim = read_token_value(parameters, b'<InputDim>')
            prev_node['shape'] = np.array([1, input_dim], dtype=np.int64)
        prev_node.add_output_port(0)
        Node(graph, layer_id).add_input_port(0)
        graph.create_edge(prev_node, Node(graph, layer_id), 0, 0, create_edge_attrs(prev_layer_id, layer_id, prev_layer_id))
        used_layers.add(prev_layer_id)
        log.debug('{} and {} were connected'.format(prev_layer_id, layer_id))
        prev_layer_id = layer_id

    # Tensor name information corresponding to a node is stored on its outgoing edges.
    # As output nodes have no outgoing edges, fake outputs are required. In the following code
    # an Identity node is added for each output, and the tensor name for the output is kept
    # on the (output, fake output) edge. The fake outputs are removed from the graph
    # by the transformation that adds Result nodes.
    output_layers = graph.nodes - used_layers
    add_outputs_identity(graph, output_layers, lambda g, output, fake_output: g.create_edge(
        Node(g, output), Node(g, fake_output), 0, 0, create_edge_attrs(output, fake_output, output)))
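
add_outputs_identity is given a callback with the signature (graph, output, fake_output). A hypothetical sketch of what such a helper could do, covering only what this call site requires (the node attributes are assumptions):

def add_outputs_identity_sketch(graph, outputs, connect):
    # for each real output add a fake Identity node, then let the callback
    # create the (output, fake output) edge that carries the tensor name
    for output in outputs:
        fake_id = graph.unique_id(prefix=str(output) + '/FakeOutput')
        graph.add_node(fake_id, name=fake_id, kind='op', op='Identity')
        connect(graph, output, fake_id)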
Example #6
def load_kalid_nnet2_model(graph, file_descr, nnet_name):
    input_name = 'Input'
    graph.add_node(input_name,
                   name=input_name,
                   kind='op',
                   op='Parameter',
                   parameters=None,
                   shape=None)

    prev_layer_id = input_name

    all_components = load_components(file_descr, graph)

    for layer_id in all_components:
        prev_node = Node(graph, prev_layer_id)
        if prev_node.op == 'Parameter':
            parameters = Node(graph, layer_id).parameters
            input_dim = read_token_value(parameters, b'<InputDim>')
            prev_node['shape'] = np.array([1, input_dim], dtype=np.int64)
        prev_node.add_output_port(0)
        Node(graph, layer_id).add_input_port(0)
        graph.create_edge(
            prev_node, Node(graph, layer_id), 0, 0,
            create_edge_attrs(prev_layer_id, layer_id, prev_layer_id))
        log.debug('{} and {} were connected'.format(prev_layer_id, layer_id))
        prev_layer_id = layer_id
Example #7
def load_kalid_nnet2_model(file_descr, nnet_name):
    graph = Graph(name=nnet_name)
    input_name = 'Input'
    input_shape = np.array([])
    graph.add_node(input_name,
                   name=input_name,
                   kind='op',
                   op='Input',
                   parameters=None,
                   shape=None)

    prev_layer_id = input_name

    collect_until_token(file_descr, b'<Nnet>')
    num_components = read_token_value(file_descr, b'<NumComponents>')
    log.debug('Network contains {} components'.format(num_components))
    collect_until_token(file_descr, b'<Components>')
    for _ in range(num_components):
        component_type = find_next_component(file_descr)

        if component_type == end_of_nnet_tag.lower()[1:-1]:
            break
        start_index = file_descr.tell()
        end_tag, end_index = find_end_of_component(file_descr, component_type)
        layer_id = graph.unique_id(prefix=component_type)
        graph.add_node(layer_id,
                       parameters=get_parameters(file_descr, start_index,
                                                 end_index),
                       op=component_type,
                       kind='op')

        prev_node = Node(graph, prev_layer_id)
        if prev_node.op == 'Input':
            parameters = Node(graph, layer_id).parameters
            input_dim = read_token_value(parameters, b'<InputDim>')
            prev_node['shape'] = np.array([1, input_dim], dtype=np.int64)
            input_shape = np.array([1, input_dim], dtype=np.int64)
        graph.add_edge(prev_layer_id, layer_id,
                       **create_edge_attrs(prev_layer_id, layer_id))
        prev_layer_id = layer_id
        log.debug('{} (type is {}) was loaded'.format(prev_layer_id,
                                                      component_type))
    return graph, input_shape
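
The loaders call create_edge_attrs with two to five arguments depending on the version. As an assumption about the shape of the returned dictionary (not the actual Model Optimizer implementation), a two-argument variant might look like:

def create_edge_attrs_sketch(prev_layer_id, layer_id, out_port=0, in_port=0):
    # minimal edge attributes: port numbers plus the tensor debug name;
    # layer_id is kept only to mirror the call sites above
    return {
        'out': out_port,
        'in': in_port,
        'name': prev_layer_id,
        'fw_tensor_debug_info': [(prev_layer_id, prev_layer_id)],
    }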
Example #8
File: loader.py Project: yding10/openvino
def load_parallel_component(file_descr, graph: Graph, prev_layer_id):
    """
    Load ParallelComponent of the Kaldi model.
    ParallelComponent contains parallel nested networks.
    VariadicSplit is inserted before nested networks.
    Outputs of nested networks concatenate with layer Concat.

    :param file_descr: descriptor of the model file
    :param graph: graph with the topology.
    :param prev_layer_id: id of the input layers for parallel component layer
    :return: id of the concat layer - last layer of the parallel component layers
    """
    nnet_count = read_token_value(file_descr, b'<NestedNnetCount>')
    log.debug(
        'Model contains parallel component with {} nested networks'.format(
            nnet_count))

    split_points = []
    outputs = []
    inputs = []

    for i in range(nnet_count):
        read_token_value(file_descr, b'<NestedNnet>')
        collect_until_token(file_descr, b'<Nnet>')
        g = Graph()
        load_kalid_nnet1_model(g, file_descr, 'Nested_net_{}'.format(i))

        # the input to nnet1 models is of rank 1, but we also insert batch_size at the 0th axis;
        # the 1st axis contains the input_size of the nested subnetwork,
        # so we split the input from the main network across the subnetworks
        input_node = Node(g, 'Parameter')
        split_points.append(input_node['shape'][1])
        g.remove_node(input_node.id)

        mapping = {
            node: graph.unique_id(node)
            for node in g.nodes(data=False) if node in graph
        }
        g = nx.relabel_nodes(g, mapping)
        for val in mapping.values():
            g.node[val]['name'] = val
        graph.add_nodes_from(g.nodes(data=True))
        graph.add_edges_from(g.edges(data=True))
        sorted_nodes = tuple(nx.topological_sort(g))

        outputs.append(Node(graph, sorted_nodes[-1]))
        inputs.append(Node(graph, sorted_nodes[0]))

    split_id = graph.unique_id(prefix='NestedNets/VariadicSplit')
    attrs = {
        'out_ports_count': nnet_count,
        'size_splits': split_points,
        'axis': 1,
        'name': split_id
    }
    variadic_split_node = AttributedVariadicSplit(graph, attrs).create_node()
    prev_layer_node = Node(graph, prev_layer_id)
    prev_layer_node.add_output_port(0)
    graph.create_edge(
        prev_layer_node, variadic_split_node, 0, 0,
        create_edge_attrs(prev_layer_id, variadic_split_node.id,
                          prev_layer_id))

    concat_id = graph.unique_id(prefix='Concat')
    graph.add_node(concat_id, parameters=None, op='concat', kind='op')
    concat_node = Node(graph, concat_id)

    # Connect each output of variadic_split_node to each subnetwork's inputs in ParallelComponent
    # and each subnetwork's output to concat_node
    for i, (input_node, output_node) in enumerate(zip(inputs, outputs)):
        output_node.add_output_port(0)
        concat_node.add_input_port(i)
        graph.create_edge(
            output_node, concat_node, 0, i,
            create_edge_attrs(output_node.id, concat_id, output_node.id, i, 0))
        graph.create_edge(
            variadic_split_node, input_node, i, 0,
            create_edge_attrs(variadic_split_node.id, input_node.id,
                              variadic_split_node.id, 0, i))
    return concat_id
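
Conceptually, the inserted VariadicSplit cuts the feature axis (axis=1) into per-subnetwork chunks of the collected split_points sizes. A plain NumPy illustration of that split:

import numpy as np

x = np.zeros((1, 10))  # batch of one, 10 input features
split_points = [4, 6]  # e.g. two nested nets consuming 4 and 6 features
chunks = np.split(x, np.cumsum(split_points)[:-1], axis=1)
# chunks[0].shape == (1, 4), chunks[1].shape == (1, 6)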
Example #9
def load_parallel_component(file_descr, graph: Graph, prev_layer_id):
    """
    Load ParallelComponent of the Kaldi model.
    ParallelComponent contains parallel nested networks.
    Slice is inserted before nested networks.
    Outputs of nested networks concatenate with layer Concat.

    :param file_descr: descriptor of the model file
    :param graph: graph with the topology.
    :param prev_layer_id: id of the input layers for parallel component layer
    :return: id of the concat layer - last layer of the parallel component layers
    """
    nnet_count = read_token_value(file_descr, b'<NestedNnetCount>')
    log.debug(
        'Model contains parallel component with {} nested networks'.format(
            nnet_count))

    slice_id = graph.unique_id(prefix='Slice')
    graph.add_node(slice_id, parameters=None, op='slice', kind='op')

    slice_node = Node(graph, slice_id)
    graph.add_edge(prev_layer_id, slice_id,
                   **create_edge_attrs(prev_layer_id, slice_id))
    slices_points = []

    outputs = []

    for i in range(nnet_count):
        read_token_value(file_descr, b'<NestedNnet>')
        collect_until_token(file_descr, b'<Nnet>')
        g, shape = load_kalid_nnet1_model(file_descr,
                                          'Nested_net_{}'.format(i))
        input_nodes = [
            n for n in g.nodes(data=True) if n[1]['op'] == 'Input'
        ]
        if i != nnet_count - 1:
            slices_points.append(shape[1])
        g.remove_node(input_nodes[0][0])
        mapping = {
            node: graph.unique_id(node)
            for node in g.nodes(data=False) if node in graph
        }
        g = nx.relabel_nodes(g, mapping)
        for val in mapping.values():
            g.node[val]['name'] = val
        graph.add_nodes_from(g.nodes(data=True))
        graph.add_edges_from(g.edges(data=True))
        sorted_nodes = tuple(nx.topological_sort(g))
        edge_attrs = create_edge_attrs(slice_id, sorted_nodes[0])
        edge_attrs['out'] = i
        graph.add_edge(slice_id, sorted_nodes[0], **edge_attrs)
        outputs.append(sorted_nodes[-1])
    packed_sp = struct.pack("B", 4) + struct.pack("I", len(slices_points))
    for i in slices_points:
        packed_sp += struct.pack("I", i)
    slice_node.parameters = io.BytesIO(packed_sp)
    concat_id = graph.unique_id(prefix='Concat')
    graph.add_node(concat_id, parameters=None, op='concat', kind='op')
    for i, output in enumerate(outputs):
        edge_attrs = create_edge_attrs(output, concat_id)
        edge_attrs['in'] = i
        graph.add_edge(output, concat_id, **edge_attrs)
    return concat_id
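
The Slice parameters are packed by hand with struct; a round-trip sketch (format inferred from the pack calls above) that decodes such a buffer back into slice points:

import io
import struct

buf = io.BytesIO(struct.pack("B", 4) + struct.pack("I", 2)
                 + struct.pack("I", 128) + struct.pack("I", 256))
size_marker = struct.unpack("B", buf.read(1))[0]  # 4: int32 payloads follow
count = struct.unpack("I", buf.read(4))[0]  # number of slice points
points = [struct.unpack("I", buf.read(4))[0] for _ in range(count)]
# points == [128, 256]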