Example #1
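Loads all components of a Kaldi model into the graph. component_layer_map is only passed for nnet3 models, where each component is matched by name to already created layers; otherwise every component is added to the graph as a new node.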
def load_components(file_descr, graph, component_layer_map=None):
    num_components = collect_until_token_and_read(file_descr,
                                                  b'<NumComponents>')
    log.debug('Network contains {} components'.format(num_components))
    is_nnet3 = component_layer_map is not None

    if not is_nnet3:
        collect_until_token(file_descr, b'<Components>')

    all_components = list()
    name = ""
    for _ in range(num_components):
        if is_nnet3:
            name = collect_until_token_and_read(file_descr, b'<ComponentName>',
                                                np.string_)

        component_type = find_next_component(file_descr)
        if component_type == end_of_nnet_tag.lower()[1:-1]:
            break

        start_index = file_descr.tell()
        end_tag, end_index = find_end_of_component(file_descr, component_type)
        # Read dim info where possible to simplify shape calculation for MemoryOffset.
        # The shape of a MemoryOffset cannot be derived from the shape of the previous layer,
        # because the node is split into 2 parts to remove the cycle from the graph.
        file_descr.seek(start_index)
        dim = 0
        dim_words = {b'<Dim>', b'<InputDim>'}
        for dim_word in dim_words:
            try:
                collect_until_token(file_descr,
                                    dim_word,
                                    size_search_zone=end_index - start_index)
                cur_index = file_descr.tell()
                if start_index < cur_index < end_index:
                    dim = read_binary_integer32_token(file_descr)
                    break
                else:
                    file_descr.seek(start_index)
            except Error:
                file_descr.seek(start_index)

        if is_nnet3:
            if name in component_layer_map:
                layer_id = component_layer_map[name][0]
                for layer in component_layer_map[name]:
                    node = Node(graph, layer)
                    node['parameters'] = get_parameters(
                        file_descr, start_index, end_index)
                    node['op'] = component_type
                    # Propagate the dim read above to MemoryOffset consumers (see the comment before the dim scan)
                    for o_n_name, params in node.get_outputs():
                        o_n = Node(graph, o_n_name)
                        if o_n['op'] == 'MemoryOffset' and dim != 0:
                            o_n['parameters']['element_size'] = int64_array(
                                [1, dim])
            else:
                raise Error("Something wrong with layer {}".format(name))
        else:
            layer_id = graph.unique_id(prefix=component_type)
            graph.add_node(layer_id,
                           parameters=get_parameters(file_descr, start_index,
                                                     end_index),
                           op=component_type,
                           kind='op')
        if hasattr(graph, 'op_names_statistic'):
            graph.op_names_statistic[component_type] += 1

        all_components.append(layer_id)
        log.debug('{} (type is {}) was loaded'.format(layer_id,
                                                      component_type))

    return all_components
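The <Dim>/<InputDim> scan above seeks back to the start of the component and searches only within the component's byte range, restoring the position on failure. A minimal self-contained sketch of the same pattern, using plain io.BytesIO and struct instead of the real collect_until_token / read_binary_integer32_token helpers (which also handle Kaldi's token framing):

import io
import struct

def read_dim_in_window(stream, token, start, end):
    # Search for the token only inside the [start, end) window of the stream;
    # if it is found there, read the 4-byte little-endian integer that follows,
    # otherwise restore the original position and report "no dim".
    stream.seek(start)
    window = stream.read(end - start)
    pos = window.find(token)
    if pos == -1:
        stream.seek(start)
        return 0
    stream.seek(start + pos + len(token))
    return struct.unpack('<i', stream.read(4))[0]

buf = io.BytesIO(b'<SomeComponent> <Dim>' + struct.pack('<i', 440) + b' <Rest>')
print(read_dim_in_window(buf, b'<Dim>', 0, len(buf.getvalue())))  # 440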
Example #2
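A unit test for the component-parsing helpers: it builds a fake component byte stream, locates the end-of-component tag with find_end_of_component, and reads the raw parameter bytes up to that position with get_parameters.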
def test_get_pb(self):
    component = '<AffineComponent>'
    test_file = b'somefakeinfo<another>info<tag>' + bytes(end_of_component_tag, 'ascii') + b'</Nnet>'
    end_tag, end_position = find_end_of_component(self.bytesio_from(test_file), component[1:-1].lower())
    pb = get_parameters(self.bytesio_from(test_file), 0, end_position)
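The test relies on a bytesio_from helper and the end_of_component_tag constant defined in the surrounding test class and loader module. A minimal sketch of such a helper, assuming it does nothing more than wrap the raw bytes in a seekable in-memory stream:

import io

def bytesio_from(buffer):
    # Wrap the raw bytes in a seekable in-memory stream so the loader helpers
    # can tell()/seek() over it as if it were a real model file.
    return io.BytesIO(buffer)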
Example #3
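Loads a Kaldi nnet1 model, whose components form a plain chain: a Parameter node is created first, every loaded component is connected to the previous node, and a fake Identity output is attached to the last layer.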
def load_kalid_nnet1_model(graph, file_descr, name):
    prev_layer_id = 'Parameter'
    graph.add_node(prev_layer_id,
                   name=prev_layer_id,
                   kind='op',
                   op='Parameter',
                   parameters=None)

    # Find the output layer; there can be only one due to the chain structure of an nnet1 model
    output_layer = None
    while True:
        component_type = find_next_component(file_descr)
        if component_type == end_of_nnet_tag.lower()[1:-1]:
            break

        layer_o = read_binary_integer32_token(file_descr)
        layer_i = read_binary_integer32_token(file_descr)

        if component_type == 'parallelcomponent':
            prev_layer_id = load_parallel_component(file_descr, graph,
                                                    prev_layer_id)
            find_end_of_component(file_descr, component_type)
            continue

        start_index = file_descr.tell()
        end_tag, end_index = find_end_of_component(file_descr, component_type)
        end_index -= len(end_tag)
        layer_id = graph.unique_id(prefix=component_type)
        graph.add_node(layer_id,
                       parameters=get_parameters(file_descr, start_index,
                                                 end_index),
                       op=component_type,
                       kind='op',
                       layer_i=layer_i,
                       layer_o=layer_o)
        if hasattr(graph, 'op_names_statistic'):
            graph.op_names_statistic[component_type] += 1

        prev_node = Node(graph, prev_layer_id)
        if prev_node.op == 'Parameter':
            prev_node['shape'] = np.array([1, layer_i], dtype=np.int64)

        prev_node.add_output_port(0)
        Node(graph, layer_id).add_input_port(0)
        graph.create_edge(
            prev_node, Node(graph, layer_id), 0, 0,
            create_edge_attrs(prev_layer_id, layer_id, prev_layer_id))
        prev_layer_id = layer_id
        output_layer = layer_id
        log.debug('{} (type is {}) was loaded'.format(prev_layer_id,
                                                      component_type))

    # Tensor name information corresponding to a node is stored on its outgoing edges.
    # Since output nodes have no outgoing edges, fake outputs are required: below, an Identity
    # node is added for each output, and the tensor name of the output is kept on the
    # (output, fake output) edge. After the transformation that adds Result nodes, the fake
    # outputs are deleted from the graph.
    assert output_layer is not None, "Output layer is not found in graph"
    add_outputs_identity(
        graph, [output_layer], lambda g, output, fake_output: g.create_edge(
            Node(g, output), Node(g, fake_output), 0, 0,
            create_edge_attrs(output, fake_output, output)))
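The closing comment describes why a fake Identity output is attached to the terminal layer. A toy illustration of the idea using plain networkx rather than the Model Optimizer Graph class (node and edge attribute names here are illustrative only):

import networkx as nx

def add_fake_outputs(graph, outputs):
    # For every real output node attach a fake Identity node and keep the
    # tensor name on the (output, fake_output) edge, mirroring what
    # add_outputs_identity does for the real graph.
    for output in outputs:
        fake_output = output + '/fake_output'
        graph.add_node(fake_output, op='Identity', kind='op')
        graph.add_edge(output, fake_output, tensor_name=output)

g = nx.MultiDiGraph()
g.add_node('affinetransform_1', op='affinetransform', kind='op')
add_fake_outputs(g, ['affinetransform_1'])
print(list(g.edges(data=True)))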