Example #1
    def extract(cls, node):
        pb = node.parameters
        mapping_rule = {'context': list()}
        tag = find_next_tag(pb)
        if tag == '<LeftContext>':
            read_placeholder(pb, 1)
            l_context = read_binary_integer32_token(pb)
            tag = find_next_tag(pb)
            if tag != '<RightContext>':
                raise Error(
                    'Unknown token {} in SpliceComponent node {}'.format(
                        tag, node.id))
            read_placeholder(pb, 1)
            r_context = read_binary_integer32_token(pb)
            for i in range(-l_context, r_context + 1):
                mapping_rule['context'].append(i)
        elif tag == '<Context>':
            collect_until_whitespace(pb)
            mapping_rule['context'] = read_binary_vector(pb,
                                                         False,
                                                         dtype=np.int32)
        else:
            raise Error('Unknown token {} in SpliceComponent node {}'.format(
                tag, node.id))

        tag = find_next_tag(pb)
        if tag == '<ConstComponentDim>':
            read_placeholder(pb, 1)
            const_dim = read_binary_integer32_token(pb)
            mapping_rule['const_dim'] = const_dim

        Splice.update_node_stat(node, mapping_rule)
        return cls.enabled
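
A minimal sketch of what the <LeftContext>/<RightContext> branch above produces (the context sizes here are hypothetical, not from a real model): a dense, symmetric window of frame offsets.

l_context, r_context = 3, 3
context = list(range(-l_context, r_context + 1))
assert context == [-3, -2, -1, 0, 1, 2, 3]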
Example #2
    def extract(cls, node):
        pb = node.parameters
        collect_until_token(pb, b'<PoolSize>')
        kernel = read_binary_integer32_token(pb)
        tag = find_next_tag(pb)
        if tag == '<PoolStep>':
            read_placeholder(pb, 1)
            stride = read_binary_integer32_token(pb)
            pool_step = stride
            pool_stride = read_token_value(pb, b'<PoolStride>')
        elif tag == '<PoolStride>':
            stride = 1
            pool_step = None
            read_placeholder(pb, 1)
            pool_stride = read_binary_integer32_token(pb)
        else:
            raise Error('Can not extract parameters for {}'.format(node))

        mapping_rule = {
            'window': np.array([1, 1, 1, kernel], dtype=np.int64),
            'stride': np.array([1, 1, stride, stride], dtype=np.int64),
            'pool_stride': pool_stride,
            'pool_step': pool_step,
            'pad': np.array([[0, 0], [0, 0], [0, 0], [0, 0]], dtype=np.int64),
            'pad_spatial_shape': np.array([[0, 0], [0, 0]], dtype=np.int64),
            'pool_method': 'max',
        }
        mapping_rule.update(layout_attrs())
        Pooling.update_node_stat(node, mapping_rule)
        return cls.enabled
Example #3
    def extract(cls, node):
        pb = node.parameters
        try:
            collect_until_token(pb, b'<InputDim>')
        except Error:
            raise Error("<InputDim> was not found")
        in_dim = read_binary_integer32_token(pb)

        try:
            collect_until_token(pb, b'<OutputDim>')
        except Error:
            raise Error("<OutputDim> was not found")
        out_dim = read_binary_integer32_token(pb)

        assert in_dim % out_dim == 0

        group = in_dim // out_dim  # integer group count; divisibility is checked above

        try:
            collect_until_token(pb, b'<P>')
        except Error:
            raise Error("<P> was not found")
        p = read_binary_float_token(pb)

        attrs = {
                 'group': group,
                 'p': p,
        }

        PNormOp.update_node_stat(node, attrs)
        return cls.enabled
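
As a quick illustration of the group computation (the dimensions are hypothetical): the group count is just the ratio of the two dims, and the assert above guarantees it is integral.

in_dim, out_dim = 2000, 250
assert in_dim % out_dim == 0
group = in_dim // out_dim  # -> 8: each output pools 8 consecutive inputs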
Example #4
    def extract(cls, node):
        pb = node.parameters

        collect_until_token(pb, b'<Dim>')
        dim = read_binary_integer32_token(pb)

        collect_until_token(pb, b'<BlockDim>')
        block_dim = read_binary_integer32_token(pb)

        collect_until_token(pb, b'<Epsilon>')
        eps = read_binary_float_token(pb)

        collect_until_token(pb, b'<TargetRms>')
        target_rms = read_binary_float_token(pb)

        collect_until_token(pb, b'<StatsMean>')
        mean = read_binary_vector(pb)

        collect_until_token(pb, b'<StatsVar>')
        var = read_binary_vector(pb)

        scale = target_rms / np.sqrt(var + eps)

        shift = -target_rms * mean / np.sqrt(var + eps)

        scale = np.tile(scale, dim // block_dim)
        shift = np.tile(shift, dim // block_dim)

        attrs = {'out-size': dim}
        embed_input(attrs, 1, 'weights', scale)
        embed_input(attrs, 2, 'biases', shift)

        ScaleShiftOp.update_node_stat(node, attrs)
        return cls.enabled
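
A self-checking sketch (with random, hypothetical statistics) of why folding the batch-norm statistics into a ScaleShift is exact:

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=4)
mean = rng.normal(size=4)
var = rng.uniform(0.5, 2.0, size=4)
eps, target_rms = 1e-3, 1.0
scale = target_rms / np.sqrt(var + eps)
shift = -target_rms * mean / np.sqrt(var + eps)
# target_rms * (x - mean) / sqrt(var + eps) == scale * x + shift
assert np.allclose(target_rms * (x - mean) / np.sqrt(var + eps), scale * x + shift)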
Example #5
def load_kalid_nnet1_model(graph, file_descr, name):
    prev_layer_id = 'Parameter'
    graph.add_node(prev_layer_id,
                   name=prev_layer_id,
                   kind='op',
                   op='Parameter',
                   parameters=None)

    used_layers = set()
    while True:
        component_type = find_next_component(file_descr)
        if component_type == end_of_nnet_tag.lower()[1:-1]:
            break

        layer_o = read_binary_integer32_token(file_descr)
        layer_i = read_binary_integer32_token(file_descr)

        if component_type == 'parallelcomponent':
            prev_layer_id = load_parallel_component(file_descr, graph,
                                                    prev_layer_id)
            find_end_of_component(file_descr, component_type)
            continue

        start_index = file_descr.tell()
        end_tag, end_index = find_end_of_component(file_descr, component_type)
        end_index -= len(end_tag)
        layer_id = graph.unique_id(prefix=component_type)
        graph.add_node(layer_id,
                       parameters=get_parameters(file_descr, start_index,
                                                 end_index),
                       op=component_type,
                       kind='op',
                       layer_i=layer_i,
                       layer_o=layer_o)

        prev_node = Node(graph, prev_layer_id)
        if prev_node.op == 'Parameter':
            prev_node['shape'] = np.array([1, layer_i], dtype=np.int64)

        prev_node.add_output_port(0)
        Node(graph, layer_id).add_input_port(0)
        graph.create_edge(
            prev_node, Node(graph, layer_id), 0, 0,
            create_edge_attrs(prev_layer_id, layer_id, prev_layer_id))
        used_layers.add(prev_layer_id)
        prev_layer_id = layer_id
        log.debug('{} (type is {}) was loaded'.format(prev_layer_id,
                                                      component_type))

    # Tensor names for a node are stored on its outgoing edges.
    # As output nodes have no outgoing edges, fake outputs are required. The code below
    # adds an Identity node for each output and keeps the output tensor name on the
    # (output, fake_output) edge. The fake outputs are deleted from the graph after
    # the transformation that adds Result nodes.
    output_layers = graph.nodes - used_layers
    add_outputs_identity(
        graph, output_layers, lambda g, output, fake_output: g.create_edge(
            Node(g, output), Node(g, fake_output), 0, 0,
            create_edge_attrs(output, fake_output, output)))
Example #6
def read_binary_matrix(file_desc: io.BufferedReader, read_token: bool = True):
    if read_token:
        read_placeholder(file_desc)
    rows_number = read_binary_integer32_token(file_desc)
    cols_number = read_binary_integer32_token(file_desc)
    # to compare: ((float *)a->buffer())[10]
    return read_blob(file_desc,
                     rows_number * cols_number), (rows_number, cols_number)
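
For orientation: these helpers assume Kaldi's binary convention of writing each int32 as a one-byte size marker (4) followed by the little-endian value. A minimal decoding sketch (read_int32 is a hypothetical stand-in for read_binary_integer32_token):

import io
import struct

def read_int32(stream):
    size = stream.read(1)[0]  # size marker, expected to be 4
    return struct.unpack('<i', stream.read(size))[0]

buf = io.BytesIO(struct.pack('<Bi', 4, 2) + struct.pack('<Bi', 4, 3))
assert (read_int32(buf), read_int32(buf)) == (2, 3)  # rows, cols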
Example #7
def read_binary_vector(file_desc: io.BufferedReader,
                       read_token: bool = True,
                       dtype=np.float32):
    if read_token:
        read_placeholder(file_desc)
    elements_number = read_binary_integer32_token(file_desc)
    return read_blob(file_desc, elements_number, dtype)
Example #8
    def replace_op(self, graph: Graph, node: Node):
        pb = node.parameters
        weights_size = read_binary_integer32_token(pb)
        weights = read_blob(pb, weights_size, dtype=np.int32) - 1  # shift to 0-based indexing
        const_attrs = {
            'name': 'indexes/{}'.format(node.id),
            'value': np.array(weights),
            'shape': [weights_size],
            'data_type': np.int32
        }
        indexes_node = Const(graph).create_node(attrs=const_attrs)

        perm_in_1 = Const(
            graph, {
                'value': np.array([1, 0], dtype=np.int64),
                'shape': [2],
                'data_type': np.int64
            }).create_node()
        axis_const = Const(graph, {'value': int64_array(0)}).create_node()
        perm1_node = Transpose(graph, {
            'name': 'input_permute'
        }).create_node([node.in_node(0)])
        perm1_node.in_port(0).connect(node.in_port(0).get_source())
        perm1_node.in_port(1).connect(perm_in_1.out_port(0))

        gather_node = Gather(graph, {}).create_node()
        gather_node.in_port(0).connect(perm1_node.out_port(0))
        gather_node.in_port(1).connect(indexes_node.out_port(0))
        gather_node.in_port(2).connect(axis_const.out_port(0))

        perm2_node = Transpose(graph, {'name': 'output_permute'}).create_node()
        perm2_node.in_port(0).connect(gather_node.out_port(0))
        perm2_node.in_port(1).connect(perm_in_1.out_port(0))

        return [perm2_node.id]
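
What the Transpose -> Gather -> Transpose subgraph computes can be checked with plain numpy (hypothetical data): gathering rows of the transposed tensor and transposing back is exactly a column permutation.

import numpy as np

x = np.arange(6, dtype=np.float32).reshape(2, 3)
indexes = np.array([2, 0, 1])  # already shifted to 0-based
assert np.array_equal(x.T[indexes].T, x[:, indexes])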
Example #9
def load_components(file_descr, graph, component_layer_map=None):
    num_components = collect_until_token_and_read(file_descr, b'<NumComponents>')
    log.debug('Network contains {} components'.format(num_components))
    is_nnet3 = component_layer_map is not None

    if not is_nnet3:
        collect_until_token(file_descr, b'<Components>')

    all_components = list()
    name = ""
    for _ in range(num_components):
        if is_nnet3:
            name = collect_until_token_and_read(file_descr, b'<ComponentName>', np.string_)

        component_type = find_next_component(file_descr)
        if component_type == end_of_nnet_tag.lower()[1:-1]:
            break

        start_index = file_descr.tell()
        end_tag, end_index = find_end_of_component(file_descr, component_type)
        # Read dim info where possible to simplify shape calculation for MemoryOffset.
        # The shape of MemoryOffset can't be derived from the previous layer's shape,
        # because the node is split into two parts to remove the cycle from the graph.
        file_descr.seek(start_index)
        dim = 0
        try:
            collect_until_token(file_descr, b'<Dim>', size_search_zone=end_index - start_index)
            cur_index = file_descr.tell()
            if start_index < cur_index < end_index:
                dim = read_binary_integer32_token(file_descr)
            else:
                file_descr.seek(start_index)
        except Error:
            file_descr.seek(start_index)

        if is_nnet3:
            if name in component_layer_map:
                layer_id = component_layer_map[name][0]
                for layer in component_layer_map[name]:
                    node = Node(graph, layer)
                    node['parameters'] = get_parameters(file_descr, start_index, end_index)
                    node['op'] = component_type
                    # Read dim info where possible to simplify shape calculation for MemoryOffset
                    for o_n_name, params in node.get_outputs():
                        o_n = Node(graph, o_n_name)
                        if o_n['op'] == 'MemoryOffset' and dim != 0:
                            o_n['parameters']['element_size'] = dim
            else:
                raise Error("Something wrong with layer {}".format(name))
        else:
            layer_id = graph.unique_id(prefix=component_type)
            graph.add_node(layer_id,
                           parameters=get_parameters(file_descr, start_index, end_index),
                           op=component_type,
                           kind='op')

        all_components.append(layer_id)
        log.debug('{} (type is {}) was loaded'.format(layer_id, component_type))

    return all_components
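
The <Dim> probing above follows a seek-and-restore pattern; a minimal standalone sketch (probe_token is a hypothetical helper, not the loader's API):

import io

def probe_token(stream, token, start, end):
    stream.seek(start)
    pos = stream.read(end - start).find(token)
    if pos == -1:
        stream.seek(start)  # restore the position on failure
        return False
    stream.seek(start + pos + len(token))  # stop right after the token
    return True

buf = io.BytesIO(b'<ComponentName> foo <Dim> ...')
assert probe_token(buf, b'<Dim>', 0, buf.getbuffer().nbytes)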
Example #10
    def replace_op(self, graph: Graph, node: Node):
        pb = node.parameters
        weights_size = read_binary_integer32_token(pb)
        weights = read_blob(pb, weights_size, dtype=np.int32) - 1  # shift to 0-based indexing

        node_name = node.soft_get('name', node.id)
        const_attrs = {
                       'name': node_name + '/indexes',
                       'value': np.array(weights),
                       'shape': [weights_size],
                       'data_type': np.int32
                      }
        indexes_node = Const(graph).create_node(attrs=const_attrs)

        perm_in_1 = Const(graph, {'value': int64_array([1, 0]), 'name': node_name + '/order'}).create_node()
        perm1_node = Transpose(graph, {'name': node_name + '/input_permute'}).create_node([node.in_node(0)])
        perm1_node.in_port(0).connect(node.in_port(0).get_source())
        perm1_node.in_port(1).connect(perm_in_1.out_port(0))

        gather_node = create_op_with_const_inputs(graph, Gather, {2: int64_array(0)}, {'name': node_name + '/gather'})
        gather_node.in_port(0).connect(perm1_node.out_port(0))
        gather_node.in_port(1).connect(indexes_node.out_port(0))

        perm2_node = Transpose(graph, {'name': node_name + '/output_permute'}).create_node()
        perm2_node.in_port(0).connect(gather_node.out_port(0))
        perm2_node.in_port(1).connect(perm_in_1.out_port(0))

        return [perm2_node.id]
Example #11
def load_kalid_nnet1_model(file_descr, name):
    graph = Graph(name=name)

    prev_layer_id = 'Input'
    graph.add_node(prev_layer_id,
                   name=prev_layer_id,
                   kind='op',
                   op='Input',
                   parameters=None)
    input_shape = []

    while True:
        component_type = find_next_component(file_descr)
        if component_type == end_of_nnet_tag.lower()[1:-1]:
            break

        layer_o = read_binary_integer32_token(file_descr)
        layer_i = read_binary_integer32_token(file_descr)

        if component_type == 'parallelcomponent':
            prev_layer_id = load_parallel_component(file_descr, graph,
                                                    prev_layer_id)
            continue

        start_index = file_descr.tell()
        end_tag, end_index = find_end_of_component(file_descr, component_type)
        end_index -= len(end_tag)
        layer_id = graph.unique_id(prefix=component_type)
        graph.add_node(layer_id,
                       parameters=get_parameters(file_descr, start_index,
                                                 end_index),
                       op=component_type,
                       kind='op',
                       layer_i=layer_i,
                       layer_o=layer_o)

        prev_node = Node(graph, prev_layer_id)
        if prev_node.op == 'Input':
            prev_node['shape'] = np.array([1, layer_i], dtype=np.int64)
            input_shape = np.array([1, layer_i], dtype=np.int64)
        graph.add_edge(prev_layer_id, layer_id,
                       **create_edge_attrs(prev_layer_id, layer_id))
        prev_layer_id = layer_id
        log.debug('{} (type is {}) was loaded'.format(prev_layer_id,
                                                      component_type))
    return graph, input_shape
Example #12
    def extract(node):
        pb = node.parameters
        weights_size = read_binary_integer32_token(pb)
        weights = read_blob(pb, weights_size, dtype=np.int32) - 1  # shift to 0-based indexing
        attrs = {'infer': copy_shape_infer}
        embed_input(attrs, 1, 'indexes', weights)
        Permute.update_node_stat(node, attrs)
        return __class__.enabled
Example #13
    def extract(cls, node):
        pb = node.parameters

        collect_until_token(pb, b'<MaxChange>')
        max_change = read_binary_float_token(pb)

        collect_until_token(pb, b'<L2Regularize>')
        collect_until_token(pb, b'<LearningRate>')

        collect_until_token(pb, b'<TimeOffsets>')
        time_offsets = read_binary_vector(pb, False, np.int32)

        collect_until_token(pb, b'<LinearParams>')
        weights, weights_shape = read_binary_matrix(pb)
        collect_until_token(pb, b'<BiasParams>')
        bias_params = read_binary_vector(pb)

        collect_until_token(pb, b'<OrthonormalConstraint>')
        orthonormal_constraint = read_binary_float_token(pb)  # used only during training

        collect_until_token(pb, b'<UseNaturalGradient>')
        use_natural_grad = read_binary_bool_token(pb)  # used only during training
        collect_until_token(pb, b'<NumSamplesHistory>')
        num_samples_hist = read_binary_float_token(pb)

        collect_until_token(pb, b'<AlphaInOut>')
        alpha_in_out = read_binary_float_token(pb), read_binary_float_token(pb)  # for training, usually (4, 4)

        # According to the Kaldi documentation, http://kaldi-asr.org/doc/classkaldi_1_1nnet3_1_1TdnnComponent.html#details,
        # this appears to be used only during training (but not 100% sure).
        collect_until_token(pb, b'<RankInOut>')
        rank_in_out = read_binary_integer32_token(pb), read_binary_integer32_token(pb)

        biases = np.array(bias_params) if len(bias_params) != 0 else None
        attrs = {
            'weights': np.reshape(weights, weights_shape),
            'biases': biases,
            'time_offsets': time_offsets,
        }
        TdnnComponent.update_node_stat(node, attrs)
        return cls.enabled
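
Since read_binary_matrix (Example #6) returns a flat blob together with (rows_number, cols_number), restoring the TDNN weight matrix is a single row-major reshape; a tiny sketch with hypothetical values:

import numpy as np

weights = np.arange(6, dtype=np.float32)  # flat blob as read from the stream
weights_shape = (2, 3)                    # (rows_number, cols_number)
assert np.reshape(weights, weights_shape).shape == (2, 3)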
Example #14
    def extract(cls, node):
        pb = node.parameters

        collect_until_token(pb, b'<Dim>')
        dim = read_binary_integer32_token(pb)

        collect_until_token(pb, b'<BlockDim>')
        block_dim = read_binary_integer32_token(pb)

        collect_until_token(pb, b'<TimePeriod>')
        time_period = read_binary_integer32_token(pb)

        collect_until_token(pb, b'<DropoutProportion>')
        dropout_proportion = read_binary_float_token(pb)

        # collect_until_token(pb, b'<Continuous>')
        # Dropout is a no-op at inference time, so the component is mapped to Identity.
        Identity.update_node_stat(node, {})

        return cls.enabled
Example #15
    def extract(node):
        pb = node.parameters

        collect_until_token(pb, b'<Dim>')
        dim = read_binary_integer32_token(pb)

        collect_until_token(pb, b'<BlockDim>')
        block_dim = read_binary_integer32_token(pb)

        if block_dim != dim:
            raise Error(
                "BatchNorm with <Dim> not equal to <BlockDim> is not supported")

        collect_until_token(pb, b'<Epsilon>')
        eps = read_binary_float_token(pb)

        collect_until_token(pb, b'<TargetRms>')
        target_rms = read_binary_float_token(pb)

        collect_until_token(pb, b'<TestMode>')
        test_mode = read_binary_bool_token(pb)

        if test_mode:
            raise Error("BatchNorm with test mode enabled is not supported")

        collect_until_token(pb, b'<StatsMean>')
        mean = read_binary_vector(pb)

        collect_until_token(pb, b'<StatsVar>')
        var = read_binary_vector(pb)

        scale = target_rms / np.sqrt(var + eps)

        shift = -target_rms * mean / np.sqrt(var + eps)
        attrs = {}
        embed_input(attrs, 1, 'weights', scale)
        embed_input(attrs, 2, 'biases', shift)
        ScaleShiftOp.update_node_stat(node, attrs)
        return __class__.enabled
Example #16
    def extract(node):
        pb = node.parameters
        num_slice_points = read_binary_integer32_token(pb)
        mapping_rule = {
            'axis': 1,
            'slice_point': read_blob(pb, num_slice_points, np.int32),
            'batch_dims': 0,
            'spatial_dims': 1,
            'infer': caffe_slice_infer
        }
        node.parameters.close()
        Slice.update_node_stat(node, mapping_rule)
        return __class__.enabled
Example #17
    def extract(node):
        pb = node.parameters
        collect_until_token(pb, b'<Dim>')
        dim = read_binary_integer32_token(pb)
        target_rms = 1
        d_scaled = dim * target_rms ** 2
        in_norm = np.zeros([dim], np.float64)
        in_norm += 1.0 / d_scaled
        in_norm = np.maximum(in_norm, 2. ** (-66))
        in_norm = np.power(in_norm, -0.5)
        attrs = {}
        embed_input(attrs, 1, 'weights', in_norm)
        ScaleShiftOp.update_node_stat(node, attrs)
        return __class__.enabled
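
The weight vector computed above is constant: a self-checking sketch (hypothetical dim) of the closed form sqrt(dim * target_rms**2), where the 2**(-66) floor only guards the negative power against division by zero.

import numpy as np

dim, target_rms = 16, 1
d_scaled = dim * target_rms ** 2
in_norm = np.power(np.maximum(np.full([dim], 1.0 / d_scaled), 2. ** (-66)), -0.5)
assert np.allclose(in_norm, np.sqrt(d_scaled))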
Example #18
    def extract(cls, node):
        pb = node.parameters

        collect_until_token(pb, b'<Dim>')
        dim = read_binary_integer32_token(pb)

        collect_until_token(pb, b'<Scale>')
        scale = read_binary_float_token(pb)

        # TODO add real batch here
        attrs = {}
        embed_input(attrs, 1, 'weights', np.full([dim], scale))
        ScaleShiftOp.update_node_stat(node, attrs)
        return cls.enabled
Example #19
    def extract(cls, node):
        pb = node.parameters
        try:
            collect_until_token(pb, b'<Dim>')
        except Error:
            try:
                pb.seek(0)
                collect_until_token(pb, b'<InputDim>')
            except Error:
                raise Error("Neither <Dim> nor <InputDim> were found")
        in_dim = read_binary_integer32_token(pb)

        try:
            collect_until_token(pb, b'<TargetRms>')
            target_rms = read_binary_float_token(pb)
        except Error:
            # model does not contain TargetRms
            target_rms = 1.0

        try:
            collect_until_token(pb, b'<AddLogStddev>')
            add_log = read_binary_bool_token(pb)
        except Error:
            # model does not contain AddLogStddev
            add_log = False

        if add_log:
            raise Error(
                "AddLogStddev = True in Normalize component is not supported")

        scale = target_rms * np.sqrt(in_dim)

        attrs = {
            'eps': 0.00000001,
            'across_spatial': 0,
            'channel_shared': 1,
            'in_dim': in_dim,
        }
        embed_input(attrs, 1, 'weights', [scale])

        NormalizeOp.update_node_stat(node, attrs)
        return cls.enabled
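
Why scale = target_rms * sqrt(in_dim) is the right weight: assuming NormalizeOp divides by the L2 norm (as the Caffe-style Normalize layer does), multiplying by sqrt(in_dim) turns that into RMS normalization. A self-checking sketch with a hypothetical vector:

import numpy as np

x = np.array([3.0, 4.0, 0.0, 0.0])
in_dim, target_rms = x.size, 1.0
y = x / np.linalg.norm(x) * (target_rms * np.sqrt(in_dim))
assert np.isclose(np.sqrt(np.mean(y ** 2)), target_rms)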
Example #20
    def test_read_binary_integer32_token(self):
        stream = self.bytesio_from(
            self.pack_value(4, 'B') + self.pack_value(32, self.uint32_fmt))
        self.assertEqual(read_binary_integer32_token(stream), 32)
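
The test assumes small fixture helpers; a hypothetical reconstruction consistent with the calls above (pack_value, bytesio_from, and uint32_fmt are guesses, not the suite's actual code):

import io
import struct

uint32_fmt = '<I'

def pack_value(value, fmt):
    return struct.pack(fmt, value)

def bytesio_from(data):
    return io.BytesIO(data)

# one-byte size marker (4) followed by the uint32 value 32
stream = bytesio_from(pack_value(4, 'B') + pack_value(32, uint32_fmt))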