Example #1
    def extract(cls, node):
        pb = node.parameters

        collect_until_token(pb, b'<Dim>')
        dim = read_binary_integer32_token(pb)

        collect_until_token(pb, b'<BlockDim>')
        block_dim = read_binary_integer32_token(pb)

        collect_until_token(pb, b'<Epsilon>')
        eps = read_binary_float_token(pb)

        collect_until_token(pb, b'<TargetRms>')
        target_rms = read_binary_float_token(pb)

        collect_until_token(pb, b'<StatsMean>')
        mean = read_binary_vector(pb)

        collect_until_token(pb, b'<StatsVar>')
        var = read_binary_vector(pb)

        scale = target_rms / np.sqrt(var + eps)

        shift = -target_rms * mean / np.sqrt(var + eps)

        scale = np.tile(scale, dim // block_dim)
        shift = np.tile(shift, dim // block_dim)

        attrs = {'out-size': dim}
        embed_input(attrs, 1, 'weights', scale)
        embed_input(attrs, 2, 'biases', shift)

        ScaleShiftOp.update_node_stat(node, attrs)
        return cls.enabled
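The folding above turns stored batch-norm statistics into a single scale/shift pair. A minimal sketch with made-up statistics (all values below are hypothetical) checks the algebra: scale * x + shift reproduces target_rms * (x - mean) / sqrt(var + eps).

import numpy as np

# Hypothetical stats standing in for the values read from the model above.
dim, block_dim = 6, 3
eps, target_rms = 1e-3, 1.0
mean = np.array([0.1, -0.2, 0.3])
var = np.array([1.0, 0.5, 2.0])

scale = target_rms / np.sqrt(var + eps)
shift = -target_rms * mean / np.sqrt(var + eps)

# Tiling repeats the per-block parameters across the full dimension.
scale = np.tile(scale, dim // block_dim)
shift = np.tile(shift, dim // block_dim)

x = np.random.rand(dim)
expected = target_rms * (x - np.tile(mean, 2)) / np.sqrt(np.tile(var, 2) + eps)
assert np.allclose(scale * x + shift, expected)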
Example #2
    def extract(node):
        clip_value = 50
        pb = node.parameters
        res = collect_until_whitespace(pb)
        if res == b'<CellClip>':
            clip_value = get_uint32(pb.read(4))
        collect_until_token(pb, b'FM')
        gifo_x_weights, gifo_x_weights_shape = read_binary_matrix(pb, False)
        gifo_r_weights, gifo_r_weights_shape = read_binary_matrix(pb)
        gifo_biases = read_binary_vector(pb)
        input_gate_weights = read_binary_vector(pb)
        forget_gate_weights = read_binary_vector(pb)
        output_gate_weights = read_binary_vector(pb)

        projection_weights, projection_weights_shape = read_binary_matrix(pb)

        mapping_rule = {'gifo_x_weights_shape': gifo_x_weights_shape,
                        'gifo_r_weights_shape': gifo_r_weights_shape,
                        'projection_weights_shape': projection_weights_shape,
                        'clip_value': clip_value
                        }

        embed_input(mapping_rule, 1, 'gifo_x_weights', gifo_x_weights)
        embed_input(mapping_rule, 2, 'gifo_r_weights', gifo_r_weights)
        embed_input(mapping_rule, 3, 'gifo_biases', gifo_biases)
        embed_input(mapping_rule, 4, 'input_gate_weights', input_gate_weights)
        embed_input(mapping_rule, 5, 'forget_gate_weights', forget_gate_weights)
        embed_input(mapping_rule, 6, 'output_gate_weights', output_gate_weights)
        embed_input(mapping_rule, 7, 'projection_weights', projection_weights)

        LSTMCell.update_node_stat(node, mapping_rule)
        return __class__.enabled
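The 'gifo' matrices stack four per-gate parameter blocks along the row axis; in Kaldi's LSTM components the order is g (cell input), i (input gate), f (forget gate), o (output gate). A rough sketch, with hypothetical dimensions, of how such a matrix splits back into gates:

import numpy as np

# Hypothetical sizes; a real model defines these in its component header.
cell_dim, input_dim = 4, 3
gifo_x_weights = np.random.rand(4 * cell_dim, input_dim)

# Four equal row-blocks: g (cell input), i, f and o gates.
g, i, f, o = np.split(gifo_x_weights, 4, axis=0)
assert g.shape == (cell_dim, input_dim)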
Example #3
    def extract(cls, node):
        pb = node.parameters
        collect_until_token(pb, b'<ConvolutionModel>')
        in_shape = read_token_value(pb, b'<NumFiltersIn>')
        out_shape = read_token_value(pb, b'<NumFiltersOut>')
        height_in = read_token_value(pb, b'<HeightIn>')
        height_out = read_token_value(pb, b'<HeightOut>')
        height_subsample = read_token_value(pb, b'<HeightSubsampleOut>')
        collect_until_token(pb, b'<Offsets>')
        offsets = read_binary_vector_of_pairs(pb,
                                              read_token=False,
                                              dtype=np.int32)
        collect_until_token(pb, b'<RequiredTimeOffsets>')
        time_offsets = read_binary_vector(pb, read_token=False, dtype=np.int32)
        collect_until_token(pb, b'<LinearParams>')
        weights, _ = read_binary_matrix(pb)
        collect_until_token(pb, b'<BiasParams>')
        biases = read_binary_vector(pb)

        offsets = offsets.reshape([len(offsets) // 2, 2])
        mapping_rule = {  # stride for h axis
            'height_subsample': height_subsample,
            # input dimension for h axis
            'height_in': height_in,
            # output dimension for h axis
            'height_out': height_out,
            # input dimension for channel axis
            'in_channels': in_shape,
            # output dimension for channel axis
            'out_channels': out_shape,
            # array with pairs like the following
            # [ (-1, -1) (-1, 0) (-1, 1)
            #   (0, -1)  (0, 0)  (0, 1)
            #   (1, -1)  (1, 0)  (1, 1)]
            #  it means that kernel 3x3 will be applied to calculate current value of output
            'offsets': offsets,
            # required time offsets to calculate current convolution
            # time_offsets = [-1, 0, 1] for previous example means no padding for time axis and
            # 3 values should be prepared
            # time_offsets = [0] means zero padding [1, 1] for time axis
            'time_offsets': time_offsets,
            'out-size': out_shape * height_out
        }

        embed_input(mapping_rule, 1, 'weights', weights)
        embed_input(mapping_rule, 2, 'biases', biases)

        TimeHeightConvolutionComponent.update_node_stat(node, mapping_rule)
        return cls.enabled
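To make the offsets reshape above concrete: the flat vector of the 3x3 kernel described in the comments pairs up into nine rows, one per kernel tap (the values below just reproduce that example).

import numpy as np

# Flat vector as read from the model; nine pairs of a 3x3 kernel.
offsets = np.array([-1, -1, -1, 0, -1, 1,
                     0, -1,  0, 0,  0, 1,
                     1, -1,  1, 0,  1, 1], dtype=np.int32)
offsets = offsets.reshape([len(offsets) // 2, 2])
print(offsets)  # nine rows, one per kernel tap, as in the comment above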
Example #4
    def extract(cls, node):
        pb = node.parameters
        mapping_rule = {'context': list()}
        tag = find_next_tag(pb)
        if tag == '<LeftContext>':
            read_placeholder(pb, 1)
            l_context = read_binary_integer32_token(pb)
            tag = find_next_tag(pb)
            if tag != '<RightContext>':
                raise Error(
                    'Unknown token {} in SpliceComponent node {}'.format(
                        tag, node.id))
            read_placeholder(pb, 1)
            r_context = read_binary_integer32_token(pb)
            for i in range(-l_context, r_context + 1):
                mapping_rule['context'].append(i)
        elif tag == '<Context>':
            collect_until_whitespace(pb)
            mapping_rule['context'] = read_binary_vector(pb,
                                                         False,
                                                         dtype=np.int32)
        else:
            raise Error('Unknown token {} in SpliceComponent node {}'.format(
                tag, node.id))

        tag = find_next_tag(pb)
        if tag == '<ConstComponentDim>':
            read_placeholder(pb, 1)
            const_dim = read_binary_integer32_token(pb)
            mapping_rule['const_dim'] = const_dim

        Splice.update_node_stat(node, mapping_rule)
        return cls.enabled
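In the <LeftContext>/<RightContext> branch, the context list is simply every frame offset from -l_context to +r_context inclusive. For example:

l_context, r_context = 3, 3
context = list(range(-l_context, r_context + 1))
assert context == [-3, -2, -1, 0, 1, 2, 3]  # splice concatenates 7 frames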
Example #5
    def extract(cls, node):
        pb = node.parameters
        read_learning_info(pb)
        weights = read_binary_vector(pb)
        mapping_rule = {}
        embed_input(mapping_rule, 1, 'weights', weights)
        ScaleShiftOp.update_node_stat(node, mapping_rule)
        return cls.enabled
Example #6
    def extract(cls, node):
        pb = node.parameters

        collect_until_token(pb, b'<MaxChange>')
        max_change = read_binary_float_token(pb)

        collect_until_token(pb, b'<L2Regularize>')
        collect_until_token(pb, b'<LearningRate>')

        collect_until_token(pb, b'<TimeOffsets>')
        time_offsets = read_binary_vector(pb, False, np.int32)

        collect_until_token(pb, b'<LinearParams>')
        weights, weights_shape = read_binary_matrix(pb)
        collect_until_token(pb, b'<BiasParams>')
        bias_params = read_binary_vector(pb)

        collect_until_token(pb, b'<OrthonormalConstraint>')
        orthonormal_constraint = read_binary_float_token(pb)  # used only during training

        collect_until_token(pb, b'<UseNaturalGradient>')
        use_natural_grad = read_binary_bool_token(pb)  # used only during training
        collect_until_token(pb, b'<NumSamplesHistory>')
        num_samples_hist = read_binary_float_token(pb)

        collect_until_token(pb, b'<AlphaInOut>')
        alpha_in_out = read_binary_float_token(pb), read_binary_float_token(pb)  # for training, usually (4, 4)

        # according to Kaldi documentation http://kaldi-asr.org/doc/classkaldi_1_1nnet3_1_1TdnnComponent.html#details
        # it looks like it's used only during training (but not 100% sure)
        collect_until_token(pb, b'<RankInOut>')
        rank_in_out = read_binary_integer32_token(pb), read_binary_integer32_token(pb)

        biases = np.array(bias_params) if len(bias_params) != 0 else None
        attrs = {
            'weights': np.reshape(weights, weights_shape),
            'biases': biases,
            'time_offsets': time_offsets,
        }
        TdnnComponent.update_node_stat(node, attrs)
        return cls.enabled
Example #7
    def extract(cls, node: Node) -> bool:
        """
        Extract convolution parameters from node.parameters.
        node.parameters is a file-like descriptor object.
        :param node: Convolution node
        :return:
        """
        pb = node.parameters
        kernel = read_token_value(pb, b'<PatchDim>')
        stride = read_token_value(pb, b'<PatchStep>')
        patch_stride = read_token_value(pb, b'<PatchStride>')

        read_learning_info(pb)

        collect_until_whitespace(pb)
        weights, weights_shape = read_binary_matrix(pb)

        collect_until_whitespace(pb)
        biases = read_binary_vector(pb)

        if (patch_stride - kernel) % stride != 0:
            raise Error(
                'Kernel size and stride do not correspond to `patch_stride` attribute of Convolution layer. '
                + refer_to_faq_msg(93))

        output = biases.shape[0]
        if weights_shape[0] != output:
            raise Error(
                'Weights shape does not correspond to the `output` attribute of Convolution layer. '
                + refer_to_faq_msg(93))

        mapping_rule = {
            'output': output,
            'patch_stride': patch_stride,
            'bias_term': None,
            'pad': np.array([[0, 0], [0, 0], [0, 0], [0, 0]], dtype=np.int64),
            'pad_spatial_shape': np.array([[0, 0], [0, 0]], dtype=np.int64),
            'dilation': np.array([1, 1, 1, 1], dtype=np.int64),
            'kernel': np.array([1, 1, 1, kernel], dtype=np.int64),
            'stride': np.array([1, 1, 1, stride], dtype=np.int64),
            'kernel_spatial': np.array([1, kernel], dtype=np.int64),
            'input_feature_channel': 1,
            'output_feature_channel': 0,
            'kernel_spatial_idx': [2, 3],
            'group': 1,
            'reshape_kernel': True,
        }

        mapping_rule.update(layout_attrs())
        embed_input(mapping_rule, 1, 'weights', weights)
        embed_input(mapping_rule, 2, 'biases', biases)

        mapping_rule['bias_addable'] = len(biases) > 0

        Convolution.update_node_stat(node, mapping_rule)
        return cls.enabled
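The divisibility check above guarantees that kernel windows tile each patch_stride-sized stripe of the input exactly. A worked instance with invented sizes, using the usual 1 + (length - kernel) / stride patch count:

kernel, stride, patch_stride = 8, 4, 40

# The extractor would raise Error(...) here if this did not divide evenly.
assert (patch_stride - kernel) % stride == 0

num_patches = (patch_stride - kernel) // stride + 1
assert num_patches == 9  # nine kernel positions per stripe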
Example #8
    def extract(cls, node):
        pb = node.parameters
        read_learning_info(pb)
        biases = read_binary_vector(pb)
        bias_term = True
        mapping_rule = {'bias_term': bias_term}
        embed_input(mapping_rule, 1, 'weights', np.ones(biases.shape))
        embed_input(mapping_rule, 2, 'biases', biases)
        ScaleShiftOp.update_node_stat(node, mapping_rule)
        return cls.enabled
Example #9
def load_priors(file_descr, graph):
    try:
        collect_until_token(file_descr, b'<Priors>')
    except Error:
        # just ignore if priors were not found
        return
    if graph.graph['cmd_params'].counts is not None:
        graph.graph['priors'] = read_binary_vector(file_descr)
    else:
        log.error(
            "Model contains Prior values. To embed them into the generated IR, add the option --counts=\"\" to the command line",
            extra={'is_warning': True})
Example #10
    def extract(cls, node):
        pb = node.parameters
        collect_until_token(pb, b'<Params>')
        weights = read_binary_vector(pb)
        find_next_tag(pb)
        read_placeholder(pb, 1)

        mapping_rule = {'layout': 'NCHW'}
        embed_input(mapping_rule, 1, 'weights', weights)

        ScaleShiftOp.update_node_stat(node, mapping_rule)
        return cls.enabled
Example #11
    def extract(node):
        pb = node.parameters
        read_learning_info(pb)
        weights, weights_shape = read_binary_matrix(pb)
        biases = read_binary_vector(pb)

        mapping_rule = {'out-size': weights_shape[0], 'layout': 'NCHW'}
        embed_input(mapping_rule, 1, 'weights', weights)
        embed_input(mapping_rule, 2, 'biases', biases)

        InnerProduct.update_node_stat(node, mapping_rule)
        return __class__.enabled
Example #12
    def extract(node):
        pb = node.parameters

        collect_until_token(pb, b'<Dim>')
        dim = read_binary_integer32_token(pb)

        collect_until_token(pb, b'<BlockDim>')
        block_dim = read_binary_integer32_token(pb)

        if block_dim != dim:
            raise Error(
                "BatchNorm with Dim not equal to BlockDim is not supported")

        collect_until_token(pb, b'<Epsilon>')
        eps = read_binary_float_token(pb)

        collect_until_token(pb, b'<TargetRms>')
        target_rms = read_binary_float_token(pb)

        collect_until_token(pb, b'<TestMode>')
        test_mode = read_binary_bool_token(pb)

        if test_mode:
            raise Error("Test mode True for BatchNorm is not supported")

        collect_until_token(pb, b'<StatsMean>')
        mean = read_binary_vector(pb)

        collect_until_token(pb, b'<StatsVar>')
        var = read_binary_vector(pb)

        scale = target_rms / np.sqrt(var + eps)

        shift = -target_rms * mean / np.sqrt(var + eps)
        attrs = {}
        embed_input(attrs, 1, 'weights', scale)
        embed_input(attrs, 2, 'biases', shift)
        ScaleShiftOp.update_node_stat(node, attrs)
        return __class__.enabled
Example #13
    def extract(cls, node):
        pb = node.parameters
        read_learning_info(pb)
        weights, weights_shape = read_binary_matrix(pb)
        biases = read_binary_vector(pb)

        mapping_rule = {
            'out-size': weights_shape[0],
            'transpose_weights': True,
        }
        embed_input(mapping_rule, 1, 'weights', weights)
        embed_input(mapping_rule, 2, 'biases', biases)

        FullyConnected.update_node_stat(node, mapping_rule)
        return cls.enabled
Example #14
    def extract(cls, node):
        pb = node.parameters
        collect_until_token(pb, b'<Bias>')
        biases = read_binary_vector(pb)
        find_next_tag(pb)
        read_placeholder(pb, 1)

        mapping_rule = {
            'layout': 'NCHW',
            'bias_term': True,
            'out-size': biases.shape[0],
        }
        embed_input(mapping_rule, 2, 'biases', biases)

        ScaleShiftOp.update_node_stat(node, mapping_rule)
        return cls.enabled
Example #15
    def extract(node):
        pb = node.parameters
        collect_until_token(pb, b'<LinearParams>')
        weights, weights_shape = read_binary_matrix(pb)
        tag = find_next_tag(pb)
        read_placeholder(pb, 1)
        if tag != '<BiasParams>':
            raise Error('FixedAffineComponent must contain BiasParams')
        biases = read_binary_vector(pb)

        mapping_rule = {'out-size': weights_shape[0], 'layout': 'NCHW'}
        embed_input(mapping_rule, 1, 'weights', weights)
        embed_input(mapping_rule, 2, 'biases', biases)

        InnerProduct.update_node_stat(node, mapping_rule)
        return __class__.enabled
Example #16
    def extract(cls, node):
        pb = node.parameters
        collect_until_token(pb, b'<LinearParams>')
        weights, weights_shape = read_binary_matrix(pb)
        tag = find_next_tag(pb)
        read_placeholder(pb, 1)
        if tag != '<BiasParams>':
            raise Error('FixedAffineComponent must contain BiasParams')
        biases = read_binary_vector(pb)

        mapping_rule = {
            'out-size': weights_shape[0],
            'transpose_weights': True,
        }
        embed_input(mapping_rule, 1, 'weights', weights)
        embed_input(mapping_rule, 2, 'biases', biases)

        FullyConnected.update_node_stat(node, mapping_rule)
        return cls.enabled
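As in the other affine extractors, 'out-size' is weights_shape[0] because Kaldi stores affine parameters as an (output, input) matrix; transpose_weights then lets the FullyConnected op account for that layout. A tiny illustration with hypothetical shapes:

import numpy as np

# Hypothetical FixedAffineComponent: 256 outputs, 1024 inputs.
weights = np.random.rand(256, 1024)  # Kaldi layout: (out_dim, in_dim)
out_size = weights.shape[0]          # 256 -> the 'out-size' attribute
y = weights @ np.random.rand(1024)   # forward pass applies W to the input
assert y.shape == (out_size,)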