Example #1
    def extract(node: Node):
        attrs = {'axis': node.pb.bias_param.axis}
        embed_input(attrs, 1, 'bias', node.model_pb.blobs[0].data, 'biases')

        Add.update_node_stat(node, attrs)

        return __class__.enabled
Example #2
    def extract(cls, node):
        pb = node.pb
        model = node.model_pb
        param = pb.scale_param
        attrs = {
            'axis': param.axis,
        }

        if model is None and len(pb.bottom) == 1:
            # default weights and biases for scale layer if the caffemodel file doesn't contain them
            model = NamedAttrsClass({
                'blobs':
                np.array([
                    NamedAttrsClass({'data': np.array([1])}),
                    NamedAttrsClass({'data': np.array([0])})
                ])
            })
        # scale with 1 input and 1 or 2 blobs
        if model and len(model.blobs) != 0 and len(pb.bottom) == 1:
            attrs.update(weights_biases(param.bias_term, model))
        # 2 inputs + bias
        elif len(pb.bottom) == 2 and param.bias_term:
            if model is None or len(model.blobs) == 0:
                # default bias for scale layer with 2 inputs if the caffemodel file doesn't contain them
                model = NamedAttrsClass({
                    'blobs':
                    np.array([NamedAttrsClass({'data': np.array([0])})])
                })

            embed_input(attrs, 1, 'biases', model.blobs[0].data)
        ScaleShiftOp.update_node_stat(node, attrs)
        return cls.enabled
Example #3
    def extract(cls, node):
        pb = node.parameters

        collect_until_token(pb, b'<Dim>')
        dim = read_binary_integer32_token(pb)

        collect_until_token(pb, b'<BlockDim>')
        block_dim = read_binary_integer32_token(pb)

        collect_until_token(pb, b'<Epsilon>')
        eps = read_binary_float_token(pb)

        collect_until_token(pb, b'<TargetRms>')
        target_rms = read_binary_float_token(pb)

        collect_until_token(pb, b'<StatsMean>')
        mean = read_binary_vector(pb)

        collect_until_token(pb, b'<StatsVar>')
        var = read_binary_vector(pb)

        scale = target_rms / np.sqrt(var + eps)

        shift = -target_rms * mean / np.sqrt(var + eps)

        scale = np.tile(scale, dim // block_dim)
        shift = np.tile(shift, dim // block_dim)

        attrs = {'out-size': dim}
        embed_input(attrs, 1, 'weights', scale)
        embed_input(attrs, 2, 'biases', shift)

        ScaleShiftOp.update_node_stat(node, attrs)
        return cls.enabled
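
The scale/shift pair computed above is just the batch-norm transform target_rms * (x - mean) / sqrt(var + eps) folded into the affine form x * scale + shift that a ScaleShift layer expects. A small numeric check of that identity, with illustrative values only (none of them come from a real model):

import numpy as np

# Illustrative values, not taken from any model.
x = np.array([0.5, 1.5, -2.0])
mean = np.array([0.1, 0.2, 0.3])
var = np.array([1.0, 2.0, 0.5])
eps, target_rms = 1e-3, 1.0

scale = target_rms / np.sqrt(var + eps)
shift = -target_rms * mean / np.sqrt(var + eps)

# Folding the normalization into an affine op gives the same result.
assert np.allclose(target_rms * (x - mean) / np.sqrt(var + eps), x * scale + shift)
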
Example #4
def scale_ext(pl, ml):
    param = pl.scale_param
    attrs = {
        'op': 'ScaleShift',
        'type': 'ScaleShift',
        'axis': param.axis,
        'infer': copy_shape_infer
    }
    if ml is None and len(pl.bottom) == 1:
        # default weights and biases for scale layer if the caffemodel file doesn't contain them
        ml = NamedAttrsClass({
            'blobs':
            np.array([
                NamedAttrsClass({'data': np.array([1])}),
                NamedAttrsClass({'data': np.array([0])})
            ])
        })
    # scale with 1 input and 1 or 2 blobs
    if ml and len(ml.blobs) != 0 and len(pl.bottom) == 1:
        attrs.update(weights_biases(param.bias_term, ml))
    # 2 inputs + bias
    elif len(pl.bottom) == 2 and param.bias_term:
        if ml is None or len(ml.blobs) == 0:
            # default bias for scale layer with 2 inputs if the caffemodel file doesn't contain them
            ml = NamedAttrsClass({
                'blobs':
                np.array([NamedAttrsClass({'data': np.array([0])})])
            })

        embed_input(attrs, 1, 'biases', ml.blobs[0].data)

    return attrs
Example #5
    def extract(cls, node):
        proto_layer = node.pb
        param = proto_layer.augmentation_param
        # slice_dim is a deprecated parameter and is used as an alias for axis;
        # however, if slice_dim is defined and axis is left at its default, slice_dim is used
        update_attrs = {
            'crop_width': param.crop_width,
            'crop_height': param.crop_height,
            'write_augmented': param.write_augmented,
            'max_multiplier': param.max_multiplier,
            'augment_during_test': int(param.augment_during_test),
            'recompute_mean': param.recompute_mean,
            'write_mean': param.write_mean,
            'mean_per_pixel': int(param.mean_per_pixel),
            'mean': param.mean,
            'mode': param.mode,
            'bottomwidth': param.bottomwidth,
            'bottomheight': param.bottomheight,
            'num': param.num,
            'chromatic_eigvec': param.chromatic_eigvec
        }

        mapping_rule = merge_attrs(param, update_attrs)

        if node.model_pb:
            for index in range(0, len(node.model_pb.blobs)):
                embed_input(mapping_rule, index + 1, 'custom_{}'.format(index),
                            node.model_pb.blobs[index].data)

        # update the attributes of the node
        DataAugmentationOp.update_node_stat(node, mapping_rule)
        return cls.enabled
Example #6
    def replace_op(self, graph: Graph, node: Node):
        attrs = {'name': node.id + "/ScaleShift_"}

        param = graph.node[node.id]['pb'].bn_param
        pb_model = graph.node[node.id]['model_pb']
        blobs = pb_model.blobs

        if len(blobs) != 4:
            raise Error("Incorrect number of blobs in BN layer {}".format(
                node.id))

        mean = np.array(blobs[0].data)
        var = np.array(blobs[1].data)
        betta = np.array(blobs[2].data)
        gamma = np.array(blobs[3].data)

        gamma = gamma + np.repeat(param.eps, gamma.shape)

        scale = 1.0 / np.sqrt(gamma) * mean
        shift = var - betta * scale

        embed_input(attrs, 1, 'scale', scale, 'weights')
        embed_input(attrs, 2, 'bias', shift, 'biases')

        ss = ScaleShiftOp(graph, attrs)
        scale_shift = ss.create_node([node.in_node(0)])

        return [scale_shift.id]
Example #7
    def extract(node):
        pb = node.parameters
        weights_size = read_binary_integer32_token(pb)
        weights = read_blob(pb, weights_size, dtype=np.int32) - 1
        attrs = {'infer': copy_shape_infer}
        embed_input(attrs, 1, 'indexes', weights)
        Permute.update_node_stat(node, attrs)
        return __class__.enabled
Example #8
    def extract(cls, node):
        pb = node.parameters
        read_learning_info(pb)
        weights = read_binary_vector(pb)
        mapping_rule = {}
        embed_input(mapping_rule, 1, 'weights', weights)
        ScaleShiftOp.update_node_stat(node, mapping_rule)
        return cls.enabled
Example #9
    def extract(cls, node: Node) -> bool:
        """
        Extract conv parameters from node.parameters.
        node.parameters behaves like a file descriptor object.
        :param node: Convolution node
        :return:
        """
        pb = node.parameters
        kernel = read_token_value(pb, b'<PatchDim>')
        stride = read_token_value(pb, b'<PatchStep>')
        patch_stride = read_token_value(pb, b'<PatchStride>')

        read_learning_info(pb)

        collect_until_whitespace(pb)
        weights, weights_shape = read_binary_matrix(pb)

        collect_until_whitespace(pb)
        biases = read_binary_vector(pb)

        if (patch_stride - kernel) % stride != 0:
            raise Error(
                'Kernel size and stride do not correspond to the `patch_stride` attribute of the Convolution layer. '
                + refer_to_faq_msg(93))

        output = biases.shape[0]
        if weights_shape[0] != output:
            raise Error(
                'Weights shape does not correspond to the `output` attribute of the Convolution layer. '
                + refer_to_faq_msg(93))

        mapping_rule = {
            'output': output,
            'patch_stride': patch_stride,
            'bias_term': None,
            'pad': np.array([[0, 0], [0, 0], [0, 0], [0, 0]], dtype=np.int64),
            'pad_spatial_shape': np.array([[0, 0], [0, 0]], dtype=np.int64),
            'dilation': np.array([1, 1, 1, 1], dtype=np.int64),
            'kernel': np.array([1, 1, 1, kernel], dtype=np.int64),
            'stride': np.array([1, 1, 1, stride], dtype=np.int64),
            'kernel_spatial': np.array([1, kernel], dtype=np.int64),
            'input_feature_channel': 1,
            'output_feature_channel': 0,
            'kernel_spatial_idx': [2, 3],
            'group': 1,
            'reshape_kernel': True,
        }

        mapping_rule.update(layout_attrs())
        embed_input(mapping_rule, 1, 'weights', weights)
        embed_input(mapping_rule, 2, 'biases', biases)

        mapping_rule['bias_addable'] = len(biases) > 0

        Convolution.update_node_stat(node, mapping_rule)
        return cls.enabled
Example #10
    def extract(cls, node):
        pb = node.parameters
        read_learning_info(pb)
        biases = read_binary_vector(pb)
        bias_term = True
        mapping_rule = {'bias_term': bias_term}
        embed_input(mapping_rule, 1, 'weights', np.ones(biases.shape))
        embed_input(mapping_rule, 2, 'biases', biases)
        ScaleShiftOp.update_node_stat(node, mapping_rule)
        return cls.enabled
Example #11
    def extract(cls, node):
        pb = node.parameters
        collect_until_token(pb, b'<Params>')
        weights = read_binary_vector(pb)
        find_next_tag(pb)
        read_placeholder(pb, 1)

        mapping_rule = {'layout': 'NCHW'}
        embed_input(mapping_rule, 1, 'weights', weights)

        ScaleShiftOp.update_node_stat(node, mapping_rule)
        return cls.enabled
Example #12
    def extract(node):
        pb = node.parameters
        read_learning_info(pb)
        weights, weights_shape = read_binary_matrix(pb)
        biases = read_binary_vector(pb)

        mapping_rule = {'out-size': weights_shape[0], 'layout': 'NCHW'}
        embed_input(mapping_rule, 1, 'weights', weights)
        embed_input(mapping_rule, 2, 'biases', biases)

        InnerProduct.update_node_stat(node, mapping_rule)
        return __class__.enabled
Example #13
    def extract(node):
        pb = node.parameters
        collect_until_token(pb, b'<Dim>')
        dim = read_binary_integer32_token(pb)
        target_rms = 1
        d_scaled = dim * target_rms**2
        in_norm = np.zeros([dim], np.float64)
        in_norm += 1.0 / d_scaled
        in_norm = np.maximum(in_norm, 2.**(-66))
        in_norm = np.power(in_norm, -0.5)
        attrs = {}
        embed_input(attrs, 1, 'weights', in_norm)
        ScaleShiftOp.update_node_stat(node, attrs)
        return __class__.enabled
Example #14
    def extract(cls, node):
        eps = node.pb.batch_norm_param.eps
        attrs = {'eps': eps}
        pb_model = None if not node.soft_get('model_pb',
                                             None) else node.model_pb
        if pb_model:
            blobs = pb_model.blobs
            assert len(blobs) >= 2, 'BatchNorm accepts not less than two input blobs'
            mean = np.array(blobs[0].data)
            variance = np.array(blobs[1].data)

            if len(blobs) == 3:
                scale = blobs[2].data[0]
                if scale != 0:
                    scale = 1.0 / scale
                mean *= scale
                variance *= scale

            embed_input(attrs, 1, 'gamma', np.ones(mean.shape), 'gamma')
            embed_input(attrs, 2, 'beta', np.zeros(variance.shape), 'beta')
            embed_input(attrs, 3, 'mean', mean, 'biases')
            embed_input(attrs, 4, 'variance', variance, 'weights')

        BatchNormInference.update_node_stat(node, attrs)
        return cls.enabled
Example #15
    def extract(cls, node):
        pb = node.parameters
        collect_until_token(pb, b'<Params>')
        weights, weights_shape = read_binary_matrix(pb)

        mapping_rule = {
            'out-size': weights_shape[0],
            'transpose_weights': True,
        }

        embed_input(mapping_rule, 1, 'weights', weights)

        FullyConnected.update_node_stat(node, mapping_rule)
        return cls.enabled
Example #16
    def extract(cls, node):
        pb = node.parameters

        collect_until_token(pb, b'<Dim>')
        dim = read_binary_integer32_token(pb)

        collect_until_token(pb, b'<Scale>')
        scale = read_binary_float_token(pb)

        # TODO add real batch here
        attrs = {}
        embed_input(attrs, 1, 'weights', np.full([dim], scale))
        ScaleShiftOp.update_node_stat(node, attrs)
        return cls.enabled
Example #17
    def extract(cls, node):
        pb = node.parameters
        collect_until_token(pb, b'<ConvolutionModel>')
        in_shape = read_token_value(pb, b'<NumFiltersIn>')
        out_shape = read_token_value(pb, b'<NumFiltersOut>')
        height_in = read_token_value(pb, b'<HeightIn>')
        height_out = read_token_value(pb, b'<HeightOut>')
        height_subsample = read_token_value(pb, b'<HeightSubsampleOut>')
        collect_until_token(pb, b'<Offsets>')
        offsets = read_binary_vector_of_pairs(pb,
                                              read_token=False,
                                              dtype=np.int32)
        collect_until_token(pb, b'<RequiredTimeOffsets>')
        time_offsets = read_binary_vector(pb, read_token=False, dtype=np.int32)
        collect_until_token(pb, b'<LinearParams>')
        weights, _ = read_binary_matrix(pb)
        collect_until_token(pb, b'<BiasParams>')
        biases = read_binary_vector(pb)

        offsets = offsets.reshape([len(offsets) // 2, 2])
        mapping_rule = {
            # stride for h axis
            'height_subsample': height_subsample,
            # input dimension for h axis
            'height_in': height_in,
            # output dimension for h axis
            'height_out': height_out,
            # input dimension for channel axis
            'in_channels': in_shape,
            # output dimension for channel axis
            'out_channels': out_shape,
            # array with pairs like the following
            # [ (-1, -1) (-1, 0) (-1, 1)
            #   (0, -1)  (0, 0)  (0, 1)
            #   (1, -1)  (1, 0)  (1, 1)]
            # it means that a 3x3 kernel will be applied to calculate the current output value
            'offsets': offsets,
            # required time offsets to calculate current convolution
            # time_offsets = [-1, 0, 1] for the previous example means no padding along the time axis
            # and that 3 values should be prepared
            # time_offsets = [0] means zero padding [1, 1] for time axis
            'time_offsets': time_offsets,
            'out-size': out_shape * height_out
        }

        embed_input(mapping_rule, 1, 'weights', weights)
        embed_input(mapping_rule, 2, 'biases', biases)

        TimeHeightConvolutionComponent.update_node_stat(node, mapping_rule)
        return cls.enabled
Example #18
    def extract(cls, node):
        pb = node.parameters
        read_learning_info(pb)
        weights, weights_shape = read_binary_matrix(pb)
        biases = read_binary_vector(pb)

        mapping_rule = {
            'out-size': weights_shape[0],
            'transpose_weights': True,
        }
        embed_input(mapping_rule, 1, 'weights', weights)
        embed_input(mapping_rule, 2, 'biases', biases)

        FullyConnected.update_node_stat(node, mapping_rule)
        return cls.enabled
Example #19
    def extract(cls, node):
        pb = node.parameters
        collect_until_token(pb, b'<Bias>')
        biases = read_binary_vector(pb)
        find_next_tag(pb)
        read_placeholder(pb, 1)

        mapping_rule = {
            'layout': 'NCHW',
            'bias_term': True,
            'out-size': biases.shape[0],
        }
        embed_input(mapping_rule, 2, 'biases', biases)

        ScaleShiftOp.update_node_stat(node, mapping_rule)
        return cls.enabled
Example #20
    def extract(node):
        pb = node.parameters
        collect_until_token(pb, b'<LinearParams>')
        weights, weights_shape = read_binary_matrix(pb)
        tag = find_next_tag(pb)
        read_placeholder(pb, 1)
        if tag != '<BiasParams>':
            raise Error('FixedAffineComponent must contain BiasParams')
        biases = read_binary_vector(pb)

        mapping_rule = {'out-size': weights_shape[0], 'layout': 'NCHW'}
        embed_input(mapping_rule, 1, 'weights', weights)
        embed_input(mapping_rule, 2, 'biases', biases)

        InnerProduct.update_node_stat(node, mapping_rule)
        return __class__.enabled
Example #21
    def test_embed_input_w_bin_name(self):
        attrs = {}
        blob = np.array([1, 2])
        name = 'weights'
        embed_input(attrs, 1, name, blob, 'special_name')
        exp_res = {
            'weights': blob,
            'embedded_inputs': [(1, name, {'bin': 'special_name'})]
        }
        for key in exp_res.keys():
            if key == name:
                np.testing.assert_equal(attrs[key], exp_res[key])
            else:
                self.assertEqual(attrs[key], exp_res[key])
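
The test above pins down the behaviour that every example on this page relies on: embed_input stores the blob under the given attribute name and records a (port, name, {'bin': ...}) triple under 'embedded_inputs'. The following is only a minimal sketch consistent with that test, not the Model Optimizer's actual implementation; the real helper lives in the extractor module and its signature may differ.

import numpy as np

def embed_input(attrs: dict, port: int, name: str, value, bin_name: str = None):
    # Sketch of the contract exercised by the test above (an assumption, not the library code):
    # store the blob under `name` and remember which input port and .bin entry it maps to.
    attrs[name] = np.array(value)
    attrs.setdefault('embedded_inputs', []).append(
        (port, name, {'bin': bin_name if bin_name is not None else name}))
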
Example #22
    def extract(cls, node):
        pb = node.parameters
        collect_until_token(pb, b'<LinearParams>')
        weights, weights_shape = read_binary_matrix(pb)
        tag = find_next_tag(pb)
        read_placeholder(pb, 1)
        if tag != '<BiasParams>':
            raise Error('FixedAffineComponent must contain BiasParams')
        biases = read_binary_vector(pb)

        mapping_rule = {
            'out-size': weights_shape[0],
            'transpose_weights': True,
        }
        embed_input(mapping_rule, 1, 'weights', weights)
        embed_input(mapping_rule, 2, 'biases', biases)

        FullyConnected.update_node_stat(node, mapping_rule)
        return cls.enabled
Example #23
    def extract(cls, node):
        pb = node.parameters
        try:
            collect_until_token(pb, b'<Dim>')
        except Error:
            try:
                pb.seek(0)
                collect_until_token(pb, b'<InputDim>')
            except Error:
                raise Error("Neither <Dim> nor <InputDim> were found")
        in_dim = read_binary_integer32_token(pb)

        try:
            collect_until_token(pb, b'<TargetRms>')
            target_rms = read_binary_float_token(pb)
        except Error:
            # model does not contain TargetRms
            target_rms = 1.0

        try:
            collect_until_token(pb, b'<AddLogStddev>')
            add_log = read_binary_bool_token(pb)
        except Error:
            # model does not contain AddLogStddev
            add_log = False

        if add_log is not False:
            raise Error(
                "AddLogStddev True  in Normalize component is not supported")

        scale = target_rms * np.sqrt(in_dim)

        attrs = {
            'eps': 0.00000001,
            'across_spatial': 0,
            'channel_shared': 1,
            'in_dim': in_dim,
        }
        embed_input(attrs, 1, 'weights', [scale])

        NormalizeOp.update_node_stat(node, attrs)
        return cls.enabled
Example #24
    def extract(cls, node):
        pb = node.parameters

        collect_until_token(pb, b'<Params>')
        ifo_x_weights, ifo_x_weights_shape = read_binary_matrix(pb)

        try:
            use_dropout = collect_until_token_and_read(pb, b'<UseDropout>', np.bool)
        except Error:
            # the layer does not have a UseDropout attribute, so set it to False
            use_dropout = False

        mapping_rule = {'use_dropout': use_dropout}

        assert len(ifo_x_weights_shape) == 2, "Unexpected shape of weights in LSTMNonLinearityComponent"
        assert ifo_x_weights_shape[0] == 3, "Unexpected shape of weights in LSTMNonLinearityComponent"

        ifo_x_weights = ifo_x_weights.reshape(ifo_x_weights_shape)
        embed_input(mapping_rule, 1, 'i_weights', ifo_x_weights[0][:])
        embed_input(mapping_rule, 2, 'f_weights', ifo_x_weights[1][:])
        embed_input(mapping_rule, 3, 'o_weights', ifo_x_weights[2][:])

        LstmNonLinearity.update_node_stat(node, mapping_rule)
        return cls.enabled
Example #25
    def extract(node):
        pb = node.parameters

        collect_until_token(pb, b'<Dim>')
        dim = read_binary_integer32_token(pb)

        collect_until_token(pb, b'<BlockDim>')
        block_dim = read_binary_integer32_token(pb)

        if block_dim != dim:
            raise Error(
                "Dim not equal to BlockDim is not supported for BatchNorm")

        collect_until_token(pb, b'<Epsilon>')
        eps = read_binary_float_token(pb)

        collect_until_token(pb, b'<TargetRms>')
        target_rms = read_binary_float_token(pb)

        collect_until_token(pb, b'<TestMode>')
        test_mode = read_binary_bool_token(pb)

        if test_mode is not False:
            raise Error("Test mode True for BatchNorm is not supported")

        collect_until_token(pb, b'<StatsMean>')
        mean = read_binary_vector(pb)

        collect_until_token(pb, b'<StatsVar>')
        var = read_binary_vector(pb)

        scale = target_rms / np.sqrt(var + eps)

        shift = -target_rms * mean / np.sqrt(var + eps)
        attrs = {}
        embed_input(attrs, 1, 'weights', scale)
        embed_input(attrs, 2, 'biases', shift)
        ScaleShiftOp.update_node_stat(node, attrs)
        return __class__.enabled
Example #26
def batch_norm_ext(pb_layer, pb_model):
    """
    Extracts properties of the BatchNorm layer.
    If a scale blob is present, it is merged into mean and variance.
    Args:
        pb_layer: proto layer, contains the layer's own properties, e.g. epsilon
        pb_model: caffemodel layer, contains blobs with 0: mean, 1: variance, (opt) 2: scale

    Returns:
        attrs object with type, partial inference function and mean/variance properties.
    """
    assert pb_layer, 'Protobuf layer can not be empty'
    param = pb_layer.batch_norm_param
    attrs = {
        'op': 'BatchNormalization',
        'type': 'BatchNormalization',
        'epsilon': param.eps,
        'infer': copy_shape_infer
    }

    if not pb_model:
        return attrs

    blobs = pb_model.blobs
    assert len(blobs) >= 2, 'BatchNorm accepts not less than two input blobs'
    mean = np.array(blobs[0].data)
    variance = np.array(blobs[1].data)

    if len(blobs) == 3:
        scale = blobs[2].data[0]
        if scale != 0:
            scale = 1.0 / scale
        mean *= scale
        variance *= scale

    embed_input(attrs, 1, 'mean', mean, 'biases')
    embed_input(attrs, 2, 'variance', variance, 'weights')

    return attrs
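
A note on the scale handling above: Caffe's BatchNorm layer stores accumulated statistics together with a moving-average factor in blobs[2], so the true mean and variance are the stored blobs multiplied by 1 / blobs[2].data[0]; that is the correction the code applies before embedding. At inference the embedded values enter the standard normalization, sketched below; the function name is mine (illustrative only), and gamma/beta are handled elsewhere, e.g. as the ones/zeros embedded in Example #14.

import numpy as np

def batch_norm_inference(x, mean, variance, eps):
    # Standard batch-norm inference using the mean/variance embedded above.
    return (x - mean) / np.sqrt(variance + eps)
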
Example #27
def extract_custom_blobs(node):
    """
    Enumerates all blobs in node.model_pb and, for each blob, creates a new embedded
    input named 'custom_X', where X is an index >= 0 following the order in which blobs
    appear in node.model_pb. The order is also enforced by the input port index, so the
    order of blobs is preserved in the final IR generation. Order is important because
    blobs can be accessed by indices (in addition to names).
    Updates node attributes in-place.
    """
    base_port = len(node.in_nodes())
    if not hasattr(node.model_pb, 'blobs'):
        return
    for i, blob in enumerate(node.model_pb.blobs):
        port = base_port + i
        internal_name = '_custom_blob_' + str(i)
        log.debug("Found new custom blob of length {} for node {}. ".format(
            len(blob.data),
            node.name if node.has_valid('name') else '<UNKNOWN>') +
                  "It will appear as input {} and internal attribute {}.".
                  format(port, internal_name))
        embed_input(node.graph.node[node.id], port, internal_name, blob.data,
                    blob_name(i))
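
Combined with the embed_input contract from the test in Example #21, a node with one regular input and two custom blobs would end up with attributes roughly like the following. The concrete bin names depend on the blob_name() helper, which is not shown in the snippet; 'custom_0'/'custom_1' follow the docstring and are an assumption.

import numpy as np

# Hypothetical result for a node with len(node.in_nodes()) == 1 and two blobs.
expected_attrs = {
    '_custom_blob_0': np.array([1.0, 2.0]),
    '_custom_blob_1': np.array([3.0]),
    'embedded_inputs': [
        (1, '_custom_blob_0', {'bin': 'custom_0'}),
        (2, '_custom_blob_1', {'bin': 'custom_1'}),
    ],
}
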
Example #28
    def extract(node):
        pb = node.parameters
        collect_until_token(pb, b'<Params>')
        ifo_x_weights, ifo_x_weights_shape = read_binary_matrix(pb)

        mapping_rule = {}

        embed_input(mapping_rule, 1, 'i_weights', ifo_x_weights[0:1024])
        embed_input(mapping_rule, 2, 'f_weights', ifo_x_weights[1024:2048])
        embed_input(mapping_rule, 3, 'o_weights', ifo_x_weights[2048:])

        LstmNonLinearity.update_node_stat(node, mapping_rule)
        return __class__.enabled
Example #29
    def extract(cls, node):
        pb = node.parameters
        collect_until_token(pb, b'<Params>')
        ifo_x_weights, ifo_x_weights_shape = read_binary_matrix(pb)

        mapping_rule = {}

        assert len(ifo_x_weights_shape) == 2, "Unexpected shape of weights in LSTMNonLinearityComponent"
        assert ifo_x_weights_shape[0] == 3, "Unexpected shape of weights in LSTMNonLinearityComponent"

        ifo_x_weights = ifo_x_weights.reshape(ifo_x_weights_shape)
        embed_input(mapping_rule, 1, 'i_weights', ifo_x_weights[0][:])
        embed_input(mapping_rule, 2, 'f_weights', ifo_x_weights[1][:])
        embed_input(mapping_rule, 3, 'o_weights', ifo_x_weights[2][:])

        LstmNonLinearity.update_node_stat(node, mapping_rule)
        return cls.enabled
Example #30
    def extract(cls, node):
        clip_value = 50
        pb = node.parameters
        res = collect_until_whitespace(pb)
        if res == b'<CellClip>':
            clip_value = get_uint32(pb.read(4))
        collect_until_token(pb, b'FM')
        gifo_x_weights, gifo_x_weights_shape = read_binary_matrix(pb, False)
        gifo_r_weights, gifo_r_weights_shape = read_binary_matrix(pb)
        gifo_biases = read_binary_vector(pb)
        input_gate_weights = read_binary_vector(pb)
        forget_gate_weights = read_binary_vector(pb)
        output_gate_weights = read_binary_vector(pb)

        projection_weights, projection_weights_shape = read_binary_matrix(pb)

        mapping_rule = {
            'gifo_x_weights_shape': gifo_x_weights_shape,
            'gifo_r_weights_shape': gifo_r_weights_shape,
            'projection_weights_shape': projection_weights_shape,
            'clip_value': clip_value,
            'format': 'kaldi',
        }

        embed_input(mapping_rule, 1, 'gifo_x_weights', gifo_x_weights)
        embed_input(mapping_rule, 2, 'gifo_r_weights', gifo_r_weights)
        embed_input(mapping_rule, 3, 'gifo_biases', gifo_biases)
        embed_input(mapping_rule, 4, 'input_gate_weights', input_gate_weights)
        embed_input(mapping_rule, 5, 'forget_gate_weights',
                    forget_gate_weights)
        embed_input(mapping_rule, 6, 'output_gate_weights',
                    output_gate_weights)
        embed_input(mapping_rule, 7, 'projection_weights', projection_weights)

        LSTMCell.update_node_stat(node, mapping_rule)
        return cls.enabled