def extract(cls, node):
    param = node.pb.inner_product_param
    pb_model = node.model_pb
    attrs = {
        'out-size': param.num_output,
        'transpose_weights': not param.transpose,
    }
    attrs.update(weights_biases(param.bias_term, pb_model))
    FullyConnected.update_node_stat(node, attrs)
    return cls.enabled
def extract(cls, node):
    attr = get_mxnet_layer_attrs(node.symbol_dict)
    num_hidden = attr.int('num_hidden', None)
    assert num_hidden is not None, "{} node with no `num_hidden` parameter found".format(cls.op)
    attrs = {
        'out-size': num_hidden,
        'transpose_weights': True,
    }
    FullyConnected.update_node_stat(node, attrs)
    return cls.enabled
def extract(cls, node):
    pb = node.parameters
    collect_until_token(pb, b'<Params>')
    weights, weights_shape = read_binary_matrix(pb)

    mapping_rule = {
        'out-size': weights_shape[0],
        'transpose_weights': True,
    }
    embed_input(mapping_rule, 1, 'weights', weights)

    FullyConnected.update_node_stat(node, mapping_rule)
    return cls.enabled
def extract(cls, node):
    pb = node.parameters
    read_learning_info(pb)
    weights, weights_shape = read_binary_matrix(pb)
    biases = read_binary_vector(pb)

    mapping_rule = {
        'out-size': weights_shape[0],
        'transpose_weights': True,
    }
    embed_input(mapping_rule, 1, 'weights', weights)
    embed_input(mapping_rule, 2, 'biases', biases)

    FullyConnected.update_node_stat(node, mapping_rule)
    return cls.enabled
def extract(cls, node):
    pb = node.parameters
    collect_until_token(pb, b'<LinearParams>')
    weights, weights_shape = read_binary_matrix(pb)
    tag = find_next_tag(pb)
    read_placeholder(pb, 1)
    if tag != '<BiasParams>':
        raise Error('FixedAffineComponent must contain BiasParams')
    biases = read_binary_vector(pb)

    mapping_rule = {
        'out-size': weights_shape[0],
        'transpose_weights': True,
    }
    embed_input(mapping_rule, 1, 'weights', weights)
    embed_input(mapping_rule, 2, 'biases', biases)

    FullyConnected.update_node_stat(node, mapping_rule)
    return cls.enabled
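# Illustrative note (not part of the extractors above): each extractor sets 'out-size' to the
# number of output neurons and 'transpose_weights' to True when the stored weight matrix is
# laid out as [out-size, input-size] (OI). A minimal NumPy sketch of the dense computation that
# attribute set describes, assuming that layout; `x`, `weights` and `biases` are hypothetical
# placeholder values, not data read by the extractors.
import numpy as np

def fully_connected_reference(x, weights, biases=None):
    # x: [batch, input_size]; weights: [out_size, input_size] (OI), hence the transpose
    y = x @ weights.T
    if biases is not None:
        y = y + biases
    return y

x = np.random.rand(2, 8).astype(np.float32)          # batch of 2, input size 8
weights = np.random.rand(16, 8).astype(np.float32)   # out-size 16, OI layout
biases = np.zeros(16, dtype=np.float32)
assert fully_connected_reference(x, weights, biases).shape == (2, 16)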
def replace_op(self, graph: Graph, node: Node):
    input_node = node.in_node()

    memory_pair_input = unique_id('id')
    memory_pair_output = unique_id('id')

    # Input -> FullyConnected
    fc_layer_after_input_attrs = {'name': 'input_fullyconnected',
                                  'out-size': node.gifo_x_weights_shape[0],
                                  'transpose_weights': True,
                                  'bias_term': True,
                                  }
    fc_layer_after_input = FullyConnected(graph, fc_layer_after_input_attrs).create_node([input_node])
    input_as_const(fc_layer_after_input, fc_layer_after_input_attrs, 1, 'weights', node.gifo_x_weights)
    input_as_const(fc_layer_after_input, fc_layer_after_input_attrs, 2, 'biases', node.gifo_biases)

    prev_lstm_output = Memory(graph, {'name': 'prev_memory_output',
                                      'id': memory_pair_input,
                                      'index': 1,
                                      'size': 2,
                                      'shape': np.array([node.gifo_r_weights_shape[1]], dtype=np.int64)
                                      }).create_node()

    # *Memory(output) -> FullyConnected
    fc_layer_from_prev_state_attrs = {'name': 'prev_memory_output_fullyconnected',
                                      'out-size': node.gifo_r_weights_shape[0],
                                      'transpose_weights': True,
                                      'bias_term': False,
                                      }
    fc_layer_from_prev_state = FullyConnected(graph, fc_layer_from_prev_state_attrs).create_node([prev_lstm_output])
    input_as_const(fc_layer_from_prev_state, fc_layer_from_prev_state_attrs, 1, 'weights', node.gifo_r_weights)

    # Memory -> FullyConnected  \
    #                            *Eltwise(sum)
    # Input -> FullyConnected   /
    join_input_prev_state_sum = Add(graph, {'name': 'join_input_eltwise',
                                            }).create_node([fc_layer_from_prev_state, fc_layer_after_input])

    # *Eltwise(sum) -> Split
    # it is split into 4 nodes: Act, Eltw*3
    # the following order is mandatory
    #       ___Tanh
    #      /
    # Split ---(2)Eltwise(sum)
    #      |\
    #      | \__(3)Eltwise(sum)
    #      |____(4)Eltwise(sum)
    split_joined_input = Split(graph, {'name': 'join_input_split',
                                       'axis': 1,
                                       'num_split': 4,
                                       'out_ports_count': 4,
                                       }).create_node([join_input_prev_state_sum])

    prev_lstm_state = Memory(graph, {'name': 'prev_memory_state',
                                     'id': memory_pair_output,
                                     'index': 1,
                                     'size': 2,
                                     'shape': np.array([node.input_gate_weights.shape[0]], dtype=np.int64)
                                     }).create_node()

    # *Memory(state) -> *ScaleShift(input)
    state_input_scaleshift_attrs = {'name': 'input_scaleshift',
                                    'bias_term': False
                                    }
    state_input_scaleshift = ScaleShiftOp(graph, state_input_scaleshift_attrs).create_node([prev_lstm_state])
    input_as_const(state_input_scaleshift, state_input_scaleshift_attrs, 1, 'weights', node.input_gate_weights)

    # *Memory(state) -> *ScaleShift(forget)
    state_forget_scaleshift_attrs = {'name': 'forget_scaleshift',
                                     'bias_term': False
                                     }
    state_forget_scaleshift = ScaleShiftOp(graph, state_forget_scaleshift_attrs).create_node([prev_lstm_state])
    input_as_const(state_forget_scaleshift, state_forget_scaleshift_attrs, 1, 'weights', node.forget_gate_weights)

    # Split                                \
    #                                      (2)Eltwise(sum)
    # Memory(state) -> *ScaleShift(input)  /
    join_prev_lstm_input_joined_input_sum = Add(graph, {'name': 'join_prev_lstm_input_joined_input_eltwise',
                                                        }).create_node([(split_joined_input, 1), state_input_scaleshift])

    # Split                                 \
    #                                       (3)Eltwise(sum)
    # Memory(state) -> *ScaleShift(forget)  /
    join_prev_lstm_input_joined_forget_sum = Add(graph, {'name': 'join_prev_lstm_input_joined_forget_sum',
                                                         }).create_node([(split_joined_input, 2), state_forget_scaleshift])

    # Split -> Tanh
    remember_tanh = Tanh(graph, {'name': 'remember_tanh'}).create_node([(split_joined_input, 0)])

    # Split -> (2)Eltwise(sum) -> *Sigmoid
    remember_sigmoid = Sigmoid(graph, {'name': 'remember_sigmoid'}).create_node([join_prev_lstm_input_joined_input_sum])

    # Split -> (3)Eltwise(sum) -> **Sigmoid
    forget_sigmoid = Sigmoid(graph, {'name': 'forget_sigmoid'}).create_node([join_prev_lstm_input_joined_forget_sum])

    # *Memory(state)                        \
    #                                       (6)Eltwise(mul)
    # Split -> (3)Eltwise(sum) -> **Sigmoid /
    join_forget_prev_state_mul = Mul(graph, {'name': 'join_forget_prev_state_mul',
                                             }).create_node([forget_sigmoid, prev_lstm_state])

    # Split -> Tanh                         \
    #                                       (5)Eltwise(mul)
    # Split -> (2)Eltwise(sum) -> *Sigmoid  /
    join_remember_candidates_mul = Mul(graph, {'name': 'join_remember_candidates_mul',
                                               }).create_node([remember_tanh, remember_sigmoid])

    # (5)Eltwise(mul) \
    #                 (7)Eltwise(sum)
    # (6)Eltwise(mul) /
    join_forget_remember_sum = Add(graph, {'name': 'join_forget_remember_sum',
                                           }).create_node([join_forget_prev_state_mul, join_remember_candidates_mul])

    # (7)Eltwise(sum) -> Clamp
    join_forget_clamp = Clamp(graph, {'name': 'join_forget_clamp',
                                      'max': node.clip_value,
                                      'min': -node.clip_value
                                      }).create_node([join_forget_remember_sum])

    # Clamp -> (2)Memory(state)
    next_lstm_state = Memory(graph, {'name': 'next_lstm_state',
                                     'id': memory_pair_output,
                                     'index': 0,
                                     'size': 2,
                                     'shape': np.array([node.input_gate_weights.shape[0]], dtype=np.int64)
                                     }).create_node([join_forget_clamp])
    Result(graph, {'name': 'next_lstm_state_out'}).create_node([next_lstm_state])

    # Clamp -> (2)Tanh
    state_filtered_tanh = Tanh(graph, {'name': 'state_filtered_tanh'}).create_node([join_forget_clamp])

    # Clamp -> (2)ScaleShift
    clamp_scaleshift_attrs = {'name': 'clamp_scaleshift',
                              'bias_term': False
                              }
    clamp_scaleshift = ScaleShiftOp(graph, clamp_scaleshift_attrs).create_node([join_forget_clamp])
    input_as_const(clamp_scaleshift, clamp_scaleshift_attrs, 1, 'weights', node.output_gate_weights)

    # Split                  \
    #                        (4)Eltwise(sum)
    # Clamp -> (2)ScaleShift /
    join_next_lstm_input_joined_input_sum = Add(graph, {'name': 'join_next_lstm_input_joined_input_sum',
                                                        }).create_node([(split_joined_input, 3), clamp_scaleshift])

    # (4)Eltwise(sum) -> (3)Sigmoid
    output_sigmoid = Sigmoid(graph, {'name': 'output_sigmoid'}).create_node([join_next_lstm_input_joined_input_sum])

    # (4)Eltwise(sum) -> (3)Sigmoid \
    #                               (5)Eltwise(mul)
    # Clamp -> (2)Tanh              /
    joined_output_mul = Mul(graph, {'name': 'joined_output_mul'}).create_node([state_filtered_tanh, output_sigmoid])

    # (5)Eltwise(mul) -> (3)FullyConnected
    fc_output_attrs = {'name': 'FullyConnected',
                       'out-size': node.projection_weights_shape[0],
                       'transpose_weights': True,
                       'bias_term': False
                       }
    fc_output = FullyConnected(graph, fc_output_attrs).create_node([joined_output_mul])
    input_as_const(fc_output, fc_output_attrs, 1, 'weights', node.projection_weights)

    #                   / (2)Memory(output)
    # (3)FullyConnected
    #                   \ Output (any next node) (edge created automatically after replacement)
    next_lstm_output = Memory(graph, {'name': 'next_lstm_output',
                                      'id': memory_pair_input,
                                      'index': 0,
                                      'size': 2,
                                      'shape': np.array([node.gifo_r_weights_shape[1]], dtype=np.int64)
                                      }).create_node([fc_output])
    Result(graph, {'name': 'next_lstm_output_out'}).create_node([next_lstm_output])

    return [fc_output.id]
def replace_op(self, graph: Graph, node: Node):
    input_out_port = node.in_port(0).get_source()

    memory_pair_input = unique_id('id')
    memory_pair_output = unique_id('id')

    # Input -> FullyConnected
    fc_layer_after_input_attrs = {'name': 'input_fullyconnected',
                                  'out-size': node.gifo_x_weights_shape[0],
                                  'transpose_weights': True,
                                  'bias_term': True,
                                  }
    fc_layer_after_input = FullyConnected(graph, fc_layer_after_input_attrs).create_node()
    fc_layer_after_input.in_port(0).connect(input_out_port)
    input_as_const(fc_layer_after_input, fc_layer_after_input_attrs, 1, 'weights', node.gifo_x_weights)
    input_as_const(fc_layer_after_input, fc_layer_after_input_attrs, 2, 'biases', node.gifo_biases)

    init_value_prev_lstm_output = create_zero_value_with_batch_from_input(input_out_port,
                                                                          node.gifo_r_weights_shape[1])
    prev_lstm_output = ReadValue(graph, {'name': 'prev_memory_output',
                                         'variable_id': memory_pair_input
                                         }).create_node()
    prev_lstm_output.in_port(0).connect(init_value_prev_lstm_output.out_port(0))

    # *Memory(output) -> FullyConnected
    fc_layer_from_prev_state_attrs = {'name': 'prev_memory_output_fullyconnected',
                                      'out-size': node.gifo_r_weights_shape[0],
                                      'transpose_weights': True,
                                      'bias_term': False,
                                      }
    fc_layer_from_prev_state = FullyConnected(graph, fc_layer_from_prev_state_attrs).create_node()
    fc_layer_from_prev_state.in_port(0).connect(prev_lstm_output.out_port(0))
    input_as_const(fc_layer_from_prev_state, fc_layer_from_prev_state_attrs, 1, 'weights', node.gifo_r_weights)

    # Memory -> FullyConnected  \
    #                            *Eltwise(sum)
    # Input -> FullyConnected   /
    join_input_prev_state_sum = Add(graph, {'name': 'join_input_eltwise'}).create_node()
    join_input_prev_state_sum.in_port(0).connect(fc_layer_from_prev_state.out_port(0))
    join_input_prev_state_sum.in_port(1).connect(fc_layer_after_input.out_port(0))

    # *Eltwise(sum) -> Split
    # it is split into 4 nodes: Act, Eltw*3
    # the following order is mandatory
    #       ___Tanh
    #      /
    # Split ---(2)Eltwise(sum)
    #      |\
    #      | \__(3)Eltwise(sum)
    #      |____(4)Eltwise(sum)
    split_joined_input_axis = Const(graph, {'value': np.int64(1)}).create_node()
    split_joined_input = Split(graph, {'name': 'join_input_split',
                                       'num_splits': 4,
                                       'out_ports_count': 4
                                       }).create_node()
    split_joined_input.in_port(0).connect(join_input_prev_state_sum.out_port(0))
    split_joined_input.in_port(1).connect(split_joined_input_axis.out_port(0))

    # Old Memory-based construction, superseded by the ReadValue node created below:
    # prev_lstm_state = Memory(graph, {'name': 'prev_memory_state',
    #                                  'id': memory_pair_output,
    #                                  'index': 1,
    #                                  'size': 2,
    #                                  'shape': np.array([node.input_gate_weights.shape[0]], dtype=np.int64)
    #                                  }).create_node()
    init_value_prev_lstm_state = create_zero_value_with_batch_from_input(split_joined_input.out_port(0),
                                                                         node.input_gate_weights.shape[0])
    prev_lstm_state = ReadValue(graph, {'name': 'prev_memory_state',
                                        'variable_id': memory_pair_output
                                        }).create_node()
    prev_lstm_state.in_port(0).connect(init_value_prev_lstm_state.out_port(0))

    # *Memory(state) -> *ScaleShift(input)
    state_input_scaleshift_attrs = {'name': 'input_scaleshift',
                                    'bias_term': False
                                    }
    state_input_scaleshift = ScaleShiftOp(graph, state_input_scaleshift_attrs).create_node()
    state_input_scaleshift.in_port(0).connect(prev_lstm_state.out_port(0))
    input_as_const(state_input_scaleshift, state_input_scaleshift_attrs, 1, 'weights', node.input_gate_weights)

    # *Memory(state) -> *ScaleShift(forget)
    state_forget_scaleshift_attrs = {'name': 'forget_scaleshift',
                                     'bias_term': False
                                     }
    state_forget_scaleshift = ScaleShiftOp(graph, state_forget_scaleshift_attrs).create_node()
    state_forget_scaleshift.in_port(0).connect(prev_lstm_state.out_port(0))
    input_as_const(state_forget_scaleshift, state_forget_scaleshift_attrs, 1, 'weights', node.forget_gate_weights)

    # Split                                \
    #                                      (2)Eltwise(sum)
    # Memory(state) -> *ScaleShift(input)  /
    join_prev_lstm_input_joined_input_sum = Add(graph, {'name': 'join_prev_lstm_input_joined_input_eltwise'
                                                        }).create_node()
    join_prev_lstm_input_joined_input_sum.in_port(0).connect(split_joined_input.out_port(1))
    join_prev_lstm_input_joined_input_sum.in_port(1).connect(state_input_scaleshift.out_port(0))

    # Split                                 \
    #                                       (3)Eltwise(sum)
    # Memory(state) -> *ScaleShift(forget)  /
    join_prev_lstm_input_joined_forget_sum = Add(graph, {'name': 'join_prev_lstm_input_joined_forget_sum',
                                                         }).create_node()
    join_prev_lstm_input_joined_forget_sum.in_port(0).connect(split_joined_input.out_port(2))
    join_prev_lstm_input_joined_forget_sum.in_port(1).connect(state_forget_scaleshift.out_port(0))

    # Split -> Tanh
    remember_tanh = Tanh(graph, {'name': 'remember_tanh'}).create_node()
    remember_tanh.in_port(0).connect(split_joined_input.out_port(0))

    # Split -> (2)Eltwise(sum) -> *Sigmoid
    remember_sigmoid = Sigmoid(graph, {'name': 'remember_sigmoid'}).create_node()
    remember_sigmoid.in_port(0).connect(join_prev_lstm_input_joined_input_sum.out_port(0))

    # Split -> (3)Eltwise(sum) -> **Sigmoid
    forget_sigmoid = Sigmoid(graph, {'name': 'forget_sigmoid'}).create_node()
    forget_sigmoid.in_port(0).connect(join_prev_lstm_input_joined_forget_sum.out_port(0))

    # *Memory(state)                        \
    #                                       (6)Eltwise(mul)
    # Split -> (3)Eltwise(sum) -> **Sigmoid /
    join_forget_prev_state_mul = Mul(graph, {'name': 'join_forget_prev_state_mul'}).create_node()
    join_forget_prev_state_mul.in_port(0).connect(forget_sigmoid.out_port(0))
    join_forget_prev_state_mul.in_port(1).connect(prev_lstm_state.out_port(0))

    # Split -> Tanh                         \
    #                                       (5)Eltwise(mul)
    # Split -> (2)Eltwise(sum) -> *Sigmoid  /
    join_remember_candidates_mul = Mul(graph, {'name': 'join_remember_candidates_mul'}).create_node()
    join_remember_candidates_mul.in_port(0).connect(remember_tanh.out_port(0))
    join_remember_candidates_mul.in_port(1).connect(remember_sigmoid.out_port(0))

    # (5)Eltwise(mul) \
    #                 (7)Eltwise(sum)
    # (6)Eltwise(mul) /
    join_forget_remember_sum = Add(graph, {'name': 'join_forget_remember_sum'}).create_node()
    join_forget_remember_sum.in_port(0).connect(join_forget_prev_state_mul.out_port(0))
    join_forget_remember_sum.in_port(1).connect(join_remember_candidates_mul.out_port(0))

    # (7)Eltwise(sum) -> Clamp
    join_forget_clamp = create_op_with_const_inputs(graph, Clamp,
                                                    {1: np.array(-node.clip_value, dtype=np.float32),
                                                     2: np.array(node.clip_value, dtype=np.float32)},
                                                    {'name': 'join_forget_clamp'},
                                                    join_forget_remember_sum)

    # Clamp -> (2)Memory(state)
    next_lstm_state = Assign(graph, {'name': 'next_lstm_state',
                                     'variable_id': memory_pair_output
                                     }).create_node()
    next_lstm_state.in_port(0).connect(join_forget_clamp.out_port(0))

    res_node = Result(graph, {'name': 'next_lstm_state_out'}).create_node()
    res_node.in_port(0).connect(next_lstm_state.out_port(0))

    # Clamp -> (2)Tanh
    state_filtered_tanh = Tanh(graph, {'name': 'state_filtered_tanh'}).create_node()
    state_filtered_tanh.in_port(0).connect(join_forget_clamp.out_port(0))

    # Clamp -> (2)ScaleShift
    clamp_scaleshift_attrs = {'name': 'clamp_scaleshift',
                              'bias_term': False
                              }
    clamp_scaleshift = ScaleShiftOp(graph, clamp_scaleshift_attrs).create_node()
    clamp_scaleshift.in_port(0).connect(join_forget_clamp.out_port(0))
    input_as_const(clamp_scaleshift, clamp_scaleshift_attrs, 1, 'weights', node.output_gate_weights)

    # Split                  \
    #                        (4)Eltwise(sum)
    # Clamp -> (2)ScaleShift /
    join_next_lstm_input_joined_input_sum = Add(graph, {'name': 'join_next_lstm_input_joined_input_sum',
                                                        }).create_node()
    join_next_lstm_input_joined_input_sum.in_port(0).connect(split_joined_input.out_port(3))
    join_next_lstm_input_joined_input_sum.in_port(1).connect(clamp_scaleshift.out_port(0))

    # (4)Eltwise(sum) -> (3)Sigmoid
    output_sigmoid = Sigmoid(graph, {'name': 'output_sigmoid'}).create_node()
    output_sigmoid.in_port(0).connect(join_next_lstm_input_joined_input_sum.out_port(0))

    # (4)Eltwise(sum) -> (3)Sigmoid \
    #                               (5)Eltwise(mul)
    # Clamp -> (2)Tanh              /
    joined_output_mul = Mul(graph, {'name': 'joined_output_mul'}).create_node()
    joined_output_mul.in_port(0).connect(state_filtered_tanh.out_port(0))
    joined_output_mul.in_port(1).connect(output_sigmoid.out_port(0))

    # (5)Eltwise(mul) -> (3)FullyConnected
    fc_output_attrs = {'name': 'FullyConnected',
                       'out-size': node.projection_weights_shape[0],
                       'transpose_weights': True,
                       'bias_term': False
                       }
    fc_output = FullyConnected(graph, fc_output_attrs).create_node()
    fc_output.in_port(0).connect(joined_output_mul.out_port(0))
    input_as_const(fc_output, fc_output_attrs, 1, 'weights', node.projection_weights)

    #                   / (2)Memory(output)
    # (3)FullyConnected
    #                   \ Output (any next node) (edge created automatically after replacement)
    next_lstm_output = Assign(graph, {'name': 'next_lstm_output',
                                      'variable_id': memory_pair_input
                                      }).create_node()
    next_lstm_output.in_port(0).connect(fc_output.out_port(0))

    res_node_lstm_output = Result(graph, {'name': 'next_lstm_output_out'}).create_node()
    res_node_lstm_output.in_port(0).connect(next_lstm_output.out_port(0))

    return [fc_output.id]
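# Illustrative note (not part of the replacers above): a NumPy sketch of the recurrence the
# sub-graph appears to express, assuming the joined GIFO output is split in the order
# candidate/input/forget/output (Split ports 0..3, matching the Tanh and Eltwise wiring in the
# comments) and that the ScaleShift nodes are per-element peephole terms. All names and weight
# values below are hypothetical placeholders.
import numpy as np

def sigmoid(a):
    return 1.0 / (1.0 + np.exp(-a))

def lstm_projected_step(x, r_prev, c_prev, W_x, W_r, b, w_i, w_f, w_o, W_p, clip):
    gifo = W_x @ x + W_r @ r_prev + b              # input FC + recurrent FC + biases
    g, i, f, o = np.split(gifo, 4)                 # Split into 4 equal chunks
    i_gate = sigmoid(i + w_i * c_prev)             # input gate with peephole ScaleShift
    f_gate = sigmoid(f + w_f * c_prev)             # forget gate with peephole ScaleShift
    c = np.clip(f_gate * c_prev + i_gate * np.tanh(g), -clip, clip)   # Clamp on the new state
    o_gate = sigmoid(o + w_o * c)                  # output gate peephole on the clipped state
    m = o_gate * np.tanh(c)
    return W_p @ m, c                              # projection FC output, next cell state

cell, proj = 4, 3
x = np.random.rand(5).astype(np.float32)
r_prev, c_prev = np.zeros(proj, np.float32), np.zeros(cell, np.float32)
W_x = np.random.rand(4 * cell, 5).astype(np.float32)
W_r = np.random.rand(4 * cell, proj).astype(np.float32)
b = np.zeros(4 * cell, np.float32)
w_i = w_f = w_o = np.ones(cell, np.float32)
W_p = np.random.rand(proj, cell).astype(np.float32)
r, c = lstm_projected_step(x, r_prev, c_prev, W_x, W_r, b, w_i, w_f, w_o, W_p, clip=50.0)
assert r.shape == (proj,) and c.shape == (cell,)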
def replace_pattern(graph: Graph, match: dict):
    node = match['matmul']
    name = node.soft_get('name', node.id)

    A_shape = node.in_port(0).data.get_shape()
    B_shape = node.in_port(1).data.get_shape()
    out_shape = node.out_port(0).data.get_shape()
    assert A_shape is not None and B_shape is not None and out_shape is not None

    B_value = node.in_port(1).data.get_value()
    if (B_value is not None or node.in_port(1).get_source().node.has_and_set('stop_value_propagation')) and \
            B_shape[B_shape != 1].size <= 2:
        # transferring from MatMul representation: [B, I, K] * [B, K, O] = [B, I, O]
        # to FullyConnected representation: [I, K] * [O, K] = [I, O]
        B, I, K, O, aligned_A_shape, aligned_B_shape = MatMulToFullyConnected.get_matmul_BIKO(node)

        # weights normalization
        if not node.transpose_b:
            # FullyConnected weights layout is OI
            # MatMul second input layout is (B)IO
            transpose_order = list(range(B_shape.size))
            transpose_order[-1], transpose_order[-2] = transpose_order[-2], transpose_order[-1]
            order = Const(graph, {'value': int64_array(transpose_order)}).create_node()
            transpose = Transpose(graph, {'name': name + '/weights_transpose'}).create_node()

            weights_source = node.in_port(1).get_source()
            node.in_port(1).get_connection().set_source(transpose.out_port(0))
            transpose.in_port(0).connect(weights_source)
            transpose.in_port(1).connect(order.out_port(0))

            order.infer(order)
            transpose.infer(transpose)

        if node.in_port(1).data.get_shape().size != 2:
            const = Const(graph, {'value': int64_array([-1, K])}).create_node()
            reshape = Reshape(graph, {'name': name + '/weights_reshape'}).create_node()

            weights_source = node.in_port(1).get_source()
            node.in_port(1).get_connection().set_source(reshape.out_port(0))
            reshape.in_port(0).connect(weights_source)
            reshape.in_port(1).connect(const.out_port(0))

            const.infer(const)
            reshape.infer(reshape)

        assert np.all(np.array_equal(node.in_port(1).data.get_shape(), int64_array([O, K]))), \
            "MatMul `{}` was not converted to FullyConnected: wrong weights shape: {}, " \
            "B={}, I={}, K={}, O={}".format(name, node.in_port(1).data.get_shape(), B, I, K, O)

        node.in_port(1).bin = 'weights'
        del node['transpose_b']

        # input normalization
        if node.transpose_a:
            transpose_order = list(range(A_shape.size))
            transpose_order[-1], transpose_order[-2] = transpose_order[-2], transpose_order[-1]
            order = Const(graph, {'value': int64_array(transpose_order)}).create_node()
            transpose = Transpose(graph, {'name': name + '/input_transpose'}).create_node()

            input_source = node.in_port(0).get_source()
            node.in_port(0).get_connection().set_source(transpose.out_port(0))
            transpose.in_port(0).connect(input_source)
            transpose.in_port(1).connect(order.out_port(0))

            order.infer(order)
            transpose.infer(transpose)

        if A_shape.size != 2:
            const = Const(graph, {'value': int64_array([-1, K])}).create_node()
            reshape = Reshape(graph, {'name': name + '/input_reshape'}).create_node()

            input_source = node.in_port(0).get_source()
            node.in_port(0).get_connection().set_source(reshape.out_port(0))
            reshape.in_port(0).connect(input_source)
            reshape.in_port(1).connect(const.out_port(0))

            const.infer(const)
            reshape.infer(reshape)

        assert np.all(np.array_equal(node.in_port(0).data.get_shape(), int64_array([np.prod(B) * I, K]))), \
            "MatMul `{}` was not converted to FullyConnected: wrong input shape: {}, " \
            "B={}, I={}, K={}, O={}".format(name, node.in_port(0).data.get_shape(), B, I, K, O)

        del node['transpose_a']

        FullyConnected.update_node_stat(node, {'out-size': O})

        # output normalization
        if out_shape.size != 2:
            const = Const(graph, {'value': int64_array([*B, I, O])}).create_node()
            reshape = Reshape(graph, {'name': name + '/output_reshape'}).create_node()

            dst = node.out_port(0).get_destination()
            node.out_port(0).get_connection().set_destination(reshape.in_port(0))
            const.out_port(0).connect(reshape.in_port(1))
            reshape.out_port(0).connect(dst)

            node.infer(node)
            const.infer(const)
            reshape.infer(reshape)
    else:
        assert A_shape.size == out_shape.size
        assert B_shape.size <= out_shape.size
        if B_shape.size != out_shape.size:
            unsqueeze_dim = Const(graph, {'value': int64_array(list(range(out_shape.size - B_shape.size)))
                                          }).create_node()
            unsqueeze = Unsqueeze(graph, {}).create_node()

            B_source = node.in_port(1).get_source()
            node.in_port(1).get_connection().set_source(unsqueeze.out_port(0))
            unsqueeze.in_port(0).connect(B_source)
            unsqueeze.in_port(1).connect(unsqueeze_dim.out_port(0))

            unsqueeze_dim.infer(unsqueeze_dim)
            unsqueeze.infer(unsqueeze)

        Gemm.update_node_stat(node, {
            'transpose_a': node.has_and_set('transpose_a'),
            'transpose_b': node.has_and_set('transpose_b'),
        })
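# Illustrative note (not part of the transformation above): the shape bookkeeping behind the
# constant-second-input branch, checked with NumPy. A batched MatMul [B, I, K] x [K, O] gives
# the same result as flattening the input to [B*I, K], multiplying by the OI-transposed weights
# [O, K] (the FullyConnected layout), and reshaping back to [B, I, O]. Shapes are arbitrary
# example values.
import numpy as np

B, I, K, O = 2, 3, 4, 5
A = np.random.rand(B, I, K)
W_io = np.random.rand(K, O)            # MatMul second input, (B)IO layout
W_oi = W_io.T                          # FullyConnected weights, OI layout

matmul_out = A @ W_io                                       # [B, I, O]
fc_out = (A.reshape(-1, K) @ W_oi.T).reshape(B, I, O)       # reshape -> FC -> reshape
assert np.allclose(matmul_out, fc_out)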