Example #1
    def extract(node):
        param = node.pb.elu_param
        attrs = collect_attributes(param)
        attrs['operation'] = 'elu'

        Activation.update_node_stat(node, attrs)
        return ELUFrontExtractor.enabled
Example #2
 def test_activation_elu_infer(self):
     graph = build_graph(self.nodes_attributes,
                         [
                             ('node_1', 'activation_node'),
                             ('activation_node', 'node_3')
                         ],
                         {
                             'node_1': {
                                 'value': np.array([6, -4, -2, -1])
                             },
                             'activation_node': {
                                 'operation': 'elu',
                                 'alpha': 1.0,
                             },
                             'node_3': {
                                 'value': None
                             }
                         })
     graph.graph['layout'] = 'NCHW'
     activation_node = Node(graph, 'activation_node')
     Activation.infer(activation_node)
     exp_shape = np.array([227, 227, 227, 227])
     res_shape = graph.node['node_3']['shape']
     res_value = graph.node['node_3']['value']
     exp_value = np.array([6., -0.98168436, -0.86466472, -0.63212056])
     for i, value in enumerate(exp_shape):
         self.assertEqual(res_shape[i], value)
     for i, value in enumerate(exp_value):
         self.assertAlmostEqual(res_value[i], value)
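For reference, the expected values in this test follow the ELU definition applied by Activation.infer: x for x > 0, otherwise alpha * (exp(x) - 1). A minimal NumPy sketch (not part of the test suite) that reproduces the numbers:

import numpy as np

x = np.array([6, -4, -2, -1], dtype=np.float64)
alpha = 1.0
# ELU: identity for positive inputs, alpha * (exp(x) - 1) for the rest
elu = np.where(x > 0, x, alpha * np.expm1(x))
# -> [6., -0.98168436, -0.86466472, -0.63212056], matching exp_value above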
Example #3
def add_activation_function_after_node(graph: Graph, node: Node,
                                       activation_function: str):
    """
    The function adds a node with the activation function defined by the string 'activation_function', which takes its
    input from the node 'node'.
    :param graph: graph to operate on.
    :param node: node to add activation after.
    :param activation_function: string defining the activation function. The value is read from the TensorFlow*
    Object Detection API pipeline configuration file.
    :return: activation function node.
    """
    if activation_function == 'SOFTMAX':
        # softmax to be applied to the confidence
        softmax_conf_op = Softmax(graph, dict(axis=-1, nchw_layout=True))
        activation_node = softmax_conf_op.create_node([node],
                                                      dict(name=node.name +
                                                           '/softmax'))
    elif activation_function == 'SIGMOID':
        # sigmoid activation function to be applied to the confidence
        sigmoid_conf_op = Activation(
            graph, dict(operation='sigmoid', nchw_layout=True))
        activation_node = sigmoid_conf_op.create_node([node],
                                                      dict(name=node.name +
                                                           '/sigmoid'))
    elif activation_function == 'IDENTITY':
        # in case of Identity do nothing and just use result from the input node
        activation_node = node
    else:
        raise Error('Unknown post-processing activation function "{}".'.format(
            activation_function))
    return activation_node
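A usage sketch for this helper (the variable names below are hypothetical; in practice the graph and input node come from the matched subgraph being replaced):

# Hypothetical call site: `graph` and `conf_node` are assumed to already exist.
activation = add_activation_function_after_node(graph, conf_node, 'SIGMOID')
# For 'IDENTITY' the helper returns the input node itself, so the caller can
# always wire downstream nodes to `activation` without special-casing.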
Example #4
 def test_activation_infer(self):
     graph = build_graph(self.nodes_attributes,
                         [
                             ('node_1', 'activation_node'),
                             ('activation_node', 'node_3')
                         ],
                         {
                             'node_1': {
                                 'value': np.array([0, 7, 3, -1])
                             },
                             'activation_node': {
                                 'operation': 'relu6'
                             },
                             'node_3': {
                                 'value': None
                             }
                         })
     graph.graph['layout'] = 'NCHW'
     activation_node = Node(graph, 'activation_node')
     Activation.infer(activation_node)
     exp_shape = np.array([227, 227, 227, 227])
     res_shape = graph.node['node_3']['shape']
     res_value = graph.node['node_3']['value']
     exp_value = np.array([0, 6, 3, 0])
     for i, value in enumerate(exp_shape):
         self.assertEqual(res_shape[i], value)
     for i, value in enumerate(exp_value):
         self.assertEqual(res_value[i], value)
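The relu6 expectation can be reproduced the same way: relu6 clips its input to the range [0, 6]. A minimal NumPy sketch (not part of the test suite):

import numpy as np

x = np.array([0, 7, 3, -1])
relu6 = np.minimum(np.maximum(x, 0), 6)
# -> [0, 6, 3, 0], matching exp_value above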
Example #5
    def extract(node):
        attrs = get_mxnet_layer_attrs(node.symbol_dict)
        act_type = attrs.str('act_type', 'leaky')
        if act_type == 'prelu':
            prelu_attrs = {
                'channel_shared': 1,
                'filler_type': 'constant',
                'filler_value': 0,
                'min': 0,
                'max': 1,
                'mean': 0,
                'std': 0,
                'sparse': -1,
                'variance_norm': "caffe.FillerParameter.FAN_IN"
            }
            PreluOp.update_node_stat(node, prelu_attrs)
        elif act_type == 'elu':
            Activation.update_node_stat(node, {'operation': act_type})
        elif act_type == 'leaky':
            negative_slope = attrs.float('slope', 0.25)
            ReLU.update_node_stat(node, {'negative_slope': negative_slope})
        else:
            raise Error(
                "Operation '{}' not supported. Please register it as custom op. "
                + refer_to_faq_msg(86), act_type)

        return LeakyReLUFrontExtractor.enabled
Example #6
    def extract(node):
        attrs = get_mxnet_layer_attrs(node.symbol_dict)
        act_type = attrs.str('act_type', 'relu')
        if act_type == 'sigmoid' or act_type == 'tanh':
            Activation.update_node_stat(node, {'operation': act_type})
        elif act_type == 'relu':
            ReLU.update_node_stat(node)
        else:
            raise Error(
                "Operation '{}' not supported. Please register it as custom op. "
                + refer_to_faq_msg(86), act_type)

        return ActivationFrontExtractor.enabled
Example #7
 def extract(node):
     Activation.update_node_stat(node, {'operation': 'tanh'})
     return Tanh.enabled
Example #8
 def extract(node):
     alpha = onnx_attr(node, 'alpha', 'f', default=1.0)
     Activation.update_node_stat(node, {'operation': 'elu', 'alpha': alpha})
     return EluFrontExtractor.enabled
Example #9
 def extract(node):
     Activation.update_node_stat(node, {'operation': 'sigmoid'})
     return Sigmoid.enabled
Example #10
    def replace_op(self, graph: Graph, node: Node):
        input_node = node.in_node()

        memory_pair_input = unique_id('id')
        memory_pair_output = unique_id('id')

        # Input -> FullyConnected
        fc_layer_after_input_attrs = {
            'name': 'input_fullyconnected',
            'num_output': node.gifo_x_weights_shape[0],
            'bias_term': True
        }

        embed_input(fc_layer_after_input_attrs, 1, 'weights',
                    node.gifo_x_weights)
        embed_input(fc_layer_after_input_attrs, 2, 'biases', node.gifo_biases)
        fc_layer_after_input = InnerProduct(
            graph, fc_layer_after_input_attrs).create_node([input_node])

        prev_lstm_output = Memory(
            graph, {
                'name': 'prev_memory_output',
                'id': memory_pair_input,
                'index': 1,
                'size': 2,
                'shape': np.array([node.gifo_r_weights_shape[1]],
                                  dtype=np.int64)
            }).create_node()

        # *Memory(output) -> FullyConnected
        fc_layer_from_prev_state_attrs = {
            'name': 'prev_memory_output_fullyconnected',
            'num_output': node.gifo_r_weights_shape[0],
            'bias_term': False
        }

        embed_input(fc_layer_from_prev_state_attrs, 1, 'weights',
                    node.gifo_r_weights)
        fc_layer_from_prev_state = InnerProduct(
            graph,
            fc_layer_from_prev_state_attrs).create_node([prev_lstm_output])

        # Memory -> FullyConnected  \
        #                           *Eltwise(sum)
        # Input -> FullyConnected   /
        join_input_prev_state_sum = Eltwise(graph, {
            'name': 'join_input_eltwise',
            'operation': 'sum'
        }).create_node([fc_layer_from_prev_state, fc_layer_after_input])

        # *Eltwise(sum) -> Split
        # it feeds 4 branches: one Tanh activation and three Eltwise(sum) nodes
        # the following order is mandatory
        #       ___Tanh
        #      /
        # Split ---(2)Eltwise(sum)
        #     |\
        #     | \__(3)Eltwise(sum)
        #     |____(4)Eltwise(sum)
        split_joined_input = Split(
            graph, {
                'name': 'join_input_split',
                'axis': 1,
                'num_split': 4,
                'out_ports_count': 4,
            }).create_node([join_input_prev_state_sum])

        prev_lstm_state = Memory(
            graph, {
                'name': 'prev_memory_state',
                'id': memory_pair_output,
                'index': 1,
                'size': 2,
                'shape': np.array([node.input_gate_weights.shape[0]],
                                  dtype=np.int64)
            }).create_node()

        # *Memory(state) -> *ScaleShift(input)
        state_input_scaleshift_attrs = {
            'name': 'input_scaleshift',
            'bias_term': False
        }
        embed_input(state_input_scaleshift_attrs, 1, 'weights',
                    node.input_gate_weights)
        state_input_scaleshift = ScaleShiftOp(
            graph, state_input_scaleshift_attrs).create_node([prev_lstm_state])

        # *Memory(state) -> *ScaleShift(forget)
        state_forget_scaleshift_attrs = {
            'name': 'forget_scaleshift',
            'bias_term': False
        }
        embed_input(state_forget_scaleshift_attrs, 1, 'weights',
                    node.forget_gate_weights)
        state_forget_scaleshift = ScaleShiftOp(
            graph,
            state_forget_scaleshift_attrs).create_node([prev_lstm_state])

        # Split                                 \
        #                                       (2)Eltwise(sum)
        # Memory(state) -> *ScaleShift(input)  /
        join_prev_lstm_input_joined_input_sum = Eltwise(
            graph, {
                'name': 'join_prev_lstm_input_joined_input_eltwise',
                'operation': 'sum'
            }).create_node([(split_joined_input, 1), state_input_scaleshift])
        # Split                                 \
        #                                       (3)Eltwise(sum)
        # Memory(state) -> *ScaleShift(forget)  /
        join_prev_lstm_input_joined_forget_sum = Eltwise(
            graph, {
                'name': 'join_prev_lstm_input_joined_forget_sum',
                'operation': 'sum'
            }).create_node([(split_joined_input, 2), state_forget_scaleshift])

        # Split -> Tanh
        remember_tahn = Activation(graph, {
            'name': 'remember_tahnv',
            'operation': 'tanh'
        }).create_node([(split_joined_input, 0)])

        # Split -> (2)Eltwise(sum) -> *Sigmoid
        remember_sigmoid = Activation(graph, {
            'name': 'remember_sigmoid',
            'operation': 'sigmoid'
        }).create_node([join_prev_lstm_input_joined_input_sum])

        # Split -> (3)Eltwise(sum) -> **Sigmoid
        forget_sigmoid = Activation(graph, {
            'name': 'forget_sigmoid',
            'operation': 'sigmoid'
        }).create_node([join_prev_lstm_input_joined_forget_sum])

        # *Memory(state)                        \
        #                                       (6)Eltwise(mul)
        # Split -> (3)Eltwise(sum) -> **Sigmoid /
        join_forget_prev_state_mul = Eltwise(graph, {
            'name': 'join_forget_prev_state_mul',
            'operation': 'mul'
        }).create_node([forget_sigmoid, prev_lstm_state])

        # Split -> Tanh                         \
        #                                       (5)Eltwise(mul)
        # Split -> (2)Eltwise(sum) -> *Sigmoid   /
        join_remember_candidates_mul = Eltwise(graph, {
            'name': 'join_remember_candidates_mul',
            'operation': 'mul'
        }).create_node([remember_tahn, remember_sigmoid])

        # (5)Eltwise(mul)  \
        #               (7)Eltwise(sum)
        # (6)Eltwise(mul)   /
        join_forget_remember_sum = Eltwise(graph, {
            'name': 'join_forget_remember_sum',
            'operation': 'sum'
        }).create_node(
            [join_forget_prev_state_mul, join_remember_candidates_mul])

        # (7)Eltwise(sum) -> Clamp
        join_forget_clamp = Clamp(
            graph, {
                'name': 'join_forget_clamp',
                'max': node.clip_value,
                'min': -node.clip_value
            }).create_node([join_forget_remember_sum])
        #
        # Clamp -> (2)Memory(state)
        Memory(
            graph, {
                'name': 'next_lstm_state',
                'id': memory_pair_output,
                'index': 0,
                'size': 2,
                'shape': np.array([node.input_gate_weights.shape[0]],
                                  dtype=np.int64)
            }).create_node([join_forget_clamp])

        # Clamp -> (2)Tanh
        state_filtered_tahn = Activation(graph, {
            'name': 'state_filtered_tahn',
            'operation': 'tanh'
        }).create_node([join_forget_clamp])

        # Clamp -> (2)ScaleShift
        clamp_scaleshift_attrs = {
            'name': 'clamp_scaleshift',
            'bias_term': False
        }
        embed_input(clamp_scaleshift_attrs, 1, 'weights',
                    node.output_gate_weights)
        clamp_scaleshift = ScaleShiftOp(
            graph, clamp_scaleshift_attrs).create_node([join_forget_clamp])

        # Split                 \
        #                       (4)Eltwise(sum)
        # Clamp -> (2)ScaleShift /
        join_next_lstm_input_joined_input_sum = Eltwise(
            graph, {
                'name': 'join_next_lstm_input_joined_input_sum',
                'operation': 'sum'
            }).create_node([(split_joined_input, 3), clamp_scaleshift])

        # (4)Eltwise(sum) -> (3)Sigmoid
        output_sigmoid = Activation(graph, {
            'name': 'output_sigmoid',
            'operation': 'sigmoid'
        }).create_node([join_next_lstm_input_joined_input_sum])

        # (4)Eltwise(sum) -> (3)Sigmoid         \
        #                                       (5)Eltwise(mul)
        # Clamp -> (2)Tanh                      /
        joined_output_mul = Eltwise(graph, {
            'name': 'joined_output_mul',
            'operation': 'mul'
        }).create_node([state_filtered_tahn, output_sigmoid])

        # (5)Eltwise(mul) -> (3)FullyConnected
        fc_output_attrs = {
            'name': 'FullyConnected',
            'num_output': node.projection_weights_shape[0],
            'bias_term': False
        }
        embed_input(fc_output_attrs, 1, 'weights', node.projection_weights)
        fc_output = InnerProduct(graph, fc_output_attrs).create_node(
            [joined_output_mul])

        #                   / (2)Memory(output)
        # (3)FullyConnected
        #                   \ Output (any next node) (edge created automatically after replacement)
        Memory(
            graph, {
                'name': 'next_lstm_output',
                'id': memory_pair_input,
                'index': 0,
                'size': 2,
                'shape': np.array([node.gifo_r_weights_shape[1]],
                                  dtype=np.int64)
            }).create_node([fc_output])

        return [fc_output.id]
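For orientation, the subgraph assembled by this replacer computes a projected LSTM cell with peephole connections and cell-state clipping (the gifo_* attribute names suggest the Kaldi LstmProjected layer). A rough NumPy sketch of the same per-step arithmetic (names here are illustrative, not the node names used above):

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def lstm_step(x, r_prev, c_prev, W_x, W_r, b, w_i, w_f, w_o, W_proj, clip):
    # joint g/i/f/o pre-activations from the input and the recurrent projection
    g, i, f, o = np.split(W_x @ x + W_r @ r_prev + b, 4)
    i = sigmoid(i + w_i * c_prev)                          # input gate with peephole
    f = sigmoid(f + w_f * c_prev)                          # forget gate with peephole
    c = np.clip(f * c_prev + i * np.tanh(g), -clip, clip)  # clamped new cell state
    o = sigmoid(o + w_o * c)                               # output gate, peephole on new state
    return W_proj @ (o * np.tanh(c)), c                    # projected output and cell state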
Example #11
 def extract(node):
     Activation.update_node_stat(node, {'operation': 'relu6'})
     return True
Example #12
 def replace_pattern(self, graph: Graph, match: [str, Node]):
     node = match['activation']
     Activation.update_node_stat(node, dict(operation=node.type.lower()))
Example #13
 def extract(node):
     Activation.update_node_stat(node, {'operation': 'elu'})
     return Elu.enabled
Example #14
 def extract(node):
     Activation.update_node_stat(node, {'operation': 'exp'})
     return __class__.enabled