Example #1
 def apply(self, model):
     graph = model.graph
     node_ind = 0
     graph_modified = False
     execution_context = model.make_empty_exec_context()
     for n in graph.node:
         node_ind += 1
         node_inp_inits = list(map(lambda x: model.get_initializer(x), n.input))
         node_inp_dyn = list(filter(lambda x: x is None, node_inp_inits))
         node_out = n.output[0]
         is_all_constant_inputs = len(node_inp_dyn) == 0
         ishape = model.get_tensor_shape(n.input[0])
         is_const_shape = (n.op_type == "Shape") and (ishape is not None)
         if is_all_constant_inputs or is_const_shape:
             # this node has no dynamic inputs, only constant ones -- so we can
             # do constant folding.
             oxe.execute_node(n, execution_context, graph)
             # use the execution result as an initializer
             model.set_initializer(node_out, execution_context[node_out])
             # remove old node
             graph.node.remove(n)
             graph_modified = True
     if graph_modified:
         model = model.transform(InferShapes())
     return (model, graph_modified)
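These apply() bodies follow the qonnx/FINN-style ModelWrapper transformation pattern (model.get_initializer, model.make_empty_exec_context, oxe.execute_node, model.transform). Example #1 folds every node whose inputs are all initializers (plus Shape nodes with a known input shape) by executing it once and storing the result as an initializer. A minimal usage sketch is given below; the class name FoldConstants and the import paths are assumptions, not taken from the snippet itself.

    # hedged sketch: import paths and class name are assumed, not from the snippet
    from qonnx.core.modelwrapper import ModelWrapper
    from qonnx.transformation.fold_constants import FoldConstants

    model = ModelWrapper("model.onnx")
    # transform() keeps re-invoking apply() while the returned graph_modified
    # flag is True, so nodes exposed as constant by earlier folds are folded too
    model = model.transform(FoldConstants())
    model.save("model_folded.onnx")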
Example #2
 def apply(self, model):
     graph = model.graph
     node_ind = 0
     graph_modified = False
     for n in graph.node:
         node_ind += 1
         if n.op_type == "Add":
             consumer = model.find_consumer(n.output[0])
             if consumer is not None and consumer.op_type == "Conv":
                 conv_node = consumer
                 add_node = n
                 add_weight_name = n.input[1]
                 conv_in_name = consumer.input[0]
                 conv_in_shape = model.get_tensor_shape(conv_in_name)
                 A = model.get_initializer(add_weight_name)
                 assert A is not None, "Initializer for add weights is not set."
                 start_name = n.input[0]
                 end_name = consumer.output[0]
                 conv_out_shape = model.get_tensor_shape(end_name)
                 if all(x == 1 for x in A.shape):
                     # create a tensor filled with the add constant, in
                     # the shape expected by the convolution
                     conv_in_const = np.zeros(conv_in_shape,
                                              dtype=np.float32)
                     conv_in_const.fill(A.item())
                     # create an execution context and put in const input
                     exec_ctx = model.make_empty_exec_context()
                     exec_ctx[conv_in_name] = conv_in_const
                     # execute the conv node only
                     execute_node(conv_node, exec_ctx, model.graph)
                     # retrieve the conv output
                     Anew = exec_ctx[end_name]
                     # strip out repetition
                     Anew = Anew[0, :, 0, 0].reshape(1, -1, 1, 1)
                     # update the add weight
                     model.set_initializer(add_weight_name, Anew)
                     # rewire add input to be conv input
                     conv_node.input[0] = start_name
                     model.set_tensor_shape(start_name, conv_in_shape)
                     # use old conv input tensor as conv output
                     conv_node.output[0] = conv_in_name
                     model.set_tensor_shape(conv_in_name, conv_out_shape)
                     # use new conv output as new add node input
                     add_node.input[0] = conv_in_name
                     # use old conv output as new add node output
                     add_node.output[0] = end_name
                     # move add node past conv node
                     graph.node.remove(add_node)
                     graph.node.insert(node_ind, add_node)
                     graph_modified = True
     model = model.transform(InferShapes())
     return (model, graph_modified)
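Example #2 moves a scalar Add past a following Conv. The trick of executing the Conv on a constant-filled tensor and keeping only Anew[0, :, 0, 0] works because convolution is linear and, without padding, the convolution of a constant input is the same at every spatial position, so it collapses to a per-channel constant for the relocated Add. A small self-contained numpy check of that identity (not from the source; a naive stride-1, unpadded NCHW convolution is used):

    import numpy as np

    def conv2d_valid(x, w):
        # naive NCHW cross-correlation, stride 1, no padding
        n, cin, h, wd = x.shape
        cout, _, kh, kw = w.shape
        out = np.zeros((n, cout, h - kh + 1, wd - kw + 1), dtype=np.float32)
        for oc in range(cout):
            for i in range(out.shape[2]):
                for j in range(out.shape[3]):
                    out[:, oc, i, j] = np.sum(
                        x[:, :, i:i + kh, j:j + kw] * w[oc], axis=(1, 2, 3))
        return out

    x = np.random.rand(1, 3, 8, 8).astype(np.float32)
    w = np.random.rand(4, 3, 3, 3).astype(np.float32)
    c = np.float32(0.5)
    before = conv2d_valid(x + c, w)                  # Add in front of Conv
    offset = conv2d_valid(np.full_like(x, c), w)     # Conv of the constant input
    # without padding the offset is spatially uniform, so one value per channel
    # (this is what the pass stores back into the Add weight as Anew)
    after = conv2d_valid(x, w) + offset[0, :, 0, 0].reshape(1, -1, 1, 1)
    assert np.allclose(before, after, atol=1e-4)     # Add moved past Conv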
Example #3
    def apply(self, model):
        graph = model.graph
        node_ind = 0
        graph_modified = False
        for n in graph.node:
            node_ind += 1
            if (n.op_type == "Add" and not model.is_fork_node(n)
                    and not model.is_join_node(n)):
                consumer = model.find_consumer(n.output[0])
                if (consumer is not None and consumer.op_type == "Conv"
                        and not model.is_join_node(consumer)):
                    conv_node = consumer
                    add_node = n
                    add_weight_name = n.input[1]
                    conv_in_name = consumer.input[0]
                    conv_in_shape = model.get_tensor_shape(conv_in_name)
                    # assume data layout to be NCHW
                    channels = conv_in_shape[1]
                    A = model.get_initializer(add_weight_name)
                    if A is None:
                        warnings.warn("Add param is not constant, skipping")
                        continue
                    start_name = n.input[0]
                    end_name = consumer.output[0]
                    conv_out_shape = model.get_tensor_shape(end_name)

                    using_padding = True
                    pads = list(get_by_name(consumer.attribute, "pads").ints)
                    if sum(pads) == 0:
                        using_padding = False
                    if ((all(x == 1 for x in A.shape)
                            or A.shape == (1, channels, 1, 1))
                            and not using_padding):
                        # create a tensor filled with the add constant, in
                        # the shape expected by the convolution
                        conv_in_const = np.zeros(conv_in_shape,
                                                 dtype=np.float32)
                        if A.shape == (1, channels, 1, 1):
                            for ch in range(channels):
                                conv_in_const[0][ch].fill(A[0][ch].item())
                        else:
                            conv_in_const.fill(A.item())
                        # create an execution context and put in const input
                        exec_ctx = model.make_empty_exec_context()
                        exec_ctx[conv_in_name] = conv_in_const
                        # execute the conv node only
                        execute_node(conv_node, exec_ctx, model.graph)
                        # retrieve the conv output
                        Anew = exec_ctx[end_name]

                        # strip out repetition if no padding
                        Anew = Anew[0, :, 0, 0].reshape(1, -1, 1, 1)
                        # update the add weight
                        model.set_initializer(add_weight_name, Anew)
                        # rewire add input to be conv input
                        conv_node.input[0] = start_name
                        model.set_tensor_shape(start_name, conv_in_shape)
                        # use old conv input tensor as conv output
                        conv_node.output[0] = conv_in_name
                        model.set_tensor_shape(conv_in_name, conv_out_shape)
                        # use new conv output as new add node input
                        add_node.input[0] = conv_in_name
                        # use old conv output as new add node output
                        add_node.output[0] = end_name
                        # move add node past conv node
                        graph.node.remove(add_node)
                        graph.node.insert(node_ind, add_node)
                        graph_modified = True

        model = model.transform(InferShapes())
        return (model, graph_modified)
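Example #3 extends Example #2 to per-channel add weights of shape (1, channels, 1, 1) and adds a guard that skips convolutions with padding. The reason for the padding guard: with zero padding, the convolution of a constant-filled input is no longer the same at every spatial position (border windows overlap the padded zeros), so the offset can no longer be expressed as the broadcastable constant the relocated Add applies. A small numpy illustration of this, not from the source (single channel, 3x3 kernel, padding 1):

    import numpy as np

    c = np.float32(0.5)
    k = np.random.rand(3, 3).astype(np.float32)            # one in/out channel
    xp = np.pad(np.full((8, 8), c, dtype=np.float32), 1)   # zero-padded constant input
    out = np.array([[np.sum(xp[i:i + 3, j:j + 3] * k) for j in range(8)]
                    for i in range(8)])
    # border outputs overlap the padded zeros, interior outputs see only c:
    assert not np.isclose(out[0, 0], out[4, 4])       # offset varies over space
    assert np.allclose(out[1:-1, 1:-1], c * k.sum())  # interior is uniform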
Example #4
    def apply(self, model):
        graph = model.graph
        node_ind = 0
        graph_modified = False
        execution_context = model.make_empty_exec_context()
        for n in graph.node:
            node_ind += 1
            if n.op_type == "Quant" or n.op_type == "BipolarQuant":
                node_inp_inits = list(
                    map(lambda x: model.get_initializer(x), n.input))
                node_inp_dyn = list(filter(lambda x: x is None,
                                           node_inp_inits))
                node_out = n.output[0]
                is_all_constant_inputs = len(node_inp_dyn) == 0
                ishape = model.get_tensor_shape(n.input[0])
                is_const_shape = (n.op_type == "Shape") and (ishape
                                                             is not None)
                if is_all_constant_inputs or is_const_shape:
                    # Check node validity
                    if n.op_type == "Quant" and not np.all(
                            model.get_initializer(n.input[2]) == 0):
                        raise ValueError(
                            "Only Quant nodes with zero-point == 0 "
                            "are currently supported.")
                    if model.is_fork_node(n):
                        raise ValueError(
                            "Weights quantized with the Quant node are not "
                            "allowed to be fork nodes node.")
                    target_node = model.find_direct_successors(n)
                    if target_node is None:
                        raise RuntimeError(
                            "Weights quantized with the Quant node must have "
                            "a successor node.")
                    else:
                        target_node = target_node[0]
                    # If there is a DebugMarker in the weight path, it needs to
                    # be removed before any further action is taken, because it
                    # interferes with how the constant folding determines how
                    # to apply scale factors, and it would not return useful
                    # information after folding anyway.
                    if target_node.op_type == "DebugMarker":
                        remove_node_and_rewire(model, target_node)
                        model = model.transform(FoldTransposeIntoQuantInit())
                        return model, True

                    # Continue with constant folding the quant node
                    scale = model.get_initializer(n.input[1])
                    unity_scale = (scale.flatten() == 1.0).all()
                    # this node has no dynamic inputs, only constant ones -- so we can
                    # do constant folding.
                    oxe.execute_node(n, execution_context, graph)
                    q_node_output = execution_context[node_out]
                    # Check we can directly constant fold
                    if unity_scale:
                        # use the execution result as an initializer
                        model.set_initializer(node_out, q_node_output)
                    else:
                        # Check next operator type
                        mul_like_nodes = ["Mul", "Div", "Conv", "MatMul"]
                        add_like_nodes = ["Add", "Sub"]
                        all_supported_ops = mul_like_nodes.copy()
                        all_supported_ops.extend(add_like_nodes)

                        if target_node.op_type not in all_supported_ops:
                            raise ValueError(
                                f"Can't constant fold Quant weight node "
                                f"into node type {target_node.op_type} "
                                f"at node: {target_node}.")

                        # For both Mul-like and Add-like consumers:
                        # move the scale factor behind the next operator
                        scale = model.get_initializer(n.input[1])
                        new_initializer = q_node_output / scale
                        # Round, to correct for floating point errors
                        new_initializer = np.round(new_initializer)
                        model.set_initializer(node_out, new_initializer)
                        q_inst = getCustomOp(n)
                        new_dtype = q_inst.get_integer_datatype(model)
                        model.set_tensor_datatype(node_out, new_dtype)

                        # Reshape scale for Conv if required
                        if target_node.op_type == "Conv" and len(
                                scale.shape) > 0:
                            bias_shape = [1] * len(scale.shape)
                            bias_shape[1] = -1
                            scale = scale.reshape(bias_shape)

                        if scale.shape == (1, ):
                            scale = scale[0]
                            mul_shape = tuple()
                        else:
                            mul_shape = scale.shape
                        mul_tensor = helper.make_tensor_value_info(
                            model.make_new_valueinfo_name(),
                            TensorProto.FLOAT,
                            mul_shape,
                        )
                        graph.value_info.append(mul_tensor)
                        model.set_initializer(mul_tensor.name, scale)

                        successor = model.find_consumers(node_out)
                        if successor is None:
                            raise RuntimeError(
                                "Can only constant fold scaled Quant weights "
                                "if a successor exists.")
                        successor = successor[0]
                        succ_output_name = successor.output[0]

                        output_shape = model.get_tensor_shape(
                            successor.output[0])
                        act_mul_tensor = helper.make_tensor_value_info(
                            model.make_new_valueinfo_name(),
                            TensorProto.FLOAT,
                            output_shape,
                        )
                        graph.value_info.append(act_mul_tensor)
                        successor.output[0] = act_mul_tensor.name

                        mul_node = helper.make_node(
                            "Mul",
                            [act_mul_tensor.name, mul_tensor.name],
                            [succ_output_name],
                        )
                        graph.node.insert(node_ind, mul_node)

                        if target_node.op_type in add_like_nodes:
                            # For Add-like nodes, the scale factor must also
                            # be moved in front of the next operator
                            div_tensor = helper.make_tensor_value_info(
                                model.make_new_valueinfo_name(),
                                TensorProto.FLOAT,
                                mul_shape,
                            )
                            graph.value_info.append(div_tensor)
                            model.set_initializer(div_tensor.name, scale)

                            succ_input_name = successor.input[0]
                            act_mul_tensor = helper.make_tensor_value_info(
                                model.make_new_valueinfo_name(),
                                TensorProto.FLOAT,
                                output_shape,
                            )
                            graph.value_info.append(act_mul_tensor)
                            successor.input[0] = act_mul_tensor.name

                            div_node = helper.make_node(
                                "Div",
                                [succ_input_name, div_tensor.name],
                                [act_mul_tensor.name],
                            )
                            graph.node.insert(node_ind, div_node)

                    # remove old node
                    graph.node.remove(n)
                    graph_modified = True
                    model = model.transform(InferShapes())
                    return (model, graph_modified)
        return (model, graph_modified)
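Example #4 folds Quant/BipolarQuant weight nodes. With a unity scale the dequantized execution result is stored directly; otherwise the integer values (result divided by the scale, then rounded) become the new initializer and the scale is restored by a Mul inserted after the consumer, with an extra Div inserted in front of it for Add-like consumers. The MatMul and Add cases of the underlying algebra, checked with numpy (not from the source; a power-of-two scale keeps the comparison exact):

    import numpy as np

    x = np.random.rand(2, 4).astype(np.float32)               # activation
    s = np.float32(0.125)                                      # scale factor
    w_int = np.random.randint(-8, 8, size=(4, 3)).astype(np.float32)
    a_int = np.random.randint(-8, 8, size=(2, 4)).astype(np.float32)

    # Mul-like consumer (e.g. MatMul): the scale moves behind the operator
    assert np.allclose(x @ (w_int * s), (x @ w_int) * s)
    # Add-like consumer (e.g. Add): a Div in front and a Mul behind compensate
    assert np.allclose(x + a_int * s, ((x / s) + a_int) * s)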