Example No. 1
    def prepare(
        cls,
        model,  # type: ModelProto
        device,  # type: singa device
        **kwargs  # type: Any
    ):  # type: (...) -> Optional[BackendRep]
        '''
        Args:
            model: onnx model proto
            device: singa device
        Return:
            SingaBackendRep instance
        '''
        super(SingaBackend, cls).prepare(model, device, **kwargs)
        name2tensor = {}
        for node in model.graph.node:
            if node.op_type == 'Constant':
                data = helper.get_attribute_value(node.attribute[0])
                requires_grad, stores_grad = True, True
                if len(node.attribute) == 3:
                    requires_grad = helper.get_attribute_value(
                        node.attribute[1])
                    stores_grad = helper.get_attribute_value(node.attribute[2])
                t = tensor.Tensor(device=device,
                                  data=numpy_helper.to_array(data),
                                  requires_grad=requires_grad,
                                  stores_grad=stores_grad)

                name2tensor[node.output[0]] = t

        return SingaBackendRep(model, device, name2tensor)
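The core pattern prepare() relies on is pulling the TensorProto stored on a Constant node's first attribute and converting it to a numpy array. A minimal standalone sketch (the node here is hand-built for illustration, not taken from SINGA):

import numpy as np
from onnx import helper, numpy_helper

# Build a Constant node by hand and recover its value the same way prepare() does.
const_node = helper.make_node(
    "Constant", inputs=[], outputs=["c"],
    value=numpy_helper.from_array(np.array([1.0, 2.0], dtype=np.float32)))
tensor_proto = helper.get_attribute_value(const_node.attribute[0])  # TensorProto
value = numpy_helper.to_array(tensor_proto)                         # array([1., 2.], dtype=float32)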
Example No. 2
    def prepare(
        cls,
        model,  # type: ModelProto
        device,  # type: singa device
        **kwargs  # type: Any
    ):  # type: (...) -> Optional[BackendRep]
        """
        Args:
            model: onnx model proto
            device: singa device
        Return:
            SingaBackendRep instance
        """
        super(SingaBackend, cls).prepare(model, device, **kwargs)
        name2tensor = {}
        for node in model.graph.node:
            if node.op_type == "Constant":
                data = helper.get_attribute_value(node.attribute[0])
                requires_grad, stores_grad = True, True
                if len(node.attribute) == 3:
                    requires_grad = helper.get_attribute_value(
                        node.attribute[1]
                    )
                    stores_grad = helper.get_attribute_value(node.attribute[2])
                t = tensor.Tensor(
                    device=device,
                    data=numpy_helper.to_array(data),
                    requires_grad=requires_grad,
                    stores_grad=stores_grad,
                )

                name2tensor[node.output[0]] = t

        return SingaBackendRep(model, device, name2tensor)
Example No. 3
    def transpose(self, node):
        """Function representing transpose

        Args:
            node (node): ONNX node representing transpose operation

        :meta private:
        """
        nodeName = node.output[0]
        inputName = node.input[0]

        # Get attributes
        perm = None
        for attr in node.attribute:
            if attr.name == "perm":
                perm = get_attribute_value(attr)
        if perm is None:
            raise RuntimeError(
                "Permutation indices not specified by attibute 'perm'")
        self.shapeMap[nodeName] = [self.shapeMap[inputName][p] for p in perm]
        if inputName in self.varMap:
            self.varMap[nodeName] = np.transpose(
                self.varMap[inputName].reshape(self.shapeMap[inputName]), perm)
        elif inputName in self.constantMap:
            self.constantMap[nodeName] = np.transpose(
                self.constantMap[inputName], perm)
Example No. 4
def all_recurrents_should_bidirectional(onnx_model):
    return all(
        helper.get_attribute_value(attr) == b'bidirectional'
        for node in onnx_model.graph.node
        if node.op_type in ['GRU', 'LSTM', 'RNN']
        for attr in node.attribute
        if attr.name == 'direction')
Example No. 5
 def get_perm(onode):
     try:
         return next(
             helper.get_attribute_value(attr) for attr in onode.attribute
             if attr.name == 'perm')
     except StopIteration:
         return []
Example No. 6
    def check_node_attribute(node,
                             attribute_name: str,
                             expected_value,
                             default_value=None):
        """Verify that a node has expected value for an attribute.

        Args:
            node (NodeProto): a node to check
            attribute_name (str): name of attribute
            expected_value (Any): expected value of the attribute
            default_value (Any, optional): default value if the attribute does not exist. Defaults to None.

        Returns:
            bool: whether the check is passed or not
        """
        value = default_value
        for attr in node.attribute:
            if attr.name == attribute_name:
                value = helper.get_attribute_value(attr)

        if isinstance(expected_value, list):
            return isinstance(value, (ndarray, list)) and array_equal(
                expected_value, value, equal_nan=False)
        else:
            return value == expected_value
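A minimal usage sketch (assuming check_node_attribute is reachable as a staticmethod; the node below is hand-built for illustration):

from onnx import helper

node = helper.make_node("Concat", inputs=["a", "b"], outputs=["c"], axis=1)
# check_node_attribute(node, "axis", 1)                       -> True
# check_node_attribute(node, "axis", 0)                       -> False
# check_node_attribute(node, "missing", 2, default_value=2)   -> True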
Example No. 7
    def flatten(self, node):
        """
        Function representing flatten.
        Unlike numpy.flatten(), ONNX's Flatten operation reshapes
        a (d_0, d_1, ..., d_n) tensor into a 2D tensor with shape
        (d_0 * d_1 * ... * d_(axis-1), d_axis * d_(axis+1) * ... * d_n).
        Arguments:
            node: (node) representing flatten operation
        """
        nodeName = node.output[0]

        # Assume first input is array to be flattened
        inputName = node.input[0]
        axis = 1
        for attr in node.attribute:
            if attr.name == "axis":
                axis = get_attribute_value(attr)

        dimension1 = int(np.prod(self.shapeMap[inputName][:axis]))
        dimension2 = int(np.prod(self.shapeMap[inputName][axis:]))
        newShape = [dimension1, dimension2]
        self.shapeMap[nodeName] = newShape

        if inputName in self.varMap:
            self.varMap[nodeName] = self.varMap[inputName].reshape(newShape)
        elif inputName in self.constantMap:
            self.constantMap[nodeName] = self.constantMap[inputName].reshape(newShape)
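A worked instance of the shape rule described in the docstring (a standalone numpy sketch, not Marabou code): with axis=2, a (2, 3, 4, 5) tensor flattens to (2*3, 4*5) = (6, 20).

import numpy as np

x = np.arange(2 * 3 * 4 * 5).reshape(2, 3, 4, 5)
axis = 2
new_shape = [int(np.prod(x.shape[:axis])), int(np.prod(x.shape[axis:]))]
assert new_shape == [6, 20]
y = x.reshape(new_shape)   # the same computation flatten() records in shapeMap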
Example No. 8
    def version_6(cls, ctx, node, **kwargs):
        # T output = All(T x, list(int) reduce_indices, @bool keepdims)
        # T output = Any(T x, list(int) reduce_indices, @bool keepdims)
        reduce_dim = node.inputs[1].get_tensor_value()

        # for Any, the reduce_indices can be scalar as observed.
        if np.isscalar(reduce_dim):
            reduce_dim = [reduce_dim]

        if ctx.opset < 11:
            utils.make_sure(all(i >= 0 for i in reduce_dim), "negative reduce axis is not supported in onnx for now")

        cast = ctx.make_node(op_type="Cast", inputs=[node.input[0]], attr={"to": onnx_pb.TensorProto.FLOAT})
        keepdims = helper.get_attribute_value(node.get_attr("keep_dims"))
        op_type = "ReduceMin" if node.type == "All" else "ReduceSum"
        reduce_node = ctx.make_node(op_type=op_type, inputs=cast.output,
                                    attr={"axes": reduce_dim, "keepdims": keepdims})

        zero_node = ctx.make_const(utils.make_name("zero_reduce"), np.array(0, dtype=np.float32))

        shapes = node.output_shapes
        dtypes = node.output_dtypes
        ctx.remove_node(node.name)
        ctx.make_node(op_type="Greater", inputs=[reduce_node.output[0], zero_node.output[0]],
                      name=node.name, outputs=node.output, shapes=shapes, dtypes=dtypes)
Example No. 9
    def test_tensor_data(self):
        tensors = {
            "empty_tensor": np.array([], dtype=np.float32),
            "multi_dim_empty_tensor": np.array([[], []], dtype=np.float32),
            "scalar": np.array(1., dtype=np.float32),
            "one_item_array": np.array([1.], dtype=np.float32),
            "normal_array": np.array([[1., 2.], [2., 3.]], dtype=np.float32)
        }
        tf_reset_default_graph()
        with tf_session() as sess:
            for n, data in tensors.items():
                tf.constant(data, dtype=tf.float32, name=n)

        for tf_node in sess.graph.get_operations():
            name = tf_node.name
            self.assertTrue(name in tensors.keys())

            self.assertTrue("value" in tf_node.node_def.attr)
            # convert to onnx tensor value
            tensor_value = tf_utils.tf_to_onnx_tensor(
                tf_utils.get_tf_node_attr(tf_node, "value"),
                name=utils.port_name(tf_node.name)
            )
            attr = helper.make_attribute("value", tensor_value)
            # same as node.get_tensor_value(is_list=False)
            actual = numpy_helper.to_array(helper.get_attribute_value(attr))

            expected = tensors[name]

            self.assertTrue(np.array_equal(expected, actual))
Example No. 10
    def get_tensor_value(self, as_list=True):
        """Get value for onnx tensor.
        Args:
            as_list: whether to return the numpy ndarray as a (possibly nested) list.
        Returns:
            If as_list=True, return the array as a (possibly nested) list.
            Otherwise, return data of type np.ndarray.

            For example, if the tensor is a scalar with value 1:
                when as_list=False, return np.array(1), whose type is <class 'numpy.ndarray'>;
                when as_list=True, return 1, whose type is <class 'int'>.
        """
        if not self.is_const():
            raise ValueError("get tensor value: {} must be Const".format(
                self.name))
        if self.type == "Const":
            t = self.get_attr("value")
            if t:
                t = numpy_helper.to_array(helper.get_attribute_value(t))
        else:
            self._GraphCheck()
            t = self.graph.get_saved_tensor(self)
        if as_list is True and t is not None:
            t = t.tolist()  # t might be scalar after tolist()
        return t
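A small standalone sketch of the as_list behaviour documented above (the attribute is hand-built rather than read from a real Const node):

import numpy as np
from onnx import helper, numpy_helper

attr = helper.make_attribute("value", numpy_helper.from_array(np.array(1, dtype=np.int64)))
t = numpy_helper.to_array(helper.get_attribute_value(attr))
print(type(t))            # <class 'numpy.ndarray'>   (as_list=False path)
print(type(t.tolist()))   # <class 'int'>             (as_list=True path, scalar after tolist())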
Example No. 11
 def get_tensor_value(self, as_list=True):
     assert self.is_const(), "Failed: Node {} must be Const".format(self.name)
     t = self.get_attr('value')
     t = numpy_helper.to_array(helper.get_attribute_value(t))
     if as_list:
         t = t.tolist()
     return t
Example No. 12
    def test_attr_sparse_tensor_repeated_protos(self):  # type: () -> None
        dense_shape = [3, 3]
        sparse_values = [
            1.764052391052246, 0.40015721321105957, 0.978738009929657
        ]
        values_tensor = helper.make_tensor(name='sparse_values',
                                           data_type=TensorProto.FLOAT,
                                           dims=[len(sparse_values)],
                                           vals=np.array(sparse_values).astype(
                                               np.float32),
                                           raw=False)

        linear_indices = [2, 3, 5]
        indices_tensor = helper.make_tensor(
            name='indices',
            data_type=TensorProto.INT64,
            dims=[len(linear_indices)],
            vals=np.array(linear_indices).astype(np.int64),
            raw=False)
        sparse_tensor = helper.make_sparse_tensor(
            values_tensor, indices_tensor, dense_shape)

        repeated_sparse = [sparse_tensor, sparse_tensor]
        attr = helper.make_attribute("sparse_attrs", repeated_sparse)
        self.assertEqual(attr.name, "sparse_attrs")
        checker.check_attribute(attr)
        for s in helper.get_attribute_value(attr):
            checker.check_sparse_tensor(s)
Example No. 13
    def update_node_shape_dtype(self, node, override=False):
        """Try the best to infer shapes and dtypes for outputs of the node,
        by default, we respect TF shapes and dtypes.
        """
        if node.is_const() or node.is_graph_input():
            return
        # NOTE: only support onnx node for now
        if not utils.is_onnx_domain(node.domain):
            return

        logger.debug("Infer shape and dtype for [%s]", node.name)
        # NOTE: shape inference for some ops need the input values of the op, e.g., Reshape
        # op needs the "Shape" value to infer output shape.
        initializers = []
        for i, inp in enumerate(node.inputs):
            if not inp:
                if logger.isEnabledFor(logging.VERBOSE):
                    logger.warning(
                        "[%s] infer a inexistent node: [%s], please check the code",
                        node.name, node.input[i]
                    )
                continue
            if inp.is_const():
                t = inp.get_attr("value")
                tensor = helper.get_attribute_value(t)
                tensor.name = inp.output[0]
                initializers.append(tensor)

        input_shapes = [self.get_shape(i) for i in node.input]
        input_dtypes = [self.get_dtype(i) for i in node.input]

        shapes, dtypes = infer_onnx_shape_dtype(node, self._opset, input_shapes, input_dtypes, initializers)
        if not shapes or not dtypes:
            return

        for output, shape, dtype in zip(node.output, shapes, dtypes):
            if dtype == TensorProto.UNDEFINED:
                logger.debug("Inferred dtype for [%s, type: %s] is UNDEFINED, SKIP", node.name, node.type)
            else:
                existing_dtype = self.get_dtype(output)
                if existing_dtype is not None and existing_dtype != dtype:
                    if override:
                        logger.warning("Override dtype of %s from %s to %s", output, existing_dtype, dtype)
                    else:
                        dtype = existing_dtype
                self.set_dtype(output, dtype)
                logger.debug("Set dtype of [%s] to %s", output, dtype)

            if shape is None:
                logger.debug("Inferred shape for [%s, type: %s] is None, SKIP", node.name, node.type)
            else:
                existing_shape = self.get_shape(output)
                if existing_shape is not None and not utils.are_shapes_equal(existing_shape, shape):
                    if override:
                        logger.warning("Override shape of %s from %s to %s", output, existing_shape, shape)
                    else:
                        shape = existing_shape
                self.set_shape(output, shape)
                logger.debug("Set shape of [%s] to %s", output, shape)
Example No. 14
def get_attrs(schema):
    def get_attr_type_optional(attr_type):
        return 'OptionalAttr<{}>'.format(
            onnx_attr_type_to_mlir_attr_type(attr_type))

    def get_attr_type_with_default(attr_type, attr_default):
        return 'DefaultValuedAttr<{}, "{}">'.format(
            onnx_attr_type_to_mlir_attr_type(attr_type), attr_default)

    if not schema.attributes:
        return OrderedDict()

    name_to_type = OrderedDict()
    for _, attr in sorted(schema.attributes.items()):
        qualified_attr_name = "{}.{}".format(schema.name, attr.name)
        if qualified_attr_name in special_attr_defaults:
            name_to_type[attr.name] = get_attr_type_with_default(
                *special_attr_defaults[qualified_attr_name])
        if qualified_attr_name in special_attr_types:
            name_to_type[attr.name] = onnx_attr_type_to_mlir_attr_type(
                special_attr_types[qualified_attr_name])
        # option holds either required or default value
        elif attr.required:
            name_to_type[attr.name] = onnx_attr_type_to_mlir_attr_type(
                attr.type)
        elif attr.default_value.name:

            def format_value(value):  # type: (Any) -> Text
                if isinstance(value, float):
                    formatted = str(np.round(value, 5))
                    # use default formatting, unless too long.
                    if len(formatted) > 10:
                        formatted = "({:e})".format(value)
                    return formatted
                elif isinstance(
                        value,
                    (bytes, bytearray)) and sys.version_info[0] == 3:
                    return str(value.decode('utf-8'))
                return str(value)

            default_value = helper.get_attribute_value(attr.default_value)
            if isinstance(default_value, list):
                default_value = [format_value(val) for val in default_value]
                default_value_str = '{}'.format(default_value)
                default_value_str = default_value_str.replace('[', '{', 1)
                default_value_str = default_value_str.replace(']', '}', 1)
                if Text(attr.type) == "AttrType.STRINGS":
                    default_value_str = default_value_str.replace("'", '\\"')
                else:
                    default_value_str = default_value_str.replace("'", '')
            else:
                default_value = format_value(default_value)
                default_value_str = default_value

            name_to_type[attr.name] = get_attr_type_with_default(
                attr.type, default_value_str)
        else:
            name_to_type[attr.name] = get_attr_type_optional(attr.type)
    return name_to_type
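A small sketch of the default-value extraction step used above (op and attribute chosen only for illustration): read an attribute default straight from an OpSchema.

from onnx import defs, helper

schema = defs.get_schema("Conv")
attr = schema.attributes["auto_pad"]
print(helper.get_attribute_value(attr.default_value))   # b'NOTSET' (string attributes come back as bytes)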
Example No. 15
    def maxpoolEquations(self, node, makeEquations):
        """Function to generate maxpooling equations

        Args:
            node (node): ONNX node representing maxpool operation
            makeEquations (bool): True if we need to create new variables and maxpool constraints

        :meta private:
        """
        nodeName = node.output[0]

        # Extract attributes and define shape
        inputShape = self.shapeMap[node.input[0]]
        kernel_shape = [1, 1]
        strides = [1, 1]
        for attr in node.attribute:
            if attr.name == 'kernel_shape':
                kernel_shape = get_attribute_value(attr)
            elif attr.name == 'strides':
                strides = get_attribute_value(attr)

        outputShape = [dim for dim in inputShape]
        outputShape[2] = int(
            np.ceil((inputShape[2] - ((kernel_shape[0] - 1) + 1) + 1) /
                    strides[0]))
        outputShape[3] = int(
            np.ceil((inputShape[3] - ((kernel_shape[1] - 1) + 1) + 1) /
                    strides[1]))
        self.shapeMap[nodeName] = outputShape

        if not makeEquations:
            return

        inVars = self.varMap[node.input[0]]
        outVars = self.makeNewVariables(nodeName)
        for i in range(outputShape[2]):
            for j in range(outputShape[3]):
                for k in range(outputShape[1]):
                    maxVars = set()
                    for di in range(strides[0] * i,
                                    strides[0] * i + kernel_shape[0]):
                        for dj in range(strides[1] * j,
                                        strides[1] * j + kernel_shape[1]):
                            if di < inputShape[2] and dj < inputShape[3]:
                                maxVars.add(inVars[0][k][di][dj])
                    self.addMaxConstraint(maxVars, outVars[0][k][i][j])
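A worked instance of the output-shape arithmetic above (the shapes are assumptions for illustration, not from a real network): an NCHW input of 1x3x28x28 with a 2x2 kernel and stride 2 gives 14x14 output maps.

import numpy as np

inputShape = [1, 3, 28, 28]
kernel_shape, strides = [2, 2], [2, 2]
out_h = int(np.ceil((inputShape[2] - ((kernel_shape[0] - 1) + 1) + 1) / strides[0]))
out_w = int(np.ceil((inputShape[3] - ((kernel_shape[1] - 1) + 1) + 1) / strides[1]))
assert (out_h, out_w) == (14, 14)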
Example No. 16
 def get_attr_value(self, name, default=None):
     attr = self.get_attr(name)
     if attr:
         attr_val = helper.get_attribute_value(attr)
         if isinstance(attr_val, bytes):
             attr_val = attr_val.decode('utf-8')
         return attr_val
     return default
Example No. 17
 def get_tensor_type(self):
     """Get the onnx data type of a tensor."""
     t = self.get_attr("value")
     if t:
         t = helper.get_attribute_value(t)
         if t:
             return utils.ONNX_TO_NUMPY_DTYPE[t.data_type]
     return onnx_pb.TensorProto.FLOAT
Example No. 18
    def create_graph_from_onnx_graph(graph_proto):
        """Create Graph loading onnx graph proto."""
        output_shapes = {}
        output_dtypes = {}

        shapes, dtypes = GraphUtil._parse_shape_and_type_from_value_infos(
            graph_proto.value_info)
        output_shapes.update(shapes)
        output_dtypes.update(dtypes)

        shapes, dtypes = GraphUtil._parse_shape_and_type_from_value_infos(
            graph_proto.output)
        output_shapes.update(shapes)
        output_dtypes.update(dtypes)

        non_const_nodes = []
        const_nodes = []
        for n in graph_proto.node:
            if n.op_type == "Constant":
                const_nodes.append(n)
                continue
            non_const_nodes.append(n)

        output_names = []
        for n in graph_proto.output:
            output_names.append(n.name)

        g = Graph(non_const_nodes, output_shapes, output_dtypes, None, None,
                  None, output_names)
        GraphUtil._parse_graph_initializer(g, graph_proto)

        for n in const_nodes:
            name = n.output[0]
            tensor = None
            for a in n.attribute:
                if a.name == "value":
                    tensor = helper.get_attribute_value(a)
                    if not isinstance(tensor, TensorProto):
                        raise ValueError(
                            "Constant value is not a tensor, unexpected.")
                    break

            if tensor:
                g.set_initializer(name, tensor)
            else:
                raise ValueError(
                    "failed to parse tensor value from Constant node")

        GraphUtil._parse_graph_input(g, graph_proto)

        for n in g.get_nodes():
            for attr_name, attr_val in n.attr.items():
                if attr_val.HasField('g'):
                    # it is assumed that attr_val.g already has inferred shapes/dtypes.
                    sub_g = GraphUtil.create_graph_from_onnx_graph(attr_val.g)
                    n.set_body_graph_as_attr(attr_name, sub_g)
        return g
Example No. 19
 def find_mask_input(self, excluded_graph_inputs):
     for node in self.nodes():
         if node.op_type == 'Softmax':
             mask_path = self.match_parent_path(
                 node, ['Add', 'Mul', 'Sub', 'Cast', 'Slice', 'Unsqueeze'],
                 [0, 1, None, 1, 0, 0])
             if mask_path is None:
                 continue
             add_node, mul_node, sub_node, cast_node, slice_node, unsqueeze_node = mask_path
             if self.has_constant_input(mul_node,
                                        -10000) and self.has_constant_input(
                                            sub_node, 1):
                 graph_inputs = self.get_graph_inputs(sub_node,
                                                      recursive=True)
                 inputs = [
                     input for input in graph_inputs
                     if input not in excluded_graph_inputs
                 ]
                 if len(inputs) > 1:
                     print("Found multiple candidates of mask input",
                           inputs)
                     return None
                 if len(inputs) == 1:
                     return inputs[0]
                 # Duplicated input found. Try to simplify the graph.
                 path_to_be_simplified = self.match_parent_path(
                     mask_path[-1], [
                         "ConstantOfShape", "Cast", "Concat", "Unsqueeze",
                         "Squeeze", "Slice", "Cast", "Shape"
                     ], [0, 0, 0, 0, 0, 0, 0, 0])
                 duplicated_inputs = [
                     input for input in graph_inputs
                     if input in excluded_graph_inputs
                 ]
                 # Simplify graph for dynamic axes.
                 if path_to_be_simplified and duplicated_inputs and len(
                         duplicated_inputs) == 1 and duplicated_inputs[
                             0] == path_to_be_simplified[-1].input[0]:
                     logger.debug("Simplify semgent id path...")
                     constantofshape_node = path_to_be_simplified[0]
                     constantofshape_value = helper.get_attribute_value(
                         constantofshape_node.attribute[0])
                     graph_name = self.get_graph_by_node(
                         constantofshape_node).name
                     self.add_node(
                         helper.make_node('Shape',
                                          inputs=[duplicated_inputs[0]],
                                          outputs=["input_shape_for_mask"]),
                         graph_name)
                     self.add_node(
                         helper.make_node('ConstantOfShape',
                                          inputs=["input_shape_for_mask"],
                                          outputs=[unsqueeze_node.input[0]],
                                          value=constantofshape_value),
                         graph_name)
                 return unsqueeze_node.input[0]
     return None
Example No. 20
 def get_tensor(self):
     if not self.is_const():
         if self.type == "Identity":
             return self.inputs[0].get_tensor()
         raise ValueError("get tensor: {} must be Const".format(self.name))
     t = self.get_attr("value")
     if t:
         t = numpy_helper.to_array(helper.get_attribute_value(t))
     return t
Example No. 21
def get_onnx_attribute(operation, name, default=None):
    attr = next((x for x in operation.attribute if x.name == name), None)
    if attr is None:
        value = default
    else:
        value = helper.get_attribute_value(attr)
        if isinstance(value, bytes):
            value = value.decode()
    return value
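A short usage sketch (the node is hand-built for illustration); the decode() above is why string attributes come back as str rather than bytes:

from onnx import helper

node = helper.make_node("Cast", inputs=["x"], outputs=["y"], to=1)
print(get_onnx_attribute(node, "to"))            # 1
print(get_onnx_attribute(node, "missing", -1))   # -1 (falls back to the default)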
Example No. 22
 def add(self, attr):  # type: (onnx.AttributeProto) -> None
     assert self.name in [None, attr.name]
     self.name = attr.name
     value = helper.get_attribute_value(attr)
     # Turn list into tuple so we can put it into set
     # As the value can be a string, don't blindly turn every
     # `collections.Iterable` into a tuple.
     if isinstance(value, list):
         value = tuple(value)
     self.values.add(str(value))
Example No. 23
 def add(self, attr):  # type: (onnx.AttributeProto) -> None
     assert self.name in [None, attr.name]
     self.name = attr.name
     value = helper.get_attribute_value(attr)
     # Turn list into tuple so we can put it into set
     # As the value can be a string, don't blindly turn every
     # `collections.Iterable` into a tuple.
     if isinstance(value, list):
         value = tuple(value)
     self.values.add(str(value))
Example No. 24
def convert_node(node):
    fields = OrderedDict((f[0].name, f[1]) for f in node.ListFields())
    attributes = fields.pop("attribute", [])
    attrs = OrderedDict((a.name, convert_field(helper.get_attribute_value(a))) for a in attributes)
    fields = OrderedDict((f, convert_field(v)) for f, v in fields.items())
    op_type = fields.pop("op_type")
    if op_type == "Cast" and "to" in attrs:
        attrs["to"] = convert_tensor_type(attrs["to"])
    inputs = fields.pop("input", [])
    outputs = fields.pop("output", [])
    return make_node_traced(op_type, inputs=inputs, outputs=outputs, **fields, **attrs)
Example No. 25
    def scalar_to_dim1(self):
        """Get value for onnx tensor."""
        if not self.is_const():
            raise ValueError("get tensor value: {} must be Const".format(self.name))

        t = self.get_attr("value")
        if t:
            t = helper.get_attribute_value(t)
            if not t.dims:
                t.dims.extend([1])
        return t.dims
Example No. 26
    def create_graph_from_onnx_model(onnx_model_proto):
        """Create Graph loading onnx model proto"""
        # apply shape inference on the model
        inferred_model = shape_inference.infer_shapes(onnx_model_proto)
        graph_proto = inferred_model.graph

        output_shapes = {}
        output_dtypes = {}

        shapes, dtypes = GraphUtil._parse_shape_and_type_from_value_infos(graph_proto.value_info)
        output_shapes.update(shapes)
        output_dtypes.update(dtypes)

        shapes, dtypes = GraphUtil._parse_shape_and_type_from_value_infos(graph_proto.output)
        output_shapes.update(shapes)
        output_dtypes.update(dtypes)

        shapes, dtypes = GraphUtil._parse_shape_and_type_from_value_infos(graph_proto.input)
        output_shapes.update(shapes)
        output_dtypes.update(dtypes)

        non_const_nodes = []
        const_nodes = []
        for n in graph_proto.node:
            if n.op_type == "Constant":
                const_nodes.append(n)
                continue
            non_const_nodes.append(n)

        output_names = []
        for n in graph_proto.output:
            output_names.append(n.name)

        g = Graph(non_const_nodes, output_shapes, output_dtypes, None, None, None, output_names)
        GraphUtil._parse_graph_initializer(g, graph_proto)

        for n in const_nodes:
            name = n.output[0]
            tensor = None
            for a in n.attribute:
                if a.name == "value":
                    tensor = helper.get_attribute_value(a)
                    if not isinstance(tensor, TensorProto):
                        raise ValueError("Constant value is not a tensor, unexpected.")
                    break

            if tensor:
                g.set_initializer(name, tensor)
            else:
                raise ValueError("failed to parse tensor value from Constant node")

        GraphUtil._parse_graph_input(g, graph_proto)
        return g
Example No. 27
 def constant(self, node):
     """
     Function representing a constant tensor
     Arguments:
         node: (node) representing constant operation
     """
     nodeName = node.output[0]
     for attr in node.attribute:
         if attr.name == "value":
             self.constantMap[nodeName] = numpy_helper.to_array(get_attribute_value(attr))
             return
     raise RuntimeError("Could not find value of tensor constant")
Example No. 28
    def cast(self, node):
        """Function representing cast

        Args:
            node (node): ONNX node representing cast operation

        :meta private:
        """
        nodeName = node.output[0]
        inputName = node.input[0]
        self.shapeMap[nodeName] = self.shapeMap[inputName]
        
        # Try to find type to cast to. If not found, raise error
        to = None
        for attr in node.attribute:
            if attr.name == "to":
                to = get_attribute_value(attr)
        if to is None:
            raise RuntimeError("Casting type not specified with attribute 'to'")
            
        # Cast input array to correct type, and throw error if type is unknown
        if inputName in self.constantMap:
            if to == TensorProto.FLOAT16:
                self.constantMap[nodeName] = self.constantMap[inputName].astype('float16')
            elif to == TensorProto.FLOAT:
                self.constantMap[nodeName] = self.constantMap[inputName].astype('float32')
            elif to == TensorProto.DOUBLE:
                self.constantMap[nodeName] = self.constantMap[inputName].astype('double')
            elif to == TensorProto.UINT8:
                self.constantMap[nodeName] = self.constantMap[inputName].astype('uint8')
            elif to == TensorProto.UINT16:
                self.constantMap[nodeName] = self.constantMap[inputName].astype('uint16')
            elif to == TensorProto.UINT32:
                self.constantMap[nodeName] = self.constantMap[inputName].astype('uint32')
            elif to == TensorProto.UINT64:
                self.constantMap[nodeName] = self.constantMap[inputName].astype('uint64')
            elif to == TensorProto.INT8:
                self.constantMap[nodeName] = self.constantMap[inputName].astype('int8')
            elif to == TensorProto.INT16:
                self.constantMap[nodeName] = self.constantMap[inputName].astype('int16')
            elif to == TensorProto.INT32:
                self.constantMap[nodeName] = self.constantMap[inputName].astype('int32')
            elif to == TensorProto.INT64:
                self.constantMap[nodeName] = self.constantMap[inputName].astype('int64')
            else:
                err_msg = "Unknown type for casting: %d\n" % to
                err_msg += "Check here for ONNX TensorProto: https://github.com/onnx/onnx/blob/master/onnx/onnx.proto"
                raise NotImplementedError(err_msg)
                
        # We shouldn't be casting variables to different types, since Marabou assumes variables have double precision
        elif inputName in self.varMap:
            raise NotImplementedError("Casting variables not allowed with Marabou")
Example No. 29
def get_attribute_value2(attr):
    """
    get_attribute_value with tensor conversion
    """

    if attr.type == onnx.AttributeProto.TENSOR:
        dtype = np.dtype(TENSOR_TYPE_TO_NP_TYPE[attr.t.data_type])
        data = attr.t.raw_data
        value = np.frombuffer(
            data, dtype=dtype, count=(len(data) // dtype.itemsize))
    else:
        value = get_attribute_value(attr)
    return value
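A small sketch of the TENSOR branch above (the attribute is hand-built; note the frombuffer path assumes the tensor payload is stored in raw_data):

import numpy as np
from onnx import AttributeProto, helper, numpy_helper

arr = np.array([1.0, 2.0, 3.0], dtype=np.float32)
attr = helper.make_attribute("value", numpy_helper.from_array(arr))   # from_array fills raw_data
assert attr.type == AttributeProto.TENSOR
print(get_attribute_value2(attr))   # array([1., 2., 3.], dtype=float32)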
Example No. 30
    def batchNorm(self, node, makeEquations):
        """Function to generate equations for a BatchNormalization

        Args:
            node (node): ONNX node representing the BatchNormalization operation

        :meta private:
        """

        nodeName = node.output[0]
        inputName = node.input[0]
        self.shapeMap[nodeName] = self.shapeMap[inputName]

        # Get attributes
        epsilon = None
        for attr in node.attribute:
            if attr.name == "epsilon":
                epsilon = get_attribute_value(attr)

        # Get inputs
        scales = self.constantMap[node.input[1]].reshape(-1)
        biases = self.constantMap[node.input[2]].reshape(-1)
        input_means = self.constantMap[node.input[3]].reshape(-1)
        input_variances = self.constantMap[node.input[4]].reshape(-1)

        if not makeEquations:
            return

        numChannels = len(scales)

        # Get variables
        inputVars = self.varMap[inputName].reshape(numChannels, -1)
        outputVars = self.makeNewVariables(nodeName).reshape(numChannels, -1)
        assert (inputVars.shape == outputVars.shape)

        numInputs = inputVars.shape[1]

        for i in range(numChannels):
            for j in range(numInputs):
                # Add equation
                # To know this computation,
                # refer to https://github.com/onnx/onnx/blob/master/docs/Operators.md#batchnormalization.
                e = MarabouUtils.Equation()
                e.addAddend(-1, outputVars[i][j])
                e.addAddend(
                    1 / np.sqrt(input_variances[i] + epsilon) * scales[i],
                    inputVars[i][j])
                e.setScalar(input_means[i] /
                            np.sqrt(input_variances[i] + epsilon) * scales[i] -
                            biases[i])
                self.addEquation(e)
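A quick numpy check of the rearrangement encoded in the equation above (toy numbers assumed for illustration): scale*(x - mean)/sqrt(var + eps) + bias equals c*x - (mean/sqrt(var + eps)*scale - bias) with c = scale/sqrt(var + eps).

import numpy as np

x, scale, bias, mean, var, eps = 2.0, 1.5, 0.1, 0.5, 4.0, 1e-5
lhs = scale * (x - mean) / np.sqrt(var + eps) + bias
c = 1 / np.sqrt(var + eps) * scale
rhs = c * x - (mean / np.sqrt(var + eps) * scale - bias)
assert np.isclose(lhs, rhs)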
Example No. 31
def _get_attribute_value(attr):
    if attr.type == AttributeProto.TENSOR:
        return "{}, {}".format(_data_type_str(attr.t.data_type),
                               numpy_helper.to_array(attr.t))
    if attr.type == AttributeProto.GRAPH:
        # TODO revise when graph node is available
        return "<graph>"
    if attr.type == AttributeProto.TENSORS:
        # TODO revise to see contents
        return "<tensors>..."
    if attr.type == AttributeProto.GRAPHS:
        # TODO revise when graph node is available
        return "<graphs>..."
    return helper.get_attribute_value(attr)
Example No. 32
 def set_tensor_value(self, new_val):
     """Set new value for existing onnx tensor."""
     if not self.is_const():
         raise ValueError("get tensor value: {} must be Const".format(self.name))
     t = self.get_attr("value")
     if not t:
         raise ValueError("set tensor value: {} is None".format(self.name))
     t = helper.get_attribute_value(t)
     if not t.raw_data:
         raise ValueError("set tensor value: {} is not raw_data".format(self.name))
     t.raw_data = new_val.tobytes()
     for i, _ in enumerate(t.dims):
         t.dims[i] = new_val.shape[i]
     # track shapes in _output_shapes
     self.graph.set_shape(t.name, t.dims)
Example No. 33
def attribute2dict(node):
    # create a dictionary mapping each attribute name of the node to its value
    attr = {}
    for a in node.attribute:
        attr[a.name] = helper.get_attribute_value(a)
    return attr
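A short usage sketch (the node is hand-built for illustration):

from onnx import helper

node = helper.make_node("Conv", inputs=["X", "W"], outputs=["Y"],
                        kernel_shape=[3, 3], pads=[1, 1, 1, 1])
print(attribute2dict(node))   # {'kernel_shape': [3, 3], 'pads': [1, 1, 1, 1]}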