Example #1
    def test_Mean1D(self):
        npr = np.divide(np.add(self.np_a, self.np_b), 2.)
        dcr = dc.mean(dc.vectorTensorFloat([self.dc_a, self.dc_b]))
        np.testing.assert_allclose(npr,
                                   np.array(dcr.data()).astype(np.float32),
                                   rtol=1e-3,
                                   atol=1e-3)
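These test methods rely on a setUp fixture that the listing omits. A minimal sketch of one such fixture, assuming the deepC Python bindings expose dc.array for building a tensor from a Python list (the class name, module path, and 24-element size are illustrative; Example #5 additionally needs self.np_c/self.np_d and self.dc_c/self.dc_d built the same way):

import unittest
import numpy as np
import deepC.dnnc as dc  # module path assumed


class MeanMaxTest(unittest.TestCase):
    def setUp(self):
        # 24 elements so the (6, 4) and (2, 2, 3, 2) reshapes in later examples work.
        self.len = 24
        self.np_a = np.random.randn(self.len).astype(np.float32)
        self.np_b = np.random.randn(self.len).astype(np.float32)
        self.dc_a = dc.array(list(self.np_a))
        self.dc_b = dc.array(list(self.np_b))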
Example #2
    def test_Max1D(self):
        npr = np.maximum(self.np_a, self.np_b)
        dcr = dc.max(dc.vectorTensorFloat([self.dc_a, self.dc_b]))
        np.testing.assert_allclose(npr,
                                   np.array(dcr.data()).astype(np.float32),
                                   rtol=1e-3,
                                   atol=1e-3)
Example #3
    def test_Mean2D(self):
        np_a = np.reshape(self.np_a, (6, 4))
        np_b = np.reshape(self.np_b, (6, 4))
        dc_a = dc.reshape(self.dc_a, (6, 4))
        dc_b = dc.reshape(self.dc_b, (6, 4))
        npr = np.divide(np.add(np_a, np_b), 2.)
        dcr = dc.mean(dc.vectorTensorFloat([dc_a, dc_b]))
        np.testing.assert_allclose(npr.flatten(),
                                   np.array(dcr.data()).astype(np.float32),
                                   rtol=1e-3,
                                   atol=1e-3)
Example #4
    def test_Max4D(self):
        np_a = np.reshape(self.np_a, (2, 2, 3, 2))
        np_b = np.reshape(self.np_b, (2, 2, 3, 2))
        dc_a = dc.reshape(self.dc_a, (2, 2, 3, 2))
        dc_b = dc.reshape(self.dc_b, (2, 2, 3, 2))

        npr = np.maximum(np_a, np_b)
        dcr = dc.max(dc.vectorTensorFloat([dc_a, dc_b]))
        np.testing.assert_allclose(npr.flatten(),
                                   np.array(dcr.data()).astype(np.float32),
                                   rtol=1e-3,
                                   atol=1e-3)
Example #5
    def test_Maxof4(self):
        np_a = np.reshape(self.np_a, (5, 4))
        np_b = np.reshape(self.np_b, (2, 5, 4))
        np_c = np.reshape(self.np_c, (5, 4))
        np_d = np.reshape(self.np_d, (2, 5, 4))
        npr = np.maximum(np.maximum(np_a, np_b), np.maximum(np_c, np_d))
        dc_a = dc.reshape(self.dc_a, (5, 4))
        dc_b = dc.reshape(self.dc_b, (2, 5, 4))
        dc_c = dc.reshape(self.dc_c, (5, 4))
        dc_d = dc.reshape(self.dc_d, (2, 5, 4))
        dcr = dc.max(dc.vectorTensorFloat([dc_a, dc_b, dc_c, dc_d]))
        np.testing.assert_allclose(npr.flatten(),
                                   np.array(dcr.data()).astype(np.float32),
                                   rtol=1e-3,
                                   atol=1e-3)
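The operand shapes in this example differ: NumPy broadcasts the (5, 4) arrays against the (2, 5, 4) ones, and the test assumes dc.max broadcasts the same way. A NumPy-only sketch of the resulting shape:

import numpy as np

a = np.zeros((5, 4), dtype=np.float32)
b = np.zeros((2, 5, 4), dtype=np.float32)
# (5, 4) is broadcast across the leading axis of (2, 5, 4), so the
# elementwise maximum has shape (2, 5, 4) -- 40 values after flatten().
print(np.maximum(a, b).shape)  # (2, 5, 4)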
Example #6
    def addOPNode(self, node):

        op_type = dnnc.getOpCode(node.op_type)
        if (op_type is dnnc.opInvalid):
            print("ERROR (ONNX):" + node.op_type +
                  " is not a valid graph-node op type.")
            return None

        dcNode = self._dcGraph.addOPNode(node.name, op_type)

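        # Record the node's input and output tensor names.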
        for nd in node.input:
            dcNode.addInput(nd)

        for nd in node.output:
            dcNode.addOutput(nd)

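        # Translate each ONNX attribute into a dnnc irTypeData of the matching IR type.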
        for attr in node.attribute:
            attr_type = dnnc.IR_DataType_NOTYPE
            attr_vals = []
            attr_vec = None
            if attr.type == onnx.AttributeProto.INT:
                attr_type = dnnc.IR_DataType_INT32
                attr_vals.append(attr.i)
                attr_vec = dnnc.vectorInt(attr_vals)
            elif attr.type == onnx.AttributeProto.INTS:
                attr_type = dnnc.IR_DataType_INT32
                for val in attr.ints:
                    attr_vals.append(int(val))
                attr_vec = dnnc.vectorInt(attr_vals)
            elif attr.type == onnx.AttributeProto.FLOAT:
                attr_type = dnnc.IR_DataType_FLOAT
                attr_vals.append(attr.f)
                attr_vec = dnnc.vectorFloat(attr_vals)
            elif attr.type == onnx.AttributeProto.FLOATS:
                attr_type = dnnc.IR_DataType_FLOAT
                for val in attr.floats:
                    attr_vals.append(float(val))
                attr_vec = dnnc.vectorFloat(attr_vals)
            elif attr.type == onnx.AttributeProto.STRING:
                attr_type = dnnc.IR_DataType_STRING
                # attr.s holds raw bytes; decode it rather than str()-ing the bytes object.
                attr_vals.append(attr.s.decode("utf-8"))
                attr_vec = dnnc.vectorStr(attr_vals)
            elif attr.type == onnx.AttributeProto.STRINGS:
                attr_type = dnnc.IR_DataType_STRING
                for val in attr.strings:
                    attr_vals.append(val.decode("utf-8"))
                attr_vec = dnnc.vectorStr(attr_vals)
            elif attr.type == onnx.AttributeProto.TENSOR:
                if (attr.t.data_type == onnx.TensorProto.INT8
                        or attr.t.data_type == onnx.TensorProto.INT16
                        or attr.t.data_type == onnx.TensorProto.INT32
                        or attr.t.data_type == onnx.TensorProto.INT64):

                    attr_type = attr.t.data_type
                    attr_data = None
                    pack_format = 'P'
                    if (attr.t.data_type == onnx.TensorProto.INT8):
                        pack_format = 'b'
                    if (attr.t.data_type == onnx.TensorProto.INT16):
                        pack_format = 'h'
                    if (attr.t.data_type == onnx.TensorProto.INT32):
                        if (attr.t.int32_data):
                            attr_data = attr.t.int32_data
                        pack_format = 'i'
                    if (attr.t.data_type == onnx.TensorProto.INT64):
                        if (attr.t.int64_data):
                            attr_data = attr.t.int64_data
                        pack_format = 'q'

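                    # Typed fields were empty; decode the packed raw_data buffer instead.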
                    if (attr_data is None):
                        num_elems = 1
                        for d in attr.t.dims:
                            num_elems *= d
                        attr_data = struct.unpack(pack_format * num_elems,
                                                  attr.t.raw_data)

                    if (attr_data is not None):
                        attr_tensor = dnnc.intTensor(attr.t.dims, attr.name)
                        attr_tensor.load(attr_data)
                        attr_vec = dnnc.vectorTensorInt()
                        attr_vec.push_back(attr_tensor)
                    else:
                        print("ERROR (ONNX): could not extract data for graph-node " + \
                                node.name + "\'s attribute " +  attr.name + ".\n")

                elif (attr.t.data_type == onnx.TensorProto.FLOAT16
                      or attr.t.data_type == onnx.TensorProto.FLOAT
                      or attr.t.data_type == onnx.TensorProto.DOUBLE):

                    attr_type = attr.t.data_type
                    attr_data = None
                    pack_format = 'P'
                    if (attr.t.data_type == onnx.TensorProto.FLOAT16):
                        if (attr.t.float_data):
                            attr_data = attr.t.float_data
                        pack_format = 'e'
                    if (attr.t.data_type == onnx.TensorProto.FLOAT):
                        if (attr.t.float_data):
                            attr_data = attr.t.float_data
                        pack_format = 'f'
                    if (attr.t.data_type == onnx.TensorProto.DOUBLE):
                        if (attr.t.double_data):
                            attr_data = attr.t.double_data
                        pack_format = 'd'

                    if (attr_data is None):
                        num_elems = 1
                        for d in attr.t.dims:
                            num_elems *= d
                        attr_data = struct.unpack(pack_format * num_elems,
                                                  attr.t.raw_data)

                    if (attr_data is not None):
                        attr_tensor = dnnc.floatTensor(attr.t.dims, attr.name)
                        attr_tensor.load(attr_data)
                        attr_vec = dnnc.vectorTensorFloat()
                        attr_vec.push_back(attr_tensor)
                    else:
                        print("ERROR (ONNX): could not extract data for graph-node " + \
                                node.name + "\'s attribute " +  attr.name + ".\n")
                else:
                    print("ERROR (ONNX): attribute tensor's datatype " +
                          str(attr.t.data_type) + " isn't understood.")

            elif attr.type == onnx.AttributeProto.TENSORS:
                attr_type = dnnc.IR_DataType_TENSORS
                attr_vals.append(attr.tensors)
                attr_vec = dnnc.vectorTensorFloat(dnnc.floatTensor(attr_vals))
            elif attr.type == onnx.AttributeProto.GRAPH:
                attr_type = dnnc.IR_DataType_GRAPH
                attr_vals.append(attr.g)
                print(
                    "ERROR (ONNX): sub-graph in graph-node is not yet supported."
                )
            elif attr.type == onnx.AttributeProto.GRAPHS:
                attr_type = dnnc.IR_DataType_GRAPH
                attr_vals.append(attr.graphs)
                print(
                    "ERROR (ONNX): sub-graph in graph-node is not yet supported."
                )
            else:
                print("ERROR (ONNX): graph-node " + node.name + "\'s attribute " + \
                       attr.name + " type " + str(attr.type) + " is not valid.")
                continue

            if (attr_type is dnnc.IR_DataType_NOTYPE or attr_vec is None
                    or attr_vec.size() == 0):
                print("ERROR (ONNX): graph-node " + node.name + "\'s attribute " + \
                       attr.name + " has no data.")
                continue

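            # Map the ONNX attribute name to a dnnc attribute code.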
            attr_code = dnnc.getAttrName(attr.name)
            if (attr_code is dnnc.attr_invalid):
                print("WARN (ONNX): " + attr.name +
                      " is not a valid graph-node attribute.")
                print("             operator " + node.op_type +
                      " will be added without this attribute.")

            cAttrData = dnnc.irTypeData(attr_type, attr_vec)
            cAttr = dnnc.nodeAttribute(attr_code, cAttrData)
            dcNode.addAttribute(cAttr)

        return dcNode
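
For orientation, a minimal sketch of how a method like addOPNode is typically driven once an ONNX model has been loaded; OnnxParser is a hypothetical wrapper name (only the per-node call mirrors the code above):

import onnx

model = onnx.load("model.onnx")
parser = OnnxParser()  # hypothetical class owning self._dcGraph
for node in model.graph.node:
    dc_node = parser.addOPNode(node)
    if dc_node is None:
        print("skipped unsupported op " + node.op_type)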