def test_matmul_relu(self):
    """Tests that matrix multiply can be combined with a simple relu."""
    lhs = tile.Value.from_ndims(2)
    rhs = tile.Value.from_dimensions((3, None))
    out = op.relu(op.matmul(lhs, rhs))
    func = tile.compose(self._ctx,
                        self._dev,
                        inputs=[('lhs', lhs), ('rhs', rhs)],
                        outputs=[('out', out)])
    invoker = plaidml.Invoker(self._ctx, func)
    invoker.set_input('lhs', self.make_inited_tensor((3, 3)))
    invoker.set_input('rhs', self.make_inited_tensor((3, 3)))
    output = self.make_output_tensor(invoker.get_output_shape('out'))
    invoker.set_output('out', output)
    invoker.invoke()
    with output.mmap_current() as view:
        self.assertEqual(view[0], 1.0 + 8.0 + 21.0)
        self.assertEqual(view[1], 2.0 + 10.0 + 24.0)
        self.assertEqual(view[2], 3.0 + 12.0 + 27.0)
        self.assertEqual(view[3], 4.0 + 20.0 + 42.0)
        self.assertEqual(view[4], 8.0 + 25.0 + 48.0)
        self.assertEqual(view[5], 12.0 + 30.0 + 54.0)
        self.assertEqual(view[6], 7.0 + 32.0 + 63.0)
        self.assertEqual(view[7], 14.0 + 40.0 + 72.0)
        self.assertEqual(view[8], 21.0 + 48.0 + 81.0)
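# A minimal sketch of the helpers the tests rely on, assuming
# make_inited_tensor fills a float32 tensor with 1..N in row-major order
# (which is what the expected matmul values above imply) and that
# make_output_tensor simply allocates a tensor for a composed output shape.
# Hypothetical implementations, not the suite's actual helpers:
import numpy as np

def make_inited_tensor(self, dims):
    # Fill with 1..N row-major: (3, 3) becomes [[1, 2, 3], [4, 5, 6], [7, 8, 9]].
    shape = plaidml.Shape(self._ctx, plaidml.DType.FLOAT32, *dims)
    tensor = plaidml.Tensor(self._dev, shape)
    with tensor.mmap_discard(self._ctx) as view:
        view.copy_from_ndarray(
            np.arange(1, np.prod(dims) + 1, dtype=np.float32).reshape(dims))
        view.writeback()
    return tensor

def make_output_tensor(self, shape):
    # The shape comes straight from invoker.get_output_shape().
    return plaidml.Tensor(self._dev, shape)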
def test_tuple_deriv(self):
    """Tests that tuples work with derivatives."""
    A = tile.Value.from_ndims(2)
    B = tile.Value.from_ndims(2)
    out_dims = (A.shape.dims[0], B.shape.dims[1])
    out_shape = tile.Shape(tile.common_dtype(A.shape.dtype, B.shape.dtype), out_dims)
    out = tile.Operation(
        """
        function (A[I, K], B[K, J]) -> (O) {
            T = tuple(A, B);
            C = element(T, 0);
            D = element(T, 1);
            O[i, j : I, J] = +(C[i, k] * D[k, j]);
        }
        """, [('A', A), ('B', B)], [('O', out_shape)]).outputs['O']
    tot = op.summation(out, [0, 1])
    dA = op.gradients(tot, [A])[0]
    func = tile.compose(self._ctx,
                        self._dev,
                        inputs=[('A', A), ('B', B)],
                        outputs=[('DA', dA)])
    invoker = plaidml.Invoker(self._ctx, func)
    invoker.set_input('A', self.make_inited_tensor((3, 3)))
    invoker.set_input('B', self.make_inited_tensor((3, 3)))
    output = self.make_output_tensor(invoker.get_output_shape('DA'))
    invoker.set_output('DA', output)
    invoker.invoke()
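# test_tuple_deriv only checks that the derivative composition executes; if
# one wanted to assert on DA, here is a NumPy cross-check of the expected
# value. Since tot = sum_ij (A @ B)_ij, the gradient with respect to A is
# ones(3, 3) @ B.T: entry dA[i, k] is the sum over j of B[k, j], i.e. a row
# sum of B.
import numpy as np

B = np.arange(1, 10, dtype=np.float32).reshape(3, 3)
dA = np.ones((3, 3), dtype=np.float32) @ B.T
print(dA)  # every row is [6., 15., 24.]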
def run_node(cls, node, inputs, device=None):
    if not device:
        device = cls._get_default_device()
    super(PlaidMLBackend, cls).run_node(node, inputs, device)
    dev = plaidml.Device(cls.ctx, cls.device_configs[device])
    try:
        bindings = {}
        for (name, py_input) in zip(node.input, inputs):
            bindings[name] = tile.Value.from_python_value(py_input, ctx=cls.ctx, dev=dev)
        cls._apply_node(_load_ops(), node, bindings)
        func = tile.compose(
            cls.ctx,
            dev,
            inputs=[],
            outputs=[(_as_output_id(name), bindings[name]) for name in node.output])
        invoker = plaidml.Invoker(cls.ctx, func)
        tensors = [
            plaidml.Tensor(dev, invoker.get_output_shape(_as_output_id(name)))
            for name in node.output
        ]
        for (name, tensor) in zip(node.output, tensors):
            invoker.set_output(_as_output_id(name), tensor)
        invoker.invoke()
        return [tensor.as_ndarray(cls.ctx) for tensor in tensors]
    finally:
        dev.close()
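# Hypothetical caller's-eye view of run_node, assuming the loaded opset
# implements ONNX Add; the node is built with the standard onnx.helper API,
# and the result comes back as a list of ndarrays, one per node output.
import numpy as np
from onnx import helper

node = helper.make_node('Add', inputs=['x', 'y'], outputs=['sum'])
x = np.random.randn(3, 4).astype(np.float32)
y = np.random.randn(3, 4).astype(np.float32)
outputs = PlaidMLBackend.run_node(node, [x, y])
np.testing.assert_allclose(outputs[0], x + y, rtol=1e-5)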
def prepare(cls, model, device=None, **kwargs):
    if not device:
        device = list(cls.device_configs.keys())[0]
    super(PlaidMLBackend, cls).prepare(model, device, **kwargs)
    ops = _load_ops(model.opset_import)
    dev = plaidml.Device(cls.ctx, cls.device_configs[device].config)
    bindings = {}
    graph = model.graph
    initializers = set()
    for initializer in graph.initializer:
        initializers.add(initializer.name)
        bindings[initializer.name] = tile.Value.from_var(
            opset_util.onnx_tensor_to_plaidml_tensor(cls.ctx, dev, initializer),
            initializer.dims,
            opset_util.ONNX_DTYPE_TO_PLAIDML[initializer.data_type])
    input_valinfos = []
    for valinfo in graph.input:
        if valinfo.name not in initializers:
            bindings[valinfo.name] = opset_util.onnx_type_to_placeholder_value(valinfo.type)
            input_valinfos.append(valinfo)
    for node in graph.node:
        cls._apply_node(ops, node, bindings)
    func = tile.compose(
        cls.ctx,
        dev,
        inputs=[(inp.name, bindings[inp.name])
                for inp in graph.input
                if inp.name not in initializers],
        outputs=[(outp.name, bindings[outp.name]) for outp in graph.output])
    return PlaidMLBackendRep(model, cls.ctx, dev, func, input_valinfos)
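# Hypothetical end-to-end usage of prepare, assuming PlaidMLBackendRep
# follows the standard onnx BackendRep contract (run takes a list of input
# arrays ordered to match the non-initializer graph inputs); the model path
# and input shape are illustrative only.
import numpy as np
import onnx

model = onnx.load('model.onnx')
rep = PlaidMLBackend.prepare(model)  # defaults to the first device config
outputs = rep.run([np.random.randn(1, 3, 224, 224).astype(np.float32)])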
def test_argmax(self):
    """Validates that the argmax composition works."""
    inp = tile.Value.from_ndims(2)
    out = op.argmax(inp)
    func = tile.compose(self._ctx, self._dev, inputs=[('inp', inp)], outputs=[('out', out)])
    invoker = plaidml.Invoker(self._ctx, func)
    invoker.set_input('inp', self.make_inited_tensor((3, 4)))
    output = self.make_output_tensor(invoker.get_output_shape('out'))
    invoker.set_output('out', output)
    invoker.invoke()
    # A tensor whose values increase along the reduced dimension has an
    # argmax equal to that dimension's last index (size - 1, here 4 - 1 = 3).
    with output.mmap_current() as view:
        for dim in range(0, 3):
            self.assertEqual(view[dim], 3)
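# NumPy cross-check of the expectation above, assuming the inited (3, 4)
# tensor holds 1..12 row-major and op.argmax reduces the last axis: values
# increase along each row, so every row's argmax is the final index, 3.
import numpy as np

inp = np.arange(1, 13, dtype=np.float32).reshape(3, 4)
print(np.argmax(inp, axis=-1))  # [3 3 3]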
def test_equals_argmax(self):
    """Validates that the =(argmax, argmax) composition works."""
    lhs = tile.Value.from_ndims(2)
    rhs = tile.Value.from_ndims(2)
    out = op.equal(op.argmax(lhs), op.argmax(rhs))
    func = tile.compose(self._ctx,
                        self._dev,
                        inputs=[('lhs', lhs), ('rhs', rhs)],
                        outputs=[('out', out)])
    invoker = plaidml.Invoker(self._ctx, func)
    invoker.set_input('lhs', self.make_inited_tensor((3, 4)))
    invoker.set_input('rhs', self.make_inited_tensor((3, 4)))
    output = self.make_output_tensor(invoker.get_output_shape('out'))
    invoker.set_output('out', output)
    invoker.invoke()
    # Both inputs are identical, so the argmaxes match in every row.
    with output.mmap_current() as view:
        for dim in range(0, 3):
            self.assertEqual(view[dim], True)