Example #1
    def test_tuple_deriv(self):
        """Test tuples work via derivatives"""
        A = tile.Value.from_ndims(2)
        B = tile.Value.from_ndims(2)
        out_dims = (A.shape.dims[0], B.shape.dims[1])
        out_shape = tile.Shape(tile.common_dtype(A.shape.dtype, B.shape.dtype),
                               out_dims)
        out = tile.Operation(
            """
            function (A[I, K], B[K, J]) -> (O) {
                T = tuple(A, B);
                C = element(T, 0);
                D = element(T, 1);
                O[i, j : I, J] = +(C[i, k] * D[k, j]);
            }
            """, [('A', A), ('B', B)], [('O', out_shape)]).outputs['O']
        tot = op.summation(out, [0, 1])
        dA = op.gradients(tot, [A])[0]
        func = tile.compose(self._ctx,
                            self._dev,
                            inputs=[('A', A), ('B', B)],
                            outputs=[('DA', dA)])
        invoker = plaidml.Invoker(self._ctx, func)
        invoker.set_input('A', self.make_inited_tensor((3, 3)))
        invoker.set_input('B', self.make_inited_tensor((3, 3)))
        output = self.make_output_tensor(invoker.get_output_shape('DA'))
        invoker.set_output('DA', output)
        invoker.invoke()
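A quick NumPy cross-check of the gradient this test computes (a sketch, assuming make_inited_tensor fills a (3, 3) tensor with the values 1..9 in row-major order, which is what the sums asserted in Example #2 imply):

import numpy as np

# tot = sum_{i,j} (A @ B)[i, j], so d(tot)/dA[i, k] = sum_j B[k, j]:
# every row of dA is the vector of row-sums of B.
B = np.arange(1, 10, dtype=np.float32).reshape(3, 3)
expected_dA = np.tile(B.sum(axis=1), (3, 1))
print(expected_dA)  # each row is [ 6. 15. 24.]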
Example #2
    def test_matmul_relu(self):
        """Tests that matrix multiply can be combined with a simple relu."""
        lhs = tile.Value.from_ndims(2)
        rhs = tile.Value.from_dimensions((3, None))
        out = op.relu(op.matmul(lhs, rhs))
        func = tile.compose(self._ctx,
                            self._dev,
                            inputs=[('lhs', lhs), ('rhs', rhs)],
                            outputs=[('out', out)])

        invoker = plaidml.Invoker(self._ctx, func)
        invoker.set_input('lhs', self.make_inited_tensor((3, 3)))
        invoker.set_input('rhs', self.make_inited_tensor((3, 3)))
        output = self.make_output_tensor(invoker.get_output_shape('out'))
        invoker.set_output('out', output)
        invoker.invoke()

        with output.mmap_current() as view:
            self.assertEqual(view[0], 1.0 + 8.0 + 21.0)
            self.assertEqual(view[1], 2.0 + 10.0 + 24.0)
            self.assertEqual(view[2], 3.0 + 12.0 + 27.0)
            self.assertEqual(view[(1, 0)], 4.0 + 20.0 + 42.0)
            self.assertEqual(view[(1, 1)], 8.0 + 25.0 + 48.0)
            self.assertEqual(view[(1, 2)], 12.0 + 30.0 + 54.0)
            self.assertEqual(view[6], 7.0 + 32.0 + 63.0)
            self.assertEqual(view[7], 14.0 + 40.0 + 72.0)
            self.assertEqual(view[8], 21.0 + 48.0 + 81.0)
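The asserted sums imply that make_inited_tensor fills a (3, 3) tensor with 1..9 in row-major order. Under that assumption, the expected values can be re-derived in plain NumPy:

import numpy as np

lhs = np.arange(1, 10, dtype=np.float32).reshape(3, 3)
rhs = lhs.copy()
expected = np.maximum(lhs @ rhs, 0)  # matmul followed by relu
assert expected[0, 0] == 1.0 + 8.0 + 21.0    # view[0]
assert expected[1, 1] == 8.0 + 25.0 + 48.0   # view[(1, 1)]
assert expected[2, 2] == 21.0 + 48.0 + 81.0  # view[8]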
Example #3
    def run_node(cls, node, inputs, device=None):
        if not device:
            device = cls._get_default_device()
        super(PlaidMLBackend, cls).run_node(node, inputs, device)
        dev = plaidml.Device(cls.ctx, cls.device_configs[device])
        try:
            bindings = {}

            for (name, py_input) in zip(node.input, inputs):
                bindings[name] = tile.Value.from_python_value(py_input, ctx=cls.ctx, dev=dev)

            cls._apply_node(_load_ops(), node, bindings)

            func = tile.compose(
                cls.ctx,
                dev,
                inputs=[],
                outputs=[(_as_output_id(name), bindings[name]) for name in node.output])

            invoker = plaidml.Invoker(cls.ctx, func)

            tensors = [
                plaidml.Tensor(dev, invoker.get_output_shape(_as_output_id(name)))
                for name in node.output
            ]
            for (name, tensor) in zip(node.output, tensors):
                invoker.set_output(_as_output_id(name), tensor)

            invoker.invoke()

            return [tensor.as_ndarray(cls.ctx) for tensor in tensors]

        finally:
            dev.close()
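Note that compose is called with inputs=[]: each input array is bound as a constant Value via from_python_value rather than as a free function input. A hypothetical call (the Add node and its operands are illustrative assumptions, and this presumes the op registry loaded by _load_ops handles ONNX Add):

import numpy as np
import onnx.helper

node = onnx.helper.make_node('Add', inputs=['x', 'y'], outputs=['z'])
x = np.ones((2, 2), dtype=np.float32)
y = np.full((2, 2), 2.0, dtype=np.float32)
(z,) = PlaidMLBackend.run_node(node, [x, y])  # z is a (2, 2) array of 3.0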
Example #4
    def run(self, inputs, **kwargs):
        if not self._invoker:
            self._invoker = plaidml.Invoker(self._ctx, self._func)

        # TODO: Use the datatype from the model.
        for inp, valinfo in zip(inputs, self._input_valinfos):
            val = tile.Value.from_python_value(inp, ctx=self._ctx, dev=self._dev).var
            self._invoker.set_input(_as_input_id(valinfo.name), val)
        outputs = []
        all_zero_outputs = True
        for valinfo in self._model.graph.output:
            shape = self._invoker.get_output_shape(_as_output_id(valinfo.name))
            for d in shape.dimensions:
                if d.size == 0:
                    break
            else:
                all_zero_outputs = False
            output = plaidml.Tensor(self._dev, shape)
            outputs.append(output)
            self._invoker.set_output(_as_output_id(valinfo.name), output)

        if not all_zero_outputs:
            self._invoker.invoke()

        return [output.as_ndarray(self._ctx) for output in outputs]
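The for/else in the shape check is easy to misread: the else clause runs only when the inner loop finishes without break, i.e. when an output shape has no zero-sized dimension, so invoke() is skipped only when every output contains a zero-sized dimension. A minimal standalone illustration of the same pattern:

def has_zero_dim(dims):
    for d in dims:
        if d == 0:
            break  # found a zero-sized dimension; skips the else clause
    else:
        return False  # loop completed without break: all dims are nonzero
    return True

assert has_zero_dim([3, 0, 2]) is True
assert has_zero_dim([3, 4]) is False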
Example #5
def get_value(x):
    func = plaidml.tile.compose(_ctx, _device, [], [('out', x)])
    invoker = plaidml.Invoker(_ctx, func)
    shape = invoker.get_output_shape('out')
    tensor = plaidml.Tensor(_device, shape)
    invoker.set_output('out', tensor)
    invoker.invoke()
    array = np.ndarray(
        x.shape.dims, dtype=plaidml.tile.PLAIDML_DTYPE_TO_NUMPY[x.shape.dtype])
    with tensor.mmap_current() as view:
        view.copy_to_ndarray(array)
    return array
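Hypothetical usage, assuming _ctx and _device are the module-level plaidml.Context and plaidml.Device that get_value closes over:

import numpy as np
import plaidml
import plaidml.tile

x = plaidml.tile.Value.from_python_value(
    np.arange(6, dtype=np.float32).reshape(2, 3), ctx=_ctx, dev=_device)
print(get_value(x))  # round-trips the 2x3 array through a PlaidML function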
Example #6
    def run(self, inputs, **kwargs):
        if not self._invoker:
            self._invoker = plaidml.Invoker(self._ctx, self._func)

        # TODO: Use the datatype from the model.
        for inp, valinfo in zip(inputs, self._input_valinfos):
            val = tile.Value.from_python_value(inp, ctx=self._ctx, dev=self._dev).var
            self._invoker.set_input(valinfo.name, val)
        outputs = []
        for valinfo in self._model.graph.output:
            shape = self._invoker.get_output_shape(valinfo.name)
            output = plaidml.Tensor(self._dev, shape)
            outputs.append(output)
            self._invoker.set_output(valinfo.name, output)

        self._invoker.invoke()

        return [output.as_ndarray(self._ctx) for output in outputs]
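This run() is the same flow as Example #4 minus two details: there is no zero-sized-output guard, so invoke() runs unconditionally, and the ONNX value-info names are used directly instead of being mangled through _as_input_id/_as_output_id.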
Example #7
    def test_argmax(self):
        """Validates that the composition works."""
        inp = tile.Value.from_ndims(2)
        out = op.argmax(inp)
        func = tile.compose(self._ctx,
                            self._dev,
                            inputs=[('inp', inp)],
                            outputs=[('out', out)])

        invoker = plaidml.Invoker(self._ctx, func)
        invoker.set_input('inp', self.make_inited_tensor((3, 4)))
        output = self.make_output_tensor(invoker.get_output_shape('out'))
        invoker.set_output('out', output)
        invoker.invoke()

        # Any increasing tensor reduced along an increasing dimension will
        # have an argmax equal to the dimension size minus one
        with output.mmap_current() as view:
            for dim in range(0, 3):
                self.assertEqual(view[dim], 3)
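The same expectation in plain NumPy (a sketch, assuming make_inited_tensor fills a (3, 4) tensor with 1..12 in row-major order and that op.argmax reduces along the last axis, as the output's three entries suggest):

import numpy as np

inp = np.arange(1, 13, dtype=np.float32).reshape(3, 4)
# Values grow along the last axis, so each row's argmax is the last index.
assert (inp.argmax(axis=-1) == 3).all()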
Example #8
    def test_equals_argmax(self):
        """Validates that the =(argmax, argmax) composition works."""
        lhs = tile.Value.from_ndims(2)
        rhs = tile.Value.from_ndims(2)
        out = op.equal(op.argmax(lhs), op.argmax(rhs))
        func = tile.compose(self._ctx,
                            self._dev,
                            inputs=[('lhs', lhs), ('rhs', rhs)],
                            outputs=[('out', out)])

        invoker = plaidml.Invoker(self._ctx, func)
        invoker.set_input('lhs', self.make_inited_tensor((3, 4)))
        invoker.set_input('rhs', self.make_inited_tensor((3, 4)))
        output = self.make_output_tensor(invoker.get_output_shape('out'))
        invoker.set_output('out', output)
        invoker.invoke()

        with output.mmap_current() as view:
            for dim in range(0, 2):
                self.assertEqual(view[(dim, 0)], True)
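A NumPy restatement of the expectation (same fill assumption as in Example #7): identically initialized tensors yield identical per-row argmax values, so the elementwise comparison is true everywhere.

import numpy as np

lhs = np.arange(1, 13, dtype=np.float32).reshape(3, 4)
rhs = lhs.copy()
assert (lhs.argmax(axis=-1) == rhs.argmax(axis=-1)).all()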