Example #1
    def run_node(cls, node, inputs, device=None):
        if not device:
            device = cls._get_default_device()
        super(PlaidMLBackend, cls).run_node(node, inputs, device)
        dev = plaidml.Device(cls.ctx, cls.device_configs[device])
        try:
            bindings = {}

            for (name, py_input) in zip(node.input, inputs):
                bindings[name] = tile.Value.from_python_value(py_input, ctx=cls.ctx, dev=dev)

            cls._apply_node(_load_ops(), node, bindings)

            func = tile.compose(
                cls.ctx,
                dev,
                inputs=[],
                outputs=[(_as_output_id(name), bindings[name]) for name in node.output])

            invoker = plaidml.Invoker(cls.ctx, func)

            tensors = [
                plaidml.Tensor(dev, invoker.get_output_shape(_as_output_id(name)))
                for name in node.output
            ]
            for (name, tensor) in zip(node.output, tensors):
                invoker.set_output(_as_output_id(name), tensor)

            invoker.invoke()

            return [tensor.as_ndarray(cls.ctx) for tensor in tensors]

        finally:
            dev.close()
Example #2
    def testTransferLargeNDArray(self):
        size = 3000000
        shape = (size,)
        dtype = plaidml.DATA_FLOAT32

        ctx = plaidml.Context()
        with plaidml.open_first_device(ctx) as dev:
            expected = np.random.uniform(low=0, high=100, size=size)
            tensor = plaidml.Tensor(dev, plaidml.Shape(ctx, dtype, *shape))
            actual = np.ndarray(shape, dtype='f4')

            pr = cProfile.Profile()
            pr.enable()

            with tensor.mmap_discard(ctx) as view:
                view.copy_from_ndarray(expected)
                view.writeback()

            with tensor.mmap_current() as view:
                view.copy_to_ndarray(actual)

            pr.disable()
            pr.print_stats()

            np.testing.assert_array_almost_equal(actual, expected, decimal=4)
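For reference, a minimal sketch of the same mmap write/read round trip, using only the calls exercised by the test above (the profiling is omitted):

# Minimal mmap round trip, condensed from the test above.
import numpy as np
import plaidml

ctx = plaidml.Context()
with plaidml.open_first_device(ctx) as dev:
    src = np.arange(4, dtype='f4')
    dst = np.empty(4, dtype='f4')
    t = plaidml.Tensor(dev, plaidml.Shape(ctx, plaidml.DATA_FLOAT32, 4))
    with t.mmap_discard(ctx) as view:    # discard: prior contents are dropped
        view.copy_from_ndarray(src)
        view.writeback()                 # flush the host view to the device
    with t.mmap_current() as view:       # current: map the device contents for reading
        view.copy_to_ndarray(dst)
    # dst now mirrors src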
Example #3
def onnx_tensor_to_plaidml_tensor(ctx, dev, tensor):
    """
    Converts an ONNX tensor proto to a TILE tensor value.
    
    Args:
        ctx (plaidml.Context): The context for the value creation.
        dev (plaidml.Device): The PlaidML device on which the value will be used.
        tensor (onnx_pb.TensorProto): The tensor data.
    
    Returns:
        plaidml.Var: A variable describing the tensor value.
    """
    if tensor.data_type not in ONNX_DTYPE_TO_PLAIDML:
        six.raise_from(
            NotImplementedError(
                'ONNX data type {} is not yet implemented by the PlaidML ONNX backend'.format(
                    onnx_pb.TensorProto.DataType.Name(tensor.data_type))), None)
    dtype = ONNX_DTYPE_TO_PLAIDML[tensor.data_type]
    var = plaidml.Tensor(dev, plaidml.Shape(ctx, dtype, *tensor.dims))
    with var.mmap_discard(ctx) as view:
        # TODO: Map ONNX datatypes to strings to use for conversion.
        # Also, consider precompiling the structs.
        if tensor.raw_data:
            view[:len(view)] = struct.unpack_from(
                _ONNX_TENSOR_DATATYPE_TO_UNPACK_TEMPLATE[tensor.data_type].format(len(view)),
                tensor.raw_data)
        else:
            view[:len(view)] = _ONNX_TENSOR_DATATYPE_TO_GETTER[tensor.data_type](tensor)
        view.writeback()
    return var
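A hypothetical way to exercise the converter, assuming `ctx` and `dev` are an open plaidml.Context and plaidml.Device; building the proto with onnx.helper.make_tensor stores typed values rather than raw_data, so the typed-getter branch above is taken:

# Hypothetical usage sketch (assumes `ctx` is a plaidml.Context and `dev`
# is an open plaidml.Device).
import onnx

weights = onnx.helper.make_tensor(
    name='w', data_type=onnx.TensorProto.FLOAT, dims=[2, 2],
    vals=[1.0, 2.0, 3.0, 4.0])          # stored as float_data, not raw_data
var = onnx_tensor_to_plaidml_tensor(ctx, dev, weights)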
Example #4
    def run(self, inputs, **kwargs):
        if not self._invoker:
            self._invoker = plaidml.Invoker(self._ctx, self._func)

        # TODO: Use the datatype from the model.
        for inp, valinfo in zip(inputs, self._input_valinfos):
            val = tile.Value.from_python_value(inp, ctx=self._ctx, dev=self._dev).var
            self._invoker.set_input(_as_input_id(valinfo.name), val)
        outputs = []
        # Track whether every output has a zero-size dimension; if so, there
        # is nothing to compute and invoke() is skipped below.
        all_zero_outputs = True
        for valinfo in self._model.graph.output:
            shape = self._invoker.get_output_shape(_as_output_id(valinfo.name))
            for d in shape.dimensions:
                if d.size == 0:
                    break
            else:
                all_zero_outputs = False
            output = plaidml.Tensor(self._dev, shape)
            outputs.append(output)
            self._invoker.set_output(_as_output_id(valinfo.name), output)

        if not all_zero_outputs:
            self._invoker.invoke()

        return [output.as_ndarray(self._ctx) for output in outputs]
Example #5
 def validate(self):
     with plaidml.open_first_device(self.ctx) as dev:
         matmul = plaidml.Function(
             "function (B[X,Z], C[Z,Y]) -> (A) { A[x,y : X,Y] = +(B[x,z] * C[z,y]); }"
         )
         shape = plaidml.Shape(self.ctx, plaidml.DType.FLOAT32, 3, 3)
         a = plaidml.Tensor(dev, shape)
         b = plaidml.Tensor(dev, shape)
         c = plaidml.Tensor(dev, shape)
         plaidml.run(self.ctx,
                     matmul,
                     inputs={
                         "B": b,
                         "C": c
                     },
                     outputs={"A": a})
Example #6
    def runMatrixMultiply(self, ctx, dev):
        matmul = plaidml.Function(
            "function (B[X,Z], C[Z,Y]) -> (A) { A[x,y : X,Y] = +(B[x,z] * C[z,y]); }"
        )
        shape = plaidml.Shape(ctx, plaidml.DATA_FLOAT32, 3, 3)
        b = plaidml.Tensor(dev, shape)
        with b.mmap_discard(ctx) as view:
            view[0] = 1.0
            view[1] = 2.0
            view[2] = 3.0
            view[3] = 4.0
            view[4] = 5.0
            view[5] = 6.0
            view[6] = 7.0
            view[7] = 8.0
            view[8] = 9.0
            view.writeback()

        c = plaidml.Tensor(dev, shape)
        with c.mmap_discard(ctx) as view:
            view[(0, 0)] = 1.0
            view[(0, 1)] = 2.0
            view[(0, 2)] = 3.0
            view[(1, 0)] = 4.0
            view[(1, 1)] = 5.0
            view[(1, 2)] = 6.0
            view[(2, 0)] = 7.0
            view[(2, 1)] = 8.0
            view[(2, 2)] = 9.0
            view.writeback()

        a = plaidml.Tensor(dev, shape)

        plaidml.run(ctx, matmul, inputs={"B": b, "C": c}, outputs={"A": a})

        with a.mmap_current() as view:
            self.assertEqual(view[0], 1.0 + 8.0 + 21.0)
            self.assertEqual(view[1], 2.0 + 10.0 + 24.0)
            self.assertEqual(view[2], 3.0 + 12.0 + 27.0)
            self.assertEqual(view[(1, 0)], 4.0 + 20.0 + 42.0)
            self.assertEqual(view[(1, 1)], 8.0 + 25.0 + 48.0)
            self.assertEqual(view[(1, 2)], 12.0 + 30.0 + 54.0)
            self.assertEqual(view[6], 7.0 + 32.0 + 63.0)
            self.assertEqual(view[7], 14.0 + 40.0 + 72.0)
            self.assertEqual(view[8], 21.0 + 48.0 + 81.0)
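The expected values in the assertions are ordinary row-by-column dot products (A[0,0] = 1*1 + 2*4 + 3*7 = 30, and so on); as a quick cross-check, the same 3x3 product in NumPy:

# NumPy cross-check of the expected matmul result above.
import numpy as np

B = np.arange(1.0, 10.0).reshape(3, 3)
C = np.arange(1.0, 10.0).reshape(3, 3)
print(B @ C)
# [[ 30.  36.  42.]
#  [ 66.  81.  96.]
#  [102. 126. 150.]]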
Example #7
    def make_output_tensor(self, shape):
        """Builds an uninitialized output tensor.

        Args:
            shape (plaidml.Shape): The shape of the tensor.

        Returns:
            plaidml.Tensor: The uninitialized tensor.
        """
        return plaidml.Tensor(self._dev, shape)
Example #8
    def testManualReshape(self):
        ctx = plaidml.Context()
        reshape = plaidml.Function(
            "function (I) -> (O) { F[3*j + k: 4 * 3] = >(I[j,k]); O[p,q : 6,2] = >(F[2*p + q]);}"
        )
        iShape = plaidml.Shape(ctx, plaidml.DATA_FLOAT32, 4, 3)
        oShape = plaidml.Shape(ctx, plaidml.DATA_FLOAT32, 6, 2)
        with plaidml.open_first_device(ctx) as dev:
            I = plaidml.Tensor(dev, iShape)
            with I.mmap_discard(ctx) as view:
                view[0] = 1.0
                view[1] = 2.0
                view[2] = 3.0
                view[3] = 4.0
                view[4] = 5.0
                view[5] = 6.0
                view[6] = 7.0
                view[7] = 8.0
                view[8] = 9.0
                view[9] = 10.0
                view[10] = 11.0
                view[11] = 12.0
                view.writeback()

            O = plaidml.Tensor(dev, oShape)
            plaidml.run(ctx, reshape, inputs={"I": I}, outputs={"O": O})
            with O.mmap_current() as view:
                self.assertEqual(view[0], 1.0)
                self.assertEqual(view[1], 2.0)
                self.assertEqual(view[2], 3.0)
                self.assertEqual(view[3], 4.0)
                self.assertEqual(view[4], 5.0)
                self.assertEqual(view[5], 6.0)
                self.assertEqual(view[6], 7.0)
                self.assertEqual(view[7], 8.0)
                self.assertEqual(view[8], 9.0)
                self.assertEqual(view[9], 10.0)
                self.assertEqual(view[10], 11.0)
                self.assertEqual(view[11], 12.0)
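The Tile function flattens the 4x3 input row-major into F and re-indexes it as 6x2, which is why the assertions read back 1.0 through 12.0 in order. Assuming that row-major layout, the equivalent NumPy reshape is:

# NumPy equivalent of the manual Tile reshape above (row-major layout assumed).
import numpy as np

I = np.arange(1.0, 13.0).reshape(4, 3)   # values 1..12, shape (4, 3)
O = I.reshape(6, 2)                      # same flat order, shape (6, 2)
print(O.ravel())                         # [ 1.  2.  3. ... 12.]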
Example #9
def get_value(x):

    func = plaidml.tile.compose(_ctx, _device, [], [('out', x)])
    invoker = plaidml.Invoker(_ctx, func)
    shape = invoker.get_output_shape('out')
    tensor = plaidml.Tensor(_device, shape)
    invoker.set_output('out', tensor)
    invoker.invoke()
    array = np.ndarray(
        x.shape.dims, dtype=plaidml.tile.PLAIDML_DTYPE_TO_NUMPY[x.shape.dtype])
    with tensor.mmap_current() as view:
        view.copy_to_ndarray(array)
    return array
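A hypothetical call to get_value, assuming the module-level _ctx and _device it relies on have already been initialized elsewhere; the input value is built with the same tile.Value.from_python_value helper seen in the other examples:

# Hypothetical usage sketch (assumes _ctx and _device are already set up).
import numpy as np
import plaidml.tile

x = plaidml.tile.Value.from_python_value(
    np.array([[1.0, 2.0], [3.0, 4.0]]), ctx=_ctx, dev=_device)
print(get_value(x))   # round-trips the 2x2 array through PlaidML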
Example #10
 def testBufferRanges(self):
     ctx = plaidml.Context()
     with plaidml.open_first_device(ctx) as dev:
         buf = plaidml.Tensor(dev, plaidml.Shape(ctx, plaidml.DATA_FLOAT32, 10))
         with buf.mmap_current() as view:
             self.assertEqual(len(view), 10)
             view[0] = 1
             with self.assertRaises(IndexError):
                 view[10] = 0
             view[9] = 2
             view[-1] = 4
             self.assertEqual(view[9], 4)
             view[0:10:3] = (1,2,3,4)
             self.assertEqual(view[3], 2)
             self.assertSequenceEqual(view[0:10:3], (1,2,3,4))
Example #11
    def run(self, inputs, **kwargs):
        if not self._invoker:
            self._invoker = plaidml.Invoker(self._ctx, self._func)

        # TODO: Use the datatype from the model.
        for inp, valinfo in zip(inputs, self._input_valinfos):
            val = tile.Value.from_python_value(inp, ctx=self._ctx, dev=self._dev).var
            self._invoker.set_input(valinfo.name, val)
        outputs = []
        for valinfo in self._model.graph.output:
            shape = self._invoker.get_output_shape(valinfo.name)
            output = plaidml.Tensor(self._dev, shape)
            outputs.append(output)
            self._invoker.set_output(valinfo.name, output)

        self._invoker.invoke()

        return [output.as_ndarray(self._ctx) for output in outputs]
Example #12
 def __call__(self, inputs):
     for (name, val) in zip(self._input_names, inputs):
         if isinstance(val, six.integer_types):
             val = plaidml.Integer(val)
         elif isinstance(val, float):
             val = plaidml.Real(val)
         else:
             val = K.variable(val, dtype=self._input_types[name]).var
         self._invoker.set_input(name, val)
     tensors = [
         plaidml.Tensor(K._device(), self._invoker.get_output_shape(name))
         for name in self._output_names
     ]
     for (name, t) in zip(self._output_names, tensors):
         self._invoker.set_output(name, t)
     self._invoker.invoke()
     res = []
     for t, np_arr in zip(tensors, self._np_arrs):
         with t.mmap_current() as view:
             view.copy_to_ndarray(np_arr)
         res.append(np_arr)
     return res
Example #13
    def make_inited_tensor(self,
                           dims,
                           dtype=plaidml.DType.FLOAT32,
                           start=1.0,
                           step=1.0):
        """Builds an initialized tensor.

        Args:
            dims (tuple of int): The sizes of each dimension of the tensor.
            dtype (plaidml.DType): The type of the tensor elements.
            start (number): The value of the initial (flattened) tensor element.
            step (number): The increment to add to `start` for each subsequent element.

        Returns:
            plaidml.Tensor: The initialized tensor.
        """
        shape = plaidml.Shape(self._ctx, dtype, *dims)
        tensor = plaidml.Tensor(self._dev, shape)
        with tensor.mmap_discard(self._ctx) as view:
            for idx in range(len(view)):
                view[idx] = start + (idx * step)
            view.writeback()
        return tensor
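As a hypothetical illustration of the fill pattern (element idx receives start + idx * step), a call with dims=(2, 3) and the defaults writes 1.0 through 6.0 into the flattened view; `harness` below stands in for an instance of the class that defines the helper:

# Hypothetical usage sketch (`harness` is an instance of the class above).
t = harness.make_inited_tensor((2, 3))   # defaults: FLOAT32, start=1.0, step=1.0
with t.mmap_current() as view:
    assert tuple(view[0:6]) == (1.0, 2.0, 3.0, 4.0, 5.0, 6.0)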
Example #14
def main():
    ctx = plaidml.Context()
    plaidml.quiet()

    def choice_prompt(question, choices, default):
        inp = ""
        while inp not in choices:
            inp = input("{0}? ({1})[{2}]:".format(question, ",".join(choices), default))
            if not inp:
                inp = default
            elif inp not in choices:
                print("Invalid choice: {}".format(inp))
        return inp

    print("""
PlaidML Setup ({0})

Thanks for using PlaidML!

Some Notes:
  * Bugs and other issues: https://github.com/plaidml/plaidml
  * Questions: https://stackoverflow.com/questions/tagged/plaidml
  * Say hello: https://groups.google.com/forum/#!forum/plaidml-dev
  * PlaidML is licensed under the GNU AGPLv3
 """.format(plaidml.__version__))

    # Operate as if nothing is set
    plaidml.settings._setup_for_test(plaidml.settings.user_settings)

    plaidml.settings.experimental = False
    devices, _ = plaidml.devices(ctx, limit=100, return_all=True)
    plaidml.settings.experimental = True
    exp_devices, unmatched = plaidml.devices(ctx, limit=100, return_all=True)

    if not (devices or exp_devices):
        if not unmatched:
            print("""
No OpenCL devices found. Check driver installation.
Read the helpful, easy driver installation instructions from our README:
http://github.com/plaidml/plaidml
""")
        else:
            print("""
No supported devices found. Run 'clinfo' and file an issue containing the full output.
""")
        sys.exit(-1)

    print("Default Config Devices:")
    if not devices:
        print("   No devices.")
    for dev in devices:
        print("   {0} : {1}".format(dev.id.decode(), dev.description.decode()))

    print("\nExperimental Config Devices:")
    if not exp_devices:
        print("   No devices.")
    for dev in exp_devices:
        print("   {0} : {1}".format(dev.id.decode(), dev.description.decode()))

    print(
        "\nUsing experimental devices can cause poor performance, crashes, and other nastiness.\n")
    exp = choice_prompt("Enable experimental device support", ["y", "n"], "n")
    plaidml.settings.experimental = exp == "y"
    try:
        devices = plaidml.devices(ctx, limit=100)
    except plaidml.exceptions.PlaidMLError:
        print("\nNo devices available in chosen config. Rerun plaidml-setup.")
        sys.exit(-1)

    if len(devices) > 1:
        print("""
Multiple devices detected (You can override by setting PLAIDML_DEVICE_IDS).
Please choose a default device:
""")
        devrange = range(1, len(devices) + 1)
        for i in devrange:
            print("   {0} : {1}".format(i, devices[i - 1].id.decode()))
        dev = choice_prompt("\nDefault device", [str(i) for i in devrange], "1")
        plaidml.settings.device_ids = [devices[int(dev) - 1].id.decode()]

    print("\nSelected device:\n    {0}".format(plaidml.devices(ctx)[0]))
    print("""
PlaidML sends anonymous usage statistics to help guide improvements.
We'd love your help making it better.
""")

    tel = choice_prompt("Enable telemetry reporting", ["y", "n"], "y")
    plaidml.settings.telemetry = tel == "y"

    print("\nAlmost done. Multiplying some matrices...")
    # Reinitialize to send a usage report
    print("Tile code:")
    print("  function (B[X,Z], C[Z,Y]) -> (A) { A[x,y : X,Y] = +(B[x,z] * C[z,y]); }")
    with plaidml.open_first_device(ctx) as dev:
        matmul = plaidml.Function(
            "function (B[X,Z], C[Z,Y]) -> (A) { A[x,y : X,Y] = +(B[x,z] * C[z,y]); }")
        shape = plaidml.Shape(ctx, plaidml.DType.FLOAT32, 3, 3)
        a = plaidml.Tensor(dev, shape)
        b = plaidml.Tensor(dev, shape)
        c = plaidml.Tensor(dev, shape)
        plaidml.run(ctx, matmul, inputs={"B": b, "C": c}, outputs={"A": a})
    print("Whew. That worked.\n")

    sav = choice_prompt("Save settings to {0}".format(plaidml.settings.user_settings), ["y", "n"],
                        "y")
    if sav == "y":
        plaidml.settings.save(plaidml.settings.user_settings)
    print("Success!\n")
Example #15
def main():
    ctx = plaidml.Context()
    plaidml.quiet()

    def choice_prompt(question, choices, default):
        inp = ""
        while inp not in choices:
            inp = input("{0}? ({1})[{2}]:".format(question, ",".join(choices),
                                                  default))
            if not inp:
                inp = default
            elif inp not in choices:
                print("Invalid choice: {}".format(inp))
        return inp

    print("""
PlaidML Setup ({0})

Thanks for using PlaidML!

The feedback we have received from our users indicates an ever-increasing need
for performance, programmability, and portability. During the past few months,
we have been restructuring PlaidML to address those needs.  To make all the
changes we need to make while supporting our current user base, all development
of PlaidML has moved to a branch — plaidml-v1. We will continue to maintain and
support the master branch of PlaidML and the stable 0.7.0 release.

Read more here: https://github.com/plaidml/plaidml 

Some Notes:
  * Bugs and other issues: https://github.com/plaidml/plaidml/issues
  * Questions: https://stackoverflow.com/questions/tagged/plaidml
  * Say hello: https://groups.google.com/forum/#!forum/plaidml-dev
  * PlaidML is licensed under the Apache License 2.0
 """.format(plaidml.__version__))

    # Optional verbose logging, controlled by the PLAIDML_VERBOSE environment variable
    if os.getenv("PLAIDML_VERBOSE"):
        # change verbose settings to PLAIDML_VERBOSE, or 4 if PLAIDML_VERBOSE is invalid
        try:
            arg_verbose = int(os.getenv("PLAIDML_VERBOSE"))
        except ValueError:
            arg_verbose = 4
        plaidml._internal_set_vlog(arg_verbose)
        print("INFO:Verbose logging has been enabled - verbose level",
              arg_verbose, "\n")
        if plaidml.settings.default_config:
            (cfg_path,
             cfg_file) = os.path.split(plaidml.settings.default_config)
        else:
            (cfg_path, cfg_file) = ("Unknown", "Unknown")
        if plaidml.settings.experimental_config:
            (exp_path,
             exp_file) = os.path.split(plaidml.settings.experimental_config)
        else:
            (exp_path, exp_file) = ("Unknown", "Unknown")

    # Operate as if nothing is set
    plaidml.settings._setup_for_test(plaidml.settings.user_settings)

    plaidml.settings.experimental = False
    devices, _ = plaidml.devices(ctx, limit=100, return_all=True)
    plaidml.settings.experimental = True
    exp_devices, unmatched = plaidml.devices(ctx, limit=100, return_all=True)

    if not (devices or exp_devices):
        if not unmatched:
            print("""
No OpenCL devices found. Check driver installation.
Read the helpful, easy driver installation instructions from our README:
http://github.com/plaidml/plaidml
""")
        else:
            print("""
No supported devices found. Run 'clinfo' and file an issue containing the full output.
""")
        sys.exit(-1)

    if devices and os.getenv("PLAIDML_VERBOSE"):
        print("Default Config File Location:")
        print("   {0}/".format(cfg_path))

    print("\nDefault Config Devices:")
    if not devices:
        print("   No devices.")
    for dev in devices:
        print("   {0} : {1}".format(dev.id.decode(), dev.description.decode()))

    if exp_devices and os.getenv("PLAIDML_VERBOSE"):
        print("\nExperimental Config File Location:")
        print("   {0}/".format(exp_path))

    print("\nExperimental Config Devices:")
    if not exp_devices:
        print("   No devices.")
    for dev in exp_devices:
        print("   {0} : {1}".format(dev.id.decode(), dev.description.decode()))

    print(
        "\nUsing experimental devices can cause poor performance, crashes, and other nastiness.\n"
    )
    exp = choice_prompt("Enable experimental device support", ["y", "n"], "n")
    plaidml.settings.experimental = exp == "y"
    try:
        devices = plaidml.devices(ctx, limit=100)
    except plaidml.exceptions.PlaidMLError:
        print("\nNo devices available in chosen config. Rerun plaidml-setup.")
        sys.exit(-1)

    if devices:
        dev = 1
        if len(devices) > 1:
            print("""
Multiple devices detected (You can override by setting PLAIDML_DEVICE_IDS).
Please choose a default device:
""")
            devrange = range(1, len(devices) + 1)
            for i in devrange:
                print("   {0} : {1}".format(i, devices[i - 1].id.decode()))
            dev = choice_prompt("\nDefault device", [str(i) for i in devrange],
                                "1")
        plaidml.settings.device_ids = [devices[int(dev) - 1].id.decode()]

    print("\nSelected device:\n    {0}".format(plaidml.devices(ctx)[0]))

    print("\nAlmost done. Multiplying some matrices...")
    # Reinitialize to send a usage report
    print("Tile code:")
    print(
        "  function (B[X,Z], C[Z,Y]) -> (A) { A[x,y : X,Y] = +(B[x,z] * C[z,y]); }"
    )
    with plaidml.open_first_device(ctx) as dev:
        matmul = plaidml.Function(
            "function (B[X,Z], C[Z,Y]) -> (A) { A[x,y : X,Y] = +(B[x,z] * C[z,y]); }"
        )
        shape = plaidml.Shape(ctx, plaidml.DType.FLOAT32, 3, 3)
        a = plaidml.Tensor(dev, shape)
        b = plaidml.Tensor(dev, shape)
        c = plaidml.Tensor(dev, shape)
        plaidml.run(ctx, matmul, inputs={"B": b, "C": c}, outputs={"A": a})
    print("Whew. That worked.\n")

    sav = choice_prompt(
        "Save settings to {0}".format(plaidml.settings.user_settings),
        ["y", "n"], "y")
    if sav == "y":
        plaidml.settings.save(plaidml.settings.user_settings)
    print("Success!\n")