Example #1
    def generate_outputs(self):
        pipeline = _Pipeline.current()
        if pipeline is None and self._op.preserve:
            _Pipeline._raise_pipeline_required("Operators with side-effects ")
        # Determine the device of the outputs, then add them
        if self._op.device == "gpu" or self._op.device == "mixed":
            output_device = "gpu"
        else:
            output_device = "cpu"

        num_output = self._op.schema.CalculateOutputs(
            self._spec) + self._op.schema.CalculateAdditionalOutputs(
                self._spec)

        # A preserved operator with no outputs is kept alive by registering a sink node.
        if num_output == 0 and self._op.preserve:
            t_name = type(self._op).__name__ + "_id_" + str(self.id) + "_sink"
            pipeline.add_sink(_DataNode(t_name, output_device, self))
            return

        for i in range(num_output):
            t_name = self._name
            if num_output > 1:
                t_name += "[{}]".format(i)
            t = _DataNode(t_name, output_device, self)
            self._spec.AddOutput(t.name, t.device)
            if self._op.preserve:
                pipeline.add_sink(t)
            self.append_output(t)
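For context, generate_outputs creates one _DataNode per declared output and, when the operator was built with preserve=True (or produces no outputs at all), registers it as a pipeline sink so the node is not pruned from the graph. A minimal sketch of the user-facing side of this, assuming the public DALI pipeline_def/fn API; the pipeline name and the data directory below are hypothetical:

# Minimal sketch (assumed public DALI API; file_root is a hypothetical directory).
# preserve=True keeps an operator in the graph even though nothing consumes its
# outputs; generate_outputs then registers its _DataNode as a pipeline sink.
from nvidia.dali import pipeline_def, fn

@pipeline_def(batch_size=8, num_threads=2, device_id=0)
def preserve_pipe():
    jpegs, labels = fn.readers.file(file_root="images")  # hypothetical directory
    # The decoded images are never returned, but preserve=True prevents the
    # decoder node from being optimized away.
    fn.decoders.image(jpegs, device="cpu", preserve=True)
    return labels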
Example #2
    def __call__(self, *inputs, **kwargs):
        pipeline = Pipeline.current()
        if pipeline is None:
            Pipeline._raise_no_current_pipeline("NumbaFunction")
        inputs = ops._preprocess_inputs(inputs, self._impl_name, self._device,
                                        None)
        if (len(inputs) > self._schema.MaxNumInput()
                or len(inputs) < self._schema.MinNumInput()):
            raise ValueError(("Operator {} expects from {} to " +
                              "{} inputs, but received {}.").format(
                                  type(self).__name__,
                                  self._schema.MinNumInput(),
                                  self._schema.MaxNumInput(), len(inputs)))
        for inp in inputs:
            if not isinstance(inp, _DataNode):
                raise TypeError((
                    "Expected inputs of type `DataNode`. Received input of type '{}'. "
                    + "Python Operators do not support Multiple Input Sets."
                ).format(type(inp).__name__))
        op_instance = ops._OperatorInstance(inputs, self, **kwargs)
        op_instance.spec.AddArg("run_fn", self.run_fn)
        op_instance.spec.AddArg("setup_fn",
                                self.setup_fn) if self.setup_fn else None
        op_instance.spec.AddArg("out_types", self.out_types)
        op_instance.spec.AddArg("in_types", self.in_types)
        op_instance.spec.AddArg("outs_ndim", self.outs_ndim)
        op_instance.spec.AddArg("ins_ndim", self.ins_ndim)
        op_instance.spec.AddArg("device", self.device)
        op_instance.spec.AddArg("batch_processing", self.batch_processing)
        if self.device == 'gpu':
            op_instance.spec.AddArg("blocks", self.blocks)
            op_instance.spec.AddArg("threads_per_block",
                                    self.threads_per_block)

        # With no outputs, keep the operator alive by registering a sink node.
        if self.num_outputs == 0:
            t_name = self._impl_name + "_id_" + str(op_instance.id) + "_sink"
            t = _DataNode(t_name, self._device, op_instance)
            pipeline.add_sink(t)
            return
        outputs = []

        for i in range(self.num_outputs):
            t_name = op_instance._name
            if self.num_outputs > 1:
                t_name += "[{}]".format(i)
            t = _DataNode(t_name, self._device, op_instance)
            op_instance.spec.AddOutput(t.name, t.device)
            op_instance.append_output(t)
            pipeline.add_sink(t)
            outputs.append(t)
        return outputs[0] if len(outputs) == 1 else outputs
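The keyword arguments forwarded to the spec above (run_fn, out_types, in_types, outs_ndim, ins_ndim, batch_processing) presumably mirror what the user passes to the public entry point. A minimal usage sketch, assuming the experimental numba_function operator from the DALI numba plugin; the run function and the data directory below are hypothetical:

# Minimal sketch (assumed DALI numba plugin API; file_root is a hypothetical directory).
from nvidia.dali import pipeline_def, fn, types
from nvidia.dali.plugin.numba.fn.experimental import numba_function

def saturate(out0, in0):
    # Per-sample run function; writes into the preallocated output buffer.
    out0[:] = 255

@pipeline_def(batch_size=8, num_threads=2, device_id=0)
def numba_pipe():
    jpegs, _ = fn.readers.file(file_root="images")  # hypothetical directory
    images = fn.decoders.image(jpegs, device="cpu")
    return numba_function(images,
                          run_fn=saturate,
                          out_types=[types.UINT8], in_types=[types.UINT8],
                          outs_ndim=[3], ins_ndim=[3],
                          batch_processing=False)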
Example #3
    def __call__(self, *inputs, **kwargs):
        inputs = _preprocess_inputs(inputs, self._impl_name, self._device, None)
        pipeline = _Pipeline.current()
        if pipeline is None:
            _Pipeline._raise_pipeline_required("PythonFunction operator")
        if pipeline.exec_async or pipeline.exec_pipelined:
            raise RuntimeError("PythonFunction can be used only in pipelines with `exec_async` and "
                               "`exec_pipelined` set to False.")
        if (len(inputs) > self._schema.MaxNumInput() or
                len(inputs) < self._schema.MinNumInput()):
            raise ValueError(
                ("Operator {} expects from {} to " +
                 "{} inputs, but received {}.")
                .format(type(self).__name__,
                        self._schema.MinNumInput(),
                        self._schema.MaxNumInput(),
                        len(inputs)))
        for inp in inputs:
            if not isinstance(inp, _DataNode):
                raise TypeError(
                      ("Expected inputs of type `DataNode`. Received input of type '{}'. " +
                       "Python Operators do not support Multiple Input Sets.")
                      .format(type(inp).__name__))
        op_instance = _OperatorInstance(inputs, self, **kwargs)
        op_instance.spec.AddArg("function_id", id(self.function))
        op_instance.spec.AddArg("num_outputs", self.num_outputs)
        op_instance.spec.AddArg("device", self.device)
        # With no outputs, keep the operator alive by registering a sink node.
        if self.num_outputs == 0:
            t_name = self._impl_name + "_id_" + str(op_instance.id) + "_sink"
            t = _DataNode(t_name, self._device, op_instance)
            pipeline.add_sink(t)
            return
        outputs = []

        for i in range(self.num_outputs):
            t_name = op_instance._name
            if self.num_outputs > 1:
                t_name += "[{}]".format(i)
            t = _DataNode(t_name, self._device, op_instance)
            op_instance.spec.AddOutput(t.name, t.device)
            op_instance.append_output(t)
            pipeline.add_sink(t)
            outputs.append(t)
        return outputs[0] if len(outputs) == 1 else outputs
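The exec_async / exec_pipelined check above translates directly into how the pipeline must be configured when PythonFunction is used. A minimal usage sketch, assuming the public fn.python_function API; the per-sample function and the data directory below are hypothetical:

# Minimal sketch (assumed public DALI API; file_root is a hypothetical directory).
# PythonFunction requires exec_async=False and exec_pipelined=False,
# as enforced by the RuntimeError above.
import numpy as np
from nvidia.dali import pipeline_def, fn

def flip_horizontally(image):
    # Runs per sample on the CPU; receives an HWC NumPy array and returns one.
    return np.ascontiguousarray(image[:, ::-1])

@pipeline_def(batch_size=8, num_threads=2, device_id=0,
              exec_async=False, exec_pipelined=False)
def python_function_pipe():
    jpegs, _ = fn.readers.file(file_root="images")  # hypothetical directory
    images = fn.decoders.image(jpegs, device="cpu")
    return fn.python_function(images, function=flip_horizontally, num_outputs=1)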