Example #1
    def deserialize_and_build(self, serialized_pipeline):
        """Deserialize and build the pipeline given in serialized form.

        Parameters
        ----------
        serialized_pipeline : str
                              Serialized pipeline.
        """
        self._pipe = b.Pipeline(serialized_pipeline,
                                self._max_batch_size,
                                self._num_threads,
                                self._device_id,
                                self._exec_pipelined,
                                self._prefetch_queue_depth,
                                self._exec_async,
                                self._bytes_per_sample,
                                self._set_affinity,
                                self._max_streams,
                                self._default_cuda_stream_priority)
        self._pipe.SetExecutionTypes(self._exec_pipelined, self._exec_separated, self._exec_async)
        self._pipe.SetQueueSizes(self._cpu_queue_size, self._gpu_queue_size)
        self._pipe.EnableExecutorMemoryStats(self._enable_memory_stats)
        self._prepared = True
        self._pipe.Build()
        self._built = True
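
A possible round trip with this method looks like the sketch below (not from the source: ExamplePipeline, its single Uniform operator, and the argument values are illustrative and assume a GPU-enabled DALI install with the classic ops API):

from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops

class ExamplePipeline(Pipeline):
    def __init__(self, batch_size, num_threads, device_id):
        super().__init__(batch_size, num_threads, device_id, seed=12)
        # A single random-number operator keeps the graph trivial.
        self.rng = ops.Uniform(range=[-1.0, 1.0])

    def define_graph(self):
        return self.rng()

pipe = ExamplePipeline(batch_size=8, num_threads=2, device_id=0)
serialized = pipe.serialize()               # serialized protobuf string

# A fresh instance can be rebuilt directly from the serialized form.
restored = ExamplePipeline(batch_size=8, num_threads=2, device_id=0)
restored.deserialize_and_build(serialized)
outputs = restored.run()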
Example #2
    def _prepare_graph(self):
        self._pipe = b.Pipeline(
            self._batch_size, self._num_threads, self._device_id, self._seed,
            self._exec_pipelined, self._prefetch_queue_depth, self._exec_async,
            self._bytes_per_sample, self._set_affinity, self._max_streams,
            self._default_cuda_stream_priority)
        self._pipe.SetExecutionTypes(self._exec_pipelined,
                                     self._exec_separated, self._exec_async)
        self._pipe.SetQueueSizes(self._cpu_queue_size, self._gpu_queue_size)
        outputs = self.define_graph()
        if not isinstance(outputs, (tuple, list)):
            outputs = (outputs, )

        for output in outputs:
            if not isinstance(output, Edge.EdgeReference):
                raise TypeError(
                    ("Expected outputs of type "
                     "EdgeReference. Received "
                     "output type {}").format(type(output).__name__))

        # Backtrack to construct the graph
        op_ids = set()
        edges = deque(outputs)
        ops = []
        while edges:
            current_edge = edges.popleft()
            source_op = current_edge.source
            if source_op is None:
                raise RuntimeError("Pipeline encountered "
                                   "Edge with no source op.")

            # To make sure we don't double count ops in
            # the case that they produce more than one
            # output, we keep track of the unique op ids
            # for each op we encounter and only add the
            # op if we have not already
            if source_op.id not in op_ids:
                op_ids.add(source_op.id)
                source_op.check_args()
                ops.append(source_op)
            else:
                # If the op was already added, we need to
                # change its position to the top of the list.
                # This ensures topological ordering of ops
                # when adding to the backend pipeline
                ops.remove(source_op)
                ops.append(source_op)
            for edge in source_op.inputs:
                if isinstance(edge, list):
                    for e in edge:
                        edges.append(e)
                else:
                    edges.append(edge)

        # Add the ops to the graph and build the backend
        while ops:
            op = ops.pop()
            self._pipe.AddOperator(op.spec, op.name)
        self._prepared = True
        self._names_and_devices = [(e.name, e.device) for e in outputs]
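
The backtracking loop above is essentially a reverse breadth-first walk from the outputs with a move-to-the-back step, so that popping the op list from the end yields producers before their consumers. A self-contained sketch of the same idea (the Op/Edge namedtuples and the four-op graph are made up for illustration):

from collections import deque, namedtuple

Op = namedtuple("Op", ["id", "name", "inputs"])    # inputs: upstream Edges
Edge = namedtuple("Edge", ["name", "source"])

reader = Op(0, "reader", [])
decode = Op(1, "decode", [Edge("files", reader)])
resize = Op(2, "resize", [Edge("images", decode)])
flip   = Op(3, "flip",   [Edge("images", decode)])
outputs = [Edge("a", resize), Edge("b", flip)]

op_ids, ops, edges = set(), [], deque(outputs)
while edges:
    op = edges.popleft().source
    if op.id in op_ids:
        ops.remove(op)          # already seen: move it to the back ...
    else:
        op_ids.add(op.id)
    ops.append(op)
    edges.extend(op.inputs)

while ops:                      # ... so it is popped before its consumers
    print(ops.pop().name)       # prints: reader, decode, flip, resize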
Example #3
    def deserialize_and_build(self, serialized_pipeline):
        self._pipe = b.Pipeline(serialized_pipeline, self._batch_size,
                                self._num_threads, self._device_id,
                                self._exec_pipelined, self._exec_async,
                                self._bytes_per_sample, self._set_affinity,
                                self._max_streams)
        self._prepared = True
        self._pipe.Build()
        self._built = True
Example #4
    def deserialize(cls, serialized_pipeline=None, filename=None, **kwargs):
        """Deserialize and build pipeline.

        Deserialize a pipeline previously serialized with the ``serialize()`` method.

        The returned pipeline is already built.

        Additional arguments can be passed; they will be used when instantiating
        the pipeline. Refer to the Pipeline constructor for the full list of arguments.
        By default, the pipeline is instantiated with the arguments from the serialized
        pipeline.

        Note that the ``serialized_pipeline`` and ``filename`` parameters are mutually exclusive.

        Parameters
        ----------
        serialized_pipeline : str
                   Pipeline serialized with the ``serialize()`` method.
        filename : str
                   File from which the serialized pipeline will be read.
        kwargs : dict
                   Refer to the Pipeline constructor for the full list of arguments.

        Returns
        -------
        Deserialized and built pipeline.
        """
        kw = kwargs
        if (serialized_pipeline is None) == (filename is None):  # XNOR
            raise ValueError(
                "serialized_pipeline and filename arguments are mutually exclusive. "
                "Precisely one of them should be defined.")
        pipeline = cls()
        if filename is not None:
            with open(filename, 'rb') as pipeline_file:
                serialized_pipeline = pipeline_file.read()
        pipeline._pipe = b.Pipeline(serialized_pipeline,
                                    kw.get("batch_size", -1),
                                    kw.get("num_threads", -1),
                                    kw.get("device_id", -1),
                                    kw.get("exec_pipelined", True),
                                    kw.get("prefetch_queue_depth", 2),
                                    kw.get("exec_async", True),
                                    kw.get("bytes_per_sample", 0),
                                    kw.get("set_affinity", False),
                                    kw.get("max_streams", -1),
                                    kw.get("default_cuda_stream_priority", 0))
        pipeline._pipe.SetExecutionTypes(pipeline._exec_pipelined,
                                         pipeline._exec_separated,
                                         pipeline._exec_async)
        pipeline._pipe.SetQueueSizes(pipeline._cpu_queue_size,
                                     pipeline._gpu_queue_size)
        pipeline._pipe.EnableExecutorMemoryStats(pipeline._enable_memory_stats)
        pipeline._prepared = True
        pipeline._pipe.Build()
        pipeline._built = True
        return pipeline
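
A hedged usage sketch for this classmethod (it assumes a DALI version whose ``serialize()`` accepts a ``filename`` argument and an already defined, serializable pipeline ``pipe``; the file name and the overridden batch size are illustrative):

from nvidia.dali.pipeline import Pipeline

pipe.serialize(filename="pipe.dali")      # write the serialized protobuf to disk

# Later, possibly in another process: rebuild straight from the file.
# Keyword arguments override the values stored in the serialized pipeline,
# e.g. the batch size picked up via kw.get("batch_size", -1) above.
restored = Pipeline.deserialize(filename="pipe.dali", batch_size=32)
out = restored.run()                      # already built, no extra build() needed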
Example #5
    def deserialize_and_build(self, serialized_pipeline):
        """Deserialize and build the pipeline given in serialized form.

        Parameters
        ----------
        serialized_pipeline : str
                              Serialized pipeline.
        """
        self._pipe = b.Pipeline(serialized_pipeline, self._batch_size,
                                self._num_threads, self._device_id,
                                self._exec_pipelined, self._exec_async,
                                self._bytes_per_sample, self._set_affinity,
                                self._max_streams)
        self._prepared = True
        self._pipe.Build()
        self._built = True
Example #6
    def __init__(self,
                 batch_size,
                 num_threads,
                 device_id,
                 seed=-1,
                 exec_pipelined=True,
                 exec_async=True,
                 bytes_per_sample=0,
                 set_affinity=False,
                 max_streams=-1):
        self._pipe = b.Pipeline(batch_size, num_threads, device_id, seed,
                                exec_pipelined, exec_async, bytes_per_sample,
                                set_affinity, max_streams)
        self.seed = seed
        self._exec_pipelined = exec_pipelined
        self._built = False
        self._first_iter = True
        self._prepared = False
        self._names_and_devices = None
        self._exec_async = exec_async
        self._bytes_per_sample = bytes_per_sample
        self._set_affinity = set_affinity
        self._max_streams = max_streams
Example #7
    def _prepare_graph(self, define_graph=None):
        self._pipe = b.Pipeline(
            self._batch_size, self._num_threads, self._device_id, self._seed,
            self._exec_pipelined, self._prefetch_queue_depth, self._exec_async,
            self._bytes_per_sample, self._set_affinity, self._max_streams,
            self._default_cuda_stream_priority)
        self._pipe.SetExecutionTypes(self._exec_pipelined,
                                     self._exec_separated, self._exec_async)
        self._pipe.SetQueueSizes(self._cpu_queue_size, self._gpu_queue_size)

        if define_graph is not None:
            if self._graph_out is not None:
                raise RuntimeError(
                    "Duplicate graph definition - `define_graph` argument "
                    "should not be specified when graph was defined with a call to `set_outputs`."
                )
        else:
            define_graph = self.define_graph

        if self._graph_out:
            outputs = self._graph_out
        else:
            with self:
                outputs = define_graph()
        if isinstance(outputs, tuple):
            outputs = list(outputs)
        elif not isinstance(outputs, list):
            outputs = [outputs]

        for i in range(len(outputs)):
            if isinstance(outputs[i], types.ScalarConstant):
                import nvidia.dali.ops
                outputs[i] = nvidia.dali.ops._instantiate_constant_node(
                    "cpu", outputs[i])
            _data_node._check(outputs[i])

        # Backtrack to construct the graph
        op_ids = set()
        edges = deque(list(outputs) + self._sinks)
        ops = []
        while edges:
            current_edge = edges.popleft()
            source_op = current_edge.source
            if source_op is None:
                raise RuntimeError("Pipeline encountered "
                                   "Edge with no source op.")

            # To make sure we don't double count ops in
            # the case that they produce more than one
            # output, we keep track of the unique op ids
            # for each op we encounter and only add the
            # op if we have not already
            if source_op.id not in op_ids:
                op_ids.add(source_op.id)
                source_op.check_args()
                ops.append(source_op)
            else:
                # If the op was already added, we need to
                # change its position to the top of the list.
                # This ensures topological ordering of ops
                # when adding to the backend pipeline
                ops.remove(source_op)
                ops.append(source_op)
            for edge in source_op.inputs:
                if isinstance(edge, list):
                    for e in edge:
                        edges.append(e)
                else:
                    edges.append(edge)

        # Add the ops to the graph and build the backend
        related_logical_id = {}
        self._ops = []
        while ops:
            op = ops.pop()
            self._ops.append(op)
            if op.relation_id not in related_logical_id:
                related_logical_id[op.relation_id] = self._pipe.AddOperator(
                    op.spec, op.name)
            else:
                self._pipe.AddOperator(op.spec, op.name,
                                       related_logical_id[op.relation_id])
        self._prepared = True
        self._setup_input_callbacks()
        self._names_and_devices = [(e.name, e.device) for e in outputs]
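
For context, the two graph-definition paths this method distinguishes can be exercised as in the sketch below (``fn.random.uniform`` and the constructor arguments are assumed to exist in the DALI version at hand; this is not taken from the source):

import nvidia.dali.fn as fn
from nvidia.dali.pipeline import Pipeline

# 1) Classic API: override define_graph in a subclass; _prepare_graph calls it.
class RandomPipeline(Pipeline):
    def define_graph(self):
        return fn.random.uniform(range=[-1.0, 1.0])

rand_pipe = RandomPipeline(batch_size=8, num_threads=2, device_id=0)
rand_pipe.build()

# 2) Imperative API: build the graph under the pipeline's context manager and
#    register it with set_outputs, which fills the _graph_out checked above.
pipe = Pipeline(batch_size=8, num_threads=2, device_id=0)
with pipe:
    pipe.set_outputs(fn.random.uniform(range=[-1.0, 1.0]))
pipe.build()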