def pipe_and_output(input, output=None, num_threads=1, processor=None,
                    name=None, capacity=None, group=None, final_outputs=None):
    """
    Similar to `pipe`, with the additional ability for the pipe Task to
    return output values to the `Session` once done.

    Returns:
        Tuple (out_queue, *task_outputs)
            out_queue: same as return value of `pipe`.
            task_outputs: TaskOutput object, fetchable from the client after
                session.run() returns.
    """
    result, step = _pipe_step(
        input, output, num_threads, processor, name, capacity, group,
        final_outputs)
    assert step is not None
    task = Task(step=step, group=group, outputs=final_outputs)
    # Use a fresh local instead of clobbering the `output` parameter, which
    # names the pipe's writer argument, not the task's fetchable outputs.
    task_output = None
    if final_outputs is not None:
        task_output = task.outputs()
        # A scalar `final_outputs` produces a single TaskOutput rather than a
        # list. `isinstance` (not `type(...) in (...)`) also accepts
        # subclasses of list/tuple.
        if not isinstance(final_outputs, (list, tuple)):
            task_output = task_output[0]
    return result, task_output
def init(self, nodes=None, retrieve_from_epoch=None):
    """
    Build a Task that will be run once after the job's `init_group` is run.
    This task will determine which blobs need to be snapshoted.
    If retrieve_from_epoch is not None, then the snapshot metadata is
    retrieved from a previously saved snapshot.
    """
    assert nodes is None or len(nodes) == 1, (
        'SnapshotManager only supports single node.')
    net = core.Net('get_blob_list')
    if retrieve_from_epoch is not None:
        # Resuming: recover the blob list recorded by the earlier snapshot.
        net.Load(
            [], self._blob_names,
            db=self._dbname(retrieve_from_epoch),
            db_type=self._db_type,
            absolute_path=True)
    else:
        # Fresh run: enumerate all (non-shared) blobs in the workspace.
        net.GetAllBlobNames([], self._blob_names, include_shared=False)
    task = Task(step=net, outputs=[self._blob_names])
    # Keep a handle on the blob-name list so it can be fetched later.
    self._names_output = task.outputs()[0]
    return task
def init(self, nodes=None, retrieve_from_epoch=None):
    """
    Build a Task that will be run once after the job's `init_group` is run.
    This task will determine which blobs need to be checkpointed.
    If retrieve_from_epoch is not None, then the checkpoint metadata is
    retrieved from a previously saved checkpoint.
    """
    assert nodes is None or len(nodes) == 1, (
        'CheckpointManager only supports single node.')
    blob_list_net = core.Net('get_blob_list')
    if retrieve_from_epoch is None:
        # First run: collect every (non-shared) blob name in the workspace.
        blob_list_net.GetAllBlobNames(
            [],
            self._blob_names,
            include_shared=False,
        )
    else:
        # Resuming: load the blob list saved with that epoch's checkpoint.
        blob_list_net.Load(
            [],
            self._blob_names,
            db=self._dbname(retrieve_from_epoch),
            db_type=self._db_type,
            absolute_path=True,
        )
    blob_list_task = Task(step=blob_list_net, outputs=[self._blob_names])
    # Remember the output handle so the blob names can be fetched later.
    self._names_output = blob_list_task.outputs()[0]
    return blob_list_task
def pipe_and_output(
        input, output=None, num_threads=1, processor=None, name=None,
        capacity=None, group=None, final_outputs=None):
    """
    Similar to `pipe`, with the additional ability for the pipe Task to
    return output values to the `Session` once done.

    Returns:
        Tuple (out_queue, *task_outputs)
            out_queue: same as return value of `pipe`.
            task_outputs: TaskOutput object, fetchable from the client after
                session.run() returns.
    """
    result, step = _pipe_step(
        input, output, num_threads, processor, name, capacity, group,
        final_outputs)
    assert step is not None
    task = Task(step=step, group=group, outputs=final_outputs)
    # Use a fresh local instead of clobbering the `output` parameter, which
    # names the pipe's writer argument, not the task's fetchable outputs.
    task_output = None
    if final_outputs is not None:
        task_output = task.outputs()
        # A scalar `final_outputs` produces a single TaskOutput rather than a
        # list. `isinstance` (not `type(...) in (...)`) also accepts
        # subclasses of list/tuple.
        if not isinstance(final_outputs, (list, tuple)):
            task_output = task_output[0]
    return result, task_output
def add_stop_signal(self, output):
    """
    Append a stop signal to this job's epoch stop-signal list.

    A raw BlobReference is first wrapped in a Task on the epoch group so
    its value becomes available as a TaskOutput; an existing TaskOutput is
    accepted as-is. Anything else is rejected.
    """
    signal = output
    if isinstance(signal, core.BlobReference):
        # Wrap the blob so it is exposed as a fetchable TaskOutput.
        signal = Task(
            outputs=[signal], group=self.epoch_group).outputs()[0]
    assert isinstance(signal, TaskOutput)
    self.stop_signals.append(signal)