def __init__(self, snapshot_config, max_cpus=1):
    """Set up the runner's snapshotter, worker pool, and bookkeeping state.

    Args:
        snapshot_config: Object providing ``snapshot_dir``,
            ``snapshot_mode``, and ``snapshot_gap`` for the Snapshotter.
        max_cpus: Maximum number of parallel sampler workers; the global
            singleton pool is only initialized when this exceeds 1.
    """
    self._snapshotter = Snapshotter(snapshot_config.snapshot_dir,
                                    snapshot_config.snapshot_mode,
                                    snapshot_config.snapshot_gap)

    if max_cpus > 1:
        # Deferred import: the pool is only needed for parallel sampling.
        # pylint: disable=import-outside-toplevel
        from garage.sampler import singleton_pool
        singleton_pool.initialize(max_cpus)

    # Setup/plotting flags (flipped later by setup()/train()).
    self._has_setup = False
    self._plot = False

    # Arguments captured at setup()/train() time for resuming.
    self._setup_args = None
    self._train_args = None

    # Cumulative experiment counters, all starting from zero.
    self._stats = ExperimentStats(total_itr=0,
                                  total_env_steps=0,
                                  total_epoch=0,
                                  last_path=None)

    # Components to be provided by setup().
    self._algo = None
    self._env = None
    self._policy = None
    self._sampler = None
    self._plotter = None

    # Timing and per-step state used during training.
    self._start_time = None
    self._itr_start_time = None
    self.step_itr = None
    self.step_path = None
def __init__(self, sess=None, max_cpus=1):
    """Create a new local runner.

    Args:
        max_cpus: The maximum number of parallel sampler workers.
        sess: An optional tensorflow session. A new session will
            be created immediately if not provided.

    Note:
        The local runner will set up a joblib task pool of size max_cpus
        possibly later used by BatchSampler. If BatchSampler is not used,
        the processes in the pool will remain dormant.

        This setup is required to use tensorflow in a multiprocess
        environment before a tensorflow session is created
        because tensorflow is not fork-safe.

        See https://github.com/tensorflow/tensorflow/issues/2448.

    """
    if max_cpus > 1:
        # Only spin up the worker pool when parallel sampling is requested.
        from garage.sampler import singleton_pool
        singleton_pool.initialize(max_cpus)

    # Fall back to a fresh TF session when the caller supplies none.
    self.sess = tf.Session() if sess is None else sess

    self.has_setup = False
    self.plot = False
def setup_method(self):
    """Prepare an isolated TF graph/session and global state for each test."""
    # Fresh graph and session so tests do not leak state into each other.
    self.graph = tf.Graph()
    self.sess = tf.Session(graph=self.graph)
    self.sess.__enter__()

    # Silence logging and make randomness reproducible.
    logger.add_output(NullOutput())
    deterministic.set_seed(1)

    # initialize global singleton_pool for each test case
    from garage.sampler import singleton_pool
    singleton_pool.initialize(1)
def __init__(self, snapshot_config, max_cpus=1):
    """Initialize the runner with a snapshotter and optional worker pool.

    Args:
        snapshot_config: Object providing ``snapshot_dir``,
            ``snapshot_mode``, and ``snapshot_gap`` for the Snapshotter.
        max_cpus: Maximum number of parallel sampler workers; the global
            singleton pool is only initialized when this exceeds 1.
    """
    self._snapshotter = Snapshotter(snapshot_config.snapshot_dir,
                                    snapshot_config.snapshot_mode,
                                    snapshot_config.snapshot_gap)

    if max_cpus > 1:
        # Deferred import: the pool is only needed for parallel sampling.
        from garage.sampler import singleton_pool
        singleton_pool.initialize(max_cpus)

    # Flags and arguments populated later by setup()/train().
    self.has_setup = False
    self.plot = False
    self._setup_args = None
    self.train_args = None
def setup_method(self):
    """Enter a clean TF graph and session, and reset global test state."""
    # Build a fresh graph and wipe any collections it may carry.
    self.graph = tf.Graph()
    for collection in self.graph.collections:
        self.graph.clear_collection(collection)

    # Enter the graph context so ops created in the test attach to it.
    self.graph_manager = self.graph.as_default()
    self.graph_manager.__enter__()

    # Enter the session both as the default session and as a context
    # manager, matching how it is torn down later.
    self.sess = tf.compat.v1.Session(graph=self.graph)
    self.sess_manager = self.sess.as_default()
    self.sess_manager.__enter__()
    self.sess.__enter__()

    # Silence logging and make randomness reproducible.
    logger.add_output(NullOutput())
    deterministic.set_seed(1)

    # initialize global singleton_pool for each test case
    from garage.sampler import singleton_pool
    singleton_pool.initialize(1)
def __init__(self, snapshot_config=None, sess=None, max_cpus=1):
    """Initialize the runner with optional snapshot config and TF session.

    Args:
        snapshot_config: Optional object providing ``snapshot_dir``,
            ``snapshot_mode``, and ``snapshot_gap``; when omitted a
            default-configured Snapshotter is used.
        sess: Optional tensorflow session; a new one is created when
            not provided.
        max_cpus: Maximum number of parallel sampler workers; the global
            singleton pool is only initialized when this exceeds 1.
    """
    if snapshot_config is None:
        self._snapshotter = Snapshotter()
    else:
        self._snapshotter = Snapshotter(snapshot_config.snapshot_dir,
                                        snapshot_config.snapshot_mode,
                                        snapshot_config.snapshot_gap)

    if max_cpus > 1:
        # Deferred import: the pool is only needed for parallel sampling.
        from garage.sampler import singleton_pool
        singleton_pool.initialize(max_cpus)

    # Fall back to a fresh TF session when the caller supplies none.
    self.sess = tf.Session() if sess is None else sess
    self.sess_entered = False

    # Flags and arguments populated later by setup()/train().
    self.has_setup = False
    self.plot = False
    self._setup_args = None
    self.train_args = None
def initialize(n_parallel):
    """Initialize the global singleton worker pool and seed each worker.

    Args:
        n_parallel: Number of parallel sampler workers to start.
    """
    singleton_pool.initialize(n_parallel)
    # Pass each worker its index; `worker_id` avoids shadowing the
    # builtin `id` that the original comprehension variable clobbered.
    singleton_pool.run_each(
        _worker_init,
        [(worker_id, ) for worker_id in range(singleton_pool.n_parallel)])