def __init__(self, sess, env, network, log_dir, goal_checker=None):
    self.sess = sess
    self.env = env
    self.network = network
    self.goal_checker = goal_checker

    if network.summaries_op is not None:
        self.summary_writer = tf.summary.FileWriter(log_dir, flush_secs=1)
        self.logger = easy_tf_log.Logger()
        self.logger.set_writer(self.summary_writer.event_writer)
    else:
        self.summary_writer = None
        self.logger = None

    self.updates = 0
    self.last_state = self.env.reset()
    self.goal_checker.reset()
    self.last_goal_inputs = self.last_state[1]
    if len(self.last_goal_inputs) % 3 != 0:
        raise Exception(
            'The number of proprioceptive inputs must be a multiple of three. '
            'Current size: {0}'.format(len(self.last_goal_inputs)))
    self.goals = self.make_goals(self.last_goal_inputs)
    self.last_state = self.last_state[0]
    self.episode_values = []
def __init__(self, sess, env, network, log_dir): self.sess = sess self.env = env self.network = network self.bank_ops = [] self.banks_template = OrderedDict() for key in network.memory_bank: self.banks_template[key] = [] b = network.memory_bank[key] self.bank_ops += b.update if network.summaries_op is not None: self.summary_writer = tf.summary.FileWriter(log_dir, flush_secs=1) self.logger = easy_tf_log.Logger() self.logger.set_writer(self.summary_writer.event_writer) else: self.summary_writer = None self.logger = None self.updates = 0 self.last_state = self.env.reset() if type(self.last_state) is tuple: self.last_extra_inputs = self.last_state[1] self.last_state = self.last_state[0] else: self.last_extra_inputs = None self.episode_values = []
def __init__(self, sess, env, network, log_dir, goal_checker=None):
    if goal_checker is None:
        raise Exception(
            "goal_checker must implement "
            "__call__(goal, pos, min, max, default_value)")
    self.sess = sess
    self.env = env
    self.network = network
    self.goal_checker = goal_checker
    self.current_goal = 0

    if network.summaries_op is not None:
        self.summary_writer = tf.summary.FileWriter(log_dir, flush_secs=1)
        self.logger = easy_tf_log.Logger()
        self.logger.set_writer(self.summary_writer.event_writer)
    else:
        self.summary_writer = None
        self.logger = None

    self.updates = 0
    self.last_state = self.env.reset()
    self.goal_checker.reset()
    self.last_goal_inputs = self.last_state[1]
    if len(self.last_goal_inputs) % 3 != 0:
        raise Exception(
            'The number of proprioceptive inputs must be a multiple of three. '
            'Current size: {0}'.format(len(self.last_goal_inputs)))
    self.goals = self.make_goals(self.last_goal_inputs)
    # Start from a randomly chosen goal.
    self.current_goal = np.random.choice(len(self.goals))
    self.last_state = self.last_state[0]
    self.episode_values = []
def init_logging(self):
    create_dirs()
    # $ tensorboard --logdir=logs --port=6006
    # then go to http://acai.local:6006/#scalars&run=.
    self.rewards_dir_path = os.path.join(curr_dir_path, 'logs', 'rewards')
    self.log_dir_path = os.path.join(curr_dir_path, 'logs', 'train')
    self.save_historical_logs()

    # Using easy_tf_log
    self.logger = easy_tf_log.Logger()
    self.logger.set_log_dir(self.rewards_dir_path)

    # Using the standard TensorBoard method
    self.log_every_n = 10
    self.train_writer = tf.summary.FileWriter(
        "logs/train/", flush_secs=5)  # self.sess.graph)
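# The FileWriter set up above is not used inside init_logging itself. A minimal
# sketch of the "standard TensorBoard method" it refers to, writing one scalar
# every self.log_every_n steps; the method name, tag, and step argument are
# assumptions, not part of the original class:
def log_scalar(self, tag, value, step):
    if step % self.log_every_n != 0:
        return
    summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
    self.train_writer.add_summary(summary, step)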
def __init__(self, sess, env, network, log_dir):
    self.sess = sess
    self.env = env
    self.network = network

    if network.summaries_op is not None:
        self.summary_writer = tf.summary.FileWriter(log_dir, flush_secs=1)
        self.logger = easy_tf_log.Logger()
        self.logger.set_writer(self.summary_writer.event_writer)
    else:
        self.summary_writer = None
        self.logger = None

    self.updates = 0
    self.last_state = self.env.reset()
    self.episode_values = []
def __init__(self, env, log_prefix="", log_dir=None):
    Wrapper.__init__(self, env)

    if log_prefix:
        self.log_prefix = log_prefix + ": "
    else:
        self.log_prefix = ""

    if log_dir is not None:
        self.logger = easy_tf_log.Logger()
        self.logger.set_log_dir(log_dir)
    else:
        self.logger = None

    self.episode_rewards = None
    self.episode_length_steps = None
    self.episode_n = -1
    self.episode_done = None
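# The rest of this wrapper is not shown here. A minimal sketch of how reset()
# and step() might use the fields above to log per-episode statistics, assuming
# the classic four-tuple Gym Wrapper API; the logged key names and the use of
# log_prefix for printed messages are assumptions:
def reset(self, **kwargs):
    self.episode_n += 1
    self.episode_rewards = []
    self.episode_length_steps = 0
    self.episode_done = False
    return self.env.reset(**kwargs)

def step(self, action):
    obs, reward, done, info = self.env.step(action)
    self.episode_rewards.append(reward)
    self.episode_length_steps += 1
    self.episode_done = done
    if done:
        reward_sum = sum(self.episode_rewards)
        print("{}episode {} done; reward {:.1f}, length {}".format(
            self.log_prefix, self.episode_n, reward_sum, self.episode_length_steps))
        if self.logger is not None:
            self.logger.log_key_value('episode_reward_sum', reward_sum)
            self.logger.log_key_value('episode_length_steps', self.episode_length_steps)
    return obs, reward, done, info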
def test_measure_rate(self):
    with tempfile.TemporaryDirectory() as temp_dir:
        logger = easy_tf_log.Logger(log_dir=temp_dir)
        logger.measure_rate('foo', 0)
        time.sleep(1)
        logger.measure_rate('foo', 10)
        time.sleep(1)
        logger.measure_rate('foo', 25)

        event_filename = list(os.scandir(temp_dir))[0].path
        event_n = 0
        rates = []
        for event in tf.train.summary_iterator(event_filename):
            if event_n == 0:  # metadata
                event_n += 1
                continue
            rates.append(event.summary.value[0].simple_value)
            event_n += 1
        np.testing.assert_array_almost_equal(rates, [10., 15.], decimal=1)
import time

import easy_tf_log

for i in range(3):
    logger = easy_tf_log.Logger()
    logger.set_log_dir(f'run-seed{i}')
    logger.logkv('foo', 0)
    time.sleep(1.0)
    logger.logkv('foo', 1 + i / 3)
import time

import easy_tf_log

# Logging using the global logger
# Will log to automatically-created 'logs' directory
for i in range(10):
    easy_tf_log.tflog('foo', i)
for j in range(10, 20):
    easy_tf_log.tflog('bar', j)

easy_tf_log.set_dir('logs2')
for k in range(20, 30):
    easy_tf_log.tflog('baz', k)
for l in range(5):
    easy_tf_log.tflog('qux', l, step=(10 * l))

# Logging using a Logger object
logger = easy_tf_log.Logger(log_dir='logs3')
for i in range(10):
    logger.log_key_value('quux', i)
logger.log_list_stats('quuz', [1, 2, 3, 4, 5])

logger.measure_rate('corge', 10)
time.sleep(1)
logger.measure_rate('corge', 20)  # Logged rate: (20 - 10) / 1
time.sleep(2)
logger.measure_rate('corge', 30)  # Logged rate: (30 - 20) / 2
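# To check what was written, the event files can be read back with the same
# tf.train.summary_iterator used in the measure_rate test above. A minimal
# sketch, assuming the 'logs3' directory created by the Logger above and the
# TF 1.x API:
import os

import tensorflow as tf

event_file = list(os.scandir('logs3'))[0].path
for event in tf.train.summary_iterator(event_file):
    for value in event.summary.value:
        print(event.step, value.tag, value.simple_value)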