def log(self, step: Union[int, Color], *dicts, silent=False, **kwargs) -> None:
    """Record key/value pairs for a global step and queue them for TensorBoard.

    :param step: the global step, be it the global timesteps or the epoch step
    :param dicts: dictionaries of key/value pairs, allowing more flexible
        key names with '/' etc.
    :param silent: Bool, log but do not print. To keep the standard out silent.
    :param kwargs: key/value arguments.
    :return: None
    """
    # A new step value flushes everything accumulated for the previous step.
    if self.step is not None and self.step != step:
        self.flush()
    self.step = step

    # Merge all sources; later sources win on key collisions
    # (dicts in order, then kwargs) -- same precedence as repeated update().
    merged = {k: v for source in (*dicts, kwargs) for k, v in source.items()}

    if silent:
        self.do_not_print_list.update(merged.keys())

    summary = tf.Summary()
    for tag, value in merged.items():
        try:
            # Color values carry their scalar payload in `.value`.
            scalar = value.value if type(value) is Color else value
            summary.value.add(tag=tag, simple_value=scalar)
        except TypeError as e:
            # Surface the offending key/value before re-raising.
            M.debug(tag, value)
            raise e
        self.data[tag] = value
    self.summary_writer.add_summary(summary, step)
def make_flip_flop():
    """Build ops that swap scalar variables `a` and `b` through a temporary `t`.

    Demonstrates that grouping the three assignment ops with `tf.group` does
    NOT make the swap reliable (no ordering between grouped ops), whereas
    sequencing them does.

    NOTE(review): relies on `sess` and `serial` from the enclosing scope --
    `sess` is presumably an active tf.Session and `serial` chains ops with
    control dependencies; confirm against the caller.

    :return: a timing function `run(n=10000)` that executes the reliable
        swap op n times.
    """
    a = tf.get_variable('a', shape=(), dtype=tf.float32, initializer=tf.zeros_initializer())
    b = tf.get_variable('b', shape=(), dtype=tf.float32, initializer=tf.ones_initializer())
    t = tf.get_variable('t', shape=(), dtype=tf.float32, initializer=tf.zeros_initializer())
    sess.run(tf.global_variables_initializer())

    # Single-assignment ops between the three variables: x2y copies x into y.
    a2t = lambda: t.assign(a)
    t2a = lambda: a.assign(t)
    b2t = lambda: t.assign(b)  # fixed: was t.assign(a), which copied the wrong variable
    t2b = lambda: b.assign(t)
    b2a = lambda: a.assign(b)
    a2b = lambda: b.assign(a)

    # tf.group imposes no ordering between the grouped ops, so the three
    # assignments can interleave arbitrarily -- the swap is not deterministic.
    unreliable_swap_op = tf.group(a2t(), b2a(), t2b())
    # `serial` sequences the ops, making the swap reliable.
    good_swap_op = serial(a2t, b2a, t2b)

    sess.run(good_swap_op)
    M.debug(sess.run([a, b, t]))

    @M.timeit
    def run(n=10000):
        # Time n executions of the reliable swap.
        for i in range(n):
            sess.run(good_swap_op)

    return run
import tensorflow as tf
from moleskin import moleskin as M
import ge_tf_utils
import baselines.common.tf_util as U

if __name__ == "__main__":
    # Demonstrates tf variable-scope reuse: a variable created under
    # scope/child is retrieved again by re-entering the scopes with
    # reuse=True on the parent.
    with U.single_threaded_session() as sess:
        # First pass: create scope/child/x.
        with tf.variable_scope('scope'):
            with tf.variable_scope('child', reuse=False):
                original = tf.get_variable('x', shape=[2, 3], dtype=tf.float32)

        # Second pass: parent opened with reuse=True. The child scope
        # inherits reuse even though reuse=False is passed explicitly.
        with tf.variable_scope('scope', reuse=True):
            with tf.variable_scope('child', reuse=False):
                assert tf.get_variable_scope()._reuse == True, "child scope of reusable parent is always True"
                reused = tf.get_variable('x', shape=[2, 3], dtype=tf.float32)

        U.initialize()

        # All three handles should resolve to the same underlying variable.
        M.debug(sess.run(original))
        M.debug(sess.run(reused))
        M.debug(ge_tf_utils.get('scope/child/x'))
        same = sess.run(original) == sess.run(reused)
        assert same.all(), 'original, reused, and one from scope are identical'
from moleskin import moleskin as M


class Something:
    # Class-level flag toggled by the benchmark below.
    a = 0


@M.timeit
def fn():
    """Flip Something.a one million times (class-attribute read/write benchmark)."""
    for i in range(1000000):
        # fixed: the original assigned 1 when truthy and 0 when falsy, which
        # rewrote the current value and never actually flipped -- `a` stayed 0
        # forever. Presumably this was meant as a flip-flop; confirm intent.
        if Something.a:
            Something.a = 0
        else:
            Something.a = 1


M.debug(fn())