def flops():
    x = tf.random_uniform([N, N])
    y = tf.random_uniform([N, N])

    def _matmul(x, y):
        return tf.tensordot(x, y, axes=[[1], [0]]), y

    return tf.reduce_sum(tpu.repeat(COUNT, _matmul, [x, y]))
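For context, a loop body like flops() above is not executed eagerly; it is typically replicated onto the TPU cores and launched through a session. A minimal launch sketch, assuming TF 1.x with tf.contrib.tpu imported as tpu and a hypothetical worker address:

import tensorflow as tf
from tensorflow.contrib import tpu

# Replicate the flops() computation defined above across eight TPU cores.
tpu_ops = tpu.batch_parallel(flops, [], num_shards=8)

with tf.Session("grpc://<tpu-worker-address>") as sess:  # hypothetical address
    sess.run(tpu.initialize_system())   # bring up the TPU system
    sess.run(tpu_ops)                   # run COUNT chained matmuls on each core
    sess.run(tpu.shutdown_system())     # release the TPU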
Example #2
def tpu_loop_fn():
    return tpu.repeat(batch_count,
                      tpu_step_fn,
                      inputs,
                      infeed_queue=infeed_queue)
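When an infeed_queue is passed, tpu.repeat appends the tensors dequeued from that queue on each iteration to the step function's arguments, after the loop-carried inputs. A minimal sketch of a matching step function, assuming inputs carries a single value and the queue yields a (features, labels) pair; the names and model_fn are hypothetical:

def tpu_step_fn(loop_value, features, labels):
    # loop_value comes from `inputs` and is carried between iterations;
    # features and labels are dequeued from infeed_queue every step.
    loss = model_fn(features, labels)  # hypothetical model function
    return loss                        # becomes loop_value for the next step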
Example #3
def train_loop():
    return tpu.repeat(self.iterations, tpu_train_step, [_INITIAL_LOSS])
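Here [_INITIAL_LOSS] seeds the single loop-carried value, and tpu_train_step must return its replacement on every iteration. A sketch of the usual shape of such a step function, with hypothetical input and loss helpers:

import tensorflow as tf

_INITIAL_LOSS = 1e7  # illustrative seed for the carried loss

def tpu_train_step(loss):
    del loss  # the incoming value only threads the loop state
    features, labels = get_next_batch()    # hypothetical input fn
    loss = compute_loss(features, labels)  # hypothetical model/loss
    # On a real TPU the optimizer is usually wrapped in tpu.CrossShardOptimizer.
    optimizer = tf.train.GradientDescentOptimizer(0.01)
    train_op = optimizer.minimize(loss)
    # Hand the new loss back only after the parameter update has run.
    with tf.control_dependencies([train_op]):
        return tf.identity(loss)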
Example #4
def eval_loop():
  return tpu.repeat(self.eval_steps, tpu_eval_step, [])
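With an empty inputs list the step function takes no loop-carried arguments (tensors from an infeed queue, if one were given, would still be appended), so it typically returns only side-effecting ops such as metric updates. A sketch with hypothetical helpers:

def tpu_eval_step():
    # No loop-carried values; the body only produces update ops.
    features, labels = get_eval_batch()      # hypothetical input fn
    return update_metrics(features, labels)  # hypothetical op-returning helper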
Example #5
def iterate_on_tpu():
    return tpu.repeat(self._iterations_per_step, dequeueing_fn, [])
Example #6
def iterate_on_tpu():
  return tpu.repeat(iterations, run_fn, [initial_loop_values])
Example #7
def eval_loop():
    with tf.variable_scope("resnet", reuse=tf.AUTO_REUSE):
        return tpu.repeat(int(self.eval_steps), eval_step,
                          [_INITIAL_LOSS])
Example #8
def train_eval_loop():
    return tpu.repeat(self.max_train_iterations, train_eval_step,
                      [_INITIAL_LOSS])
Example #9
def train_loop():
    with tf.variable_scope("resnet", reuse=tf.AUTO_REUSE):
        return tpu.repeat(self.iterations, tpu_step, [_INITIAL_LOSS])
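The eval_loop and train_loop above build their loop bodies inside the same "resnet" variable scope with reuse=tf.AUTO_REUSE, so tf.get_variable calls made during training and evaluation resolve to one shared set of model variables. A minimal sketch of the pattern (shapes illustrative):

def build_logits(features):
    # Entered from both loops: AUTO_REUSE creates each variable on first use
    # and reuses the same variable on every later call.
    with tf.variable_scope("resnet", reuse=tf.AUTO_REUSE):
        w = tf.get_variable("w", shape=[1024, 10])  # illustrative shape
        return tf.matmul(features, w)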
Example #10
def tpu_loop():
  return tpu.repeat(
      num_steps, tpu_step, [_INITIAL_LOSS], infeed_queue=infeed_queue[0])
Example #11
def iterate_on_tpu():
  return tpu.repeat(iterations, dequeueing_fn, [])
Example #12
def iterate_on_tpu():
  return tpu.repeat(iterations, run_fn, [])
Example #13
def iterate_on_tpu():
  return tpu.repeat(self._iterations_per_step, dequeueing_fn, [])