  def testHandleDeletion(self):
    if not tf.test.is_built_with_cuda():
      return
    if not self.haveGpu0():
      return

    dtype = tf.float32

    config = tf.ConfigProto(log_device_placement=True)
    sess = tf.Session(config=config)

    # initial values live on CPU
    with tf.device("/cpu:0"):
      one = tf.constant(1, dtype=dtype)
      one_handle = sess.run(tf.get_session_handle(one))
      x_handle = sess.run(tf.get_session_handle(one))

    # addition lives on GPU
    with tf.device("/gpu:0"):
      add_holder1, add_tensor1 = tf.get_session_tensor(one_handle.handle, dtype)
      add_holder2, add_tensor2 = tf.get_session_tensor(one_handle.handle, dtype)
      add_op = tf.add(add_tensor1, add_tensor2)
      add_output = tf.get_session_handle(add_op)

    # add 1 to the tensor 20 times to exceed _DEAD_HANDLES_THRESHOLD
    for _ in range(20):
      x_handle = sess.run(add_output, feed_dict={add_holder1: one_handle.handle,
                                                 add_holder2: x_handle.handle})
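    # After 20 increments the handle should hold 1 + 20 = 21 (an added check;
    # the original snippet ends at the loop above).
    self.assertEqual(21, x_handle.eval())
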
  def testHandleAddGpu(self):
    # Simple addition test that catches a TensorFlow build with the wrong
    # compute capability.
    if not tf.test.is_built_with_cuda():
      return
    if not self.haveGpu0():
      return

    dt = tf.float32
    sess = tf.Session()

    with tf.device("/gpu:0"):
      val_op = tf.ones((), dtype=dt)
      handle_op = tf.get_session_handle(val_op)

      py_handle = sess.run(handle_op)
      tf_handle = py_handle.handle
      holder1, tensor1 = tf.get_session_tensor(tf_handle, dt)
      holder2, tensor2 = tf.get_session_tensor(tf_handle, dt)
      add_op = tf.add(tensor1, tensor2)
      result_handle_op = tf.get_session_handle(add_op)
      for _ in range(10):
        tf_result_handle = sess.run(result_handle_op,
                                    feed_dict={holder1: tf_handle,
                                               holder2: tf_handle})
        np_result = tf_result_handle.eval()
        if np_result < 1.9:
          print(np_result)
      self.assertEqual(np_result, 2)
Example #3
    def __init__(self, session, dtype, shape=None, name=None):
        """Create a PersistentTensor.

        Args:
            session: The session in which the tensor persists.
            dtype: The tensor data type.
            shape: Optional shape of the tensor.
            name: Optional name of the tensor.
        """
        with tf.name_scope(name, "PersistentTensor"):
            self.session = session
            self.assign_ph = tf.placeholder(dtype, shape=shape, name="assign_ph")
            self.assign_op = tf.get_session_handle(self.assign_ph, name="assign_op")

            # Need to create dummy handle in order to call
            # tf.get_session_tensor
            dummy_handle = session.run(
                self.assign_op,
                feed_dict={self.assign_ph: _np_zeros_for_shape(shape, dtype=dtype)},
            )
            try:
                self.handle_ph, self.value = tf.get_session_tensor(
                    dummy_handle.handle, dtype=dtype, name="value"
                )
            finally:
                dummy_handle.delete()
            self.value.set_shape(tf.TensorShape(shape))
            self.handles = {}
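
A minimal usage sketch for the constructor above (hedged: `import numpy as np` and `import tensorflow as tf` are assumed to be in scope, and only the attributes created in __init__ are used):

sess = tf.Session()
pt = PersistentTensor(sess, tf.float32, shape=(2, 2))
# Persist a value: running assign_op returns a TensorHandle.
handle = sess.run(pt.assign_op,
                  feed_dict={pt.assign_ph: np.zeros((2, 2), np.float32)})
# Read it back by feeding the handle string into the reader placeholder.
print(sess.run(pt.value, feed_dict={pt.handle_ph: handle.handle}))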
Example #4
  def testMultiDevices(self):
    with self.test_session() as sess:
      with tf.device("/gpu:0"):
        a = tf.constant(1.0)
        a_handle = sess.run(tf.get_session_handle(a))
      with tf.device("/cpu:0"):
        b = tf.constant(2.0)
        b_handle = sess.run(tf.get_session_handle(b))

      a_p, a_t = tf.get_session_tensor(tf.float32)
      b_p, b_t = tf.get_session_tensor(tf.float32)
      c = tf.add(a_t, b_t)
      c_handle = sess.run(
          tf.get_session_handle(c),
          feed_dict={a_p: a_handle.handle,
                     b_p: b_handle.handle})
      self.assertEqual(3.0, c_handle.eval())
Example #5
def test_tf_persistent(N, iters):

    arr = tf.ones(N, dtype=dtype)
    arr_handle_op = tf.get_session_handle(arr)
    sess = env.session

    arr_handle = sess.run(arr_handle_op)
    holder1, dynamic_arr1 = tf.get_session_tensor(arr_handle.handle,
                                                  dtype=dtype)
    holder2, dynamic_arr2 = tf.get_session_tensor(arr_handle.handle,
                                                  dtype=dtype)
    result = tf.get_session_handle(tf.mul(dynamic_arr1, dynamic_arr2))

    run_metadata = tf.RunMetadata()

    times = []
    for i in range(iters):
        start_time = time.time()
        # collect metadata from last step

        feeds = {holder1: arr_handle.handle, holder2: arr_handle.handle}
        if i == iters - 1:
            sess.run(
                result,
                feed_dict=feeds,
                options=tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE),
                run_metadata=run_metadata)
        else:
            sess.run(result, feed_dict=feeds)

        end_time = time.time()
        times.append(end_time - start_time)

    g = tf.get_default_graph()
    with open("tf_persistent_timeline.pbtxt", "w") as f:
        f.write(str(run_metadata.step_stats))

    trace = timeline.Timeline(step_stats=run_metadata.step_stats)
    with open("tf_persistent.ctf", "w") as trace_file:
        trace_file.write(trace.generate_chrome_trace_format())

    with open("tf_persistent.pbtxt", "w") as f:
        f.write(str(g.as_graph_def()))

    return np.asarray(times) * 10**6
Example #7
def mnist_model(train_data_flat, train_labels, x0):
  """Creates a simple linear model that evaluates cross-entropy loss and
  gradient on MNIST dataset. Mirrors 'linear' model from train-on-mnist.lua

  Result is a Python callable that accepts ITensor parameter vector and returns
  ITensor loss and gradient.
  """
  
  #  batchSize = 60000
  batchSize = 1
  x_size = 10
  x_offset = 512

  # reshape flat parameter vector into W and b parameter matrices
  x_placeholder, param = tf.get_session_tensor(x0.tf_handle, x0.dtype)
  W_flat = tf.slice(param, [0], [x_size*10])
  W = tf.reshape(W_flat, [x_size, 10])
  b_flat = tf.slice(param, [x_size*10], [10])
  b = tf.reshape(b_flat, [1, 10])

  # create model (note: the targets shape relies on x_size == 10, the number
  # of classes)
  data = tf.Variable(tf.zeros_initializer((batchSize, x_size), dtype=dtype))
  targets = tf.Variable(tf.zeros_initializer((batchSize, x_size), dtype=dtype))
  logits = tf.matmul(data, W) + b
  cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits, targets)

  # create loss and gradient ops
  cross_entropy_loss = tf.reduce_mean(cross_entropy)
  Wnorm = tf.reduce_sum(tf.square(W))
  bnorm = tf.reduce_sum(tf.square(b))
  loss = cross_entropy_loss + (bnorm + Wnorm)/2
  [grad] = tf.gradients(loss, [param])

  # get handle ops that will be used to initialize ITensors
  loss_handle_tensor = tf.get_session_handle(loss)
  grad_handle_tensor = tf.get_session_handle(grad)

  # initialize data and targets
  data_placeholder = tf.placeholder(dtype=dtype)
  data_init = data.assign(data_placeholder)
  labels_placeholder = tf.placeholder(shape=(batchSize,), dtype=tf.int32)
  labels_onehot = tf.one_hot(labels_placeholder - 1, 10, dtype=dtype)
  targets_init = targets.assign(labels_onehot)
  sess.run(data_init,
           feed_dict={data_placeholder:
                      train_data_flat[:batchSize, x_offset:x_offset+x_size]})
  sess.run(targets_init,
           feed_dict={labels_placeholder: train_labels[:batchSize]})

  # Create our callable that works on persistent Tensors
  def eval_model(x):
    loss_handle, grad_handle = sess.run([loss_handle_tensor,
                                         grad_handle_tensor],
                                        feed_dict={x_placeholder: x.tf_handle})
    return [env.handle_to_itensor(loss_handle),
            env.handle_to_itensor(grad_handle)]

  return eval_model
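
A hypothetical usage of the callable above (env, ITensor, and the numpy conversion helper come from the surrounding framework; `env.numpy_to_itensor` is an illustrative name, not a confirmed API):

x0 = env.numpy_to_itensor(np.zeros(10 * 10 + 10, dtype=np.float32))  # W is 10x10, b is 10
eval_model = mnist_model(train_data_flat, train_labels, x0)
loss, grad = eval_model(x0)  # ITensors backed by session handles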
Example #8
  def testHandlePlacement(self):
    with self.test_session() as sess:
      a = tf.constant(1.0)
      a_handle_op = tf.get_session_handle(a)
      b = tf.constant(2.0)
      b_handle_op = tf.get_session_handle(b)

      a_handle = sess.run(a_handle_op)
      b_handle = sess.run(b_handle_op)

      a_p, a_t = tf.get_session_tensor(a_handle.handle, tf.float32)
      b_p, b_t = tf.get_session_tensor(b_handle.handle, tf.float32)

      c = tf.add(a_t, b_t)
      c_handle = sess.run(
          tf.get_session_handle(c),
          feed_dict={a_p: a_handle.handle,
                     b_p: b_handle.handle})
      self.assertEqual(3.0, c_handle.eval())
    def testHandleForType(tf_dtype):
      for use_gpu in [True, False]:
        if use_gpu and not self.haveGpu0():
          continue
        with self.test_session(use_gpu=use_gpu) as sess:
          n = 3
          input_value = tf.ones((n, n), dtype=tf_dtype)
          handle1 = tf.get_session_handle(input_value)
          handle2 = tf.get_session_handle(input_value)
          py_handle1, py_handle2 = sess.run([handle1, handle2])
          holder1, tensor1 = tf.get_session_tensor(py_handle1.handle, tf_dtype)
          holder2, tensor2 = tf.get_session_tensor(py_handle2.handle, tf_dtype)
          tensor3 = tf.add(tensor1, tensor2)

          feed_dict = {holder1: py_handle1.handle, holder2: py_handle2.handle}
          tensor3_numpy = sess.run(tensor3, feed_dict=feed_dict)

          np_dtype = tf_dtype.as_numpy_dtype()
          self.assertAllEqual(tensor3_numpy, 2*np.ones((n, n), dtype=np_dtype))
Example #11
  def testHandleGC(self):
    with self.test_session() as sess:
      # initial values live on CPU
      with tf.device("/cpu:0"):
        one = tf.constant(1, dtype=tf.float32)
        one_handle = sess.run(tf.get_session_handle(one))
        x_handle = sess.run(tf.get_session_handle(one))

      # addition lives on GPU
      with tf.device("/gpu:0"):
        add_h1, add_t1 = tf.get_session_tensor(one_handle.handle, tf.float32)
        add_h2, add_t2 = tf.get_session_tensor(x_handle.handle, tf.float32)
        add_op = tf.add(add_t1, add_t2)
        add_output = tf.get_session_handle(add_op)

      # add 1 to tensor 20 times
      for _ in range(20):
        x_handle = sess.run(add_output,
                            feed_dict={add_h1: one_handle.handle,
                                       add_h2: x_handle.handle})
Example #13
  def testHandleBasic(self):
    with self.test_session() as sess:
      # Return a handle.
      a = tf.constant(10)
      b = tf.constant(5)
      c = tf.mul(a, b)
      h = tf.get_session_handle(c)
      h = sess.run(h)

      # Feed a tensor handle.
      f, x = tf.get_session_tensor(tf.int32)
      y = tf.mul(x, 10)
      self.assertEqual(500, sess.run(y, feed_dict={f: h.handle}))
Example #16
    def testHandleForLoop(self):
        with self.test_session() as sess:
            # Initialize a handle.
            a = tf.constant(0)
            h = tf.get_session_handle(a)
            h = sess.run(h)

            # Do some computation.
            f, x = tf.get_session_tensor(tf.int32)
            # Must define the loop body outside the loop.
            h_x = tf.get_session_handle(tf.add(x, 1))
            for _ in range(100):
                # This exercises garbage collection.
                h = sess.run(h_x, feed_dict={f: h.handle})

            self.assertEqual(100, h.eval())
def test_tf_persistent():
    tf.reset_default_graph()
    arr = tf.ones(N, dtype=dtype)
    arr_handle_op = tf.get_session_handle(tf.identity(arr))
    sess = tf.Session()
    arr_handle = sess.run(arr_handle_op)
    holder, dynamic_arr = tf.get_session_tensor(arr_handle.handle, dtype=dtype)
    result = tf.reduce_sum(dynamic_arr)
    # Fetch via tf.group() so no tensor value is copied back to the client.
    result_fetch = tf.group(result)

    times = []
    for i in range(iters):
        start_time = time.time()
        sess.run(result_fetch, feed_dict={holder: arr_handle.handle})
        end_time = time.time()
        times.append(end_time - start_time)

    return np.asarray(times)
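
A hypothetical driver for the benchmark above (the snippet reads N, iters, and dtype as globals):

N, iters, dtype = 1 << 20, 100, tf.float32
times = test_tf_persistent()  # per-step wall times in seconds
print("mean step time: %.1f us" % (times.mean() * 1e6))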
Example #20
  def testHandleWhileLoop(self):
    with self.test_session() as sess:
      # Initialize a handle.
      a = tf.constant(0)
      h = tf.get_session_handle(a)
      h = sess.run(h)

      # Do some computation.
      f, x = tf.get_session_tensor(tf.int32)
      b = tf.constant(100)
      p = tf.less(x, b)
      # Must define the loop body outside the loop.
      h_x = tf.get_session_handle(tf.add(x, 1))
      while True:
        rp, h = sess.run([p, h_x], feed_dict={f: h.handle})
        if not rp:
          break

      self.assertEqual(101, h.eval())
Example #21
    def testHandleCond(self):
        with self.test_session() as sess:
            # Return a handle and a value
            a = tf.constant(10)
            b = tf.constant(5)
            p = tf.less(a, b)
            c = tf.mul(a, b)
            h = tf.get_session_handle(c)
            p, h = sess.run([p, h])

            # Run by feeding a tensor handle.
            f, x = tf.get_session_tensor(tf.int32)
            if p:
                y = tf.mul(x, 10)
            else:
                y = tf.mul(x, 100)
            result = sess.run(y, feed_dict={f: h.handle})

            self.assertEqual(5000, result)
Example #23
  def testPlaceholderIssue(self):
    # Test for https://github.com/tensorflow/tensorflow/issues/2587
    if not tf.test.is_built_with_cuda():
      return
    if not self.haveGpu0():
      return

    config = tf.ConfigProto()
    with self.test_session(config=config) as sess:
      dtype = tf.float32
      for device in ["cpu:0", "gpu:0"]:
        with tf.device(device):
          a_const = tf.constant(1, dtype)

          a_handle = sess.run(tf.get_session_handle(a_const))
          b_holder, b_tensor = tf.get_session_tensor(a_handle.handle, dtype)
          b_numpy = sess.run(b_tensor, feed_dict={b_holder: a_handle.handle})
          self.assertEqual(1, b_numpy)
Example #25
  def testHandleMover(self):
    with self.test_session() as sess:
      # Return a handle.
      a = tf.constant(10)
      b = tf.constant(5)
      c = tf.mul(a, b)
      h = tf.get_session_handle(c)
      h = sess.run(h)

      # Feed a tensor handle.
      f, x = tf.get_session_tensor(tf.int32)
      y = tf.mul(x, 10)
      self.assertEqual(500, sess.run(y, feed_dict={f: h.handle}))

      # Feed another tensor handle.
      with tf.device("/gpu:0"):
        a = tf.constant(10)
        h = tf.get_session_handle(a)
        h = sess.run(h)
        self.assertEqual(100, sess.run(y, feed_dict={f: h.handle}))
Example #27
tf.space_to_batch_nd()
tf.batch_to_space()
tf.space_to_batch()

tf.depth_to_space()
tf.space_to_depth()

tf.dtypes

tf.get_collection()
tf.get_collection_ref()
tf.get_default_session()
tf.get_local_variable
tf.get_seed()
tf.get_session_handle()
tf.get_session_tensor()
tf.get_default_graph()
tf.get_summary_op()
tf.get_variable()
tf.get_variable_scope()
tf.set_random_seed()
tf.serialize_tensor()
tf.save_v2()
tf.scalar_mul()
tf.scan()
tf.scatter_add()
tf.scatter_div()
tf.scatter_mul()
tf.scatter_nd()
tf.scatter_nd_add()
tf.scatter_nd_non_aliasing_add()
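
Since tf.get_session_handle() and tf.get_session_tensor() appear in the list above, here is a minimal round trip for the pair (a sketch for TF 1.x-era graph mode; the values are illustrative):

import tensorflow as tf

sess = tf.Session()
# Persist a tensor in the session; sess.run returns a TensorHandle.
h = sess.run(tf.get_session_handle(tf.constant(7)))
# Build a placeholder/tensor pair that reads back a handle of this dtype.
p, t = tf.get_session_tensor(h.handle, tf.int32)
print(sess.run(t * 3, feed_dict={p: h.handle}))  # prints 21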