Code example #1
File: convnet.py  Project: Crazyonxh/tensorflow
def train_mnist_single_machine(data_dir, num_epochs, use_fake_data=False):
  """Train a ConvNet on MNIST.

  Args:
    data_dir: string. Directory to read MNIST examples from.
    num_epochs: int. Number of passes to make over the training set.
    use_fake_data: bool. If True, generate a synthetic dataset.

  Returns:
    accuracy of model on the final minibatch of training data.
  """
  # Load a dataset.
  tf.logging.info("Loading MNIST into memory.")
  examples, labels = mnist.load_mnist(
      data_dir,
      num_epochs=num_epochs,
      batch_size=128,
      use_fake_data=use_fake_data,
      flatten_images=False)

  # Build a ConvNet.
  loss, statistics, layer_collection = build_model(
      examples, labels, num_labels=10)

  # Fit model.
  return minimize_loss_single_machine(loss, statistics, layer_collection)
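
A minimal usage sketch for the single-machine trainer above. The data directory path and epoch count are illustrative placeholders, and use_fake_data=True keeps the run self-contained; it assumes convnet.py's module-level imports (tf, mnist, lc) are in scope.

if __name__ == "__main__":
  tf.logging.set_verbosity(tf.logging.INFO)
  # Synthetic data avoids downloading MNIST; the path below is a placeholder.
  final_accuracy = train_mnist_single_machine(
      data_dir="/tmp/mnist_data",
      num_epochs=1,
      use_fake_data=True)
  print("Final minibatch accuracy: %f" % final_accuracy)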
Code example #2
File: convnet.py  Project: sgcm520/tensorflow2
def train_mnist_distributed_sync_replicas(task_id,
                                          is_chief,
                                          num_worker_tasks,
                                          num_ps_tasks,
                                          master,
                                          data_dir,
                                          num_epochs,
                                          op_strategy,
                                          use_fake_data=False):
  """Train a ConvNet on MNIST using Sync replicas optimizer.

  Args:
    task_id: int. Integer in [0, num_worker_tasks). ID for this worker.
    is_chief: `boolean`, `True` if the worker is chief worker.
    num_worker_tasks: int. Number of workers in this distributed training setup.
    num_ps_tasks: int. Number of parameter servers holding variables.
    master: string. IP and port of TensorFlow runtime process.
    data_dir: string. Directory to read MNIST examples from.
    num_epochs: int. Number of passes to make over the training set.
    op_strategy: `string`. Strategy for running the covariance and inverse
      update ops. If op_strategy == `chief_worker`, the covariance and
      inverse update ops are run on the chief worker; otherwise they are run
      on dedicated workers.
    use_fake_data: bool. If True, generate a synthetic dataset.

  Returns:
    accuracy of model on the final minibatch of training data.

  Raises:
    ValueError: If `op_strategy` not in ["chief_worker", "dedicated_workers"].
  """
  # Load a dataset.
  tf.logging.info("Loading MNIST into memory.")
  examples, labels = mnist.load_mnist(
      data_dir,
      num_epochs=num_epochs,
      batch_size=128,
      use_fake_data=use_fake_data,
      flatten_images=False)

  # Build a ConvNet.
  layer_collection = lc.LayerCollection()
  with tf.device(tf.train.replica_device_setter(num_ps_tasks)):
    loss, accuracy = build_model(
        examples, labels, num_labels=10, layer_collection=layer_collection)

  # Fit model.
  checkpoint_dir = None if data_dir is None else os.path.join(data_dir, "kfac")
  if op_strategy == "chief_worker":
    return distributed_grads_only_and_ops_chief_worker(
        task_id, is_chief, num_worker_tasks, num_ps_tasks, master,
        checkpoint_dir, loss, accuracy, layer_collection)
  elif op_strategy == "dedicated_workers":
    return distributed_grads_and_ops_dedicated_workers(
        task_id, is_chief, num_worker_tasks, num_ps_tasks, master,
        checkpoint_dir, loss, accuracy, layer_collection)
  else:
    raise ValueError("Only supported op strategies are: {}, {}".format(
        "chief_worker", "dedicated_workers"))
Code example #3
File: convnet.py  Project: BhaskarNallani/tensorflow
def train_mnist_single_machine(data_dir,
                               num_epochs,
                               use_fake_data=False,
                               device="/gpu:0"):
  """Train a ConvNet on MNIST.

  Args:
    data_dir: string. Directory to read MNIST examples from.
    num_epochs: int. Number of passes to make over the training set.
    use_fake_data: bool. If True, generate a synthetic dataset.
    device: string. Either '/cpu:0' or '/gpu:0'. The covariance and inverse
      update ops are run on this device.

  Returns:
    accuracy of model on the final minibatch of training data.
  """
  # Load a dataset.
  tf.logging.info("Loading MNIST into memory.")
  examples, labels = mnist.load_mnist(
      data_dir,
      num_epochs=num_epochs,
      batch_size=128,
      use_fake_data=use_fake_data,
      flatten_images=False)

  # Build a ConvNet.
  layer_collection = lc.LayerCollection()
  loss, accuracy = build_model(
      examples, labels, num_labels=10, layer_collection=layer_collection)

  # Fit model.
  return minimize_loss_single_machine(
      loss, accuracy, layer_collection, device=device)
Code example #4
File: mlp.py  Project: BhaskarNallani/tensorflow
def train_mnist(data_dir, num_epochs, use_fake_data=False):
  """Train an MLP on MNIST.

  Args:
    data_dir: string. Directory to read MNIST examples from.
    num_epochs: int. Number of passes to make over the training set.
    use_fake_data: bool. If True, generate a synthetic dataset.

  Returns:
    accuracy of model on the final minibatch of training data.
  """
  # Load a dataset.
  tf.logging.info("Loading MNIST into memory.")
  examples, labels = mnist.load_mnist(
      data_dir,
      num_epochs=num_epochs,
      batch_size=64,
      flatten_images=True,
      use_fake_data=use_fake_data)

  # Build an MLP. The model's layers will be added to the LayerCollection.
  tf.logging.info("Building model.")
  layer_collection = lc.LayerCollection()
  loss, accuracy = build_model(examples, labels, 10, layer_collection)

  # Fit model.
  return minimize(loss, accuracy, layer_collection, 1)
Code example #5
 def input_fn():
     tf.logging.info("Loading MNIST into memory.")
     return mnist.load_mnist(data_dir,
                             num_epochs=num_epochs,
                             batch_size=64,
                             flatten_images=True,
                             use_fake_data=use_fake_data)
Code example #6
def train_mnist_single_machine(data_dir, num_epochs, use_fake_data=False):
    """Train a ConvNet on MNIST.

  Args:
    data_dir: string. Directory to read MNIST examples from.
    num_epochs: int. Number of passes to make over the training set.
    use_fake_data: bool. If True, generate a synthetic dataset.

  Returns:
    accuracy of model on the final minibatch of training data.
  """
    # Load a dataset.
    tf.logging.info("Loading MNIST into memory.")
    examples, labels = mnist.load_mnist(data_dir,
                                        num_epochs=num_epochs,
                                        batch_size=128,
                                        use_fake_data=use_fake_data,
                                        flatten_images=False)

    # Build a ConvNet.
    layer_collection = lc.LayerCollection()
    loss, accuracy = build_model(examples,
                                 labels,
                                 num_labels=10,
                                 layer_collection=layer_collection)

    # Fit model.
    return minimize_loss_single_machine(loss, accuracy, layer_collection)
Code example #7
def train_mnist(data_dir, num_epochs, use_fake_data=False):
  """Train an MLP on MNIST.

  Args:
    data_dir: string. Directory to read MNIST examples from.
    num_epochs: int. Number of passes to make over the training set.
    use_fake_data: bool. If True, generate a synthetic dataset.

  Returns:
    accuracy of model on the final minibatch of training data.
  """
  # Load a dataset.
  tf.logging.info("Loading MNIST into memory.")
  examples, labels = mnist.load_mnist(
      data_dir,
      num_epochs=num_epochs,
      batch_size=64,
      flatten_images=True,
      use_fake_data=use_fake_data)

  # Build an MLP. The model's layers will be added to the LayerCollection.
  tf.logging.info("Building model.")
  layer_collection = lc.LayerCollection()
  loss, accuracy = build_model(examples, labels, 10, layer_collection)

  # Fit model.
  return minimize(loss, accuracy, layer_collection)
Code example #8
File: mlp.py  Project: BhaskarNallani/tensorflow
 def input_fn():
   tf.logging.info("Loading MNIST into memory.")
   return mnist.load_mnist(
       data_dir,
       num_epochs=num_epochs,
       batch_size=64,
       flatten_images=True,
       use_fake_data=use_fake_data)
Code example #9
def train_mnist_multitower(data_dir,
                           num_epochs,
                           num_towers,
                           use_fake_data=True):
    """Train a ConvNet on MNIST.

  Args:
    data_dir: string. Directory to read MNIST examples from.
    num_epochs: int. Number of passes to make over the training set.
    num_towers: int. Number of CPUs to split inference across.
    use_fake_data: bool. If True, generate a synthetic dataset.

  Returns:
    accuracy of model on the final minibatch of training data.
  """
    # Load a dataset.
    tf.logging.info("Loading MNIST into memory.")
    tower_batch_size = 128
    batch_size = tower_batch_size * num_towers
    tf.logging.info(
        ("Loading MNIST into memory. Using batch_size = %d = %d towers * %d "
         "tower batch size.") % (batch_size, num_towers, tower_batch_size))
    examples, labels = mnist.load_mnist(data_dir,
                                        num_epochs=num_epochs,
                                        batch_size=batch_size,
                                        use_fake_data=use_fake_data,
                                        flatten_images=False)

    # Split minibatch across towers.
    examples = tf.split(examples, num_towers)
    labels = tf.split(labels, num_towers)

    # Build a ConvNet. Each tower's layers will be added to the LayerCollection.
    layer_collection = lc.LayerCollection()
    tower_results = []
    for tower_id in range(num_towers):
        with tf.device("/cpu:%d" % tower_id):
            with tf.name_scope("tower%d" % tower_id):
                with tf.variable_scope(tf.get_variable_scope(),
                                       reuse=(tower_id > 0)):
                    tf.logging.info("Building tower %d." % tower_id)
                    tower_results.append(
                        build_model(examples[tower_id], labels[tower_id], 10,
                                    layer_collection))
    losses, accuracies = zip(*tower_results)

    # Average across towers.
    loss = tf.reduce_mean(losses)
    accuracy = tf.reduce_mean(accuracies)

    # Fit model.
    session_config = tf.ConfigProto(allow_soft_placement=False,
                                    device_count={"CPU": num_towers})
    return minimize_loss_single_machine(loss,
                                        accuracy,
                                        layer_collection,
                                        session_config=session_config)
Code example #10
File: mlp.py  Project: BhaskarNallani/tensorflow
def train_mnist_multitower(data_dir,
                           num_epochs,
                           num_towers,
                           use_fake_data=False):
  """Train an MLP on MNIST, splitting the minibatch across multiple towers.

  Args:
    data_dir: string. Directory to read MNIST examples from.
    num_epochs: int. Number of passes to make over the training set.
    num_towers: int. Number of CPUs to split minibatch across.
    use_fake_data: bool. If True, generate a synthetic dataset.

  Returns:
    accuracy of model on the final minibatch of training data.
  """
  # Load a dataset.
  tower_batch_size = 64
  batch_size = tower_batch_size * num_towers
  tf.logging.info(
      ("Loading MNIST into memory. Using batch_size = %d = %d towers * %d "
       "tower batch size.") % (batch_size, num_towers, tower_batch_size))
  examples, labels = mnist.load_mnist(
      data_dir,
      num_epochs=num_epochs,
      batch_size=batch_size,
      flatten_images=True,
      use_fake_data=use_fake_data)

  # Split minibatch across towers.
  examples = tf.split(examples, num_towers)
  labels = tf.split(labels, num_towers)

  # Build an MLP. Each tower's layers will be added to the LayerCollection.
  layer_collection = lc.LayerCollection()
  tower_results = []
  for tower_id in range(num_towers):
    with tf.device("/cpu:%d" % tower_id):
      with tf.name_scope("tower%d" % tower_id):
        with tf.variable_scope(tf.get_variable_scope(), reuse=(tower_id > 0)):
          tf.logging.info("Building tower %d." % tower_id)
          tower_results.append(
              build_model(examples[tower_id], labels[tower_id], 10,
                          layer_collection))
  losses, accuracies = zip(*tower_results)

  # Average across towers.
  loss = tf.reduce_mean(losses)
  accuracy = tf.reduce_mean(accuracies)

  # Fit model.
  session_config = tf.ConfigProto(
      allow_soft_placement=False, device_count={
          "CPU": num_towers
      })
  return minimize(
      loss, accuracy, layer_collection, num_towers,
      session_config=session_config)
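
A hedged usage sketch for the multi-tower MLP trainer above. It assumes the same mlp.py module context; num_towers=2 gives a global batch of 128 (2 towers * 64 per tower), and the device_count setting inside the function maps the towers onto two CPU devices. Paths and epoch counts are placeholders.

if __name__ == "__main__":
  tf.logging.set_verbosity(tf.logging.INFO)
  final_accuracy = train_mnist_multitower(
      data_dir="/tmp/mnist_data",
      num_epochs=1,
      num_towers=2,
      use_fake_data=True)
  print("Final minibatch accuracy: %f" % final_accuracy)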
Code example #11
  def testValues(self):
    """Ensure values are in their expected range."""
    with tf.Graph().as_default():
      examples, labels = mnist.load_mnist(
          data_dir=None, num_epochs=1, batch_size=64, use_fake_data=True)

      with self.test_session() as sess:
        examples_, labels_ = sess.run([examples, labels])
        self.assertTrue(np.all((0 <= examples_) & (examples_ < 1)))
        self.assertTrue(np.all((0 <= labels_) & (labels_ < 10)))
Code example #12
    def testValues(self):
        """Ensure values are in their expected range."""
        with tf.Graph().as_default():
            examples, labels = mnist.load_mnist(data_dir=None,
                                                num_epochs=1,
                                                batch_size=64,
                                                use_fake_data=True)

            with self.test_session() as sess:
                examples_, labels_ = sess.run([examples, labels])
                self.assertTrue(np.all((0 <= examples_) & (examples_ < 1)))
                self.assertTrue(np.all((0 <= labels_) & (labels_ < 10)))
Code example #13
    def testNotFlattenedShapes(self):
        """Ensure non-flattened images are their appropriate shape."""
        with tf.Graph().as_default():
            examples, labels = mnist.load_mnist(data_dir=None,
                                                num_epochs=1,
                                                batch_size=64,
                                                flatten_images=False,
                                                use_fake_data=True)

            with self.test_session() as sess:
                examples_, labels_ = sess.run([examples, labels])
                self.assertEqual(examples_.shape, (64, 28, 28, 1))
                self.assertEqual(labels_.shape, (64, ))
Code example #14
  def testNotFlattenedShapes(self):
    """Ensure non-flattened images are their appropriate shape."""
    with tf.Graph().as_default():
      examples, labels = mnist.load_mnist(
          data_dir=None,
          num_epochs=1,
          batch_size=64,
          flatten_images=False,
          use_fake_data=True)

      with self.test_session() as sess:
        examples_, labels_ = sess.run([examples, labels])
        self.assertEqual(examples_.shape, (64, 28, 28, 1))
        self.assertEqual(labels_.shape, (64,))
Code example #15
def train_mnist_distributed(task_id,
                            num_worker_tasks,
                            num_ps_tasks,
                            master,
                            data_dir,
                            num_epochs,
                            use_fake_data=False):
    """Train a ConvNet on MNIST.

  Args:
    task_id: int. Integer in [0, num_worker_tasks). ID for this worker.
    num_worker_tasks: int. Number of workers in this distributed training setup.
    num_ps_tasks: int. Number of parameter servers holding variables.
    master: string. IP and port of TensorFlow runtime process.
    data_dir: string. Directory to read MNIST examples from.
    num_epochs: int. Number of passes to make over the training set.
    use_fake_data: bool. If True, generate a synthetic dataset.

  Returns:
    accuracy of model on the final minibatch of training data.
  """
    # Load a dataset.
    tf.logging.info("Loading MNIST into memory.")
    examples, labels = mnist.load_mnist(data_dir,
                                        num_epochs=num_epochs,
                                        batch_size=128,
                                        use_fake_data=use_fake_data,
                                        flatten_images=False)

    # Build a ConvNet.
    layer_collection = lc.LayerCollection()
    with tf.device(tf.train.replica_device_setter(num_ps_tasks)):
        loss, accuracy = build_model(examples,
                                     labels,
                                     num_labels=10,
                                     layer_collection=layer_collection)

    # Fit model.
    checkpoint_dir = None if data_dir is None else os.path.join(
        data_dir, "kfac")
    return minimize_loss_distributed(task_id, num_worker_tasks, num_ps_tasks,
                                     master, checkpoint_dir, loss, accuracy,
                                     layer_collection)
Code example #16
File: convnet.py  Project: AbhinavJain13/tensorflow
def train_mnist_distributed(task_id,
                            num_worker_tasks,
                            num_ps_tasks,
                            master,
                            data_dir,
                            num_epochs,
                            use_fake_data=False):
  """Train a ConvNet on MNIST.

  Args:
    task_id: int. Integer in [0, num_worker_tasks). ID for this worker.
    num_worker_tasks: int. Number of workers in this distributed training setup.
    num_ps_tasks: int. Number of parameter servers holding variables.
    master: string. IP and port of TensorFlow runtime process.
    data_dir: string. Directory to read MNIST examples from.
    num_epochs: int. Number of passes to make over the training set.
    use_fake_data: bool. If True, generate a synthetic dataset.

  Returns:
    accuracy of model on the final minibatch of training data.
  """
  # Load a dataset.
  tf.logging.info("Loading MNIST into memory.")
  examples, labels = mnist.load_mnist(
      data_dir,
      num_epochs=num_epochs,
      batch_size=128,
      use_fake_data=use_fake_data,
      flatten_images=False)

  # Build a ConvNet.
  layer_collection = lc.LayerCollection()
  with tf.device(tf.train.replica_device_setter(num_ps_tasks)):
    loss, accuracy = build_model(
        examples, labels, num_labels=10, layer_collection=layer_collection)

  # Fit model.
  checkpoint_dir = None if data_dir is None else os.path.join(data_dir, "kfac")
  return minimize_loss_distributed(task_id, num_worker_tasks, num_ps_tasks,
                                   master, checkpoint_dir, loss, accuracy,
                                   layer_collection)
Code example #17
File: mlp.py  Project: codemogroup/Interview-Bot
def train_mnist(data_dir, num_epochs, use_fake_data=False):
    """Train an MLP on MNIST.

  Args:
    data_dir: string. Directory to read MNIST examples from.
    num_epochs: int. Number of passes to make over the training set.
    use_fake_data: bool. If True, generate a synthetic dataset.

  Returns:
    accuracy of model on the final minibatch of training data.
  """
    # Load a dataset.
    tf.logging.info("Loading MNIST into memory.")
    examples, labels = mnist.load_mnist(data_dir,
                                        num_epochs=num_epochs,
                                        batch_size=64,
                                        flatten_images=True,
                                        use_fake_data=use_fake_data)

    # Build an MLP. For each layer, we'll keep track of the preactivations,
    # activations, weights, and bias.
    tf.logging.info("Building model.")
    pre0, act0, params0 = fc_layer(layer_id=0,
                                   inputs=examples,
                                   output_size=128)
    pre1, act1, params1 = fc_layer(layer_id=1, inputs=act0, output_size=64)
    pre2, act2, params2 = fc_layer(layer_id=2, inputs=act1, output_size=32)
    logits, _, params3 = fc_layer(layer_id=3, inputs=act2, output_size=10)
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
                                                       logits=logits))
    accuracy = tf.reduce_mean(
        tf.cast(tf.equal(labels, tf.argmax(logits, axis=1)), dtype=tf.float32))

    # Register parameters. K-FAC needs to know about the inputs, outputs, and
    # parameters of each layer and the logits powering the posterior probability
    # over classes.
    tf.logging.info("Building KFAC Optimizer.")
    layer_collection = lc.LayerCollection()
    layer_collection.register_fully_connected(params0, examples, pre0)
    layer_collection.register_fully_connected(params1, act0, pre1)
    layer_collection.register_fully_connected(params2, act1, pre2)
    layer_collection.register_fully_connected(params3, act2, logits)
    layer_collection.register_categorical_predictive_distribution(logits)

    # Train with K-FAC. We'll use a decreasing learning rate that's cut in 1/2
    # every 10k iterations.
    global_step = tf.train.get_or_create_global_step()
    optimizer = opt.KfacOptimizer(learning_rate=tf.train.exponential_decay(
        0.00002, global_step, 10000, 0.5, staircase=True),
                                  cov_ema_decay=0.95,
                                  damping=0.0001,
                                  layer_collection=layer_collection,
                                  momentum=0.99)
    train_op = optimizer.minimize(loss, global_step=global_step)

    tf.logging.info("Starting training.")
    with tf.train.MonitoredTrainingSession() as sess:
        while not sess.should_stop():
            # K-FAC has 3 primary ops,
            # - train_op: Update the weights with the minibatch's gradient.
            # - cov_update_op: Update statistics used for building K-FAC's
            #   preconditioner matrix.
            # - inv_update_op: Update preconditioner matrix using statistics.
            #
            # The first two of these are cheap and should be run with each
            # step. The latter is more expensive and should be run only every
            # ~100 iterations.
            global_step_, loss_, accuracy_, _, _ = sess.run([
                global_step, loss, accuracy, train_op, optimizer.cov_update_op
            ])

            if global_step_ % 100 == 0:
                sess.run(optimizer.inv_update_op)

            if global_step_ % 100 == 0:
                tf.logging.info("global_step: %d | loss: %f | accuracy: %f",
                                global_step_, loss_, accuracy_)

    return accuracy_
Code example #18
File: convnet.py  Project: sgcm520/tensorflow2
def train_mnist_multitower(data_dir, num_epochs, num_towers,
                           use_fake_data=True, devices=None):
  """Train a ConvNet on MNIST.

  Training data is split equally among the towers. Each tower computes loss on
  its own batch of data and the loss is aggregated on the CPU. The model
  variables are placed on the first tower. The covariance and inverse update
  ops and variables are placed on GPUs in a round-robin manner.

  Args:
    data_dir: string. Directory to read MNIST examples from.
    num_epochs: int. Number of passes to make over the training set.
    num_towers: int. Number of CPUs to split inference across.
    use_fake_data: bool. If True, generate a synthetic dataset.
    devices: list of strings, or None. CPU or GPU devices on which the
      covariance and inverse update ops are run.

  Returns:
    accuracy of model on the final minibatch of training data.
  """
  if devices:
    device_count = {"GPU": num_towers}
  else:
    device_count = {"CPU": num_towers}

  devices = devices or [
      "/cpu:{}".format(tower_id) for tower_id in range(num_towers)
  ]
  # Load a dataset.
  tf.logging.info("Loading MNIST into memory.")
  tower_batch_size = 128
  batch_size = tower_batch_size * num_towers
  tf.logging.info(
      ("Loading MNIST into memory. Using batch_size = %d = %d towers * %d "
       "tower batch size.") % (batch_size, num_towers, tower_batch_size))
  examples, labels = mnist.load_mnist(
      data_dir,
      num_epochs=num_epochs,
      batch_size=batch_size,
      use_fake_data=use_fake_data,
      flatten_images=False)

  # Split minibatch across towers.
  examples = tf.split(examples, num_towers)
  labels = tf.split(labels, num_towers)

  # Build a ConvNet. Each tower's layers will be added to the LayerCollection.
  layer_collection = lc.LayerCollection()
  tower_results = []
  for tower_id in range(num_towers):
    with tf.device(devices[tower_id]):
      with tf.name_scope("tower%d" % tower_id):
        with tf.variable_scope(tf.get_variable_scope(), reuse=(tower_id > 0)):
          tf.logging.info("Building tower %d." % tower_id)
          tower_results.append(
              build_model(examples[tower_id], labels[tower_id], 10,
                          layer_collection))
  losses, accuracies = zip(*tower_results)

  # Average across towers.
  loss = tf.reduce_mean(losses)
  accuracy = tf.reduce_mean(accuracies)

  # Fit model.

  session_config = tf.ConfigProto(
      allow_soft_placement=False,
      device_count=device_count,
  )

  g_step = tf.train.get_or_create_global_step()
  optimizer = opt.KfacOptimizer(
      learning_rate=0.0001,
      cov_ema_decay=0.95,
      damping=0.001,
      layer_collection=layer_collection,
      placement_strategy="round_robin",
      cov_devices=devices,
      inv_devices=devices,
      momentum=0.9)
  (cov_update_thunks,
   inv_update_thunks) = optimizer.make_vars_and_create_op_thunks()

  def make_update_op(update_thunks):
    update_ops = [thunk() for thunk in update_thunks]
    return tf.group(*update_ops)

  cov_update_op = make_update_op(cov_update_thunks)
  with tf.control_dependencies([cov_update_op]):
    inverse_op = tf.cond(
        tf.equal(tf.mod(g_step, _INVERT_EVERY), 0),
        lambda: make_update_op(inv_update_thunks), tf.no_op)
    with tf.control_dependencies([inverse_op]):
      train_op = optimizer.minimize(loss, global_step=g_step)

  tf.logging.info("Starting training.")
  with tf.train.MonitoredTrainingSession(config=session_config) as sess:
    while not sess.should_stop():
      global_step_, loss_, accuracy_, _ = sess.run(
          [g_step, loss, accuracy, train_op])

      if global_step_ % _INVERT_EVERY == 0:
        tf.logging.info("global_step: %d | loss: %f | accuracy: %s",
                        global_step_, loss_, accuracy_)