Code example #1
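This benchmark method measures eager-mode training throughput for the linear-regression example. It presumably sits inside a tf.test.Benchmark subclass and relies on module-level imports not shown here (time, tensorflow as tf, tensorflow.contrib.eager as tfe, and the linear_regression example module), plus a device() helper that returns a GPU or CPU device string; those surrounding pieces are assumptions, not part of this snippet.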
  def benchmarkEagerLinearRegression(self):
    num_epochs = 10
    num_batches = 200
    batch_size = 64
    dataset = linear_regression.synthetic_dataset(
        w=tf.random_uniform([3, 1]),
        b=tf.random_uniform([1]),
        noise_level=0.01,
        batch_size=batch_size,
        num_batches=num_batches)
    burn_in_dataset = dataset.take(10)

    model = linear_regression.LinearModel()

    with tf.device(device()):
      optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)

      # Perform burn-in.
      linear_regression.fit(model, burn_in_dataset, optimizer)

      start_time = time.time()
      for _ in range(num_epochs):
        linear_regression.fit(model, dataset, optimizer)
      wall_time = time.time() - start_time

      examples_per_sec = num_epochs * num_batches * batch_size / wall_time
      self.report_benchmark(
          name="eager_train_%s" % ("gpu" if tfe.num_gpus() > 0 else "cpu"),
          iters=num_epochs * num_batches,
          extras={"examples_per_sec": examples_per_sec},
          wall_time=wall_time)
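Both this benchmark and the test in code example #3 call a device() helper that is not shown in these snippets. A minimal sketch of what it might look like, assuming tfe refers to tensorflow.contrib.eager:

def device():
  # Prefer a GPU if eager execution can see one; otherwise fall back to CPU.
  return "/device:GPU:0" if tfe.num_gpus() > 0 else "/device:CPU:0"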
Code example #2
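The graph-mode counterpart builds the same kind of synthetic dataset behind an initializable iterator, wires the mean-square loss and a GradientDescentOptimizer minimize op into a graph, and drives the epochs inside a tf.Session. As above, the module-level imports and the tf.test.Benchmark harness are assumed rather than shown.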
  def benchmarkGraphLinearRegression(self):
    num_epochs = 10
    num_batches = 200
    batch_size = 64
    dataset = linear_regression.synthetic_dataset_helper(
        w=tf.random_uniform([3, 1]),
        b=tf.random_uniform([1]),
        num_features=3,
        noise_level=0.01,
        batch_size=batch_size,
        num_batches=num_batches)
    iterator = tf.compat.v1.data.make_initializable_iterator(dataset)
    x, y = iterator.get_next()

    model = linear_regression.LinearModel()

    if tf.test.is_gpu_available():
      use_gpu = True
      device = "/device:GPU:0"
    else:
      use_gpu = False
      device = "/device:CPU:0"

    with tf.device(device):
      loss = linear_regression.mean_square_loss(model, x, y)
      optimization_step = tf.train.GradientDescentOptimizer(
          learning_rate=0.1).minimize(loss)

    with tf.Session() as sess:
      sess.run(tf.global_variables_initializer())

      def train(num_epochs):
        for _ in range(num_epochs):
          sess.run(iterator.initializer)
          try:
            while True:
              _, _ = sess.run([optimization_step, loss])
          except tf.errors.OutOfRangeError:
            pass

      # Warmup: a single epoch.
      train(1)

      start_time = time.time()
      train(num_epochs)
      wall_time = time.time() - start_time

      examples_per_sec = num_epochs * num_batches * batch_size / wall_time
      self.report_benchmark(
          name="graph_train_%s" %
          ("gpu" if use_gpu else "cpu"),
          iters=num_epochs * num_batches,
          extras={"examples_per_sec": examples_per_sec},
          wall_time=wall_time)
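Note that a single warmup epoch runs before the clock starts, so graph construction and first-run overhead are excluded from wall_time; this mirrors the burn-in pass in the eager benchmark, and examples_per_sec is computed the same way in both cases.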
Code example #3
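This unit test fits the model on noiseless synthetic data generated from known parameters and then checks that the learned weight matrix and bias recover the true values within a 1% relative tolerance, and that TensorBoard event files were written to the log directory passed via logdir.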
  def testLinearRegression(self):
    true_w = [[1.0], [-0.5], [2.0]]
    true_b = [1.0]

    model = linear_regression.LinearModel()
    dataset = linear_regression.synthetic_dataset(
        true_w, true_b, noise_level=0., batch_size=64, num_batches=40)

    with tf.device(device()):
      optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
      linear_regression.fit(model, dataset, optimizer, logdir=self._tmp_logdir)

      self.assertAllClose(true_w, model.variables[0].numpy(), rtol=1e-2)
      self.assertAllClose(true_b, model.variables[1].numpy(), rtol=1e-2)
      self.assertTrue(glob.glob(os.path.join(self._tmp_logdir, "events.out.*")))
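The test writes summaries to self._tmp_logdir, which is not created in this snippet. A plausible setUp/tearDown pair, assuming the test class is named LinearRegressionTest (a hypothetical name here) and that tempfile and shutil are imported:

def setUp(self):
  # Hypothetical fixture: create a fresh temporary log directory per test.
  super(LinearRegressionTest, self).setUp()
  self._tmp_logdir = tempfile.mkdtemp()

def tearDown(self):
  # Remove the temporary log directory so test runs stay isolated.
  shutil.rmtree(self._tmp_logdir)
  super(LinearRegressionTest, self).tearDown()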