def _test_complete_flow(self,
                            train_input_fn,
                            eval_input_fn,
                            predict_input_fn,
                            prediction_size,
                            lr_decay=False):
        def make_opt():
            gstep = training_util.get_or_create_global_step()
            lr = learning_rate_decay.exponential_decay(1.0, gstep, 10, 0.9)
            return training.GradientDescentOptimizer(lr)

        gopt = make_opt if lr_decay else training.GradientDescentOptimizer(1.0)
        dopt = make_opt if lr_decay else training.GradientDescentOptimizer(1.0)
        est = estimator.GANEstimator(
            generator_fn=generator_fn,
            discriminator_fn=discriminator_fn,
            generator_loss_fn=losses.wasserstein_generator_loss,
            discriminator_loss_fn=losses.wasserstein_discriminator_loss,
            generator_optimizer=gopt,
            discriminator_optimizer=dopt,
            model_dir=self._model_dir)

        # TRAIN
        num_steps = 10
        est.train(train_input_fn, steps=num_steps)

        # EVALUATE
        scores = est.evaluate(eval_input_fn)
        self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
        self.assertIn('loss', six.iterkeys(scores))

        # PREDICT
        predictions = np.array([x for x in est.predict(predict_input_fn)])

        self.assertAllEqual(prediction_size, predictions.shape)
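# Note: the input functions passed to _test_complete_flow above are defined
# elsewhere in the original test file. The sketch below shows compatible
# functions; the batch size, feature width, and prediction_size value are
# illustrative assumptions rather than values from the original test.
import numpy as np

def train_input_fn():
    noise = np.zeros([16, 4], dtype=np.float32)      # generator inputs
    real_data = np.zeros([16, 4], dtype=np.float32)  # discriminator targets
    return noise, real_data

def eval_input_fn():
    return train_input_fn()

def predict_input_fn():
    # Prediction only needs generator inputs.
    return np.zeros([16, 4], dtype=np.float32)

# Invoked from a test case roughly as:
# self._test_complete_flow(train_input_fn, eval_input_fn, predict_input_fn,
#                          prediction_size=[16, 4])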
Example #2
 def setUp(self):
     super(GANHeadTest, self).setUp()
     self.gan_head = head.gan_head(
         generator_loss_fn=dummy_loss,
         discriminator_loss_fn=dummy_loss,
         generator_optimizer=training.GradientDescentOptimizer(1.0),
         discriminator_optimizer=training.GradientDescentOptimizer(1.0))
     self.assertIsInstance(self.gan_head, head.GANHead)
Example #3
 def setUp(self):
   super(GANHeadTest, self).setUp()
   self.gan_head = head.gan_head(
       generator_loss_fn=dummy_loss,
       discriminator_loss_fn=dummy_loss,
       generator_optimizer=training.GradientDescentOptimizer(1.0),
       discriminator_optimizer=training.GradientDescentOptimizer(1.0),
       get_eval_metric_ops_fn=self.get_metrics)
   self.assertIsInstance(self.gan_head, head.GANHead)
    def _test_complete_flow(self,
                            train_input_fn,
                            eval_input_fn,
                            predict_input_fn,
                            prediction_size,
                            lr_decay=False,
                            joint_train=True):
        def make_opt():
            gstep = training_util.get_or_create_global_step()
            lr = learning_rate_decay.exponential_decay(1.0, gstep, 10, 0.9)
            return training.GradientDescentOptimizer(lr)

        gopt = make_opt if lr_decay else training.GradientDescentOptimizer(1.0)
        dopt = make_opt if lr_decay else training.GradientDescentOptimizer(1.0)
        est = estimator.TPUGANEstimator(
            generator_fn=generator_fn,
            discriminator_fn=discriminator_fn,
            generator_loss_fn=losses.wasserstein_generator_loss,
            discriminator_loss_fn=losses.wasserstein_discriminator_loss,
            generator_optimizer=gopt,
            discriminator_optimizer=dopt,
            joint_train=joint_train,
            get_eval_metric_ops_fn=get_metrics,
            train_batch_size=4,
            eval_batch_size=10,
            predict_batch_size=8,
            use_tpu=FLAGS.use_tpu,
            config=self._config)

        # Train.
        num_steps_train = 10
        est.train(train_input_fn, steps=num_steps_train)

        # Evaluate.
        num_steps_eval = 2
        scores = est.evaluate(eval_input_fn, steps=num_steps_eval)
        self.assertEqual(num_steps_train + num_steps_eval,
                         scores[ops.GraphKeys.GLOBAL_STEP])
        self.assertIn('loss', six.iterkeys(scores))
        self.assertEqual(
            scores['discriminator_loss'] + scores['generator_loss'],
            scores['loss'])
        self.assertIn('mse_custom_metric', six.iterkeys(scores))

        # Predict.
        predictions = np.array(
            [x['generated_data'] for x in est.predict(predict_input_fn)])
        self.assertAllEqual(prediction_size, predictions.shape)
def _configure_optimizer(learning_rate, opt_type='adam'):
    if opt_type == 'adadelta':
        optimizer = training.AdadeltaOptimizer(learning_rate,
                                               rho=FLAGS.adadelta_rho,
                                               epsilon=FLAGS.opt_epsilon)
    elif opt_type == 'adagrad':
        optimizer = training.AdagradOptimizer(
            learning_rate,
            initial_accumulator_value=FLAGS.adagrad_initial_accumulator_value)
    elif opt_type == 'adam':
        optimizer = training.AdamOptimizer(learning_rate)
    elif opt_type == 'ftrl':
        optimizer = training.FtrlOptimizer(
            learning_rate,
            learning_rate_power=FLAGS.ftrl_learning_rate_power,
            initial_accumulator_value=FLAGS.ftrl_initial_accumulator_value,
            l1_regularization_strength=FLAGS.ftrl_l1,
            l2_regularization_strength=FLAGS.ftrl_l2)
    elif opt_type == 'momentum':
        optimizer = training.MomentumOptimizer(learning_rate,
                                               momentum=FLAGS.momentum,
                                               name='Momentum')
    elif opt_type == 'rmsprop':
        optimizer = training.RMSPropOptimizer(learning_rate,
                                              decay=FLAGS.rmsprop_decay,
                                              momentum=FLAGS.rmsprop_momentum,
                                              epsilon=FLAGS.opt_epsilon)
    elif opt_type == 'sgd':
        optimizer = training.GradientDescentOptimizer(learning_rate)
    else:
        raise ValueError('Optimizer [%s] was not recognized' % opt_type)
    return optimizer
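# Illustrative usage of _configure_optimizer (not part of the original snippet).
# The FLAGS values referenced inside the function (e.g. FLAGS.momentum,
# FLAGS.rmsprop_decay, FLAGS.opt_epsilon) are assumed to be defined by the
# surrounding training script; `loss` below stands in for the model loss tensor.
optimizer = _configure_optimizer(learning_rate=0.01, opt_type='momentum')
# train_op = optimizer.minimize(loss, global_step=training.get_global_step())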
Example #6
  def testRNNWithKerasGRUCell(self):
    with self.cached_session() as sess:
      input_shape = 10
      output_shape = 5
      timestep = 4
      batch = 100
      (x_train, y_train), _ = testing_utils.get_test_data(
          train_samples=batch,
          test_samples=0,
          input_shape=(timestep, input_shape),
          num_classes=output_shape)
      y_train = keras.utils.to_categorical(y_train)
      cell = keras.layers.GRUCell(output_shape)

      inputs = array_ops.placeholder(
          dtypes.float32, shape=(None, timestep, input_shape))
      predict = array_ops.placeholder(
          dtypes.float32, shape=(None, output_shape))

      outputs, state = rnn.dynamic_rnn(
          cell, inputs, dtype=dtypes.float32)
      self.assertEqual(outputs.shape.as_list(), [None, timestep, output_shape])
      self.assertEqual(state.shape.as_list(), [None, output_shape])
      loss = losses.softmax_cross_entropy(predict, state)
      train_op = training.GradientDescentOptimizer(0.001).minimize(loss)

      sess.run([variables_lib.global_variables_initializer()])
      _, outputs, state = sess.run(
          [train_op, outputs, state], {inputs: x_train, predict: y_train})

      self.assertEqual(len(outputs), batch)
      self.assertEqual(len(state), batch)
Example #7
    def _model_fn(features, labels, mode):
      predictions = layers.dense(
          features['x'], 1, kernel_initializer=init_ops.zeros_initializer())
      export_outputs = {
          'predictions': export.RegressionOutput(predictions)
      }

      if mode == model_fn_lib.ModeKeys.PREDICT:
        return model_fn_lib.EstimatorSpec(
            mode, predictions=predictions, export_outputs=export_outputs)

      loss = losses.mean_squared_error(labels, predictions)
      train_op = training.GradientDescentOptimizer(learning_rate=0.5).minimize(
          loss, training.get_global_step())
      eval_metric_ops = {
          'absolute_error': metrics_lib.mean_absolute_error(
              labels, predictions)
      }

      return model_fn_lib.EstimatorSpec(
          mode,
          predictions=predictions,
          loss=loss,
          train_op=train_op,
          eval_metric_ops=eval_metric_ops,
          export_outputs=export_outputs)
    def _create_head_with_eval_metric_ops(self, mode, loss, eval_metric_ops):
        """Creates a head returning `TPUEstimatorSpec` based on mode.

        This version contains an eval branch that will not run on TPUs, since
        eval_metric_ops has not been split into a metrics_fn that runs on CPU.
        The intent is to test the entire eval path (the model_fn forward pass
        and the metrics output) on CPU.

        Args:
          mode: The mode, such as TRAIN or EVAL.
          loss: Training loss `Tensor`. Must be either scalar or with shape `[1]`.
          eval_metric_ops: Dict of metric results keyed by name.

        Returns:
          An `EstimatorSpec` for EVAL, or a `TPUEstimatorSpec` otherwise.
        """
        if mode == _EVAL:
            return model_fn_lib.EstimatorSpec(mode=mode,
                                              eval_metric_ops=eval_metric_ops,
                                              loss=loss)
        # Train
        optimizer = tf.compat.v1.tpu.CrossShardOptimizer(
            training.GradientDescentOptimizer(learning_rate=0.5))
        train_op = optimizer.minimize(loss,
                                      global_step=training.get_global_step())
        return tpu_estimator.TPUEstimatorSpec(mode=mode,
                                              train_op=train_op,
                                              loss=loss)
Example #9
    def _test_incorrect_weight_clip_value_helper(self, use_tuple):
        opt = training.GradientDescentOptimizer(1.0)

        if use_tuple:
            with self.assertRaisesRegexp(ValueError, 'must be positive'):
                clip_weights.clip_discriminator_weights(opt,
                                                        self.tuple,
                                                        weight_clip=-1)
        else:
            with self.assertRaisesRegexp(ValueError, 'must be positive'):
                clip_weights.clip_variables(opt, self.variables, weight_clip=-1)
Example #10
 def _create_head(self, mode, loss, eval_metrics):
   """Creates a head returning `TPUEstimatorSpec` based on mode."""
   if mode == _EVAL:
     return tpu_estimator.TPUEstimatorSpec(
         mode=mode, eval_metrics=eval_metrics, loss=loss)
   # Train
   optimizer = tf.tpu.CrossShardOptimizer(
       training.GradientDescentOptimizer(learning_rate=0.5))
   train_op = optimizer.minimize(loss, global_step=training.get_global_step())
   return tpu_estimator.TPUEstimatorSpec(
       mode=mode, train_op=train_op, loss=loss)
  def _test_warm_start(self, warm_start_from=None):
    """Tests whether WarmStartSettings work as intended."""
    def generator_with_new_variable(noise_dict, mode):
      variable_scope.get_variable(name=self.new_variable_name,
                                  initializer=self.new_variable_value,
                                  trainable=True)
      return generator_fn(noise_dict, mode)

    est = estimator.TPUGANEstimator(
        generator_fn=generator_fn,
        discriminator_fn=discriminator_fn,
        generator_loss_fn=losses.wasserstein_generator_loss,
        discriminator_loss_fn=losses.wasserstein_discriminator_loss,
        generator_optimizer=training.GradientDescentOptimizer(1.0),
        discriminator_optimizer=training.GradientDescentOptimizer(1.0),
        train_batch_size=4,
        use_tpu=FLAGS.use_tpu,
        config=self._config)

    def train_input_fn(params):
      data = np.zeros([params['batch_size'], 4], dtype=np.float32)
      return data, data

    est.train(train_input_fn, steps=1)

    est_warm = estimator.TPUGANEstimator(
        generator_fn=generator_with_new_variable,
        discriminator_fn=discriminator_fn,
        generator_loss_fn=losses.wasserstein_generator_loss,
        discriminator_loss_fn=losses.wasserstein_discriminator_loss,
        generator_optimizer=training.GradientDescentOptimizer(1.0),
        discriminator_optimizer=training.GradientDescentOptimizer(1.0),
        config=tpu_config.RunConfig(
            model_dir=None if warm_start_from else self._model_dir),
        train_batch_size=4,
        use_tpu=FLAGS.use_tpu,
        warm_start_from=warm_start_from)

    est_warm.train(train_input_fn, steps=1)

    return est_warm
    def model_fn(features, labels, mode, params):
        loss = None
        train_op = None
        export_outputs = None

        # This could be some pre-processing on CPU, such as calls to the input
        # layer with embedding columns.
        x2 = features['x'] * 2

        def computation(input_tensor):
            return layers.dense(
                input_tensor,
                1,
                kernel_initializer=init_ops.zeros_initializer())

        if mode != _PREDICT:
            predictions = computation(x2)
            loss = losses.mean_squared_error(labels, predictions)
            optimizer = tf.tpu.CrossShardOptimizer(
                training.GradientDescentOptimizer(learning_rate=0.5))
            train_op = optimizer.minimize(loss, training.get_global_step())
        else:
            inputs = [x2]
            if params['use_tpu']:
                predictions = array_ops.identity(
                    tpu_estimator.inference_on_tpu(computation,
                                                   inputs,
                                                   num_batch_threads=1,
                                                   max_batch_size=2,
                                                   batch_timeout_micros=100),
                    name='predictions')
            else:
                predictions = array_ops.identity(computation(*inputs),
                                                 name='predictions')
            key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
            export_outputs = {
                key: export_lib.PredictOutput({'prediction': predictions})
            }

            classes = string_ops.as_string(predictions, name='classes')
            classification_output = export_lib.ClassificationOutput(
                classes=classes)
            export_outputs['classification'] = classification_output

        return tpu_estimator.TPUEstimatorSpec(
            mode,
            loss=loss,
            train_op=train_op,
            predictions={'predictions': predictions},
            export_outputs=export_outputs)
Example #13
    def testImplicitGradOverEmbeddingLookup(self):
        batch_size = 8
        embedding_size = 512
        vocab_size = 1000
        lrn_rate = 0.1
        random_init = random_ops.random_uniform([vocab_size, embedding_size])

        x = array_ops.ones((batch_size), dtypes.int64)
        embedding = resource_variable_ops.ResourceVariable(
            initial_value=random_init, dtype=dtypes.float32, name='embedding')

        def f():
            tape.watch_variable(embedding)
            embedded_x = embedding_ops.embedding_lookup(embedding, x)
            return constant_op.constant(1.0, dtypes.float32) - embedded_x

        grad = backprop.implicit_grad(f)()[0][0]
        opt = training.GradientDescentOptimizer(lrn_rate)

        with context.graph_mode(), self.test_session():
            tf_x = array_ops.ones((batch_size), dtypes.int64)
            # TODO(ashankar,apassos): Change to ResourceVariable.
            tf_embedding = variables.Variable(random_init.numpy(),
                                              name='tf_embedding')
            tf_embedded_x = embedding_ops.embedding_lookup(tf_embedding, tf_x)
            tf_y = 1.0 - tf_embedded_x
            tf_grad = gradients.gradients(tf_y, [tf_embedding])[0]
            tf_opt = training.GradientDescentOptimizer(0.1)
            tf_embedding.initializer.run()

            self.assertAllClose(tf_grad.indices.eval(), grad.indices)
            self.assertAllClose(tf_grad.values.eval(), grad.values)

            tf_opt.apply_gradients([(tf_grad, tf_embedding)]).run()
            expected = tf_embedding.eval()
        opt.apply_gradients([(grad, embedding)])
        self.assertAllClose(expected, embedding.read_value())
 def test_metrics_consistent(self):
     # Tests that the identity metrics used to report in-sample predictions match
     # the behavior of standard metrics.
     g = ops.Graph()
     with g.as_default():
         features = {
             feature_keys.TrainEvalFeatures.TIMES:
                 array_ops.zeros((1, 1)),
             feature_keys.TrainEvalFeatures.VALUES:
                 array_ops.zeros((1, 1, 1)),
             "ticker":
                 array_ops.reshape(
                     math_ops.cast(
                         variables.VariableV1(
                             name="ticker",
                             initial_value=0,
                             dtype=dtypes.int64,
                             collections=[ops.GraphKeys.LOCAL_VARIABLES]
                         ).count_up_to(10),
                         dtype=dtypes.float32),
                     (1, 1, 1))
         }
         model_fn = ts_head_lib.TimeSeriesRegressionHead(
             model=_TickerModel(),
             state_manager=state_management.PassthroughStateManager(),
             optimizer=train.GradientDescentOptimizer(
                 0.001)).create_estimator_spec
         outputs = model_fn(features=features,
                            labels=None,
                            mode=estimator_lib.ModeKeys.EVAL)
         metric_update_ops = [
             metric[1] for metric in outputs.eval_metric_ops.values()
         ]
         loss_mean, loss_update = metrics.mean(outputs.loss)
         metric_update_ops.append(loss_update)
         with self.cached_session() as sess:
             coordinator = coordinator_lib.Coordinator()
             queue_runner_impl.start_queue_runners(sess, coord=coordinator)
             variables.local_variables_initializer().run()
             sess.run(metric_update_ops)
             loss_evaled, metric_evaled, nested_metric_evaled = sess.run(
                 (loss_mean, outputs.eval_metric_ops["ticker"][0],
                  outputs.eval_metric_ops[
                      feature_keys.FilteringResults.STATE_TUPLE][0][0]))
             # The custom model_utils metrics for in-sample predictions should be in
             # sync with the Estimator's mean metric for model loss.
             self.assertAllClose(0., loss_evaled)
             self.assertAllClose((((0., ), ), ), metric_evaled)
             self.assertAllClose((((0., ), ), ), nested_metric_evaled)
             coordinator.request_stop()
             coordinator.join()
    def _test_warm_start(self, warm_start_from=None):
        """Tests whether WarmStartSettings work as intended."""
        def generator_with_new_variable(noise_dict, mode):
            variable_scope.get_variable(name=self.new_variable_name,
                                        initializer=self.new_variable_value,
                                        trainable=True)
            return generator_fn(noise_dict, mode)

        def train_input_fn():
            data = np.zeros([3, 4])
            return {'x': data}, data

        est = estimator.GANEstimator(
            generator_fn=generator_fn,
            discriminator_fn=discriminator_fn,
            generator_loss_fn=losses.wasserstein_generator_loss,
            discriminator_loss_fn=losses.wasserstein_discriminator_loss,
            generator_optimizer=training.GradientDescentOptimizer(1.0),
            discriminator_optimizer=training.GradientDescentOptimizer(1.0),
            model_dir=self._model_dir)

        est.train(train_input_fn, steps=1)

        est_warm = estimator.GANEstimator(
            generator_fn=generator_with_new_variable,
            discriminator_fn=discriminator_fn,
            generator_loss_fn=losses.wasserstein_generator_loss,
            discriminator_loss_fn=losses.wasserstein_discriminator_loss,
            generator_optimizer=training.GradientDescentOptimizer(1.0),
            discriminator_optimizer=training.GradientDescentOptimizer(1.0),
            model_dir=None if warm_start_from else self._model_dir,
            warm_start_from=warm_start_from)

        est_warm.train(train_input_fn, steps=1)

        return est_warm
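# Sketch of how _test_warm_start might be invoked (an assumption; the calling
# test cases are not shown here). Passing None reuses the first estimator's
# model_dir, while passing a WarmStartSettings restores variables from its
# checkpoint, e.g.:
# warm_start = tf.estimator.WarmStartSettings(
#     ckpt_to_initialize_from=self._model_dir,
#     vars_to_warm_start='.*')
# est_warm = self._test_warm_start(warm_start_from=warm_start)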
Example #16
 def test_applies_norm(self):
     optimizer = extenders.clip_gradients_by_norm(
         training.GradientDescentOptimizer(1.0), clip_norm=3.)
     with ops.Graph().as_default():
         w = variables.Variable(1., name='weight')
         x = constant_op.constant(5.)
         y = -x * w
         grads = optimizer.compute_gradients(y, var_list=[w])[0]
         opt_op = optimizer.minimize(y, var_list=[w])
         with training.MonitoredSession() as sess:
             grads_value = sess.run(grads)
             self.assertEqual(-5., grads_value[0])
             sess.run(opt_op)
             new_w = sess.run(w)
             self.assertEqual(4., new_w)  # new_w = w - lr * clipped_grad = 1 - 1 * (-3) = 4
    def test_parameter_class_members_with_value_hints(self):
        def test_fn(opt):
            opt.minimize(0)

        node = self._parse_and_analyze(
            test_fn, {'training': training},
            arg_types={
                'opt': (('%s.GradientDescentOptimizer' % training.__name__),
                        training.GradientDescentOptimizer(0.1))
            })

        attr_call_node = node.body[0].body[0].value.func
        self.assertEqual(
            tuple(training.__name__.split('.')) +
            ('GradientDescentOptimizer', ),
            anno.getanno(attr_call_node, 'type_fqn'))
        def _model_fn(features, labels, mode, params):
            if not self._export_mode:
                # Always check batch size in params
                self.assertEqual(batch_size_dict[mode], params['batch_size'])
            else:
                self.assertNotIn('batch_size', params)

            # Check that the input feed has the correct shape for train and eval.
            # When evaluating on CPU or predicting, dynamic shapes are allowed, so
            # only the fully known shape (which covers TPU training) is validated.
            if features['x'].shape.is_fully_defined():
                self.assertEqual(batch_size_dict[mode], features['x'].shape[0])

            predictions = layers.dense(
                features['x'],
                1,
                kernel_initializer=init_ops.ones_initializer())
            export_outputs = {
                'predictions': export_output.RegressionOutput(predictions)
            }

            if mode == _PREDICT:
                return _create_estimator_spec(
                    mode=mode,
                    predictions={'predictions': predictions},
                    export_outputs=export_outputs)

            loss = losses.mean_squared_error(labels, predictions)

            optimizer = tf.tpu.CrossShardOptimizer(
                training.GradientDescentOptimizer(learning_rate=0.5))
            train_op = optimizer.minimize(
                loss, global_step=training.get_global_step())

            eval_metrics = (
                lambda labels, predictions: {  # pylint: disable=g-long-lambda
                    'absolute_error':
                    metrics_lib.mean_absolute_error(labels, predictions)
                },
                [labels, predictions])
            return _create_estimator_spec(
                mode=mode,
                loss=loss,
                predictions={'predictions': predictions},
                export_outputs=export_outputs,
                train_op=train_op,
                eval_metrics=eval_metrics)
Example #19
    def test_parameter_class_members_with_value_hints(self):
        def test_fn(opt):
            opt.minimize(0)

        node = parser.parse_object(test_fn)
        node = access.resolve(node)
        node = live_values.resolve(node, {'training': training}, {})
        node = type_info.resolve(
            node, {
                'opt': (('%s.GradientDescentOptimizer' % training.__name__),
                        training.GradientDescentOptimizer(0.1))
            })

        attr_call_node = node.body[0].body[0].value.func
        self.assertEqual(
            training.__name__.split('.') + ['GradientDescentOptimizer'],
            anno.getanno(attr_call_node, 'type_fqn'))
def model_fn_global_step_incrementer(features, labels, mode, params):
    del params
    loss = None
    train_op = None
    predictions = dense_computation(features)
    if mode != _PREDICT:
        loss = losses.mean_squared_error(labels, predictions)
        optimizer = tf.tpu.CrossShardOptimizer(
            training.GradientDescentOptimizer(learning_rate=0.5))
        train_op = optimizer.minimize(loss, training.get_global_step())
    return tpu_estimator.TPUEstimatorSpec(
        mode,
        loss=loss,
        train_op=train_op,
        predictions={'predictions': predictions},
        export_outputs={
            'test': export_output.PredictOutput({'prediction': predictions})
        })
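# Illustrative wiring (an assumption, not part of the original snippet): this
# model_fn would normally be handed to a TPUEstimator together with a
# tpu_config.RunConfig. The model_dir and batch size here are arbitrary, and
# use_tpu=False keeps the example runnable on CPU.
run_config = tpu_config.RunConfig(model_dir='/tmp/global_step_incrementer')
est = tpu_estimator.TPUEstimator(
    model_fn=model_fn_global_step_incrementer,
    config=run_config,
    train_batch_size=16,
    use_tpu=False)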
Example #21
  def model_fn(features, labels, mode, params):
    del params
    loss = None
    train_op = None
    predictions = dense_computation(features)
    export_outputs = None
    if mode != _PREDICT:
      loss = losses.mean_squared_error(labels, predictions)
      optimizer = tf.tpu.CrossShardOptimizer(
          training.GradientDescentOptimizer(learning_rate=0.5))
      train_op = optimizer.minimize(loss, training.get_global_step())
    else:
      if export_tpu_tensor:
        key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
        export_outputs = {
            key: export_lib.PredictOutput({
                'prediction': predictions
            })
        }
      else:
        export_outputs = {}

      if export_cpu_tensor:

        def host_call(predictions):
          return string_ops.as_string(predictions, name='classes')

        classes = tf.tpu.outside_compilation(host_call, predictions)
        classification_output = export_lib.ClassificationOutput(
            classes=classes)
        export_outputs['classification'] = classification_output

    if tpu_estimator_spec:
      spec_type = tpu_estimator.TPUEstimatorSpec
    else:
      spec_type = model_fn_lib.EstimatorSpec

    return spec_type(
        mode,
        loss=loss,
        train_op=train_op,
        predictions={'predictions': predictions},
        export_outputs=export_outputs)
Example #22
    def _test_weight_clipping_helper(self, use_tuple):
        loss = self.variables[0]
        opt = training.GradientDescentOptimizer(1.0)
        if use_tuple:
            opt_clip = clip_weights.clip_discriminator_weights(
                opt, self.tuple, 0.1)
        else:
            opt_clip = clip_weights.clip_variables(opt, self.variables, 0.1)

        train_op1 = opt.minimize(loss, var_list=self.variables)
        train_op2 = opt_clip.minimize(loss, var_list=self.variables)

        with self.cached_session(use_gpu=True) as sess:
            sess.run(variables.global_variables_initializer())
            self.assertEqual(2.0, self.variables[0].eval())
            sess.run(train_op1)
            self.assertLess(0.1, self.variables[0].eval())

        with self.cached_session(use_gpu=True) as sess:
            sess.run(variables.global_variables_initializer())
            self.assertEqual(2.0, self.variables[0].eval())
            sess.run(train_op2)
            self.assertNear(0.1, self.variables[0].eval(), 1e-7)
Example #23
  def testStaticRNNWithKerasSimpleRNNCell(self):
    with self.cached_session() as sess:
      input_shape = 10
      output_shape = 5
      timestep = 4
      batch = 100
      (x_train, y_train), _ = testing_utils.get_test_data(
          train_samples=batch,
          test_samples=0,
          input_shape=(timestep, input_shape),
          num_classes=output_shape)
      x_train = np.transpose(x_train, (1, 0, 2))
      y_train = keras.utils.to_categorical(y_train)
      cell = keras.layers.SimpleRNNCell(output_shape)

      inputs = [array_ops.placeholder(
          dtypes.float32, shape=(None, input_shape))] * timestep
      predict = array_ops.placeholder(
          dtypes.float32, shape=(None, output_shape))

      outputs, state = rnn.static_rnn(
          cell, inputs, dtype=dtypes.float32)
      self.assertEqual(len(outputs), timestep)
      self.assertEqual(outputs[0].shape.as_list(), [None, output_shape])
      self.assertEqual(state.shape.as_list(), [None, output_shape])
      loss = losses.softmax_cross_entropy(predict, state)
      train_op = training.GradientDescentOptimizer(0.001).minimize(loss)

      sess.run([variables_lib.global_variables_initializer()])
      feed_dict = {i: d for i, d in zip(inputs, x_train)}
      feed_dict[predict] = y_train
      _, outputs, state = sess.run(
          [train_op, outputs, state], feed_dict)

      self.assertEqual(len(outputs), timestep)
      self.assertEqual(len(outputs[0]), batch)
      self.assertEqual(len(state), batch)
 def make_opt():
     gstep = training_util.get_or_create_global_step()
     lr = learning_rate_decay.exponential_decay(1.0, gstep, 10, 0.9)
     return training.GradientDescentOptimizer(lr)
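# As in the lr_decay branch of _test_complete_flow above, an optimizer can be
# passed to the estimator as a callable so that the decayed learning rate is
# created lazily inside the estimator's own graph. A sketch, assuming the
# generator/discriminator functions and losses from the surrounding tests:
est = estimator.GANEstimator(
    generator_fn=generator_fn,
    discriminator_fn=discriminator_fn,
    generator_loss_fn=losses.wasserstein_generator_loss,
    discriminator_loss_fn=losses.wasserstein_discriminator_loss,
    generator_optimizer=make_opt,
    discriminator_optimizer=make_opt)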
Example #25
 def test_fn():
     opt = training.GradientDescentOptimizer(0.1)
     opt.minimize(0)
Example #26
 def test_fn():
     opt = training.GradientDescentOptimizer(0.1)
     return opt
Example #27
 def test_fn():
     foo = training.GradientDescentOptimizer(0.1)
     foo.bar.baz()
 def get_sync_optimizer():
     return sync_replicas_optimizer.SyncReplicasOptimizer(
         training.GradientDescentOptimizer(learning_rate=1.0),
         replicas_to_aggregate=1)
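# Usage note (an assumption; not shown in the snippet above): a
# SyncReplicasOptimizer is normally paired with the session-run hook it
# creates, so that its token queue and sync ops run during training.
opt = get_sync_optimizer()
sync_hook = opt.make_session_run_hook(is_chief=True)
# e.g. est.train(train_input_fn, steps=10, hooks=[sync_hook])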
Example #29
 def test_name(self):
     optimizer = extenders.clip_gradients_by_norm(
         training.GradientDescentOptimizer(1.0), clip_norm=3.)
     self.assertEqual('ClipByNormGradientDescent', optimizer.get_name())
Example #30
 def setUpClass(cls):
     cls._generator_optimizer = training.GradientDescentOptimizer(1.0)
     cls._discriminator_optimizer = training.GradientDescentOptimizer(1.0)