Example #1
 def _annotated_graph(self):
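     """Build a three-layer conv graph whose elementwise ops carry `_recompute_hint` attrs."""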
     graph = ops.Graph()
     with graph.as_default():
         random_seed.set_random_seed(2)
         current_activation = variable_scope.get_variable(
             name='start', shape=[1, 2, 2, 5])
         conv_filter = variable_scope.get_variable(name='filter',
                                                   shape=[5, 5, 5, 5])
         for layer_number in range(3):
             with variable_scope.variable_scope(
                     'layer_{}'.format(layer_number)):
                 after_conv = nn.conv2d(current_activation, conv_filter,
                                        [1, 1, 1, 1], 'SAME')
                 current_activation = 2. * after_conv
                 current_activation.op._set_attr(
                     '_recompute_hint',
                     # The value of the attribute does not matter; just that the key
                     # exists in the op's attributes.
                     attr_value_pb2.AttrValue(i=1))
                 current_activation += 5.
                 current_activation.op._set_attr(
                     '_recompute_hint', attr_value_pb2.AttrValue(i=0))
                 current_activation = nn.relu(current_activation)
                 current_activation.op._set_attr(
                     '_recompute_hint', attr_value_pb2.AttrValue(i=1))
         loss = math_ops.reduce_mean(current_activation)
         optimizer = train.AdamOptimizer(0.001)
         train_op = optimizer.minimize(loss)
         init_op = variables.global_variables_initializer()
     return graph, init_op, train_op
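
A minimal usage sketch (an assumption, not part of the original test): run a single training step on the annotated graph, assuming the surrounding file also imports the TF 1.x `session` module.

 def _run_annotated_graph_once(self):
     # Hypothetical helper exercising the graph built by _annotated_graph above.
     graph, init_op, train_op = self._annotated_graph()
     with session.Session(graph=graph) as sess:
         sess.run(init_op)   # initialize the 'start' and 'filter' variables
         sess.run(train_op)  # one Adam step; the _recompute_hint attrs ride along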
Example #2
  def __init__(self, model, state_manager=None, optimizer=None, model_dir=None,
               config=None, head_type=ts_head_lib.TimeSeriesRegressionHead):
    """Initialize the Estimator.

    Args:
      model: The time series model to wrap (inheriting from TimeSeriesModel).
      state_manager: The state manager to use. Defaults to a
          FilteringOnlyStateManager for ARModel instances and a
          PassthroughStateManager otherwise.
      optimizer: The optimization algorithm to use when training, inheriting
          from tf.train.Optimizer. Defaults to Adam with step size 0.02.
      model_dir: See `Estimator`.
      config: See `Estimator`.
      head_type: The kind of head to use for the model (inheriting from
          `TimeSeriesRegressionHead`).
    """
    input_statistics_generator = math_utils.InputStatisticsFromMiniBatch(
        dtype=model.dtype, num_features=model.num_features)
    if state_manager is None:
      if isinstance(model, ar_model.ARModel):
        state_manager = state_management.FilteringOnlyStateManager()
      else:
        state_manager = state_management.PassthroughStateManager()
    if optimizer is None:
      optimizer = train.AdamOptimizer(0.02)
    self._model = model
    ts_regression_head = head_type(
        model=model, state_manager=state_manager, optimizer=optimizer,
        input_statistics_generator=input_statistics_generator)
    model_fn = ts_regression_head.create_estimator_spec
    super(TimeSeriesRegressor, self).__init__(
        model_fn=model_fn,
        model_dir=model_dir,
        config=config)
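
A hedged construction sketch; `my_model` stands in for any concrete `TimeSeriesModel` instance, and the learning rate and directory below are illustrative assumptions rather than values from the original source.

regressor = TimeSeriesRegressor(
    model=my_model,                        # any TimeSeriesModel subclass instance
    optimizer=train.AdamOptimizer(0.01),   # overrides the 0.02 default above
    model_dir='/tmp/ts_model')             # forwarded to the underlying Estimator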
Example #3
 def _GetMetaGraph(self,
                   batch_size=14,
                   image_dim=12,
                   optimizer_scope_name=''):
     """A simple layered graph with conv, an intermediate op, and a ReLU."""
     graph = ops.Graph()
     with graph.as_default():
         random_seed.set_random_seed(1)
         current_activation = variable_scope.get_variable(
             name='start', shape=[batch_size, image_dim, image_dim, 5])
         conv_filter = variable_scope.get_variable(name='filter',
                                                   shape=[5, 5, 5, 5])
         for layer_number in range(10):
             with variable_scope.variable_scope(
                     'layer_{}'.format(layer_number)):
                 after_conv = nn.conv2d(current_activation, conv_filter,
                                        [1, 1, 1, 1], 'SAME')
                 current_activation = 2. * after_conv
                 current_activation = nn.relu(current_activation)
         loss = math_ops.reduce_mean(current_activation)
         with ops.name_scope(optimizer_scope_name):
             optimizer = train.AdamOptimizer(0.001)
             train_op = optimizer.minimize(loss)
         init_op = variables.global_variables_initializer()
         metagraph = train.export_meta_graph()
     return (metagraph, init_op.name, train_op.name, loss.name)
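
An illustrative follow-up (not part of the original test): import the exported MetaGraphDef into a fresh graph and drive it through the op and tensor names returned above, assuming the `session` module is also imported and that `train` exposes `import_meta_graph` as in `tf.train`.

 def _run_exported_metagraph(self):
     # Hypothetical helper consuming the tuple returned by _GetMetaGraph.
     metagraph, init_name, train_name, loss_name = self._GetMetaGraph()
     with ops.Graph().as_default():
         train.import_meta_graph(metagraph)  # rebuild the graph from the proto
         with session.Session() as sess:
             sess.run(init_name)   # op names can be fetched as plain strings
             sess.run(train_name)
             return sess.run(loss_name)  # fetch the loss tensor by name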
Example #4
 def _RunGraphWithConfig(self, config, batch_size=14, image_dim=12):
     """Run a simple layered graph with conv, an intermediate op, and a ReLU."""
     graph = ops.Graph()
     with graph.as_default():
         random_seed.set_random_seed(1)
         current_activation = variable_scope.get_variable(
             name='start', shape=[batch_size, image_dim, image_dim, 5])
         conv_filter = variable_scope.get_variable(name='filter',
                                                   shape=[5, 5, 5, 5])
         for layer_number in range(10):
             with variable_scope.variable_scope(
                     'layer_{}'.format(layer_number)):
                 after_conv = nn.conv2d(current_activation, conv_filter,
                                        [1, 1, 1, 1], 'SAME')
                 current_activation = 2. * after_conv
                 current_activation = nn.relu(current_activation)
         loss = math_ops.reduce_mean(current_activation)
         optimizer = train.AdamOptimizer(0.001)
         train_op = optimizer.minimize(loss)
         init_op = variables.global_variables_initializer()
         with session.Session(config=config, graph=graph) as sess:
             sess.run(init_op)
             sess.run(train_op)
             sess.run(train_op)
             return sess.run(loss)
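
A hedged companion check (an assumption, not from the original test): a semantics-preserving graph rewrite such as memory optimization should leave the trained loss essentially unchanged. The two proto modules are the standard ones under `tensorflow.core.protobuf`.

 def _testRewrittenLossMatches(self):
     from tensorflow.core.protobuf import config_pb2
     from tensorflow.core.protobuf import rewriter_config_pb2
     default_loss = self._RunGraphWithConfig(config_pb2.ConfigProto())
     rewritten_loss = self._RunGraphWithConfig(config_pb2.ConfigProto(
         graph_options=config_pb2.GraphOptions(
             rewrite_options=rewriter_config_pb2.RewriterConfig(
                 memory_optimization=rewriter_config_pb2.RewriterConfig.HEURISTICS))))
     self.assertAllClose(default_loss, rewritten_loss, rtol=1e-2)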
Example #5
def _configure_optimizer(learning_rate, opt_type='adam'):
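    """Return a tf.train optimizer selected by `opt_type`, with hyperparameters read from FLAGS."""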
    if opt_type == 'adadelta':
        optimizer = training.AdadeltaOptimizer(learning_rate,
                                               rho=FLAGS.adadelta_rho,
                                               epsilon=FLAGS.opt_epsilon)
    elif opt_type == 'adagrad':
        optimizer = training.AdagradOptimizer(
            learning_rate,
            initial_accumulator_value=FLAGS.adagrad_initial_accumulator_value)
    elif opt_type == 'adam':
        optimizer = training.AdamOptimizer(learning_rate)
    elif opt_type == 'ftrl':
        optimizer = training.FtrlOptimizer(
            learning_rate,
            learning_rate_power=FLAGS.ftrl_learning_rate_power,
            initial_accumulator_value=FLAGS.ftrl_initial_accumulator_value,
            l1_regularization_strength=FLAGS.ftrl_l1,
            l2_regularization_strength=FLAGS.ftrl_l2)
    elif opt_type == 'momentum':
        optimizer = training.MomentumOptimizer(learning_rate,
                                               momentum=FLAGS.momentum,
                                               name='Momentum')
    elif opt_type == 'rmsprop':
        optimizer = training.RMSPropOptimizer(learning_rate,
                                              decay=FLAGS.rmsprop_decay,
                                              momentum=FLAGS.rmsprop_momentum,
                                              epsilon=FLAGS.opt_epsilon)
    elif opt_type == 'sgd':
        optimizer = training.GradientDescentOptimizer(learning_rate)
    else:
        raise ValueError('Optimizer [%s] was not recognized' % opt_type)
    return optimizer
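
Illustrative calls (assumptions, not from the original file); any branch other than 'adam' and 'sgd' also needs the corresponding FLAGS hyperparameters to be defined.

adam_opt = _configure_optimizer(1e-3)                           # default 'adam' branch
momentum_opt = _configure_optimizer(1e-2, opt_type='momentum')  # reads FLAGS.momentum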
Example #6
  def __init__(self, model, state_manager=None, optimizer=None, model_dir=None,
               config=None):
    """Initialize the Estimator.

    Args:
      model: The time series model to wrap (inheriting from TimeSeriesModel).
      state_manager: The state manager to use, or (by default)
          PassthroughStateManager if none is needed.
      optimizer: The optimization algorithm to use when training, inheriting
          from tf.train.Optimizer. Defaults to Adam with step size 0.02.
      model_dir: See `Estimator`.
      config: See `Estimator`.
    """
    input_statistics_generator = math_utils.InputStatisticsFromMiniBatch(
        dtype=model.dtype, num_features=model.num_features)
    if state_manager is None:
      state_manager = state_management.PassthroughStateManager()
    if optimizer is None:
      optimizer = train.AdamOptimizer(0.02)
    self._model = model
    model_fn = model_utils.make_model_fn(
        model, state_manager, optimizer,
        input_statistics_generator=input_statistics_generator)
    super(_TimeSeriesRegressor, self).__init__(
        model_fn=model_fn,
        model_dir=model_dir,
        config=config)
Example #7
    def testCloneBatchNorm(self):
        g = ops.Graph()
        with g.as_default():
            np.random.seed(1234)
            x_value = np.random.random([2, 5, 5, 3])
            w_value = np.random.random([3, 3, 3, 2])
            is_training_t = array_ops.placeholder(dtypes.bool,
                                                  name='is_training_t')
            x_t = array_ops.constant(x_value, dtype=dtypes.float32, name='x_t')
            y_t = conv2d(
                x_t,
                2, [3, 3],
                kernel_initializer=init_ops.constant_initializer(w_value))
            y_t = batch_norm(y_t, training=is_training_t)
            optimizer_t = train.AdamOptimizer()
            optimize_t = optimizer_t.minimize(math_ops.reduce_sum(y_t))
            with self.test_session(use_gpu=True) as sess:
                sess.run(variables.global_variables_initializer())
                y_test_1 = sess.run(y_t, feed_dict={is_training_t: False})
                sess.run(optimize_t, feed_dict={is_training_t: True})
                y_test_2 = sess.run(y_t, feed_dict={is_training_t: False})

            is_training = array_ops.placeholder(dtypes.bool,
                                                name='is_training')
            x = array_ops.constant(np.zeros([2, 5, 5, 3]),
                                   dtype=dtypes.float32,
                                   name='x')
            y = conv2d(
                x,
                2, [3, 3],
                kernel_initializer=init_ops.constant_initializer(w_value))
            y = batch_norm(y, training=is_training)
            x_new = array_ops.constant(x_value, dtype=dtypes.float32, name='x')
            y_out = meta_graph.clone(y, "copy", replace={x: x_new})
            optimizer = train.AdamOptimizer()
            optimize = optimizer.minimize(math_ops.reduce_sum(y_out))
            with self.test_session(use_gpu=True) as sess:
                sess.run(variables.global_variables_initializer())
                y_out_1 = sess.run(y_out, feed_dict={is_training: False})
                y_out_2 = sess.run(y_out, feed_dict={is_training: False})
                sess.run(optimize, feed_dict={is_training: True})
                y_out_3 = sess.run(y_out, feed_dict={is_training: False})
            self.assertAllClose(y_out_1, y_out_2)
            self.assertTrue(np.abs(y_out_1 - y_out_3).max() > 1e-6)
            self.assertAllClose(y_test_1, y_out_1)
            self.assertAllClose(y_test_2, y_out_3)
Example #8
def model_fn(features, labels, mode, params):
    """The model_fn argument for creating an Estimator."""
    model = Model(params["data_format"])
    image = features
    if isinstance(image, dict):
        image = features["image"]

    if mode == estimator.ModeKeys.PREDICT:
        logits = model(image, training=False)
        predictions = {
            "classes": math_ops.argmax(logits, axis=1),
            "probabilities": nn.softmax(logits),
        }
        return estimator.EstimatorSpec(
            mode=estimator.ModeKeys.PREDICT,
            predictions=predictions,
            export_outputs={
                "classify": estimator.export.PredictOutput(predictions)
            })

    elif mode == estimator.ModeKeys.TRAIN:
        optimizer = train.AdamOptimizer(learning_rate=1e-4)

        logits = model(image, training=True)
        loss = losses.sparse_softmax_cross_entropy(labels=labels,
                                                   logits=logits)
        return estimator.EstimatorSpec(mode=estimator.ModeKeys.TRAIN,
                                       loss=loss,
                                       train_op=optimizer.minimize(
                                           loss,
                                           train.get_or_create_global_step()))

    elif mode == estimator.ModeKeys.EVAL:
        logits = model(image, training=False)
        loss = losses.sparse_softmax_cross_entropy(labels=labels,
                                                   logits=logits)
        return estimator.EstimatorSpec(
            mode=estimator.ModeKeys.EVAL,
            loss=loss,
            eval_metric_ops={
                "accuracy":
                ops.metrics.accuracy(labels=labels,
                                     predictions=math_ops.argmax(logits,
                                                                 axis=1)),
            })
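
A hedged wiring sketch for the model_fn above; the model_dir, data_format value, and the commented-out input_fn are illustrative assumptions only.

mnist_classifier = estimator.Estimator(
    model_fn=model_fn,
    model_dir='/tmp/mnist_model',             # illustrative checkpoint directory
    params={"data_format": "channels_last"})  # consumed by Model(...) in model_fn
# mnist_classifier.train(input_fn=train_input_fn, steps=1000)  # train_input_fn assumed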
Example #9
def _stub_model_fn():
    return ts_head_lib.TimeSeriesRegressionHead(
        model=_StubModel(),
        state_manager=state_management.PassthroughStateManager(),
        optimizer=train.AdamOptimizer(0.001)).create_estimator_spec
Example #10
def _stub_model_fn():
    return model_utils.make_model_fn(
        model=_StubModel(),
        state_manager=state_management.PassthroughStateManager(),
        optimizer=train.AdamOptimizer(0.001))
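
In both stubs the returned callable follows the Estimator model_fn contract, so a throwaway Estimator could be built around it in a test; the line below is an assumption rather than code from the original files.

stub_estimator = estimator.Estimator(model_fn=_stub_model_fn())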