Example #1
import tensorflow as tf


def create_exp_decaying_learning_rate(initial_learning_rate=0.0001,
                                      decay_steps=10000,
                                      decay_rate=0.9,
                                      staircase=True):
  """Create a learning rate that decays exponentially with global_steps.

  Args:
    initial_learning_rate: A scalar float32 or float64 Tensor or a Python
      number. The initial learning rate.
    decay_steps: A scalar int32 or int64 Tensor or a Python number. Must be
      positive. See the decay computation in `tf.train.exponential_decay`.
    decay_rate: A scalar float32 or float64 Tensor or a Python number. The decay
      rate.
    staircase: Boolean. If True decay the learning rate at discrete intervals.

  Returns:
    learning_rate: Scalar tf.Tensor with the learning rate depending on the
      global_step.
  """
  learning_rate = tf.train.exponential_decay(
      learning_rate=initial_learning_rate,
      global_step=tf.train.get_or_create_global_step(),
      decay_steps=decay_steps,
      decay_rate=decay_rate,
      staircase=staircase)
  return learning_rate
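
As a usage note, the returned tensor plugs straight into a TF1 optimizer. A minimal sketch, assuming a `loss` tensor is already defined in the graph:

# Minimal usage sketch (assumes a `loss` tensor already exists).
lr = create_exp_decaying_learning_rate(initial_learning_rate=0.001)
optimizer = tf.train.AdamOptimizer(learning_rate=lr)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())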
Example #2
import tensorflow as tf


def exponential_decay(initial_value=0.0001,
                      decay_steps=10000,
                      decay_rate=0.9,
                      staircase=True):
  """Create a value that decays exponentially with global_step.

  Args:
    initial_value: A scalar float32 or float64 Tensor or a Python
      number. The initial value returned for global_step == 0.
    decay_steps: A scalar int32 or int64 Tensor or a Python number. Must be
      positive. See the decay computation in `tf.train.exponential_decay`.
    decay_rate: A scalar float32 or float64 Tensor or a Python number. The decay
      rate.
    staircase: Boolean. If True, decay the value at discrete intervals.

  Returns:
    value: Scalar tf.Tensor with the value decaying based on the global_step.
  """
  global_step = tf.train.get_or_create_global_step()
  value = tf.train.exponential_decay(
      learning_rate=initial_value,
      global_step=global_step,
      decay_steps=decay_steps,
      decay_rate=decay_rate,
      staircase=staircase)
  return value
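
For intuition, `tf.train.exponential_decay` computes `initial_value * decay_rate ** (global_step / decay_steps)`, and with `staircase=True` the exponent uses integer division, so the value drops in discrete steps. A pure-Python sketch of the same formula (not part of the original code):

def decayed_value(initial_value, global_step,
                  decay_steps=10000, decay_rate=0.9, staircase=True):
  # Mirrors tf.train.exponential_decay: initial * rate ** (step / decay_steps).
  exponent = (global_step // decay_steps if staircase
              else global_step / decay_steps)
  return initial_value * decay_rate ** exponent

decayed_value(0.0001, 5000)   # 0.0001  (still on the first "stair")
decayed_value(0.0001, 15000)  # 0.00009 (one decay applied)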
Example #3
    def __init__(self, learning_rate, no_inputs):

        # create feature columns
        feature_cols = []
        for i in range(no_inputs):
            feature_cols.append(
                tf.feature_column.categorical_column_with_vocabulary_list(
                    str(i + 1), vocabulary_list=['S', 'O', 'E']))

        hidden_layer_units = [27, 27, 27]

        # instantiate estimator
        self.estimator = tf.estimator.DNNRegressor(
            feature_columns=feature_cols,
            model_dir='train',
            hidden_units=hidden_layer_units,
            optimizer=lambda: tf.train.AdamOptimizer(
                # The original hard-coded 0.1 here; use the learning_rate
                # passed to __init__ so the argument is not silently ignored.
                learning_rate=tf.train.exponential_decay(
                    learning_rate=learning_rate,
                    global_step=tf.train.get_global_step(),
                    decay_steps=10000,
                    decay_rate=0.96)))
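
A hedged usage sketch for the wrapped estimator, assuming the enclosing class is named `Model` (the class name is not shown above) and an `input_fn` that returns a dict of string features named '1'..'no_inputs' drawn from the vocabulary ['S', 'O', 'E'], plus a numeric label:

# Hypothetical usage; `Model` and `input_fn` are assumed names.
model = Model(learning_rate=0.1, no_inputs=9)
model.estimator.train(input_fn=input_fn, steps=1000)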
Example #4
File: dnn.py  Project: computer-geek64/MTD
estimator = DNNRegressor(
    feature_columns=[categorical_feature_a_emb, categorical_feature_b_emb],
    hidden_units=[1024, 512, 256],
    optimizer=tf.train.ProximalAdagradOptimizer(
      learning_rate=0.1,
      l1_regularization_strength=0.001
    ))

# Or estimator using an optimizer with a learning rate decay.
estimator = DNNRegressor(
    feature_columns=[categorical_feature_a_emb, categorical_feature_b_emb],
    hidden_units=[1024, 512, 256],
    optimizer=lambda: tf.train.AdamOptimizer(
        learning_rate=tf.train.exponential_decay(
            learning_rate=0.1,
            global_step=tf.train.get_global_step(),
            decay_steps=10000,
            decay_rate=0.96)))

# Or estimator with warm-starting from a previous checkpoint.
estimator = DNNRegressor(
    feature_columns=[categorical_feature_a_emb, categorical_feature_b_emb],
    hidden_units=[1024, 512, 256],
    warm_start_from="/path/to/checkpoint/dir")

# Input builders
def input_fn_train():  # returns x, y
  pass
estimator.train(input_fn=input_fn_train, steps=100)

def input_fn_eval():  # returns x, y
  pass
metrics = estimator.evaluate(input_fn=input_fn_eval, steps=10)
Example #5
        learning_rate=tf.compat.v1.train.exponential_decay(
            learning_rate=0.1,
            global_step=tf.compat.v1.train.get_global_step(),
            decay_steps=10000,
            decay_rate=0.96)),
    # warm-start settings
    model_dir="/Users/songfeng/workspace/github/tensorflowDemo/model/widedeep2",
    # warm_start_from="/Users/songfeng/workspace/github/tensorflowDemo/model/widedeep2"
)

# To apply L1 and L2 regularization, you can set dnn_optimizer to:
tf.compat.v1.train.ProximalAdagradOptimizer(
    learning_rate=0.1,
    l1_regularization_strength=0.001,
    l2_regularization_strength=0.001)

# To apply learning rate decay, you can set dnn_optimizer to a callable:
lambda: tf.compat.v1.train.AdamOptimizer(
    learning_rate=tf.compat.v1.train.exponential_decay(
        learning_rate=0.1,
        global_step=tf.compat.v1.train.get_global_step(),
        decay_steps=10000,
        decay_rate=0.96))
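
Passing a callable rather than an optimizer instance matters here: the Estimator invokes the callable inside its own graph, so the `exponential_decay` tensor is created next to the Estimator's freshly created global step rather than in the calling graph.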


def input_fn_train():
    return train_ds


def input_fn_eval():
    return train_ds.take(1)


def input_fn_predict():
    return train_ds.take(1)
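
Assuming `estimator` and `train_ds` were constructed earlier in the same file, the input functions above would typically be wired up like this (a sketch, not from the original source):

# Sketch: train, evaluate, and predict with the input functions defined above.
estimator.train(input_fn=input_fn_train, steps=1000)
metrics = estimator.evaluate(input_fn=input_fn_eval)
predictions = list(estimator.predict(input_fn=input_fn_predict))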