def _two_phases_preprocessing_fn(inputs):
    x = inputs['x']
    x_mean = analyzers.mean(x)
    x_square_deviations = tf.square(x - x_mean)
    x_var = analyzers.mean(x_square_deviations + analyzers.mean(inputs['y']))
    x_normalized = (x - x_mean) / tf.sqrt(x_var)
    return {
        'x_normalized': x_normalized,
        's_id': mappers.compute_and_apply_vocabulary(inputs['s'])
    }
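This preprocessing_fn needs two analysis phases: the second analyzers.mean call runs on x_square_deviations, which already depends on the result of the first pass. A minimal sketch of running it with tensorflow_transform.beam, assuming dense float features x and y, a string feature s, and purely illustrative in-memory records (none of this is part of the original test):

import tempfile

import tensorflow as tf
import tensorflow_transform.beam as tft_beam
from tensorflow_transform.tf_metadata import dataset_metadata
from tensorflow_transform.tf_metadata import schema_utils

# Illustrative records and feature spec, not taken from the original source.
raw_data = [
    {'x': 1.0, 'y': 2.0, 's': 'hello'},
    {'x': 3.0, 'y': 4.0, 's': 'world'},
]
raw_metadata = dataset_metadata.DatasetMetadata(
    schema_utils.schema_from_feature_spec({
        'x': tf.io.FixedLenFeature([], tf.float32),
        'y': tf.io.FixedLenFeature([], tf.float32),
        's': tf.io.FixedLenFeature([], tf.string),
    }))

with tft_beam.Context(temp_dir=tempfile.mkdtemp()):
    # Each analysis phase is a separate pass over the data; the transform is
    # applied once all analyzer outputs are known.
    (transformed_data, transformed_metadata), transform_fn = (
        (raw_data, raw_metadata)
        | tft_beam.AnalyzeAndTransformDataset(_two_phases_preprocessing_fn))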
Example #2
def scale_to_z_score(x):
    """Returns a standardized column with mean 0 and variance 1.

    Scaling to z-score subtracts out the mean and divides by standard deviation.
    Note that the standard deviation computed here is based on the biased variance
    (0 delta degrees of freedom), as computed by analyzers.var.

    Args:
      x: A numeric `Tensor`.

    Returns:
      A `Tensor` containing the input column scaled to mean 0 and variance 1
      (standard deviation 1), given by: (x - mean(x)) / std_dev(x).
      If `x` is floating point, the mean will have the same type as `x`. If `x` is
      integral, the output is cast to float32 for int8 and int16 and float64 for
      int32 and int64 (similar to the behavior of tf.truediv).

      Note that TFLearn generally permits only tf.int64 and tf.float32, so casting
      this scaler's output may be necessary. In particular, scaling an int64
      tensor yields a float64 tensor, which would need a cast to float32 to be
      used in TFLearn.
    """
    # x_mean will be float32 or float64, depending on type of x.
    x_mean = analyzers.mean(x)
    return (tf.cast(x, x_mean.dtype) - x_mean) / tf.sqrt(analyzers.var(x))
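A short usage sketch inside a preprocessing_fn, illustrating the dtype behavior described in the docstring; the feature names 'age' and 'fare' are hypothetical, not from the original snippet:

def preprocessing_fn(inputs):
    # 'age' is assumed to be int64, so scale_to_z_score yields float64;
    # cast down to float32 if the consumer requires it.
    age_z = tf.cast(scale_to_z_score(inputs['age']), tf.float32)
    # 'fare' is assumed to be float32, so the result is already float32.
    fare_z = scale_to_z_score(inputs['fare'])
    return {'age_z': age_z, 'fare_z': fare_z}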
Example #3
def _preprocessing_fn_with_control_dependency(inputs):
    with tf.init_scope():
        initializer = tf.lookup.KeyValueTensorInitializer(['foo', 'bar'],
                                                          [0, 1])
        table = tf.lookup.StaticHashTable(initializer, default_value=-1)
    # The table created here will add an automatic control dependency.
    s_int = table.lookup(inputs['s']) + 1

    # Perform some TF Ops to ensure x is part of the graph of dependencies for the
    # outputs.
    x_abs = tf.math.abs(inputs['x'])
    y_centered = (tf.sparse.add(tf.cast(inputs['y'], tf.float32),
                                -analyzers.mean(inputs['y'])))
    return {'s_int': s_int, 'x_abs': x_abs, 'y_centered': y_centered}
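Since inputs['y'] goes through tf.sparse.add, it is presumably parsed as a SparseTensor (e.g. via a VarLenFeature), while s and x can be dense. A sketch of metadata matching those assumptions (the dtypes are guesses; if y is already float32 the explicit tf.cast above is a no-op):

import tensorflow as tf
from tensorflow_transform.tf_metadata import dataset_metadata
from tensorflow_transform.tf_metadata import schema_utils

# Assumed feature spec; the original test defines its own schema.
_FEATURE_SPEC = {
    's': tf.io.FixedLenFeature([], tf.string),
    'x': tf.io.FixedLenFeature([], tf.float32),
    'y': tf.io.VarLenFeature(tf.float32),  # parsed as a SparseTensor
}
_RAW_METADATA = dataset_metadata.DatasetMetadata(
    schema_utils.schema_from_feature_spec(_FEATURE_SPEC))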
def _one_phase_preprocessing_fn(inputs):
    x_plus_one = _plus_one(inputs['x'])
    subtracted = tf.sparse.add(tf.cast(inputs['y'], tf.float32),
                               -analyzers.mean(x_plus_one))
    _ = analyzers.vocabulary(inputs['s'])
    return {'subtracted': subtracted}
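_plus_one is a helper defined elsewhere in the original module and not shown in this snippet; a minimal stand-in, assuming it simply adds one element-wise, could look like:

def _plus_one(x):
    # Assumed behavior: element-wise increment; the real helper may differ.
    return x + 1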
Example #5
def preprocessing_fn(inputs):
    return {'x_add_1': analyzers.mean(inputs['x'])}
def _one_phase_preprocessing_fn(inputs):
    x_centered = inputs['x'] - analyzers.mean(inputs['y'])
    _ = analyzers.vocabulary(inputs['s'])
    return {'x_centered': x_centered}
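Both _one_phase_preprocessing_fn variants need only a single analysis pass, since no analyzer consumes another analyzer's output. Where only the transform function is wanted, a hedged sketch (reusing illustrative raw_data and metadata like those assumed in the earlier sketches) is:

import tempfile
import tensorflow_transform.beam as tft_beam

# raw_data and raw_metadata are the hypothetical in-memory records and schema
# sketched above, not values from the original tests.
with tft_beam.Context(temp_dir=tempfile.mkdtemp()):
    transform_fn = (
        (raw_data, raw_metadata)
        | tft_beam.AnalyzeDataset(_one_phase_preprocessing_fn))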