예제 #1
0
def dnn_autoencoder(
    tensor_in, hidden_units, activation=nn.relu, add_noise=None, dropout=None,
    scope=None):
  """Builds a fully connected autoencoder (encoder plus mirrored decoder).

  Args:
    tensor_in: tensor or placeholder for input features.
    hidden_units: list of hidden-layer sizes used by the encoder; the
      decoder reuses the same sizes in reverse order.
    activation: activation function applied within each layer.
    add_noise: optional callable that corrupts tensor_in before encoding,
      e.g. lambda x: x + np.random.normal(0, 0.1, (len(x), len(x[0]))).
    dropout: if not None, adds a dropout layer with this probability.
    scope: the variable scope for this op.

  Returns:
    A pair of tensors (encoder, decoder).
  """
  with vs.variable_op_scope([tensor_in], scope, "autoencoder"):
    # Optionally corrupt the input first (denoising-autoencoder setup).
    inputs = tensor_in if add_noise is None else add_noise(tensor_in)
    # Encoder: stack the hidden layers exactly as given.
    with vs.variable_scope("encoder"):
      encoder = dnn_ops.dnn(
          inputs, hidden_units, activation=activation, dropout=dropout)
    # Decoder: mirror the layer sizes to map back toward the input shape.
    with vs.variable_scope("decoder"):
      decoder = dnn_ops.dnn(
          encoder, hidden_units[::-1], activation=activation, dropout=dropout)
    return encoder, decoder
예제 #2
0
def dnn_autoencoder(tensor_in,
                    hidden_units,
                    activation=nn.relu,
                    add_noise=None,
                    dropout=None,
                    scope=None):
    """Constructs an encoder/decoder pair of fully connected networks.

    The encoder maps `tensor_in` through the layer sizes in `hidden_units`;
    the decoder mirrors those sizes in reverse order to reconstruct the
    input. If `add_noise` is given, the input is corrupted first, which
    yields a denoising autoencoder.

    Args:
      tensor_in: tensor or placeholder holding the input features.
      hidden_units: list of hidden-layer sizes for the encoder.
      activation: activation function used within each layer.
      add_noise: optional callable that adds noise to tensor_in,
        e.g. def add_noise(x):
                 return(x + np.random.normal(0, 0.1, (len(x), len(x[0]))))
      dropout: if not None, adds a dropout layer with this probability.
      scope: the variable scope for this op.

    Returns:
      A pair of tensors (encoder, decoder).
    """
    with vs.variable_scope(scope, "autoencoder", [tensor_in]):
        if add_noise is not None:
            tensor_in = add_noise(tensor_in)

        def _stack(net, units, name):
            # Build one DNN stack under its own sub-scope.
            with vs.variable_scope(name):
                return dnn_ops.dnn(net,
                                   units,
                                   activation=activation,
                                   dropout=dropout)

        encoder = _stack(tensor_in, hidden_units, "encoder")
        # Decoder reverses the layer sizes to mirror the encoder.
        decoder = _stack(encoder, hidden_units[::-1], "decoder")
        return encoder, decoder
예제 #3
0
파일: models.py 프로젝트: 0ruben/tensorflow
 def dnn_estimator(x, y):
   """DNN estimator: feeds DNN features into the target predictor."""
   # Build the hidden layers over x, then apply the predictor head on top.
   return target_predictor_fn(dnn_ops.dnn(x, hidden_units, dropout=dropout), y)
예제 #4
0
 def dnn_estimator(x, y):
     """Apply the target predictor on top of a DNN built over x."""
     # hidden_units / dropout / target_predictor_fn come from the
     # enclosing scope.
     hidden = dnn_ops.dnn(x, hidden_units, dropout=dropout)
     return target_predictor_fn(hidden, y)
예제 #5
0
 def dnn_estimator(X, y):
   # pylint: disable=invalid-name
   """Run the DNN over X and hand the resulting layers to the predictor."""
   net = dnn_ops.dnn(X, hidden_units, dropout=dropout)
   return target_predictor_fn(net, y)
예제 #6
0
 def dnn_estimator(X, y):
   # pylint: disable=invalid-name
   """DNN estimator with the target predictor function applied on top."""
   # Chain directly: DNN features over X, then the predictor head.
   return target_predictor_fn(dnn_ops.dnn(X, hidden_units, dropout=dropout), y)