def dnn_layers(input_layers, layers):
     if layers and isinstance(layers, dict):
         return tflayers.stack(input_layers, tflayers.fully_connected,
                               layers['layers'],
                               activation=layers.get('activation'),
                               dropout=layers.get('dropout'))
     elif layers:
         return tflayers.stack(input_layers, tflayers.fully_connected, layers)
     else:
         return input_layers
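For context, a minimal usage sketch of the helper above, assuming TensorFlow 1.x with tf.contrib available and dnn_layers defined as shown; the placeholder shape and layer sizes are made up. Note that the dict branch forwards the activation/dropout keywords straight to tflayers.fully_connected, whose parameter is actually named activation_fn (and which has no dropout argument), so only the list form is exercised here.

import tensorflow as tf
from tensorflow.contrib import layers as tflayers

x = tf.placeholder(tf.float32, [None, 32])   # a batch of 32-dimensional inputs
net = dnn_layers(x, [64, 32, 16])            # three stacked fully connected layers
unchanged = dnn_layers(x, None)              # a falsy `layers` argument returns the input as-is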
Example #3
def dnn_stack(input, layers):
    if layers and isinstance(layers, dict):
        dnn_out = tflayers.stack(input,
                                 tflayers.fully_connected,
                                 layers['layers'],
                                 activation_fn=layers.get('activation'))
    elif layers:
        dnn_out = tflayers.stack(input, tflayers.fully_connected, layers)
    W_fc1 = weight_variable([layers['layers'][-1], 1])
    b_fc1 = bias_variable([1])
    pred = tf.add(tf.matmul(dnn_out, W_fc1), b_fc1, name='dnnout')
    return pred
Example #4
def dnn_stack(input, layers):  ### The fully connected layers use stack from tflayers, so the connections don't have to be wired up by hand
    if layers and isinstance(layers, dict):
        dnn_out = tflayers.stack(input,
                                 tflayers.fully_connected,
                                 layers['layers'],
                                 activation_fn=layers.get('activation'))
    elif layers:
        dnn_out = tflayers.stack(input, tflayers.fully_connected, layers)
    W_fc1 = weight_variable([layers['layers'][-1], 1])
    b_fc1 = bias_variable([1])
    pred = tf.add(tf.matmul(dnn_out, W_fc1), b_fc1,
                  name='dnnout')  ### the DNN output paired with the label is a single number
    return pred
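The two dnn_stack examples above rely on weight_variable and bias_variable helpers that are not shown. A sketch of typical definitions under that assumption; the output shape comes from layers['layers'][-1] as in the code above, but the initializers here are guesses, not the original project's.

import tensorflow as tf

def weight_variable(shape):
    # Small random initial weights; the stddev is an assumed value, not the original one.
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))

def bias_variable(shape):
    # Small constant initial bias.
    return tf.Variable(tf.constant(0.1, shape=shape))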
Example #5
def embed_condition_images(condition_image,
                           scope,
                           reuse=tf.AUTO_REUSE,
                           fc_layers=None,
                           use_spatial_softmax=True):
    """Independently embed a (meta)-batch of images.

  Args:
    condition_image: A rank 4 tensor of images: [N, H, W, C].
    scope: Name of the tf variable_scope.
    reuse: The variable_scope reuse setting.
    fc_layers: An optional tuple of ints describing the number of units in each
      fully-connected hidden layer, or 1x1 conv layer when excluding spatial
      softmax.
    use_spatial_softmax: Whether to use a spatial softmax or not.

  Returns:
    A rank 2 tensor of embeddings: [N, embedding size] if spatial_softmax is
    True. Otherwise, a rank 4 tensor of visual features [N, H, W, embedding
    size]
  Raises:
    ValueError if `condition_image` has incorrect rank.
  """
    if len(condition_image.shape) != 4:
        raise ValueError('Image has unexpected shape {}.'.format(
            condition_image.shape))
    with tf.variable_scope(scope, reuse=reuse, use_resource=True):
        image_embedding, _ = vision_layers.BuildImagesToFeaturesModel(
            condition_image, use_spatial_softmax=use_spatial_softmax)
        if fc_layers is not None:
            if len(image_embedding.shape) == 2:
                image_embedding = layers.stack(image_embedding,
                                               layers.fully_connected,
                                               fc_layers[:-1],
                                               activation_fn=tf.nn.relu,
                                               normalizer_fn=layers.layer_norm)
                image_embedding = layers.fully_connected(image_embedding,
                                                         fc_layers[-1],
                                                         activation_fn=None)
            else:
                image_embedding = layers.stack(image_embedding,
                                               layers.conv2d,
                                               fc_layers[:-1],
                                               kernel_size=[1, 1],
                                               activation_fn=tf.nn.relu,
                                               normalizer_fn=layers.layer_norm)
                image_embedding = layers.conv2d(image_embedding,
                                                fc_layers[-1],
                                                activation_fn=None)
    return image_embedding
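A standalone sketch of what the no-spatial-softmax branch above does, independent of vision_layers (shapes and sizes are made up): with kernel_size=[1, 1], layers.conv2d behaves like a per-location fully connected layer over the channel dimension.

import tensorflow as tf
from tensorflow.contrib import layers

feat = tf.placeholder(tf.float32, [None, 8, 8, 32])    # rank 4 visual features [N, H, W, C]
emb = layers.stack(feat, layers.conv2d, (64,),
                   kernel_size=[1, 1],
                   activation_fn=tf.nn.relu,
                   normalizer_fn=layers.layer_norm)
emb = layers.conv2d(emb, 16, kernel_size=[1, 1], activation_fn=None)   # [N, 8, 8, 16]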
Example #6
def lstm_model(X, y):

    X = tf.reshape(
        X, [-1, n_steps, n_input])  # shape: [batch_size, n_steps, n_input]
    X = tf.transpose(X, [1, 0, 2])  # shape: [n_steps, batch_size, n_input]
    X = tf.reshape(X, [-1, n_input])  # shape: [n_steps*batch_size, n_input]

    # Split data for sequences
    X = tf.split(0, n_steps, X)  # n_steps * (batch_size, n_input)

    init = tf.random_normal_initializer(stddev=0.05)  # note: this initializer is created but never used below
    lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(hidden, forget_bias=forget_bias)

    # Dropout
    lstm_cell = tf.nn.rnn_cell.DropoutWrapper(lstm_cell,
                                              output_keep_prob=keep_prob)

    output, _ = tf.nn.rnn(lstm_cell, X, dtype=tf.float32)

    # Fully connected layer with dropout
    output = tf.nn.dropout(
        layers.stack(output[0], layers.fully_connected, dnn_hidden), keep_prob)

    regression = skflow.models.linear_regression(
        output, y)  # Use output[0] if omitting fully connected layer

    return regression
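The reshape/transpose/split bookkeeping above is easier to follow with concrete numbers. A small NumPy sketch tracing the same shape changes, with made-up values batch_size=2, n_steps=3, n_input=4:

import numpy as np

x = np.zeros((2, 3, 4))            # [batch_size, n_steps, n_input]
x = x.transpose(1, 0, 2)           # [n_steps, batch_size, n_input]
x = x.reshape(-1, 4)               # [n_steps * batch_size, n_input]
steps = np.split(x, 3, axis=0)     # n_steps arrays, each [batch_size, n_input]
print([s.shape for s in steps])    # [(2, 4), (2, 4), (2, 4)]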
Example #7
 def _vgg_11(self, x):
     net = repeat(x, 2, conv2d, 64, [3, 3], scope='conv1')
     net = tf.layers.max_pooling2d(net, [2, 2],
                                   strides=2,
                                   data_format='channels_first',
                                   name='pool1')
     net = repeat(net, 2, conv2d, 128, [3, 3], scope='conv2')
     net = tf.layers.max_pooling2d(net, [2, 2],
                                   strides=2,
                                   data_format='channels_first',
                                   name='pool2')
     net = repeat(net, 3, conv2d, 256, [3, 3], scope='conv3')
     net = tf.layers.max_pooling2d(net, [2, 2],
                                   strides=2,
                                   data_format='channels_first',
                                   name='pool3')
     net = repeat(net, 3, conv2d, 512, [3, 3], scope='conv4')
     net = tf.layers.max_pooling2d(net, [2, 2],
                                   strides=2,
                                   data_format='channels_first',
                                   name='pool4')
     net = tf.layers.flatten(net, name='flatten')
     net = stack(net, fully_connected, self.layer_sizes, scope='fc5')
     features = dropout(net, scope='drop5')
     logits = fully_connected(features,
                              self._num_classes,
                              activation_fn=None,
                              normalizer_fn=None,
                              scope='unscaled_logits')
     return logits, features
def my_model(features, target):
  """DNN with three hidden layers, and dropout of 0.1 probability."""
  # Convert the target to a one-hot tensor of shape (length of features, 3), with
  # an on-value of 1 for each one-hot vector of length 3.
  target = tf.one_hot(target, 3, 1, 0)

  # Create three fully connected layers respectively of size 10, 20, and 10 with
  # each layer having a dropout probability of 0.1.
  normalizer_fn = layers.dropout
  normalizer_params = {'keep_prob': 0.9}
  features = layers.stack(features, layers.fully_connected, [10, 20, 10],
                          normalizer_fn=normalizer_fn,
                          normalizer_params=normalizer_params)

  # Compute logits (1 per class) and compute loss.
  logits = layers.fully_connected(features, 3, activation_fn=None)
  loss = tf.contrib.losses.softmax_cross_entropy(logits, target)

  # Create a tensor for training op.
  train_op = tf.contrib.layers.optimize_loss(
      loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
      learning_rate=0.1)

  return ({
      'class': tf.argmax(logits, 1),
      'prob': tf.nn.softmax(logits)}, loss, train_op)
def my_model(features, target):
    """DNN with three hidden layers, and dropout of 0.1 probability."""
    # Convert the target to a one-hot tensor of shape (length of features, 3), with
    # an on-value of 1 for each one-hot vector of length 3.
    target = tf.one_hot(target, 3, 1, 0)

    # Create three fully connected layers respectively of size 10, 20, and 10 with
    # each layer having a dropout probability of 0.1.
    normalizer_fn = layers.dropout
    normalizer_params = {'keep_prob': 0.9}
    features = layers.stack(features,
                            layers.fully_connected, [10, 20, 10],
                            normalizer_fn=normalizer_fn,
                            normalizer_params=normalizer_params)

    # Compute logits (1 per class) and compute loss.
    logits = layers.fully_connected(features, 3, activation_fn=None)
    loss = tf.contrib.losses.softmax_cross_entropy(logits, target)

    # Create a tensor for training op.
    train_op = tf.contrib.layers.optimize_loss(
        loss,
        tf.contrib.framework.get_global_step(),
        optimizer='Adagrad',
        learning_rate=0.1)

    return ({
        'class': tf.argmax(logits, 1),
        'prob': tf.nn.softmax(logits)
    }, loss, train_op)
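A loose equivalent of one stacked layer with normalizer_fn=layers.dropout, glossing over exactly where fully_connected applies the normalizer relative to the bias and activation; keep_prob=0.9 corresponds to dropping each unit with probability 0.1. The input shape is made up.

import tensorflow as tf
from tensorflow.contrib import layers

features = tf.placeholder(tf.float32, [None, 4])
h = layers.fully_connected(features, 10)     # one fully connected layer, ReLU by default
h = layers.dropout(h, keep_prob=0.9)         # dropout attached to that layer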
Example #10
def my_model(features, target):
    """DNN with three hidden layers, and dropout of 0.1 probability."""
    # Convert the target to a one-hot tensor of shape (length of features, 3), with
    # an on-value of 1 for each one-hot vector of length 3.
    target = tf.one_hot(target, 3, 1, 0)

    # Create three fully connected layers respectively of size 10, 20, and 10 with
    # each layer having a dropout probability of 0.1.
    normalizer_fn = layers.dropout
    normalizer_params = {'keep_prob': 0.9}
    features = layers.stack(features,
                            layers.fully_connected, [10, 20, 10],
                            normalizer_fn=normalizer_fn,
                            normalizer_params=normalizer_params)

    # Create two tensors respectively for prediction and loss.
    prediction, loss = (tf.contrib.learn.models.logistic_regression(
        features, target))

    # Create a tensor for training op.
    train_op = tf.contrib.layers.optimize_loss(
        loss,
        tf.contrib.framework.get_global_step(),
        optimizer='Adagrad',
        learning_rate=0.1)

    return {
        'class': tf.argmax(prediction, 1),
        'prob': prediction
    }, loss, train_op
Example #11
def model(features, target):
    global args

    regularizer = None
    regularization_type = args.regularization_type.lower()
    regularization_value = args.regularization_value
    if regularization_type == "l1":
        print("Using L1 regularizer, val =", regularization_value)
        regularizer = tf.contrib.layers.l1_regularizer(regularization_value)
    elif regularization_type == "l2":
        print("Using L2 regularizer, val =", regularization_value)
        regularizer = tf.contrib.layers.l2_regularizer(regularization_value)
    else:
        print("Not using regularization")

    target = tf.one_hot(target, 3, 1, 0)
    with tf.variable_scope(MODEL_NAME, regularizer=regularizer):
        features = layers.stack(features, layers.fully_connected, [10, 20, 10])
        logits = layers.fully_connected(features, 3, activation_fn=None)
    loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
    if regularizer:
        loss = loss + sum(tf.get_collection(
            tf.GraphKeys.REGULARIZATION_LOSSES))

    train_op = tf.contrib.layers.optimize_loss(
        loss,
        tf.contrib.framework.get_global_step(),
        optimizer='Adagrad',
        learning_rate=0.1)

    return ({
        'class': tf.argmax(logits, 1),
        'prob': tf.nn.softmax(logits)
    }, loss, train_op)
Example #12
    def dnn_layers(input_layers, layers):

        logging.info("building dense rnn layers...")

        if layers and isinstance(layers, dict):
            # stack required layers in to one
            return tflayers.stack(input_layers,
                                  tflayers.fully_connected,
                                  layers['layers'],
                                  activation=layers.get('activation'),
                                  dropout=layers.get('dropout'))
        elif layers:
            return tflayers.stack(input_layers, tflayers.fully_connected,
                                  layers)
        else:
            return input_layers
Example #13
def embed_condition_images(
    condition_image,
    scope,
    reuse=tf.AUTO_REUSE,
    fc_layers = None):
  """Independently embed a (meta)-batch of images.

  Args:
    condition_image: A rank 4 tensor of images: [N, H, W, C].
    scope: Name of the tf variable_scope.
    reuse: The variable_scope reuse setting.
    fc_layers: An optional tuple of ints describing the number of units in each
      fully-connected hidden layer.
  Returns:
    A rank 2 tensor of embeddings: [N, embedding size].
  Raises:
    ValueError if `condition_image` has incorrect rank.
  """
  if len(condition_image.shape) != 4:
    raise ValueError(
        'Image has unexpected shape {}.'.format(condition_image.shape))
  with tf.variable_scope(scope, reuse=reuse, use_resource=True):
    image_embedding, _ = vision_layers.BuildImagesToFeaturesModel(
        condition_image)
    if fc_layers is not None:
      image_embedding = layers.stack(
          image_embedding,
          layers.fully_connected,
          fc_layers[:-1],
          activation_fn=tf.nn.relu,
          normalizer_fn=layers.layer_norm)
      image_embedding = layers.fully_connected(
          image_embedding, fc_layers[-1], activation_fn=None)
  return image_embedding
Example #14
def dnn_tanh(features, target):
    target = tf.one_hot(target, 2, 1.0, 0.0)
    # Organize continuous features.
    final_features = [
        tf.expand_dims(tf.cast(features[var], tf.float32), 1)
        for var in continues_vars
    ]
    # Embed categorical variables into distributed representation.
    for var in categorical_vars:
        feature = learn.ops.categorical_variable(
            features[var + '_ids'],
            len(categorical_var_encoders[var].classes_),
            embedding_size=CATEGORICAL_EMBED_SIZE,
            name=var)
        final_features.append(feature)
    # Concatenate all features into one vector.
    features = tf.concat(1, final_features)
    # Deep Neural Network
    logits = layers.stack(features,
                          layers.fully_connected, [10, 20, 10],
                          activation_fn=tf.tanh)
    prediction, loss = learn.models.logistic_regression(logits, target)
    train_op = layers.optimize_loss(loss,
                                    tf.contrib.framework.get_global_step(),
                                    optimizer='SGD',
                                    learning_rate=0.05)
    return tf.argmax(prediction, dimension=1), loss, train_op
Example #15
def my_model(features, target):
    target = tf.one_hot(target, 3, 1, 0)
    features = layers.stack(features, layers.fully_connected, [10, 20, 10])
    prediction, loss = tf.contrib.learn.models.logistic_regression_zero_init(features, target)
    train_op = tf.contrib.layers.optimize_loss(loss, tf.contrib.framework.get_global_step(),
                                              optimizer='Adagrad', learning_rate=0.1)
    return {'class': tf.arg_max(prediction, 1), 'prob': prediction}, loss, train_op
def model_function(features, targets, mode):

    hlayers = layers.stack(
        features,
        layers.fully_connected, [1000, 100, 50, 20],
        activation_fn=tf.nn.relu,
        weights_regularizer=layers.l1_l2_regularizer(1.0, 2.0),
        weights_initializer=layers.xavier_initializer(uniform=True, seed=100))

    # Hidden layers are fully connected here for best performance; TensorFlow's layers module has
    # no built-in option for non-fully-connected (sparse) layers, so that would require custom code.

    outputs = layers.fully_connected(
        inputs=hlayers,
        num_outputs=10,  # 10 perceptrons in output layer for 10 numbers (0 to 9)
        activation_fn=None
    )  # activation_fn=None because the softmax is applied inside the softmax_cross_entropy loss

    # Calculate loss using cross-entropy error; also use the 'softmax' activation function
    loss = losses.softmax_cross_entropy(outputs, targets)

    optimizer = layers.optimize_loss(
        loss=loss,
        global_step=tf.contrib.framework.get_global_step(),
        learning_rate=0.8,
        optimizer="SGD")

    # Class of output (i.e., predicted number) corresponds to the perceptron returning the highest fractional value
    # Returning both fractional values and corresponding labels
    probs = tf.nn.softmax(outputs)
    return {'probs': probs, 'labels': tf.argmax(probs, 1)}, loss, optimizer
Example #17
def model_function(features, targets, mode):
    # Two hidden layers - 20,10 = # perceptrons in layer1, layer2. Both have ReLU activation
    # More concise syntax
    hlayers = layers.stack(features,
                           layers.fully_connected, [20, 10],
                           activation_fn=tf.nn.relu)

    # Hidden layers are fully connected here for best performance; TensorFlow's layers module has
    # no built-in option for non-fully-connected (sparse) layers, so that would require custom code.

    outputs = layers.fully_connected(
        inputs=hlayers,
        num_outputs=10,  # 10 perceptrons in output layer for 10 numbers (0 to 9)
        activation_fn=None
    )  # activation_fn=None because the softmax is applied inside the softmax_cross_entropy loss

    # Calculate loss using cross-entropy error; also use the 'softmax' activation function
    loss = losses.softmax_cross_entropy(outputs, targets)

    optimizer = layers.optimize_loss(
        loss=loss,
        global_step=tf.contrib.framework.get_global_step(),
        learning_rate=0.001,
        optimizer="SGD")

    # Class of output (i.e., predicted number) corresponds to the perceptron returning the highest fractional value
    # Returning both fractional values and corresponding labels
    probs = tf.nn.softmax(outputs)
    return {'probs': probs, 'labels': tf.argmax(probs, 1)}, loss, optimizer
Example #18
def my_model(features, target):
  """DNN with three hidden layers, and dropout of 0.1 probability."""
  # Convert the target to a one-hot tensor of shape (length of features, 3), with
  # an on-value of 1 for each one-hot vector of length 3.
  target = tf.one_hot(target, 3, 1, 0)

  # Create three fully connected layers respectively of size 10, 20, and 10 with
  # each layer having a dropout probability of 0.1.
  normalizer_fn = layers.dropout
  normalizer_params = {'keep_prob': 0.9}
  features = layers.stack(features, layers.fully_connected, [10, 20, 10],
                          normalizer_fn=normalizer_fn,
                          normalizer_params=normalizer_params)

  # Create two tensors respectively for prediction and loss.
  prediction, loss = (
      tf.contrib.learn.models.logistic_regression(features, target)
  )

  # Create a tensor for training op.
  train_op = tf.contrib.layers.optimize_loss(
      loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
      learning_rate=0.1)

  return {'class': tf.argmax(prediction, 1), 'prob': prediction}, loss, train_op
Example #19
def embed_fullstate(fullstate,
                    embed_size,
                    scope,
                    reuse=tf.AUTO_REUSE,
                    fc_layers=(100, )):
    """Embed full state pose (non-image) observations.

  Args:
    fullstate: A rank 2 tensor: [N, F].
    embed_size: Integer, the output embedding size.
    scope: Name of the tf variable scope.
    reuse: The variable_scope reuse setting.
    fc_layers: A tuple of ints describing the number of units in each hidden
      layer.
  Returns:
    A rank 2 tensor: [N, embed_size].
  """
    with tf.variable_scope(scope, reuse=reuse, use_resource=True):
        embedding = layers.stack(fullstate,
                                 layers.fully_connected,
                                 fc_layers,
                                 activation_fn=tf.nn.relu,
                                 normalizer_fn=layers.layer_norm)
        embedding = layers.fully_connected(embedding,
                                           embed_size,
                                           activation_fn=None)
    return embedding
Example #20
def dnn_tanh(features, target):
    target = tf.one_hot(target, 2, 1.0, 0.0)
    logits = layers.stack(features, layers.fully_connected, [10, 20, 10],
        activation_fn=tf.tanh)
    prediction, loss = learn.models.logistic_regression(logits, target)
    train_op = layers.optimize_loss(loss,
        tf.contrib.framework.get_global_step(), optimizer='SGD', learning_rate=0.05)
    return tf.argmax(prediction, dimension=1), loss, train_op
Example #22
    def fit(self, X):
        h_layers = self.h_layers
        assert len(h_layers) >= 1, 'Must give at least one layer.'
        l2_pen = self.l2_pen
        nb_epoch = self.nb_epoch
        X_data = X.reshape(X.shape[0], -1)
        n_inputs = X_data.shape[1]
        n_outputs = n_inputs
        if self.batch_size is None:
            self.batch_size = n_inputs
        batch_size = self.batch_size
        lr = self.lr

        # CONSTRUCTION PHASE
        X = tf.placeholder(tf.float32, shape=(None, n_inputs), name='X')
        with arg_scope(
                [layers.fully_connected],
                activation_fn=self.activation_fn,
                weights_regularizer=layers.l2_regularizer(l2_pen),
                weights_initializer=layers.variance_scaling_initializer()):
            hidden = layers.stack(X, layers.fully_connected, h_layers, scope='hidden')
            out = layers.fully_connected(hidden, n_outputs, activation_fn=None, scope='out')

        model_loss = tf.reduce_mean(tf.square(out - X))
        reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)  # list of tensors (possibly empty)
        total_loss = model_loss + sum(reg_losses)

        optimizer = tf.train.AdamOptimizer(learning_rate=lr)
        training_op = optimizer.minimize(total_loss)

        init = tf.global_variables_initializer()

        #from tensorflow.examples.tutorials.mnist import input_data
        #mnist = input_data.read_data_sets("/tmp/data/")
        with tf.Session() as sess:
            sess.run(init)

            for epoch in range(nb_epoch):
                for b_idx in range(int(X_data.shape[0]/batch_size)):
                    x_batch = X_data[b_idx*batch_size:(b_idx+1)*batch_size]
                    # run training op
                    sess.run(training_op, feed_dict={X:x_batch})
                # check loss after each epoch
                epoch_loss = sess.run(model_loss, feed_dict={X:X_data})
                print('Epoch:', epoch, 'Loss:', epoch_loss)

            # get weights from fully_connected layer(s)
            weights = []
            for i in range(1,len(h_layers)+1):
                with tf.variable_scope('hidden/hidden_%i'%i, reuse=True):
                    h_w = tf.get_variable('weights')
                    h_w = sess.run(h_w)
                    weights.append(h_w)
            with tf.variable_scope('out', reuse=True):
                o_w = tf.get_variable('weights')
                weights.append(sess.run(o_w))
        
        self.weights_ = weights
Example #23
def reduce_temporal_embeddings(temporal_embedding,
                               output_size,
                               scope,
                               reuse=tf.AUTO_REUSE,
                               conv1d_layers=(64, ),
                               fc_hidden_layers=(100, ),
                               combine_mode='temporal_conv'):
    """Combine embedding across the episode temporal dimension.

  Args:
    temporal_embedding: A rank 3 tensor: [N, time dim, feature dim].
    output_size: The dimension of the output embedding.
    scope: Name of the tf variable_scope.
    reuse: The variable_scope reuse setting.
    conv1d_layers: An optional tuple of ints describing the number of feature
      maps in each 1D conv layer.
    fc_hidden_layers: A tuple of ints describing the number of units in each
      fully-connected hidden layer.
    combine_mode: How to reduce across time to get a fixed length vector.

  Returns:
    A rank 2 tensor: [N, output_size].
  Raises:
    ValueError if `temporal_embedding` has incorrect rank.
  """
    if len(temporal_embedding.shape) == 5:
        temporal_embedding = tf.reduce_mean(temporal_embedding, axis=[2, 3])
    if len(temporal_embedding.shape) != 3:
        raise ValueError('Temporal embedding has unexpected shape {}.'.format(
            temporal_embedding.shape))
    embedding = temporal_embedding
    with tf.variable_scope(scope, reuse=reuse, use_resource=True):
        if 'temporal_conv' not in combine_mode:
            # Just average
            embedding = tf.reduce_mean(embedding, axis=1)
        else:
            if conv1d_layers is not None:
                for num_filters in conv1d_layers:
                    embedding = tf.layers.conv1d(embedding,
                                                 num_filters,
                                                 10,
                                                 activation=tf.nn.relu,
                                                 use_bias=False)
                    embedding = layers.layer_norm(embedding)
            if combine_mode == 'temporal_conv_avg_after':
                embedding = tf.reduce_mean(embedding, axis=1)
            else:
                embedding = layers.flatten(embedding)

        embedding = layers.stack(embedding,
                                 layers.fully_connected,
                                 fc_hidden_layers,
                                 activation_fn=tf.nn.relu,
                                 normalizer_fn=layers.layer_norm)
        embedding = layers.fully_connected(embedding,
                                           output_size,
                                           activation_fn=None)
    return embedding
Example #24
 def _mlp(self, x):
     net = stack(x, fully_connected, self.layer_sizes, scope='fc')
     features = tf.nn.dropout(
         net, keep_prob=self._dropout_keep_prob)  # For group dropout
     logits = fully_connected(features,
                              num_outputs=2,
                              activation_fn=None,
                              scope='unscaled_logits')
     return logits, features
Example #25
def my_model(features, target):
    target = tf.one_hot(target, 3, 1, 0)
    # 3 is the depth of the one-hot dimension.
    #  [[1., 0., 0.],   # length 3: it matches the number of input classes, so depth can be read as the number of classes
    #   [0., 1., 0.],
    #   [0., 0., 1.]]
    features = layers.stack(features, layers.fully_connected, [10, 20, 10])  # stack several fc layers with 10, 20, and 10 units
    prediction, loss = tf.contrib.learn.models.logistic_regression_zero_init(features, target)  # zero-initialized logistic regression
    train_op = tf.contrib.layers.optimize_loss(loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
    return {'class': tf.argmax(prediction, 1), 'prob': prediction}, loss, train_op
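A quick check of what tf.one_hot(target, 3, 1, 0) produces for the comment above, using the TF 1.x session API:

import tensorflow as tf

with tf.Session() as sess:
    print(sess.run(tf.one_hot([0, 1, 2], 3, 1, 0)))
# [[1 0 0]
#  [0 1 0]
#  [0 0 1]]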
Example #26
 def predict(self, feat_ab, name, reuse=False):
     with tf.variable_scope(name, reuse=reuse):
         out = layers.stack(feat_ab,
                            layers.fully_connected, [1024, 256],
                            scope='fc')
         out = layers.fully_connected(out,
                                      1,
                                      activation_fn=None,
                                      scope='fc_out')
     return out
Example #27
 def _build_model(self, data, target):
     ids = tensorflow.split(1, self.n_ids, data)
     node_vectors = [
         learn.ops.categorical_variable(ids[i], self.vocabulary_sizes[i], self.layer_size, str(i))
         for i in range(self.n_ids)
     ]
     activation_in = tensorflow.squeeze(tensorflow.concat(2, node_vectors), [1])
     activation_out = layers.stack(activation_in, layers.fully_connected, self.hidden_units_formation)
     prediction, loss = learn.models.linear_regression(activation_out, target)
     train_op = layers.optimize_loss(loss, framework.get_global_step(), self.learning_rate, "SGD")
     return prediction, loss, train_op
Example #28
 def dnn_layers(input_layers, layers):
     '''
     Defines the optional dense layers
      *note: A dense layer is a hidden layer in which every node is connected
             to every node in the next layer
     INPUT:
         - input_layers:
         - layers: same as dense_layers
     '''
     if layers and isinstance(layers, dict):
         return tflayers.stack(input_layers,
                               tflayers.fully_connected,
                               layers['layers'],
                               activation=layers.get('activation'),
                               dropout=layers.get('dropout'))
     elif layers:
         return tflayers.stack(input_layers, tflayers.fully_connected,
                               layers)
     else:
         return input_layers
def my_model(features, target):
  """DNN with 10, 20, 10 hidden layers, and dropout of 0.1 probability."""
  target = tf.one_hot(target, 3, 1, 0)
  features = layers.stack(features, layers.fully_connected, [10, 20, 10])
  prediction, loss = (
      tf.contrib.learn.models.logistic_regression_zero_init(features, target)
  )
  train_op = tf.contrib.layers.optimize_loss(
      loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
      learning_rate=0.1)
  return {'class': tf.argmax(prediction, 1), 'prob': prediction}, loss, train_op
Example #30
    def _lstm_model(X, y):
        x_ =  tf.unstack(X, num=time_steps, axis=1)
        stacked_lstm = rnn.MultiRNNCell(lstm_cells(), state_is_tuple=True)
        output, lastState = rnn.static_rnn(stacked_lstm, x_, dtype=dtypes.float32)
        output = tflayers.stack(output[-1], tflayers.fully_connected, fcDim)
        prediction, loss = tflearn.models.linear_regression(output, y)
        train_op = tf.contrib.layers.optimize_loss(
            loss, tf.contrib.framework.get_global_step(), optimizer=optimizer,
            learning_rate = tf.train.exponential_decay(learning_rate, tf.contrib.framework.get_global_step(),
            decay_steps = 100, decay_rate = 0.9, staircase=False, name=None))

        return prediction, loss, train_op
Example #31
def my_model(features, target):
    # features holds the input features; target is the label/class for each row.
    target = tf.one_hot(target, 3, 1, 0)  # one-hot encoding makes computing the loss more convenient
    # Use layers.stack to stack several layers.fully_connected layers into a deep network,
    # with 10, 20, and 10 hidden units respectively.
    features = layers.stack(features, layers.fully_connected, [10, 20, 10])
    prediction, loss = tf.contrib.learn.models.logistic_regression_zero_init(features, target)
    # Optimize the loss with contrib.layers.optimize_loss.
    train_op = tf.contrib.layers.optimize_loss(
        loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
        learning_rate=0.1)
    return {'class': tf.argmax(prediction, 1), 'prob': prediction}, loss, train_op
Example #32
def model(X, y):
    basics = [tf.nn.rnn_cell.BasicLSTMCell(5, state_is_tuple=True)]
    stacked_lstm = tf.nn.rnn_cell.MultiRNNCell(basics, state_is_tuple=True)
    x_ = tf.unpack(X, axis=1, num=10)
    output, layers = tf.nn.rnn(stacked_lstm, x_, dtype=dtypes.float32)
    output = tflayers.stack(output[-1], tflayers.fully_connected, [10, 10])
    prediction, loss = tflearn.models.linear_regression(output, y)
    train_op = tf.contrib.layers.optimize_loss(
        loss,
        tf.contrib.framework.get_global_step(),
        optimizer='Adagrad',
        learning_rate=0.1)
    return prediction, loss, train_op
Example #33
 def dnn_layers(input_layers, layers):
     ## Why support different formats? The use of a variable that could be either
     ## a scalar type (int/char/whatever) or a dict (much more complex), or NoneType
     ## is interesting to me. Need to pay attention to use here.
     ## tflayers is alias for tf.contrib.layers
     if layers and isinstance(layers, dict):
         ## tflayers.stack() (alias of tf.contrib.layers.stack)
         ## Calls stack_layers repeatedly. What does stack_layers do?
         ## identical between versions.
         ## activation, dropout are kwargs passed directly to stack_layers
         ## In this layer, every cell is connected to every other cell.
         return tflayers.stack(input_layers, tflayers.fully_connected,
                               layers['layers'],
                               activation=layers.get('activation'),
                               dropout=layers.get('dropout'))
     elif layers:
         ## Find out what the activation and dropout parameters do, and why they're excluded here
         return tflayers.stack(input_layers, tflayers.fully_connected, layers)
     else:
         ## Why does this exist? Should there be an exception here? In this case, the function
         ## does nothing.
         return input_layers
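To the question in the comments about what the stack call actually does: roughly, tflayers.stack applies the given layer function once per entry of the size list, feeding each output into the next call (it also manages per-layer variable scopes, which this sketch ignores). A rough expansion with made-up sizes:

import tensorflow as tf
from tensorflow.contrib import layers as tflayers

x = tf.placeholder(tf.float32, [None, 8])
net = x
for num_units in [10, 20, 10]:                      # same effect as tflayers.stack(x, tflayers.fully_connected, [10, 20, 10])
    net = tflayers.fully_connected(net, num_units)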
    def dnn_layers(input_layers, layers):
        if layers and isinstance(layers, dict):
            return tflayers.stack(input_layers,
                                  tflayers.fully_connected,
                                  layers['layers'],
                                  activation=layers.get('activation'),
                                  dropout=layers.get('dropout'))
        elif layers:
            return tflayers.stack(input_layers, tflayers.fully_connected,
                                  layers)
        else:
            return input_layers

        def lstm_model_(X, y):
            stacked_lstm = rnn.MultiRNNCell(lstm_cells(rnn_layers),
                                            state_is_tuple=True)
            x_ = tf.unstack(X, num=time_steps, axis=1)

            output, layers = rnn.static_rnn(stacked_lstm,
                                            x_,
                                            dtype=dtypes.float32)
            output = dnn_layers(output[-1], dense_layers)
            prediction, loss = tflearn.models.linear_regression(output, y)
            train_op = tf.contrib.layers.optimize_loss(
                loss,
                tf.contrib.framework.get_global_step(),
                optimizer=optimizer,
                learning_rate=tf.train.exponential_decay(
                    learning_rate,
                    tf.contrib.framework.get_global_step(),
                    decay_steps=1010,
                    decay_rate=0.9,
                    staircase=False,
                    name=None))
            print('learning rate', learning_rate)
            return prediction, loss, train_op

        return lstm_model_
def decoder(z):
    """Make reconstruction network.

    Parameters
    ----------
    z

    Returns
    -------
    Reconstruction distribution p(x|z;\theta) with Bernoulli distribution.
    Here, Bernoulli was chosen since pixel space is bounded by [0, 255].
    """
    net = stack(flatten(z), fc, [256, 512])
    logits = fc(net, 28 * 28, activation_fn=None)
    return Bernoulli(logits=logits)
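The decoder above uses short aliases and a Bernoulli distribution that are not defined in the snippet. A guess at the imports it assumes; the original project may pull Bernoulli from edward.models instead.

from tensorflow.contrib.layers import flatten, fully_connected as fc, stack
from tensorflow.contrib.distributions import Bernoulli   # assumption; could equally be edward.models.Bernoulli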
def linear_network(states,
                   scope=None,
                   reuse=False,
                   layers=None,
                   activation_fn=tf.tanh):
    layers = layers or [16, 16]
    with tf.variable_scope(scope or "network") as scope:
        if reuse:
            scope.reuse_variables()

        hidden_state = tflayers.stack(states,
                                      tflayers.fully_connected,
                                      layers,
                                      activation_fn=activation_fn)

        return hidden_state
def dnn_tanh(features, target):
    target = tf.one_hot(target, 2, 1.0, 0.0)
    # Organize continuous features.
    final_features = [tf.expand_dims(tf.cast(features[var], tf.float32), 1) for var in continues_vars]
    # Embed categorical variables into distributed representation.
    for var in categorical_vars:
        feature = learn.ops.categorical_variable(
            features[var + '_ids'], len(categorical_var_encoders[var].classes_), 
            embedding_size=CATEGORICAL_EMBED_SIZE, name=var)
        final_features.append(feature)
    # Concatenate all features into one vector.
    features = tf.concat(1, final_features)
    # Deep Neural Network
    logits = layers.stack(features, layers.fully_connected, [10, 20, 10],
        activation_fn=tf.tanh)
    prediction, loss = learn.models.logistic_regression(logits, target)
    train_op = layers.optimize_loss(loss,
        tf.contrib.framework.get_global_step(), optimizer='SGD', learning_rate=0.05)
    return tf.argmax(prediction, dimension=1), loss, train_op
Example #38
def my_model(features, target):
  """DNN with three hidden layers, and dropout of 0.1 probability.

  Note: If you want to run this example with multiple GPUs, Cuda Toolkit 7.0 and
  CUDNN 6.5 V2 from NVIDIA need to be installed beforehand.

  Args:
    features: `Tensor` of input features.
    target: `Tensor` of targets.

  Returns:
    Tuple of predictions, loss and training op.
  """
  # Convert the target to a one-hot tensor of shape (length of features, 3), with
  # an on-value of 1 for each one-hot vector of length 3.
  target = tf.one_hot(target, 3, 1, 0)

  # Create three fully connected layers respectively of size 10, 20, and 10 with
  # each layer having a dropout probability of 0.1.
  normalizer_fn = layers.dropout
  normalizer_params = {'keep_prob': 0.5}
  with tf.device('/gpu:1'):
    features = layers.stack(features, layers.fully_connected, [10, 20, 10],
                            normalizer_fn=normalizer_fn,
                            normalizer_params=normalizer_params)

  with tf.device('/gpu:2'):
    # Compute logits (1 per class) and compute loss.
    logits = layers.fully_connected(features, 3, activation_fn=None)
    loss = tf.contrib.losses.softmax_cross_entropy(logits, target)

    # Create a tensor for training op.
    train_op = tf.contrib.layers.optimize_loss(
        loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
        learning_rate=0.1)

  return ({
      'class': tf.argmax(logits, 1),
      'prob': tf.nn.softmax(logits)}, loss, train_op)