Example #1
    def dnn(x):
        with tf.name_scope('Layer1'):
            W1 = tf.Variable(tf.random_normal([784, 256]), name="W1")
            b1 = tf.Variable(tf.random_normal([256]), name="b1")
            x = tf.nn.tanh(tf.add(tf.matmul(x, W1), b1))
            # Add this `tanh` op to the activations collection for monitoring
            tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, x)
            # Add a weights regularizer (a regularization summary is added automatically)
            tflearn.add_weights_regularizer(W1, 'L2', weight_decay=0.001)

        with tf.name_scope('Layer2'):
            W2 = tf.Variable(tf.random_normal([256, 256]), name="W2")
            b2 = tf.Variable(tf.random_normal([256]), name="b2")
            x = tf.nn.tanh(tf.add(tf.matmul(x, W2), b2))
            # Add this `tanh` op to the activations collection for monitoring
            tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, x)
            # Add a weights regularizer (a regularization summary is added automatically)
            tflearn.add_weights_regularizer(W2, 'L2', weight_decay=0.001)

        with tf.name_scope('Layer3'):
            W3 = tf.Variable(tf.random_normal([256, 10]), name="W3")
            b3 = tf.Variable(tf.random_normal([10]), name="b3")
            x = tf.add(tf.matmul(x, W3), b3)

        return x
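For context, this `dnn` graph matches the pattern in TFLearn's "extending TensorFlow" examples, where a raw graph is handed to `tflearn.Trainer`. A minimal training harness might look like the sketch below; the hyperparameters are illustrative rather than taken from the original snippet, and `tensorboard_verbose=3` is what makes the activation and regularization summaries registered above appear in TensorBoard.

    import tensorflow as tf
    import tflearn
    import tflearn.datasets.mnist as mnist

    trainX, trainY, testX, testY = mnist.load_data(one_hot=True)

    X = tf.placeholder(tf.float32, [None, 784])
    Y = tf.placeholder(tf.float32, [None, 10])

    net = dnn(X)
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=net, labels=Y))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)

    trainop = tflearn.TrainOp(loss=loss, optimizer=optimizer, batch_size=128)
    trainer = tflearn.Trainer(train_ops=trainop, tensorboard_verbose=3)
    trainer.fit({X: trainX, Y: trainY}, val_feed_dicts={X: testX, Y: testY},
                n_epoch=10)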
Example #2
    def test_regularizer(self):
        # Bulk Tests
        with tf.Graph().as_default():
            x = tf.placeholder("float", [None, 4])
            W = tf.Variable(tf.random_normal([4, 4]))
            x = tf.nn.tanh(tf.matmul(x, W))
            tflearn.add_weights_regularizer(W, 'L2', weight_decay=0.001)
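A natural extension of this check is to assert that the penalty actually landed in the graph. A sketch, assuming the default target collection is tf.GraphKeys.REGULARIZATION_LOSSES:

    import tensorflow as tf
    import tflearn

    with tf.Graph().as_default():
        W = tf.Variable(tf.random_normal([4, 4]))
        tflearn.add_weights_regularizer(W, 'L2', weight_decay=0.001)
        # One L2 penalty term should now be registered (collection name
        # assumed; tflearn targets REGULARIZATION_LOSSES by default).
        reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        assert len(reg_losses) == 1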
Example #3
def variable(name, shape=None, dtype=tf.float32, initializer=None,
             regularizer=None, trainable=True, collections=None, device='',
             restore=True):
    """ variable.

    Instantiate a new variable.

    Arguments:
        name: `str`. A name for this variable.
        shape: list of `int`. The variable shape (optional).
        dtype: `type`. The variable data type.
        initializer: `str` or `Tensor`. The variable initialization. (See
            tflearn.initializations for references).
        regularizer: `str` or `Tensor`. The variable regularizer. (See
            tflearn.losses for references).
        trainable: `bool`. If True, this variable's weights will be trained.
        collections: `str`. A collection to add the new variable to (optional).
        device: `str`. Device on which to store the variable. Default: ''
            (no device pinning).
        restore: `bool`. Whether to restore this variable when loading a
            pre-trained model (only compatible with TFLearn's pre-built
            training functions).

    Returns:
        A Variable.

    """

    if isinstance(initializer, str):
        initializer = tflearn.initializations.get(initializer)()
    # Remove shape param if initializer is a Tensor
    if not callable(initializer) and isinstance(initializer, tf.Tensor):
        shape = None

    if isinstance(regularizer, str):
        regularizer = tflearn.losses.get(regularizer)

    with tf.device(device):

        try:
            var = tf.get_variable(name, shape=shape, dtype=dtype,
                                  initializer=initializer,
                                  regularizer=regularizer,
                                  trainable=trainable,
                                  collections=collections)
        # Fallback for older TF versions whose `get_variable` has no
        # `regularizer` argument.
        except Exception:
            var = tf.get_variable(name, shape=shape, dtype=dtype,
                                  initializer=initializer,
                                  trainable=trainable,
                                  collections=collections)
            if regularizer is not None:
                tflearn.add_weights_regularizer(var, regularizer)

        if not restore:
            tf.add_to_collection(tf.GraphKeys.EXCL_RESTORE_VARS, var)

        return var
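A usage sketch for this helper, assuming it is exposed as tflearn.variable (as in TFLearn itself); the names, shapes, and string shortcuts below are illustrative:

    import tensorflow as tf
    import tflearn

    with tf.Graph().as_default():
        # 'truncated_normal' resolves via tflearn.initializations,
        # 'L2' via tflearn.losses.
        W = tflearn.variable('W', shape=[784, 256],
                             initializer='truncated_normal',
                             regularizer='L2',
                             device='/cpu:0')
        # restore=False excludes the variable from checkpoint restoration.
        b = tflearn.variable('b', shape=[256], initializer='zeros',
                             restore=False)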
Example #4
    def create_critic_network(self):
        inputs = tflearn.input_data(shape=[None, self.s_dim])
        action = tflearn.input_data(shape=[None, self.a_dim])
        net = tflearn.fully_connected(inputs, 400, weights_init='xavier', bias_init='zeros')
        # Regularize the layer's weight matrix, not its output activations.
        tflearn.add_weights_regularizer(net.W, 'L2', weight_decay=0.001)
        net = tflearn.layers.normalization.batch_normalization(net)
        net = tflearn.activations.relu(net)

        # Add the action tensor in the 2nd hidden layer
        # Use two temp layers to get the corresponding weights and biases
        t1 = tflearn.fully_connected(net, 300, weights_init='xavier', bias_init='zeros')
        t2 = tflearn.fully_connected(action, 300, weights_init='xavier', bias_init='zeros')
        
        net = tflearn.activation(
            tf.matmul(net, t1.W) + tf.matmul(action, t2.W) + t2.b,
            activation='relu')

        # Linear output layer producing a single Q(s, a) value.
        # Weights are initialized to Uniform[-3e-3, 3e-3].
        w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)
        out = tflearn.fully_connected(net, 1, weights_init=w_init)
        return inputs, action, out
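This method comes from a DDPG-style critic whose surrounding class is not shown. The _Critic stand-in below is purely an assumption made so the graph can be exercised end to end; the tf.gradients line shows the action-gradient such critics typically expose for the actor update.

    import numpy as np
    import tensorflow as tf

    class _Critic(object):
        # Hypothetical stand-in carrying only what the method reads.
        def __init__(self, s_dim, a_dim):
            self.s_dim, self.a_dim = s_dim, a_dim

    # Bind the method above (assumed importable at module level).
    _Critic.create_critic_network = create_critic_network

    with tf.Graph().as_default():
        inputs, action, out = _Critic(s_dim=3, a_dim=1).create_critic_network()
        action_grads = tf.gradients(out, action)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            q = sess.run(out, feed_dict={inputs: np.zeros((1, 3), np.float32),
                                         action: np.zeros((1, 1), np.float32)})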
Example #5
import tflearn
from tflearn.helpers.regularizer import add_weights_regularizer
from tensorflow.contrib.slim import dataset

tflearn.input_data()
tflearn.variable()
tflearn.conv_2d()
tflearn.single_unit()
tflearn.lstm()
tflearn.embedding()
tflearn.batch_normalization()
tflearn.merge()
tflearn.regression()
tflearn.tanh()
tflearn.softmax_categorical_crossentropy()
tflearn.SGD()
tflearn.initializations.uniform()
tflearn.losses.L1()
tflearn.add_weights_regularizer()
tflearn.metrics.Accuracy()
tflearn.summaries()
tflearn.ImagePreprocessing()
tflearn.ImageAugmentation()
tflearn.init_graph()
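Several of the symbols enumerated above compose directly into a model; a minimal sketch in the spirit of TFLearn's linear-regression example (the data points are illustrative):

    import tflearn

    # Toy 1-D regression data.
    X = [3.38, 1.18, 4.17, 6.79, 0.88]
    Y = [1.69, 0.59, 2.09, 3.40, 0.44]

    g = tflearn.input_data(shape=[None])
    g = tflearn.single_unit(g)
    g = tflearn.regression(g, optimizer='sgd', learning_rate=0.01,
                           loss='mean_square')

    m = tflearn.DNN(g)
    m.fit(X, Y, n_epoch=100, show_metric=True)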