Example 1
        def dnn(x):
            with tf.variable_scope('Layer1'):
                # Creating variable using TFLearn
                W1 = va.variable(name='W', shape=[len(X_train[0]), first_layer],
                                 initializer='uniform_scaling',
                                 regularizer='L2')
                b1 = va.variable(name='b', shape=[first_layer])
                x = tf.nn.tanh(tf.add(tf.matmul(x, W1), b1))

            with tf.variable_scope('Layer2'):
                W2 = va.variable(name='W', shape=[first_layer, second_layer],
                                 initializer='uniform_scaling',
                                 regularizer='L2')
                b2 = va.variable(name='b', shape=[second_layer])
                x = tf.nn.tanh(tf.add(tf.matmul(x, W2), b2))

            with tf.variable_scope('Layer3'):
                W3 = va.variable(name='W', shape=[second_layer, third_layer],
                                 initializer='uniform_scaling',
                                 regularizer='L2')
                b3 = va.variable(name='b', shape=[third_layer])
                x = tf.add(tf.matmul(x, W3), b3)

            with tf.variable_scope('Layer4'):
                W4 = va.variable(name='W', shape=[third_layer, len(Y_train[0])],
                                 initializer='uniform_scaling',
                                 regularizer='L2')
                b4 = va.variable(name='b', shape=[len(Y_train[0])])
                x = tf.add(tf.matmul(x, W4), b4)

            return x, W4, b4
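A minimal driver for this builder, as a sketch only: it assumes TensorFlow 1.x, TFLearn's variable helper imported as `va` (as the snippet itself does), and stand-in values for the `X_train`/`Y_train` data and the layer sizes that the original takes from its enclosing scope.

import numpy as np
import tensorflow as tf
import tflearn.variables as va

# Stand-ins for names the snippet expects from the enclosing scope.
X_train = np.random.rand(100, 784).astype(np.float32)
Y_train = np.eye(10)[np.random.randint(0, 10, 100)].astype(np.float32)
first_layer, second_layer, third_layer = 256, 128, 64

x = tf.placeholder(tf.float32, [None, len(X_train[0])])
logits, W4, b4 = dnn(x)  # this variant also returns the last layer's parameters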
Example 2
    def dnn(x):
        with tf.variable_scope('Layer1'):
            # Creating variable using TFLearn
            W1 = va.variable(name='W',
                             shape=[784, 256],
                             initializer='uniform_scaling',
                             regularizer='L2')
            b1 = va.variable(name='b', shape=[256])
            x = tf.nn.tanh(tf.add(tf.matmul(x, W1), b1))

        with tf.variable_scope('Layer2'):
            W2 = va.variable(name='W',
                             shape=[256, 256],
                             initializer='uniform_scaling',
                             regularizer='L2')
            b2 = va.variable(name='b', shape=[256])
            x = tf.nn.tanh(tf.add(tf.matmul(x, W2), b2))

        with tf.variable_scope('Layer3'):
            W3 = va.variable(name='W',
                             shape=[256, 10],
                             initializer='uniform_scaling')
            b3 = va.variable(name='b', shape=[10])
            x = tf.add(tf.matmul(x, W3), b3)

        return x
Example 3
    def dnn(x):
        with tf.variable_scope('Layer1'):
            # Creating variable using TFLearn
            W1 = va.variable(name='W', shape=[784, 256],
                             initializer='uniform_scaling',
                             regularizer='L2')
            b1 = va.variable(name='b', shape=[256])
            x = tf.nn.tanh(tf.add(tf.matmul(x, W1), b1))

        with tf.variable_scope('Layer2'):
            W2 = va.variable(name='W', shape=[256, 256],
                             initializer='uniform_scaling',
                             regularizer='L2')
            b2 = va.variable(name='b', shape=[256])
            x = tf.nn.tanh(tf.add(tf.matmul(x, W2), b2))

        with tf.variable_scope('Layer3'):
            W3 = va.variable(name='W', shape=[256, 10],
                             initializer='uniform_scaling')
            b3 = va.variable(name='b', shape=[10])
            x = tf.add(tf.matmul(x, W3), b3)

        return x
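A sketch of wiring this 784-256-256-10 builder into a training objective, assuming TensorFlow 1.x and the TFLearn imports the snippet itself relies on:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])
logits = dnn(x)  # [None, 10] unscaled scores
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
train_op = tf.train.GradientDescentOptimizer(0.01).minimize(loss)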
Example 4
def single_unit(incoming, activation='linear', bias=True, trainable=True,
                restore=True, reuse=False, scope=None, name="Linear"):
    """ Single Unit.

    A single unit (Linear) Layer.

    Input:
        1-D Tensor [samples]. If not 2D, input will be flattened.

    Output:
        1-D Tensor [samples].

    Arguments:
        incoming: `Tensor`. Incoming Tensor.
        activation: `str` (name) or `function`. Activation applied to this
            layer (see tflearn.activations). Default: 'linear'.
        bias: `bool`. If True, a bias is used.
        trainable: `bool`. If True, weights will be trainable.
        restore: `bool`. If True, this layer weights will be restored when
            loading a model.
        reuse: `bool`. If True and 'scope' is provided, this layer variables
            will be reused (shared).
        scope: `str`. Define this layer scope (optional). A scope can be
            used to share variables between layers. Note that scope will
            override name.
        name: A name for this layer (optional). Default: 'Linear'.

    Attributes:
        W: `Tensor`. Variable representing weight.
        b: `Tensor`. Variable representing bias.

    """
    input_shape = utils.get_incoming_shape(incoming)
    n_inputs = int(np.prod(input_shape[1:]))

    # Build variables and inference.
    with tf.variable_op_scope([incoming], scope, name, reuse=reuse) as scope:
        name = scope.name

        W = va.variable('W', shape=[n_inputs],
                        initializer=tf.constant_initializer(np.random.randn()),
                        trainable=trainable, restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)

        b = None
        if bias:
            b = va.variable('b', shape=[n_inputs],
                            initializer=tf.constant_initializer(np.random.randn()),
                            trainable=trainable, restore=restore)
            tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b)

        inference = incoming
        # If input is not 2d, flatten it.
        if len(input_shape) > 1:
            inference = tf.reshape(inference, [-1])

        inference = tf.mul(inference, W)
        if b is not None: inference = tf.add(inference, b)

        if isinstance(activation, str):
            inference = activations.get(activation)(inference)
        elif hasattr(activation, '__call__'):
            inference = activation(inference)
        else:
            raise ValueError("Invalid Activation.")

        # Track activations.
        tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, inference)

    # Add attributes to Tensor for easy access to weights.
    inference.scope = scope
    inference.W = W
    inference.b = b

    # Track output tensor.
    tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, inference)

    return inference
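A usage sketch, assuming the pre-1.0 TensorFlow APIs this snippet itself targets (`tf.mul`, `tf.variable_op_scope`) plus the module's `utils`, `va` and `activations` imports: the layer multiplies each input element by a scalar weight and exposes `W` and `b` as attributes of its output tensor.

import tensorflow as tf

samples = tf.placeholder(tf.float32, [None])  # 1-D input [samples]
out = single_unit(samples, activation='sigmoid', name='Scaler')
print(out.W, out.b)  # variables are reachable through the output tensor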
Example 5
def fully_connected(incoming,
                    n_units,
                    activation='linear',
                    bias=True,
                    weights_init='truncated_normal',
                    bias_init='zeros',
                    regularizer=None,
                    weight_decay=0.001,
                    trainable=True,
                    restore=True,
                    name="FullyConnected"):
    """ Fully Connected.

    A fully connected layer.

    Input:
        (2+)-D Tensor [samples, input dim]. If not 2D, input will be flattened.

    Output:
        2D Tensor [samples, n_units].

    Arguments:
        incoming: `Tensor`. Incoming (2+)D Tensor.
        n_units: `int`, number of units for this layer.
        activation: `str` (name) or `Tensor`. Activation applied to this layer.
            (see tflearn.activations). Default: 'linear'.
        bias: `bool`. If True, a bias is used.
        weights_init: `str` (name) or `Tensor`. Weights initialization.
            (see tflearn.initializations) Default: 'truncated_normal'.
        bias_init: `str` (name) or `Tensor`. Bias initialization.
            (see tflearn.initializations) Default: 'zeros'.
        regularizer: `str` (name) or `Tensor`. Add a regularizer to this
            layer weights (see tflearn.regularizers). Default: None.
        weight_decay: `float`. Regularizer decay parameter. Default: 0.001.
        trainable: `bool`. If True, weights will be trainable.
        restore: `bool`. If True, this layer weights will be restored when
            loading a model.
        name: A name for this layer (optional). Default: 'FullyConnected'.

    Attributes:
        scope: `Scope`. This layer scope.
        W: `Tensor`. Variable representing units weights.
        b: `Tensor`. Variable representing biases.

    """
    input_shape = utils.get_incoming_shape(incoming)
    n_inputs = int(np.prod(input_shape[1:]))

    # Build variables and inference.
    with tf.name_scope(name) as scope:

        W_init = initializations.get(weights_init)()
        W_regul = None
        if regularizer:
            W_regul = lambda x: losses.get(regularizer)(x, weight_decay)
        W = va.variable(scope + 'W',
                        shape=[n_inputs, n_units],
                        regularizer=W_regul,
                        initializer=W_init,
                        trainable=trainable,
                        restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + scope, W)

        b = None
        if bias:
            b_init = initializations.get(bias_init)()
            b = va.variable(scope + 'b',
                            shape=[n_units],
                            initializer=b_init,
                            trainable=trainable,
                            restore=restore)
            tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + scope, b)

        inference = incoming
        # If input is not 2d, flatten it.
        if len(input_shape) > 2:
            inference = tf.reshape(inference, [-1, n_inputs])

        inference = tf.matmul(inference, W)
        if b is not None: inference = tf.nn.bias_add(inference, b)
        inference = activations.get(activation)(inference)

        # Track activations.
        tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, inference)

    # Add attributes to Tensor for easy access to weights.
    inference.scope = scope
    inference.W = W
    inference.b = b

    return inference
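A usage sketch, under the same assumptions (TensorFlow 1.x and the module's `initializations`, `losses`, `activations`, `va` and `utils` imports):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 64])
net = fully_connected(x, 32, activation='relu',
                      regularizer='L2', weight_decay=0.001)
net = fully_connected(net, 10, activation='linear', name='Output')
print(net.W, net.b)  # per-layer variables attached as attributes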
Example 6
def single_unit(incoming,
                activation='linear',
                bias=True,
                trainable=True,
                restore=True,
                name="Linear"):
    """ Single Unit.

    A single unit (Linear) Layer.

    Input:
        1-D Tensor [samples]. If not 2D, input will be flattened.

    Output:
        1-D Tensor [samples].

    Arguments:
        incoming: `Tensor`. Incoming Tensor.
        activation: `str` (name) or `Tensor`. Activation applied to this layer.
            (see tflearn.activations). Default: 'linear'.
        bias: `bool`. If True, a bias is used.
        trainable: `bool`. If True, weights will be trainable.
        restore: `bool`. If True, this layer weights will be restored when
            loading a model.
        name: A name for this layer (optional). Default: 'Linear'.

    Attributes:
        W: `Tensor`. Variable representing weight.
        b: `Tensor`. Variable representing bias.

    """
    input_shape = utils.get_incoming_shape(incoming)
    n_inputs = int(np.prod(input_shape[1:]))

    # Build variables and inference.
    with tf.name_scope(name) as scope:

        W = va.variable(scope + 'W',
                        shape=[n_inputs],
                        initializer=tf.constant_initializer(np.random.randn()),
                        trainable=trainable,
                        restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + scope, W)

        b = None
        if bias:
            b = va.variable(scope + 'b',
                            shape=[n_inputs],
                            initializer=tf.constant_initializer(
                                np.random.randn()),
                            trainable=trainable,
                            restore=restore)
            tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + scope, b)

        inference = incoming
        # If input is not 2d, flatten it.
        if len(input_shape) > 1:
            inference = tf.reshape(inference, [-1])

        inference = tf.mul(inference, W)
        if b is not None: inference = tf.add(inference, b)
        inference = activations.get(activation)(inference)

        # Track activations.
        tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, inference)

    # Add attributes to Tensor for easy access to weights.
    inference.scope = scope
    inference.W = W
    inference.b = b

    return inference
Example 7
def fully_connected(incoming, n_units, activation='linear', bias=True,
                    weights_init='truncated_normal', bias_init='zeros',
                    regularizer=None, weight_decay=0.001, trainable=True,
                    restore=True, name="FullyConnected"):
    """ Fully Connected.

    A fully connected layer.

    Input:
        (2+)-D Tensor [samples, input dim]. If not 2D, input will be flattened.

    Output:
        2D Tensor [samples, n_units].

    Arguments:
        incoming: `Tensor`. Incoming (2+)D Tensor.
        n_units: `int`, number of units for this layer.
        activation: `str` (name) or `Tensor`. Activation applied to this layer.
            (see tflearn.activations). Default: 'linear'.
        bias: `bool`. If True, a bias is used.
        weights_init: `str` (name) or `Tensor`. Weights initialization.
            (see tflearn.initializations) Default: 'truncated_normal'.
        bias_init: `str` (name) or `Tensor`. Bias initialization.
            (see tflearn.initializations) Default: 'zeros'.
        regularizer: `str` (name) or `Tensor`. Add a regularizer to this
            layer weights (see tflearn.regularizers). Default: None.
        weight_decay: `float`. Regularizer decay parameter. Default: 0.001.
        trainable: `bool`. If True, weights will be trainable.
        restore: `bool`. If True, this layer weights will be restored when
            loading a model.
        name: A name for this layer (optional). Default: 'FullyConnected'.

    Attributes:
        scope: `Scope`. This layer scope.
        W: `Tensor`. Variable representing units weights.
        b: `Tensor`. Variable representing biases.

    """
    input_shape = utils.get_incoming_shape(incoming)
    n_inputs = int(np.prod(input_shape[1:]))

    # Build variables and inference.
    with tf.name_scope(name) as scope:

        W_init = weights_init
        if isinstance(weights_init, str):
            W_init = initializations.get(weights_init)()
        W_regul = None
        if regularizer:
            W_regul = lambda x: losses.get(regularizer)(x, weight_decay)
        W = va.variable(scope + 'W', shape=[n_inputs, n_units],
                        regularizer=W_regul, initializer=W_init,
                        trainable=trainable, restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + scope, W)

        b = None
        if bias:
            b_init = initializations.get(bias_init)()
            b = va.variable(scope + 'b', shape=[n_units],
                            initializer=b_init, trainable=trainable,
                            restore=restore)
            tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + scope, b)

        inference = incoming
        # If input is not 2d, flatten it.
        if len(input_shape) > 2:
            inference = tf.reshape(inference, [-1, n_inputs])

        inference = tf.matmul(inference, W)
        if b is not None: inference = tf.nn.bias_add(inference, b)
        inference = activations.get(activation)(inference)

        # Track activations.
        tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, inference)

    # Add attributes to Tensor for easy access to weights.
    inference.scope = scope
    inference.W = W
    inference.b = b

    return inference
Example 8
                               weights_init="normal")  # hidden layer of size 2

output_layer = fully_connected(hidden_layer,
                               1,
                               activation='linear',
                               name="outputLayer_Weights",
                               weights_init="normal")  # output layer of size 1

# Hyperparameters for the one-class neural network
v = 0.04
nu = 0.04

# Initialize rho
value = 0.0001
init = tf.constant_initializer(value)
rho = va.variable(name='rho', dtype=tf.float32, shape=[], initializer=init)

rcomputed = []
auc = []

sess = tf.Session()
sess.run(tf.initialize_all_variables())
print(sess.run(tflearn.get_training_mode()))  # False
tflearn.is_training(True, session=sess)
print(sess.run(tflearn.get_training_mode()))  # now True

X = data_train
D = X.shape[1]
nu = 0.04

# temp = np.random.normal(0, 1, K + K*D + 1)[-1]
Example 9
def highway(incoming,
            n_units,
            activation='linear',
            transform_dropout=None,
            weights_init='truncated_normal',
            bias_init='zeros',
            regularizer=None,
            weight_decay=0.001,
            trainable=True,
            restore=True,
            reuse=False,
            scope=None,
            name="FullyConnectedHighway"):
    """ Fully Connected Highway.

    A fully connected highway network layer, with some inspiration from
    [https://github.com/fomorians/highway-fcn](https://github.com/fomorians/highway-fcn).

    Input:
        (2+)-D Tensor [samples, input dim]. If not 2D, input will be flattened.

    Output:
        2D Tensor [samples, n_units].

    Arguments:
        incoming: `Tensor`. Incoming (2+)D Tensor.
        n_units: `int`, number of units for this layer.
        activation: `str` (name) or `function` (returning a `Tensor`).
            Activation applied to this layer (see tflearn.activations).
            Default: 'linear'.
        transform_dropout: `float`. Keep probability on the highway transform gate.
        weights_init: `str` (name) or `Tensor`. Weights initialization.
            (see tflearn.initializations) Default: 'truncated_normal'.
        bias_init: `str` (name) or `Tensor`. Bias initialization.
            (see tflearn.initializations) Default: 'zeros'.
        regularizer: `str` (name) or `Tensor`. Add a regularizer to this
            layer weights (see tflearn.regularizers). Default: None.
        weight_decay: `float`. Regularizer decay parameter. Default: 0.001.
        trainable: `bool`. If True, weights will be trainable.
        restore: `bool`. If True, this layer weights will be restored when
            loading a model.
        reuse: `bool`. If True and 'scope' is provided, this layer variables
            will be reused (shared).
        scope: `str`. Define this layer scope (optional). A scope can be
            used to share variables between layers. Note that scope will
            override name.
        name: A name for this layer (optional). Default: 'FullyConnectedHighway'.

    Attributes:
        scope: `Scope`. This layer scope.
        W: `Tensor`. Variable representing units weights.
        W_t: `Tensor`. Variable representing units weights for transform gate.
        b: `Tensor`. Variable representing biases.
        b_t: `Tensor`. Variable representing biases for transform gate.

    Links:
        [https://arxiv.org/abs/1505.00387](https://arxiv.org/abs/1505.00387)

    """
    input_shape = utils.get_incoming_shape(incoming)
    assert len(input_shape) > 1, "Incoming Tensor shape must be at least 2-D"
    n_inputs = int(np.prod(input_shape[1:]))

    # Build variables and inference.
    with tf.variable_scope(scope,
                           default_name=name,
                           values=[incoming],
                           reuse=reuse) as scope:
        name = scope.name

        W_init = weights_init
        if isinstance(weights_init, str):
            W_init = initializations.get(weights_init)()
        W_regul = None
        if regularizer:
            W_regul = lambda x: losses.get(regularizer)(x, weight_decay)
        W = va.variable('W',
                        shape=[n_inputs, n_units],
                        regularizer=W_regul,
                        initializer=W_init,
                        trainable=trainable,
                        restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)

        if isinstance(bias_init, str):
            bias_init = initializations.get(bias_init)()
        b = va.variable('b',
                        shape=[n_units],
                        initializer=bias_init,
                        trainable=trainable,
                        restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b)

        # Weight and bias for the transform gate
        W_T = va.variable('W_T',
                          shape=[n_inputs, n_units],
                          regularizer=None,
                          initializer=W_init,
                          trainable=trainable,
                          restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W_T)

        b_T = va.variable('b_T',
                          shape=[n_units],
                          initializer=tf.constant_initializer(-1),
                          trainable=trainable,
                          restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b_T)

        # If input is not 2d, flatten it.
        if len(input_shape) > 2:
            incoming = tf.reshape(incoming, [-1, n_inputs])

        if isinstance(activation, str):
            activation = activations.get(activation)
        elif not hasattr(activation, '__call__'):
            raise ValueError("Invalid Activation.")

        H = activation(tf.matmul(incoming, W) + b)
        T = tf.sigmoid(tf.matmul(incoming, W_T) + b_T)
        if transform_dropout:
            T = dropout(T, transform_dropout)
        C = tf.subtract(1.0, T)

        inference = tf.add(tf.multiply(H, T), tf.multiply(incoming, C))

        # Track activations.
        tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, inference)

    # Add attributes to Tensor for easy access to weights.
    inference.scope = scope
    inference.W = W
    inference.W_t = W_T
    inference.b = b
    inference.b_t = b_T

    # Track output tensor.
    tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, inference)

    return inference
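The gate follows the highway formula y = H(x) * T(x) + x * (1 - T(x)); because the carried input x is mixed directly into the output, n_units must equal the (flattened) input dimension. A minimal sketch under the same TensorFlow 1.x / TFLearn assumptions:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 128])
net = highway(x, 128, activation='relu', name='hw1')  # n_units matches input dim
net = highway(net, 128, activation='relu', name='hw2')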
Example 10
def fully_connected(incoming,
                    n_units,
                    activation='linear',
                    bias=True,
                    weights_init='truncated_normal',
                    bias_init='zeros',
                    regularizer=None,
                    weight_decay=0.001,
                    trainable=True,
                    restore=True,
                    reuse=False,
                    scope=None,
                    name="FullyConnected"):
    """ Fully Connected.

    A fully connected layer.

    Input:
        (2+)-D Tensor [samples, input dim]. If not 2D, input will be flattened.

    Output:
        2D Tensor [samples, n_units].

    Arguments:
        incoming: `Tensor`. Incoming (2+)D Tensor.
        n_units: `int`, number of units for this layer.
        activation: `str` (name) or `function` (returning a `Tensor`).
            Activation applied to this layer (see tflearn.activations).
            Default: 'linear'.
        bias: `bool`. If True, a bias is used.
        weights_init: `str` (name) or `Tensor`. Weights initialization.
            (see tflearn.initializations) Default: 'truncated_normal'.
        bias_init: `str` (name) or `Tensor`. Bias initialization.
            (see tflearn.initializations) Default: 'zeros'.
        regularizer: `str` (name) or `Tensor`. Add a regularizer to this
            layer weights (see tflearn.regularizers). Default: None.
        weight_decay: `float`. Regularizer decay parameter. Default: 0.001.
        trainable: `bool`. If True, weights will be trainable.
        restore: `bool`. If True, this layer weights will be restored when
            loading a model.
        reuse: `bool`. If True and 'scope' is provided, this layer variables
            will be reused (shared).
        scope: `str`. Define this layer scope (optional). A scope can be
            used to share variables between layers. Note that scope will
            override name.
        name: A name for this layer (optional). Default: 'FullyConnected'.

    Attributes:
        scope: `Scope`. This layer scope.
        W: `Tensor`. Variable representing units weights.
        b: `Tensor`. Variable representing biases.

    """
    input_shape = utils.get_incoming_shape(incoming)
    assert len(input_shape) > 1, "Incoming Tensor shape must be at least 2-D"
    n_inputs = int(np.prod(input_shape[1:]))

    with tf.variable_scope(scope,
                           default_name=name,
                           values=[incoming],
                           reuse=reuse) as scope:
        name = scope.name

        W_init = weights_init
        if isinstance(weights_init, str):
            W_init = initializations.get(weights_init)()
        W_regul = None
        if regularizer:
            W_regul = lambda x: losses.get(regularizer)(x, weight_decay)
        W = va.variable('W',
                        shape=[n_inputs, n_units],
                        regularizer=W_regul,
                        initializer=W_init,
                        trainable=trainable,
                        restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)

        b = None
        if bias:
            if isinstance(bias_init, str):
                bias_init = initializations.get(bias_init)()
            b = va.variable('b',
                            shape=[n_units],
                            initializer=bias_init,
                            trainable=trainable,
                            restore=restore)
            tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b)

        inference = incoming
        # If input is not 2d, flatten it.
        if len(input_shape) > 2:
            inference = tf.reshape(inference, [-1, n_inputs])

        inference = tf.matmul(inference, W)
        if b is not None: inference = tf.nn.bias_add(inference, b)
        if activation:
            if isinstance(activation, str):
                inference = activations.get(activation)(inference)
            elif hasattr(activation, '__call__'):
                inference = activation(inference)
            else:
                raise ValueError("Invalid Activation.")

        # Track activations.
        tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, inference)

    # Add attributes to Tensor for easy access to weights.
    inference.scope = scope
    inference.W = W
    inference.b = b

    # Track output tensor.
    tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, inference)

    return inference
Example 11
def PhraseLayer(incoming,
                input_dim,
                output_dim,
                output_length,
                activation='linear',
                dropout_keepprob=0.5,
                batchNorm=False,
                name='PhraseLayer',
                alpha=0.5,
                scope=None):
    '''
    incoming: [batch_size, sen_length, input_dim]

    return: [batch_size, sen_length, output_length, output_dim[0]],
            [batch_size, sen_length, output_length, output_dim[1]]
    '''
    with tf.variable_scope(scope, default_name=name,
                           values=[incoming]) as scope:
        name = scope.name

        P = va.variable('P',
                        shape=[input_dim, output_dim[0]],
                        initializer=initializations.get('truncated_normal')())
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, P)

        P_p = va.variable(
            'P_p',
            shape=[input_dim, output_dim[1]],
            initializer=initializations.get('truncated_normal')())
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, P_p)

        Q = va.variable('Q',
                        shape=[input_dim, output_dim[0]],
                        initializer=initializations.get('truncated_normal')())
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, Q)

        Q_p = va.variable(
            'Q_p',
            shape=[input_dim, output_dim[1]],
            initializer=initializations.get('truncated_normal')())
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, Q_p)

        R = va.variable('R',
                        shape=[input_dim, output_dim[0]],
                        initializer=initializations.get('truncated_normal')())
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, R)

        R_p = va.variable(
            'R_p',
            shape=[input_dim, output_dim[1]],
            initializer=initializations.get('truncated_normal')())
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, R_p)

        O = va.variable('O',
                        shape=[output_dim[0], output_dim[0]],
                        initializer=initializations.get('truncated_normal')())
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, O)

        O_p = va.variable('O_p',
                          shape=[output_dim[1], output_dim[1]],
                          initializer=tf.ones_initializer())
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, O_p)

        b = va.variable('b',
                        shape=[1, 1, 1, output_dim[0]],
                        initializer=tf.ones_initializer())

        b_p = va.variable('b_p',
                          shape=[1, 1, 1, output_dim[1]],
                          initializer=tf.ones_initializer())

        if isinstance(activation, str):
            activation = activations.get(activation)
        elif not hasattr(activation, '__call__'):
            raise ValueError("Invalid Activation.")

        def calc(incoming, P, Q, R, O, output_dim):

            batch_size = tf.shape(incoming)[0]
            sent_length = incoming.shape[1].value

            G1 = tf.zeros([batch_size, sent_length, output_dim])
            G2 = tf.zeros([batch_size, sent_length, output_dim])
            G3 = tf.zeros([batch_size, sent_length, output_dim])
            r = []

            for i in range(output_length):
                '''if i == 0:
					now = incoming
				else:
					now = tf.concat([tf.zeros([batch_size, i, input_dim]), incoming[:,0:-i, :]], axis = 1)
				
				F2 = tf.einsum('aij,jk->aik', now, Q) * G1
				F3 = tf.einsum('aij,jk->aik', now, R) * G2
				
				if i == 0:
					F1 = tf.einsum('aij,jk->aik', now, P)
					G1 = G1 * alpha + F1
				else:
					G1 = G1 * alpha
				G2 = G2 * alpha + F2
				G3 = G3 * alpha + F3
				
				r.append(tf.einsum('aij,jk->aik',G1+G2+G3, O))'''

                F1 = tf.einsum('aij,jk->aik', incoming, P)
                r.append(tf.einsum('aij,jk->aik', F1, O))

            #return tf.stack(r, axis = 2)
            return tf.reshape(r[0], [batch_size, sent_length, 1, output_dim])

        batch_size = tf.shape(incoming)[0]
        sent_length = incoming.shape[1].value
        #out1 = tf.reshape(tf.einsum('aij,jk->aik', tf.einsum('aij,jk->aik', incoming, P), O), [batch_size, sent_length, 1, output_dim[0]]) + b
        out1 = tf.reshape(tf.einsum('aij,jk->aik', incoming, P),
                          [batch_size, sent_length, 1, output_dim[0]]) + b
        #out1 = calc(incoming, P, Q, R, O, output_dim[0]) + b
        #out1 = activation(out1, name="activation")
        tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, out1)
        if batchNorm:
            pass
            #out1 = tflearn.batch_normalization(out1, name="batchNormOut1")
        #out1 = tflearn.dropout(out1, dropout_keepprob, name="dropOut1")

        if output_dim[1] == 0:
            out2 = None
        else:
            out2 = calc(tf.stop_gradient(incoming), P_p, Q_p, R_p, O_p,
                        output_dim[1]) + b_p
            out2 = activation(out2, name="activation_p")
            tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, out2)
            if batchNorm:
                out2 = tflearn.batch_normalization(out2, name="batchNormOut2")
            out2 = tflearn.dropout(out2, dropout_keepprob, name="dropOut2")

    tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, out1)
    if output_dim[1] != 0:
        tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, out2)

    return out1, out2
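The `'aij,jk->aik'` einsum used throughout this layer is just a batched matrix product; a small standalone sketch of the equivalence (independent of the layer itself, assuming TensorFlow 1.x):

import numpy as np
import tensorflow as tf

x = tf.constant(np.random.rand(2, 5, 3), dtype=tf.float32)  # [batch, sen_length, input_dim]
P = tf.constant(np.random.rand(3, 4), dtype=tf.float32)     # [input_dim, output_dim]
a = tf.einsum('aij,jk->aik', x, P)       # [2, 5, 4]
b = tf.tensordot(x, P, axes=[[2], [0]])  # equivalent batched matmul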
Example 12
def _linear(args,
            output_size,
            bias,
            bias_start=0.0,
            weights_init=None,
            trainable=True,
            restore=True,
            reuse=False,
            scope=None):
    """Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.

    Arguments:
        args: a 2D Tensor or a list of 2D, batch x n, Tensors.
        output_size: int, second dimension of W[i].
        bias: boolean, whether to add a bias term or not.
        bias_start: starting value to initialize the bias; 0 by default.
        scope: VariableScope for the created subgraph; defaults to "Linear".

    Returns:
        A 2D Tensor with shape [batch x output_size] equal to
        sum_i(args[i] * W[i]), where W[i]s are newly created matrices.

    Raises:
        ValueError: if some of the arguments have an unspecified or wrong shape.
    """
    if args is None or (is_sequence(args) and not args):
        raise ValueError("`args` must be specified")
    if not is_sequence(args):
        args = [args]

    # Calculate the total size of arguments on dimension 1.
    total_arg_size = 0
    shapes = [a.get_shape().as_list() for a in args]
    for shape in shapes:
        if len(shape) != 2:
            raise ValueError("Linear is expecting 2D arguments: %s" %
                             str(shapes))
        if not shape[1]:
            raise ValueError("Linear expects shape[1] of arguments: %s" %
                             str(shapes))
        else:
            total_arg_size += shape[1]

    # Now the computation.
    with tf.variable_scope(scope or "Linear", reuse=reuse):
        matrix = va.variable("Matrix", [total_arg_size, output_size],
                             initializer=weights_init,
                             trainable=trainable,
                             restore=restore)
        if len(args) == 1:
            res = tf.matmul(args[0], matrix)
        else:
            res = tf.matmul(array_ops.concat(args, 1), matrix)
        if not bias:
            return res
        bias_term = va.variable(
            "Bias", [output_size],
            initializer=tf.constant_initializer(bias_start),
            trainable=trainable,
            restore=restore)
    return res + bias_term
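A usage sketch, assuming TensorFlow 1.x and the module's `va`, `is_sequence` and `array_ops` imports: the helper concatenates its inputs along dimension 1 before one shared matmul.

import tensorflow as tf

a = tf.placeholder(tf.float32, [None, 16])
b = tf.placeholder(tf.float32, [None, 8])
out = _linear([a, b], output_size=4, bias=True)  # concat to [None, 24], map to [None, 4]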
Example 13
def fully_connected(incoming,
                    n_units,
                    activation='linear',
                    bias=True,
                    weights_init='truncated_normal',
                    bias_init='zeros',
                    regularizer=None,
                    weight_decay=0.001,
                    trainable=True,
                    restore=True,
                    reuse=False,
                    scope=None,
                    name="FullyConnected"):
    input_shape = utils.get_incoming_shape(incoming)
    assert len(input_shape) > 1, "Incoming Tensor shape must be at least 2-D"
    n_inputs = int(np.prod(input_shape[1:]))

    with tf.variable_scope(scope,
                           default_name=name,
                           values=[incoming],
                           reuse=reuse) as scope:
        name = scope.name

        W_init = weights_init
        if isinstance(weights_init, str):
            W_init = initializations.get(weights_init)()
        W_regul = None
        if regularizer is not None:
            W_regul = lambda x: losses.get(regularizer)(x, weight_decay)
        W = vs.variable('W',
                        shape=[n_inputs, n_units],
                        regularizer=W_regul,
                        initializer=W_init,
                        trainable=trainable,
                        restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)

        b = None
        if bias:
            if isinstance(bias_init, str):
                bias_init = initializations.get(bias_init)()
            b = vs.variable('b',
                            shape=[n_units],
                            initializer=bias_init,
                            trainable=trainable,
                            restore=restore)
            tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b)

        inference = incoming
        # If input is not 2d, flatten it.
        if len(input_shape) > 2:
            inference = tf.reshape(inference, [-1, n_inputs])

        inference = tf.matmul(inference, W)
        if b is not None: inference = tf.nn.bias_add(inference, b)
        if activation:
            if isinstance(activation, str):
                inference = activations.get(activation)(inference)
            elif hasattr(activation, '__call__'):
                inference = activation(inference)
            else:
                raise ValueError("Invalid Activation.")

        # Track activations.
        tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, inference)

    # Add attributes to Tensor for easy access to weights.
    inference.scope = scope
    inference.W = W
    inference.b = b

    # Track output tensor.
    tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, inference)

    return inference
Example 14
def conv_2d(incoming,
            nb_filter,
            filter_size,
            strides=1,
            padding='same',
            activation='linear',
            bias=True,
            weights_init='uniform_scaling',
            bias_init='zeros',
            regularizer=None,
            weight_decay=0.001,
            trainable=True,
            restore=True,
            reuse=False,
            scope=None,
            name="Conv2D"):
    input_shape = utils.get_incoming_shape(incoming)
    assert len(input_shape) == 4, "Incoming Tensor shape must be 4-D"
    filter_size = utils.autoformat_filter_conv2d(filter_size, input_shape[-1],
                                                 nb_filter)
    strides = utils.autoformat_kernel_2d(strides)
    padding = utils.autoformat_padding(padding)

    with tf.variable_scope(scope,
                           default_name=name,
                           values=[incoming],
                           reuse=reuse) as scope:
        name = scope.name

        W_init = weights_init
        if isinstance(weights_init, str):
            W_init = initializations.get(weights_init)()
        W_regul = None
        if regularizer is not None:
            W_regul = lambda x: losses.get(regularizer)(x, weight_decay)
        W = vs.variable('W',
                        shape=filter_size,
                        regularizer=W_regul,
                        initializer=W_init,
                        trainable=trainable,
                        restore=restore)

        # Track per layer variables
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)

        b = None
        if bias:
            if isinstance(bias_init, str):
                bias_init = initializations.get(bias_init)()
            b = vs.variable('b',
                            shape=nb_filter,
                            initializer=bias_init,
                            trainable=trainable,
                            restore=restore)
            # Track per layer variables
            tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b)

        inference = tf.nn.conv2d(incoming, W, strides, padding)
        if b is not None: inference = tf.nn.bias_add(inference, b)

        if activation:
            if isinstance(activation, str):
                inference = activations.get(activation)(inference)
            elif hasattr(activation, '__call__'):
                inference = activation(inference)
            else:
                raise ValueError("Invalid Activation.")

        # Track activations.
        tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, inference)

    # Add attributes to Tensor for easy access to weights.
    inference.scope = scope
    inference.W = W
    inference.b = b

    # Track output tensor.
    tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, inference)

    return inference
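A usage sketch, assuming TensorFlow 1.x and the module's `utils`, `initializations`, `losses`, `activations` and `vs` imports:

import tensorflow as tf

img = tf.placeholder(tf.float32, [None, 28, 28, 1])
net = conv_2d(img, nb_filter=32, filter_size=3, strides=1,
              padding='same', activation='relu')  # -> [None, 28, 28, 32]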
Example 15
def highway(incoming, n_units, activation='linear', transform_dropout=None,
            weights_init='truncated_normal', bias_init='zeros',
            regularizer=None, weight_decay=0.001, trainable=True,
            restore=True, reuse=False, scope=None,
            name="FullyConnectedHighway"):
    """ Fully Connected Highway.

    A fully connected highway network layer, with some inspiration from
    [https://github.com/fomorians/highway-fcn](https://github.com/fomorians/highway-fcn).

    Input:
        (2+)-D Tensor [samples, input dim]. If not 2D, input will be flattened.

    Output:
        2D Tensor [samples, n_units].

    Arguments:
        incoming: `Tensor`. Incoming (2+)D Tensor.
        n_units: `int`, number of units for this layer.
        activation: `str` (name) or `function` (returning a `Tensor`).
            Activation applied to this layer (see tflearn.activations).
            Default: 'linear'.
        transform_dropout: `float`. Keep probability on the highway transform gate.
        weights_init: `str` (name) or `Tensor`. Weights initialization.
            (see tflearn.initializations) Default: 'truncated_normal'.
        bias_init: `str` (name) or `Tensor`. Bias initialization.
            (see tflearn.initializations) Default: 'zeros'.
        regularizer: `str` (name) or `Tensor`. Add a regularizer to this
            layer weights (see tflearn.regularizers). Default: None.
        weight_decay: `float`. Regularizer decay parameter. Default: 0.001.
        trainable: `bool`. If True, weights will be trainable.
        restore: `bool`. If True, this layer weights will be restored when
            loading a model.
        reuse: `bool`. If True and 'scope' is provided, this layer variables
            will be reused (shared).
        scope: `str`. Define this layer scope (optional). A scope can be
            used to share variables between layers. Note that scope will
            override name.
        name: A name for this layer (optional). Default: 'FullyConnectedHighway'.

    Attributes:
        scope: `Scope`. This layer scope.
        W: `Tensor`. Variable representing units weights.
        W_t: `Tensor`. Variable representing units weights for transform gate.
        b: `Tensor`. Variable representing biases.
        b_t: `Tensor`. Variable representing biases for transform gate.

    Links:
        [https://arxiv.org/abs/1505.00387](https://arxiv.org/abs/1505.00387)

    """
    input_shape = utils.get_incoming_shape(incoming)
    assert len(input_shape) > 1, "Incoming Tensor shape must be at least 2-D"
    n_inputs = int(np.prod(input_shape[1:]))

    # Build variables and inference.
    with tf.variable_op_scope([incoming], scope, name, reuse=reuse) as scope:
        name = scope.name

        W_init = weights_init
        if isinstance(weights_init, str):
            W_init = initializations.get(weights_init)()
        W_regul = None
        if regularizer:
            W_regul = lambda x: losses.get(regularizer)(x, weight_decay)
        W = va.variable('W', shape=[n_inputs, n_units], regularizer=W_regul,
                        initializer=W_init, trainable=trainable,
                        restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)

        if isinstance(bias_init, str):
            bias_init = initializations.get(bias_init)()
        b = va.variable('b', shape=[n_units], initializer=bias_init,
                        trainable=trainable, restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b)

        # Weight and bias for the transform gate
        W_T = va.variable('W_T', shape=[n_inputs, n_units],
                          regularizer=None, initializer=W_init,
                          trainable=trainable, restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W_T)

        b_T = va.variable('b_T', shape=[n_units],
                          initializer=tf.constant_initializer(-1),
                          trainable=trainable, restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b_T)

        # If input is not 2d, flatten it.
        if len(input_shape) > 2:
            incoming = tf.reshape(incoming, [-1, n_inputs])

        if isinstance(activation, str):
            activation = activations.get(activation)
        elif not hasattr(activation, '__call__'):
            raise ValueError("Invalid Activation.")

        H = activation(tf.matmul(incoming, W) + b)
        T = tf.sigmoid(tf.matmul(incoming, W_T) + b_T)
        if transform_dropout:
            T = dropout(T, transform_dropout)
        C = tf.sub(1.0, T)

        inference = tf.add(tf.mul(H, T), tf.mul(incoming, C))

        # Track activations.
        tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, inference)

    # Add attributes to Tensor for easy access to weights.
    inference.scope = scope
    inference.W = W
    inference.W_t = W_T
    inference.b = b
    inference.b_t = b_T

    # Track output tensor.
    tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, inference)

    return inference
Example 16
def fully_connected(incoming, n_units, activation='linear', bias=True,
                    weights_init='truncated_normal', bias_init='zeros',
                    regularizer=None, weight_decay=0.001, trainable=True,
                    restore=True, reuse=False, scope=None,
                    name="FullyConnected"):
    """ Fully Connected.

    A fully connected layer.

    Input:
        (2+)-D Tensor [samples, input dim]. If not 2D, input will be flattened.

    Output:
        2D Tensor [samples, n_units].

    Arguments:
        incoming: `Tensor`. Incoming (2+)D Tensor.
        n_units: `int`, number of units for this layer.
        activation: `str` (name) or `function` (returning a `Tensor`).
            Activation applied to this layer (see tflearn.activations).
            Default: 'linear'.
        bias: `bool`. If True, a bias is used.
        weights_init: `str` (name) or `Tensor`. Weights initialization.
            (see tflearn.initializations) Default: 'truncated_normal'.
        bias_init: `str` (name) or `Tensor`. Bias initialization.
            (see tflearn.initializations) Default: 'zeros'.
        regularizer: `str` (name) or `Tensor`. Add a regularizer to this
            layer weights (see tflearn.regularizers). Default: None.
        weight_decay: `float`. Regularizer decay parameter. Default: 0.001.
        trainable: `bool`. If True, weights will be trainable.
        restore: `bool`. If True, this layer weights will be restored when
            loading a model.
        reuse: `bool`. If True and 'scope' is provided, this layer variables
            will be reused (shared).
        scope: `str`. Define this layer scope (optional). A scope can be
            used to share variables between layers. Note that scope will
            override name.
        name: A name for this layer (optional). Default: 'FullyConnected'.

    Attributes:
        scope: `Scope`. This layer scope.
        W: `Tensor`. Variable representing units weights.
        b: `Tensor`. Variable representing biases.

    """
    input_shape = utils.get_incoming_shape(incoming)
    assert len(input_shape) > 1, "Incoming Tensor shape must be at least 2-D"
    n_inputs = int(np.prod(input_shape[1:]))

    # Build variables and inference.
    with tf.variable_op_scope([incoming], scope, name, reuse=reuse) as scope:
        name = scope.name

        W_init = weights_init
        if isinstance(weights_init, str):
            W_init = initializations.get(weights_init)()
        W_regul = None
        if regularizer:
            W_regul = lambda x: losses.get(regularizer)(x, weight_decay)
        W = va.variable('W', shape=[n_inputs, n_units], regularizer=W_regul,
                        initializer=W_init, trainable=trainable,
                        restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)

        b = None
        if bias:
            if isinstance(bias_init, str):
                bias_init = initializations.get(bias_init)()
            b = va.variable('b', shape=[n_units], initializer=bias_init,
                            trainable=trainable, restore=restore)
            tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b)

        inference = incoming
        # If input is not 2d, flatten it.
        if len(input_shape) > 2:
            inference = tf.reshape(inference, [-1, n_inputs])

        inference = tf.matmul(inference, W)
        if b is not None: inference = tf.nn.bias_add(inference, b)

        if isinstance(activation, str):
            inference = activations.get(activation)(inference)
        elif hasattr(activation, '__call__'):
            inference = activation(inference)
        else:
            raise ValueError("Invalid Activation.")

        # Track activations.
        tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, inference)

    # Add attributes to Tensor for easy access to weights.
    inference.scope = scope
    inference.W = W
    inference.b = b

    # Track output tensor.
    tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, inference)

    return inference
Example 17
def tflearn_OneClass_NN_linear(data_train, data_test, labels_train):

    X = data_train
    Y = labels_train

    D = X.shape[1]

    No_of_inputNodes = X.shape[1]

    # Clear all the graph variables created in previous run and start fresh
    tf.reset_default_graph()

    # Define the network
    input_layer = input_data(shape=[None,
                                    No_of_inputNodes])  # input layer of size No_of_inputNodes

    np.random.seed(42)
    theta0 = np.random.normal(0, 1, K + K * D + 1) * 0.0001
    #theta0 = np.random.normal(0, 1, K + K*D + 1) # For linear
    hidden_layer = fully_connected(
        input_layer,
        4,
        bias=False,
        activation='linear',
        name="hiddenLayer_Weights",
        weights_init="normal")  # hidden layer of size 2

    output_layer = fully_connected(
        hidden_layer,
        1,
        bias=False,
        activation='linear',
        name="outputLayer_Weights",
        weights_init="normal")  # output layer of size 1

    # Initialize rho
    value = 0.01
    init = tf.constant_initializer(value)
    rho = va.variable(name='rho', dtype=tf.float32, shape=[], initializer=init)

    rcomputed = []
    auc = []

    sess = tf.Session()
    sess.run(tf.initialize_all_variables())
    # print sess.run(tflearn.get_training_mode()) #False
    tflearn.is_training(True, session=sess)
    print(sess.run(tflearn.get_training_mode()))  # now True

    temp = theta0[-1]

    oneClassNN_Net = oneClassNN(output_layer,
                                v,
                                rho,
                                hidden_layer,
                                output_layer,
                                optimizer='sgd',
                                loss='OneClassNN_Loss',
                                learning_rate=1)

    model = DNN(oneClassNN_Net, tensorboard_verbose=3)

    model.set_weights(output_layer.W, theta0[0:K][:, np.newaxis])
    model.set_weights(hidden_layer.W, np.reshape(theta0[K:K + K * D], (D, K)))

    iterStep = 0
    while (iterStep < 100):
        print "Running Iteration :", iterStep
        # Call the cost function
        y_pred = model.predict(data_train)  # Apply some ops
        tflearn.is_training(False, session=sess)
        y_pred_test = model.predict(data_test)  # Apply some ops
        tflearn.is_training(True, session=sess)
        value = np.percentile(y_pred, v * 100)
        tflearn.variables.set_value(rho, value, session=sess)
        rStar = rho
        model.fit(X, Y, n_epoch=2, show_metric=True, batch_size=100)
        iterStep = iterStep + 1
        rcomputed.append(rho)
        temp = tflearn.variables.get_value(rho, session=sess)

    # print "Rho",temp
    # print "y_pred",y_pred
    # print "y_predTest", y_pred_test

    # g = lambda x: x
    g = lambda x: 1 / (1 + tf.exp(-x))

    def nnScore(X, w, V, g):
        return tf.matmul(g((tf.matmul(X, w))), V)

    # Format the datatype to suit the computation of nnScore
    X = X.astype(np.float32)
    X_test = data_test
    X_test = X_test.astype(np.float32)
    # assign the learnt weights
    # wStar = hidden_layer.W
    # VStar = output_layer.W
    # Get weights values of fc2
    wStar = model.get_weights(hidden_layer.W)
    VStar = model.get_weights(output_layer.W)

    # print "Hideen",wStar
    # print VStar

    train = nnScore(X, wStar, VStar, g)
    test = nnScore(X_test, wStar, VStar, g)

    # Access the value inside the train and test for plotting
    # Create a new session and run the example
    # sess = tf.Session()
    # sess.run(tf.initialize_all_variables())
    arrayTrain = train.eval(session=sess)
    arrayTest = test.eval(session=sess)

    # print "Train Array:",arrayTrain
    # print "Test Array:",arrayTest

    # plt.hist(arrayTrain-temp,  bins = 25,label='Normal');
    # plt.hist(arrayTest-temp, bins = 25, label='Anomalies');
    # plt.legend(loc='upper right')
    # plt.title('r = %1.6f- Sigmoid Activation ' % temp)
    # plt.show()

    pos_decisionScore = arrayTrain - temp
    neg_decisionScore = arrayTest - temp

    return [pos_decisionScore, neg_decisionScore]
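A hypothetical driver for this routine, as a sketch only: the function body also depends on module-level names (`K`, `v`, `oneClassNN`, `DNN`, `input_data` and the 'OneClassNN_Loss' objective) that must already exist in the surrounding module.

import numpy as np

# Dummy arrays standing in for real train/test splits.
data_train = np.random.rand(200, 10).astype(np.float32)
data_test = np.random.rand(50, 10).astype(np.float32)
labels_train = np.zeros((200, 1), dtype=np.float32)
pos_scores, neg_scores = tflearn_OneClass_NN_linear(data_train, data_test, labels_train)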
Example 18
def single_unit(incoming,
                activation='linear',
                bias=True,
                trainable=True,
                restore=True,
                reuse=False,
                scope=None,
                name="Linear"):
    """ Single Unit.

    A single unit (Linear) Layer.

    Input:
        1-D Tensor [samples]. If not 2D, input will be flattened.

    Output:
        1-D Tensor [samples].

    Arguments:
        incoming: `Tensor`. Incoming Tensor.
        activation: `str` (name) or `function`. Activation applied to this
            layer (see tflearn.activations). Default: 'linear'.
        bias: `bool`. If True, a bias is used.
        trainable: `bool`. If True, weights will be trainable.
        restore: `bool`. If True, this layer weights will be restored when
            loading a model.
        reuse: `bool`. If True and 'scope' is provided, this layer variables
            will be reused (shared).
        scope: `str`. Define this layer scope (optional). A scope can be
            used to share variables between layers. Note that scope will
            override name.
        name: A name for this layer (optional). Default: 'Linear'.

    Attributes:
        W: `Tensor`. Variable representing weight.
        b: `Tensor`. Variable representing bias.

    """
    input_shape = utils.get_incoming_shape(incoming)
    n_inputs = int(np.prod(input_shape[1:]))

    # Build variables and inference.
    with tf.variable_scope(scope,
                           default_name=name,
                           values=[incoming],
                           reuse=reuse) as scope:
        name = scope.name

        W = va.variable('W',
                        shape=[n_inputs],
                        initializer=tf.constant_initializer(np.random.randn()),
                        trainable=trainable,
                        restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)

        b = None
        if bias:
            b = va.variable('b',
                            shape=[n_inputs],
                            initializer=tf.constant_initializer(
                                np.random.randn()),
                            trainable=trainable,
                            restore=restore)
            tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b)

        inference = incoming
        # If input is not 2d, flatten it.
        if len(input_shape) > 1:
            inference = tf.reshape(inference, [-1])

        inference = tf.multiply(inference, W)
        if b is not None: inference = tf.add(inference, b)

        if isinstance(activation, str):
            inference = activations.get(activation)(inference)
        elif hasattr(activation, '__call__'):
            inference = activation(inference)
        else:
            raise ValueError("Invalid Activation.")

        # Track activations.
        tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, inference)

    # Add attributes to the Tensor for easy access to the weights.
    inference.scope = scope
    inference.W = W
    inference.b = b

    # Track output tensor.
    tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, inference)

    return inference
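
Note that `single_unit` multiplies the (optionally flattened) input element-wise by a weight vector rather than taking a matrix product, so each scalar sample x is mapped to activation(W * x + b). A minimal TF 1.x usage sketch follows (the placeholder shape, feed values, and session boilerplate are illustrative assumptions, and the TFLearn-style imports used by the snippet above — va, utils, activations — are assumed to be in scope):

import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None])  # one scalar per sample
y = single_unit(x, activation='tanh', name='unit')

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # y.W and y.b were attached to the output tensor by the layer.
    print(sess.run(y, feed_dict={x: np.array([1., 2., 3.], np.float32)}))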
Example no. 19
def single_unit(incoming, activation='linear', bias=True, trainable=True,
                restore=True, name="Linear"):
    """ Single Unit.

    A single unit (Linear) Layer.

    Input:
        1-D Tensor [samples]. If the incoming tensor is not 1-D, it will be
        flattened.

    Output:
        1-D Tensor [samples].

    Arguments:
        incoming: `Tensor`. Incoming Tensor.
        activation: `str` (name) or `function`. Activation applied to this
            layer (see tflearn.activations). Default: 'linear'.
        bias: `bool`. If True, a bias is used.
        trainable: `bool`. If True, weights will be trainable.
        restore: `bool`. If True, this layer's weights will be restored when
            loading a model.
        name: A name for this layer (optional). Default: 'Linear'.

    Attributes:
        W: `Tensor`. Variable representing weight.
        b: `Tensor`. Variable representing bias.

    """
    input_shape = utils.get_incoming_shape(incoming)
    n_inputs = int(np.prod(input_shape[1:]))

    # Build variables and inference.
    with tf.name_scope(name) as scope:

        W = va.variable(scope + 'W', shape=[n_inputs],
                        initializer=tf.constant_initializer(np.random.randn()),
                        trainable=trainable, restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + scope, W)

        b = None
        if bias:
            b = va.variable(scope + 'b', shape=[n_inputs],
                            initializer=tf.constant_initializer(np.random.randn()),
                            trainable=trainable, restore=restore)
            tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + scope, b)

        inference = incoming
        # If the input is not 1-D, flatten it.
        if len(input_shape) > 1:
            inference = tf.reshape(inference, [-1])

        inference = tf.multiply(inference, W)
        if b is not None: inference = tf.add(inference, b)
        inference = activations.get(activation)(inference)

        # Track activations.
        tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, inference)

    # Add attributes to the Tensor for easy access to the weights.
    inference.scope = scope
    inference.W = W
    inference.b = b

    return inference
Example no. 20
def conv_1d_transpose(layer,
                     nb_filter,
                     filter_size,
                     strides,
                     padding='same',
                     bias=True,
                     scope=None,
                     reuse=False,
                     bias_init='zeros',
                     trainable=True,
                     restore=True,
                     regularizer=None,
                     weight_decay=0.001,
                     weights_init='uniform_scaling',
                     name="deconv_1d"):
    '''
    layer: A 3-D `Tensor` of type `float` and shape
        `[batch, in_width, in_channels]`.
    SEE: https://www.tensorflow.org/api_docs/python/tf/nn/conv2d_backprop_input
    SEE2: https://github.com/tensorflow/tensorflow/pull/13105/commits/2ca9b908d1978a94855349309fd16a67cfd98659
    '''
    input_shape = utils.get_incoming_shape(layer)
    _, in_width, in_channels = input_shape
    batch_size = tf.shape(layer)[0]

    filter_size = [filter_size, nb_filter, in_channels]
    # Note: this output-shape trick appears to work only for strict
    # (integer-stride) up-sampling.
    output_shape = [batch_size, strides * in_width, nb_filter]
    output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")

    strides = [1, 1, strides, 1]
    spatial_start_dim = 1
    padding = utils.autoformat_padding(padding)

    with tf.variable_scope(scope,
                           default_name=name,
                           values=[layer],
                           reuse=reuse) as scope:
        name = scope.name
        W_init = weights_init
        if isinstance(weights_init, str):
            W_init = initializations.get(weights_init)()
        elif type(W_init) in [tf.Tensor, np.ndarray, list]:
            # An explicit tensor/array initializer defines the shape itself.
            filter_size = None

        W_regul = None
        if regularizer is not None:
            W_regul = lambda x: tflearn.losses.get(regularizer)(x, weight_decay)

        W = vs.variable('W',
                        shape=filter_size,
                        regularizer=W_regul,
                        initializer=W_init,
                        trainable=trainable,
                        restore=restore)

        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)

        # expand dims to make it compatible with conv2d
        W = tf.expand_dims(W, 0)
        layer = tf.expand_dims(layer, spatial_start_dim)
        output_shape_ = array_ops.concat(
            [output_shape_[:1], [1], output_shape_[1:]], axis=0)

        result = gen_nn_ops.conv2d_backprop_input(input_sizes=output_shape_,
                                                  filter=W,
                                                  out_backprop=layer,
                                                  strides=strides,
                                                  padding=padding,
                                                  name=name)

        result = array_ops.squeeze(result, [spatial_start_dim])
        result = tf.reshape(result, shape=output_shape)

        if bias:
            b_shape = [nb_filter]
            bias_init = initializations.get(bias_init)()
            b = vs.variable('b',
                            shape=b_shape,
                            initializer=bias_init,
                            trainable=trainable,
                            restore=restore)
            # Track per layer variables
            tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b)
            result = tf.nn.bias_add(result, b)
            result.b = b

        result.scope = scope
        result.W = W

    return result
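
As a quick check of the output-shape trick above: with 'same' padding, the output width is exactly strides * in_width. A hypothetical usage sketch (placeholder sizes, feed values, and session boilerplate are assumptions; the TFLearn-style imports used by the snippet are assumed to be in scope):

import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 16, 8])  # [batch, in_width, in_channels]
y = conv_1d_transpose(x, nb_filter=4, filter_size=5, strides=2)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    out = sess.run(y, feed_dict={x: np.zeros((3, 16, 8), np.float32)})
    print(out.shape)  # (3, 32, 4): width doubled by the stride, nb_filter channels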
def conv_2d_BN(incoming, nb_filter, filter_size, strides=1, padding='same',
            activation='linear', bias=True, weights_init='xavier',
            bias_init='zeros', regularizer=None, weight_decay=0.001,
            trainable=True, restore=True, reuse=False, scope=None,
            name="Conv2D", batch_norm=False):
    """ Convolution 2D.
    Input:
        4-D Tensor [batch, height, width, in_channels].
    Output:
        4-D Tensor [batch, new height, new width, nb_filter].
    Arguments:
        incoming: `Tensor`. Incoming 4-D Tensor.
        nb_filter: `int`. The number of convolutional filters.
        filter_size: `int` or `list of int`. Size of filters.
        strides: `int` or list of `int`. Strides of the conv operation.
            Default: [1 1 1 1].
        padding: `str` from `"same", "valid"`. Padding algorithm to use.
            Default: 'same'.
        activation: `str` (name) or `function` (returning a `Tensor`).
            Activation applied to this layer (see tflearn.activations).
            Default: 'linear'.
        bias: `bool`. If True, a bias is used.
        weights_init: `str` (name) or `Tensor`. Weights initialization.
            (see tflearn.initializations) Default: 'xavier'.
        bias_init: `str` (name) or `Tensor`. Bias initialization.
            (see tflearn.initializations) Default: 'zeros'.
        regularizer: `str` (name) or `Tensor`. Add a regularizer to this
            layer's weights (see tflearn.regularizers). Default: None.
        weight_decay: `float`. Regularizer decay parameter. Default: 0.001.
        trainable: `bool`. If True, weights will be trainable.
        restore: `bool`. If True, this layer's weights will be restored when
            loading a model.
        reuse: `bool`. If True and 'scope' is provided, this layer's variables
            will be reused (shared).
        scope: `str`. Define this layer's scope (optional). A scope can be
            used to share variables between layers. Note that scope will
            override name.
        name: A name for this layer (optional). Default: 'Conv2D'.
        batch_norm: `bool`. If True, insert batch normalization (with default
            TFLearn parameters) before the activation.
    Attributes:
        scope: `Scope`. This layer scope.
        W: `Variable`. Variable representing filter weights.
        b: `Variable`. Variable representing biases.
    """
    input_shape = utils.get_incoming_shape(incoming)
    assert len(input_shape) == 4, "Incoming Tensor shape must be 4-D"
    filter_size = utils.autoformat_filter_conv2d(filter_size,
                                                 input_shape[-1],
                                                 nb_filter)
    strides = utils.autoformat_kernel_2d(strides)
    padding = utils.autoformat_padding(padding)

    # Variable Scope fix for older TF
    try:
        vscope = tf.variable_scope(scope, default_name=name, values=[incoming],
                                   reuse=reuse)
    except Exception:
        vscope = tf.variable_op_scope([incoming], scope, name, reuse=reuse)

    with vscope as scope:
        name = scope.name

        W_init = weights_init
        if isinstance(weights_init, str):
            W_init = initializations.get(weights_init)()
        W_regul = None
        if regularizer:
            W_regul = lambda x: losses.get(regularizer)(x, weight_decay)
        W = vs.variable('W', shape=filter_size, regularizer=W_regul,
                        initializer=W_init, trainable=trainable,
                        restore=restore)

        # Track per layer variables
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)

        b = None
        if bias:
            if isinstance(bias_init, str):
                bias_init = initializations.get(bias_init)()
            b = vs.variable('b', shape=nb_filter, initializer=bias_init,
                            trainable=trainable, restore=restore)
            # Track per layer variables
            tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b)

        inference = tf.nn.conv2d(incoming, W, strides, padding)
        if b is not None: inference = tf.nn.bias_add(inference, b)

        if batch_norm:
            inference = batch_normalization(inference)

        if isinstance(activation, str):
            inference = activations.get(activation)(inference)
        elif hasattr(activation, '__call__'):
            inference = activation(inference)
        else:
            raise ValueError("Invalid Activation.")

        # Track activations.
        tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, inference)

    # Add attributes to the Tensor for easy access to the weights.
    inference.scope = scope
    inference.W = W
    inference.b = b

    # Track output tensor.
    tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, inference)

    return inference
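
A hypothetical usage sketch of conv_2d_BN (input shape and layer sizes are assumptions; batch_normalization is expected to come from tflearn.layers.normalization, as the code above implies):

import tensorflow as tf

img = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
net = conv_2d_BN(img, nb_filter=32, filter_size=3, strides=1,
                 activation='relu', batch_norm=True, name='conv1')
print(net.get_shape().as_list())  # [None, 28, 28, 32] with 'same' padding

Placing batch normalization before the activation, as this layer does, follows the placement proposed in the original batch-normalization paper (Ioffe & Szegedy, 2015).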