Example 1
def __init__(self, X, Y, multi_inputs=False, batch_size=32, shuffle=True):
    # Handle multiple inputs
    if not multi_inputs:
        X = [X]
    X = [np.array(x) for x in X]
    self.X = X
    self.Xlen = len(X[0])
    Y = np.array(Y)
    self.Y = Y
    # Create X placeholders
    self.tensorX = [
        tf.placeholder(dtype=tf.float32,
                       shape=[None] +
                       list(utils.get_incoming_shape(x)[1:])) for x in X
    ]
    # Create Y placeholders
    self.tensorY = tf.placeholder(dtype=tf.float32,
                                  shape=[None] +
                                  list(utils.get_incoming_shape(Y)[1:]))
    # FIFO Queue for feeding data
    self.queue = tf.FIFOQueue(dtypes=[x.dtype for x in self.tensorX] +
                              [self.tensorY.dtype],
                              capacity=batch_size * 8)
    self.enqueue_op = self.queue.enqueue(self.tensorX + [self.tensorY])
    self.batch_size = batch_size
    self.multi_inputs = multi_inputs
    self.shuffle = shuffle
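A minimal sketch of how a feeder like this would typically be driven in a TF1 session. The class name `QueueFeeder` and the training arrays are assumptions for illustration only; the attributes used (`tensorX`, `tensorY`, `enqueue_op`, `queue`) are the ones created in the constructor above.

```python
import numpy as np
import tensorflow as tf

# Hypothetical host class and data; only the attributes built above are used.
X_train = np.random.rand(100, 8).astype(np.float32)
Y_train = np.random.rand(100, 1).astype(np.float32)
feeder = QueueFeeder(X_train, Y_train, batch_size=32)  # assumed class name

with tf.Session() as sess:
    # Push one batch into the FIFO queue through the placeholders.
    feed = {feeder.tensorX[0]: X_train[:32], feeder.tensorY: Y_train[:32]}
    sess.run(feeder.enqueue_op, feed_dict=feed)

    # Downstream graph nodes would read their inputs from the queue.
    x_batch, y_batch = feeder.queue.dequeue()
```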
Example 2
def time_distributed(incoming, fn, args=None, scope=None):
    """ Time Distributed.

    This layer applies a function to every timestep of the input tensor. The
    custom function's first argument must be the input tensor at every
    timestep. Additional parameters for the custom function may be specified
    via the 'args' argument (as a list).

    Examples:
        ```python
        # Applying a fully_connected layer at every timestep
        x = time_distributed(input_tensor, fully_connected, [64])

        # Using a conv layer at every timestep with a scope
        x = time_distributed(input_tensor, conv_2d, [64, 3], scope='tconv')
        ```

    Input:
        (3+)-D Tensor [samples, timestep, input_dim].

    Output:
        (3+)-D Tensor [samples, timestep, output_dim].

    Arguments:
        incoming: `Tensor`. The incoming tensor.
        fn: `function`. A function to apply at every timestep. This function's
            first parameter must be the input tensor per timestep. Additional
            parameters may be specified via the 'args' argument.
        args: `list`. A list of parameters to use with the provided function.
        scope: `str`. A scope to give to each timestep tensor. Useful when
            sharing weights. Each timestep tensor scope will be generated
            as 'scope'-'i' where i represents the timestep id. Note that your
            custom function will be required to have a 'scope' parameter.

    Returns:
        A Tensor.

    """
    if not args: args = list()
    assert isinstance(args, list), "'args' must be a list."

    if not isinstance(incoming, tf.Tensor):
        incoming = tf.transpose(tf.stack(incoming), [1, 0, 2])

    input_shape = utils.get_incoming_shape(incoming)
    timestep = input_shape[1]
    x = tf.unstack(incoming, axis=1)
    if scope:
        x = [
            fn(x[i], scope=scope + '-' + str(i), *args)
            for i in range(timestep)
        ]
    else:
        x = [fn(x[i], *args) for i in range(timestep)]

    x = list(
        map(lambda t: tf.reshape(t, [-1, 1] + utils.get_incoming_shape(t)[1:]),
            x))
    return tf.concat(x, 1)
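To make the shape contract above concrete, a short usage sketch (assuming the usual TFLearn import paths for `input_data` and `fully_connected`; shapes are illustrative):

```python
from tflearn.layers.core import input_data, fully_connected, time_distributed

# [batch, 10 timesteps, 32 features]
net = input_data(shape=[None, 10, 32])
# Apply a 64-unit fully_connected layer to each of the 10 timesteps
# (pass scope='...' if the timesteps should share weights).
net = time_distributed(net, fully_connected, [64])
# Resulting shape: [batch, 10, 64].
```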
Example 3
def time_distributed(incoming, fn, args=None, scope=None):
    """ Time Distributed.

    This layer applies a function to every timestep of the input tensor. The
    custom function's first argument must be the input tensor at every
    timestep. Additional parameters for the custom function may be specified
    via the 'args' argument (as a list).

    Examples:
        ```python
        # Applying a fully_connected layer at every timestep
        x = time_distributed(input_tensor, fully_connected, [64])

        # Using a conv layer at every timestep with a scope
        x = time_distributed(input_tensor, conv_2d, [64, 3], scope='tconv')
        ```

    Input:
        (3+)-D Tensor [samples, timestep, input_dim].

    Output:
        (3+)-D Tensor [samples, timestep, output_dim].

    Arguments:
        incoming: `Tensor`. The incoming tensor.
        fn: `function`. A function to apply at every timestep. This function's
            first parameter must be the input tensor per timestep. Additional
            parameters may be specified via the 'args' argument.
        args: `list`. A list of parameters to use with the provided function.
        scope: `str`. A scope to give to each timestep tensor. Useful when
            sharing weights. Each timestep tensor scope will be generated
            as 'scope'-'i' where i represents the timestep id. Note that your
            custom function will be required to have a 'scope' parameter.

    Returns:
        A Tensor.

    """
    if not args: args = list()
    assert isinstance(args, list), "'args' must be a list."

    if not isinstance(incoming, tf.Tensor):
        incoming = tf.transpose(tf.stack(incoming), [1, 0, 2])

    input_shape = utils.get_incoming_shape(incoming)
    timestep = input_shape[1]
    x = tf.unstack(incoming, axis=1)
    if scope:
        x = [fn(x[i], scope=scope+'-'+str(i), *args)
             for i in range(timestep)]
    else:
        x = [fn(x[i], *args) for i in range(timestep)]
    # list() keeps this working on Python 3, where map returns an iterator.
    x = list(map(lambda t: tf.reshape(t, [-1, 1]+utils.get_incoming_shape(t)[1:]), x))
    return tf.concat(1, x)
Example 4
def flatten(incoming, name="Flatten"):
    """ Flatten.

    Flatten the incoming Tensor.

    Input:
        (2+)-D `Tensor`.

    Output:
        2-D `Tensor` [batch, flatten_dims].

    Arguments:
        incoming: `Tensor`. The incoming tensor.

    """
    input_shape = utils.get_incoming_shape(incoming)
    assert len(input_shape) > 1, "Incoming Tensor shape must be at least 2-D"
    dims = int(np.prod(input_shape[1:]))
    return reshape(incoming, [-1, dims], name)
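The layer boils down to a single reshape whose trailing dimension is the product of all non-batch dimensions; an equivalent raw-TensorFlow sketch (TF1-style placeholder, names and shapes illustrative):

```python
import numpy as np
import tensorflow as tf

# [batch, 8, 8, 16] -> [batch, 1024]
images = tf.placeholder(tf.float32, shape=[None, 8, 8, 16])
dims = int(np.prod(images.get_shape().as_list()[1:]))  # 8 * 8 * 16 = 1024
flat = tf.reshape(images, [-1, dims])
```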
Example 5
def flatten(incoming, name="Flatten"):
    """ Flatten.

    Flatten the incoming Tensor.

    Input:
        (2+)-D `Tensor`.

    Output:
        2-D `Tensor` [batch, flatten_dims].

    Arguments:
        incoming: `Tensor`. The incoming tensor.

    """
    input_shape = utils.get_incoming_shape(incoming)
    assert len(input_shape) > 1, "Incoming Tensor shape must be at least 2-D"
    dims = int(np.prod(input_shape[1:]))
    return reshape(incoming, [-1, dims], name)
Example 6
def flatten(incoming, name="Flatten"):
    """ Flatten.

    Flatten the incoming Tensor.

    Input:
        (2+)-D `Tensor`.

    Output:
        2-D `Tensor` [batch, flatten_dims].

    Arguments:
        incoming: `Tensor`. The incoming tensor.

    """
    input_shape = utils.get_incoming_shape(incoming)
    assert len(input_shape) > 1, "Incoming Tensor shape must be at least 2-D"
    dims = int(np.prod(input_shape[1:]))
    x = reshape(incoming, [-1, dims], name)

    # Track output tensor.
    tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, x)

    return x
Example 7
def flatten(incoming, name="Flatten"):
    """ Flatten.

    Flatten the incoming Tensor.

    Input:
        (2+)-D `Tensor`.

    Output:
        2-D `Tensor` [batch, flatten_dims].

    Arguments:
        incoming: `Tensor`. The incoming tensor.

    """
    input_shape = utils.get_incoming_shape(incoming)
    assert len(input_shape) > 1, "Incoming Tensor shape must be at least 2-D"
    dims = int(np.prod(input_shape[1:]))
    x = reshape(incoming, [-1, dims], name)

    # Track output tensor.
    tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, x)

    return x
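Because this variant also registers its output in a per-layer collection, the flattened tensor can later be fetched by layer name; a sketch assuming tflearn has been imported (it adds the `LAYER_TENSOR` key to `tf.GraphKeys`, as the code above relies on), and that `net` is some (2+)-D tensor built earlier:

```python
import tensorflow as tf

net = flatten(net, name="Flatten")
# Look up the tracked output tensor by its layer name.
tracked = tf.get_collection(tf.GraphKeys.LAYER_TENSOR + '/' + "Flatten")
# tracked[0] is the same tensor object returned by flatten() above.
```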
Example 8
def regression(incoming, placeholder=None, optimizer='adam',
               loss='categorical_crossentropy', metric='default',
               learning_rate=0.001, dtype=tf.float32, batch_size=64,
               shuffle_batches=True, trainable_vars=None, op_name=None,
               name=None):
    """ Regression.

    Input:
        2-D Tensor Layer.

    Output:
        2-D Tensor Layer (Same as input).

    Arguments:
        incoming: `Tensor`. Incoming 2-D Tensor.
        placeholder: `Tensor`. This regression target (label) placeholder.
            If 'None' provided, a placeholder will be added automatically.
            You can retrieve that placeholder through graph key: 'TARGETS',
            or the 'placeholder' attribute of this function's returned tensor.
        optimizer: `str` (name) or `Optimizer`. Optimizer to use.
            Default: 'adam' (Adaptive Moment Estimation).
        loss: `str` (name) or `Tensor`. Loss function used by this layer
            optimizer. Default: 'categorical_crossentropy'.
        metric: `str`, `Metric` or `Tensor`. The metric to be used.
            Default: 'default' metric is 'accuracy'. To disable metric
            calculation, set it to 'None'.
        learning_rate: `float`. This layer optimizer's learning rate.
        dtype: `tf.types`. This layer placeholder type. Default: tf.float32.
        batch_size: `int`. Batch size of data to use for training. TFLearn
            supports a different batch size for every optimizer. Default: 64.
        shuffle_batches: `bool`. Whether or not to shuffle this optimizer's
            batches at every epoch. Default: True.
        trainable_vars: list of `Variable`. If specified, this regression will
            only update the given variables' weights. Else, all trainable
            variables are going to be updated.
        op_name: A name for this layer optimizer (optional).
            Default: optimizer op name.
        name: A name for this layer's placeholder scope.

    Attributes:
        placeholder: `Tensor`. Placeholder for feeding labels.

    """

    input_shape = utils.get_incoming_shape(incoming)

    if not placeholder:
        pscope = "TargetsData" if not name else name
        with tf.name_scope(pscope):
            pshape = [None, input_shape[-1]]
            if len(input_shape) == 1:
                pshape = [None]
            placeholder = tf.placeholder(shape=pshape, dtype=dtype, name="Y")

    tf.add_to_collection(tf.GraphKeys.TARGETS, placeholder)

    step_tensor = None
    # Building Optimizer
    if isinstance(optimizer, str):
        _opt = optimizers.get(optimizer)(learning_rate)
        op_name = op_name if op_name else type(_opt).__name__
        _opt.build()
        optimizer = _opt.get_tensor()
    elif isinstance(optimizer, optimizers.Optimizer):
        op_name = op_name if op_name else type(optimizer).__name__
        if optimizer.has_decay:
            step_tensor = tf.Variable(0., name="Training_step",
                                      trainable=False)
        optimizer.build(step_tensor)
        optimizer = optimizer.get_tensor()
    elif not isinstance(optimizer, tf.train.Optimizer):
        raise ValueError("Invalid Optimizer type.")

    inputs = tf.get_collection(tf.GraphKeys.INPUTS)
    #inputs = tf.concat(0, utils.get_tensor_parents_placeholders(incoming))

    # Building metric
    # No auto accuracy for linear regression
    if len(input_shape) == 1 and metric == 'default':
        metric = None
    if metric is not None:
        # Default metric is accuracy
        if metric == 'default': metric = 'accuracy'
        if isinstance(metric, str):
            metric = metrics.get(metric)()
            metric.build(incoming, placeholder, inputs)
            metric = metric.get_tensor()
        elif isinstance(metric, metrics.Metric):
            metric.build(incoming, placeholder, inputs)
            metric = metric.get_tensor()
        elif not isinstance(metric, tf.Tensor):
            raise ValueError("Invalid Metric type.")

    # Building other ops (loss, training ops...)
    if isinstance(loss, str):
        loss = objectives.get(loss)(incoming, placeholder)
    elif not isinstance(loss, tf.Tensor):
        raise ValueError("Invalid Loss type.")

    tr_vars = trainable_vars
    if not tr_vars:
        tr_vars = tf.trainable_variables()

    tr_op = TrainOp(loss=loss,
                    optimizer=optimizer,
                    metric=metric,
                    trainable_vars=tr_vars,
                    batch_size=batch_size,
                    shuffle=shuffle_batches,
                    step_tensor=step_tensor,
                    name=op_name)

    tf.add_to_collection(tf.GraphKeys.TRAIN_OPS, tr_op)

    if not hasattr(incoming, '__len__'):
        incoming.placeholder = placeholder

    return incoming
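A minimal end-to-end sketch of how this layer is typically attached to a network and trained through a TFLearn model wrapper (standard tflearn import paths assumed; `X` and `Y` stand in for real training data):

```python
import tflearn
from tflearn.layers.core import input_data, fully_connected
from tflearn.layers.estimator import regression

net = input_data(shape=[None, 784])
net = fully_connected(net, 10, activation='softmax')
# Attaches the target placeholder, the loss, the optimizer and a TrainOp.
net = regression(net, optimizer='adam', learning_rate=0.001,
                 loss='categorical_crossentropy')

model = tflearn.DNN(net)
# model.fit(X, Y, n_epoch=10, batch_size=64)
```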
Example 9
def regression(incoming, placeholder=None, optimizer='adam',
               loss='categorical_crossentropy', metric='default',
               learning_rate=0.001, dtype=tf.float32, batch_size=64,
               shuffle_batches=True, to_one_hot=False, n_classes=None,
               trainable_vars=None, restore=True, op_name=None, name=None):
    """ Regression.

    The regression layer is used in TFLearn to apply a regression (linear or
    logistic) to the provided input. It requires specifying a TensorFlow
    gradient descent optimizer 'optimizer' that will minimize the provided
    loss function 'loss' (which calculates the errors). A metric can also be
    provided to evaluate the model performance.

    A 'TrainOp' is generated, holding all information about the optimization
    process. It is added to TensorFlow collection 'tf.GraphKeys.TRAIN_OPS'
    and later used by TFLearn 'models' classes to perform the training.

    An optional placeholder 'placeholder' can be specified to use a custom
    TensorFlow target placeholder instead of creating a new one. The target
    placeholder is added to the 'tf.GraphKeys.TARGETS' TensorFlow
    collection, so that it can be retrieved later.

    Additionally, a list of variables 'trainable_vars' can be specified,
    so that only those will be updated when applying the backpropagation
    algorithm.

    Input:
        2-D Tensor Layer.

    Output:
        2-D Tensor Layer (Same as input).

    Arguments:
        incoming: `Tensor`. Incoming 2-D Tensor.
        placeholder: `Tensor`. This regression target (label) placeholder.
            If 'None' provided, a placeholder will be added automatically.
            You can retrieve that placeholder through graph key: 'TARGETS',
            or the 'placeholder' attribute of this function's returned tensor.
        optimizer: `str` (name), `Optimizer` or `function`. Optimizer to use.
            Default: 'adam' (Adaptive Moment Estimation).
        loss: `str` (name) or `function`. Loss function used by this layer
            optimizer. Default: 'categorical_crossentropy'.
        metric: `str`, `Metric` or `function`. The metric to be used.
            Default: 'default' metric is 'accuracy'. To disable metric
            calculation, set it to 'None'.
        learning_rate: `float`. This layer optimizer's learning rate.
        dtype: `tf.types`. This layer placeholder type. Default: tf.float32.
        batch_size: `int`. Batch size of data to use for training. TFLearn
            supports a different batch size for every optimizer. Default: 64.
        shuffle_batches: `bool`. Whether or not to shuffle this optimizer's
            batches at every epoch. Default: True.
        to_one_hot: `bool`. If True, labels will be encoded to one hot vectors.
            'n_classes' must then be specified.
        n_classes: `int`. The total number of classes. Only required when using
            'to_one_hot' option.
        trainable_vars: list of `Variable`. If specified, this regression will
            only update the given variables' weights. Else, all trainable
            variables are going to be updated.
        restore: `bool`. If False, variables related to optimizers such
            as moving averages will not be restored when loading a
            pre-trained model.
        op_name: A name for this layer optimizer (optional).
            Default: optimizer op name.
        name: A name for this layer's placeholder scope.

    Attributes:
        placeholder: `Tensor`. Placeholder for feeding labels.

    """

    input_shape = utils.get_incoming_shape(incoming)

    if placeholder is None:
        pscope = "TargetsData" if not name else name
        with tf.name_scope(pscope):
            placeholder = tf.placeholder(shape=input_shape, dtype=dtype, name="Y")

    tf.add_to_collection(tf.GraphKeys.TARGETS, placeholder)

    if to_one_hot:
        if n_classes is None:
            raise Exception("'n_classes' is required when using 'to_one_hot'.")
        placeholder = core.one_hot_encoding(placeholder, n_classes)

    step_tensor = None
    # Building Optimizer
    if isinstance(optimizer, str):
        _opt = optimizers.get(optimizer)(learning_rate)
        op_name = op_name if op_name else type(_opt).__name__
        _opt.build()
        optimizer = _opt.get_tensor()
    elif isinstance(optimizer, optimizers.Optimizer):
        op_name = op_name if op_name else type(optimizer).__name__
        if optimizer.has_decay:
            step_tensor = tf.Variable(0., name="Training_step",
                                      trainable=False)
        optimizer.build(step_tensor)
        optimizer = optimizer.get_tensor()
    elif hasattr(optimizer, '__call__'):
        try:
            optimizer, step_tensor = optimizer(learning_rate)
        except Exception as e:
            print(str(e))
            print("Reminder: Custom Optimizer function must return (optimizer, "
                  "step_tensor) and take one argument: 'learning_rate'. "
                  "Note that returned step_tensor can be 'None' if no decay.")
            exit()
    elif not isinstance(optimizer, tf.train.Optimizer):
        raise ValueError("Invalid Optimizer type.")

    inputs = tf.get_collection(tf.GraphKeys.INPUTS)
    #inputs = tf.concat(0, utils.get_tensor_parents_placeholders(incoming))

    # Building metric
    # No auto accuracy for linear regression
    if len(input_shape) == 1 and metric == 'default':
        metric = None
    if metric is not None:
        # Default metric is accuracy
        if metric == 'default': metric = 'accuracy'
        if isinstance(metric, str):
            metric = metrics.get(metric)()
            metric.build(incoming, placeholder, inputs)
            metric = metric.get_tensor()
        elif isinstance(metric, metrics.Metric):
            metric.build(incoming, placeholder, inputs)
            metric = metric.get_tensor()
        elif hasattr(metric, '__call__'):
            try:
                metric = metric(incoming, placeholder, inputs)
            except Exception as e:
                print(str(e))
                print('Reminder: Custom metric function arguments must be '
                      'defined as follows: custom_metric(y_pred, y_true, x).')
                exit()
        elif not isinstance(metric, tf.Tensor):
            raise ValueError("Invalid Metric type.")

    # Building other ops (loss, training ops...)
    if isinstance(loss, str):
        loss = objectives.get(loss)(incoming, placeholder)
    # Check if function
    elif hasattr(loss, '__call__'):
        try:
            loss = loss(incoming, placeholder)
        except Exception as e:
            print(str(e))
            print('Reminder: Custom loss function arguments must be defined '
                  'as follows: custom_loss(y_pred, y_true).')
            exit()
    elif not isinstance(loss, tf.Tensor):
        raise ValueError("Invalid Loss type.")

    tr_vars = trainable_vars
    if not tr_vars:
        tr_vars = tf.trainable_variables()

    if not restore:
        tf.add_to_collection(tf.GraphKeys.EXCL_RESTORE_VARS, 'moving_avg')
        tf.add_to_collection(tf.GraphKeys.EXCL_RESTORE_VARS,
                             optimizer._name + '/')

    tr_op = TrainOp(loss=loss,
                    optimizer=optimizer,
                    metric=metric,
                    trainable_vars=tr_vars,
                    batch_size=batch_size,
                    shuffle=shuffle_batches,
                    step_tensor=step_tensor,
                    name=op_name)

    tf.add_to_collection(tf.GraphKeys.TRAIN_OPS, tr_op)

    if not hasattr(incoming, '__len__'):
        incoming.placeholder = placeholder

    return incoming
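This variant also accepts plain Python callables for the optimizer, loss and metric; the signatures below mirror the reminders printed by the error handlers above (function names are illustrative, and `net` stands for the output tensor of a previously built network):

```python
import tensorflow as tf

def custom_loss(y_pred, y_true):
    # Called above as loss(incoming, placeholder).
    return tf.reduce_mean(tf.square(y_pred - y_true))

def custom_metric(y_pred, y_true, x):
    # Called above as metric(incoming, placeholder, inputs).
    correct = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y_true, 1))
    return tf.reduce_mean(tf.cast(correct, tf.float32))

def custom_optimizer(learning_rate):
    # Must return (optimizer, step_tensor); step_tensor may be None.
    return tf.train.GradientDescentOptimizer(learning_rate), None

net = regression(net, optimizer=custom_optimizer,
                 loss=custom_loss, metric=custom_metric)
```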
Example 10
def fully_connected(incoming,
                    n_units,
                    activation='linear',
                    bias=True,
                    weights_init='truncated_normal',
                    bias_init='zeros',
                    regularizer=None,
                    weight_decay=0.001,
                    trainable=True,
                    restore=True,
                    reuse=False,
                    scope=None,
                    name="FullyConnected"):
    input_shape = utils.get_incoming_shape(incoming)
    assert len(input_shape) > 1, "Incoming Tensor shape must be at least 2-D"
    n_inputs = int(np.prod(input_shape[1:]))

    with tf.variable_scope(scope,
                           default_name=name,
                           values=[incoming],
                           reuse=reuse) as scope:
        name = scope.name

        W_init = weights_init
        if isinstance(weights_init, str):
            W_init = initializations.get(weights_init)()
        W_regul = None
        if regularizer is not None:
            W_regul = lambda x: losses.get(regularizer)(x, weight_decay)
        W = vs.variable('W',
                        shape=[n_inputs, n_units],
                        regularizer=W_regul,
                        initializer=W_init,
                        trainable=trainable,
                        restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)

        b = None
        if bias:
            if isinstance(bias_init, str):
                bias_init = initializations.get(bias_init)()
            b = vs.variable('b',
                            shape=[n_units],
                            initializer=bias_init,
                            trainable=trainable,
                            restore=restore)
            tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b)

        inference = incoming
        # If input is not 2d, flatten it.
        if len(input_shape) > 2:
            inference = tf.reshape(inference, [-1, n_inputs])

        inference = tf.matmul(inference, W)
        if b is not None: inference = tf.nn.bias_add(inference, b)
        if activation:
            if isinstance(activation, str):
                inference = activations.get(activation)(inference)
            elif hasattr(activation, '__call__'):
                inference = activation(inference)
            else:
                raise ValueError("Invalid Activation.")

        # Track activations.
        tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, inference)

    # Add attributes to Tensor to easily access weights.
    inference.scope = scope
    inference.W = W
    inference.b = b

    # Track output tensor.
    tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, inference)

    return inference
Example 11
def conv_2d_BN(incoming, nb_filter, filter_size, strides=1, padding='same',
            activation='linear', bias=True, weights_init='xavier',
            bias_init='zeros', regularizer=None, weight_decay=0.001,
            trainable=True, restore=True, reuse=False, scope=None,
            name="Conv2D", batch_norm=False):
    """ Convolution 2D.
    Input:
        4-D Tensor [batch, height, width, in_channels].
    Output:
        4-D Tensor [batch, new height, new width, nb_filter].
    Arguments:
        incoming: `Tensor`. Incoming 4-D Tensor.
        nb_filter: `int`. The number of convolutional filters.
        filter_size: `int` or `list of int`. Size of filters.
        strides: 'int` or list of `int`. Strides of conv operation.
            Default: [1 1 1 1].
        padding: `str` from `"same", "valid"`. Padding algo to use.
            Default: 'same'.
        activation: `str` (name) or `function` (returning a `Tensor`).
            Activation applied to this layer (see tflearn.activations).
            Default: 'linear'.
        bias: `bool`. If True, a bias is used.
        weights_init: `str` (name) or `Tensor`. Weights initialization.
            (see tflearn.initializations) Default: 'truncated_normal'.
        bias_init: `str` (name) or `Tensor`. Bias initialization.
            (see tflearn.initializations) Default: 'zeros'.
        regularizer: `str` (name) or `Tensor`. Add a regularizer to this
            layer weights (see tflearn.regularizers). Default: None.
        weight_decay: `float`. Regularizer decay parameter. Default: 0.001.
        trainable: `bool`. If True, weights will be trainable.
        restore: `bool`. If True, this layer weights will be restored when
            loading a model.
        reuse: `bool`. If True and 'scope' is provided, this layer variables
            will be reused (shared).
        scope: `str`. Define this layer scope (optional). A scope can be
            used to share variables between layers. Note that scope will
            override name.
        name: A name for this layer (optional). Default: 'Conv2D'.
        batch_norm: `bool`. If True, add batch normalization with default
            TFLearn parameters before the activation layer.
    Attributes:
        scope: `Scope`. This layer scope.
        W: `Variable`. Variable representing filter weights.
        b: `Variable`. Variable representing biases.
    """
    input_shape = utils.get_incoming_shape(incoming)
    assert len(input_shape) == 4, "Incoming Tensor shape must be 4-D"
    filter_size = utils.autoformat_filter_conv2d(filter_size,
                                                 input_shape[-1],
                                                 nb_filter)
    strides = utils.autoformat_kernel_2d(strides)
    padding = utils.autoformat_padding(padding)

    # Variable Scope fix for older TF
    try:
        vscope = tf.variable_scope(scope, default_name=name, values=[incoming],
                                   reuse=reuse)
    except Exception:
        vscope = tf.variable_op_scope([incoming], scope, name, reuse=reuse)

    with vscope as scope:
        name = scope.name

        W_init = weights_init
        if isinstance(weights_init, str):
            W_init = initializations.get(weights_init)()
        W_regul = None
        if regularizer:
            W_regul = lambda x: losses.get(regularizer)(x, weight_decay)
        W = vs.variable('W', shape=filter_size, regularizer=W_regul,
                        initializer=W_init, trainable=trainable,
                        restore=restore)

        # Track per layer variables
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)

        b = None
        if bias:
            if isinstance(bias_init, str):
                bias_init = initializations.get(bias_init)()
            b = vs.variable('b', shape=nb_filter, initializer=bias_init,
                            trainable=trainable, restore=restore)
            # Track per layer variables
            tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b)

        inference = tf.nn.conv2d(incoming, W, strides, padding)
        if b is not None: inference = tf.nn.bias_add(inference, b)

        if batch_norm:
            inference = batch_normalization(inference)
        
        if isinstance(activation, str):
            inference = activations.get(activation)(inference)
        elif hasattr(activation, '__call__'):
            inference = activation(inference)
        else:
            raise ValueError("Invalid Activation.")

        # Track activations.
        tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, inference)

    # Add attributes to Tensor to easily access weights.
    inference.scope = scope
    inference.W = W
    inference.b = b

    # Track output tensor.
    tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, inference)

    return inference
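A hedged usage sketch of this helper: with `batch_norm=True` each block applies conv, then batch normalization, then the activation, as the code above shows. Shapes and layer sizes are illustrative; standard tflearn import paths are assumed for the other layers.

```python
import tflearn
from tflearn.layers.core import input_data, fully_connected
from tflearn.layers.estimator import regression

# 32x32 RGB images.
net = input_data(shape=[None, 32, 32, 3])
net = conv_2d_BN(net, 32, 3, activation='relu', batch_norm=True)
net = conv_2d_BN(net, 64, 3, activation='relu', batch_norm=True)
net = fully_connected(net, 10, activation='softmax')
net = regression(net)
model = tflearn.DNN(net)
```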
Example 12
def single_unit(incoming,
                activation='linear',
                bias=True,
                trainable=True,
                restore=True,
                reuse=False,
                scope=None,
                name="Linear"):
    """ Single Unit.

    A single unit (Linear) Layer.

    Input:
        1-D Tensor [samples]. Inputs with more than one dimension will be flattened.

    Output:
        1-D Tensor [samples].

    Arguments:
        incoming: `Tensor`. Incoming Tensor.
        activation: `str` (name) or `function`. Activation applied to this
            layer (see tflearn.activations). Default: 'linear'.
        bias: `bool`. If True, a bias is used.
        trainable: `bool`. If True, weights will be trainable.
        restore: `bool`. If True, this layer weights will be restored when
            loading a model.
        reuse: `bool`. If True and 'scope' is provided, this layer variables
            will be reused (shared).
        scope: `str`. Define this layer scope (optional). A scope can be
            used to share variables between layers. Note that scope will
            override name.
        name: A name for this layer (optional). Default: 'Linear'.

    Attributes:
        W: `Tensor`. Variable representing weight.
        b: `Tensor`. Variable representing bias.

    """
    input_shape = utils.get_incoming_shape(incoming)
    n_inputs = int(np.prod(input_shape[1:]))

    # Build variables and inference.
    with tf.variable_scope(scope,
                           default_name=name,
                           values=[incoming],
                           reuse=reuse) as scope:
        name = scope.name

        W = va.variable('W',
                        shape=[n_inputs],
                        initializer=tf.constant_initializer(np.random.randn()),
                        trainable=trainable,
                        restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)

        b = None
        if bias:
            b = va.variable('b',
                            shape=[n_inputs],
                            initializer=tf.constant_initializer(
                                np.random.randn()),
                            trainable=trainable,
                            restore=restore)
            tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b)

        inference = incoming
        # If input is not 2d, flatten it.
        if len(input_shape) > 1:
            inference = tf.reshape(inference, [-1])

        inference = tf.multiply(inference, W)
        if b is not None: inference = tf.add(inference, b)

        if isinstance(activation, str):
            inference = activations.get(activation)(inference)
        elif hasattr(activation, '__call__'):
            inference = activation(inference)
        else:
            raise ValueError("Invalid Activation.")

        # Track activations.
        tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, inference)

    # Add attributes to Tensor to easily access weights.
    inference.scope = scope
    inference.W = W
    inference.b = b

    # Track output tensor.
    tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, inference)

    return inference
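Since the layer multiplies the flattened input elementwise by a single weight vector and adds a bias, it is the building block for plain linear regression; a sketch in the spirit of TFLearn's linear regression example (import paths assumed, data arrays illustrative):

```python
import tflearn
from tflearn.layers.core import input_data, single_unit
from tflearn.layers.estimator import regression

# Fit y ~ W * x + b on scalar inputs.
net = input_data(shape=[None])
net = single_unit(net)
net = regression(net, optimizer='sgd', loss='mean_square',
                 learning_rate=0.01)

model = tflearn.DNN(net)
# model.fit(X, Y, n_epoch=1000)  # X, Y: 1-D arrays of floats
```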
Example 13
def fully_connected(incoming,
                    n_units,
                    activation='linear',
                    bias=True,
                    weights_init='truncated_normal',
                    bias_init='zeros',
                    regularizer=None,
                    weight_decay=0.001,
                    trainable=True,
                    restore=True,
                    reuse=False,
                    scope=None,
                    name="FullyConnected"):
    """ Fully Connected.

    A fully connected layer.

    Input:
        (2+)-D Tensor [samples, input dim]. If not 2-D, input will be flattened.

    Output:
        2D Tensor [samples, n_units].

    Arguments:
        incoming: `Tensor`. Incoming (2+)D Tensor.
        n_units: `int`, number of units for this layer.
        activation: `str` (name) or `function` (returning a `Tensor`).
            Activation applied to this layer (see tflearn.activations).
            Default: 'linear'.
        bias: `bool`. If True, a bias is used.
        weights_init: `str` (name) or `Tensor`. Weights initialization.
            (see tflearn.initializations) Default: 'truncated_normal'.
        bias_init: `str` (name) or `Tensor`. Bias initialization.
            (see tflearn.initializations) Default: 'zeros'.
        regularizer: `str` (name) or `Tensor`. Add a regularizer to this
            layer weights (see tflearn.regularizers). Default: None.
        weight_decay: `float`. Regularizer decay parameter. Default: 0.001.
        trainable: `bool`. If True, weights will be trainable.
        restore: `bool`. If True, this layer weights will be restored when
            loading a model.
        reuse: `bool`. If True and 'scope' is provided, this layer variables
            will be reused (shared).
        scope: `str`. Define this layer scope (optional). A scope can be
            used to share variables between layers. Note that scope will
            override name.
        name: A name for this layer (optional). Default: 'FullyConnected'.

    Attributes:
        scope: `Scope`. This layer scope.
        W: `Tensor`. Variable representing units weights.
        b: `Tensor`. Variable representing biases.

    """
    input_shape = utils.get_incoming_shape(incoming)
    assert len(input_shape) > 1, "Incoming Tensor shape must be at least 2-D"
    n_inputs = int(np.prod(input_shape[1:]))

    with tf.variable_scope(scope,
                           default_name=name,
                           values=[incoming],
                           reuse=reuse) as scope:
        name = scope.name

        W_init = weights_init
        if isinstance(weights_init, str):
            W_init = initializations.get(weights_init)()
        W_regul = None
        if regularizer:
            W_regul = lambda x: losses.get(regularizer)(x, weight_decay)
        W = va.variable('W',
                        shape=[n_inputs, n_units],
                        regularizer=W_regul,
                        initializer=W_init,
                        trainable=trainable,
                        restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)

        b = None
        if bias:
            if isinstance(bias_init, str):
                bias_init = initializations.get(bias_init)()
            b = va.variable('b',
                            shape=[n_units],
                            initializer=bias_init,
                            trainable=trainable,
                            restore=restore)
            tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b)

        inference = incoming
        # If input is not 2d, flatten it.
        if len(input_shape) > 2:
            inference = tf.reshape(inference, [-1, n_inputs])

        inference = tf.matmul(inference, W)
        if b is not None: inference = tf.nn.bias_add(inference, b)
        if activation:
            if isinstance(activation, str):
                inference = activations.get(activation)(inference)
            elif hasattr(activation, '__call__'):
                inference = activation(inference)
            else:
                raise ValueError("Invalid Activation.")

        # Track activations.
        tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, inference)

    # Add attributes to Tensor to easily access weights.
    inference.scope = scope
    inference.W = W
    inference.b = b

    # Track output tensor.
    tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, inference)

    return inference
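The `scope`/`reuse` pair documented above lets two calls share the same `W` and `b`; a small sketch (scope name and shapes are illustrative, usual tflearn import paths assumed):

```python
from tflearn.layers.core import input_data, fully_connected

net_a = input_data(shape=[None, 128], name='input_a')
net_b = input_data(shape=[None, 128], name='input_b')

# First call creates W and b under the scope 'shared_fc'.
out_a = fully_connected(net_a, 64, activation='relu', scope='shared_fc')
# Second call reuses the same variables.
out_b = fully_connected(net_b, 64, activation='relu', scope='shared_fc',
                        reuse=True)
# out_a.W and out_b.W now refer to the same underlying variable.
```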
Example 14
def highway(incoming, n_units, activation='linear', transform_dropout=None,
            weights_init='truncated_normal', bias_init='zeros',
            regularizer=None, weight_decay=0.001, trainable=True,
            restore=True, reuse=False, scope=None,
            name="FullyConnectedHighway"):
    """ Fully Connected Highway.

    A fully connected highway network layer, with some inspiration from
    [https://github.com/fomorians/highway-fcn](https://github.com/fomorians/highway-fcn).

    Input:
        (2+)-D Tensor [samples, input dim]. If not 2-D, input will be flattened.

    Output:
        2D Tensor [samples, n_units].

    Arguments:
        incoming: `Tensor`. Incoming (2+)D Tensor.
        n_units: `int`, number of units for this layer.
        activation: `str` (name) or `function` (returning a `Tensor`).
            Activation applied to this layer (see tflearn.activations).
            Default: 'linear'.
        transform_dropout: `float`. Keep probability on the highway transform gate.
        weights_init: `str` (name) or `Tensor`. Weights initialization.
            (see tflearn.initializations) Default: 'truncated_normal'.
        bias_init: `str` (name) or `Tensor`. Bias initialization.
            (see tflearn.initializations) Default: 'zeros'.
        regularizer: `str` (name) or `Tensor`. Add a regularizer to this
            layer weights (see tflearn.regularizers). Default: None.
        weight_decay: `float`. Regularizer decay parameter. Default: 0.001.
        trainable: `bool`. If True, weights will be trainable.
        restore: `bool`. If True, this layer weights will be restored when
            loading a model
        reuse: `bool`. If True and 'scope' is provided, this layer variables
            will be reused (shared).
        scope: `str`. Define this layer scope (optional). A scope can be
            used to share variables between layers. Note that scope will
            override name.
        name: A name for this layer (optional). Default: 'FullyConnectedHighway'.

    Attributes:
        scope: `Scope`. This layer scope.
        W: `Tensor`. Variable representing units weights.
        W_t: `Tensor`. Variable representing units weights for transform gate.
        b: `Tensor`. Variable representing biases.
        b_t: `Tensor`. Variable representing biases for transform gate.

    Links:
        [https://arxiv.org/abs/1505.00387](https://arxiv.org/abs/1505.00387)

    """
    input_shape = utils.get_incoming_shape(incoming)
    assert len(input_shape) > 1, "Incoming Tensor shape must be at least 2-D"
    n_inputs = int(np.prod(input_shape[1:]))

    # Build variables and inference.
    with tf.variable_op_scope([incoming], scope, name, reuse=reuse) as scope:
        name = scope.name

        W_init = weights_init
        if isinstance(weights_init, str):
            W_init = initializations.get(weights_init)()
        W_regul = None
        if regularizer:
            W_regul = lambda x: losses.get(regularizer)(x, weight_decay)
        W = va.variable('W', shape=[n_inputs, n_units], regularizer=W_regul,
                        initializer=W_init, trainable=trainable,
                        restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)

        if isinstance(bias_init, str):
            bias_init = initializations.get(bias_init)()
        b = va.variable('b', shape=[n_units], initializer=bias_init,
                        trainable=trainable, restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b)

        # Weight and bias for the transform gate
        W_T = va.variable('W_T', shape=[n_inputs, n_units],
                          regularizer=None, initializer=W_init,
                          trainable=trainable, restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W_T)

        b_T = va.variable('b_T', shape=[n_units],
                          initializer=tf.constant_initializer(-1),
                          trainable=trainable, restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b_T)

        # If input is not 2d, flatten it.
        if len(input_shape) > 2:
            incoming = tf.reshape(incoming, [-1, n_inputs])

        if isinstance(activation, str):
            activation = activations.get(activation)
        elif not hasattr(activation, '__call__'):
            raise ValueError("Invalid Activation.")

        H = activation(tf.matmul(incoming, W) + b)
        T = tf.sigmoid(tf.matmul(incoming, W_T) + b_T)
        if transform_dropout:
            T = dropout(T, transform_dropout)
        C = tf.sub(1.0, T)

        inference = tf.add(tf.mul(H, T), tf.mul(incoming, C))

        # Track activations.
        tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, inference)

    # Add attributes to Tensor to easily access weights.
    inference.scope = scope
    inference.W = W
    inference.W_t = W_T
    inference.b = b
    inference.b_t = b_T

    # Track output tensor.
    tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, inference)

    return inference
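For intuition, the core of the layer is the gating rule y = H(x)·T(x) + x·(1 − T(x)); a tiny NumPy check of that combination, with stand-ins for the learned transforms (values are arbitrary):

```python
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

x = np.array([0.5, -1.0, 2.0])
H = np.tanh(x)          # stand-in for activation(x.W + b)
T = sigmoid(x - 1.0)    # stand-in for sigmoid(x.W_T + b_T); b_T starts at -1
y = H * T + x * (1.0 - T)

# With the transform gate biased negative (b_T = -1 above), T starts small,
# so y stays close to x ("carry" behaviour) until the gate learns to open.
```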
Example 15
def regression(incoming,
               placeholder=None,
               optimizer='adam',
               loss='categorical_crossentropy',
               metric='default',
               learning_rate=0.001,
               dtype=tf.float32,
               batch_size=64,
               shuffle_batches=True,
               to_one_hot=False,
               n_classes=None,
               trainable_vars=None,
               restore=True,
               op_name=None,
               name=None):
    """ Regression.

    Input:
        2-D Tensor Layer.

    Output:
        2-D Tensor Layer (Same as input).

    Arguments:
        incoming: `Tensor`. Incoming 2-D Tensor.
        placeholder: `Tensor`. This regression target (label) placeholder.
            If 'None' provided, a placeholder will be added automatically.
            You can retrieve that placeholder through graph key: 'TARGETS',
            or the 'placeholder' attribute of this function's returned tensor.
        optimizer: `str` (name), `Optimizer` or `function`. Optimizer to use.
            Default: 'adam' (Adaptive Moment Estimation).
        loss: `str` (name) or `function`. Loss function used by this layer
            optimizer. Default: 'categorical_crossentropy'.
        metric: `str`, `Metric` or `function`. The metric to be used.
            Default: 'default' metric is 'accuracy'. To disable metric
            calculation, set it to 'None'.
        learning_rate: `float`. This layer optimizer's learning rate.
        dtype: `tf.types`. This layer placeholder type. Default: tf.float32.
        batch_size: `int`. Batch size of data to use for training. TFLearn
            supports a different batch size for every optimizer. Default: 64.
        shuffle_batches: `bool`. Whether or not to shuffle this optimizer's
            batches at every epoch. Default: True.
        to_one_hot: `bool`. If True, labels will be encoded to one hot vectors.
            'n_classes' must then be specified.
        n_classes: `int`. The total number of classes. Only required when using
            'to_one_hot' option.
        trainable_vars: list of `Variable`. If specified, this regression will
            only update the given variables' weights. Else, all trainable
            variables are going to be updated.
        restore: `bool`. If False, variables related to optimizers such
            as moving averages will not be restored when loading a
            pre-trained model.
        op_name: A name for this layer optimizer (optional).
            Default: optimizer op name.
        name: A name for this layer's placeholder scope.

    Attributes:
        placeholder: `Tensor`. Placeholder for feeding labels.

    """

    input_shape = utils.get_incoming_shape(incoming)

    if placeholder is None:
        pscope = "TargetsData" if not name else name
        with tf.name_scope(pscope):
            placeholder = tf.placeholder(shape=input_shape,
                                         dtype=dtype,
                                         name="Y")

    tf.add_to_collection(tf.GraphKeys.TARGETS, placeholder)

    if to_one_hot:
        if n_classes is None:
            raise Exception("'n_classes' is required when using 'to_one_hot'.")
        placeholder = core.one_hot_encoding(placeholder, n_classes)

    step_tensor = None
    # Building Optimizer
    if isinstance(optimizer, str):
        _opt = optimizers.get(optimizer)(learning_rate)
        op_name = op_name if op_name else type(_opt).__name__
        _opt.build()
        optimizer = _opt.get_tensor()
    elif isinstance(optimizer, optimizers.Optimizer):
        op_name = op_name if op_name else type(optimizer).__name__
        if optimizer.has_decay:
            step_tensor = tf.Variable(0.,
                                      name="Training_step",
                                      trainable=False)
        optimizer.build(step_tensor)
        optimizer = optimizer.get_tensor()
    elif hasattr(optimizer, '__call__'):
        try:
            optimizer, step_tensor = optimizer(learning_rate)
        except Exception as e:
            print(str(e))
            print(
                "Reminder: Custom Optimizer function must return (optimizer, "
                "step_tensor) and take one argument: 'learning_rate'. "
                "Note that returned step_tensor can be 'None' if no decay.")
            exit()
    elif not isinstance(optimizer, tf.train.Optimizer):
        raise ValueError("Invalid Optimizer type.")

    inputs = tf.get_collection(tf.GraphKeys.INPUTS)
    #inputs = tf.concat(0, utils.get_tensor_parents_placeholders(incoming))

    # Building metric
    # No auto accuracy for linear regression
    if len(input_shape) == 1 and metric == 'default':
        metric = None
    if metric is not None:
        # Default metric is accuracy
        if metric == 'default': metric = 'accuracy'
        if isinstance(metric, str):
            metric = metrics.get(metric)()
            metric.build(incoming, placeholder, inputs)
            metric = metric.get_tensor()
        elif isinstance(metric, metrics.Metric):
            metric.build(incoming, placeholder, inputs)
            metric = metric.get_tensor()
        elif hasattr(metric, '__call__'):
            try:
                metric = metric(incoming, placeholder, inputs)
            except Exception as e:
                print(str(e))
                print('Reminder: Custom metric function arguments must be '
                      'defined as follows: custom_metric(y_pred, y_true, x).')
                exit()
        elif not isinstance(metric, tf.Tensor):
            raise ValueError("Invalid Metric type.")

    # Building other ops (loss, training ops...)
    if isinstance(loss, str):
        loss = objectives.get(loss)(incoming, placeholder)
    # Check if function
    elif hasattr(loss, '__call__'):
        try:
            loss = loss(incoming, placeholder)
        except Exception as e:
            print(str(e))
            print('Reminder: Custom loss function arguments must be defined '
                  'as follows: custom_loss(y_pred, y_true).')
            exit()
    elif not isinstance(loss, tf.Tensor):
        raise ValueError("Invalid Loss type.")

    tr_vars = trainable_vars
    if not tr_vars:
        tr_vars = tf.trainable_variables()

    if not restore:
        tf.add_to_collection(tf.GraphKeys.EXCL_RESTORE_VARS, 'moving_avg')
        tf.add_to_collection(tf.GraphKeys.EXCL_RESTORE_VARS,
                             optimizer._name + '/')

    tr_op = TrainOp(loss=loss,
                    optimizer=optimizer,
                    metric=metric,
                    trainable_vars=tr_vars,
                    batch_size=batch_size,
                    shuffle=shuffle_batches,
                    step_tensor=step_tensor,
                    name=op_name)

    tf.add_to_collection(tf.GraphKeys.TRAIN_OPS, tr_op)

    if not hasattr(incoming, '__len__'):
        incoming.placeholder = placeholder

    return incoming
Example 16
def regression(incoming,
               placeholder='default',
               optimizer='adam',
               loss='categorical_crossentropy',
               metric='default',
               learning_rate=0.001,
               dtype=tf.float32,
               batch_size=64,
               shuffle_batches=True,
               to_one_hot=False,
               n_classes=None,
               trainable_vars=None,
               restore=True,
               op_name=None,
               validation_monitors=None,
               validation_batch_size=None,
               name=None):
    """ Regression.

    The regression layer is used in TFLearn to apply a regression (linear or
    logistic) to the provided input. It requires specifying a TensorFlow
    gradient descent optimizer 'optimizer' that will minimize the provided
    loss function 'loss' (which calculates the errors). A metric can also be
    provided to evaluate the model performance.

    A 'TrainOp' is generated, holding all information about the optimization
    process. It is added to TensorFlow collection 'tf.GraphKeys.TRAIN_OPS'
    and later used by TFLearn 'models' classes to perform the training.

    An optional placeholder 'placeholder' can be specified to use a custom
    TensorFlow target placeholder instead of creating a new one. The target
    placeholder is added to the 'tf.GraphKeys.TARGETS' TensorFlow
    collection, so that it can be retrieved later. In case no target is used,
    set the placeholder to None.

    Additionally, a list of variables 'trainable_vars' can be specified,
    so that only those will be updated when applying the backpropagation
    algorithm.

    Input:
        2-D Tensor Layer.

    Output:
        2-D Tensor Layer (Same as input).

    Arguments:
        incoming: `Tensor`. Incoming 2-D Tensor.
        placeholder: `Tensor`. This regression target (label) placeholder.
            If 'default', a placeholder will be added automatically.
            You can retrieve that placeholder through graph key: 'TARGETS',
            or the 'placeholder' attribute of this function's returned tensor.
            If you do not want to use any target, set placeholder to 'None'.
        optimizer: `str` (name), `Optimizer` or `function`. Optimizer to use.
            Default: 'adam' (Adaptive Moment Estimation).
        loss: `str` (name) or `function`. Loss function used by this layer
            optimizer. Default: 'categorical_crossentropy'.
        metric: `str`, `Metric` or `function`. The metric to be used.
            Default: 'default' metric is 'accuracy'. To disable metric
            calculation, set it to 'None'.
        learning_rate: `float`. This layer optimizer's learning rate.
        dtype: `tf.types`. This layer placeholder type. Default: tf.float32.
        batch_size: `int`. Batch size of data to use for training. TFLearn
            supports a different batch size for every optimizer. Default: 64.
        shuffle_batches: `bool`. Whether or not to shuffle this optimizer's
            batches at every epoch. Default: True.
        to_one_hot: `bool`. If True, labels will be encoded to one hot vectors.
            'n_classes' must then be specified.
        n_classes: `int`. The total number of classes. Only required when using
            'to_one_hot' option.
        trainable_vars: list of `Variable`. If specified, this regression will
            only update the given variables' weights. Else, all trainable
            variables are going to be updated.
        restore: `bool`. If False, variables related to optimizers such
            as moving averages will not be restored when loading a
            pre-trained model.
        op_name: A name for this layer optimizer (optional).
            Default: optimizer op name.
        validation_monitors: `list` of `Tensor` objects.  List of variables
            to compute during validation, which are also used to produce
            summaries for output to TensorBoard.  For example, this can be
            used to periodically record a confusion matrix or AUC metric, 
            during training.  Each variable should have rank 1, i.e. 
            shape [None].
        validation_batch_size: `int` or None. Specifies the batch
            size to be used for the validation data feed.
        name: A name for this layer's placeholder scope.

    Attributes:
        placeholder: `Tensor`. Placeholder for feeding labels.

    """

    input_shape = utils.get_incoming_shape(incoming)

    if placeholder == 'default':
        pscope = "TargetsData" if not name else name
        with tf.name_scope(pscope):
            p_shape = [None] if to_one_hot else input_shape
            placeholder = tf.placeholder(shape=p_shape, dtype=dtype, name="Y")

    if placeholder is not None:
        if placeholder not in tf.get_collection(tf.GraphKeys.TARGETS):
            tf.add_to_collection(tf.GraphKeys.TARGETS, placeholder)

    if to_one_hot:
        if n_classes is None:
            raise Exception("'n_classes' is required when using 'to_one_hot'.")
        placeholder = core.one_hot_encoding(placeholder, n_classes)

    step_tensor = None
    # Building Optimizer
    if isinstance(optimizer, str):
        _opt = optimizers.get(optimizer)(learning_rate)
        op_name = op_name if op_name else type(_opt).__name__
        _opt.build()
        optimizer = _opt.get_tensor()
    elif isinstance(optimizer, optimizers.Optimizer):
        op_name = op_name if op_name else type(optimizer).__name__
        if optimizer.has_decay:
            step_tensor = tf.Variable(0.,
                                      name="Training_step",
                                      trainable=False)
        optimizer.build(step_tensor)
        optimizer = optimizer.get_tensor()
    elif hasattr(optimizer, '__call__'):
        try:
            optimizer, step_tensor = optimizer(learning_rate)
        except Exception as e:
            print(str(e))
            print(
                "Reminder: Custom Optimizer function must return (optimizer, "
                "step_tensor) and take one argument: 'learning_rate'. "
                "Note that returned step_tensor can be 'None' if no decay.")
            exit()
    elif not isinstance(optimizer, tf.train.Optimizer):
        raise ValueError("Invalid Optimizer type.")

    inputs = tf.get_collection(tf.GraphKeys.INPUTS)
    #inputs = tf.concat(0, utils.get_tensor_parents_placeholders(incoming))

    # Building metric
    # No auto accuracy for linear regression
    if len(input_shape) == 1 and metric == 'default':
        metric = None
    # If no placeholder, only a Tensor can be passed as metric
    if not isinstance(metric, tf.Tensor) and placeholder is None:
        metric = None
    if metric is not None:
        # Default metric is accuracy
        if metric == 'default':
            metric = 'accuracy'
        if isinstance(metric, str):
            metric = metrics.get(metric)()
            metric.build(incoming, placeholder, inputs)
            metric = metric.get_tensor()
        elif isinstance(metric, metrics.Metric):
            metric.build(incoming, placeholder, inputs)
            metric = metric.get_tensor()
        elif hasattr(metric, '__call__'):
            try:
                metric = metric(incoming, placeholder, inputs)
            except Exception as e:
                print(str(e))
                print('Reminder: Custom metric function arguments must be '
                      'defined as: custom_metric(y_pred, y_true, x).')
                exit()
        elif not isinstance(metric, tf.Tensor):
            raise ValueError("Invalid Metric type.")

    # Building other ops (loss, training ops...)
    if isinstance(loss, str):
        loss = objectives.get(loss)(incoming, placeholder)
    # Check if function
    elif hasattr(loss, '__call__'):
        try:
            loss = loss(incoming, placeholder)
        except Exception as e:
            print(str(e))
            print(
                'Reminder: Custom loss function arguments must be defined as: '
                'custom_loss(y_pred, y_true).')
            exit()
    elif not isinstance(loss, tf.Tensor):
        raise ValueError("Invalid Loss type.")

    tr_vars = trainable_vars
    if not tr_vars:
        tr_vars = tf.trainable_variables()

    if not restore:
        tf.add_to_collection(tf.GraphKeys.EXCL_RESTORE_VARS, 'moving_avg')
        tf.add_to_collection(tf.GraphKeys.EXCL_RESTORE_VARS,
                             optimizer._name + '/')

    tr_op = TrainOp(loss=loss,
                    optimizer=optimizer,
                    metric=metric,
                    trainable_vars=tr_vars,
                    batch_size=batch_size,
                    shuffle=shuffle_batches,
                    step_tensor=step_tensor,
                    validation_monitors=validation_monitors,
                    validation_batch_size=validation_batch_size,
                    name=op_name)

    tf.add_to_collection(tf.GraphKeys.TRAIN_OPS, tr_op)

    if not hasattr(incoming, '__len__'):
        incoming.placeholder = placeholder

    return incoming
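The error messages above spell out the expected signatures for custom loss and optimizer callables. Below is a minimal sketch of what such callables could look like, assuming the TF 1.x session-era API used throughout these examples; the names `custom_loss` and `custom_optimizer` are illustrative only.

```python
import tensorflow as tf

# Hypothetical custom loss: must accept (y_pred, y_true) and return a scalar Tensor.
def custom_loss(y_pred, y_true):
    # Plain mean squared error, written with TF 1.x ops to match the surrounding code.
    return tf.reduce_mean(tf.square(y_pred - y_true))

# Hypothetical custom optimizer factory: must accept a learning rate and return
# (optimizer, step_tensor); step_tensor may be None when no decay schedule is used.
def custom_optimizer(learning_rate):
    step_tensor = tf.Variable(0., trainable=False, name="Training_step")
    opt = tf.train.MomentumOptimizer(learning_rate, momentum=0.9)
    return opt, step_tensor

# Both could then be passed to the layer above, e.g.:
# net = regression(net, optimizer=custom_optimizer, loss=custom_loss)
```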
Esempio n. 17
0
def bidirectional_rnn(incoming,
                      rnncell_fw,
                      rnncell_bw,
                      return_seq=False,
                      return_states=False,
                      initial_state_fw=None,
                      initial_state_bw=None,
                      dynamic=False,
                      scope=None,
                      name="BiRNN"):
    """ Bidirectional RNN.

    Builds a bidirectional recurrent neural network. It requires 2 RNN Cells
    to process the sequence in forward and backward order. Any RNN Cell can
    be used, e.g. SimpleRNN, LSTM or GRU, with its own parameters, but the
    two cells' number of units must match.

    Input:
        3-D Tensor Layer [samples, timesteps, input dim].

    Output:
        if `return_seq`: 3-D Tensor [samples, timesteps, output dim].
        else: 2-D Tensor Layer [samples, output dim].

    Arguments:
        incoming: `Tensor`. The incoming Tensor.
        rnncell_fw: `RNNCell`. The RNN Cell to use for forward computation.
        rnncell_bw: `RNNCell`. The RNN Cell to use for backward computation.
        return_seq: `bool`. If True, returns the full sequence instead of
            last sequence output only.
        return_states: `bool`. If True, returns a tuple with output and
            states: (output, states).
        initial_state_fw: `Tensor`. An initial state for the forward RNN.
            This must be a tensor of appropriate type and shape [batch_size
            x cell.state_size].
        initial_state_bw: `Tensor`. An initial state for the backward RNN.
            This must be a tensor of appropriate type and shape [batch_size
            x cell.state_size].
        dynamic: `bool`. If True, dynamic computation is performed. It will not
            compute RNN steps above the sequence length. Note that because TF
            requires sequences of the same length to be fed, 0 is used as a
            mask, so sequences must be padded with 0 at the end. Computation
            stops at the first step with a value of 0.
        scope: `str`. Define this layer scope (optional). A scope can be
            used to share variables between layers. Note that scope will
            override name.
        name: `str`. A name for this layer (optional).

    """
    assert (rnncell_fw._num_units == rnncell_bw._num_units), \
        "RNN Cells number of units must match!"

    sequence_length = None
    if dynamic:
        sequence_length = retrieve_seq_length_op(incoming if isinstance(
            incoming, tf.Tensor) else tf.stack(incoming))

    input_shape = utils.get_incoming_shape(incoming)

    with tf.variable_scope(scope, default_name=name,
                           values=[incoming]) as scope:
        name = scope.name

        # TODO: DropoutWrapper

        inference = incoming
        # If a tensor given, convert it to a per timestep list
        if type(inference) not in [list, np.array]:
            ndim = len(input_shape)
            assert ndim >= 3, "Input dim should be at least 3."
            axes = [1, 0] + list(range(2, ndim))
            inference = tf.transpose(inference, (axes))
            inference = tf.unstack(inference)

        outputs, states_fw, states_bw = _brnn(
            rnncell_fw,
            rnncell_bw,
            inference,
            initial_state_fw=initial_state_fw,
            initial_state_bw=initial_state_bw,
            sequence_length=sequence_length,
            dtype=tf.float32)

        c = tf.GraphKeys.LAYER_VARIABLES + '/' + scope.name
        for v in [rnncell_fw.W, rnncell_fw.b, rnncell_bw.W, rnncell_bw.b]:
            if hasattr(v, "__len__"):
                for var in v:
                    tf.add_to_collection(c, var)
            else:
                tf.add_to_collection(c, v)

        # Track activations.
        tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, outputs[-1])

    if dynamic:
        if return_seq:
            o = outputs
        else:
            outputs = tf.transpose(tf.stack(outputs), [1, 0, 2])
            o = advanced_indexing_op(outputs, sequence_length)
    else:
        o = outputs if return_seq else outputs[-1]

    sfw = states_fw
    sbw = states_bw

    # Track output tensor.
    tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, o)

    return (o, sfw, sbw) if return_states else o
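As a usage illustration, the layer could be wired as below. This is only a sketch: it assumes TFLearn's `input_data`, `fully_connected` and `BasicLSTMCell` helpers and an input shaped `[samples, timesteps, input_dim]`, none of which are defined in this snippet.

```python
import tflearn
from tflearn.layers.recurrent import bidirectional_rnn, BasicLSTMCell

# Toy sequence input: 20 timesteps with 64 features each.
net = tflearn.input_data(shape=[None, 20, 64])

# Forward and backward cells must have the same number of units (asserted above).
net = bidirectional_rnn(net, BasicLSTMCell(128), BasicLSTMCell(128),
                        return_seq=False, dynamic=False)

# The last-step output concatenates both directions, so 'net' is roughly
# [samples, 2 * 128]; a softmax head turns it into a classifier.
net = tflearn.fully_connected(net, 2, activation='softmax')
```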
Esempio n. 18
0
def _rnn_template(incoming,
                  cell,
                  dropout=None,
                  return_seq=False,
                  return_state=False,
                  initial_state=None,
                  dynamic=False,
                  scope=None,
                  reuse=False,
                  name="LSTM"):
    """ RNN Layer Template. """
    sequence_length = None
    if dynamic:
        sequence_length = retrieve_seq_length_op(incoming if isinstance(
            incoming, tf.Tensor) else tf.stack(incoming))

    input_shape = utils.get_incoming_shape(incoming)

    with tf.variable_scope(scope,
                           default_name=name,
                           values=[incoming],
                           reuse=reuse) as scope:
        name = scope.name

        _cell = cell
        # Apply dropout
        if dropout:
            if type(dropout) in [tuple, list]:
                in_keep_prob = dropout[0]
                out_keep_prob = dropout[1]
            elif isinstance(dropout, float):
                in_keep_prob, out_keep_prob = dropout, dropout
            else:
                raise Exception("Invalid dropout type (must be a float or a "
                                "2-element tuple of floats)")
            cell = DropoutWrapper(cell, in_keep_prob, out_keep_prob)

        inference = incoming
        # If a tensor given, convert it to a per timestep list
        if type(inference) not in [list, np.array]:
            ndim = len(input_shape)
            assert ndim >= 3, "Input dim should be at least 3."
            axes = [1, 0] + list(range(2, ndim))
            inference = tf.transpose(inference, (axes))
            inference = tf.unstack(inference)

        outputs, state = _rnn(cell,
                              inference,
                              dtype=tf.float32,
                              initial_state=initial_state,
                              scope=name,
                              sequence_length=sequence_length)

        # Retrieve RNN Variables
        c = tf.GraphKeys.LAYER_VARIABLES + '/' + scope.name
        for v in [_cell.W, _cell.b]:
            if hasattr(v, "__len__"):
                for var in v:
                    tf.add_to_collection(c, var)
            else:
                tf.add_to_collection(c, v)
        # Track activations.
        tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, outputs[-1])

    if dynamic:
        if return_seq:
            o = outputs
        else:
            outputs = tf.transpose(tf.stack(outputs), [1, 0, 2])
            o = advanced_indexing_op(outputs, sequence_length)
    else:
        o = outputs if return_seq else outputs[-1]

    # Track output tensor.
    tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, o)

    return (o, state) if return_state else o
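`_rnn_template` is the shared backbone: concrete recurrent layers are thin wrappers that construct a cell and delegate to it. The sketch below shows the general shape of such a wrapper; `GRUCell` here stands for any RNN cell class that exposes the `W`/`b` attributes the template collects, and is not defined in this snippet.

```python
# Hypothetical wrapper in the style of TFLearn's lstm/gru layers.
def simple_gru(incoming, n_units, dropout=None, return_seq=False,
               return_state=False, initial_state=None, dynamic=False,
               scope=None, reuse=False, name="SimpleGRU"):
    # 'GRUCell' is assumed to be an RNNCell with .W and .b attributes,
    # since _rnn_template registers them as layer variables.
    cell = GRUCell(n_units)
    return _rnn_template(incoming, cell, dropout=dropout,
                         return_seq=return_seq, return_state=return_state,
                         initial_state=initial_state, dynamic=dynamic,
                         scope=scope, reuse=reuse, name=name)
```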
Esempio n. 19
0
def regression(incoming,
               placeholder='default',
               optimizer='adam',
               loss='categorical_crossentropy',
               metric='default',
               learning_rate=0.001,
               dtype=tf.float32,
               batch_size=64,
               shuffle_batches=True,
               to_one_hot=False,
               n_classes=None,
               trainable_vars=None,
               restore=True,
               op_name=None,
               validation_monitors=None,
               validation_batch_size=None,
               name=None):
    """ Regression.

    The regression layer is used in TFLearn to apply a regression (linear or
    logistic) to the provided input. It requires specifying a TensorFlow
    gradient descent optimizer 'optimizer' that will minimize the provided
    loss function 'loss' (which calculates the errors). A metric can also be
    provided to evaluate the model performance.

    A 'TrainOp' is generated, holding all information about the optimization
    process. It is added to TensorFlow collection 'tf.GraphKeys.TRAIN_OPS'
    and later used by TFLearn 'models' classes to perform the training.

    An optional placeholder 'placeholder' can be specified to use a custom
    TensorFlow target placeholder instead of creating a new one. The target
    placeholder is added to the 'tf.GraphKeys.TARGETS' TensorFlow
    collection, so that it can be retrieved later. In case no target is used,
    set the placeholder to None.

    Additionally, a list of variables 'trainable_vars' can be specified,
    so that only those variables are updated when applying the
    backpropagation algorithm.

    """

    input_shape = utils.get_incoming_shape(incoming)

    if placeholder == 'default':
        pscope = "TargetsData" if not name else name
        with tf.name_scope(pscope):
            p_shape = [None] if to_one_hot else input_shape
            placeholder = tf.placeholder(shape=p_shape, dtype=dtype, name="Y")
    elif placeholder is None:
        placeholder = None

    if placeholder is not None:
        if placeholder not in tf.get_collection(tf.GraphKeys.TARGETS):
            tf.add_to_collection(tf.GraphKeys.TARGETS, placeholder)

    if to_one_hot:
        if n_classes is None:
            raise Exception("'n_classes' is required when using 'to_one_hot'.")
        placeholder = core.one_hot_encoding(placeholder, n_classes)

    step_tensor = None
    # Building Optimizer
    if isinstance(optimizer, str):
        _opt = optimizers.get(optimizer)(learning_rate)
        op_name = op_name if op_name else type(_opt).__name__
        _opt.build()
        optimizer = _opt.get_tensor()
    elif isinstance(optimizer, optimizers.Optimizer):
        op_name = op_name if op_name else type(optimizer).__name__
        if optimizer.has_decay:
            step_tensor = tf.Variable(0.,
                                      name="Training_step",
                                      trainable=False)
        optimizer.build(step_tensor)
        optimizer = optimizer.get_tensor()
    elif hasattr(optimizer, '__call__'):
        try:
            optimizer, step_tensor = optimizer(learning_rate)
        except Exception as e:
            print(str(e))
            print(
                "Reminder: Custom Optimizer function must return (optimizer, "
                "step_tensor) and take one argument: 'learning_rate'. "
                "Note that returned step_tensor can be 'None' if no decay.")
            exit()
    elif not isinstance(optimizer, tf.train.Optimizer):
        raise ValueError("Invalid Optimizer type.")

    inputs = tf.get_collection(tf.GraphKeys.INPUTS)

    # Building metric
    # No auto accuracy for linear regression
    if len(input_shape) == 1 and metric == 'default':
        metric = None
    # If no placeholder, only a Tensor can be passed as metric
    if not isinstance(metric, tf.Tensor) and placeholder is None:
        metric = None
    if metric is not None:
        # Default metric is accuracy
        if metric == 'default':
            metric = 'accuracy'
        if isinstance(metric, str):
            metric = metrics.get(metric)()
            metric.build(incoming, placeholder, inputs)
            metric = metric.get_tensor()
        elif isinstance(metric, metrics.Metric):
            metric.build(incoming, placeholder, inputs)
            metric = metric.get_tensor()
        elif hasattr(metric, '__call__'):
            try:
                metric = metric(incoming, placeholder, inputs)
            except Exception as e:
                print(str(e))
                print('Reminder: Custom metric function arguments must be '
                      'defined as: custom_metric(y_pred, y_true, x).')
                exit()
        elif not isinstance(metric, tf.Tensor):
            raise ValueError("Invalid Metric type.")

    # Building other ops (loss, training ops...)
    if isinstance(loss, str):
        loss = objectives.get(loss)(incoming, placeholder)
    # Check if function
    elif hasattr(loss, '__call__'):
        try:
            loss = loss(incoming, placeholder)
        except Exception as e:
            print(str(e))
            print(
                'Reminder: Custom loss function arguments must be defined as: '
                'custom_loss(y_pred, y_true).')
            exit()
    elif not isinstance(loss, tf.Tensor):
        raise ValueError("Invalid Loss type.")

    tr_vars = trainable_vars
    if not tr_vars:
        tr_vars = tf.trainable_variables()

    if not restore:
        tf.add_to_collection(tf.GraphKeys.EXCL_RESTORE_VARS, 'moving_avg')
        tf.add_to_collection(tf.GraphKeys.EXCL_RESTORE_VARS,
                             optimizer._name + '/')

    tr_op = TrainOp(loss=loss,
                    optimizer=optimizer,
                    metric=metric,
                    trainable_vars=tr_vars,
                    batch_size=batch_size,
                    shuffle=shuffle_batches,
                    step_tensor=step_tensor,
                    validation_monitors=validation_monitors,
                    validation_batch_size=validation_batch_size,
                    name=op_name)

    tf.add_to_collection(tf.GraphKeys.TRAIN_OPS, tr_op)

    if not hasattr(incoming, '__len__'):
        incoming.placeholder = placeholder

    return incoming
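A short end-to-end sketch of how this layer is normally used, assuming TFLearn's `input_data`, `fully_connected` and `DNN` wrappers; `X` and `Y` stand for training data that is not defined here.

```python
import tflearn

net = tflearn.input_data(shape=[None, 784])
net = tflearn.fully_connected(net, 128, activation='relu')
net = tflearn.fully_connected(net, 10, activation='softmax')

# 'regression' attaches loss, optimizer and metric, and registers a TrainOp
# in tf.GraphKeys.TRAIN_OPS that tflearn.DNN later picks up for training.
net = tflearn.regression(net,
                         optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=0.001,
                         batch_size=64)

model = tflearn.DNN(net)
# model.fit(X, Y, n_epoch=10, validation_set=0.1)
```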
Esempio n. 20
0
def single_unit(incoming, activation='linear', bias=True, trainable=True,
                restore=True, reuse=False, scope=None, name="Linear"):
    """ Single Unit.

    A single unit (Linear) Layer.

    Input:
        1-D Tensor [samples]. If not 2D, input will be flattened.

    Output:
        1-D Tensor [samples].

    Arguments:
        incoming: `Tensor`. Incoming Tensor.
        activation: `str` (name) or `function`. Activation applied to this
            layer (see tflearn.activations). Default: 'linear'.
        bias: `bool`. If True, a bias is used.
        trainable: `bool`. If True, weights will be trainable.
        restore: `bool`. If True, this layer weights will be restored when
            loading a model.
        reuse: `bool`. If True and 'scope' is provided, this layer variables
            will be reused (shared).
        scope: `str`. Define this layer scope (optional). A scope can be
            used to share variables between layers. Note that scope will
            override name.
        name: A name for this layer (optional). Default: 'Linear'.

    Attributes:
        W: `Tensor`. Variable representing weight.
        b: `Tensor`. Variable representing bias.

    """
    input_shape = utils.get_incoming_shape(incoming)
    n_inputs = int(np.prod(input_shape[1:]))

    # Build variables and inference.
    with tf.variable_op_scope([incoming], scope, name, reuse=reuse) as scope:
        name = scope.name

        W = va.variable('W', shape=[n_inputs],
                        initializer=tf.constant_initializer(np.random.randn()),
                        trainable=trainable, restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)

        b = None
        if bias:
            b = va.variable('b', shape=[n_inputs],
                            initializer=tf.constant_initializer(np.random.randn()),
                            trainable=trainable, restore=restore)
            tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b)

        inference = incoming
        # If input is not 2d, flatten it.
        if len(input_shape) > 1:
            inference = tf.reshape(inference, [-1])

        inference = tf.mul(inference, W)
        if b: inference = tf.add(inference, b)

        if isinstance(activation, str):
            inference = activations.get(activation)(inference)
        elif hasattr(activation, '__call__'):
            inference = activation(inference)
        else:
            raise ValueError("Invalid Activation.")

        # Track activations.
        tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, inference)

    # Add attributes to Tensor to easy access weights.
    inference.scope = scope
    inference.W = W
    inference.b = b

    # Track output tensor.
    tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, inference)

    return inference
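Since the layer computes an element-wise `activation(x * W + b)` with one weight per input, it is essentially a per-sample linear model. A minimal sketch of a linear regression built with it (same assumptions about the TFLearn wrappers as above):

```python
import tflearn

# One scalar input per sample.
net = tflearn.input_data(shape=[None])
net = tflearn.single_unit(net)  # y = x * W + b
net = tflearn.regression(net, optimizer='sgd', loss='mean_square',
                         metric=None, learning_rate=0.01)
model = tflearn.DNN(net)
# model.fit(X, Y, n_epoch=1000)  # X, Y: 1-D arrays of inputs and targets
```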
Esempio n. 21
0
def single_unit(incoming, activation='linear', bias=True, trainable=True,
                restore=True, name="Linear"):
    """ Single Unit.

    A single unit (Linear) Layer.

    Input:
        1-D Tensor [samples]. If not 2D, input will be flattened.

    Output:
        1-D Tensor [samples].

    Arguments:
        incoming: `Tensor`. Incoming Tensor.
        activation: `str` (name) or `Tensor`. Activation applied to this layer.
            (see tflearn.activations). Default: 'linear'.
        bias: `bool`. If True, a bias is used.
        trainable: `bool`. If True, weights will be trainable.
        restore: `bool`. If True, this layer weights will be restored when
            loading a model.
        name: A name for this layer (optional). Default: 'Linear'.

    Attributes:
        W: `Tensor`. Variable representing weight.
        b: `Tensor`. Variable representing bias.

    """
    input_shape = utils.get_incoming_shape(incoming)
    n_inputs = int(np.prod(input_shape[1:]))

    # Build variables and inference.
    with tf.name_scope(name) as scope:

        W = va.variable(scope + 'W', shape=[n_inputs],
                        initializer=tf.constant_initializer(np.random.randn()),
                        trainable=trainable, restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + scope, W)

        b = None
        if bias:
            b = va.variable(scope + 'b', shape=[n_inputs],
                            initializer=tf.constant_initializer(np.random.randn()),
                            trainable=trainable, restore=restore)
            tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + scope, b)

        inference = incoming
        # If input is not 2d, flatten it.
        if len(input_shape) > 1:
            inference = tf.reshape(inference, [-1])

        inference = tf.mul(inference, W)
        if b: inference = tf.add(inference, b)
        inference = activations.get(activation)(inference)

        # Track activations.
        tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, inference)

    # Add attributes to Tensor to easy access weights.
    inference.scope = scope
    inference.W = W
    inference.b = b

    return inference
Esempio n. 22
0
def fully_connected(incoming, n_units, activation='linear', bias=True,
                    weights_init='truncated_normal', bias_init='zeros',
                    regularizer=None, weight_decay=0.001, trainable=True,
                    restore=True, reuse=False, scope=None,
                    name="FullyConnected"):
    """ Fully Connected.

    A fully connected layer.

    Input:
        (2+)-D Tensor [samples, input dim]. If not 2D, input will be flattened.

    Output:
        2D Tensor [samples, n_units].

    Arguments:
        incoming: `Tensor`. Incoming (2+)D Tensor.
        n_units: `int`, number of units for this layer.
        activation: `str` (name) or `function` (returning a `Tensor`).
            Activation applied to this layer (see tflearn.activations).
            Default: 'linear'.
        bias: `bool`. If True, a bias is used.
        weights_init: `str` (name) or `Tensor`. Weights initialization.
            (see tflearn.initializations) Default: 'truncated_normal'.
        bias_init: `str` (name) or `Tensor`. Bias initialization.
            (see tflearn.initializations) Default: 'zeros'.
        regularizer: `str` (name) or `Tensor`. Add a regularizer to this
            layer weights (see tflearn.regularizers). Default: None.
        weight_decay: `float`. Regularizer decay parameter. Default: 0.001.
        trainable: `bool`. If True, weights will be trainable.
        restore: `bool`. If True, this layer weights will be restored when
            loading a model.
        reuse: `bool`. If True and 'scope' is provided, this layer variables
            will be reused (shared).
        scope: `str`. Define this layer scope (optional). A scope can be
            used to share variables between layers. Note that scope will
            override name.
        name: A name for this layer (optional). Default: 'FullyConnected'.

    Attributes:
        scope: `Scope`. This layer scope.
        W: `Tensor`. Variable representing units weights.
        b: `Tensor`. Variable representing biases.

    """
    input_shape = utils.get_incoming_shape(incoming)
    assert len(input_shape) > 1, "Incoming Tensor shape must be at least 2-D"
    n_inputs = int(np.prod(input_shape[1:]))

    # Build variables and inference.
    with tf.variable_op_scope([incoming], scope, name, reuse=reuse) as scope:
        name = scope.name

        W_init = weights_init
        if isinstance(weights_init, str):
            W_init = initializations.get(weights_init)()
        W_regul = None
        if regularizer:
            W_regul = lambda x: losses.get(regularizer)(x, weight_decay)
        W = va.variable('W', shape=[n_inputs, n_units], regularizer=W_regul,
                        initializer=W_init, trainable=trainable,
                        restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)

        b = None
        if bias:
            if isinstance(bias_init, str):
                bias_init = initializations.get(bias_init)()
            b = va.variable('b', shape=[n_units], initializer=bias_init,
                            trainable=trainable, restore=restore)
            tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b)

        inference = incoming
        # If input is not 2d, flatten it.
        if len(input_shape) > 2:
            inference = tf.reshape(inference, [-1, n_inputs])

        inference = tf.matmul(inference, W)
        if b: inference = tf.nn.bias_add(inference, b)

        if isinstance(activation, str):
            inference = activations.get(activation)(inference)
        elif hasattr(activation, '__call__'):
            inference = activation(inference)
        else:
            raise ValueError("Invalid Activation.")

        # Track activations.
        tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, inference)

    # Add attributes to Tensor to easy access weights.
    inference.scope = scope
    inference.W = W
    inference.b = b

    # Track output tensor.
    tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, inference)

    return inference
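The layer computes `activation(x . W + b)` with `W` shaped `[n_inputs, n_units]`, flattening any extra input dimensions first; weight sharing between two calls goes through the `scope`/`reuse` pair. A sketch, again assuming the TFLearn wrappers used in these examples:

```python
import tflearn

# 28x28 inputs are flattened internally to 784 features.
net = tflearn.input_data(shape=[None, 28, 28])
fc1 = tflearn.fully_connected(net, 256, activation='relu',
                              regularizer='L2', weight_decay=0.001,
                              scope='shared_fc')
# Reusing the same scope shares W and b with fc1 instead of creating new variables.
fc2 = tflearn.fully_connected(net, 256, activation='relu',
                              scope='shared_fc', reuse=True)
```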
Esempio n. 23
0
def fully_connected(incoming, n_units, activation='linear', bias=True,
                    weights_init='truncated_normal', bias_init='zeros',
                    regularizer=None, weight_decay=0.001, trainable=True,
                    restore=True, name="FullyConnected"):
    """ Fully Connected.

    A fully connected layer.

    Input:
        (2+)-D Tensor [samples, input dim]. If not 2D, input will be flattened.

    Output:
        2D Tensor [samples, n_units].

    Arguments:
        incoming: `Tensor`. Incoming (2+)D Tensor.
        n_units: `int`, number of units for this layer.
        activation: `str` (name) or `Tensor`. Activation applied to this layer.
            (see tflearn.activations). Default: 'linear'.
        bias: `bool`. If True, a bias is used.
        weights_init: `str` (name) or `Tensor`. Weights initialization.
            (see tflearn.initializations) Default: 'truncated_normal'.
        bias_init: `str` (name) or `Tensor`. Bias initialization.
            (see tflearn.initializations) Default: 'zeros'.
        regularizer: `str` (name) or `Tensor`. Add a regularizer to this
            layer weights (see tflearn.regularizers). Default: None.
        weight_decay: `float`. Regularizer decay parameter. Default: 0.001.
        trainable: `bool`. If True, weights will be trainable.
        restore: `bool`. If True, this layer weights will be restored when
            loading a model.
        name: A name for this layer (optional). Default: 'FullyConnected'.

    Attributes:
        scope: `Scope`. This layer scope.
        W: `Tensor`. Variable representing units weights.
        b: `Tensor`. Variable representing biases.

    """
    input_shape = utils.get_incoming_shape(incoming)
    n_inputs = int(np.prod(input_shape[1:]))

    # Build variables and inference.
    with tf.name_scope(name) as scope:

        W_init = weights_init
        if isinstance(weights_init, str):
            W_init = initializations.get(weights_init)()
        W_regul = None
        if regularizer:
            W_regul = lambda x: losses.get(regularizer)(x, weight_decay)
        W = va.variable(scope + 'W', shape=[n_inputs, n_units],
                        regularizer=W_regul, initializer=W_init,
                        trainable=trainable, restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + scope, W)

        b = None
        if bias:
            b_init = initializations.get(bias_init)()
            b = va.variable(scope + 'b', shape=[n_units],
                            initializer=b_init, trainable=trainable,
                            restore=restore)
            tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + scope, b)

        inference = incoming
        # If input is not 2d, flatten it.
        if len(input_shape) > 2:
            inference = tf.reshape(inference, [-1, n_inputs])

        inference = tf.matmul(inference, W)
        if b: inference = tf.nn.bias_add(inference, b)
        inference = activations.get(activation)(inference)

        # Track activations.
        tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, inference)

    # Add attributes to Tensor to easy access weights.
    inference.scope = scope
    inference.W = W
    inference.b = b

    return inference
Esempio n. 24
0
def regression(incoming, placeholder=None, optimizer='adam',
               loss='categorical_crossentropy', metric='default',
               learning_rate=0.001, dtype=tf.float32, batch_size=64,
               shuffle_batches=True, op_name=None, name=None):
    """ Regression.

    Input:
        2-D Tensor Layer.

    Output:
        2-D Tensor Layer (Same as input).

    Arguments:
        incoming: `Tensor`. Incoming 2-D Tensor.
        placeholder: `Tensor`. This regression target (label) placeholder.
            If 'None' provided, a placeholder will be added automatically.
            You can retrieve that placeholder through graph key: 'TARGETS',
            or the 'placeholder' attribute of this function's returned tensor.
        optimizer: `str` (name) or `Optimizer`. Optimizer to use.
            Default: 'adam'.
        loss: `str` (name) or `Tensor`. Loss function used by this layer
            optimizer. Default: 'categorical_crossentropy'.
        metric: `str`, `Metric` or `Tensor`. The metric to be used.
            Default: 'default' metric is 'accuracy'. To disable metric
            calculation, set it to 'None'.
        learning_rate: `float`. This layer optimizer's learning rate.
        dtype: `tf.types`. This layer placeholder type. Default: tf.float32.
        batch_size: `int`. Batch size of data to use for training. tflearn
            supports different batch size for every optimizers. Default: 64.
        shuffle_batches: `bool`. Shuffle or not this optimizer batches at
            every epoch. Default: True.
        op_name: A name for this layer optimizer (optional).
            Default: optimizer op name.
        name: A name for this layer's placeholder scope.

    Attributes:
        placeholder: `Tensor`. Placeholder for feeding labels.

    """

    input_shape = utils.get_incoming_shape(incoming)

    if not placeholder:
        pscope = "TargetsData" if not name else name
        with tf.name_scope(pscope):
            pshape = [None, input_shape[-1]]
            if len(input_shape) == 1:
                pshape = [None]
            placeholder = tf.placeholder(shape=pshape, dtype=dtype, name="Y")

    tf.add_to_collection(tf.GraphKeys.TARGETS, placeholder)

    step_tensor = None
    # Building Optimizer
    if isinstance(optimizer, str):
        _opt = optimizers.get(optimizer)(learning_rate)
        op_name = op_name if op_name else type(_opt).__name__
        _opt.build()
        optimizer = _opt.get_tensor()
    elif isinstance(optimizer, optimizers.Optimizer):
        op_name = op_name if op_name else type(optimizer).__name__
        if optimizer.has_decay:
            step_tensor = tf.Variable(0., name="Training_step",
                                      trainable=False)
        optimizer.build(step_tensor)
        optimizer = optimizer.get_tensor()
    elif not isinstance(optimizer, tf.train.Optimizer):
        raise ValueError("Invalid Optimizer type.")

    inputs = tf.get_collection(tf.GraphKeys.INPUTS)
    #inputs = tf.concat(0, utils.get_tensor_parents_placeholders(incoming))

    # Building metric
    # No auto accuracy for linear regression
    if len(input_shape) == 1 and metric == 'default':
        metric = None
    if metric is not None:
        # Default metric is accuracy
        if metric == 'default': metric = 'accuracy'
        if isinstance(metric, str):
            metric = metrics.get(metric)()
            metric.build(incoming, placeholder, inputs)
            metric = metric.get_tensor()
        elif isinstance(metric, metrics.Metric):
            metric.build(incoming, placeholder, inputs)
            metric = metric.get_tensor()
        elif not isinstance(metric, tf.Tensor):
            raise ValueError("Invalid Metric type.")

    # Building other ops (loss, training ops...)
    if isinstance(loss, str):
        loss = objectives.get(loss)(incoming, placeholder)
    elif not isinstance(loss, tf.Tensor):
        raise ValueError("Invalid Loss type.")

    tr_op = TrainOp(loss=loss,
                    optimizer=optimizer,
                    metric=metric,
                    trainable_vars=tf.trainable_variables(),
                    batch_size=batch_size,
                    shuffle=shuffle_batches,
                    step_tensor=step_tensor,
                    name=op_name)

    tf.add_to_collection(tf.GraphKeys.TRAIN_OPS, tr_op)

    if not hasattr(incoming, '__len__'):
        incoming.placeholder = placeholder

    return incoming
Esempio n. 25
0
def single_unit(incoming,
                activation='linear',
                bias=True,
                trainable=True,
                restore=True,
                name="Linear"):
    """ Single Unit.

    A single unit (Linear) Layer.

    Input:
        1-D Tensor [samples]. If not 2D, input will be flattened.

    Output:
        1-D Tensor [samples].

    Arguments:
        incoming: `Tensor`. Incoming Tensor.
        activation: `str` (name) or `Tensor`. Activation applied to this layer.
            (see tflearn.activations). Default: 'linear'.
        bias: `bool`. If True, a bias is used.
        trainable: `bool`. If True, weights will be trainable.
        restore: `bool`. If True, this layer weights will be restored when
            loading a model.
        name: A name for this layer (optional). Default: 'Linear'.

    Attributes:
        W: `Tensor`. Variable representing weight.
        b: `Tensor`. Variable representing bias.

    """
    input_shape = utils.get_incoming_shape(incoming)
    n_inputs = int(np.prod(input_shape[1:]))

    # Build variables and inference.
    with tf.name_scope(name) as scope:

        W = va.variable(scope + 'W',
                        shape=[n_inputs],
                        initializer=tf.constant_initializer(np.random.randn()),
                        trainable=trainable,
                        restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + scope, W)

        b = None
        if bias:
            b = va.variable(scope + 'b',
                            shape=[n_inputs],
                            initializer=tf.constant_initializer(
                                np.random.randn()),
                            trainable=trainable,
                            restore=restore)
            tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + scope, b)

        inference = incoming
        # If input is not 2d, flatten it.
        if len(input_shape) > 1:
            inference = tf.reshape(inference, [-1])

        inference = tf.mul(inference, W)
        if b: inference = tf.add(inference, b)
        inference = activations.get(activation)(inference)

        # Track activations.
        tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, inference)

    # Add attributes to Tensor to easy access weights.
    inference.scope = scope
    inference.W = W
    inference.b = b

    return inference
Esempio n. 26
0
def highway(incoming,
            n_units,
            activation='linear',
            transform_dropout=None,
            weights_init='truncated_normal',
            bias_init='zeros',
            regularizer=None,
            weight_decay=0.001,
            trainable=True,
            restore=True,
            reuse=False,
            scope=None,
            name="FullyConnectedHighway"):
    """ Fully Connected Highway.

    A fully connected highway network layer, with some inspiration from
    [https://github.com/fomorians/highway-fcn](https://github.com/fomorians/highway-fcn).

    Input:
        (2+)-D Tensor [samples, input dim]. If not 2D, input will be flattened.

    Output:
        2D Tensor [samples, n_units].

    Arguments:
        incoming: `Tensor`. Incoming (2+)D Tensor.
        n_units: `int`, number of units for this layer.
        activation: `str` (name) or `function` (returning a `Tensor`).
            Activation applied to this layer (see tflearn.activations).
            Default: 'linear'.
        transform_dropout: `float`. Keep probability on the highway transform gate.
        weights_init: `str` (name) or `Tensor`. Weights initialization.
            (see tflearn.initializations) Default: 'truncated_normal'.
        bias_init: `str` (name) or `Tensor`. Bias initialization.
            (see tflearn.initializations) Default: 'zeros'.
        regularizer: `str` (name) or `Tensor`. Add a regularizer to this
            layer weights (see tflearn.regularizers). Default: None.
        weight_decay: `float`. Regularizer decay parameter. Default: 0.001.
        trainable: `bool`. If True, weights will be trainable.
        restore: `bool`. If True, this layer weights will be restored when
            loading a model.
        reuse: `bool`. If True and 'scope' is provided, this layer variables
            will be reused (shared).
        scope: `str`. Define this layer scope (optional). A scope can be
            used to share variables between layers. Note that scope will
            override name.
        name: A name for this layer (optional). Default: 'FullyConnectedHighway'.

    Attributes:
        scope: `Scope`. This layer scope.
        W: `Tensor`. Variable representing units weights.
        W_t: `Tensor`. Variable representing units weights for transform gate.
        b: `Tensor`. Variable representing biases.
        b_t: `Tensor`. Variable representing biases for transform gate.

    Links:
        [https://arxiv.org/abs/1505.00387](https://arxiv.org/abs/1505.00387)

    """
    input_shape = utils.get_incoming_shape(incoming)
    assert len(input_shape) > 1, "Incoming Tensor shape must be at least 2-D"
    n_inputs = int(np.prod(input_shape[1:]))

    # Build variables and inference.
    with tf.variable_scope(scope,
                           default_name=name,
                           values=[incoming],
                           reuse=reuse) as scope:
        name = scope.name

        W_init = weights_init
        if isinstance(weights_init, str):
            W_init = initializations.get(weights_init)()
        W_regul = None
        if regularizer:
            W_regul = lambda x: losses.get(regularizer)(x, weight_decay)
        W = va.variable('W',
                        shape=[n_inputs, n_units],
                        regularizer=W_regul,
                        initializer=W_init,
                        trainable=trainable,
                        restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)

        if isinstance(bias_init, str):
            bias_init = initializations.get(bias_init)()
        b = va.variable('b',
                        shape=[n_units],
                        initializer=bias_init,
                        trainable=trainable,
                        restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b)

        # Weight and bias for the transform gate
        W_T = va.variable('W_T',
                          shape=[n_inputs, n_units],
                          regularizer=None,
                          initializer=W_init,
                          trainable=trainable,
                          restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W_T)

        b_T = va.variable('b_T',
                          shape=[n_units],
                          initializer=tf.constant_initializer(-1),
                          trainable=trainable,
                          restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b_T)

        # If input is not 2d, flatten it.
        if len(input_shape) > 2:
            incoming = tf.reshape(incoming, [-1, n_inputs])

        if isinstance(activation, str):
            activation = activations.get(activation)
        elif hasattr(activation, '__call__'):
            activation = activation
        else:
            raise ValueError("Invalid Activation.")

        H = activation(tf.matmul(incoming, W) + b)
        T = tf.sigmoid(tf.matmul(incoming, W_T) + b_T)
        if transform_dropout:
            T = dropout(T, transform_dropout)
        C = tf.subtract(1.0, T)

        inference = tf.add(tf.multiply(H, T), tf.multiply(incoming, C))

        # Track activations.
        tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, inference)

    # Add attributes to Tensor to easy access weights.
    inference.scope = scope
    inference.W = W
    inference.W_t = W_T
    inference.b = b
    inference.b_t = b_T

    # Track output tensor.
    tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, inference)

    return inference
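The inference line implements the highway combination `output = H * T + x * (1 - T)`, with `H = activation(x . W + b)`, `T = sigmoid(x . W_T + b_T)`, and `b_T` initialised at -1 so the gate initially favours carrying the input through. Because of the carry term, `n_units` must match the incoming dimension. A small sketch stacking a few highway blocks (sizes are arbitrary; TFLearn wrappers assumed as before):

```python
import tflearn

net = tflearn.input_data(shape=[None, 784])
net = tflearn.fully_connected(net, 64, activation='elu')
# Highway blocks keep the same width so that x * (1 - T) is well-defined.
for _ in range(3):
    net = tflearn.highway(net, 64, activation='elu',
                          regularizer='L2', weight_decay=0.001,
                          transform_dropout=0.8)
net = tflearn.fully_connected(net, 10, activation='softmax')
net = tflearn.regression(net, optimizer='sgd', loss='categorical_crossentropy')
```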
Esempio n. 27
0
def fully_connected(incoming,
                    n_units,
                    activation='linear',
                    bias=True,
                    weights_init='truncated_normal',
                    bias_init='zeros',
                    regularizer=None,
                    weight_decay=0.001,
                    trainable=True,
                    restore=True,
                    name="FullyConnected"):
    """ Fully Connected.

    A fully connected layer.

    Input:
        (2+)-D Tensor [samples, input dim]. If not 2D, input will be flattened.

    Output:
        2D Tensor [samples, n_units].

    Arguments:
        incoming: `Tensor`. Incoming (2+)D Tensor.
        n_units: `int`, number of units for this layer.
        activation: `str` (name) or `Tensor`. Activation applied to this layer.
            (see tflearn.activations). Default: 'linear'.
        bias: `bool`. If True, a bias is used.
        weights_init: `str` (name) or `Tensor`. Weights initialization.
            (see tflearn.initializations) Default: 'truncated_normal'.
        bias_init: `str` (name) or `Tensor`. Bias initialization.
            (see tflearn.initializations) Default: 'zeros'.
        regularizer: `str` (name) or `Tensor`. Add a regularizer to this
            layer weights (see tflearn.regularizers). Default: None.
        weight_decay: `float`. Regularizer decay parameter. Default: 0.001.
        trainable: `bool`. If True, weights will be trainable.
        restore: `bool`. If True, this layer weights will be restored when
            loading a model.
        name: A name for this layer (optional). Default: 'FullyConnected'.

    Attributes:
        scope: `Scope`. This layer scope.
        W: `Tensor`. Variable representing units weights.
        b: `Tensor`. Variable representing biases.

    """
    input_shape = utils.get_incoming_shape(incoming)
    n_inputs = int(np.prod(input_shape[1:]))

    # Build variables and inference.
    with tf.name_scope(name) as scope:

        W_init = initializations.get(weights_init)()
        W_regul = None
        if regularizer:
            W_regul = lambda x: losses.get(regularizer)(x, weight_decay)
        W = va.variable(scope + 'W',
                        shape=[n_inputs, n_units],
                        regularizer=W_regul,
                        initializer=W_init,
                        trainable=trainable,
                        restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + scope, W)

        b = None
        if bias:
            b_init = initializations.get(bias_init)()
            b = va.variable(scope + 'b',
                            shape=[n_units],
                            initializer=b_init,
                            trainable=trainable,
                            restore=restore)
            tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + scope, b)

        inference = incoming
        # If input is not 2d, flatten it.
        if len(input_shape) > 2:
            inference = tf.reshape(inference, [-1, n_inputs])

        inference = tf.matmul(inference, W)
        if b: inference = tf.nn.bias_add(inference, b)
        inference = activations.get(activation)(inference)

        # Track activations.
        tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, inference)

    # Add attributes to Tensor to easy access weights.
    inference.scope = scope
    inference.W = W
    inference.b = b

    return inference
Esempio n. 28
0
def conv_1d_tranpose(layer,
                     nb_filter,
                     filter_size,
                     strides,
                     padding='same',
                     bias=True,
                     scope=None,
                     reuse=False,
                     bias_init='zeros',
                     trainable=True,
                     restore=True,
                     regularizer=None,
                     weight_decay=0.001,
                     weights_init='uniform_scaling',
                     name="deconv_1d"):
    '''
    layer: A 3-D `Tensor` of type `float` and shape `[batch, in_width, in_channels]`.
    SEE: https://www.tensorflow.org/api_docs/python/tf/nn/conv2d_backprop_input
    SEE2: https://github.com/tensorflow/tensorflow/pull/13105/commits/2ca9b908d1978a94855349309fd16a67cfd98659
    TODO: ADD weight-decay/regularizer
    '''
    input_shape = utils.get_incoming_shape(layer)
    _, in_width, in_channels = input_shape
    batch_size = tf.shape(layer)[0]

    filter_size = [filter_size, nb_filter, in_channels]
    # Note: this output shape assumes strict up-sampling (output width = strides * in_width).
    output_shape = [batch_size, strides * in_width, nb_filter]
    output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")

    strides = [1, 1, strides, 1]
    spatial_start_dim = 1
    padding = utils.autoformat_padding(padding)

    with tf.variable_scope(scope,
                           default_name=name,
                           values=[layer],
                           reuse=reuse) as scope:
        name = scope.name
        W_init = weights_init
        if isinstance(weights_init, str):
            W_init = initializations.get(weights_init)()
        elif type(W_init) in [tf.Tensor, np.ndarray, list]:
            filter_size = None

        W_regul = None
        if regularizer is not None:
            W_regul = lambda x: tflearn.losses.get(regularizer)(x, weight_decay)

        W = vs.variable('W',
                        shape=filter_size,
                        regularizer=W_regul,
                        initializer=W_init,
                        trainable=trainable,
                        restore=restore)

        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)

        # expand dims to make it compatible with conv2d
        W = tf.expand_dims(W, 0)
        layer = tf.expand_dims(layer, spatial_start_dim)
        output_shape_ = array_ops.concat(
            [output_shape_[:1], [1], output_shape_[1:]], axis=0)

        result = gen_nn_ops.conv2d_backprop_input(input_sizes=output_shape_,
                                                  filter=W,
                                                  out_backprop=layer,
                                                  strides=strides,
                                                  padding=padding,
                                                  name=name)

        result = array_ops.squeeze(result, [spatial_start_dim])
        result = tf.reshape(result, shape=output_shape)

        if bias:
            b_shape = [nb_filter]
            bias_init = initializations.get(bias_init)()
            b = vs.variable('b',
                            shape=b_shape,
                            initializer=bias_init,
                            trainable=trainable,
                            restore=restore)
            # Track per layer variables
            tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b)
            result = tf.nn.bias_add(result, b)
            result.b = b

        result.scope = scope
        result.W = W

    return result
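With the output shape built above, the layer up-samples the time axis by `strides`: an input `[batch, in_width, in_channels]` comes out as `[batch, strides * in_width, nb_filter]`. A shape-only sketch, assuming the function above is in scope together with its module imports and using synthetic data:

```python
import numpy as np
import tensorflow as tf

# Synthetic batch: sequences of length 16 with 8 channels.
x = tf.placeholder(tf.float32, shape=[None, 16, 8])

# Doubles the temporal resolution and maps 8 channels to 32 filters.
y = conv_1d_tranpose(x, nb_filter=32, filter_size=5, strides=2, padding='same')

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    out = sess.run(y, feed_dict={x: np.random.randn(4, 16, 8).astype(np.float32)})
    print(out.shape)  # expected (4, 32, 32): [batch, strides * in_width, nb_filter]
```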
Esempio n. 29
0
def conv_2d(incoming,
            nb_filter,
            filter_size,
            strides=1,
            padding='same',
            activation='linear',
            bias=True,
            weights_init='uniform_scaling',
            bias_init='zeros',
            regularizer=None,
            weight_decay=0.001,
            trainable=True,
            restore=True,
            reuse=False,
            scope=None,
            name="Conv2D"):
    input_shape = utils.get_incoming_shape(incoming)
    assert len(input_shape) == 4, "Incoming Tensor shape must be 4-D"
    filter_size = utils.autoformat_filter_conv2d(filter_size, input_shape[-1],
                                                 nb_filter)
    strides = utils.autoformat_kernel_2d(strides)
    padding = utils.autoformat_padding(padding)

    with tf.variable_scope(scope,
                           default_name=name,
                           values=[incoming],
                           reuse=reuse) as scope:
        name = scope.name

        W_init = weights_init
        if isinstance(weights_init, str):
            W_init = initializations.get(weights_init)()
        W_regul = None
        if regularizer is not None:
            W_regul = lambda x: losses.get(regularizer)(x, weight_decay)
        W = vs.variable('W',
                        shape=filter_size,
                        regularizer=W_regul,
                        initializer=W_init,
                        trainable=trainable,
                        restore=restore)

        # Track per layer variables
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)

        b = None
        if bias:
            if isinstance(bias_init, str):
                bias_init = initializations.get(bias_init)()
            b = vs.variable('b',
                            shape=nb_filter,
                            initializer=bias_init,
                            trainable=trainable,
                            restore=restore)
            # Track per layer variables
            tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b)

        inference = tf.nn.conv2d(incoming, W, strides, padding)
        if b is not None: inference = tf.nn.bias_add(inference, b)

        if activation:
            if isinstance(activation, str):
                inference = activations.get(activation)(inference)
            elif hasattr(activation, '__call__'):
                inference = activation(inference)
            else:
                raise ValueError("Invalid Activation.")

        # Track activations.
        tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, inference)

    # Add attributes to Tensor to easy access weights.
    inference.scope = scope
    inference.W = W
    inference.b = b

    # Track output tensor.
    tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, inference)

    return inference
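A closing usage sketch chaining `conv_2d` with pooling and the regression layer into a small convnet; as before, it assumes TFLearn's `input_data`, `max_pool_2d`, `fully_connected`, `regression` and `DNN` wrappers.

```python
import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d

net = tflearn.input_data(shape=[None, 28, 28, 1])
net = conv_2d(net, nb_filter=32, filter_size=3, activation='relu',
              regularizer='L2', weight_decay=0.001)
net = max_pool_2d(net, 2)
net = conv_2d(net, nb_filter=64, filter_size=3, activation='relu')
net = max_pool_2d(net, 2)
net = tflearn.fully_connected(net, 10, activation='softmax')
net = tflearn.regression(net, optimizer='adam',
                         loss='categorical_crossentropy')
model = tflearn.DNN(net)
```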