Example #1
def convolutional(name, x, W, b, **kwargs):
    """
    Convolutional layer.

    :param name: scope name
    :type name: str
    :param x: input tensor
    :type x: tensorflow.Tensor
    :param W: weight tensor
    :type W: tensorflow.Tensor
    :param b: bias tensor
    :type b: tensorflow.Tensor

    :Keyword Arguments:
        * *strides* ([int, int, int, int]) -- strides in each dimension ([1, 1, 1, 1])
        * *padding* (str) -- padding type, 'SAME' or 'VALID' ('SAME')

    :return: convolutional tensor
    :rtype: tensorflow.Tensor
    """

    strides = utils.get_kwargs(kwargs, 'strides', [1, 1, 1, 1])
    padding = utils.get_kwargs(kwargs, 'padding', 'SAME')

    assert len(strides) == 4
    for stride in strides:
        assert stride > 0

    assert padding == 'SAME' or padding == 'VALID'

    with tf.name_scope(name):
        return tf.add(tf.nn.conv2d(x, W, strides=strides, padding=padding), b)
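
All of these snippets rely on a utils.get_kwargs helper that is not shown on this page. Judging from the call sites (a kwargs dict, a key, and a default), a minimal sketch of such a helper might be:

def get_kwargs(kwargs, key, default):
    """Return kwargs[key] if present, otherwise the given default."""
    return kwargs.get(key, default)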
Example #2
def plot_normalized_histograms(values, path, **kwargs):
    """
    Plots a normalized histogram for each of the provided numpy.ndarrays.

    :param values: dict mapping names to the arrays to plot
    :type values: {str: numpy.ndarray}
    :param path: path to file to store the plot
    :type path: str

    :Keyword Arguments:
        * *bins* (int) -- number of bins for histogram (1000)
    """

    bins = utils.get_kwargs(kwargs, 'bins', 1000)
    assert bins > 0

    pyplot.clf()
    plots = []

    for key in values.keys():
        y, edges = np.histogram(values[key], bins=bins)
        centers = 0.5 * (edges[1:] + edges[:-1])
        y = y / float(y.sum())  # normalize counts to fractions
        plot, = pyplot.plot(centers, y, '-')
        plots.append(plot)

    # http://stackoverflow.com/questions/10101700/moving-matplotlib-legend-outside-of-the-axis-makes-it-cutoff-by-the-figure-box
    legend = pyplot.legend(plots,
                           values.keys(),
                           loc='upper center',
                           bbox_to_anchor=(0.5, -0.1))
    pyplot.grid(True)
    pyplot.savefig(path, bbox_extra_artists=(legend, ), bbox_inches='tight')
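
A minimal usage sketch for the function above; the array contents, dictionary keys, and output path are illustrative only:

import numpy as np

values = {
    'weights': np.random.randn(10000),
    'biases': 0.1 * np.random.randn(10000),
}
plot_normalized_histograms(values, 'histograms.png', bins=500)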
Example #3
def inner(*args, **kwargs):
    args_string_form = get_args(args)
    kwargs_string_form = get_kwargs(kwargs)
    if kwargs_string_form:
        delimiter = '' if not args_string_form else ', '
        print('{0}({1}{2}{3})'.format(func.__name__, args_string_form,
                                      delimiter, kwargs_string_form))
    else:
        print('{0}({1})'.format(func.__name__, args_string_form))
    return func(*args, **kwargs)
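
Here, inner reads like the body of a call-logging decorator: func, get_args, and get_kwargs must come from an enclosing scope that is not shown. A self-contained sketch of such a decorator, in which the name log_call and both formatting helpers are assumptions, could be:

def log_call(func):
    """Print each call of func in source-like form, then invoke it."""
    def get_args(args):
        # render positional arguments, e.g. (1, 'a') -> "1, 'a'"
        return ', '.join(repr(arg) for arg in args)

    def get_kwargs(kwargs):
        # render keyword arguments as key=value pairs, e.g. "b=2"
        return ', '.join('{0}={1!r}'.format(key, value)
                         for key, value in sorted(kwargs.items()))

    def inner(*args, **kwargs):
        args_string_form = get_args(args)
        kwargs_string_form = get_kwargs(kwargs)
        if kwargs_string_form:
            delimiter = '' if not args_string_form else ', '
            print('{0}({1}{2}{3})'.format(func.__name__, args_string_form,
                                          delimiter, kwargs_string_form))
        else:
            print('{0}({1})'.format(func.__name__, args_string_form))
        return func(*args, **kwargs)

    return inner

@log_call
def add(a, b=0):
    return a + b

add(1, b=2)  # prints: add(1, b=2)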
Example #4
def truncated_normal(name, shape, **kwargs):
    """
    Initialize a truncated normal variable, i.e. truncated after 2 standard deviations.

    :param name: name of variable
    :type name: str
    :param shape: variable shape
    :type shape: (int, int, int, int)

    :Keyword Arguments:
        * *stddev* (float) -- standard deviation (0.1)
        * *mean* (float) -- mean (0.0)

    :return: initialized variable
    :rtype: tensorflow.Variable
    """

    stddev = utils.get_kwargs(kwargs, 'stddev', 0.1)
    mean = utils.get_kwargs(kwargs, 'mean', 0.0)

    return tf.Variable(tf.truncated_normal(shape, stddev=stddev, mean=mean),
                       name=name)
Example #5
def pooling(name, x, **kwargs):
    """
    Pooling layer.

    :param name: scope name
    :type name: str
    :param x: input tensor
    :type x: tensorflow.Tensor

    :Keyword Arguments:
        * *type* (str) -- pooling type, 'MAX' or 'AVG' ('MAX')
        * *ksize* ([int, int, int, int]) -- sizes for pooling ([1, 2, 2, 1])
        * *strides* ([int, int, int, int]) -- strides in each dimension ([1, 2, 2, 1])
        * *padding* (str) -- padding type, 'SAME' or 'VALID' ('SAME')

    :return: pooling tensor
    :rtype: tensorflow.Tensor
    """

    pool_type = utils.get_kwargs(kwargs, 'type', 'MAX')
    ksize = utils.get_kwargs(kwargs, 'ksize', [1, 2, 2, 1])
    strides = utils.get_kwargs(kwargs, 'strides', [1, 2, 2, 1])
    padding = utils.get_kwargs(kwargs, 'padding', 'SAME')

    assert len(ksize) == 4
    for size in ksize:
        assert size > 0

    assert len(strides) == 4
    for stride in strides:
        assert stride > 0

    assert pool_type == 'MAX' or pool_type == 'AVG'
    assert padding == 'SAME' or padding == 'VALID'

    with tf.name_scope(name):
        # dispatch on the requested pooling type
        if pool_type == 'AVG':
            return tf.nn.avg_pool(x, ksize=ksize, strides=strides,
                                  padding=padding)
        return tf.nn.max_pool(x, ksize=ksize, strides=strides, padding=padding)
Example #6
def constant(name, shape, **kwargs):
    """
    Initialize a constant variable.

    :param name: name of variable
    :type name: str
    :param shape: variable shape
    :type shape: (int, int, int, int)

    :Keyword Arguments:
        * *value* (float) -- constant value (0.5)

    :return: initialized variable
    :rtype: tensorflow.Variable
    """

    value = utils.get_kwargs(kwargs, 'value', 0.5)

    return tf.Variable(tf.constant(value, shape=shape), name=name)
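
Taken together, the initializers and layers above compose into a small convolutional pipeline. A usage sketch, in which all shapes and variable names are illustrative:

x = tf.placeholder(tf.float32, [None, 28, 28, 1], name='x')

W = truncated_normal('W_conv1', (5, 5, 1, 32), stddev=0.05)
b = constant('b_conv1', (32,), value=0.1)

h_conv = convolutional('conv1', x, W, b, padding='SAME')
h_pool = pooling('pool1', h_conv, type='MAX')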
Example #7
def softmax(name, x, **kwargs):
    """
    Softmax layer.

    :param name: scope name
    :type name: str
    :param x: input tensor
    :type x: tensorflow.Tensor

    :Keyword Arguments:
        * *epsilon* (float) -- epsilon to add to denominator (1e-12)

    :return: softmax tensor
    :rtype: tensorflow.Tensor
    """

    epsilon = utils.get_kwargs(kwargs, 'epsilon', 1e-12)
    assert epsilon >= 0

    with tf.name_scope(name):
        return tf.div(tf.exp(x), tf.add(tf.reduce_sum(tf.exp(x)), epsilon))
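
Note that exponentiating raw logits can overflow for large values. The usual stabilization, which is not part of the snippet above, subtracts the maximum before exponentiating and leaves the result mathematically unchanged; a sketch with the same interface (the name softmax_stable is hypothetical):

def softmax_stable(name, x, **kwargs):
    """Numerically stabler softmax: softmax(x) == softmax(x - max(x))."""
    epsilon = utils.get_kwargs(kwargs, 'epsilon', 1e-12)
    assert epsilon >= 0

    with tf.name_scope(name):
        exp_shifted = tf.exp(x - tf.reduce_max(x))
        return tf.div(exp_shifted, tf.add(tf.reduce_sum(exp_shifted), epsilon))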
Example #8
def dropout(name, x, **kwargs):
    """
    Dropout layer.

    :param name: scope name
    :type name: str
    :param x: input tensor
    :type x: tensorflow.Tensor

    :Keyword Arguments:
        * *keep_prob* (float) -- keeping probability per unit (0.5)

    :return: dropout tensor
    :rtype: tensorflow.Tensor
    """

    keep_prob = utils.get_kwargs(kwargs, 'keep_prob', 0.5)
    assert keep_prob >= 0 and keep_prob <= 1

    with tf.name_scope(name):
        return tf.nn.dropout(x, keep_prob)
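
One practical caveat: dropout has to be disabled at evaluation time. Because the assert above requires keep_prob to be a plain Python number, the usual TF1 pattern of feeding the probability through a placeholder would call tf.nn.dropout directly; h_pool below stands in for some upstream activation:

keep_prob = tf.placeholder(tf.float32, name='keep_prob')
h_drop = tf.nn.dropout(h_pool, keep_prob)
# feed {keep_prob: 0.5} while training and {keep_prob: 1.0} while evaluating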
Example #9
def uniform_unit_scale(name, shape, **kwargs):
    """
    Initialize using tf.uniform_unit_scaling_initializer.

    :param name: name of variable
    :type name: str
    :param shape: variable shape
    :type shape: (int, int, int, int)

    :Keyword Arguments:
        * *factor* (float) -- factor (1.0)

    :return: initialized variable
    :rtype: tensorflow.Variable
    """

    factor = utils.get_kwargs(kwargs, 'factor', 1.0)

    return tf.get_variable(
        name,
        shape=shape,
        initializer=tf.uniform_unit_scaling_initializer(factor=factor))
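
Unlike the tf.Variable-based initializers above, tf.get_variable participates in tf.variable_scope reuse, so calling this function twice with the same name raises unless reuse is enabled. A usage sketch with an illustrative shape:

W_fc = uniform_unit_scale('W_fc1', (1024, 10), factor=1.0)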
Example #10
def batch_normalization_cpu(name, x, **kwargs):
    """
    Batch normalization layer.

    :param name: scope name
    :type name: str
    :param x: input tensor
    :type x: tensorflow.Tensor

    :Keyword Arguments:
        * *variance_epsilon* (float) -- epsilon to add to variance before dividing (0.0)

    :return: batch normalization tensor
    :rtype: tensorflow.Tensor
    """

    variance_epsilon = utils.get_kwargs(kwargs, 'variance_epsilon', 0.0)
    assert variance_epsilon >= 0

    with tf.name_scope(name):
        offset = tf.Variable(tf.constant(0.0,
                                         shape=[x.get_shape().as_list()[-1]]),
                             name='offset',
                             trainable=True)
        scale = tf.Variable(tf.constant(1.0,
                                        shape=[x.get_shape().as_list()[-1]]),
                            name='scale',
                            trainable=True)

        # Convolutional layer:
        if len(x.get_shape().as_list()) == 4:
            # this call won't work on GPU
            mean, variance = tf.nn.moments(x, [0, 1, 2], name='moments')
        # Fully connected layer: per-feature moments over the batch axis
        else:
            mean, variance = tf.nn.moments(x, [0], name='moments')

        return tf.nn.batch_normalization(x, mean, variance, offset, scale,
                                         variance_epsilon)
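
Note that this layer derives mean and variance from the current batch alone and keeps no moving averages, so evaluation batches are normalized by their own statistics. A usage sketch; h_conv stands in for some upstream activation:

h_bn = batch_normalization_cpu('bn1', h_conv, variance_epsilon=1e-3)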
Example #11
def test_master_user(self):
    prog = self.get_prog(
        "Master", "user",
        **get_kwargs(username="******", password=NO_PASSWORD),
        **GLOBAL_CONFIG)
    self.assertIs(type(prog), Master)
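
Passing two **-unpacked mappings in a single call, as above, is PEP 448 syntax and requires Python 3.5 or newer; on older interpreters the two dicts would have to be merged into one before the call.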