Example #1
0
def create_variable(value, name, shape, trainable=True):
    """
    Create a neural network parameter as a Tensorflow variable.

    Parameters
    ----------
    value : array-like, Tensorflow variable, scalar or Initializer
        Initial value for the parameter.

    name : str
        Name of the shared variable.

    shape : tuple
        Shape of the parameter.

    trainable : bool
        When ``True`` the parameter can be modified with
        backpropagation. Defaults to ``True``.

    Returns
    -------
    Tensorflow variable.
    """
    from neupy import init

    expected_shape = shape_to_tuple(shape) if shape is not None else shape

    array_like_types = (tf.Variable, tf.Tensor, np.ndarray, np.matrix)
    if isinstance(value, array_like_types):
        # Validate the provided value against the expected shape before
        # doing anything else with it.
        actual_shape = shape_to_tuple(value.shape)

        if as_tuple(actual_shape) != as_tuple(expected_shape):
            raise ValueError(
                "Cannot create variable with name `{}`. Provided variable "
                "with shape {} is incompatible with expected shape {}"
                "".format(name, actual_shape, expected_shape))

    if isinstance(value, (tf.Variable, tf.Tensor)):
        # Existing Tensorflow entities are reused as-is.
        return value

    if isinstance(value, (int, float)):
        # A scalar is treated as a constant initializer.
        value = init.Constant(value)

    if isinstance(value, init.Initializer):
        value = value.sample(expected_shape)

    return tf.Variable(
        asfloat(value),
        name=name,
        dtype=tf.float32,
        trainable=trainable,
    )
Example #2
0
File: test_misc.py  Project: zeroyou/neupy
    def test_as_tuple(self):
        # Each case pairs the positional arguments passed to ``as_tuple``
        # with the flat tuple expected back from it.
        cases = [
            ((1, 2, 3), (1, 2, 3)),
            ((None, (1, 2, 3), None), (None, 1, 2, 3, None)),
            (((1, 2, 3), tuple()), (1, 2, 3)),
            (((1, 2, 3), [], (4, 5)), (1, 2, 3, 4, 5)),
            (((1, 2, 3), (4, 5, 3)), (1, 2, 3, 4, 5, 3)),
        ]

        for input_args, expected_output in cases:
            self.assertEqual(
                as_tuple(*input_args),
                expected_output,
                msg="Input args: {}".format(input_args))
Example #3
0
def repeat(tensor, repeats):
    """
    Repeat elements of a tensor, similar to ``numpy.repeat``.

    Parameters
    ----------
    tensor : tensor
        Input tensor.

    repeats : list, tuple
        Number of repeats per dimension. The length has to be the
        same as the number of dimensions of the input tensor.

    Returns
    -------
    tensor
        Has the same type as the input and the shape
        ``tensor.shape * repeats``.
    """
    with tf.variable_scope("repeat"):
        # Add a trailing singleton axis, tile it and reshape back.
        # The leading ``1`` keeps the first axis untouched by ``tf.tile``.
        expanded = tf.expand_dims(tensor, -1)
        tiled = tf.tile(expanded, as_tuple(1, repeats))

        target_shape = tf.shape(tensor) * tf.convert_to_tensor(repeats)
        return tf.reshape(tiled, target_shape)
Example #4
0
def function(inputs, outputs, updates=None, name=None):
    """
    Creates a callable that simulates the behaviour of Theano's
    functions.

    Parameters
    ----------
    inputs : list
        Input placeholders.

    outputs : list, Tensor
        Outputs that the function has to compute.

    updates : list or None
        Updates that have to be performed on the variables during
        every call. ``None`` means that no updates will be applied
        at the end of the computation. Defaults to ``None``.

    name : str or None
        Defaults to ``None``.

    Returns
    -------
    function
    """
    updates = updates if updates is not None else []
    session = tensorflow_session()

    # Collect every new value up front. Without explicit dependency
    # checks the update behaviour could become non-deterministic.
    pending_values = [
        pair[1] for pair in updates if isinstance(pair, (list, tuple))]

    # Force all outputs to be computed before any update is applied.
    with tf.control_dependencies(as_tuple(outputs, pending_values)):
        operations = []

        for update in updates:
            if isinstance(update, (list, tuple)):
                variable, new_value = update
                update = variable.assign(new_value)
            operations.append(update)

        # Grouping avoids extra outputs produced by the updates.
        grouped_updates = tf.group(*operations)

    @wraps(function)
    def wrapper(*input_values):
        result, _ = session.run(
            [outputs, grouped_updates],
            feed_dict=dict(zip(inputs, input_values)),
        )
        return result

    return wrapper
Example #5
0
File: iters.py  Project: wjianxz/neupy
def apply_batches(function,
                  inputs,
                  batch_size,
                  show_progressbar=False,
                  show_output=False,
                  average_outputs=False):
    """
    Split inputs into mini-batches and feed them to the function one
    batch at a time. Returns the list of per-batch outputs, or their
    average when ``average_outputs=True``.

    Parameters
    ----------
    function : func
        Function that accepts one or more positional inputs.
        Each of them should be an array-like variable that
        have exactly the same number of rows.

    inputs : tuple, list
        Arguments that will be provided to the function specified
        in the ``function`` argument.

    batch_size : int
        Mini-batch size. Maximum number of samples that will be
        used as an input to the ``function``.

    show_progressbar : bool
        When ``True`` a progress bar will be shown in the terminal.
        Defaults to ``False``.

    show_output : bool
        Assumes that outputs from the function are errors and shows
        them in the progress bar. The error will be related to the
        last epoch. Defaults to ``False``.

    average_outputs : bool
        Combine outputs from each batch into a single average. This
        option assumes that a loss per batch was calculated.
        Defaults to ``False``.

    Returns
    -------
    list
        List of function outputs.
    """
    n_samples = count_samples(inputs)

    if batch_size is None:
        batch_size = n_samples

    n_batches = count_minibatches(inputs, batch_size)

    # A real progress bar appears only when there is more than one
    # batch; a no-op bar keeps the loop below uniform otherwise.
    if show_progressbar and n_batches >= 2:
        bar = make_progressbar(n_batches, show_output)
        bar.update(0)  # triggers empty progressbar
    else:
        bar = progressbar.NullBar()

    outputs = []
    batches = minibatches(inputs, batch_size, shuffle=False)

    for index, sliced_inputs in enumerate(batches):
        output = function(*as_tuple(sliced_inputs))
        outputs.append(output)

        bar.update(index, **(dict(loss=output) if show_output else {}))

    # Clean progressbar from the screen
    bar.fd.write('\r' + ' ' * bar.term_width + '\r')

    if average_outputs:
        # When the loss was calculated per batch separately it might
        # be necessary to combine errors into a single value.
        return average_batch_errors(outputs, n_samples, batch_size)

    return outputs