Example #1
File: base.py Project: wjianxz/neupy
    def init_functions(self):
        loss = self.loss(self.target, self.network.outputs)
        val_loss = self.loss(self.target, self.network.training_outputs)

        if self.regularizer is not None:
            loss += self.regularizer(self.network)

        self.variables.update(
            step=self.step,
            loss=loss,
            val_loss=val_loss,
        )

        with tf.name_scope('training-updates'):
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)

            with tf.control_dependencies(update_ops):
                training_updates = self.init_train_updates()
                training_updates.extend(update_ops)

        tf_utils.initialize_uninitialized_variables()

        self.functions.update(
            predict=tf_utils.function(inputs=as_tuple(self.network.inputs),
                                      outputs=self.network.outputs,
                                      name='optimizer/predict'),
            one_training_update=tf_utils.function(
                inputs=as_tuple(self.network.inputs, self.target),
                outputs=loss,
                updates=training_updates,
                name='optimizer/one-update-step'),
            score=tf_utils.function(inputs=as_tuple(self.network.inputs,
                                                    self.target),
                                    outputs=val_loss,
                                    name='optimizer/score'))
Example #2
File: reshape.py Project: wjianxz/neupy
    def output(self, input, **kwargs):
        """
        Reshape the feature space for the input value.

        Parameters
        ----------
        input : array-like or Tensorflow variable
        """
        input = tf.convert_to_tensor(input, dtype=tf.float32)
        input_shape = tf.shape(input)

        n_samples = input_shape[0]
        expected_shape = self.get_output_shape(input.shape)
        feature_shape = expected_shape[1:]

        if feature_shape.is_fully_defined():
            # For cases when we have -1 in the shape and the feature shape
            # can be precomputed from the input, we want to be explicit
            # about the expected output shape. Because of the unknown batch
            # dimension, it won't be possible for tensorflow to derive the
            # exact output shape from the -1.
            output_shape = as_tuple(n_samples, feature_shape.dims)
        else:
            output_shape = as_tuple(n_samples, self.shape)

        return tf.reshape(input, output_shape)
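A small standalone illustration of the comment above, as a sketch using the TF1-style API (the placeholder and shapes here are illustrative, not taken from neupy): with an unknown batch dimension, reshaping with -1 leaves the feature dimension statically unknown, while passing the precomputed feature size keeps it in the static shape.

import tensorflow as tf  # TF1-style API, matching the snippets in this listing

x = tf.placeholder(tf.float32, shape=(None, 2, 2))
n_samples = tf.shape(x)[0]

# -1 combined with the unknown batch dimension is ambiguous, so static
# shape inference cannot recover the feature dimension.
implicit = tf.reshape(x, tf.stack([n_samples, -1]))

# Passing the precomputed feature size (2 * 2 = 4) keeps it statically known.
explicit = tf.reshape(x, tf.stack([n_samples, 4]))

print(implicit.get_shape())  # (?, ?)
print(explicit.get_shape())  # (?, 4)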
Example #3
File: model.py Project: itdxer/deeplab
    def output_shape(self):
        if not self.input_shape:
            return

        elif None not in self.new_shape:
            return as_tuple(self.new_shape, self.input_shape[0][-1])

        elif len(self.input_shape) > 1:
            height, width, _ = self.input_shape[1]
            return as_tuple(height, width, self.input_shape[0][-1])
Example #4
    def output(self, input_value):
        input_shape = tf.shape(input_value)
        # We need to get information about output shape from the input
        # tensor's shape, because for some inputs we might have
        # height and width specified as None and shape value won't be
        # computed for these dimensions.
        output_shape = self.find_output_from_input_shape(
            tf.unstack(input_shape[1:]))

        batch_size = input_shape[0]
        padding = self.padding

        if isinstance(self.padding, (list, tuple)):
            height_pad, width_pad = self.padding

            # VALID option will make sure that
            # deconvolution won't use any padding.
            padding = 'VALID'

            # conv2d_transpose doesn't know about extra paddings that we added
            # in the convolution. For this reason we have to expand our
            # expected output shape and later we will remove these paddings
            # manually after transpose convolution.
            output_shape = (
                output_shape[0] + 2 * height_pad,
                output_shape[1] + 2 * width_pad,
                output_shape[2],
            )

        output = tf.nn.conv2d_transpose(
            input_value,
            self.weight,
            as_tuple(batch_size, output_shape),
            as_tuple(1, self.stride, 1),
            padding,
            data_format="NHWC"
        )

        if isinstance(self.padding, (list, tuple)):
            h_pad, w_pad = self.padding

            if h_pad > 0:
                output = output[:, h_pad:-h_pad, :, :]

            if w_pad > 0:
                output = output[:, :, w_pad:-w_pad, :]

        if self.bias is not None:
            bias = tf.reshape(self.bias, (1, 1, 1, -1))
            output += bias

        return output
Example #5
    def test_inline_connections(self):
        input_layer = layers.Input(784)
        conn = input_layer > layers.Sigmoid(20)
        conn = conn > layers.Sigmoid(10)

        self.assertEqual(3, len(conn))

        in_sizes = [784, 784, 20]
        out_sizes = [784, 20, 10]

        for layer, in_size, out_size in zip(conn, in_sizes, out_sizes):
            self.assertEqual(layer.input_shape, as_tuple(in_size),
                             msg="Layer: {}".format(layer))
            self.assertEqual(layer.output_shape, as_tuple(out_size),
                             msg="Layer: {}".format(layer))
Example #6
    def initialize(self):
        super(BatchNorm, self).initialize()

        input_shape = as_tuple(None, self.input_shape)
        ndim = len(input_shape)

        if self.axes is None:
            # If ndim == 4 then axes = (0, 2, 3)
            # If ndim == 2 then axes = (0,)
            self.axes = tuple(axis for axis in range(ndim) if axis != 1)

        if any(axis >= ndim for axis in self.axes):
            raise ValueError("Cannot apply batch normalization on the axis "
                             "that doesn't exist.")

        opposite_axes = find_opposite_axes(self.axes, ndim)
        parameter_shape = [input_shape[axis] for axis in opposite_axes]

        if any(parameter is None for parameter in parameter_shape):
            unknown_dim_index = parameter_shape.index(None)
            raise ValueError("Cannot apply batch normalization on the axis "
                             "with unknown size over the dimension #{} "
                             "(0-based indeces).".format(unknown_dim_index))

        self.add_parameter(value=self.running_mean, shape=parameter_shape,
                           name='running_mean', trainable=False)
        self.add_parameter(value=self.running_inv_std, shape=parameter_shape,
                           name='running_inv_std', trainable=False)

        self.add_parameter(value=self.gamma, name='gamma',
                           shape=parameter_shape, trainable=True)
        self.add_parameter(value=self.beta, name='beta',
                           shape=parameter_shape, trainable=True)
Example #7
    def validate(self, input_shapes):
        # The axis value uses 0-based indices, where index 0 points
        # to the batch dimension of the input. Shapes in neupy do
        # not store information about the batch dimension, so we
        # need to put a None value in the 0th position.
        valid_shape = as_tuple(None, input_shapes[0])

        # Avoid using negative indices
        possible_axes = list(range(len(valid_shape)))
        concat_axis = possible_axes[self.axis]

        for input_shape in input_shapes[1:]:
            if len(input_shapes[0]) != len(input_shape):
                raise LayerConnectionError(
                    "Cannot concatenate layers, because inputs have "
                    "different number of dimensions. Shapes: {} and {}"
                    "".format(input_shapes[0], input_shape))

            for axis, axis_size in enumerate(input_shape, start=1):
                if axis != concat_axis and valid_shape[axis] != axis_size:
                    raise LayerConnectionError(
                        "Cannot concatenate layers, because some of them "
                        "don't match over dimension #{} (0-based indeces)."
                        "Shapes: {} and {}"
                        "".format(axis, input_shapes[0], input_shape))
Example #8
def saliency_map_graph(connection):
    """
    Returns tensorflow variables for saliency map.

    Parameters
    ----------
    connection : connection
    """
    session = tensorflow_session()

    if session in saliency_map_graph.cache:
        return saliency_map_graph.cache[session]

    x = tf.placeholder(
        shape=as_tuple(None, connection.input_shape),
        name='saliency-map/input',
        dtype=tf.float32,
    )

    with connection.disable_training_state():
        prediction = connection.output(x)

    output_class = tf.argmax(prediction[0])
    saliency, = tf.gradients(tf.reduce_max(prediction), x)

    # Caching ensures that we won't rebuild the tensorflow graph every
    # time we generate a saliency map.
    saliency_map_graph.cache[session] = x, saliency, output_class
    return x, saliency, output_class
Example #9
File: base.py Project: itdxer/neupy
    def train_epoch(self, input_train, target_train):
        """
        Train one epoch.

        Parameters
        ----------
        input_train : array-like
            Training input dataset.

        target_train : array-like
            Training target dataset.

        Returns
        -------
        float
            Training error.
        """
        errors = self.apply_batches(
            function=self.methods.train_epoch,
            input_data=input_train,
            arguments=as_tuple(target_train),

            description='Training batches',
            show_error_output=True,
        )
        return average_batch_errors(
            errors,
            n_samples=len(input_train),
            batch_size=self.batch_size,
        )
Example #10
File: base.py Project: wjianxz/neupy
    def score(self, X, y):
        """
        Check the prediction error for the specified input samples
        and their targets.

        Parameters
        ----------
        X : array-like
        y : array-like

        Returns
        -------
        float
            Prediction error.
        """
        X = self.format_input(X)
        y = self.format_target(y)

        return iters.apply_batches(
            function=self.functions.score,
            inputs=as_tuple(X, y),
            batch_size=self.batch_size,
            show_output=True,
            show_progressbar=self.logs.enable,
            average_outputs=True,
        )
Example #11
def function(inputs, outputs, updates=None, name=None):
    if updates is None:
        updates = []

    session = tensorflow_session()
    tensorflow_updates = []

    # Ensure that all new values have been computed. Absence of these
    # checks might lead to non-deterministic update behaviour.
    new_values = [val[1] for val in updates if isinstance(val, (list, tuple))]

    # Make sure that all outputs have been computed
    with tf.control_dependencies(as_tuple(outputs, new_values)):
        for update in updates:
            if isinstance(update, (list, tuple)):
                old_value, new_value = update
                update = old_value.assign(new_value)
            tensorflow_updates.append(update)

        # Group variables in order to avoid output for the updates
        tensorflow_updates = tf.group(*tensorflow_updates)

    @wraps(function)
    def wrapper(*input_values):
        feed_dict = dict(zip(inputs, input_values))
        result, _ = session.run(
            [outputs, tensorflow_updates],
            feed_dict=feed_dict,
        )
        return result

    return wrapper
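A hedged usage sketch for the helper above. It assumes that the `function` helper and neupy's `tensorflow_session()` are in scope; the variable names and the 0.9 scaling factor are illustrative. It shows how an `(old_value, new_value)` pair in `updates` turns into an assignment that runs together with the requested output:

import numpy as np
import tensorflow as tf  # TF1-style API, matching the snippet above

x = tf.placeholder(tf.float32, shape=(None, 2), name='x')
w = tf.Variable(np.ones((2, 1)), dtype=tf.float32, name='w')
y = tf.matmul(x, w)

# Every call evaluates y and, through the control dependency created
# inside `function`, applies w <- 0.9 * w afterwards.
step = function(inputs=[x], outputs=y, updates=[(w, 0.9 * w)])

tensorflow_session().run(tf.global_variables_initializer())
print(step(np.ones((3, 2))))  # w is still all ones during this call
print(step(np.ones((3, 2))))  # w has been scaled by 0.9 in between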
Example #12
    def output(self, first_input, *other_inputs):
        """
        Compute outputs for each network in the parallel
        connection.

        Parameters
        ----------
        first_input : Theano variable, array-like, dict
        *other_inputs

        Returns
        -------
        list
        """
        n_inputs = len(other_inputs) + 1  # +1 for first input
        n_connections = len(self.connections)

        if not other_inputs:
            input_values = [first_input] * n_connections

        elif n_inputs == n_connections:
            input_values = as_tuple(first_input, other_inputs)

        else:
            raise ValueError("Expected {} input values for parallel "
                             "connection, got {}"
                             "".format(n_connections, n_inputs))

        outputs = []
        for input_value, connection in zip(input_values, self.connections):
            connection_output = connection.output(input_value)
            outputs.append(connection_output)

        return outputs
Example #13
File: base.py Project: wjianxz/neupy
    def __init__(self, shape, name=None):
        super(Input, self).__init__(name=name)

        if isinstance(shape, tf.TensorShape):
            shape = tf_utils.shape_to_tuple(shape)

        self.shape = as_tuple(shape)
Example #14
    def initialize(self):
        super(BatchNorm, self).initialize()

        input_shape = as_tuple(None, self.input_shape)
        ndim = len(input_shape)

        if self.axes is None:
            # If ndim == 4 then axes = (0, 2, 3)
            # If ndim == 2 then axes = (0,)
            self.axes = tuple(axis for axis in range(ndim) if axis != 1)

        if any(axis >= ndim for axis in self.axes):
            raise ValueError("Cannot apply batch normalization on the axis "
                             "that doesn't exist.")

        opposite_axes = find_opposite_axes(self.axes, ndim)
        parameter_shape = [input_shape[axis] for axis in opposite_axes]

        if any(parameter is None for parameter in parameter_shape):
            unknown_dim_index = parameter_shape.index(None)
            raise ValueError("Cannot apply batch normalization on the axis "
                             "with unknown size over the dimension #{} "
                             "(0-based indeces).".format(unknown_dim_index))

        self.add_parameter(value=self.running_mean, shape=parameter_shape,
                           name='running_mean', trainable=False)
        self.add_parameter(value=self.running_inv_std, shape=parameter_shape,
                           name='running_inv_std', trainable=False)

        self.add_parameter(value=self.gamma, name='gamma',
                           shape=parameter_shape, trainable=True)
        self.add_parameter(value=self.beta, name='beta',
                           shape=parameter_shape, trainable=True)
Example #15
    def train_epoch(self, input_train, target_train):
        """
        Train one epoch.

        Parameters
        ----------
        input_train : array-like
            Training input dataset.

        target_train : array-like
            Training target dataset.

        Returns
        -------
        float
            Training error.
        """
        errors = self.apply_batches(
            function=self.methods.train_epoch,
            input_data=input_train,
            arguments=as_tuple(target_train),
            description='Training batches',
            show_error_output=True,
        )
        return average_batch_errors(
            errors,
            n_samples=count_samples(input_train),
            batch_size=self.batch_size,
        )
Example #16
    def prediction_error(self, input_data, target_data):
        """
        Check the prediction error for the specified input samples
        and their targets.

        Parameters
        ----------
        input_data : array-like
        target_data : array-like

        Returns
        -------
        float
            Prediction error.
        """
        input_data = self.format_input_data(input_data)
        target_data = self.format_target_data(target_data)

        errors = self.apply_batches(
            function=self.methods.prediction_error,
            input_data=input_data,
            arguments=as_tuple(target_data),
            description='Validation batches',
            show_error_output=True,
        )
        return average_batch_errors(
            errors,
            n_samples=count_samples(input_data),
            batch_size=self.batch_size,
        )
Example #17
File: base.py Project: itdxer/neupy
    def prediction_error(self, input_data, target_data):
        """
        Check the prediction error for the specified input samples
        and their targets.

        Parameters
        ----------
        input_data : array-like
        target_data : array-like

        Returns
        -------
        float
            Prediction error.
        """
        input_data = self.format_input_data(input_data)
        target_data = self.format_target_data(target_data)

        errors = self.apply_batches(
            function=self.methods.prediction_error,
            input_data=input_data,
            arguments=as_tuple(target_data),

            description='Validation batches',
            show_error_output=True,
        )
        return average_batch_errors(
            errors,
            n_samples=len(input_data),
            batch_size=self.batch_size,
        )
Example #18
class MultiParameterProperty(ParameterProperty):
    expected_type = as_tuple(init.Initializer, dict)

    def validate(self, value):
        super(MultiParameterProperty, self).validate(value)

        if isinstance(value, dict):
            for key in value:
                if key not in self.default:
                    valid_keys = ', '.join(self.default.keys())
                    raise ValueError("Parameter `{}` has invalid key: `{}`. "
                                     "Valid keys are: {}"
                                     "".format(self.name, key, valid_keys))

    def __set__(self, instance, value):
        self.validate(value)

        if isinstance(value, init.Initializer):
            # All keys will have the same initializer
            dict_value = dict.fromkeys(self.default.keys())

            for key in dict_value:
                dict_value[key] = value

            value = dict_value

        default_value = self.default.copy()
        default_value.update(value)
        value = default_value

        value = AttributeKeyDict(value)
        instance.__dict__[self.name] = value
Example #19
    def test_as_tuple(self):
        Case = namedtuple("Case", "input_args expected_output")
        testcases = (
            Case(
                input_args=(1, 2, 3),
                expected_output=(1, 2, 3),
            ),
            Case(
                input_args=(None, (1, 2, 3), None),
                expected_output=(None, 1, 2, 3, None),
            ),
            Case(
                input_args=((1, 2, 3), tuple()),
                expected_output=(1, 2, 3),
            ),
            Case(
                input_args=((1, 2, 3), (4, 5, 3)),
                expected_output=(1, 2, 3, 4, 5, 3),
            ),
        )

        for testcase in testcases:
            actual_output = as_tuple(*testcase.input_args)
            self.assertEqual(actual_output,
                             testcase.expected_output,
                             msg="Input args: {}".format(testcase.input_args))
Example #20
class PaddingProperty(TypedListProperty):
    expected_type = as_tuple(TypedListProperty.expected_type, int)

    def __set__(self, instance, value):
        if isinstance(value, int):
            value = (value, value)
        super(PaddingProperty, self).__set__(instance, value)
Example #21
File: base.py Project: webdiscover/neupy
    def apply_batches(self,
                      function,
                      input_data,
                      arguments=(),
                      description='',
                      show_progressbar=None,
                      show_error_output=False):
        """
        Apply function per each mini-batch.

        Parameters
        ----------
        function : callable

        input_data : array-like
            First argument to the function that can be divided
            into mini-batches.

        arguments : tuple
            Additional arguments to the function.

        description : str
            Some description for the progressbar. Defaults to ``''``.

        show_progressbar : None or bool
            ``True``/``False`` will show/hide the progressbar. If the
            value is ``None``, the progressbar will be visible only
            when the network expects to see logging after each
            training epoch.

        show_error_output : bool
            Assumes that the outputs from the function are errors.
            ``True`` will show this information in the progressbar.
            The error will be related to the last epoch.

        Returns
        -------
        list
            List of outputs from the function. Each output is an
            object that ``function`` returned.
        """
        arguments = as_tuple(input_data, arguments)

        if cannot_divide_into_batches(input_data, self.batch_size):
            return [function(*arguments)]

        if show_progressbar is None:
            show_progressbar = (self.training
                                and self.training.show_epoch == 1)

        return apply_batches(
            function=function,
            arguments=arguments,
            batch_size=self.batch_size,
            description=description,
            logger=self.logs,
            show_progressbar=show_progressbar,
            show_error_output=show_error_output,
        )
Example #22
File: properties.py Project: degerli/neupy
    def __init__(self, default=None, required=False, allow_none=False):
        self.name = None
        self.default = default
        self.required = required
        self.allow_none = allow_none

        if allow_none:
            self.expected_type = as_tuple(self.expected_type, type(None))
Example #23
    def output(self, input_value):
        bias = T.reshape(self.bias, (1, -1, 1, 1))
        output = T.nnet.conv2d(input_value, self.weight,
                               input_shape=as_tuple(None, self.input_shape),
                               filter_shape=self.weight_shape,
                               border_mode=self.border_mode,
                               subsample=self.stride_size)
        return output + bias
Example #24
File: properties.py Project: itdxer/neupy
    def __init__(self, default=None, required=False, allow_none=False):
        self.name = None
        self.default = default
        self.required = required
        self.allow_none = allow_none

        if allow_none:
            self.expected_type = as_tuple(self.expected_type, type(None))
Example #25
    def test_inline_connections(self):
        input_layer = layers.Input(784)
        conn = input_layer > layers.Sigmoid(20)
        conn = conn > layers.Sigmoid(10)

        self.assertEqual(3, len(conn))

        in_sizes = [784, 784, 20]
        out_sizes = [784, 20, 10]

        for layer, in_size, out_size in zip(conn, in_sizes, out_sizes):
            self.assertEqual(layer.input_shape,
                             as_tuple(in_size),
                             msg="Layer: {}".format(layer))
            self.assertEqual(layer.output_shape,
                             as_tuple(out_size),
                             msg="Layer: {}".format(layer))
Example #26
    def output(self, input_value):
        bias = T.reshape(self.bias, (1, -1, 1, 1))
        output = T.nnet.conv2d(input_value,
                               self.weight,
                               input_shape=as_tuple(None, self.input_shape),
                               filter_shape=self.weight_shape,
                               border_mode=self.border_mode,
                               subsample=self.stride_size)
        return output + bias
Example #27
    def output(self, input_value):
        """ Reshape the feature space for the input value.

        Parameters
        ----------
        input_value : array-like or Theano variable
        """
        n_samples = input_value.shape[0]
        output_shape = as_tuple(n_samples, self.output_shape)
        return T.reshape(input_value, output_shape)
Example #28
class ArrayOrScalarProperty(SharedArrayProperty):
    """ Defines array, Theano shared variable or scalar.

    Parameters
    ----------
    {BaseProperty.default}
    {BaseProperty.required}
    """
    expected_type = as_tuple(SharedArrayProperty.expected_type,
                             number_type)
Example #29
    def validate(self, input_shapes):
        valid_shape = as_tuple(None, input_shapes[0])

        for input_shape in input_shapes[1:]:
            for axis, axis_size in enumerate(input_shape, start=1):
                if axis != self.axis and valid_shape[axis] != axis_size:
                    raise LayerConnectionError(
                        "Cannot concatenate layers. Some of them don't "
                        "match over dimension #{} (0-based indeces)."
                        "".format(axis))
Example #30
    def output(self, input_value):
        """ Reshape the feature space for the input value.

        Parameters
        ----------
        input_value : array-like or Theano variable
        """
        n_samples = input_value.shape[0]
        output_shape = as_tuple(n_samples, self.output_shape)
        return T.reshape(input_value, output_shape)
Example #31
    def test_sew_together_basic(self):
        connection = surgery.sew_together([
            layers.Sigmoid(24),
            layers.Sigmoid(12) > layers.Sigmoid(6),
            layers.Sigmoid(3),
        ])
        expected_shapes = (24, 12, 6, 3)
        output_shapes = [layer.output_shape for layer in iter(connection)]

        self.assertEqual(as_tuple(*output_shapes), expected_shapes)
Example #32
class PReluAlphaProperty(SharedArrayProperty):
    """ Defines PReLu layer alpha parameter.

    Parameters
    ----------
    {BaseProperty.default}
    {BaseProperty.required}
    """
    expected_type = as_tuple(SharedArrayProperty.expected_type, number_type,
                             type(None))
Example #33
    def __init__(self, scale, name=None):
        super(Upscale, self).__init__(name=name)

        if isinstance(scale, int):
            scale = as_tuple(scale, scale)

        if any(element <= 0 for element in scale):
            raise ValueError("Only positive integers are allowed for scale")

        self.scale = scale
Example #34
File: base.py Project: itdxer/neupy
    def apply_batches(self, function, input_data, arguments=(), description='',
                      show_progressbar=None, show_error_output=False):
        """
        Apply function per each mini-batch.

        Parameters
        ----------
        function : callable

        input_data : array-like
            First argument to the function that can be divided
            into mini-batches.

        arguments : tuple
            Additional arguments to the function.

        description : str
            Some description for the progressbar. Defaults to ``''``.

        show_progressbar : None or bool
            ``True``/``False`` will show/hide the progressbar. If the
            value is ``None``, the progressbar will be visible only
            when the network expects to see logging after each
            training epoch.

        show_error_output : bool
            Assumes that the outputs from the function are errors.
            ``True`` will show this information in the progressbar.
            The error will be related to the last epoch.

        Returns
        -------
        list
            List of outputs from the function. Each output is an
            object that ``function`` returned.
        """
        arguments = as_tuple(input_data, arguments)

        if cannot_divide_into_batches(input_data, self.batch_size):
            return [function(*arguments)]

        if show_progressbar is None:
            show_progressbar = (self.training and
                                self.training.show_epoch == 1)

        return apply_batches(
            function=function,
            arguments=arguments,
            batch_size=self.batch_size,

            description=description,
            logger=self.logs,
            show_progressbar=show_progressbar,
            show_error_output=show_error_output,
        )
Example #35
    def output(self, input, **kwargs):
        input = tf.convert_to_tensor(input, tf.float32)
        # We need to get information about output shape from the input
        # tensor's shape, because for some inputs we might have
        # height and width specified as None and shape value won't be
        # computed for these dimensions.
        padding = self.padding

        # It's important that the expected output shape gets computed on
        # the Tensor (produced by tf.shape) rather than on the TensorShape
        # object. Tensorflow cannot convert a TensorShape object into a
        # Tensor and it would cause an exception in the conv2d_transpose
        # layer.
        output_shape = self.expected_output_shape(tf.shape(input))

        if isinstance(self.padding, (list, tuple)):
            height_pad, width_pad = self.padding

            # VALID option will make sure that
            # deconvolution won't use any padding.
            padding = 'VALID'

            # conv2d_transpose doesn't know about extra paddings that we added
            # in the convolution. For this reason, we have to expand our
            # expected output shape and later we will remove these paddings
            # manually after transpose convolution.
            output_shape = (
                output_shape[0],
                output_shape[1] + 2 * height_pad,
                output_shape[2] + 2 * width_pad,
                output_shape[3],
            )

        output = tf.nn.conv2d_transpose(
            value=input,
            filter=self.weight,
            output_shape=list(output_shape),
            strides=as_tuple(1, self.stride, 1),
            padding=padding,
            data_format="NHWC"
        )

        if isinstance(self.padding, (list, tuple)):
            h_pad, w_pad = self.padding

            if h_pad > 0:
                output = output[:, h_pad:-h_pad, :, :]

            if w_pad > 0:
                output = output[:, :, w_pad:-w_pad, :]

        if self.bias is not None:
            bias = tf.reshape(self.bias, (1, 1, 1, -1))
            output += bias

        return output
Example #36
def validate_graphs_before_combining(left_graph, right_graph):
    left_out_layers = left_graph.output_layers
    right_in_layers = right_graph.input_layers

    if len(left_out_layers) > 1 and len(right_in_layers) > 1:
        raise LayerConnectionError(
            "Cannot make many to many connection between graphs. One graph "
            "has {n_left_outputs} outputs (layer names: {left_names}) and "
            "the other one has {n_right_inputs} inputs (layer names: "
            "{right_names}). First graph: {left_graph}, Second graph: "
            "{right_graph}".format(
                left_graph=left_graph,
                n_left_outputs=len(left_out_layers),
                left_names=[layer.name for layer in left_out_layers],
                right_graph=right_graph,
                n_right_inputs=len(right_in_layers),
                right_names=[layer.name for layer in right_in_layers],
            ))

    left_out_shapes = as_tuple(left_graph.output_shape)
    right_in_shapes = as_tuple(right_graph.input_shape)

    for left_layer, left_out_shape in zip(left_out_layers, left_out_shapes):
        right = zip(right_in_layers, right_in_shapes)

        for right_layer, right_in_shape in right:
            if left_out_shape.is_compatible_with(right_in_shape):
                continue

            raise LayerConnectionError(
                "Cannot connect layer `{left_name}` to layer `{right_name}`, "
                "because output shape ({left_out_shape}) of the first layer "
                "is incompatible with the input shape ({right_in_shape}) "
                "of the second layer. First layer: {left_layer}, Second "
                "layer: {right_layer}".format(
                    left_layer=left_layer,
                    left_name=left_layer.name,
                    left_out_shape=left_out_shape,
                    right_layer=right_layer,
                    right_name=right_layer.name,
                    right_in_shape=right_in_shape,
                ))
Example #37
File: merge.py Project: itdxer/neupy
    def validate(self, input_shapes):
        valid_shape = as_tuple(None, input_shapes[0])

        for input_shape in input_shapes[1:]:
            for axis, axis_size in enumerate(input_shape, start=1):
                if axis != self.axis and valid_shape[axis] != axis_size:
                    raise LayerConnectionError(
                        "Cannot concatenate layers. Some of them don't "
                        "match over dimension #{} (0-based indeces)."
                        "".format(axis)
                    )
Example #38
    def initialize(self):
        super(PRelu, self).initialize()
        output_shape = as_tuple(None, self.output_shape)

        alpha_shape = [output_shape[axis] for axis in self.alpha_axes]
        self.add_parameter(
            value=self.alpha,
            name='alpha',
            shape=alpha_shape,
            trainable=True,
        )
Example #39
    def output(self, input_value):
        """
        Reshape the feature space for the input value.

        Parameters
        ----------
        input_value : array-like or Tensorflow variable
        """
        input_shape = tf.shape(input_value)
        n_samples = input_shape[0]
        output_shape = as_tuple(n_samples, self.shape)
        return tf.reshape(input_value, output_shape)
Example #40
    def output(self, input_value):
        output = T.nnet.conv2d(input_value, self.weight,
                               input_shape=as_tuple(None, self.input_shape),
                               filter_shape=self.weight_shape,
                               border_mode=self.padding,
                               subsample=self.stride)

        if self.bias is not None:
            bias = T.reshape(self.bias, (1, -1, 1, 1))
            output += bias

        return output
Example #41
    def test_tree_connection_structure(self):
        l0 = layers.Input(1)
        l1 = layers.Sigmoid(10)
        l2 = layers.Sigmoid(20)
        l3 = layers.Sigmoid(30)
        l4 = layers.Sigmoid(40)
        l5 = layers.Sigmoid(50)
        l6 = layers.Sigmoid(60)

        # Tree Structure:
        #
        # l0 - l1 - l5 - l6
        #        \
        #         l2 - l4
        #           \
        #            -- l3
        conn1 = layers.join(l0, l1, l5, l6)
        conn2 = layers.join(l0, l1, l2, l3)
        conn3 = layers.join(l0, l1, l2, l4)

        self.assertEqual(conn1.output_shape, as_tuple(60))
        self.assertEqual(conn2.output_shape, as_tuple(30))
        self.assertEqual(conn3.output_shape, as_tuple(40))
Example #42
File: test_utils.py Project: itdxer/neupy
    def test_as_tuple(self):
        Case = namedtuple("Case", "input_args expected_output")
        testcases = (
            Case(input_args=(1, 2, 3),
                 expected_output=(1, 2, 3)),
            Case(input_args=(None, (1, 2, 3), None),
                 expected_output=(None, 1, 2, 3, None)),
            Case(input_args=((1, 2, 3), (4, 5, 3)),
                 expected_output=(1, 2, 3, 4, 5, 3)),
        )

        for testcase in testcases:
            actual_output = as_tuple(*testcase.input_args)
            self.assertEqual(actual_output, testcase.expected_output,
                             msg="Input args: {}".format(testcase.input_args))
Example #43
    def initialize(self):
        super(BatchNorm, self).initialize()

        input_shape = as_tuple(None, self.input_shape)
        ndim = len(input_shape)

        if self.axes is None:
            # If ndim == 4 then axes = (0, 2, 3)
            # If ndim == 2 then axes = (0,)
            self.axes = tuple(axis for axis in range(ndim) if axis != 1)

        if any(axis >= ndim for axis in self.axes):
            raise ValueError("Cannot apply batch normalization on the axis "
                             "that doesn't exist.")

        opposite_axes = find_opposite_axes(self.axes, ndim)
        parameter_shape = [input_shape[axis] for axis in opposite_axes]

        if any(parameter is None for parameter in parameter_shape):
            unknown_dim_index = parameter_shape.index(None)
            raise ValueError("Cannot apply batch normalization on the axis "
                             "with unknown size over the dimension #{} "
                             "(0-based indeces).".format(unknown_dim_index))

        self.running_mean = theano.shared(
            name='running_mean_{}'.format(self.layer_id),
            value=asfloat(np.zeros(parameter_shape))
        )
        self.running_inv_std = theano.shared(
            name='running_inv_std_{}'.format(self.layer_id),
            value=asfloat(np.ones(parameter_shape))
        )

        if isinstance(self.gamma, number_type):
            self.gamma = np.ones(parameter_shape) * self.gamma

        if isinstance(self.beta, number_type):
            self.beta = np.ones(parameter_shape) * self.beta

        self.gamma = theano.shared(
            name='gamma_{}'.format(self.layer_id),
            value=asfloat(self.gamma),
        )
        self.beta = theano.shared(
            name='beta_{}'.format(self.layer_id),
            value=asfloat(self.beta),
        )
        self.parameters = [self.gamma, self.beta]
Example #44
    def test_cut_layers_basics(self):
        testcases = [
            dict(kwargs=dict(connection=self.network, start=0, end=2),
                 expected_sizes=(30, 10)),
            dict(kwargs=dict(connection=self.network, start=1, end=3),
                 expected_sizes=(10, 20)),
            dict(kwargs=dict(connection=self.network, start=1, end=-1),
                 expected_sizes=(10, 20)),
        ]

        for testcase in testcases:
            layers = surgery.cut(**testcase['kwargs'])
            output_shapes = [layer.output_shape for layer in iter(layers)]
            self.assertEqual(
                as_tuple(*output_shapes),
                testcase['expected_sizes']
            )
Example #45
    def test_cut_along_lines_basic(self):
        network = algorithms.GradientDescent([
            layers.Input(5),

            surgery.CutLine(),

            layers.Sigmoid(10),
            layers.Sigmoid(20),
            layers.Sigmoid(30),

            surgery.CutLine(),

            layers.Sigmoid(1),
        ])

        for connection in (network, network.connection):
            _, interested_layers, _ = surgery.cut_along_lines(connection)
            cutted_shapes = [layer.output_shape for layer in interested_layers]

            self.assertEqual(as_tuple(*cutted_shapes), (10, 20, 30))
Example #46
    def test_conv_shapes(self):
        border_modes = [
            'valid', 'full', 'half',
            4, 5,
            (6, 3), (4, 4), (1, 1)
        ]
        strides = [(1, 1), (2, 1), (2, 2)]
        x = asfloat(np.random.random((20, 2, 12, 11)))

        for stride, border_mode in product(strides, border_modes):
            input_layer = layers.Input((2, 12, 11))
            conv_layer = layers.Convolution((5, 3, 4),
                                            border_mode=border_mode,
                                            stride_size=stride)

            connection = input_layer > conv_layer
            conv_layer.initialize()

            y = conv_layer.output(x).eval()
            actual_output_shape = as_tuple(y.shape[1:])

            self.assertEqual(actual_output_shape, conv_layer.output_shape,
                             msg='border_mode={}'.format(border_mode))
Example #47
    def test_conv_shapes(self):
        paddings = [
            'valid', 'full', 'half',
            4, 5,
            (6, 3), (4, 4), (1, 1)
        ]
        strides = [(1, 1), (2, 1), (2, 2)]
        x = asfloat(np.random.random((20, 2, 12, 11)))

        for stride, padding in product(strides, paddings):
            input_layer = layers.Input((2, 12, 11))
            conv_layer = layers.Convolution((5, 3, 4),
                                            padding=padding,
                                            stride=stride)

            input_layer > conv_layer
            conv_layer.initialize()

            y = conv_layer.output(x).eval()
            actual_output_shape = as_tuple(y.shape[1:])

            self.assertEqual(actual_output_shape, conv_layer.output_shape,
                             msg='padding={}'.format(padding))
Example #48
    def test_cut_along_lines_check_cut_points(self):
        testcases = (
            dict(
                network=algorithms.GradientDescent([
                    layers.Input(5),
                    layers.Sigmoid(10),
                    layers.Sigmoid(20),
                    layers.Sigmoid(30),

                    surgery.CutLine(),

                    layers.Sigmoid(1),
                ]),
                expected_shapes=[(5, 10, 20, 30), (1,)]
            ),
            dict(
                network=algorithms.GradientDescent([
                    layers.Input(5),
                    layers.Sigmoid(10),
                    layers.Sigmoid(20),
                    layers.Sigmoid(30),

                    surgery.CutLine(),
                    surgery.CutLine(),

                    layers.Sigmoid(1),
                ]),
                expected_shapes=[(5, 10, 20, 30), (1,)]
            ),
            dict(
                network=algorithms.GradientDescent([
                    layers.Input(5),

                    surgery.CutLine(),
                    layers.Sigmoid(10),

                    surgery.CutLine(),
                    layers.Sigmoid(20),

                    surgery.CutLine(),
                    layers.Sigmoid(30),

                    surgery.CutLine(),

                    layers.Sigmoid(1),
                    surgery.CutLine(),
                ]),
                expected_shapes=[(5,), (10,), (20,), (30,), (1,)]
            ),
            dict(
                network=surgery.sew_together([
                    layers.Input(5),
                    layers.Sigmoid(10),
                    layers.Sigmoid(20),
                    layers.Sigmoid(30),
                    layers.Sigmoid(1),
                ]),
                expected_shapes=[(5, 10, 20, 30, 1)]
            ),
            dict(
                network=surgery.sew_together([
                    surgery.CutLine(),
                    layers.Input(5),
                    layers.Sigmoid(10),
                    layers.Sigmoid(20),
                    layers.Sigmoid(30),
                    layers.Sigmoid(1),
                    surgery.CutLine(),
                ]),
                expected_shapes=[(5, 10, 20, 30, 1)]
            ),
        )

        for test_id, testcase in enumerate(testcases):
            connections = surgery.cut_along_lines(testcase['network'])

            actual_shapes = []
            for connection in connections:
                if isinstance(connection, collections.Iterable):
                    shapes = [layer.output_shape for layer in connection]
                else:
                    layer = connection
                    shapes = as_tuple(layer.output_shape)

                actual_shapes.append(as_tuple(*shapes))

            self.assertEqual(
                actual_shapes,
                testcase['expected_shapes'],
                msg="Test ID: {}".format(test_id)
            )
Example #49
File: base.py Project: itdxer/neupy
    def __init__(self, size, **options):
        super(Input, self).__init__(size=size, **options)

        self.input_shape = as_tuple(self.size)
        self.initialize()
Example #50
File: base.py Project: itdxer/neupy
    def bias_shape(self):
        if self.bias is not None:
            return as_tuple(self.output_shape)
Example #51
File: base.py Project: itdxer/neupy
    def weight_shape(self):
        return as_tuple(self.input_shape, self.output_shape)
Example #52
    def __set__(self, instance, value):
        if isinstance(value, int):
            value = as_tuple(value, value)
        super(ScaleFactorProperty, self).__set__(instance, value)
Example #53
    def output_shape(self):
        if self.size is not None:
            return as_tuple(self.size)
        return self.input_shape
Example #54
    def initialize(self):
        super(Embedding, self).initialize()
        self.add_parameter(
            value=self.weight, name='weight',
            shape=as_tuple(self.input_size, self.output_size),
            trainable=True)
Example #55
File: base.py Project: InSertCod3/neupy
    def output_shape(self):
        return as_tuple(self.relate_to_layer.size)
Example #56
File: base.py Project: InSertCod3/neupy
    def input_shape(self):
        return as_tuple(self.size)
Example #57
File: base.py Project: InSertCod3/neupy
    def bias_shape(self):
        return as_tuple(self.output_shape)
Example #58
    def bias_shape(self):
        return as_tuple(self.size[0])
Example #59
    def output_shape(self):
        if self.shape is not None:
            return as_tuple(self.shape)

        n_output_features = np.prod(self.input_shape)
        return as_tuple(n_output_features)
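Taken together, the examples reduce to three recurring idioms. A short recap, assuming only the as_tuple behaviour demonstrated in Example #19 (the concrete values here are illustrative):

# Wrap a possibly-scalar size into a one-element shape tuple
# (Examples #49, #53, #59).
assert as_tuple(10) == (10,)

# Prepend the unknown batch dimension to a stored feature shape
# (Examples #6, #7, #8, #23).
assert as_tuple(None, (28, 28, 1)) == (None, 28, 28, 1)

# Compose tuples of types for isinstance checks
# (Examples #18, #20, #22, #28).
expected_type = as_tuple(int, type(None))
assert isinstance(None, expected_type)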