Example #1
class BaseAssociative(UnsupervisedLearning, BaseNetwork):
    """ Base class for associative learning.

    Parameters
    ----------
    n_inputs : int
        Number of input units.
    n_outputs : int
        Number of output units.
    weight : array-like
        Neural network weights. ``None`` means that the network weights
        will be generated randomly. A manually defined value should have
        shape ``(n_inputs, n_outputs)``. Defaults to ``None``.
    {BaseNetwork.step}
    {BaseNetwork.show_epoch}
    {BaseNetwork.shuffle_data}
    {BaseNetwork.epoch_end_signal}
    {BaseNetwork.train_end_signal}
    {Verbose.verbose}

    Methods
    -------
    {BaseSkeleton.predict}
    train(input_train, epochs=100):
        Train neural network.
    {BaseSkeleton.fit}
    {BaseNetwork.plot_errors}
    """

    n_inputs = IntProperty(minval=1, required=True)
    n_outputs = IntProperty(minval=1, required=True)
    weight = ArrayProperty()

    def __init__(self, **options):
        super(BaseAssociative, self).__init__(**options)
        self.init_layers()

    def init_layers(self):
        valid_weight_shape = (self.n_inputs, self.n_outputs)

        if self.weight is None:
            self.weight = np.random.randn(*valid_weight_shape)

        if self.weight.shape != valid_weight_shape:
            raise ValueError("Weight matrix has invalid shape. Got {}, "
                             "expected {}".format(self.weight.shape,
                                                  valid_weight_shape))

        self.weight = self.weight.astype(float)

    def train(self, input_train, epochs=100):
        return super(BaseAssociative, self).train(input_train,
                                                  epochs,
                                                  epsilon=None)
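The shape validation in ``init_layers`` can be illustrated outside of the class hierarchy. Below is a minimal standalone sketch of that logic in plain NumPy (the helper name ``init_weight`` is illustrative, not part of neupy):

import numpy as np

def init_weight(weight, n_inputs, n_outputs):
    # Mirrors BaseAssociative.init_layers: generate random weights when
    # none are given, otherwise validate the shape, then cast to float.
    valid_weight_shape = (n_inputs, n_outputs)

    if weight is None:
        weight = np.random.randn(*valid_weight_shape)

    if weight.shape != valid_weight_shape:
        raise ValueError("Weight matrix has invalid shape. Got {}, "
                         "expected {}".format(weight.shape,
                                              valid_weight_shape))

    return weight.astype(float)

weight = init_weight(None, n_inputs=3, n_outputs=2)
assert weight.shape == (3, 2)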
Example #2
File: oja.py Project: vamsijkrishna/neupy
class Oja(UnsupervisedLearning, BaseNetwork):
    """ Oja unsupervised algorithm that minimize input data feature
    space.

    Notes
    -----
    * In practice, use a very small value for the step, e.g. ``1e-7``.
    * Normalize the input data before using the Oja algorithm. The input \
    data shouldn't contain large values.
    * Use smaller initial weights if the error in the first few iterations \
    is large compared to the scale of the input values. For example, if \
    your input data has values between 0 and 1, an error of 100 is large.

    Parameters
    ----------
    minimized_data_size : int
        Expected number of features after minimization. Defaults to ``1``.
    weights : array-like or ``None``
        Predefined weights that are used both to minimize the data and to
        reconstruct it. If ``None``, the algorithm generates random weights
        before training. Defaults to ``None``.
    {BaseNetwork.step}
    {BaseNetwork.show_epoch}
    {BaseNetwork.epoch_end_signal}
    {BaseNetwork.train_end_signal}
    {Verbose.verbose}

    Methods
    -------
    reconstruct(input_data):
        Reconstruct your minimized data.
    {BaseSkeleton.predict}
    {UnsupervisedLearning.train}
    {BaseSkeleton.fit}

    Raises
    ------
    ValueError
        * Reconstruction is attempted before training.
        * Invalid number of input data features for ``train`` and \
        ``reconstruct`` methods.

    Examples
    --------
    >>> import numpy as np
    >>> from neupy import algorithms
    >>>
    >>> data = np.array([[2, 2], [1, 1], [4, 4], [5, 5]])
    >>>
    >>> ojanet = algorithms.Oja(
    ...     minimized_data_size=1,
    ...     step=0.01,
    ...     verbose=False
    ... )
    >>>
    >>> ojanet.train(data, epsilon=1e-5)
    >>> minimized = ojanet.predict(data)
    >>> minimized
    array([[-2.82843122],
           [-1.41421561],
           [-5.65686243],
           [-7.07107804]])
    >>> ojanet.reconstruct(minimized)
    array([[ 2.00000046,  2.00000046],
           [ 1.00000023,  1.00000023],
           [ 4.00000093,  4.00000093],
           [ 5.00000116,  5.00000116]])
    """
    minimized_data_size = IntProperty(minval=1)
    weights = ArrayProperty()

    def init_properties(self):
        del self.shuffle_data
        super(Oja, self).init_properties()

    def train_epoch(self, input_data, target_train):
        weights = self.weights

        minimized = dot(input_data, weights)
        reconstruct = dot(minimized, weights.T)
        error = input_data - reconstruct

        weights += self.step * dot(error.T, minimized)

        mae = np_sum(np_abs(error)) / input_data.size

        # Clear memory
        del minimized
        del reconstruct
        del error

        return mae

    def train(self, input_data, epsilon=1e-2, epochs=100):
        input_data = format_data(input_data)
        n_input_features = input_data.shape[1]

        if self.weights is None:
            self.weights = randn(n_input_features, self.minimized_data_size)

        if n_input_features != self.weights.shape[0]:
            raise ValueError(
                "Invalid number of features. Expected {}, got {}".format(
                    self.weights.shape[0], n_input_features))

        super(Oja, self).train(input_data, epsilon=epsilon, epochs=epochs)

    def reconstruct(self, input_data):
        if self.weights is None:
            raise ValueError("Train network before use reconstruct method.")

        input_data = format_data(input_data)
        if input_data.shape[1] != self.minimized_data_size:
            raise ValueError("Invalid input data feature space, expected "
                             "{}, got {}.".format(self.minimized_data_size,
                                                  input_data.shape[1]))

        return dot(input_data, self.weights.T)

    def predict(self, input_data):
        if self.weights is None:
            raise ValueError("Train network before use prediction method.")

        input_data = format_data(input_data)
        return dot(input_data, self.weights)
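The update in ``train_epoch`` is the Oja learning rule: project the input onto the weights, reconstruct it, and push the weights along the reconstruction error. A standalone NumPy sketch of the same update, independent of the neupy class machinery (names here are illustrative):

import numpy as np

def oja_epoch(X, W, step=0.01):
    # One epoch of the update performed in Oja.train_epoch.
    minimized = X.dot(W)                # (n_samples, minimized_size)
    reconstructed = minimized.dot(W.T)  # (n_samples, n_features)
    error = X - reconstructed
    W += step * error.T.dot(minimized)
    return np.abs(error).sum() / X.size  # mean absolute error

X = np.array([[2., 2.], [1., 1.], [4., 4.], [5., 5.]])
W = 0.1 * np.random.randn(2, 1)

for _ in range(100):
    mae = oja_epoch(X, W, step=0.01)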
Example #3
File: base.py Project: PranY/neupy
class BaseLayer(with_metaclass(LayerMeta, ChainConnection, BaseConfigurable)):
    """ Base class for all layers.

    Parameters
    ----------
    {layer_params}
    """
    __layer_params = """input_size : int
        Layer input size.
    weight : 2D array-like or None
        Define the layer weights. `None` means that the weights will be
        generated randomly, depending on the `init_method` property.
        `None` by default.
    init_method : {'gauss', 'bounded', 'ortho'}
        Weight initialization method.
        `gauss` generates random weights from the standard normal
        distribution.
        `bounded` generates uniform random weights within the given bounds.
        `ortho` generates a random orthogonal matrix.
    random_weight_bound : tuple of two int
        Available only when `init_method` is `bounded`, defaults
        to `(0, 1)`.
    """
    shared_docs = {'layer_params': __layer_params}

    input_size = IntProperty()
    weight = ArrayProperty(default=None)
    random_weight_bound = NumberBoundProperty(default=(0, 1))
    init_method = ChoiceProperty(default=GAUSSIAN,
                                 choices=[GAUSSIAN, BOUNDED, ORTHOGONAL])

    def __init__(self, input_size, **options):
        super(BaseLayer, self).__init__()

        self.input_size = input_size
        self.use_bias = False

        # Default variables which will change after initialization
        self.relate_to_layer = None
        self.size = None

        # If a plain function is assigned as a class attribute, Python
        # treats it as a method and passes `self` as the first argument
        # when it is called.
        if hasattr(self.__class__, 'activation_function'):
            self.activation_function = self.__class__.activation_function

        # Initialize default options
        BaseConfigurable.__init__(self, **options)

    def relate_to(self, right_layer):
        self.relate_to_layer = right_layer

    def initialize(self, with_bias=False):
        self.use_bias = with_bias
        size = self.input_size + self.use_bias
        self.size = (size, self.relate_to_layer.input_size)
        self.weight = self._init_weight()

    # --------------- Weights manipulations --------------- #

    def _init_weight(self):
        if self.weight is not None:
            return self.weight

        init_method = self.init_method

        if init_method == GAUSSIAN:
            return randn(*self.size)

        elif init_method == BOUNDED:
            return random_bounded(self.size, *self.random_weight_bound)

        elif init_method == ORTHOGONAL:
            return random_orthogonal(self.size)

    @property
    def weight_without_bias(self):
        if self.use_bias:
            return self.weight[1:, :]
        return self.weight

    # --------------- Layer operations --------------- #

    def summator(self, input_value):
        return dot(input_value, self.weight)

    def output(self, input_value):
        input_data = self.preformat_input(input_value)
        summated = self.summator(input_data)
        return self.activation_function(summated)

    def preformat_input(self, input_data):
        if self.use_bias:
            input_data = add_bias_column(input_data)
        return input_data

    def __repr__(self):
        return '{name}({size})'.format(name=self.__class__.__name__,
                                       size=self.input_size)
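The three ``init_method`` options map onto three helpers (``randn``, ``random_bounded``, ``random_orthogonal``). The sketch below approximates them with plain NumPy; it is only an illustration of the idea, not the exact neupy implementation:

import numpy as np

def gaussian_init(shape):
    # init_method='gauss': standard normal weights.
    return np.random.randn(*shape)

def bounded_init(shape, low=0, high=1):
    # init_method='bounded': uniform weights within the given bounds.
    return np.random.uniform(low, high, size=shape)

def orthogonal_init(shape):
    # init_method='ortho': random (semi-)orthogonal matrix via QR.
    rows, cols = shape
    a = np.random.randn(max(rows, cols), min(rows, cols))
    q, _ = np.linalg.qr(a)  # q has orthonormal columns
    return q if rows >= cols else q.T

weight = orthogonal_init((4, 3))
print(np.allclose(weight.T.dot(weight), np.eye(3)))  # True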
Example #4
class BaseStepAssociative(BaseAssociative):
    """
    Base class for associative algorithms that have two layers, the first
    of which uses a step function as its activation.

    Parameters
    ----------
    {BaseAssociative.n_inputs}

    {BaseAssociative.n_outputs}

    n_unconditioned : int
        Number of unconditioned units in the neural network. These
        units are not updated during the training procedure. The
        unconditioned units should be the first features in the dataset.

    weight : array-like
        Neural network weights.
        A manually defined value should have shape ``(n_inputs, n_outputs)``.
        Defaults to ``None``, which means that all unconditioned
        weights will be equal to ``1`` and all other weights to ``0``.

    bias : array-like, Initializer
        Neural network bias units.
        Defaults to :class:`Constant(-0.5) <neupy.init.Constant>`.

    {BaseNetwork.step}

    {BaseNetwork.show_epoch}

    {BaseNetwork.shuffle_data}

    {BaseNetwork.epoch_end_signal}

    {BaseNetwork.train_end_signal}

    {Verbose.verbose}

    Methods
    -------
    {BaseSkeleton.predict}

    {BaseAssociative.train}

    {BaseSkeleton.fit}
    """
    n_inputs = IntProperty(minval=2, required=True)
    n_unconditioned = IntProperty(minval=1, required=True)

    weight = ArrayProperty()
    bias = ParameterProperty(default=init.Constant(-0.5))

    def init_layers(self):
        if self.n_inputs <= self.n_unconditioned:
            raise ValueError(
                "Number of uncondition features should be less than total "
                "number of features. `n_inputs`={} and "
                "`n_unconditioned`={}".format(self.n_inputs,
                                              self.n_unconditioned))

        valid_weight_shape = (self.n_inputs, self.n_outputs)
        valid_bias_shape = (self.n_outputs, )

        if self.weight is None:
            self.weight = np.zeros(valid_weight_shape)
            self.weight[:self.n_unconditioned, :] = 1

        if isinstance(self.bias, init.Initializer):
            self.bias = self.bias.sample(valid_bias_shape)

        super(BaseStepAssociative, self).init_layers()

        if self.bias.shape != valid_bias_shape:
            raise ValueError("Bias vector has invalid shape. Got {}, "
                             "expected {}".format(self.bias.shape,
                                                  valid_bias_shape))

        self.bias = self.bias.astype(float)

    def predict(self, input_data):
        input_data = format_data(input_data, is_feature1d=False)
        raw_output = input_data.dot(self.weight) + self.bias
        return np.where(raw_output > 0, 1, 0)

    def train(self, input_train, *args, **kwargs):
        input_train = format_data(input_train, is_feature1d=False)
        return super(BaseStepAssociative, self).train(input_train, *args,
                                                      **kwargs)

    def train_epoch(self, input_train, target_train):
        weight = self.weight
        n_unconditioned = self.n_unconditioned
        predict = self.predict
        weight_delta = self.weight_delta

        for input_row in input_train:
            input_row = np.reshape(input_row, (1, input_row.size))
            layer_output = predict(input_row)
            weight[n_unconditioned:, :] += weight_delta(
                input_row, layer_output)
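``train_epoch`` only updates the conditioned part of the weight matrix; the ``weight_delta`` rule itself is provided by subclasses (for example a Hebbian rule). The sketch below uses a hypothetical Hebbian-style ``weight_delta`` stand-in to show how the loop works; it is not neupy's implementation:

import numpy as np

def hebb_like_delta(input_row, layer_output, n_unconditioned=1, step=0.1):
    # Hypothetical stand-in for the subclass-provided weight_delta:
    # outer product of the conditioned inputs and the layer output.
    conditioned = input_row[:, n_unconditioned:]
    return step * conditioned.T.dot(layer_output)

n_inputs, n_outputs, n_unconditioned = 3, 1, 1
weight = np.zeros((n_inputs, n_outputs))
weight[:n_unconditioned, :] = 1  # unconditioned weights stay fixed
bias = np.full((n_outputs,), -0.5)

X = np.array([[1., 1., 0.], [1., 0., 1.], [1., 1., 1.]])
for input_row in X:
    input_row = input_row.reshape(1, -1)
    layer_output = np.where(input_row.dot(weight) + bias > 0, 1, 0)
    weight[n_unconditioned:, :] += hebb_like_delta(
        input_row, layer_output, n_unconditioned)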
Example #5
class BaseStepAssociative(BaseAssociative):
    """
    Base class for associative algorithms that have two layers, the first
    of which uses a step function as its activation.

    Parameters
    ----------
    {BaseAssociative.n_inputs}

    {BaseAssociative.n_outputs}

    n_unconditioned : int
        Number of unconditioned units in the neural network. These
        units are not updated during the training procedure. The
        unconditioned units should be the first features in the dataset.

    weight : array-like
        Neural network weights.
        A manually defined value should have shape ``(n_inputs, n_outputs)``.
        Defaults to ``None``, which means that all unconditioned
        weights will be equal to ``1`` and all other weights to ``0``.

    bias : array-like, Initializer
        Neural network bias units.
        Defaults to :class:`Constant(-0.5) <neupy.init.Constant>`.

    {BaseNetwork.Parameters}

    Methods
    -------
    {BaseAssociative.Methods}
    """
    n_inputs = IntProperty(minval=2, required=True)
    n_unconditioned = IntProperty(minval=1, required=True)

    weight = ArrayProperty()
    bias = ParameterProperty(default=init.Constant(-0.5))

    def init_weights(self):
        if self.n_inputs <= self.n_unconditioned:
            raise ValueError(
                "Number of uncondition features should be less than total "
                "number of features. `n_inputs`={} and `n_unconditioned`={}"
                "".format(self.n_inputs, self.n_unconditioned))

        valid_weight_shape = (self.n_inputs, self.n_outputs)
        valid_bias_shape = (self.n_outputs, )

        if self.weight is None:
            self.weight = np.zeros(valid_weight_shape)
            self.weight[:self.n_unconditioned, :] = 1

        if isinstance(self.bias, init.Initializer):
            self.bias = self.bias.sample(valid_bias_shape, return_array=True)

        super(BaseStepAssociative, self).init_weights()

        if self.bias.shape != valid_bias_shape:
            raise ValueError(
                "Bias vector has invalid shape. Got {}, expected {}"
                "".format(self.bias.shape, valid_bias_shape))

        self.bias = self.bias.astype(float)

    def predict(self, X):
        X = format_data(X, is_feature1d=False)
        raw_output = X.dot(self.weight) + self.bias
        return np.where(raw_output > 0, 1, 0)

    def train(self, X_train, *args, **kwargs):
        X_train = format_data(X_train, is_feature1d=False)
        return super(BaseStepAssociative, self).train(X_train, *args, **kwargs)

    def one_training_update(self, X_train, y_train):
        weight = self.weight
        n_unconditioned = self.n_unconditioned
        predict = self.predict
        weight_delta = self.weight_delta

        error = 0

        for x_row in X_train:
            x_row = np.expand_dims(x_row, axis=0)
            layer_output = predict(x_row)

            delta = weight_delta(x_row, layer_output)
            weight[n_unconditioned:, :] += delta

            # This error tells us whether the network has converged
            # to some set of weights. A low error means that the weights
            # haven't been updated much during the training epoch.
            error += np.linalg.norm(delta)

        return error
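Because the epoch error is the summed norm of the weight updates, it can double as a stopping criterion: training can stop once the weights barely move. A minimal sketch of such a loop over a hypothetical update callable (anything that returns the summed update norm, like ``one_training_update`` above):

def train_until_converged(one_update, X_train, y_train=None,
                          epsilon=1e-5, max_epochs=100):
    # Stop as soon as one epoch changes the weights by less than epsilon.
    for epoch in range(1, max_epochs + 1):
        epoch_error = one_update(X_train, y_train)
        if epoch_error < epsilon:
            return epoch
    return max_epochs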
Example #6
class Oja(UnsupervisedLearning, BaseNetwork):
    """ Oja unsupervised algorithm which minimize feature space.

    Notes
    -----
    * In practice, use a very small value for the step, e.g. ``1e-7``.

    Parameters
    ----------
    minimized_data_size : int
        Expected number of features after minimization. Defaults to ``1``.
    weights : array-like or ``None``
        Predefined weights that are used both to minimize the data and to
        reconstruct it. If ``None``, the algorithm generates random weights
        before training. Defaults to ``None``.
    {step}
    {show_epoch}
    {verbose}
    {full_signals}

    Methods
    -------
    reconstruct(input_data):
        Reconstruct your minimized data.
    {unsupervised_train_epsilon}
    {full_methods}

    Raises
    ------
    ValueError
        * Reconstruction is attempted before training.
        * Invalid number of input data features for ``train`` and \
        ``reconstruct`` methods.

    Examples
    --------
    >>> import numpy as np
    >>> from neupy import algorithms
    >>>
    >>> data = np.array([[2, 2], [1, 1], [4, 4], [5, 5]])
    >>>
    >>> ojanet = algorithms.Oja(
    ...     minimized_data_size=1,
    ...     step=0.01,
    ...     verbose=False
    ... )
    >>>
    >>> ojanet.train(data, epsilon=1e-5)
    >>> minimized = ojanet.predict(data)
    >>> minimized
    array([[-2.82843122],
           [-1.41421561],
           [-5.65686243],
           [-7.07107804]])
    >>> ojanet.reconstruct(minimized)
    array([[ 2.00000046,  2.00000046],
           [ 1.00000023,  1.00000023],
           [ 4.00000093,  4.00000093],
           [ 5.00000116,  5.00000116]])
    """
    minimized_data_size = NonNegativeIntProperty(min_size=1)
    weights = ArrayProperty()

    def __init__(self, **options):
        super(Oja, self).__init__(FAKE_CONNECTION, **options)

    def setup_defaults(self):
        del self.use_bias
        del self.error
        del self.shuffle_data
        super(Oja, self).setup_defaults()

    def train_epoch(self, input_data, target_train):
        weights = self.weights

        minimized = dot(input_data, weights)
        reconstruct = dot(minimized, weights.T)
        error = input_data - reconstruct

        weights += self.step * dot(error.T, minimized)

        # Summing the absolute error yields a scalar mean absolute error
        # rather than an array.
        return np_abs(error).sum() / (input_data.shape[0] *
                                      input_data.shape[1])

    def train(self, input_data, epsilon=1e-5):
        input_data = format_data(input_data)
        n_input_features = input_data.shape[1]

        if self.weights is None:
            self.weights = randn(n_input_features, self.minimized_data_size)

        if n_input_features != self.weights.shape[0]:
            raise ValueError(
                "Invalid number of features. Expected {}, got {}".format(
                    self.weights.shape[0], n_input_features))

        super(Oja, self).train(input_data, epsilon=epsilon)

    def reconstruct(self, input_data):
        if self.weights is None:
            raise ValueError("Train network before use reconstruct method.")

        input_data = format_data(input_data)
        if input_data.shape[1] != self.minimized_data_size:
            raise ValueError("Invalid input data feature space, expected "
                             "{}, got {}.".format(self.minimized_data_size,
                                                  input_data.shape[1]))

        return dot(input_data, self.weights.T)

    def predict(self, input_data):
        if self.weights is None:
            raise ValueError("Train network before use prediction method.")

        input_data = format_data(input_data)
        return dot(input_data, self.weights)
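The Notes in Example #2 above also recommend normalizing the input data before using the Oja algorithm, so that it doesn't contain large values. A minimal pre-processing sketch in plain NumPy (not part of neupy):

import numpy as np

def normalize_features(X):
    # Rescale every feature into the [0, 1] range so that the
    # reconstruction error stays on the same scale as the inputs.
    X = np.asarray(X, dtype=float)
    x_min = X.min(axis=0)
    x_range = X.max(axis=0) - x_min
    x_range[x_range == 0] = 1  # avoid division by zero for constant features
    return (X - x_min) / x_range

data = np.array([[2, 2], [1, 1], [4, 4], [5, 5]])
scaled = normalize_features(data)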