Example #1
class BaseStepAssociative(BaseAssociative):
    """ Base class for associative algorithms which have 2 layers and first
    one is has step function as activation.
    """
    n_unconditioned = NonNegativeIntProperty(default=1, min_size=1)

    def __init__(self, connection, **options):
        super(BaseStepAssociative, self).__init__(connection, **options)

        input_layer = self.input_layer
        n_unconditioned = self.n_unconditioned

        if not isinstance(input_layer, StepLayer):
            raise ValueError("Input layer must be `StepLayer`")

        if input_layer.input_size <= n_unconditioned:
            raise ValueError(
                "Number of uncondition features must be less than total "
                "number of features in network. #feature = {} and "
                "#unconditioned = {}".format(input_layer.input_size,
                                             n_unconditioned))

    def train_epoch(self, input_train, target_train):
        input_train = format_data(input_train)

        weight = self.input_layer.weight
        unconditioned = self.n_unconditioned
        predict = self.predict
        weight_delta = self.weight_delta

        for input_row in input_train:
            input_row = reshape(input_row, (1, input_row.size))
            layer_output = predict(input_row)
            weight[unconditioned:, :] += weight_delta(input_row, layer_output)
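
For illustration, here is a minimal standalone numpy sketch of what the update in ``train_epoch`` does, assuming a hypothetical Hebbian-style ``weight_delta`` (the actual rule is supplied by the subclass):

import numpy as np

# Network with 3 input features, 1 output, and 1 unconditioned feature.
n_unconditioned = 1
weight = np.zeros((3, 1))

input_row = np.array([[1, 0, 1]])    # one training sample, shape (1, 3)
layer_output = np.array([[1]])       # step-function output, shape (1, 1)

# Hypothetical weight_delta: outer product of inputs and outputs (Hebb rule).
weight_delta = input_row.T.dot(layer_output)

# Only the conditioned rows (below the unconditioned block) are updated.
weight[n_unconditioned:, :] += weight_delta[n_unconditioned:, :]
print(weight)    # the unconditioned first row stays zero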
Example #2
class DiscreteMemory(BaseSkeleton, Configurable):
    """ Base class for discrete memory networks.

    Notes
    -----
    * {discrete_data_note}
    """
    __discrete_data_note = """ Input and output data must contain only \
    binary values.
    """

    __discrete_params = """mode : {'sync', 'async'}
        Identifies the pattern recovery mode. ``sync`` mode tries to recover
        a pattern using the whole input vector. ``async`` mode randomly
        chooses some values from the input vector and repeats this procedure
        the number of times given by the ``n_times`` property. Defaults
        to ``sync``.
    n_times : int
        Available only in ``async`` mode. Identifies the number of random
        trials. Defaults to ``100``.
    """

    shared_docs = {
        'discrete_data_note': __discrete_data_note,
        'discrete_params': __discrete_params
    }

    mode = ChoiceProperty(default='sync', choices=['async', 'sync'])
    n_times = NonNegativeIntProperty(default=100)

    def __init__(self, **options):
        super(DiscreteMemory, self).__init__(**options)
        self.weight = None

        if 'n_times' in options and self.mode != 'async':
            self.logs.warning("You can use `n_times` property only in "
                              "`async` mode.")

    def discrete_validation(self, matrix):
        """ Validate discrete matrix.

        Parameters
        ----------
        matrix : array-like
            Matrix for validation.

        Raises
        ------
        ValueError
            If ``matrix`` contains values other than ``0`` and ``1``.
        """
        if np_any((matrix != 0) & (matrix != 1)):
            raise ValueError("This network is discrete. It accepts only "
                             "data that contains 0 and 1 values")
Example #3
class SearchThenConverge(SingleStep):
    """ Algorithm minimize learning step. Similar to
    :network:`SimpleStepMinimization`, but more complicated step update rule.

    Parameters
    ----------
    epochs_step_minimizator : int
        This parameter controls how often the step is reduced with respect
        to epochs. Defaults to ``100`` epochs. Can't be less than ``1``.
        A smaller value means the step decreases faster.
    rate_coefitient : float
        Second important parameter that controls the rate of error
        reduction. Defaults to ``0.2``.

    Attributes
    ----------
    {first_step}

    Warns
    -----
    {bp_depending}

    Examples
    --------
    >>> from neupy import algorithms
    >>>
    >>> bpnet = algorithms.Backpropagation(
    ...     (2, 4, 1),
    ...     step=0.1,
    ...     verbose=False,
    ...     optimizations=[algorithms.SearchThenConverge]
    ... )
    >>>

    See Also
    --------
    :network:`SimpleStepMinimization`
    """
    epochs_step_minimizator = NonNegativeIntProperty(min_size=1, default=100)
    rate_coefitient = NumberProperty(default=0.2)

    def after_weight_update(self, input_train, target_train):
        super(SearchThenConverge,
              self).after_weight_update(input_train, target_train)

        first_step = self.first_step
        epochs_step_minimizator = self.epochs_step_minimizator

        epoch_value = self.epoch / epochs_step_minimizator
        rated_value = (self.rate_coefitient / first_step) * epoch_value

        self.step = first_step * (1 + rated_value) / (
            1 + rated_value + epochs_step_minimizator * epoch_value**2)
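
To see how the rule behaves, here is a standalone sketch of the schedule above with the default parameters (``first_step=0.1`` is an assumption for illustration):

first_step = 0.1
rate_coefitient = 0.2
epochs_step_minimizator = 100

for epoch in (0, 100, 500, 1000):
    epoch_value = epoch / epochs_step_minimizator
    rated_value = (rate_coefitient / first_step) * epoch_value
    step = first_step * (1 + rated_value) / (
        1 + rated_value + epochs_step_minimizator * epoch_value ** 2)
    # The step stays near first_step early on and then decays roughly
    # as 1 / epoch: 0.1, ~0.0029, ~0.00044, ~0.00021.
    print(epoch, round(step, 6))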
Example #4
class RoundOutputLayer(OutputLayer):
    """ Round output layer value.

    Parameters
    ----------
    decimal_places : int
        The precision in decimal digits for output value.
    {layer_params}
    """
    decimal_places = NonNegativeIntProperty(default=0)

    def format_output(self, value):
        return np_round(value, self.decimal_places)
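
``format_output`` simply delegates to numpy's rounding; a quick sketch of the behavior:

import numpy as np

value = np.array([[0.4], [1.6], [2.5]])
# numpy rounds halves to the nearest even value, so 2.5 -> 2.
print(np.round(value, 0))    # [[0.], [2.], [2.]]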
Example #5
class SimpleStepMinimization(SingleStep):
    """ Algorithm Monotonicly minimize learning step on each iteration.
    Probably this is most simple step minimization idea.

    Parameters
    ----------
    epochs_step_minimizator : int
        This parameter controls how often the step is reduced with respect
        to epochs. Defaults to ``100`` epochs. Can't be less than ``1``.
        A smaller value means the step decreases faster.

    Attributes
    ----------
    {first_step}

    Warns
    -----
    {bp_depending}

    Examples
    --------
    >>> from neupy import algorithms
    >>>
    >>> bpnet = algorithms.Backpropagation(
    ...     (2, 4, 1),
    ...     step=0.1,
    ...     verbose=False,
    ...     optimizations=[algorithms.SimpleStepMinimization]
    ... )
    >>>

    See Also
    --------
    :network:`SearchThenConverge`
    """
    epochs_step_minimizator = NonNegativeIntProperty(min_size=1, default=100)

    def after_weight_update(self, input_train, target_train):
        super(SimpleStepMinimization, self).after_weight_update(
            input_train, target_train
        )
        self.step = self.first_step / (
            1 + self.epoch / self.epochs_step_minimizator
        )
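
A standalone sketch of the schedule above shows the hyperbolic decay (``first_step=0.1`` is assumed for illustration):

first_step = 0.1
epochs_step_minimizator = 100

for epoch in (0, 100, 1000):
    step = first_step / (1 + epoch / epochs_step_minimizator)
    print(epoch, step)    # 0.1, 0.05, ~0.00909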
Example #6
File: sofm.py Project: PranY/neupy
class SOFM(Kohonen):
    """ Self-Organizing Feature Map.

    Notes
    -----
    * Network architecture must contain two layers.
    * Second layer must be :layer:`CompetitiveOutputLayer`.

    Parameters
    ----------
    learning_radius : int
        Learning radius.
    features_grid : tuple
        Feature grid dimensions as ``(rows, columns)``. The product of the
        dimensions must equal the output layer size. Defaults to
        ``(n_outputs, 1)``.
    {full_params}

    Methods
    -------
    {unsupervised_train_epochs}
    {predict}
    {plot_errors}
    {last_error}
    """
    learning_radius = NonNegativeIntProperty(default=0)
    features_grid = NumberBoundProperty()

    # None means that this property is the same as the default step.
    # neighbours_step = NumberProperty()

    def __init__(self, connection, **options):
        super(SOFM, self).__init__(connection, **options)

        if not isinstance(self.output_layer, CompetitiveOutputLayer):
            raise ValueError("Output layer must be `CompetitiveOutputLayer`")

        if self.features_grid is not None:
            if mul(*self.features_grid) != self.output_layer.input_size:
                raise ValueError(
                    "Feature grid must contains the same size of elements as "
                    "at output layer: {0}. But it contains: {1} "
                    "({2}x{3})".format(self.output_layer.input_size,
                                       mul(*self.features_grid),
                                       self.features_grid[0],
                                       self.features_grid[1]))

    def setup_defaults(self):
        super(SOFM, self).setup_defaults()

        # if self.neighbours_step is None:
        #     self.neighbours_step = self.step

        if self.features_grid is None:
            self.features_grid = (self.output_layer.input_size, 1)

    def update_indexes(self, layer_output):
        neuron_winner = layer_output.argmax(axis=1)
        feature_bound = self.features_grid[1]

        output_with_neighbours = neuron_neighbours(
            reshape(layer_output, self.features_grid),
            (neuron_winner // feature_bound, neuron_winner % feature_bound),
            self.learning_radius)
        index_y, _ = nonzero(
            reshape(output_with_neighbours,
                    (self.output_layer.input_size, 1)))
        return index_y
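
A minimal numpy sketch of how ``update_indexes`` maps the winning neuron onto the feature grid (the neighbourhood lookup itself happens in ``neuron_neighbours``, which is not shown here):

import numpy as np

features_grid = (2, 3)                       # 2x3 grid, 6 output neurons
layer_output = np.zeros((1, 6))
layer_output[0, 4] = 1                       # neuron 4 wins

neuron_winner = layer_output.argmax(axis=1)  # array([4])
feature_bound = features_grid[1]

# Convert the flat neuron index into (row, column) grid coordinates.
print(neuron_winner // feature_bound,        # row:    [1]
      neuron_winner % feature_bound)         # column: [1]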
Example #7
class ART1(Clustering, BaseNetwork):
    """ Adaptive Resonance Theory (ART1) Network for binary
    data clustering.

    Notes
    -----
    * Weights are not random, so the result will always be reproducible.

    Parameters
    ----------
    rho : float
        Controls the reset action in the training process. Value must be
        between ``0`` and ``1``, defaults to ``0.5``.
    n_clusters : int
        Number of clusters, defaults to ``2``. Min value is also ``2``.
    {full_params}

    Methods
    -------
    train(input_data):
        Trains the network until it has clustered all samples.
    {predict}
    {plot_errors}
    {last_error}

    Examples
    --------
    >>> import numpy as np
    >>> from neupy import algorithms
    >>>
    >>> data = np.array([
    ...     [0, 1, 0],
    ...     [1, 0, 0],
    ...     [1, 1, 0],
    ... ])
    >>>
    >>> artnet = algorithms.ART1(
    ...     step=2,
    ...     rho=0.7,
    ...     n_clusters=2,
    ...     verbose=False
    ... )
    >>> artnet.predict(data)
    array([ 0.,  1.,  1.])
    """
    rho = BetweenZeroAndOneProperty(default=0.5)
    n_clusters = NonNegativeIntProperty(default=2, min_size=2)

    def __init__(self, **options):
        super(ART1, self).__init__(FAKE_CONNECTION, **options)

    def train(self, input_data):
        input_data = format_data(input_data)

        if input_data.ndim != 2:
            raise ValueError("Input value must be 2 dimentional, got "
                             "{0}".format(input_data.ndim))

        data_size = input_data.shape[1]
        n_clusters = self.n_clusters
        step = self.step
        rho = self.rho

        if list(sort(unique(input_data))) != [0, 1]:
            raise ValueError("ART1 Network works only with binary matrix, "
                             "all matix must contains only 0 and 1")

        if not hasattr(self, 'weight_21'):
            self.weight_21 = ones((data_size, n_clusters))

        if not hasattr(self, 'weight_12'):
            self.weight_12 = step / (step + n_clusters - 1) * self.weight_21.T

        weight_21 = self.weight_21
        weight_12 = self.weight_12

        if data_size != weight_21.shape[0]:
            raise ValueError(
                "Data dimension is invalid. Got a data set with {} columns, "
                "expected {} columns".format(data_size, weight_21.shape[0]))

        classes = zeros(input_data.shape[0])

        # Train network
        for i, p in enumerate(input_data):
            disabled_neurons = []
            reset_values = []
            reset = True

            while reset:
                output1 = p
                input2 = dot(weight_12, output1.T)

                output2 = zeros(input2.size)
                input2[disabled_neurons] = -inf
                winner_index = input2.argmax()
                output2[winner_index] = 1

                expectation = dot(weight_21, output2)
                output1 = logical_and(p, expectation).astype(int)

                reset_value = dot(output1.T, output1) / dot(p.T, p)
                reset = reset_value < rho

                if reset:
                    disabled_neurons.append(winner_index)
                    reset_values.append((reset_value, winner_index))

                if len(disabled_neurons) >= n_clusters:
                    # We get here only after testing all possible clusters
                    reset = False
                    winner_index = None

                if not reset:
                    if winner_index is not None:
                        weight_12[winner_index, :] = (step * output1) / (
                            step + dot(output1.T, output1) - 1)
                        weight_21[:, winner_index] = output1
                    else:
                        # Get result with the best `rho`
                        winner_index = max(reset_values)[1]

                    classes[i] = winner_index

        return classes

    def predict(self, input_data):
        return self.train(input_data)

    def train_epoch(self, input_data, target_data):
        pass
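
The core of the training loop is the vigilance (reset) test; a standalone numpy sketch of that computation:

import numpy as np

rho = 0.7
p = np.array([1, 1, 0, 1])            # binary input pattern
expectation = np.array([1, 0, 0, 1])  # top-down expectation for the winner

output1 = np.logical_and(p, expectation).astype(int)
reset_value = np.dot(output1.T, output1) / np.dot(p.T, p)

# 2 / 3 < 0.7, so this winner is disabled and the search continues.
print(reset_value, reset_value < rho)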
Example #8
class BaseNetwork(BaseSkeleton, NetworkSignals):
    """ Base class Network algorithms.

    Parameters
    ----------
    {full_params}

    Methods
    -------
    {plot_errors}
    {last_error}
    """
    error = FuncProperty(default=mse)
    use_bias = BoolProperty(default=True)
    step = NumberProperty(default=0.1)

    # Training settings
    show_epoch = NonNegativeIntProperty(min_size=1, default=1)
    shuffle_data = BoolProperty(default=False)

    def __init__(self, connection, **options):
        self.connection = clean_layers(connection)

        self.errors_in = []
        self.errors_out = []
        self.epoch = 0
        self.train_epoch_time = None

        self.layers = list(self.connection)
        self.input_layer = self.layers[0]
        self.output_layer = self.layers[-1]
        self.train_layers = self.layers[:-1]

        # Setup initialized options
        super(BaseNetwork, self).__init__(**options)
        logs = self.logs

        self.setup_defaults()

        available_classes = [c.__name__ for c in self.__class__.__mro__]

        def classname_grouper(option):
            classname = option[1].class_name
            class_priority = -available_classes.index(classname)
            return (class_priority, classname)

        # Sort and group options by classes
        grouped_options = groupby(sorted(self.options.items(),
                                         key=classname_grouper),
                                  key=classname_grouper)

        if isinstance(self.connection, LayerConnection):
            logs.header("Network structure")
            logs.log("LAYERS", self.connection)

        # Just display in terminal all network options.
        logs.header("Network options")
        for (_, clsname), class_options in grouped_options:
            if not class_options:
                # If all options defined in this class were removed,
                # just skip it.
                continue

            logs.simple("{}:".format(clsname))

            for key, data in sorted(class_options):
                if key in options:
                    logger = logs.log
                    value = options[key]
                else:
                    logger = logs.gray_log
                    value = data.value

                logger("OPTION", "{} = {}".format(key, preformat_value(value)))
            logs.empty()

        self.init_layers()
        super(BaseNetwork, self).__init__()

    def setup_defaults(self):
        """ Setup default values before populate options.
        """

    # ----------------- Neural Network Layers ---------------- #

    def init_layers(self):
        """ Initialize layers.
        """
        if self.connection == FAKE_CONNECTION:
            return

        for layer in self.layers:
            layer.initialize(with_bias=self.use_bias)

    # ----------------- Neural Network Train ---------------- #

    def _train(self,
               input_train,
               target_train=None,
               input_test=None,
               target_test=None,
               epochs=None,
               epsilon=None):

        # ----------- Pre-format target data ----------- #

        input_row1d = is_row1d(self.input_layer)
        input_train = format_data(input_train, row1d=input_row1d)

        target_row1d = is_row1d(self.output_layer)
        target_train = format_data(target_train, row1d=target_row1d)

        if input_test is not None:
            input_test = format_data(input_test, row1d=input_row1d)

        if target_test is not None:
            target_test = format_data(target_test, row1d=target_row1d)

        # ----------- Validation ----------- #

        if epochs is None and epsilon is None:
            epochs = 100

        if epochs is not None and epsilon is not None:
            raise ValueError("You can't user `epochs` and `epsilon` "
                             "attributes in one train process.")

        # ----------- Predefine parameters ----------- #

        if epochs is not None:
            self.epoch = 0
            iterepochs = range(self.epoch, epochs)
            last_epoch = epochs - 1
            predict = self.predict
            compute_error_out = (input_test is not None
                                 and target_test is not None)

        if epsilon is not None:
            iterepochs = iter_until_converge(self, epsilon)
            last_epoch = None
            predict = None
            compute_error_out = None

        # ----------- Train process ----------- #

        logs = self.logs
        logs.header("Start train")
        logs.log("TRAIN", "Train data size: {}".format(input_train.shape[0]))
        logs.log("TRAIN",
                 "Number of input features: {}".format(input_train.shape[1]))

        if epochs is not None:
            logs.log("TRAIN", "Total epochs: {}".format(epochs))

        logs.empty()

        # Optimizations for long loops. Set constant properties to
        # variables.
        errors = self.errors_in
        errors_out = self.errors_out
        show_epoch = self.show_epoch
        shuffle_data = self.shuffle_data

        # Methods
        error_func = self.error
        train_epoch = self.train_epoch
        train_epoch_end_signal = self.train_epoch_end_signal
        train_end_signal = self.train_end_signal

        for epoch in iterepochs:
            epoch_start_time = time()

            if shuffle_data:
                if target_train is not None:
                    input_train, target_train = shuffle(
                        input_train, target_train)
                else:
                    input_train, = shuffle(input_train)

            self.input_train = input_train
            self.target_train = target_train

            try:
                error = train_epoch(input_train, target_train)

                if compute_error_out:
                    error_out = error_func(predict(input_test), target_test)
                    errors_out.append(error_out)

                errors.append(error)
                self.train_epoch_time = time() - epoch_start_time

                if epoch % show_epoch == 0 or epoch == last_epoch:
                    logs.data("""
                        Epoch {epoch}
                        Error in:  {error}
                        Error out: {error_out}
                        Epoch time: {epoch_time} sec
                    """.format(epoch=self.epoch,
                               error=self.last_error_in() or '-',
                               error_out=self.last_error_out() or '-',
                               epoch_time=round(self.train_epoch_time, 5)))

                if train_epoch_end_signal is not None:
                    train_epoch_end_signal(self)

                self.epoch = epoch + 1

            except StopIteration as err:
                logs.log("TRAIN",
                         "Epoch #{} stopped. {}".format(self.epoch, str(err)))
                break

        if train_end_signal is not None:
            train_end_signal(self)

        logs.log("TRAIN", "End train")

    # ----------------- Errors ----------------- #

    def _last_error(self, errors):
        if errors and errors[-1] is not None:
            return normilize_error_output(errors[-1])

    def last_error_in(self):
        return self._last_error(self.errors_in)

    def last_error(self):
        return self._last_error(self.errors_in)

    def last_error_out(self):
        return self._last_error(self.errors_out)

    def previous_error(self):
        errors = self.errors_in
        return normilize_error_output(errors[-2]) if len(errors) >= 2 else None

    def _normalized_errors(self, errors):
        if not len(errors) or isinstance(errors[0], float):
            return errors

        self.logs.warn("Your errors bad formated for plot output. "
                       "They will be normilized.")

        normalized_errors = []
        for error in errors:
            normalized_errors.append(normilize_error_output(error))

        return normalized_errors

    def normalized_errors_in(self):
        return self._normalized_errors(self.errors_in)

    def normalized_errors_out(self):
        return self._normalized_errors(self.errors_out)

    def plot_errors(self, use_semilog=False):
        if not self.errors_in:
            return

        errors_in = self.normalized_errors_in()
        errors_out = self.normalized_errors_out()
        errors_range = arange(len(errors_in))
        plot_function = plt.semilogx if use_semilog else plt.plot

        line_error_in, = plot_function(errors_range, errors_in)

        if errors_out:
            line_error_out, = plot_function(errors_range, errors_out)
            plt.legend([line_error_in, line_error_out],
                       ['Error in', 'Error out'])

        plt.xlim(0)

        plt.title('Train errors')
        plt.ylabel('Error')
        plt.xlabel('Epoch')

        plt.show()

    # ----------------- Representations ----------------- #

    def get_class_name(self):
        return self.__class__.__name__

    def __repr__(self):
        classname = self.get_class_name()
        options_repr = self._repr_options()

        if self.connection != FAKE_CONNECTION:
            return "{}({}, {})".format(classname, self.connection,
                                       options_repr)
        return "{}({})".format(classname, options_repr)
Example #9
class RBFKMeans(UnsupervisedLearning, Clustering, BaseNetwork):
    """ Radial basis function K-means for clustering.

    Parameters
    ----------
    n_clusters : int
        Number of clusters in the data set.
    {show_epoch}
    {shuffle_data}
    {full_signals}
    {verbose}

    Attributes
    ----------
    centers : numpy array [n_clusters, n_features]
        After training this property will contain coordinates
        to cluster centers.

    Methods
    -------
    {unsupervised_train_epsilon}
    {full_methods}

    Examples
    --------
    >>> import numpy as np
    >>> from neupy.algorithms import RBFKMeans
    >>>
    >>> data = np.array([
    ...     [0.11, 0.20],
    ...     [0.25, 0.32],
    ...     [0.64, 0.60],
    ...     [0.12, 0.42],
    ...     [0.70, 0.73],
    ...     [0.30, 0.27],
    ...     [0.43, 0.81],
    ...     [0.44, 0.87],
    ...     [0.12, 0.92],
    ...     [0.56, 0.67],
    ...     [0.36, 0.35],
    ... ])
    >>> rbfk_net = RBFKMeans(n_clusters=2, verbose=False)
    >>> rbfk_net.train(data, epsilon=1e-5)
    >>> rbfk_net.centers
    array([[ 0.228     ,  0.312     ],
           [ 0.48166667,  0.76666667]])
    >>>
    >>> new_data = np.array([[0.1, 0.1], [0.9, 0.9]])
    >>> rbfk_net.predict(new_data)
    array([[ 0.],
           [ 1.]])
    """
    n_clusters = NonNegativeIntProperty(min_size=2)

    def __init__(self, **options):
        self.centers = None
        super(RBFKMeans, self).__init__(FAKE_CONNECTION, **options)

    def setup_defaults(self):
        del self.use_bias
        del self.error
        del self.step
        super(RBFKMeans, self).setup_defaults()

    def predict(self, input_data):
        input_data = format_data(input_data)

        centers = self.centers
        classes = zeros((input_data.shape[0], 1))

        for i, value in enumerate(input_data):
            classes[i] = argmin(norm(centers - value, axis=1))

        return classes

    def train_epoch(self, input_train, target_train):
        centers = self.centers
        old_centers = centers.copy()
        output_train = self.predict(input_train)

        for i, center in enumerate(centers):
            positions = argwhere(output_train[:, 0] == i)

            if not np_any(positions):
                continue

            class_data = take(input_train, positions, axis=0)
            centers[i, :] = (1 / len(class_data)) * np_sum(class_data, axis=0)

        return np_abs(old_centers - centers)

    def train(self, input_train, epsilon=1e-5, epochs=100):
        n_clusters = self.n_clusters
        input_train = format_data(input_train)

        if input_train.shape[0] <= n_clusters:
            raise ValueError("Count of clusters must be less than count of "
                             "input data.")

        self.centers = input_train[:n_clusters, :].copy()
        super(RBFKMeans, self).train(input_train, epsilon=epsilon,
                                     epochs=epochs)
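
The center update in ``train_epoch`` is the classic K-means step; a standalone numpy sketch:

import numpy as np

data = np.array([[0.1, 0.2], [0.2, 0.3], [0.8, 0.9]])
assignments = np.array([0, 0, 1])     # cluster index per sample
centers = np.array([[0.0, 0.0], [1.0, 1.0]])

# Move each center to the mean of the samples assigned to it.
for i in range(len(centers)):
    members = data[assignments == i]
    if len(members):
        centers[i] = members.mean(axis=0)

print(centers)    # [[0.15 0.25] [0.8  0.9 ]]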
Example #10
class Oja(UnsupervisedLearning, BaseNetwork):
    """ Oja unsupervised algorithm which minimize feature space.

    Notes
    -----
    * In practice, use a very small step value, for example ``1e-7``.
    * Normalize the input data before using the Oja algorithm. Input data
    shouldn't contain large values.
    * Set up smaller values for the weights if the error in the first few
    iterations is big compared to the scale of the input values. For example,
    if your input data has values between 0 and 1, an error value of 100
    is big.

    Parameters
    ----------
    minimized_data_size : int
        Expected number of features after minimization. Defaults to ``1``.
    weights : array-like or ``None``
        Predefined weights that transform the data in both directions.
        If ``None``, the algorithm generates random weights before training.
        Defaults to ``None``.
    {step}
    {show_epoch}
    {verbose}
    {full_signals}

    Methods
    -------
    reconstruct(input_data):
        Reconstruct your minimized data.
    {unsupervised_train_epsilon}
    {full_methods}

    Raises
    ------
    ValueError
        * Reconstructing without training first.
        * Invalid number of input data features for the ``train`` and \
        ``reconstruct`` methods.

    Examples
    --------
    >>> import numpy as np
    >>> from neupy import algorithms
    >>>
    >>> data = np.array([[2, 2], [1, 1], [4, 4], [5, 5]])
    >>>
    >>> ojanet = algorithms.Oja(
    ...     minimized_data_size=1,
    ...     step=0.01,
    ...     verbose=False
    ... )
    >>>
    >>> ojanet.train(data, epsilon=1e-5)
    >>> minimized = ojanet.predict(data)
    >>> minimized
    array([[-2.82843122],
           [-1.41421561],
           [-5.65686243],
           [-7.07107804]])
    >>> ojanet.reconstruct(minimized)
    array([[ 2.00000046,  2.00000046],
           [ 1.00000023,  1.00000023],
           [ 4.00000093,  4.00000093],
           [ 5.00000116,  5.00000116]])
    """
    minimized_data_size = NonNegativeIntProperty(min_size=1)
    weights = ArrayProperty()

    def __init__(self, **options):
        super(Oja, self).__init__(FAKE_CONNECTION, **options)

    def setup_defaults(self):
        del self.use_bias
        del self.error
        del self.shuffle_data
        super(Oja, self).setup_defaults()

    def train_epoch(self, input_data, target_train):
        weights = self.weights

        minimized = dot(input_data, weights)
        reconstruct = dot(minimized, weights.T)
        error = input_data - reconstruct

        weights += self.step * dot(error.T, minimized)

        mae = np_sum(np_abs(error)) / input_data.size

        del minimized
        del reconstruct
        del error

        return mae

    def train(self, input_data, epsilon=1e-2, epochs=100):
        input_data = format_data(input_data)
        n_input_features = input_data.shape[1]

        if self.weights is None:
            self.weights = randn(n_input_features, self.minimized_data_size)

        if n_input_features != self.weights.shape[0]:
            raise ValueError(
                "Invalid number of features. Expected {}, got {}".format(
                    self.weights.shape[0], n_input_features))

        super(Oja, self).train(input_data, epsilon=epsilon, epochs=epochs)

    def reconstruct(self, input_data):
        if self.weights is None:
            raise ValueError("Train network before use reconstruct method.")

        input_data = format_data(input_data)
        if input_data.shape[1] != self.minimized_data_size:
            raise ValueError("Invalid input data feature space, expected "
                             "{}, got {}.".format(input_data.shape[1],
                                                  self.minimized_data_size))

        return dot(input_data, self.weights.T)

    def predict(self, input_data):
        if self.weights is None:
            raise ValueError("Train network before use prediction method.")

        input_data = format_data(input_data)
        return dot(input_data, self.weights)
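
A standalone numpy sketch of one Oja training epoch, repeating the arithmetic from ``train_epoch`` above (the initial ``weights`` are an arbitrary choice for illustration):

import numpy as np

step = 0.01
data = np.array([[2., 2.], [1., 1.], [4., 4.], [5., 5.]])
weights = np.array([[0.5], [0.5]])      # (n_features, minimized_data_size)

minimized = data.dot(weights)           # project down, shape (4, 1)
reconstruct = minimized.dot(weights.T)  # project back, shape (4, 2)
error = data - reconstruct

# Nudge the weights along the reconstruction error.
weights += step * error.T.dot(minimized)
print(np.abs(error).sum() / data.size)  # mean absolute error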
Example #11
class CMAC(SupervisedLearning, BaseNetwork):
    """ CMAC Network based on memory.

    Notes
    -----
    * Network always uses Mean Absolute Error (MAE).
    * Works for single and multi output values.

    Parameters
    ----------
    quantization : int
        The network transforms every input into discrete values. The
        quantization value controls the total number of possible values
        after quantization, defaults to ``10``.
    associative_unit_size : int
        Number of associative blocks in memory, defaults to ``2``.
    {step}
    {show_epoch}
    {shuffle_data}
    {full_signals}

    Methods
    -------
    {fit}
    {supervised_train}
    {predict}
    {last_error}
    {plot_errors}

    Examples
    --------
    >>> import numpy as np
    >>> from neupy.algorithms import CMAC
    >>>
    >>> train_space = np.linspace(0, 2 * np.pi, 100)
    >>> test_space = np.linspace(np.pi, 2 * np.pi, 50)
    >>>
    >>> input_train = np.reshape(train_space, (100, 1))
    >>> input_test = np.reshape(test_space, (50, 1))
    >>>
    >>> target_train = np.sin(input_train)
    >>> target_test = np.sin(input_test)
    >>>
    >>> cmac = CMAC(
    ...     quantization=100,
    ...     associative_unit_size=32,
    ...     step=0.2,
    ... )
    ...
    >>> cmac.train(input_train, target_train, epochs=100)
    >>> predicted_test = cmac.predict(input_test)
    >>> cmac.error(target_test, predicted_test)
    0.0023639417543036569
    """
    quantization = NonNegativeIntProperty(default=10)
    associative_unit_size = NonNegativeIntProperty(default=2, min_size=2)

    def __init__(self, **options):
        self.weights = {}
        super(CMAC, self).__init__(FAKE_CONNECTION, **options)

    def setup_defaults(self):
        del self.use_bias
        del self.error
        super(CMAC, self).setup_defaults()

    def predict(self, input_data):
        input_data = format_data(input_data)

        get_memory_coords = self.get_memory_coords
        get_result_by_coords = self.get_result_by_coords
        predicted = []

        for input_sample in self.quantize(input_data):
            coords = get_memory_coords(input_sample)
            predicted.append(get_result_by_coords(coords))

        return array(predicted)

    def get_result_by_coords(self, coords):
        return sum(self.weights.setdefault(coord, 0)
                   for coord in coords) / self.associative_unit_size

    def get_memory_coords(self, quantized_value):
        assoc_unit_size = self.associative_unit_size

        for i in range(assoc_unit_size):
            point = ((quantized_value + i) / assoc_unit_size).astype(int)
            yield tuple(concatenate([point, [i]]))

    def quantize(self, input_data):
        return (input_data * self.quantization).astype(int)

    def train_epoch(self, input_train, target_train):
        get_memory_coords = self.get_memory_coords
        get_result_by_coords = self.get_result_by_coords
        weights = self.weights

        quantized_input = self.quantize(input_train)
        errors = 0

        for input_sample, target_sample in zip(quantized_input, target_train):
            coords = list(get_memory_coords(input_sample))
            predicted = get_result_by_coords(coords)

            error = target_sample - predicted
            for coord in coords:
                weights[coord] += self.step * error

            errors += abs(error)
        return errors / input_train.shape[0]
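
A minimal sketch of the CMAC addressing scheme, repeating the arithmetic from ``quantize`` and ``get_memory_coords`` above: an input is quantized and then mapped onto ``associative_unit_size`` overlapping memory cells.

import numpy as np

quantization, associative_unit_size = 10, 2
input_sample = np.array([0.57])

quantized = (input_sample * quantization).astype(int)    # array([5])

coords = []
for i in range(associative_unit_size):
    point = (quantized + i) // associative_unit_size
    coords.append(tuple(np.concatenate([point, [i]])))

print(coords)    # [(2, 0), (3, 1)]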