示例#1
0
    def train(self, input_train, target_train, input_test=None,
              target_test=None, epochs=100, epsilon=None,
              summary_type='table'):
        """
        Trains the network on supervised data.

        Parameters
        ----------
        input_train : array-like
        target_train : array-like
        input_test : array-like or None
        target_test : array-like or None
        epochs : int
            Number of training epochs. Defaults to ``100``.
        epsilon : float or None
        summary_type : str
            Defaults to ``'table'``.

        Raises
        ------
        ValueError
            When only one of ``input_test``/``target_test`` is provided.
        """
        # Test data is only usable when both pieces are present;
        # XOR of the None-checks detects a partial definition.
        is_test_data_partialy_missed = (
            (input_test is None) != (target_test is None)
        )

        if is_test_data_partialy_missed:
            # Message fixed: the old one claimed both samples were
            # missing, but this branch fires when exactly one is missing.
            raise ValueError("Only one of the test samples was provided. "
                             "They must be defined both or none of them.")

        input_train = format_data(input_train)
        target_train = format_data(target_train)

        if input_test is not None:
            input_test = format_data(input_test)

        if target_test is not None:
            target_test = format_data(target_test)

        return super(SupervisedLearning, self).train(
            input_train=input_train, target_train=target_train,
            input_test=input_test, target_test=target_test,
            epochs=epochs, epsilon=epsilon,
            summary_type=summary_type
        )
示例#2
0
文件: grnn.py 项目: itdxer/neupy
    def train(self, input_train, target_train, copy=True):
        """
        Memorizes the training samples. Networks of this kind do not
        optimize anything during training; they only store the data
        and reuse it later during prediction.

        Parameters
        ----------
        input_train : array-like (n_samples, n_features)

        target_train : array-like (n_samples,)
            Target variable should be a vector or a matrix with a
            single feature column.

        copy : bool
            When ``True`` the input matrices get copied.
            Defaults to ``True``.

        Raises
        ------
        ValueError
            When the target has more than one feature column.
        """
        formatted_input = format_data(input_train, copy=copy)
        formatted_target = format_data(target_train, copy=copy)

        if formatted_target.shape[1] != 1:
            raise ValueError("Target value must be one dimensional array")

        LazyLearningMixin.train(self, formatted_input, formatted_target)
示例#3
0
    def wrapper(actual, expected, *args, **kwargs):
        """Formats both samples, applies the wrapped error function."""
        # Normalize both samples before computing the error value.
        output = function(format_data(actual), format_data(expected),
                          *args, **kwargs)
        # .item(0) extracts the first element, which also converts a
        # one-element vector into a plain scalar automatically.
        return output.eval().item(0)
示例#4
0
文件: grnn.py 项目: EdwardBetts/neupy
    def train(self, input_train, target_train, copy=True):
        """
        Stores the training samples for later use during prediction.

        Parameters
        ----------
        input_train : array-like (n_samples, n_features)
        target_train : array-like (n_samples,)
            Must contain exactly one feature column.
        copy : bool
            Copies the input matrices when ``True``.
            Defaults to ``True``.

        Raises
        ------
        ValueError
            When the target has more than one column.
        """
        input_train = format_data(input_train, copy=copy)
        target_train = format_data(target_train, copy=copy)

        if target_train.shape[1] != 1:
            # Fixed typo in the error message ("dimentional").
            raise ValueError("Target value must be one dimensional array")

        LazyLearning.train(self, input_train, target_train)
示例#5
0
文件: errors.py 项目: Neocher/neupy
def _preformat_inputs(actual, predicted):
    """Formats both samples and validates that their shapes match."""
    actual, predicted = format_data(actual), format_data(predicted)

    if actual.shape != predicted.shape:
        raise ValueError("Actual and predicted values have different shapes. "
                         "Actual shape {}, predicted shape {}"
                         "".format(actual.shape, predicted.shape))

    return actual, predicted
示例#6
0
    def prediction_error(self, input_data, target_data=None):
        """
        Compute the pseudo-likelihood of input samples.

        Parameters
        ----------
        input_data : array-like
            Values of the visible layer

        target_data : array-like, optional
            Unused in the body; kept only for signature compatibility.

        Returns
        -------
        float
            Value of the pseudo-likelihood.
        """
        # With a single visible unit, 1-D input needs a feature axis.
        is_input_feature1d = (self.n_visible == 1)
        input_data = format_data(input_data, is_input_feature1d)

        # Evaluate the error batch by batch to keep memory bounded.
        errors = self.apply_batches(
            function=self.methods.prediction_error,
            input_data=input_data,
            description='Validation batches',
            show_error_output=True,
        )
        # NOTE(review): assumes average_batch_errors properly weights a
        # smaller final batch -- confirm against its implementation.
        return average_batch_errors(
            errors,
            n_samples=len(input_data),
            batch_size=self.batch_size,
        )
示例#7
0
    def hidden_to_visible(self, hidden_input):
        """
        Propagates output from the hidden layer backward
        to the visible one.

        Parameters
        ----------
        hidden_input : array-like (n_samples, n_hidden_features)

        Returns
        -------
        array-like
        """
        # With a single hidden unit, 1-D input needs a feature axis.
        formatted_input = format_data(hidden_input, self.n_hidden == 1)

        batch_outputs = self.apply_batches(
            function=self.methods.hidden_to_visible,
            input_data=formatted_input,
            description='Visible from hidden batches',
            show_progressbar=True,
            show_error_output=False,
        )

        # Stack per-batch outputs back into a single sample array.
        return np.concatenate(batch_outputs, axis=0)
示例#8
0
    def visible_to_hidden(self, visible_input):
        """
        Populates data through the network and returns output
        from the hidden layer.

        Parameters
        ----------
        visible_input : array-like (n_samples, n_visible_features)

        Returns
        -------
        array-like
        """
        # With a single visible unit, 1-D input needs a feature axis.
        is_input_feature1d = (self.n_visible == 1)
        visible_input = format_data(visible_input, is_input_feature1d)

        # Propagate batch by batch to keep memory usage bounded.
        outputs = self.apply_batches(
            function=self.methods.visible_to_hidden,
            input_data=visible_input,
            description='Hidden from visible batches',
            show_progressbar=True,
            show_error_output=False,
        )

        # Stack per-batch outputs back into one array of samples.
        return np.concatenate(outputs, axis=0)
示例#9
0
    def predict(self, input_data):
        """
        Make a prediction from the input data.

        Parameters
        ----------
        input_data : array-like (n_samples, n_features)

        Raises
        ------
        ValueError
            In case if something is wrong with input data.

        Returns
        -------
        array-like (n_samples,)
        """
        # Parent's predict performs the shared validation step.
        super(GRNN, self).predict(input_data)

        input_data = format_data(input_data)

        input_data_size = input_data.shape[1]
        train_data_size = self.input_train.shape[1]

        if input_data_size != train_data_size:
            # Grammar fixed ("must contain"), matching the message used
            # by the other implementation of this method.
            raise ValueError("Input data must contain {0} features, got "
                             "{1}".format(train_data_size, input_data_size))

        # Prediction is the kernel-ratio-weighted average of the stored
        # training targets.
        ratios = pdf_between_data(self.input_train, input_data, self.std)
        return (dot(self.target_train.T, ratios) / ratios.sum(axis=0)).T
示例#10
0
    def predict_proba(self, input_data):
        """
        Predict probabilities for each class.

        Parameters
        ----------
        input_data : array-like (n_samples, n_features)

        Returns
        -------
        array-like (n_samples, n_classes)
        """
        # Raw per-class responses, computed batch by batch.
        outputs = self.apply_batches(
            function=self.predict_raw,
            input_data=format_data(input_data),

            description='Prediction batches',
            show_progressbar=True,
            show_error_output=False,
            scalar_output=False,
        )
        # Batches are joined along axis=1, so the raw output keeps
        # classes on axis 0 and samples on axis 1.
        raw_output = np.concatenate(outputs, axis=1)

        # Normalize each sample's responses so they sum to one.
        total_output_sum = raw_output.sum(axis=0).reshape((-1, 1))
        return raw_output.T / total_output_sum
示例#11
0
    def train(self, input_data, target_data, epochs=100):
        """
        Trains every expert network and the gating network.

        Parameters
        ----------
        input_data : array-like (n_samples, n_features)
        target_data : array-like
            Must contain exactly one column.
        epochs : int
            Number of training iterations. Defaults to ``100``.

        Raises
        ------
        ValueError
            When the target has more than one column, or the gating
            network's input layer size doesn't match the input data.
        """
        target_data = format_data(target_data, is_feature1d=True)

        output_size = target_data.shape[1]
        if output_size != 1:
            raise ValueError("Target data must contains only 1 column, got "
                             "{0}".format(output_size))

        input_size = input_data.shape[1]
        gating_network = self.gating_network
        input_layer = gating_network.connection.input_layers[0]
        gating_network_input_size = input_layer.size

        if gating_network_input_size != input_size:
            raise ValueError(
                "Gating Network expected get {0} input features, got "
                "{1}".format(gating_network_input_size, input_size))

        for epoch in range(epochs):
            predictions = []
            # The enumerate index was unused; iterate networks directly.
            for network in self.networks:
                predictions.append(network.predict(input_data))
                network.train_epoch(input_data, target_data)

            # Gating network learns from the experts' combined output.
            predictions = np.concatenate(predictions, axis=1)
            gating_network.train_epoch(input_data, predictions)
示例#12
0
文件: sofm.py 项目: disc5/neupy
 def predict_raw(self, input_data):
     """Applies the transform to every sample against current weights."""
     samples = format_data(input_data)
     raw_output = np.zeros((samples.shape[0], self.n_outputs))

     # One output row per sample, one column per output neuron.
     for row_index, sample in enumerate(samples):
         raw_output[row_index, :] = self.transform(
             sample.reshape(1, -1), self.weight)

     return raw_output
示例#13
0
    def train(self, input_data):
        """
        Stores binary samples in the network's weight matrix.

        Parameters
        ----------
        input_data : array-like
            Matrix of binary (0/1) samples.

        Raises
        ------
        ValueError
            When the memory limit would be exceeded, or the number of
            features doesn't match the existing weight matrix.
        """
        self.discrete_validation(input_data)

        # Convert {0, 1} values into {-1, 1} before computing weights.
        input_data = bin2sign(input_data)
        input_data = format_data(input_data, is_feature1d=False)

        n_rows, n_features = input_data.shape
        n_rows_after_update = self.n_memorized_samples + n_rows

        if self.check_limit:
            # Classic Hopfield capacity bound: n / (2 * ln(n)) patterns.
            memory_limit = math.ceil(n_features / (2 * math.log(n_features)))

            if n_rows_after_update > memory_limit:
                raise ValueError("You can't memorize more than {0} "
                                 "samples".format(memory_limit))

        weight_shape = (n_features, n_features)

        if self.weight is None:
            self.weight = np.zeros(weight_shape, dtype=int)

        if self.weight.shape != weight_shape:
            n_features_expected = self.weight.shape[1]
            raise ValueError("Input data has invalid number of features. "
                             "Got {} features instead of {}."
                             "".format(n_features, n_features_expected))

        # NOTE(review): this assignment overwrites previously memorized
        # patterns even though n_memorized_samples keeps counting them --
        # verify whether accumulation (`+=`) was intended here.
        self.weight = input_data.T.dot(input_data)
        # Keep a zero diagonal: Hopfield units have no self-connections.
        np.fill_diagonal(self.weight, np.zeros(len(self.weight)))
        self.n_memorized_samples = n_rows_after_update
示例#14
0
文件: rbm.py 项目: itdxer/neupy
    def prediction_error(self, input_data, target_data=None):
        """
        Compute the pseudo-likelihood of input samples.

        Parameters
        ----------
        input_data : array-like
            Values of the visible layer

        target_data : array-like, optional
            Unused in the body; kept only for signature compatibility.

        Returns
        -------
        float
            Value of the pseudo-likelihood.
        """
        # With a single visible unit, 1-D input needs a feature axis.
        is_input_feature1d = (self.n_visible == 1)
        input_data = format_data(input_data, is_input_feature1d)

        # Evaluate the error batch by batch to keep memory bounded.
        errors = self.apply_batches(
            function=self.methods.prediction_error,
            input_data=input_data,

            description='Validation batches',
            show_error_output=True,
        )
        # NOTE(review): assumes average_batch_errors properly weights a
        # smaller final batch -- confirm against its implementation.
        return average_batch_errors(
            errors,
            n_samples=len(input_data),
            batch_size=self.batch_size,
        )
示例#15
0
文件: sofm.py 项目: mayblue9/neupy
 def predict_raw(self, input_data):
     """Returns the raw transform output for every input sample."""
     input_data = format_data(input_data)
     # One output row per sample, one column per output neuron.
     output = np.zeros((input_data.shape[0], self.n_outputs))
     for i, input_row in enumerate(input_data):
         output[i, :] = self.transform(input_row.reshape(1, -1),
                                       self.weight)
     return output
示例#16
0
文件: rbm.py 项目: itdxer/neupy
    def gibbs_sampling(self, visible_input, n_iter=1):
        """
        Makes Gibbs sampling n times using visible input.

        Parameters
        ----------
        visible_input : 1d or 2d array
        n_iter : int
            Number of Gibbs sampling iterations. Defaults to ``1``.

        Returns
        -------
        array-like
            Output from the visible units after performing n Gibbs
            samples. The array contains only binary units (0 and 1).
        """
        # With a single visible unit, 1-D input needs a feature axis.
        samples = format_data(visible_input, self.n_visible == 1)
        one_step = self.methods.gibbs_sampling

        # Each iteration feeds the previous sample back through.
        for _ in range(n_iter):
            samples = one_step(samples)

        return samples
示例#17
0
    def train(self, input_data, target_data, epochs=100):
        """
        Trains every expert network and the gating network.

        Parameters
        ----------
        input_data : array-like (n_samples, n_features)
        target_data : array-like
            Must contain exactly one column.
        epochs : int
            Number of training iterations. Defaults to ``100``.

        Raises
        ------
        ValueError
            When the target has more than one column, or the gating
            network's input layer size doesn't match the input data.
        """
        target_data = format_data(target_data, is_feature1d=True)

        output_size = target_data.shape[1]
        if output_size != 1:
            raise ValueError("Target data must contains only 1 column, got "
                             "{0}".format(output_size))

        input_size = input_data.shape[1]

        gating_network = self.gating_network
        gating_network_input_size = gating_network.input_layer.size

        if gating_network_input_size != input_size:
            raise ValueError(
                "Gating Network expected get {0} input features, got "
                "{1}".format(gating_network_input_size, input_size)
            )

        for epoch in range(epochs):
            predictions = []
            # The enumerate index was unused; iterate networks directly.
            for network in self.networks:
                predictions.append(network.predict(input_data))
                network.train_epoch(input_data, target_data)

            # Gating network learns from the experts' combined output.
            predictions = np.concatenate(predictions, axis=1)
            gating_network.train_epoch(input_data, predictions)
示例#18
0
文件: grnn.py 项目: itdxer/neupy
    def predict(self, input_data):
        """
        Make a prediction from the input data.

        Parameters
        ----------
        input_data : array-like (n_samples, n_features)

        Raises
        ------
        NotTrained
            When the network hasn't been trained yet.
        ValueError
            When the number of input features differs from the
            training data.

        Returns
        -------
        array-like (n_samples,)
        """
        if self.input_train is None:
            raise NotTrained(
                "Cannot make a prediction. Network hasn't been trained yet")

        formatted_input = format_data(input_data)

        n_features = formatted_input.shape[1]
        n_expected_features = self.input_train.shape[1]

        if n_features != n_expected_features:
            raise ValueError(
                "Input data must contain {0} features, got {1}".format(
                    n_expected_features, n_features))

        # Prediction is the kernel-ratio-weighted average of the stored
        # training targets.
        ratios = pdf_between_data(self.input_train, formatted_input, self.std)
        return (dot(self.target_train.T, ratios) / ratios.sum(axis=0)).T
    def train(self, input_data):
        """
        Stores binary samples in the network's weight matrix.

        Parameters
        ----------
        input_data : array-like
            Matrix of binary (0/1) samples.

        Raises
        ------
        ValueError
            When the memory limit would be exceeded, or the weight
            matrix shape doesn't match the input features.
        """
        self.discrete_validation(input_data)

        # Convert {0, 1} values into {-1, 1} before computing weights.
        input_data = bin2sign(input_data)
        input_data = format_data(input_data, is_feature1d=False)

        nrows, n_features = input_data.shape
        nrows_after_update = self.n_remembered_data + nrows

        if self.check_limit:
            # Classic Hopfield capacity bound: n / (2 * ln(n)) patterns.
            memory_limit = ceil(n_features / (2 * log(n_features)))

            if nrows_after_update > memory_limit:
                raise ValueError("You can't memorize more than {0} "
                                 "samples".format(memory_limit))

        weight_shape = (n_features, n_features)

        if self.weight is None:
            self.weight = zeros(weight_shape, dtype=int)

        if self.weight.shape != weight_shape:
            raise ValueError("Invalid input shapes. Number of input "
                             "features must be equal to {} and {} output "
                             "features".format(*weight_shape))

        # NOTE(review): this assignment overwrites previously remembered
        # patterns even though n_remembered_data keeps counting them --
        # verify whether accumulation (`+=`) was intended here.
        self.weight = input_data.T.dot(input_data)
        # Keep a zero diagonal: no self-connections.
        fill_diagonal(self.weight, zeros(len(self.weight)))
        self.n_remembered_data = nrows_after_update
示例#20
0
文件: rbm.py 项目: wjianxz/neupy
    def gibbs_sampling(self, visible_input, n_iter=1):
        """
        Makes Gibbs sampling n times using visible input.

        Parameters
        ----------
        visible_input : 1d or 2d array
        n_iter : int
            Number of Gibbs sampling iterations. Defaults to ``1``.

        Returns
        -------
        array-like
            Output from the visible units after performing n
            Gibbs samples. Array will contain only binary
            units (0 and 1).
        """
        # With a single visible unit, 1-D input needs a feature axis.
        is_input_feature1d = (self.n_visible == 1)
        visible_input = format_data(visible_input, is_input_feature1d)

        # Each iteration feeds the previous sample back through.
        input_ = visible_input
        for iteration in range(n_iter):
            input_ = self.gibbs_sampling_one_step(input_)

        return input_
示例#21
0
文件: base.py 项目: sonia2599/neupy
    def predict(self, input_data):
        """Propagates the input through every layer and returns the result."""
        outputs = format_data(input_data, row1d=is_row1d(self.input_layer))

        # Each layer consumes the previous layer's output.
        for layer in self.layers:
            outputs = layer.output(outputs)
        return outputs
示例#22
0
文件: base.py 项目: PranY/neupy
    def predict(self, input_data):
        """Runs a forward pass through the network's layers."""
        activation = format_data(
            input_data, row1d=is_row1d(self.input_layer))

        # Feed the formatted input forward, layer by layer.
        for layer in self.layers:
            activation = layer.output(activation)

        return activation
示例#23
0
文件: rbm.py 项目: itdxer/neupy
    def hidden_to_visible(self, hidden_input):
        """
        Propagates output from the hidden layer backward
        to the visible.

        Parameters
        ----------
        hidden_input : array-like (n_samples, n_hidden_features)

        Returns
        -------
        array-like
        """
        # With a single hidden unit, 1-D input needs a feature axis.
        is_input_feature1d = (self.n_hidden == 1)
        hidden_input = format_data(hidden_input, is_input_feature1d)

        # Propagate batch by batch to keep memory usage bounded.
        outputs = self.apply_batches(
            function=self.methods.hidden_to_visible,
            input_data=hidden_input,

            description='Visible from hidden batches',
            show_progressbar=True,
            show_error_output=False,
        )

        # Stack per-batch outputs back into one array of samples.
        return np.concatenate(outputs, axis=0)
示例#24
0
文件: rbm.py 项目: itdxer/neupy
    def visible_to_hidden(self, visible_input):
        """
        Populates data through the network and returns output
        from the hidden layer.

        Parameters
        ----------
        visible_input : array-like (n_samples, n_visible_features)

        Returns
        -------
        array-like
        """
        # With a single visible unit, 1-D input needs a feature axis.
        is_input_feature1d = (self.n_visible == 1)
        visible_input = format_data(visible_input, is_input_feature1d)

        # Propagate batch by batch to keep memory usage bounded.
        outputs = self.apply_batches(
            function=self.methods.visible_to_hidden,
            input_data=visible_input,

            description='Hidden from visible batches',
            show_progressbar=True,
            show_error_output=False,
        )

        # Stack per-batch outputs back into one array of samples.
        return np.concatenate(outputs, axis=0)
示例#25
0
    def energy(self, input_data, output_data):
        """Computes Hopfield energy for one or many sample pairs."""
        self.discrete_validation(input_data)
        self.discrete_validation(output_data)

        # Convert {0, 1} values into {-1, 1} before computing energies.
        input_data = format_data(bin2sign(input_data), is_feature1d=False)
        output_data = format_data(bin2sign(output_data), is_feature1d=False)
        n_samples = input_data.shape[0]

        # A single pair yields a scalar energy value.
        if n_samples == 1:
            return hopfield_energy(self.weight, input_data, output_data)

        energies = np.zeros(n_samples)
        for index, (input_row, output_row) in enumerate(
                zip(input_data, output_data)):
            energies[index] = hopfield_energy(
                self.weight, input_row, output_row)

        return energies
示例#26
0
文件: learning.py 项目: disc5/neupy
 def train(self, input_train, epochs=100, epsilon=None):
     """Formats the samples and delegates unsupervised training to parent."""
     formatted = format_data(input_train, is_feature1d=True)
     return super(UnsupervisedLearning, self).train(
         input_train=formatted, target_train=None,
         input_test=None, target_test=None,
         epochs=epochs, epsilon=epsilon)
示例#27
0
    def energy(self, X_bin, y_bin):
        """
        Computes Hopfield energy for binary sample pair(s).

        Parameters
        ----------
        X_bin : array-like
            Binary (0/1) input samples.
        y_bin : array-like
            Binary (0/1) output samples.

        Returns
        -------
        float or array-like
            Scalar energy for a single pair, a vector otherwise.
        """
        self.discrete_validation(X_bin)
        self.discrete_validation(y_bin)

        # Convert {0, 1} values into {-1, 1} before computing energies.
        X_sign, y_sign = bin2sign(X_bin), bin2sign(y_bin)
        X_sign = format_data(X_sign, is_feature1d=False)
        y_sign = format_data(y_sign, is_feature1d=False)
        nrows, n_features = X_sign.shape

        # A single pair yields a scalar energy value.
        if nrows == 1:
            return hopfield_energy(self.weight, X_sign, y_sign)

        output = np.zeros(nrows)
        for i, rows in enumerate(zip(X_sign, y_sign)):
            output[i] = hopfield_energy(self.weight, *rows)

        return output
示例#28
0
文件: bam.py 项目: Neocher/neupy
    def energy(self, input_data, output_data):
        """
        Computes Hopfield energy for binary sample pair(s).

        Parameters
        ----------
        input_data : array-like
            Binary (0/1) input samples.
        output_data : array-like
            Binary (0/1) output samples.

        Returns
        -------
        float or array-like
            Scalar energy for a single pair, a vector otherwise.
        """
        self.discrete_validation(input_data)
        self.discrete_validation(output_data)

        # Convert {0, 1} values into {-1, 1} before computing energies.
        input_data, output_data = bin2sign(input_data), bin2sign(output_data)
        input_data = format_data(input_data, row1d=True)
        output_data = format_data(output_data, row1d=True)
        nrows, n_features = input_data.shape

        # A single pair yields a scalar energy value.
        if nrows == 1:
            return hopfield_energy(self.weight, input_data, output_data)

        output = zeros(nrows)
        for i, rows in enumerate(zip(input_data, output_data)):
            output[i] = hopfield_energy(self.weight, *rows)

        return output
示例#29
0
文件: base.py 项目: itdxer/neupy
 def train(self, input_train, epochs=100):
     """
     Formats the input samples and delegates training to the parent
     class with targets and test data disabled.
     """
     input_train = format_data(input_train, is_feature1d=True)
     return super(BaseAssociative, self).train(
         input_train=input_train, target_train=None,
         input_test=None, target_test=None,
         epochs=epochs, epsilon=None,
         summary='table'
     )
示例#30
0
    def train(self, input_train, target_train, copy=True):
        """
        Trains network. PNN doesn't actually train, it just stores
        input data and use it for prediction.

        Parameters
        ----------
        input_train : array-like (n_samples, n_features)

        target_train : array-like (n_samples,)
            Target variable should be vector or matrix
            with one feature column.

        copy : bool
            If value equal to ``True`` than input matrices will
            be copied. Defaults to ``True``.

        Raises
        ------
        ValueError
            In case if something is wrong with input data.
        """
        input_train = format_data(input_train, copy=copy)
        # Class labels keep their original dtype (no float conversion).
        target_train = format_data(target_train, copy=copy, make_float=False)

        LazyLearningMixin.train(self, input_train, target_train)

        n_target_features = target_train.shape[1]
        if n_target_features != 1:
            raise ValueError("Target value should be a vector or a "
                             "matrix with one column")

        classes = self.classes = np.unique(target_train)
        n_classes = classes.size
        n_samples = input_train.shape[0]

        class_ratios = self.class_ratios = np.zeros(n_classes)
        row_comb_matrix = self.row_comb_matrix = np.zeros(
            (n_classes, n_samples))

        # Removed the redundant `class_name = classes[i]` reassignment;
        # enumerate already yields the class label.
        for i, class_name in enumerate(classes):
            class_val_positions = (target_train == class_name)
            # Mark which samples belong to this class.
            row_comb_matrix[i, class_val_positions.ravel()] = 1
            class_ratios[i] = np.sum(class_val_positions)
示例#31
0
文件: pnn.py 项目: itdxer/neupy
    def train(self, input_train, target_train, copy=True):
        """
        Trains network. PNN doesn't actually train, it just stores
        input data and use it for prediction.

        Parameters
        ----------
        input_train : array-like (n_samples, n_features)

        target_train : array-like (n_samples,)
            Target variable should be vector or matrix
            with one feature column.

        copy : bool
            If value equal to ``True`` than input matrices will
            be copied. Defaults to ``True``.

        Raises
        ------
        ValueError
            In case if something is wrong with input data.
        """
        input_train = format_data(input_train, copy=copy)
        target_train = format_data(target_train, copy=copy)

        LazyLearningMixin.train(self, input_train, target_train)

        n_target_features = target_train.shape[1]
        if n_target_features != 1:
            raise ValueError("Target value should be a vector or a "
                             "matrix with one column")

        classes = self.classes = np.unique(target_train)
        n_classes = classes.size
        n_samples = input_train.shape[0]

        row_comb_matrix = self.row_comb_matrix = np.zeros(
            (n_classes, n_samples)
        )
        class_ratios = self.class_ratios = np.zeros(n_classes)

        for i, class_name in enumerate(classes):
            # Bug fix: compare targets against the class label, not the
            # loop index -- labels need not be 0..n_classes-1.
            class_val_positions = (target_train == class_name)
            row_comb_matrix[i, class_val_positions.ravel()] = 1
            class_ratios[i] = np.sum(class_val_positions)
示例#32
0
    def test_format_data(self):
        """Checks format_data for None, sparse and vector inputs."""
        # None passes through untouched
        self.assertEqual(format_data(None), None)

        # Sparse matrices are returned as-is, dtype preserved
        sparse_matrix = csr_matrix((3, 4), dtype=np.int8)
        formatted = format_data(sparse_matrix)
        self.assertIs(formatted, sparse_matrix)
        self.assertEqual(formatted.dtype, sparse_matrix.dtype)

        # Vectors become a single feature column...
        vector = np.random.random(10)
        self.assertEqual(format_data(vector, is_feature1d=True).shape,
                         (10, 1))

        # ...or a single sample row
        vector = np.random.random(10)
        self.assertEqual(format_data(vector, is_feature1d=False).shape,
                         (1, 10))
示例#33
0
    def test_format_data(self):
        """Checks format_data for None, sparse and vector inputs."""
        # None input passes through unchanged
        self.assertEqual(format_data(None), None)

        # Sparse data is kept sparse, dtype preserved
        sparse_matrix = csr_matrix((3, 4), dtype=np.int8)
        formated_sparce_matrix = format_data(sparse_matrix)
        np.testing.assert_array_equal(formated_sparce_matrix, sparse_matrix)
        self.assertEqual(formated_sparce_matrix.dtype, sparse_matrix.dtype)

        # Vector input becomes a single feature column...
        x = np.random.random(10)
        formated_x = format_data(x, is_feature1d=True)
        self.assertEqual(formated_x.shape, (10, 1))

        # ...or a single sample row
        x = np.random.random(10)
        formated_x = format_data(x, is_feature1d=False)
        self.assertEqual(formated_x.shape, (1, 10))
示例#34
0
 def train(self, input_train, epochs=100):
     """Formats input and runs associative training without targets."""
     input_train = format_data(input_train, is_feature1d=True)
     return super(BaseAssociative, self).train(input_train=input_train,
                                               target_train=None,
                                               input_test=None,
                                               target_test=None,
                                               epochs=epochs,
                                               epsilon=None,
                                               summary='table')
示例#35
0
文件: base.py 项目: Neocher/neupy
    def raw_predict(self, input_data):
        """Summates the formatted input and applies the activation function."""
        layer = self.input_layer
        formatted = layer.preformat_input(format_data(input_data))

        # Cache intermediate values on the instance.
        self.input_data = formatted
        self.summated = layer.summator(formatted)

        return layer.activation_function(self.summated)
示例#36
0
    def train(self, input_train, target_train, copy=True):
        """
        Stores the training samples and precomputes per-class
        membership and ratio structures.

        Parameters
        ----------
        input_train : array-like (n_samples, n_features)
        target_train : array-like (n_samples,)
            Must contain exactly one feature column.
        copy : bool
            Copies the input matrices when ``True``.
            Defaults to ``True``.

        Raises
        ------
        ValueError
            When the target has more than one column.
        """
        input_train = format_data(input_train, copy=copy)
        target_train = format_data(target_train, copy=copy)

        LazyLearning.train(self, input_train, target_train)

        if target_train.shape[1] != 1:
            raise ValueError("Target value must be in 1 dimention")

        classes = self.classes = unique(target_train)
        number_of_classes = classes.size
        row_comb_matrix = self.row_comb_matrix = zeros(
            (number_of_classes, input_train.shape[0]))
        class_ratios = self.class_ratios = zeros(number_of_classes)

        for i, class_name in enumerate(classes):
            # Bug fix: compare targets against the class label, not the
            # loop index -- labels need not be 0..n_classes-1.
            class_val_positions = (target_train == class_name)
            row_comb_matrix[i, class_val_positions.ravel()] = 1
            class_ratios[i] = np_sum(class_val_positions)
示例#37
0
    def predict(self, input_data):
        """Assigns every sample to the index of its nearest center."""
        samples = format_data(input_data)
        centers = self.centers
        labels = zeros((samples.shape[0], 1))

        # Nearest center by Euclidean distance.
        for index, sample in enumerate(samples):
            labels[index] = argmin(norm(centers - sample, axis=1))

        return labels
示例#38
0
    def raw_predict(self, input_data):
        """
        Summates the formatted input through the input layer and
        returns its activation output.
        """
        input_data = format_data(input_data)

        input_layer = self.input_layer
        input_data = input_layer.preformat_input(input_data)

        # Cache intermediate values; presumably reused by training
        # routines elsewhere -- confirm against callers.
        self.input_data = input_data
        self.summated = input_layer.summator(input_data)

        return input_layer.activation_function(self.summated)
示例#39
0
文件: rbf_kmeans.py 项目: PranY/neupy
    def train(self, input_train, epsilon=1e-5):
        """Seeds the cluster centers from the first samples, then trains."""
        input_train = format_data(input_train)
        n_clusters = self.n_clusters

        # Need strictly more samples than clusters.
        if input_train.shape[0] <= n_clusters:
            raise ValueError("Count of clusters must be less than count of "
                             "input data.")

        # The first n_clusters rows become the initial centers.
        self.centers = input_train[:n_clusters, :].copy()
        super(RBFKMeans, self).train(input_train, epsilon=epsilon)
示例#40
0
    def train(self, input_train, epsilon=1e-5):
        """
        Seeds the cluster centers from the first samples and delegates
        training to the parent class.

        Raises
        ------
        ValueError
            When there are not more samples than clusters.
        """
        n_clusters = self.n_clusters
        input_train = format_data(input_train)

        if input_train.shape[0] <= n_clusters:
            raise ValueError("Count of clusters must be less than count of "
                             "input data.")

        # The first n_clusters rows become the initial centers.
        self.centers = input_train[:n_clusters, :].copy()
        super(RBFKMeans, self).train(input_train, epsilon=epsilon)
示例#41
0
    def predict(self, input_data):
        """Returns the index of the closest center for every sample."""
        input_data = format_data(input_data)

        centers = self.centers
        classes = zeros((input_data.shape[0], 1))

        for i, value in enumerate(input_data):
            # Nearest center by Euclidean distance.
            classes[i] = argmin(norm(centers - value, axis=1))

        return classes
示例#42
0
    def train(self, input_train, epochs=100, epsilon=None,
              summary_type='table'):
        """Formats the samples and runs unsupervised training."""
        formatted = format_data(input_train, is_feature1d=True)
        return super(UnsupervisedLearning, self).train(
            input_train=formatted, target_train=None,
            input_test=None, target_test=None,
            epochs=epochs, epsilon=epsilon,
            summary_type=summary_type)
示例#43
0
文件: base.py 项目: wjianxz/neupy
    def format_target(self, y):
        """
        Formats the target so that it matches the network's
        output shape.

        Parameters
        ----------
        y : array-like

        Returns
        -------
        array-like
            Formatted target, possibly with an extra batch axis.
        """
        output_shape = tf.TensorShape(self.network.output_shape)
        # Targets are 1-D features only when the output is (batch, 1).
        is_feature1d = (output_shape.ndims == 2 and output_shape[1] == 1)
        formatted_target = format_data(y, is_feature1d=is_feature1d)

        if (formatted_target.ndim + 1) == len(output_shape):
            # We assume that when one dimension was missed than user
            # wants to propagate single sample through the network
            formatted_target = np.expand_dims(formatted_target, axis=0)

        return formatted_target
示例#44
0
    def reconstruct(self, input_data):
        """
        Projects minimized data back into the original feature space.

        Parameters
        ----------
        input_data : array-like (n_samples, minimized_data_size)

        Raises
        ------
        ValueError
            When the network wasn't trained, or the number of input
            features differs from the minimized data size.

        Returns
        -------
        array-like
            Data projected back into the original feature space.
        """
        if self.weights is None:
            raise ValueError("Train network before use reconstruct method.")

        input_data = format_data(input_data)
        if input_data.shape[1] != self.minimized_data_size:
            # Bug fix: the message had "expected" and "got" swapped --
            # the expected size is minimized_data_size, the actual one
            # is the input's feature count.
            raise ValueError("Invalid input data feature space, expected "
                             "{}, got {}.".format(self.minimized_data_size,
                                                  input_data.shape[1]))

        return dot(input_data, self.weights.T)
示例#45
0
    def train(self, input_data, output_data):
        """Accumulates Hebbian weights between input and output patterns."""
        self.discrete_validation(input_data)
        self.discrete_validation(output_data)

        # Convert {0, 1} values into {-1, 1} before computing weights.
        output_signs = bin2sign(format_data(output_data, is_feature1d=False))
        input_signs = bin2sign(format_data(input_data, is_feature1d=False))

        n_input_features = input_signs.shape[1]
        n_output_features = output_signs.shape[1]
        expected_shape = (n_input_features, n_output_features)

        if self.weight is None:
            self.weight = np.zeros(expected_shape)

        if self.weight.shape != expected_shape:
            raise ValueError("Invalid input shapes. Number of input "
                             "features must be equal to {} and {} output "
                             "features".format(n_input_features,
                                               n_output_features))

        # Hebbian update: accumulate outer products of the sample pairs.
        self.weight += input_signs.T.dot(output_signs)
    def train(self, input_data):
        """
        Memorizes input samples in the weight matrix using either
        Oja's or the Hebbian learning rule (selected by ``self.rule``).

        Parameters
        ----------
        input_data : array-like
            Matrix of binary samples to memorize, one sample per row.

        Raises
        ------
        ValueError
            If the memory limit would be exceeded (when
            ``check_limit`` is enabled) or the number of features
            doesn't match the existing weight matrix.
        """
        self.discrete_validation(input_data)

        input_data = format_data(bin2sign(input_data), is_feature1d=False)
        n_rows, n_features = input_data.shape
        n_rows_after_update = self.n_memorized_samples + n_rows

        if self.check_limit:
            # n / (2 * ln(n)) is the theoretical sample capacity of a
            # Hopfield-style network with n features.
            memory_limit = math.ceil(n_features / (2 * math.log(n_features)))

            if n_rows_after_update > memory_limit:
                raise ValueError("You can't memorize more than {0} "
                                 "samples".format(memory_limit))

        weight_shape = (n_features, n_features)

        if self.weight is None:
            self.weight = np.zeros(weight_shape, dtype=int)

        if self.weight.shape != weight_shape:
            n_features_expected = self.weight.shape[1]
            raise ValueError("Input data has invalid number of features. "
                             "Got {} features instead of {}."
                             "".format(n_features, n_features_expected))

        # Both rules start from the (scaled) correlation matrix with a
        # zeroed diagonal; they differ in the normalization and in the
        # extra per-sample update performed by Oja's rule.
        if self.rule == 'oja':
            self.weight = 0.02 * input_data.T.dot(input_data)
            self.weight = self.weight / n_features
            np.fill_diagonal(self.weight, np.zeros(len(self.weight)))

            learning_rate = 0.01
            responses = np.dot(self.weight, input_data.T)

            for index, sample in enumerate(input_data):
                response = responses[:, index].reshape((n_features, 1))
                # Oja's update keeps weights bounded by subtracting a
                # term proportional to the squared response.
                self.weight += ((sample * response) -
                                learning_rate * np.square(response) *
                                self.weight)
        else:
            self.weight = 0.02 * input_data.T.dot(input_data)
            self.weight = self.weight / n_rows
            np.fill_diagonal(self.weight, np.zeros(len(self.weight)))

        self.n_memorized_samples = n_rows_after_update
示例#47
0
文件: pnn.py 项目: Neocher/neupy
    def train(self, input_train, target_train, copy=True):
        """
        Stores the training samples and precomputes the per-class
        combination matrix and class ratios. PNN doesn't iterate:
        training is a single pass over the data.

        Parameters
        ----------
        input_train : array-like (n_samples, n_features)

        target_train : array-like (n_samples,)
            Class labels; must form a single feature column.

        copy : bool
            If ``True``, input matrices are copied.
            Defaults to ``True``.

        Raises
        ------
        ValueError
            If the target has more than one feature column.
        """
        input_train = format_data(input_train, copy=copy)
        target_train = format_data(target_train, copy=copy)

        LazyLearning.train(self, input_train, target_train)

        if target_train.shape[1] != 1:
            raise ValueError("Target value must be in 1 dimension")

        classes = self.classes = unique(target_train)
        number_of_classes = classes.size
        row_comb_matrix = self.row_comb_matrix = zeros(
            (number_of_classes, input_train.shape[0])
        )
        class_ratios = self.class_ratios = zeros(number_of_classes)

        for i, class_name in enumerate(classes):
            # Bug fix: compare against the class label itself, not the
            # enumeration index - labels aren't guaranteed to be 0..n-1.
            class_val_positions = (target_train == class_name)
            row_comb_matrix[i, class_val_positions.ravel()] = 1
            class_ratios[i] = np_sum(class_val_positions)
示例#48
0
    def train(self,
              input_train,
              input_test=None,
              epochs=100,
              epsilon=None,
              summary_type='table'):
        """
        Trains on unlabeled data by delegating to the parent class
        with ``None`` targets.
        """
        formatted_train = format_data(input_train, is_feature1d=True)
        formatted_test = None

        if input_test is not None:
            formatted_test = format_data(input_test)

        parent = super(UnsupervisedLearningMixin, self)
        return parent.train(input_train=formatted_train,
                            target_train=None,
                            input_test=formatted_test,
                            target_test=None,
                            epochs=epochs,
                            epsilon=epsilon,
                            summary_type=summary_type)
示例#49
0
    def reconstruct(self, X):
        """
        Maps data from the minimized feature space back to the
        original one.

        Raises
        ------
        NotTrained
            If the network hasn't been trained yet.

        ValueError
            If ``X`` has a wrong number of features.
        """
        if not isinstance(self.weight, np.ndarray):
            raise NotTrained("Network hasn't been trained yet")

        X = format_data(X)
        if X.shape[1] != self.minimized_data_size:
            # Bug fix: the original message swapped the "expected"
            # and "got" values.
            raise ValueError("Invalid input data feature space, expected "
                             "{}, got {}.".format(self.minimized_data_size,
                                                  X.shape[1]))

        return np.dot(X, self.weight.T)
示例#50
0
文件: bam.py 项目: Neocher/neupy
    def train(self, input_data, output_data):
        """
        Accumulates the outer product of sign-encoded input/output
        samples into the association weight matrix.

        Raises
        ------
        ValueError
            If the weight matrix shape doesn't match the data.
        """
        self.discrete_validation(input_data)
        self.discrete_validation(output_data)

        signed_output = bin2sign(format_data(output_data, row1d=True))
        signed_input = bin2sign(format_data(input_data, row1d=True))

        n_in_features = signed_input.shape[1]
        n_out_features = signed_output.shape[1]
        expected_shape = (n_in_features, n_out_features)

        if self.weight is None:
            self.weight = zeros(expected_shape)

        if self.weight.shape != expected_shape:
            raise ValueError("Invalid input shapes. Number of input "
                             "features must be equal to {} and {} output "
                             "features".format(n_in_features,
                                               n_out_features))

        self.weight += signed_input.T.dot(signed_output)
示例#51
0
    def train(self, X_bin, y_bin):
        """
        Updates the association weights from binary input/output
        sample pairs.

        Raises
        ------
        ValueError
            If the weight matrix shape doesn't match the data.
        """
        self.discrete_validation(X_bin)
        self.discrete_validation(y_bin)

        X_sign = bin2sign(format_data(X_bin, is_feature1d=False))
        y_sign = bin2sign(format_data(y_bin, is_feature1d=False))

        expected_shape = (X_sign.shape[1], y_sign.shape[1])

        if self.weight is None:
            self.weight = np.zeros(expected_shape)

        if self.weight.shape != expected_shape:
            raise ValueError(
                "Invalid input shapes. Number of input "
                "features must be equal to {} and {} output "
                "features".format(*expected_shape))

        self.weight += X_sign.T.dot(y_sign)
示例#52
0
    def train_epoch(self, input_train, target_train):
        """
        Runs one training epoch: each sample is propagated through
        the network and the rows of the weight matrix after the
        unconditioned ones are updated from the layer's response.
        """
        samples = format_data(input_train)
        weight = self.input_layer.weight
        first_conditioned = self.n_unconditioned

        for sample in samples:
            sample = reshape(sample, (1, sample.size))
            response = self.predict(sample)
            weight[first_conditioned:, :] += self.weight_delta(
                sample, response)
示例#53
0
文件: base.py 项目: sonia2599/neupy
    def train_epoch(self, input_train, target_train):
        """
        Performs a single epoch: propagates every input row and adds
        the resulting weight delta to the conditioned portion of the
        input layer's weight matrix.
        """
        rows = format_data(input_train)
        layer_weight = self.input_layer.weight
        start = self.n_unconditioned

        for row in rows:
            row = reshape(row, (1, row.size))
            output = self.predict(row)
            layer_weight[start:, :] += self.weight_delta(row, output)
示例#54
0
    def train(self, X_train, y_train, X_test=None, y_test=None, epochs=100):
        """
        Formats the training (and optional test) data and delegates
        training to the parent class.

        Raises
        ------
        ValueError
            If only one of ``X_test``/``y_test`` is provided.
        """
        # XOR: exactly one of the two test arrays is missing.
        test_data_partially_missed = (X_test is None) != (y_test is None)

        if test_data_partially_missed:
            raise ValueError("Input and target test samples are missed. "
                             "They must be defined together or none of them.")

        X_train = format_data(X_train)
        y_train = format_data(y_train)

        if X_test is not None:
            X_test = format_data(X_test)
            y_test = format_data(y_test)

        return super(CMAC, self).train(X_train,
                                       y_train,
                                       X_test,
                                       y_test,
                                       epochs=epochs)
示例#55
0
    def format_input_data(self, X):
        """
        Validates the input matrix and converts it to the network's
        expected 2D format.

        Raises
        ------
        ValueError
            If the input has more than 2 dimensions or the wrong
            number of features.
        """
        X = format_data(X, is_feature1d=(self.n_inputs == 1))

        if X.ndim != 2:
            raise ValueError("Cannot make prediction, because input "
                             "data has more than 2 dimensions")

        n_features = X.shape[1]
        if n_features != self.n_inputs:
            raise ValueError("Input data expected to have {} features, "
                             "but got {}".format(self.n_inputs, n_features))

        return X
示例#56
0
文件: cmac.py 项目: EdwardBetts/neupy
    def predict(self, input_data):
        """
        Predicts an output for every input sample by quantizing it
        and reading the value stored at its memory coordinates.
        """
        input_data = format_data(input_data)

        predicted = [
            self.get_result_by_coords(self.get_memory_coords(sample))
            for sample in self.quantize(input_data)
        ]
        return array(predicted)
示例#57
0
    def predict(self, X):
        """
        Returns the stored prediction for each quantized input row.
        """
        X = format_data(X)
        results = []

        for row in self.quantize(X):
            memory_coords = self.get_memory_coords(row)
            results.append(self.get_result_by_coords(memory_coords))

        return np.array(results)
    def energy(self, input_data):
        """
        Computes the Hopfield energy of the input.

        Returns a single value when one sample is given, otherwise a
        vector with one energy value per row.
        """
        self.discrete_validation(input_data)
        samples = format_data(bin2sign(input_data), is_feature1d=False)
        n_samples = samples.shape[0]

        if n_samples == 1:
            return hopfield_energy(self.weight, samples, samples)

        energies = zeros(n_samples)
        for index, sample in enumerate(samples):
            energies[index] = hopfield_energy(self.weight, sample, sample)

        return energies
示例#59
0
文件: oja.py 项目: Neocher/neupy
    def reconstruct(self, input_data):
        """
        Reconstructs data from the minimized feature space into the
        original one.

        Raises
        ------
        ValueError
            If the network hasn't been trained yet or ``input_data``
            has a wrong number of features.
        """
        if self.weights is None:
            raise ValueError("Train network before use reconstruct method.")

        input_data = format_data(input_data)
        if input_data.shape[1] != self.minimized_data_size:
            # Bug fix: the original message swapped the "expected"
            # and "got" values.
            raise ValueError(
                "Invalid input data feature space, expected "
                "{}, got {}.".format(
                    self.minimized_data_size, input_data.shape[1]
                )
            )

        return dot(input_data, self.weights.T)
示例#60
0
    def train(self, input_train, epsilon=1e-5, epochs=100):
        """
        Initializes cluster centers from the first ``n_clusters``
        samples and runs the parent class training loop.

        Parameters
        ----------
        input_train : array-like
            Training samples, one per row.

        epsilon : float
            Convergence threshold. Defaults to ``1e-5``.

        epochs : int
            Maximum number of training epochs. Defaults to ``100``.

        Raises
        ------
        ValueError
            If the dataset has fewer samples than clusters.
        """
        n_clusters = self.n_clusters
        input_train = format_data(input_train)
        n_samples = input_train.shape[0]

        if n_samples <= n_clusters:
            # Typo fix in the error message: "spcified" -> "specified".
            raise ValueError("Number of samples in the dataset is less than "
                             "specified number of clusters. Got {} samples, "
                             "expected at least {} (for {} clusters)"
                             "".format(n_samples, n_clusters + 1, n_clusters))

        # Copy so later center updates don't mutate the training data.
        self.centers = input_train[:n_clusters, :].copy()
        super(RBFKMeans, self).train(input_train, epsilon=epsilon,
                                     epochs=epochs)