Example #1
 def test_old_wrap(self):
     class with_wrap(object):
         def __array__(self):
             return np.zeros(1)
         def __array_wrap__(self, arr):
             r = with_wrap()
             r.arr = arr
             return r
     a = with_wrap()
     x = ncu.minimum(a, a)
     assert_equal(x.arr, np.zeros(1))
Example #2
 def test_default_prepare(self):
     class with_wrap(object):
         __array_priority__ = 10
         def __array__(self):
             return np.zeros(1)
         def __array_wrap__(self, arr, context):
             return arr
     a = with_wrap()
     x = ncu.minimum(a, a)
     assert_equal(x, np.zeros(1))
     assert_equal(type(x), np.ndarray)
Example #3
    def check_old_wrap(self):
        class with_wrap(object):
            def __array__(self):
                return zeros(1)

            def __array_wrap__(self, arr):
                r = with_wrap()
                r.arr = arr
                return r

        a = with_wrap()
        x = minimum(a, a)
        assert_equal(x.arr, zeros(1))
Example #4
 def test_wrap(self):
     class with_wrap(object):
         def __array__(self):
             return zeros(1)
         def __array_wrap__(self, arr, context):
             r = with_wrap()
             r.arr = arr
             r.context = context
             return r
     a = with_wrap()
     x = minimum(a, a)
     assert_equal(x.arr, zeros(1))
     func, args, i = x.context
     self.assertTrue(func is minimum)
     self.assertEqual(len(args), 2)
     assert_equal(args[0], a)
     assert_equal(args[1], a)
     self.assertEqual(i, 0)
Example #5
 def test_wrap(self):
     class with_wrap(object):
         def __array__(self):
             return np.zeros(1)
         def __array_wrap__(self, arr, context):
             r = with_wrap()
             r.arr = arr
             r.context = context
             return r
     a = with_wrap()
     x = ncu.minimum(a, a)
     assert_equal(x.arr, np.zeros(1))
     func, args, i = x.context
     self.assertTrue(func is ncu.minimum)
     self.assertEqual(len(args), 2)
     assert_equal(args[0], a)
     assert_equal(args[1], a)
     self.assertEqual(i, 0)
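
Examples #1 through #5 all exercise the same NumPy hook: when a ufunc such as np.minimum receives an object that defines __array__, it computes the result on the converted ndarray and then hands it back through the object's __array_wrap__ (the older one-argument form in Example #1, the (arr, context) form in the later ones, with __array_priority__ deciding whose wrapper wins). Below is a minimal standalone sketch of that mechanism outside a test class; the extra defaulted parameters are a hedge, since the exact hook signatures have shifted across NumPy versions.

import numpy as np

class WithWrap:
    __array_priority__ = 10  # prefer this type's wrapper over ndarray's

    def __array__(self, dtype=None, copy=None):
        # ufuncs call this to obtain a plain ndarray to compute on
        return np.zeros(1)

    def __array_wrap__(self, arr, context=None, return_scalar=False):
        # receives the raw ufunc result; context, when passed, is
        # (ufunc, inputs, output_index) as asserted in Example #4
        r = WithWrap()
        r.arr = arr
        r.context = context
        return r

a = WithWrap()
x = np.minimum(a, a)  # computed on np.zeros(1), then handed back to the wrapper
print(x.arr)          # [0.]
print(x.context)      # (<ufunc 'minimum'>, (a, a), 0) on versions that pass it
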
Example #6
    def fit(self, X, y):
        """Fit classifier with training data

        Parameters
        ----------
        X : numpy.ndarray or scipy.sparse
            input features, can be a dense or sparse matrix of size
            :code:`(n_samples, n_features)`
        y : numpy.ndarray or scipy.sparse {0,1}
            binary indicator matrix with label assignments.

        Returns
        -------
        skmultilearn.MLARAMfast.MLARAM
            fitted instance of self
        """

        self._labels = []
        self._allneu = ""
        self._online = 1
        self._alpha = 0.0000000000001

        is_sparse_x = issparse(X)

        label_combination_to_class_map = {}
        # FIXME: we should support dense matrices natively
        if isinstance(X, numpy.matrix):
            X = numpy.asarray(X)
        if isinstance(y, numpy.matrix):
            y = numpy.asarray(y)
        is_more_dimensional = int(len(X[0].shape) != 1)
        X = _normalize_input_space(X)

        y_0 = _get_label_vector(y, 0)

        if len(self.neurons) == 0:
            neuron_vc = _concatenate_with_negation(X[0])
            self.neurons.append(Neuron(neuron_vc, y_0))
            start_index = 1
            label_combination_to_class_map[_get_label_combination_representation(y_0)] = [0]
        else:
            start_index = 0

        # denotes the class enumerator for label combinations
        last_used_label_combination_class_id = 0

        for row_no, input_vector in enumerate(X[start_index:], start_index):
            label_assignment_vector = _get_label_vector(y, row_no)

            fc = _concatenate_with_negation(input_vector)
            activationn = [0] * len(self.neurons)
            activationi = [0] * len(self.neurons)
            label_combination = _get_label_combination_representation(label_assignment_vector)

            if label_combination in label_combination_to_class_map:
                fcs = fc.sum()
                for class_number in label_combination_to_class_map[label_combination]:
                    if issparse(self.neurons[class_number].vc):
                        minnfs = self.neurons[class_number].vc.minimum(fc).sum()
                    else:
                        minnfs = umath.minimum(self.neurons[class_number].vc, fc).sum()

                    activationi[class_number] = minnfs / fcs
                    activationn[class_number] = minnfs / self.neurons[class_number].vc.sum()

            if numpy.max(activationn) == 0:
                last_used_label_combination_class_id += 1
                self.neurons.append(Neuron(fc, label_assignment_vector))
                label_combination_to_class_map.setdefault(label_combination, []).append(len(self.neurons) - 1)

                continue

            inds = numpy.argsort(activationn)
            indc = numpy.where(numpy.array(activationi)[inds[::-1]] > self.vigilance)[0]

            if indc.shape[0] == 0:
                self.neurons.append(Neuron(fc, label_assignment_vector))
                label_combination_to_class_map.setdefault(label_combination, []).append(len(self.neurons) - 1)
                continue

            winner = inds[::-1][indc[0]]
            if issparse(self.neurons[winner].vc):
                self.neurons[winner].vc = self.neurons[winner].vc.minimum(fc)
            else:
                self.neurons[winner].vc = umath.minimum(
                    self.neurons[winner].vc, fc
                )

            # 1 if winner neuron won a given label 0 if not
            labels_won_indicator = numpy.zeros(y_0.shape, dtype=y_0.dtype)
            labels_won_indicator[label_assignment_vector.nonzero()] = 1
            self.neurons[winner].label += labels_won_indicator

        return self
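
The fit above comes from scikit-multilearn's MLARAM classifier. A hedged usage sketch follows, assuming the packaged skmultilearn.adapt location and the vigilance/threshold hyperparameters the method reads from self; the toy data is made up for illustration.

import numpy
from skmultilearn.adapt import MLARAM  # assumed packaged location

# toy multi-label data: 4 samples, 3 features, 2 labels
X = numpy.array([[0.1, 0.2, 0.9],
                 [0.0, 0.3, 0.8],
                 [0.9, 0.8, 0.1],
                 [0.8, 0.9, 0.0]])
y = numpy.array([[1, 0], [1, 0], [0, 1], [0, 1]])

clf = MLARAM(vigilance=0.95, threshold=0.05)  # ART vigilance, ranking cut-off
clf.fit(X, y)
print(clf.predict(X))        # binary indicator matrix, shape (4, 2)
print(clf.predict_proba(X))  # label assignment scores, as in Example #8
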
Example #7
def garch2f8(y, c1, a1, b1, y1, h1, c2, a2, b2, y2, h2, df):
    ## Off-diagonal parameter estimation in bivariate GARCH(1,1) when diagonal parameters are given.
    #  INPUTS
    #   y     : [vector] (T x 1) data generated by a GARCH(1,1) process
    #  OUTPUTS
    #   q     : [vector] (4 x 1) parameters of the GARCH(1,1) process
    #   qerr  : [vector] (4 x 1) standard error of parameter estimates
    #   hf    : [scalar] current conditional heteroskedasticity estimate
    #   hferr : [scalar] standard error on hf
    #  NOTE
    #   o Originally written by Olivier Ledoit, 4/28/1997
    #   o Uses a conditional t-distribution with fixed degrees of freedom
    #   o Steepest Ascent on boundary, Hessian off boundary, no grid search

    # Parameters
    gold = (1 + sqrt(5)) / 2  # step size increment
    tol1 = 1e-7  # for termination criterion
    tol2 = 1e-7  # for closeness to boundary
    big = 2  # for making the hessian negative definite
    maxiter = 50  # maximum number of iterations
    # n=30			# number of points on the grid

    # Prepare
    t = len(y)
    y1 = y1.flatten()
    y2 = y2.flatten()
    y = y.flatten()
    s = mean(y)
    # s1=mean((y1))
    # s2=mean((y2))
    h1 = h1.flatten()
    h2 = h2.flatten()

    # Bounds
    low = r_[-sqrt(c1 * c2), 0, 0] + tol2
    high = r_[sqrt(c1 * c2), sqrt(a1 * a2), sqrt(b1 * b2)] - tol2

    # Starting Point
    a0 = 0.9 * sqrt(a1 * a2)
    b0 = 0.9 * sqrt(b1 * b2)
    c0 = mean(y) * (1 - a0 - b0) * (df - 2) / df
    c0 = sign(c0) * min(abs(c0), 0.9 * sqrt(c1 * c2))

    # Initialize optimization
    a = r_[c0, a0, b0]
    best = 0
    da = 0
    # term=1
    # negdef=0
    iter = 0

    # Begin optimization loop
    while iter < maxiter:
        iter = iter + 1

        # New parameter
        # olda = a
        a = a + gold**best * da

        # Conditional variance
        h = filter([0, a[1]], [1, -a[2]], y * (df - 2) / df, zi=array([s * (df - 2) / df]))[0] \
            + filter([0, a[0]], [1, -a[2]], ones(t))
        d = h1 * h2 - h**2
        z = h2 * y1 + h1 * y2 - 2 * h * y

        # Likelihood
        if (any(a < low) or any(a > high)):
            like = -np.inf
        else:
            # like=-sum(log(h)+y/h))
            # like=-sum(log(h)+(df+1)*log(1+y/h/df))
            if any(d <= 0) or any(1 + z / d / df <= 0):
                like = -np.inf
            else:
                like = -sum(log(d) + (2 + df) * log(1 + z / d / df)) / 2

        # Gradient
        GG = r_['-1',
                filter([0, 1], [1, -a[2]], ones(t))[..., newaxis],
                filter([0, 1], [1, -a[2]],
                       y * (df - 2) / df)[..., newaxis],
                filter([0, 1], [1, -a[2]], h)[..., newaxis]]
        g1 = h / d + (2 + df) * y / (z + d * df) - (2 + df) * h * z / (
            z + d * df) / d
        G = GG * repeat(g1.reshape(-1, 1), 3, axis=1)
        gra = npsum(G, axis=0)

        # Hessian
        GG2 = GG[:,
                 [0, 1, 2, 0, 1, 2, 0, 1, 2]] * GG[:,
                                                   [0, 0, 0, 1, 1, 1, 2, 2, 2]]
        g2 = 1 / d + 2 * h ** 2 / d ** 2 - (2 + df) * y / (z + d * df) ** 2 * (-2 * y - 2 * df * h) \
             - (2 + df) * z / (z + d * df) / d + 2 * (2 + df) * h * y / (z + d * df) / d \
             + (2 + df) * h * z / (z + d * df) ** 2 / d * (-2 * y - 2 * df * h) \
             - 2 * (2 + df) * h ** 2 * z / (z + d * df) / d ** 2
        HH = zeros((t, 9))
        HH[:, 2] = filter([0, 1], [1, -a[2]], GG[:, 0])
        HH[:, 6] = HH[:, 2]
        HH[:, 5] = filter([0, 1], [1, -a[2]], GG[:, 1])
        HH[:, 7] = HH[:, 5]
        HH[:, 8] = filter([0, 2], [1, -a[2]], GG[:, 2])
        H = GG2 * repeat(g2.reshape(-1, 1), 9, axis=1) + HH * repeat(
            g1.reshape(-1, 1), 9, axis=1)
        hes = reshape(npsum(H, axis=0), (3, 3), 'F')

        # Negative definite
        val, u = eig(hes)
        if all(val > 0):
            hes = -eye(3)
            negdef = 0
        elif any(val > 0):
            negdef = 0
            val = minimum(val, max(val[val < 0]) / big)
            hes = u @ diagflat(val) @ u.T
        else:
            negdef = 1

        # Steepest Ascent or Newton
        if any(a == low) or any(a == high):
            da = -((gra @ gra.T) / (gra @ hes @ gra.T)) * gra
        else:
            da = -gra.dot(pinv(hes))

        # Termination criterion
        term = da @ gra.T
        if ((term < tol1) and negdef):
            break

        # If you are on the boundary and want to get out, slide along
        da[(a == low) & (da < 0)] = zeros(da[(a == low) & (da < 0)].shape)
        da[(a == high) & (da > 0)] = zeros(da[(a == high) & (da > 0)].shape)

        # If you are stuck in a corner, terminate too
        if all(da == 0):
            break

        # Go no further than next boundary
        hit = r_[(low[da != 0] - a[da != 0]) / da[da != 0],
                 (high[da != 0] - a[da != 0]) / da[da != 0]]
        hit = hit[hit > 0]
        da = min(r_[hit, 1]) * da

        # Step search
        best = 0
        newa = a + gold**(best - 1) * da
        if (any(newa < low) or any(newa > high)):
            left = -np.inf
        else:
            h = filter([0, newa[1]], [1, -newa[2]], y * (df - 2) / df, zi=array([s * (df - 2) / df]))[0] \
                + filter([0, newa[0]], [1, -newa[2]], ones(t))
            d = h1 * h2 - h**2
            z = h2 * y1 + h1 * y2 - 2 * h * y
            if any(d <= 0) or any(1 + z / d / df <= 0):
                left = -np.inf
            else:
                left = -sum(log(d) + (2 + df) * log(1 + z / d / df)) / 2

        newa = a + gold**best * da
        if (any(newa < low) or any(newa > high)):
            center = -np.inf
        else:
            h = filter([0, newa[1]], [1, -newa[2]], y * (df - 2) / df, zi=array([s * (df - 2) / df]))[0] \
                + filter([0, newa[0]], [1, -newa[2]], ones(t))
            d = h1 * h2 - h**2
            z = h2 * y1 + h1 * y2 - 2 * h * y
            if any(d <= 0) or any(1 + z / d / df <= 0):
                center = -np.inf
            else:
                center = -sum(log(d) + (2 + df) * log(1 + z / d / df)) / 2

        newa = a + gold**(best + 1) * da
        if (any(newa < low) or any(newa > high)):
            right = -np.inf
        else:
            h = filter([0, newa[1]], [1, -newa[2]], y * (df - 2) / df, zi=array([s * (df - 2) / df]))[0] \
                + filter([0, newa[0]], [1, -newa[2]], ones(t))
            d = h1 * h2 - h**2
            z = h2 * y1 + h1 * y2 - 2 * h * y
            if any(d <= 0) or any(1 + z / d / df <= 0):
                right = -np.inf
            else:
                right = -sum(log(d) + (2 + df) * log(1 + z / d / df)) / 2

        if all(like > array([left, center, right])) or all(
                left > array([center, right])):
            while True:
                best = best - 1
                center = left
                newa = a + gold**(best - 1) * da
                if (any(newa < low) or any(newa > high)):
                    left = -np.inf
                else:
                    h = filter([0, newa[1]], [1, -newa[2]], y * (df - 2) / df, zi=array([s * (df - 2) / df]))[0] \
                        + filter([0, newa[0]], [1, -newa[2]], ones(t))
                    d = h1 * h2 - h**2
                    z = h2 * y1 + h1 * y2 - 2 * h * y
                    if any(d <= 0) or any(1 + z / d / df <= 0):
                        left = -np.inf
                    else:
                        left = -sum(log(d) +
                                    (2 + df) * log(1 + z / d / df)) / 2
                if all(center >= [like, left]):
                    break

        elif all(right > array([left, center])):
            while True:
                best = best + 1
                center = right
                newa = a + gold**(best + 1) * da
                if (any(newa < low) or any(newa > high)):
                    right = -np.inf
                else:
                    h = filter([0, newa[1]], [1, -newa[2]], y * (df - 2) / df, zi=array([s * (df - 2) / df]))[0] \
                        + filter([0, newa[0]], [1, -newa[2]], ones(t))
                    d = h1 * h2 - h**2
                    z = h2 * y1 + h1 * y2 - 2 * h * y
                    if any(d <= 0) or any(1 + z / d / df <= 0):
                        right = -np.inf
                    else:
                        right = -npsum(
                            log(d) + (2 + df) * log(1 + z / d / df)) / 2
                if center > right:
                    break
    q = a

    return q
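
The filter calls above appear to be scipy.signal.lfilter under a local alias; with coefficients ([0, k], [1, -b]) the filter realizes the one-lag recursion s[t] = k*x[t-1] + b*s[t-1], which is exactly the GARCH(1,1) conditional-variance update split into its shock part and its constant part. A small self-contained check of that reading:

import numpy as np
from scipy.signal import lfilter

# GARCH(1,1) variance recursion: h[t] = c + a * x[t-1] + b * h[t-1]
c, a, b = 0.1, 0.2, 0.7
x = np.random.default_rng(0).standard_normal(500) ** 2  # squared shocks

# split into shock part and constant part, as in garch2f8 above
h_lfilter = (lfilter([0, a], [1, -b], x)
             + lfilter([0, c], [1, -b], np.ones(len(x))))

# the same recursion written out explicitly
h_loop = np.zeros(len(x))
for t in range(1, len(x)):
    h_loop[t] = c + a * x[t - 1] + b * h_loop[t - 1]

print(np.allclose(h_lfilter, h_loop))  # True
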
Example #8
    def predict_proba(self, X):
        """Predict probabilities of label assignments for X

        Parameters
        ----------
        X : numpy.ndarray or scipy.sparse.csc_matrix
            input features of shape :code:`(n_samples, n_features)`

        Returns
        -------
        array of arrays of float
            matrix with label assignment probabilities of shape
            :code:`(n_samples, n_labels)`
        """
        # FIXME: we should support dense matrices natively
        if isinstance(X, numpy.matrix):
            X = numpy.asarray(X)
        if issparse(X):
            if X.getnnz() == 0:
                return
        elif len(X) == 0:
            return

        is_matrix = int(len(X[0].shape) != 1)
        X = _normalize_input_space(X)

        all_ranks = []
        neuron_vectors = [n1.vc for n1 in self.neurons]
        if any(map(issparse, neuron_vectors)):
            all_neurons = scipy.sparse.vstack(neuron_vectors)
            # can't add a constant to a sparse matrix in scipy
            all_neurons_sum = all_neurons.sum(1).A
        else:
            all_neurons = numpy.vstack(neuron_vectors)
            all_neurons_sum = all_neurons.sum(1)

        all_neurons_sum += self._alpha

        for row_number, input_vector in enumerate(X):
            fc = _concatenate_with_negation(input_vector)

            if issparse(fc):
                activity = (fc.minimum(all_neurons).sum(1) /
                            all_neurons_sum).squeeze().tolist()
            else:
                activity = (umath.minimum(fc, all_neurons).sum(1) /
                            all_neurons_sum).squeeze().tolist()

            if is_matrix:
                activity = activity[0]

            # be very fast
            sorted_activity = numpy.argsort(activity)[::-1]
            winner = sorted_activity[0]
            activity_difference = activity[winner] - activity[
                sorted_activity[-1]]
            largest_activity = 1
            par_t = self.threshold

            for i in range(1, len(self.neurons)):
                activity_change = (
                    activity[winner] -
                    activity[sorted_activity[i]]) / activity[winner]
                if activity_change > par_t * activity_difference:
                    break

                largest_activity += 1

            rbsum = sum(
                [activity[k] for k in sorted_activity[0:largest_activity]])
            rank = activity[winner] * self.neurons[winner].label
            activated = []
            activity_among_activated = []
            activated.append(winner)
            activity_among_activated.append(activity[winner])

            for i in range(1, largest_activity):
                rank += activity[sorted_activity[i]] * self.neurons[
                    sorted_activity[i]].label
                activated.append(sorted_activity[i])
                activity_among_activated.append(activity[sorted_activity[i]])

            rank /= rbsum
            all_ranks.append(rank)

        return numpy.array(numpy.matrix(all_ranks))
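
The activation computed in predict_proba is the fuzzy-ART choice function: inputs are complement-coded (the _concatenate_with_negation helper appends 1 - x), and a neuron's response is the L1 norm of the element-wise minimum of input and prototype, normalized by the prototype's norm for ranking, or by the input's norm for the vigilance test in fit (Example #6). A plain-numpy restatement; complement_code below is a stand-in for the module's helper, and the constants mirror the alpha and vigilance used above.

import numpy

def complement_code(x):
    # what _concatenate_with_negation does for a dense vector
    return numpy.concatenate((x, 1 - x))

x = numpy.array([0.2, 0.7])
prototypes = numpy.vstack([complement_code(numpy.array([0.1, 0.8])),
                           complement_code(numpy.array([0.9, 0.1]))])

fc = complement_code(x)
overlap = numpy.minimum(fc, prototypes).sum(axis=1)    # |min(fc, w)|_1
activity = overlap / (prototypes.sum(axis=1) + 1e-13)  # choice, alpha as above
match = overlap / fc.sum()                             # what fit checks

print(activity)      # [0.9  0.35]: x is far closer to prototype 0
print(match > 0.85)  # [ True False]: only neuron 0 passes vigilance 0.85
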
Example #9
    def predict_proba(self, X):
        result = []
        if len(X) == 0:
            return
        if len(X[0].shape) == 1:
            ismatrix = 0
        else:
            ismatrix = 1
        xma = X.max()
        xmi = X.min()
        if xma < 0 or xma > 1 or xmi < 0 or xmi > 1:
            X = numpy.multiply(X - xmi, 1 / (xma - xmi))
        ones = numpy.ones(X[0].shape)
        n1s = [0] * len(self.neurons)
        allranks = []
        neuronsactivated = []

        allneu = numpy.vstack([n1.vc for n1 in self.neurons])
        allneusum = allneu.sum(1) + self.alpha

        import time
        time1 = time.time()
        for i1, f1 in enumerate(X):
            if self.debug == 1:
                print(i1, end=' ')
            if (i1 % 10) + 1 == 10:
                print(i1, time.time() - time1)
                time1 = time.time()

            if scipy.sparse.issparse(f1):

                f1 = f1.todense()
            fc = numpy.concatenate((f1, ones - f1), ismatrix)
            activity = (umath.minimum(fc, allneu).sum(1) /
                        allneusum).squeeze().tolist()
            if ismatrix == 1:
                activity = activity[0]

            # be very fast
            sortedact = numpy.argsort(activity)[::-1]

            winner = sortedact[0]
            diff_act = activity[winner] - activity[sortedact[-1]]

            largest_activ = 1

            par_t = self.threshold
            for i in range(1, len(self.neurons)):
                activ_change = (activity[winner] -
                                activity[sortedact[i]]) / activity[winner]
                if activ_change > par_t * diff_act:
                    break

                largest_activ += 1

            rbsum = sum([activity[k] for k in sortedact[0:largest_activ]])

            rank = activity[winner] * self.neurons[winner].label
            actives = []
            activity_actives = []
            actives.append(winner)
            activity_actives.append(activity[winner])
            for i in range(1, largest_activ):
                rank += activity[sortedact[i]] * self.neurons[
                    sortedact[i]].label
                actives.append(sortedact[i])
                activity_actives.append(activity[sortedact[i]])
            rank /= rbsum
            allranks.append(rank)

        return numpy.array(numpy.matrix(allranks))
Example #10
    def fit(self, X, y):

        labdict = {}
        if len(X[0].shape) == 1:
            ismatrix = 0
        else:
            ismatrix = 1
        xma = X.max()
        xmi = X.min()
        if xma < 0 or xma > 1 or xmi < 0 or xmi > 1:
            X = numpy.multiply(X - xmi, 1 / (xma - xmi))

        if len(self.neurons) == 0:
            ones = numpy.ones(X[0].shape)
            self.neurons.append(
                Neuron(numpy.concatenate((X[0], ones - X[0]), ismatrix), y[0]))
            startc = 1
            labdict[y[0].nonzero()[0].tobytes()] = [0]
        else:
            startc = 0
        newlabel = 0
        import time
        time1 = time.time()
        ones = numpy.ones(X[0].shape)
        for i1, f1 in enumerate(X[startc:], startc):

            if i1 % 1000 == 0:
                print(i1, X.shape[0], len(self.neurons), newlabel,
                      "time ", time.time() - time1)
                time1 = time.time()
            found = 0
            if scipy.sparse.issparse(f1):
                f1 = f1.todense()
            fc = numpy.concatenate((f1, ones - f1), ismatrix)

            activationn = [0] * len(self.neurons)
            activationi = [0] * len(self.neurons)
            ytring = y[i1].nonzero()[0].tobytes()
            if ytring in labdict:
                fcs = fc.sum()
                for i2 in labdict[ytring]:
                    minnfs = umath.minimum(self.neurons[i2].vc, fc).sum()
                    activationi[i2] = minnfs / fcs
                    activationn[i2] = minnfs / self.neurons[i2].vc.sum()

            if numpy.max(activationn) == 0:
                newlabel += 1
                self.neurons.append(Neuron(fc, y[i1]))
                labdict.setdefault(ytring, []).append(len(self.neurons) - 1)
                continue

            inds = numpy.argsort(activationn)
            indc = numpy.where(
                numpy.array(activationi)[inds[::-1]] > self.vigilance)[0]
            if indc.shape[0] == 0:
                self.neurons.append(Neuron(fc, y[i1]))
                labdict.setdefault(ytring, []).append(len(self.neurons) - 1)
                continue

            winner = inds[::-1][indc[0]]
            self.neurons[winner].vc = umath.minimum(self.neurons[winner].vc, fc)

            labadd = numpy.zeros(y[0].shape, dtype=y[0].dtype)
            labadd[y[i1].nonzero()] = 1
            self.neurons[winner].label += labadd
Example #11
    def fit(self, X, y):
        """Fit classifier with training data

        Parameters
        ----------
        X : numpy.ndarray or scipy.sparse
            input features, can be a dense or sparse matrix of size
            :code:`(n_samples, n_features)`
        y : numpy.ndarray or scipy.sparse {0,1}
            binary indicator matrix with label assignments.

        Returns
        -------
        skmultilearn.MLARAMfast.MLARAM
            fitted instance of self
        """

        self.labels = []
        self.allneu = ""
        self.online = 1
        self.alpha = 0.0000000000001

        labdict = {}
        if len(X[0].shape) == 1:
            ismatrix = 0
        else:
            ismatrix = 1
        xma = X.max()
        xmi = X.min()
        if xma < 0 or xma > 1 or xmi < 0 or xmi > 1:
            X = numpy.multiply(X - xmi, 1 / (xma - xmi))

        if len(self.neurons) == 0:
            ones = numpy.ones(X[0].shape)
            self.neurons.append(
                Neuron(numpy.concatenate((X[0], ones - X[0]), ismatrix), y[0]))
            startc = 1
            labdict[y[0].nonzero()[0].tobytes()] = [0]
        else:
            startc = 0
        newlabel = 0
        ones = numpy.ones(X[0].shape)
        for i1, f1 in enumerate(X[startc:], startc):
            found = 0
            if scipy.sparse.issparse(f1):
                f1 = f1.todense()
            fc = numpy.concatenate((f1, ones - f1), ismatrix)

            activationn = [0] * len(self.neurons)
            activationi = [0] * len(self.neurons)
            ytring = y[i1].nonzero()[0].tobytes()
            if ytring in labdict:
                fcs = fc.sum()
                for i2 in labdict[ytring]:
                    minnfs = umath.minimum(self.neurons[i2].vc, fc).sum()
                    activationi[i2] = minnfs / fcs
                    activationn[i2] = minnfs / self.neurons[i2].vc.sum()

            if numpy.max(activationn) == 0:
                newlabel += 1
                self.neurons.append(Neuron(fc, y[i1]))
                labdict.setdefault(ytring, []).append(len(self.neurons) - 1)

                continue
            inds = numpy.argsort(activationn)

            indc = numpy.where(
                numpy.array(activationi)[inds[::-1]] > self.vigilance)[0]
            if indc.shape[0] == 0:
                self.neurons.append(Neuron(fc, y[i1]))

                labdict.setdefault(ytring, []).append(len(self.neurons) - 1)
                continue

            winner = inds[::-1][indc[0]]
            self.neurons[winner].vc = umath.minimum(self.neurons[winner].vc,
                                                    fc)

            labadd = numpy.zeros(y[0].shape, dtype=y[0].dtype)
            labadd[y[i1].nonzero()] = 1
            self.neurons[winner].label += labadd
Example #12
    def predict_proba(self, X):
        """Predict probabilities of label assignments for X

        Parameters
        ----------
        X : numpy.ndarray or scipy.sparse.csc_matrix
            input features of shape :code:`(n_samples, n_features)`

        Returns
        -------
        array of arrays of float
            matrix with label assignment probabilities of shape
            :code:`(n_samples, n_labels)`
        """
        result = []
        if len(X) == 0:
            return
        if len(X[0].shape) == 1:
            ismatrix = 0
        else:
            ismatrix = 1
        xma = X.max()
        xmi = X.min()
        if xma < 0 or xma > 1 or xmi < 0 or xmi > 1:
            X = numpy.multiply(X - xmi, 1 / (xma - xmi))
        ones = numpy.ones(X[0].shape)
        n1s = [0] * len(self.neurons)
        allranks = []
        neuronsactivated = []

        allneu = numpy.vstack([n1.vc for n1 in self.neurons])
        allneusum = allneu.sum(1) + self.alpha

        for i1, f1 in enumerate(X):
            if scipy.sparse.issparse(f1):

                f1 = f1.todense()
            fc = numpy.concatenate((f1, ones - f1), ismatrix)
            activity = (umath.minimum(fc, allneu).sum(1) /
                        allneusum).squeeze().tolist()
            if ismatrix == 1:
                activity = activity[0]

            # be very fast
            sortedact = numpy.argsort(activity)[::-1]

            winner = sortedact[0]
            diff_act = activity[winner] - activity[sortedact[-1]]

            largest_activ = 1

            par_t = self.threshold
            for i in range(1, len(self.neurons)):
                activ_change = (activity[winner] -
                                activity[sortedact[i]]) / activity[winner]
                if activ_change > par_t * diff_act:
                    break

                largest_activ += 1

            rbsum = sum([activity[k] for k in sortedact[0:largest_activ]])

            rank = activity[winner] * self.neurons[winner].label
            actives = []
            activity_actives = []
            actives.append(winner)
            activity_actives.append(activity[winner])
            for i in range(1, largest_activ):
                rank += activity[sortedact[i]] * self.neurons[
                    sortedact[i]].label
                actives.append(sortedact[i])
                activity_actives.append(activity[sortedact[i]])
            rank /= rbsum
            allranks.append(rank)

        return numpy.array(numpy.matrix(allranks))
Example #13
    def fit(self, X, y):

        labdict = {}
        if len(X[0].shape) == 1:
            ismatrix = 0
        else:
            ismatrix = 1
        xma = X.max()
        xmi = X.min()
        if xma < 0 or xma > 1 or xmi < 0 or xmi > 1:
            X = numpy.multiply(X - xmi, 1 / (xma - xmi))

        if len(self.neurons) == 0:
            ones = numpy.ones(X[0].shape)
            self.neurons.append(
                Neuron(numpy.concatenate((X[0], ones - X[0]), ismatrix), y[0]))
            startc = 1
            labdict[y[0].nonzero()[0].tobytes()] = [0]
        else:
            startc = 0
        newlabel = 0
        ones = numpy.ones(X[0].shape)
        for i1, f1 in enumerate(X[startc:], startc):
            found = 0
            if scipy.sparse.issparse(f1):
                f1 = f1.todense()
            fc = numpy.concatenate((f1, ones - f1), ismatrix)

            activationn = [0] * len(self.neurons)
            activationi = [0] * len(self.neurons)
            ytring = y[i1].nonzero()[0].tobytes()
            if ytring in labdict:
                fcs = fc.sum()
                for i2 in labdict[ytring]:
                    minnfs = umath.minimum(self.neurons[i2].vc, fc).sum()
                    activationi[i2] = minnfs / fcs
                    activationn[i2] = minnfs / self.neurons[i2].vc.sum()

            if numpy.max(activationn) == 0:
                newlabel += 1
                self.neurons.append(Neuron(fc, y[i1]))
                labdict.setdefault(ytring, []).append(len(self.neurons) - 1)

                continue
            inds = numpy.argsort(activationn)

            indc = numpy.where(
                numpy.array(activationi)[inds[::-1]] > self.vigilance)[0]
            if indc.shape[0] == 0:
                self.neurons.append(Neuron(fc, y[i1]))

                labdict.setdefault(ytring, []).append(len(self.neurons) - 1)
                continue

            winner = inds[::-1][indc[0]]
            self.neurons[winner].vc = umath.minimum(self.neurons[winner].vc,
                                                    fc)

            labadd = numpy.zeros(y[0].shape, dtype=y[0].dtype)
            labadd[y[i1].nonzero()] = 1
            self.neurons[winner].label += labadd