Example #1
def _LSSVMtrain(X, Y, kernel_dict, regulator):
    m = Y.shape[0]
    # Kernel
    if kernel_dict['type'] == 'RBF':
        K = Kernel.RBF(m, kernel_dict['gamma'])
        K.calculate(X)
    elif kernel_dict['type'] == 'LINEAR':
        K = Kernel.LINEAR(m)
        K.calculate(X)
    elif kernel_dict['type'] == 'POLY':
        K = Kernel.POLY(m, kernel_dict['c'], kernel_dict['d'])
        K.calculate(X)
    elif kernel_dict['type'] == 'TANH':
        K = Kernel.TANH(m, kernel_dict['c'], kernel_dict['d'])
        K.calculate(X)
    elif kernel_dict['type'] == 'TL1':
        K = Kernel.TL1(m, kernel_dict['rho'])
        K.calculate(X)

    H = np.multiply(np.dot(np.matrix(Y).T, np.matrix(Y)), K.kernelMat)
    M_BR = H + np.eye(m) / regulator
    #Concatenate
    L_L = np.concatenate((np.matrix(0), np.matrix(Y).T), axis=0)
    L_R = np.concatenate((np.matrix(Y), M_BR), axis=0)
    L = np.concatenate((L_L, L_R), axis=1)
    R = np.ones(m + 1)
    R[0] = 0

    #solve
    b_a = LA.solve(L, R)
    b = b_a[0]
    alpha = b_a[1:]

    #return
    return (alpha, b, K)
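A minimal usage sketch of _LSSVMtrain, assuming the Kernel module used above is importable and labels are in {-1, +1}; the toy data is illustrative only:

import numpy as np

X = np.random.randn(20, 3)                  # 20 toy samples, 3 features
Y = np.sign(np.random.randn(20))            # labels in {-1, +1}
kernel_dict = {'type': 'RBF', 'gamma': 0.5}

# regulator is the LS-SVM regularisation constant (the 1/regulator ridge
# added to the kernel block above)
alpha, b, K = _LSSVMtrain(X, Y, kernel_dict, regulator=10.0)

The decision value for a point x is then f(x) = sum_j alpha_j * y_j * k(x_j, x) + b, which is what the H = yy^T ∘ K construction above encodes.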
Example #2
def _KSVMtrain(X, Y, kernel_dict):
    m = Y.shape[0]

    if kernel_dict['type'] == 'RBF':
        K = Kernel.RBF(m, kernel_dict['gamma'])
        K.calculate(X)
    elif kernel_dict['type'] == 'LINEAR':
        K = Kernel.LINEAR(m)
        K.calculate(X)
    elif kernel_dict['type'] == 'POLY':
        K = Kernel.POLY(m)
        K.calculate(X)
    elif kernel_dict['type'] == 'TANH':
        K = Kernel.TANH(m, kernel_dict['c'], kernel_dict['d'])
        K.calculate(X)
    elif kernel_dict['type'] == 'TL1':
        K = Kernel.TL1(m, kernel_dict['rho'])
        K.calculate(X)

    p1, p2 = trans_mat(K.kernelMat)
    K.kernelMat = np.dot((p1 - p2), K.kernelMat)

    # solve for alpha and b via SVM   ???
    svm = Algorithms.SVM(X, Y, kernel_dict)

    # update alpha
    alpha = np.dot((p1 - p2), svm.alphas)
    b = svm.b

    return (alpha, b, K)
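The trans_mat call above is what makes the indefinite-kernel (IK) variant work. A minimal sketch of what such a helper could compute, assuming (this is an assumption, not the repository's actual implementation) that it returns projectors onto the positive and negative eigenspaces of the kernel matrix:

import numpy as np

def trans_mat_sketch(K_mat):
    # Hypothetical stand-in for trans_mat: eigendecompose the (possibly
    # indefinite) symmetric kernel matrix and build projectors onto the
    # positive-eigenvalue (p1) and negative-eigenvalue (p2) subspaces.
    w, V = np.linalg.eigh(K_mat)
    p1 = V @ np.diag((w > 0).astype(float)) @ V.T
    p2 = V @ np.diag((w < 0).astype(float)) @ V.T
    return p1, p2

With these projectors, (p1 - p2) @ K_mat equals V |diag(w)| V^T, a positive semidefinite "spectrum flip" of the original kernel matrix, which is why the same (p1 - p2) factor is later applied to the alphas as well.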
Example #3
    def fit(self, X, Y):
        #        print('Kernel:', kernel_dict)
        train_data = np.append(X, Y.reshape(len(Y), 1), axis=1)

        if self.databalance == 'LowSampling':
            data_maj = train_data[Y == 1]  # majority-class samples
            data_min = train_data[Y != 1]
            index = np.random.randint(len(data_maj), size=len(data_min))
            lower_data_maj = data_maj[list(index)]
            train_data = np.append(lower_data_maj, data_min, axis=0)
            X = train_data[:, :-1]
            Y = train_data[:, -1]
            self.Y = Y

        elif self.databalance == 'UpSampling':
            X, Y = SVMSMOTE(random_state=42).fit_sample(train_data[:, :-1],\
                                       np.asarray(train_data[:, -1]))
            self.Y = Y

        else:
            self.Y = Y

        m = Y.shape[0]

        # Kernel
        if self.kernel_dict['type'] == 'RBF':
            K = Kernel.RBF(m, self.kernel_dict['sigma'])
        elif self.kernel_dict['type'] == 'LINEAR':
            K = Kernel.LINEAR(m)
        elif self.kernel_dict['type'] == 'POLY':
            K = Kernel.POLY(m, self.kernel_dict['d'])

        K.calculate(X)

        tmp1 = np.hstack((np.ones((1, 2 * m)), [[0]]))
        M_BR = K.kernelMat + np.eye(m) / (self.C * self.m_value)
        tmp2 = np.hstack((M_BR, K.kernelMat, np.ones((m, 1))))
        M_BL = K.kernelMat + np.eye(m) / (self.C * (1 - self.m_value))
        tmp3 = np.hstack((K.kernelMat, M_BL, np.ones((m, 1))))

        L = np.vstack((tmp1, tmp2, tmp3))
        R = np.ones(2 * m + 1)
        R[0] = 0
        R[m + 1:] = -1
        # solve

        solution = LA.solve(L, R)
        b = solution[-1]
        alpha = solution[:m]
        beta = solution[m:2 * m]
        print('b', b)
        #        self.gamma = gamma
        self.beta = beta
        self.alpha = alpha
        self.b = b
        self.K = K
        self.kernelMat = K.kernelMat
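For reference, the (2m+1) x (2m+1) system assembled above has the block structure below (a direct reading of the code, with mv standing for self.m_value):

#   [ 1^T               1^T                 0 ]   [ alpha ]   [  0 ]
#   [ K + I/(C*mv)       K                  1 ] * [ beta  ] = [  1 ]
#   [ K                  K + I/(C*(1-mv))   1 ]   [   b   ]   [ -1 ]
#
# so solution[:m] holds alpha, solution[m:2*m] holds beta, and
# solution[-1] is the bias b.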
Example #4
    def fit(self, X, Y):
        #        print('Kernel:', self.kernel_dict)
        train_data = np.append(X, Y.reshape(len(Y), 1), axis=1)

        if self.databalance == 'LowSampling':
            data_maj = train_data[Y == 1]  # majority-class samples
            data_min = train_data[Y != 1]
            index = np.random.randint(len(data_maj), size=len(data_min))
            lower_data_maj = data_maj[list(index)]
            train_data = np.append(lower_data_maj, data_min, axis=0)
            X = train_data[:, :-1]
            Y = train_data[:, -1]
            self.Y = Y

        elif self.databalance == 'UpSampling':
            X, Y = SVMSMOTE(random_state=42).fit_sample(train_data[:, :-1],\
                                       np.asarray(train_data[:, -1]))
            self.Y = Y

        else:
            X = X
            Y = Y
            self.Y = Y

        m = len(Y)

        # Kernel
        if self.kernel_dict['type'] == 'RBF':
            K = Kernel.RBF(m, self.kernel_dict['sigma'])
            K.calculate(X)
        elif self.kernel_dict['type'] == 'LINEAR':
            K = Kernel.LINEAR(m)
            K.calculate(X)
        elif self.kernel_dict['type'] == 'POLY':
            K = Kernel.POLY(m, self.kernel_dict['d'])
            K.calculate(X)

        H = np.multiply(np.dot(np.matrix(Y).T, np.matrix(Y)), K.kernelMat)
        M_BR = H + np.eye(m) / (self.C)
        # Concatenate
        L_L = np.concatenate((np.matrix(0), np.matrix(Y).T), axis=0)
        L_R = np.concatenate((np.matrix(Y), M_BR), axis=0)
        L = np.concatenate((L_L, L_R), axis=1)
        R = np.ones(m + 1)
        R[0] = 0
        # solve
        b_a = LA.solve(L, R)
        b = b_a[0]
        alpha = b_a[1:]

        e = alpha / self.C

        self.alpha = alpha
        self.b = b
        self.K = K
        self.kernelMat = K.kernelMat

        return self.alpha, self.b, e
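In the LS-SVM dual each multiplier satisfies alpha_i = C * e_i, so the line e = alpha / self.C recovers the per-sample error (slack) values. A hedged usage sketch (FLSSVM is only a placeholder for whatever class this fit method belongs to):

# model = FLSSVM(kernel_dict={'type': 'RBF', 'sigma': 0.5}, C=10)  # hypothetical constructor
# alpha, b, e = model.fit(X_train, Y_train)
# large |e[i]| flags the training points the fitted model explains worst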
Example #5
    def train(self, C=[0.01, 1, 10, 100], tol=1e-3):

        m = self.Y.shape[0]

        A = [0] * 10
        B = [0] * 10

        indices = numpy.random.permutation(
            self.X.shape[0])  # shape[0] is the length of axis 0, i.e. the number of training samples
        rand_data_x = self.X[indices]
        rand_data_y = self.Y[indices]  # rand_data_y holds the labels

        l = int(len(indices) / 10)

        for i in range(9):
            A[i] = rand_data_x[i * l:i * l + l]
            B[i] = rand_data_y[i * l:i * l + l]

        A[9] = rand_data_x[9 * l:]
        B[9] = rand_data_y[9 * l:]
        #        '''
        #        X_num=self.X.shape[0]
        #        train_index=range(X_num)
        #        test_size=int(X_num*0.1)+1
        #        for i in range(9):
        #            test_index=[]
        #            for j in range(test_size):
        #                randomIndex=int(numpy.random.uniform(0,len(train_index)))
        #                test_index.append(train_index[randomIndex])
        #                #del train_index[randomIndex]
        #            A[i]=self.X[test_index,:]
        #            B[i]=self.Y[test_index,:]
        #        A[9]=self.X.ix_[train_index]
        #        B[9]=self.Y.ix_[train_index]
        #        '''

        acc_best = 0
        C_best = None
        avg_acc = 0
        #        gamma_best = None
        for CVal in C:
            # for gammaVal in gamma:
            avg_acc = 0  # reset the averaged accuracy for each candidate C
            for i in range(10):
                X_test = A[i]
                Y_test = B[i]

                # X_train = None
                # Y_train = None

                #model= SMO.SMO_Model(X_train, Y_train, CVal,  kernel,gammaVal, tol=1e-3, eps=1e-3)
                #output_model=SMO.SMO(model)

                # compute the decision function from output_model's parameters -> derive the accuracy
                #acc = _evaluate(output_model)

                X_train = numpy.concatenate([
                    A[(i + 1) % 10], A[(i + 2) % 10], A[(i + 3) % 10],
                    A[(i + 4) % 10], A[(i + 5) % 10], A[(i + 6) % 10],
                    A[(i + 7) % 10], A[(i + 8) % 10], A[(i + 9) % 10]
                ],
                                            axis=0)
                Y_train = numpy.concatenate([
                    B[(i + 1) % 10], B[(i + 2) % 10], B[(i + 3) % 10],
                    B[(i + 4) % 10], B[(i + 5) % 10], B[(i + 6) % 10],
                    B[(i + 7) % 10], B[(i + 8) % 10], B[(i + 9) % 10]
                ],
                                            axis=0)

                #                SMO.GG = gammaVal
                # calculate Kernel Matrix then pass it to SMO.
                if self.IK:
                    if self.kernel_dict['type'] == 'TANH':
                        K = Kernel.TANH(X_train.shape[0],
                                        self.kernel_dict['c'],
                                        self.kernel_dict['d'])
                        K.calculate(X_train)
                    elif self.kernel_dict['type'] == 'TL1':
                        K = Kernel.TL1(X_train.shape[0],
                                       self.kernel_dict['rho'])
                        K.calculate(X_train)

                    p1, p2 = trans_mat(K.kernelMat)
                    K.kernelMat = np.dot((p1 - p2), K.kernelMat)

                if self.kernel_dict['type'] == 'RBF':
                    K = Kernel.RBF(X_train.shape[0], self.kernel_dict['gamma'])
                    K.calculate(X_train)
                elif self.kernel_dict['type'] == 'LINEAR':
                    K = Kernel.LINEAR(X_train.shape[0])
                    K.calculate(X_train)
                elif self.kernel_dict['type'] == 'POLY':
                    K = Kernel.POLY(X_train.shape[0], self.kernel_dict['c'],
                                    self.kernel_dict['d'])
                    K.calculate(X_train)
                elif self.kernel_dict['type'] == 'TANH':
                    K = Kernel.TANH(X_train.shape[0], self.kernel_dict['c'],
                                    self.kernel_dict['d'])
                    K.calculate(X_train)
                elif self.kernel_dict['type'] == 'TL1':
                    K = Kernel.TL1(X_train.shape[0], self.kernel_dict['rho'])
                    K.calculate(X_train)

                model = SMO.SMO_Model(X_train,
                                      Y_train,
                                      CVal,
                                      K,
                                      tol=1e-3,
                                      eps=1e-3)

                output_model = SMO.SMO(model)

                #IK
                if self.IK:
                    output_model.alphas = np.dot((p1 - p2),
                                                 output_model.alphas)

                acc = SMO._evaluate(output_model, X_test, Y_test)

                avg_acc = avg_acc + acc / 10

            # compare after averaging accuracy over all ten folds
            if avg_acc > acc_best:
                acc_best = avg_acc
                # update the best C (and gamma, if it were searched)
                C_best = CVal
#                    gamma_best =gammaVal
#                    self.gamma = gamma_best

# final training pass on the full data
#        SMO.GG = gamma_best

# IK
        if self.IK:
            if self.kernel_dict['type'] == 'TANH':
                K = Kernel.TANH(self.X.shape[0], self.kernel_dict['c'],
                                self.kernel_dict['d'])
                K.calculate(self.X)
            elif self.kernel_dict['type'] == 'TL1':
                K = Kernel.TL1(self.X.shape[0], self.kernel_dict['rho'])
                K.calculate(self.X)

            p1, p2 = trans_mat(K.kernelMat)
            K.kernelMat = np.dot((p1 - p2), K.kernelMat)

        if self.kernel_dict['type'] == 'RBF':
            K = Kernel.RBF(self.X.shape[0], self.kernel_dict['gamma'])
            K.calculate(self.X)
        elif self.kernel_dict['type'] == 'LINEAR':
            K = Kernel.LINEAR(self.X.shape[0])
            K.calculate(self.X)
        elif self.kernel_dict['type'] == 'POLY':
            K = Kernel.POLY(self.X.shape[0], self.kernel_dict['c'],
                            self.kernel_dict['d'])
            K.calculate(self.X)
        elif self.kernel_dict['type'] == 'TANH':
            K = Kernel.TANH(self.X.shape[0], self.kernel_dict['c'],
                            self.kernel_dict['d'])
            K.calculate(self.X)
        elif self.kernel_dict['type'] == 'TL1':
            K = Kernel.TL1(self.X.shape[0], self.kernel_dict['rho'])
            K.calculate(self.X)

        SVM_model = SMO.SMO(
            SMO.SMO_Model(self.X, self.Y, C_best, K, tol=1e-3, eps=1e-3))
        # pass the fitted parameters on to the final SVM object

        if self.IK:
            SVM_model.alphas = np.dot((p1 - p2), SVM_model.alphas)

        self.X = SVM_model.X
        self.Y = SVM_model.y
        self.kernel_dict = SVM_model.kernel
        self.alphas = SVM_model.alphas
        self.b = SVM_model.b

        # C_best = C
        # gamma_best =gamma

        # (w,b) = SMO(X_train,Y_train,C_best,gamma_best,kernal,tol=1e-3)
        # self.w = w
        # self.b = b
        return None
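The manual ten-fold split at the top of train can be written more compactly; a sketch using only numpy (x and y stand in for self.X and self.Y):

import numpy as np

indices = np.random.permutation(len(x))
folds_X = np.array_split(x[indices], 10)   # ten roughly equal folds
folds_Y = np.array_split(y[indices], 10)

for i in range(10):
    X_test, Y_test = folds_X[i], folds_Y[i]
    X_train = np.concatenate([folds_X[j] for j in range(10) if j != i])
    Y_train = np.concatenate([folds_Y[j] for j in range(10) if j != i])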
Example #6
    def _mvalue(self, X, y):
#        print('fuzzy value:', self.fuzzyvalue )
        train_data = np.append(X,y.reshape(len(y),1),axis=1)
        
        if self.databalance =='LowSampling':
            data_maj = train_data[y == 1]  # majority-class samples
            data_min =  train_data[y != 1] 
            index = np.random.randint(len(data_maj), size=len(data_min)) 
            lower_data_maj = data_maj[list(index)]
            train_data = np.append(lower_data_maj,data_min,axis=0)
            X = train_data[:,:-1]
            y = train_data[:,-1]
        
        elif self.databalance =='UpSampling':
            X, y = SVMSMOTE(random_state=42).fit_sample(train_data[:, :-1],\
                                       np.asarray(train_data[:, -1]))
            
        else:
            pass  # keep X and y as given
        
        if self.fuzzyvalue['type'] == 'Cen':
            
            x_1 = X[y==1]
            x_0 = X[y==-1]
            x_centre_1 = np.mean(x_1, axis=0)
            x_centre_0 = np.mean(x_0, axis=0)
            max_distance_1 = 0
            max_distance_0 = 0
            for i in range(len(x_1)):
                distance = LA.norm(x_centre_1 - x_1[i,:])
                if max_distance_1 < distance:
                    max_distance_1 = distance
            for i in range(len(x_0)):
                distance = LA.norm(x_centre_0 - x_0[i,:])
                if max_distance_0 < distance:
                    max_distance_0 = distance
        
            memership = []
            if self.fuzzyvalue['function'] == 'Lin':
                for i in range(len(y)):
                    if y[i]  == 1:
                        memership.append((1 - LA.norm(X[i]-x_centre_1)/(max_distance_1+0.0001))* self.r_max)
                    if y[i]  == -1:
                        memership.append((1 - LA.norm(X[i]-x_centre_0)/(max_distance_0+0.0001))*self.r_min)
                        
            elif self.fuzzyvalue['function'] == 'Exp':
                for i in range(len(y)):
                    if y[i] == 1:
                        memership.append((2/(1+np.exp(LA.norm(X[i]-x_centre_1))))* self.r_max)
                    if y[i] == -1:
                        memership.append((2/(1+np.exp(LA.norm(X[i]-x_centre_0))))*self.r_min)
                        
        elif self.fuzzyvalue['type'] == 'Hyp':
            m = y.shape[0]
            C = 3
            gamma = 1
            # Kernel
        
            K = Kernel.RBF(m, gamma)
            K.calculate(X)
        
        
            P = cvxopt.matrix(np.outer(y, y) * K.kernelMat)
            q = cvxopt.matrix(np.ones(m) * -1)
            A = cvxopt.matrix(y, (1, m))
            A = matrix(A, (1, m), 'd')
            b = cvxopt.matrix(0.0)
            
            tmp1 = np.diag(np.ones(m) * -1)
            tmp2 = np.identity(m)
            G = cvxopt.matrix(np.vstack((tmp1, tmp2)))
            
            tmp1 = np.zeros(m)
            tmp2 = np.ones(m) * C
            h = cvxopt.matrix(np.hstack((tmp1, tmp2)))
            
            solution = cvxopt.solvers.qp(P, q, G, h, A, b)
        
            alpha = np.ravel(solution['x'])
            b = 0
            sum_y = sum(y)
            A = np.multiply(alpha, y)
            b = (sum_y - np.sum(K.kernelMat * A.reshape(len(A),1)))/len(alpha)
                
            K.expand(X)
            A = np.multiply(alpha, y)
        
            f = b + np.sum(K.testMat * A.reshape(len(A),1),axis=0)
            
            d_hyp = abs(f*y)
        
            memership = []
            if self.fuzzyvalue['function'] == 'Lin':
                for i in range(len(y)):
                    if y[i]  == 1:
                        memership.append((1 - d_hyp[i]/(max(d_hyp)+0.0001))*self.r_max)
                    if y[i]  == -1:
                        memership.append((1 - d_hyp[i]/(max(d_hyp)+0.0001))*self.r_min)
                        
            elif self.fuzzyvalue['function'] == 'Exp':
                for i in range(len(y)):
                    if y[i] == 1:
                        memership.append((2/(1+ np.exp(d_hyp[i])))* self.r_max)
                    if y[i] == -1:
                        memership.append((2/(1+ np.exp(d_hyp[i])))*self.r_min)
                
            
                        
        self.m_value = np.array(memership)
        return self.m_value
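The centre-based ('Cen' type with the 'Lin' function) membership computed above can also be written in vectorized form; a sketch assuming labels in {-1, +1} and the same r_max / r_min scaling:

import numpy as np
from numpy import linalg as LA

def centre_lin_membership(X, y, r_max, r_min):
    # distance of every sample to the centre of its own class
    c_pos, c_neg = X[y == 1].mean(axis=0), X[y == -1].mean(axis=0)
    d_pos = LA.norm(X[y == 1] - c_pos, axis=1)
    d_neg = LA.norm(X[y == -1] - c_neg, axis=1)
    m_value = np.empty(len(y))
    m_value[y == 1] = (1 - d_pos / (d_pos.max() + 0.0001)) * r_max
    m_value[y == -1] = (1 - d_neg / (d_neg.max() + 0.0001)) * r_min
    return m_value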
Example #7
    def fit(self, X, Y):
#        print('Kernel:', kernel_dict)
        train_data = np.append(X,Y.reshape(len(Y),1),axis=1)
        
        if self.databalance =='LowSampling':
            data_maj = train_data[Y == 1]  # majority-class samples
            data_min =  train_data[Y != 1] 
            index = np.random.randint(len(data_maj), size=len(data_min)) 
            lower_data_maj = data_maj[list(index)]
            train_data = np.append(lower_data_maj,data_min,axis=0)
            X = train_data[:,:-1]
            Y = train_data[:,-1]
            self.Y =  Y
        
        elif self.databalance =='UpSampling':
            X, Y = SVMSMOTE(random_state=42).fit_sample(train_data[:, :-1],\
                                       np.asarray(train_data[:, -1]))
            self.Y =  Y
            
        else:
            self.Y = Y
        
        m = Y.shape[0]
      
        # Kernel
        if self.kernel_dict['type'] == 'RBF':
            K = Kernel.RBF(m, self.kernel_dict['sigma'])
            K.calculate(X)
        elif self.kernel_dict['type'] == 'LINEAR':
            K = Kernel.LINEAR(m)
            K.calculate(X)
        elif self.kernel_dict['type'] == 'POLY':
            K = Kernel.POLY(m, self.kernel_dict['d'])
            K.calculate(X)
            
        
        P = cvxopt.matrix(np.outer(Y, Y) * K.kernelMat)
        q = cvxopt.matrix(np.ones(m) * -1)
        A = cvxopt.matrix(Y, (1, m))
        A = matrix(A, (1, m), 'd')
        b = cvxopt.matrix(0.0)
        
        tmp1 = np.diag(np.ones(m) * -1)
        tmp2 = np.identity(m)
        G = cvxopt.matrix(np.vstack((tmp1, tmp2)))
        
        tmp1 = np.zeros(m)
        tmp2 = np.ones(m) * self.m_value * self.C
        
        h = cvxopt.matrix(np.hstack((tmp1, tmp2)))
        # solve QP problem
        solution = cvxopt.solvers.qp(P, q, G, h, A, b)
        
           # Lagrange multipliers 
        alpha = np.ravel(solution['x'])
 
        # support vectors: multipliers strictly between the box bounds
        sv = np.logical_and(alpha > 1e-5, alpha < self.m_value * self.C)

        alpha_sv = alpha[sv]
        X_sv = X[sv]
        Y_sv = Y[sv]

        b = 0
        sum_y = sum(Y)
        A = np.multiply(alpha, Y)
        b = (sum_y - np.sum(K.kernelMat * A.reshape(len(A),1)))/len(alpha)
        
        self.alpha = alpha
        self.alpha_sv = alpha_sv
        self.X_sv = X_sv
        self.Y_sv = Y_sv
        self.b = b
        self.K = K
        self.kernelMat = K.kernelMat
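The QP set up above is the standard soft-margin SVM dual with per-sample (fuzzy) upper bounds; in cvxopt's min (1/2) x^T P x + q^T x form it reads:

# minimize    (1/2) * alpha^T (yy^T ∘ K) alpha - 1^T alpha
# subject to  y^T alpha = 0
#             0 <= alpha_i <= m_value_i * C      for every sample i
#
# encoded above via P = outer(Y, Y) * K, q = -1, A = Y, b = 0,
# G = [-I; I] and h = [0; m_value * C].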
Example #8
    def _mvalue(self, X, y):
        #        print('fuzzy value:', self.fuzzyvalue )
        train_data = np.append(X, y.reshape(len(y), 1), axis=1)

        if self.databalance == 'LowSampling':
            data_maj = train_data[y == 1]  # majority-class samples
            data_min = train_data[y != 1]
            index = np.random.randint(len(data_maj), size=len(data_min))
            lower_data_maj = data_maj[list(index)]
            train_data = np.append(lower_data_maj, data_min, axis=0)
            X = train_data[:, :-1]
            y = train_data[:, -1]

        elif self.databalance == 'UpSampling':
            X, y = SVMSMOTE(random_state=42).fit_sample(train_data[:, :-1],\
                                       np.asarray(train_data[:, -1]))

        else:
            pass  # keep X and y as given

        if self.fuzzyvalue['type'] == 'Hyp':
            m = y.shape[0]
            C = 3
            gamma = 1
            # Kernel

            K = Kernel.RBF(m, gamma)
            K.calculate(X)

            P = cvxopt.matrix(np.outer(y, y) * K.kernelMat)
            q = cvxopt.matrix(np.ones(m) * -1)
            A = cvxopt.matrix(y, (1, m))
            A = matrix(A, (1, m), 'd')
            b = cvxopt.matrix(0.0)

            tmp1 = np.diag(np.ones(m) * -1)
            tmp2 = np.identity(m)
            G = cvxopt.matrix(np.vstack((tmp1, tmp2)))

            tmp1 = np.zeros(m)
            tmp2 = np.ones(m) * C
            h = cvxopt.matrix(np.hstack((tmp1, tmp2)))

            solution = cvxopt.solvers.qp(P, q, G, h, A, b)

            alpha = np.ravel(solution['x'])
            b = 0
            sum_y = sum(y)
            A = np.multiply(alpha, y)
            b = (sum_y -
                 np.sum(K.kernelMat * A.reshape(len(A), 1))) / len(alpha)
            K.expand(X)
            A = np.multiply(alpha, y)

            f = b + np.sum(K.testMat * A.reshape(len(A), 1), axis=0)
            #            print(f)
            memership = []
            if self.fuzzyvalue['function'] == 'Linear':
                memership = (f - min(f)) / (max(f) - min(f))

            elif self.fuzzyvalue['function'] == 'Bridge':
                s_up = np.percentile(f, 75)
                s_down = np.percentile(f, 25)
                memership = np.zeros((len(f)))
                for i in range(len(f)):
                    if f[i] > s_up:
                        memership[i] = 1
                    elif f[i] <= s_down:
                        memership[i] = 0
                    else:
                        memership[i] = (f[i] - s_down) / (s_up - s_down)

            elif self.fuzzyvalue['function'] == 'Logistic':
                a = 1
                N_pos = len(y[y == 1])
                # b is the average of the N+th highest primary score and
                # the (N+ + 1)th highest primary score
                f_sorted = np.sort(f)[::-1]
                b = (f_sorted[N_pos - 1] + f_sorted[N_pos]) / 2
                memership = np.exp(a * f + b) / (np.exp(a * f + b) + 1)

            elif self.fuzzyvalue['function'] == 'Probit':
                memership = norm.cdf(f)

        self.m_value = np.array(memership)
        return self.m_value
Example #9
    def _mvalue(self, X, y):
#        print('fuzzy value:', self.fuzzyvalue )
        train_data = np.append(X,y.reshape(len(y),1),axis=1)
        
        if self.databalance =='LowSampling':
            data_maj = train_data[y == 1]  # majority-class samples
            data_min =  train_data[y != 1] 
            index = np.random.randint(len(data_maj), size=len(data_min)) 
            lower_data_maj = data_maj[list(index)]
            train_data = np.append(lower_data_maj,data_min,axis=0)
            X = train_data[:,:-1]
            y = train_data[:,-1]
        
        elif self.databalance =='UpSampling':
            X, y = SVMSMOTE(random_state=42).fit_sample(train_data[:, :-1],\
                                       np.asarray(train_data[:, -1]))
            
        else:
            pass  # keep X and y as given
        
        if self.fuzzyvalue['type'] == 'Cen':
            
            x_1 = X[y==1]
            x_0 = X[y==-1]
            x_centre_1 = np.mean(x_1, axis=0)
            x_centre_0 = np.mean(x_0, axis=0)
            max_distance_1 = 0
            max_distance_0 = 0
            for i in range(len(x_1)):
                distance = LA.norm(x_centre_1 - x_1[i,:])
                if max_distance_1 < distance:
                    max_distance_1 = distance
            for i in range(len(x_0)):
                distance = LA.norm(x_centre_0 - x_0[i,:])
                if max_distance_0 < distance:
                    max_distance_0 = distance
        
            memership = []
            if self.fuzzyvalue['function'] == 'Lin':
                for i in range(len(y)):
                    if y[i]  == 1:
                        memership.append((1 - LA.norm(X[i]-x_centre_1)/(max_distance_1+0.0001))* self.r_max)
                    if y[i]  == -1:
                        memership.append((1 - LA.norm(X[i]-x_centre_0)/(max_distance_0+0.0001))*self.r_min)
                        
            elif self.fuzzyvalue['function'] == 'Exp':
                for i in range(len(y)):
                    if y[i] == 1:
                        memership.append((2/(1+np.exp(LA.norm(X[i]-x_centre_1))))* self.r_max)
                    if y[i] == -1:
                        memership.append((2/(1+np.exp(LA.norm(X[i]-x_centre_0))))*self.r_min)
                        
        elif self.fuzzyvalue['type'] == 'Hyp':
            m = y.shape[0]
            C = 3
            gamma = 1
            # Kernel
        
            K = Kernel.RBF(m, gamma)
            K.calculate(X)
        
        
            H = np.multiply(np.dot(np.matrix(y).T, np.matrix(y)), K.kernelMat)
            M_BR = H + np.eye(m) / C
            # Concatenate
            L_L = np.concatenate((np.matrix(0), np.matrix(y).T), axis=0)
            L_R = np.concatenate((np.matrix(y), M_BR), axis=0)
            L = np.concatenate((L_L, L_R), axis=1)
            R = np.ones(m + 1)
            R[0] = 0
            # solve
            b_a = LA.solve(L, R)
            b = b_a[0]
            alpha = b_a[1:]
            
            K.expand(X)
            A = np.multiply(alpha, y)
        
            f = b + np.dot(K.testMat, A)
            
            d_hyp = abs(f*y)
        
            memership = []
            if self.fuzzyvalue['function'] == 'Lin':
                for i in range(len(y)):
                    if y[i]  == 1:
                        memership.append((1 - d_hyp[i]/(max(d_hyp)+0.0001))*self.r_max)
                    if y[i]  == -1:
                        memership.append((1 - d_hyp[i]/(max(d_hyp)+0.0001))*self.r_min)
                        
            elif self.fuzzyvalue['function'] == 'Exp':
                for i in range(len(y)):
                    if y[i] == 1:
                        memership.append((2/(1+ np.exp(d_hyp[i])))* self.r_max)
                    if y[i] == -1:
                        memership.append((2/(1+ np.exp(d_hyp[i])))*self.r_min)
                
            
                        
        self.m_value = np.array(memership)
        return self.m_value
Example #10
    def fit(self, X, Y):
#        print('Kernel:', kernel_dict)
        train_data = np.append(X,Y.reshape(len(Y),1),axis=1)
        
        if self.databalance =='LowSampling':
            data_maj = train_data[Y == 1]  # majority-class samples
            data_min =  train_data[Y != 1] 
            index = np.random.randint(len(data_maj), size=len(data_min)) 
            lower_data_maj = data_maj[list(index)]
            train_data = np.append(lower_data_maj,data_min,axis=0)
            X = train_data[:,:-1]
            Y = train_data[:,-1]
            self.Y =  Y
        
        elif self.databalance =='UpSampling':
            X, Y = SVMSMOTE(random_state=42).fit_sample(train_data[:, :-1],\
                                       np.asarray(train_data[:, -1]))
            self.Y =  Y
            
        else:
            self.Y = Y
        
        m = Y.shape[0]
      
        # Kernel
        if self.kernel_dict['type'] == 'RBF':
            K = Kernel.RBF(m, self.kernel_dict['sigma'])
        elif self.kernel_dict['type'] == 'LINEAR':
            K = Kernel.LINEAR(m)
        elif self.kernel_dict['type'] == 'POLY':
            K = Kernel.POLY(m, self.kernel_dict['d'])
            
        K.calculate(X)
            
        kernel = np.zeros((2*m, 2*m))
        kernel[:m,:m] = K.kernelMat 
        P = cvxopt.matrix(kernel)
        q = cvxopt.matrix(np.hstack((np.ones(m)*-1,np.ones(m)*-2)))
        
        A = cvxopt.matrix(np.hstack((np.ones(m),np.zeros(m))))
        A = matrix(A, (1, 2*m), 'd')
        b = cvxopt.matrix(0.0)
        
        tmp1 = np.hstack((np.identity(m),np.identity(m)))
        tmp2 = np.hstack((np.diag(np.ones(m) * -1),np.diag(np.ones(m) * -1)))
        tmp3 = np.hstack((np.zeros((m,m)),np.identity(m)))
        tmp4 = np.hstack((np.zeros((m,m)),np.diag(np.ones(m) * -1)))
        G = cvxopt.matrix(np.vstack((tmp1, tmp2, tmp3, tmp4)))
        
        tmp1 = np.zeros(m)
        tmp2 = np.ones(m) * self.m_value * self.C
        tmp3 = np.ones(m) * (1-self.m_value) * self.C
        h = cvxopt.matrix(np.hstack((tmp2, tmp1,tmp3,tmp1)))
        # solve QP problem
        solution = cvxopt.solvers.qp(P, q, G, h, A, b)

        sol = np.ravel(solution['x'])
        gamma = sol[:m]
        beta = sol[m:]
        alpha = gamma + beta
        w_phi = np.multiply(np.sum(K.kernelMat,axis=1),gamma)

        b = 0
        b = np.sum(Y-w_phi)/len(Y)       
        print('b',b)
        self.gamma = gamma
        self.beta = beta
        self.alpha = alpha
        self.b = b
        self.K = K
        self.kernelMat = K.kernelMat
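For the bilateral-weighted fit above, the QP has 2m variables; its structure, read directly from the matrices built in the code (with the variables stacked as [gamma; beta]):

# minimize    (1/2) * gamma^T K gamma - 1^T gamma - 2 * 1^T beta
# subject to  1^T gamma = 0
#             0 <= gamma_i + beta_i <= m_value_i * C
#             0 <= beta_i           <= (1 - m_value_i) * C
#
# and the final multipliers are recovered as alpha = gamma + beta.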