Example #1
    def process(self, data, obs_vec):
        """
        Solve optimization problem to get :math:`\\alpha`.

        :param data: Training data matrix :math:`\Phi\in\mathbb{R}^{d\\times n}`
        :type data: numpy array
        :param obs_vec: Observation vector :math:`y\in\mathbb{R}^{n}`
        :type obs_vec: numpy array
        :return: `alpha`: estimated coefficient vector :math:`\\alpha\in\mathbb{R}^{n}`
        :rtype:  numpy array
        """
        # check consistency of data
        obs_num = obs_vec.shape[0]
        data_num = data.shape[1]

        if obs_num == data_num:
            self.data = data

            # take into account both options
            if self.lam == 0:
                k_mat = kernel(data, data, self.k_type)
            else:
                nsamp = data.shape[1]
                k_mat = kernel(data, data, self.k_type) + self.lam*eye(nsamp)

            self.alpha = lstsq(k_mat, obs_vec.transpose())[0]
            return self.alpha
        else:
            print "ERROR: number of samples for data and observations must be the same"
    def predict(self, X_test):
        X_test = X_test.to_numpy()
        n = np.shape(X_test)[0]
        y_pred = np.zeros((n, 1))

        for i in tqdm(range(n)):
            x = X_test[i, ]
            #dictionary of distances between x and all x_train
            x_dist = dict()
            #building dict
            for j in range(np.shape(self.X_train)[0]):
                #computing distance^2
                x_dist[j] = kernel(self.kernel, x, x, self.sigma, self.k, self.d, self.e, self.beta) \
                            - 2 * kernel(self.kernel, self.X_train[j, ], x, self.sigma, self.k, self.d, self.e, self.beta) \
                            + kernel(self.kernel, self.X_train[j, ], self.X_train[j, ], self.sigma, self.k, self.d, self.e, self.beta)

            #sorting dict by value and getting k nearest neighbors
            x_k = sorted((value, key) for (key, value) in
                         x_dist.items())[:self.kn]  #returns a list of length k
            #getting indices of k nearest neighbors
            x_k_ind = [z[1] for z in x_k]
            #getting labels of k nearest neighbors
            y_k = self.y_train[x_k_ind, ]
            #predicting label of x(i)
            y_pred[i, ] = pd.Series(y_k).value_counts().idxmax()

        y_pred_list = [row[0] for row in y_pred]

        return (pd.Series(y_pred_list))
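
The loop above evaluates the kernel-induced distance :math:`d(x, x_j)^2 = k(x,x) - 2k(x,x_j) + k(x_j,x_j)` one training point at a time. A minimal sketch of the same computation, vectorized over all training points (assuming a Gaussian kernel and one sample per row; `bandwidth` is an illustrative parameter, not an attribute of the class above):

import numpy as np

def rkhs_sq_distances(X_train, x, bandwidth=1.0):
    # d(x, x_j)^2 = k(x,x) - 2 k(x,x_j) + k(x_j,x_j); for a Gaussian kernel
    # k(z,z) = 1, so only the cross terms vary
    k_cross = np.exp(-np.sum((X_train - x)**2, axis=1) / (2*bandwidth**2))
    return 2.0 - 2.0*k_cross

# k nearest neighbours by kernel-induced distance:
# neighbours = np.argsort(rkhs_sq_distances(X_train, x))[:kn]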
Example #3
    def mmd(self, data2):
        """
        Compute the maximum mean discrepancy between 
        :math:`D_1` and :math:`D_2`, i.e. :math:`\\|\\mu_{{D_1}} - \\mu_{{D_2}}\\|_{\mathcal{H}}`

        :param data2: data matrix :math:`D_2\in\mathbb{R}^{d \\times n}`, where 
                     :math:`d` is the dimensionality and 
                     :math:`n` is the number of training samples
        :type data: numpy array
        :return: diffused data
        :rtype:  numpy array
        """        
        # parse data and ensure it meets requirements
        dim_data2 = data2.shape[0]
        ntest = data2.shape[1]                

        if dim_data2 != self.dim:
          raise Exception("ERROR: dimensionality of data must be consistent with training set.")

        # now, compute MMD equation, using in-place computations 
        kmat = kernel(data2, data2, self.kernel)
        ktestsum = npsum(kmat)/pow(ntest, 2)

        kmat = kernel(self.data, self.data, self.kernel)
        ktrainsum = npsum(kmat)/pow(self.nsamp, 2)

        kmat = kernel(self.data, data2, self.kernel)
        kcrossum = npsum(kmat)/(self.nsamp*ntest)

        mmdcomputed = ktestsum + ktrainsum - 2*kcrossum
 
        return mmdcomputed
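
A minimal self-contained sketch of the same biased MMD estimator, assuming a Gaussian kernel and the :math:`d \\times n` column-per-sample layout used in the docstring:

import numpy as np

def gaussian_gram(X, Y, bandwidth=1.0):
    # X: d x n, Y: d x m -> n x m Gram matrix
    sq = (np.sum(X**2, axis=0)[:, None] + np.sum(Y**2, axis=0)[None, :]
          - 2*X.T @ Y)
    return np.exp(-sq / (2*bandwidth**2))

def mmd_sq(X, Y, bandwidth=1.0):
    # biased estimate of ||mu_X - mu_Y||_H^2
    n, m = X.shape[1], Y.shape[1]
    return (gaussian_gram(X, X, bandwidth).sum()/n**2
            + gaussian_gram(Y, Y, bandwidth).sum()/m**2
            - 2*gaussian_gram(X, Y, bandwidth).sum()/(n*m))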
Example #4
    def predict(self, te_data):
        """
        Use GP model to compute regression values at data points, as well as posterior variance.

        :param te_data: Testing data matrix :math:`D\in\mathbb{R}^{d\\times m}`
        :type te_data: numpy array
        :return: `f`: estimated output values at data :math:`\hat{y}\in\mathbb{R}^{1 \\times m}`
        :rtype:  numpy array
        :return: `var_f`: posterior variance at data :math:`\hat{\sigma}^2\in\mathbb{R}^{1 \\times m}`
        :rtype:  numpy array
        """
        k_test = kernel(self.data, te_data, self.k_type)
        dim_test = te_data.shape[0]
        ntest = te_data.shape[1]

        # make sure dimensionality is consistent
        if dim_test != self.data_dim:
            raise Exception("Dimensions of training and test data must be the same")
        else:
            # compute posterior mean
            f = dot(k_test,self.mean_vec)

            # compute posterior variance
            var_m = lstsq(self.lmat, k_test)[0]
            var_f = kernel(te_data, te_data, self.k_type) - dot(var_m.transpose(), var_m)

        return f, var_f
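
The mean/variance computation above matches Algorithm 2.1 of Rasmussen & Williams. A sketch, assuming `L` is the lower Cholesky factor of :math:`K + \sigma^2 I` and `alpha` solves :math:`(K + \sigma^2 I)\\alpha = y` (illustrative names, not the class attributes):

import numpy as np
from scipy.linalg import solve_triangular

def gp_posterior(K_star, K_ss_diag, L, alpha):
    # K_star: n x m cross-kernel matrix, K_ss_diag: length-m diagonal of k(test, test)
    f = K_star.T @ alpha                      # posterior mean
    v = solve_triangular(L, K_star, lower=True)
    var = K_ss_diag - np.sum(v*v, axis=0)     # pointwise posterior variance
    return f, var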
Example #5
    def calculate(self,alldata_par, alldata_obs, alldata_wei):
        
        no_snapshots, no_parameters = alldata_par.shape
        TEMP = np.zeros([no_snapshots,no_snapshots])
        for i in range(no_parameters):
            v = alldata_par[:,i]
            Q = npm.repmat(v,no_snapshots,1)
            T = np.transpose(Q)
            TEMP = TEMP + np.power(Q-T,2)
        np.sqrt(TEMP,out=TEMP) # distances between all points
        if self.no_keep>0:
            MAX = 2*np.max(TEMP)
            M = TEMP+MAX*np.eye(no_snapshots)
            to_keep = np.ones(no_snapshots,dtype=bool)
            for i in range(no_snapshots - self.no_keep):
                argmin = np.argmin(M)
                xx = argmin // no_snapshots
                if self.expensive>0:
                    S=sum(M)
                    yy = argmin % no_snapshots
                    M[xx,yy]=MAX
                    M[yy,xx]=MAX
                    if S[yy]<S[xx]:
                        yy=xx
                M[xx,:]=MAX
                M[:,xx]=MAX
                to_keep[xx]=False
            TEMP = TEMP[to_keep,:]
            TEMP = TEMP[:,to_keep]
            alldata_par = alldata_par[to_keep,:]
            alldata_obs = alldata_obs[to_keep,:]
            try:
                to_keep=np.append(to_keep,np.ones(no_parameters+1,dtype=bool))
                self.initial_iteration = self.initial_iteration[to_keep]
            except Exception:
                print("Exception!")
        kernel.kernel(TEMP,self.type_kernel)
        P = np.ones([no_snapshots,1]) # only constant polynomials
    #    print(P.shape, no_evaluations, alldata_par.shape, TEMP.shape)
        P = np.append(alldata_par,P,axis=1) # plus linear polynomials
        no_polynomials = P.shape[1]
        TEMP = np.append(TEMP,P,axis=1)
        TEMP2 = np.append(np.transpose(P),np.zeros([no_polynomials,no_polynomials]),axis=1)
        TEMP2 = np.vstack([TEMP,TEMP2])
        RHS = np.vstack([ alldata_obs, np.zeros([no_polynomials,1]) ])
    #    print("Condition_number:",np.linalg.cond(TEMP2))
#        print("DEBUG2:", TEMP2.shape, np.linalg.matrix_rank(TEMP2), RHS.shape)
        if self.type_solver == 0:
            SOL=np.linalg.solve(TEMP2,RHS)
        else:
            SOL_=splin.minres(TEMP2,RHS,x0=self.initial_iteration,tol=pow(10,-self.type_solver),show=False)
            SOL=SOL_[0]
    #    RES=RHS-np.reshape(np.matmul(TEMP2,SOL),(no_evaluations+no_polynomials,1))
    #    print("Residual norm:",np.linalg.norm(RES), "RHSnorm:", np.linalg.norm(RHS))
        return SOL, no_snapshots, alldata_par, alldata_obs, alldata_wei, TEMP2, RHS
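
The block system assembled above is the standard saddle-point formulation for RBF interpolation with a linear polynomial tail: [[K, P], [P.T, 0]] [lambda; c] = [y; 0]. A compact sketch of the same construction (Gaussian RBF as an illustrative kernel choice):

import numpy as np

def rbf_fit(X, y, eps=1.0):
    # X: n x d sample points, y: n x 1 observations
    n, d = X.shape
    r = np.sqrt(((X[:, None, :] - X[None, :, :])**2).sum(-1))  # pairwise distances
    K = np.exp(-(eps*r)**2)               # Gaussian RBF (illustrative choice)
    P = np.hstack([X, np.ones((n, 1))])   # linear + constant polynomial tail
    A = np.block([[K, P], [P.T, np.zeros((d+1, d+1))]])
    rhs = np.vstack([y, np.zeros((d+1, 1))])
    return np.linalg.solve(A, rhs)        # [rbf weights; polynomial coefficients]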
Example #6
    def train(self, X, y):
        DNA_chars = ["A", "T", "C", "G"]
        self.DNA_seq = []
        self.X_train = X
        self.y = y
        self.alpha = np.zeros(y.shape)
        n = len(y)
        self.K = np.zeros((n, n))
        if (self.kernel_name == "Linear"):
            self.K = (X @ X.T + 1)**2  # note: despite the name, this is a degree-2 polynomial Gram matrix
        else:
            for i in range(n):
                for j in range(i + 1):
                    self.K[i, j] = kernel(X[i],
                                          X[j],
                                          kernel_name=self.kernel_name,
                                          substring_size=self.substring_size,
                                          substring_all=self.substring_all,
                                          degree=self.degree,
                                          gamma=self.gamma)
                    self.K[j, i] = self.K[i, j]

        q = -self.y.reshape((-1, ))
        n = len(self.y)
        G = np.vstack((np.diag(self.y), -np.diag(self.y)))
        h = np.vstack((np.ones((n, 1)) * self.C, np.zeros((n, 1)))).reshape(
            (-1, ))
        sol = solvers.qp(matrix(self.K), matrix(q), matrix(G), matrix(h))
        self.alpha = sol['x']
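
Note the QP above is a no-bias variant: the box constraint :math:`0 \le y_i\\alpha_i \le C` is encoded through `G` and `h`, and there is no equality constraint. For comparison, a sketch of the standard soft-margin SVM dual with cvxopt (a sketch, not the class's own method):

import numpy as np
from cvxopt import matrix, solvers

def svm_dual(K, y, C=1.0):
    # min 1/2 a^T (yy^T * K) a - 1^T a   s.t. 0 <= a <= C, y^T a = 0
    n = len(y)
    P = matrix(np.outer(y, y) * K)
    q = matrix(-np.ones(n))
    G = matrix(np.vstack([-np.eye(n), np.eye(n)]))
    h = matrix(np.hstack([np.zeros(n), C*np.ones(n)]))
    A = matrix(y.reshape(1, -1).astype(float))
    b = matrix(0.0)
    sol = solvers.qp(P, q, G, h, A, b)
    return np.array(sol['x']).ravel()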
Example #7
 def __init__(self, camp_dist):
     pq.setConfigOption('background', 'w')
     self.widget = pq.PlotWidget()
     self.kernel = kernel('../data/csv/out.csv', camp_dist)
     self.kernel.pars()
     super(UI, self).__init__()
     self.initUI()
Example #8
    def _train(self, s, e):
        if s >= e:
            return
        sp = (s + e) // 2
        print("Training step : ", s,"-",sp, " vs ", sp+1,"-",e)
        first_half = []
        second_half = []
        for part in self.cat_train_dataset[s: sp + 1]:
            first_half += part
        for part in self.cat_train_dataset[sp + 1: e + 1]:
            second_half += part
        for example in first_half:
            example[-1] = 0.0
        for example in second_half:
            example[-1] = 1.0
        sub_train_data = np.concatenate([np.array(first_half), np.array(second_half)], axis=0).astype("float64")
        np.random.shuffle(sub_train_data)
        sub_train_feature = sub_train_data[:, :-1]
        sub_train_label = sub_train_data[:, -1]

        if self.nn_kernel:
            print("Training NN kernel --- START")
            kernel_function = kernel.kernel(sub_train_feature, sub_train_label, sub_train_feature.shape[1])
            sub_clf = svm.SVC(kernel=kernel_function)
            print("Training NN kernel --- END")
        else:
            sub_clf = svm.SVC(kernel="rbf")
        print("Training sub SVM --- START")
        sub_clf.fit(sub_train_feature, sub_train_label)
        print("Training sub SVM --- START")
        self.sub_svm_dict[(s, sp, e)] = sub_clf

        self._train(s, sp)
        self._train(sp + 1, e)
Example #9
    def process(self, data):
        """
        This function builds the kernel matrix from the data, and generates the
        KPCA eigenspace.

        :param data: data matrix :math:`D\in\mathbb{R}^{d \\times n}`, where
                     :math:`d` is the dimensionality and
                     :math:`n` is the number of training samples
        :type data: numpy array
        """
        # perform eigendecomposition
        k_mat = kernel(data, data, self.kernel)

        # if centered, need to perform extra computations
        if self.centered > 0:
            nsamp = data.shape[1]
            unit = ones((nsamp, 1), float)
            self.h_mat = eye(nsamp) - (1./nsamp)*dot(unit, unit.T) # centering matrix
            self.k_cent = (1./nsamp)*dot(k_mat, unit) # centering vector, used in reduction

            k_mat = dot(k_mat, self.h_mat) # augmented kernel matrix
            k_mat = dot(self.h_mat, k_mat)

        k_evals, k_evecs = eigsh(k_mat, k=self.neigs, which='LM')
        k_evals = sqrt(k_evals)
        k_scale = (ones((1, self.neigs), float)/k_evals)
        k_scale = diag(k_scale[0, :]) # create scaling matrix
        alpha = dot(k_evecs, k_scale)

        self.alpha = alpha # store copies of coefficients and data
        self.data = data
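
The centering step above computes :math:`K_c = HKH` with :math:`H = I - \\frac{1}{n}\mathbf{1}\mathbf{1}^T`, then rescales each eigenvector by :math:`1/\\sqrt{\\lambda}`. A condensed sketch of the same computation:

import numpy as np
from scipy.sparse.linalg import eigsh

def kpca_coefficients(K, neigs=2):
    # center the Gram matrix, then scale eigenvectors by 1/sqrt(eigenvalue)
    n = K.shape[0]
    H = np.eye(n) - np.ones((n, n))/n
    evals, evecs = eigsh(H @ K @ H, k=neigs, which='LM')
    return evecs / np.sqrt(evals)   # n x neigs coefficient matrix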
Example #10
    def train(self):
        print("Multiclass svm training -- START")
        # train one svm for each label pair
        for label1 in range(self.classes):
            for label2 in range(label1 + 1, self.classes):
                print("Training step : ", label1, " vs ", label2)
                sub_data1 = self.cat_train_dataset[label1]
                sub_data2 = self.cat_train_dataset[label2]
                sub_train_data1 = np.array(sub_data1).astype("float64")
                sub_train_data2 = np.array(sub_data2).astype("float64")
                sub_train_data = np.concatenate([sub_train_data1, sub_train_data2], axis=0)
                np.random.shuffle(sub_train_data)
                sub_train_feature = sub_train_data[:, :-1]
                sub_train_label = sub_train_data[:, -1]

                if self.nn_kernel:
                    print("Training NN kernel --- START")
                    kernel_function = kernel.kernel(sub_train_feature, sub_train_label, sub_train_feature.shape[1])
                    sub_clf = svm.SVC(kernel=kernel_function)
                    print("Training NN kernel --- END")
                else:
                    sub_clf = svm.SVC(kernel="rbf")
                print("Training sub SVM --- START")
                sub_clf.fit(sub_train_feature, sub_train_label)
                print("Training sub SVM --- END")
                self.sub_svm_list[label1].append(sub_clf)
        print("Multiclass svm training -- END")
Example #11
    def project(self, tdata):
        """
        This function projects test data :math:`D'` onto the mean map, i.e.
        it computes and returns :math:`\\langle\mu_{{D_1}},\\psi(x)\\rangle_\mathcal{H}`
        for each :math:`x\in D'`, where :math:`\\psi:\mathbb{R}^{d}\\to\mathcal{H}` is the feature
        map associated to the RKHS :math:`\mathcal{H}`. 

        :param tdata: test data matrix :math:`D'\in\mathbb{R}^{d \\times n}`, where
                     :math:`d` is the dimensionality and
                     :math:`n` is the number of testing samples
        :type tdata: numpy array
        :return: projected data
        :rtype:  numpy array
        """        
        # parse data and ensure it meets requirements
        test_dim = tdata.shape[0]
        ntest = tdata.shape[1]

        if test_dim != self.dim:
          raise Exception("ERROR: dimensionality of data must be consistent with training set.")

        # compute kernel matrix between data and tdata 
        kmat = kernel(self.data,tdata,self.kernel)
        kmat = npsum(kmat,axis=0)
        kmat = (1/self.nsamp)*kmat
        return kmat 
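
In other words, the projection is the empirical mean embedding :math:`\\langle\mu_{D},\\psi(x)\\rangle = \\frac{1}{n}\\sum_i k(x_i, x)`, one value per test column. A self-contained sketch with a Gaussian kernel (an illustrative choice):

import numpy as np

def mean_map_project(X_train, X_test, bandwidth=1.0):
    # <mu_D, psi(x)> = (1/n) sum_i k(x_i, x); columns are samples (d x n layout)
    sq = (np.sum(X_train**2, axis=0)[:, None] + np.sum(X_test**2, axis=0)[None, :]
          - 2*X_train.T @ X_test)
    K = np.exp(-sq / (2*bandwidth**2))
    return K.sum(axis=0) / X_train.shape[1]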
Example #12
    def predict(self, te_data):
        """
        Use Bayesian RBF model to compute regression values at data points, as well as posterior variance. 

        :param te_data: Testing data matrix :math:`D\in\mathbb{R}^{d \\times m}`
        :type te_data: numpy array
        :return: `f`: estimated output values at data :math:`\hat{y}\in\mathbb{R}^{1 \\times m}`
        :rtype:  numpy array
        :return: `var_f`: posterior variance at data :math:`\hat{\sigma}^2\in\mathbb{R}^{1 \\times m}`
        :rtype:  numpy array
        """ 
        k_test = kernel(te_data, self.centers, self.k_type)	
        dim_test = te_data.shape[0]
        ntest = te_data.shape[1]

        # make sure dimensionality is consistent 
        if dim_test != self.centers_dim:
            raise Exception("Dimensions of data and centers must be the same")
        else:
            # compute posterior mean
            f = (dot(k_test,self.mn)).transpose()
         
            # compute posterior variance 
            var_m = dot(k_test,self.precision)
            var_m = dot(var_m,k_test.transpose())
            var_f = (diag(var_m)).transpose()
            one_mat = ones((1,ntest))
            var_f = var_f + (1/self.beta)*one_mat

        return f, var_f
Example #13
    def reduce(self, tdata):
        """
        This function builds the kernel matrix from the data, and the test data,
        and projects the test data onto the KPCA eigenspace. 

        :param tdata: data matrix :math:`D\in\mathbb{R}^{d \\times m}`, where 
                     :math:`d` is the dimensionality and 
                     :math:`m` is the number of testing samples
        :type tdata: numpy array
        :return: eigenfunction coefficients
        :rtype:  numpy array
        """        
        k_test = kernel(self.data, tdata, self.kernel)	

        if self.centered > 0:
          ntest = tdata.shape[1]

          #print ntest
          #print (self.k_cent).shape
          #print k_test.shape
          #print tile(self.k_cent, (1, ntest)).shape 
          k_test =  k_test - tile(self.k_cent, (1, ntest))
          
          k_test = dot(self.h_mat,k_test)

        coeff = dot(transpose(self.alpha), k_test)
        return coeff
Example #14
    def process(self, data_in, obs_vec):
        """
        Generate function network model.

        :param data_in: Training data matrix :math:`\mathcal{X}\in\mathbb{R}^{d\\times n}`
        :type data_in: numpy array
        :param obs_vec: Observation vector :math:`y\in\mathbb{R}^{1 \\times n}`
        :type obs_vec: numpy array
        :return: none
        :rtype:  none
        """
        # check consistency of data
        obs_num = obs_vec.shape[1]
        data_num = data_in.shape[1]

        if obs_num != data_num:
            raise Exception("Number of samples for data and observations must be the same")
        else:
            # initialize variables
            self.data = data_in
            self.data_dim = data_in.shape[0]
            nsamp = data_num

            # peel off parameters
            ki = self.k_type
            bandwidth = ki.params[0]

            # compute regularized kernel matrix
            kmat = kernel(self.data, self.data, self.k_type) + (pow(self.noise,2))*eye(nsamp)

            # perform Cholesky factorization, and compute mean vector (for stable inverse computations)
            self.lmat = cholesky(kmat).transpose()
            self.mean_vec = lstsq(self.lmat, obs_vec.transpose())[0]
            self.mean_vec = lstsq(self.lmat.transpose(), self.mean_vec)[0]
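
Since `lmat` is triangular, the two `lstsq` calls can be replaced by triangular solves, which is cheaper and numerically cleaner; a sketch of the same fit (illustrative names, not the class attributes):

import numpy as np
from scipy.linalg import cholesky, solve_triangular

def gp_fit(K, y, noise=1e-2):
    # alpha = (K + noise^2 I)^{-1} y via two triangular solves
    L = cholesky(K + noise**2 * np.eye(len(y)), lower=True)
    alpha = solve_triangular(L.T, solve_triangular(L, y, lower=True), lower=False)
    return L, alpha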
Example #15
 def modfit(self, print_stat=True):
     self.sort_out_text()
     if(print_stat): # if requested (default True), print the fortran output describing the best-fit parameters
         self.print_stat_array()
     self.save_stat_array()
     self.dismantle_keplerian_fit()
     self.dismantle_RV_kep()
     results = kernel(self.generate_summary(), self.jd, self.rv_obs, self.rv_error,self.o_c, self.model, self.JD_model, self.npl,self.semiM,self.masses,self.data_set,self.stat_array_saved,self.reduced_chi2,self.chi2,self.rms,self.loglik, self.mfit,self.omega_dot,self.omega_dot_err) 
     return results
Example #16
 def getK(self):
     """
     return Kinship matrix as kernel object
     """
     if not self.isNormalised:
         self.impute()
     K = np.dot(self.GT, self.GT.T)
     K /= K.diagonal().mean()
     Kobj = kernel.kernel(self.SID, K)
     return Kobj
Example #18
 def predict(self,X_test):
     X_test = X_test.to_numpy()
     n = np.shape(X_test)[0]
     y_pred = np.zeros((n,1))
     for i in tqdm(range(n)):
         x = X_test[i,]
         Kx = np.asarray([kernel(self.kernel, xi, x, self.sigma, self.k, self.d, self.e, self.beta) for xi in self.X_train])
         y_pred[i,] = np.dot(Kx,self.alpha)
     y_pred = np.sign(y_pred).astype('int')
     y_pred_list = [row[0] for row in y_pred]
     return(pd.Series(y_pred_list))
Example #19
def apply(newdata_par, alldata_par, no_parameters, SOL, kernel_type):
    #    print(SOL.shape, alldata_par.shape, no_parameters)
    no_new = newdata_par.shape[0]
    no_evaluations = alldata_par.shape[0]
    TEMP = np.zeros([no_new, no_evaluations])
    for i in range(no_parameters):
        v = alldata_par[:, i]
        M = npm.repmat(v, no_new, 1)
        v_new = newdata_par[:, i]
        M_new = npm.repmat(v_new, no_evaluations, 1)
        T = np.transpose(M_new)
        TEMP = TEMP + np.power(M - T, 2)
    np.sqrt(TEMP, out=TEMP)
    kernel.kernel(TEMP, kernel_type)
    no_polynomials = 1 + no_parameters  # plus linear polynomials
    newdata_surrogate = np.matmul(TEMP, SOL[:-no_polynomials])
    pp = SOL[-1] + np.matmul(newdata_par, SOL[-no_parameters - 1:-1])
    #    print(pp.shape)
    newdata_surrogate = np.reshape(newdata_surrogate,
                                   (no_new, 1)) + np.reshape(pp, (no_new, 1))
    return newdata_surrogate
Example #20
    def predict(self, te_data):
        """
        Use :math:`\\alpha` to compute values at data points.

        :param te_data: Testing data matrix :math:`D\in\mathbb{R}^{d\\times m}`
        :type te_data: numpy array
        :return: `est_obs_vec`: estimated output values at data :math:`\hat{y}\in\mathbb{R}^{m}`
        :rtype:  numpy array
        """
        k_test = kernel(self.data, te_data, self.k_type)
        coeff = dot((self.alpha).transpose(), k_test)
        return coeff
Example #21
 def __init__(self, data_mat, label_mat, C, toler, kernel_type):
     self.data_mat  = data_mat                   # data matrix
     self.label_mat = label_mat                  # label matrix
     self.C         = C                          # soft-margin parameter
     self.toler     = toler                      # tolerance for the stopping criterion
     self.m         = np.shape(data_mat)[0]      # number of samples in data_mat
     self.alphas    = np.mat(np.zeros((self.m, 1))) # alpha multipliers
     self.b         = 0                          # bias, initialized to 0
     self.eCache    = np.mat(np.zeros((self.m, 2))) # error cache
     self.Kernel    = np.mat(np.zeros((self.m, self.m)))
     for i in range(self.m):
         self.Kernel[:, i] = kernel(self.data_mat, self.data_mat[i, :], kernel_type)
Example #22
    def fit(self, Xs, Ys, Xt, Yt):
        Ns, Nt = len(Xs), len(Xt)
        X = np.vstack((Xs, Xt))
        N, P = X.shape
        C = len(np.unique(Ys))
        X /= np.expand_dims(np.linalg.norm(X, axis=1), 1)  # row-wise normalization
        E = np.vstack((1 / Ns * np.ones((Ns, 1)), -1 / Nt * np.ones((Nt, 1))))
        H = np.eye(N) - 1 / N * np.ones((N, N))

        Yp = None
        acc_list = []
        for i in range(self.maxit):
            Mc = 0
            M0 = E * E.T * C
            if Yp is not None and len(Yp) == Nt:
                for c in range(1, C + 1):
                    E = np.zeros((N, 1))
                    tt = Ys == c
                    E[np.where(tt == True)] = 1 / len(Ys[np.where(Ys == c)])
                    yy = Yp == c
                    E[np.array(np.where(yy == True)) + Ns] = \
                        -1 / len(Yp[np.where(Yp == c)])
                    E[np.isinf(E)] = 0
                    Mc = Mc + np.dot(E, E.T)
            M = M0 + Mc

            if self.kernel is None:
                N_eye = P
                K = X.T
            else:
                N_eye = N
                K = kernel(np.asarray(X), np.asarray(X), self.kernel, self.kargs)
            A = np.linalg.multi_dot([K, M, K.T]) + self.beta * np.eye(N_eye)
            B = np.linalg.multi_dot([K, H, K.T])
            V, U = sp.linalg.eig(A, B)
            ind = np.argsort(V)
            A = U[:, ind[:self.dim]]
            Z = np.dot(A.T, K)
            Z /= np.linalg.norm(Z, axis=0)
            Xs_new, Xt_new = Z[:, :Ns].T, Z[:, Ns:].T

            knn = KNeighborsClassifier(n_neighbors=1)
            knn.fit(Xs_new, Ys.ravel())
            Yp = knn.predict(Xt_new)
            acc = sklearn.metrics.accuracy_score(Yt, Yp)
            acc_list.append(acc)
            print('JDA iteration [{}/{}]: Acc: {:.4f}'
                  .format(i + 1, self.maxit, acc))

        return Xs_new, Xt_new
Example #23
def test(data_mat, label_mat_ref, support_vector_data, support_vector_label,
         support_vector_alphas, b, kernel_type):
    error_count = 0
    m, n = np.shape(data_mat)
    for i in range(m):
        kernel_eval = kernel(support_vector_data, data_mat[i, :], kernel_type)
        predict = kernel_eval.T * np.multiply(support_vector_label,
                                              support_vector_alphas) + b
        if np.sign(predict) != np.sign(label_mat_ref[i]):
            error_count += 1
            print(i)

    print("The Test error rate is: %d/%d" % (error_count, m))
    return m, error_count
Example #24
File: tick.py Project: myoutny/GoL
def tick(g):

    gp = g  # alias: the updates below modify g in place

    j = gp > 0
    L = np.argwhere(j)
    D = np.argwhere(np.invert(j))

    K_live = kernel(L, space=gp.shape[0])
    K_dead = kernel(D, space=gp.shape[0])

    N_live = neighbors(gp, K_live, L)
    N_dead = neighbors(gp, K_dead, D)

    S_live = np.array([np.sum(n) for n in N_live])
    S_dead = np.array([np.sum(n) for n in N_dead])

    rip = L[np.any([S_live < 2, S_live > 3], axis=0)]
    gp[rip[:, 0], rip[:, 1]] = 0

    baby = D[S_dead == 3]
    gp[baby[:, 0], baby[:, 1]] = 1

    return gp
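
For comparison, the same Game-of-Life update can be written as a single 3x3 neighbor-count convolution (a sketch assuming a binary 0/1 grid; wrap-around boundaries are an assumption about the kernel/neighbors helpers above):

import numpy as np
from scipy.signal import convolve2d

def tick_conv(g):
    # count live neighbors with a 3x3 convolution, then apply the B3/S23 rules
    k = np.ones((3, 3)); k[1, 1] = 0
    n = convolve2d(g, k, mode='same', boundary='wrap')
    return ((n == 3) | ((g == 1) & (n == 2))).astype(int)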
Example #25
 def __init__(self):
     gk.init()
     self.kernel = kernel()
     self.choices = []
     self.loading = False
     self.saving = True
     self.app = qt.QApplication()
     self.main = qt.QMainWindow()
     self.main.resize(400, 640)
     self.setMenu()
     self.font = gui.QFont()
     self.font.setPixelSize(14)
     self.font.setFamily("宋体")
     self.main.show()
     self.hello()
Example #26
    def project(self, tdata):
        """
        This function uses the Nystrom extension to projects the test data onto 
        the diffusion space. 

        :param tdata: data matrix :math:`D\in\mathbb{R}^{d \\times m}`, where 
                     :math:`d` is the dimensionality and 
                     :math:`m` is the number of testing samples
        :type tdata: numpy array
        :return: projected test data
        :rtype:  numpy array
        """        
        K_test = kernel(self.data, tdata, self.kernel)
        coeff = dot(transpose(self.alpha), K_test)
        return coeff
Example #27
def init(config):
    ## Initialise jarvis kernel
    jarvis = kernel.kernel(config)

    ## Connect to data source
    jarvis.register('data', data.init(jarvis))

    ## Initialise functions
    jarvis.register('function', load_modules('functions'))

    ## Set up interfaces
    jarvis.register('interface', load_modules('interfaces'))

    ## Finish setup
    jarvis.setup()

    return jarvis
Example #29
    def draw_rfunc(self, te_data):
        """
        Draw random function from current Bayesian RBF model.

        :param te_data: Testing data matrix :math:`D\in\mathbb{R}^{d \\times m}`
        :type te_data: numpy array
        :return: `f`: estimated output values at data :math:`\hat{y}\in\mathbb{R}^{1 \\times m}`
        :rtype:  numpy array
        """
        # sample a random weight vector from the Bayesian model
        w_random = rnd.multivariate_normal(self.mn_aslist,self.precision,1).transpose()
        k_test = kernel(self.centers, te_data, self.k_type)

        f_rand = dot(w_random.transpose(),k_test)
        f_rand = squeeze(asarray(f_rand))

        return f_rand
Example #30
    def train(self, X, y):
        DNA_chars = ["A", "T", "C", "G"]
        self.DNA_seq = []
        self.X_train = X
        self.y = y
        self.alpha = np.zeros(y.shape)
        n = len(y)
        self.K = np.zeros((n, n))
        if (self.kernel_name == "Linear"):
            self.K = (X @ X.T)
        else:

            for i in range(n):
                for j in range(i + 1):
                    self.K[i, j] = kernel(X[i], X[j], self.kernel_name)
                    self.K[j, i] = self.K[i, j]

        self.alpha = scipy.optimize.minimize(self.logistic_regression,
                                             self.alpha)['x']
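
Here `self.logistic_regression` is presumably the regularized kernel logistic loss in :math:`\\alpha`; a sketch of one common form of that objective (an assumption about the unshown method, with `lam` as an illustrative regularization weight):

import numpy as np

def klr_loss(alpha, K, y, lam=1e-3):
    # mean logistic loss on the margins m = K alpha, plus lam * alpha^T K alpha
    m = K @ alpha
    return np.mean(np.logaddexp(0.0, -y * m)) + lam * alpha @ m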
Example #31
    def process(self, data, obs_vec):
        """
        Generate function network model. 

        :param data: Training data matrix :math:`\mathcal{X}\in\mathbb{R}^{d\\times n}`
        :type data: numpy array
        :param obs_vec: Observation vector :math:`y\in\mathbb{R}^{1 \\times n}`
        :type obs_vec: numpy array
        :return: none
        :rtype:  none
        """        
        # check consistency of data
        data_dim = data.shape[0]  
        obs_num = obs_vec.shape[1] 
        data_num = data.shape[1] 

        if data_dim != self.centers_dim:  
            raise Exception("Dimensions of data and centers must be the same")
        elif obs_num != data_num:            
            raise Exception("Number of samples for data and observations must be the same")
        else:
            # peel off parameters 
            ki = self.k_type
            bandwidth = ki.params[0]          

            # create kernel feature matrix 
            phi = kernel(data, self.centers, self.k_type)           
            self.alpha = 1/(pow(bandwidth,2))

            # take into account both options (beta = 1/noise^2 is only
            # well defined for nonzero noise, so guard the noiseless case)
            if self.noise == 0:
                # S = inv(alpha*eye(m) + beta*(Phi'*Phi));
                # m_n = beta*(S*(Phi'*vals'));
                self.beta = 1.0
                jitter = 0.00001 # add jitter factor to ensure inverse doesn't blow up
                self.precision = inv(jitter*eye(self.ncent) + self.beta*dot(phi.transpose(),phi))
            else:
                self.beta = 1/(pow(self.noise,2))
                self.precision = inv(self.alpha*eye(self.ncent) + self.beta*dot(phi.transpose(),phi))

            self.mn = (dot(obs_vec,phi)).transpose()
            self.mn = self.beta*(dot(self.precision,self.mn))
            self.mn_aslist = array((self.mn).transpose())[0].tolist() # store as list for use in multivariate normal sampling
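
These are the standard Bayesian linear-regression posterior formulas (Bishop, eqs. 3.53-3.54) applied to the kernel feature matrix; note that despite its name, `self.precision` stores the inverse, i.e. the posterior covariance :math:`S_N`. A condensed sketch:

import numpy as np

def bayes_posterior(Phi, y, alpha=1.0, beta=100.0):
    # S_N = (alpha I + beta Phi^T Phi)^{-1},  m_N = beta S_N Phi^T y
    m = Phi.shape[1]
    S = np.linalg.inv(alpha*np.eye(m) + beta*(Phi.T @ Phi))
    return beta * S @ Phi.T @ y, S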
Example #32
    def evaluate(self, tdata):
        """
        This function computes the output of the kernel SVM using the kernel, current weight vector and bias.

        :param tdata: data matrix :math:`D\in\mathbb{R}^{d \\times m}`, where
                     :math:`d` is the dimensionality and
                     :math:`m` is the number of testing samples
        :type tdata: numpy array
        :return: projected test data
        :rtype:  numpy array
        """
        test_dim = tdata.shape[0]

        if self.dim != test_dim:
          raise Exception("ERROR: dimensionality of test data must be consistent.")

        K_test = kernel(self.svecs, tdata, self.kernel)
        weights = self.alpha*self.slabels

        f_eval = dot(weights, K_test)
        return f_eval
Example #33
def classify_object(features, labels, test_feature, distance_function_type,
                    kernel_function_type, window_type, window_parameter):
    distances_and_labels = []

    # count distances between test_feature and others
    for index in range(len(features)):
        distances_and_labels.append({
            'distance':
            distance(test_feature, features[index], distance_function_type),
            'label':
            labels[index]
        })

    # sort by distance
    distances_and_labels = sorted(distances_and_labels,
                                  key=lambda k: k['distance'])

    # set radius
    if window_type == 'variable':
        # take the distance to neighbor number window_parameter as the radius
        # (the list is sorted by distance); if several neighbors are tied at
        # that distance, step just past the tie
        window_radius = distances_and_labels[window_parameter]['distance'] \
            if distances_and_labels[window_parameter-1]['distance'] < distances_and_labels[window_parameter]['distance'] \
            else distances_and_labels[window_parameter-1]['distance'] + 0.000001
    else:
        window_radius = window_parameter

    weighted_class_sum = 0
    kernels_sum = 0

    for index in range(len(features)):
        kernel_value = kernel(
            distances_and_labels[index]['distance'] /
            window_radius if window_radius != 0 else 0, kernel_function_type)
        weighted_class_sum += distances_and_labels[index][
            'label'] * kernel_value
        kernels_sum += kernel_value

    predicted_value = weighted_class_sum / kernels_sum if kernels_sum != 0 else weighted_class_sum

    return predicted_value
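
The weighted sum above is the Nadaraya-Watson estimator :math:`\hat{y}(x) = \\sum_i y_i K(d_i/h) / \\sum_i K(d_i/h)`. A condensed sketch over precomputed distances (Epanechnikov kernel as an illustrative choice):

import numpy as np

def parzen_predict(dists, labels, radius):
    d = np.asarray(dists, dtype=float)
    u = d/radius if radius > 0 else np.zeros_like(d)     # scaled distances
    w = np.where(np.abs(u) <= 1, 0.75*(1 - u**2), 0.0)   # Epanechnikov weights
    s = w.sum()
    return (w @ np.asarray(labels))/s if s > 0 else float(np.mean(labels))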
Example #34
    def process(self, data):
        """
        This function builds the diffusion matrix from the data, performs the random walk, and 
        returns the diffused data points. 

        :param data: data matrix :math:`D\in\mathbb{R}^{d \\times n}`, where 
                     :math:`d` is the dimensionality and 
                     :math:`n` is the number of training samples
        :type data: numpy array
        :return: diffused data
        :rtype:  numpy array
        """        
        # perform eigendecomposition
        k_mat = kernel(data, data, self.kernel)                
        k_evals, k_evecs = eigsh(k_mat, k=self.neigs, which='LM') 
        k_evals = sqrt(k_evals)
        k_scale = (ones((1, self.neigs), float)/k_evals)
        k_scale = diag(k_scale[0, :]) # create scaling matrix
        alpha = dot(k_evecs, k_scale)

        self.alpha = alpha # store copies of coefficients and data
        self.data = data
Example #35
 def create_kernels(self):
     intervals = get_list_of_intervals(datetime(2015, 1, 1, 6, 0, 0),
                                       datetime(2015, 1, 1, 21, 5, 0), timedelta(minutes=5))
     #intervals.append(datetime(2015, 1, 1, 23, 59, 59))
     # list_of_times = get_list_of_times(self.trainingdata['dateAndTime'])
     # list_of_datasets = list()
     # for i in range(0, len(intervals)-1): #create data set for every 5-minute interval
     #     indices = where((list_of_times >= intervals[i].time()) &
     #                      (list_of_times < intervals[i+1].time()))
     #     dataset = self.create_dataset(indices)
     #     list_of_datasets.append(dataset)
     list_of_trainingsets = self.split_dataset(self.trainingdata, intervals)
     list_of_testingsets = self.split_dataset(self.testingdata, intervals, True)
     for i in range(0, len(list_of_trainingsets)): #combine data sets with neighbors
         kernel_data = zeros((1, 3))
         for j in range(-(self.window_size), self.window_size+1):
             kernel_data = vstack((kernel_data, list_of_trainingsets[(i+j)%len(list_of_trainingsets)]))
         kernel_data = delete(kernel_data, 0, axis=0) #delete first row
         k = kernel(kernel_data[:, [0,1]], kernel_data[:, 2]) #create kernel
         print(intervals[i].time())
         k.tune(list_of_testingsets[i])
         self.kernel_map[str(intervals[i].time())] = k #add kernel to kernel_map
Example #36
    def predict(self, X, val=False):
        nb = X.shape[0]

        result = []
        for i in range(nb):
            pred = 0
            for j in range(len(self.alpha)):

                pred = pred + (kernel(self.X_train[j],
                                      X[i],
                                      kernel_name=self.kernel_name,
                                      degree=self.degree,
                                      gamma=self.gamma) * self.alpha[j])

            if (pred >= 0):
                result.append(1)
            else:
                if (val):
                    result.append(-1)
                else:
                    result.append(0)

        return result
Example #37
def main():
    dataset = pd.read_csv("./datasets/data.csv")
    dataset.drop(columns='id', inplace=True)
    dataset.dropna(axis=1, how='all', inplace=True)
    dataset.replace('M', 1, inplace=True)
    dataset.replace('B', 0, inplace=True)
    data = dataset.values
    np.random.shuffle(data)
    y, x = np.split(data, (1, ), axis=1)
    y = y.flatten()
    ratio = 0.8
    index = int(ratio * len(data))
    x_train, x_test, y_train, y_test = x[:index], x[index:], y[:index], y[
        index:]
    kernel_function = kernel.kernel(x_train, y_train, 30)
    clf = svm.SVC(kernel=kernel_function)

    a = time.time()
    clf.fit(x_train, y_train.ravel())
    b = time.time()
    print("time: ", b - a, " seconds")

    print("accuracy:", clf.score(x_test, y_test))
Example #38
def classify_object(features, labels, test_features, distance_function_type,
                    kernel_function_type, window_type, window_parameter):
    distances_and_labels = []

    for index in range(len(features)):
        distances_and_labels.append({
            'distance':
            distance(test_features, features[index], distance_function_type),
            'label':
            labels[index]
        })

    distances_and_labels = sorted(distances_and_labels,
                                  key=lambda k: k['distance'])

    if window_type == 'variable':
        window_radius = distances_and_labels[window_parameter]['distance'] \
            if distances_and_labels[window_parameter-1]['distance'] < distances_and_labels[window_parameter]['distance'] \
            else distances_and_labels[window_parameter-1]['distance'] + 0.000001
    else:
        window_radius = window_parameter

    weighted_class_sum = 0
    kernels_sum = 0

    for index in range(len(features)):
        kernel_value = kernel(
            distances_and_labels[index]['distance'] /
            window_radius if window_radius != 0 else 0, kernel_function_type)
        weighted_class_sum += distances_and_labels[index][
            'label'] * kernel_value
        kernels_sum += kernel_value

    predicted_value = weighted_class_sum / kernels_sum if kernels_sum != 0 else weighted_class_sum

    return predicted_value
Example #39
def create_matching_kernel_ff(stars_source, stars_target, allstars=True):
    kern = kernel(81)
    nn = int(np.minimum(len(stars_source), len(stars_target)))

    starpairs = np.stack([stars_target[:nn], stars_source[:nn]])
    _, nstars, nside, _ = np.shape(starpairs)
    npsf = nside - kern.nf

    pairs = []
    for i in range(nstars):
        star1 = starpairs[1][i]
        star2 = starpairs[0][i]
        pairs.append([star1,
                      star2])
        if i > 0 and not allstars:
            continue

    sol = kern.solve(npsf, pairs)
    # sol[sol < 0] = 0
    sol /= sol.sum()
    print(sol.sum())
    print(sol.shape)

    return sol
Example #40
 def fit(self, Xs, Xt):
     Ns, Nt = len(Xs), len(Xt)
     X = np.vstack((Xs, Xt))
     N, P = X.shape
     X /= np.expand_dims(np.linalg.norm(X, axis=1), 1)  # normalization
     E = np.vstack((1 / Ns * np.ones((Ns, 1)), -1 / Nt * np.ones((Nt, 1))))
     M = E * E.T
     M = M / np.linalg.norm(M, 'fro')
     H = np.eye(N) - 1 / N * np.ones((N, N))
     if self.kernel is None:
         N_eye = P
         K = X.T
     else:
         N_eye = N
         K = kernel(np.asarray(X), np.asarray(X), self.kernel, self.kargs)
     A = np.linalg.multi_dot([K, M, K.T]) + self.beta * np.eye(N_eye)
     B = np.linalg.multi_dot([K, H, K.T])
     V, U = sp.linalg.eig(A, B)
     ind = np.argsort(V)
     A = U[:, ind[:self.dim]]
     Z = np.dot(A.T, K)
     Z /= np.linalg.norm(Z, axis=0)
     Xs_new, Xt_new = Z[:, :Ns].T, Z[:, Ns:].T
     return Xs_new, Xt_new
Example #41
np.set_printoptions(precision=4)

x = np.zeros([5, 5])
for fileName in sorted(glob('img/ts1/*.png')):
    print('_' * 60)
    print(fileName)
    img = imread(fileName)
    img = gray(img)
    a, b = fileName.split('/')[-1].split('.')[0].split('-')
    a, b = ord(a) - 65, ord(b) - 65
    if img[0, 0] < 10:
        img = 255 - img
    KS = img / 1.0
    sigma = 1
    ki = kernel(KS, sigma)
    x[a][b] = ki
    x[b][a] = ki

print(x)
pca = PCA(n_components=2)
pca.fit(x)
print('pca ratio', pca.explained_variance_ratio_)

fig = plt.figure()
ax = fig.add_subplot(121, projection='3d')
ax.scatter(x[:, 0], x[:, 1], x[:, 2], color=list('bbrrr'))
ax = fig.add_subplot(122)
x = pca.transform(x)
print(x)
ax.scatter(x[:, 0], x[:, 1], color=list('bbrrr'))
Example #42
        #*****************************************************************************#

        ###############################################################################
        #                   Process Matrix Elements through kernel                    #
        ###############################################################################

        # Load shared library of kernel src/ files to ROOT
        ROOT.gROOT.SetBatch(0)
        ROOT.gSystem.Load('lib/libpdf.so')
        ROOT.gSystem.Load("lib/libme.so")

        # Set the parameters of the model using the parameter card(s)
        pobj = setparams()

        # Create kernel object
        MEObj = kernel.kernel(nexternal-dof, amtOfInfoPerEvent, pobj, mode = gpu, pdfsetn = 'CT10', kernelfn = "kernel/kernel.cl", pR = neval)

        # Initialize results arrays
        resultsVector = np.zeros(nevts, dtype = kernel.a_float_t)

        class batch_func(VEGAS.BatchIntegrand):
            def __init__(self, ME_kernel_object = None):
                self.ME_kernel_object = ME_kernel_object

            def __call__(self, neutrino_space):
                res = self.ME_kernel_object.eval(xp = neutrino_space)  # avoid shadowing the builtin eval
                return res

        # Set the kinematic values for the each event in the kernel object in order to evaluate the ME.

        write_path = "/home/kvmu/vbf/vbf_plotting"
Example #43
        else:
            grovepi.digitalWrite(8,0)
            grovepi.digitalWrite(9,1)
    elif index==3:
        if r_or_g==0:#green
            grovepi.digitalWrite(16,1)
            grovepi.digitalWrite(17,0)
        else:
            grovepi.digitalWrite(16,0)
            grovepi.digitalWrite(17,1)
initial_rgb()

space_num = 8

# init 
Kernel = kernel(block_num=space_num)
print('kernel ready')
client = mqtt.Client()

client.on_connect = on_connect
client.on_message = on_message
client.connect("192.168.0.104")

client.loop_start()

grovepi.pinMode(6,"OUTPUT") #led 1 R
grovepi.pinMode(7,"OUTPUT") #led 2 R
grovepi.pinMode(8,"OUTPUT") #led 3 R
grovepi.pinMode(9,"OUTPUT") #led 3 G

grovepi.pinMode(14,"OUTPUT") #led 1 G
Example #44

# print the names of the kernels
print(k1)
print(k2)
print(k3)
print(k4)
print(k5)
print(k7)

# now, generate plots for kernels
x = np.arange(-5,5,0.1)
x_rad = np.arange(-3,7,0.1)
y = np.array([2]) # y vals

k_gauss = kernel(x_rad,y,k1)
k_sigm = kernel(x,y,k2)
k_poly = kernel(x,y,k3)
k_lap = kernel(x_rad,y,k4)
k_cauchy = kernel(x_rad,y,k5)
k_periodic = kernel(x_rad,y,k7)
k_locally_periodic = kernel(x_rad,y,k8)

# save files in test data directory

if save_data:
    savemat('../data/unit_tests/test_kernel.mat',
            {'k_gauss':k_gauss,'k_sigm':k_sigm,'k_poly':k_poly,'k_lap':k_lap,
             'k_cauchy':k_cauchy,'k_periodic':k_periodic,'k_locally_periodic':k_locally_periodic})

# plot Gaussian kernel values
Example #45
    def simplesmo(self, data, labels):
        """
        This function uses a simplified version of the SMO algorithm to train
        the weight and bias vectors associated to solution to the maximum margin
        problem. If the data is nonlinearly separable, this algorithm will not converge.
        See Andrew Ng's notes for more information.

        A technical point: in the SMO algorithm, we need to refer to elements in the
        numpy arrays. Therefore we have to use the item function.

        :param data: data matrix :math:`D\in\mathbb{R}^{d \\times n}`, where
                     :math:`d` is the dimensionality and
                     :math:`n` is the number of training samples
        :type data: numpy array
        """
        # initialize
        passes = 0
        alpha_old = zeros((1,self.nsamp))

        while (passes < self.max_its):
            num_changed_alphas = 0

            # process data
            for i in range(1,self.nsamp):
                # compute the Ei variable
                datai = self.svecs[:,i-1:i]
                labeli = (self.slabels).item(i-1)
                f_evali = self.evaluate(datai)
                f_evali = f_evali.item(0) # convert f_evali to scalar
                Ei = f_evali - labeli

                # compute conditions required
                alphai = (self.alpha).item(i-1)
                cond1 = (labeli*Ei < -self.tol and alphai < self.C)
                cond2 = (labeli*Ei > self.tol and alphai > 0)

                # check to see if you have to keep going; if you have to, you must optimize a lagrange multiplier pair
                if cond1 or cond2:
                    j = rnd.randint(1,self.nsamp,size=1)
                    j = j.item(0)

                    # compute eval_array[j]
                    dataj = self.svecs[:,j-1:j]
                    labelj = (self.slabels).item(j-1)
                    f_evalj = self.evaluate(dataj)
                    f_evalj = f_evalj.item(0) # convert f_evalj to scalar

                    Ej = f_evalj - labelj
                    alphaj = (self.alpha).item(j-1)

                    # save old Lagrange multipliers
                    alpha_old[:,i-1] = alphai
                    alpha_old[:,j-1] = alphaj

                    # compute L and H
                    if labeli != labelj:
                        L = amax([0,alphaj-alphai])
                        H = amin([self.C,self.C+alphaj-alphai])
                    else:
                        L = amax([0,alphai+alphaj-self.C])
                        H = amin([self.C,alphai+alphaj])

                    if L == H:
                        continue

                    # compute eta
                    eta = 2*kernel(datai,dataj,self.kernel) - kernel(datai,datai,self.kernel) - kernel(dataj,dataj,self.kernel)
                    eta = eta.item(0)

                    if eta >= 0:
                        continue

                    # compute new value for alphaj
                    alphaj = alphaj - (labelj*(Ei-Ej))/eta

                    # clip if necessary
                    if alphaj > H:
                        alphaj = H
                    elif alphaj < L:
                        alphaj = L

                    # if no change, continue
                    if (fabs(alphaj-alpha_old.item(j-1)) < self.tol):
                        continue

                    # otherwise, store the clipped alphaj and update alphai
                    self.alpha[:,j-1] = alphaj
                    self.alpha[:,i-1] = alphai + labeli*labelj*(alpha_old.item(j-1) - alphaj)

                    # compute threshold
                    b1 = self.bias - Ei - labeli*(alphai-alpha_old.item(i-1))*kernel(datai,datai,self.kernel)
                    b1 = b1 - labelj*(alphaj-alpha_old.item(j-1))*kernel(datai,dataj,self.kernel)

                    b2 = self.bias - Ej - labeli*(alphai-alpha_old.item(i-1))*kernel(datai,dataj,self.kernel)
                    b2 = b2 - labelj*(alphaj-alpha_old.item(j-1))*kernel(dataj,dataj,self.kernel)

                    if alphai > 0 and alphai < self.C:
                        self.bias = b1
                    elif alphaj > 0 and alphaj < self.C:
                        self.bias = b2
                    else:
                        self.bias = (b1+b2)/2

                    num_changed_alphas = num_changed_alphas + 1

            if num_changed_alphas == 0:
                passes = passes + 1
            else:
                passes = 0
            print "Pass ", passes
Example #46
 def __init__(self, agent_num, time, render=True):
     self.game = kernel(car_num=agent_num, time=time, render=render)
     self.g_map = self.game.get_map()
     self.memory = []
Example #47
                                    kern_dict_local['s_axis'][s_axis_idx]['master']['num'] = str(i + int(s_axis['master']['num']))
                        else:
                            if kern_dict_local['s_axis']['scope'] == 'local':
                                kern_dict_local['s_axis']['master']['num'] = str( i + int(kern_dict_local['s_axis']['master']['num']))

                    if 'wire_slave' in kern_dict_local:
                        if type(kern_dict_local['wire_slave']) == type([]):
                            for slave_idx, slave in enumerate(kern_dict_local['wire_slave']):
                                kern_dict_local['wire_slave'][slave_idx]['master']['num'] = str(i + int(slave['master']['num']))
                        else:
                            kern_dict_local['wire_slave']['master']['num'] = str(i + int(kern_dict_local['wire_slave']['master']['num']))

                    print("kern dict " + str(kern_dict_local))
                    self.kernels.append(kernel(**kern_dict_local))
                    print("kernelONE object " + str(self.kernels[len(self.kernels) - 1].data))


            else:
                kern_dict_local['rep'] = 1
                kern_dict_local['num'] = int(kern_dict_local['num'])
                self.kernels.append(kernel(**kern_dict_local))

        for kern in self.kernels:
            print("kernel object " + str(kern.data))

        self.nodes = []
        for node_idx, node_dict in enumerate(map_dict):
            node_inst = node(**node_dict)
            node_inst['kernel'] = []
Example #48
from kernel import kernel
import savitzky_golay_filter as sgf

xkernels1 = [
	kernel(sgf.makeIt(71, 5, 12, 12, 1, 0)),
	kernel(sgf.makeIt(25, 5, 12, 12, 1, 0)),
	kernel(sgf.makeIt(21, 3, 10, 10, 1, 0)),
	kernel(sgf.makeIt(9, 3, 4, 4, 1, 0)),
	kernel(sgf.makeIt(5, 3, 2, 2, 1, 0)),
	kernel(sgf.makeIt(3, 2, 1, 1, 1, 0)),
	kernel(sgf.makeIt(3, 2, 0, 1, 1, 0)),
	kernel(sgf.makeIt(3, 2, 1, 0, 1, 0)),
	kernel(sgf.makeIt(3, 2, 2, 1, 1, 0)),
	kernel(sgf.makeIt(3, 2, 1, 2, 1, 0)),
	kernel(sgf.makeIt(3, 2, 0, 0, 1, 0)),
	kernel(sgf.makeIt(3, 2, 0, 2, 1, 0)),
	kernel(sgf.makeIt(3, 2, 2, 0, 1, 0)),
	kernel(sgf.makeIt(3, 2, 2, 2, 1, 0)),
]

ykernels1 = [
	kernel(sgf.makeIt(71, 5, 12, 12, 0, 1)),
	kernel(sgf.makeIt(25, 5, 12, 12, 0, 1)),
	kernel(sgf.makeIt(21, 3, 10, 10, 0, 1)),
	kernel(sgf.makeIt(9, 3, 4, 4, 0, 1)),
	kernel(sgf.makeIt(5, 3, 2, 2, 0, 1)),
	kernel(sgf.makeIt(3, 2, 1, 1, 0, 1)),
	kernel(sgf.makeIt(3, 2, 0, 1, 0, 1)),
	kernel(sgf.makeIt(3, 2, 1, 0, 0, 1)),
	kernel(sgf.makeIt(3, 2, 2, 1, 0, 1)),
	kernel(sgf.makeIt(3, 2, 1, 2, 0, 1)),
Example #49
	lval = [training_label.count(i) for i in lkey]
	temp = lkey[lval.index(max(lval))]
	return [temp for i in test_data]
"""
"""
#SVM
def CLM(training_set, training_label, test_data):
	if len(set(training_label)) == 1:
		return [training_label[0] for i in test_data]
	clf = svm.SVC(decision_function_shape='ovo')
	clf.fit(training_set, training_label)
	return clf.predict(np.array(test_data))
"""

trcl = treelet_CPM(trL, labeling(trdataextract))
ker = kernel("ra", [coi])
trcl.ker(ker)
trcl.cor()
trcl.target()
trcl.CLM = CLM
trcl.linkage("inf")
print("start: tree")
trcl.tree()
print("start: train")
trcl.train()
print("start: predict")
L = trcl.predict(tsL)
realL = labeling(tsdataextract)
with open("temp.csv", "w", newline="") as csvfile:
    writer = csv.writer(csvfile)
    writer.writerow(L)
Example #50
import numpy as np
from kernel import kernel
import matplotlib.pyplot as plt
from time import sleep

A = np.random.randint(1,10,[6,6])
A
loc = np.array([2,3])
order = 3
for k in range(order):
    dx = np.arange(loc[0]-order+k, loc[0]+order+1-k)
    dy = np.arange(loc[1]-order+k, loc[1]+order+1-k)
    ix = np.mod(dx,6)
    iy = np.mod(dy,6)
    print(ix)
k = kernel(np.array([[3,3]]), type='square', order=0, space=8)
k[:,:,0]

a=[]
r = 5
for t in range(360):
    x = 3+np.round(r*np.cos(t*np.pi/180))
    y = 3+np.round(r*np.sin(t*np.pi/180))
    a.append(np.array([x,y]))

# for o in range(10):
#     kin = kernel(np.array([[33,33]]), type='circle', order=o, space=64)
#     plt.imshow(kin[:,:,0])
#     plt.show()