def AI_Module(self, Blood_oxygen, Blood_pressure, Pulses):

        # The AI module makes its prediction from the previous readings.
        oxygen = np.array(Blood_oxygen)
        pressure = np.array(Blood_pressure)
        Pulse = np.array(Pulses)
        pressure_predict_result = np.mean(pressure)
        oxygen_predict_result = np.mean(oxygen)
        Pulse_predict_result = np.mean(Pulse)

        return pressure_predict_result, oxygen_predict_result, Pulse_predict_result
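
A minimal usage sketch of what this method computes, with hypothetical readings (the numbers are placeholders, not values from the original source): each "prediction" is simply the mean of the recent measurements.

import numpy as np

blood_oxygen = [97, 98, 96]        # hypothetical SpO2 readings
blood_pressure = [120, 118, 122]   # hypothetical blood-pressure readings
pulses = [72, 75, 70]              # hypothetical pulse readings

# Equivalent to the three values returned by AI_Module:
print(np.mean(blood_pressure), np.mean(blood_oxygen), np.mean(pulses))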
Example #2
    def execute(self):

        #Get points from vtk file as numpy array
        features = self._feature_extractor(self._in_vtk)
        features = StandardScaler().fit_transform(features)

        #Clustering
        if self._method == 'DBSCAN':
            db = DBSCAN(eps=0.3, min_samples=10).fit(features)
            core_samples = db.core_sample_indices_
            labels = db.labels_
        elif self._method == 'KMeans':
            kmeans = KMeans(init='k-means++',
                            n_clusters=self._number_of_clusters,
                            n_init=50).fit(features)
            core_samples = kmeans.cluster_centers_
            labels = kmeans.labels_

        elif self._method == 'MiniBatchKMeans':
            mbk = MiniBatchKMeans(init='k-means++',
                                  n_clusters=self._number_of_clusters,
                                  batch_size=20,
                                  n_init=20,
                                  max_no_improvement=10,
                                  verbose=0).fit(features)
            labels = mbk.labels_
            core_samples = mbk.cluster_centers_
        elif self._method == 'SpectralClustering':
            sc = SpectralClustering(
                n_clusters=self._number_of_clusters).fit(features)
            labels = sc.labels_
            core_samples = np.zeros(
                [self._number_of_clusters, features.shape[1]])
            for ii in range(self._number_of_clusters):
                core_samples[ii, :] = np.mean(features[labels == ii, :], axis=0)

        unique_labels = set(labels)
        self._labels = labels
        self._centroids = core_samples
        self._unique_labels = unique_labels

        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.set_aspect('equal')
        ax.scatter(features[:, 0],
                   features[:, 1],
                   c=labels,
                   marker='.',
                   cmap=plt.cm.jet,
                   linewidth=0)
        ax.grid(True)
        fig.savefig('test.png')

        #Save data for each cluster as a vtkPolyData
        for k in unique_labels:
            ids = np.argwhere(labels == k).flatten()
            print(labels.shape[0])
            print(ids.shape[0])
            print(self._in_vtk.GetNumberOfPoints())
            self._out_vtk_collection.AddItem(self.extract_particles(ids))
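
For reference, a self-contained sketch of the KMeans branch above on toy data, without the VTK feature extraction (the array values are placeholders):

import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans

features = np.random.rand(100, 3)                  # stand-in for the extracted point features
features = StandardScaler().fit_transform(features)
kmeans = KMeans(init='k-means++', n_clusters=3, n_init=50).fit(features)
print(kmeans.labels_[:10])                         # cluster label of the first ten points
print(kmeans.cluster_centers_.shape)               # (3, 3): one centroid per cluster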
Example #3
File: COS.py  Project: GeorgRamer/COS-2D
 def get_peak_vals(self, nu_1, nu_2, epsilon=10**-20, absolut=False):
     """
     Gives the interpretation according to Noda's rules. `absolut`
     """
     ind_1 = self.nu_to_index(nu_1)
     ind_2 = self.nu_to_index(nu_2, nu_1=False)
     sync_res = self.sync()[ind_1, ind_2]
     async_res = self.async()[ind_1, ind_2]
     return {"sync": sync_res, "async": async_res, "mean": np.mean()[ind_1]}
Example #4
    def cv_score(self, n):
        model = self.base_model(n)
        scores = []

        split_method = KFold(n_splits=2)
        for train_idx, test_idx in split_method.split(self.sequences):
            self.X, self.lengths = combine_sequences(train_idx, self.sequences)
            X, l = combine_sequences(test_idx, self.sequences)
            scores.append(model.score(X, l))
        return np.mean(scores), model
Example #5
def regularize(xMat):
    """
    xMat: np.matrix; each row is a sample and each column is a feature.
    Normalize the input matrix column by column.
    """
    inMat = xMat.copy()
    # Compute the column means and variances, then standardize.
    inMeans = np.mean(inMat, 0)
    inVar = np.var(inMat, 0)
    inMat = (inMat - inMeans) / inVar
    return inMat
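
A minimal usage sketch with a toy matrix (the values are placeholders). Note that, as written above, each column is divided by its variance rather than its standard deviation.

import numpy as np

x = np.matrix([[1.0, 10.0],
               [2.0, 20.0],
               [3.0, 30.0]])
print(regularize(x))   # each column centered by its mean and scaled by its variance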
Example #6
def regLeaf(dataArr):
    '''
    Desc: regression-tree leaf node, labeled with the mean of the data set's y values

    Args:
        dataArr: the data assigned to this leaf node

    Returns:
        the mean of the data set over the y label
    '''

    return np.mean(dataArr[:, -1])
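
A minimal usage sketch; dataArr is assumed to be a NumPy array whose last column holds the target y (the toy values are placeholders).

import numpy as np

data = np.array([[1.0, 2.0, 3.0],
                 [4.0, 5.0, 6.0],
                 [7.0, 8.0, 9.0]])
print(regLeaf(data))   # mean of the last column: 6.0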
Example #7
 def get_peak_vals(self, nu_1, nu_2, epsilon=10**-20, absolut=False):
     """
     Gives the interpretation according to Noda's rules. `absolut`
     """
     ind_1 = self.nu_to_index(nu_1)
     ind_2 = self.nu_to_index(nu_2, nu_1=False)
     sync_res = self.sync()[ind_1, ind_2]
     async_res = self.async()[ind_1, ind_2]
     return {
         "sync": sync_res,
         "async": async_res,
         "mean": np.mean()[ind_1]
     }
Example #8
  def execute(self):
    
    #Get points from vtk file as numpy array
    features=self._feature_extractor(self._in_vtk)
    features=StandardScaler().fit_transform(features)

    #Clustering
    if self._method == 'DBSCAN':
      db=DBSCAN(eps=0.3, min_samples=10).fit(features)
      core_samples = db.core_sample_indices_
      labels = db.labels_
    elif self._method == 'KMeans':
      kmeans= KMeans(init='k-means++',n_clusters=self._number_of_clusters,n_init=50).fit(features)
      core_samples = kmeans.cluster_centers_
      labels = kmeans.labels_

    elif self._method == 'MiniBatchKMeans':
      mbk =  MiniBatchKMeans(init='k-means++', n_clusters=self._number_of_clusters, batch_size=20,
                             n_init=20, max_no_improvement=10, verbose=0).fit(features)
      labels = mbk.labels_
      core_samples =  mbk.cluster_centers_
    elif self._method == 'SpectralClustering':
      sc = SpectralClustering(n_clusters=self._number_of_clusters).fit(features)
      labels = sc.labels_
      core_samples=np.zeros([self._number_of_clusters,features.shape[1]])
      for ii in range(self._number_of_clusters):
        core_samples[ii,:] = np.mean(features[labels == ii,:],axis=0)

    unique_labels=set(labels)
    self._labels=labels
    self._centroids = core_samples
    self._unique_labels = unique_labels
          
    fig=plt.figure()
    ax = fig.add_subplot(111)
    ax.set_aspect('equal')
    ax.scatter(features[:,0],features[:,1],c=labels,marker='.',cmap=plt.cm.jet,linewidth=0)
    ax.grid(True)
    fig.savefig('test.png')

    #Save data for each cluster as a vtkPolyData
    for k in unique_labels:
      ids = np.argwhere(labels == k).flatten()
      print(labels.shape[0])
      print(ids.shape[0])
      print(self._in_vtk.GetNumberOfPoints())
      self._out_vtk_collection.AddItem(self.extract_particles(ids))
Example #9
 def copy_params(self, target_cov_fit):
     self.A_ = np.copy(target_cov_fit.A_)
     self.powers_ = np.copy(target_cov_fit.powers_)
     n_samples = len(self.powers_)
     sigmas = target_cov_fit.sigmas_
     if not self.avg_noise:
         if not target_cov_fit.avg_noise:
             self.sigmas_ = np.copy(sigmas)
         else:
             self.sigmas_ = (np.ones(n_samples)[:, None] *
                             sigmas[None, :])
     else:
         if target_cov_fit.avg_noise:
             self.sigmas_ = np.copy(sigmas)
         else:
             self.sigmas_ = np.mean(sigmas, axis=0)
     self.initialized = True
     return self
Example #10
def do_mcmc(params, data, emu_list, zs, sfs, truth):
    import emcee
    lnprob_args = (data, emu_list, zs, sfs)
    ndim = len(params)
    nwalkers = ndim * 2 + 2
    nsteps = 2000
    nburn = 200
    pos = [params + 1e-3 * np.random.randn(ndim) for k in range(nwalkers)]
    sampler = emcee.EnsembleSampler(nwalkers,
                                    ndim,
                                    lnprob,
                                    args=lnprob_args,
                                    threads=4)
    sampler.run_mcmc(pos, nsteps)
    likes = sampler.flatlnprobability
    fullchain = sampler.flatchain
    np.savetxt("txt_files/chains/emucosmo_chain.txt", fullchain)
    chain = fullchain[nburn * nwalkers:]
    out = np.array([np.mean(chain, 0), np.std(chain, 0)]).T
    np.savetxt("analysis_output.txt", out)
    return
Example #11
def gridsearch_with_crossvalidation():
    X_trainval, X_test, y_trainval, y_test=train_test_split(iris.data,iris.target,random_state=0)
    X_train, X_valid, y_train, y_valid=train_test_split(X_trainval,y_trainval,random_state=1)
    best_score=0
    for gamma in [0.001, 0.01, 0.1, 1, 10, 100]:
        for C in [0.001, 0.01, 0.1, 1, 10, 100]:
            svm=SVC(gamma=gamma,C=C)
            scores=cross_val_score(svm,X_trainval,y_trainval,cv=5)
            score=np.mean(scores)
            if score>best_score:
                best_score=score
                best_params={'C':C,'gamma':gamma}
    print('网格搜索for循环<有cross_val_score交叉验证>获得的最好参数组合:',best_params)
    print(' ')
    svmf=SVC(**best_params)
    svmf.fit(X_trainval,y_trainval)
    print('网格搜索<有交叉验证>获得的最好估计器,在训练验证集上没做交叉验证的得分:', svmf.score(X_trainval, y_trainval))
    print(' ')
    #没有交叉验证的的得分直接是svmf.score,有交叉验证的得分是cross_val_score
    scores=cross_val_score(svmf, X_trainval, y_trainval, cv=5)
    print('网格搜索<有交叉验证>获得的最好估计器,在训练验证集上做交叉验证的平均得分:', np.mean(scores))  # 交叉验证的平均accuracy
    print(' ')
    print('网格搜索<有交叉验证>获得的最好估计器,在测试集上的得分:', svmf.score(X_test, y_test))  #####
Example #12
File: kmeans.py  Project: lchigoc/hackaway
    def train(self, inputs):
        self.means = random.sample(inputs, self.k)
        assignments = None

        while True:
            # find new assignments
            new_assignments = list(map(self.classify, inputs))

            # if no assignments have changed, we're done
            if assignments == new_assignments:
                return

            # otherwise keep the new assignments
            assignments = new_assignments

            # and compute new means based on the new assignments
            for i in range(self.k):
                # find all the points assigned to cluster i
                i_points = [p for p, a in zip(inputs, assignments) if a == i]

                # compute the new k center points
                if i_points:
                    self.means[i] = np.mean(i_points, axis=0)  # axis=0: component-wise mean of the cluster's points
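
A minimal sketch of the class this train() method might live in; the constructor and classify() below are assumptions for illustration, not the original project's code.

import numpy as np

class KMeansSketch:
    def __init__(self, k):
        self.k = k          # number of clusters
        self.means = None   # cluster centers, set by train()

    def classify(self, point):
        # index of the closest current mean (squared Euclidean distance)
        return min(range(self.k),
                   key=lambda i: np.sum((np.array(point) - np.array(self.means[i])) ** 2))

    # train() as defined above would be attached here

points = [[1.0, 1.0], [1.2, 0.8], [8.0, 8.0], [8.2, 7.9]]
clusterer = KMeansSketch(k=2)
# clusterer.train(points)  # with train() attached, this converges to two cluster means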
Example #13
run3_pchev = []
for pchev_id in pchev_list:
    one_vehicle = pd.read_csv("/data/pchev/" + pchev_id)
    time = one_vehicle.columns[0]
    one_vehicle['time'] = pd.to_datetime(one_vehicle['time']).apply(
        lambda x: x.date())  #year,month,day
    agg = one_vehicle.groupby('time').agg({"runmodel": [a, b, c]})
    agg = agg.apply(lambda x: x / sum(x), axis=1)
    name = agg.columns
    r1 = list(agg[name[0]])
    r2 = list(agg[name[1]])
    r3 = list(agg[name[2]])
    run1_pchev += r1
    run2_pchev += r2
    run3_pchev += r3
np.mean([i for i in run3 if not np.isnan(i)])
plt.hist(run3, bins=100, density=True)
plt.hist(run3_pchev, bins=100, density=True)
'''private car PHEV '''
plt.subplot(2, 1, 1)
plt.ylim(0, 11)
plt.hist(run1_pchev, bins=100, density=True)
plt.hist(run2_pchev, bins=100, density=True)
plt.legend(("runmodel1", "runmodel2"))
plt.xlabel("percentage of using one runmodel")
plt.ylabel("density")
'''didi car-hailing PHEV'''
plt.subplot(2, 1, 2)
plt.ylim(0, 11)
plt.hist(run1, bins=100, density=True)
plt.xlabel("percentage of using one runmodel")
Example #14
    for x in range(100):
        step = random_walk[-1]
        dice = np.random.randint(1,7)
        if dice <= 2:
            step = max(0, step - 1)
        elif dice <= 5:
            step = step + 1
        else:
            step = step + np.random.randint(1,7)
        if np.random.rand() <= 0.001:
            step = 0
        random_walk.append(step)
    all_walks.append(random_walk)

# Create and plot np_aw_t
np_aw_t = np.transpose(np.array(all_walks))

# Select last row from np_aw_t: ends
ends = np_aw_t[-1]

# Plot histogram of ends, display plot
plt.hist(ends)
plt.show()

######################################################################

#EX69

np.mean(ends >= 60)
# 78.4% chance: the mean of a boolean array is the fraction of True values
Example #15
import numpy as np
import cv2
import pickle
from DataProcessor import DataProcessor
from EmotionLearner import EmotionLearner
from sklearn import svm
import matplotlib.pyplot as plt

dataProcessor=DataProcessor()
data,label=dataProcessor.loadCKData_With_Hog_32x32_3x3()
data,featureMeans,featureVariance=dataProcessor.normalizeData(data)

emotionLearner=EmotionLearner()
scores=emotionLearner.crossValidateSVM(data,label)
print(scores)
print(np.mean(scores))
clf=emotionLearner.trainSVM(data,label)

#save data and svm
with open('classifier/trainingDataCK.pkl', 'wb') as output:
    pickle.dump(data, output, pickle.HIGHEST_PROTOCOL)
    pickle.dump(label, output, pickle.HIGHEST_PROTOCOL)
    pickle.dump(featureMeans, output, pickle.HIGHEST_PROTOCOL)
    pickle.dump(featureVariance, output, pickle.HIGHEST_PROTOCOL)

with open('classifier/svmCK.pkl', 'wb') as output:
    pickle.dump(clf, output, pickle.HIGHEST_PROTOCOL)
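
A minimal sketch of loading the pickled objects back, mirroring the dump order above:

import pickle

with open('classifier/trainingDataCK.pkl', 'rb') as f:
    data = pickle.load(f)
    label = pickle.load(f)
    featureMeans = pickle.load(f)
    featureVariance = pickle.load(f)

with open('classifier/svmCK.pkl', 'rb') as f:
    clf = pickle.load(f)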