def kmeans(self, pixelcluster=True):
    '''
    calculates kmeans centers for the cob from the centers of the
    kernels it contains
    '''
    if pixelcluster == True:
        pixellist = []
        for kernel in self.kernellist:
            pixellist.extend(kernel.pixellist)
        kmeans = Kernel.KMeans(n_clusters=2).fit(pixellist)
    else:
        kmeans = Kernel.KMeans(n_clusters=2).fit(self.kernelcenters)
    meanc1 = kmeans.cluster_centers_[0].tolist()
    meanc2 = kmeans.cluster_centers_[1].tolist()
    sizec1 = 0
    sizec2 = 0
    kernelclusters = []
    for kernel in self.kernellist:
        for cluster in kernel.clusters:
            kernelclusters.append(cluster)
    for label, cluster in zip(kmeans.labels_, kernelclusters):
        if label == 0:
            sizec1 += cluster[0]
        elif label == 1:
            sizec2 += cluster[0]
    self.clusters.append([sizec1, meanc1])
    self.clusters.append([sizec2, meanc2])
def _LSSVMtrain(X, Y, kernel_dict, regulator):
    m = Y.shape[0]
    # Kernel
    if kernel_dict['type'] == 'RBF':
        K = Kernel.RBF(m, kernel_dict['gamma'])
        K.calculate(X)
    elif kernel_dict['type'] == 'LINEAR':
        K = Kernel.LINEAR(m)
        K.calculate(X)
    elif kernel_dict['type'] == 'POLY':
        K = Kernel.POLY(m, kernel_dict['c'], kernel_dict['d'])
        K.calculate(X)
    elif kernel_dict['type'] == 'TANH':
        K = Kernel.TANH(m, kernel_dict['c'], kernel_dict['d'])
        K.calculate(X)
    elif kernel_dict['type'] == 'TL1':
        K = Kernel.TL1(m, kernel_dict['rho'])
        K.calculate(X)

    H = np.multiply(np.dot(np.matrix(Y).T, np.matrix(Y)), K.kernelMat)
    M_BR = H + np.eye(m) / regulator
    # Concatenate
    L_L = np.concatenate((np.matrix(0), np.matrix(Y).T), axis=0)
    L_R = np.concatenate((np.matrix(Y), M_BR), axis=0)
    L = np.concatenate((L_L, L_R), axis=1)
    R = np.ones(m + 1)
    R[0] = 0
    # solve
    b_a = LA.solve(L, R)
    b = b_a[0]
    alpha = b_a[1:]
    # return
    return (alpha, b, K)
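# --- Illustration (not part of the original source) ---
# A minimal, self-contained sketch of the linear system solved by
# _LSSVMtrain above, mirroring its block structure with a toy RBF Gram
# matrix built inline so only numpy is needed. The data, gamma, and
# regulator values here are made-up stand-ins.
import numpy as np
from numpy import linalg as LA

rng = np.random.default_rng(0)
X = rng.normal(size=(6, 2))                # 6 toy samples, 2 features
Y = np.array([1., 1., 1., -1., -1., -1.])  # labels in {-1, +1}
gamma, regulator = 0.5, 10.0

# RBF Gram matrix: K[i, j] = exp(-gamma * ||x_i - x_j||^2)
sq = ((X[:, None, :] - X[None, :, :]) ** 2).sum(-1)
K = np.exp(-gamma * sq)

# Assemble [[0, Y^T], [Y, H + I/C]] and solve, as in _LSSVMtrain
m = len(Y)
H = np.outer(Y, Y) * K
L = np.zeros((m + 1, m + 1))
L[0, 1:] = Y
L[1:, 0] = Y
L[1:, 1:] = H + np.eye(m) / regulator
R = np.ones(m + 1)
R[0] = 0.0
b_a = LA.solve(L, R)
b, alpha = b_a[0], b_a[1:]
print('b =', b, 'alpha =', alpha)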
def setColorStats(self):
    replineCenters = self.getReplineCenters()
    LMean = 0
    aMean = 0
    bMean = 0
    numberOfCobs = 0
    for cob in self.cobs:
        LMean += self.cobs[cob].averageLab["L"]
        aMean += self.cobs[cob].averageLab["a"]
        bMean += self.cobs[cob].averageLab["b"]
        numberOfCobs += 1
    LMean = LMean / float(numberOfCobs)
    aMean = aMean / float(numberOfCobs)
    bMean = bMean / float(numberOfCobs)
    rgb1 = Kernel.HunterLabToRGB(replineCenters[0][0], replineCenters[0][1],
                                 replineCenters[0][2])
    rgb2 = Kernel.HunterLabToRGB(replineCenters[1][0], replineCenters[1][1],
                                 replineCenters[1][2])
    self.averageRGB["R"], self.averageRGB["G"], self.averageRGB["B"] = \
        Kernel.HunterLabToRGB(LMean, aMean, bMean)
    self.averageLab["L"] = LMean
    self.averageLab["a"] = aMean
    self.averageLab["b"] = bMean
    self.centers = {"R1": rgb1["R"], "R2": rgb2["R"],
                    "G1": rgb1["G"], "G2": rgb2["G"],
                    "B1": rgb1["B"], "B2": rgb2["B"],
                    "L1": replineCenters[0][0], "L2": replineCenters[1][0],
                    "a1": replineCenters[0][1], "a2": replineCenters[1][1],
                    "b1": replineCenters[0][2], "b2": replineCenters[1][2]}
    clusterDifference = (abs(replineCenters[0][0] - replineCenters[1][0]) +
                         abs(replineCenters[0][1] - replineCenters[1][1]) +
                         abs(replineCenters[0][2] - replineCenters[1][2]))
class KernelTestCase(unittest.TestCase):

    def runTest(self):
        self.kernel = Kernel()
        self.console = Console()
        self.i1 = Instruction(1)
        self.i2 = Instruction(2)
        self.program = Program("p1")
        self.program.inst.append(self.i1)
        self.program.inst.append(self.i2)
        self.kernel.run(self.program, self.console)
        self.assertEqual(self.program.inst[1], self.console.instructions[1])

    def instructionNumberTest(self):
        self.i1 = Instruction(1)
        self.assertEqual(self.i1.number, 1)

    def addConsoleTest(self):
        self.kernel = Kernel()
        self.console = Console()
        self.i1 = Instruction(1)
        self.i2 = Instruction(2)
        self.program = Program("p1")
        self.program.inst.append(self.i1)
        self.program.inst.append(self.i2)
        self.kernel.run(self.program, self.console)
        self.assertEqual(self.program.inst, self.console.instructions)
def _KSVMtrain(X, Y, kernel_dict):
    m = Y.shape[0]
    if kernel_dict['type'] == 'RBF':
        K = Kernel.RBF(m, kernel_dict['gamma'])
        K.calculate(X)
    elif kernel_dict['type'] == 'LINEAR':
        K = Kernel.LINEAR(m)
        K.calculate(X)
    elif kernel_dict['type'] == 'POLY':
        K = Kernel.POLY(m)
        K.calculate(X)
    elif kernel_dict['type'] == 'TANH':
        K = Kernel.TANH(m, kernel_dict['c'], kernel_dict['d'])
        K.calculate(X)
    elif kernel_dict['type'] == 'TL1':
        K = Kernel.TL1(m, kernel_dict['rho'])
        K.calculate(X)

    p1, p2 = trans_mat(K.kernelMat)
    K.kernelMat = np.dot((p1 - p2), K.kernelMat)
    # solve for alpha and b via SVM ???
    svm = Algorithms.SVM(X, Y, kernel_dict)
    # update alpha
    alpha = np.dot((p1 - p2), svm.alphas)
    b = svm.b
    return (alpha, b, K)
def getReplineCenters(self, graph=False, mean=False, size=False):
    kernelcenters = []
    for cob in xrange(len(self.cobs.keys())):
        kernelcenters.extend(self.getCob(cob + 1).KernelsCenters)
    KM = KMeans(n_clusters=2, n_init=20).fit(kernelcenters)
    kmeans = KM.cluster_centers_
    if graph == True:
        l, a, b = [], [], []
        for x in kernelcenters:
            l.append(x[0])
            a.append(x[1])
            b.append(x[2])
        ax = Kernel.showScatterPlot(l, a, b)
        Kernel.addpoints(kmeans, ax, color='r', marker='o')
        if mean == True:
            labMean = []
            labMean.append((self.averageLab["L"], self.averageLab["a"],
                            self.averageLab["b"]))
            Kernel.addpoints(labMean, ax, color='g', marker='o')
    if size == True:
        size1 = 0
        size2 = 0
        for x in KM.labels_:
            if x == 0:
                size1 += 1
            elif x == 1:
                size2 += 1
        return KM, size1, size2
    return np.array(kmeans)
def showscatterplot(self, s=80, closepreviousplot=True):
    if len(self.coblist) == 1:
        self.coblist[0].showscatterplot(s, closepreviousplot)
    else:
        lablists = Kernel.threeTupleToThreeLists(self.allkernelclusters)
        if closepreviousplot == True:
            plt.close(1)
        plot = plt.figure()
        axes = plot.add_subplot(111, projection='3d')
        llist = lablists[0]
        alist = lablists[1]
        blist = lablists[2]
        for l, a, b in zip(llist, alist, blist):
            R, G, B = Kernel.HunterLabToRGB(l, a, b, normalized=True)
            axes.scatter(l, a, b, color=[R, G, B], marker='s', s=s)
        axes.set_xlabel('L')
        axes.set_ylabel('a')
        axes.set_zlabel('b')
        totalsize = 0
        for cluster in self.clusters:
            totalsize += cluster[0]
        for cluster in self.clusters:
            addedsize = int(s * (cluster[0] / totalsize))
            s += addedsize
            Kernel.addpoints(cluster[1], axes, marker="o", color="g", s=s)
        plt.title(self.name)
        plt.ion()
        plt.show()
        return axes
def fit(self, X, Y):
    # print('Kernel:', kernel_dict)
    train_data = np.append(X, Y.reshape(len(Y), 1), axis=1)
    if self.databalance == 'LowSampling':
        data_maj = train_data[Y == 1]  # majority class
        data_min = train_data[Y != 1]
        index = np.random.randint(len(data_maj), size=len(data_min))
        lower_data_maj = data_maj[list(index)]
        train_data = np.append(lower_data_maj, data_min, axis=0)
        X = train_data[:, :-1]
        Y = train_data[:, -1]
        self.Y = Y
    elif self.databalance == 'UpSampling':
        X, Y = SVMSMOTE(random_state=42).fit_sample(
            train_data[:, :-1], np.asarray(train_data[:, -1]))
        self.Y = Y
    else:
        self.Y = Y

    m = Y.shape[0]
    # Kernel
    if self.kernel_dict['type'] == 'RBF':
        K = Kernel.RBF(m, self.kernel_dict['sigma'])
    elif self.kernel_dict['type'] == 'LINEAR':
        K = Kernel.LINEAR(m)
    elif self.kernel_dict['type'] == 'POLY':
        K = Kernel.POLY(m, self.kernel_dict['d'])
    K.calculate(X)

    tmp1 = np.hstack((np.ones((1, 2 * m)), [[0]]))
    M_BR = K.kernelMat + np.eye(m) / (self.C * self.m_value)
    tmp2 = np.hstack((M_BR, K.kernelMat, np.ones((m, 1))))
    M_BL = K.kernelMat + np.eye(m) / (self.C * (1 - self.m_value))
    tmp3 = np.hstack((K.kernelMat, M_BL, np.ones((m, 1))))
    L = np.vstack((tmp1, tmp2, tmp3))
    R = np.ones(2 * m + 1)
    R[0] = 0
    R[m + 1:] = -1
    # solve
    solution = LA.solve(L, R)
    b = solution[-1]
    alpha = solution[:m]
    beta = solution[m:2 * m]
    print('b', b)
    # self.gamma = gamma
    self.beta = beta
    self.alpha = alpha
    self.b = b
    self.K = K
    self.kernelMat = K.kernelMat
def fit(self, X, Y):
    # print('Kernel:', self.kernel_dict)
    train_data = np.append(X, Y.reshape(len(Y), 1), axis=1)
    if self.databalance == 'LowSampling':
        data_maj = train_data[Y == 1]  # majority class
        data_min = train_data[Y != 1]
        index = np.random.randint(len(data_maj), size=len(data_min))
        lower_data_maj = data_maj[list(index)]
        train_data = np.append(lower_data_maj, data_min, axis=0)
        X = train_data[:, :-1]
        Y = train_data[:, -1]
        self.Y = Y
    elif self.databalance == 'UpSampling':
        X, Y = SVMSMOTE(random_state=42).fit_sample(
            train_data[:, :-1], np.asarray(train_data[:, -1]))
        self.Y = Y
    else:
        self.Y = Y

    m = len(Y)
    # Kernel
    if self.kernel_dict['type'] == 'RBF':
        K = Kernel.RBF(m, self.kernel_dict['sigma'])
        K.calculate(X)
    elif self.kernel_dict['type'] == 'LINEAR':
        K = Kernel.LINEAR(m)
        K.calculate(X)
    elif self.kernel_dict['type'] == 'POLY':
        K = Kernel.POLY(m, self.kernel_dict['d'])
        K.calculate(X)

    H = np.multiply(np.dot(np.matrix(Y).T, np.matrix(Y)), K.kernelMat)
    M_BR = H + np.eye(m) / self.C
    # Concatenate
    L_L = np.concatenate((np.matrix(0), np.matrix(Y).T), axis=0)
    L_R = np.concatenate((np.matrix(Y), M_BR), axis=0)
    L = np.concatenate((L_L, L_R), axis=1)
    R = np.ones(m + 1)
    R[0] = 0
    # solve
    b_a = LA.solve(L, R)
    b = b_a[0]
    alpha = b_a[1:]
    e = alpha / self.C
    self.alpha = alpha
    self.b = b
    self.K = K
    self.kernelMat = K.kernelMat
    return self.alpha, self.b, e
def dbscan(self, eps=.5, plot=False):
    if len(self.coblist) > 1:
        pixlist = []
        totalnumkernels = 0
        for cob in self.coblist:
            for kernel in cob.kernellist:
                pixlist.extend(kernel.pixellist)
                totalnumkernels += 1
        X = Kernel.np.array(pixlist)
        density = eps
        numberofpixels = len(pixlist)
        db = Kernel.DBSCAN(eps=density).fit(X)
        core_samples_mask = Kernel.np.zeros_like(db.labels_, dtype=bool)
        core_samples_mask[db.core_sample_indices_] = True
        labels = db.labels_
        n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
        unique_labels = set(labels)
        colors = []
        for k in unique_labels:
            class_member_mask = (labels == k)
            xyz = X[class_member_mask & core_samples_mask]
            llist, alist, blist = xyz[:, 0], xyz[:, 1], xyz[:, 2]
            if len(llist) >= numberofpixels / 100:
                size = len(llist)  # cluster size in pixels (was undefined)
                lmean = llist.mean()
                amean = alist.mean()
                bmean = blist.mean()
                self.clusters.append([size, [lmean, amean, bmean]])
                r, g, b = Kernel.HunterLabToRGB(lmean, amean, bmean)
                colors.append([r / 255.0, g / 255.0, b / 255.0])
            else:
                colors.append('k')
        if plot is True:
            graph = plt.figure()
            ax = graph.add_subplot(111, projection='3d')
            for k, col in zip(unique_labels, colors):
                if k == -1:
                    col = 'k'
                class_member_mask = (labels == k)
                xyz = X[class_member_mask & core_samples_mask]
                if len(xyz[:, 0]) >= numberofpixels / 100:
                    print len(xyz[:, 0])
                    ax.scatter(xyz[:, 0], xyz[:, 1], xyz[:, 2], c=col)
                xyz = X[class_member_mask & ~core_samples_mask]
                ax.scatter(xyz[:, 0], xyz[:, 1], xyz[:, 2], c=col, marker='.')
            ax.set_xlabel('L')
            ax.set_ylabel('a')
            ax.set_zlabel('b')
            plt.title('Estimated number of clusters: %d' % n_clusters_)
            plt.ion()
            plt.show()
        return db
    elif len(self.coblist) == 1:
        self.clusters = self.coblist[0].clusters
def TablicaF2(X, Y, mp, wsp, F0, WarP):
    for Ele in range(le):
        Out = Przylozdo1(X, Y, mp[Ele], wsp, Ele)
        if Out == True:
            # print("Load applied to element no.:", Ele)
            w1, w2, w3, w4 = mp[Ele]
            F0 = KE.ObciazenieP(F0, WarP / 4, w1 - 1)
            F0 = KE.ObciazenieP(F0, WarP / 4, w2 - 1)
            F0 = KE.ObciazenieP(F0, WarP / 4, w3 - 1)
            F0 = KE.ObciazenieP(F0, WarP / 4, w4 - 1)
    return F0
def main():
    interaction_kernel_0 = Kernel.GaussKernel(3)
    interaction_kernel_0.add_mode(1.0, [0.5, 0.5, 0.5], [0.0, 0.0, 0.0])
    interaction_kernel_0.add_mode(-5.5, [5.5, 5.5, 5.5], [0.0, 0.0, 0.0])
    interaction_kernel_0.calculate()

    interaction_kernel_1 = Kernel.GaussKernel(1)
    interaction_kernel_1.add_mode(1.0, [0.5], [0.0])
    interaction_kernel_1.add_mode(-5.5, [5.5], [0.0])
    interaction_kernel_1.calculate()

    input_field = DynamicField.DynamicField([[5], [2]], [], None)
    input_field.set_boost(20)

    blub = numpy.array([[0.0, 0.5, 1.0, 0.5, 0.0],
                        [0.0, 0.25, 0.5, 0.25, 0.0]])
    blub = blub.transpose() * 40
    input_weight = DynamicField.Weight(blub)

    field_0 = DynamicField.DynamicField([[5], [4], [2]], [], None)
    field_1 = DynamicField.DynamicField([[4], [6], [10]], [], None)
    scaler = DynamicField.Scaler()
    # weight = DynamicField.Weight([0., 0.5, 1.0, 0.5, 0.0])
    # weight = DynamicField.Weight(5.)
    projection_0 = DynamicField.Projection(2, 2, set([0, 1]), [1, 0])

    processing_steps = [scaler]
    DynamicField.connect(field_0, field_1, processing_steps)
    # DynamicField.connect(input_field, field_0, [input_weight])

    for i in range(0, 500):
        # input_field.step()
        # input_weight.step()
        field_0.step()
        print("field 0:")
        print(field_0.get_output())
        print(field_0.get_output().shape)

        for processing_step in processing_steps:
            processing_step.step()
            print(processing_step.get_name() + ": ")
            print(processing_step.get_output())
            print(processing_step.get_output().shape)

        field_1.step()
        print("field 1:")
        print(field_1.get_activation())
        print(field_1.get_activation().shape)
def convolute(self, kernel, fourier=None):
    """
    The implementation of convolution. It chooses the better algorithm
    depending on the sizes of the kernel and the image.
    :param kernel: the convolution kernel (a Kernel.Kernel or array-like)
    :param fourier: whether to use the Fourier transform to calculate the
        convolution; if None, the choice is made automatically
    :return: the convolved image
    """
    if not isinstance(kernel, Kernel.Kernel):
        try:
            kernel = Kernel.Kernel(kernel)
        except Exception:
            raise Exception('The input must be a kernel')
    kernel_size = kernel.shape[0] * kernel.shape[1]
    image_size = np.sqrt(self.shape[0] * self.shape[1])
    if fourier is not None:
        if fourier:
            return self.__fourier_convolution(kernel)
        else:
            return self.__base_convolution(kernel)
    else:
        # According to Mark Nixon's book (p. 87, 3rd edition): if
        # m^2 < 4*log(N) + 1, the direct implementation should be used;
        # otherwise the Fourier transform should be considered.
        if kernel_size < 4 * np.log(image_size) + 1:
            return self.__base_convolution(kernel)
        else:
            return self.__fourier_convolution(kernel)
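# --- Illustration (not part of the original source) ---
# A quick numeric check of the crossover rule used in convolute() above,
# with hypothetical sizes: for a 512x512 image, N = sqrt(512*512) = 512,
# so the direct method is preferred while m^2 < 4*log(N) + 1 ~ 25.9,
# i.e. for kernels up to about 5x5.
import numpy as np

image_size = np.sqrt(512 * 512)
threshold = 4 * np.log(image_size) + 1
for m in (3, 5, 7):
    method = 'direct' if m * m < threshold else 'fourier'
    print('%dx%d kernel -> %s' % (m, m, method))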
def filter(image, depth, kernel_type, kernel_size):
    kernel = Kernel.kernel(kernel_type, kernel_size)
    depth = int(depth)
    dst = cv2.filter2D(image, depth, kernel)
    return dst
def drawScatterPlot(self, cob=1, kernel=0, pixelsPerKernel=100):
    c = self.getCob(cob)
    k = c.kernelList[kernel]
    r, g, B, l, a, b = self.getkernelcolorlists(
        cob=cob, kernel=kernel, pixelsPerKernel=pixelsPerKernel)
    # return Kernel.showScatterPlot(r, g, B)
    return Kernel.showScatterPlot(l, a, b)
def simulink(self, data, modelInputPath, col=50):
    tf.reset_default_graph()
    batch = 1
    row = 1
    channel = 1
    # label container
    label = tf.placeholder(tf.float32, shape=[1, 1])
    # input data container
    input = tf.placeholder(tf.float32, shape=[batch, row, col, channel],
                           name='input')
    filterKernel = kernel.averageFilter(shape=[3, 1, 1, 1])
    # convolution
    conv2dRel = tf.nn.conv2d(input, filter=filterKernel, strides=[1, 1, 1, 1],
                             data_format='NHWC', padding='SAME')
    # pooling
    maxpoolRel = tf.nn.max_pool(conv2dRel, ksize=[1, 3, 3, 1],
                                strides=[1, 1, 1, 1], padding='SAME',
                                data_format='NHWC')
    deminsion = maxpoolRel.shape.as_list()
    flat_len = deminsion[0] * deminsion[1] * deminsion[2] * deminsion[3]
    # reshape
    maxPoolFlat = tf.reshape(maxpoolRel, shape=[1, flat_len])
    # hidden layer
    hiddenW = tf.Variable(np.zeros(shape=[flat_len, flat_len],
                                   dtype=np.float32), name='Hidden')
    hiddenBias = tf.Variable(np.zeros(shape=[1], dtype=np.float32),
                             name='HiddenBias')
    Hidden = tf.nn.sigmoid(tf.matmul(maxPoolFlat, hiddenW) + hiddenBias)
    # output layer
    W = tf.Variable(np.zeros(shape=[flat_len, 1], dtype=np.float32),
                    dtype=tf.float32, name='W')
    rel = tf.sigmoid(tf.matmul(Hidden, W) +
                     tf.Variable(np.zeros(shape=[1], dtype=np.float32),
                                 name='OutputBias'))
    # define the loss function
    loss = tf.losses.mean_squared_error(labels=label, predictions=rel)
    # init all the weight values
    init = tf.global_variables_initializer()
    # begin the inference process
    with tf.Session() as sess:
        # use this to save/restore the network model
        saver = tf.train.Saver(write_version=tf.train.SaverDef.V2)
        # save the data for using tensorboard to show the network structure
        # tf.summary.FileWriter("G:/GraduationDesignModelData/logs/", sess.graph)
        sess.run(init)
        # restore all the network weights into the cnn network
        # print('\033[1;32;47m', end='')
        print("\t\t\tmodel file restored from path : " + modelInputPath)
        saver.restore(sess=sess, save_path=modelInputPath)
        inputData = inda.getInputData([batch, row, col, channel])
        for jj in range(len(data)):
            inputData[0][0][jj][0] = data[jj]
        predictValue = sess.run(rel, feed_dict={input: inputData})
    return predictValue[0][0]
def TablicaF(X, Y, mp, wsp, F0, WarP):
    for Ele in range(le):
        Out = Przylozdo1(X, Y, mp[Ele], wsp, Ele)
        if Out == True:
            print("Load applied to element no.:", Ele, " with value", WarP)
            F0 = KE.ObciazenieP(F0, WarP, Ele)
    return F0
def setupSystem(scu):
    import os
    import zephyr.Kernel as Kernel
    from IPython.parallel.error import UnmetDependency

    global localSystem
    global localLocator

    tag = (scu['ifreq'], scu['iky'])

    # If there is already a system doing this job on this machine,
    # push the duplicate to another
    if tag in localSystem:
        raise UnmetDependency

    subSystemConfig = baseSystemConfig.copy()
    subSystemConfig.update(scu)

    # Set up method output caching
    if 'cacheDir' in baseSystemConfig:
        subSystemConfig['cacheDir'] = os.path.join(
            baseSystemConfig['cacheDir'], 'cache', '%d-%d' % tag)

    localSystem[tag] = Kernel.SeisFDFDKernel(subSystemConfig,
                                             locator=localLocator)
    return tag
def __init__(self, kernels, show=False):
    '''
    TEST THIS
    '''
    self.name = ''
    self.averageRGB = {"R": 0, "G": 0, "B": 0}
    self.averageLab = {"L": 0, "a": 0, "b": 0}
    self.kernelList = kernels
    self.numberOfKernels = len(self.kernelList)
    LMean = 0
    aMean = 0
    bMean = 0
    # The code below calculates the 2SDMean wrong!
    for kernel in self.kernelList:
        LMean += kernel.LabDict["L2SDMean"]
        aMean += kernel.LabDict["a2SDMean"]
        bMean += kernel.LabDict["b2SDMean"]
    LMean = LMean / float(self.numberOfKernels)
    aMean = aMean / float(self.numberOfKernels)
    bMean = bMean / float(self.numberOfKernels)
    # the code above is wrong!
    rgb = Kernel.HunterLabToRGB(LMean, aMean, bMean)
    self.averageRGB["R"] = rgb["R"]
    self.averageRGB["G"] = rgb["G"]
    self.averageRGB["B"] = rgb["B"]
    self.averageLab["L"] = LMean
    self.averageLab["a"] = aMean
    self.averageLab["b"] = bMean
    self.KernelsCenters = []
    self.setKernelsCenters()
def kmeans(self):
    '''
    calculates kmeans centers for the repline from the centers of the
    cobs it contains
    '''
    if len(self.coblist) > 1:
        allkernelsizes = []
        self.allkernelclusters = []
        for cob in self.coblist:
            for kernel in cob.kernellist:
                for cluster in kernel.clusters:
                    allkernelsizes.append(cluster[0])
                    self.allkernelclusters.append(cluster[1])
        kmeans = Kernel.KMeans(n_clusters=2).fit(self.allkernelclusters)
        meanc1 = kmeans.cluster_centers_[0].tolist()
        meanc2 = kmeans.cluster_centers_[1].tolist()
        sizec1 = 0
        sizec2 = 0
        for label, size in zip(kmeans.labels_, allkernelsizes):
            if label == 0:
                sizec1 += size
            elif label == 1:
                sizec2 += size
        self.clusters.append([sizec1, meanc1])
        self.clusters.append([sizec2, meanc2])
        self.checkdistance()
    else:
        self.segregating = self.coblist[0].segregating
        self.clusters = self.coblist[0].clusters
def simulink(self, inputData, inputNum, lstmCellNum, outputNum,
             modelInputPath):
    tf.reset_default_graph()
    everyLstmCellNums = 400
    label = tf.placeholder(tf.float32, shape=[None, outputNum])
    predictValue = []
    # input data container
    input = tf.placeholder(tf.float32, shape=[None, 1, inputNum, 1],
                           name='input')
    filterKernel = kernel.kernelA(shape=[1, 3, 1, 1])
    # convolution
    conv2dRel = tf.nn.conv2d(input, filter=filterKernel, strides=[1, 1, 1, 1],
                             data_format='NHWC', padding='SAME')
    # pooling
    maxpoolRel = tf.nn.max_pool(conv2dRel, ksize=[1, 1, 3, 1],
                                strides=[1, 1, 3, 1], padding='SAME',
                                data_format='NHWC')
    demensions = maxpoolRel.shape.as_list()  # 0: batch, 1: H, 2: W, 3: C
    maxPoolRelFlat = tf.reshape(maxpoolRel, shape=[-1, demensions[2]])
    listCells = []
    for i in range(lstmCellNum):
        listCells.append(rnn.BasicLSTMCell(everyLstmCellNums))
    rnn_cell = rnn.MultiRNNCell(listCells)
    outputs, states = tf.nn.static_rnn(rnn_cell, [maxPoolRelFlat],
                                       dtype=tf.float32)
    predict = tf.layers.dense(inputs=outputs[0], units=outputNum)
    predict = tf.nn.sigmoid(predict, name="predict")
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        # use this to save/restore the network model
        saver = tf.train.Saver(write_version=tf.train.SaverDef.V2)
        # save the data for using tensorboard to show the network structure
        # tf.summary.histogram("W", W)
        sess.run(init)
        print("\t\t\tmodel file restored from path : " + modelInputPath)
        saver.restore(sess=sess, save_path=modelInputPath)
        predictValue = sess.run(predict, feed_dict={input: inputData})
    return predictValue
def gradient(image, kernel_type, kernel_size):
    if image is None:
        print 'Error in input Image'
        return 0
    kernel = Kernel.kernel(kernel_type, kernel_size)
    gradient = cv2.morphologyEx(image, cv2.MORPH_GRADIENT, kernel)
    return gradient
def close(image, kernel_type, kernel_size):
    if image is None:
        print 'Error in input Image'
        return 0
    kernel = Kernel.kernel(kernel_type, kernel_size)
    closing = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel)
    return closing
def drawScatterPlotWithCenters(self, cob=1, kernel=0, pixelsPerKernel=100,
                               x='', y='', z=''):
    if x != '' and y != '' and z != '':
        l = x
        a = y
        b = z
        lab = []
        for i in xrange(len(x)):
            lab.append((x[i], y[i], z[i]))
    else:
        c = self.getCob(cob)
        k = c.kernelList[kernel]
        R, G, B, l, a, b = self.getkernelcolorlists(
            cob=cob, kernel=kernel, pixelsPerKernel=pixelsPerKernel)
        lab = k.getlabTupleList()
    kmeans = KMeans(n_clusters=2).fit(lab).cluster_centers_
    ax = Kernel.showScatterPlot(l, a, b)
    Kernel.addpoints(kmeans, ax, color='r', marker='o')
    return kmeans
def Blackhat(image, kernel_type, kernel_size):
    if image is None:
        print 'Error in input Image'
        return 0
    kernel = Kernel.kernel(kernel_type, kernel_size)
    blackhat = cv2.morphologyEx(image, cv2.MORPH_BLACKHAT, kernel)
    return blackhat
def getReplineCenters(self, graph=False, mean=False):
    kernelcenters = []
    for cob in xrange(len(self.cobs.keys())):
        kernelcenters.append(self.clustersList(cob=(cob + 1)))
    KM = KMeans(n_clusters=2).fit(kernelcenters[0])
    kmeans = KM.cluster_centers_
    if graph == True:
        l, a, b = [], [], []
        for x in kernelcenters[0]:
            l.append(x[0])
            a.append(x[1])
            b.append(x[2])
        ax = Kernel.showScatterPlot(l, a, b)
        Kernel.addpoints(kmeans, ax, color='r', marker='o')
        if mean == True:
            labMean = (self.averageLab["L"], self.averageLab["a"],
                       self.averageLab["b"])
            Kernel.addpoints(labMean, ax, color='g', marker='o')
    return np.array(kmeans)
def createcobs(self, clustertype="kmeans", stats=False): ''' looks at all the files in self.directory and finds and with the same base name as self.name. each file is turned into a cob object, and add to self.cobs ''' for cobfile in os.listdir(self.directory): if '_' + self.name + "." in cobfile: filenamewithoutlastextension = os.path.splitext(cobfile)[0] basename = os.path.splitext(filenamewithoutlastextension)[0] kernellist = [] with open(self.directory + "/" + cobfile) as csvfile: csvreader = csv.reader(csvfile) csvlist = list(csvreader) listofpixels = [] currentkernel = 1 for line in csvlist[:-1]: try: if line[0] == 'Image': pass elif int(line[1] ) != currentkernel and line[4] != '': kernellist.append( Kernel.Kernel(listofpixels, name=currentkernel, clustertype="dbscan", stats=stats)) listofpixels = [] currentkernel = int(line[1]) currentpixel = [ int(line[2]), int(line[3]), int(line[4]) ] listofpixels.append(currentpixel) elif int(line[1]) == currentkernel: currentpixel = [ int(line[2]), int(line[3]), int(line[4]) ] listofpixels.append(currentpixel) except Exception, e: print str(e), "in create cobs" IndexError currentcob = Cob.Cob(kernellist, basename, pixelcluster=False, clustertype=clustertype, stats=stats) print "Finished Cob: ", basename self.coblist.append(currentcob)
def checkdistance(self):
    c1 = self.clusters[0]
    c2 = self.clusters[1]
    dist = Kernel.clusterdistance(c1[1], c2[1])
    if dist < 7.5:
        L = (c1[0] * c1[1][0] + c2[0] * c2[1][0]) / (c1[0] + c2[0])
        a = (c1[0] * c1[1][1] + c2[0] * c2[1][1]) / (c1[0] + c2[0])
        b = (c1[0] * c1[1][2] + c2[0] * c2[1][2]) / (c1[0] + c2[0])
        self.clusters = []
        self.clusters.append([c1[0] + c2[0], [L, a, b]])
        self.segregating = False
    else:
        self.segregating = True
    return dist
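# --- Illustration (not part of the original source) ---
# A hedged worked example of the size-weighted merge in checkdistance(),
# with toy cluster values; Kernel.clusterdistance is replaced here by a
# plain Euclidean distance, which may or may not match its real definition.
import math

c1 = [300, [52.0, 10.0, 18.0]]  # [size, [L, a, b]]
c2 = [100, [55.0, 12.0, 20.0]]

dist = math.sqrt(sum((p - q) ** 2 for p, q in zip(c1[1], c2[1])))
if dist < 7.5:  # closer than the segregation threshold: merge
    total = c1[0] + c2[0]
    merged = [(c1[0] * p + c2[0] * q) / total for p, q in zip(c1[1], c2[1])]
    print([total, merged])  # [400, [52.75, 10.5, 18.5]]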
def medianblur(image, kernel_type, kernel_size):
    if image is None:
        print 'Error in input Image'
        return 0
    kernel = Kernel.kernel(kernel_type, kernel_size)
    # cv2.medianBlur expects an odd aperture size (an int), not a kernel
    # matrix, so the kernel built above cannot be passed to it directly
    medBlur = cv2.medianBlur(image, int(kernel_size))
    return medBlur
def get_change(self, activation=None, use_time_scale=True):
    """Compute the next change to the system.

    By default, the current value of the field is used, but a different
    value can be supplied to compute the output for an arbitrary value.
    When use_time_scale is set to False, the time scale is not considered
    when computing the change."""
    # get the current output of the dynamic field
    current_output = self.get_output()

    # if a specific current activation is supplied..
    if activation is not None:
        # .. compute the output given this activation
        current_output = self.get_output(activation)
    else:
        # ..otherwise set the activation to the current value of the field
        activation = self._activation

    # if the time scale is to be used..
    if use_time_scale is True:
        # ..compute the inverse of the time scale to have a factor..
        relaxation_time_factor = 1. / self._relaxation_time
    else:
        # ..otherwise, set the factor to one
        relaxation_time_factor = 1.

    # compute the lateral interaction
    lateral_interaction = 0.
    if self._lateral_interaction_kernels is not None:
        for kernel in self._lateral_interaction_kernels:
            lateral_interaction += Kernel.convolve(current_output, kernel)

    # sum up the input coming in from all connected fields
    field_interaction = 0
    for connectable in self.get_incoming_connectables():
        field_interaction += connectable.get_output()

    global_inhibition = (self._global_inhibition * current_output.sum() /
                         math_tools.product(self._output_dimension_sizes))

    # generate the noise term
    noise = self._noise_strength * numpy.random.normal(
        0.0, self._noise_standard_deviation, self._output_dimension_sizes)

    # compute the change of the system
    change = relaxation_time_factor * (
        -self._normalization_factor * activation
        + self._resting_level
        + self._boost
        - global_inhibition
        + lateral_interaction
        + field_interaction
        + noise)

    return change
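# --- Illustration (not part of the original source) ---
# The change returned by get_change() is meant to be added to the field's
# activation each step. A minimal, self-contained Euler integration of the
# same equation shape (toy parameters; the full class adds kernels, global
# inhibition, boost, and noise): tau * da/dt = -a + h + S.
import numpy as np

tau, h = 10.0, -5.0                      # relaxation time, resting level
a = np.zeros(5)                          # activation of a small 1-D field
S = np.array([0.0, 2.0, 8.0, 2.0, 0.0])  # static external input
for _ in range(200):
    change = (1.0 / tau) * (-a + h + S)  # same structure as get_change()
    a += change
print(a)  # relaxes toward h + S = [-5, -3, 3, -3, -5]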
def testTuples():
    assertEqual(Tuple.pair(1, 2), Tuple.pair(1, 2))
    assertTrue(Kernel.eq(Tuple.pair(1, 2), Tuple.pair(1, 2)))
    assertFalse(Kernel.eq(Tuple.pair(1, 2), Tuple.pair(3, 4)))

    t = Tuple.pair(5, 6)
    assertEqual(Tuple.first(t), 5)
    assertEqual(Tuple.second(t), 6)

    assertEqual(Tuple.mapFirst(double, t), (10, 6))
    assertEqual(Tuple.mapSecond(double, t), (5, 12))
    assertEqual(Tuple.mapBoth(triple, double, t), (15, 12))

    assertEqual(
        toPy(Tuple.mapFirst(F(List.cons)(0), toElm(([1], [2])))),
        ([0, 1], [2]))
def showscatterplot(self, s=20, closepreviousplot=True):
    '''
    creates a 3d scatter plot whose points are either pixels from all
    the kernels in this cob, or centers from the kmeans clusters of each
    kernel in this cob.
    '''
    if self.type == 'pixels':
        pixels = []
        for kernel in self.kernellist:
            pixels.extend(kernel.pixellist)
        lablists = Kernel.threeTupleToThreeLists(pixels)
    else:
        lablists = Kernel.threeTupleToThreeLists(self.kernelcenters)
    if closepreviousplot == True:
        plt.close(1)
    plot = plt.figure()
    axes = plot.add_subplot(111, projection='3d')
    llist = lablists[0]
    alist = lablists[1]
    blist = lablists[2]
    for l, a, b in zip(llist, alist, blist):
        R, G, B = Kernel.HunterLabToRGB(l, a, b, normalized=True)
        axes.scatter(l, a, b, color=[R, G, B], marker='s', s=s)
    axes.set_xlabel('L')
    axes.set_ylabel('a')
    axes.set_zlabel('b')
    totalsize = 0
    for cluster in self.clusters:
        totalsize += cluster[0]
    for cluster in self.clusters:
        addedsize = int(s * (cluster[0] / totalsize))
        s += addedsize
        Kernel.addpoints(cluster[1], axes, marker="o", color="g", s=s)
    plt.title(self.name)
    plt.ion()
    plt.show()
    return axes
def low_pass_filter(self, sigma, size=None, fourier=None):
    """
    Filter implemented with a Gaussian kernel
    :param fourier: whether to use the Fourier-based convolution
    :param sigma: float, e.g. 1.0 or 1.5
    :param size: size of the kernel; defaults to int(8 * sigma + 1)
    :return: the low-pass filtered image
    """
    if not size:
        size = int(8 * sigma + 1)
    low_filter_kernel = Kernel.GaussianKernel(size, sigma)
    self.low_pass = self.convolute(low_filter_kernel, fourier=fourier)
    self.low_pass.img_name = self.img_name + ' low-pass filter'
    return self.low_pass
def setKernelsandRGB(self, show=False):
    '''
    For each entry in self.cobs, opens the file with that name in
    repDirectory. The file is a csv file of the format
    "accessionName, Kernel, R, G, B". This function builds the list of
    kernels associated with each cob file and replaces each entry in
    self.cobs with the resulting Cob object.
    '''
    print "Creating Kernels and Pixels"
    progressBar = 0
    for key in self.cobs:
        progressBar += 1
        kernelList = []
        filePath = self.repDirectory + "/" + key + ".tif.csv"
        with open(filePath) as csvFile:
            csvReader = csv.reader(csvFile)
            csvList = list(csvReader)
            print "Cob: ", progressBar, "/", len(self.cobs)
            listofpixels = []
            currentKernel = 1
            for line in csvList:
                try:
                    if line[0] == 'Image':
                        pass
                    elif int(line[1]) != currentKernel and line[4] != '':
                        kernelList.append(
                            Kernel.Kernel(listofpixels, name=currentKernel))
                        if show == True:
                            print "Kernel: %s" % currentKernel
                        listofpixels = []
                        currentKernel = int(line[1])
                        currentPixel = Pixel.Pixel(int(line[2]), int(line[3]),
                                                   int(line[4]))
                        listofpixels.append(currentPixel)
                    elif int(line[1]) == currentKernel:
                        currentPixel = Pixel.Pixel(int(line[2]), int(line[3]),
                                                   int(line[4]))
                        listofpixels.append(currentPixel)
                except IndexError:
                    pass
        self.cobs[key] = Cob.Cob(kernelList)
        print "Kernel's Initialized for %s" % key
    print "All Cob's Initialized"
def fit(self, touchData, targetsAreOffsets=False):
    """
    Fits the model to the given training data.

    Parameters:
        touchData - 2D array, each row is one touch with intended target,
            with columns touch x and y, and target x and y
        targetsAreOffsets - boolean, defaults to False; if True, the 3rd
            and 4th column of each row in the touchData array are
            interpreted as measured offsets directly, instead of target
            locations

    Returns:
        This method has no return value.
    """
    inputs = np.matrix(touchData[:, 0:2])
    if not targetsAreOffsets:
        targetsX = np.matrix(touchData[:, 2] - touchData[:, 0]).T
        targetsY = np.matrix(touchData[:, 3] - touchData[:, 1]).T
    else:
        targetsX = np.matrix(touchData[:, 2]).T
        targetsY = np.matrix(touchData[:, 3]).T
    self.targetsX = targetsX
    self.targetsY = targetsY

    # Stack targets:
    self.targets = np.vstack((targetsX, targetsY))

    # Create design matrix:
    n, m = np.shape(inputs)
    self.mX = np.matrix(inputs)

    # Create covariance matrix / kernel:
    mC = Kernel.createMixedKernel(self.mX, self.mX, self.gamma,
                                  self.kernelMix)
    mC_stacked_left = np.vstack((mC, mC * self.diag))
    mC_stacked_right = np.vstack((mC * self.diag, mC))
    mC_stacked = np.hstack((mC_stacked_left, mC_stacked_right))
    mC_stacked_noised = mC_stacked + np.identity(2 * n) * self.noiseVar
    self.mC_chol = splin.cho_factor(mC_stacked_noised)[0]
    self.mC_inv_mult_targets = splin.cho_solve((self.mC_chol, False),
                                               self.targets)
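# --- Illustration (not part of the original source) ---
# A self-contained sketch of the stacked-covariance solve in fit() above.
# A toy RBF Gram matrix stands in for Kernel.createMixedKernel (whose exact
# definition lives in the Kernel module); sizes and hyperparameters are
# made up for the example.
import numpy as np
import scipy.linalg as splin

rng = np.random.default_rng(1)
mX = rng.uniform(size=(8, 2))           # 8 touch locations
targets = rng.normal(size=(16, 1))      # stacked x/y offset targets
sq = ((mX[:, None, :] - mX[None, :, :]) ** 2).sum(-1)
mC = np.exp(-5.0 * sq)                  # toy Gram matrix

diag, noiseVar = 0.5, 1e-2
mC_stacked = np.vstack((np.hstack((mC, diag * mC)),
                        np.hstack((diag * mC, mC))))
mC_stacked_noised = mC_stacked + np.identity(16) * noiseVar
mC_chol = splin.cho_factor(mC_stacked_noised)[0]
mC_inv_mult_targets = splin.cho_solve((mC_chol, False), targets)
print(mC_inv_mult_targets.shape)  # (16, 1), as in the original fit()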
def gaussianFilter(image, kernel_type, kernel_size, sigmaX, sigmaY):
    if image is None:
        print 'Error in input Image'
        return 0
    kernel_size = Point.point_int(kernel_size)
    if sigmaY == 0 and sigmaX == 0:
        sigmaX = kernel_size[0]
        sigmaY = kernel_size[1]
    kernel = Kernel.kernel(kernel_type, kernel_size)
    sigmaColor = int(sigmaX)
    sigmaSpace = int(sigmaY)
    # both dimensions must be odd for cv2.GaussianBlur (was 'and')
    if kernel_size[0] % 2 == 0 or kernel_size[1] % 2 == 0:
        print 'The kernel size should be odd'
        return 0
    image = cv2.GaussianBlur(image, kernel_size, sigmaColor, sigmaSpace)
    return image
def main(self):
    memoria = Memory()
    file_system = FileSystem()
    kernel = Kernel(memoria, file_system)
    kernel.execute()
def train(self, C=[0.01, 1, 10, 100], tol=1e-3):
    m = self.Y.shape[0]
    A = [0] * 10
    B = [0] * 10
    # shape[0] is the length of axis 0, usually the number of training samples
    indices = numpy.random.permutation(self.X.shape[0])
    rand_data_x = self.X[indices]
    rand_data_y = self.Y[indices]  # rand_data_y holds the labels
    l = int(len(indices) / 10)
    for i in range(9):
        A[i] = rand_data_x[i * l:i * l + l]
        B[i] = rand_data_y[i * l:i * l + l]
    A[9] = rand_data_x[9 * l:]
    B[9] = rand_data_y[9 * l:]
    # '''
    # X_num=self.X.shape[0]
    # train_index=range(X_num)
    # test_size=int(X_num*0.1)+1
    # for i in range(9):
    #     test_index=[]
    #     for j in range(test_size):
    #         randomIndex=int(numpy.random.uniform(0,len(train_index)))
    #         test_index.append(train_index[randomIndex])
    #         #del train_index[randomIndex]
    #     A[i]=self.X[test_index,:]
    #     B[i]=self.Y[test_index,:]
    # A[9]=self.X.ix_[train_index]
    # B[9]=self.Y.ix_[train_index]
    # '''
    acc_best = 0
    C_best = None
    # gamma_best = None
    for CVal in C:
        # for gammaVal in gamma:
        avg_acc = 0  # reset the 10-fold average accuracy for each C value
        for i in range(10):
            X_test = A[i]
            Y_test = B[i]
            # X_train = None
            # Y_train = None
            # model = SMO.SMO_Model(X_train, Y_train, CVal, kernel, gammaVal,
            #                       tol=1e-3, eps=1e-3)
            # output_model = SMO.SMO(model)
            # compute the decision_function from output_model's parameters
            # -----> derive the accuracy
            # acc = _evaulate(output_model)
            X_train = numpy.concatenate([
                A[(i + 1) % 10], A[(i + 2) % 10], A[(i + 3) % 10],
                A[(i + 4) % 10], A[(i + 5) % 10], A[(i + 6) % 10],
                A[(i + 7) % 10], A[(i + 8) % 10], A[(i + 9) % 10]
            ], axis=0)
            Y_train = numpy.concatenate([
                B[(i + 1) % 10], B[(i + 2) % 10], B[(i + 3) % 10],
                B[(i + 4) % 10], B[(i + 5) % 10], B[(i + 6) % 10],
                B[(i + 7) % 10], B[(i + 8) % 10], B[(i + 9) % 10]
            ], axis=0)
            # SMO.GG = gammaVal
            # calculate the Kernel Matrix, then pass it to SMO.
            if self.IK:
                if self.kernel_dict['type'] == 'TANH':
                    K = Kernel.TANH(X_train.shape[0], self.kernel_dict['c'],
                                    self.kernel_dict['d'])
                    K.calculate(X_train)
                elif self.kernel_dict['type'] == 'TL1':
                    K = Kernel.TL1(X_train.shape[0], self.kernel_dict['rho'])
                    K.calculate(X_train)
                p1, p2 = trans_mat(K.kernelMat)
                K.kernelMat = np.dot((p1 - p2), K.kernelMat)
            if self.kernel_dict['type'] == 'RBF':
                K = Kernel.RBF(X_train.shape[0], self.kernel_dict['gamma'])
                K.calculate(X_train)
            elif self.kernel_dict['type'] == 'LINEAR':
                K = Kernel.LINEAR(X_train.shape[0])
                K.calculate(X_train)
            elif self.kernel_dict['type'] == 'POLY':
                K = Kernel.POLY(X_train.shape[0], self.kernel_dict['c'],
                                self.kernel_dict['d'])
                K.calculate(X_train)
            elif self.kernel_dict['type'] == 'TANH':
                K = Kernel.TANH(X_train.shape[0], self.kernel_dict['c'],
                                self.kernel_dict['d'])
                K.calculate(X_train)
            elif self.kernel_dict['type'] == 'TL1':
                K = Kernel.TL1(X_train.shape[0], self.kernel_dict['rho'])
                K.calculate(X_train)
            model = SMO.SMO_Model(X_train, Y_train, CVal, K,
                                  tol=1e-3, eps=1e-3)
            output_model = SMO.SMO(model)
            # IK
            if self.IK:
                output_model.alphas = np.dot((p1 - p2), output_model.alphas)
            acc = SMO._evaluate(output_model, X_test, Y_test)
            avg_acc = avg_acc + acc / 10
        if avg_acc > acc_best:
            acc_best = avg_acc
            # update C (and gamma)
            C_best = CVal
            # gamma_best = gammaVal
    # self.gamma = gamma_best
    # final training pass
    # SMO.GG = gamma_best
    # IK
    if self.IK:
        if self.kernel_dict['type'] == 'TANH':
            K = Kernel.TANH(self.X.shape[0], self.kernel_dict['c'],
                            self.kernel_dict['d'])
            K.calculate(self.X)
        elif self.kernel_dict['type'] == 'TL1':
            K = Kernel.TL1(self.X.shape[0], self.kernel_dict['rho'])
            K.calculate(self.X)
        p1, p2 = trans_mat(K.kernelMat)
        K.kernelMat = np.dot((p1 - p2), K.kernelMat)
    if self.kernel_dict['type'] == 'RBF':
        K = Kernel.RBF(self.X.shape[0], self.kernel_dict['gamma'])
        K.calculate(self.X)
    elif self.kernel_dict['type'] == 'LINEAR':
        K = Kernel.LINEAR(self.X.shape[0])
        K.calculate(self.X)
    elif self.kernel_dict['type'] == 'POLY':
        K = Kernel.POLY(self.X.shape[0], self.kernel_dict['c'],
                        self.kernel_dict['d'])
        K.calculate(self.X)
    elif self.kernel_dict['type'] == 'TANH':
        K = Kernel.TANH(self.X.shape[0], self.kernel_dict['c'],
                        self.kernel_dict['d'])
        K.calculate(self.X)
    elif self.kernel_dict['type'] == 'TL1':
        K = Kernel.TL1(self.X.shape[0], self.kernel_dict['rho'])
        K.calculate(self.X)
    # pass the parameters to the final SVM object
    SVM_model = SMO.SMO(
        SMO.SMO_Model(self.X, self.Y, C_best, K, tol=1e-3, eps=1e-3))
    if self.IK:
        SVM_model.alphas = np.dot((p1 - p2), SVM_model.alphas)
    self.X = SVM_model.X
    self.Y = SVM_model.y
    self.kernel_dict = SVM_model.kernel
    self.alphas = SVM_model.alphas
    self.b = SVM_model.b
    # C_best = C
    # gamma_best = gamma
    # (w, b) = SMO(X_train, Y_train, C_best, gamma_best, kernal, tol=1e-3)
    # self.w = w
    # self.b = b
    return None
def f_Golden(y, x_glob, x_loc, y_off, coords, mType, wType, criterion,
             maxVal, minVal, tol, maxIter=200, flag=0):
    """
    Golden section search

    Arguments
    ----------
    y         : array
                n*1, dependent variable.
    x_glob    : array
                n*k1, fixed independent variable.
    x_loc     : array
                n*k2, local independent variable, including constant.
    y_off     : array
                n*1, offset variable for Poisson model
    coords    : dictionary
                including (x,y) coordinates involved in the weight
                evaluation (including point i)
    mType     : integer
                GWR model type, 0: Gaussian, 1: Poisson, 2: Logistic
    wType     : integer
                kernel type, 0: fix_Gaussian, 1: adap_Gaussian,
                2: fix_Bisquare, 3: adap_Bisquare
    criterion : integer
                bandwidth selection criterion, 0: AICc, 1: AIC, 2: BIC, 3: CV
    maxVal    : float
                maximum value used in bandwidth searching
    minVal    : float
                minimum value used in bandwidth searching
    tol       : float
                tolerance used to determine convergence
    maxIter   : integer
                maximum number of iterations if convergence cannot be
                reached within the tolerance
    flag      : integer
                distance type

    Return:
    opt_band  : float
                optimal bandwidth
    opt_weit  : kernel
                optimal kernel
    output    : list of tuples
                records the searching process, keeping bandwidth and score:
                [(bandwidth, score), (bandwidth, score), ...]
    """
    dist = Kernel.get_pairDist(coords, flag)  # pairwise distance between points

    # 1 set range of bandwidth
    if x_glob is None:
        nVar_glob = 0
    else:
        nVar_glob = len(x_glob[0])
    if x_loc is None:
        nVar_loc = 0
    else:
        nVar_loc = len(x_loc[0])
    nVars = nVar_glob + nVar_loc
    a, c = ini_band_dist(dist, nVars, wType, maxVal, minVal)

    # 2 get initial b value
    output = []
    lamda = 0.38197  # 1 - (np.sqrt(5.0)-1.0)/2.0
    # get b and d
    b = a + lamda * abs(c - a)  # distance or nn based on wType
    d = c - lamda * abs(c - a)  # golden section
    if wType == 1 or wType == 3:  # bandwidth is nn
        b = round(b, 0)
        d = round(d, 0)

    # 3 loop
    pre_opt = 0.0
    diff = 1.0e9
    nIter = 0
    while abs(diff) > tol and nIter < maxIter:
        nIter += 1
        # 3.1 create kernels
        weit_a = Kernel.GWR_W(coords, a, wType, dist)
        weit_b = Kernel.GWR_W(coords, b, wType, dist)
        weit_c = Kernel.GWR_W(coords, c, wType, dist)
        weit_d = Kernel.GWR_W(coords, d, wType, dist)
        # 3.2 decide whether local model or mixed model
        if x_glob is None:  # local model
            #if mType == 0:
                #gwrMod_a = GWR_Gaussian_Base(y, x_loc, weit_a)
                #gwrMod_b = GWR_Gaussian_Base(y, x_loc, weit_b)
                #gwrMod_c = GWR_Gaussian_Base(y, x_loc, weit_c)
                #gwrMod_d = GWR_Gaussian_Base(y, x_loc, weit_d)
            #else:
            gwrMod_a = GWGLM_Base(y, x_loc, weit_a, mType, y_off)
            gwrMod_b = GWGLM_Base(y, x_loc, weit_b, mType, y_off)
            gwrMod_c = GWGLM_Base(y, x_loc, weit_c, mType, y_off)
            gwrMod_d = GWGLM_Base(y, x_loc, weit_d, mType, y_off)
        else:  # mixed model
            gwrMod_a = semiGWR_Base(y, x_glob, x_loc, weit_a, mType, y_off)
            gwrMod_b = semiGWR_Base(y, x_glob, x_loc, weit_b, mType, y_off)
            gwrMod_c = semiGWR_Base(y, x_glob, x_loc, weit_c, mType, y_off)
            gwrMod_d = semiGWR_Base(y, x_glob, x_loc, weit_d, mType, y_off)
        # 3.3 get diagnostic value (0: AICc, 1: AIC, 2: BIC, 3: CV)
        if mType == 0:  # or mType == 3
            f_a = getDiag_GWR[criterion](gwrMod_a)
            f_b = getDiag_GWR[criterion](gwrMod_b)
            f_c = getDiag_GWR[criterion](gwrMod_c)
            f_d = getDiag_GWR[criterion](gwrMod_d)
        else:
            f_a = getDiag_GWGLM[criterion](gwrMod_a)
            f_b = getDiag_GWGLM[criterion](gwrMod_b)
            f_c = getDiag_GWGLM[criterion](gwrMod_c)
            f_d = getDiag_GWGLM[criterion](gwrMod_d)
        #print "a: %.3f, b: %.3f, c: %.3f, d: %.3f" % (a, b, c, d)
        # determine the next triple
        if f_b <= f_d:
            # current optimal bandwidth
            opt_weit = weit_b
            opt_band = b
            opt_cri = f_b
            c = d
            d = b
            b = a + lamda * abs(c - a)
            if wType == 1 or wType == 3:  # bandwidth is nn
                b = round(b, 0)
        else:
            # current optimal bandwidth
            opt_weit = weit_d
            opt_band = d
            opt_cri = f_d
            a = b
            b = d
            d = c - lamda * abs(c - a)
            if wType == 1 or wType == 3:  # bandwidth is nn
                d = round(d, 0)
        output.append((opt_band, opt_cri))
        # determine diff
        diff = f_b - f_d  # opt_cri - pre_opt
        pre_opt = opt_cri
        #print "diff: %.6f" % (diff)
    return opt_band, opt_weit, output
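# --- Illustration (not part of the original source) ---
# A self-contained toy version of the golden-section scheme used by
# f_Golden, minimizing a 1-D function in place of the GWR diagnostics.
# The triple updates mirror the f_b <= f_d branches above.
def golden_section_min(f, a, c, tol=1e-6, maxIter=200):
    lamda = 0.38197  # 1 - (sqrt(5) - 1) / 2
    b = a + lamda * abs(c - a)
    d = c - lamda * abs(c - a)
    nIter = 0
    while abs(c - a) > tol and nIter < maxIter:
        nIter += 1
        if f(b) <= f(d):
            c = d
            d = b
            b = a + lamda * abs(c - a)
        else:
            a = b
            b = d
            d = c - lamda * abs(c - a)
    return (a + c) / 2.0

print(golden_section_min(lambda x: (x - 2.0) ** 2, 0.0, 5.0))  # ~2.0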
def f_Interval(y, x_glob, x_loc, y_off, coords, mType, wType, criterion,
               maxVal, minVal, interval, flag=0):
    """
    Interval search, using interval as the step size

    Arguments
    ----------
    y         : array
                n*1, dependent variable.
    x_glob    : array
                n*k1, fixed independent variable.
    x_loc     : array
                n*k2, local independent variable, including constant.
    y_off     : array
                n*1, offset variable for Poisson model
    coords    : dictionary
                including (x,y) coordinates involved in the weight
                evaluation (including point i)
    mType     : integer
                GWR model type, 0: M_Gaussian, 1: M_Poisson, 2: Logistic
    wType     : integer
                kernel type, 0: fix_Gaussian, 1: adap_Gaussian,
                2: fix_Bisquare, 3: adap_Bisquare
    criterion : integer
                bandwidth selection criterion, 0: AICc, 1: AIC, 2: BIC, 3: CV
    maxVal    : float
                maximum value used in bandwidth searching
    minVal    : float
                minimum value used in bandwidth searching
    interval  : float
                interval used in interval search
    flag      : integer
                distance type

    Return:
    opt_band  : float
                optimal bandwidth
    opt_weit  : kernel
                optimal kernel
    output    : list of tuples
                records the searching process, keeping bandwidth and score:
                [(bandwidth, score), (bandwidth, score), ...]
    """
    # pairwise distance between points (was flag=0, which ignored the
    # flag parameter)
    dist = Kernel.get_pairDist(coords, flag)
    a = minVal
    c = maxVal
    # add code to check whether a and c are valid
    #------------------------------------------------------------
    if wType == 1 or wType == 3:  # bandwidth is nn
        a = int(a)
        c = int(c)
    output = []

    # 1 get initial b value
    b = a + interval  # distance or nn based on wType
    if wType == 1 or wType == 3:  # bandwidth is nn
        b = int(b)

    # 2 create weights
    weit_a = Kernel.GWR_W(coords, a, wType, dist)
    weit_c = Kernel.GWR_W(coords, c, wType, dist)

    # 3 create models
    if x_glob is None:  # local model
        #if mType == 3:
            #gwrMod_a = GWR_Gaussian(y, x_loc, weit_a)
            #gwrMod_c = GWR_Gaussian(y, x_loc, weit_c)
        #else:
        gwrMod_a = GWGLM_Base(y, x_loc, weit_a, mType, y_off)
        gwrMod_c = GWGLM_Base(y, x_loc, weit_c, mType, y_off)
    else:  # mixed model
        gwrMod_a = semiGWR_Base(y, x_glob, x_loc, weit_a, mType, y_off)
        gwrMod_c = semiGWR_Base(y, x_glob, x_loc, weit_c, mType, y_off)

    # 4 get diagnostic values
    if mType == 0:  # or mType == 3
        f_a = getDiag_GWR[criterion](gwrMod_a)
        f_c = getDiag_GWR[criterion](gwrMod_c)
    else:
        f_a = getDiag_GWGLM[criterion](gwrMod_a)
        f_c = getDiag_GWGLM[criterion](gwrMod_c)

    # 5 add to the output
    output.append((a, f_a))
    output.append((c, f_c))
    #print "bandwidth: %.3f, f value: %.6f" % (a, f_a)
    #print "bandwidth: %.3f, f value: %.6f" % (c, f_c)
    if f_a < f_c:
        opt_weit = weit_a
        opt_band = a
        opt_val = f_a
    else:
        opt_weit = weit_c
        opt_band = c
        opt_val = f_c

    while b < c:
        # model using bandwidth b
        weit_b = Kernel.GWR_W(coords, b, wType, dist)
        if x_glob is None:  # local model
            #if mType == 3:
                #gwrMod_b = GWR_Gaussian(y, x_loc, weit_b)
            #else:
            gwrMod_b = GWGLM_Base(y, x_loc, weit_b, mType, y_off)
        else:  # mixed model
            gwrMod_b = semiGWR_Base(y, x_glob, x_loc, weit_b, mType, y_off)
        if mType == 0:  # or mType == 3
            f_b = getDiag_GWR[criterion](gwrMod_b)
        else:
            f_b = getDiag_GWGLM[criterion](gwrMod_b)
        #print "bandwidth: %.3f, f value: %.6f" % (b, f_b)
        # add output
        output.append((b, f_b))
        # determine next candidate
        if f_b < opt_val:
            opt_weit = weit_b
            opt_band = b
            opt_val = f_b
        # update b
        b = b + interval

    return opt_band, opt_weit, output
def pred(data, refData, band, y, x_local, y_hat=None, wType=0, mType=0,
         flag=0, y_offset=None, sigma2=1, y_fix=None, fMatrix=None):
    """
    predict values at unsampled locations

    Arguments:
    data      : dictionary
                (x,y) of unsampled locations
    refData   : dictionary
                (x,y) of sampled locations
    band      : float
                bandwidth
    y         : array
                n*1, dependent variable
    y_hat     : array
                n*1, predicted y from the original model, used to
                calculate local statistics
    x_local   : array
                n*k1, local independent variable
    y_offset  : array
                n*1, offset variable for Poisson model
    sigma2    : float
                used to calculate the std. error of betas for the
                Gaussian model
    y_fix     : array
                n*1, fixed part of y from global Xs, used in mixed model
    fMatrix   : array
                n*n, hat matrix for the global model, used in mixed model
    wType     : integer
                defines which kernel function to use
    mType     : integer
                model type, 0: Gaussian, 1: Poisson, 2: Logistic
    flag      : dummy, 0 or 1
                0: Euclidean distance; 1: spherical distance

    Return:
    Betas     : array
                n*k, Beta estimation
    std_err   : array
                n*k, standard errors of Beta
    t_stat    : array
                n*k, local t-statistics
    localR2   : array
                n*1, local R square or local p-dev
    """
    # 1 get W matrix
    dicDist = {}
    n_pred = len(data.keys())
    # calculate the distance between unsampled obs and sampled obs
    for i in range(n_pred):
        dicDist[i] = Kernel.get_focusDist(data[i], refData, flag)
    weit = Kernel.GWR_W(data, band, wType, dicDist)
    #print len(dicDist[0].keys())
    #print len(weit.w.keys())
    #print len(weit.w[0])

    # 2 get predicted local Beta estimation
    #if mType == 0:  # 2.1 basic Gaussian
        #mod_loc = GWR_Gaussian_Base(y, x_local, weit)
    #else:  # 2.2 GWGLM models including mixed models
    mod_loc = GWGLM_Base(y, x_local, weit, mType, y_offset, y_fix, fMatrix)
    pred_betas = mod_loc.Betas[:n_pred]

    # 3 get std errors of Betas
    #if mType == 1 or mType == 2:
        #sigma2 = 1.0
    pred_stdErr = np.sqrt(mod_loc.CCT * sigma2)

    # 4 get t statistics
    pred_tstat = pred_betas / pred_stdErr

    # 5 get local R2 or local p-dev
    localR2 = np.zeros(shape=(n_pred, 1))
    n_reg = len(y)
    if mType == 0:  # Gaussian model; or mType == 3
        for i in range(n_pred):
            w_i = np.reshape(np.array(weit.w[i]), (-1, 1))
            sum_yw = np.sum(y * w_i)
            ybar = 1.0 * sum_yw / np.sum(w_i)
            rss = np.sum(w_i * (y - y_hat) ** 2)
            tss = np.sum(w_i * (y - ybar) ** 2)
            localR2[i] = (tss - rss) / tss
    if mType == 1:  # Poisson model
        for i in range(n_pred):
            w_i = np.reshape(np.array(weit.w[i]), (-1, 1))
            sum_yw = np.sum(y * w_i)
            ybar = 1.0 * sum_yw / np.sum(w_i * y_offset)
            dev = 0.0
            dev0 = 0.0
            for j in range(n_reg):
                if y[j] != 0:
                    dev += 2 * y[j] * (np.log(y[j]) -
                                       np.log(y_hat[j])) * w_i[j]
                    dev0 += 2 * y[j] * (np.log(y[j]) -
                                        np.log(ybar * y_offset[j])) * w_i[j]
                dev -= 2 * (y[j] - y_hat[j]) * w_i[j]
                dev0 -= 2 * (y[j] - ybar * y_offset[j]) * w_i[j]
            localR2[i] = 1.0 - dev / dev0
    if mType == 2:  # Logistic model
        for i in range(n_pred):
            w_i = np.reshape(np.array(weit.w[i]), (-1, 1))
            sum_yw = np.sum(y * w_i)
            ybar = 1.0 * sum_yw / np.sum(w_i)
            dev = 0.0
            dev0 = 0.0
            for j in range(n_reg):
                if (1.0 - y_hat[j] < 1e-10):
                    nu = np.log(y_hat[j] / 1e-10)
                    dev += -2 * (y[j] * nu + np.log(1e-10)) * w_i[j]
                else:
                    nu = np.log(y_hat[j] / (1.0 - y_hat[j]))
                    dev += -2 * (y[j] * nu + np.log(1.0 - y_hat[j])) * w_i[j]
                nu0 = np.log(ybar / (1 - ybar))
                dev0 += -2 * (y[j] * nu0 + np.log(1.0 - ybar)) * w_i[j]
            localR2[i] = 1.0 - dev / dev0

    return pred_betas, pred_stdErr, pred_tstat, localR2
Programa5.add(IoInstruccion1)
Programa5.add(IoInstruccion5)
Programa5.add(CpuInstruccion4)

myCDROM = CDROM()
myPrinter = Printer()
myMMU = MMU()

myDiskWithPrograms = Disk()
myDiskWithPrograms.load(Programa1)
myDiskWithPrograms.load(Programa2)
myDiskWithPrograms.load(Programa3)
myDiskWithPrograms.load(Programa4)
myDiskWithPrograms.load(Programa5)
myEmptyDisk = Disk()

asignacionContinua = AsignacionContinua(PrimerAjuste(), myMMU)
myMMU.setLogicalMemory(asignacionContinua)

#myKernel = Kernel(Fifo, myMMU, myEmptyDisk)
myKernel = Kernel(Fifo, myMMU, myDiskWithPrograms)
myKernel.addDevice(myCDROM)
myKernel.addDevice(myPrinter)
myKernel.initializeThread()

myKernel.runProcess("Programa1")
myKernel.runProcess("Programa2")
myKernel.runProcess("Programa3")
myKernel.runProcess("Programa4")
myKernel.runProcess("Programa5")
# Define a disk
hdd = HardDisk()
hdd.save(program1)
hdd.save(program2)
hdd.save(program3)

# Define the I/O system
iosys = IOSystem()
iosys.addDevice(hdd)

# Define the CPU with a quantum of 3
cpu = CPU()

# Define the kernel
kernel1point0 = Kernel()

# Define the memory
pageTable = PageTable()
memory = Memoria(300, pageTable)

# Define the interruption handler
IH = InterruptionHandler(iosys, memory, pageTable, kernel1point0, cpu)

# Set the IH on every module that needs it
cpu.setInterruptionHandler(IH)
kernel1point0.setInterruptionHandler(IH)

# Create the shell
shell = Shell("123", IH)
# print "I am %s" % self.name if re.match("J", self.jiang): self.jiang = self.jiang[1:] import temp file = open("temp.py", "w") file.write("from test import " + self.jiang + "\n\n\ndef jiang():\n"+"\t" + self.jiang + "()\n") # file = open("temp.py", "r") file.close() reload(temp) temp.jiang() else: print(self.jiang) if __name__ == "__main__": k = Kernel() k.bootstrap(learnFiles=["en1.6/jiang.aiml", "self-test.aiml"]) lock = threading.Lock() print "\nEntering interactive mode (ctrl-c to exit)" i = 0 while True: jiang = k.respond(raw_input("> ")) my_thread = MyThread(jiang=jiang) my_thread.start() # print "current has %d threads" % (threading.activeCount()) ThA = threading.enumerate() for d in range(len(ThA)): print ThA[d] i += 1 print i