def predict(self, x_train, y_train):
    # accuracy counter
    accRate = 0
    # np.dot handles both vector dot products and matrix multiplication
    print(shape(x_train), shape(self.weight[0]), self.bias[0])
    hidein_1 = np.dot(x_train, self.weight[0]) + self.bias[0]
    hideout_1 = self.__active_fun(hidein_1)
    hidein_2 = np.dot(hideout_1, self.weight[1]) + self.bias[1]
    hideout_2 = self.__active_fun(hidein_2)
    predict_y = hideout_2  # output of the second layer
    cur = []
    for e in predict_y:
        # Map each output value to a class by interval:
        # [0, 0.5), [0.5, 1.5), [1.5, 2.5), [2.5, inf)
        if e > 2.5:
            cur.append(3)
        elif e > 1.5:
            cur.append(2)
        elif e > 0.5:
            cur.append(1)
        else:
            cur.append(0)
    predict_y = cur
    for i in range(len(predict_y)):
        if predict_y[i] == y_train[i]:
            accRate = accRate + 1
    # return the accuracy
    return accRate / len(predict_y)
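# A hedged alternative (not from the original source) to the if/elif interval
# chain in predict above: np.digitize bins each output in one call. Boundary
# handling differs slightly (digitize puts a value exactly equal to 0.5 into
# bin 1, while the chain above keeps it in bin 0).
import numpy as np
predicted = np.digitize([0.2, 0.9, 1.7, 3.1], [0.5, 1.5, 2.5])
print(predicted)  # [0 1 2 3]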
def sigmoid(z):
    x, y = npfunc.shape(z)
    ret = np.zeros((x, y))
    for i in range(x):
        for j in range(y):
            ret[i, j] = 1.0 / (1.0 + np.exp(-z[i, j]))
    return ret
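# A minimal sketch (not from the original source) showing that the elementwise
# loop above can be replaced by a single vectorized expression; assumes z is a
# 2-D NumPy array, matching the loop version's input.
import numpy as np

def sigmoid_vectorized(z):
    # np.exp broadcasts over the whole array, so no explicit loops are needed.
    return 1.0 / (1.0 + np.exp(-np.asarray(z, dtype=float)))

# Usage: sigmoid_vectorized(np.array([[0.0, 2.0], [-2.0, 0.5]]))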
def predict(predictdata, weights, labelSet):
    m, n = shape(weights)
    predictresult = []
    kk = labelSet.pop()
    if n == 1:
        predictdata = normalize(predictdata)
        results = sigMoid(predictdata * weights)
        for i in range(len(results) - 1):
            if results[i] > 0.5:
                print(results[i])
                predictresult.append(1)
            else:
                predictresult.append(0)
    else:
        predictdata = normalize(predictdata)
        results = fakeSigMoid(predictdata * weights)
        for i in range(m):
            for j in range(n):
                if results[i][j] > 0.5:
                    predictresult.append(labelSet[j])
                    break
            else:
                # for-else: no class exceeded 0.5, so fall back to the
                # reference class popped from labelSet above
                predictresult.append(kk)
    return predictresult
def fixInterpAxis(var):
    """
    Documentation for fixInterpAxis(var):
    -------
    The fixInterpAxis(var) function corrects the temporal axis so that
    genutil.statistics.linearregression returns coefficients which are
    unscaled by the time axis

    Author: Paul J. Durack : [email protected]

    Usage:
    ------
    >>> from durolib import fixInterpAxis
    >>> (slope),(slope_err) = linearregression(fixInterpAxis(var),error=1,nointercept=1)

    Notes:
    -----
    ...
    """
    tind = list(range(shape(var)[0]))  # Assume time axis is dimension 0
    t = cdm.createAxis(tind, id='time')
    t.units = 'years since 0-01-01 0:0:0.0'
    t.calendar = var.getTime().calendar
    cdu.times.setTimeBoundsYearly(t)  # Explicitly set time bounds to yearly
    var.setAxis(0, t)
    return var
def plotTable(v, vol, prob, lbds, MOQ, name):
    """
    DEFINITION:
    v: index in volatility array
    vol: volatility array. Contains simulated volatilities.
    name: name of the plot.
    """
    n_p, _, nr, _ = shape(MOQ)  # renamed from 'np' to avoid shadowing numpy
    MOQshape = zeros((n_p, nr))
    for p in range(len(prob)):
        for r in range(len(lbds)):
            MOQshape[p, r] = mean(MOQ[p, v, r, :])
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    X = lbds
    Y = prob
    X, Y = meshgrid(X, Y)
    Z = MOQshape
    plt.xlabel('Lambdas')
    plt.ylabel('Probabilities')
    plt.title(name + ' for fixed lambdas - v=' + str(vol[v]))
    surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.jet,
                           linewidth=0, antialiased=False)
    ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
    fig.colorbar(surf, shrink=0.5, aspect=5)
def sample(self, event):
    # Randomly pick an example from 'sample.mat' and show the image
    from random import random
    import ShowNum as sn
    import scipy.io as spio
    import numpy.core.fromnumeric as npfunc
    raw = spio.loadmat('sample.mat')
    X = raw['X']
    row = int(random() * npfunc.shape(X)[0])
    sn.createImg(X[row]).show()
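# A hypothetical matplotlib-based stand-in for ShowNum.createImg (an
# assumption: each row of X is a flattened square grayscale image, e.g.
# 20x20; the transpose depends on how the pixels were flattened, so adjust
# as needed).
import numpy as np
import matplotlib.pyplot as plt

def show_row(row, side=20):
    plt.imshow(np.asarray(row).reshape(side, side).T, cmap='gray')
    plt.axis('off')
    plt.show()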
def correct_potential(pot, r0, V0, x0, y0):
    new_pot = np.zeros(shape(pot))
    for i in range(len(pot[:, 0])):
        for j in range(len(pot[0, :])):
            r = np.sqrt((i - x0)**2 + (j - y0)**2)
            if r >= r0:
                new_pot[i, j] = V0
            else:
                new_pot[i, j] = pot[i, j]
    return new_pot
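# A hedged vectorized sketch of correct_potential above (same inputs assumed):
# build the radius grid once with np.indices and select with np.where instead
# of looping over every grid point.
import numpy as np

def correct_potential_vec(pot, r0, V0, x0, y0):
    ii, jj = np.indices(pot.shape)
    r = np.hypot(ii - x0, jj - y0)
    return np.where(r >= r0, V0, pot)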
def fakeSigMoid(x):
    m, n = shape(x)
    # print("m,n", m, n)
    results = zeros((m, n)).tolist()
    xl = x.tolist()
    for i in range(m):
        # Per-row normalizer; the original accumulated (and then overwrote)
        # a single running sum across all rows, which looks like a bug.
        row_sum = 0.0
        for j in range(n):
            row_sum = row_sum + np.exp(xl[i][j])
        for j in range(n):
            results[i][j] = np.exp(xl[i][j]) / (1.0 + row_sum)
    return results
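# A hedged, numerically stable alternative (an assumption about intent):
# fakeSigMoid resembles multinomial logistic probabilities with a reference
# class, exp(x_ij) / (1 + sum_k exp(x_ik)); subtracting the row max before
# exponentiating avoids overflow for large scores.
import numpy as np

def ref_class_softmax(x):
    x = np.asarray(x, dtype=float)
    shift = np.maximum(x.max(axis=1, keepdims=True), 0.0)  # baseline score is 0
    e = np.exp(x - shift)
    return e / (np.exp(-shift) + e.sum(axis=1, keepdims=True))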
def __getitem__(self, index):
    img = Image.open(self.images[index]).convert('RGB')
    if self.mode == 'test':
        if self.transform is not None:
            img = self.transform(img)
        return img, os.path.basename(self.images[index])
    mask = Image.open(self.masks[index])
    # synchronized transform
    if self.mode == 'train':
        img, mask = self._sync_transform(img, mask)
    elif self.mode == 'val':
        img, mask = self._val_sync_transform(img, mask)
    else:
        assert self.mode == 'testval'
        img, mask = self._img_transform(img), self._mask_transform(mask)
    # general resize, normalize and toTensor
    if self.transform is not None:
        img = self.transform(img)
    print("img shape = " + str(shape(img)) + " mask shape = " + str(shape(mask)))
    return img, mask, os.path.basename(self.images[index])
def generate_arrays(batch_size):
    for i in range(0, len(x_train_raw), batch_size):
        input_matrix = []
        t = y_train[i:i + batch_size]
        for tmp_index in x_train_raw[i:i + batch_size]:
            tmpinput = []
            for index in tmp_index:
                tmpinput.append(embedding_matrix[index])
            input_matrix.append(tmpinput)
        input_matrix = K.cast_to_floatx(input_matrix)
        t = K.cast_to_floatx(t)
        print(shape(input_matrix))
        yield (input_matrix, t)
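# Hedged note: Keras's fit_generator-style training expects a generator that
# yields indefinitely; a minimal wrapper around generate_arrays above could
# simply restart the batching loop each epoch.
def generate_arrays_forever(batch_size):
    while True:
        for batch in generate_arrays(batch_size):
            yield batch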
def loadData():
    f = open("dataset.txt")
    lines = f.readlines()
    for line in lines:
        data.append(line.split())
    f.close()
    f2 = open("predictdata.txt")
    lines = f2.readlines()
    for line in lines:
        predictdata.append(line.split())
    f2.close()
    m, n = shape(predictdata)
    return data, predictdata
def processOutput(img_out_y):
    colors = np.array([[0, 0, 0], [255, 0, 0], [0, 255, 0],
                       [0, 0, 255], [255, 255, 255]])
    img_out_y = img_out_y[0, :, :, :]
    print(shape(img_out_y))
    score = np.array(img_out_y).astype(np.float32)
    score = np.transpose(score, [1, 2, 0])
    mask = np.zeros((crop_size, crop_size, 3))
    for x in range(len(score)):
        for y in range(len(score[0])):
            maxindex = np.argmax(score[x][y])
            mask[x][y] = colors[maxindex]
    return mask
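# A hedged vectorized sketch of the argmax-to-color loop in processOutput:
# NumPy fancy indexing maps each pixel's argmax class straight to its palette
# row (score shaped (H, W, n_classes), colors as above).
import numpy as np

def process_output_vec(score, colors):
    return colors[np.argmax(score, axis=2)]  # -> (H, W, 3) color mask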
def loadImg(img_name):
    # image transform
    input_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.63658345, 0.5976706, 0.6074681],
                             [0.30042663, 0.29670033, 0.29805037]),
    ])
    img = Image.open(img_name).convert('RGB')
    img = img.resize((crop_size, crop_size), Image.NEAREST)
    # np_array = np.array(img).astype(np.float32)
    # np_array = np_array / 127.5 - 1
    input = input_transform(img)
    print("input shape: " + str(shape(img)))
    input.unsqueeze_(0)
    return img, input
def readField(h5fname, f1D=0, xn=None, yn=None):
    mdata = fdata(h5fname)
    h5f = tables.open_file(h5fname, mode='r')

    # To select a temporal slice of the field....
    # z2s = 50
    # z2e = 80
    # z2si = int(np.floor(z2s / dz2))
    # z2ei = int(np.floor(z2e / dz2))
    # z2axis = (np.arange(z2si, z2ei) - z2si) * dz2
    # ...otherwise take the full field

    if xn is None:
        xn = int(np.rint(mdata.vars.nx / 2))
    if yn is None:
        yn = int(np.rint(mdata.vars.ny / 2))

    if mdata.vars.q1d == 1:
        xf = h5f.root.aperp[:, 0]
    else:
        if f1D == 0:
            xf = h5f.root.aperp[:, :, :, 0]
        else:
            print(xn, yn)
            print(shape(h5f.root.aperp))
            xf = h5f.root.aperp[xn, yn, :, 0]
    # xfs = xf[z2si:z2ei]  # for selecting a slice...

    if mdata.vars.q1d == 1:
        yf = h5f.root.aperp[:, 1]
    else:
        if f1D == 0:
            yf = h5f.root.aperp[:, :, :, 1]
        else:
            yf = h5f.root.aperp[xn, yn, :, 1]

    h5f.close()
    return xf, yf
def gradAscent(data, label, labelSet):
    hmat = zeros((data.shape[0], len(labelSet) - 1)).tolist()
    # print("hmat[2][1]", hmat[2][1])
    i = 0
    # print("various lengths", len(labelSet) - 1, data.shape[0])
    for k in range(len(labelSet) - 1):
        for j in range(data.shape[0]):
            if label[j] == labelSet[k]:
                hmat[j][k] = 1
    iteration = 1
    error = 0.00001
    m, n = shape(data)
    labelNum = len(labelSet) - 1
    if labelNum >= 2:
        weights = ones((n, labelNum))
    else:
        weights = ones((n, 1))
    # for i in range(iteration):
    for i in range(labelNum):
        diff = 1
        hmat = mat(hmat)
        while diff > error:
            if labelNum == 1:
                h = sigMoid(data * weights[:, i])
                h = mat(h)
                deri = mat(label).transpose() - h  # 11*1
            else:
                h = fakeSigMoid(data * weights)
                h = mat(h)
                deri = hmat[:, i] - h[:, i]  # 11*1
            # still behaves normally up to this point
            formal = copy.deepcopy(weights[:, i])
            # gradient descent minimizes the objective, hence the '+' here
            weights[:, i] = weights[:, i] + alpha * data.transpose() * deri
            diff = abs(formal.transpose() * formal
                       - weights[:, i].transpose() * weights[:, i])
            print("diff = ", diff)
    return weights
        else:
            print(key)
            delattr(clim_ac, key)
    start_month_s = "01"
    end_month_s = "12"
elif args.data_source == "UKMetOffice-HadISST":
    print(args.data_source)
    clim_ac = f_h(args.file_variable)[0, ...]
    clim_ac1 = cdm.createVariable(clim_ac.mask, id="sftlf")
    clim_ac1.setGrid(clim_ac.getGrid())
    clim_ac = clim_ac1
    del clim_ac1
    start_month_s = "01"
    end_month_s = "07"
elif (d.getAxisIndex("depth") != -1
      and shape(d)[d.getAxisIndex("time")] == 12
      and "WOA" in args.data_source):
    # Case WOA09 - test for 3d and trim off top layer
    clim_ac = cdu.ANNUALCYCLE.climatology(d[:, 0, :, :])  # shape 12,24,180,360
    start_month_s = "01"
    end_month_s = "12"
elif (d.getAxisIndex("PRESSURE") != -1
      and shape(d)[d.getAxisIndex("time")] == 12
      and "UCSD" in args.data_source):
    # Case ARGO UCSD - test for 3d and trim off top layer
    if args.target_variable in "sos":
        d_mean = f_h("ARGO_SALINITY_MEAN")
    elif args.target_variable in "tos":
        d_mean = f_h("ARGO_TEMPERATURE_MEAN")
    # Create annual cycle from annual mean
    d_ancycle = d_mean + d
def print_matrice(self, val, label, n=13):
    print(u'\x11' * n + ' ' + str(label) + ' ' + u'\x10' * n)
    for i in range(shape(val)[0]):
        print(str(i) + u' \x10 ' + str(val[i, :]))
    print(u'\x16' * 30)
    print()
import numpy as np
import os
from numpy.core.fromnumeric import shape

path = "tools/action.csv"
out_path = "tools/action_out.csv"
action = np.loadtxt(path, dtype=int, delimiter=",")
with open(out_path, "a+") as file:
    for i in range(shape(action)[0]):
        # write the column indices of the nonzero entries in row i (5 per row)
        for j in range(5):
            if j < 4:
                file.write(str(list(np.nonzero(action[i, :]))[0][j]) + ",")
            else:
                file.write(str(list(np.nonzero(action[i, :]))[0][j]) + "\n")
# print(shape(action)[0])
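# A hedged one-shot alternative to the loop above (assumes every row of
# 'action' has exactly 5 nonzero entries, as the loop does):
idx = np.array([np.nonzero(row)[0] for row in action])
np.savetxt(out_path, idx, fmt="%d", delimiter=",")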
import matplotlib.pyplot as plt
import numpy as np
import h5py
from numpy.core.fromnumeric import shape

filename = 'diversity_mega.h5'
f = h5py.File(filename, 'r')
data = f['data']
sh = shape(data)
print(sh)
# algo_names = ["Selection", "MRC", "CRC", "EGC", "rene", "power_and_crc"]
# data[snr][branch][algorithm] = [error, total, payload_error, slots, pbes]
ber = np.empty((sh[1], sh[0]))
stops = np.array([25, 25, 25, 25, 25])
print(shape(ber))
for br in range(sh[1]):
    for snr in range(sh[0]):
        error, total, _, _, _ = data[snr][br][5]
        biterr = error / total
        ber[br][snr] = biterr
        if biterr < 10**-5:
            stops[br] = snr
            break
x = np.arange(-10, 2.5 * 25 - 10, 2.5)
plt.figure(figsize=(7, 4))
def main():
    np.random.seed(64)

    # The CMA-ES algorithm
    strategy = cma.Strategy(centroid=[10.0] * N, sigma=0.05,
                            lambda_=lambda_cmaes)
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)

    halloffame = tools.HallOfFame(1)
    # halloffame_array = []
    # C_array = []
    # centroid_array = []
    fbest = []  # np.ndarray((NGEN, 1)); best f(x) per generation
    vbest = np.ndarray((NGEN, 1))
    # best = np.ndarray((NGEN, N))  # best x per generation
    for gen in range(NGEN):
        # Generate the population for the new generation
        if gen == 1:
            # a = 0.0
            # b = 1.0
            # x_start = (b - a) * np.random.rand(P1.N_x) + a
            x_start = np.array(halloffame[0])
            x_p, fbest_nelder = nelder_mead(f_n, x_start)
            for i in range(lambda_cmaes):
                x_p[i] = creator.Individual(x_p[i])
            population = x_p
            fbest.extend(fbest_nelder)
        else:
            population = toolbox.generate()
        # Evaluate the population
        fitnesses = toolbox.map(toolbox.evaluate, population)
        for ind, fit in zip(population, fitnesses):
            ind.fitness.values = fit
        # Update the strategy parameters for the next generation
        # from the evaluated population
        toolbox.update(population)
        # Update the hall of fame
        halloffame.update(population)
        # halloffame_array.append(halloffame[0])
        # C_array.append(strategy.C)
        # centroid_array.append(strategy.centroid)
        # fbest.append(halloffame[0].fitness.values[1])  # index 1 when inputs are (V, F)
        # vbest[gen] = halloffame[0].fitness.values[0]
        # best[gen, :N] = halloffame[0]
        fbest.append(halloffame[0].fitness.values[0])
        # print("{} generation's (bestf, bestv) =({}, {})".format(gen+1, halloffame[0].fitness.values[1], vbest[gen]))
        print("{} generation's (bestf) =({})".format(
            gen + 1, halloffame[0].fitness.values[0]))
        if (gen + 1) % 100 == 0:
            x = []
            y = []
            f = [0] * P1.P
            g = [0] * P1.M
            h = [0] * int(P1.Q)
            x = halloffame[0]  # best[gen]
            for n in range(P1.N_x):
                if x[n] < 1.0e-10:
                    y.append(0.0)
                else:
                    y.append(1.0)
            # evaluation
            f, g, h = P1.evaluation(x, y, f, g, h)
            # output
            print(x)
            print(y)
            for p in range(P1.P):
                print("f%d = %.10g " % (p + 1, f[p]))
            V = 0.0
            for m in range(P1.M):
                # print("g%d = %.10g" % (m+1, g[m]))
                if g[m] > 0.0:
                    V += g[m]
            for q in range(P1.Q):
                # print("h%d = %.10g" % (q+1, h[q]))
                V += abs(h[q])
            # check feasibility
            print('Sum of violation = {:.10g}'.format(V))
            print("Tolerance = {:.2g} ".format(P1.eps[0]))
            if P1.checkFeasibility(x, y):
                print("Input solution is feasible.")
            else:
                print("Input solution is infeasible.")
    print(shape(population))

    # Plot the results
    y = np.array(fbest)
    x = np.arange(1, len(fbest) + 1)
    fig = plt.figure()
    fig.subplots_adjust(left=0.2)
    plt.plot(x, y)
    plt.yscale('log')
    fig.savefig("img.pdf")
# coding:utf-8
from numpy.core.fromnumeric import shape
from numpy.core.numeric import array

# Build a 4x2 matrix c: c.shape[0] is the length of the first dimension
# (rows) and c.shape[1] the length of the second dimension (columns).
c = array([[1, 1], [1, 2], [1, 3], [1, 4]])
print(shape(3))    # the shape of a scalar is the empty tuple ()
print(c.shape)
print(c.shape[0])  # number of rows
print(c.shape[1])  # number of columns
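# A short follow-up sketch (not in the original): shape works on any
# array-like, including plain nested lists and higher-dimensional arrays.
print(shape([[1, 2, 3], [4, 5, 6]]))           # (2, 3), from a nested list
print(shape(array([[[1], [2]], [[3], [4]]])))  # (2, 2, 1), a 3-D array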
import matplotlib.pyplot as plt
import numpy as np
import h5py
from numpy.core.fromnumeric import shape

filename = 'diversity_mega.h5'
f = h5py.File(filename, 'r')
data = f['data']
print(data[0][0][0])
sh = shape(data)
# algo_names = ["Selection", "MRC", "CRC", "EGC", "rene", "power_and_crc"]
# data[snr][branch][algorithm] = [error, total, payload_error, slots, pbes]
one_branch_ber = []
one_branch_snr_stop = 0
one_branch_pbe = []
one_branch_payload_error = []
# selection
selection_ber_stop = 0
selection_ber = []
selection_pbe = []
selection_payload_error = []
for snr_i in range(25):
    err, total, payload_error, slots, pbes = data[snr_i][0][0]
G = []
L0 = []
R_L = []
ENum = 5
for i in range(3):
    Pic.append(cv2.imread('data/venice_canal_exp_{}.jpg'.format(i)))
    tp1, tp2 = LapPyr(Pic[i])
    L0.append(tp2)
    GO0.append(tp1)
L.append(L0)
G_Original.append(Pic)
G_Original.append(GO0)
G1 = []
print(shape(CalW(Pic)))
for i in range(3):
    Gt, df = LapPyr(CalW(Pic)[i])
    G1.append(Gt)
print(shape(GO0))
G.append(CalW(Pic))
G.append(G1)
# test = []
# ENum is the pyramid height
for i in range(ENum):
    L_t = []
    G_t = []
    GO_t = []
    W_Now = G[i]
    # test.append(L[i])
    L[i] = np.array(L[i])
    y.append(yo)
for yo in y_test:
    y.append(yo)
x = np.array(x)
y = np.array(y)
imbalanced_data = [1, 4, 2, 0]
for imb_data in imbalanced_data:
    index = 0
    list_index = []
    for index in range(len(y)):
        if y[index] == imb_data:
            list_index.append(index)
    y = np.delete(y, list_index, None)
    x = np.delete(x, list_index, 0)
    print(shape(x))
    print(shape(y))
    with open('x-no-' + str(imb_data), 'wb') as f:
        pickle.dump(x, f)
    with open('y-no-' + str(imb_data), 'wb') as f:
        pickle.dump(y, f)
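# Hedged round-trip check (not in the original): classes are removed
# cumulatively, so 'x-no-0' written in the last iteration has classes 1, 4,
# 2 and 0 all filtered out.
import pickle
with open('x-no-0', 'rb') as f:
    x_filtered = pickle.load(f)
print(shape(x_filtered))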
def get(self):
    img, mask = self._img_transform(self.img), self._mask_transform(self.mask)
    print(str(shape(img)) + " " + str(shape(mask)))
    return img, mask
if __name__ == "__main__":
    # Full dataset
    data_x, data_y = load_data("./data/dataset.txt")
    print("shape x:", shape(data_x))
    # Training set
    train_x, train_y = data_x[:1350], data_y[:1350]
    print("train_x:", shape(train_x), "train_y", shape(train_y))
    # Test set
    test_x, test_y = data_x[1350:], data_y[1350:]
    print("test_x:", shape(test_x), "test_y", shape(test_y))
    # Learning rate 0.1
    model = NeuralNetwork(0.1)
    t0 = time.time()
    model.fit(train_x, train_y)
    t1 = time.time()
    print("Time cost of the BP algorithm:", (t1 - t0), "s")
transform = TfidfTransformer()
x_tfidf = transform.fit_transform(X)  # fit on the training corpus with fit_transform
# count_train = vectorizer.fit_transform(content_train)
# tfidf = transform.fit_transform(count_train)
Y_label = to_categorical(opinion_train_stc, len(class_index))
# x_train, x_test, y_train, y_test = train_test_split(x_tfidf, opinion_train_stc, test_size=0.1)
X_train, X_test, y_train, y_test = train_test_split(
    content_train_src, opinion_train_stc, test_size=0.1)  # split the raw texts
X_train_vec = vectorizer.transform(X_train)
x_train = transform.transform(X_train_vec)
X_test_vec = vectorizer.transform(X_test)
x_test = transform.transform(X_test_vec)
print(shape(x_train))  # (457, 62919)
print(shape(y_train))
clf = SVC(probability=True)
clf.fit(x_train, y_train)
print(x_test[0])
print(shape(x_test))
pred_y = clf.predict(x_test)
pred_y_proba = clf.predict_proba(x_test)
print(classification_report(y_test, pred_y))
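# Optional follow-up (a hedged sketch, not from the original): persist the
# fitted vectorizer, TF-IDF transform, and classifier together so the exact
# feature pipeline can be reused at prediction time.
import joblib
joblib.dump({"vectorizer": vectorizer, "tfidf": transform, "clf": clf},
            "svc_tfidf.joblib")  # "svc_tfidf.joblib" is an assumed filename
# bundle = joblib.load("svc_tfidf.joblib")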
def nnCostFunction(Theta, input_layer_size, hidden_layer_size, num_labels,
                   X, y, lambda1):
    # Extract Theta1 and Theta2 from Theta
    Theta1_sz = hidden_layer_size * (input_layer_size + 1)
    Theta1 = Theta[:Theta1_sz].reshape(input_layer_size + 1, hidden_layer_size)
    Theta1 = Theta1.T
    Theta2_sz = num_labels * (hidden_layer_size + 1)
    Theta2 = Theta[Theta1_sz:].reshape(hidden_layer_size + 1, num_labels)
    Theta2 = Theta2.T
    # There are m samples
    m = npfunc.shape(X)[0]
    # Objects to return (Cost and Gradient)
    J = 0
    Theta1_grad = np.zeros(npfunc.shape(Theta1))
    Theta2_grad = np.zeros(npfunc.shape(Theta2))
    # Add the constant (bias) term
    tmp = [[1] for i in range(m)]
    X = np.bmat('tmp X')
    # Compute cost J
    a2 = np.copy(sigmoid(Theta1 * X.T))
    a2m = npfunc.shape(a2)[1]
    tmp = np.ones((1, a2m))
    a2 = np.bmat('tmp ; a2')
    a2 = a2.T
    h_theta = np.copy(sigmoid((Theta2 * a2.T).T))
    for i in range(m):
        yi = np.zeros((1, num_labels))
        yi[0, y[i, 0]] = 1
        J = J - np.inner(yi, eachlog(h_theta[i]))[0] - np.inner(
            1 - yi, eachlog(1 - h_theta[i]))[0]
    J = J / m
    # Compute the gradients of Theta1 and Theta2 via backpropagation
    for t in range(m):
        a1 = X[t].T
        z2 = Theta1 * a1
        a2 = np.copy(sigmoid(z2))
        tmp = np.mat([[1]])
        a2 = np.bmat('tmp ; a2')
        z3 = Theta2 * a2
        a3 = np.copy(sigmoid(z3))
        yt = np.zeros((num_labels, 1))
        yt[y[t, 0], 0] = 1
        delta3 = np.mat(a3 - yt)
        delta2 = Theta2.T * delta3
        tmp = npfunc.shape(delta2)[0]
        for i in range(tmp):
            if i == 0:
                delta2[i, 0] = 0
            else:
                delta2[i, 0] = delta2[i, 0] * sigmoidGradient(z2[i - 1, 0])
        Theta2_grad = Theta2_grad + delta3 * a2.T
        Theta1_grad = Theta1_grad + delta2[1:] * a1.T
    Theta1_grad = Theta1_grad / m
    Theta2_grad = Theta2_grad / m
    # Regularization
    reg = 0
    # Theta1
    l = npfunc.shape(Theta1)[0]
    for i in range(l):
        tmp1 = np.array(Theta1[i])
        reg = reg + np.inner(tmp1[1:], tmp1[1:])
        reg_Theta = Theta1[i]
        reg_Theta[0] = 0
        Theta1_grad[i] = Theta1_grad[i] + reg_Theta * lambda1 / m
    # Theta2
    l = npfunc.shape(Theta2)[0]
    for i in range(l):
        tmp1 = np.array(Theta2[i])
        reg = reg + np.inner(tmp1[1:], tmp1[1:])
        reg_Theta = Theta2[i]
        reg_Theta[0] = 0
        Theta2_grad[i] = Theta2_grad[i] + reg_Theta * lambda1 / m
    J = J + reg * lambda1 / (2 * m)
    # Flatten Theta1_grad and Theta2_grad (column by column) into grad
    grad = []
    for j in range(input_layer_size + 1):
        for i in range(hidden_layer_size):
            grad.append(Theta1_grad[i, j])
    for j in range(hidden_layer_size + 1):
        for i in range(num_labels):
            grad.append(Theta2_grad[i, j])
    grad = np.array(grad)
    print("Cost: ", J)
    return (J, grad)
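# A minimal, hypothetical sketch of the parameter packing scheme assumed by
# nnCostFunction above: Theta1 and Theta2 are flattened column by column into
# one vector and recovered with reshape(...).T, matching both the unpacking at
# the top of the function and the grad assembly at the bottom.
import numpy as np

hidden, inputs, labels = 3, 4, 2
Theta1 = np.arange(hidden * (inputs + 1), dtype=float).reshape(hidden, inputs + 1)
Theta2 = np.arange(labels * (hidden + 1), dtype=float).reshape(labels, hidden + 1)
packed = np.concatenate([Theta1.T.ravel(), Theta2.T.ravel()])
T1 = packed[:Theta1.size].reshape(inputs + 1, hidden).T  # same unpacking as above
T2 = packed[Theta1.size:].reshape(hidden + 1, labels).T
assert np.array_equal(T1, Theta1) and np.array_equal(T2, Theta2)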