def post(self):
    """Per-station Gaussian naive-Bayes forecast.

    Decodes the request JSON into a NaiveBayesVo, loads the x train/test
    data and a per-station y training dict, fits a GaussianNB per station,
    predicts one value for the test row, and writes the list of station
    predictions back as a CommonResponse.
    """
    try:
        jsonByte = self.request.body
        jsonStr = jsonByte.decode("utf-8")
        naiveBayesVo = json.loads(jsonStr, object_hook=dictToNaiveBayesVo)
        base_path = naiveBayesVo.bayesDir
        x_file_name = naiveBayesVo.trainxFileName
        y_file_name = naiveBayesVo.trainyFileName
        # Input (feature) training data plus the row to predict.
        (x_train, x_test) = get_data(base_path + x_file_name)
        # Output (label) training data keyed by station id.
        y_train_dict = get_y_train_data(base_path + y_file_name)
        clf = GaussianNB()
        predictDataList = []
        # Fit and predict once per station; collect each prediction.
        for (station, y_train) in y_train_dict.items():
            clf.fit(x_train, y_train)
            y_predict = clf.predict(x_test.reshape(1, -1))
            siteData = com.SiteData(str(station), str(y_predict[0]))
            predictDataList.append(siteData.__dict__)
        commonResponse = com.CommonResponse(
            com.successCode, com.successMsg, {"list": predictDataList})
    except Exception as e:  # narrowed from BaseException: don't swallow SystemExit etc.
        print("执行朴素贝叶斯算法失败:{}".format(e))
        commonResponse = com.CommonResponse(com.errorCode, "{}".format(e),
                                            {"list": []})
    # 返回数据
    self.write(commonResponse.__dict__)
def post(self):
    """Return the index of the historical sample most similar to the newest one.

    The last row of the sample file is treated as the observation; every
    earlier row is scored with Similarity() (NaN scores count as 0) and the
    index of the best-scoring row is written back in "obj".
    """
    try:
        jsonByte = self.request.body
        jsonStr = jsonByte.decode("utf-8")
        rhythmVo = json.loads(jsonStr, object_hook=dictToRhythmVo)
        datas = load_data(rhythmVo.sampleDataPath)
        length = len(datas) - 1
        observeData = datas[length]  # newest sample = the observation
        index = 0
        # Seed the running maximum with the first candidate row.
        maxSimilarity = Similarity(np.array(observeData), np.array(datas[0]))
        if pd.isnull(maxSimilarity):
            maxSimilarity = 0
        for i in range(1, length):
            s = Similarity(np.array(observeData), np.array(datas[i]))
            if pd.isnull(s):
                s = 0
            if s > maxSimilarity:
                maxSimilarity = s
                index = i
        commonResponse = com.CommonResponse(com.successCode, com.successMsg,
                                            {"obj": index})
    except Exception as e:  # narrowed from BaseException
        print("执行韵律算法失败:{}".format(e))
        commonResponse = com.CommonResponse(com.errorCode, "{}".format(e),
                                            {"obj": com.missingValues})
    # 返回数据
    self.write(commonResponse.__dict__)
def post(self):
    """Predict one future value per station via the mean generating function.

    Each line of the sample file is "station_id,v1,v2,...".  ACTA() extends
    the series in-place into Y by NN values; the last element of Y is the
    prediction.  Stations whose prediction raises TypeError are skipped, and
    an error response is returned only when every station failed.
    """
    jsonByte = self.request.body
    jsonStr = jsonByte.decode("utf-8")
    mgfVo = json.loads(jsonStr, object_hook=dictToMgfVo)
    dataFilePath = mgfVo.sampleDataPath
    predictDataList = []
    # 'with' guarantees the data file is closed (the original leaked the handle).
    with open(dataFilePath, 'r') as file:
        for line in file:
            line_arr = line.split(",")
            station_id = line_arr.pop(0)
            X = load_data(line_arr)  # convert the strings to a float array
            N, = X.shape  # length of the observed series (years)
            NN = 1        # number of years to forecast
            N1 = N + NN
            IN = 10       # iteration count for ACTA
            Y = np.zeros(shape=N1)
            try:
                ACTA(X, Y, N, NN, N1, IN)
                predict_value = Y[Y.size - 1]
                siteData = com.SiteData(str(station_id), str(predict_value))
                predictDataList.append(siteData.__dict__)
            except TypeError as e:
                print(station_id, "预测失败:", e)
    if len(predictDataList) == 0:
        commonResponse = com.CommonResponse(
            com.errorCode, "每个站点都预测失败", {"list": predictDataList})
    else:
        commonResponse = com.CommonResponse(
            com.successCode, com.successMsg, {"list": predictDataList})
    self.write(commonResponse.__dict__)
def post(self):
    """Run k-means over the index data, retrying with a smaller k on failure.

    Starts at k=8 cluster centres and decrements towards k=2 until
    get_index_means() yields a non-empty list of years; if no k succeeds
    the pre-built "no cluster data" error response is returned.
    """
    try:
        # Default response used when no k produces a usable clustering.
        commonResponse = com.CommonResponse(com.errorCode, "无同聚类数据",
                                            {"list": []})
        jsonByte = self.request.body
        jsonStr = jsonByte.decode("utf-8")
        kmeansVo = json.loads(jsonStr, object_hook=dictToKmeansVo)
        k = 8  # initial number of cluster centres
        data = load_data(kmeansVo.dataPath)
        while k >= 2:
            years = get_index_means(data, k)
            if len(years) != 0:
                print("聚类成功,k={}".format(k))
                commonResponse = com.CommonResponse(
                    com.successCode, com.successMsg, {"list": years})
                break
            k = k - 1
    except Exception as e:  # narrowed from BaseException
        print("执行kmeans失败:{}".format(e))
        commonResponse = com.CommonResponse(com.errorCode, "{}".format(e),
                                            {"list": []})
    # 返回数据
    self.write(commonResponse.__dict__)
def post(self):
    """Train an SVM on libsvm-format data and persist the model to disk."""
    try:
        jsonByte = self.request.body
        jsonStr = jsonByte.decode("utf-8")
        svmVo = json.loads(jsonStr, object_hook=svm.dictToSvmVo)
        file_name = svmVo.trainDataPath
        model = svmVo.modelPath
        # 1. load the training data
        dataSet, labels = load_data_libsvm(file_name)
        # 2. train the SVM model (fixed hyper-parameters)
        C = 0.6
        toler = 0.001
        maxIter = 500
        svm_model = svm.SVM_training(dataSet, labels, C, toler, maxIter)
        # 3. report training accuracy (side effect only, result unused)
        svm.cal_accuracy(svm_model, dataSet, labels)
        # 4. save the final SVM model
        svm.save_svm_model(svm_model, model)
        commonResponse = com.CommonResponse(com.successCode, com.successMsg,
                                            {"obj": 1})
    except Exception as e:  # narrowed from BaseException
        print("执行支持向量机-生成模型失败:{}".format(e))
        # NOTE(review): the error payload is also {"obj": 1}, identical to the
        # success payload — callers must rely on the error code; confirm this
        # is intended (other handlers use com.missingValues here).
        commonResponse = com.CommonResponse(com.errorCode, "{}".format(e),
                                            {"obj": 1})
    # 返回数据
    self.write(commonResponse.__dict__)
def post(self):
    """Build a CART decision tree from training data and classify one sample."""
    try:
        # 1. load the training data
        jsonByte = self.request.body
        jsonStr = jsonByte.decode("utf-8")
        decisionTreeVo = json.loads(jsonStr,
                                    object_hook=dictToDecisionTreeVo)
        data_train = load_data(decisionTreeVo.trainDataPath)
        # 2. build the CART classification tree
        tree = build_tree(data_train)
        # 3. load the sample to classify
        test_data = load_data(decisionTreeVo.testDataPath)
        # 4. predict: take the first key of the leaf's class distribution
        result = list(predict(test_data[0], tree).keys())[0]
        commonResponse = com.CommonResponse(com.successCode, com.successMsg,
                                            {"obj": result})
    except Exception as e:  # narrowed from BaseException
        print("执行决策树算法失败:{}".format(e))
        commonResponse = com.CommonResponse(
            com.errorCode, "{}".format(e), {"obj": com.missingValues})
    # 返回数据
    self.write(commonResponse.__dict__)
def post(self):
    """Generate a TCC chart SVG and return its result handle.

    The original declared ``global commonResponse``, leaking the per-request
    response into module scope (shared across requests/handlers); the
    variable is local now — both branches below always assign it.
    """
    try:
        jsonByte = self.request.body
        jsonStr = jsonByte.decode("utf-8")
        tccImageVo = json.loads(jsonStr, object_hook=dictToTccImageVo)
        result = create_svg(tccImageVo)
        commonResponse = com.CommonResponse(com.successCode, com.successMsg,
                                            {"obj": result})
    except Exception as e:  # narrowed from BaseException
        print("执行TCC画图失败:{}".format(e))
        commonResponse = com.CommonResponse(
            com.errorCode, "{}".format(e), {"obj": com.missingValues})
    # 返回数据
    self.write(commonResponse.__dict__)
def post(self):
    """Load a saved SVM model and predict the label of one test sample."""
    try:
        jsonByte = self.request.body
        jsonStr = jsonByte.decode("utf-8")
        svmVo = json.loads(jsonStr, object_hook=sv.dictToSvmVo)
        file_name = svmVo.testDataPath
        model_name = svmVo.modelPath
        # 1. load the test data
        test_data = load_test_data(file_name)
        # 2. load the persisted SVM model
        svm_model = load_svm_model(model_name)
        # 3. compute the predictions and return the first one
        prediction = get_prediction(test_data, svm_model)
        commonResponse = com.CommonResponse(com.successCode, com.successMsg,
                                            {"obj": prediction[0]})
    except Exception as e:  # narrowed from BaseException
        print("执行支持向量机算法失败:{}".format(e))
        commonResponse = com.CommonResponse(com.errorCode, "{}".format(e),
                                            {"obj": com.missingValues})
    # 返回数据
    self.write(commonResponse.__dict__)
def post(self):
    """PCA + least-squares forecast.

    Reduces the feature matrix to 3 principal components, fits
    least-squares weights against the labels, projects the forecast data
    through the same PCA, and returns the real part of the last prediction.
    """
    try:
        jsonByte = self.request.body
        jsonStr = jsonByte.decode("utf-8")
        pcaVo = json.loads(jsonStr, object_hook=dictToPcaVo)
        feature_path = pcaVo.sampleDataPath
        label_path = pcaVo.labelDataPath
        test_path = pcaVo.forecastDataPath
        data = loadDataSet(feature_path)
        # Keep the top 3 principal components.
        lowDDataMat, reconMat = pca(data, 3)
        y = load_data(label_path)
        w_ls = least_square(mat(lowDDataMat), y)
        testData = loadDataSet(test_path)
        testlowDDataMat, testreconMat = pca(testData, 3)
        predict = get_prediction(mat(testlowDDataMat), mat(w_ls))
        # .real drops any numerically-spurious imaginary component from the
        # eigen-decomposition; [-1, 0] is the last forecast value.
        commonResponse = com.CommonResponse(com.successCode, com.successMsg,
                                            {"obj": predict[-1, 0].real})
    except Exception as e:  # narrowed from BaseException
        print("执行主成分分析失败:{}".format(e))
        commonResponse = com.CommonResponse(com.errorCode, "{}".format(e),
                                            {"obj": com.missingValues})
    # 返回数据
    self.write(commonResponse.__dict__)
def post(self):
    """Draw wind barbs from u/v components and save the figure as an SVG."""
    try:
        jsonByte = self.request.body
        jsonStr = jsonByte.decode("utf-8")
        vectorWindVo = json.loads(jsonStr, object_hook=dictToVectorWindVo)
        x = vectorWindVo.lons
        y = vectorWindVo.lats
        u = vectorWindVo.uwindValues
        v = vectorWindVo.vwindValues
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        ax.barbs(x, y, u, v, length=4, pivot='middle')
        # Strip ticks and axes so only the barbs remain in the output.
        plt.xticks([])
        plt.yticks([])
        plt.axis('off')
        imageDir = vectorWindVo.imageDir
        # exist_ok avoids the check-then-create race of the original code.
        os.makedirs(imageDir, exist_ok=True)
        imageName = vectorWindVo.imageName
        plt.savefig(imageDir + imageName,
                    format='svg',
                    bbox_inches='tight',
                    transparent=True,
                    dpi=600)
        plt.clf()
        plt.close(fig)
        commonResponse = com.CommonResponse(
            com.successCode, com.successMsg, {"obj": imageDir + imageName})
    except Exception as e:  # narrowed from BaseException
        print("执行矢量风出图失败:{}".format(e))
        commonResponse = com.CommonResponse(
            com.errorCode, "{}".format(e), {"obj": com.missingValues})
    # 返回数据
    self.write(commonResponse.__dict__)
def post(self):
    """Multiple-regression forecast over the supplied data files."""
    try:
        jsonByte = self.request.body
        jsonStr = jsonByte.decode("utf-8")
        multipleRegressionVo = json.loads(
            jsonStr, object_hook=dictToMultipleRegressionVo)
        # 1. load the regression (result) training data
        data_train = load_data(multipleRegressionVo.resultDataFilePath)
        # 2. load the sample data
        sample_data = load_data(multipleRegressionVo.sampleDataFilePath)
        # NOTE(review): testDataFilePath is passed as a path, not loaded —
        # presumably multiple_regression() reads it itself; confirm.
        test_data = multipleRegressionVo.testDataFilePath
        # 3. predict and return the scalar at row 0, column 0
        result = multiple_regression(data_train, sample_data, test_data)
        commonResponse = com.CommonResponse(com.successCode, com.successMsg,
                                            {"obj": result[0][0]})
    except Exception as e:  # narrowed from BaseException
        print("执行多元回归算法失败:{}".format(e))
        commonResponse = com.CommonResponse(
            com.errorCode, "{}".format(e), {"obj": com.missingValues})
    # 返回数据
    self.write(commonResponse.__dict__)
def post(self):
    """Optimal-subset (best-subset) regression forecast.

    Reads a whitespace-separated sample file where each row is one series
    (predictors first, predictand last), builds the centred cross-product
    matrix, enumerates predictor subsets with a Gray-code-style sweep
    (a variable is swept in or out of the regression at each step), scores
    the best model of each subset size with the CSC skill score, and writes
    the winning model's forecast value back as "obj" (two decimals).

    Fixes vs the original: DataFrame.append (removed in pandas 2.0) is
    replaced by pd.concat; the bare ``except BaseException`` is narrowed to
    ``except Exception``; the duplicated reg1/reg2 sweep routines (identical
    except for the membership flag) are merged into one helper — the
    original's tuple returns re-bound the same in-place-mutated arrays, so
    behavior is unchanged.

    NOTE(review): this block was reconstructed from whitespace-collapsed
    source; the nesting inside the sweep (sweep body and best-subset
    bookkeeping under the pivot-magnitude guard) is the most plausible
    reading — verify against the original file.
    """
    try:
        jsonByte = self.request.body
        jsonStr = jsonByte.decode("utf-8")
        optimalSubsetVo = json.loads(jsonStr,
                                     object_hook=dictToOptimalSubsetVo)
        txtPath = optimalSubsetVo.sampleDataPath
        feature_num = int(optimalSubsetVo.featureNum)
        # Working-array width rounded up to the next multiple of 100.
        col_row_num = (feature_num // 100 + 1) * 100
        ff1 = pd.read_csv(txtPath, sep='\\s+', header=None)
        ff2 = pd.DataFrame(ff1)
        nums = ff1.shape[0]  # number of series (predictors + predictand)
        xxCol = col_row_num
        xfCol = col_row_num
        xfRow = col_row_num
        n = feature_num      # length of each historical series
        nsize = n // 10 + 1  # NOTE(review): computed but never used below
        n1 = n + 1           # history plus the one value to forecast
        # 2-D work arrays (fixed 30x30 capacity, i.e. up to 30 series).
        ex = np.zeros(shape=(30, 30), dtype=float)   # membership flags per subset
        cc = np.zeros(shape=(30, 30), dtype=float)   # coefficients of selected vars
        coe = np.zeros(shape=(30, 30), dtype=float)  # regression coefficients
        # 30 x col_row_num buffer of the selected predictor series.
        xx = np.zeros(shape=(30, xxCol), dtype=float)
        # col_row_num x col_row_num buffer of fitted values per subset.
        xf = np.zeros(shape=(xfRow, xfCol), dtype=float)
        csca = np.zeros(30, dtype=float)  # CSC score per subset
        ihh = np.zeros(30, dtype=int)     # index of the winning subset
        x1 = np.zeros(n1, dtype=float)    # fitted series of the current model
        x2 = np.zeros(n1, dtype=float)    # residuals
        x = np.zeros(n1, dtype=float)     # observed predictand series
        # The raw table has missing cells (e.g. the last value of the last
        # row); fill them with sentinel values.
        f1 = ff2.fillna(32766)
        f2 = ff2.fillna(-999)
        # When the feature count is an exact multiple of 10 the forecast row
        # comes out one row short, so pad one full sentinel row back.
        # (Earlier groups always carry a feature vector per full row.)
        if feature_num % 10 == 0:
            f32766 = pd.DataFrame([[
                32766, 32766, 32766, 32766, 32766, 32766, 32766, 32766,
                32766, 32766
            ]])
            # DataFrame.append was removed in pandas 2.0; concat with
            # ignore_index=True is the equivalent, version-stable call.
            f1 = pd.concat([f1, f32766], ignore_index=True)
            f0 = pd.DataFrame([[
                -999, -999, -999, -999, -999, -999, -999, -999, -999, -999
            ]])
            f2 = pd.concat([f2, f0], ignore_index=True)
        dat = []
        xy = []
        for num in range(0, nums):
            f12 = []
            f13 = []
            # Collect row `num` across all columns of the padded tables.
            for feature_index in range(0, f1.shape[1]):
                f12.append(f1[feature_index][num])
                f13.append(f2[feature_index][num])
            # Strip the 32766 sentinels from the f1 view.
            # NOTE(review): mutates the list while iterating it (kept from
            # the original) — all removals fire on the first sentinel hit.
            for bb in f12:
                if bb == 32766:
                    for bb1 in range(0, f12.count(32766)):
                        f12.remove(bb)
            f12 = np.array(f12)
            f13 = np.array(f13)
            dat.append(f12)  # series with sentinels stripped
            xy.append(f13)   # series with -999 fill kept
        dat = np.array(dat)
        dat_mean = []
        for i in range(0, len(dat)):
            dat_mean.append(np.nanmean(dat[i][0:n]))
        # Centred cross-products: a[i][j] = sum_k x_i(k)x_j(k) - n*m_i*m_j.
        a1 = []
        for i in range(0, nums):
            for j in range(0, nums):
                s = 0
                for k in range(0, n):
                    s += (xy[i][k] * xy[j][k])
                s -= (n * dat_mean[i] * dat_mean[j])
                a1.append(s)
        a = []
        for i in range(0, nums):
            a2 = []
            for j in range(i * nums, i * nums + nums):
                a2.append(a1[j])
            a.append(a2)
        a = np.array(a)
        # Total sum of squares of the predictand.
        # NOTE(review): unused below, kept from the original.
        sto = a[nums - 1][nums - 1]
        # Gray-code enumeration order of the 2^(nums-2) subset steps.
        ih = 2**(nums - 2) - 1
        ks = [0] * (ih + 1)
        ks[0] = 1
        for k in range(1, nums - 2):
            j = 2**k
            ks[j - 1] = k + 1
            for i in range(0, j - 1):
                m = ks[j - i - 2]
                ks[j + i] = -m
        ks[ih] = 5
        nb = 0
        rss = np.full(nums - 1, 10.0**20)   # best RSS seen per subset size
        ma = np.zeros(nums - 1, dtype=int)  # current membership mask

        def sweep(it, in_model):
            """Gauss-Jordan sweep on pivot column ``it - 1``.

            Sweeping toggles variable kk into (in_model=1, original reg1) or
            out of (in_model=0, original reg2) the regression.  When the
            resulting residual sum of squares a[-1][-1] beats the best seen
            for the current subset index nb, the membership mask and
            coefficients are recorded.  All updates mutate the enclosing
            arrays in place.
            """
            kk = it - 1
            # Skip the whole sweep when the pivot is numerically zero.
            if (abs(a[kk][kk]) > (10**(-12))):
                for j in range(0, nums):
                    for i in range(0, nums):
                        if (i != kk and j != kk):
                            a[i][j] -= a[kk][j] * a[i][kk] / a[kk][kk]
                for j in range(0, nums):
                    if (j != kk):
                        a[j][kk] = a[j][kk] / a[kk][kk]
                        a[kk][j] = -a[kk][j] / a[kk][kk]
                a[kk][kk] = 1 / a[kk][kk]
                ma[kk] = in_model
                if (a[nums - 1][nums - 1] <= rss[nb - 1]):
                    rss[nb - 1] = a[nums - 1][nums - 1]
                    for j in range(0, nums - 1):
                        ex[j][nb - 1] = ma[j]
                        coe[j][nb - 1] = a[nums - 1][j]

        ts = 0  # sweep step counter (diagnostic only)
        for m in range(0, ih + 1):
            it = abs(ks[m])
            if ks[m] > 0:
                nb += 1
                ts += 1
                sweep(it, 1)  # bring a variable into the model
            else:
                nb -= 1
                ts += 1
                sweep(it, 0)  # sweep a variable out of the model
        # Replay the Gray-code path in reverse with flipped signs.
        for m in range(0, ih):
            it = abs((-1) * ks[ih - 1 - m])
            if (-1) * ks[ih - 1 - m] > 0:
                nb += 1
                ts += 1
                sweep(it, 1)
            else:
                nb -= 1
                ts += 1
                sweep(it, 0)

        def csc():
            """CSC skill score of the current fit (x = observed, x1 = fitted).

            Combines a 3x3 trend-contingency information term with a
            variance-ratio term; uses the enclosing loop variable ``k``
            (current subset index) in the degrees-of-freedom correction.
            """
            le = np.zeros((3, 3), dtype=float)
            p = np.zeros(3, dtype=float)
            q = np.zeros(3, dtype=float)
            v = 0.0
            u = 0.0
            # Mean absolute step of the fitted (u) and observed (v) series.
            for i in range(0, n - 1):
                u += abs(x1[i + 1] - x1[i]) / (n - 1)
                v += abs(x[i + 1] - x[i]) / (n - 1)
            # Classify each step pair into a 3x3 contingency table.
            for i in range(0, n - 1):
                xp = x[i + 1] - x[i]
                xp1 = x1[i + 1] - x1[i]
                if (xp1 >= u and xp >= v):
                    le[0][0] += 1
                if (xp1 >= u and xp <= 0):
                    le[2][0] += 1
                if (xp1 >= u and xp > 0 and xp < v):
                    le[1][0] += 1
                if (xp1 > 0 and xp1 < u and xp >= v):
                    le[0][1] += 1
                if (xp1 > 0 and xp1 < u and xp > 0 and xp < v):
                    le[1][1] += 1
                if (xp1 > 0 and xp1 < u and xp <= 0):
                    le[2][1] += 1
                if (xp1 <= 0 and xp >= v):
                    le[0][2] += 1
                if (xp1 <= 0 and xp > 0 and xp < v):
                    le[1][2] += 1
                if (xp1 <= 0 and xp <= 0):
                    le[2][2] += 1
            le = le.T
            r1 = 0.0
            r2 = 0.0
            r3 = 0.0
            # Information content of the cells and of the two marginals.
            for i in range(0, 3):
                for j in range(0, 3):
                    if le[j][i] != 0.0:
                        r1 += (le[j][i] * (math.log(float(le[j][i]))))
            for i in range(0, 3):
                for j in range(0, 3):
                    p[i] += le[j][i]
                    q[i] += le[i][j]
            for i in range(0, 3):
                if (q[i] != 0.0):
                    r2 += (q[i] * (math.log(q[i])))
                if (p[i] != 0.0):
                    r3 += (p[i] * (math.log(p[i])))
            s1 = 2 * (r1 + (n - 1) * (math.log(float(n - 1))) - (r2 + r3))
            xm1 = 0.0
            for i in range(0, n):
                xm1 += (x[i] / float(n))
            qk = 0.0
            qx = 0.0
            for i in range(0, n):
                qk += ((x[i] - x1[i]) * (x[i] - x1[i]) / n)
                qx += ((x[i] - xm1) * (x[i] - xm1) / n)
            # Variance-ratio term penalised by the model size (k + 1).
            s2 = (n - (k + 1)) * (1 - qk / qx)
            return s1 + s2

        # Evaluate the best model of every subset index k.
        for k in range(0, nums - 1):
            s = rss[k]  # NOTE(review): read but unused, kept from original
            # Intercept c = mean(y) - sum(coef_j * mean(x_j)) over members.
            c = 0.0
            for j in range(0, nums - 1):
                if ex[j][k] != 0:
                    c += (coe[j][k] * dat_mean[j])
            c = dat_mean[nums - 1] - c
            # Gather member coefficients and their series for subset k.
            mm = 0
            for j in range(0, nums - 1):
                if ex[j][k] != 0.0:
                    mm += 1
                    cc[mm - 1][k] = coe[j][k]
                    for i in range(0, n1):
                        xx[mm - 1][i] = xy[j][i]
            # Fitted values, including the forecast point at index n.
            for i in range(0, n1):
                xf[i][k] = c
                for j in range(0, mm):
                    xf[i][k] = xf[i][k] + cc[j][k] * xx[j][i]
                x[i] = xy[nums - 1][i]
                x1[i] = xf[i][k]
            for j in range(0, n):
                x2[j] = xf[j][k] - x[j]
            w = 0.0
            for j in range(0, n):
                w += x2[j]**2
            v1 = np.sqrt(w / n)  # RMSE of subset k (diagnostic only)
            csca[k] = csc()
        # Pick the subset with the highest CSC score (last tie wins).
        aa = 0.0
        for i in range(0, nums - 1):
            if (csca[i] > aa):
                aa = csca[i]
        for i in range(0, nums - 1):
            if csca[i] == aa:
                ihh[0] = i
        kk1 = ihh[0]
        for j in range(0, nums - 1):
            if (j == kk1):
                for i in range(0, n1):
                    x1[i] = xf[i][j]
        w = 0.0
        for i in range(0, n):
            # NOTE(review): x1[1] (not x1[i]) matches the original source —
            # it looks like a typo for x1[i] but is kept to preserve
            # behavior; the resulting v is unused anyway.
            w += (x[i] - x1[1])**2
        v = np.sqrt(w / n)
        # Forecast = fitted value of the winning model at the forecast index.
        commonResponse = com.CommonResponse(
            com.successCode, com.successMsg, {"obj": '%.2f' % x1[n]})
    except Exception as e:  # narrowed from BaseException
        print("执行最优子集失败:{}".format(e))
        commonResponse = com.CommonResponse(
            com.errorCode, "{}".format(e), {"obj": com.missingValues})
    # 返回数据
    self.write(commonResponse.__dict__)
def post(self):
    """Render an interpolated contour map, optionally clipped to a shapefile.

    The scattered (lon, lat, value) points are RBF-interpolated onto a
    regular grid, drawn with a custom colour map, clipped against the shp
    region when one is given, and saved to savePath + imageName.
    """
    try:
        jsonByte = self.request.body
        jsonStr = jsonByte.decode("utf-8")
        svgDto = json.loads(jsonStr, object_hook=dictToSvgDto)
        # Scattered station values plus the target area bounds.
        lat = svgDto.lats
        lon = svgDto.lons
        val = svgDto.values
        minLon = svgDto.areaMinLon
        maxLon = svgDto.areaMaxLon
        minLat = svgDto.areaMinLat
        maxLat = svgDto.areaMaxLat
        lonLength = svgDto.lonLength
        latLength = svgDto.latLength
        # Regular output grid over the area.
        olon = np.linspace(minLon, maxLon, lonLength)
        olat = np.linspace(minLat, maxLat, latLength)
        olon, olat = np.meshgrid(olon, olat)
        # Radial-basis-function interpolation onto the grid.
        func = Rbf(lon, lat, val, function='linear')
        rhu_data_new = func(olon, olat)
        # Custom colour map with explicit under/over colours.
        levels = svgDto.levels
        my_cmap = colors.ListedColormap(svgDto.colorMap)
        my_cmap.set_under(svgDto.underColor)
        my_cmap.set_over(svgDto.overColor)
        norm3 = colors.BoundaryNorm(levels, my_cmap.N)
        # NOTE(review): degree data is drawn with transform=Mercator();
        # cartopy usually expects transform=ccrs.PlateCarree() for lon/lat
        # degrees — confirm current output is correct before changing.
        projection = ccrs.Mercator()
        fig = plt.figure(dpi=300)
        ax = fig.add_subplot(111, projection=projection)
        # Limit the visible map to the requested area.
        ax.set_extent([minLon, maxLon, minLat, maxLat], crs=projection)
        cs = ax.contourf(olon, olat, rhu_data_new,
                         levels=levels, cmap=my_cmap, norm=norm3,
                         extend='both', transform=projection, alpha=1)
        # Clip to the shapefile region when a path is supplied
        # (circulation charts pass none and stay unclipped).
        shp_name = svgDto.shpPath
        if shp_name:
            sf = shapefile.Reader(shp_name, encoding="gbk")
            for shp_record in sf.shapeRecords():
                vertices = []
                codes = []
                pts = shp_record.shape.points
                prt = list(shp_record.shape.parts) + [len(pts)]
                # One MOVETO/LINETO.../CLOSEPOLY run per ring of the shape.
                for i in range(len(prt) - 1):
                    for j in range(prt[i], prt[i + 1]):
                        vertices.append((pts[j][0], pts[j][1]))
                    codes += [Path.MOVETO]
                    codes += [Path.LINETO] * (prt[i + 1] - prt[i] - 2)
                    codes += [Path.CLOSEPOLY]
                clip = Path(vertices, codes)
                clip = PathPatch(clip, transform=ax.transData)
                for contour in cs.collections:
                    contour.set_clip_path(clip)
        # Transparent background and no frame around the map.
        ax.outline_patch.set_visible(False)
        ax.background_patch.set_visible(False)
        # exist_ok avoids the check-then-create race of the original code.
        save_path = svgDto.savePath
        os.makedirs(save_path, exist_ok=True)
        image_name = svgDto.imageName
        # tight bbox keeps the saved image compact.
        plt.savefig(save_path + image_name,
                    bbox_inches='tight',
                    transparent=True,
                    pad_inches=0)
        plt.clf()
        plt.close(fig)
        commonResponse = com.CommonResponse(
            com.successCode, com.successMsg,
            {"obj": save_path + image_name})
    except Exception as e:  # narrowed from BaseException
        print("执行svg出图失败:{}".format(e))
        commonResponse = com.CommonResponse(
            com.errorCode, "{}".format(e), {"obj": com.missingValues})
    # 返回数据
    self.write(commonResponse.__dict__)