def PreprocessKernel(name):
    """Load one board image and build (grids, labels) for SVC training.

    name -- path to the image file; its clean file name encodes the board
    FEN.  Returns the preprocessed grid cells and per-cell class indices.
    """
    image = DataHelper.ReadImage(name, gray=True)
    cells = SVCClassifier.SVCPreprocess(image)
    fen = DataHelper.GetCleanNameByPath(name)
    one_hot = np.array(BoardHelper.FENtoOneHot(fen))
    return cells, one_hot.argmax(axis=1)
def make_json(self, coal_id, tech_id, json_file):
    """Generate the pore-report json from the DB, then re-encode it to GBK."""
    # Read the database and generate the json file (the file is UTF-8!).
    # NOTE: when generating json, every string (keys and values) must be
    # unicode to reduce the chance of encoding errors.
    DataHelper.generateJsonFileOfPoreReport(coal_id, tech_id, json_file)
    # Convert the json file from UTF-8 to GBK in place:
    # report.exe can currently only parse Chinese text correctly from a
    # GBK-encoded json file.
    EncodeHelper.UTF8_2_GBK(json_file, json_file)
def onCreatReport(self):
    """Generate the drilling report for the working-surface technology design."""
    coal = SQLClientHelper.GetCoalById(self.coal_id)
    if coal.id <= 0:
        UiHelper.MessageBox(u'sorry,出了点问题,请联系技术人员(错误码:144)')
        return
    # Look up the extraction technology of the working surface.
    ws_tech = SQLClientHelper.GetDesignWorkSurfTechnologyById(self.design_id)
    if ws_tech.id <= 0:
        UiHelper.MessageBox(u'sorry,出了点问题,请联系技术人员(错误码:145)')
        return
    # Ask CAD to generate the drilling-hole data.
    ret = CbmClientHelper.SendCommandToCAD("JL.GeneratePore23 %d %d" % (coal.id, ws_tech.id), True)
    if ret:
        # Show the drilling report.
        DataHelper.show_report23(coal, ws_tech)
    else:
        UiHelper.MessageBox(u'启动AutoCAD失败!!!')
    # json file path (use an absolute path to avoid errors)
    # json_file = os.path.abspath('.\\help\\json\\reportP21.json')
    # generate the json file
    # self.make_json(coal.id, tws_tech.id, json_file)
    # generate the word report
    # doc.CreatReport(json_file)
def main(): data = DataHelper("data/Twitter.csv") # data.MinMaxScale() rp = Representation(data.getdata()) # rp.cal_mean_by_class() # rp.cal_cosdist_by_eigenvect() _ = rp.cal_KLdivergence()
def append(): # display template for submitting CSV if request.method == 'GET': return render_template('index.html', acs_variables=DataHelper.ACS_VARIABLES) # logic for manipulating submitted CSV and outputting elif request.method == 'POST': input_file = request.files['input_file'] acs_variable_codes = request.form.getlist('acs_variables') # checks if inputs exist assert input_file and acs_variable_codes and len(acs_variable_codes) > 0, 'INPUT ERROR' # checks if input is a CSV if DataHelper.allowed_file(input_file.filename): output_csv_string = '' try: output_csv_string, error = DataHelper.append_variables(input_file, acs_variable_codes) except: print "Unable to append variables:", sys.exc_info()[0] # sanitizes filename output_filename = 'APPENDED-' + secure_filename(input_file.filename) # prepares appended CSV file to be downloaded to user output = Response( output_csv_string, mimetype='text/csv', headers={ 'Content-disposition': 'attachment; filename=' + output_filename } ) return output else: # NOT VALID FILE TYPE # HANDLE EXCEPTION raise
def __init__(self, dataSet, featureCount=None, distanceCalculator=None,
             isNormalizeFeatures=True):
    """Build a KD-tree-backed index over dataSet.

    dataSet -- sample matrix, one row per sample.
    featureCount -- number of leading columns treated as distance features;
        defaults to all columns.  Must not exceed the column count.
    distanceCalculator -- callable (X, v) -> distances; defaults to
        Minkowski distance.
    isNormalizeFeatures -- when True, standardize the distance features.
    Raises ValueError when featureCount exceeds the number of columns.
    """
    if featureCount is not None and featureCount > dataSet.shape[1]:
        raise ValueError()
    if featureCount is None:
        featureCount = dataSet.shape[1]
    if distanceCalculator is None:
        distanceCalculator = (
            lambda X, v: DataHelper.calcMinkowskiDistance(X, v))
    if isNormalizeFeatures:
        if featureCount < dataSet.shape[1]:
            # Normalize only the leading feature columns, then re-attach
            # the trailing (non-feature) columns unchanged.
            self.__dataSet = dataSet[:, 0:featureCount]
            self.__dataSet, self.__mu, self.__sigma = DataHelper.normalizeFeatures(
                self.__dataSet)
            self.__dataSet = np.hstack(
                (self.__dataSet, dataSet[:, featureCount:]))
        else:
            self.__dataSet, self.__mu, self.__sigma = DataHelper.normalizeFeatures(
                dataSet)
    else:
        # No normalization: identity transform (mu=0, sigma=1).
        self.__dataSet, self.__mu, self.__sigma = dataSet, 0, 1
    self.__featureCount = featureCount
    self.__distanceCalculator = distanceCalculator
    self.__isNormalizeFeatures = isNormalizeFeatures
    self.__tree = KDTree.KDTree(self.__dataSet, featureCount)
def onT0GetFocus(self):
    """Recompute drilling time t0 by table interpolation when t0 gains focus."""
    # Read the UI fields.
    thick, ok = self.ui.thick.text().toDouble()
    permeability_lambda, ok = self.ui.permeability_lambda.text().toDouble()
    alpha, ok = self.ui.alpha.text().toDouble()
    p0, ok = self.ui.p0.text().toDouble()
    r0, ok = self.ui.r0.text().toInt()
    t0, ok = self.ui.t0.text().toInt()
    r1, ok = self.ui.r1.text().toInt()
    eta, ok = self.ui.eta.text().toDouble()
    if r0 <= 0 or thick <= 0:
        return
    # Bore diameter unit conversion (r0: presumably mm -> m; confirm).
    r0, r1 = r0 * 0.001, r1 * 1
    # psai computation.
    k = DataHelper.gas_psai(permeability_lambda, p0, alpha, r0)
    print 'k= %.2f' % log10(k)
    # Build x, y, z for the interpolation.
    x = t0  # t0
    y = DataHelper.R0_func(r1, thick) / r0  # R0/r0
    z = eta  # η
    # Interpolate.
    gdo = DataHelper.GasDrillingOpt()
    x = gdo.xValue(y, z, k)
    x = int(round(x))
    # Write the result back only on a meaningful change (> 1 unit).
    if abs(x - t0) > 1:
        self.ui.t0.setText(u'%d' % x)
def onAnalyse(self):
    """Read the dialog fields, compute psai, and draw the XYZ analysis plot."""
    # Read the UI fields.
    thick, ok = self.ui.thick.text().toDouble()
    permeability_lambda, ok = self.ui.permeability_lambda.text().toDouble()
    alpha, ok = self.ui.alpha.text().toDouble()
    p0, ok = self.ui.p0.text().toDouble()
    r0, ok = self.ui.r0.text().toInt()
    t0, ok = self.ui.t0.text().toInt()
    r1, ok = self.ui.r1.text().toInt()
    eta, ok = self.ui.eta.text().toDouble()
    if r0 <= 0 or thick <= 0:
        return
    # Bore diameter unit conversion (r0: presumably mm -> m; confirm).
    r0, r1 = r0 * 0.001, r1 * 1
    # psai computation.
    k = DataHelper.gas_psai(permeability_lambda, p0, alpha, r0)
    print 'k= %.2f' % log10(k)
    # Build x, y, z.
    x = t0  # t0
    y = DataHelper.R0_func(r1, thick) / r0  # R0/r0
    z = eta  # η
    DataHelper.DrawXYZ(x, y, k)
def onCreatReport(self):
    """Generate the drilling report for the working-surface technology design."""
    coal = SQLClientHelper.GetCoalById(self.coal_id)
    if coal.id <= 0:
        UiHelper.MessageBox(u'sorry,出了点问题,请联系技术人员(错误码:144)')
        return
    # Look up the extraction technology of the working surface.
    ws_tech = SQLClientHelper.GetDesignWorkSurfTechnologyById(
        self.design_id)
    if ws_tech.id <= 0:
        UiHelper.MessageBox(u'sorry,出了点问题,请联系技术人员(错误码:145)')
        return
    # Ask CAD to generate the drilling-hole data.
    ret = CbmClientHelper.SendCommandToCAD(
        "JL.GeneratePore23 %d %d" % (coal.id, ws_tech.id), True)
    if ret:
        # Show the drilling report.
        DataHelper.show_report23(coal, ws_tech)
    else:
        UiHelper.MessageBox(u'启动AutoCAD失败!!!')
    # json file path (use an absolute path to avoid errors)
    # json_file = os.path.abspath('.\\help\\json\\reportP21.json')
    # generate the json file
    # self.make_json(coal.id, tws_tech.id, json_file)
    # generate the word report
    # doc.CreatReport(json_file)
def demo_update(i, graphHolder, rotations):
    """Animation callback: plot rotation i, holding the last frame past the end.

    i -- frame index supplied by the animation driver.
    graphHolder -- plot artist whose data is replaced in place via set_data.
    rotations -- sequence of rotation values; the last one is reused once
        the animation runs past the end of the list.
    """
    # Both branches of the original did the same get_points/set_data dance;
    # clamping the index once removes the duplication (and
    # rotations[len(rotations) - 1] is just the last element).
    index = min(i, len(rotations) - 1)
    data, theta = DataHelper.get_points(rotations[index])
    graphHolder.set_data(theta, data)
def func_generator(train_file_names):
    """Yield (features, one-hot labels) for each training image path."""
    for path in train_file_names:
        image = DataHelper.ReadImage(path)
        features = CNNClassifier.PreprocessImage(image)
        fen = DataHelper.GetCleanNameByPath(path)
        labels = np.array(BoardHelper.FENtoOneHot(fen))
        yield features, labels
def onDrawCurve(self):
    """Read q0 and alpha from the dialog, accept it, then draw the curve."""
    # Read data from the UI.
    q0, ok = self.ui.q0.text().toDouble()
    alpha, ok = self.ui.alpha.text().toDouble()
    # Close the dialog and return 1.
    self.accept()
    # Draw the curve.
    DataHelper.draw_curve(q0, alpha)
def onReg(self):
    """Register a new account, its mine, and the mine's coal seams.

    Rolls back everything created so far (seams, mine, account) when any
    later step fails, so registration is effectively all-or-nothing.
    """
    uname = unicode(self.ui.username.text()).encode('utf-8')
    pwd = unicode(self.ui.password.text()).encode('utf-8')
    # Check whether the user name is already registered.
    if DataHelper.sql_check_user(uname, pwd):
        UiHelper.MessageBox('用户名已经被注册了,换一个吧!')
    else:
        # Register the new account.
        account_id = DataHelper.sql_create_user(uname, pwd)
        if account_id <= 0:
            UiHelper.MessageBox('抱歉,注册失败\n请联系技术人员(错误码:A1)!')
            return
        # Read the mine fields from the UI.
        mine_name = unicode(self.ui.name.text()).encode('utf-8')
        mine_province = unicode(self.ui.province.text()).encode('utf-8')
        mine_city = unicode(self.ui.city.text()).encode('utf-8')
        mine_region = unicode(self.ui.region.currentText()).encode('utf-8')
        # Register the new mine.
        mine_id = DataHelper.sql_create_mine(mine_name, mine_province,
                                             mine_city, mine_region, account_id)
        # Mine registration failed.
        if mine_id <= 0:
            # Roll back the freshly registered account.
            SQLClientHelper.DeleteAccount(account_id)
            UiHelper.MessageBox('抱歉,注册失败\n请联系技术人员(错误码:A2)!')
            return
        # Split the coal seam numbers (space separated).
        mine_coal_names = unicode(self.ui.coal_nums.text()).encode('utf-8')
        # Split into a list of strings.
        mine_coal_names = mine_coal_names.strip().split()
        # Drop duplicate seam names.
        mine_coal_names = list(set(mine_coal_names))
        # Create the coal seams one by one in the database.
        coal_ids = []
        for name in mine_coal_names:
            coal_ids.append(DataHelper.sql_create_coal(name, mine_id))
        # A seam failed to create (-1): roll everything back.
        if -1 in coal_ids:
            # Delete every seam that was created.
            SQLClientHelper.DeleteMoreCoal(coal_ids)
            # Delete the registered mine.
            SQLClientHelper.DeleteMine(mine_id)
            # Delete the registered account.
            SQLClientHelper.DeleteAccount(account_id)
            UiHelper.MessageBox('抱歉,注册失败\n请联系技术人员(错误码:A3)!')
        else:
            UiHelper.MessageBox('恭喜您,注册成功啦!')
            # Remember the registered user name (read by external callers).
            self.reg_user_name = uname
            # Close the dialog and return 1.
            self.accept()
def _getX(self, x):
    """Build the spline design matrix for input vector x.

    Columns are the degree-d polynomial basis (when d > 0) followed by the
    K truncated-power basis functions, one block per knot.  Returns None
    when both d == 0 and K == 0 (no basis at all).
    """
    if self._K > 0:
        # Truncated-power basis: one column block per knot.
        Y = np.hstack(
            tuple([
                DataHelper.truncatedPower(x, self._knots[k], self.__d)
                for k in range(0, self._K)
            ]))
        # Prepend the polynomial part unless the degree is zero.
        return np.hstack(
            (DataHelper.vectorPoly(x, self.__d), Y)) if self.__d > 0 else Y
    else:
        return DataHelper.vectorPoly(x, self.__d) if self.__d > 0 else None
def onT0GetFocus(self):
    """Recompute drilling time t0 by table interpolation when t0 gains focus."""
    # Read the UI fields.
    thick, ok = self.ui.thick.text().toDouble()
    permeability_lambda, ok = self.ui.permeability_lambda.text().toDouble()
    alpha, ok = self.ui.alpha.text().toDouble()
    p0, ok = self.ui.p0.text().toDouble()
    r0, ok = self.ui.r0.text().toInt()
    t0, ok = self.ui.t0.text().toInt()
    r1, ok = self.ui.r1.text().toInt()
    eta, ok = self.ui.eta.text().toDouble()
    if r0 <= 0 or thick <= 0:
        return
    # Bore diameter unit conversion (r0: presumably mm -> m; confirm).
    r0, r1 = r0 * 0.001, r1 * 1
    # psai computation.
    k = DataHelper.gas_psai(permeability_lambda, p0, alpha, r0)
    print "k= %.2f" % log10(k)
    # Build x, y, z for the interpolation.
    x = t0  # t0
    y = DataHelper.R0_func(r1, thick) / r0  # R0/r0
    z = eta  # η
    # Interpolate.
    gdo = DataHelper.GasDrillingOpt()
    x = gdo.xValue(y, z, k)
    x = int(round(x))
    # Write the result back only on a meaningful change (> 1 unit).
    if abs(x - t0) > 1:
        self.ui.t0.setText(u"%d" % x)
def main():
    """Load the hw2 dataset and fit a gradient boosting model on it."""
    helper = DataHelper.Data()
    x_train, y_train, x_test, y_test, attr_list = helper.loadData(
        "hw2_data_2.txt", 20, 700)
    # print(x_train.shape, y_train.shape, x_test.shape, y_test.shape, attr_list.shape)
    booster = GradientBoosting(x_train, y_train, x_test, y_test, attr_list)
    booster.fit()
def main():
    """Run Adaboost at several iteration counts and report each error rate."""
    data = DataHelper.Data()
    x_train, y_train, x_test, y_test, _ = data.loadData("hw2_data_1.txt", 2, 70)
    # The original repeated this block verbatim for epochs 3/5/10/20;
    # a loop keeps behavior identical while removing the duplication.
    for epoch in (3, 5, 10, 20):
        # Uniform initial sample weights.
        weights = np.ones(len(x_train)) / len(x_train)
        adaboost = Adaboost(weights, epoch, DecisionRule())
        adaboost.train(x_train, y_train)
        prediction = adaboost.predict(x_test)
        print("Error rate for %d iterations is %.2f%%" %
              (epoch, adaboost.evaluate(prediction, y_test)))
def setUp(self):
    """Test fixture: build and fit the 'classic' model on raw data."""
    maker = DataHelper.get_data_maker()
    clustering_year = 2020
    self.model = Models.get_model('classic', clustering_year=clustering_year)
    self.model.load_raw_data(maker)
    self.model.fit()
def __calcDiscreteProbability(self, dataSet, featureValueNumbers):
    """Return smoothed log-probabilities of each discrete feature value.

    dataSet -- sample matrix (one column per feature), or None.
    featureValueNumbers -- row vector; entry i is the number of distinct
        values feature i can take.
    Returns a (max value count) x (feature count) matrix of log
    probabilities; cells beyond a feature's own value count are NaN.
    When dataSet is None a uniform distribution is returned.
    """
    if dataSet is None:
        # No data: uniform log-probability 1/valueCount per feature.
        return np.log(
            np.mat(
                np.ones(
                    (featureValueNumbers.max(), featureValueNumbers.shape[1]))) /
            featureValueNumbers)
    frequency = None
    count = dataSet.shape[0]
    result = np.mat(np.zeros(
        (featureValueNumbers.max(), dataSet.shape[1])))
    for i in range(0, result.shape[1]):
        # Count occurrences of each value of feature i, then apply
        # additive smoothing with self.__smoothingFactor.
        frequency = DataHelper.statisticFrequency(dataSet[:, i])
        result[:, i] = np.mat([
            np.log(((frequency[key] if key in frequency else 0) +
                    self.__smoothingFactor) /
                   (count + featureValueNumbers[0, i] * self.__smoothingFactor))
            if key < featureValueNumbers[0, i] else np.nan
            for key in range(0, result.shape[0])
        ]).T
    return result
def testOptimalSubset():
    """Exhaustive best-subset regression: fit every variable combination,
    keep the lowest-RSS model per subset size, and print fit statistics.
    """
    names, X, y = __loadData()
    p = X.shape[1]
    models = []
    for i in range(0, p):
        layerModels = []
        # Fit all subsets of size i + 1.
        for j in DataHelper.combinations(p, i + 1):
            model = MultipleLinearRegression()
            model.fit(X[:, j], y)
            layerModels.append((j, model))
        # Keep the best (lowest RSS) model of this size.
        models.append(layerModels[np.argmin(
            [item[1].rss for item in layerModels])])
    for indices, model in models:
        print(
            "variables: {0}, rss: {1}, r^2: {2}, aic: {3}, bic: {4}, adjusted-r^2: {5}"
            .format(", ".join(names[indices]), model.rss, model.r2, model.aic,
                    model.bic, model.adjustedR2))
    print("")
    # The final model across sizes is chosen by BIC.
    print("selected model: {0}\r\n".format(
        np.argmin([item[1].bic for item in models])))
    print("\r\n\r\n".join([item[1].__str__() for item in models]))
def main():
    """Compare SVM kernels (radial, sigmoid, polynomial) on the hw2 dataset."""
    loader = DataHelper.Data()
    x_train, y_train, x_test, y_test, _ = loader.loadData(
        "hw2_data_2.txt", 20, 700)
    # Radial kernel.
    SVM(x_train, y_train, x_test, y_test, kernel="RADIAL",
        gamma_range=np.logspace(-3, 2, 6)).run()
    # Sigmoid kernel.
    SVM(x_train, y_train, x_test, y_test, kernel="SIGMOID",
        gamma_range=np.logspace(-3, 2, 6)).run()
    # Polynomial kernel.
    SVM(x_train, y_train, x_test, y_test, kernel="POLYNOMIAL",
        degree_range=range(1, 11)).run()
    plt.show()
def onAnalyse(self):
    """Read the dialog fields, compute psai, and draw the XYZ analysis plot."""
    # Read the UI fields.
    thick, ok = self.ui.thick.text().toDouble()
    permeability_lambda, ok = self.ui.permeability_lambda.text().toDouble()
    alpha, ok = self.ui.alpha.text().toDouble()
    p0, ok = self.ui.p0.text().toDouble()
    r0, ok = self.ui.r0.text().toInt()
    t0, ok = self.ui.t0.text().toInt()
    r1, ok = self.ui.r1.text().toInt()
    eta, ok = self.ui.eta.text().toDouble()
    if r0 <= 0 or thick <= 0:
        return
    # Bore diameter unit conversion (r0: presumably mm -> m; confirm).
    r0, r1 = r0 * 0.001, r1 * 1
    # psai computation.
    k = DataHelper.gas_psai(permeability_lambda, p0, alpha, r0)
    print "k= %.2f" % log10(k)
    # Build x, y, z.
    x = t0  # t0
    y = DataHelper.R0_func(r1, thick) / r0  # R0/r0
    z = eta  # η
    DataHelper.DrawXYZ(x, y, k)
def main():
    """Stream-evaluate a classifier over Twitter batches with drift retraining.

    Trains on the first 4 chunks, then walks the remaining data in groups
    of 4, recording accuracy / confusion stats and retraining at the MDDT
    drift windows.  Prints the total elapsed time at the end.
    """
    data = DataHelper('data/Twitter.csv')
    data.MinMaxScale()
    data = data.getdata()
    traindata = data[:4]
    # traindata = oversamplingSMOTE(traindata,read=True)
    traindata = np.concatenate(traindata)
    testdata = data[4:]
    # method = 'KNN'  # runs ~40s, acc 96.75%, f-measure 0.65
    # method = 'SVM'  # runs ~30s, acc 95%, f-measure overfit (nan)
    # method = 'Random Forest'  # runs ~0.44s, acc 98.28%, f-measure 0.79
    method = 'XGBoost'
    # BUG FIX: this was named "start" and got clobbered by the drift-window
    # bound computed inside the loop below, so the final elapsed-time
    # report was wrong.
    start_time = time.time()
    clf = train(method, traindata)
    acc = []
    imbalanced = []
    # Drift-detection windows (batch index ranges) per detector.
    MDDT = [[5, 9], [11, 14], [16, 18], [25, 29], [31, 37]]
    CUSUM = [[13, 16], [23, 24], [25, 26], [28, 30], [35, 38]]
    PH = [[7, 8], [13, 14], [25, 26]]
    for i in range(len(testdata) // 4):
        print(i)
        testbatch = np.concatenate(testdata[i * 4:(i + 1) * 4])
        try:
            acc.append(test(clf, testbatch, criteria='acc'))
            imbalanced.append(test(clf, testbatch, criteria='confusion matrix'))
        except Exception:
            # Narrowed from a bare except; record placeholder scores on failure.
            acc.append(0)
            imbalanced.append([0, 0, 0])
        testbatch = np.concatenate(testdata[i * 4:(i + 1) * 4])
        clf.fit(testbatch[:, :-1], testbatch[:, -1])
        for t in MDDT:
            # Window start in group-of-4 units (renamed from "start"/"end";
            # the unused end bound was dropped).
            win_start = np.floor(t[0] / 4.0) - 1
            if win_start == i:
                try:
                    testbatch = np.concatenate(testdata[t[0] - 4:t[1] - 4])
                    clf.fit(testbatch[:, :-1], testbatch[:, -1])
                except Exception:
                    continue
                break
    df = pd.DataFrame(acc)
    # df.to_csv("MDDT_ACC_XGB.csv",header=None,index=None)
    df = pd.DataFrame(imbalanced)
    # df.to_csv("MDDT_CONFUSION_XGB.csv",header=None,index=None)
    print("time:", time.time() - start_time)
def run_blstm(
        dim_proj=256,  # LSTM number of hidden units.
        dim_frame=4096,  # feature dimension of an image frame in the video
        output_dim=4096,
        v_length=24,  # video length or number of frames
        max_iter=100,  # The maximum number of epochs to run
        l2_decay=0.0001,  # Weight decay for model params.
        lrate=0.0001,  # Learning rate for SGD, Adam
        lamb=0.2,
        optimizer='SGD',  # SGD, Adam available
        saveto='pairwise-blstm_model.npz',  # The best model will be saved there
        dispFreq=2,  # Display training progress to stdout every N updates
        validFreq=20,  # Compute the validation error after this number of updates.
        saveFreq=2,  # Save the parameters after every saveFreq updates
        batch_size=256,  # The batch size during training.
        valid_batch_size=20,  # The batch size used for the validation/test set.
        test_batch_size=1024,
        weights=[
            1. / 3., 1. / 3., 1. / 3.
        ],  # Weights for forward, backward and mean-value reconstruction
        pairwise_weight=0.999,
        reload_model=False,  # If reload model from saveto.
        is_train=False,
        test_step=1,
        hiera_step=2,
        train_data_file_path='/mnt/data2/lixiangpeng/dataset/features/FCV/fcv/',
        test_data_file_path='/mnt/data2/lixiangpeng/dataset/features/FCV/fcv/',
        # train_data_file_path = './',
        # test_data_file_path = './',
        train_splits_num=1,
        test_splits_num=1,
        record_path='./records.txt',
        SS_path='/mnt/data2/lixiangpeng/dataset/features/FCV/SimilarityInfo/Sim_K1_10_K2_5_fcv.pkl'):
    """Build, optionally reload, and train or test the pairwise BLSTM model.

    All keyword arguments are snapshotted into model_options via locals(),
    so every parameter above reaches the model code by name.
    NOTE(review): `weights` is a mutable (list) default argument -- it is
    shared across calls if ever mutated; confirm it is read-only.
    """
    # Snapshot all keyword arguments as the option dictionary; this must
    # stay the first statement so locals() contains only the parameters.
    model_options = locals().copy()
    if reload_model:
        print "Reloading model options"
        with open('%s.pkl' % saveto, 'rb') as f:
            model_options = pkl.load(f)
    print "model options", model_options
    test_data = DataHelper.DataHelper(model_options['v_length'],
                                      model_options['valid_batch_size'],
                                      model_options['dim_frame'],
                                      data_file='./data/fcv_test_demo.h5',
                                      train=False)
    model = build_model(model_options)
    if reload_model:
        model.reload_params(saveto)
    model.compile(model_options)
    if is_train:
        model.train(model_options['train_data_file_path'], test_data,
                    model_options)
    else:
        model.test(test_data, model_options)
def convertToTrainData(self, data):
    """Convert raw data to AI training format for the configured mode.

    Returns the converted data for self.mode.
    Raises KeyError when self.mode is not a known conversion mode.
    """
    helper = DataHelper.Helper()
    data = helper.convertToAIFormat(data)
    # BUG FIX: the original built a dict of *results*, which eagerly ran
    # BOTH conversions before selecting one.  Dispatch on bound methods so
    # only the requested conversion executes.
    mode_to_converter = {
        "smaller_to_smaller": self.smallerToSmaller,
        "smaller_to_larger": self.smallerToLarger,
    }
    return mode_to_converter[self.mode](data)
def main():
    """Fit a MARS classifier on the hw2 dataset and print its test error."""
    loader = DataHelper.Data()
    x_train, y_train, x_test, y_test, _ = loader.loadData(
        "hw2_data_2.txt", 20, 700)
    classifier = MARS(x_train, y_train, x_test, y_test)
    classifier.fit()
    error_rate = classifier.evaluate()
    print("The testing error rate for MARS classifier is: %.4f" % error_rate)
def optimalSubsets(X, y, m=None):
    """Best-subset selection over the columns of X.

    X, y -- design matrix and response vector.
    m -- optional fixed subset size; when None, every size 1..p is searched.
    Returns a list of (indices, model) pairs: the lowest-RSS model for each
    subset size (or only size m).  The exhaustive search fans out across a
    process pool when p > 14 and m is None.
    Raises ValueError for missing inputs or m outside [1, p].
    """
    if X is None or y is None:
        raise ValueError("matrix X or vector y is None")
    result = []
    p = X.shape[1]
    if m is not None and (m < 1 or m > p):
        raise ValueError("m must be between 1 and column numbers of X")
    # number of models when m is null: 2^p
    if p <= 14 or m is not None:
        # Small search space: evaluate serially.
        result.extend([
            min([
                LinearRegression._optimalSubsetsCore(X, y, indices)
                for indices in DataHelper.combinations(p, k)
            ], key=lambda item: item[1].rss)
            for k in (range(1, p + 1) if m is None else range(m, m + 1))
        ])
    else:
        # Large search space: shuffle the work items and fan out across a
        # process pool, leaving two physical cores free.
        data, models = [], None
        for k in range(1, p + 1):
            data.extend([(X, y, indices)
                         for indices in DataHelper.combinations(p, k)])
        data = list(
            map(
                tuple,
                np.array(data, np.object)[
                    DataHelper.randomArrangement(len(data)), :].tolist()))
        with multiprocessing.Pool(max(1, psutil.cpu_count(False) - 2)) as pool:
            models = pool.starmap(LinearRegression._optimalSubsetsCore, data)
        # Re-group the pooled results: best model per subset size.
        for k in range(1, p + 1):
            result.append(
                min([item for item in models if len(item[0]) == k],
                    key=lambda item: item[1].rss))
    # result item format: (indices, model)
    return result
def isConstant(y, periods, alpha):
    """Decide whether series y is constant at significance level alpha.

    Runs three tests -- white noise on the centered series at each lag in
    `periods`, a linear-trend slope test, and a runs test on the
    above/below-median indicator -- and returns True only when none of
    them rejects (p-value <= alpha).  A zero-variance series is trivially
    constant.
    """
    if y.var() == 0:
        return True
    # White-noise test at each requested lag.
    p1 = [DataHelper.testWhiteNoise(y - y.mean(), m) for m in periods]
    if np.any(np.mat(p1) <= alpha):
        return False
    # Trend test: significance of the slope against the time index.
    p2 = LinearRegression.LinearRegression().fit(
        np.mat(range(0, y.shape[0])).T, y).betaP
    if p2[1, 0] <= alpha:
        return False
    # Runs test on the above/below-median indicator (0/1 vector).
    p3 = DataHelper.testRunsLeft((y > np.quantile(y, 0.5)) - 0)
    if p3 <= alpha:
        return False
    print("{0}, {1}, {2}".format(p1, p2.T, p3))
    return True
def onCreatReport(self):
    """Generate the drilling report for the drilling-surface technology design."""
    coal = SQLClientHelper.GetCoalById(self.coal_id)
    if coal.id <= 0:
        UiHelper.MessageBox(u'sorry,出了点问题,请联系技术人员(错误码:124)')
        return
    # Look up the extraction technology of the heading face.
    tws_tech = SQLClientHelper.GetDesignDrillingSurfTechnologyById(self.design_id)
    if tws_tech.id <= 0:
        UiHelper.MessageBox(u'sorry,出了点问题,请联系技术人员(错误码:125)')
        return
    # Ask CAD to generate the drilling-hole data.
    ret = CbmClientHelper.SendCommandToCAD("JL.GeneratePore12 %d %d" % (coal.id, tws_tech.id), True)
    if ret:
        # Show the drilling report.
        DataHelper.show_report12(coal, tws_tech)
    else:
        UiHelper.MessageBox(u'启动AutoCAD失败!!!')
def onCreatReport(self):
    """Generate the drilling report for the working-surface technology design."""
    coal = SQLClientHelper.GetCoalById(self.coal_id)
    if coal.id <= 0:
        UiHelper.MessageBox(u"sorry,出了点问题,请联系技术人员(错误码:134)")
        return
    # Look up the extraction technology of the working surface.
    ws_tech = SQLClientHelper.GetDesignWorkSurfTechnologyById(self.design_id)
    if ws_tech.id <= 0:
        UiHelper.MessageBox(u"sorry,出了点问题,请联系技术人员(错误码:135)")
        return
    # Ask CAD to generate the drilling-hole data.
    ret = CbmClientHelper.SendCommandToCAD("JL.GeneratePore21 %d %d" % (coal.id, ws_tech.id), True)
    if ret:
        # Show the drilling report.
        DataHelper.show_report21(coal, ws_tech)
    else:
        UiHelper.MessageBox(u"启动AutoCAD失败!!!")
def login_or_switch(self, uname, pwd): account_id = SQLClientHelper.GetAccountIdByField2('username', uname, 'password', pwd) # 查找已登录用户 pre_account_id = CbmClientHelper.GetOnlineAccountId() # 当前没有用户登录 if pre_account_id <= 0: # 用户登陆(记录在sys_info表中) DataHelper.sql_login_user(account_id) # UiHelper.MessageBox('恭喜您,登录成功!') # 当前已有用户登录 elif pre_account_id == account_id: # UiHelper.MessageBox('您已经登录过了!') pass else: reply = UiHelper.MessageBox('是否注销并切换到用户%s?' % uname, True) if reply == True: # 切换用户 DataHelper.sql_switch_user(account_id)
def __findCluster(self, dataSet, center):
    """Assign each sample to its nearest center by Manhattan distance.

    Returns (column vector of nearest-center indices,
             column vector of the corresponding minimal distances).
    """
    distances = None
    for row in range(0, center.shape[0]):
        # One distance column per cluster center, stacked side by side.
        column = DataHelper.calcManhattanDistance(dataSet, center[row, :])
        distances = column if distances is None else np.hstack(
            (distances, column))
    return distances.argmin(1), distances.min(1)
def login_or_switch(self, uname, pwd): account_id = SQLClientHelper.GetAccountIdByField2( 'username', uname, 'password', pwd) # 查找已登录用户 pre_account_id = CbmClientHelper.GetOnlineAccountId() # 当前没有用户登录 if pre_account_id <= 0: # 用户登陆(记录在sys_info表中) DataHelper.sql_login_user(account_id) # UiHelper.MessageBox('恭喜您,登录成功!') # 当前已有用户登录 elif pre_account_id == account_id: # UiHelper.MessageBox('您已经登录过了!') pass else: reply = UiHelper.MessageBox('是否注销并切换到用户%s?' % uname, True) if reply == True: # 切换用户 DataHelper.sql_switch_user(account_id)
def onCreatReport(self):
    """Generate the drilling report for the goaf technology design."""
    coal = SQLClientHelper.GetCoalById(self.coal_id)
    if coal.id <= 0:
        UiHelper.MessageBox(u'sorry,出了点问题,请联系技术人员(错误码:144)')
        return
    # Look up the goaf extraction technology.
    goaf_tech = SQLClientHelper.GetDesignGoafTechnologyById(self.design_id)
    if goaf_tech.id <= 0:
        UiHelper.MessageBox(u'sorry,出了点问题,请联系技术人员(错误码:145)')
        return
    # Ask CAD to generate the drilling-hole data.
    ret = CbmClientHelper.SendCommandToCAD(
        "JL.GeneratePore32 %d %d" % (coal.id, goaf_tech.id), True)
    if ret:
        # Show the drilling report.
        DataHelper.show_report32(coal, goaf_tech)
    else:
        UiHelper.MessageBox(u'启动AutoCAD失败!!!')
def main():
    """Train a perceptron on the hw2 dataset and print its test error rate."""
    loader = DataHelper.Data()
    x_train, y_train, x_test, y_test, _ = loader.loadData(
        "hw2_data_1.txt", 2, 70)  # load data
    initial_weights = np.ones(x_train.shape[1] + 1)
    epochs = 50
    model = perceptron(initial_weights, learningRate=1, epoch=epochs)
    model.train(x_train, y_train)
    error = model.evaluate(x_test, y_test)
    print("The error rate for perceptron after %i epochs is %.2f %%" %
          (epochs, error))
def onCacl(self):
    """Compute Qm, eta and wc from the dialog fields and write them back.

    NOTE(review): the early return right below short-circuits the whole
    handler after the drilling_ratio call -- everything after it is dead
    code, apparently left in a debugging state; confirm before shipping.
    """
    DataHelper.drilling_ratio(40, 10, 1)
    return
    # Read data from the UI.
    r, ok = self.ui.r.text().toDouble()
    l, ok = self.ui.l.text().toDouble()
    k1, ok = self.ui.k1.text().toDouble()
    rho, ok = self.ui.rho.text().toDouble()
    q0, ok = self.ui.q0.text().toDouble()
    a, ok = self.ui.a.text().toDouble()
    t, ok = self.ui.t.text().toDouble()
    h, ok = self.ui.hw.text().toDouble()
    w, ok = self.ui.gas_w0.text().toDouble()
    qm, ok = self.ui.qm.text().toDouble()
    q, ok = self.ui.qsum.text().toDouble()
    eta, ok = self.ui.eta.text().toDouble()
    # Validate.
    if r == 0 or h == 0 or w == 0 or l == 0:
        UiHelper.MessageBox(u'所有数据都必须大于0!!!')
        return
    # Compute Qm.
    if 2 * r < h or abs(2 * r - h) < 1e-3:
        s = pi * r * r  # cross-section area
        qm = 1440 * k1 * q0 * (1 - exp(-1 * a * t)) / (rho * a * s * (r + l))
    else:
        s = 2 * r * r * asin(0.5 * h / r) + h * sqrt(r * r - 0.25 * h * h)
        qm = 1440 * k1 * q0 * (1 - exp(-1 * a * t)) / (rho * a * s * (r + l))
    # Compute eta and wc.
    eta = qm / w
    wc = w - q
    # Write the results back to the UI.
    self.ui.qm.setText(u'%.1f' % qm)
    self.ui.eta.setText(u'%.1f' % eta)
    self.ui.gas_wc3.setText(u'%.1f' % wc)
    UiHelper.MessageBox(u'更新计算结果成功!')
    # Close the dialog and return 1.
    # self.accept()
def run():
    """Application entry point: set up Qt, show the login, then the main window."""
    import sys
    app = QtGui.QApplication(sys.argv)
    # Force UTF-8 encoding.
    SetUTF8Code(app)
    # Apply the application style sheet.
    SetStyle(app, 'blue')
    # Use the Chinese Qt translations (message boxes, menus, etc.).
    SetChinese(app)
    # The login window runs first.
    if loginFirst():
        # Launch CAD
        # doc.RunCAD()
        # After a successful login, start the main window.
        mw = MainWindow()
        mw.show()
        # Enter the event loop.
        app.exec_()
        # Log out (clear the sys_info table).
        DataHelper.sql_logout()
def onCacl(self):
    """Compute Qm, eta and wc from the dialog fields and write them back.

    NOTE(review): the early return right below short-circuits the whole
    handler after the drilling_ratio call -- everything after it is dead
    code, apparently left in a debugging state; confirm before shipping.
    """
    DataHelper.drilling_ratio(40,10,1)
    return
    # Read data from the UI.
    r, ok = self.ui.r.text().toDouble()
    l, ok = self.ui.l.text().toDouble()
    k1, ok = self.ui.k1.text().toDouble()
    rho, ok = self.ui.rho.text().toDouble()
    q0, ok = self.ui.q0.text().toDouble()
    a, ok = self.ui.a.text().toDouble()
    t, ok = self.ui.t.text().toDouble()
    h, ok = self.ui.hw.text().toDouble()
    w, ok = self.ui.gas_w0.text().toDouble()
    qm, ok = self.ui.qm.text().toDouble()
    q, ok = self.ui.qsum.text().toDouble()
    eta, ok = self.ui.eta.text().toDouble()
    # Validate.
    if r == 0 or h == 0 or w == 0 or l == 0:
        UiHelper.MessageBox(u'所有数据都必须大于0!!!')
        return
    # Compute Qm.
    if 2*r < h or abs(2*r - h) < 1e-3:
        s = pi * r * r  # cross-section area
        qm = 1440*k1*q0*(1-exp(-1*a*t)) / (rho * a * s *(r+l))
    else:
        s = 2 * r * r * asin(0.5*h/r) + h*sqrt(r*r -0.25*h*h)
        qm = 1440*k1*q0*(1-exp(-1*a*t)) / (rho * a * s *(r+l))
    # Compute eta and wc.
    eta = qm / w
    wc = w - q
    # Write the results back to the UI.
    self.ui.qm.setText(u'%.1f' % qm)
    self.ui.eta.setText(u'%.1f' % eta)
    self.ui.gas_wc3.setText(u'%.1f' % wc)
    UiHelper.MessageBox(u'更新计算结果成功!')
    # Close the dialog and return 1.
    # self.accept()
def try_run(self, DialogClass, authority):
    """Gate DialogClass behind login/authority checks, then run it.

    Loops until either the login state satisfies `authority` or the user
    gives up; on each failed check the user is offered a fresh login.
    """
    can_run = True
    while can_run:
        # Check the user's login status.
        ret = DataHelper.sql_login_status()
        # Internal error.
        if ret == 0 or ret == -1:
            UiHelper.MessageBox(u"系统技术性故障(错误码:M1),请联系技术人员!",error = True)
            can_run = False
            break
        # Not logged in yet.
        elif ret == 2:
            if UiHelper.MessageBox(u"您需要登录才能使用本功能!", error = True):
                can_run = self.login()  # log in
            else:
                can_run = False
                break
        # An administrator is logged in but a user-level feature was asked for.
        elif ret == 3 and authority == Authority.USER:
            if UiHelper.MessageBox(u"管理员禁止使用该功能,请重新登录!", error = True):
                can_run = self.login()  # log in
            else:
                can_run = False
                break
        # A normal user is logged in but an admin feature was asked for.
        elif ret == 1 and authority == Authority.ADMIN:
            if UiHelper.MessageBox(u"您的权限不够,请重新登录!", error = True):
                can_run = self.login()  # log in
            else:
                can_run = False
                break
        # Stop looping once the (possibly re-logged-in) user has authority.
        if can_run and DataHelper.sql_login_authority(authority):
            break
    # Launch the dialog.
    if can_run:
        # Launch the dialog (for the currently logged-in user).
        self.run_dialog(DialogClass)
def onPartition(self):
    """Partition the selected working surface into evaluation units and draw them."""
    index = self.ui.work_surf.currentIndex()
    if index < 0:
        UiHelper.MessageBox(u'请指定一个工作面进行设计!')
        return
    work_surf_id, ok = self.ui.work_surf.itemData(index).toInt()
    work_surf = SQLClientHelper.GetWorkSurfById(work_surf_id)
    if work_surf.id <= 0:
        UiHelper.MessageBox(u'sorry,出了点问题,请联系技术人员(错误码:Z9)')
        return
    deup = SQLClientHelper.GetDesignEvalUnitPartitionByForeignKey('work_surf_id', work_surf_id)
    if deup.id <= 0:
        UiHelper.MessageBox(u'sorry,出了点问题,请联系技术人员(错误码:Z12)')
        return
    # Design the evaluation units.
    DataHelper.design_eval_unit(deup.id)
    # Ask CAD to draw the evaluation-unit diagram.
    ret = CbmClientHelper.SendCommandToCAD("JL.DrawEvalUnitGraph %d" % deup.id, True)
    if not ret:
        UiHelper.MessageBox(u'启动AutoCAD失败')
def onAddCoal(self):
    """Prompt for a name, create a coal seam in the DB, add it to the combo box."""
    # Pop up the name dialog and fetch the name.
    coal_name = unicode(UiHelper.GetNameFromDlg(u"新增煤层"))
    if coal_name == u'null' or coal_name == u'':
        UiHelper.MessageBox(u"请输入有效的名称!")
    else:
        # Create the coal seam and commit it to the database.
        coal_id = DataHelper.sql_create_coal(coal_name.encode('utf-8'), self.mine_id)
        if coal_id <= 0:
            UiHelper.MessageBox(u"新增煤层失败")
        else:
            # Add it to the drop-down list.
            index = UiHelper.AddItemToCombobox(self.ui.coal, coal_name, coal_id)
            # Switch to the newly added item.
            self.ui.coal.setCurrentIndex(index)
def onHelp(self):
    """Fill in default margin distances based on the coal-seam dip angle."""
    angle, ok = self.ui.dip_angle.text().toDouble()
    if angle < 0:
        UiHelper.MessageBox(u'煤层倾角不能小于0!!!')
        return
    # Default margins; the seam type is classified by dip angle below.
    left, right, top, bottom = 15, 15, 15, 15
    # Per the gas-outburst regulations, adjust the top/bottom margins.
    if DataHelper.dip_angle_type(angle) > 2:
        # Inclined / steeply inclined seam.
        top, bottom = 20, 10
    # Update the UI.
    self.ui.left.setText(u'%.1f' % left)
    self.ui.right.setText(u'%.1f' % right)
    self.ui.top.setText(u'%.1f' % top)
    self.ui.bottom.setText(u'%.1f' % bottom)
def onAddDrillingSurf(self):
    """Prompt for a name and create a drilling surface under the current work area."""
    # Get the current work-area id.
    index = self.ui.work_area.currentIndex()
    if index < 0:
        return
    work_area_id, ok = self.ui.work_area.itemData(index).toInt()
    # Pop up the name dialog and fetch the name.
    drilling_surf_name = unicode(UiHelper.GetNameFromDlg(u"新增掘进工作面"))
    if drilling_surf_name == u'null' or drilling_surf_name == u'':
        UiHelper.MessageBox(u"请输入有效的名称!")
    else:
        # Create the drilling surface and commit it to the database.
        drilling_surf_id = DataHelper.sql_create_drilling_surf(drilling_surf_name.encode('utf-8'), work_area_id)
        if drilling_surf_id <= 0:
            UiHelper.MessageBox(u"新增掘进工作面失败")
        else:
            # Add it to the drop-down list.
            index = UiHelper.AddItemToCombobox(self.ui.drilling_surf, drilling_surf_name, drilling_surf_id)
            # Switch to the newly added item.
            self.ui.drilling_surf.setCurrentIndex(index)
def onAddWorkArea(self):
    """Prompt for a name and create a work area under the current coal seam."""
    # Get the current coal-seam id.
    index = self.ui.coal.currentIndex()
    if index < 0:
        return
    coal_id, ok = self.ui.coal.itemData(index).toInt()
    # Pop up the name dialog and fetch the name.
    work_area_name = unicode(UiHelper.GetNameFromDlg(u"新增采区"))
    if work_area_name == u'null' or work_area_name == u'':
        UiHelper.MessageBox(u"请输入有效的名称!")
    else:
        # Create the work area and commit it to the database.
        work_area_id = DataHelper.sql_create_work_area(work_area_name.encode('utf-8'), coal_id)
        if work_area_id <= 0:
            UiHelper.MessageBox(u"新增采区失败")
        else:
            # Add it to the drop-down list.
            index = UiHelper.AddItemToCombobox(self.ui.work_area, work_area_name, work_area_id)
            # Switch to the newly added item.
            self.ui.work_area.setCurrentIndex(index)
updateVarNames.py Creates new csv of variable code and name pairs with parent variable names added to variable names ''' import csv import sys sys.path.insert(0, '..') import DataHelper file_name = sys.argv[1] with open(file_name, 'rb') as f: # open list of variable code and name pairs labels_csv = csv.reader(f) with open('codes_names_new.csv', 'wb') as nf: # open new file for updated list of variable code and name pairs labels_csv_new = csv.writer(nf) parent_codes_names = DataHelper.create_labelcode_dict('parent_codes_names.csv') # update each line in original csv and add to new csv for row in labels_csv: var_code = row[1].split('_')[0] if var_code in parent_codes_names: var_name = parent_codes_names[var_code] extended_varname = var_name + ':!!' + row[2] newline = [row[1], extended_varname] if row[1] == 'B01001H_002E': print extended_varname print newline labels_csv_new.writerow(newline)
def onPermeabilityLambdaGetFocus(self):
    """Derive the permeability coefficient (lambda) from the permeability-k field."""
    # Convert "permeability k" into "permeability coefficient lambda".
    k_value, ok = self.ui.permeability_k.text().toDouble()
    lambda_value = DataHelper.permeability_k_to_lambda(k_value)
    # Push the computed value back to the UI.
    self.ui.permeability_lambda.setText(u'%.2f' % lambda_value)
'''
updateVarNames.py

creates new csv of variable code and name pairs with parent variable names
added to variable names
'''
import csv
import sys

sys.path.insert(0, '..')
import DataHelper

with open('codes_names.csv', 'rb') as f:
    # Open the list of variable code and name pairs.
    labels_csv = csv.reader(f)
    with open('codes_names_new.csv', 'wb') as nf:
        # Open the new file for the updated list of code/name pairs.
        labels_csv_new = csv.writer(nf)
        parent_codes_names = DataHelper.labels_to_codes('parent_codes_names.csv')
        # TODO: gotta update parent_codes csv to only contain important parents
        # Update each line in the original csv and add it to the new csv.
        for row in labels_csv:
            # Parent code is the first six characters of the variable code.
            var_code = row[0][:6]
            print var_code
            if var_code in parent_codes_names:
                var_name = parent_codes_names[var_code]
                newline = [row[0], var_name + ':!!' + row[1]]
                labels_csv_new.writerow(newline)
def onPermeabilityKGetFocus(self):
    """Derive permeability k from the permeability-coefficient (lambda) field."""
    # Convert "permeability coefficient lambda" into "permeability k".
    lambda_value, ok = self.ui.permeability_lambda.text().toDouble()
    k_value = DataHelper.permeability_lambda_to_k(lambda_value)
    # Push the computed value back to the UI.
    self.ui.permeability_k.setText(u'%.2f' % k_value)
def real_logout(self):
    """Log the current user out and flip the login/logout menu actions."""
    # Log out (clear the sys_info table).
    DataHelper.sql_logout()
    # Update the menu enabled states.
    self.loginAction.setEnabled(True)
    self.logoutAction.setEnabled(False)