def register_client(self, id):
    """
    Server-side RPC handler: register an agent, make sure its dedicated
    command queue exists, then push an update-script command onto it.

    :param id: agent identifier (stringified for internal bookkeeping).
    :return: (0, "OK") on success, (-1, exception text) on failure.
    """
    try:
        Logging.getLog().debug("receive register_client(%s)\n" % (id))
        client_key = str(id)
        self._clients_dict[client_key] = OrderedDict()
        # Create the agent's command queue on first registration.
        queue_name = "cmd_queue_%s" % client_key
        if queue_name not in self._channel_dict.keys():
            ch = self._connection.channel()
            ch.queue_declare(queue=queue_name)
            self._channel_dict[queue_name] = ch
        # Tell the agent to refresh its scripts.
        payload = copy.copy(gl.update_script_msg)
        self._channel_dict[queue_name].basic_publish(
            exchange='',
            routing_key=queue_name,
            body=json.dumps(payload, ensure_ascii=False))
        return 0, "OK"
    except:
        exc_info = cF.getExceptionInfo()
        print(exc_info)
        return -1, exc_info
def CaptureScreen(funcid):
    """
    Capture the screen and save it under the local error-photo directory.

    Args:
        funcid: identifier used as the screenshot's file name.

    Returns:
        (ret, msg): ret 0 on success with msg the saved file path,
        -1 on failure with msg the exception detail.
    """
    try:
        print("CaptureScreen")
        im = ImageGrab.grab()
        global error_photo_path
        if not os.path.exists(error_photo_path):
            os.makedirs(error_photo_path)
        # Save the error screenshot as <error_photo_path>\<funcid>.jpeg.
        jpg_file = "%s\\%s.jpeg" % (error_photo_path, str(funcid))
        print("jpgfile = %s" % jpg_file)
        im.save(jpg_file, 'jpeg')
        # BUG FIX: the original fell through and returned None even though
        # the documented contract is a (ret, msg) tuple.
        return 0, jpg_file
    except:
        exc_info = cF.getExceptionInfo()
        # BUG FIX: the original called logging.error().error(exc_info);
        # logging.error() needs a message argument and returns None, so
        # that line itself raised inside the handler.
        logging.error(exc_info)
        return -1, exc_info
def AddStockAsset(servertype, operid_1, operid_2, trdpwd, orgid, fundid, secuid, market, stkcode, count):
    '''
    Add stock assets to an account via the KCBP counter interface.
    Only Shanghai and Shenzhen markets are supported (original note:
    只支持沪市和深市).

    Flow: funcid 150402 files the asset change as operator operid_1,
    then funcid 150420 confirms it (action:1) as operator operid_2,
    using the serial number (sno) returned by the first call.

    Returns (0, "OK") on success, (-1, detail) on any failure; the
    adapter session is closed on every exit path except the final
    success return's preceding KCBPCliExit().
    '''
    try:
        serverid = cF.get_core(servertype)
        adapter = KCBPAdapter(gl.g_connectSqlServerSetting[serverid]["xpip"])
        ret = adapter.KCBPCliInit()
        if ret < 0:
            return -1, u"初始化KCBP适配器失败"
        # First request: file the asset adjustment (funcid 150402).
        cmdstring = "g_serverid:%s,g_operid:%s,g_operpwd:%s,g_operway:4,g_funcid:150402,g_stationaddr:001641AA2350,orgid:%s,fundid:%s,secuid:%s,market:%s,stkcode:%s,stkeffect:%s,cost:10,remark:" % (
            str(serverid), operid_1, trdpwd, orgid, fundid, secuid, market, stkcode, str(count))
        ret, code, level, msg = adapter.SendRequest(cmdstring)
        if ret < 0 or code != "0":
            adapter.KCBPCliExit()
            return -1, u"ret=%s,code=%s,level=%s,msg=%s" % (str(ret), str(code), str(level), msg)
        ds = adapter.GetReturnResult()
        # sno: serial number of the filed request, required by the
        # confirmation call below.  Assumes the result layout puts it at
        # row 1, column 0 -- confirm against the KCBP result schema.
        sno = ds[1][0]
        # Second request: confirm the filed change (funcid 150420, action:1).
        cmdstring = "g_serverid:%s,g_operid:%s,g_operpwd:%s,g_operway:4,g_funcid:150420,g_stationaddr:06690A0013D2,operdate:%s,sno:%s,action:1,afmremark:autotest" % (
            serverid, operid_2, trdpwd, datetime.datetime.now().strftime('%Y%m%d'), sno)
        ret, code, level, msg = adapter.SendRequest(cmdstring)
        if ret < 0 or code != "0":
            adapter.KCBPCliExit()
            return -1, u"ret=%s,code=%s,level=%s,msg=%s" % (str(ret), str(code), str(level), msg)
        adapter.KCBPCliExit()
        return 0, "OK"
    except:
        exc_info = cF.getExceptionInfo()
        print exc_info
        return -1, exc_info
def deal_with_dataset_list(dataset_dict):
    """
    Flatten a middleware result dict into [field-names, value-rows].

    :param dataset_dict: result dictionary returned by the middleware.
    :return: (ret, data, dataset_list) -- ret is -1 when the payload
             carries a business rejection (both "business_reject_text"
             and "reject_reason_code" present), else 0; data collects
             message/field/values_list; dataset_list is
             [list-of-keys, [list-of-values]].
             On exception: (-1, exception-info, []).
    """
    try:
        print("dataset_list####################################")
        print(dataset_dict)
        dataset_list = []
        # Generic information block: stringified payload plus key list.
        data = {'message': str(dataset_dict)}
        data['field'] = list(dataset_dict.keys())
        dataset_list.append(data['field'])
        # Single row of values, wrapped in a list of rows.
        values_list = []
        values_list.append(list(dataset_dict.values()))
        data['values_list'] = values_list
        dataset_list.append(data['values_list'])
        # A business rejection in the payload marks the call as failed.
        # (FIX: removed dead `if True:` scaffolding and replaced the
        # Python-2-only dict.has_key() with the `in` operator.)
        if dataset_dict and "business_reject_text" in dataset_dict and "reject_reason_code" in dataset_dict:
            return -1, data, dataset_list
        return 0, data, dataset_list
    except BaseException as ex:
        exc_info = cF.getExceptionInfo()
        Logging.getLog().error(exc_info)
        return -1, exc_info, []
def register_client(self):
    """
    Register this agent with the server via RPC, then start the worker
    threads that consume the command queue and the task queue.

    :return: (0, "") on success, (-1, msg) on failure.
    """
    try:
        request = OrderedDict()
        request["func"] = "register_client"
        request["func_param"] = ("%s" % str(self._agent_id),)
        ret, msg = self.call_server_rpc(request)
        if ret < 0:
            Logging.getLog().error(msg)
            return -1, msg
        # Worker thread pulling the command queue.
        self._cmd_thread = threading.Thread(target=self.listen_cmd_queue_thread)
        self._cmd_thread.start()
        # Worker thread listening on the task queue.
        self._task_thread = threading.Thread(target=self.listen_task_queue)
        self._task_thread.start()
        return 0, ""
    except Exception as ex:
        exc_info = cF.getExceptionInfo()
        Logging.getLog().error(exc_info)
        return -1, ""
def dbf_read_write_info(path, index_column_name_values, update_column_name_value):
    """
    Update (or delete from) DBF/ini files according to key=value specs.

    e.g. dbf_read_write_info('C:\\test.dbf',
                             "YMTH=180000000040&KHRQ=20161013",
                             "KHMC=花果山水帘洞20180203")

    :param path: target file; a leading/embedded "delete" token switches
                 to deletion mode; a ZDMNHB.ini path is special-cased.
    :param index_column_name_values: '&'-joined column=value filters.
    :param update_column_name_value: column=value to apply; multiple
           updates may be packed with '|' (later items are
           "path;filters;update" triples separated by ';').
    :return: (0, ("",)) on success, (-1, exception-info) on failure.
    """
    try:
        Logging.getLog().info(u"path:%s, index_column_name_values:%s, update_column_name_value:%s" % (
            path, str(index_column_name_values), str(update_column_name_value)))
        if "ZDMNHB.ini" in path:
            update_ini_file(path, index_column_name_values, update_column_name_value)
            # BUG FIX: the original called logging.getLogger("更新成功 ZDMNHB.ini"),
            # which creates a logger named after the message and logs nothing.
            Logging.getLog().info("更新成功 ZDMNHB.ini")
            return 0, ("",)
        # BUG FIX: same logging.getLogger misuse as above.
        Logging.getLog().info("enter dbf_read_write_info")
        # '|' packs several updates: the first uses the given path/filters,
        # the rest carry their own "path;filters;update" triple.
        if '|' in update_column_name_value:
            update_column_name_values = update_column_name_value.split("|")
            if update_column_name_values:
                update_column_name_value = update_column_name_values[0]
                if 'delete' in path:
                    path = path.replace("delete", "")
                    delete_dbf_file(path, index_column_name_values, update_column_name_value)
                else:
                    update_dbf_file(path, index_column_name_values, update_column_name_value)
            if len(update_column_name_values) >= 2:
                for i in update_column_name_values[1:]:
                    path, index_column_name_values, update_column_name_value = i.split(";")
                    update_dbf_file(path, index_column_name_values, update_column_name_value)
        elif 'delete' in path:
            path = path.replace("delete", "")
            delete_dbf_file(path, index_column_name_values, update_column_name_value)
        else:
            update_dbf_file(path, index_column_name_values, update_column_name_value)
        return 0, ("",)
    except:
        exc_info = cF.getExceptionInfo()
        Logging.getLog().error(exc_info)
        return -1, exc_info
def call_server_rpc(self, func_body_dict):
    """
    Synchronous RPC over RabbitMQ: publish func_body_dict to
    'server_rpc_queue' and block until the correlated reply arrives
    (the consumer elsewhere fills self.result_dict[corr_id]).

    :param func_body_dict: {"func": name, "func_param": args} payload.
    :return: (0, reply) on success; (-1, reason) when the process is
             shutting down (gl.is_exit_all_thread), on Ctrl-C, or on
             any other exception.
    """
    try:
        # Fresh correlation id per call; the reply consumer keys on it.
        self.corr_id = str(uuid.uuid4())
        self.result_dict[self.corr_id] = None
        self.rpc_channel.basic_publish(exchange='',
                                       routing_key='server_rpc_queue',
                                       properties=pika.BasicProperties(
                                           reply_to=self.callback_queue,
                                           correlation_id=self.corr_id,
                                       ),
                                       body=json.dumps(func_body_dict, ensure_ascii=False))
        try:
            # Pump pika's I/O loop until the reply lands or shutdown is
            # requested.  NOTE(review): busy-polls with no timeout.
            while self.result_dict[self.corr_id] is None and gl.is_exit_all_thread == False:
                self._rpc_connection.process_data_events()
            if gl.is_exit_all_thread == True:
                Logging.getLog().debug("exit call_server_rpc")
                return -1, "exit call_server_rpc"
            else:
                Logging.getLog().debug("call %s return %s" %(func_body_dict["func"], self.result_dict[self.corr_id]))
                return 0, self.result_dict[self.corr_id]
        except KeyboardInterrupt:
            Logging.getLog().debug("get KeyboardInterrupt")
            return -1, "KeyboardInterrupt"
    except Exception, ex:
        exc_info = cF.getExceptionInfo()
        Logging.getLog().error(exc_info)
        return -1, ""
def get_queue_msg(self):
    """
    Query the local RabbitMQ management API for queue names and
    message counts.

    :return: the parsed JSON queue listing on success, or the tuple
             (-1, exception-info) if the HTTP request could not be
             sent.  NOTE(review): the two return shapes differ; kept
             for caller compatibility.
    """
    conn = httplib.HTTPConnection("127.0.0.1", "15672")
    auth = ("timesvc:timesvc")
    headers = {"Authorization": "Basic "+base64.b64encode(auth)}
    path = '/api/queues?columns=name,messages'
    try:
        conn.request('GET', path, '', headers)
    except socket.error as e:
        exc_info = cF.getExceptionInfo()
        Logging.getLog().error(exc_info)
        return -1, exc_info
    resp = conn.getresponse()
    # BUG FIX: an HTTP response body can only be read once; the original
    # called resp.read() again in later branches and therefore always
    # parsed an empty string after any error status.  Read it once here.
    body = resp.read()
    if resp.status == 400:
        Logging.getLog().error(json.loads(body)['reason'])
    elif resp.status == 401:
        Logging.getLog().error('access refused')
    elif resp.status == 404:
        Logging.getLog().error('Not found')
    elif resp.status == 301:
        Logging.getLog().error('301 error')
    if resp.status < 200 or resp.status > 400:
        Logging.getLog().error("Received %d %s for path %s\n%s"
                               % (resp.status, resp.reason, path, body))
    data = body.decode('utf-8')
    queue_msg_dict = json.loads(data)
    return queue_msg_dict
def rpc_process(self, ch, method, props, body):
    """
    RabbitMQ RPC dispatcher: decode the request, invoke the named
    method on self, and publish the result to the reply queue.

    Expected message: {"func": <method name>, "func_param": "" or an
    iterable of arguments}.  The reply {"return": ret,
    "return_info": ret_info} goes to props.reply_to with the request's
    correlation_id; the delivery is acked afterwards.  Exceptions are
    logged and swallowed (no reply is sent, no ack happens).
    """
    try:
        msg_dict = json.loads(body)
        return_dict = OrderedDict()
        # Build the call expression; every parameter is passed as a
        # single-quoted string literal.
        if msg_dict["func_param"] == "":
            func_call = "self.%s()" % msg_dict["func"]
        else:
            func_call = "self.%s(%s)" % (msg_dict["func"], ','.join("'%s'" % str(i) for i in msg_dict["func_param"]))
        Logging.getLog().debug(func_call)
        # SECURITY NOTE(review): eval() of queue-supplied text executes
        # arbitrary code if untrusted clients can reach this queue; a
        # getattr(self, name)(*params) dispatch would be safer.
        # Dispatched methods return a (ret, ret_info) pair.
        ret, ret_info = eval(func_call)
        return_dict["return"] = ret
        return_dict["return_info"] = ret_info
        ch.basic_publish(exchange='',
                         routing_key=props.reply_to,
                         properties=pika.BasicProperties(correlation_id= \
                                                             props.correlation_id),
                         body=json.dumps(return_dict, ensure_ascii=False))
        ch.basic_ack(delivery_tag=method.delivery_tag)
    except Exception, ex:
        exc_info = cF.getExceptionInfo()
        Logging.getLog().error(exc_info)
def CheckJZJYColumnSetting(): Logging.getLog().info(u"更新集中交易数据库配置文件开始") # 读取文件 doc = etree.ElementTree(file='JZJY_column_report.xml') root = doc.getroot() # 备份接口参数文件 doc = etree.ElementTree(root) # root 为根元素 doc.write("JZJY_column_report.xml%s.xml" % time.strftime('%Y%m%d%H%M%S'), pretty_print=True, xml_declaration=True, encoding='utf-8') root = ET.parse("JZJY_column_report.xml").getroot() root_scan = etree.ElementTree(file="JZJY_column_report_scan.xml").getroot() for schemaElem in root: # print schemaElem.attrib['schemaname'] for tableElem in schemaElem: # print tableElem.tag,tableElem.attrib['tablename'] # if tableElem.attrib['disableflag']=='1': if tableElem.attrib.get("disableflag", None) == "1": try: xpath = "schema[@schemaname='%s']/table[@tablename='%s']" % ( schemaElem.attrib['schemaname'], tableElem.attrib['tablename']) column_elem_y = root_scan.find(xpath) if column_elem_y == None: continue column_elem_y.attrib["disableflag"] = '1' except Exception, ex: exc_info = cF.getExceptionInfo() Logging.getLog().critical(exc_info) return -1, exc_info for columnElem in tableElem: # print columnElem.tag,columnElem.attrib['column_name'] try: if columnElem.attrib['disableflag'] == '1': xpath = "schema[@schemaname='%s']/table[@tablename='%s']/column[@column_name='%s']" % ( schemaElem.attrib['schemaname'], tableElem.attrib['tablename'], columnElem.attrib['column_name']) column_elem = root_scan.find(xpath) if column_elem == None: continue column_elem.attrib["disableflag"] = "1" except Exception, ex: exc_info = cF.getExceptionInfo() Logging.getLog().critical(exc_info) return -1, exc_info
def read_curr_task_list(self):
    """Return (0, items) with the full contents of the Redis list
    "curr_task_list", or (-1, error-info) on a Redis failure."""
    try:
        return 0, self.r.lrange("curr_task_list", 0, -1)
    except redis.RedisError:
        detail = cF.getExceptionInfo()
        Logging.getLog().error(detail)
        return -1, detail
def read_task_env_init_state(self, task_id):
    """Read the INIT_STATE field of a task's init-state hash.

    :param task_id: task identifier (stringified for the key).
    :return: (0, state) or (-1, error-info) on a Redis failure.
    """
    try:
        key = "task_init_state:%s" % str(task_id)
        return 0, self.r.hget(key, "INIT_STATE")
    except redis.RedisError:
        detail = cF.getExceptionInfo()
        Logging.getLog().error(detail)
        return -1, detail
def selct():
    # NOTE(review): the name looks like a typo for "select"; renaming
    # would break callers, so it is kept.  The function only opens a
    # connection: `conn` is never used and there is no success return
    # (callers get None).  Looks truncated -- confirm against the full
    # file before changing.
    try:
        conn = DB2SkDB()
    except BaseException, ex:
        exc_info = cF.getExceptionInfo()
        err = u"连接数据库出现异常,%s" % exc_info
        Logging.getLog().critical(err)
        return -1, err
def add_new_task_to_redis(self, task_id, collection_id, collection_name, system_id, cases_num, timed_task_flag, run_record_id_list, warning_info, agent_list):
    """
    Store a newly created task in Redis (all writes queued through one
    pipeline so they land together).

    :param task_id: task identifier.
    :param collection_id: id of the case collection being run.
    :param collection_name: display name of the collection.
    :param system_id: system under test.
    :param cases_num: number of cases in the task.
    :param timed_task_flag: whether this is a scheduled task.
    :param run_record_id_list: run-record ids belonging to the task.
    :param warning_info: warning text, as a string.
    :param agent_list: test-agent id list.
    :return: (0, "OK") or (-1, error-info) on a Redis failure.
    """
    try:
        Logging.getLog().debug("Redis:add_new_task_to_redis(%s)" % str(task_id))
        start_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        tid = str(task_id)
        record_set_key = "task_record_id_set:%s" % tid
        info_key = "task_info:%s" % tid
        state_key = "task_init_state:%s" % tid

        pipe = self.r.pipeline()
        pipe.rpush("curr_task_list", task_id)
        pipe.delete(record_set_key)
        for record_id in run_record_id_list:
            pipe.sadd(record_set_key, record_id)
        # Task bookkeeping fields (command order preserved).
        for field, value in (("collection_id", collection_id),
                             ("collection_name", collection_name),
                             ("system_id", system_id),
                             ("cases_num", cases_num),
                             ("timed_task_flag", timed_task_flag),
                             ("run_start_time", start_time),
                             ("run_end_time", ""),
                             ("run_case_num", 0),
                             ("run_case_num_success", 0),
                             ("run_case_num_fail", 0),
                             ("warning_info", warning_info),
                             ("cancel_flag", 0)):
            pipe.hset(info_key, field, value)
        # Initial environment-setup state of the task.
        # ("AGNET_ID" spelling kept: other code reads that exact key.)
        for field, value in (("AGNET_ID", ""),
                             ("INIT_STATE", "STATE_UNINIT"),
                             ("INIT_MSG", ""),
                             ("SYNC_FLAG", "FALSE"),  # synced to DB yet?
                             ("SYSTEM_ID", system_id)):
            pipe.hset(state_key, field, value)
        pipe.rpush("agent_curr_task", task_id)
        pipe.hset("task_agent_table", task_id, agent_list)
        pipe.execute()
        return 0, "OK"
    except redis.RedisError:
        detail = cF.getExceptionInfo()
        Logging.getLog().error(detail)
        return -1, detail
def remove_account_group_dict(self, task_id_list):
    """Drop the "account_group_dict" hash entries of the given tasks.

    :param task_id_list: task ids whose entries should be removed.
    :return: (0, "OK") or (-1, error-info) on a Redis failure.
    """
    try:
        pipe = self.r.pipeline()
        for task_id in task_id_list:
            pipe.hdel("account_group_dict", task_id)
        pipe.execute()
        return 0, "OK"
    except redis.RedisError:
        detail = cF.getExceptionInfo()
        Logging.getLog().error(detail)
        return -1, detail
def deal_with_pre_pro_info(colnames, FLAG, path, old_param_new_params, funcid, ACCOUNT_NUM_INFO):
    """
    Scan an Excel workbook of test cases, rewrite the pre-processing
    actions (PRE_PRO) of rows matching `funcid` by substituting account
    placeholders, and export all rows via cF.export_data.

    Expected sheet layout per row: [CASE_NAME, ACCOUNTGROUPID, func_id,
    PRE_PRO] -- assumed from the indexing below; confirm.

    :param colnames: column names forwarded to cF.export_data.
    :param FLAG: export flag forwarded to cF.export_data.
    :param path: workbook path.
    :param old_param_new_params: {placeholder: replacement} map applied
           inside PRE_PRO.
    :param funcid: only rows with this function id are rewritten.
    :param ACCOUNT_NUM_INFO: fallback {placeholder: account} map used
           when the row carries no ACCOUNTGROUPID.
    :return: (pre_pro, ACCOUNT_GROUP_ID) of the last matching row, or a
             4-tuple (-1, err, "", "") on exception.  NOTE(review): the
             two return shapes differ in arity.
    """
    try:
        ACCOUNT_GROUP_ID = ""
        result = []
        wb = load_workbook(path)
        sheets = wb.get_sheet_names()
        pre_pro = ""
        count = 0
        # f = open("funcid_account_info", 'a+')
        for i in sheets:
            sheet = wb.get_sheet_by_name(i)
            for row in sheet.iter_rows():
                count += 1
                # Skip the header row.  NOTE(review): `count` is global
                # across sheets, so only the very first sheet's header
                # is skipped -- confirm this is intended.
                if count == 1:
                    continue
                ACCOUNT = ""
                line_data = []
                CASE_NAME = row[0].value
                ACCOUNTGROUPID = row[1].value
                func_id = row[2].value
                PRE_PRO = row[3].value
                # Normalize missing/"None" account group ids to "".
                if not ACCOUNTGROUPID or ACCOUNTGROUPID == 'None':
                    ACCOUNTGROUPID = ""
                ACCOUNT = ACCOUNTGROUPID
                # Only rewrite rows for the requested function id.
                if func_id and str(funcid) == str(func_id) and PRE_PRO:
                    for accout_group in old_param_new_params:
                        if str(accout_group) in PRE_PRO:
                            # Prefer the row's own account group; fall
                            # back to the placeholder's mapped account.
                            if ACCOUNTGROUPID and ACCOUNTGROUPID != 'None':
                                ACCOUNT = ACCOUNTGROUPID
                            else:
                                ACCOUNT = ACCOUNT_NUM_INFO[str(accout_group)]
                            funcid_account = str(func_id) + "=" + str(ACCOUNT) + "\n"
                            # f.write(funcid_account)
                            PRE_PRO = PRE_PRO.replace(str(accout_group), old_param_new_params[str(accout_group)])
                    if ACCOUNT == "":
                        ACCOUNT_GROUP_ID = 0
                    else:
                        ACCOUNT_GROUP_ID = int(ACCOUNT)
                    pre_pro = PRE_PRO
                    # break
                line_data.append(CASE_NAME)
                line_data.append(str(ACCOUNT))
                line_data.append(func_id)
                line_data.append(PRE_PRO)
                result.append(line_data)
        # f.close()
        cF.export_data(colnames, FLAG, result)
        return pre_pro, ACCOUNT_GROUP_ID
    except BaseException, ex:
        exc_info = cF.getExceptionInfo()
        print u"异常发生,请检查日志文件,提示信息为:%s" % (exc_info)
        return -1, u"异常发生,请检查日志文件,提示信息为:%s" % (exc_info), "", ""
def report_case_result(self, task_id, case_result_dict):
    """Record case results for a task and retire the finished records.

    For every record id: write its result into the task's
    "case_result" hash and remove it from the pending-record set.

    :param task_id: task identifier.
    :param case_result_dict: maps run-record id -> result payload.
    :return: (0, "OK") or (-1, error-info) on a Redis failure.
    """
    try:
        result_key = "case_result:%s" % str(task_id)
        pending_key = "task_record_id_set:%s" % str(task_id)
        pipe = self.r.pipeline()
        for record_id in case_result_dict.keys():
            pipe.hset(result_key, record_id, case_result_dict[record_id])
            pipe.srem(pending_key, record_id)
        pipe.execute()
        return 0, "OK"
    except redis.RedisError:
        detail = cF.getExceptionInfo()
        Logging.getLog().error(detail)
        return -1, detail
def remove_case_result(self, task_id, run_record_id_list):
    """Delete the stored results of the given run records for a task.

    :param task_id: task identifier.
    :param run_record_id_list: record ids to drop from the result hash.
    :return: (0, "OK") or (-1, error-info) on a Redis failure.
    """
    try:
        result_key = "case_result:%s" % str(task_id)
        pipe = self.r.pipeline()
        for record_id in run_record_id_list:
            pipe.hdel(result_key, record_id)
        pipe.execute()
        return 0, "OK"
    except redis.RedisError:
        detail = cF.getExceptionInfo()
        Logging.getLog().error(detail)
        return -1, detail
def truncateHTKSempTable():
    """
    Empty the autotest scratch tables in DB2: the shared
    KS.TMP_AUTOTEST_TRIGGER table plus the per-table AUTO_I_/AUTO_D_
    audit tables of every enabled table in
    DB2_column_report_scan.xml.

    Returns (-1, err) on failure.  NOTE(review): the visible code has
    no success return and never closes the connection on success --
    presumably handled later in the original file; confirm.
    """
    Logging.getLog().info(u"准备清空临时表……")
    doc = ET.parse("DB2_column_report_scan.xml")
    table_root = doc.getroot()
    conn = DB2SkDB()
    crs = conn.cursor()
    try:
        # Truncate the shared trigger scratch table if it exists.
        sql = "select NAME from sysibm.systables where type='T' AND CREATOR='KS' AND NAME='TMP_AUTOTEST_TRIGGER'"
        crs.execute(sql)
        print sql
        ds = crs.fetchall()
        conn.commit()
        if len(ds) != 0:
            sql = "truncate table KS.TMP_AUTOTEST_TRIGGER IMMEDIATE;"
            crs.execute(sql)
            conn.commit()
        # Truncate each enabled table's insert/delete audit tables.
        for schemaElem in table_root:
            for i, tableElem in enumerate(schemaElem):
                if tableElem.attrib["disableflag"] == "1":
                    continue
                schema = schemaElem.attrib["schemaname"]
                tablename = tableElem.attrib["tablename"]
                # Existence probes for the audit tables.
                sql_i = "select COUNT(*) from sysibm.systables where CREATOR='KS' AND name='AUTO_I_%s';" % (tablename)
                sql_d = "select COUNT(*) from sysibm.systables where CREATOR='KS' AND name='AUTO_D_%s';" % (tablename)
                r = crs.execute(sql_i)
                ds_i = crs.fetchall()
                r = crs.execute(sql_d)
                ds_d = crs.fetchall()
                conn.commit()
                if ds_i[0][0] != 0:
                    sql = "truncate table %s.AUTO_I_%s IMMEDIATE " % (schema, tablename)
                    crs.execute(sql)
                if ds_d[0][0] != 0:
                    # NOTE(review): this commit before the second
                    # truncate mirrors the original ordering; the
                    # commit placement looks accidental -- confirm.
                    conn.commit()
                    sql = "truncate table %s.AUTO_D_%s IMMEDIATE" % (schema, tablename)
                    crs.execute(sql)
                    conn.commit()
    except BaseException, ex:
        exc_info = cF.getExceptionInfo()
        Logging.getLog().critical(exc_info)
        err = u"连接数据库出现异常,%s" % exc_info
        crs.close()
        conn.rollback()
        conn.close()
        return -1, err
def read_dbf_file(path):
    """
    Read a DBF file and report its record count.

    :param path: path of the DBF file.
    :return: (0, num) on success where num is the record count
             (minus one when >= 2 -- presumably dropping a header row;
             confirm against dbfreader's output), or (-1, error text)
             on failure.
    """
    try:
        Logging.getLog().info(u"path:%s" % (str(path)))
        # BUG FIX: the original never closed the file handle.
        f = open(path, 'rb')
        try:
            db = list(dbfreader(f))
        finally:
            f.close()
        num = len(db)
        if num >= 2:
            num = num - 1
        print(num)
        # BUG FIX: the original fell through and returned None on
        # success while returning a (ret, msg) tuple on failure.
        return 0, num
    except BaseException as ex:
        exc_info = cF.getExceptionInfo()
        Logging.getLog().error(u"异常发生,请检查日志文件,提示信息为:%s" % (exc_info))
        return -1, u"异常发生,请检查日志文件,提示信息为:%s" % (exc_info)
def generateDB2ColumnXml():
    """
    Generate the database column configuration XML (DB2 scan).

    NOTE(review): the visible code only builds the root element and
    opens the connection; `root` and `conn` are never used and there is
    no success return -- this block appears truncated in the current
    view; confirm against the full file.
    """
    Logging.getLog().info(u"扫描DB2数据库开始")
    root = etree.Element("comparesetting")
    try:
        conn = DB2SkDB()
    except BaseException, ex:
        exc_info = cF.getExceptionInfo()
        err = u"连接数据库出现异常,%s" % exc_info
        Logging.getLog().critical(err)
        return -1, err
def read_task_info(self, task_id):
    """Fetch all Redis state describing one task in a single round trip.

    The returned list holds, in order: the pending record-id set, the
    init-state hash, the case-result hash and the task-info hash.

    :param task_id: task identifier (stringified for the keys).
    :return: (0, [set, dict, dict, dict]) or (-1, error-info).
    """
    try:
        tid = str(task_id)
        pipe = self.r.pipeline()
        pipe.smembers("task_record_id_set:%s" % tid)
        pipe.hgetall("task_init_state:%s" % tid)
        pipe.hgetall("case_result:%s" % tid)
        pipe.hgetall("task_info:%s" % tid)
        return 0, pipe.execute()
    except redis.RedisError:
        detail = cF.getExceptionInfo()
        Logging.getLog().error(detail)
        return -1, detail
def pre_process_test_case(self, case): ''' modify cmdstring on-line before send to the agent :param case: :return: ''' # case_id = try: tree_directory_id = '' case_id = case["CASES_ID"] sql = "SELECT TREE_DIRECTORY_ID FROM SX_CASES WHERE CASES_ID ='%s'" %str(case_id) ds = cF.executeCaseSQL(sql) if len(ds)>0: tree_directory_id = str(ds[0]["TREE_DIRECTORY_ID"]) else: Logging.getLog().error("fail to find the tree_directory_id according cases_id(%s)" %str(case_id)) return -1, case ret, func_string = self.get_func_string_according_id(tree_directory_id) if ret < 0: return ret,func_string ret, GND_string = self.get_GND_param_from_func_string(func_string) if ret < 0: Logging.getLog().error(GND_string) return ret,GND_string Logging.getLog().debug("begin to process %s" %case["CMDSTRING"]) cmdstring = case["CMDSTRING"] cmd_list= cmdstring.split('#') user = cmd_list[0].split('=')[1] pwd = cmd_list[1].split('=')[1] func_string_list = func_string.split('-') cmdstring = "GND00 %s %s %s %s " %(func_string_list[0],user,pwd,GND_string) for i in range(1,len(func_string_list)): cmdstring = cmdstring + "%s " %func_string_list[i] for i in range(2,len(cmd_list)): cmdstring = cmdstring + "%s " %cmd_list[i].split('=')[1] cmdstring = cmdstring +"GND99" case["CMDSTRING"] = cmdstring return 0,case except: exc_info = cF.getExceptionInfo() Logging.getLog().error(exc_info) return -1, exc_info
def ModifyRemoteServerDateTime(year, month, day, hour, min, sec):
    """Set the local system date and time via shell commands.

    :return: (0, "") on success, (-1, error-info) on failure.
    """
    try:
        # Issue the "date" and "time" commands in turn.
        for command in ("date %u-%02u-%02u" % (year, month, day),
                        "time %02u:%02u:%02u" % (hour, min, sec)):
            os.system(command)
        return 0, ""
    except:
        exc_info = cF.getExceptionInfo()
        print(exc_info)
        return -1, exc_info
def process_disable_column(db_id, schema, tablename, result_dict):
    '''
    Strip disabled columns out of a table diff so they do not take part
    in the comparison (per the disableflag configuration).

    :param db_id: database id in the scan configuration.
    :param schema: schema name.
    :param tablename: table name.
    :param result_dict: diff dict with "field" (column names) and
           optional "add"/"del"/"update" row lists; mutated in place.
    :return: (0, result_dict) with disabled columns removed, (0, {}) if
             the whole table is disabled, or (-1, error-info).
    '''
    try:
        table_column_disable_info = scanOracleDB.getTableColumnInfo()
        remove_index_list = []
        field_list = result_dict["field"]
        if table_column_disable_info[db_id][schema][tablename]["disableflag"] == 1:
            # Whole table disabled: nothing to compare.
            return 0, {}
        # Collect positions of disabled columns.  NOTE(review): assumes
        # the configuration's column_list key order matches the order of
        # result_dict["field"] -- confirm.
        for idx, column in enumerate(table_column_disable_info[db_id][schema][tablename]["column_list"].keys()):
            if table_column_disable_info[db_id][schema][tablename]["column_list"][column]["disableflag"] == 1:
                remove_index_list.append(idx)
        # Remove from the highest index down so earlier pops do not
        # shift the positions still to be removed.
        remove_index_list.sort(reverse=True)
        for idx in remove_index_list:
            result_dict["field"].pop(idx)
            if result_dict.has_key('add'):
                for row in result_dict["add"]:
                    row.pop(idx)
            if result_dict.has_key('del'):
                for row in result_dict["del"]:
                    row.pop(idx)
            if result_dict.has_key('update'):
                for row in result_dict["update"]["old"]:
                    row.pop(idx)
                for row in result_dict["update"]["new"]:
                    row.pop(idx)
                for row in result_dict["update"]["diff_flag"]:
                    row.pop(idx)
        return 0, result_dict
    except BaseException, ex:
        exc_info = cF.getExceptionInfo()
        Logging.getLog().error(exc_info)
        return -1, exc_info
def ChangStkPrice(server=0):
    '''
    Initialise the price fields of every stock code referenced by the
    test cases on the given core-trading server.

    Collects the distinct stkcode values out of tbl_cases cmdstrings,
    then rewrites price limits in run..stktrd and quote fields in
    run..stkprice for each code.

    On error: rolls back, closes the connection, pushes a (-1, ...)
    result into gl.threadQueue for the spawning thread, and returns
    (-1, exception-info).  NOTE(review): the visible success path
    neither commits nor returns -- presumably handled further on in the
    original file; confirm before relying on the return value.
    '''
    price_dict = {"maxrisevalue": "17.8300", "maxdownvalue": "14.5900", "stopflag": "F",
                  "closeprice": "16.2100", "openprice": "16.2200", "lastprice": "16.2500",
                  "highprice": "16.3200", "lowprice": "16.1300", "lastcloseprice": "16.2500"}
    stkcode_list = []
    Logging.getLog().info(u"准备在集中交易 server %s 初始化股票价格……" % str(server))
    ret, conn = cF.ConnectToRunDB(server)
    cursor = conn.cursor()
    servertype = gl.g_connectSqlServerSetting[server]["servertype"]
    try:
        # Collect every distinct stock code used in the case cmdstrings.
        sql = "select * from tbl_cases where cmdstring like '%stkcode:%' and cmdstring like '%price:%'"
        ds = cF.executeCaseSQL(sql)
        if len(ds) > 0:
            for i, line in enumerate(ds):
                stkcode = line["cmdstring"].split('stkcode:')[1].split(',')[0]
                if stkcode not in stkcode_list:
                    stkcode_list.append(stkcode)
        for item in stkcode_list:
            # BUG FIX: the original interpolated the loop-carried variable
            # `stkcode` (the last code parsed above) instead of `item`,
            # so only one code was ever looked up in stktrd.
            sql = "select * from run..stktrd where stkcode='%s'" % (item)
            cursor.execute(sql)
            dss = cursor.fetchall()
            if len(dss) > 0:
                sql = "update run..stktrd set maxrisevalue='%s',maxdownvalue='%s',stopflag='%s',fixprice='%s' where stkcode='%s'" % (
                    price_dict["maxrisevalue"], price_dict["maxdownvalue"], price_dict["stopflag"],
                    price_dict["openprice"], item)
                cursor.execute(sql)
            sql1 = "select * from run..stkprice where stkcode='%s'" % (item)
            cursor.execute(sql1)
            dsp = cursor.fetchall()
            if len(dsp) > 0:
                sql1 = "update run..stkprice set closeprice='%s',openprice='%s',lastprice='%s',highprice='%s',lowprice='%s',lastcloseprice='%s' where stkcode='%s'" % (
                    price_dict["closeprice"], price_dict["openprice"], price_dict["lastprice"],
                    price_dict["highprice"], price_dict["lowprice"], price_dict["lastcloseprice"], item)
                cursor.execute(sql1)
    except Exception as ex:
        exc_info = cF.getExceptionInfo()
        Logging.getLog().info(u"在核心%s初始化股票代码价格出现异常,提示信息 stkcode= %s %s……" % (str(server), stkcode, exc_info))
        cursor.close()
        conn.rollback()
        conn.close()
        # The result is queued so the spawning thread can read it later.
        gl.threadQueue.put((-1, u"exc_info", "ChangStkPrice", server))
        return -1, exc_info
def createHTKSTrigger():
    """
    Drop every existing KS_*_*_TRIGGER trigger in the KS schema.

    NOTE(review): the code that re-created the AUTO_I_/AUTO_D_ audit
    triggers per enabled table was entirely commented out in the
    original; it has been removed here -- see version control for the
    SQL templates.  The visible success path has no return value and no
    connection cleanup -- presumably handled later in the original
    file; confirm.

    Returns (-1, err) on failure.
    """
    Logging.getLog().info(u"准备在创建触发器……")
    doc = ET.parse("DB2_column_report_scan.xml")
    table_root = doc.getroot()
    conn = DB2SkDB()
    crs = conn.cursor()
    try:
        sql = "select trigname from syscat.triggers WHERE tabschema='KS' and trigname LIKE 'KS_%_%_TRIGGER'"
        crs.execute(sql)
        ds = crs.fetchall()
        for rec in ds:
            sql = "drop trigger KS.%s" % (rec[0])
            print sql
            # A failed drop is logged and skipped, not fatal.
            try:
                crs.execute(sql)
            except:
                print "%s except and continue" % sql
        conn.commit()
    except BaseException, ex:
        exc_info = cF.getExceptionInfo()
        err = u"出现异常,提示%s" % (exc_info)
        Logging.getLog().critical(err)
        crs.close()
        conn.rollback()
        conn.close()
        return -1, err
def is_same_size(self, localfile, remotefile):
    """Compare the sizes of a local file and an FTP remote file.

    Either size defaults to -1 when it cannot be determined, so a
    missing local file and an unreadable remote file compare "equal".

    :return: 1 if the sizes match, 0 otherwise.
    """
    remote_size = -1
    try:
        self.ftp.sendcmd("TYPE i")  # binary mode, required for SIZE
        remote_size = self.ftp.size(remotefile)
    except:
        info = cF.getExceptionInfo()
        Logging.getLog().error(info)
    local_size = -1
    try:
        if os.path.exists(localfile):
            local_size = os.path.getsize(localfile)
    except:
        info = cF.getExceptionInfo()
        Logging.getLog().debug(info)
    if remote_size == local_size:
        return 1
    debug_print('lo:%d re:%d' % (local_size, remote_size), )
    return 0
def del_task(self, task_id):
    """Remove every Redis structure belonging to a task.

    Clears the task from both task lists, deletes its record set,
    init-state and info hashes, and unmaps it from its agents.

    :param task_id: task identifier.
    :return: (0, "OK") or (-1, error-info) on a Redis failure.
    """
    try:
        tid = str(task_id)
        pipe = self.r.pipeline()
        self.lrem_item(pipe, "curr_task_list", task_id)
        pipe.delete("task_record_id_set:%s" % tid)
        pipe.delete("task_init_state:%s" % tid)
        pipe.delete("task_info:%s" % tid)
        self.lrem_item(pipe, "agent_curr_task", task_id)
        pipe.hdel("task_agent_table", task_id)
        pipe.execute()
        return 0, "OK"
    except redis.RedisError:
        detail = cF.getExceptionInfo()
        Logging.getLog().error(detail)
        return -1, detail
def RunScript(execEngine, system_id, funcid, cmdstring, runtaskid, run_record_id, expect_ret):
    """
    Run one interface-script test case through the SDK.

    :param execEngine: execution engine (unused in the visible code).
    :param system_id: system under test.
    :param funcid: interface function id.
    :param cmdstring: command string for the case.
    :param runtaskid: task id.
    :param run_record_id: run record id.
    :param expect_ret: expected return; NOTE(review): any non-zero
           expect_ret forces the case to be reported as failed even
           when execution succeeded -- expected-failure semantics are
           not handled here; confirm this is intended.
    :return: (ret, msg, log_info, dataset_list, return_json_string,
              data_change_info_json_string) -- ret 0 on success, -1
              otherwise.  log_info and data_change_info_json_string are
              never populated in the visible code.
    """
    succ_flag = False
    msg = ""
    log_info = ""
    return_json_string = ""
    data_change_info_json_string = ""
    dataset_list = []
    sdk = GetSdk()
    try:
        dataset_dict = {}
        ret, msg = ExecuteOneTestCase(sdk, funcid, cmdstring)
        if ret == 0:
            # Flatten the middleware result for the caller/database.
            dataset_dict = sdk.GetReturnResult()
            r, return_json_string , dataset_list= deal_with_dataset_list(dataset_dict)
            if int(expect_ret) != 0:
                succ_flag = False
            else:
                succ_flag = True
            # A business rejection inside the result also fails the case.
            if r < 0:
                succ_flag = False
        else:
            succ_flag = False
        if succ_flag == True:
            return 0, msg, log_info, dataset_list, return_json_string, data_change_info_json_string
        else:
            return -1, msg, log_info, dataset_list, return_json_string, data_change_info_json_string
    except BaseException, ex:
        exc_info = cF.getExceptionInfo()
        Logging.getLog().critical(u"异常发生,请检查日志文件,提示信息为:%s" % (exc_info))
        return -1, u"异常发生,请检查日志文件,提示信息为:%s" % (
            exc_info), log_info, [], return_json_string, data_change_info_json_string