def add_result_to_case(self, testrail_case, xunit_case):
    """Convert an ITRR xUnit case result into a TestRail result.

    Maps the ITRR result constant to a TestRail status name, renders the
    result comment from the case info template, and posts the result with
    an elapsed-time string.  Silently returns when the result kind is
    unknown or no matching TestRail status exists.
    """
    status_by_result = {
        itrr.TEST_RESULT_PASS: 'passed',
        itrr.TEST_RESULT_FAIL: 'failed',
        itrr.TEST_RESULT_SKIP: 'skipped',
        itrr.TEST_RESULT_BLOCKED: 'blocked',
    }
    status_name = status_by_result.get(xunit_case.get_result())
    if status_name is None:
        return
    matching_ids = [sid for sid, name in self.testrail_statuses.items()
                    if name == status_name]
    if not matching_ids:
        logger.warning("Can't find status {} for result {}".format(
            status_name, xunit_case.name))
        return
    case_info = xunit_case.get_info()
    # NOTE: .seconds truncates to the sub-day component of the timedelta.
    case_info['time'] = case_info['time'].seconds
    case_info['env_description'] = self.env_description
    comment = tools.get_rendered('case_result.tmpl', case_info)
    elapsed = case_info['time']
    if elapsed > 0:
        elapsed = "{}s".format(elapsed)
    testrail_case.add_result(status_id=matching_ids[0],
                             elapsed=elapsed,
                             comment=comment)
def home_view():
    """Render the main app view: clock, Spotify user/playlist, and alarm."""
    now_text = datetime.datetime.now().strftime("%H:%M")
    spotify_user = None
    playlist = None
    try:
        spotify_user = goodclock_spotify.get_current_username()
    except spotipy.client.SpotifyException:
        logger.warning('Spotify exception getting username')
    if spotify_user:
        try:
            playlist = goodclock_spotify.get_or_create_goodclock_playlist(
                spotify_user)
        except spotipy.client.SpotifyException:
            logger.warning('Spotify exception getting/creating playlist')
    return render_template(
        'home.html',
        username=spotify_user,
        current_time=now_text,
        user_playlist=playlist,
        alarm_time=DB.get_alarm_time(),
    )
def restore_access(request):
    """Handle the password-restore form.

    GET: render the empty restore form.  POST: validate the form, look the
    user up by email via the content service, email them a restore link,
    and render a confirmation page.

    Raises:
        Exception: when the user lookup fails, the form is invalid, or the
            email could not be sent (via send_restore_message).
    """
    if request.method == 'POST':
        form = RestoreForm(data=request.POST)
        if form.is_valid():
            user_email = form.cleaned_data["user_email"]
            # BUG FIX: the URL was a plain string literal, so "{CONTENT_URL}"
            # was never interpolated — the request went to a literal
            # "{CONTENT_URL}/..." path.  It must be an f-string.
            res_user_by_email = requests.post(
                f"{CONTENT_URL}/search_user_by_email",
                data={"email": user_email})
            if res_user_by_email.status_code == 200:
                user_id = res_user_by_email.json()["id"]
            else:
                # BUG FIX: the original referenced the nonexistent name
                # `res.user_by_email` (NameError) and misspelled "failed".
                msg = ("Request to get user was failed with status "
                       f"{res_user_by_email.status_code}")
                logger.warning(msg)
                raise Exception(msg)
            now_in_secs = int(datetime.now().strftime("%s"))
            email_text = email_for_restore_access(now_in_secs, user_email,
                                                  user_id)
            send_restore_message(user_email, email_text)
            return render(request, "restore_message_sent.html")
        else:
            logger.warning("Email not sent")
            raise Exception("Email not sent")
    else:
        form = RestoreForm()
    context = {'access_form': form}
    return render(request, "webui/users/restore_pass.html", context)
def get_existing_bug_link(previous_results):
    """Return the most relevant open Launchpad bug target linked from
    previous TestRail results, or None when no usable link exists.

    Scans results newest-first, parses the bug id out of each stored link,
    resolves duplicates via Launchpad, and returns the first bug target
    matching the configured project/milestone that is still open.  The
    returned target dict is augmented with the original 'bug_link'.
    """
    results_with_bug = [
        result for result in previous_results
        if result["custom_launchpad_bug"] is not None
    ]
    if not results_with_bug:
        return
    # Newest results first so the freshest bug link wins.
    for result in sorted(results_with_bug,
                         key=lambda k: k['created_on'],
                         reverse=True):
        try:
            # The bug id is the last path component of the stored URL.
            bug_id = int(
                result["custom_launchpad_bug"].strip('/').split('/')[-1])
        except ValueError:
            logger.warning('Link "{0}" doesn\'t contain bug id.'.format(
                result["custom_launchpad_bug"]))
            continue
        try:
            # Follow duplicate chains to the canonical bug.
            bug = LaunchpadBug(bug_id).get_duplicate_of()
        except KeyError:
            # KeyError from the Launchpad client means private/nonexistent.
            logger.warning("Bug with id '{bug_id}' is private or \
doesn't exist.".format(bug_id=bug_id))
            continue
        except Exception:
            logger.exception("Strange situation with '{bug_id}' \
issue".format(bug_id=bug_id))
            continue
        for target in bug.targets:
            if target['project'] == LaunchpadSettings.project and\
                    target['milestone'] == LaunchpadSettings.milestone and\
                    target['status'] not in LaunchpadSettings.closed_statuses:
                target['bug_link'] = result["custom_launchpad_bug"]
                return target
def create_rider_inf():
    """Create a Driver row from the posted ride form and redirect to the
    passenger search for the same route.

    NOTE(review): on a non-POST request this view implicitly returns None,
    as in the original — confirm whether a redirect is expected instead.
    """
    if request.method == 'POST':
        point_A = request.form.get("point_A")
        point_B = request.form.get('point_B')
        car = request.form.get("car")
        free_place = request.form.get("free_place")
        children = request.form.get("children")
        pets = request.form.get("pets")
        music = request.form.get("music")
        phone = request.form.get("phone")
        driver = Driver(point_A, point_B, car, free_place, children, pets,
                        music, phone)
        try:
            add_data(driver)
        except Exception as exc:
            # BUG FIX: '{exc}' was a plain literal in the original message
            # and was never interpolated; use lazy %-style logging args.
            logger.warning('create action failed with errors: %s', exc,
                           exc_info=True)
        return redirect(
            url_for(".passenger_find",
                    a=point_A,
                    b=point_B,
                    free_place=free_place,
                    children=children,
                    pets=pets))
def test_data(self):
    """Return the test report for ``self.url``.

    Falls back to a synthetic single-case suite ('jenkins', status
    'failed') when the build's test data cannot be fetched, so downstream
    processing always has a suite to work with.
    """
    fallback = {
        "suites": [{
            "cases": [{
                "name": "jenkins",
                "className": "jenkins",
                "status": "failed",
                "duration": 0,
            }]
        }]
    }
    try:
        return self.get_test_data(self.url)
    except Exception as e:
        logger.warning("No test data for {0}: {1}".format(self.url, e))
        return fallback
def test_data(self, result_path=None):
    """Return the test report for ``self.url`` (optionally scoped to
    *result_path*).

    Falls back to a synthetic single-case suite ('jenkins', status
    'failed') when the build's test data cannot be fetched, so downstream
    processing always has a suite to work with.
    """
    fallback = {
        "suites": [{
            "cases": [{
                "name": "jenkins",
                "className": "jenkins",
                "status": "failed",
                "duration": 0,
            }]
        }]
    }
    try:
        return self.get_test_data(self.url, result_path)
    except Exception as e:
        logger.warning("No test data for {0}: {1}".format(self.url, e))
        return fallback
def apply_transactions(transactions, auto=False):
    '''
    Apply renaming transactions.
    apply_transactions(transactions)
    transactions = [(old_path, new_path), ...]
    Manual review of transactions is required unless *auto* is set.
    '''
    if auto:
        logger.warning('Auto is On. No confirmation required.')
    print('=' * 30)
    if not transactions:
        logger.debug('NO TRANSACTIONS')
        # sys.exit raises SystemExit; the original's trailing `return`
        # after it was unreachable and has been removed.
        sys.exit('No Transactions to apply.')
    for src, dst in transactions:
        print('[{}] > [{}]'.format(src.name, dst.name))
    print('{} Transactions to apply. Renaming...'.format(len(transactions)))
    count = 0
    if auto or input('EXECUTE ? [y]\n>') == 'y':
        for src, dst in transactions:
            try:
                src.rename(dst)
            # BUG FIX: bare `except:` also swallowed SystemExit and
            # KeyboardInterrupt; narrowed to Exception.
            except Exception:
                logger.error(sys.exc_info()[0].__name__)
                logger.error('Could not rename: [{}]>[{}]'.format(src, dst))
            else:
                logger.debug('[{}] renamed to [{}]'.format(src, dst))
                count += 1
    print('{} folders renamed.'.format(count))
def apply_transactions(transactions, auto=False):
    '''
    Apply renaming transactions.
    apply_transactions(transactions)
    transactions = [(old_path, new_path), ...]
    Manual review of transactions is required unless *auto* is set.
    '''
    if auto:
        logger.warning('Auto is On. No confirmation required.')
    print('='*30)
    if not transactions:
        logger.debug('NO TRANSACTIONS')
        # sys.exit raises SystemExit; the original's trailing `return`
        # after it was unreachable and has been removed.
        sys.exit('No Transactions to apply.')
    for src, dst in transactions:
        print('[{}] > [{}]'.format(src.name, dst.name))
    print('{} Transactions to apply. Renaming...'.format(len(transactions)))
    count = 0
    if auto or input('EXECUTE ? [y]\n>') == 'y':
        for src, dst in transactions:
            try:
                src.rename(dst)
            # BUG FIX: bare `except:` also swallowed SystemExit and
            # KeyboardInterrupt; narrowed to Exception.
            except Exception:
                logger.error(sys.exc_info()[0].__name__)
                logger.error('Could not rename: [{}]>[{}]'.format(src, dst))
            else:
                logger.debug('[{}] renamed to [{}]'.format(src, dst))
                count += 1
    print('{} folders renamed.'.format(count))
async def main():
    """Dispatch unprocessed IPN rows to a pool of worker tasks, block by
    block, forever (or until TESTING short-circuits the loop).

    Polls the database for new blocks, enqueues each block's IPN rows, and
    persists the high-water mark ("ipn_sync_block") after each block so a
    restart resumes where it left off.
    """
    queue = asyncio.Queue()
    tasks = []
    # Spin up the fixed-size worker pool that consumes the queue.
    for i in range(IPN_WORKERS):
        task = asyncio.create_task(worker(queue))
        tasks.append(task)
    ldb = db.DataBase()
    # Last block whose IPNs were already dispatched.
    ipn_sync_block = int(ldb.get_setting("ipn_sync_block"))
    while True:
        last_block = ldb.get_last_block()
        logger.warning("last_block: %s" % last_block)
        if last_block > ipn_sync_block:
            # Process every block we have not yet dispatched, oldest first.
            for block in range(ipn_sync_block + 1, last_block + 1, 1):
                logger.warning("active block: %s" % block)
                for row in ldb.get_ipns(block) or []:
                    queue.put_nowait(row)
                # Persist progress per block so a crash loses at most one block.
                ldb.set_setting("ipn_sync_block", block)
                ipn_sync_block = block
        # waiting for next block usually each 10 minutes for BTC
        await asyncio.sleep(10)
        # code for unittests
        if TESTING:
            return None
        # check for missing workers
        # NOTE(review): mutating `tasks` while iterating it can skip the
        # element after each removal — at worst a dead worker is replaced
        # one poll cycle late; confirm that is acceptable.
        for task in tasks:
            if task.done():
                tasks.remove(task)
                tasks.append(asyncio.create_task(worker(queue)))
def findAppsWithAutoscaleLabels(self):
    """Load Marathon apps and register those labelled for autoscaling.

    For every app carrying LABEL_FOR_AUTOSCALE_ENABLE, builds a MarathonApp
    with its task counts and copies the mandatory/optional label values
    (coerced to int when numeric) onto it, then stores it in self.dict_apps.
    Missing mandatory labels are logged as errors but do not abort the app.
    """
    # BUG FIX (idiom): the local variable was named `list`, shadowing the
    # builtin; renamed to `apps`.
    apps = self.marathon_cli.list_apps(embed_counts=True,
                                       embed_task_stats=True)
    logger.debug('Lista recebida {}'.format(apps))
    if len(apps) == 0:
        logger.warning('0 apps loaded. Your marathon have apps?')
    for app in apps:
        if LABEL_FOR_AUTOSCALE_ENABLE in app.labels:
            new_app = MarathonApp(app.id)
            new_app.tasksRunning = app.tasks_running
            new_app.tasksStaged = app.tasks_staged
            for label in MANDATORY_LABELS_APP:
                if label in app.labels:
                    value = app.labels[label]
                    if value.isnumeric():
                        value = int(value)
                    # Idiom: setattr() instead of calling __setattr__ directly.
                    setattr(new_app, label, value)
                else:
                    logger.error(
                        'App: [{}] :: dont have MANDATORY_LABELS :: {}'.
                        format(app.id, label))
            for label in OPTIONAL_LABELS_APP:
                if label in app.labels:
                    value = app.labels[label]
                    if value.isnumeric():
                        value = int(value)
                    setattr(new_app, label, value)
            self.dict_apps[app.id] = new_app
        else:
            logger.debug(
                'App: [{}] :: dont have {} = True. If you want to scale, please add labels.'
                .format(app.id, LABEL_FOR_AUTOSCALE_ENABLE))
def get_existing_bug_link(previous_results):
    """Return the most relevant open Launchpad bug target linked from
    previous TestRail results, or None when no usable link exists.

    Scans results newest-first, parses the bug id out of each stored link,
    resolves duplicates via Launchpad, and returns the first bug target
    matching the configured project/milestone that is still open.  The
    returned target dict is augmented with the original 'bug_link'.
    """
    results_with_bug = [result for result in previous_results
                        if result["custom_launchpad_bug"] is not None]
    if not results_with_bug:
        return
    # Newest results first so the freshest bug link wins.
    for result in sorted(results_with_bug,
                         key=lambda k: k['created_on'],
                         reverse=True):
        try:
            # The bug id is the last path component of the stored URL.
            bug_id = int(result["custom_launchpad_bug"].strip('/').split(
                '/')[-1])
        except ValueError:
            logger.warning('Link "{0}" doesn\'t contain bug id.'.format(
                result["custom_launchpad_bug"]))
            continue
        try:
            # Follow duplicate chains to the canonical bug.
            bug = LaunchpadBug(bug_id).get_duplicate_of()
        except KeyError:
            # KeyError from the Launchpad client means private/nonexistent.
            logger.warning("Bug with id '{bug_id}' is private or \
doesn't exist.".format(bug_id=bug_id))
            continue
        except Exception:
            logger.exception("Strange situation with '{bug_id}' \
issue".format(bug_id=bug_id))
            continue
        for target in bug.targets:
            if target['project'] == LaunchpadSettings.project and\
                    target['milestone'] == LaunchpadSettings.milestone and\
                    target['status'] not in LaunchpadSettings.closed_statuses:
                target['bug_link'] = result["custom_launchpad_bug"]
                return target
def get(self):
    """Daily 'keep' (pet-care) endpoint (Python 2 / Tornado handler).

    Validates the 'type' argument, rate-limits the action to once per day
    per user via a Redis counter that expires at midnight, then credits the
    pet with the action's score.

    NOTE(review): on the success path this method falls through without an
    explicit response — presumably the framework/后续 code handles it;
    confirm a success payload is not expected here.
    """
    uid = self.current_user
    type_ = self.get_argument('type', None)
    if not type_:
        self.set_status(400)
        result = dict(code=40011, msg=u'缺少type参数')
        return self.jsonify(result)
    keep_info = self.keep_map(type_)
    # Per-user, per-action daily counter key.
    key = "uid:{}:keep:{}".format(uid, type_)
    times = rdb.incr(key)
    if times == 1:
        # First action today: make the counter expire at midnight.
        rdb.expire(key, get_to_tomorrow())
    else:
        logger.warning('have try times {}'.format(times))
        result = dict(code=40010,
                      msg=u'每天只能{}一次哦!'.format(keep_info['name']))
        return self.jsonify(result)
    try:
        row = Pet.keep(uid=uid, score=keep_info['score'])
        logger.info('keep pet {}'.format(row))
    except Exception, e:
        self.set_status(500)
        logger.error('keep pet error {}'.format(e))
        result = dict(code=40012, msg=u'更新服务器错误, 请稍后重试!')
        return self.jsonify(result)
def create_passenger_inf():
    """Create a Passenger row from the posted ride form and redirect to the
    driver search for the same route.

    NOTE(review): on a non-POST request this view implicitly returns None,
    as in the original — confirm whether a redirect is expected instead.
    """
    if request.method == 'POST':
        point_A = request.form.get("point_A")
        point_B = request.form.get('point_B')
        pas_quantity = request.form.get("pas_quantity")
        children = request.form.get("children")
        pets = request.form.get("pets")
        music = request.form.get("music")
        phone = request.form.get("phone")
        # Checkbox arrives as 'on'; store as 1/0.
        if pets == 'on':
            pets = 1
        else:
            pets = 0
        passenger = Passenger(point_A, point_B, pas_quantity, children, pets,
                              music, phone)
        try:
            add_data(passenger)
        except Exception as exc:
            # BUG FIX: '{exc}' was a plain literal in the original message
            # and was never interpolated; use lazy %-style logging args.
            logger.warning('create action failed with errors: %s', exc,
                           exc_info=True)
        return redirect(
            url_for(".driver_find",
                    a=point_A,
                    b=point_B,
                    pas_quantity=pas_quantity,
                    children=children,
                    pets=pets))
def add_result_to_case(self, testrail_case, xunit_case):
    """Convert an ITRR xUnit case result into a TestRail result.

    Maps the ITRR result constant to a TestRail status name, renders the
    result comment from the case info template, and posts the result with
    an elapsed-time string.  Silently returns when the result kind is
    unknown or no matching TestRail status exists.
    """
    result_to_status = {
        itrr.TEST_RESULT_PASS: 'passed',
        itrr.TEST_RESULT_FAIL: 'failed',
        itrr.TEST_RESULT_SKIP: 'skipped',
        itrr.TEST_RESULT_BLOCKED: 'blocked',
    }
    status_name = result_to_status.get(xunit_case.get_result())
    if status_name is None:
        return
    candidates = [sid for sid, name in self.testrail_statuses.items()
                  if name == status_name]
    if not candidates:
        logger.warning("Can't find status {} for result {}".format(
            status_name, xunit_case.name))
        return
    case_info = xunit_case.get_info()
    # NOTE: .seconds truncates to the sub-day component of the timedelta.
    case_info['time'] = case_info['time'].seconds
    case_info['env_description'] = self.env_description
    comment = tools.get_rendered('case_result.tmpl', case_info)
    elapsed = case_info['time']
    if elapsed > 0:
        elapsed = "{}s".format(elapsed)
    testrail_case.add_result(
        status_id=candidates[0],
        elapsed=elapsed,
        comment=comment
    )
def send_email(from_, to_, subject, body_text, body_html):
    """Build a multipart (plain + HTML) email and 'send' it.

    Returns False on an invalid address, True otherwise.

    NOTE(review): actual SMTP delivery is disabled — the function logs the
    message and returns True before the smtplib code runs (see below).
    Presumably a debug/dev shortcut; confirm before relying on delivery.
    """
    if not from_:
        from_ = SMTP["default_sender"]
    # Minimal sanity check on both addresses (not full RFC validation).
    if not re.match(r"[^@]+@[^@]+\.[^@]+", from_) or not re.match(
            r"[^@]+@[^@]+\.[^@]+", to_):
        logger.error("Invalid email address from {} -- to {}".format(
            from_, to_))
        return False
    # Create message container - the correct MIME type is multipart/alternative.
    msg = MIMEMultipart('alternative')
    msg['Subject'] = subject
    msg['From'] = from_
    msg['To'] = to_
    # Record the MIME types of both parts - text/plain and text/html.
    parts = []
    if body_text:
        parts.append(MIMEText(body_text, 'plain'))
    if body_html:
        parts.append(MIMEText(body_html, 'html'))
    for part in parts:
        msg.attach(part)
    logger.warning("EMAIL: " + msg.as_string())
    return True
    # NOTE(review): everything below is unreachable because of the early
    # return above; the SMTP send path is effectively turned off.
    server = smtplib.SMTP_SSL(SMTP["host"], SMTP["port"])
    server.ehlo()
    server.login(SMTP["user"], SMTP["password"])
    server.sendmail(from_, to_, msg.as_string())
    server.close()
def add_result_to_case(self, testrail_case, xunit_case):
    """Translate an xUnit case outcome into a TestRail result.

    Picks the first truthy outcome flag (success/failed/skipped/errored),
    resolves the matching TestRail status id, and posts the result with the
    case message as comment and whole-second elapsed time.  Returns without
    posting when the outcome is unknown or no status id matches.
    """
    # Flags are checked lazily, in the same precedence as the original chain.
    for flag, status_name in (('success', 'passed'),
                              ('failed', 'failed'),
                              ('skipped', 'skipped'),
                              ('errored', 'blocked')):
        if getattr(xunit_case, flag):
            break
    else:
        return
    matching = [sid for sid, name in self.testrail_statuses.items()
                if name == status_name]
    if not matching:
        logger.warning("Can't find status {} for result {}".format(
            status_name, xunit_case.methodname))
        return
    comment = xunit_case.message
    elapsed = int(xunit_case.time.total_seconds())
    if elapsed > 0:
        elapsed = "{}s".format(elapsed)
    testrail_case.add_result(status_id=matching[0],
                             elapsed=elapsed,
                             comment=comment)
def wrapper(*args, **kwargs):
    """Reject requests whose remote IP is outside ALLOW_IP; otherwise
    delegate to the wrapped handler method."""
    handler = args[0]
    if handler.request.remote_ip in ALLOW_IP:
        return func(*args, **kwargs)
    handler.set_status(500)
    handler.finish()
    logger.warning("该IP请求不合法" + handler.request.remote_ip)
def clean_last_block():
    """Delete the stored hash of the most recent block so it will be
    re-fetched/re-processed on the next sync pass."""
    # NOTE(review): `chain` is unused here; kept in case ChainData() has
    # constructor side effects — confirm and remove if not.
    chain = ChainData()
    ldb = db.DataBase()
    block = ldb.get_last_block()
    # BUG FIX (idiom): the local was named `hash`, shadowing the builtin.
    block_hash = ldb.get_block_hash(block)
    logger.warning("Cleaning block %s, hash %s" % (block, block_hash))
    ldb.delete_block_hash(block, block_hash)
def getMetadata(self, tag):
    """Return the text of the first element matching *tag* under the root,
    or None when the tag is absent.

    Logs a warning (and still returns the first match) when multiple
    elements share the tag.
    """
    elems = self._root.findall(tag)
    if not elems:
        # BUG FIX: the original logged "Tag %s is not valid!" without
        # supplying the tag argument, leaving the %s placeholder unfilled.
        logger.error("Tag %s is not valid!" % tag)
        return None
    elif len(elems) > 1:
        logger.warning(
            "Tag %s has more than one element (len = %d)! Returning first!"
            % (tag, len(elems)))
    return elems[0].text
def get_tests_descriptions(milestone_id, tests_include, tests_exclude,
                           groups):
    """Build TestRail test-case dicts from the registered test groups.

    For each Jenkins group, walks the enabled cases, applies the
    include/exclude name filters, derives title/steps/duration from the
    test docstring, and returns a deduplicated list of TestRail case
    payloads for *milestone_id*.
    """
    import_tests()
    tests = []
    for jenkins_suffix in groups:
        group = groups[jenkins_suffix]
        for case in TestProgram(groups=[group]).cases:
            if not case.entry.info.enabled:
                continue
            if tests_include:
                if tests_include not in case.entry.home.func_name:
                    logger.debug(
                        "Skipping '{0}' test because it doesn't "
                        "contain '{1}' in method name".format(
                            case.entry.home.func_name, tests_include))
                    continue
            if tests_exclude:
                if tests_exclude in case.entry.home.func_name:
                    logger.debug(
                        "Skipping '{0}' test because it contains"
                        " '{1}' in method name".format(
                            case.entry.home.func_name, tests_exclude))
                    continue
            # Normalize docstring indentation before parsing it.
            docstring = case.entry.home.func_doc or ""
            docstring = "\n".join([s.strip() for s in docstring.split("\n")])
            # Numbered docstring lines become TestRail steps.
            steps = [{"content": s, "expected": "pass"}
                     for s in docstring.split("\n") if s and s[0].isdigit()]
            # Optional "Duration 300s"-style hint in the docstring.
            test_duration = re.search(r"Duration\s+(\d+[s,m])\b", docstring)
            title = docstring.split("\n")[0] or case.entry.home.func_name
            test_group = case.entry.home.func_name
            if case.entry.home.func_name in GROUPS_TO_EXPAND:
                """Expand specified test names with the group names that are
                used in jenkins jobs where this test is started.
                """
                title = " - ".join([title, jenkins_suffix])
                test_group = "_".join(
                    [case.entry.home.func_name, jenkins_suffix])
            test_case = {
                "title": title,
                "type_id": 1,
                "milestone_id": milestone_id,
                "priority_id": 5,
                "estimate": test_duration.group(1) if test_duration else "3m",
                "refs": "",
                "custom_test_group": test_group,
                "custom_test_case_description": docstring or " ",
                "custom_test_case_steps": steps,
            }
            # Deduplicate by custom_test_group across Jenkins groups.
            if not any([x["custom_test_group"] == test_group
                        for x in tests]):
                tests.append(test_case)
            else:
                logger.warning(
                    "Testcase '{0}' run in multiple Jenkins jobs!".format(
                        test_group))
    return tests
def run(self):
    """Repeatedly invoke self.callback(*self.args, **self.kwargs) every
    self.interval seconds until it returns falsy (or self.quit is set).

    Exceptions from the callback are logged and the loop continues.
    """
    while not self.quit:
        try:
            cont = self.callback(*self.args, **self.kwargs)
            # A falsy return value stops the loop.
            self.quit = not cont
            time.sleep(self.interval)
        # BUG FIX: the bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt, making the thread/loop uninterruptible.
        except Exception:
            exc = traceback.format_exc()
            logger.warning(exc)
def selected_ride():
    """Look up rides (as passenger and as driver) by phone number and
    render the change-info page."""
    phone = request.form.get("phone")
    # BUG FIX: both result lists were unbound (UnboundLocalError at
    # render) when the query raised; default to empty lists.
    ride_pas = []
    ride_driv = []
    try:
        ride_pas = Passenger.query.filter_by(phone=phone).all()
        ride_driv = Driver.query.filter_by(phone=phone).all()
    except Exception as exc:
        # BUG FIX: '{exc}' was a plain literal, never interpolated.
        logger.warning('postgre_request exeption: %s', exc, exc_info=True)
    return render_template('change_inf.html',
                           driver=ride_driv,
                           passenger=ride_pas)
def execute(self):
    """Run the full sync: collect the xUnit suite, match TestRail cases,
    create a plan/run, and publish the results."""
    xunit_suite, _ = self.get_xunit_test_suite()
    matched = self.find_testrail_cases(xunit_suite)
    if len(matched) == 0:
        logger.warning('No cases matched, program will terminated')
        return
    plan = self.get_or_create_plan()
    run = self.create_test_run(plan, matched)
    run.add_results_for_cases(matched)
    self.print_run_url(run)
def courses(request):
    """Fetch the course list from the content service and render it.

    Raises Exception when the service responds with a non-OK status.
    """
    user = users.session_user_info(request)
    res = requests.get(f"{endpoints.COURSES_ENDPOINT}")
    if res.status_code not in OK_CODES:
        logger.warning(
            f"url:{endpoints.COURSES_ENDPOINT} - get_data:{res.json()}")
        raise Exception(f"Some troubles with request - {res.status_code}")
    courses_data = res.json()['results']
    return render(request, "webui/courses_and_lessons/courses.html",
                  {'courses': courses_data, "user": user})
def valid_forgot_code(id, tm, code):
    """Validate a password-forgot code.

    The code must be less than one hour old and match the value generated
    by gen_forgot(id, tm).  Flashes a user-facing message and returns False
    on either failure; returns True when valid.
    """
    age = int(time.time()) - int(tm)
    # valid for only one hour
    if age > 3600:
        flash("Expired password forgot code!")
        logger.warning("Expired password forgot code! id: %s" % id)
        return False
    if gen_forgot(id, tm) != code:
        flash("Invalid password forgot code!")
        return False
    return True
def find_testrail_cases(self, xunit_suite):
    """Match xUnit cases to TestRail cases by custom_test_group, attach
    each result, and narrow the cases collection to the matched subset.

    Returns the (mutated) TestRail cases collection.
    """
    cases = self.suite.cases()
    kept = []
    for xunit_case in xunit_suite.test_cases:
        found = cases.find(custom_test_group=xunit_case.name)
        if found is None:
            logger.warning('Testcase for {} not found'.format(
                xunit_case.name))
            continue
        self.add_result_to_case(found, xunit_case)
        kept.append(found)
    # In-place slice assignment keeps the collection object identity.
    cases[:] = kept
    return cases
def get_list_item_total_page(self):
    """Return the number of 100-item result pages reported by the Sycm
    rank listing endpoint (0 when the response cannot be parsed)."""
    list_items_url = 'https://sycm.taobao.com/mq/rank/listItems.json?cateId=122966004&categoryId=122966004&dateRange={yesterday}%7C{yesterday}&dateRangePre={yesterday}|{yesterday}&dateType=recent1&dateTypePre=recent1&device=0&devicePre=0&itemDetailType=1&keyword=&orderDirection=desc&orderField=payOrdCnt&page=1&pageSize=100&rankTabIndex=0&rankType=1&seller=-1&token=aa970f317&view=rank&_=1498206609142'\
        .format(yesterday=get_lastday())
    res = self.session.get(url=list_items_url, headers=HEADERS, verify=False)
    list_items = json.loads(res.text)
    # BUG FIX: total_page was unbound (UnboundLocalError at return) when
    # the response lacked the expected structure; default to 0.
    total_page = 0
    try:
        total_items_count = list_items['content']['data']['recordCount']
        total_page = int(total_items_count / 100)
    except Exception:
        # Parse failure usually means the session is not logged in.
        logger.warning('\033[96m 请先在本机上登录 生意参谋 后,再运行程序 \033[0m')
    return total_page
def dump(self, run_id=None):
    """Aggregate Launchpad bug statistics across runs.

    Without *run_id*, merges bug->tests mappings over all runs; with it,
    uses only that run's mapping.  Resolves each bug through its duplicate
    chain, then returns an OrderedDict keyed by canonical bug id, sorted by
    total affected (failed + blocked) tests, descending.
    """
    stats = dict()
    # BUG FIX: joint_bugs_statistics was unbound (NameError) when run_id
    # was given but absent from self.bugs_statistics.
    joint_bugs_statistics = dict()
    if not run_id:
        for run in self.bugs_statistics:
            for bug, tests in self.bugs_statistics[run].items():
                if bug in joint_bugs_statistics:
                    joint_bugs_statistics[bug].update(tests)
                else:
                    joint_bugs_statistics[bug] = tests
    else:
        for _run_id, _stats in self.bugs_statistics.items():
            if _run_id == run_id:
                joint_bugs_statistics = _stats
    for bug_id in joint_bugs_statistics:
        try:
            # Resolve duplicate chains to the canonical bug.
            lp_bug = LaunchpadBug(bug_id).get_duplicate_of()
        except KeyError:
            logger.warning("Bug with ID {0} not found! Most probably it's "
                           "private or private security.".format(bug_id))
            continue
        bug_target = inspect_bug(lp_bug)
        if lp_bug.bug.id in stats:
            stats[lp_bug.bug.id]['tests'].update(
                joint_bugs_statistics[bug_id])
        else:
            stats[lp_bug.bug.id] = {
                'title': bug_target['title'],
                'importance': bug_target['importance'],
                'status': bug_target['status'],
                'project': bug_target['project'],
                'link': lp_bug.bug.web_link,
                'tests': joint_bugs_statistics[bug_id]
            }
        stats[lp_bug.bug.id]['failed_num'] = len([
            t for t, v in stats[lp_bug.bug.id]['tests'].items()
            if not v['blocked']
        ])
        stats[lp_bug.bug.id]['blocked_num'] = len([
            t for t, v in stats[lp_bug.bug.id]['tests'].items()
            if v['blocked']
        ])
    return OrderedDict(
        sorted(stats.items(),
               key=lambda x: (x[1]['failed_num'] + x[1]['blocked_num']),
               reverse=True))
def check_block(block):
    """
    Check if block hash remain the same. If hash change, delete and update
    all the related info.
    """
    chain = ChainData()
    ldb = db.DataBase()
    # BUG FIX (idiom): the local was named `hash`, shadowing the builtin.
    stored_hash = ldb.get_block_hash(block)
    if chain.get_blockhash(block) != stored_hash:
        # Only purge when we actually had something recorded for this block.
        if block and stored_hash:
            logger.warning("Block %s change hash" % block)
            ldb.delete_block_hash(block, stored_hash)
        # Re-index the block's output balances under the current hash.
        balances, blockhash = chain.getblock_out_balances(block)
        for balance in balances:
            ldb.add_output(block, balance[0], balance[1], blockhash)
def alarm_set():
    """Handle the alarm-set form: parse HH:MM and persist it, then return
    to the home view either way."""
    logger.info("ALARM FORM: {}".format(request.form))
    alarm_time = request.form.get("time")
    if alarm_time:
        hour, minute = alarm_time.split(":")
        DB.set_alarm_time(int(hour), int(minute))
    else:
        logger.warning("Time not received")
    return redirect('/')
def crypto_in():
    """IPN webhook for incoming crypto deposits.

    Authenticates the POST via the shared IPN_AUTH secret, resolves the
    deposit address to a user, and credits the user with the difference
    between the confirmed on-chain amount and their recorded net deposits.
    Echoes the received data back as JSON; rejects anything else with 401.
    """
    if request.method == "POST":
        auth = request.form.get("ipn_auth")
        # Shared-secret check; unauthorized notifiers are rejected outright.
        if auth != IPN_AUTH:
            abort(401)
        address = request.form.get("address")
        confirmed = float(request.form.get("confirmed"))
        unconfirmed = float(request.form.get("unconfirmed"))
        logger.warning("IPN address {} -- confirmed {} -- unconfirmed {}".format(address, confirmed, unconfirmed))
        session = database.Session(autocommit=False)
        user = CryptoAddress.get_address_user(address=address, session=session)
        if user:
            logger.warning("IPN for user {} -- address {}".format(user, address))
            dep = UserTransactions.get_user_netdeposits(user, session=session)
            # Only the delta beyond what is already recorded is credited,
            # so repeated IPNs for the same balance are idempotent.
            if confirmed > dep:
                UserTransactions.add_transaction(user, confirmed-dep, "deposit", reference="IPN", session=session)
                logger.warning("Deposit confirmed! user {} -- address {} -- amount {}".format(user, address, confirmed-dep))
            else:
                logger.warning("Confirmed is lower than net deposit yet. User {} -- address {} -- conf {} -- dep {} "
                               .format(user, address, confirmed, dep))
        else:
            logger.error("Address {} don't match any user.".format(address))
        data = {
            "address": address,
            "confirmed": confirmed,
            "unconfirmed": unconfirmed,
            "ipn_auth": auth
        }
        return jsonify(data)
    else:
        logger.error("Invalid request at IPN url!")
        abort(401)
def send_restore_message(email_address, email_text):
    """Send the password-restore email; raise on any delivery problem.

    Returns True when exactly one message was sent.
    """
    try:
        sent = send_mail("Password restore", email_text, DEFAULT_MAIL_NAME,
                         [email_address], fail_silently=False)
    except Exception as e:
        logger.warning(f"Problem with sending email - {e}")
        raise Exception(f"Problem with sending email - {e}")
    if sent != 1:
        logger.warning("Some problems with sending email")
        raise Exception("Some problems with sending email")
    return True
def delete():
    """Delete a Driver (when 'flag' is set) or Passenger row by the id in
    the 'delete' query argument, then render the start page."""
    flag = request.args.get('flag')
    # Renamed from `id` — it shadowed the builtin.
    row_id = request.args.get('delete')
    # The two branches differed only in the model; collapsed into one.
    model = Driver if flag else Passenger
    try:
        model.query.filter_by(id=row_id).delete()
    except Exception as exc:
        # BUG FIX: '{exc}' was a plain literal, never interpolated.
        logger.warning('postgre_del exeption: %s', exc, exc_info=True)
    db.session.commit()
    return render_template("start_page.html")
def dump(self, run_id=None):
    """Aggregate Launchpad bug statistics across runs.

    Without *run_id*, merges bug->tests mappings over all runs; with it,
    uses only that run's mapping.  Resolves each bug through its duplicate
    chain, then returns an OrderedDict keyed by canonical bug id, sorted by
    total affected (failed + blocked) tests, descending.
    """
    stats = dict()
    # BUG FIX: joint_bugs_statistics was unbound (NameError) when run_id
    # was given but absent from self.bugs_statistics.
    joint_bugs_statistics = dict()
    if not run_id:
        for run in self.bugs_statistics:
            for bug, tests in self.bugs_statistics[run].items():
                if bug in joint_bugs_statistics:
                    joint_bugs_statistics[bug].update(tests)
                else:
                    joint_bugs_statistics[bug] = tests
    else:
        for _run_id, _stats in self.bugs_statistics.items():
            if _run_id == run_id:
                joint_bugs_statistics = _stats
    for bug_id in joint_bugs_statistics:
        try:
            # Resolve duplicate chains to the canonical bug.
            lp_bug = LaunchpadBug(bug_id).get_duplicate_of()
        except KeyError:
            logger.warning("Bug with ID {0} not found! Most probably it's "
                           "private or private security.".format(bug_id))
            continue
        bug_target = inspect_bug(lp_bug)
        if lp_bug.bug.id in stats:
            stats[lp_bug.bug.id]['tests'].update(
                joint_bugs_statistics[bug_id])
        else:
            stats[lp_bug.bug.id] = {
                'title': bug_target['title'],
                'importance': bug_target['importance'],
                'status': bug_target['status'],
                'project': bug_target['project'],
                'link': lp_bug.bug.web_link,
                'tests': joint_bugs_statistics[bug_id]
            }
        stats[lp_bug.bug.id]['failed_num'] = len(
            [t for t, v in stats[lp_bug.bug.id]['tests'].items()
             if not v['blocked']])
        stats[lp_bug.bug.id]['blocked_num'] = len(
            [t for t, v in stats[lp_bug.bug.id]['tests'].items()
             if v['blocked']])
    return OrderedDict(sorted(stats.items(),
                              key=lambda x: (x[1]['failed_num'] +
                                             x[1]['blocked_num']),
                              reverse=True))
class SQLServerHandler(object):
    """an instance to query and modify data in the sqlserver"""

    def __init__(self):
        # Opens the connection eagerly; a cursor is created immediately.
        # NOTE(review): if __connect exhausts its retries it returns None,
        # and self.conn.cursor() will raise AttributeError — confirm the
        # intended failure behavior.
        super(SQLServerHandler, self).__init__()
        self.conn = self.__connect()
        self.cursor = self.conn.cursor()

    def __del__(self):
        # Best-effort cleanup on garbage collection.
        self.conn.close()

    def __connect(self):
        # Retry the connection up to s['reconnect_cnt'] (default 3) times;
        # returns the live connection, or implicitly None on exhaustion.
        conn_cnt = 0
        logger.info('trying to connect to sqlserver on %s:%s' %
                    (s.get('host'), s.get('port')))
        while conn_cnt < s.get('reconnect_cnt', 3):
            try:
                conn = pymssql.connect(host=s.get('host'), port=s.get('port'), user=s.get('user'),\
                    password=s.get('password'), database=s.get('database'), charset=s.get('charset'))
                return conn
            except Exception, e:
                # add a specified exception
                conn_cnt += 1
                logger.debug('connecting failed, times to reconnect: %d' %
                             conn_cnt)
                logger.warning(
                    'unable to establish a connection, waiting for the next time')
def setting_recup():  # Function to read parameter settings
    """Parse CLI arguments, fall back to the webcam profile when no input
    path is given, and load the model.

    Returns (args, model); exits with status 3 when the model file cannot
    be located.
    """
    args = parser_generation()
    logger.setLevel(args.verbose.upper())
    if args.i_path is None and args.profile != 2:
        logger.warning(
            "\nCannot analyse an image neither a video without a path\nSwitching to Webcam..."
        )
        args.profile = 2
    try:
        model = load_model(args.model_path)
    except OSError:
        logger.critical("\nCannot localize model\nLeaving...")
        sys.exit(3)
    return args, model
def change():
    """Load a passenger or driver row (by the 'a'/'b' query args) for the
    edit page and render it."""
    passenger = request.args.get('a')
    driver = request.args.get('b')
    # BUG FIX: both result lists were unbound (UnboundLocalError at
    # render) when the query raised; default to empty lists.
    ride_pas = []
    ride_driv = []
    try:
        ride_pas = Passenger.query.filter_by(id=passenger).all()
        ride_driv = Driver.query.filter_by(id=driver).all()
    except Exception as exc:
        # BUG FIX: '{exc}' was a plain literal, never interpolated.
        logger.warning('postgre_request exeption: %s', exc, exc_info=True)
    # BUG FIX: `idd` was unbound when neither arg was supplied; now None.
    idd = passenger or driver
    return render_template("editor.html",
                           passenger=ride_pas,
                           driver=ride_driv,
                           id=idd)
def check_expire(datetime_now: int, datetime: int) -> bool:
    """Return True when the elapsed time since *datetime* is still strictly
    below max_diff seconds, False when it has expired.

    Raises:
        Exception: when the delta is zero or negative (clock skew or a
            token timestamp from the future).
    """
    delta = datetime_now - datetime
    if delta <= 0:
        # BUG FIX: the original f-string referenced the undefined name
        # `time_from_token`, which itself raised NameError on this path.
        logger.warning(
            f"SOME WRONG WITH DELTA {datetime}, {datetime_now}, {delta}")
        raise Exception("SOME WRONG WITH DELTA", datetime, datetime_now,
                        delta)
    # Simplified from three redundant comparisons (>, ==, <): only a delta
    # strictly below max_diff is still valid.  The dangling `pass` is gone.
    return delta < max_diff
def crop(img_name, dst_name, region):
    """Crop *region* out of the image *img_name* and save it to *dst_name*.

    The region is normalized first; serious bounding-box errors abort the
    crop (returns -1), while trivial ones are corrected using the error's
    recommended box and the crop still proceeds.

    Returns -1 on failure; implicitly None on success.
    """
    from PIL import Image
    im = Image.open(img_name)
    try:
        xmin, ymin, xmax, ymax = normalize(region, im.width, im.height)
    except BoundingBoxError as e:
        # SERIOUS/WARNING errors are unrecoverable for this image.
        if e.code == BoundingBoxError.SERIOUS or e.code == BoundingBoxError.WARNING:
            logger.error("unable to crop {0} for {1}".format(img_name, e))
            return -1
        # TRIVIAL errors carry a corrected box we can use instead.
        if e.code == BoundingBoxError.TRIVIAL:
            logger.warning("found error {1} for {0}, but cropped still".format(img_name, e))
            xmin, ymin, xmax, ymax = e.recommend
    region_im = im.crop((xmin, ymin, xmax, ymax))
    region_im.save(dst_name)
def _remove_special_tokens(self, flow):
    """Strip the leading (BOS) token, the EOS token, and trailing padding
    from a decoded token *flow* (a 1-D numpy array).

    Positions from the EOS index onward are overwritten with the
    quantizer's non-packet value.
    """
    # rm first token
    flow = flow[1:]
    try:
        # First occurrence of the EOS token after the BOS was dropped.
        flow_end_idx = np.where(flow == self.eos_token_id)[0][0]
    except IndexError:
        flow_end_idx = flow.shape[0] - 1
        logger.warning('could not find EOS token, removing the last one')
    # NOTE(review): both branches below are identical (drop the final
    # element); the else branch was presumably meant to do something
    # different (e.g. trim at flow_end_idx) — confirm the intent.
    if flow_end_idx == flow.shape[0] - 1:
        flow = flow[:-1]
    else:
        flow = flow[:-1]
    # replace pad token with quantizer's non packet value for consistency
    flow[flow_end_idx:] = self.packet_quantizer.non_packet_value
    return flow
def va_stat(self,time_id):
    """Query the production database for this table's last vacuum/analyze
    timestamps (Python 2).

    Configures a genericStat record bound to this object's oid and the
    given time_id, then runs the pg_stat query.

    NOTE(review): the query result is never fetched and the configured
    `va_stat` record is never used after setup — this looks incomplete or
    truncated; confirm against the original file.
    """
    va_stat=genericStat()
    va_stat.set_fk_field('tn_id')
    va_stat.set_table_name('table_va_stat')
    va_stat.set_fk_value(self.id)
    va_stat.set_time_id(time_id)
    va_stat_q="""SELECT pg_stat_get_last_vacuum_time(oid) AS last_vacuum, pg_stat_get_last_autovacuum_time(oid) AS last_autovacuum, pg_stat_get_last_analyze_time(oid) AS last_analyze, pg_stat_get_last_autoanalyze_time(oid) AS last_autoanalyze FROM pg_class WHERE oid={0}""".format(self.db_fields['obj_oid'])
    try:
        self.prod_cursor.execute(va_stat_q)
    except Exception, e:
        # psycopg-style error detail; bail out quietly on failure.
        logger.warning("Details: {0}".format(e.pgerror))
        return
def execute(self):
    """Publish every ITRR result suite to TestRail.

    For each suite: match TestRail cases, ensure a plan and a test run
    exist, and attach the case results.  Aborts entirely when any suite
    yields no matched cases.
    """
    xunit_suites = source.TrepSource().get_itrr()
    if not xunit_suites.test_results:
        logger.info("Empty suite.")
        return
    for xunit_suite in xunit_suites.test_results:
        cases = self.find_testrail_cases(xunit_suite)
        if len(cases) == 0:
            # NOTE(review): this returns from the whole method, skipping any
            # remaining suites — confirm `continue` was not intended.
            logger.warning('No cases matched, program will terminated')
            return
        plan = self.get_or_create_plan()
        test_run = self.get_check_create_test_run(plan, cases)
        if not test_run:
            logger.error('Empty test run')
        else:
            # test_run = self.get_or_create_test_run(plan, cases)
            # self.check_cases_inside_run(plan, test_run, cases)
            test_run.add_results_for_cases(cases)
            self.print_run_url(test_run)
def post(self, aid):
    """Redeem award *aid* for the current user's pet (Python 2 / Tornado).

    Validates that the user has a pet, the award is online and in stock,
    and the pet has enough points; then pops an award code from Redis and
    records the win.  The code is pushed back on failure so it is not lost.
    """
    uid = self.current_user
    pet = Pet.findone(uid=uid)
    # No pet has been claimed yet
    if not pet:
        self.set_status(400)
        result = dict(code=40021, msg=u'尚未领取宝贝哦')
        return self.jsonify(result)
    award = Award.findone(id=aid, status=1)
    # Award has been taken offline
    if not award:
        self.set_status(400)
        result = dict(code=40022, msg=u'奖品已经下线,请联系客服哦')
        return self.jsonify(result)
    # Remaining award codes are kept in a Redis list per award id.
    key = 'aid:{}'.format(award['id'])
    if int(rdb.llen(key)) == 0:
        self.set_status(400)
        result = dict(code=40022, msg=u'奖品已经领取完')
        return self.jsonify(result)
    # Required points exceed the pet's current points: cannot redeem
    if award['score'] > pet['score']:
        self.set_status(400)
        result = dict(code=40022, msg=u'领取点数不符合哦')
        return self.jsonify(result)
    # Deduct points and hand out an award code
    award_code = rdb.lpop(key)
    if Pet.wining(uid=uid, aid=award['id'], score=award['score'],
                  code=award_code):
        logger.info("insert winning success award code {}".format(award_code))
        result = dict(code=40027, msg=u'兑换成功!', award_code=award_code,
                      provide=award['provide'])
    else:
        # Persisting the win failed: push the code back so it isn't lost.
        rdb.lpush(key, award_code)
        logger.warning('insert wining failed')
        self.set_status(500)
        result = dict(code=40012, msg=u'更新服务器错误, 请稍后重试!')
    # TODO test code
    # result = dict(code=40027, msg=u'兑换成功!', award_code='11111', provide='provide')
    return self.jsonify(result)
def annotated_label_region(labels, img_name):
    """Return (label, normalized_region) for the first label in *labels*
    that has exactly one annotation.

    Raises ValueError on an invalid region or when a label has more than
    one annotation.  Labels with zero annotations are skipped.

    NOTE(review): implicitly returns None when no label has exactly one
    annotation — confirm callers handle that.
    """
    for label, annotations in labels.items():
        if len(annotations) == 1:
            anno = annotations[0]
            #return label, (anno['x1']*2.57, anno['y1']*3.25, anno['x2']*2.57, anno['y2']*3.25)
            try:
                region = normalize((anno['x1'], anno['y1'], anno['x2'], anno['y2']))
            except BoundingBoxError as e:
                # TRIVIAL errors carry a corrected (swapped) box we can use.
                if e.code == BoundingBoxError.TRIVIAL:
                    region = e.recommend
                    logger.warning("trivial error {1} found for {0}, swapped already".format(img_name, e))
                else:
                    logger.error("invalid region annotated for {0} - {1}".format(img_name, e))
                    raise ValueError
            return label, region
        elif len(annotations) == 0:
            pass
        else:
            logger.error('more than 1 markers was annotated for {0}'.format(label))
            raise ValueError
def get_filelist(self, shared_device, root_dir, search=55, pattern="*"): shared_dirs = [root_dir] # extracted until no files existed shared_files = [] if not self.is_connected: self.smb_conn = self.connect() while shared_dirs: path = shared_dirs.pop() # classifies files in the list, try: shared_items = self.smb_conn.listPath(shared_device, path, search, pattern) except OperationFailure, e: logger.error("unable to access the path %s" % root_dir) else: for item in shared_items: file_path = os.path.join(path, item.filename) if item.filename.startswith("."): logger.warning('path %s starts with ".", ignored' % file_path) continue if item.isDirectory: shared_dirs.append(file_path) else: shared_files.append((shared_device, file_path))
def create_stat(self,time_id):
    """Execute this object's pre-built statistics statement against the
    production cursor (Python 2); failures are logged and swallowed.

    NOTE(review): *time_id* is unused here — confirm whether it should be
    bound into the statement.
    """
    try:
        self.prod_cursor.execute(self.stat_stmt)
    except Exception, e:
        # psycopg-style error details.
        logger.warning("Details: {0},{1}".format(e.pgcode,e.pgerror))
        return
def ping_db(self):
    """Keep the database connection alive with a lightweight query."""
    logger.info('ping db conn %s' % id(db))
    db.query("show variables")
    logger.warning("ping database...........")
def warn_file_exists(file_path):
    """Emit a warning when ``file_path`` already exists (it will be overwritten)."""
    if not os.path.exists(file_path):
        return
    logger.warning('File {0} exists and will be '
                   'overwritten!'.format(file_path))
def retrieve_file(self, shared_device, root_dir, file_obj, timeout=60):
    """Download one file from the SMB share into ``file_obj``.

    :param shared_device: name of the SMB share.
    :param root_dir: path of the file on the share.
    :param file_obj: writable file-like object receiving the contents.
    :param timeout: transfer timeout in seconds.
    """
    # Reconnect lazily if the connection was dropped since the last call.
    if not self.is_connected:
        logger.warning("connection to smb was reset, reconnecting...")
        self.smb_conn = self.connect()
    self.smb_conn.retrieveFile(shared_device, root_dir, file_obj, timeout)
        # NOTE(review): this try-block is the tail of a connect-attempt
        # method whose signature lies outside this chunk.
        try:
            conn = pymssql.connect(
                host=ss.get("host"),
                port=ss.get("port"),
                user=ss.get("user"),
                password=ss.get("password"),
                database=ss.get("database"),
                charset=ss.get("charset"),
            )
            logger.info("connected to sqlserver")
            return conn
        # TODO:add a specified exception
        except Exception, e:
            # Count the failed attempt; caller retries via connect() below.
            conn_cnt += 1
            logger.info("connecting failed, times to reconnect: %d" % conn_cnt)
            logger.warning("unable to establish a connection, waiting for the next time")
            return None

    def close(self):
        """Close the connection and drop cached handles.

        Raises AttributeError when called on an already-closed object
        (self.conn is None, so .close() is missing).
        """
        try:
            self.conn.close()
            self.conn = None
            self.cursor = None
        except AttributeError, e:
            logger.error("connection closed already, invalid call")
            raise AttributeError

    # guarantee to return a reliable connection
    def connect(self):
        # NOTE(review): loop body continues beyond this chunk.
        while not self.conn:
def __init__(self, filename, ws):
    """Bind the output filename and worksheet, then initialise the data.

    A pre-existing file at ``filename`` is not removed here -- only a
    warning is logged; it will be overwritten on save.
    """
    self.filename = filename
    self.ws = ws
    if os.path.exists(filename):
        logger.warning("File %s exists and will be overwritten!" % filename)
    self.__init_data()
def get_tests_descriptions(milestone_id, tests_include, tests_exclude, groups,
                           default_test_priority):
    """Build TestRail test-case dicts from the registered system-test plan.

    For every Jenkins group in ``groups`` the registry plan is filtered and
    each enabled case is turned into a TestRail case payload (title, steps
    parsed from the docstring, estimate from a "Duration Ns/Nm" marker).
    ``tests_include``/``tests_exclude`` are substring filters on the case
    name.  Duplicate test groups are reported and skipped.
    Python 2 only (uses ``func_name``/``func_doc``).
    """
    from system_test.tests.actions_base import ActionsBase
    import_tests()
    define_custom_groups()
    plan = TestPlan.create_from_registry(DEFAULT_REGISTRY)
    # Keep a copy: plan.filter() mutates plan.tests and we must reset it
    # before processing the next group.
    all_plan_tests = plan.tests[:]
    tests = []
    for jenkins_suffix in groups:
        group = groups[jenkins_suffix]
        plan.filter(group_names=[group])
        for case in plan.tests:
            if not case.entry.info.enabled:
                continue
            home = case.entry.home
            if not hasattr(case.entry, 'parent'):
                # Not a real case, some stuff needed by template based tests
                continue
            parent_home = case.entry.parent.home
            case_state = case.state
            if issubclass(parent_home, ActionsBase):
                # Template-based (ActionsBase) case: the class is the unit.
                case_name = parent_home.__name__
                test_group = parent_home.__name__
                if any([x['custom_test_group'] == test_group
                        for x in tests]):
                    continue
            else:
                # Plain function-based case.
                case_name = home.func_name
                test_group = case.entry.home.func_name
            if tests_include:
                if tests_include not in case_name:
                    logger.debug("Skipping '{0}' test because it doesn't "
                                 "contain '{1}' in method name"
                                 .format(case_name, tests_include))
                    continue
            if tests_exclude:
                if tests_exclude in case_name:
                    logger.debug("Skipping '{0}' test because it contains"
                                 " '{1}' in method name"
                                 .format(case_name, tests_exclude))
                    continue
            if issubclass(parent_home, ActionsBase):
                # Title line gets the configuration name appended.
                docstring = parent_home.__doc__.split('\n')
                case_state.instance._load_config()
                configuration = case_state.instance.config_name
                docstring[0] = "{0} on {1}".format(docstring[0],
                                                   configuration)
                docstring = '\n'.join(docstring)
            else:
                docstring = home.func_doc or ''
                configuration = None
            docstring = '\n'.join([s.strip() for s in docstring.split('\n')])
            # Numbered docstring lines become TestRail steps.
            steps = [{"content": s, "expected": "pass"} for s in
                     docstring.split('\n') if s and s[0].isdigit()]
            test_duration = re.search(r'Duration\s+(\d+[s,m])\b', docstring)
            title = docstring.split('\n')[0] or case.entry.home.func_name
            if case.entry.home.func_name in GROUPS_TO_EXPAND:
                """Expand specified test names with the group names that are
                used in jenkins jobs where this test is started.
                """
                title = ' - '.join([title, jenkins_suffix])
                test_group = '_'.join([case.entry.home.func_name,
                                       jenkins_suffix])
            test_case = {
                "title": title,
                "type_id": 1,
                "milestone_id": milestone_id,
                "priority_id": default_test_priority,
                "estimate": test_duration.group(1) if test_duration else "3m",
                "refs": "",
                "custom_test_group": test_group,
                "custom_test_case_description": docstring or " ",
                "custom_test_case_steps": steps
            }
            if not any([x['custom_test_group'] == test_group
                        for x in tests]):
                tests.append(test_case)
            else:
                logger.warning("Testcase '{0}' run in multiple Jenkins jobs!"
                               .format(test_group))
        # Restore the unfiltered plan for the next group iteration.
        plan.tests = all_plan_tests[:]
    return tests
def get_previous_entries(language=None, country=None, category=None,
                         feed=None, limit=10, end_id=None):
    """Find up to ``limit`` entries published before ``end_id``.

    Strategy: serve from the redis cache (a sorted set of entry ids plus
    per-id string payloads) when possible; top up from MongoDB when the
    cache does not hold enough items; fall back entirely to MongoDB when
    redis is unavailable or empty (signalled via ConnectionError).
    Returns a list of stringified entry dicts, or None on bad arguments.
    Python 2 only (``iteritems``, ``json.dumps(..., encoding=...)``).
    """
    # Argument validation: all four identifiers are required.
    if not language or not country or not category or not feed:
        return None
    if language not in LANGUAGES:
        return None
    if country not in COUNTRIES:
        return None
    if limit < 0:
        return None
    # limit the number of items
    if limit > 100:
        limit = 100
    # return list
    entries = []
    category_name = '%s::%s' % (country, category)
    label_name = '%s::%s::%s' % (country, category, feed)
    try:
        # check if redis is alive
        rclient.ping()
        # Resolve the cache key: per-feed first, then per-label.
        # label_name doubles as a flag: None => query by feed, set =>
        # query by label registry later on.
        class_name = 'news::%s::%s' % (language, feed)
        if not rclient.exists(class_name):
            class_name = 'news::%s::%s' % (language, label_name)
            if not rclient.exists(class_name):
                label_name = None
        else:
            # reset label_name as the flag
            label_name = None
        # preprocess end_id
        entry_ids_total = rclient.zcard(class_name)
        end_id_index = 0
        END_ID_IN_MEMORY = False
        limit_in_memory = 0
        if not end_id:
            end_id_index = entry_ids_total
            if entry_ids_total:
                # end_id is assign the most recent one
                end_id = rclient.zrevrange(class_name, 0, 0)[0]
                END_ID_IN_MEMORY = True
                limit_in_memory = entry_ids_total
            else:
                # which is in most cases, pointless
                end_id = None
                END_ID_IN_MEMORY = False
        else:
            end_id_index = rclient.zrank(class_name, end_id)
            # NOTE(review): zrank == 0 (oldest element) is treated as
            # "not in memory" here -- confirm this is intentional.
            END_ID_IN_MEMORY = True if end_id_index > 0 else False
            if END_ID_IN_MEMORY:
                limit_in_memory = rclient.zrank(class_name, end_id)
        if END_ID_IN_MEMORY:
            # see if data in memory suffice
            if limit_in_memory >= limit:
                # purely get from memory
                entry_ids = rclient.zrevrange(
                    class_name, entry_ids_total - end_id_index,
                    entry_ids_total - end_id_index + limit - 1)
                dirty_expired_ids = []
                for entry_id in entry_ids:
                    entry_id_in_memory = rclient.get(entry_id)
                    if entry_id_in_memory:
                        # NOTE(review): eval() on cached payloads fully
                        # trusts redis contents -- consider json instead.
                        entries.append(eval(entry_id_in_memory))
                    else:
                        dirty_expired_ids.append(entry_id)
            else:
                # memory + database
                # memory
                entry_ids = rclient.zrevrange(
                    class_name, entry_ids_total - end_id_index,
                    entry_ids_total - end_id_index + limit_in_memory - 1)
                last_entry_in_memory = None
                dirty_expired_ids = []
                for entry_id in entry_ids:
                    entry_id_in_memory = rclient.get(entry_id)
                    if entry_id_in_memory:
                        last_entry_in_memory = eval(entry_id_in_memory)
                        entries.append(last_entry_in_memory)
                    else:
                        dirty_expired_ids.append(entry_id)
                # NOTE(review): if every cached payload expired,
                # last_entry_in_memory is still None here and the next
                # line raises TypeError -- confirm upstream guarantees.
                tmp = last_entry_in_memory['updated']
                last_entry_in_memory_updated = float(tmp) if tmp else None
                limit_in_database = limit - len(entries)
                # find the remaining items in database
                items = []
                col = Collection(db, language)
                # query only one of its values
                if label_name:
                    # Expand the label to its member feed titles first.
                    feeds = Collection(db, FEED_REGISTRAR)
                    feed_lists = feeds.find(
                        {'%s.%s' % ('labels', label_name): {'$exists': True}},
                        {'feed_title': 1})
                    feed_names = [feed_list['feed_title']
                                  for feed_list in feed_lists]
                    items = col.find(
                        {'updated': {'$lt': last_entry_in_memory_updated},
                         'feed': {'$in': feed_names}}).sort(
                        'updated', -1).limit(limit_in_database)
                else:
                    items = col.find(
                        {'updated': {'$lt': last_entry_in_memory_updated},
                         'feed': feed}).sort(
                        'updated', -1).limit(limit_in_database)
                for item in items:
                    entries.append(item)
            new_entries = []
            for entry in entries:
                # string-ify all the values: ObjectId
                new_item = {}
                for x, y in entry.iteritems():
                    if not x.endswith('_local') and x != 'images':
                        if x != 'updated':
                            new_item[str(x)] = str(y)
                            # remove 'u' in "{u'url':u'xxx'}"
                            if x == 'category_image' or \
                                    x == 'thumbnail_image' or \
                                    x == 'hotnews_image' or x == \
                                    'text_image':
                                image_dumped = json.dumps(y,
                                                          encoding='utf-8')
                                new_item[x] = eval(
                                    image_dumped) if image_dumped and \
                                    image_dumped != "null" else \
                                    "null"
                new_item['updated'] = entry['updated']
                new_entries.append(new_item)
            # expired ids not cleaned found
            if dirty_expired_ids:
                sys.path.append(os.path.join(CODE_BASE, 'newsman'))
                from newsman.watchdog import clean_memory
                clean_memory.clean_by_items(class_name, dirty_expired_ids)
                logger.warning('Memory contains dirty expired items')
            return new_entries
        else:
            # Trigger the database-only fallback below.
            raise ConnectionError(
                'Find nothing about %s in memory' % class_name)
    except ConnectionError:
        # no memory or data in memory are not enough, so query database
        items = []
        col = Collection(db, language)
        if end_id:
            end_id_entry = col.find_one({'_id': ObjectId(end_id)})
            if end_id_entry:
                end_id_updated = float(end_id_entry['updated'])
                if label_name:
                    feeds = Collection(db, FEED_REGISTRAR)
                    feed_lists = feeds.find(
                        {'%s.%s' % ('labels', label_name): {'$exists': True}},
                        {'feed_title': 1})
                    feed_names = [feed_list['feed_title']
                                  for feed_list in feed_lists]
                    items = col.find({'updated': {'$lt': end_id_updated},
                                      'feed': {'$in': feed_names}}).sort(
                        'updated', -1).limit(limit)
                else:
                    items = col.find({'updated': {'$lt': end_id_updated},
                                      'feed': feed}).sort(
                        'updated', -1).limit(limit)
            else:
                # Unknown end_id: nothing sensible to page from.
                return None
        else:
            # get the most recent limit number of entries
            if label_name:
                feeds = Collection(db, FEED_REGISTRAR)
                feed_lists = feeds.find(
                    {'%s.%s' % ('labels', label_name): {'$exists': True}},
                    {'feed_title': 1})
                feed_names = [feed_list['feed_title']
                              for feed_list in feed_lists]
                items = col.find({'feed': {'$in': feed_names}}).sort(
                    'updated', -1).limit(limit)
            else:
                items = col.find({'feed': feed}).sort(
                    'updated', -1).limit(limit)
        for item in items:
            # string-ify all the values: ObjectId
            new_item = {}
            for x, y in item.iteritems():
                if not x.endswith('_local') and x != 'images':
                    if x != 'updated':
                        new_item[str(x)] = str(y)
                        # remove 'u' in "{u'url':u'xxx'}"
                        if x == 'category_image' or x == 'thumbnail_image' \
                                or x == 'hotnews_image' or \
                                x == 'text_image':
                            image_dumped = json.dumps(y, encoding='utf-8')
                            new_item[x] = eval(
                                image_dumped) if image_dumped and \
                                image_dumped != "null" else "null"
            new_item['updated'] = item['updated']
            entries.append(new_item)
        return entries
def get_latest_entries(language=None, country=None, category=None, feed=None,
                       limit=10, start_id=None):
    """Find up to ``limit`` news items newer than ``start_id``.

    Mirrors get_previous_entries but pages forward: iteration stops as
    soon as ``start_id`` is reached.  Serves from the redis cache when
    possible, tops up (or falls back) to MongoDB via the ConnectionError
    path.  Returns a list of stringified entry dicts, or None on bad
    arguments.  Python 2 only (``iteritems``, ``json.dumps(..., encoding)``).
    """
    # Argument validation: all four identifiers are required.
    if not language or not country or not category or not feed:
        return None
    if language not in LANGUAGES:
        return None
    if country not in COUNTRIES:
        return None
    if limit < 0:
        return None
    # limit the number of items
    if limit > 100:
        limit = 100
    # return list
    entries = []
    category_name = '%s::%s' % (country, category)
    label_name = '%s::%s::%s' % (country, category, feed)
    try:
        # check if redis is alive
        rclient.ping()
        # Resolve the cache key: per-feed first, then per-label.
        # label_name doubles as a flag for the label-registry query below.
        class_name = 'news::%s::%s' % (language, feed)
        if not rclient.exists(class_name):
            class_name = 'news::%s::%s' % (language, label_name)
            if not rclient.exists(class_name):
                label_name = None
        else:
            # reset label_name as the flag
            label_name = None
        # get the latest entries
        entry_ids_total = rclient.zcard(class_name)
        if entry_ids_total:
            # memory (partially) meets the limit
            if entry_ids_total >= limit:
                entry_ids = rclient.zrevrange(class_name, 0, limit - 1)
                dirty_expired_ids = []
                for entry_id in entry_ids:
                    # Stop as soon as the caller's newest-known id shows up.
                    if start_id and entry_id == start_id:
                        return entries
                    entry_id_in_memory = rclient.get(entry_id)
                    if entry_id_in_memory:
                        # NOTE(review): eval() on cached payloads fully
                        # trusts redis contents -- consider json instead.
                        entries.append(eval(entry_id_in_memory))
                    else:
                        dirty_expired_ids.append(entry_id)
            else:
                # memory + database
                entry_ids = rclient.zrevrange(
                    class_name, 0, entry_ids_total - 1)
                last_entry_in_memory = None
                dirty_expired_ids = []
                for entry_id in entry_ids:
                    if start_id and entry_id == start_id:
                        return entries
                    entry_id_in_memory = rclient.get(entry_id)
                    if entry_id_in_memory:
                        last_entry_in_memory = eval(entry_id_in_memory)
                        entries.append(last_entry_in_memory)
                    else:
                        dirty_expired_ids.append(entry_id)
                # compute boundary variables
                # NOTE(review): last_entry_in_memory may still be None if
                # every cached payload expired -- the next line would then
                # raise TypeError; confirm upstream guarantees.
                tmp = last_entry_in_memory['updated']
                last_entry_in_memory_updated = float(tmp) if tmp else None
                limit_in_database = limit - len(entries)
                # database
                items = []
                col = Collection(db, language)
                # query only one of its values
                if label_name:
                    # Expand the label to its member feed titles first.
                    feeds = Collection(db,
                                       FEED_REGISTRAR)
                    feed_lists = feeds.find(
                        {'%s.%s' % ('labels', label_name): {'$exists': True}},
                        {'feed_title': 1})
                    feed_names = [feed_list['feed_title']
                                  for feed_list in feed_lists]
                    items = col.find(
                        {'updated': {'$lt': last_entry_in_memory_updated},
                         'feed': {'$in': feed_names}}).sort('updated',
                                                            -1).limit(
                        limit_in_database)
                else:
                    items = col.find(
                        {'updated': {'$lt': last_entry_in_memory_updated},
                         'feed': feed}).sort(
                        'updated', -1).limit(limit_in_database)
                for item in items:
                    entries.append(item)
            new_entries = []
            for entry in entries:
                # string-ify all the values: ObjectId
                new_item = {}
                for x, y in entry.iteritems():
                    if not x.endswith('_local') and x != 'images':
                        if x != 'updated':
                            new_item[str(x)] = str(y)
                            # remove 'u' in "{u'url':u'xxx'}"
                            if x == 'category_image' or \
                                    x == 'thumbnail_image' or \
                                    x == 'hotnews_image' or x == \
                                    'text_image':
                                image_dumped = json.dumps(y,
                                                          encoding='utf-8')
                                new_item[x] = eval(
                                    image_dumped) if image_dumped and \
                                    image_dumped != "null" else \
                                    "null"
                new_item['updated'] = entry['updated']
                new_entries.append(new_item)
            # expired ids not cleaned found
            if dirty_expired_ids:
                sys.path.append(os.path.join(CODE_BASE, 'newsman'))
                from newsman.watchdog import clean_memory
                clean_memory.clean_by_items(class_name, dirty_expired_ids)
                logger.warning('Memory contains dirty expired items')
            return new_entries
        else:
            # Trigger the database-only fallback below.
            raise ConnectionError(
                'Find nothing about %s in memory' % class_name)
    except ConnectionError:
        # query the database
        items = []
        col = Collection(db, language)
        if label_name:
            feeds = Collection(db, FEED_REGISTRAR)
            feed_lists = feeds.find(
                {'%s.%s' % ('labels', label_name): {'$exists': True}},
                {'feed_title': 1})
            feed_names = [feed_list['feed_title']
                          for feed_list in feed_lists]
            items = col.find({'feed': {'$in': feed_names}}).sort(
                'updated', -1).limit(limit)
        else:
            items = col.find({'feed': feed}).sort('updated', -1).limit(limit)
        for item in items:
            # Stop paging once the caller's newest-known id is reached.
            if start_id and str(item['_id']) == start_id:
                return entries
            # string-ify all the values: ObjectId
            new_item = {}
            for x, y in item.iteritems():
                if not x.endswith('_local') and x != 'images':
                    if x != 'updated':
                        new_item[str(x)] = str(y)
                        # remove 'u' in "{u'url':u'xxx'}"
                        if x == 'category_image' or x == 'thumbnail_image' \
                                or x == 'hotnews_image' or \
                                x == 'text_image':
                            image_dumped = json.dumps(y, encoding='utf-8')
                            new_item[x] = eval(
                                image_dumped) if image_dumped and \
                                image_dumped != "null" else "null"
            new_item['updated'] = item['updated']
            entries.append(new_item)
        return entries
def get_tests_results(systest_build, os):
    """Convert a Jenkins system-test build report into TestResult objects.

    Groups the raw junit cases by ``className``: a class with a single
    child becomes one plain TestResult; a class with several children is
    treated as a multi-step test whose "Step..." cases become TestRail
    steps (test fails if any step failed).

    :param systest_build: dict with at least ``name`` and ``number`` of
        the Jenkins build.
    :param os: OS label passed through to expand_test_group
        (NOTE: shadows the stdlib ``os`` module inside this function).
    :returns: list of TestResult objects.
    """
    tests_results = []
    test_build = Build(systest_build['name'], systest_build['number'])
    run_test_data = test_build.test_data()
    test_classes = {}
    # First pass: bucket cases per class and accumulate counters.
    for one in run_test_data['suites'][0]['cases']:
        className = one['className']
        if className not in test_classes:
            test_classes[className] = {}
            test_classes[className]['child'] = []
            test_classes[className]['duration'] = 0
            test_classes[className]["failCount"] = 0
            test_classes[className]["passCount"] = 0
            test_classes[className]["skipCount"] = 0
        else:
            # className == name marks the duplicate "class-level" entry.
            if one['className'] == one['name']:
                logger.warning("Found duplicate test in run - {}".format(
                    one['className']))
                continue
        test_class = test_classes[className]
        test_class['child'].append(one)
        test_class['duration'] += float(one['duration'])
        if one['status'].lower() in ('failed', 'error'):
            test_class["failCount"] += 1
        if one['status'].lower() == 'passed':
            test_class["passCount"] += 1
        if one['status'].lower() == 'skipped':
            test_class["skipCount"] += 1
    # Second pass: build one TestResult per class.
    for klass in test_classes:
        klass_result = test_classes[klass]
        if len(klass_result['child']) == 1:
            # Single-case class: map the case directly.
            test = klass_result['child'][0]
            if check_untested(test):
                continue
            check_blocked(test)
            test_result = TestResult(
                name=test['name'],
                group=expand_test_group(test['className'],
                                        systest_build['name'],
                                        os),
                status=test['status'].lower(),
                # +1 rounds sub-second durations up to a nonzero value.
                duration='{0}s'.format(int(test['duration']) + 1),
                url='{0}testReport/(root)/{1}/'.format(test_build.url,
                                                       test['name']),
                version='_'.join(
                    [test_build.build_data["id"]] + (
                        test_build.build_data["description"] or
                        test['name']).split()),
                description=test_build.build_data["description"] or
                    test['name'],
                comments=test['skippedMessage']
            )
        else:
            # Multi-case class: "Step..." children become TestRail steps.
            case_steps = []
            test_duration = sum(
                [float(c['duration']) for c in klass_result['child']])
            steps = [c for c in klass_result['child']
                     if c['name'].startswith('Step')]
            steps = sorted(steps, key=lambda k: k['name'])
            test_name = steps[0]['className']
            test_group = steps[0]['className']
            test_comments = None
            is_test_failed = any([s['status'].lower() in ('failed', 'error')
                                  for s in steps])
            for step in steps:
                if step['status'].lower() in ('failed', 'error'):
                    case_steps.append({
                        "content": step['name'],
                        "actual": step['errorStackTrace'] or
                                  step['errorDetails'],
                        "status": step['status'].lower()})
                    # Last failing step wins the comment slot.
                    test_comments = "{err}\n\n\n{stack}".format(
                        err=step['errorDetails'],
                        stack=step['errorStackTrace'])
                else:
                    case_steps.append({
                        "content": step['name'],
                        "actual": "pass",
                        "status": step['status'].lower()
                    })
            test_result = TestResult(
                name=test_name,
                group=expand_test_group(test_group,
                                        systest_build['name'],
                                        os),
                status='failed' if is_test_failed else 'passed',
                duration='{0}s'.format(int(test_duration) + 1),
                url='{0}testReport/(root)/{1}/'.format(test_build.url,
                                                       test_name),
                version='_'.join(
                    [test_build.build_data["id"]] + (
                        test_build.build_data["description"] or
                        test_name).split()),
                description=test_build.build_data["description"] or
                    test_name,
                comments=test_comments,
                steps=case_steps,
            )
        tests_results.append(test_result)
    return tests_results
def main():
    """CLI entry point: generate (and optionally publish) bug statistics
    for the TestRuns of a TestRail TestPlan.

    The plan is selected either by explicit ``plan_id`` or discovered from
    a Jenkins runner job name + build number.  Returns 1 when no plan can
    be resolved; None otherwise.
    """
    parser = argparse.ArgumentParser(
        description="Generate statistics for bugs linked to TestRun. Publish "
                    "statistics to testrail if necessary."
    )
    parser.add_argument('plan_id', type=int, nargs='?', default=None,
                        help='Test plan ID in TestRail')
    parser.add_argument('-j', '--job-name',
                        dest='job_name', type=str, default=None,
                        help='Name of Jenkins job which runs tests (runner). '
                             'It will be used for TestPlan search instead ID')
    parser.add_argument('-n', '--build-number', dest='build_number',
                        default='latest',
                        help='Jenkins job build number')
    parser.add_argument('-r', '--run-id',
                        dest='run_ids', type=str, default=None,
                        help='(optional) IDs of TestRun to check (skip other)')
    parser.add_argument('-b', '--handle-blocked', action="store_true",
                        dest='handle_blocked', default=False,
                        help='Copy bugs links to downstream blocked results')
    parser.add_argument('-s', '--separate-runs', action="store_true",
                        dest='separate_runs', default=False,
                        help='Create separate statistics for each test run')
    parser.add_argument('-p', '--publish', action="store_true",
                        help='Publish statistics to TestPlan description')
    parser.add_argument('-o', '--out-file', dest='output_file',
                        default=None, type=str,
                        help='Path to file to save statistics as JSON and/or '
                             'HTML. Filename extension is added automatically')
    parser.add_argument('-H', '--html', action="store_true",
                        help='Save statistics in HTML format to file '
                             '(used with --out-file option)')
    parser.add_argument('-q', '--quiet', action="store_true",
                        help='Be quiet (disable logging except critical) '
                             'Overrides "--verbose" option.')
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="Enable debug logging.")
    args = parser.parse_args()
    # --quiet deliberately wins over --verbose.
    if args.verbose:
        logger.setLevel(DEBUG)
    if args.quiet:
        logger.setLevel(CRITICAL)
    testrail_project = get_testrail()
    # Resolve the plan id from the Jenkins runner job when requested.
    if args.job_name:
        logger.info('Inspecting {0} build of {1} Jenkins job for TestPlan '
                    'details...'.format(args.build_number, args.job_name))
        test_plan_name = generate_test_plan_name(args.job_name,
                                                 args.build_number)
        test_plan = testrail_project.get_plan_by_name(test_plan_name)
        if test_plan:
            args.plan_id = test_plan['id']
        else:
            logger.warning('TestPlan "{0}" not found!'.format(test_plan_name))
    if not args.plan_id:
        logger.error('There is no TestPlan to process, exiting...')
        return 1
    run_ids = () if not args.run_ids else map(int, args.run_ids.split(','))
    generator = StatisticsGenerator(testrail_project,
                                    args.plan_id,
                                    run_ids,
                                    args.handle_blocked)
    generator.generate()
    stats = generator.dump()
    if args.publish:
        logger.debug('Publishing bugs statistics to TestRail..')
        generator.publish(stats)
    if args.output_file:
        # html is either the rendered HTML or the falsy args.html flag,
        # so save_stats_to_file can branch on it directly.
        html = generator.dump_html(stats) if args.html else args.html
        save_stats_to_file(stats, args.output_file, html)
        if args.separate_runs:
            for run in generator.test_runs_stats:
                file_name = '{0}_{1}'.format(args.output_file, run['id'])
                stats = generator.dump(run_id=run['id'])
                html = (generator.dump_html(stats, run['id']) if args.html
                        else args.html)
                save_stats_to_file(stats, file_name, html)
    logger.info('Statistics generation complete!')
def ping_db():
    """Keep the MySQL connection alive by issuing a lightweight query."""
    # IMPROVED: lazy %-style logging args -- the message is only formatted
    # when the log level is actually enabled (logging best practice).
    logger.info('ping mysql db conn %s', id(db))
    db.query('SHOW VARIABLES')
    logger.warning('ping databases ... ...')