def test_ete_error_message_for_invalid_bill_id(client, sess):
    """An invalid bill id on /reports/111 yields a 404 response.

    Fix: for a GET request, parameters passed via ``data=`` end up in the
    request body where the view never sees them; they must be sent as the
    query string (as the sibling test of the same name already does).
    """
    query_string = {
        "bill_id": "0",
    }
    response = client.get("/reports/111", query_string=query_string)
    match(response, 404)
def listen(seconds=7):
    """Record `seconds` of stereo audio from the default input device,
    fingerprint both channels against the local database and return the
    aligned matches.

    :param seconds: recording length in seconds (default 7)
    :return: result of utils.align_matches over both channels' matches
    """
    # open stream
    au = pyaudio.PyAudio()
    stream = au.open(format=pyaudio.paInt16,
                     channels=2,
                     rate=44100,
                     input=True,
                     frames_per_buffer=8192)
    print("* recording")
    left, right = [], []
    for _ in range(0, int(44100 / 8192 * seconds)):
        data = stream.read(8192)
        # Fix: np.fromstring is deprecated (and removed) for binary input;
        # np.frombuffer reads the same int16 samples without a copy.
        nums = np.frombuffer(data, np.int16)
        # de-interleave the stereo frames (odd samples -> left list,
        # even samples -> right list, matching the original code)
        left.extend(nums[1::2])
        right.extend(nums[0::2])
    print("* done recording")
    # close and stop the stream
    stream.stop_stream()
    stream.close()
    # Fix: release PortAudio resources - the PyAudio instance was leaked.
    au.terminate()
    db_client = DB('fingerprints.db')
    # match both channels
    matches = []
    matches.extend(utils.match(left, db_client))
    matches.extend(utils.match(right, db_client))
    # align and return
    return utils.align_matches(matches)
def parse_condition_expression(tokens, idx):
    """Parse a conditional expression.

    <conditional-exp> ::= <logical-or-exp> [ "?" <exp> ":" <conditional-exp> ]

    Returns (next_idx, ConditionExpression); backtracks when the optional
    ternary tail is absent.  Raises SyntaxError when not even the
    <logical-or-exp> head can be parsed.
    """
    try:
        idx, logical_or_expression = parse_logical_or_expression(tokens, idx)
    except Exception:
        # Fix: was a bare `except:` + `assert False` (assert is stripped
        # under -O and a bare except also swallows KeyboardInterrupt).
        raise SyntaxError(
            'expected <logical-or-exp> at token index {}'.format(idx))
    condition_expression = ConditionExpression(
        logical_or_expression=logical_or_expression)
    old_idx = idx
    try:
        # optional ternary tail: "?" <exp> ":" <conditional-exp>
        idx, tok = utils.match(tokens, idx, OPERATOR, '?')
        idx, expression = parse_expression(tokens, idx)
        idx, tok = utils.match(tokens, idx, OPERATOR, ':')
        idx, nested = parse_condition_expression(tokens, idx)
        condition_expression = ConditionExpression(
            logical_or_expression=logical_or_expression,
            expression=expression,
            condition_expression=nested)
        return idx, condition_expression
    except Exception:
        # tail not present - backtrack to just after the head
        return old_idx, condition_expression
def parse_vars(tokens, idx):
    """Parse a variable reference.

    <var> ::= <id> "[" <exp> "]" | <id>

    Returns (next_idx, node) where node is an ArrayVariable or a Variable.
    Raises SyntaxError when no variable form matches.
    """
    old_idx = idx
    # array element: <id> "[" <exp> "]"
    try:
        idx, tok = utils.match_type(tokens, idx, IDENTIFIER)
        id_name = tok.value
        idx, tok = utils.match(tokens, idx, OPERATOR, '[')
        idx, index_expression = parse_expression(tokens, idx)
        idx, tok = utils.match(tokens, idx, OPERATOR, ']')
        return idx, ArrayVariable(id_name=id_name,
                                  index_expression=index_expression)
    except Exception:  # narrowed from bare except; backtrack and retry
        idx = old_idx
    # plain identifier: <id>
    try:
        idx, tok = utils.match_type(tokens, idx, IDENTIFIER)
        return idx, Variable(id_name=tok.value)
    except Exception:
        # Fix: was `assert False` - stripped under -O; raise explicitly.
        raise SyntaxError(
            'expected a variable at token index {}'.format(old_idx))
def test_ete_error_message_for_invalid_bill_id(client, sess):
    # An invalid bill id in the query string should produce a 404.
    params = {"bill_id": "0"}
    response = client.get("/reports/111", query_string=params)
    match(response, 404)
def rule_lord_feast1(
        calendar: Calendar,
        date_: date,
        tempora: List[Observance],
        observances: List[Observance],
        lang: str):
    """A 1st class feast of the Lord occurring on a Sunday or Feria takes the
    place of that day with all rights and privileges; hence there is no
    commemoration of the day.
    """
    jan_13_feast = match(observances, SANCTI_01_13)
    epiphany1_sunday = match(observances, TEMPORA_EPI1_0)
    if jan_13_feast and epiphany1_sunday:
        return [epiphany1_sunday], [], []
def parse_single_declaration(tokens, idx):
    """Parse one declarator.

    <single_declaration> ::= <id> "[" <int> "]" | <id> [ "=" <exp> ]

    Returns (next_idx, node) where node is a SingleArrayDeclaration or a
    SingleDeclaration.  Raises SyntaxError when neither form matches.
    """
    old_idx = idx
    # array declarator: <id> "[" <int> "]"
    try:
        idx, tok = utils.match_type(tokens, idx, IDENTIFIER)
        id_name = tok.value
        idx, tok = utils.match(tokens, idx, OPERATOR, '[')
        idx, tok = utils.match_type(tokens, idx, INT)
        index_expression = Number(num=int(tok.value))
        idx, tok = utils.match(tokens, idx, OPERATOR, ']')
        return idx, SingleArrayDeclaration(id_name=id_name,
                                           index_expression=index_expression)
    except Exception:  # narrowed from bare except; backtrack and retry
        idx = old_idx
    # scalar declarator: <id> [ "=" <exp> ]
    old_idx = idx
    try:
        idx, tok = utils.match_type(tokens, idx, IDENTIFIER)
        id_name = tok.value
        before_init_idx = idx
        try:
            idx, tok = utils.match(tokens, idx, OPERATOR, '=')
            idx, expression = parse_subexpression(tokens, idx)
            return idx, SingleDeclaration(id_name=id_name,
                                          expression=expression)
        except Exception:
            # Fix: restore the index consumed by a matched "=" whose
            # initializer failed to parse, as the other parsers do.
            idx = before_init_idx
        return idx, SingleDeclaration(id_name=id_name, expression=None)
    except Exception:
        idx = old_idx
    # Fix: was `assert False` - stripped under -O; raise explicitly.
    raise SyntaxError(
        'expected a declaration at token index {}'.format(old_idx))
def parse_declaration(tokens, idx):
    """Parse a declaration statement.

    <declaration> ::= "int" <single_declaration> {"," <single_declaration>} ";"

    Returns (next_idx, Declaration) - a linked chain of Declaration nodes,
    one per declarator.  Raises SyntaxError when the input does not start a
    valid declaration.
    """
    start_idx = idx
    try:
        idx, tok = utils.match(tokens, idx, RESERVED, 'int')
        type_name = tok.value
        idx, single_declaration = parse_single_declaration(tokens, idx)
        declaration = Declaration(type_name=type_name,
                                  single_declaration=single_declaration,
                                  declaration=None)
        # zero or more additional "," <single_declaration> items
        while True:
            loop_idx = idx
            try:
                idx, tok = utils.match(tokens, idx, OPERATOR, ',')
                idx, single_declaration = parse_single_declaration(tokens, idx)
                declaration = Declaration(
                    type_name=type_name,
                    single_declaration=single_declaration,
                    declaration=declaration)
            except Exception:  # narrowed from bare except
                idx = loop_idx
                break
        idx, tok = utils.match(tokens, idx, OPERATOR, ';')
        return idx, declaration
    except Exception:
        # Fix: was bare except + `assert False` (assert is stripped under -O).
        raise SyntaxError(
            'expected a declaration at token index {}'.format(start_idx))
def rule_4th_class_feria_are_removed_from_celebration(
        calendar: Calendar,
        date_: date,
        tempora: List[Observance],
        observances: List[Observance],
        lang: str):
    # A 4th class feria never takes the day: it is dropped, and a 4th class
    # sancti observance (when present) is kept as a commemoration only.
    feria = match(observances, PATTERN_TEMPORA_CLASS_4)
    if not feria:
        return
    remaining = [observance for observance in observances if observance != feria]
    commemorated = match(observances, PATTERN_SANCTI_CLASS_4)
    commemorations = [commemorated] if commemorated else []
    return remaining, commemorations, []
def _infer_inter_reading_section(self, observance):
    # Per-observance overrides win over the season-based inference below.
    try:
        return CUSTOM_INTER_READING_SECTIONS[observance.id]
    except KeyError:
        pass
    if match(self.tempora, PATTERN_EASTER):
        return GRADUALE_PASCHAL
    if match(self.tempora, [PATTERN_PRE_LENTEN, PATTERN_LENT]):
        return TRACTUS
    return GRADUALE
def rule_feb27(calendar: Calendar, date_: date, tempora: List[Observance],
               observances: List[Observance], lang: str):
    # Feb 27, normally on Feb 27 but in leap year on Feb 28
    feast = match(observances, SANCTI_02_27)
    if feast and isleap(date_.year) and date_.day == 27:
        transfer = [date(date_.year, 2, 28), [feast]]
        return [match(observances, PATTERN_TEMPORA)], [], [transfer]
def rule_all_souls(
        calendar: Calendar,
        date_: date,
        tempora: List[Observance],
        observances: List[Observance],
        lang: str):
    # All Souls Day has three masses; it is kept on Nov 2 unless that is a
    # Sunday, in which case it is transferred to Nov 3.
    if not match(observances, SANCTI_11_02_1):
        return
    all_souls = [obs for obs in observances if obs.id.startswith('sancti:11-02m')]
    if date_.weekday() == 6:  # Sunday
        transfer = [date(date_.year, 11, 3), all_souls]
        return [match(observances, PATTERN_TEMPORA_SUNDAY)], [], [transfer]
    return all_souls, [], []
def rule_first_class_feast_with_sunday_commemoration(
        calendar: Calendar,
        date_: date,
        tempora: List[Observance],
        observances: List[Observance],
        lang: str):
    # In case of some 1st class feasts the Sunday is commemorated,
    # e.g. St. Michael the Archangel on Sunday 2019-09-29
    feast = match(observances, SANCTI_09_29)
    sunday = match(observances, PATTERN_TEMPORA_SUNDAY_CLASS_2)
    if feast and sunday:
        return [match(observances, PATTERN_CLASS_1)], [sunday], []
def rule_first_class_feast_no_commemoration(calendar: Calendar, date_: date,
                                            tempora: List[Observance],
                                            observances: List[Observance],
                                            lang: str):
    # A 1st class feast takes the whole day: celebrate the highest-priority
    # 1st class observance with no commemorations.
    if not match(observances, PATTERN_CLASS_1):
        return
    by_priority = sorted(observances, key=lambda observance: observance.priority)
    return [match(by_priority, PATTERN_CLASS_1)], [], []
def rule_1st_class_feria(calendar: Calendar, date_: date, tempora: List[Observance],
                         observances: List[Observance], lang: str):
    # Ash wednesday and holy week always wins
    privileged_days = [
        TEMPORA_QUAD6_1,
        TEMPORA_QUAD6_2,
        TEMPORA_QUAD6_3,
        TEMPORA_QUAD6_4,
        TEMPORA_QUAD6_5,
        TEMPORA_QUAD6_6,
        TEMPORA_PASC0_0,
        TEMPORA_QUADP3_3,
    ]
    if match(observances, privileged_days):
        return [match(observances, PATTERN_TEMPORA)], [], []
def rule_2nd_class_sunday(
        calendar: Calendar,
        date_: date,
        tempora: List[Observance],
        observances: List[Observance],
        lang: str):
    """When a 2nd class Sunday occurs along with a 2nd class feast, the Sunday
    takes precedence and the feast is commemorated; lower commemorations are
    skipped.
    """
    sunday = match(observances, PATTERN_TEMPORA_SUNDAY_CLASS_2)
    if sunday and date_.weekday() == SUNDAY:
        feast = match(observances, PATTERN_SANCTI_CLASS_2)
        if feast:
            # Fix: the original returned the feast as the celebration and the
            # Sunday as the commemoration, contradicting its own comment
            # (the 2nd class Sunday takes precedence).
            return [sunday], [feast], []
        return [sunday], [], []
def rule_st_matthias(calendar: Calendar, date_: date, tempora: List[Observance],
                     observances: List[Observance], lang: str):
    # St. Matthias the Apostle, normally on Feb 24, but in leap year on Feb 25
    feast = match(observances, SANCTI_02_24)
    if feast and isleap(date_.year) and date_.day == 24:
        transfer = [date(date_.year, 2, 25), [feast]]
        return [match(observances, PATTERN_TEMPORA)], [], [transfer]
def rule_same_class_feasts_take_over_advent_feria_and_ember_days(
        calendar: Calendar,
        date_: date,
        tempora: List[Observance],
        observances: List[Observance],
        lang: str):
    # Advent ferias and Ember days yield to a sancti feast of the same rank
    # (which then commemorates them) but prevail over lower-ranked feasts.
    adv_or_ember = match(observances, EMBER_DAYS + (PATTERN_ADVENT, ))
    if not adv_or_ember:
        return
    sancti = match(observances, [PATTERN_SANCTI])
    if not sancti:
        return [adv_or_ember], [], []
    if adv_or_ember.rank == sancti.rank:
        return [sancti], [adv_or_ember], []
    if adv_or_ember.rank < sancti.rank:
        return [adv_or_ember], [sancti], []
def test_with_scenario(mocker, sess, client):
    """POSTing a site + scenario to /reports/247 should answer 303 and spawn
    the report thread with the parsed scenario properties.
    """
    mock_Thread = mocker.patch("chellow.reports.report_247.threading.Thread")
    # NOTE(review): this literal is parsed by `loads` below even though the
    # unquoted 2000-08-01T00:00:00Z key is not valid JSON - presumably `loads`
    # here is a zish/Ion-style parser; confirm against the module's imports.
    properties = """{ "scenario_start_year": 2009, "scenario_start_month": 8, "scenario_duration": 1, "era_maps": { 2000-08-01T00:00:00Z: { "llfcs": { "22": { "new_export": "521" } }, "supplier_contracts": { "new_export": 10 } } }, "hh_data": { "CI017": { "generated": " 2009-08-01 00:00, 40 2009-08-15 00:00, 40" } } }"""
    scenario_props = loads(properties)
    scenario = Scenario.insert(sess, "New Gen", scenario_props)
    sess.commit()
    # pin "now" so the args passed to the report thread are deterministic
    now = utc_datetime(2020, 1, 1)
    mocker.patch("chellow.reports.report_247.utc_datetime_now", return_value=now)
    site_code = "CI017"
    site = Site.insert(sess, site_code, "Water Works")
    data = {
        "site_id": site.id,
        "scenario_id": scenario.id,
        "compression": False,
    }
    response = client.post("/reports/247", data=data)
    # expect a redirect to the spawned report
    match(response, 303)
    base_name = ["New Gen"]
    args = scenario_props, base_name, site.id, None, None, False, [], now
    mock_Thread.assert_called_with(target=content, args=args)
def rule_lent_commemoration(
        calendar: Calendar,
        date_: date,
        tempora: List[Observance],
        observances: List[Observance],
        lang: str):
    # A Lenten feria competes with a sancti feast by rank; an equal-ranked
    # 1st class feast is left for another rule to shift to a different day.
    lent_observance = match(observances, PATTERN_LENT)
    if not lent_observance:
        return
    sancti = match(observances, [PATTERN_SANCTI])
    if not sancti:
        return [lent_observance], [], []
    if lent_observance.rank == sancti.rank:
        if sancti.rank == 1:
            # will be shifted to a different day by the other rule
            return
        return [lent_observance], [sancti], []
    if lent_observance.rank > sancti.rank:
        return [sancti], [lent_observance], []
def info_from_meta(novel, content):
    # Derive the novel id from the trailing path segment of the read link,
    # then fill in the metadata fields from the page's og: meta tags.
    novel.id = novel.read_link[novel.read_link.rfind("/", 0, -1) + 1:-1]
    novel.read_link = novel.novel_link

    def og_meta(prop):
        # Extract the content attribute of one og: meta tag; "" when absent.
        pattern = r'<meta\s+property="og:%s"\s+content="(.*)"\s*/>' % prop
        return utils.match(content, pattern) or ""

    novel.name = og_meta("novel:book_name")
    novel.cover = og_meta("image")
    novel.author = og_meta("novel:author")
    novel.subject = og_meta("novel:category")
def _calc_proper_for_given_period():
    # Select the B. M. V. Saturday proper for the current liturgical period.
    # NOTE(review): closure - reads `tempora`, `date_` and `calendar` from the
    # enclosing scope (not visible in this chunk).
    if match(tempora, PATTERN_ADVENT):
        return TEMPORA_C_10A  # B. M. V. Saturdays in Advent
    if date_ >= date(date_.year, 12, 25) or date_ < date(date_.year, 2, 2):
        return TEMPORA_C_10B  # B. M. V. Saturdays between Nativity and Purification
    wednesday_in_holy_week, _ = calendar.find_day(TEMPORA_QUAD6_3)
    if date(date_.year, 2, 2) <= date_ < wednesday_in_holy_week:
        return TEMPORA_C_10C  # B. M. V. Saturdays between Feb 2 and Wednesday in Holy Week
    if match(tempora, PATTERN_EASTER):
        return TEMPORA_C_10PASC  # B. M. V. Saturdays in Easter period
    return TEMPORA_C_10T  # B. M. V. Saturdays between Trinity Sunday and Saturday before 1st Sunday of Advent
def parse_extended_statement(tokens, idx):
    """Parse either an expression statement ("<exp> ;") or an empty
    statement (a lone ";").

    Returns (next_idx, node).  Raises SyntaxError when neither form matches.
    """
    old_idx = idx
    # normal expression statement: <exp> ";"
    try:
        idx, expression = parse_expression(tokens, idx)
        idx, tok = utils.match(tokens, idx, OPERATOR, ';')
        return idx, expression
    except Exception:  # narrowed from bare except
        # Fix: restore the index - the original kept whatever the partially
        # successful parse_expression had consumed, so the ";"-only attempt
        # below started at the wrong position.
        idx = old_idx
    # nop statement: ";"
    try:
        idx, tok = utils.match(tokens, idx, OPERATOR, ';')
        return idx, NopExpression()
    except Exception:
        # Fix: was `assert False` - stripped under -O; raise explicitly.
        raise SyntaxError(
            'expected a statement at token index {}'.format(old_idx))
def matchIdenticalPhrases(L1, L2):
    """Greedily collect the longest matching phrases between L1 and L2,
    never reusing a position already covered by an earlier (longer) phrase.

    Returns (phrases, used_positions_in_L1, used_positions_in_L2).
    """
    phrases = set()
    used1 = set()
    used2 = set()
    while True:
        best = ''
        best_pos = None
        # scan every still-unused (i, j) start pair for the longest match
        for i in range(len(L1)):
            if i in used1:
                continue
            for j in range(len(L2)):
                if j in used2:
                    continue
                candidate = match(L1, L2, i, j)
                if len(candidate) > len(best):
                    best = candidate
                    best_pos = (i, j)
        if len(best) == 0:
            break
        phrases.add(best)
        used1 |= set(range(best_pos[0], best_pos[0] + len(best)))
        used2 |= set(range(best_pos[1], best_pos[1] + len(best)))
        # print(best, used1, used2)
    return phrases, used1, used2
def rule_4th_class_commemorations_are_only_commemorated(
        calendar: Calendar,
        date_: date,
        tempora: List[Observance],
        observances: List[Observance],
        lang: str):
    # A 4th class sancti observance never takes the day; it is demoted to a
    # commemoration.
    fourth_class_sancti = match(observances, PATTERN_SANCTI_CLASS_4)
    if fourth_class_sancti:
        # NOTE(review): pop() mutates the caller's `observances` list in
        # place, unlike the sibling 4th-class-feria rule which only builds a
        # filtered copy; after the pop the filter below can no longer find the
        # item, making it a plain copy. Confirm the in-place removal is
        # intentional before touching this.
        observances.pop(observances.index(fourth_class_sancti))
        return [o for o in observances if o != fourth_class_sancti], [fourth_class_sancti], []
def rule_nativity_has_multiple_masses(calendar: Calendar, date_: date,
                                      tempora: List[Observance],
                                      observances: List[Observance], lang: str):
    # Nativity Vigil takes place of 4th Advent Sunday.
    if not match(observances, SANCTI_12_25_1):
        return
    nativity_masses = [obs for obs in observances
                       if obs.id.startswith('sancti:12-25m')]
    return nativity_masses, [], []
def words(message): lower_text = message.text.lower() # Ban if message.qq not in NOBAN_USERS: record = BanRecord.get(message.qq) for o in BANNED_WORDS: keywords = o.get('keywords', []) duration = o.get('duration', 1) if match(lower_text, keywords): # duration *= record.multiply qqbot.send(GroupBan(message.group, message.qq, duration * 60)) return True # Ignore if match(lower_text, IGNORED_WORDS): return True # else return False
def match(self, template):
    """Run image matching against the current device screen.

    :param template: template image object, or a path to an image file
    :return: boolean match result
    """
    # Fix: isinstance is the idiomatic type check and also accepts str
    # subclasses, unlike the original `type(template) == str` comparison.
    # (Docstring translated from Chinese.)
    if isinstance(template, str):
        template = self.get_img(template)
    # `match` here resolves to the module-level matcher, not this method
    return match(self.adb.screen, template)
def test_batch_http(mocker, sess, client):
    """Requesting the 429 report for a batch should answer 303 and spawn the
    report thread with (batch id, None, user=None).
    """
    g_contract = GContract.insert(sess, "Fusion 2020", "", {},
                                  utc_datetime(2000, 1, 1), None, {})
    g_batch = g_contract.insert_g_batch(sess, "b1", "Jan batch")
    sess.commit()
    data = {"g_batch_id": g_batch.id}
    mock_Thread = mocker.patch("chellow.reports.report_429.threading.Thread",
                               autospec=True)
    # NOTE(review): on a GET request `data=` is sent as the request body, not
    # as query parameters; sibling tests pass `query_string=` instead -
    # confirm which one the /reports/429 view actually reads.
    response = client.get("/reports/429", data=data)
    match(response, 303)
    user = None
    args = (g_batch.id, None, user)
    mock_Thread.assert_called_with(target=chellow.reports.report_429.content,
                                   args=args)
def prepare_input(df, target_cols=None):
    """
    Prepare a dataframe for input into ML methods by:

    * Dropping non one-hot encoded genotype columns
    * Dropping index columns
    * Dropping any other columns indicated (for example, representing
      the target variable)
    """
    # Fix: a mutable default argument ([]) is shared across calls; use None
    # as the sentinel instead (backward compatible - the list is only read).
    if target_cols is None:
        target_cols = []
    gt_cols = list(filter(match("GT", pos=-1), df.columns.values))
    to_drop = list(set(['#CHROM', 'POS'] + gt_cols + target_cols))
    return df.drop(to_drop, axis=1).values.astype("float32")
def run(self):
    """Interactive OS-simulation shell.

    Reads commands in a loop and dispatches to process creation (A),
    queue snapshots (S), device syscalls/interrupts (p#/d#/c#/P#/D#/C#)
    and process termination, until the quit command is entered.
    """
    print(r"We are running...")
    os.help()
    keep_running = True
    while keep_running:
        user_input = input("[A,S,t,p#,d#,c#,P#,D#,C#]: ").strip()
        while not validate_input(user_input):
            user_input = input("Not valid! Try again: ")
        # We know this is valid now....
        if user_input == "A":
            # A: create a new process and enqueue it on the ready queue
            pcb = PCB()
            self.readyQueue.add_pcb_to_readyqueue(pcb)
            print("Added process %d " % pcb.pid)
        elif user_input == "S":
            # S: snapshot one of the queues (ready/disk/printer/cdrw)
            selection = input("[r,d,p,c]: ")
            while not validate_input_snapshot(selection):
                selection = input("Invalid input! [r,d,p,c]: ")
            if selection == "r":
                self.snapshot(readyQ=True)
            elif selection == "d":
                self.snapshot(diskQ=True)
            elif selection == "p":
                self.snapshot(printerQ=True)
            elif selection == "c":
                self.snapshot(cdrwQ=True)
            else:
                print("If we get here...fail me.")
        elif match(r"^[PDCpdc]{1}\d+$", user_input):
            # device command: lowercase letter = syscall, uppercase = interrupt
            if user_input[0].islower():
                self.syscall(user_input)
            elif user_input[0].isupper():
                self.interrupt(user_input)
            else:
                print("If you see this message...dock points!")
        elif parse_if_terminate_syscall(user_input):
            # terminate the process currently in the CPU (head of ready queue)
            if self.readyQueue.queue_len() > 0:
                pcb = self.readyQueue.pop()
                print("Terminated Process %d " % pcb.pid)
            else:
                print("No process is currently in the CPU.")
        elif do_we_quit(user_input):
            print("This always happens...was it me??!! I love you anyways...")
            keep_running = False  # break out.
def populatePCB(pcb, printer=False):
    """Fill a PCB's file/device fields from interactive user input.

    :param pcb: the PCB instance to populate
    :param printer: when True the operation is forced to be a write ("w")
        and the user is not asked for a read/write choice
    :return: the populated pcb
    """
    file_name = input("Give me a file name: ")
    file_size = input("How big is the file: ")
    while not check_integer(file_size):
        file_size = input("Only enter integers for file size try again: ")
    memory_start_region = input("Enter a memory start region: ")
    while not check_integer(memory_start_region):
        memory_start_region = input(
            "Must be an integer. Enter a memory start region: ")
    if printer:
        pcb.readwrite = "w"
    else:
        pcb.readwrite = input("Is this a read or write (r/w): ")
    # Fix: re.match returns a Match object or None, never False, so the
    # original `... is False` condition was always false and invalid
    # read/write input was silently accepted.
    while match(r"^[rw]{1}$", pcb.readwrite) is None:
        pcb.readwrite = input(
            "Invalid input for read/write! You must only enter r or w! Try again: ")
    pcb.file_size = file_size
    pcb.file_name = file_name
    pcb.memory_start_region = memory_start_region
    return pcb
def apply_first_map(self, type, name, obj, maplist):
    """Apply the first rule in maplist whose pattern matches `name` to `obj`.

    Each rule is a (map_pattern, rename_dict) pair; rename_dict keys are
    ':'-separated paths into the nested dict `obj`, created on the fly, and
    the values are written at the final path step.  Only the first matching
    rule is applied; the method mutates `obj` in place and returns None.
    """
    for (map_pattern,rename_dict) in maplist:
        if utils.match (name,map_pattern):
            if self.options.verbose:
                utils.header("TestMapper/{} : applying rules '{}' on {}"\
                             .format(type, map_pattern, name))
            for (k,v) in rename_dict.items():
                # apply : separator
                path = k.split(':')
                # step down but last step in path
                o = obj
                for step in path[:-1]:
                    if step not in o:
                        o[step] = {}
                        if self.options.verbose:
                            utils.header ("WARNING : created step {} in path {} on {} {}"\
                                          .format(step,path,type,name))
                    o = o[step]
                # last step is the one for side-effect
                step = path[-1]
                if self.options.verbose:
                    if step not in o:
                        utils.header ("WARNING : inserting key {} for path {} on {} {}"\
                                      .format(step, path, type, name))
                # apply formatting if '%s' found in the value
                if v is None:
                    if self.options.verbose:
                        print("TestMapper WARNING - None value - ignored, key=",k)
                    continue
                if v.find('%s') >= 0:
                    # NOTE(review): this indexes obj with the raw composite
                    # key `k`, not the resolved path - for ':'-paths this
                    # would raise KeyError; confirm '%s' values only occur
                    # with single-step keys.
                    v = v % obj[k]
                if self.options.verbose:
                    print(("TestMapper, rewriting {}: {} into {}"\
                          .format(name, k, v)))
                o[step] = v
            # only apply first rule
            return
def run (self):
    """Main driver of the test framework.

    Parses command-line options (with per-option persistence in arg-* files),
    loads the config modules, provisions plcs/nodes on the local substrate,
    then runs every selected step on every plc, tracing results; returns
    'SUCCESS', 'IGNORED' or 'FAILURE'.
    """
    usage = """usage: %%prog [options] steps arch-rpms-url defaults to the last value used, as stored in arg-arch-rpms-url, no default config defaults to the last value used, as stored in arg-config, or {} ips_vnode, ips_vplc and ips_qemu defaults to the last value used, as stored in arg-ips-{{bplc,vplc,bnode,vnode}}, default is to use IP scanning steps refer to a method in TestPlc or to a step_* module run with -l to see a list of available steps === """.format(TestMain.default_config)
    parser = ArgumentParser(usage = usage)
    parser.add_argument("-u", "--url", action="store", dest="arch_rpms_url",
                        help="URL of the arch-dependent RPMS area - for locating what to test")
    parser.add_argument("-b", "--build", action="store", dest="build_url",
                        help="ignored, for legacy only")
    parser.add_argument("-c", "--config", action="append", dest="config", default=[],
                        help="Config module - can be set multiple times, or use quotes")
    parser.add_argument("-p", "--personality", action="store", dest="personality",
                        help="personality - as in vbuild-nightly")
    parser.add_argument("-d", "--pldistro", action="store", dest="pldistro",
                        help="pldistro - as in vbuild-nightly")
    parser.add_argument("-f", "--fcdistro", action="store", dest="fcdistro",
                        help="fcdistro - as in vbuild-nightly")
    parser.add_argument("-e", "--exclude", action="append", dest="exclude", default=[],
                        help="steps to exclude - can be set multiple times, or use quotes")
    parser.add_argument("-i", "--ignore", action="append", dest="ignore", default=[],
                        help="steps to run but ignore - can be set multiple times, or use quotes")
    parser.add_argument("-a", "--all", action="store_true", dest="all_steps", default=False,
                        help="Run all default steps")
    parser.add_argument("-l", "--list", action="store_true", dest="list_steps", default=False,
                        help="List known steps")
    parser.add_argument("-V", "--vserver", action="append", dest="ips_bplc", default=[],
                        help="Specify the set of hostnames for the boxes that host the plcs")
    parser.add_argument("-P", "--plcs", action="append", dest="ips_vplc", default=[],
                        help="Specify the set of hostname/IP's to use for vplcs")
    parser.add_argument("-Q", "--qemus", action="append", dest="ips_bnode", default=[],
                        help="Specify the set of hostnames for the boxes that host the nodes")
    parser.add_argument("-N", "--nodes", action="append", dest="ips_vnode", default=[],
                        help="Specify the set of hostname/IP's to use for vnodes")
    parser.add_argument("-s", "--size", action="store", dest="size", default=1, type=int,
                        help="set test size in # of plcs - default is 1")
    parser.add_argument("-q", "--qualifier", action="store", dest="qualifier", default=None, type=int,
                        help="run steps only on plc numbered <qualifier>, starting at 1")
    parser.add_argument("-y", "--rspec-style", action="append", dest="rspec_styles", default=[],
                        help="OBSOLETE - for compat only")
    parser.add_argument("-k", "--keep-going", action="store", dest="keep_going", default=False,
                        help="proceeds even if some steps are failing")
    parser.add_argument("-D", "--dbname", action="store", dest="dbname", default=None,
                        help="Used by plc_db_dump and plc_db_restore")
    parser.add_argument("-v", "--verbose", action="store_true", dest="verbose", default=False,
                        help="Run in verbose mode")
    parser.add_argument("-I", "--interactive", action="store_true", dest="interactive", default=False,
                        help="prompts before each step")
    parser.add_argument("-n", "--dry-run", action="store_true", dest="dry_run", default=False,
                        help="Show environment and exits")
    parser.add_argument("-t", "--trace", action="store", dest="trace_file", default=None,
                        help="Trace file location")
    parser.add_argument("-g", "--bonding", action='store', dest='bonding_build', default=None,
                        help="specify build to bond with")
    # if we call symlink 'rung' instead of just run this is equivalent to run -G
    bonding_default = 'rung' in sys.argv[0]
    parser.add_argument("-G", "--bonding-env", action='store_true', dest='bonding_env', default=bonding_default,
                        help="get bonding build from env. variable $bonding")
    parser.add_argument("steps", nargs='*')
    self.options = parser.parse_args()

    # handle -G/-g options
    if self.options.bonding_env:
        if 'bonding' not in os.environ:
            print("env. variable $bonding must be set with --bonding-env")
            sys.exit(1)
        self.options.bonding_build = os.environ['bonding']
    if self.options.bonding_build:
        ## allow to pass -g ../2015.03.15--f18 so we can use bash completion
        self.options.bonding_build = os.path.basename(self.options.bonding_build)
        if not os.path.isdir("../{}".format(self.options.bonding_build)):
            print("could not find test dir for bonding build {}".format(self.options.bonding_build))
            sys.exit(1)

    # allow things like "run -c 'c1 c2' -c c3"
    def flatten (x):
        # recursively flatten nested iterables (but not strings) into a list
        result = []
        for el in x:
            if hasattr(el, "__iter__") and not isinstance(el, str):
                result.extend(flatten(el))
            else:
                result.append(el)
        return result
    # flatten relevant options
    for optname in ['config', 'exclude', 'ignore', 'ips_bplc', 'ips_vplc', 'ips_bnode', 'ips_vnode']:
        setattr(self.options, optname,
                flatten([arg.split() for arg in getattr(self.options, optname)]))

    if self.options.rspec_styles:
        print("WARNING: -y option is obsolete")

    # handle defaults and option persistence
    # each option not given on the command line is read back from its arg-*
    # file, then the (possibly defaulted) value is saved for the next run
    for recname, filename, default, need_reverse in (
            ('build_url', 'arg-build-url', TestMain.default_build_url, None),
            ('ips_bplc', 'arg-ips-bplc', [], True),
            ('ips_vplc', 'arg-ips-vplc', [], True),
            ('ips_bnode', 'arg-ips-bnode', [], True),
            ('ips_vnode', 'arg-ips-vnode', [], True),
            ('config', 'arg-config', TestMain.default_config, False),
            ('arch_rpms_url', 'arg-arch-rpms-url', "", None),
            ('personality', 'arg-personality', "linux64", None),
            ('pldistro', 'arg-pldistro', "onelab", None),
            ('fcdistro', 'arg-fcdistro', 'f14', None),
            ):
        # print('handling', recname)
        path = filename
        is_list = isinstance(default, list)
        is_bool = isinstance(default, bool)
        if not getattr(self.options, recname):
            try:
                with open(path) as file:
                    parsed = file.readlines()
                if is_list:
                    # lists
                    parsed = [x.strip() for x in parsed]
                else:
                    # strings and booleans
                    if len(parsed) != 1:
                        print("{} - error when parsing {}".format(sys.argv[1], path))
                        sys.exit(1)
                    parsed = parsed[0].strip()
                    if is_bool:
                        parsed = parsed.lower() == 'true'
                setattr(self.options, recname, parsed)
            except Exception as e:
                if default != "":
                    setattr(self.options, recname, default)
                else:
                    print("Cannot determine", recname, e)
                    print("Run {} --help for help".format(sys.argv[0]))
                    sys.exit(1)

        # save for next run
        fsave = open(path, "w")
        if is_list:
            # lists
            for value in getattr(self.options, recname):
                fsave.write(value + "\n")
        else:
            # strings and booleans - just call str()
            fsave.write(str(getattr(self.options, recname)) + "\n")
        fsave.close()
        # utils.header('Saved {} into {}'.format(recname, filename))

        # lists need be reversed
        # I suspect this is useful for the various pools but for config, it's painful
        if isinstance(getattr(self.options, recname), list) and need_reverse:
            getattr(self.options, recname).reverse()

        if self.options.verbose:
            utils.header('* Using {} = {}'.format(recname, getattr(self.options, recname)))

    # hack : if sfa is not among the published rpms, skip these tests
    TestPlc.check_whether_build_has_sfa(self.options.arch_rpms_url)

    # initialize steps
    if not self.options.steps:
        # defaults, depends on using bonding or not
        if self.options.bonding_build:
            self.options.steps = TestPlc.default_bonding_steps
        else:
            self.options.steps = TestPlc.default_steps

    if self.options.list_steps:
        self.init_steps()
        self.list_steps()
        return 'SUCCESS'

    # rewrite '-' into '_' in step names
    self.options.steps = [ step.replace('-', '_') for step in self.options.steps ]
    self.options.exclude = [ step.replace('-', '_') for step in self.options.exclude ]
    self.options.ignore = [ step.replace('-', '_') for step in self.options.ignore ]

    # technicality, decorate known steps to produce the '_ignore' version
    TestPlc.create_ignore_steps()

    # exclude
    selected = []
    for step in self.options.steps:
        keep = True
        for exclude in self.options.exclude:
            if utils.match(step, exclude):
                keep = False
                break
        if keep:
            selected.append(step)

    # ignore
    selected = [ step if step not in self.options.ignore else step + "_ignore"
                 for step in selected ]
    self.options.steps = selected

    # this is useful when propagating on host boxes, to avoid conflicts
    self.options.buildname = os.path.basename(os.path.abspath(self.path))

    if self.options.verbose:
        self.show_env(self.options, "Verbose")

    # load configs
    all_plc_specs = []
    for config in self.options.config:
        modulename = 'config_' + config
        try:
            m = __import__(modulename)
            all_plc_specs = m.config(all_plc_specs, self.options)
        except :
            traceback.print_exc()
            print('Cannot load config {} -- ignored'.format(modulename))
            raise

    # provision on local substrate
    all_plc_specs = LocalSubstrate.local_substrate.provision(all_plc_specs, self.options)

    # remember substrate IP address(es) for next run
    with open('arg-ips-bplc', 'w') as ips_bplc_file:
        for plc_spec in all_plc_specs:
            ips_bplc_file.write("{}\n".format(plc_spec['host_box']))
    with open('arg-ips-vplc', 'w') as ips_vplc_file:
        for plc_spec in all_plc_specs:
            ips_vplc_file.write("{}\n".format(plc_spec['settings']['PLC_API_HOST']))
    # ditto for nodes
    with open('arg-ips-bnode', 'w') as ips_bnode_file:
        for plc_spec in all_plc_specs:
            for site_spec in plc_spec['sites']:
                for node_spec in site_spec['nodes']:
                    ips_bnode_file.write("{}\n".format(node_spec['host_box']))
    with open('arg-ips-vnode','w') as ips_vnode_file:
        for plc_spec in all_plc_specs:
            for site_spec in plc_spec['sites']:
                for node_spec in site_spec['nodes']:
                    # back to normal (unqualified) form
                    stripped = node_spec['node_fields']['hostname'].split('.')[0]
                    ips_vnode_file.write("{}\n".format(stripped))

    # build a TestPlc object from the result, passing options
    for spec in all_plc_specs:
        spec['failed_step'] = False
    all_plcs = [ (x, TestPlc(x,self.options)) for x in all_plc_specs]

    # pass options to utils as well
    utils.init_options(self.options)

    # populate TestBonding objects
    # need to wait until here as we need all_plcs
    if self.options.bonding_build:
        # this will fail if ../{bonding_build} has not the right arg- files
        for spec, test_plc in all_plcs:
            test_plc.test_bonding = TestBonding (test_plc,
                                                 onelab_bonding_spec(self.options.bonding_build),
                                                 LocalSubstrate.local_substrate,
                                                 self.options)

    overall_result = 'SUCCESS'
    # expand the requested step names into (substep, method, force, cross,
    # qualifier) tuples before actually running anything
    all_step_infos = []
    for step in self.options.steps:
        if not TestPlc.valid_step(step):
            continue
        # some steps need to be done regardless of the previous ones: we force them
        force = False
        if step.endswith("_force"):
            step = step.replace("_force", "")
            force = True
        # allow for steps to specify an index like in
        # run checkslice@2
        try:
            step, qualifier = step.split('@')
        except:
            qualifier = self.options.qualifier
        try:
            stepobj = Step (step)
            for substep, method in stepobj.tuples():
                # a cross step will run a method on TestPlc that has a signature like
                # def cross_foo (self, all_test_plcs)
                cross = False
                if substep.find("cross_") == 0:
                    cross = True
                all_step_infos.append ( (substep, method, force, cross, qualifier, ) )
        except :
            utils.header("********** FAILED step {} (NOT FOUND) -- won't be run".format(step))
            traceback.print_exc()
            overall_result = 'FAILURE'

    if self.options.dry_run:
        self.show_env(self.options, "Dry run")

    # init & open trace file if provided
    if self.options.trace_file and not self.options.dry_run:
        # create dir if needed
        trace_dir = os.path.dirname(self.options.trace_file)
        if trace_dir and not os.path.isdir(trace_dir):
            os.makedirs(trace_dir)
        trace = open(self.options.trace_file,"w")

    # do all steps on all plcs
    TIME_FORMAT = "%H-%M-%S"
    TRACE_FORMAT = "TRACE: {plc_counter:d} {begin}->{seconds}s={duration}s " + \
                   "status={status} step={stepname} plc={plcname} force={force}\n"
    for stepname, method, force, cross, qualifier in all_step_infos:
        plc_counter = 0
        for spec, plc_obj in all_plcs:
            plc_counter += 1
            # skip this step if we have specified a plc_explicit
            if qualifier and plc_counter != int(qualifier):
                continue

            plcname = spec['name']
            across_plcs = [ o for (s,o) in all_plcs if o!=plc_obj ]

            # run the step
            beg_time = datetime.now()
            begin = beg_time.strftime(TIME_FORMAT)
            if not spec['failed_step'] or force or self.options.interactive or self.options.keep_going:
                skip_step = False
                if self.options.interactive:
                    prompting = True
                    while prompting:
                        msg="{:d} Run step {} on {} [r](un)/d(ry_run)/p(roceed)/s(kip)/q(uit) ? "\
                            .format(plc_counter, stepname, plcname)
                        answer = input(msg).strip().lower() or "r"
                        answer = answer[0]
                        if answer in ['s','n']:
                            # skip/no/next
                            print('{} on {} skipped'.format(stepname, plcname))
                            prompting = False
                            skip_step = True
                        elif answer in ['q','b']:
                            # quit/bye
                            print('Exiting')
                            return 'FAILURE'
                        elif answer in ['d']:
                            # dry_run - run the step once with dry_run set,
                            # then restore the previous dry_run state
                            dry_run = self.options.dry_run
                            self.options.dry_run = True
                            plc_obj.options.dry_run = True
                            plc_obj.apiserver.set_dry_run(True)
                            if not cross:
                                step_result=method(plc_obj)
                            else:
                                step_result=method(plc_obj, across_plcs)
                            print('dry_run step ->', step_result)
                            self.options.dry_run = dry_run
                            plc_obj.options.dry_run = dry_run
                            plc_obj.apiserver.set_dry_run(dry_run)
                        elif answer in ['p']:
                            # take it as a yes and leave interactive mode
                            prompting = False
                            self.options.interactive = False
                        elif answer in ['r','y']:
                            # run/yes
                            prompting = False
                if skip_step:
                    continue
                try:
                    force_msg = ""
                    if force and spec['failed_step']:
                        force_msg=" (forced after {} has failed)".format(spec['failed_step'])
                    utils.header("********** {:d} RUNNING step {}{} on plc {}"\
                                 .format(plc_counter, stepname, force_msg, plcname))
                    if not cross:
                        step_result = method(plc_obj)
                    else:
                        step_result = method(plc_obj, across_plcs)
                    if isinstance (step_result, Ignored):
                        step_result = step_result.result
                        if step_result:
                            msg = "OK"
                        else:
                            msg = "KO"
                            # do not overwrite if FAILURE
                            if overall_result == 'SUCCESS':
                                overall_result = 'IGNORED'
                        utils.header('********** {} IGNORED ({}) step {} on {}'\
                                     .format(plc_counter, msg, stepname, plcname))
                        status="{}[I]".format(msg)
                    elif step_result:
                        utils.header('********** {:d} SUCCESSFUL step {} on {}'\
                                     .format(plc_counter, stepname, plcname))
                        status = "OK"
                    else:
                        overall_result = 'FAILURE'
                        spec['failed_step'] = stepname
                        utils.header('********** {:d} FAILED step {} on {} (discarded from further steps)'\
                                     .format(plc_counter, stepname, plcname))
                        status = "KO"
                except:
                    overall_result = 'FAILURE'
                    spec['failed_step'] = stepname
                    traceback.print_exc()
                    utils.header ('********** {} FAILED (exception) step {} on {} (discarded from further steps)'\
                                  .format(plc_counter, stepname, plcname))
                    status = "KO"

            # do not run, just display it's skipped
            else:
                why = "has failed {}".format(spec['failed_step'])
                utils.header("********** {} SKIPPED Step {} on {} ({})"\
                             .format(plc_counter, stepname, plcname, why))
                status = "UNDEF"
            if not self.options.dry_run:
                delay = datetime.now()-beg_time
                seconds = int(delay.total_seconds())
                duration = str(delay)
                # always do this on stdout
                print(TRACE_FORMAT.format(**locals()))
                # duplicate on trace_file if provided
                if self.options.trace_file:
                    trace.write(TRACE_FORMAT.format(**locals()))
                    trace.flush()

    if self.options.trace_file and not self.options.dry_run:
        trace.close()

    # free local substrate
    LocalSubstrate.local_substrate.release(self.options)

    return overall_result