def parse_drc(drc_file):
    """Parse a KiCad DRC report file and extract the two summary counters.

    Args:
        drc_file: path to a DRC report containing lines of the form
            "** Found N DRC errors **" and "** Found N unconnected pads **".

    Returns:
        dict with int values: {'drc_errors': ..., 'unconnected_pads': ...}.

    Raises:
        TypeError: if either summary line is missing (int(None)), matching
            the original behavior.
    """
    from re import search as regex_search
    with open(drc_file, 'r') as f:
        lines = f.read().splitlines()
    drc_errors = None
    unconnected_pads = None
    for line in lines:
        # Stop scanning as soon as both counters have been found.
        if drc_errors is not None and unconnected_pads is not None:
            break
        m = regex_search(r'^\*\* Found ([0-9]+) DRC errors \*\*$', line)
        if m is not None:
            drc_errors = m.group(1)
            continue
        m = regex_search(r'^\*\* Found ([0-9]+) unconnected pads \*\*$', line)
        if m is not None:
            unconnected_pads = m.group(1)
            continue
    return {
        'drc_errors': int(drc_errors),
        'unconnected_pads': int(unconnected_pads)
    }
def youtube_trailer_id(url):
    """Extract the YouTube video id from a URL.

    Keyword arguments:
    url -- link to the video

    Handles both the long form (...watch?v=<id>) and the short form
    (youtu.be/<id>). Returns None when neither form is present.
    """
    from re import search as regex_search
    for pattern in (r'(?<=v=)[^&#]+', r'(?<=be/)[^&#]+'):
        found = regex_search(pattern, url)
        if found:
            return found.group(0)
    return None
def index_generator(self, folder_ids, add_non_nsw_files: bool, add_nsw_files_without_title_id: bool, success: str = None):
    """Populate self.index["files"] with entries for files found in the given
    Google Drive folders.

    Args:
        folder_ids: iterable of Drive folder ids to scan (non-recursive).
        add_non_nsw_files: when True, include files regardless of extension.
        add_nsw_files_without_title_id: when True, include files even if the
            name lacks a bracketed 16-hex-digit title id.
        success: optional message stored under self.index["success"].
    """
    for folder_id in folder_ids:
        for (file_id, file_details) in self.gdrive_service.get_files_in_folder_id(folder_id).items():
            # Extension filter: last four characters must be a known NSW extension.
            if add_non_nsw_files or file_details["name"][-4:] in (".nsp", ".nsz", ".xci", ".xcz"):
                # Title-id filter runs on the URL-encoded name, hence the
                # percent-encoded brackets (%5B / %5D) in the pattern.
                if add_nsw_files_without_title_id or regex_search(r"\%5B[0-9A-Fa-f]{16}\%5D", url_encode(file_details["name"], safe="")):
                    self.index["files"].append({
                        "url": "gdrive:{file_id}#{file_name}".format(
                            file_id=file_id,
                            file_name=url_encode(file_details["name"], safe="")),
                        "size": int(file_details["size"])
                    })
    if success is not None:
        self.index.update({"success": success})
def index_generator(
    self,
    folder_ids,
    add_non_nsw_files: bool,
    add_nsw_files_without_title_id: bool,
    success: str = None,
) -> None:
    """Populate self.index["files"] with entries for files found in the given
    Google Drive folders.

    Args:
        folder_ids: iterable of Drive folder ids to scan (non-recursive).
        add_non_nsw_files: when True, include files regardless of extension.
        add_nsw_files_without_title_id: when True, include files even when the
            name lacks a bracketed 16-hex-digit title id.
        success: optional message stored under self.index["success"].
    """
    # BUG FIX: the original matched r"\[...\]" against the URL-encoded name,
    # where '[' and ']' are escaped to %5B/%5D, so the title-id check could
    # never succeed. Match the raw file name instead.
    title_id_pattern = r"\[[0-9A-Fa-f]{16}\]"
    nsw_extensions = (".nsp", ".nsz", ".xci", ".xcz")
    for folder_id in folder_ids:
        files = self.gdrive_service.get_files_in_folder_id(folder_id)
        for (file_id, file_details) in files.items():
            name = file_details["name"]
            # Extension filter: last four characters must be a known extension.
            if not (add_non_nsw_files or name[-4:] in nsw_extensions):
                continue
            # Title-id filter on the raw (un-encoded) name.
            if not (add_nsw_files_without_title_id
                    or regex_search(title_id_pattern, name)):
                continue
            file_name = url_encode(name, safe="")
            self.index["files"].append({
                "url": f"gdrive:{file_id}#{file_name}",
                "size": int(file_details["size"]),
            })
    if success is not None:
        self.index.update({"success": success})
def get():
    """Return the first line of the backing file that is not a comment,
    stripped of surrounding whitespace; None when every line is a comment."""
    with open(_file(), 'r') as handle:
        contents = handle.readlines()
    for entry in contents:
        if not regex_search('^#', entry):
            return entry.strip()
def __init__(self, path):
    """Load package metadata from a build script at *path* and evaluate its
    $up2date value with a timeout.

    NOTE(review): this source arrived with its line structure collapsed; the
    nesting below is reconstructed from the logic — confirm against the
    original file, especially which statements sit inside the final `if`.
    """
    self.path = os.path.dirname(path)
    # Pull pkgname/pkgver/groups by echoing the build script's variables.
    self.pkgname, self.pkgver, self.group = run_command(
        self.path, "echo $pkgname;echo $pkgver;echo $groups").split()
    self.up2date = run_command(self.path, "echo -n $up2date")
    try:
        with open(path, "r") as f:
            # Maintainer name is the text between "# Maintainer:" and "<".
            self.m8r = regex_search(
                r"# Maintainer:\s+(.*?)\s+<", f.read()).groups()[0]
    except (IndexError, AttributeError):
        self.m8r = "????????"  # probably Maintainer is empty or without mail
    self.skip = False
    if devel and self.m8r != devel:
        self.skip = True
        return  # just ignore this package and don't eval $up2date
    if " " in self.up2date:  # hardcoded version numer in $up2date
        # Evaluate $up2date in a child process so a hanging command can be
        # killed after `timeout` seconds.
        self.queue = Queue()
        p = Process(target=self.eval_up2date)
        p.start()
        p.join(timeout)
        if p.is_alive():
            p.terminate()
            p.join()
            self.up2date = "TIMEOUT"
        else:
            self.up2date = self.queue.get()
        del self.queue
def add_student(request, course_id):
    """Add a student to a course, identified by BC email or 8-digit Eagle ID.

    Reads 'id or email' from request.POST, resolves the User, and adds them
    to the course's students. Always returns the refreshed my_courses view.
    """
    # This is add student to course. There is another function add_student_to_team
    id_or_email = request.POST['id or email']
    eagle_id = ''
    email = ''
    new_student = None
    # check is it email format
    # BUG FIX: the dot was unescaped (r'@bc.edu'), so e.g. '@bcXedu' matched.
    if regex_search(r'@bc\.edu', id_or_email) is not None:
        email = id_or_email
    # check is it eagle id format
    elif id_or_email.isdigit() and len(id_or_email) == 8:
        eagle_id = id_or_email
    else:
        messages.error(request, "Error: The format of email or Eagle ID is incorrect")
    # get course object
    course = get_object_or_404(Course, pk=course_id)
    # Look up the student; .first() returns None when no match exists.
    if eagle_id != '':
        new_student = User.objects.filter(eagle_id=eagle_id).first()
    elif email != '':
        new_student = User.objects.filter(email=email).first()
    # BUG FIX: the original set valid_student=True before checking the query
    # result, so course.students.add(None) could be reached.
    if new_student is not None:
        course.students.add(new_student)
        course.save()
        print("student successfully added")
    else:
        messages.error(request, "Error: Student is not found")
    return my_courses(request)  # refresh page
def scan_folder(
    self, folder_id: str, files_progress_bar: tqdm, recursion: bool,
    add_nsw_files_without_title_id: bool, add_non_nsw_files: bool
):
    """Scans the folder id for files and updates the instance index.

    Args:
        folder_id: Drive folder to scan.
        files_progress_bar: progress bar updated by the Drive service.
        recursion: when True, descend into subfolders.
        add_nsw_files_without_title_id: include files lacking a title id.
        add_non_nsw_files: include files regardless of extension.
    """
    # The title id is matched against the URL-encoded name, so the brackets
    # appear percent-encoded (%5B / %5D) in the pattern.
    title_id_pattern = r"\%5B[0-9A-Fa-f]{16}\%5D"
    files = self.gdrive_service.get_all_files_in_folder(
        folder_id, recursion, files_progress_bar
    )
    for (file_id, file_details) in files.items():
        url_encoded_file_name = url_encode(file_details["name"], safe="")
        # NOTE: the extension check runs on the *encoded* name; that is fine
        # because '.', letters and digits are never percent-encoded.
        file_valid_nsw_check = add_non_nsw_files or \
            url_encoded_file_name[-4:] in (".nsp", ".nsz", ".xci", ".xcz")
        file_title_id_check = add_nsw_files_without_title_id or \
            regex_search(title_id_pattern, url_encoded_file_name)
        if file_title_id_check and file_valid_nsw_check:
            file_entry_to_add = {
                "url": f"gdrive:{file_id}#{url_encoded_file_name}",
                "size": int(file_details["size"])
            }
            # Avoid duplicate entries when the same file is seen twice.
            if file_entry_to_add not in self.index["files"]:
                self.index["files"].append(file_entry_to_add)
        # Shared status is recorded for every file seen, even filtered ones.
        self.files_shared_status.update({
            file_id: file_details["shared"]
        })
def formatOk(fname, name, value):
    """Validate *value* for variable *name* within form/group *fname*.

    NOTE(review): for 'preauthGoodVars' and 'postauthGoodVars' the regex
    result is discarded (`pass  # return True`) and True is returned
    unconditionally — validation is effectively disabled for those groups,
    apparently on purpose. Only 'ppostauthGoodVars' actually enforces its
    patterns. Python 2 code (print statements).
    """
    # Per-group mapping of variable name -> accepted regex.
    preauthRegexps = {
        'client_id': MAC_REGEXP,
        'hotspot_id': SN_REGEXP,
        'entrypoint_id': SN_REGEXP,
        'hotspot_login_url': ANY_REGEXP,
    }
    postauthRegexps = {
        'client_id': MAC_REGEXP,
        'hotspot_id': SN_REGEXP,
        'entrypoint_id': SN_REGEXP,
        'traffic_limit': POSINT_REGEXP,
        'session_timeout': POSINT_REGEXP,
        'next_conn_in': POSINT_REGEXP,
        'session_hash': r'.*'
    }
    ppostauthGoodVars = {
        'client_id': MAC_REGEXP,
        'hotspot_id': SN_REGEXP,
        'entrypoint_id': SN_REGEXP,
    }
    if fname == 'preauthGoodVars':
        if name == 'hotspot_id' and value == 'outage':
            return True  # for unit tests
        try:
            if regex_search(preauthRegexps.get(name, EMPTY_REGEXP), value):
                pass  # return True
        except TypeError as e:
            # Non-string value; logged only, still accepted below.
            print "name '{0}' value '{1}' type '{2}'".format(name, value, type(value))
        return True
    elif fname == 'postauthGoodVars':
        try:
            if regex_search(postauthRegexps.get(name, EMPTY_REGEXP), str(value)):
                pass  # return True
        except TypeError as e:
            print "name '{0}' value '{1}' type '{2}'".format(name, value, type(value))
        return True  # doesn't check variable format
    elif fname == 'ppostauthGoodVars':
        # Only this branch actually rejects values that fail the regex.
        try:
            if regex_search(ppostauthGoodVars.get(name, EMPTY_REGEXP), value):
                return True
        except TypeError as e:
            print "name '{0}' value '{1}' type '{2}'".format(name, value, type(value))
    return False
def remove_namespace(full_name):
    """Strip everything up to and including the last ':' or '_' separator.

    Names already present in bone_map / bone_map_inverse are returned
    untouched. Faithfully reproduces the original reversed-string search,
    including the quirk that a name *ending* in a separator comes back
    unchanged (the original sliced with [-0:], i.e. the whole string).
    """
    if full_name in bone_map or full_name in bone_map_inverse:
        return full_name
    sep_pos = max(full_name.rfind(':'), full_name.rfind('_'))
    if sep_pos == -1:
        # No separator anywhere: nothing to strip.
        return full_name
    tail = full_name[sep_pos + 1:]
    return tail if tail else full_name
def put(key):
    """Store *key* in the backing file, preserving any comment lines.

    BUG FIX: the original opened the file with mode 'w' (which truncates and
    is not readable), so readlines() raised; it also never wrote write_list
    back out. Read first, then rewrite.
    """
    try:
        with open(_file(), 'r') as fh:
            line_list = fh.readlines()
    except FileNotFoundError:
        line_list = []
    # Keep only comment lines, then append the new key as the payload.
    write_list = [line for line in line_list if regex_search('^#', line)]
    write_list.append(key)
    with open(_file(), 'w') as fh:
        fh.writelines(write_list)
def part_2(advent_of_code):
    """AoC 2020 day 2 part 2: count passwords where the letter appears at
    exactly one of the two 1-based positions (XOR)."""
    with open('input.txt', 'r') as input_file:
        valid = 0
        for line in input_file:
            # Lines look like "1-3 a: abcde": two positions, a letter, a password.
            s = regex_search('(\d+)-(\d+) (\w): (\w+)', line)
            # bool != bool implements XOR of the two position checks.
            if bool(s.group(4)[int(s.group(1)) - 1] == s.group(3)) != bool(
                    s.group(4)[int(s.group(2)) - 1] == s.group(3)):
                print(s.group(1), s.group(2), s.group(3), s.group(4))
                valid += 1
        print(valid)
        advent_of_code.answer(2, valid)
def part_1(advent_of_code):
    """AoC 2020 day 2 part 1: count passwords whose letter occurs between
    min and max times (inclusive)."""
    with open('input.txt', 'r') as input_file:
        valid = 0
        for line in input_file:
            # Lines look like "1-3 a: abcde": min, max, letter, password.
            s = regex_search('(\d+)-(\d+) (\w): (\w+)', line)
            # Count occurrences of the letter via findall, then range-check.
            if int(s.group(1)) <= len(regex_findall(
                    s.group(3), s.group(4))) <= int(s.group(2)):
                print(s.group(1), s.group(2), s.group(3), s.group(4))
                valid += 1
        print(valid)
        advent_of_code.answer(1, valid)
def updateFocusColor(color):
    """Closure: set the enclosing scope's focusColor from a named peaking
    button's stylesheet, or pass the colour string straight through."""
    nonlocal focusColor
    # Look up a widget named like uiRedFocusPeaking for the given colour.
    target = getattr(self, f"ui{color.title()}FocusPeaking", None)
    if target:
        # Find the colour of the panel to be highlighted.
        match = regex_search(r'background:\s*?([#\w]+)', target.customStyleSheet)
        assert match, f"Could not find background color of {target.objectName()}. Check the background property of it's customStyleSheet."
        focusColor = match.group(1)
    else:
        # Just pass through whatever the colour is.
        focusColor = color
    # Repaint the swatch so it reflects the new focus colour.
    self.uiFocusPeakingColor.update()
def update_dates(request, assessment_id):
    """Update an assessment's start/end dates from POSTed 'start date' and
    'end date' fields, then return the refreshed assessments view.

    NOTE(review): the regex only checks a digits-dash-digits-dash-digits
    shape, not that the value is a real yyyy-mm-dd date.
    """
    start_dt = request.POST['start date']
    end_dt = request.POST['end date']
    valid = False
    # Validate Date Format
    if regex_search(r'[0-9]+\-[0-9]+\-[0-9]+', start_dt) and regex_search(r'[0-9]+\-[0-9]+\-[0-9]+', end_dt):
        st_dt_list = start_dt.split('-')
        end_dt_list = end_dt.split('-')
        valid = True
    else:
        messages.error(request, "Error: Incorrect date format. Use yyyy-mm-dd")
    if valid:
        assessment = get_object_or_404(Assessment, pk=assessment_id)  # get the assessment instance
        assessment.start_date = start_dt
        assessment.end_date = end_dt
        assessment.save()
    return all_assessments(request)  # refresh page
def get_remote_ip(request: web.Request) -> str:
    """Best-effort extraction of the client IP from proxy headers.

    Precedence: RFC 7239 `Forwarded`, then `X-Forwarded-For`, then the
    socket peer. The candidate is validated with ip_address(); any failure
    (missing header part, malformed address) yields "" rather than raising.

    NOTE(review): the Forwarded regex requires a trailing ';' after the
    for= element; a header like "for=1.2.3.4" alone falls through to the
    except and returns "" — confirm whether that is intended.
    """
    try:
        if "Forwarded" in request.headers:
            addresses = regex_search(r"for=(.*?);", request.headers["Forwarded"]).group(1)
            addr = addresses.split(",")[0].strip()
        elif "X-Forwarded-For" in request.headers:
            addr = request.headers["X-Forwarded-For"].split(",")[0].strip()
        else:
            addr = request.remote
        # Raises ValueError for anything that is not a valid IP literal.
        ip_address(addr)
        return addr
    except Exception:
        return ""
def capture_view():
    """Render captures.html with all capture JPEGs, newest first."""
    globlist = glob('static/capture/*.jpg')
    # Sort by modification time, then reverse => most recent capture first.
    globlist.sort(key=getmtime)
    globlist.reverse()
    fn = []
    for i in globlist:
        fn.append({
            "path": i,
            # Strip the directory prefix and .jpg suffix via lookaround.
            # NOTE(review): the '.' in (?=.jpg) is unescaped, so it matches
            # any char before "jpg" — harmless here but worth confirming.
            "filename": regex_search(r"(?<=static/capture/)(.*)(?=.jpg)", i).group()
        })
    return render_template("captures.html", filenames=fn)
def useMQTT(file_path, filename):
    """Function prepares the eml message and the global message and sends the
    message to the MQTT using the iottl library.

    Args:
        file_path (str): Full path to the eml file in the queue/new directory.
        filename (str): Name of the file with eml.
    """
    content = get_file_content(file_path)
    stamp = int(time.time())
    eml_msg = {"timestamp": stamp, "filename": filename, "contents": content}
    # BUG FIX: global_msg was unbound when preparation failed, so the send
    # below raised NameError; initialize and guard instead.
    global_msg = None
    try:
        # Best-effort extraction of the source IPv4 address from the filename.
        ip = regex_search(r"(?:[0-9]{1,3}\.){3}[0-9]{1,3}", filename)
        ip = "" if ip is None else ip.group()
        global_msg = {
            "timestamp": stamp,
            "family": "SMTP",
            "honeypot": utils.settings.data["relay"]["mqtt_honeypot"],
            "source": {
                "ip": ip
            },
            "destination": {
                "ip": utils.settings.data["relay"]["mqtt_destination_ip"]
            },
            "filename": filename,
        }
    except Exception:
        # BUG FIX: logging.error(msg, e) passed the exception as a %-format
        # argument with no placeholder; exception() logs it with traceback.
        logging.exception(
            "[-] (salmonrelay.py) - Error occurred during preparing global message")
    try:
        utils.settings.client.send(
            utils.settings.data["relay"]["eml_msg_topic"], eml_msg)
    except Exception:
        logging.exception(
            "[-] (salmonrelay.py) - Error occurred during sending of an eml file")
    if global_msg is not None:
        try:
            utils.settings.client.send(
                utils.settings.data["relay"]["global_msg_topic"], global_msg)
        except Exception:
            logging.exception(
                "[-] (salmonrelay.py) - Error occurred during sending of a global message")
def generic_result_check(self, run):
    """Shared sanity checks for a finished validation run: basic run fields,
    the netCDF output's metric variables, and the graphics zip archive."""
    assert run is not None
    assert run.end_time is not None
    assert run.end_time > run.start_time
    assert run.total_points > 0
    assert run.error_points >= 0
    assert run.ok_points >= 0
    assert run.output_file
    outdir = os.path.dirname(run.output_file.path)
    ## check netcdf output
    # -1 sentinels: first metric/variable establishes the expected counts.
    length = -1
    num_vars = -1
    with netCDF4.Dataset(run.output_file.path) as ds:
        ## check the metrics contained in the file
        for metric in self.out_variables:
            ## This gets all variables in the netcdf file that start with the name of the current metric
            metric_vars = ds.get_variables_by_attributes(
                name=lambda v: regex_search(r'^{}(_between|$)'.format(metric), v, IGNORECASE) is not None)
            ## check that all metrics have the same number of variables (depends on number of input datasets)
            if num_vars == -1:
                num_vars = len(metric_vars)
                assert num_vars > 0, 'No variables containing metric {}'.format(metric)
            else:
                assert len(metric_vars) == num_vars, 'Number of variables for metric {} doesn\'t match number for other metrics'.format(metric)
            ## check the values of the variables for formal criteria (not empty, matches lenght of other variables, doesn't have too many NaNs)
            for m_var in metric_vars:
                values = m_var[:]
                assert values is not None
                if length == -1:
                    length = len(values)
                    assert length > 0, 'Variable {} has no entries'.format(m_var.name)
                else:
                    assert len(values) == length, 'Variable {} doesn\'t match other variables in length'.format(m_var.name)
                # At most 35% of the entries may be NaN.
                nan_ratio = np.sum(np.isnan(values.data)) / float(len(values))
                assert nan_ratio <= 0.35, 'Variable {} has too many NaNs. Ratio: {}'.format(metric, nan_ratio)
    # check zipfile of graphics
    zipfile = os.path.join(outdir, 'graphs.zip')
    assert os.path.isfile(zipfile)
    with ZipFile(zipfile, 'r') as myzip:
        # testzip() returns the first bad member name, or None if all are OK.
        assert myzip.testzip() is None
        for fname in myzip.namelist():
            data = myzip.read(fname)
            print(fname, len(data), repr(data[:10]))
def valid_password(password):
    """Check the strength of "password".

    A password is considered strong if it has:
    8 characters length or more;
    1 digit or more;
    1 symbol or more;
    1 uppercase letter or more;
    1 lowercase letter or more.

    Returns:
        bool: True when all criteria are met, False otherwise.
        (DOC FIX: the original docstring claimed a dict of wrong criteria,
        but the function has always returned a bool.)
    """
    # Local import, matching this file's style for self-contained helpers.
    from re import search as regex_search
    # Checking password length and searching for digits, uppercase, lowercase and symbols, respectively
    length_error = len(password) < 8
    digit_error = regex_search(r"\d", password) is None
    uppercase_error = regex_search(r"[A-Z]", password) is None
    lowercase_error = regex_search(r"[a-z]", password) is None
    symbol_error = regex_search(r"\W", password) is None
    return not (length_error or digit_error or uppercase_error
                or lowercase_error or symbol_error)
def load(self, file):
    """Seed start_date/end_date mapping defaults from the file name, then
    delegate to the parent loader.

    If the base name ends in digits they are taken as a year and the dates
    default to Jan 1 / Dec 31 of that year; otherwise both default to now.
    """
    file_name = path.splitext(path.basename(file))[0]
    matched = regex_search(r'.*?(\d+)$', file_name)
    # BUG FIX: the original tested matched.group(1), which raises
    # AttributeError when the name has no trailing digits (matched is None).
    if matched:
        year = int(matched.group(1))
        self.mappings['start_date'].setdefault(
            'value', datetime(year=year, month=1, day=1))
        self.mappings['end_date'].setdefault(
            'value', datetime(year=year, month=12, day=31))
    else:
        now = datetime.utcnow()
        self.mappings['start_date'].setdefault('value', now)
        self.mappings['end_date'].setdefault('value', now)
    return super(Cerrado, self).load(file)
def upload_file(self, filename, content):
    """Replace *filename* on the HydroShare resource with *content*.

    Deletes any existing copy (ignoring not-found / not-authorized), writes
    the content to a temp file (hs_restclient uploads from disk), uploads
    it, and always cleans the temp file up.

    Raises:
        HydroShareNotFound: when the instance has no resource_id.
        Http404: when the resource disappears between delete and upload.
    """
    if self.resource_id is None:
        raise HydroShareNotFound(
            (self.resource_id, "resource has no resource_id"))
    try:
        self.client.deleteResourceFile(self.resource_id, filename)
    except HydroShareNotFound:
        # If file doesn't exist on hydroshare, it's okay, just keep breathing.
        pass
    except HydroShareNotAuthorized:
        pass
    # Write file to disk... because that's how hs_restclient needs it to be done!
    # Preserve a .csv suffix so HydroShare detects the content type.
    suffix = '.csv' if regex_search('\.csv', filename) else ''
    fd, path = mkstemp(suffix=suffix)
    with open(path, 'w+') as f:
        f.write(content)
    # upload file
    try:
        return self.client.addResourceFile(self.resource_id, path,
                                           resource_filename=filename)
    except HydroShareNotFound:
        raise Http404(u"Resource '{0}' was not found".format(
            self.resource_id))
    except Exception as e:
        raise e
    finally:
        # close the file descriptor
        os.close(fd)
        # delete the tmp file
        os.remove(path)
# Sanity check that shit
if (not storageLocation or not isdir(storageLocation)):
    print("Please enter a valid path. Ensure that the directory exists")
    exit()

# Shit is about to go down
print(f"\r\nFetching {str(total)} pages from '{base}'", end="\r\n\r\n")

# BUG FIX: re.search returns a single Match (or None), so extending `final`
# with it would fail; findall returns the list of link strings we want.
from re import findall as regex_findall

# Loop and parse each page
for i in range(1, total + 1):
    print(f"Parsing page {str(i)} out of {str(total)}")
    # Get the html of the page as an html string
    html = read_url_as_string(f"{base}?{paginationQuery}={str(i)}")
    # Get the download links of the PDFs
    # Yes, yes. Using regex on HTML strings in never good due to walla walla. But
    # in this use case it doesn't really matter much. We control the output so meh
    links = regex_findall(searchExpression, html)
    # Push links to collection
    final.extend(links)

# Check how many links we got
print(
    f"\r\n\r\nParsing complete. Starting document downloads. {str(len(final))} downloads in queue"
)
for i, link in enumerate(final):
    print(f"Downloading file {str(i+1)} of {str(len(final))}")
    download_file(link, storageLocation)
def make_new_assessment(request):
    """Create an Assessment from POST data and attach its numbered questions.

    Expects 'Assessment name', 'description', 'course id', 'start date',
    'end date', and question-N / 'answer type-N' pairs (N from 0). Returns
    the all_assessments view.
    """
    name = request.POST['Assessment name']
    description = request.POST['description']
    course_id = request.POST['course id']
    start_dt = request.POST['start date']
    end_dt = request.POST['end date']
    # Validate Date Format (shape only: digits separated by dashes).
    if not (regex_search(r'[0-9]+\-[0-9]+\-[0-9]+', start_dt)
            and regex_search(r'[0-9]+\-[0-9]+\-[0-9]+', end_dt)):
        messages.error(request, "Error: Incorrect date format. Use yyyy-mm-dd")
        # BUG FIX: the original recursed into make_new_assessment(request)
        # with the same POST data, looping until RecursionError; just show
        # the listing with the error message instead.
        return all_assessments(request)
    # Create an instance of assessment
    assessment = Assessment(
        name=name.title(),
        description=description,
        start_date=start_dt,
        end_date=end_dt,
        course=Course.objects.get(pk=course_id),
    )
    assessment.save()
    # Add questions to the assessment; fields are named question-0, question-1, ...
    for idx in range(1000):
        try:
            question_text = request.POST['question-{}'.format(idx)]
            answer_type = request.POST['answer type-{}'.format(idx)]
        except MultiValueDictKeyError:
            # No field with this index => no more questions.
            break
        # Check which answer type
        if answer_type == "Free Response":
            type_answer = Question.TYPE_TEXT
        else:
            type_answer = Question.TYPE_Rating
        # Create an instance of question
        new_question = Question(
            question_text=question_text,
            type_answer=type_answer,
        )
        new_question.save()
        # Save this new question into the assessment's ManytoMany field
        assessment.questions.add(new_question)
    assessment.save()
    messages.error(request, 'New assessment created')
    return all_assessments(request)  # go to all assessment page
def getvariable(htmltext, varname):
    """Extract the value of a JavaScript-style `varname = value;` assignment
    from *htmltext*.

    The value may optionally be wrapped in single quotes; returns the inner
    text, or None when no assignment is found. NOTE: varname is interpolated
    into the regex unescaped, as before.
    """
    # Local import, matching this file's style for self-contained helpers.
    from re import search as regex_search
    # Raw string: the original relied on '\s' surviving in a normal literal.
    pattern = varname + r'\s*=\s*[\']*([^"\';]+)[\']*;'
    result = regex_search(pattern, htmltext)
    if result:
        return result.group(1)
    return None
def _trim_activity_label_tail(s, patt): from re import search as regex_search match = regex_search(patt, s) return s[:match.start()]
def answer(self, part, a):
    """Submit answer *a* for puzzle part *part* of the calling day's module.

    Infers the day number from the caller's directory name ("Day N"), keeps
    a per-day/part history file (.answers_N) of previous submissions with
    <<<too low>>>/<<<too high>>>/<<<CORRECT>>> tags, skips known-wrong or
    duplicate answers, posts to adventofcode.com otherwise, and records the
    verdict. Retries automatically after the rate-limit cooldown.

    NOTE(review): this source arrived with its line structure collapsed;
    formatting is reconstructed — verify indentation against the original.
    """
    # Work out which "Day N" directory the caller lives in (one or two
    # stack frames up, to cover direct and wrapped invocation).
    frm = inspect.stack()[1]
    mod = inspect.getmodule(frm[0])
    x = regex_search("Day (\d+)", dirname(mod.__file__))
    if not x or not x.group(1):
        frm = inspect.stack()[2]
        mod = inspect.getmodule(frm[0])
        x = regex_search("Day (\d+)", dirname(mod.__file__))
    if not x or not x.group(1):
        print("Cannot find day")
        return
    day = x.group(1)
    prefix = "Day " + str(day) + " - Part " + str(part) + " ||"
    if a is None:
        print(prefix, "Not answered; answer is None")
    else:
        # Ensure the history file for this day/part exists.
        if not exists(
                join(self.__location__, "Day " + str(day),
                     ".answers_" + str(part))):
            f = open(
                join(self.__location__, "Day " + str(day),
                     ".answers_" + str(part)), "w+")
            f.close()
        with open(
                join(self.__location__, "Day " + str(day),
                     ".answers_" + str(part)), "r+") as previous_answers:
            old = previous_answers.read()
            old_split = old.split("\n")
            # temp aliases old_split; reverse() mutates both (newest first).
            temp = old_split
            temp.reverse()
            found = False
            if self.hints:
                # Derive bounds from previously rejected answers.
                low_border = None
                high_border = None
                # low_border = max([int(line.split(" ")[0]) for line in temp if len(line.split(" ")) == 2 and line.split(" ")[1].endswith("<<<too low>>>")])
                # high_border = min([int(line.split(" ")[0]) for line in temp if len(line.split(" ")) == 2 and line.split(" ")[1].endswith("<<<too high>>>")])
                for line in temp:
                    split_line = [x for x in line.split(' ', 1) if x]
                    if len(split_line) == 2 and split_line[1].endswith(
                            "<<<too low>>>") and (
                                low_border is None
                                or int(split_line[0]) > low_border):
                        low_border = int(split_line[0])
                    elif len(split_line) == 2 and split_line[1].endswith(
                            "<<<too high>>>") and (
                                high_border is None
                                or int(split_line[0]) < high_border):
                        high_border = int(split_line[0])
                # Refuse to submit answers outside the known bounds.
                if low_border is not None and low_border > int(a):
                    print(
                        prefix,
                        "Your answer is likely to be wrong; you have provided",
                        a, "while you have previously provided",
                        str(low_border) + ", which was too low.")
                    return
                elif high_border is not None and high_border < int(a):
                    print(
                        prefix,
                        "Your answer is likely to be wrong; you have provided",
                        a, "while you have previously provided",
                        str(high_border) + ", which was too high.")
                    return
            # Skip submission when already answered correctly or duplicate.
            for line in temp:
                split_line = [x for x in line.split(' ', 1) if x]
                if len(split_line) == 2 and split_line[1].endswith(
                        "<<<CORRECT>>>"):
                    print(
                        prefix,
                        "You have already correctly answered this question. The answer was",
                        split_line[0] + ". Your current answer is " + str(a))
                    found = True
                    break
                elif len(split_line) == 1 and split_line[0] == str(a):
                    # NOTE(review): the inner `len(split_line) == 2` can never
                    # be true inside this `len(split_line) == 1` branch, so the
                    # hinted message below looks unreachable — confirm intent.
                    if len(split_line) == 2 and self.hints:
                        print(
                            prefix, "You answered", a, "---",
                            "You have already provided that answer. That answer was",
                            regex_search("<<<([\w\s]+)>>>",
                                         split_line[1]).group(1) + ".")
                    else:
                        print(prefix, "You answered", a, "---",
                              "You have already provided that answer.")
                    found = True
                    break
            if not found:
                print(prefix, "Answering: ", a)
                response = self.session.post(
                    'https://adventofcode.com/' + str(self.year) + '/day/' +
                    str(day) + "/answer", {
                        'level': part,
                        'answer': str(a)
                    })
                if response.status_code == 200:
                    # The verdict text lives in the page's <article> element.
                    response = BeautifulSoup(
                        response.content, features='html.parser').find("article")
                    t = response.get_text()
                    if t.startswith("You gave an answer too recently;"):
                        # Rate-limited: parse the cooldown and retry once it passes.
                        seconds_find = regex_search(
                            "You have ((\d+)m\s*)?(\d+)s left to wait.", t)
                        if seconds_find:
                            s = int(seconds_find.group(3)) + 1
                            if seconds_find.group(2):
                                s += 60 * int(seconds_find.group(2))
                            print(prefix, "Too fast! Waiting for", s,
                                  "seconds before automatic retry!")
                            sleep(s)
                            self.answer(part, a)
                    elif t.startswith("That's not the right answer"):
                        print(prefix, t)
                        # Record the wrong answer, tagged with the hint if any.
                        if t.startswith(
                                "That's not the right answer; your answer is too high."
                        ):
                            old_split.append(str(a) + " <<<too high>>>")
                        elif t.startswith(
                                "That's not the right answer; your answer is too low."
                        ):
                            old_split.append(str(a) + " <<<too low>>>")
                        else:
                            old_split.append(str(a))
                        previous_answers.seek(0)
                        previous_answers.write("\n".join(
                            [x for x in old_split if x]))
                    elif t.startswith("That's the right answer!"):
                        print(prefix, t)
                        old_split.append(str(a) + " <<<CORRECT>>>")
                        previous_answers.seek(0)
                        previous_answers.write("\n".join(
                            [x for x in old_split if x]))
                        # Part 2 just unlocked; refresh the local puzzle files.
                        self.pull(wait=False)
                    elif t.startswith(
                            "You don't seem to be solving the right level."):
                        print(prefix, t)
                    elif t.startswith("--- Day"):
                        print(
                            prefix,
                            "There is likely something wrong with your session token, maybe it is outdated. Please fix it in `.session` before continuing."
                        )
                    else:
                        print(t)
                elif response.status_code == 302:
                    print(prefix, "Fill in your session token in `.session`.")
                else:
                    print(prefix, "Error found:", response.status_code,
                          response.text)
    return
def pull(self, wait=True):
    """Download puzzle pages/inputs for days 1-25 and scaffold main.py files.

    For each day: creates the "Day N" directory and input.txt if missing,
    writes a commented puzzle description plus a part_1/part_2 template into
    an empty main.py, and inserts the part-2 description once it unlocks.
    When the year is not yet open, optionally sleeps until the server's
    countdown expires and retries.

    NOTE(review): this source arrived with its line structure collapsed; the
    nesting and the exact whitespace of the template string below are
    reconstructed — verify both against the original file.
    """
    for day in range(1, 26):
        main_response = self.session.get('https://adventofcode.com/' +
                                         str(self.year) + '/day/' + str(day))
        day_name = "Day " + str(day)
        if main_response.status_code == 200:
            # Ensure the per-day directory exists.
            if not isdir(join(self.__location__, day_name)):
                print(day_name)
                mkdir(day_name)
            # Fetch the puzzle input once.
            if not exists(join(self.__location__, day_name, "input.txt")):
                print(day_name, "-", "input.txt")
                input_response = self.session.get(
                    'https://adventofcode.com/' + str(self.year) + '/day/' +
                    str(day) + "/input")
                if input_response.status_code == 200:
                    with open(
                            join(self.__location__, day_name, "input.txt"),
                            "w") as f:
                        f.write(str(input_response.text))
                        f.close()
            # Create an empty main.py placeholder if absent.
            if not exists(join(self.__location__, day_name, "main.py")):
                f = open(join(self.__location__, day_name, "main.py"), "w+")
                f.close()
            with open(join(self.__location__, day_name, "main.py"),
                      'r+') as f:
                old = f.read()
                soup = BeautifulSoup(main_response.content,
                                     features='html.parser')
                # One <article> per unlocked puzzle part.
                parts = soup.findAll("article")
                if old == "":
                    # Fresh file: write header, part-1 description, template.
                    print(day_name, "-", "Main.py")
                    f.write(
                        "from pull import AocInteraction\n\n\n# https://adventofcode.com/"
                        + str(self.year) + "/day/" + str(day) + "\n")
                    lines = [
                        i for i in parts[0].get_text().replace(
                            " ---", " ---\n").split("\n") if i
                    ]
                    f.write(str('\n'.join(map(lambda x: "# " + x, lines))))
                    f.write("""


def part_1(advent_of_code):
    with open('input.txt', 'r') as input_file:
        advent_of_code.answer(1, None)


def part_2(advent_of_code):
    with open('input.txt', 'r') as input_file:
        advent_of_code.answer(2, None)


if __name__ == "__main__":
    aoc_interaction = AocInteraction()
    part_1(aoc_interaction)
    part_2(aoc_interaction)
""")
                if len(parts) == 2:
                    # Part 2 is unlocked: insert its commented description
                    # directly above the part_2 stub if not already present.
                    f.seek(0)
                    old = f.read()
                    old_split = old.split("\n")
                    for line_nr in range(len(old_split)):
                        if old_split[
                                line_nr] == 'def part_2(advent_of_code):' and (
                                    old_split[line_nr - 1] == ""
                                    or old_split[line_nr - 1][0] != "#"):
                            prefix = ""
                            if old_split[line_nr - 1] != "":
                                prefix = "\n\n"
                            print("Found!")
                            lines = [
                                i for i in parts[1].get_text().replace(
                                    " ---", " ---\n").split("\n") if i
                            ]
                            old_split.insert(
                                line_nr, prefix + (str('\n'.join(
                                    map(lambda x: "# " + x, lines)))))
                            break
                    f.seek(0)
                    f.write("\n".join(old_split))
        elif main_response.text.startswith(
                "Please don't repeatedly request this endpoint before it unlocks!"
        ):
            # Year not open yet: read the countdown and optionally wait it out.
            countdown_page = self.session.get('https://adventofcode.com/' +
                                              str(self.year))
            countdown_f = regex_search("var server_eta = (\d+);",
                                       countdown_page.text)
            if wait and countdown_f and countdown_f.group(1):
                countdown_s = countdown_f.group(1)
                print(day_name,
                      "This day is not yet unlocked... It will take about",
                      self._s_to_text(countdown_s), "seconds to unlock.")
                print(
                    "Waiting to unlock automatically... To cancel, terminate script."
                )
                sleep(int(countdown_s))
                print("Unlocking...")
                self.pull()
                break
        else:
            print(day_name, "-", "Error found:",
                  str(main_response.status_code), main_response.text)
            break