def start(self):
    while self.continuous:
        self.delete_past_backups()
        self.logger.info("Doing cluster backup at %s for %s" % (datetime.date.today(), self.hostname))
        filename = self.generate_filename()
        self.logger.info("Starting to zip dirs named %s..." % (self.dirs))
        with Timer() as compression_elapsed_time:
            self.zipit(self.dirs, filename)
            compression_elapsed_time.end_timer()
        self.export_compression_timer(compression_elapsed_time)
        self.export_backup_file_size(filename)
        self.logger.info("Successfully zipped dirs named %s" % (self.dirs))
        self.logger.info("Uploading zipped file %s" % filename)
        if self.bucket_name:
            with Timer() as upload_elapsed_time:
                self.storage.save_file(filename, key=filename, bucket_name=self.bucket_name)
                upload_elapsed_time.end_timer()
            self.export_upload_timer(upload_elapsed_time)
            self.logger.info("Successfully uploaded %s" % filename)
        else:
            self.logger.info("No bucket specified")
        self.logger.info("Waiting %s hours to backup again..." % self.interval)
        pause.hours(self.interval)
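`Timer` above is not a standard-library class; a minimal sketch, assuming a context manager that records elapsed seconds when `end_timer()` is called (the implementation below is hypothetical, not taken from the source):

import time

class Timer:
    # Hypothetical sketch of the Timer assumed by the backup loop above.
    def __enter__(self):
        self.start = time.monotonic()
        self.elapsed = None
        return self

    def end_timer(self):
        # Record elapsed wall-clock seconds since __enter__.
        self.elapsed = time.monotonic() - self.start

    def __exit__(self, exc_type, exc, tb):
        # Fall back to ending the timer if the caller never did.
        if self.elapsed is None:
            self.end_timer()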
def fb_bible():
    if not os.path.exists(bible_log):
        # First run: post from the beginning of every book.
        for book in bible:
            get = bible_dir + book + '.json'
            with open(get, 'r') as f:
                data = json.load(f)
            # note: range(len(...) - 1) skips the final verse of each book
            num_verse = range(len(data['verses']) - 1)
            for n in num_verse:
                verse = data['verses'][n]['verse']
                chap = data['verses'][n]['chapter']
                text = data['verses'][n]['text']
                verse_str = text + ' - ' + book + str(chap) + ':' + str(verse)
                log_str = {'book': book, 'start': n}
                graph.put_object(parent_object='me', connection_name='feed', message=verse_str)
                # print(verse_str)
                with open(bible_log, 'w+') as log:
                    log.write(json.dumps(log_str))
                pause.hours(1)
    else:
        # Resume from the book and verse recorded in the log.
        with open(bible_log, 'r') as log:
            # the log is written with json.dumps, so json.loads is safer than eval
            last_v = json.loads(log.read())
        start = 0  # default to the first book if the logged one is not found
        for n in range(len(bible)):
            if bible[n] == last_v['book']:
                start = n
        for n in range(start, len(bible)):
            with open(bible_dir + bible[n] + '.json', 'r') as f:
                data = json.load(f)
            last_verse = last_v['start']
            num_verse = len(data['verses']) - 1
            if last_verse > num_verse:
                last_verse = 0
            for i in range(last_verse, num_verse):
                verse = data['verses'][i]['verse']
                chap = data['verses'][i]['chapter']
                text = data['verses'][i]['text']
                verse_str = text + ' - ' + bible[n] + str(chap) + ':' + str(verse)
                log_str = {'book': bible[n], 'start': i}
                graph.put_object(parent_object='me', connection_name='feed', message=verse_str)
                # print(verse_str)
                with open(bible_log, 'w+') as log:
                    log.write(json.dumps(log_str))
                pause.hours(1)
site_text = sn.text
if site_text.startswith("Site"):
    site_text = site_text.split("\n")[1]
if site_text == desired_site:
    accordian_site.click()
    reservation = wait.until(
        ec.element_to_be_clickable((By.ID, f"reserveButton-{acc_idx}")))
    print("Reservation Queued. Waiting until trigger time...")
    pause.until(dt)
    reservation.click()
    confirm_ac = wait.until(
        ec.presence_of_element_located((By.ID, "mat-checkbox-3")))
    all_correct = confirm_ac.find_element_by_class_name("mat-checkbox-label")
    all_correct.click()
    confirm_ra = wait.until(
        ec.presence_of_element_located((By.ID, "mat-checkbox-4")))
    read_ack = confirm_ra.find_element_by_class_name("mat-checkbox-label")
    read_ack.click()
    rb = wait.until(
        ec.element_to_be_clickable((By.ID, "confirmReservationDetails")))
    rb.click()
    print("Hopefully that worked!!!")
    print("Reservation completed. Holding open to checkout...")
    pause.hours(2)
    driver.close()
# (fragment: inside the update loop; the `if` matching the `else` below sits above this excerpt)
        localtime = time.localtime(make_time)
        next_time = time.strftime("%X %x %Z", localtime)
        f = open("mail.html", "w")
        f.write(
            "<p>Hi, </p> \n<p>Here is the {} update for {} from <b><a href='https://www.nhc.noaa.gov/gtwo.php?basin=atlc&fdays=5'>NOAA</a></b>.</p>\n <p>The next update will be sent at {}. </p> \n <pre> {} </pre> \n"
            .format(time.strftime('%I %p'), storm_name, next_time, outlook_text))
        f.close()
        os.system(
            "wget -O storm.png https://www.nhc.noaa.gov/storm_graphics/AT09/refresh/AL092020_5day_cone_no_line_and_wind+png/storm.png")
        os.system(
            "wget -O message.png https://www.nhc.noaa.gov/storm_graphics/AT09/refresh/AL092020_key_messages+png/messages.png")
        time.sleep(1)
        os.system(
            "mutt -e 'set content_type=text/html crypt_use_gpgme=no' -a storm.png -a message.png -s '{} Update: {}' -c {} < mail.html"
            .format(storm_name, current_time, EMAIL))
        print("Email update sent to: {} at {}. \n".format(EMAIL, current_time))
        driver.quit()
        wait_bool = True
        timeout = timeout - 1
        continue
    else:
        pause.hours(wait_interval)
        wait_bool = False
        continue
accordian_sites = site_list.find_elements_by_tag_name("mat-expansion-panel")
for acc_idx, accordian_site in enumerate(accordian_sites):
    sitenumbers = accordian_site.find_elements_by_tag_name("h3")
    for sn in sitenumbers:
        site_text = sn.text
        if site_text.startswith("Site"):
            site_text = site_text.split("\n")[1]
        if site_text == desired_site:
            accordian_site.click()
            reservation = wait.until(ec.element_to_be_clickable((By.ID, f"reserveButton-{acc_idx}")))
            print("Reservation Queued. Waiting until trigger time...")
            pause.until(dt)
            reservation.click()
            confirm_ac = wait.until(ec.presence_of_element_located((By.ID, "mat-checkbox-3")))
            all_correct = confirm_ac.find_element_by_class_name("mat-checkbox-label")
            all_correct.click()
            confirm_ra = wait.until(ec.presence_of_element_located((By.ID, "mat-checkbox-4")))
            read_ack = confirm_ra.find_element_by_class_name("mat-checkbox-label")
            read_ack.click()
            rb = wait.until(ec.element_to_be_clickable((By.ID, "confirmReservationDetails")))
            rb.click()
            print("Hopefully that worked!!!")
            print("Reservation completed. Holding open to checkout...")
            pause.hours(1)
            driver.close()
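Both reservation snippets hinge on `pause.until(dt)`, which blocks until a given datetime or timestamp; a rough, hypothetical equivalent of that call (not the library's actual source) sleeps in short chunks and re-checks the clock:

import time
import datetime

def until(dt):
    # Hypothetical stand-in for pause.until: block until the target time,
    # waking periodically so oversleeping and clock drift are handled.
    target = dt.timestamp() if isinstance(dt, datetime.datetime) else float(dt)
    while True:
        remaining = target - time.time()
        if remaining <= 0:
            return
        time.sleep(min(remaining, 60))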
# (fragment: the two lines below close an earlier loop that collects video links)
    links.insert(counter, video_link + ' ' + time)  # using INSERT for mixing videos
    counter += 2  # counter for mixing videos from other sources
counter = 0

# watching videos
link_file = os.path.dirname(sys.argv[0]) + '/watched.txt'
for link in links:
    link_and_duration = link.split(' ')
    # skip videos that are already in the watched file
    if any(link_and_duration[0] in s for s in open(link_file, "r")):
        print("watched")
    else:
        driver.get(link_and_duration[0])
        duration = link_and_duration[1].split(":")
        # durations under an hour come as MM:SS; prepend 0 hours
        if len(duration) == 2:
            duration.insert(0, '0')
        wr = open(link_file, "a")
        # wr.write(title+"\n")
        wr.write(link + "\n")  # write link to watched file
        wr.close()
        # sleep while the user is "watching"
        pause.hours(int(duration[0]))
        pause.minutes(int(duration[1]))
        pause.seconds(int(duration[2]))
driver.quit()
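The three chained pause calls could be collapsed into one; a hypothetical helper, not in the original, that normalizes an "MM:SS" or "HH:MM:SS" duration string into a single sleep:

import pause

def pause_for_duration(duration_str):
    # Hypothetical helper: accepts "MM:SS" or "HH:MM:SS" and blocks
    # for the total run time with one call.
    parts = [int(p) for p in duration_str.split(":")]
    if len(parts) == 2:  # no hours field
        parts.insert(0, 0)
    hours, minutes, seconds = parts
    pause.seconds(hours * 3600 + minutes * 60 + seconds)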
def scanner_thread():
    while True:
        run_downloader()
        pause.hours(12 * random.random())  # wait a random 0-12 hours between runs
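Nothing in the snippet shows how scanner_thread is launched; a plausible, assumed setup runs it on a daemon thread:

import threading

# Hypothetical launch: run the scanner loop in the background so the main
# program keeps going; daemon=True lets the process exit without joining it.
threading.Thread(target=scanner_thread, daemon=True).start()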
def output():
    # Format the collected posts as a reddit markdown table and message the user.
    # (the def line is implied by the output() call in the stream loop below)
    format_title = '|'.join(['title', 'author', 'subreddit', 'url', '# of comments\n'])
    format_columns = '|'.join(['-', '-', '-', '-', '-\n'])
    for i in range(len(posts)):
        posts[i] = '|'.join([
            posts[i].title,
            str(posts[i].author),
            str(posts[i].subreddit),
            posts[i].shortlink,
            str(posts[i].num_comments)
        ])
    format_table = '\n'.join(posts)
    print(format_title + format_columns + format_table)
    reddit.redditor(message_user).message('New Posts', format_title + format_columns + format_table)
    posts.clear()

# Optional parameters: pause_after=0, skip_existing=True; with pause_after=0
# the stream yields None whenever a fetch returns no new posts.
for post in subreddit.stream.submissions(pause_after=0):
    # Avoids posts that link to the same url in multiple subreddits
    if post is not None and post.url not in url_set:
        url_set.add(post.url)
        posts.insert(0, post)
    # If there are new posts, send message
    if post is None and len(posts) > 0:
        output()
    # If there aren't any new posts, pause for 24 hours
    if len(posts) == 0:
        pause.hours(24)