def important_time(self):
    """
    Return the critical time required for the tmux output.

    Local names ending in _s hold time intervals as integer seconds.
    The result is the wall-clock datetime at which the current phase
    (work or break) is due to end.
    """
    work_s = self.work_time().seconds
    break_s = self.break_time().seconds
    # Pick the remaining-time calculator for the current phase:
    # tomato() while working, potato() while on a break.
    if self.status():
        remaining_s = tomato(work_s, break_s)
    else:
        remaining_s = potato(work_s, break_s)
    return datetime.now() + timedelta(0, remaining_s)
def stale(self):
    """
    Return whether this session has gone stale.

    A session with no toggles is never stale, nor is one that is
    currently in a work period. Otherwise it is stale when the pending
    tomato exceeds the maximum allowed work interval.
    """
    if not self.toggles:
        return False
    if self.status():
        # Currently working, so the session cannot be stale.
        is_stale = False
    else:
        # Stale when tomato time is unacceptably long.
        tomato_s = tomato(self.work_time().seconds, self.break_time().seconds)
        logging.info("Evaluating tomato to check staleness")
        logging.info("Tomato was found to be %d mins" % (tomato_s / 60))
        is_stale = tomato_s > max_work()
    logging.info("Session staleness was found to be %d" % is_stale)
    return is_stale
def important_time(self):
    """
    Return the critical time required for the tmux output.

    Variables ending in _s are integers representing time intervals
    in seconds. The returned value is the datetime at which the
    current phase (work or break) ends.
    """
    work_s = self.work_time().seconds
    break_s = self.break_time().seconds
    # NOTE(review): the previous revision also set st = "ON"/"OFF" here,
    # but that value was never used or returned — dead code removed.
    if self.status():
        remaining_time_s = tomato(work_s, break_s)
    else:
        remaining_time_s = potato(work_s, break_s)
    imp_time = datetime.now() + timedelta(0, remaining_time_s)
    return imp_time
def stale(self):
    """
    Return whether this session has gone stale.

    Sessions without toggles are never stale; neither are sessions
    currently in a work period. Otherwise staleness means the pending
    tomato runs longer than the maximum allowed work interval.
    """
    if not self.toggles:
        return False
    if self.status():
        # A session that is currently working is not stale.
        session_stale = False
    else:
        # Stale if the tomato time is unacceptably long.
        tomato_s = tomato(self.work_time().seconds, self.break_time().seconds)
        logging.info("Evaluating tomato to check staleness")
        logging.info("Tomato was found to be %d mins" % (tomato_s / 60))
        session_stale = tomato_s > max_work()
    if session_stale:
        logging.info("Session was found to be stale")
    else:
        logging.info("Session is not stale")
    return session_stale
"""Download the Rider-Waite tarot deck images from Wikipedia."""
import json, os
import urllib
import urllib.request
from selenium import webdriver
from bs4 import BeautifulSoup
from tomato import tomato
import wget

domain = 'https://en.wikipedia.org'
url = 'https://en.wikipedia.org/wiki/Rider-Waite_tarot_deck'

scraper = tomato()
soup = scraper.changeUrl(url)

# Each card in the gallery links to its Wikipedia file-description page.
cardList = soup.find_all('li', class_='gallerybox')
imgList = [each.find('a')['href'] for each in cardList]

# Visit each file page and pull the href of the full-resolution image.
l = []
for each in imgList:
    soup = scraper.changeUrl(domain + each)
    thing = soup.find('div', class_='fullImageLink').find('a')['href']
    print(thing)
    l.append(thing)

for href in l:
    # The hrefs are protocol-relative (e.g. //upload.wikimedia.org/...).
    # BUG FIX: href.split('.')[-2] produced a slash-filled path fragment,
    # not a filename — use the final path segment instead.
    fname = os.path.basename(href)
    print('downloading ' + fname)
    # BUG FIX: the computed fname was immediately overwritten by
    # wget.download's return value, so it was never used for saving;
    # pass it as the explicit output name instead.
    wget.download('http:' + href, out=fname)