def update_photo(id_group):
    """Render the page for *id_group* into its image file via wkhtmltoimage."""
    url = get_url(id_group)
    target = get_img_name(id_group)
    try:
        imgkit.from_url(url, target, options={"xvfb": ""})
    except Exception:
        # NOTE(review): `logging` is called directly here — if it is the stdlib
        # module (not a project helper) this would raise TypeError; confirm.
        logging("Error update_photo(with access)")
async def my_background_task(self):
    """Post the daily Almanax screenshot to CHANNEL_ID, then sleep until midnight."""
    await self.wait_until_ready()
    # The channel id that we want the bot to send message
    channel = self.get_channel(CHANNEL_ID)
    print('Background task started')
    print('------')
    while not self.is_closed():
        render_options = {
            'format': 'jpg',
            'encoding': "UTF-8",
            'crop-w': '455',
            'crop-h': '250',
            'crop-x': '250',
            'crop-y': '545',
            'user-style-sheet': 'hide.css',
        }
        img_path = f'almanax-{date.today().strftime("%d-%m-%Y")}.jpg'
        imgkit.from_url('http://www.krosmoz.com/fr/almanax', img_path,
                        options=render_options)
        # Send image to Discord
        print('Send a new Almanax day')
        await channel.send(file=discord.File(img_path))
        # Delete img file
        os.remove(img_path)
        # Wait until the next midnight before posting again.
        now = datetime.now()
        next_midnight = (now + timedelta(days=1)).replace(hour=0, minute=0, second=0)
        duration = next_midnight - now
        print(f'Waiting {duration.total_seconds()} seconds.')
        await asyncio.sleep(duration.total_seconds())
def takescreengrab(ip):
    """Screenshot the page at *ip* into /pictures/ and return a JSON status."""
    now = datetime.datetime.now()
    shot_date = now.strftime("%Y%m%d")
    shot_time = now.strftime("%H%M%S")
    # File name: <6 random digits>-<YYYYMMDD>-<HHMMSS>.jpg
    filename = "%s-%s-%s.jpg" % (random.randint(100000, 999999), shot_date, shot_time)
    path = "/pictures/" + filename
    response = {'date': shot_date, 'time': shot_time}
    try:
        imgkit.from_url(ip, path, options={'quiet': ''})
    except ConnectionRefusedError:
        response['Message'] = "Connection refused"
    except IOError:
        response['Message'] = "IOError on"
    else:
        response['Filename'] = filename
    return jsonify(response)
def get(url):
    """Retry screenshotting *url* into 'screenshot.jpg' until wkhtmltoimage succeeds."""
    sucesso = False
    while not sucesso:
        try:
            # Pick the wkhtmltoimage binary that matches the host OS.
            binary = windows_path if os.name == 'nt' else heroku_path
            config = imgkit.config(wkhtmltoimage=binary)
            options = {
                'format': 'jpg',
                'crop-h': '1230',
                'crop-w': '1440',
                'crop-x': '0',
                'crop-y': '0',
                'encoding': "UTF-8",
                'quiet': '',
            }
            imgkit.from_url(url, 'screenshot.jpg', config=config, options=options)
            sucesso = True
        except OSError as e:
            print(e)
            sleep(2)
def screenshot(twit_url, twit_id):
    """Render the tweet at *twit_url* to <twit_id>.png; return the filename, or the error text."""
    render_opts = {
        'encoding': "UTF-8",
        'javascript-delay': '1000',
        'crop-h': 600,
        'xvfb': ''
    }
    imgfile = twit_id + '.png'
    try:
        page = BeautifulSoup(urllib.request.urlopen(twit_url), 'html.parser')
        tweet = page.find("div", class_="permalink-tweet")
        # NB: keep the truthiness test — an empty Tag is falsy in BeautifulSoup.
        if tweet:
            head = page.head
            # Make icon sprites transparent so they don't render as solid boxes.
            head.append(BeautifulSoup(
                "<style>.icon { background: transparent }</style>", "html.parser"))
            imgkit.from_string(str(head) + str(tweet), imgfile, options=render_opts)
        else:
            # Not a permalink page — fall back to rendering the raw URL.
            imgkit.from_url(twit_url, imgfile, options=render_opts)
    except Exception as e:
        logging.debug(traceback.format_exception(*sys.exc_info()))
        return str(e)
    return imgfile
def get_outing(rno, pas, tid):
    """Open the student permission form, screenshot a page, and send it to chat *tid*.

    :param rno: roll number (currently unused here)
    :param pas: password (currently unused here)
    :param tid: chat id the screenshot is sent to
    """
    br = RoboBrowser(
        history=True,
        parser="html.parser",
        user_agent='Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.6) Gecko/20070725 Firefox/2.0.0.6'
    )
    br.open("http://studentscorner.vardhaman.org/students_permission_form.php")
    imgkit.from_url('http://google.com', 'out.jpg')
    # Bug fix: the original referenced undefined names `out` and `message`
    # (NameError) and never closed the file. Send the file we just wrote to
    # the chat id we were given, and close the handle when done.
    with open('out.jpg', 'rb') as photo:
        bot.send_photo(tid, photo)
def collect_png(site, save_dir, option=0):
    """Screenshot *site* (and optionally its linked pages) as PNGs under *save_dir*.

    :param site: root URL of the site
    :param save_dir: output directory; resume state is read from <save_dir>.picklerick
    :param option: 0 = crawl via get_all_links (resumable), 1 = links on the
        landing page only, anything else = just the site itself
    """
    try:
        # Fix: the pickle file handle was never closed; use a context manager,
        # and narrow the bare `except:` to the failures we actually expect.
        with open(save_dir + ".picklerick", "rb") as fh:
            pages = pickle.load(fh)
        page = pages[-1]
    except (OSError, pickle.UnpicklingError, EOFError, IndexError):
        # No usable resume state — start fresh from the site root.
        pages = []
        page = site
    if option == 0:
        links = get_all_links(site, page, save_dir, pages)
    elif option == 1:
        links = get_links_on_page(site, site)
    else:
        links = [site]
    # Loop-invariant render options, hoisted out of the loop.
    options = {
        'format': 'png',
        'width': 1080,
        'disable-smart-width': ''
    }
    for link in links:
        if link == site:
            filename = save_dir + "/home.png"
        else:
            # Skip one extra char when save_dir already ends with "/".
            n = save_dir[-1] == "/"
            if link[-1] == "/":
                link = link[:-1]
            filename = save_dir + remove_extension(
                link[link.rfind("/") + n:]) + ".png"
        try:
            imgkit.from_url(link, filename, options=options)
        except Exception:
            # Best-effort: skip pages wkhtmltoimage cannot render, but no
            # longer swallow KeyboardInterrupt/SystemExit.
            pass
def pharming(url):
    """Heuristic pharming detector for *url*.

    Resolves the domain through a set of trusted public resolvers and through
    the local resolver; if the answers disagree, screenshots both hosts and
    compares the rendered pages pixel-for-pixel.

    :return: 1 legitimate, -1 likely attack, 0 suspicious (lookup/render failed)
    """
    my_resolver = dns.resolver.Resolver()
    # NOTE(review): '8.8.8.9' is not a known Google resolver — possibly 8.8.4.4
    # was intended; left unchanged to preserve behavior.
    my_resolver.nameservers = ['208.67.222.222', '208.67.220.220', '8.8.8.8',
                               '8.8.8.9', '156.154.70.1', '156.154.71.1']
    subDomain, domain, suffix = extract(url)
    host = domain + '.' + suffix
    try:
        answer = my_resolver.query(host)
        addr2 = socket.gethostbyname(host)  # IP according to the local DNS
        add1 = [str(rdata) for rdata in answer]
        if addr2 in add1:
            # Bug fix: this branch used to fall through and return None;
            # agreement between resolvers means the site is legitimate.
            return 1
        config = imgkit.config(wkhtmltoimage="/usr/local/bin/wkhtmltoimage")
        imgkit.from_url('http://' + add1[0], 'out1.jpg', config=config)
        imgkit.from_url('http://' + addr2, 'out2.jpg', config=config)
        original = cv2.imread("out1.jpg")
        duplicate = cv2.imread("out2.jpg")
        if original.shape != duplicate.shape:
            # Different geometry: original behavior treats this as legit.
            return 1
        difference = cv2.subtract(original, duplicate)
        b, g, r = cv2.split(difference)
        if cv2.countNonZero(b) == 0 and cv2.countNonZero(g) == 0 and cv2.countNonZero(r) == 0:
            return 1  # pixel-identical pages: legit
        return -1  # same size, different content: likely attack
    except Exception:
        return 0  # suspicious: DNS lookup or rendering failed
def htmlurl_to_image(UrlPath="", printer_name="", widthPage=300, heightPage=100):
    """Render the HTML at *UrlPath* to a temporary PNG and send it to a printer.

    :param UrlPath: request address (URL) to render
    :param printer_name: printer name
    :param widthPage: render width in pixels
    :param heightPage: render height in pixels
    :return: dict from the print service, or {'Error': ...} on render failure
    """
    requestMessage = {}
    # Fix: tempfile.mktemp is deprecated and race-prone; mkstemp atomically
    # creates the file. It also guarantees `filename` is bound before the
    # try block, so the error path can always reference it.
    fd, filename = tempfile.mkstemp(".png")
    os.close(fd)
    try:
        imgkit.from_url(UrlPath, filename, options={
            'width': widthPage,
            'height': heightPage
        })
    except Exception:
        requestMessage["Error"] = "create temp file %s %s" % (
            filename, traceback.format_exc())
        return requestMessage
    requestMessage = print_local_file(filename, printer_name)
    try:
        os.remove(filename)
    except Exception:
        # Cleanup is best-effort; report but don't fail the print result.
        print("Remove file from temp :(%s) %s" % (filename,
                                                  traceback.format_exc()))
    return requestMessage
def dark_search():
    """Search the configured site for the given keywords and return a cached screenshot."""
    keyword = request.args.get('keywords')
    site = request.args.get('domain')
    siteConfig = siteconfig.findAvailableSiteConfigure(site)
    if siteConfig is not None:
        url = siteConfig['search_url'] + keyword
    else:
        # Unknown domain: fall back to a YouTube search.
        url = "https://www.youtube.com/results?search_query=" + keyword
        site = 'youtube'
    name = current_user.email if current_user.is_authenticated else ""
    userfolder = os.path.join(Config.IMG_CACHE, name)
    Path(userfolder).mkdir(parents=True, exist_ok=True)
    filename = os.path.join(userfolder, str(uuid.uuid4()) + site + '.jpg')
    if not os.path.exists(filename):
        # xvfb is only needed (and available) off Windows.
        options = {'quiet': '', "xvfb": ''} if os.name != 'nt' else {}
        imgkit.from_url(url, filename, options=options)
    return send_file(filename, mimetype='image/jpg')
def screenShot():
    """Handler: screenshot the URL in the JSON body; return 'Success' or 'Failure'."""
    json_ = request.json
    print('json:', json_)
    logger.info('json_ %s' % json_)
    json_obj = json.dumps(json_)
    print(json_['data'])
    inputurl = str(json_['data'])
    logger.info('json_ %s' % json_obj)
    # Derive an image name from the URL: text between the scheme and the first
    # "co", stripped of non-word characters.
    scheme = 'https' if 'https' in inputurl else 'http'
    match = re.search(scheme + r':(.*?)co', inputurl)
    if match is None:
        # Bug fix: previously .group(1) on a failed match raised
        # AttributeError outside the try; report a failure instead.
        print('Could not derive an image name from', inputurl)
        return "Failure"
    inputurlimg = re.sub(r'[^\w]', '', match.group(1))
    print(inputurlimg)
    try:
        imgOutput = inputurlimg + ".jpg"
        print(imgOutput)
        imgkit.from_url(inputurl, imgOutput, options={'xvfb': ''})
        output = "Success"
    except Exception as e:
        output = "Failure"
        print(e)
    return output
def main(page_url):
    """Archive *page_url*: save a screenshot, the HTML, and its linked CSS files."""
    extracted = tldextract.extract(page_url)
    folder_name = "./" + extracted.domain + "." + extracted.suffix
    css_folder = folder_name + "/css"
    screenshot_folder = folder_name + "/screenshot"
    # create the directories to store the files
    # (only when the path does not already exist)
    if not os.path.exists(folder_name):
        os.mkdir(folder_name)
        os.mkdir(css_folder)
        os.mkdir(screenshot_folder)
        print("Created Folders for : ", page_url)
    # make screenshot of the webpage
    imgkit.from_url(page_url,
                    screenshot_folder + "/" + extracted.domain + ".jpg")
    print("Done making screenshot for : " + page_url)
    hdr = {
        'User-Agent':
        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
        'Accept':
        'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
        'Accept-Encoding': 'none',
        'Accept-Language': 'en-US,en;q=0.8',
        'Connection': 'keep-alive'
    }
    req = urllib2.Request(page_url, headers=hdr)
    # Fix: the page used to be fetched twice via two urlopen calls;
    # fetch once and reuse the bytes for both the parse and the save.
    page_contents = urllib2.urlopen(req).read()
    soup = BeautifulSoup(page_contents, 'html.parser')
    links = soup.find_all('link', href=True)
    with open(folder_name + "/index.html", "w+") as f:
        f.write(page_contents)
    print("Done Writing the HTML file")
    for url in links:
        filename = url['href'].split('/')[-1].split('.')[0]
        file_ext = url['href'].split('.')[-1]
        if filename != '' and file_ext == 'css':
            try:
                req = urllib2.Request(url['href'], headers=hdr)
                content = urllib2.urlopen(req)
                output_filename = css_folder + "/" + filename + "." + file_ext
                with open(output_filename, "w+") as f:
                    f.write(content.read())
                print("Done Writing CSS file : ", output_filename)
            except ValueError:
                # Relative hrefs make urllib2 raise ValueError; skip them.
                pass
    print("Tasks Accomplished for : ", page_url)
def make_previews(pages=0, uid='', domain='', size=None, is_user_preview=False):
    '''
    Generate preview of book
    :param pages: number of pages
    :param uid: uid of book
    :param domain: site url, where book was created
    :param size: size of final image
    :param is_user_preview: indicate if it's user's book
    :return: response dict with preview paths, or an error dict
    '''
    if create_destination_file_for_preview(domain, uid) is None:
        return {'message': "Unregistered domain name received", 'code': 400}
    if size is None:
        size = default_size
    page = 1
    while page <= pages:
        if is_user_preview is False:
            destination = create_destination_file_for_preview(domain, uid, '{}-full'.format(page))
        else:
            destination = create_destination_file_for_preview(domain, '%s/%s' % (uid, 'preview'), page)
        url = render_url.format(domain, uid, page - 1)
        url = url + '&width={}&height={}'.format(size['width'], size['height'])
        options.update(size)
        print('Create request to {}'.format(url))
        # Bug fix: the page used to be rendered twice — once unguarded (so any
        # wkhtmltoimage failure escaped the handler below) and once inside the
        # try. Render exactly once, guarded, with a narrowed except.
        try:
            imgkit.from_url(url, destination, options=options)
        except Exception:
            return {'message': "Error occurred while render image with wkhtmltoimage", 'code': 404}
        image = Image.open(destination)
        image.save(destination, quality=100, dpi=(600, 600))
        os.chmod(destination, 0o777)
        # if rendering user's book
        # save it to preview dir without slicing
        if is_user_preview is False:
            slice_page(page, pages, domain, uid)
        page = page + 1
    # if is user's book render
    # don't create borders
    if is_user_preview is False:
        create_borders(pages, domain, uid)
        return create_response(
            create_destination_file_for_preview(domain, uid),
            create_destination_file_for_preview(domain, uid, None, False)
        )
    else:
        return create_response(
            create_destination_file_for_preview(domain, '%s/%s' % (uid, 'preview')),
            create_destination_file_for_preview(domain, '%s/%s' % (uid, 'preview'), None, False)
        )
def screenshot(url):
    """Save a best-effort screenshot of *url* under ./shots/ and print the file name."""
    if not os.path.exists("shots"):
        os.makedirs("shots")
    # Slug: keep alphanumerics (lowercased), replace everything else with "_".
    slug = "".join([char.lower() if char.isalnum() else "_" for char in url])
    file_name = f"{os.getcwd()}/shots/{slug}_{int(time.time())}.jpg"
    try:
        imgkit.from_url(url, file_name, options={"quiet": ""})
    except Exception:
        # Fix: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit. Still best-effort.
        pass
    print(file_name)
def ScreenShotsOfAlllinks(self):
    """Screenshot every unique menubar/dropdown link on the domain, one numbered jpg each."""
    unique_paths = set(self.menubar_dropdown__links1 + self.menubar_dropdown__links2)
    targets = ["http://" + self.data + each for each in unique_paths]
    for counter, target in enumerate(targets, start=1):
        print(target)
        imgkit.from_url(target, str(counter) + '.jpg')
def do_screen_by_url(url: str, js_delay=0):
    """Render *url* to a PNG named after the md5 of the url; return the file path."""
    target = md5(url.encode()).hexdigest() + '.png'
    imgkit.from_url(url, target, options={
        'format': 'png',
        'width': '1920',
        'javascript-delay': js_delay,
    })
    return target
def download_web_to_image(url):
    """Screenshot *url* to ./search-output.jpg; return 1 on success, 0 on failure."""
    render_opts = {
        "width": 800,
        "quality": 50,
        "load-error-handling": "skip",
    }
    try:
        imgkit.from_url(url, "./search-output.jpg", options=render_opts)
        print("hello")
        return 1
    except Exception as e:
        print(e)
        return 0
def render_book(uid='', domain='', size=None, pages=0, no_border=False):
    """Render all pages of book *uid* for *domain* via wkhtmltoimage.

    :param uid: book uid
    :param domain: site key into `domains`
    :param size: dict with 'width'/'height'; defaults to `default_size`
    :param pages: number of pages to render
    :param no_border: skip the pattern-border offset
    :return: response dict, an error dict, or None for an unknown domain path
    """
    if create_destination_file_for_preview(domain, uid) is None:
        return {'message': "Unregistered domain name received", 'code': 400}
    try:
        path = os.path.join(domains[domain], 'image/photobook/renders', uid)
    except KeyError:
        return None
    if os.path.exists(path):
        shutil.rmtree(path)
    if size is None:
        size = default_size
    # Bug fix: work on a copy — the original mutated the caller's dict (and
    # the shared `default_size`) in place, compounding the offsets per call.
    size = dict(size)
    border_offset = 0 if no_border else 100
    # add offset for pattern's borders
    size['width'] += border_offset * 2
    size['height'] += border_offset
    page = 0
    while page < pages:
        destination_file = create_destination_file_for_render(domain, uid, page)
        url = render_url.format(domain, uid, page)
        url = url + '&isFullRender=true&width={}&height={}'.format(
            size['width'] - border_offset * 2, size['height'] - border_offset)
        options.update(size)
        try:
            # Fix: narrowed from a bare `except:`.
            imgkit.from_url(url, destination_file, options=options)
        except Exception:
            print(sys.exc_info()[0])
            return {'message': "Error occurred while render image with wkhtmltoimage", 'code': 404}
        # Re-save with print-quality metadata (600 dpi) and open permissions.
        image = Image.open(destination_file)
        os.remove(destination_file)
        image.save(destination_file, quality=100, dpi=(600, 600))
        os.chmod(destination_file, 0o777)
        print('Rendering progress: {}%'.format(int(100 / pages * page)))
        page = page + 1
    print('Rendering progress: {}%'.format(100))
    return create_response(
        create_destination_file_for_render(domain, uid),
        create_destination_file_for_render(domain, uid, None, False)
    )
async def futures(ctx):
    """Screenshot the CNN premarket futures table and send it to the channel."""
    import imgkit
    import cv2
    crop_opts = {
        'format': 'png',
        'crop-h': '355',
        'crop-w': '250',
        'crop-x': '355',
        'crop-y': '160',
        'encoding': "UTF-8"
    }
    imgkit.from_url('https://money.cnn.com/data/premarket/', 'out1.jpg',
                    options=crop_opts)
    image_cv2 = cv2.imread(r'out1.jpg')
    # NOTE(review): this sends the raw cv2/numpy array to the channel — verify
    # the library accepts that (discord.File('out1.jpg') is the usual form).
    await ctx.channel.send(image_cv2)
def run_core(self, buyer_id=None, notification_at=None):
    # Capture a cropped weather-map screenshot for every city and store each
    # one as a WeatherPicture record.
    # NOTE(review): buyer_id / notification_at are accepted but unused here —
    # presumably part of the task-runner interface; confirm.
    options = {
        'format': 'png',
        'crop-h': '300',
        'crop-w': '500',
        'crop-x': '30',
        'crop-y': '520',
        'javascript-delay': '1000',
    }
    cities = CityEnum.values()
    for city in cities:
        # (Russian: "Fetching information for %s")
        print 'Вытаскиваем информацию для %s' % city
        url = CityEnum.grab_map[city]
        now = arrow.now()
        # File name: <city>_<ISO timestamp>.<format>
        filename = '%s_%s.%s' % (city, now.isoformat(), options['format'])
        fullfilename = '%s/%s' % (settings.MEDIA_ROOT, filename)
        try:
            result = imgkit.from_url(url, fullfilename, options=options)
        except Exception as e:
            # wkhtmltoimage sometimes reports a spurious HostNotFoundError for
            # URLs containing a comma; log and ignore only that case.
            wrong_wkhtmltoimage_error = 'Exit with code 1 due to network error: HostNotFoundError'
            if wrong_wkhtmltoimage_error in e.message:
                # (Russian log text: "spurious wkhtmltoimage error for comma URL")
                logger.info(
                    'Неверная ошибка wkhtmltoimage с запятой в урле: %s' % wrong_wkhtmltoimage_error)
            else:
                raise e
        WeatherPicture.objects.create(created_at=now.datetime, city=city, picture=filename)
async def echo(message: types.Message):
    """Screenshot the URL contained in *message* and send it back as a photo."""
    render_opts = {
        'format': 'png',
        'width': 1920,
        'height': 1080,
        'disable-smart-width': '',
        'encoding': "UTF-8",
        'javascript-delay': 10,
        'custom-header': [('User-Agent',
                           'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.61 Safari/537.36'
                           )],
    }
    await message.answer('подождите пожалуйста')
    try:
        img = imgkit.from_url(message.text, False, options=render_opts,
                              config=img_config)
        await bot.send_photo(message['from']['id'], img, caption=message.text)
    except Exception:
        # Fix: was a bare `except:`, which also swallowed task cancellation
        # and KeyboardInterrupt.
        await message.answer(
            'Укажите правильный адрес к сайту! Сайт не отвечает либо указан не верно'
        )
def signature(request, uuid, filetype):
    """Return the user's signature (or phrase/privacy text) rendered as an image attachment."""
    import os
    from pyvirtualdisplay import Display
    filetype = request.GET.get('filetype', 'png')
    phrase_signature = request.GET.get('phrase_signature', False)
    privacy_signature = request.GET.get('privacy_signature', False)
    name = "phrase" if phrase_signature else "signature"
    user = get_user_model().objects.get(username=uuid)
    role = get_role(user)
    role_obj = Roles.objects.get(name=role)
    role_id = str(role_obj.id)
    base_url = request.build_absolute_uri(
        reverse('cards:view_usercard', args=[user.username]))
    iframe_url = base_url + '?current_signature=True&role_id=' + role_id
    if phrase_signature or privacy_signature:
        text_signature_url = request.build_absolute_uri(
            reverse('cards:views_text_signature'))
        # Bug fix: these assignments ended with stray trailing commas, which
        # made iframe_url a 1-tuple instead of a URL string.
        if phrase_signature:
            iframe_url = text_signature_url + '?phrase_signature=True&role_id=' + role_id
        if privacy_signature:
            iframe_url = text_signature_url + '?privacy_signature=True&role_id=' + role_id
    filename = 'Ad_Astra_{0}_{1}.{2}'.format(
        user.business_card_name.strip().replace(' ', '_'), name, filetype)
    # 'xvfb': ''
    options = {'width': 550, 'quality': '100', 'zoom': 2}
    xephyr = Display(visible=0, size=(550, 600))
    xephyr.start()
    try:
        imgkit.from_url(iframe_url, filename, options=options)
        with open(filename, "rb") as image_data:
            content_type = 'image/{}'.format(filetype)
            response = HttpResponse(image_data.read(), content_type=content_type)
        response['Content-Disposition'] = 'attachment; filename=%s' % filename
    finally:
        # Bug fix: always release the virtual display and remove the temp
        # file — the original leaked both when rendering raised.
        xephyr.stop()
        if os.path.exists(filename):
            os.remove(filename)
    return response
def handle(self, *args, **options):
    # Management command: render a coupon PNG for every registered user by
    # screenshotting their invoice page into Media/<user id>.png.
    print "creating coupon...."
    reg_users = RegisteredUsers.objects.all()
    current_site = Site.objects.get_current()
    domain = current_site.domain
    # NOTE(review): this local dict shadows the **options command kwargs.
    options = {
        'format': 'png',
        'encoding': "UTF-8",
    }
    for user in reg_users:
        # Invoice URL built from the site domain and the user's encoded id.
        url = domain + str(reverse_lazy('invoice_view', kwargs={'pk': encoded_id(user.id)}))
        coupon_file_name = '%s.png' % user.id
        imgkit.from_url(url, os.path.join(settings.BASE_DIR, 'Media', coupon_file_name), options=options)
def take_screenshot_from_url(self, response, **kwargs):
    """Function to get screenshot by directly page loading.

    Renders response.url to a PNG (forwarding the response cookies) and
    uploads it; records a warning on render failure and an error when no
    file was produced.
    """
    # Bug fix: bind these before the try block — previously a failure before
    # assignment made the except/finally paths raise NameError on file_path,
    # and `exc` was referenced in finally even when no exception occurred.
    file_id = str(uuid.uuid4())
    filename = "{file_id}.png".format(file_id=file_id)
    file_path = os.path.join(path, "screenshots", self.scrape_id, filename)
    error_details = None
    try:
        # get cookies to take a screenshot
        cookies_list = kwargs.get('cookies', '')
        if not cookies_list:
            cookies = response.headers.getlist('Set-Cookie')
            c = SimpleCookie()
            for cookie in cookies:
                c.load(cookie.decode("utf-8"))
            cookies_list = [(key, c[key].value) for key in c]
        # imgkit processing
        options = {
            'format': 'png',
            'encoding': "UTF-8",
            'quiet': '',
            'cookie': cookies_list
        }
        imgkit.from_url(response.url, file_path, options=options)
    except Exception as exc:
        error_details = str(exc)
        error_msg = {
            "error_type": "SCREENSHOT_ISSUED",
            "page": file_path,
            "details": error_details
        }
        self.warnings.append(error_msg)
        self.logger.warning(error_msg)
    finally:
        # upload file to s3
        if os.path.exists(file_path):
            self.screenshots_ids.append(file_id)
            self.upload_screenshot(file_id)
        else:
            error_msg = {
                "error_type": "SCREENSHOT_NOT_TAKEN",
                "page": file_path,
                "details": error_details
            }
            self.errors.append(error_msg)
            self.logger.error(error_msg)
def screenShot():
    """Handler: screenshot json_['url'] into an optional json_['path'] directory."""
    json_ = request.json
    print('json:', json_)
    logger.info('json_ %s' % json_)
    json_obj = json.dumps(json_)
    print(json_['url'])
    inputurl = str(json_['url'])
    # Fix: use .get so a payload without "path" no longer raises KeyError.
    path = json_.get('path') or ''
    if path:
        path = str(path)
        print("===Path===", path)
        logger.info('path %s' % path)
        if not os.path.exists(path):
            os.mkdir(path)
    logger.info('json_ %s' % json_obj)
    # Derive an image name: text between the scheme and the first "co",
    # stripped of non-word characters.
    scheme = 'https' if 'https' in inputurl else 'http'
    match = re.search(scheme + r':(.*?)co', inputurl)
    if match is None:
        # Fix: an unmatched URL used to crash with AttributeError.
        print('Could not derive an image name from', inputurl)
        return "Failure"
    inputurlimg = re.sub(r'[^\w]', '', match.group(1))
    try:
        imgOutput = path + inputurlimg + ".jpg"
        print("imgOutput with path=======> ", imgOutput)
        imgkit.from_url(inputurl, imgOutput, options={'xvfb': ''})
        print("image name::---->", imgOutput)
        output = "Image name is ::" + inputurlimg
    except Exception as e:
        output = "Failure"
        print(e)
    return output
def append_img_from_url(self, url):
    """Append a screenshot of the web page at *url* to the message queue."""
    render_opts = {'zoom': '3', 'minimum-font-size': '28'}
    raw_bytes = imgkit.from_url(url, False, options=render_opts)
    with BytesIO(raw_bytes) as buf:
        img = Image.open(buf)
        self._msgs.append(('P', self.__img_to_str(img)))
def getImage(self, url):
    """Cache a screenshot of *url* under static/webimg/<md5>.png; return its relative path."""
    imagehash = hashlib.md5(url.encode('utf-8')).hexdigest()
    if settings.STATIC_ROOT is None:
        base_dir = settings.STATICFILES_DIRS[0]
    else:
        base_dir = settings.STATIC_ROOT
    filepath = base_dir + "webimg/" + imagehash + ".png"
    path = "static/webimg/" + imagehash + ".png"
    if not os.path.exists(filepath):
        try:
            imgkit.from_url(url, filepath, options={'quiet': ''})
        except Exception as e:
            # Render failed: log and return None so the caller can fall back.
            logger.error(e)
            return
    return path
def gettechnicals(fromcoin, tocoin):
    """Screenshot the technicals chart for the coin pair into tech.jpg.

    :raises ValueError: when the pair's page cannot be rendered
    """
    print("download tech")
    options = {'crop-y': '612', 'crop-h': '620', 'javascript-delay': 1100}
    try:
        imgkit.from_url(gettechnicalsUrl(fromcoin, tocoin), "tech.jpg",
                        options=options)
    except Exception as exc:
        # Fix: narrowed from a bare `except:` and chained so the underlying
        # render error is no longer silently discarded.
        raise ValueError('Invalid coin info for ' + fromcoin + tocoin) from exc
def text_jpg(message):
    """If the message text is a numeric student id, screenshot its comments page and send it back."""
    print(message.text)
    if message.text.isdigit():
        page_url = f'http://student.kazgasa.kz/Comments/{message.text}.htm'
        # (original note, translated from Russian): the config needs changing
        img = imgkit.from_url(page_url, False)
        bot.send_photo(message.chat.id, img)
        start(message)
async def on_message(self, message):
    """Reply to logs.tf links with a cropped screenshot of the log's player table."""
    if message.author.bot:
        return
    if not re.match("^https?://logs.tf/[0-9]+/?", message.content):
        return
    await message.channel.trigger_typing()
    url = message.content
    parsed_url = urlparse(url)
    async with httpx.AsyncClient() as client:
        response = await client.get(url)
    soup = BeautifulSoup(response.text, 'html.parser')
    player_table = soup.select_one("#log-section-players").select_one(
        "#players")
    # Estimate the rendered table height: rows with long names or many class
    # icons wrap and render taller.
    table_height = 0
    for row in player_table.select_one("tbody").find_all("tr"):
        name = row.select_one(".log-player-name a.dropdown-toggle").text
        nclasses = len(row.select(".log-classes i"))
        if len(name) > 25 or nclasses > 4:
            table_height += 48
        else:
            table_height += 29
    options = {
        "xvfb": "",
        'crop-h': table_height + 28 + 30 + 66 + 10 + 28,
        'crop-w': '980',
        'crop-x': '178',
        'crop-y': '201',
    }
    # Bug fix: `filename` was computed but never used — the output path was a
    # fixed string, so concurrent messages clobbered each other's screenshot.
    filename = parsed_url.path.replace("/", "")
    filepath = f"/tmp/{filename}.png"
    imgkit.from_url(url, filepath, options=options)
    await message.edit(suppress=True)
    await message.reply(file=discord.File(filepath))
    os.remove(filepath)
print("The username or password you used is incorrect. Please check the credentials and try again") sys.exit() url = "https://dsbclient.noim.io/%s/%s" % (USERNAME, PASSWORD) # generate the url for the DSB-API response = urllib.urlopen(url) # raw input data dsbData = json.loads(response.read()) # unprocessed JSON data # initialize the arrays timetables = [0, 0] # it contains the timetables timetableUrls = ["", ""] # it contains the url to the corresponding timetable timetablesJpg = [0, 0] # it contains the JPG versions of the timetables # fill the arrays timetables[0] = dsbData['timetables'][0] # select the 1st timetable timetables[1] = dsbData['timetables'][1] # select the 2nd timetable timetableUrls[0] = dsbData['timetables'][0]['src'] # select the 1st timetable-Url timetableUrls[1] = dsbData['timetables'][1]['src'] # select the 2nd timetable-Url print("-----------please ignore the follering messages-----------") # conversion from html to .jpg with suppress_stdout(): imgkit.from_url(str(timetableUrls[0]),'%s[0].jpg' % USERNAME) imgkit.from_url(str(timetableUrls[1]),'%s[1].jpg' % USERNAME) print("----------------------------------------------------------") # sleep for 30 Minutes (30*60Seconds = 1800 Seconds) print("waiting 30 minutes for next update") time.sleep(1800) # every 30 minutes the script gets the timetables