def remove_website(uuid):
    removed = False
    try:
        URL_MAP.remove_site(uuid)
        Website.remove_website(uuid)
        removed = True
    except Exception as e:
        print(f'{e}')
        time.sleep(5)  # pause so the error stays visible before returning
    return removed

def handler(event, context):
    url = json.loads(event['body'])['url']
    print("Processing " + url)
    website = Website(url)
    response = website.getWebsiteContent()
    response['headers'] = {
        'Content-Type': 'application/json',
        'Access-Control-Allow-Origin': origin
    }
    return response

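# Usage sketch (not from the source): invoking the handler above with a
# minimal API Gateway-style event. Assumes the module imports json and
# defines `origin`, as the handler body expects; `fake_event` is made up.
import json

if __name__ == '__main__':
    fake_event = {'body': json.dumps({'url': 'https://example.com'})}
    print(handler(fake_event, None))
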
def main():
    websites = list()
    try:
        with open(os.path.abspath("Settings.txt"), "r") as settings:
            web_json = load(settings)
            for w in web_json:
                site = Website(w["site"], w["search"], w["img"],
                               w["separator"], w["priority"])
                websites.append(site)
    except FileNotFoundError as E:
        print("Settings.txt could not be opened:\n", E)
        return
    search = SearchWindow(websites)
    chosen_site = search.update()  # call main loop
    # Bump the priority of the site the user chose, then persist the change.
    for i in web_json:
        if i["site"] == chosen_site:
            i["priority"] += 1
    try:
        with open(os.path.abspath("Settings.txt"), "w") as settings:
            dump(web_json, settings, indent=4, sort_keys=True)
    except FileNotFoundError:
        print("Settings.txt not found")
        return

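# Illustrative Settings.txt content implied by the loop above (field names
# come from the code; the values are made up). The file is JSON, shown here
# as the equivalent Python literal:
EXAMPLE_SETTINGS = [
    {
        "site": "https://example.com",
        "search": "search?q=",
        "img": "example.png",
        "separator": "+",
        "priority": 0,
    },
]
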
def add_website(url, site_type, max_price):
    browser = Browser()
    # Only register URLs that are not already being tracked.
    if url not in URL_MAP.map.values():
        uuid = URL_MAP.add(url)
        website = Website(uuid, url=url, site_type=site_type, max_price=max_price)
        cur_price, in_stock, name, value = browser.check_website(website)
        website.update_website(cur_price, in_stock, name)
        created = True
    else:
        created = False
    browser.driver.close()
    return created

def handler(event, context):
    url = json.loads(event['body'])['url']
    is_https = json.loads(event['body'])['https']
    # Normalize the scheme: strip whatever the caller sent, then re-attach
    # the one requested by the 'https' flag.
    url = url.replace('https://', '')
    url = url.replace('http://', '')
    if is_https:
        url = "https://" + url
    else:
        url = "http://" + url
    print("Processing " + url)
    website = Website(url)
    response = website.getWebsiteContent()
    response['headers'] = {
        'Content-Type': 'application/json',
        'Access-Control-Allow-Origin': origin
    }
    return response

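# Self-contained sketch of the scheme normalization inlined above
# (normalize_scheme is a hypothetical name, not part of the source):
def normalize_scheme(url, is_https):
    # Drop any existing scheme, then prepend the requested one.
    url = url.replace('https://', '').replace('http://', '')
    return ('https://' if is_https else 'http://') + url

assert normalize_scheme('http://example.com', True) == 'https://example.com'
assert normalize_scheme('example.com', False) == 'http://example.com'
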
def make_website(self):
    """
    Entry method: download the files, prepare them for deployment,
    and finally deploy them.

    :returns: a Website object for the deployed url
    :rtype: Website
    """
    taskIds = self.client.enqueue_tasks(self.selection)
    # Scale the polling wait with the number of enqueued tasks:
    # +0.5s for every full batch of 10 tasks.
    wait_time = 2.0
    for i in range(len(taskIds)):
        if i % 10 == 0 and i != 0:
            wait_time += 0.5
    temp_dir_name = self.__make_website_folder()
    self.__is_download_complete(taskIds, wait_time, 0)
    file_streams = self.client.download_files(self.results)
    self.__save_downloaded_files(file_streams, temp_dir_name)
    self.__prepare_deployment(temp_dir_name)
    url = self.__deploy_website(temp_dir_name)
    self.__finish_deployment(temp_dir_name, url)
    return Website(url)

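# Note on wait_time above: the loop adds 0.5s for every full batch of 10
# enqueued tasks, e.g. 25 tasks -> 2.0 + 2 * 0.5 = 3.0s. A closed-form sketch
# of the same computation (estimated_wait is a hypothetical helper name):
def estimated_wait(task_count):
    return 2.0 + 0.5 * max(0, (task_count - 1) // 10)

assert estimated_wait(25) == 3.0
assert estimated_wait(10) == 2.0
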
from Page import Page
from Website import Website

## PAGE CLASS TESTS
Pag1 = Page(url='https://wikileaks.org/',
            folder='News',
            link='https://wikileaks.org/Amazon-Atlas-Press-Release.html',
            titulo='WikiLeaks - Amazon Atlas',
            desc='',
            formato='html')
Pag2 = Page(url='https://cuevana3.io',
            folder='Series',
            link='https://cuevana3.io/serie/chernobyl',
            titulo='Chernobyl',
            desc='The series recounts the events of 1986, one of the worst '
                 'man-made disasters in recent history, and the sacrifices '
                 'made to save the continent from an unprecedented catastrophe.',
            formato='JSON')

# WEBSITE CLASS TESTS
paginas = [Pag1, Pag2]
site = Website('.net', 'developers.', paginas)

# SEARCH FUNCTION TESTS
print('--------------')
site.search(Pag2)

from flask import Flask, render_template, redirect, url_for, request, flash, session
#from data import Articles
from Website import Website
from flaskext.mysql import MySQL
import CustomForm
import WebsiteAPI
from ConstantTable import ErrorCode, DatabaseModel, AccountInfo, PostInfo, CommentInfo, WebsiteLoginStatus, DefaultFileInfo, RoleType
from werkzeug.utils import secure_filename
import time
import datetime

app = Flask(__name__)
mysql_server = MySQL()
main_website = Website(app, mysql_server)


@app.route('/')
def index():
    if session.get(WebsiteLoginStatus.LOGGED_IN) is None:
        session[WebsiteLoginStatus.LOGGED_IN] = False
    category = None
    if 'category' in request.args:
        category = request.args['category']
    all_posts = WebsiteAPI.get_all_posts(main_website, order=True, post_category=category)
    if len(all_posts) == 0:
        flash('No posts to display')

'''
Created on Oct 27, 2018

@author: robert harrison
'''
import time
from tracker_manager import Manager
from Website import Website
import random

if __name__ == '__main__':
    website = Website("https://www.cbssports.com/college-football/scoreboard/")
    manager = Manager(website, "1oBVJChfUCfxFvNkfdf79g6CU9pDGgYXMmRHAbxBJwpE")
    players = manager.get_players()
    # make sure team names match cbssports.com
    manager.format_all_team_names()
    games_list = manager.get_games()
    manager.add_column_headings_and_format(len(games_list))
    games_completed = 0
    while True:
        # get fresh score updates
        website.update_divs()
        for game in games_list:
            website.update_game(game)
            if 'final' in game.status.lower() and not game.been_updated:
                print('updating game: ' + game.teams[0] + ' vs ' + game.teams[1])
                games_completed += 1
                manager.update_sheet(players, game)
        if games_completed == len(games_list):
            break  # all games are final; stop polling

from CONSTANTS import *
from Website import Website
from URLMap import urlmap
from Browser import Browser
from datetime import datetime
import time
import colorama

URL_MAP = urlmap()
website_list = []
position = 12
for uuid in URL_MAP:
    site = Website(uuid)
    website_list.append([site, position])
    position += 1


def clear():
    # ANSI "erase in display" sequence: clears the whole screen
    print("\x1b[2J")


def reposition(x, y):
    # ANSI cursor-position sequence; rows and columns are 1-indexed,
    # hence the +1 on each coordinate
    print("\x1b[{};{}H".format(x + 1, y + 1))


def reset_screen():
    clear()
    reposition(0, 0)

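# Usage sketch: the raw escape sequences above need an ANSI-capable terminal.
# On Windows that is presumably why colorama is imported, but it must also be
# initialised (an assumption — the snippet never calls it):
if __name__ == "__main__":
    colorama.init()
    reset_screen()  # clear and move the cursor to the top-left corner
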
from Contents import Contents
from Compile import compileAll
from Website import Website

website = Website()
website['website_title'] = "Test Compilation Website"

home_page = Contents("index.html")
home_page['document_title'] = "Home page"
home_page['introduction'] = "This is the introduction for the home page, located at index.html"
home_page['contents'] = "Some contents will go here for the homepage, such as the latest updates"
# 'contents' is set twice per page; this presumably accumulates rather than
# overwrites (assumed from usage, not confirmed by the snippet).
home_page['contents'] = "Isn't compilation nice?"

another_page = Contents("page2.html")
another_page['document_title'] = "Another page"
another_page['introduction'] = "The 'another page' is introduced here."
another_page['contents'] = "Another page will contain more information"
another_page['contents'] = "Isn't compilation really nice?"

contents = [home_page, another_page]
compileAll(contents, website, output_directory="compiled")

from Crawler import Crawler
from Website import Website

crawler = Crawler()
item_id = 10157
idLength = 15000
successLink = []
failedLink = []
while item_id < idLength:
    url = 'https://item.szlcsc.com/' + str(item_id) + '.html'
    crawler.parse(Website(), url)
    item_id += 1

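# successLink and failedLink are declared above but never filled in this
# snippet. A hedged guess at the intended bookkeeping, assuming Crawler.parse
# raises on a failed fetch (crawl_range is a hypothetical helper, not from
# the source):
def crawl_range(crawler, start_id, end_id, success, failed):
    for item_id in range(start_id, end_id):
        url = 'https://item.szlcsc.com/' + str(item_id) + '.html'
        try:
            crawler.parse(Website(), url)
            success.append(url)
        except Exception:
            failed.append(url)
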
# Fragment: these are the trailing arguments of a cv2.putText call that
# overlays the current timestamp on the frame (reconstructed from context):
cv2.putText(
    frame,
    timestamp.strftime("%A %d %B %Y %I:%M:%S%p"),
    (10, frame.shape[0] - 10),
    cv2.FONT_HERSHEY_SIMPLEX,
    0.35,
    (0, 0, 255),
    1,
)

outputFrame = None
lock = threading.Lock()
vs = cv2.VideoCapture(0)
doorLock = Motor()
shouldLock = True
web = Website()
recognizer = Recognizer()
database = Database()
processFrame = False
count = 0
name = "No one"
changeLockState = False
initTimer = True
# NB: a threading.Timer only fires after start(), and re-arming requires a
# new Timer object (assumed to happen elsewhere in the program).
unlockTimer = threading.Timer(0.1, doorLock.lock)

# Checks to see if this is the main thread of execution
# start a thread that will perform motion detection
# start the flask app
try: