Example #1
def something():
    products = []
    if request.method == "POST":
        query = request.form['query']
        products = scrapper.scrap(query)

    return render_template('index.html', products=products)
Example #2
def scrap_into_database():
    print("Scrapping into database")
    crawlers = db.session.query(Crawler).all()
    for c in crawlers:
        data = scrap(c.url)
        for d in data:
            create_data(c.name, d["name"], d["price"], d["date"], d["link"])
Example #3
def admin_scrap_and_cache_data(update: Update, _: CallbackContext) -> None:
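    """Scrape audio clips, upload each one to Telegram to obtain a cached file_id, and persist the mapping."""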
    audio_ids = {}
    update.message.reply_text(
        "Initiating scrapping and caching\n" +
        "This can take several hours\n" +
        "(!)Turn off notifications to this bot, there will be a lot of spam")
    for tf2class, l in scrapper.scrap():
        audio_ids[tf2class] = []
        for e in l:
            retry = True
            while retry:
                try:
                    retry = False
                    x = update.message.reply_audio(
                        BytesIO(e['data']),
                        performer=tf2class,
                        title=e['text'],
                    )
                    audio_ids[tf2class].append(
                        {'file_id': x.audio.file_id, 'text': e['text']})
                except Exception as error:
                    # most likely Telegram's flood control; wait and retry
                    logger.warning('Telegram anti-spam filter triggered: %s', error)
                    retry = True
                    sleep(3)
    update.message.reply_text('Data cached successfully')
    query_handler.save_ids(audio_ids)

    file = json.dumps(query_handler.audio_ids).encode()
    try:
        update.message.reply_document(file, filename='audio_ids.json')
        update.message.reply_text("use this if you want to move bot")
    except NetworkError:
        update.message.reply_text(
            f'Seems like this file is too large: {len(file)}. Maximum file size is 1.5MB')
Example #4
def startCrawling(date1, date2):
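    """Crawl the archive one day at a time between date1 and date2, rotating user agents and throttling requests."""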
    agents = UserAgent()
    print("Crawling started for date range ", date1.strftime('%d-%m-%Y'), ' to ', date2.strftime('%d-%m-%Y'))
    if date1 > date2:
        # ensure date1 is the earlier date
        date1, date2 = date2, date1
    pagenumber = 1
    request_number = 0
    agent = agents.random
    while date1 <= date2:
        request_number += 1
        currenturl = 'https://www.prothomalo.com/archive/'+date1.strftime('%Y-%m-%d')+'?page='+str(pagenumber)
        print("Currently fetching address: ", currenturl)        
        nextpage = scrapper.scrap(currenturl, date1.strftime('%Y-%m-%d'), agent)
        if nextpage:
            pagenumber += 1
        else:
            pagenumber = 1
            agent = agents.random
            date1 = date1 + timedelta(days=1)
        sleep(0.3)
        if request_number == 50:
            sleep(10)
            request_number = 0
    print("ended")
Example #5
def scrap_into_database(urls):
    print("Scrapping into database")
    for category, url in urls.items():
        print(category, url)
        data = scrap(url)
        data = filter_data(data, exception_words, exception_words, price_range)
        for d in data:
            create_data(d["name"], d["price"], d["date"], d["link"], category)
Example #6
    def run(self):
        r = scrapper.scrap(self.accounts, self.N, self.config,
                           self.output_folder)
        self.res.append(r)

        # Fire a <<thread_fini>> event at the main window to signal that this
        # thread has finished; the event is then handled by the main message
        # loop. The result could also be passed along with the message by
        # storing it on an attribute of the Event class.
        self.win.event_generate("<<thread_fini>>")
Example #7
def main():
    # parse args
    args = docopt(__doc__.format(self_filename=Path(__file__).name))

    # Parse config file
    with open(args['<config_file>'] or DEFAULT_CONFIG_FILE) as f:
        config_yaml = yaml.load(f.read(), Loader=yaml.BaseLoader)
        config = config_model.Config(**config_yaml)

    # Init logging
    log = logging.getLogger(__name__)
    logging.basicConfig(
        format="%(levelname)s: %(message)s",
        level=(logging.DEBUG if args['--debug'] else logging.INFO))

    # Parse already_seen file
    if not SEEN_FILEPATH.exists():
        log.error(
            f'init "{SEEN_FILEPATH}" file before use ("echo \'[]\' > {SEEN_FILEPATH}")'
        )
        sys.exit(-1)
    already_seen_set = set()
    if SEEN_FILEPATH.stat().st_size != 0:
        with open(SEEN_FILEPATH) as f:
            already_seen_set = set(ensure_list(json.load(f)))

    new_ids_set = links = None
    try:
        _, new_ids_set, links = scrapper.scrap(
            config,
            log,
            already_seen_set=already_seen_set,
            send_sms=not args["--no-sms"])
    except:
        if not args["--test"] and not args["--no-sms"]:
            scrapper.send_sms("EXCEPTION", config)
        raise
    finally:
        # Write new found ids to seen file
        if new_ids_set:
            print(f'-> update {SEEN_FILEPATH!r}')
            write_json_file(SEEN_FILEPATH,
                            list(already_seen_set | new_ids_set))
        if args['--clipboard'] and links:
            # Copy urls links to clipboard
            import clipboard
            try:
                clipboard.copy("\n".join(links))
            except Exception as e:
                log.error(f"Error while copying to clipboard:\n{e}")
                traceback.print_tb(e.__traceback__)
            else:
                log.info("URLs copied to clipboard")
Example #8
async def scrappin():
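    # Poll the scraper once a second and flip the gate/garage state whenever the sentinel value "69" comes back.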
    while True:
        a, b = scrapper.scrap()
        print(a)
        print(b)
        if (a == "69"):
            print("Changing Gate")
            stateIO.stateChange("gate")

        if (b == "69"):
            print("Changing Garage")
            stateIO.stateChange("garage")

        await asyncio.sleep(1)
Example #9
def fetch_menu(bot=None, job=None, callback=None):
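    """Fetch the latest menu with the scraper, pickle it to disk with a timestamped filename, then invoke the callback."""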
    print("Fetching menu...")
    global latest_menu

    latest_menu = None
    try:
        latest_menu = scrap()
    except Exception:
        print("Couldn't fetch menu: ", sys.exc_info()[0])
        # updater.job_queue.run_once(fetch_menu, 5)
        return

    try:
        pickle.dump(
            latest_menu,
            open("menus/" + time.strftime("%H%M-%m-%d-%Y") + ".pickle", "wb"),
        )
    except Exception:
        # this is not important enough to crash the bot
        print("Couldn't pickle menu to disk!")

    if callback is not None:
        callback()
Example #10
    print("Database is initialized/updated")
    raise SystemExit(0)

CHECK_INPUT_DATA = checkdata.check_input_data(INPUT_DATA, IATA_LIST)
if CHECK_INPUT_DATA != 0:
    print(CHECK_INPUT_DATA)
    raise SystemExit(1)

CHECK_DATES = checkdata.check_dates(INPUT_DATA)
if CHECK_DATES != 0:
    print(CHECK_DATES)
    raise SystemExit(1)

TRIP = INPUT_DATA[1:]
CHECK_SCHEDULE = check_schedule(TRIP, DB_FILE)
if len(CHECK_SCHEDULE[0]) == 0:
    print("No flights for " + str(TRIP[2]) + " and 2 next days")
    raise SystemExit(0)

URL = "https://www.airblue.com/bookings/flight_selection.aspx"
ROUND_TRIP = True
if len(TRIP) == 3:
    ROUND_TRIP = False

RESERVATION = scrapper.scrap(TRIP, CHECK_SCHEDULE, ROUND_TRIP)
print("**********************")
print(RESERVATION[0])
for flight in RESERVATION[1]:
    print(flight)
print("**********************")
Example #11
import base
import scrapper
import msvcrt as m

base.clear()

print("First step")
print("Create connection to database")
print("Make sure u have mysql database, user with have privileges to it and he using plugin mysql_native_password")
print("")
base.raw_print("Any key to continue ")
m.getch()

database = base.connect()

if base.create_schema(database):
    print("Operation end successfully")
    print("")
    print("Start webscraping")
    
    xx = "https://www.zalando.pl/odziez-damska"
    xy = "https://www.zalando.pl/odziez-meska/"

    scrapper.scrap(xx, "kobiety", database)
    scrapper.scrap(xy, "mezczyzni", database)

else:
    print("Error occurred while creating the schema")

Example #12
def main():
    a, b = scrapper.scrap()
    print(a)
    print(b)
Example #13
import scrapper
from scrapper_mysql_db_manager import ScrapperMysqlDbManager
from config_manager import ConfigManager

pages_details = scrapper.scrap()
succeeded = True
try:
    db_manager = ScrapperMysqlDbManager(ConfigManager())
    db_manager.open_connection()
    db_manager.insert_pages_details(pages_details)
    db_manager.close_connection()
except Exception:
    succeeded = False
scrapper.print_result(succeeded)
Example #14
from helpers import *
import pickle
import sys
import scrapper

result = scrapper.scrap()

sys.exit()  # stop here; the pickle inspection below only runs if this line is removed
menu = pickle.load(open("menus/07-01-2018.pickle", "rb"))["Menu"]
print(menu)
print(pickle.load(open("menus/07-01-2018.pickle", "rb"))["FullMenu"])
for i in menu:
    print(i.allergens())