Code Example #1
File: main.py  Project: Catta1997/AnimeUnityEngine
def main():
    logging_aux.init_logger(level=config['log_level'],
                            file_log=config['file_log'])
    if len(sys.argv) == 1:
        keyword = interactive_mode()
    else:
        keyword = cli_mode()
    logger.debug("Keyword selected: {}".format(keyword))
    search_res = scraper.search(title=keyword)
    logger.debug("search results: {}".format(search_res))
    if not search_res:
        print(f"{colorama.Fore.RED}No Anime Found{colorama.Style.RESET_ALL}")
        logger.debug("No anime found, keyword: {}".format(keyword))
        sys.exit(1)
    logger.debug("Printing anime list")
    printer.print_anime_list(search_res, config, 1)
    anime_id = input("ID: ")
    logger.debug("ID selected: {}".format(anime_id))
    selected = res_obj_manipulator.get_selected_anime_obj_by_id(
        search_res, anime_id)
    logger.debug("Anime selected: {}".format(selected))
    logger.debug("Printing anime episodes")
    if config['season'] is not None:
        selected = scraper.season_scraper(selected, config)
    if not config['no_print']:
        printer.print_anime_list(selected, config, 2)
    if config['crawl_path'] is not None and config['download_path'] is not None:
        jdownloader.send_to_jdownloader([selected], config)
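The snippet reads several keys from a config mapping defined elsewhere in the project. A minimal sketch of that mapping, assuming purely illustrative values (only the key names come from the code above):

config = {
    'log_level': 'DEBUG',     # passed to logging_aux.init_logger
    'file_log': None,         # optional log-file destination
    'season': None,           # when set, scraper.season_scraper filters the selection
    'no_print': False,        # True suppresses the episode listing
    'crawl_path': None,       # both paths must be set for the JDownloader hand-off
    'download_path': None,
}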
Code Example #2
def search_by_fileid():
    """
        call this to search in a file using given keywords
        provide following arguments in a query:
            file-id = STRING
            keywords = TAG1;TAG2
    """
    file_id = request.form["file-id"]
    tags = [t.lower() for t in request.form["keywords"].split(";")]
    search_result = scraper.search(file_id, tags)
    return jsonify(search_result)
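A client supplies the two form fields the handler reads. A minimal sketch using requests, assuming a hypothetical /search-by-fileid route (the @app.route decorator is not shown in the snippet) and illustrative values:

import requests

# URL and values are illustrative; only the field names come from the handler above.
resp = requests.post("http://localhost:5000/search-by-fileid",
                     data={"file-id": "doc-42", "keywords": "motor;torque"})
print(resp.json())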
Code Example #3
def search():
    component_name = request.args["component-name"].lower()
    searching_mode = request.args["mode"]
    #last_doc_only = 'true'
    files = glob(scraper.get_file_link("*"))
    # if the user wants to search all documentation related to the component name (multisearch)
    if searching_mode == 'Components':
        print('Searching mode: Component related')
        file_id = component_name
    # if the user wants to search the whole database; specifying the component name is not necessary (multisearch)
    elif searching_mode == 'All':
        print('Searching mode: All db')
        if not component_name:
            file_id = ''
        else:
            file_id = component_name
    # if the user wants to search the latest uploaded document; specifying the component is not necessary
    else:
        # select the files that relate to the component name
        matching_files = [f for f in files if component_name in f]
        # sort the matching files by upload time, oldest first
        matching_files.sort(key=os.path.getmtime)
        print('matching files after sorting: ' + str(matching_files))
        print('Searching mode: latest file')
        print('latest file: ' + matching_files[-1])
        file_id = matching_files[-1]
    # split(';') always returns at least one element, so drop empty strings
    # before checking that any tags were actually supplied
    tags = [t.lower() for t in request.args["keywords"].split(";") if t]
    if not tags:
        return "ERROR: NO TAGS"
    search_result = scraper.search(file_id, tags, searching_mode)
    if search_result == 'Nothing found':
        print('record not found')
        return "Record not found", 400
    else:
        return send_file("result.pdf", as_attachment=True)
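A matching client call, again as a sketch: the host, route, and values are hypothetical, while the parameter names (component-name, mode, keywords) and the PDF response behavior come from the handler above.

import requests

resp = requests.get("http://localhost:5000/search",
                    params={"component-name": "gearbox",
                            # any mode other than 'Components' or 'All' selects the latest-file branch
                            "mode": "Latest",
                            "keywords": "torque;load"})
if resp.status_code == 200:
    # on a hit the endpoint sends result.pdf as an attachment
    with open("result.pdf", "wb") as f:
        f.write(resp.content)
else:
    print(resp.text)  # "Record not found" with status 400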
Code Example #4
def search():
    r = scraper.search(request.args.get('qs', None, type=str))
    # serialize compactly, without whitespace after the separators
    j = json.dumps(r, separators=(',', ':'))
    return j
Code Example #5
import scraper
import time

print("welcome to web scraper")
query = input("enter a google search: ")
result = scraper.search(query)
print("Loading websites with info on search.......................")
time.sleep(3)
print(result)
time.sleep(3)
print("scraping data................................")
content = scraper.scrape_data(result)
print("scraping complete")
time.sleep(3)
print("parsing data")
data = scraper.parse_data(content)
time.sleep(3)
print("data parsed")
file_name = input("enter .txt filename: ")
scraper.write_data(file_name, data)
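The script depends on a local scraper module with four functions. A stub like the following (function names taken from the script, bodies purely illustrative) lets the flow run end to end without real network access:

# scraper.py -- illustrative stub; the real module's internals are not shown here
def search(query):
    return ["https://example.com/result"]  # the real version returns hits for the query

def scrape_data(result):
    return "<html>page content</html>"  # the real version fetches the pages

def parse_data(content):
    return "extracted text"  # the real version extracts the useful text

def write_data(file_name, data):
    with open(file_name, "w") as f:
        f.write(data)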
Code Example #6
def search_hotel():
    response = scraper.search()
    return jsonify(results=response)
Code Example #7
File: test.py  Project: Slaughterhaus/card-counter
import scraper

print("Maindeck\n")
print(scraper.search('2020', '04', '14', 'main', 'Karn, the Great Creator'))
print('\nSideboard\n')
print(scraper.search('2020', '04', '14', 'side', 'defense grid'))