Example 1
def download():
    if request.method == "POST":
        dir_name = os.path.join(CONF.get('SERVER', 'files_dir'),
                                hex(int(time.time()))[2:])
        if request.form.get('img_height') and request.form.get('img_width'):
            img_size = (min(640, int(request.form.get('img_height'))),
                        min(640, int(request.form.get('img_width'))))
            max_count = min(5000, int(request.form.get('max_count', 100)))
        else:
            img_size = None
            max_count = min(1000, int(request.form.get('max_count', 100)))
        tag = request.form.get('tag', 100)

        photos = Photos.query.filter(Photos.tag == tag)\
            .order_by(Photos.update_time.desc())\
            .limit(max_count).all()
        urls = [ph.url for ph in photos]
        downloader.main(urls, dir_name, img_size)
        zip_file = downloader.zipdir(dir_name)

        return_data = io.BytesIO()
        with open(zip_file, 'rb') as fo:
            return_data.write(fo.read())
        return_data.seek(0)
        os.remove(zip_file)
        wrapped_data = FileWrapper(return_data)
        return Response(wrapped_data,
                        mimetype="application/zip",
                        direct_passthrough=True)
    else:
        tags = db.session.query(
            func.count(Photos.tag).label('count'),
            Photos.tag).group_by(Photos.tag).all()
        return render_template('download.html', tag_list=tags)
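
A quick way to exercise this view is Flask's test client (a minimal sketch; the /download route name, the app object, and the form values are assumptions, not shown above):

from io import BytesIO
from zipfile import ZipFile

with app.test_client() as client:
    # POST the hypothetical form fields the view reads
    resp = client.post('/download', data={'tag': 'cats', 'max_count': '10'})
    assert resp.mimetype == "application/zip"
    # the response body is the zip archive the view built in memory
    names = ZipFile(BytesIO(resp.data)).namelist()
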
Example 2
def test_download_files_from_internet(tmpdir, caplog, file, links):
    """Test for downloader.main

    Assertions:
      all links exists in log
      files stored or exception exists in log

    """
    output_dir = str(tmpdir.mkdir("download"))
    caplog.set_level(logging.INFO)  # raise the capture level before running

    arguments = ["downloader", str(file), output_dir]
    downloader.main(arguments)

    out = caplog.records
    info = "\n".join([o.message for o in out if o.levelname == "INFO"])
    errors = [o.message for o in out if o.levelname == "ERROR"]
    error = "\n".join(errors)

    listdir = os.listdir(output_dir)

    # all links appear in the log
    assert all(link in info for link in links) or all(link in error for link in links)

    for link in links:
        name = link.split("/")[-1]
        # the file was stored, or an exception for its link was logged
        assert name in listdir or link in error
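
The file and links parameters above are pytest fixtures defined elsewhere; a minimal conftest.py sketch of what they could look like (the URLs and the one-URL-per-line file layout are assumptions):

import pytest

@pytest.fixture
def links():
    # hypothetical URLs; each file name is the last path segment
    return ["http://example.com/a.jpg", "http://example.com/b.jpg"]

@pytest.fixture
def file(tmpdir, links):
    # downloader.main is handed this file path as its first argument
    f = tmpdir.join("links.txt")
    f.write("\n".join(links))
    return f
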
Example 3
def comm_create_database(self, bot: Bot, update: Update):
    bot.send_message(update.message.chat_id, "Creating DB")
    try:
        db = MySQLdb.connect(host="localhost",
                             user=database_user,
                             passwd=database_passwd,
                             charset='utf8')
    except MySQLdb.Error as err:
        bot.send_message(update.message.chat_id, "{0}".format(err))
    else:
        cursor = db.cursor()
        try:
            downloader.main([
                '-i', 'local', '-o', 'database', '--if',
                dirname(abspath(__file__)) +
                '/../../CreateDatabase/pokemon_data.json', '--db',
                'pokemon'
            ])
            cursor.execute("USE pokemon")
        except MySQLdb.Error as err:
            return err.args[1]
        try:  # fill database
            create_tables(cursor)
            fill_tables(cursor)
            db.commit()
        except (MySQLdb.Error, Timeout) as err:
            bot.send_message(update.message.chat_id, "{0}".format(err))
        else:
            bot.send_message(update.message.chat_id,
                             "Created DB successfully")
Example 4
def acquire_data_async():
    try:
        main()
    except Exception:
        print("Failed to acquire data")
        print('-' * 60)
        traceback.print_exc(file=sys.stdout)
        print('-' * 60)
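
The same report can be produced with the standard library's logging module, which appends the traceback automatically (a minimal sketch of an alternative, not the author's code):

import logging
import sys

logging.basicConfig(stream=sys.stdout, level=logging.INFO)

def acquire_data_async():
    try:
        main()
    except Exception:
        # logging.exception logs the message plus the current traceback
        logging.exception("Failed to acquire data")
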
Example 5
def main(url):
    if urlparse(url).netloc == "www.newgrounds.com":
        ngpage = requests.get(url).content
        soup = BS(ngpage, "html.parser")
        name_label = soup.find("div", {
            "class": "pod-head",
            "id": "embed_header"
        }).find("h2")
        name = name_label.string.replace(":", ",")
        print(name)
        downloader.main(url=url, gameName=name)
    else:
        print("Its recomended to use newgrounds")
        downloader.main(url=url, gameName=input("Game name: "))
Example 6
def call_main():
    print(' Creating Comment Thread \n')
    print(' **************************************************************')
    print(' **************************************************************')
    print(' **************************************************************')
    print(' ********** Enter the creation completion info. ***********  ')
    print(' **************************************************************')
    print(' **************************************************************')
    print(' **************************************************************')
    a = downloader.main()
    return a
Example 7
def createdb():
    global cursor, isconnected
    if not isconnected:
        return "You need to connect with a database first!"
    else:
        try:
            downloader.main([
                '-i', 'local', '-o', 'database', '--if', 'pokemon_data.json',
                '--db', 'pokemon'
            ])
            cursor.execute("USE pokemon")
        except MySQLdb.Error as err:
            return err.args[1]
        try:  # fill database
            create_tables(cursor)
            fill_tables(cursor)
            db.commit()
        except MySQLdb.Error as err:
            return err.args[1]
        except Timeout as err:
            return err.args[0]
        else:
            return "Created DB successfully"
Example 8
            fp = open("epis.txt", "r")
            for i in fp:
                ##        std.addstr(count+2,1,i)
                flag = 1
                time.sleep(1)
                for j in range(len(find)):
                    if (find[j] in i):
                        pass
                    else:
                        flag = 0

                if (flag != 0):
                    url = i
                    break
            std.addstr(count + 2, 1, url)
            downloader.main(url.strip('\n'))
            ##    std.addstr(11,1,str(link))
            std.refresh()
            std.clear()
            std.refresh()
            std.addstr(
                1, 1,
                "Do you want to add this to your favorites? 'y' for Yes and 'n' for No "
            )
            std.addstr(
                2, 1,
                "* Adding this to favorites will automatically download the latest episodes as they arrive on our server"
            )
            std.refresh()
            answer = std.getstr(
                1,
Example 9
feed = feedparser.parse(rss)
# prepare database
with open('database.pwp', 'r') as f:
    donelist = f.read().split('$')
# check update
update_pool = []
for i in feed.entries:
    i.link = bv2av(i.link.split('/')[-1])
    if i.link not in donelist:
        print(i.link)
        update_pool.append(i)
# update
for i in update_pool:
    try:
        # download video
        ftitle = downloader.main(i.link + '?p=1')
        # convert into audio
        mp3path = 'output/' + re.sub(r'[\/\\:*?"<>|]', '',
                                     i.title) + '.mp3'
        ffmpeg.input('bilibili_video/' + ftitle + '/' + ftitle +
                     '.flv').output(mp3path, ab='1080k').run()
        # insert cover
        try:
            cover.add_cover(cover.get_cover(i.link), mp3path)
        except Exception:
            with open('./log.txt', 'a') as f:
                f.write('error when inserting cover to: ' + ftitle +
                        i.link)
    except:
Example 10
# -*- coding:utf-8 -*-
import downloader
import html_parser

dst = 'subreddit_list.tsv'
downloader.main()
html_parser.main(dst)
Example 11
def main():

    parser = argparse.ArgumentParser(description="subtitle Downloader")

    parser.add_argument("-v",
                        "--version",
                        help="display version",
                        action="store_true")

    parser.add_argument("-t",
                        "--title",
                        default=None,
                        type=str.lower,
                        help="Enter the movie title")
    parser.add_argument("-y",
                        "--year",
                        default=' ',
                        type=str,
                        help="Enter the released year")
    parser.add_argument(
        "-lang",
        "--language",
        type=str.lower,
        default="english",
        choices=[
            "english", "arabic", "albanian", "bengali", "turkish",
            "spanish", "swedish", "thai", "slovenian", "russian", "portuguese",
            "malay", "farsi/persian"
        ],
        help="Enter any language from choices, default value ='english'")
    parser.add_argument(
        "-l",
        "--limit",
        type=str.lower,
        default="1",
        help="No of files you want to download , default value = 1")
    parser.add_argument(
        "-r",
        "--resolution",
        type=str.lower,
        default="all",
        help=
        "Filter subtitles by resolution; select one of the choices",
        choices=['360p', '480p', '720p', '1080p', '1440p'])
    parser.add_argument(
        "-p",
        "--path",
        type=str.lower,
        default="PWD",
        help=
        'Select a path to save subtitles; default is the current path (PWD)')
    args = parser.parse_args()

    if args.version:
        print("subtitle-Downloader 0.1")

    else:
        #info = {'Title' : args.title ,'Year':args.year,'Language':args.language,'limit':args.limit }
        #print("{}".format(info))

        print("Connecting......")
        print(f"Searching for {args.title + '  ' + args.year}")
        print("Please wait...")

        downloader.main(title=args.title,
                        year=args.year,
                        language=args.language,
                        limit=args.limit,
                        resolution=args.resolution,
                        path_to_save=args.path)
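
With the flags defined above, a typical invocation looks like: python subtitle_downloader.py -t inception -y 2010 -lang english -l 2 -r 720p (the script name here is hypothetical; omitting -p saves to the current path).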
Example 12
    def yt_get_comments(self):

        driver = webdriver.Firefox(options=self.options)

        df = pd.read_excel('User_Inputs.xlsx')
        df = df['YT_Artist_Searches']
        df.dropna(inplace=True)

        yt_url = []
        yt_id = []
        yt_title = []
        yt_comments = []
        yt_datetime = []

        for artist in df:

            driver.get("https://www.youtube.com/results?search_query=" +
                       artist + "&sp=CAI%253D")

            driver.execute_script('window.scrollTo(1, 150000);')

            time.sleep(1.5)

            soup = bs.BeautifulSoup(driver.page_source, 'lxml')

            for item in soup.find_all(
                    'a',
                    class_='yt-simple-endpoint style-scope ytd-video-renderer'
            ):
                if artist.lower() in item['title'].lower():
                    try:
                        dl.main([
                            '-y' + item['href'].replace('/watch?v=', ''),
                            '-oyt_comments.json'
                        ])
                        for line in open('yt_comments.json',
                                         'r',
                                         encoding='utf-8'):
                            # parse each record once, then collect its fields
                            record = json.loads(line.replace(';', ''))
                            yt_comments.append(record['text'])
                            yt_datetime.append(record['time'])
                            yt_url.append('https://www.youtube.com/' +
                                          item['href'])
                            yt_title.append(item['title'])

                    except Exception:
                        pass

        df_yt = pd.DataFrame(
            zip(yt_title, yt_url, yt_comments, yt_datetime),
            columns=['Mix', 'MixURL', 'Comments', 'Comments Datetime'])

        def transform_yt_datetime(x):
            # YouTube reports relative ages ("3 years ago", "3 年前", "il y a 3 ans");
            # look for a unit keyword as a substring of x, then subtract the amount
            if any(token in x for token in ['年', 'an', 'ans', 'year', 'years']):
                return datetime.date.today() - relativedelta(
                    years=int(re.search(r'\d+', x).group()))
            elif any(token in x for token in ['月', 'mois', 'month', 'months']):
                return datetime.date.today() - relativedelta(
                    months=int(re.search(r'\d+', x).group()))
            elif any(token in x
                     for token in ['週', 'semaine', 'semaines', 'week', 'weeks']):
                return datetime.date.today() - relativedelta(
                    weeks=int(re.search(r'\d+', x).group()))
            elif any(token in x for token in ['日', 'jour', 'jours', 'day', 'days']):
                return datetime.date.today() - relativedelta(
                    days=int(re.search(r'\d+', x).group()))
            elif any(token in x
                     for token in ['時', 'heure', 'heures', 'hour', 'hours']):
                return datetime.date.today() - relativedelta(
                    hours=int(re.search(r'\d+', x).group()))
            elif any(token in x for token in ['分', 'minute', 'minutes']):
                return datetime.date.today() - relativedelta(
                    minutes=int(re.search(r'\d+', x).group()))
            elif any(token in x
                     for token in ['秒', 'seconde', 'secondes', 'second', 'seconds']):
                return datetime.date.today() - relativedelta(
                    seconds=int(re.search(r'\d+', x).group()))
            else:
                return x

        df_yt['Comments Datetime'] = df_yt['Comments Datetime'].apply(
            transform_yt_datetime)

        driver.close()
        driver.quit()

        return df_yt
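
The self.options attribute passed to webdriver.Firefox above is assumed to be a selenium Options object; a minimal construction sketch (headless mode is an assumption, and the surrounding class is omitted):

from selenium.webdriver.firefox.options import Options

options = Options()
options.add_argument('--headless')  # run Firefox without opening a window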
Example 13
#!/usr/bin/env python3

import downloader
downloader.main()
Example 14
def test_main(self):
    actual = downloader.main()