Ejemplo n.º 1
0
Credits: https://github.com/jaskaranSM/UniBorg/blob/b42cd70144143ce079e5fb3aed49c9aa1412481b/stdplugins/aria.py
"""

import aria2p
import asyncio
import io
import os
from uniborg.util import admin_cmd

# Port the local aria2 RPC daemon listens on.
ARIA_2_PORT = 6800
# Shell command that spawns a local aria2c daemon with RPC enabled;
# rpc-listen-all=false keeps the RPC interface bound to localhost only.
cmd = f"aria2c --enable-rpc --rpc-listen-all=false --rpc-listen-port {ARIA_2_PORT}  --max-connection-per-server=10 --rpc-max-request-size=1024M --seed-time=0.01 --min-split-size=10M --follow-torrent=mem --split=10 --daemon=true"

# NOTE(review): os.system returns the shell exit status (0 on success), so
# this name is misleading — 0 here means the daemon launch command succeeded.
aria2_is_running = os.system(cmd)

# RPC handle for the daemon started above (no secret configured).
aria2 = aria2p.API(
    aria2p.Client(host="http://localhost", port=ARIA_2_PORT, secret=""))


@borg.on(admin_cmd("addmagnet"))
async def magnet_download(event):
    if event.fwd_from:
        return
    var = event.raw_text
    var = var.split(" ")
    magnet_uri = var[1]
    logger.info(magnet_uri)
    # Add Magnet URI Into Queue
    try:
        download = aria2.add_magnet(magnet_uri)
    except Exception as e:
        await event.edit(
Ejemplo n.º 2
0
import aria2p

# initialization, these are the default values
# (host/port are aria2's defaults; the secret is this server's RPC token).
aria2 = aria2p.API(
    aria2p.Client(
        host="http://localhost",
        port=6800,
        secret="5Uj3I+RPGwZI++qmXVOAf7J57Jh2xOasZgpR807TV7Y="
    )
)

# add downloads
uris = ["https://dv98.sibnet.ru/32/28/81/3228810.mp4?st=ivxq78rmkDpoqvNl7f9DRQ&e=1585347000&stor=56&noip=1",
"https://dv98.sibnet.ru/32/28/81/3228811.mp4?st=3eu4wGqxGnW2SpR-TBEZgA&e=1585347000&stor=53&noip=1",
"https://dv98.sibnet.ru/32/32/11/3232114.mp4?st=lQM82wDFHO52tr_Xzf58LA&e=1585347000&stor=53&noip=1",
"https://dv98.sibnet.ru/32/35/51/3235516.mp4?st=5a0p3Lsx9K6HN29hKRg3Kg&e=1585347000&stor=25&noip=1",
"https://dv98.sibnet.ru/32/39/35/3239350.mp4?st=4eYSm1X3DXC5b6E4VaRrag&e=1585347000&stor=46&noip=1",
"https://dv98.sibnet.ru/32/43/46/3243466.mp4?st=Vlu_pW_iD7690gtj5M_L2Q&e=1585347000&stor=25&noip=1",
"https://dv98.sibnet.ru/32/46/78/3246781.mp4?st=5EVsQUYQiyJowUa_ZlueTw&e=1585347000&stor=56&noip=1",
"https://dv98.sibnet.ru/32/50/11/3250118.mp4?st=y6dtkfUrnbRPOrtQxsilIA&e=1585347000&stor=53&noip=1",
"https://dv98.sibnet.ru/32/53/46/3253460.mp4?st=oruky-w9Oxj-VNHTvvQBZQ&e=1585347000&stor=6&noip=1",
"https://dv98.sibnet.ru/32/56/93/3256938.mp4?st=edsnQNjp-op_99emq0-v3w&e=1585347000&stor=56&noip=1",
"https://dv98.sibnet.ru/32/60/60/3260609.mp4?st=8-dj5GVbeeWnKb9Cwa9_4Q&e=1585347000&stor=56&noip=1",
"https://dv98.sibnet.ru/32/63/93/3263932.mp4?st=Zds66enAvN6bU4_xg6q_QQ&e=1585347000&stor=58&noip=1",
"https://dv98.sibnet.ru/32/67/78/3267784.mp4?st=nLQFv1Sehmp0HTNm1KaNYQ&e=1585347000&stor=53&noip=1",
"https://dv98.sibnet.ru/32/75/11/3275116.mp4?st=3CwT8y0IH_cBcR8L9615Ug&e=1585347000&stor=6&noip=1",
"https://dv98.sibnet.ru/32/80/04/3280049.mp4?st=EHYiknNl5GlgAFhhwX9SRQ&e=1585347000&stor=46&noip=1",
"https://dv98.sibnet.ru/32/84/20/3284201.mp4?st=bdkooktQPMzi9gfXBBunkw&e=1585347000&stor=46&noip=1",
"https://dv98.sibnet.ru/32/88/61/3288612.mp4?st=NCwgyH1hDlP0qqro3m9ifA&e=1585347000&stor=46&noip=1",
"https://dv98.sibnet.ru/32/92/20/3292200.mp4?st=nXVtvC45BvstxwYwcFHzQg&e=1585347000&stor=46&noip=1",
"https://dv98.sibnet.ru/32/96/48/3296488.mp4?st=1JGt_KRSniaiMk_IPUgu7A&e=1585347000&stor=26&noip=1",
Ejemplo n.º 3
0
import re
import aria2p

from func import send_one
from db_sheets import get_db_sheet
import config_aria2

# NOTE(review): Flask, request and datetime are used in this chunk but their
# imports are not visible here — presumably imported elsewhere in the file.
application = Flask(__name__)
# application.debug = True

# Spreadsheet-backed user table used by the web handlers below.
user_db_sheet = get_db_sheet(database_name="user", sheet_name="user")

# aria2 RPC handle; connection settings come from the config_aria2 module.
aria2 = aria2p.API(
    aria2p.Client(
        host=config_aria2.aria2_host,
        port=config_aria2.aria2_port,
        secret=config_aria2.aria2_secret
    )
)


@application.route('/')
def hello_world():
    """Health-check endpoint: greeting plus the current server time."""
    timestamp = str(datetime.datetime.now())
    return 'Hello, World!' + '<br /><br />' + timestamp


@application.route('/wx', methods=["GET", "POST"])
def get():
    if request.method == "GET":  # 判断请求方式是GET请求
        my_echostr = request.args.get('echostr')  # 获取携带的echostr参数
        return my_echostr
Ejemplo n.º 4
0
        ) as response:
            await message.reply(await response.text())


if __name__ == "__main__":
    log(f"Starting Akira {akira}...")

    # Spawn the aria2 daemon that the bot drives over RPC.
    # FIX: the daemon binary is `aria2c`; `aria2p` is the Python client
    # library/TUI and does not accept --enable-rpc, so the original Popen
    # could never start a working RPC server.
    log("Starting Aria2 daemon...")
    aria2proc = subprocess.Popen(["aria2c", "--enable-rpc"],
                                 stdout=subprocess.DEVNULL,
                                 stderr=subprocess.DEVNULL,
                                 stdin=subprocess.DEVNULL)
    # Give the daemon a moment to open its RPC port before connecting.
    time.sleep(1)

    log("Creating Aria2 client...")
    aria2client = aria2p.API(aria2p.Client(host="http://127.0.0.1", port=6800))

    if not os.path.exists(akira_dir):
        os.mkdir(akira_dir)

    async def on_startup(dp):
        # Register the public webhook URL with Telegram once the dispatcher starts.
        await bot.set_webhook(
            os.environ.get("URL") + "/" + os.environ.get("BOT_TOKEN"))

    log("Started.")
    # NOTE(review): `port` receives the raw env string — confirm the webhook
    # framework accepts a string port or cast with int().
    start_webhook(dispatcher=dp,
                  webhook_path="/" + os.environ.get("BOT_TOKEN"),
                  on_startup=on_startup,
                  port=os.environ.get("PORT"))
                link = get_magnet_link(url + true_data['href'])
            else:
                link = '0'
        elif mode == 1:
            link = get_magnet_link(url + true_data['href'])
        true_data['full_magnet'] = link
    return true_data_list


if __name__ == '__main__':
    ufd_list = get_unfinished_list()
    # [调用rpc]模块初始化
    aria2 = aria2p.API(
        aria2p.Client(
            host=rpc_host,
            port=rpc_port,
            secret=rpc_secret
        )
    )
    for ufd_ani in ufd_list:
        # 如果路径已经存在,则使用继续追番模式,只下载昨天新上传的内容。如果路径不存在的话则开启全下模式。
        if os.path.exists(path + ufd_ani['name']):
            dl_links = get_download_links(ufd_ani, 0)
        else:
            os.mkdir(path + ufd_ani['name'])
            dl_links = get_download_links(ufd_ani, 1)
        for dl_link in dl_links:
            # 追番模式下,不是昨天刚发布的不下载。当该资源不是昨天发布的时候,磁力链接会显示为0。
            if dl_link['full_magnet'] != '0':
                # # 直接新开一个aria2进程进行下载,而不是调用rpc
                # command = 'aria2c -D ' +\
Ejemplo n.º 6
0
import requests
import os
import aria2p
import sys
import time

# Browser-like headers so muse.jhu.edu serves its normal HTML pages.
headers = {
    "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36",
    "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
}

base_url = "https://muse.jhu.edu"
# Matches chapter-PDF paths such as "/chapter/123456/pdf".
# NOTE(review): `re` (and BeautifulSoup/List used below) are not imported in
# this chunk — presumably imported elsewhere in the file.
chapter_pattern = re.compile(r"/chapter/\d+/pdf")
base_dir = "/Users/zhangye/aria2"

# Local aria2 daemon with default host/port and no RPC secret.
aria_client = aria2p.API(aria2p.Client(host="http://localhost", port=6800, secret=""))


def parse_book_name(text: str) -> str:
    """Extract the book title from a MUSE page and sanitize it for use as a
    file/directory name.

    Args:
        text: Raw HTML of the book page.

    Returns:
        The title with every run of non-word characters collapsed to "_".
    """
    bs = BeautifulSoup(text, "html.parser")
    title: str = bs.find("li", class_="title").text
    # FIX: use a raw string for the regex — "\W" in a normal string literal
    # is an invalid escape sequence (SyntaxWarning on modern Python).
    clean_title = re.sub(r"\W+", "_", title)
    # Spaces are already non-word characters, so this replace is a no-op
    # safety net; kept for exact behavioral parity.
    return clean_title.replace(" ", "_")


def parse_chapter_uri(text: str) -> List[str]:
    """Return every chapter-PDF URI present in the page HTML."""
    # The pattern has no capture groups, so collecting full matches via
    # finditer is equivalent to findall on the same text.
    return [match.group(0) for match in chapter_pattern.finditer(text)]


def download(chapter_url: str, out_dir: str):
    import ssl
Ejemplo n.º 7
0
import aria2p
import socket
import os

from aria2p.downloads import Download

# Resolve IP of aria2 by hostname
# NOTE(review): 'aria2-pro' is presumably a container/service hostname on the
# deployment network — gethostbyname raises if it does not resolve; verify.
host = socket.gethostbyname('aria2-pro')

# Instance of Aria2 api
# This line connects the bot to the aria2 rpc server
# (default port 6800; the RPC token is read from the RPC_SECRET env var).
aria2: aria2p.API = aria2p.API(
    aria2p.Client(host=f"http://{host}", secret=os.environ.get("RPC_SECRET")))


def addDownload(link: str) -> None:
    """Adds download link to aria and starts the download.

    Args:
        link: Download url link (a magnet URI, possibly still carrying the
            bot's '/mirror' command text, which is stripped off).
    """
    import time  # local import so this fix needs no file-level change

    link = link.replace('/mirror', '')
    link = link.strip()
    download: Download = aria2.add_magnet(link)
    # FIX: the original looped on a cached status flag without refreshing it
    # or sleeping, which either exits immediately or spins forever while
    # flooding stdout. Poll with a delay and re-fetch status from the server.
    while download.is_active:
        print("downloading")
        time.sleep(1)
        download.update()
    if download.is_complete:
        print("Download complete")

Ejemplo n.º 8
0
    def getfiles(self, be_careful=False):
        """Downloads all files associated with this API data"""
        if self.config is None:
            print('Config file not found. Please run in project directory')
            return
        if not os.path.exists(self.storagedir):
            os.mkdir(self.storagedir)
        if self.storage_type != 'zip':
            print('Only zip storage supported right now')
            return
        storage_file = os.path.join(self.storagedir, 'storage.zip')
        if not os.path.exists(storage_file):
            print('Storage file not found')
            return
        uniq_ids = set()

        allfiles_name = os.path.join(self.storagedir, 'allfiles.csv')
        if not os.path.exists(allfiles_name):
            if not self.config.has_section('follow'):
                logging.info('Extract file urls from downloaded data')
                mzip = ZipFile(storage_file,
                               mode='r',
                               compression=ZIP_DEFLATED)
                n = 0
                for fname in mzip.namelist():
                    n += 1
                    if n % 10 == 0:
                        logging.info('Processed %d files, uniq ids %d' %
                                     (n, len(uniq_ids)))
                    tf = mzip.open(fname, 'r')
                    data = json.load(tf)
                    tf.close()
                    try:
                        if self.data_key:
                            iterate_data = get_dict_value(
                                data,
                                self.data_key,
                                splitter=self.field_splitter)
                        else:
                            iterate_data = data
                        for item in iterate_data:
                            if item:
                                for key in self.files_keys:
                                    file_data = get_dict_value(
                                        item,
                                        key,
                                        as_array=True,
                                        splitter=self.field_splitter)
                                    if file_data:
                                        for uniq_id in file_data:
                                            if uniq_id is not None:
                                                if isinstance(uniq_id, list):
                                                    uniq_ids.update(
                                                        set(uniq_id))
                                                else:
                                                    uniq_ids.add(uniq_id)
                    except KeyError:
                        logging.info('Data key: %s not found' %
                                     (str(self.data_key)))
            else:
                details_storage_file = os.path.join(self.storagedir,
                                                    'details.zip')
                mzip = ZipFile(details_storage_file,
                               mode='r',
                               compression=ZIP_DEFLATED)
                n = 0
                for fname in mzip.namelist():
                    n += 1
                    if n % 1000 == 0:
                        logging.info('Processed %d records' % (n))
                    tf = mzip.open(fname, 'r')
                    data = json.load(tf)
                    tf.close()
                    items = []
                    if self.follow_data_key:
                        for item in get_dict_value(
                                data,
                                self.follow_data_key,
                                splitter=self.field_splitter):
                            items.append(item)
                    else:
                        items = [
                            data,
                        ]
                    for item in items:
                        for key in self.files_keys:
                            urls = get_dict_value(item,
                                                  key,
                                                  as_array=True,
                                                  splitter=self.field_splitter)
                            if urls is not None:
                                for uniq_id in urls:
                                    if uniq_id is not None and len(
                                            uniq_id.strip()) > 0:
                                        uniq_ids.append(uniq_id)
            mzip.close()

            logging.info('Storing all filenames')
            f = open(allfiles_name, 'w', encoding='utf8')
            for u in uniq_ids:
                f.write(str(u) + '\n')
            f.close()
        else:
            logging.info('Load all filenames')
            uniq_ids = load_file_list(allfiles_name)
        # Start download
        processed_files = []
        skipped_files_dict = {}
        files_storage_file = os.path.join(self.storagedir, 'files.zip')
        files_list_storage = os.path.join(self.storagedir, 'files.list')
        files_skipped = os.path.join(self.storagedir, 'files_skipped.list')
        if os.path.exists(files_list_storage):
            processed_files = load_file_list(files_list_storage,
                                             encoding='utf8')
            list_file = open(files_list_storage, 'a', encoding='utf8')
        else:
            list_file = open(files_list_storage, 'w', encoding='utf8')
        if os.path.exists(files_skipped):
            skipped_files_dict = load_csv_data(files_skipped,
                                               key='filename',
                                               encoding='utf8')
            skipped_file = open(files_skipped, 'a', encoding='utf8')
            skipped = csv.DictWriter(
                skipped_file,
                delimiter=';',
                fieldnames=['filename', 'filesize', 'reason'])
        else:
            skipped_files_dict = {}
            skipped_file = open(files_skipped, 'w', encoding='utf8')
            skipped = csv.DictWriter(
                skipped_file,
                delimiter=';',
                fieldnames=['filename', 'filesize', 'reason'])
            skipped.writeheader()

        use_aria2 = True if self.use_aria2 == 'True' else False
        if use_aria2:
            aria2 = aria2p.API(
                aria2p.Client(host="http://localhost", port=6800, secret=""))
        else:
            aria2 = None
        if self.file_storage_type == 'zip':
            fstorage = ZipFileStorage(files_storage_file,
                                      mode='a',
                                      compression=ZIP_DEFLATED)
        elif self.file_storage_type == 'filesystem':
            fstorage = FilesystemStorage(os.path.join('storage', 'files'))

        n = 0
        for uniq_id in uniq_ids:
            if self.fetch_mode == 'prefix':
                url = self.root_url + str(uniq_id)
            elif self.fetch_mode == 'pattern':
                url = self.root_url.format(uniq_id)
            n += 1
            if n % 50 == 0:
                logging.info('Downloaded %d files' % (n))
#            if url in processed_files:
#                continue
            if be_careful:
                r = self.http.head(url, timeout=DEFAULT_TIMEOUT)
                if 'content-disposition' in r.headers.keys(
                ) and self.storage_mode == 'filepath':
                    filename = r.headers['content-disposition'].rsplit(
                        'filename=', 1)[-1].strip('"')
                elif self.default_ext is not None:
                    filename = uniq_id + '.' + self.default_ext
                else:
                    filename = uniq_id
#                if not 'content-length' in r.headers.keys():
#                    logging.info('File %s skipped since content-length not found in headers' % (url))
#                    record = {'filename' : filename, 'filesize' : "0", 'reason' : 'Content-length not set in headers'}
#                    skipped_files_dict[uniq_id] = record
#                    skipped.writerow(record)
#                    continue
                if 'content-length' in r.headers.keys() and int(
                        r.headers['content-length']
                ) > FILE_SIZE_DOWNLOAD_LIMIT and self.file_storage_type == 'zip':
                    logging.info('File skipped with size %d and name %s' %
                                 (int(r.headers['content-length']), url))
                    record = {
                        'filename':
                        filename,
                        'filesize':
                        str(r.headers['content-length']),
                        'reason':
                        'File too large. More than %d bytes' %
                        (FILE_SIZE_DOWNLOAD_LIMIT)
                    }
                    skipped_files_dict[uniq_id] = record
                    skipped.writerow(record)
                    continue
            else:
                if self.default_ext is not None:
                    filename = str(uniq_id) + '.' + self.default_ext
                else:
                    filename = str(uniq_id)
            if self.storage_mode == 'filepath':
                filename = urlparse(url).path
            logging.info('Processing %s as %s' % (url, filename))
            if fstorage.exists(filename):
                logging.info('File %s already stored' % (filename))
                continue
            if not use_aria2:
                response = self.http.get(url, timeout=DEFAULT_TIMEOUT)
                fstorage.store(filename, response.content)
                list_file.write(url + '\n')
            else:
                aria2.add_uris(uris=[
                    url,
                ],
                               options={
                                   'out':
                                   filename,
                                   'dir':
                                   os.path.abspath(
                                       os.path.join('storage', 'files'))
                               })

        fstorage.close()
        list_file.close()
        skipped_file.close()
Ejemplo n.º 9
0
# RPC secret for the aria2 server (empty when unauthenticated).
secret = os.getenv('SECRET', '')

# Directory layout: active downloads, extraction target, finished files.
# NOTE(review): env var name 'EXRACTDIR' looks like a typo of 'EXTRACTDIR';
# renaming it would change runtime behavior, so it is left as-is.
downloaddir = os.getenv('DOWNLOADDIR', '/downloads')
extractdir = os.getenv('EXRACTDIR', '/downloads/Extract')
endeddir = os.getenv('ENDEDDIR', '/downloads/Ended')

# Log effective settings with timestamps (server/port come from code above
# this chunk).
print(datetime.datetime.now().strftime("%Y/%m/%dT%H:%M:%S.%f") +  " Server: " + server)
print(datetime.datetime.now().strftime("%Y/%m/%dT%H:%M:%S.%f") +  " Port: " + port)

print(datetime.datetime.now().strftime("%Y/%m/%dT%H:%M:%S.%f") +  " downloaddir: " + downloaddir)
print(datetime.datetime.now().strftime("%Y/%m/%dT%H:%M:%S.%f") +  " extractdir: " + extractdir)
print(datetime.datetime.now().strftime("%Y/%m/%dT%H:%M:%S.%f") +  " endeddir: " + endeddir)

aria2 = aria2p.API(
    aria2p.Client(
        host=server,
        port=port,
        secret=secret
    )
)

# Watches aria2 downloads and moves/extracts them between the directories.
autodl = automateddl.AutomatedDL(aria2, downloaddir, extractdir, endeddir)

def signal_handler(sig, frame):
    # Stop the watcher cleanly on Ctrl-C before exiting.
    autodl.stop()
    sys.exit(0)

signal.signal(signal.SIGINT, signal_handler)

autodl.start()
Ejemplo n.º 10
0
import aria2p

# Connect to a remote aria2 RPC server (non-default host, token-protected).
aria2 = aria2p.API(
    aria2p.Client(host="http://192.168.9.111", port=6800, secret="xxtkacch"))

# Fetch the current download list once.
downloads = aria2.get_downloads()

# Report each download's name together with its current transfer speed.
for entry in downloads:
    print(entry.name, entry.download_speed)
Ejemplo n.º 11
0
from pyromod import listen
from pyrogram import Client

import os
import json

# Local aria2 RPC endpoint; the secret and all Telegram credentials are read
# from the environment.
Aria2_host = "http://127.0.0.1"
Aria2_port = "8080"
Aria2_secret = os.environ.get('Aria2_secret')
App_title = os.environ.get('Title')
Telegram_bot_api = os.environ.get('Telegram_bot_api')
Telegram_user_id = os.environ.get('Telegram_user_id')
Api_hash = os.environ.get('Api_hash')
Api_id = os.environ.get('Api_id')

# NOTE(review): aria2p is used here but not imported in this chunk —
# presumably imported elsewhere in the file.
aria2 = aria2p.API(
    aria2p.Client(host=Aria2_host, port=int(Aria2_port), secret=Aria2_secret))

client = Client("my_bot",
                bot_token=Telegram_bot_api,
                api_hash=Api_hash,
                api_id=Api_id)

client.start()

# Announce startup to the owner ("Bot上线!!!" ≈ "Bot online!!!"), then grab
# the bot's own username before stopping this short-lived client session.
client.send_message(chat_id=int(Telegram_user_id), text="Bot上线!!!")
Bot_info = client.get_me()

BOT_name = Bot_info.username
client.stop()
Ejemplo n.º 12
0
#!/usr/bin/env python3

import aria2p, os, sys
from IPython import embed

# Downloads land in the directory the script is launched from.
cwd = os.getcwd()

# initialization, these are the default values
# (the RPC secret can be overridden through the ARIA_SECRET env var).
a2 = aria2p.API(
    aria2p.Client(
        host="http://localhost",
        port=6800,
        secret=os.environ.get('ARIA_SECRET','')
    )
)

## TODO don't add duplicate download, just resume the previous one
# list downloads
downloads = a2.get_downloads()

# Debug dump of every known download's raw state.
for download in downloads:
    print(download.__dict__)
    # embed()
    pass

# Usage: script.py <url> <output-filename>; prints the new download's GID.
dl = a2.add_uris([sys.argv[1]], { 'dir' : cwd, 'out' : sys.argv[2] })
print(dl.gid)
# embed()
Ejemplo n.º 13
0
        rec = select(list(zip_longest(*([iter(rec_list)] * PAGE_SIZE), fillvalue=None)))
        if rec is None:  # Refresh list
            rec_list.flush()
            continue
        title, rid = rec['title'], rec['rid']
        if confirm(f"RID: {rid}, Title: {title}"):
            break
    if confirm("Download danmaku?"):
        dm = []
        for i, new_dm in enumerate(Danmaku(rid)):
            if isinstance(new_dm_list := new_dm['dm_info'], list):
                dm.extend(new_dm_list)
            else:
                warnings.warn("Invalid danmaku chunk!")
            print(f'Finish getting index {i}, current length {len(dm)}')
        print(f'Reach the end')
        with open(rid + '.json', 'w') as f:
            json.dump(dm, f)
    if confirm("Download with aria2?"):
        import aria2p
        import re

        conf = note['aria2']
        aria2 = aria2p.API(aria2p.Client(**conf['client']))
        all_uri = [u['url'] for u in URLList(rid)]
        date = datetime.utcfromtimestamp(rec['start_timestamp'] + UTC_OFFSET).strftime('%y%m%d')
        options = {'dir': f"{conf['dir']}/[{date}] {title} - {note[name]['name']}/source"}
        for u in all_uri:
            options['out'] = re.search(r".{13}:\d\d:\d\d\.flv", u).group().replace(':', '')
            aria2.add_uris([u], options)
Ejemplo n.º 14
0
    except Error as e:
        LOGGER.error(e)
        exit(1)


try:
    # Sentinel check: the sample config ships with this placeholder entry,
    # which the user must delete after reading the README.
    if bool(getConfig('_____REMOVE_THIS_LINE_____')):
        logging.error('The README.md file there to be read! Exiting now!')
        exit()
except KeyError:
    pass

# Local aria2 daemon, default port, no RPC secret.
aria2 = aria2p.API(
    aria2p.Client(
        host="http://localhost",
        port=6800,
        secret="",
    ))

# NOTE(review): presumably filled in later from config; only the None
# defaults are visible in this chunk.
DOWNLOAD_DIR = None
BOT_TOKEN = None

# Locks guarding the shared status/download dictionaries below.
download_dict_lock = threading.Lock()
status_reply_dict_lock = threading.Lock()
# Key: update.effective_chat.id
# Value: telegram.Message
status_reply_dict = {}
# Key: update.message.message_id
# Value: An object of Status
download_dict = {}
# Stores list of users and chats the bot is authorized to use in
Ejemplo n.º 15
0
def main():
    """Mirror every file under a watched directory to a remote aria2 server.

    Usage: script <config.ini> <subdir>. Walks <watch_dir>/<subdir>, maps
    each file to a download URL (url_pre + path relative to watch_dir), then
    queues all URLs on the aria2 RPC server, preserving the relative
    directory layout under aria2's global download dir.
    """
    if len(sys.argv) < 3:
        print("Script input path to config.ini as argument")
        return

    config_ini = sys.argv[1]
    config = ConfigParser()
    config.read(config_ini, encoding='UTF-8')

    watch_path = config['inotify']['watch_dir']
    if not watch_path.endswith('/'):
        watch_path = watch_path + '/'
    rpc_host = config['aria2']['host']
    port = int(config['aria2']['port'])
    secret = config['aria2']['secret']
    url_pre = config['aria2']['url_pre']
    if not url_pre.endswith('/'):
        url_pre = url_pre + '/'

    url_queue = queue.Queue()

    # Depth-first walk collecting (download_url, relative_dir) pairs.
    dir_list = [
        watch_path + sys.argv[2],
    ]
    while len(dir_list) > 0:
        pathname = dir_list.pop()
        if os.path.isdir(pathname):
            for name in os.listdir(pathname):
                dir_list.append(pathname + '/' + name)
        elif os.path.isfile(pathname):
            relative_url = pathname[len(watch_path):]
            download_url = url_pre + relative_url
            relative_dir = ""
            if '/' in relative_url:
                relative_dir = os.path.dirname(pathname)[len(watch_path):]
            url_queue.put((download_url, relative_dir))
        else:
            print("Skip " + pathname)
    # FIX: corrected "downlaoded" typo in the user-facing progress message.
    print(str(url_queue.qsize()) + " files to be downloaded")

    # download files
    client = aria2p.Client(host=rpc_host, port=port, secret=secret)
    aria2 = aria2p.API(client)
    # Retry until the RPC server is reachable.
    while True:
        try:
            options = client.get_global_option()
            break
        except Exception as e:
            print("Link error: ", e)
            time.sleep(10)

    # NOTE(review): aria2 option names are hyphenated
    # ("max-connection-per-server"); confirm this underscore-spelled key is
    # the one the server actually honours.
    options['max_connection_per_server'] = config['aria2']['connections']
    base_dir = options['dir']
    if not base_dir.endswith('/'):
        base_dir = base_dir + '/'
    print("Host target dir: ", base_dir)

    while not url_queue.empty():
        download_url, relative_dir = url_queue.get(block=True, timeout=None)
        options['dir'] = base_dir + relative_dir
        uris = [
            download_url,
        ]
        print("Downloading: " + download_url)
        try:
            aria2.add_uris(uris, options=options)
        except Exception as e:
            # Requeue and back off on transient RPC failures.
            print("Download error", e)
            url_queue.put((download_url, relative_dir))
            time.sleep(10)
Ejemplo n.º 16
0
--rpc-listen-port 8210 \
--rpc-max-request-size=1024M \
--seed-time=0.01 \
--split=10 \
"

subprocess_run(cmd)

if not os.path.isdir(TEMP_DOWNLOAD_DIRECTORY):
    os.makedirs(TEMP_DOWNLOAD_DIRECTORY)

# Build an absolute download path; strip(".") drops the leading "./" from a
# relative TEMP_DOWNLOAD_DIRECTORY before appending it to the cwd.
download_path = os.getcwd() + TEMP_DOWNLOAD_DIRECTORY.strip(".")
if not download_path.endswith("/"):
    download_path += "/"

# RPC handle for the daemon launched above (port 8210 matches the command
# string visible earlier in this file; no secret).
aria2 = aria2p.API(aria2p.Client(host="http://localhost", port=8210,
                                 secret=""))

# Route all aria2 downloads into the temp download directory.
aria2.set_global_options({"dir": download_path})


@register(outgoing=True, pattern=r"^\.amag(?: |$)(.*)")
async def magnet_download(event):
    """Queue the magnet URI from a `.amag` command and report progress."""
    magnet_uri = event.pattern_match.group(1)
    # Hand the magnet off to aria2; surface any RPC failure to the chat.
    try:
        dl = aria2.add_magnet(magnet_uri)
    except Exception as e:
        LOGS.info(str(e))
        return await event.edit(f"**Error:**\n`{e}`")
    await check_progress_for_dl(gid=dl.gid, event=event, previous=None)
Ejemplo n.º 17
0
def get_aria2():
    """Build an aria2p API handle from the stored configuration values."""
    address = return_default_config_string('aria_address')
    port_value = int(return_default_config_string('aria_port') or 0)
    token = return_default_config_string('aria_token')
    client = aria2p.Client(host=address, port=port_value, secret=token)
    return aria2p.API(client)