import logging
import os
import threading

import aria2p


def getConfig(name: str):
    return os.environ[name]


LOGGER = logging.getLogger(__name__)

try:
    if bool(getConfig('_____REMOVE_THIS_LINE_____')):
        logging.error('The README.md file is there to be read! Exiting now!')
        exit()
except KeyError:
    pass

aria2 = aria2p.API(
    aria2p.Client(
        host="http://localhost",
        port=6800,
        secret="",
    ))

DOWNLOAD_DIR = None
BOT_TOKEN = None

download_dict_lock = threading.Lock()
status_reply_dict_lock = threading.Lock()
# Key: update.effective_chat.id
# Value: telegram.Message
status_reply_dict = {}
# Key: update.message.message_id
# Value: An object of Status
download_dict = {}
# Stores list of users and chats the bot is authorized to use in
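# A minimal sketch of reading the shared state above from another handler
# thread: take the lock, copy, and render outside the critical section.
# The helper name all_statuses is hypothetical, not part of the original bot.
def all_statuses():
    """Snapshot download_dict so iteration stays safe while handlers mutate it."""
    with download_dict_lock:
        return dict(download_dict)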
import datetime
import re

import aria2p
from flask import Flask, request

from func import send_one
from db_sheets import get_db_sheet
import config_aria2

application = Flask(__name__)
# application.debug = True
user_db_sheet = get_db_sheet(database_name="user", sheet_name="user")
aria2 = aria2p.API(
    aria2p.Client(
        host=config_aria2.aria2_host,
        port=config_aria2.aria2_port,
        secret=config_aria2.aria2_secret
    )
)


@application.route('/')
def hello_world():
    return 'Hello, World!' + '<br /><br />' + str(datetime.datetime.now())


@application.route('/wx', methods=["GET", "POST"])
def get():
    if request.method == "GET":  # The request is a GET request
        my_echostr = request.args.get('echostr')  # Fetch the echostr parameter it carries
        return my_echostr
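# The aria2 handle above is not used in the routes shown; a hedged sketch of a
# status route that could sit alongside them (the route path and formatting
# are made up for illustration).
@application.route('/status')
def status():
    # One line per download, formatted like the hello_world route's HTML output
    lines = [f"{d.name}: {d.progress_string()}" for d in aria2.get_downloads()]
    return '<br />'.join(lines) or 'No downloads'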
    --seed-time=0.01 \
    --max-upload-limit=5K \
    --max-concurrent-downloads=5 \
    --min-split-size=10M \
    --follow-torrent=mem \
    --split=10 \
    --bt-tracker={trackers} \
    --daemon=true \
    --allow-overwrite=true"
subprocess_run(cmd)

if not os.path.isdir(TEMP_DOWNLOAD_DIRECTORY):
    os.makedirs(TEMP_DOWNLOAD_DIRECTORY)
download_path = os.getcwd() + TEMP_DOWNLOAD_DIRECTORY.strip(".")

aria2 = aria2p.API(aria2p.Client(host="http://localhost", port=8210, secret=""))
aria2.set_global_options({"dir": download_path})


@register(outgoing=True, pattern="^.amag(?: |$)(.*)")
async def magnet_download(event):
    magnet_uri = event.pattern_match.group(1)
    # Add the magnet URI to the aria2 queue
    try:
        download = aria2.add_magnet(magnet_uri)
    except Exception as e:
        LOGS.info(str(e))
        return await event.edit("Error:\n`" + str(e) + "`")
    gid = download.gid
    await check_progress_for_dl(gid=gid, event=event, previous=None)
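# Because --daemon=true returns before the RPC server is necessarily accepting
# connections, it can help to poll the endpoint before issuing calls. A sketch
# under that assumption; wait_for_rpc is a made-up helper name, and port 8210
# matches the snippet above.
import time

import aria2p


def wait_for_rpc(port: int = 8210, retries: int = 10) -> aria2p.API:
    """Poll the local aria2 RPC endpoint until it answers, then return an API handle."""
    client = aria2p.Client(host="http://localhost", port=port, secret="")
    for _ in range(retries):
        try:
            client.get_global_option()  # any cheap RPC call works as a liveness probe
            return aria2p.API(client)
        except Exception:
            time.sleep(1)
    raise RuntimeError("aria2 RPC server did not come up")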
                link = get_magnet_link(url + true_data['href'])
            else:
                link = '0'
        elif mode == 1:
            link = get_magnet_link(url + true_data['href'])
        true_data['full_magnet'] = link
    return true_data_list


if __name__ == '__main__':
    ufd_list = get_unfinished_list()
    # Initialize the RPC client module
    aria2 = aria2p.API(
        aria2p.Client(
            host=rpc_host,
            port=rpc_port,
            secret=rpc_secret
        )
    )
    for ufd_ani in ufd_list:
        # If the path already exists, run in follow-up mode and download only
        # what was newly uploaded yesterday; if it does not exist, create it
        # and download everything.
        if os.path.exists(path + ufd_ani['name']):
            dl_links = get_download_links(ufd_ani, 0)
        else:
            os.mkdir(path + ufd_ani['name'])
            dl_links = get_download_links(ufd_ani, 1)
        for dl_link in dl_links:
            # In follow-up mode, skip anything not released yesterday; such
            # entries have their magnet link set to '0'.
            if dl_link['full_magnet'] != '0':
                # # Spawn a new aria2 process for the download instead of calling RPC
                # command = 'aria2c -D ' +\
import asyncio
import io
import os

import aria2p
from telethon import events

from uniborg.util import admin_cmd

ARIA_2_PORT = 6800
cmd = (
    f"aria2c --enable-rpc --rpc-listen-all=false --rpc-listen-port {ARIA_2_PORT} "
    "--max-connection-per-server=10 --rpc-max-request-size=1024M --seed-time=0.01 "
    "--min-split-size=10M --follow-torrent=mem --split=10 --daemon=true"
)
aria2_is_running = os.system(cmd)

aria2 = aria2p.API(
    aria2p.Client(
        host="http://localhost",
        port=ARIA_2_PORT,
        secret=""
    )
)


@borg.on(admin_cmd("magnet"))
async def magnet_download(event):
    if event.fwd_from:
        return
    var = event.raw_text
    var = var.split(" ")
    magnet_uri = var[1]
    logger.info(magnet_uri)
    # Add the magnet URI to the aria2 queue
    try:
import os
import socket
import time

import aria2p
from aria2p.downloads import Download

# Resolve the IP of aria2 by hostname
host = socket.gethostbyname('aria2-pro')

# Instance of the aria2p API.
# This line connects the bot to the aria2 RPC server.
aria2: aria2p.API = aria2p.API(
    aria2p.Client(host=f"http://{host}", secret=os.environ.get("RPC_SECRET")))


def addDownload(link: str) -> None:
    """Adds a magnet link to aria2 and waits for the download to finish.

    Args:
        link: Magnet URI to download.
    """
    link = link.replace('/mirror', '')
    link = link.strip()
    download: Download = aria2.add_magnet(link)
    while download.is_active:
        print("downloading")
        time.sleep(1)
        download.update()  # refresh status from the RPC server, or the loop never exits
    if download.is_complete:
        print("Download complete")
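# Instead of polling is_active in a loop, aria2p can also deliver completion
# events over the RPC websocket. A hedged sketch reusing the aria2 instance
# above; the callback name on_complete is illustrative.
def on_complete(api: aria2p.API, gid: str) -> None:
    # aria2 fires this when a download finishes; resolve the GID to a Download.
    finished = api.get_download(gid)
    print(f"Download complete: {finished.name}")


# threaded=True keeps the listener loop off the main thread.
aria2.listen_to_notifications(on_download_complete=on_complete, threaded=True)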
import os
import queue
import sys
import time
from configparser import ConfigParser

import aria2p


def main():
    if len(sys.argv) < 3:
        print("Usage: pass the path to config.ini and a subdirectory as arguments")
        return
    config_ini = sys.argv[1]
    config = ConfigParser()
    config.read(config_ini, encoding='UTF-8')
    watch_path = config['inotify']['watch_dir']
    if not watch_path.endswith('/'):
        watch_path = watch_path + '/'
    rpc_host = config['aria2']['host']
    port = int(config['aria2']['port'])
    secret = config['aria2']['secret']
    url_pre = config['aria2']['url_pre']
    if not url_pre.endswith('/'):
        url_pre = url_pre + '/'

    url_queue = queue.Queue()
    dir_list = [
        watch_path + sys.argv[2],
    ]
    while len(dir_list) > 0:
        pathname = dir_list.pop()
        if os.path.isdir(pathname):
            for name in os.listdir(pathname):
                dir_list.append(pathname + '/' + name)
        elif os.path.isfile(pathname):
            relative_url = pathname[len(watch_path):]
            download_url = url_pre + relative_url
            relative_dir = ""
            if '/' in relative_url:
                relative_dir = os.path.dirname(pathname)[len(watch_path):]
            url_queue.put((download_url, relative_dir))
        else:
            print("Skip " + pathname)
    print(str(url_queue.qsize()) + " files to be downloaded")

    # Download files
    client = aria2p.Client(host=rpc_host, port=port, secret=secret)
    aria2 = aria2p.API(client)
    while True:
        try:
            options = client.get_global_option()
            break
        except Exception as e:
            print("Link error: ", e)
            time.sleep(10)
    options['max_connection_per_server'] = config['aria2']['connections']
    base_dir = options['dir']
    if not base_dir.endswith('/'):
        base_dir = base_dir + '/'
    print("Host target dir: ", base_dir)
    while not url_queue.empty():
        download_url, relative_dir = url_queue.get(block=True, timeout=None)
        options['dir'] = base_dir + relative_dir
        uris = [
            download_url,
        ]
        print("Downloading: " + download_url)
        try:
            aria2.add_uris(uris, options=options)
        except Exception as e:
            print("Download error", e)
            url_queue.put((download_url, relative_dir))
            time.sleep(10)
import datetime
import os
import signal
import sys

import aria2p

import automateddl

secret = os.getenv('SECRET', '')
downloaddir = os.getenv('DOWNLOADDIR', '/downloads')
extractdir = os.getenv('EXTRACTDIR', '/downloads/Extract')
endeddir = os.getenv('ENDEDDIR', '/downloads/Ended')

print(datetime.datetime.now().strftime("%Y/%m/%dT%H:%M:%S.%f") + " Server: " + server)
print(datetime.datetime.now().strftime("%Y/%m/%dT%H:%M:%S.%f") + " Port: " + port)
print(datetime.datetime.now().strftime("%Y/%m/%dT%H:%M:%S.%f") + " downloaddir: " + downloaddir)
print(datetime.datetime.now().strftime("%Y/%m/%dT%H:%M:%S.%f") + " extractdir: " + extractdir)
print(datetime.datetime.now().strftime("%Y/%m/%dT%H:%M:%S.%f") + " endeddir: " + endeddir)

aria2 = aria2p.API(
    aria2p.Client(
        host=server,
        port=port,
        secret=secret
    )
)

autodl = automateddl.AutomatedDL(aria2, downloaddir, extractdir, endeddir)


def signal_handler(sig, frame):
    # Shut the watcher down cleanly on Ctrl+C
    autodl.stop()
    sys.exit(0)


signal.signal(signal.SIGINT, signal_handler)

autodl.start()
import aria2p

# Initialization (host, port, and secret here are site-specific, not the library defaults)
aria2 = aria2p.API(
    aria2p.Client(host="http://192.168.9.111", port=6800, secret="xxtkacch"))

# List downloads
downloads = aria2.get_downloads()

for download in downloads:
    print(download.name, download.download_speed)
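# Download objects expose more than name and raw speed; a short sketch
# printing progress for active downloads only, using the same aria2 instance
# as above.
for download in aria2.get_downloads():
    if download.is_active:
        # progress_string() formats percent complete, eta_string() the time left
        print(download.name, download.progress_string(), download.eta_string())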
import json
import os

import aria2p
from pyromod import listen
from pyrogram import Client

Aria2_host = "http://127.0.0.1"
Aria2_port = "8080"
Aria2_secret = os.environ.get('Aria2_secret')
App_title = os.environ.get('Title')
Telegram_bot_api = os.environ.get('Telegram_bot_api')
Telegram_user_id = os.environ.get('Telegram_user_id')
Api_hash = os.environ.get('Api_hash')
Api_id = os.environ.get('Api_id')

aria2 = aria2p.API(
    aria2p.Client(host=Aria2_host, port=int(Aria2_port), secret=Aria2_secret))

client = Client("my_bot",
                bot_token=Telegram_bot_api,
                api_hash=Api_hash,
                api_id=Api_id)
client.start()
client.send_message(chat_id=int(Telegram_user_id), text="Bot is online!!!")
Bot_info = client.get_me()
BOT_name = Bot_info.username
client.stop()
#!/usr/bin/env python3
import os
import sys

import aria2p
from IPython import embed

cwd = os.getcwd()

# Initialization; these are the default values
a2 = aria2p.API(
    aria2p.Client(
        host="http://localhost",
        port=6800,
        secret=os.environ.get('ARIA_SECRET', '')
    )
)

## TODO don't add duplicate download, just resume the previous one

# List downloads
downloads = a2.get_downloads()

for download in downloads:
    print(download.__dict__)
    # embed()

dl = a2.add_uris([sys.argv[1]], {
    'dir': cwd,
    'out': sys.argv[2]
})
print(dl.gid)
# embed()
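# One way to address the TODO above: look for an existing download carrying
# the same URI and resume it instead of adding a duplicate. A sketch assuming
# aria2p's File.uris exposes the raw {'uri': ..., 'status': ...} structs from
# the RPC; add_or_resume is a made-up helper name.
def add_or_resume(api, uri, options):
    """Resume a matching paused download, or add the URI if none exists."""
    for existing in api.get_downloads():
        known_uris = [u.get("uri") for f in existing.files for u in f.uris]
        if uri in known_uris:
            if existing.is_paused:
                existing.resume()
            return existing
    return api.add_uris([uri], options)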
    rec = select(list(zip_longest(*([iter(rec_list)] * PAGE_SIZE), fillvalue=None)))
    if rec is None:
        # Refresh list
        rec_list.flush()
        continue
    title, rid = rec['title'], rec['rid']
    if confirm(f"RID: {rid}, Title: {title}"):
        break

if confirm("Download danmaku?"):
    dm = []
    for i, new_dm in enumerate(Danmaku(rid)):
        if isinstance(new_dm_list := new_dm['dm_info'], list):
            dm.extend(new_dm_list)
        else:
            warnings.warn("Invalid danmaku chunk!")
        print(f'Finished getting index {i}, current length {len(dm)}')
    print('Reached the end')
    with open(rid + '.json', 'w') as f:
        json.dump(dm, f)

if confirm("Download with aria2?"):
    import re

    import aria2p

    conf = note['aria2']
    aria2 = aria2p.API(aria2p.Client(**conf['client']))
    all_uri = [u['url'] for u in URLList(rid)]
    date = datetime.utcfromtimestamp(rec['start_timestamp'] + UTC_OFFSET).strftime('%y%m%d')
    options = {'dir': f"{conf['dir']}/[{date}] {title} - {note[name]['name']}/source"}
    for u in all_uri:
        options['out'] = re.search(r".{13}:\d\d:\d\d\.flv", u).group().replace(':', '')
        aria2.add_uris([u], options)
def getfiles(self, be_careful=False):
    """Downloads all files associated with this API data"""
    if self.config is None:
        print('Config file not found. Please run in project directory')
        return
    if not os.path.exists(self.storagedir):
        os.mkdir(self.storagedir)
    if self.storage_type != 'zip':
        print('Only zip storage supported right now')
        return
    storage_file = os.path.join(self.storagedir, 'storage.zip')
    if not os.path.exists(storage_file):
        print('Storage file not found')
        return
    uniq_ids = set()
    allfiles_name = os.path.join(self.storagedir, 'allfiles.csv')
    if not os.path.exists(allfiles_name):
        if not self.config.has_section('follow'):
            logging.info('Extract file urls from downloaded data')
            mzip = ZipFile(storage_file, mode='r', compression=ZIP_DEFLATED)
            n = 0
            for fname in mzip.namelist():
                n += 1
                if n % 10 == 0:
                    logging.info('Processed %d files, uniq ids %d' % (n, len(uniq_ids)))
                tf = mzip.open(fname, 'r')
                data = json.load(tf)
                tf.close()
                try:
                    if self.data_key:
                        iterate_data = get_dict_value(
                            data, self.data_key, splitter=self.field_splitter)
                    else:
                        iterate_data = data
                    for item in iterate_data:
                        if item:
                            for key in self.files_keys:
                                file_data = get_dict_value(
                                    item, key, as_array=True,
                                    splitter=self.field_splitter)
                                if file_data:
                                    for uniq_id in file_data:
                                        if uniq_id is not None:
                                            if isinstance(uniq_id, list):
                                                uniq_ids.update(set(uniq_id))
                                            else:
                                                uniq_ids.add(uniq_id)
                except KeyError:
                    logging.info('Data key: %s not found' % (str(self.data_key)))
        else:
            details_storage_file = os.path.join(self.storagedir, 'details.zip')
            mzip = ZipFile(details_storage_file, mode='r', compression=ZIP_DEFLATED)
            n = 0
            for fname in mzip.namelist():
                n += 1
                if n % 1000 == 0:
                    logging.info('Processed %d records' % (n))
                tf = mzip.open(fname, 'r')
                data = json.load(tf)
                tf.close()
                items = []
                if self.follow_data_key:
                    for item in get_dict_value(
                            data, self.follow_data_key,
                            splitter=self.field_splitter):
                        items.append(item)
                else:
                    items = [
                        data,
                    ]
                for item in items:
                    for key in self.files_keys:
                        urls = get_dict_value(item, key, as_array=True,
                                              splitter=self.field_splitter)
                        if urls is not None:
                            for uniq_id in urls:
                                if uniq_id is not None and len(uniq_id.strip()) > 0:
                                    uniq_ids.add(uniq_id)
            mzip.close()
        logging.info('Storing all filenames')
        f = open(allfiles_name, 'w', encoding='utf8')
        for u in uniq_ids:
            f.write(str(u) + '\n')
        f.close()
    else:
        logging.info('Load all filenames')
        uniq_ids = load_file_list(allfiles_name)

    # Start download
    processed_files = []
    skipped_files_dict = {}
    files_storage_file = os.path.join(self.storagedir, 'files.zip')
    files_list_storage = os.path.join(self.storagedir, 'files.list')
    files_skipped = os.path.join(self.storagedir, 'files_skipped.list')
    if os.path.exists(files_list_storage):
        processed_files = load_file_list(files_list_storage, encoding='utf8')
        list_file = open(files_list_storage, 'a', encoding='utf8')
    else:
        list_file = open(files_list_storage, 'w', encoding='utf8')
    if os.path.exists(files_skipped):
        skipped_files_dict = load_csv_data(files_skipped, key='filename',
                                           encoding='utf8')
        skipped_file = open(files_skipped, 'a', encoding='utf8')
        skipped = csv.DictWriter(
            skipped_file,
            delimiter=';',
            fieldnames=['filename', 'filesize', 'reason'])
    else:
        skipped_files_dict = {}
        skipped_file = open(files_skipped, 'w', encoding='utf8')
        skipped = csv.DictWriter(
            skipped_file,
            delimiter=';',
            fieldnames=['filename', 'filesize', 'reason'])
        skipped.writeheader()
    use_aria2 = self.use_aria2 == 'True'
    if use_aria2:
        aria2 = aria2p.API(
            aria2p.Client(host="http://localhost", port=6800, secret=""))
    else:
        aria2 = None
    if self.file_storage_type == 'zip':
        fstorage = ZipFileStorage(files_storage_file, mode='a',
                                  compression=ZIP_DEFLATED)
    elif self.file_storage_type == 'filesystem':
        fstorage = FilesystemStorage(os.path.join('storage', 'files'))
    n = 0
    for uniq_id in uniq_ids:
        if self.fetch_mode == 'prefix':
            url = self.root_url + str(uniq_id)
        elif self.fetch_mode == 'pattern':
            url = self.root_url.format(uniq_id)
        n += 1
        if n % 50 == 0:
            logging.info('Downloaded %d files' % (n))
        # if url in processed_files:
        #     continue
        if be_careful:
            r = self.http.head(url, timeout=DEFAULT_TIMEOUT)
            if ('content-disposition' in r.headers.keys()
                    and self.storage_mode == 'filepath'):
                filename = r.headers['content-disposition'].rsplit(
                    'filename=', 1)[-1].strip('"')
            elif self.default_ext is not None:
                filename = uniq_id + '.' + self.default_ext
            else:
                filename = uniq_id
            # if not 'content-length' in r.headers.keys():
            #     logging.info('File %s skipped since content-length not found in headers' % (url))
            #     record = {'filename': filename, 'filesize': "0", 'reason': 'Content-length not set in headers'}
            #     skipped_files_dict[uniq_id] = record
            #     skipped.writerow(record)
            #     continue
            if ('content-length' in r.headers.keys()
                    and int(r.headers['content-length']) > FILE_SIZE_DOWNLOAD_LIMIT
                    and self.file_storage_type == 'zip'):
                logging.info('File skipped with size %d and name %s' %
                             (int(r.headers['content-length']), url))
                record = {
                    'filename': filename,
                    'filesize': str(r.headers['content-length']),
                    'reason': 'File too large. More than %d bytes' %
                              (FILE_SIZE_DOWNLOAD_LIMIT)
                }
                skipped_files_dict[uniq_id] = record
                skipped.writerow(record)
                continue
        else:
            if self.default_ext is not None:
                filename = str(uniq_id) + '.' + self.default_ext
            else:
                filename = str(uniq_id)
        if self.storage_mode == 'filepath':
            filename = urlparse(url).path
        logging.info('Processing %s as %s' % (url, filename))
        if fstorage.exists(filename):
            logging.info('File %s already stored' % (filename))
            continue
        if not use_aria2:
            response = self.http.get(url, timeout=DEFAULT_TIMEOUT)
            fstorage.store(filename, response.content)
            list_file.write(url + '\n')
        else:
            aria2.add_uris(
                uris=[
                    url,
                ],
                options={
                    'out': filename,
                    'dir': os.path.abspath(os.path.join('storage', 'files'))
                })
    fstorage.close()
    list_file.close()
    skipped_file.close()
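# The aria2 branch above queues URIs and returns immediately. If a caller
# needs to block until aria2 has drained its queue, a polling helper along
# these lines could work; wait_for_aria2 is an illustrative name, and
# get_stats() with its num_active/num_waiting fields is aria2p's wrapper
# around aria2's global statistics.
import time


def wait_for_aria2(api, poll_seconds: float = 5.0) -> None:
    """Block until aria2 reports no active or waiting downloads."""
    while True:
        stats = api.get_stats()
        if stats.num_active == 0 and stats.num_waiting == 0:
            return
        time.sleep(poll_seconds)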
def get_aria2():
    return aria2p.API(
        aria2p.Client(host=return_default_config_string('aria_address'),
                      port=int(return_default_config_string('aria_port') or 0),
                      secret=return_default_config_string('aria_token')))
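# A short usage sketch for the factory above; the connection values come from
# whatever return_default_config_string reads, as in the snippet.
api = get_aria2()
for download in api.get_downloads():
    print(download.gid, download.status, download.name)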