class RemoteBackend(Thread):

    def __init__(self, manager):
        Thread.__init__(self)
        self.manager = manager
        self.pyload = manager.pyload
        self._ = manager.pyload._
        self.enabled = True
        self.__running = Event()

    @property
    def running(self):
        return self.__running.is_set()

    def run(self):
        self.__running.set()
        try:
            self.serve()
        except Exception as e:
            self.pyload.log.error(
                self._("Remote backend error: {0}").format(str(e)))
            if self.pyload.debug:
                print_exc()
        finally:
            self.__running.clear()

    def setup(self, host, port):
        raise NotImplementedError

    def check_deps(self):
        return True

    def serve(self):
        raise NotImplementedError

    def shutdown(self):
        raise NotImplementedError

    def quit(self):
        self.enabled = False  #: set flag and call the shutdown method, so the thread can react
        self.shutdown()
class RemoteBackend(Thread):

    __slots__ = ['enabled', 'manager', 'pyload', 'running']

    def __init__(self, manager):
        Thread.__init__(self)
        self.manager = manager
        self.pyload = manager.pyload
        self.enabled = True
        self.running = Event()

    def run(self):
        self.running.set()
        try:
            self.serve()
        except Exception as e:
            self.pyload.log.error(
                _("Remote backend error: {0}").format(e))
            if self.pyload.debug:
                print_exc()
        finally:
            self.running.clear()

    def setup(self, host, port):
        raise NotImplementedError

    def check_deps(self):
        return True

    def serve(self):
        raise NotImplementedError

    def shutdown(self):
        raise NotImplementedError

    def stop(self):
        self.enabled = False  #: set flag and call the shutdown method, so the thread can react
        self.shutdown()
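# --- Illustrative sketch (not part of pyLoad): a minimal concrete backend
# showing how the RemoteBackend hooks above are meant to be filled in.
# The echo protocol and the socket handling are assumptions made for this
# example; a real backend would wire its own server library into
# setup()/serve()/shutdown().
import socket


class EchoBackend(RemoteBackend):

    def setup(self, host, port):
        self.srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.srv.bind((host, port))
        self.srv.listen(5)

    def serve(self):
        while self.enabled:
            try:
                conn, _addr = self.srv.accept()
            except OSError:
                # quit()/shutdown() closed the listening socket
                break
            conn.sendall(conn.recv(1024))
            conn.close()

    def shutdown(self):
        self.srv.close()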
class WebSocketServer(socketserver.ThreadingMixIn, http.server.HTTPServer):
    """
    HTTPServer specialized for WebSocket.
    """

    # Overrides SocketServer.ThreadingMixIn.daemon_threads
    daemon_threads = True
    # Overrides BaseHTTPServer.HTTPServer.allow_reuse_address
    allow_reuse_address = True

    def __init__(self, options):
        """
        Override SocketServer.TCPServer.__init__ to set an SSL enabled
        socket object to self.socket before server_bind and
        server_activate, if necessary.
        """
        # Removed dispatcher init here
        self._logger = logging.getLogger('pyload')

        self.request_queue_size = options.request_queue_size
        self.__ws_is_shut_down = Event()
        self.__ws_serving = False

        socketserver.BaseServer.__init__(
            self, (options.server_host, options.port),
            WebSocketRequestHandler)

        # Expose the options object to allow handler objects access it.
        # We name it with websocket_ prefix to avoid conflict.
        self.websocket_server_options = options

        self._create_sockets()
        self.server_bind()
        self.server_activate()

    def _create_sockets(self):
        self.server_name, self.server_port = self.server_address
        self._sockets = []
        if not self.server_name:
            # On platforms that do not support IPv6, the first bind fails.
            # On platforms that support IPv6
            # - If it binds both IPv4 and IPv6 on call with AF_INET6, the
            #   first bind succeeds and the second fails (we'll see 'Address
            #   already in use' error).
            # - If it binds only IPv6 on call with AF_INET6, both calls are
            #   expected to succeed to listen on both protocols.
            addrinfo_array = [
                (socket.AF_INET6, socket.SOCK_STREAM, '', '', ''),
                (socket.AF_INET, socket.SOCK_STREAM, '', '', '')]
        else:
            addrinfo_array = socket.getaddrinfo(self.server_name,
                                                self.server_port,
                                                socket.AF_UNSPEC,
                                                socket.SOCK_STREAM,
                                                socket.IPPROTO_TCP)
        for addrinfo in addrinfo_array:
            family, socktype, proto, canonname, sockaddr = addrinfo
            try:
                socket_ = socket.socket(family, socktype)
            except Exception as e:
                self._logger.info(_("Skip by failure: {0}").format(e))
                continue
            server_options = self.websocket_server_options
            if server_options.use_tls:
                # For the case of _HAS_OPEN_SSL, we do wrapper setup after
                # accept.
                if server_options.tls_module == _TLS_BY_STANDARD_MODULE:
                    if server_options.tls_client_auth:
                        if server_options.tls_client_cert_optional:
                            client_cert_ = ssl.CERT_OPTIONAL
                        else:
                            client_cert_ = ssl.CERT_REQUIRED
                    else:
                        client_cert_ = ssl.CERT_NONE
                    socket_ = ssl.wrap_socket(
                        socket_,
                        keyfile=server_options.private_key,
                        certfile=server_options.certificate,
                        ssl_version=ssl.PROTOCOL_SSLv23,
                        ca_certs=server_options.tls_client_ca,
                        cert_reqs=client_cert_,
                        do_handshake_on_connect=False)
            self._sockets.append((socket_, addrinfo))

    def server_bind(self):
        """
        Override SocketServer.TCPServer.server_bind to enable multiple
        sockets bind.
        """
        failed_sockets = []
        for socketinfo in self._sockets:
            socket_, addrinfo = socketinfo
            if self.allow_reuse_address:
                socket_.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            try:
                socket_.bind(self.server_address)
            except Exception as e:
                self._logger.info(_("Skip by failure: {0}").format(e))
                socket_.close()
                failed_sockets.append(socketinfo)
            if self.server_address[1] == 0:
                # The operating system assigns the actual port number for
                # port number 0. This case, the second and later sockets
                # should use the same port number. Also self.server_port is
                # rewritten because it is exported, and will be used by
                # external code.
                self.server_address = (self.server_name,
                                       socket_.getsockname()[1])
                self.server_port = self.server_address[1]
                self._logger.info(
                    _('Port {0:d} is assigned').format(self.server_port))

        for socketinfo in failed_sockets:
            self._sockets.remove(socketinfo)

    def server_activate(self):
        """
        Override SocketServer.TCPServer.server_activate to enable multiple
        sockets listen.
        """
        failed_sockets = []
        for socketinfo in self._sockets:
            socket_, addrinfo = socketinfo
            self._logger.debug("Listen on: {0}".format(addrinfo))
            try:
                socket_.listen(self.request_queue_size)
            except Exception as e:
                self._logger.info(_("Skip by failure: {0}").format(e))
                socket_.close()
                failed_sockets.append(socketinfo)

        for socketinfo in failed_sockets:
            self._sockets.remove(socketinfo)

        if len(self._sockets) == 0:
            self._logger.critical(
                _('No sockets activated. '
                  'Use info log level to see the reason'))

    def server_close(self):
        """
        Override SocketServer.TCPServer.server_close to enable multiple
        sockets close.
        """
        for socketinfo in self._sockets:
            socket_, addrinfo = socketinfo
            self._logger.info(_("Close on: {0}").format(addrinfo))
            socket_.close()

    def fileno(self):
        """
        Override SocketServer.TCPServer.fileno.
        """
        self._logger.critical(_('Not supported: fileno'))
        return self._sockets[0][0].fileno()

    # NOTE: client_address is a tuple
    def handle_error(self, request, client_address):
        """
        Override SocketServer.handle_error.
        """
        self._logger.error(
            "Exception in processing request from: {0}\n{1}".format(
                client_address, util.get_stack_trace()))

    def get_request(self):
        """
        Override TCPServer.get_request to wrap the OpenSSL.SSL.Connection
        object with _StandaloneSSLConnection to provide a makefile method.
        We cannot substitute OpenSSL.SSL.Connection.makefile since it's a
        read-only attribute.
        """
        accepted_socket, client_address = self.socket.accept()

        server_options = self.websocket_server_options
        if server_options.use_tls:
            if server_options.tls_module == _TLS_BY_STANDARD_MODULE:
                try:
                    accepted_socket.do_handshake()
                except ssl.SSLError as e:
                    self._logger.debug("{0}".format(e))
                    raise

                # Print cipher in use. Handshake is done on accept.
                self._logger.debug("Cipher: {0}".format(
                    accepted_socket.cipher()))
                self._logger.debug("Client cert: {0}".format(
                    accepted_socket.getpeercert()))
            elif server_options.tls_module == _TLS_BY_PYOPENSSL:
                # We cannot print the cipher in use. pyOpenSSL does not
                # provide any method to fetch that.
                ctx = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)
                ctx.use_privatekey_file(server_options.private_key)
                ctx.use_certificate_file(server_options.certificate)

                def default_callback(conn, cert, errnum, errdepth, ok):
                    return ok == 1

                # See the OpenSSL document for SSL_CTX_set_verify.
                if server_options.tls_client_auth:
                    verify_mode = OpenSSL.SSL.VERIFY_PEER
                    if not server_options.tls_client_cert_optional:
                        verify_mode |= OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT
                    ctx.set_verify(verify_mode, default_callback)
                    ctx.load_verify_locations(server_options.tls_client_ca,
                                              None)
                else:
                    ctx.set_verify(OpenSSL.SSL.VERIFY_NONE, default_callback)
                accepted_socket = OpenSSL.SSL.Connection(ctx, accepted_socket)
                accepted_socket.set_accept_state()

                # Convert SSL related error into socket.error so that
                # SocketServer ignores them and keeps running.
                #
                # TODO(tyoshino): Convert all kinds of errors.
                try:
                    accepted_socket.do_handshake()
                except OpenSSL.SSL.Error as e:
                    # Set errno part to 1 (SSL_ERROR_SSL) like the ssl
                    # module does.
                    self._logger.debug('{0!r}'.format(e))
                    raise socket.error(1, repr(e))

                cert = accepted_socket.get_peer_certificate()
                self._logger.debug("Client cert subject: {0}".format(
                    cert.get_subject().get_components()))
                accepted_socket = _StandaloneSSLConnection(accepted_socket)
            else:
                raise ValueError('No TLS support module is available')

        return accepted_socket, client_address

    def serve_forever(self, poll_interval=0.5):
        """
        Override SocketServer.BaseServer.serve_forever.
        """
        self.__ws_serving = True
        self.__ws_is_shut_down.clear()
        handle_request = self.handle_request
        if hasattr(self, '_handle_request_noblock'):
            handle_request = self._handle_request_noblock
        else:
            self._logger.warning(_('Fallback to blocking request handler'))
        try:
            while self.__ws_serving:
                r, w, e = select.select(
                    [socket_[0] for socket_ in self._sockets],
                    [], [], poll_interval)
                for socket_ in r:
                    self.socket = socket_
                    handle_request()
                self.socket = None
        finally:
            self.__ws_is_shut_down.set()

    def shutdown(self):
        """
        Override SocketServer.BaseServer.shutdown.
        """
        self.__ws_serving = False
        self.__ws_is_shut_down.wait()
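# --- Illustrative sketch (not part of the module): constructing the server
# with a minimal options object.  The attribute names mirror the ones read
# above (server_host, port, request_queue_size, use_tls); the port number is
# a placeholder and WebSocketRequestHandler is assumed to be importable from
# the surrounding module.
from types import SimpleNamespace

ws_options = SimpleNamespace(
    server_host='',          # empty host: try both the IPv6 and IPv4 wildcard
    port=7227,               # hypothetical port
    request_queue_size=128,
    use_tls=False,           # plain TCP, so no ssl/pyOpenSSL branch is hit
)

ws_server = WebSocketServer(ws_options)
try:
    ws_server.serve_forever()    # select()-based loop over all bound sockets
except KeyboardInterrupt:
    ws_server.shutdown()
finally:
    ws_server.server_close()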
class TransferManager(BaseManager):
    """Schedules and manages download and decrypter jobs."""

    def setup(self):
        # won't start download when true
        self.pause = True

        # each thread is in exactly one category
        self.free = []
        # a thread that is working must have a file as active attribute
        self.downloading = []
        # holds the decrypter threads
        self.decrypting = []

        # indicates when reconnect has occurred
        self.reconnecting = Event()
        self.lock = RWLock()

    @lock
    def done(self, thread):
        """Switch thread from working to free state."""
        # only download threads will be re-used
        if isinstance(thread, DownloadThread):
            # clean local var
            thread.active = None
            self.downloading.remove(thread)
            self.free.append(thread)
            thread.running.clear()
        elif isinstance(thread, DecrypterThread):
            self.decrypting.remove(thread)

    @lock
    def discard(self, thread):
        """Remove a thread from all lists."""
        if thread in self.free:
            self.free.remove(thread)
        elif thread in self.downloading:
            self.downloading.remove(thread)

    @lock
    def start_download_thread(self, info):
        """Use a free dl thread or create a new one."""
        if self.free:
            thread = self.free[0]
            del self.free[0]
        else:
            thread = DownloadThread(self)

        thread.put(self.pyload.files.get_file(info.fid))
        thread.start()
        # wait until it picked up the task
        thread.running.wait()
        self.downloading.append(thread)
        return thread

    @lock
    def start_decrypter_thread(self, info):
        """Start decrypting of entered data, all links in one package are
        accumulated to one thread."""
        self.pyload.files.set_download_status(
            info.fid, DownloadStatus.Decrypting)
        thread = DecrypterThread(
            self, [(info.download.url, info.download.plugin)],
            info.fid, info.package, info.owner)
        thread.start()
        self.decrypting.append(thread)
        return thread

    @lock(shared=True)
    def active_downloads(self, uid=None):
        """Retrieve pyfiles of running downloads."""
        return [x.active for x in self.downloading
                if uid is None or x.active.owner == uid]

    @lock(shared=True)
    def waiting_downloads(self):
        """All waiting downloads."""
        return [x.active for x in self.downloading
                if x.active.has_status('waiting')]

    @lock(shared=True)
    def get_progress_list(self, uid):
        """Progress of all running downloads."""
        # decrypter progress could be none
        return [x for x in
                [thd.get_progress_info()
                 for thd in self.downloading + self.decrypting
                 if uid is None or thd.owner == uid]
                if x is not None]

    def processing_ids(self):
        """Get an id list of all pyfiles processed."""
        return [x.fid for x in self.active_downloads(None)]

    @lock(shared=True)
    def exit(self):
        """End all threads."""
        self.pause = True
        for thread in self.downloading + self.free:
            thread.put('quit')

    def work(self):
        """Main routine that does the periodical work."""
        self.try_reconnect()

        if (availspace(self.pyload.config.get('general', 'storage_folder')) <
                self.pyload.config.get('general', 'min_storage_size') << 20):
            self.pyload.log.warning(
                self._('Not enough space left on device'))
            self.pause = True

        # if self.pause or not self.pyload.api.is_time_download():
        #     return False
        if self.pause:
            return False

        # at least one thread wants reconnect and we are supposed to wait
        if self.pyload.config.get(
                'reconnect', 'wait') and self.want_reconnect() > 1:
            return False

        self.assign_jobs()
        # TODO: clean free threads

    def assign_jobs(self):
        """Load jobs from db and try to assign them."""
        limit = self.pyload.config.get(
            'connection', 'max_transfers') - len(self.active_downloads())

        # check for waiting dl rule
        if limit <= 0:
            # increase limit if there are waiting downloads
            limit += min(
                len(self.waiting_downloads()),
                self.pyload.config.get('connection', 'wait') +
                self.pyload.config.get('connection', 'max_transfers') -
                len(self.active_downloads()))

        slots = self.get_remaining_plugin_slots()
        occ = tuple(plugin for plugin, v in slots.items() if v == 0)
        jobs = self.pyload.files.get_jobs(occ)

        # map plugin to list of jobs
        plugins = defaultdict(list)

        # iterate over a copy so entries can be deleted while looping
        for uid, info in list(jobs.items()):
            # check the quota of each user and filter
            quota = self.pyload.api.calc_quota(uid)
            if -1 < quota < info.size:
                del jobs[uid]

            plugins[info.download.plugin].append(info)

        for plugin, jobs in plugins.items():
            # we know exactly the number of remaining jobs,
            # or can only start one job if the limit is not known
            to_schedule = slots[plugin] if plugin in slots else 1
            # start all chosen jobs
            for job in self.choose_jobs(jobs, to_schedule):
                # if the job was started the limit will be reduced
                if self.start_job(job, limit):
                    limit -= 1

    def choose_jobs(self, jobs, k):
        """Make a fair choice of which k jobs to start."""
        # TODO: prefer admins, make a fairer choice?
        if k <= 0:
            return []
        if k >= len(jobs):
            return jobs
        return random.sample(jobs, k)

    def start_job(self, info, limit):
        """Start a download or decrypter thread with given file info."""
        plugin = self.pyload.pgm.find_type(info.download.plugin)
        # this plugin does not exist
        if plugin is None:
            self.pyload.log.error(
                self._("Plugin '{0}' does not exist").format(
                    info.download.plugin))
            self.pyload.files.set_download_status(
                info.fid, DownloadStatus.Failed)
            return False

        if plugin == 'hoster':
            # this job can't be started
            if limit <= 0:
                return False
            self.start_download_thread(info)
            return True
        elif plugin == 'crypter':
            self.start_decrypter_thread(info)
        else:
            self.pyload.log.error(
                self._("Plugin type '{0}' "
                       "can't be used for downloading").format(plugin))
        return False

    @lock(shared=True)
    def try_reconnect(self):
        """Check if a reconnect is needed."""
        if not self.pyload.config.get('reconnect', 'activated'):
            return False

        # only reconnect when all threads are ready
        if not (0 < self.want_reconnect() == len(self.downloading)):
            return False

        script = self.pyload.config.get('reconnect', 'script')
        if not os.path.isfile(script):
            self.pyload.config.set('reconnect', 'activated', False)
            self.pyload.log.warning(self._('Reconnect script not found!'))
            return

        self.reconnecting.set()
        self.pyload.log.info(self._('Starting reconnect'))

        # wait until all threads got the event
        while [x.active.plugin.waiting for x in self.downloading].count(True):
            time.sleep(0.25)

        old_ip = get_ip()

        self.pyload.evm.fire('reconnect:before', old_ip)
        self.pyload.log.debug('Old IP: {0}'.format(old_ip))

        try:
            subprocess.call(
                self.pyload.config.get('reconnect', 'script'), shell=True)
        except Exception as exc:
            self.pyload.log.warning(
                self._('Failed executing reconnect script!'))
            self.pyload.log.error(exc, exc_info=self.pyload.debug)
            self.pyload.config.set('reconnect', 'activated', False)
            self.reconnecting.clear()
            return

        time.sleep(1)
        ip = get_ip()
        self.pyload.evm.fire('reconnect:after', ip)

        if not old_ip or old_ip == ip:
            self.pyload.log.warning(self._('Reconnect not successful'))
        else:
            self.pyload.log.info(
                self._('Reconnected, new IP: {0}').format(ip))

        self.reconnecting.clear()

    @lock(shared=True)
    def want_reconnect(self):
        """Number of downloads that are waiting for reconnect."""
        active = [x.active.has_plugin() and
                  x.active.plugin.want_reconnect and
                  x.active.plugin.waiting for x in self.downloading]
        return active.count(True)

    @lock(shared=True)
    def get_remaining_plugin_slots(self):
        """Dict of plugin names mapped to remaining dls."""
        occ = {}
        # decrypters are treated as occupied
        for thd in self.decrypting:
            if not thd.progress:
                continue
            occ[thd.progress.plugin] = 0

        # get all default dl limits
        for thd in self.downloading:
            if not thd.active.has_plugin():
                continue
            limit = thd.active.plugin.get_download_limit()
            # limit <= 0 means no limit
            occ[thd.active.pluginname] = limit if limit > 0 else float('inf')

        # subtract with running downloads
        for thd in self.downloading:
            if not thd.active.has_plugin():
                continue
            plugin = thd.active.pluginname
            if plugin in occ:
                occ[plugin] -= 1

        return occ
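# --- Illustrative sketch (hypothetical data): the slot accounting performed
# by get_remaining_plugin_slots().  Each plugin starts at its download limit
# (or "unlimited" when the limit is <= 0), then every running download
# subtracts one slot; plugins that reach 0 are reported as occupied to
# files.get_jobs() in assign_jobs().
running = [('uploaded', 3), ('uploaded', 3), ('share-online', 0)]  # (plugin, limit)

occ = {}
for plugin, limit in running:
    occ[plugin] = limit if limit > 0 else float('inf')
for plugin, _limit in running:
    occ[plugin] -= 1

print(occ)                                          # {'uploaded': 1, 'share-online': inf}
print(tuple(p for p, v in occ.items() if v == 0))   # () -> no plugin is fully occupied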
class DatabaseBackend(Thread):

    subs = []

    DB_FILE = 'pyload.db'
    VERSION_FILE = 'db.version'

    def __init__(self, core):
        super(DatabaseBackend, self).__init__()
        self.setDaemon(True)
        self.pyload = core
        self._ = core._
        self.manager = None  # set later
        self.error = None  # TODO: Recheck...
        self.__running = Event()

        self.jobs = Queue()

        set_db(self)

    @property
    def running(self):
        return self.__running.is_set()

    def setup(self):
        """
        *MUST* be called before the db can be used!
        """
        self.start()
        self.__running.wait()

    def init(self):
        """Main loop, which executes commands."""
        version = self._check_version()

        self.conn = sqlite3.connect(self.DB_FILE)
        os.chmod(self.DB_FILE, 0o600)

        self.c = self.conn.cursor()

        if version is not None and version < DB_VERSION:
            success = self._convert_db(version)
            # delete database
            if not success:
                self.c.close()
                self.conn.close()

                remove(self.VERSION_FILE)
                shutil.move(self.DB_FILE, self.DB_FILE + '.bak')

                self.pyload.log.warning(
                    self._('Database was deleted '
                           'due to incompatible version'))

                with io.open(self.VERSION_FILE, mode='w') as fp:
                    fp.write(to_str(DB_VERSION))

                self.conn = sqlite3.connect(self.DB_FILE)
                os.chmod(self.DB_FILE, 0o600)
                self.c = self.conn.cursor()

        self._create_tables()
        self.conn.commit()

    def run(self):
        try:
            self.init()
        except Exception as exc:
            self.error = exc
        finally:
            self.__running.set()

        while True:
            j = self.jobs.get()
            if j == 'quit':
                self.c.close()
                self.conn.commit()
                self.conn.close()
                self.closing.set()
                break
            j.process_job()

    # TODO: Recheck...
    def exit(self):
        self.__running.clear()
        self.closing = Event()
        self.jobs.put('quit')
        self.closing.wait(1)

    def _check_version(self):
        """Get db version."""
        if not os.path.isfile(self.VERSION_FILE) or not os.path.getsize(
                self.VERSION_FILE):
            with io.open(self.VERSION_FILE, mode='w') as fp:
                fp.write(to_str(DB_VERSION))
        with io.open(self.VERSION_FILE, mode='r') as fp:
            v = int(fp.read().strip())
        return v

    def _convert_db(self, v):
        try:
            return getattr(self, '_convert_v{0:d}'.format(v))()
        except Exception:
            return False

    # -- convert scripts start --

    def _convert_v6(self):
        return False

    # -- convert scripts end --

    def _create_tables(self):
        """Create tables for database."""
        self.c.execute(
            'CREATE TABLE IF NOT EXISTS "packages" ('
            '"pid" INTEGER PRIMARY KEY AUTOINCREMENT, '
            '"name" TEXT NOT NULL, '
            '"folder" TEXT DEFAULT "" NOT NULL, '
            '"site" TEXT DEFAULT "" NOT NULL, '
            '"comment" TEXT DEFAULT "" NOT NULL, '
            '"password" TEXT DEFAULT "" NOT NULL, '
            '"added" INTEGER DEFAULT 0 NOT NULL,'  # set by trigger
            '"status" INTEGER DEFAULT 0 NOT NULL,'
            '"tags" TEXT DEFAULT "" NOT NULL,'
            '"shared" INTEGER DEFAULT 0 NOT NULL,'
            '"packageorder" INTEGER DEFAULT -1 NOT NULL,'  # inc by trigger
            '"root" INTEGER DEFAULT -1 NOT NULL, '
            '"owner" INTEGER NOT NULL, '
            'FOREIGN KEY(owner) REFERENCES users(uid), '
            'CHECK (root != pid)'
            ')')

        self.c.execute(
            'CREATE TRIGGER IF NOT EXISTS "insert_package" '
            'AFTER INSERT ON "packages" '
            'BEGIN '
            'UPDATE packages SET added = strftime("%s", "now"), '
            'packageorder = (SELECT max(p.packageorder) + 1 FROM '
            'packages p WHERE p.root=new.root) '
            'WHERE rowid = new.rowid;'
            'END')

        self.c.execute(
            'CREATE TRIGGER IF NOT EXISTS "delete_package" '
            'AFTER DELETE ON "packages" '
            'BEGIN '
            'DELETE FROM files WHERE package = old.pid;'
            'UPDATE packages SET packageorder=packageorder-1 '
            'WHERE packageorder > old.packageorder AND root=old.pid;'
            'END')

        self.c.execute(
            'CREATE INDEX IF NOT EXISTS "package_index" ON '
            'packages(root, owner)')
        self.c.execute(
            'CREATE INDEX IF NOT EXISTS "package_owner" ON packages(owner)')

        self.c.execute(
            'CREATE TABLE IF NOT EXISTS "files" ('
            '"fid" INTEGER PRIMARY KEY AUTOINCREMENT, '
            '"name" TEXT NOT NULL, '
            '"size" INTEGER DEFAULT 0 NOT NULL, '
            '"status" INTEGER DEFAULT 0 NOT NULL, '
            '"media" INTEGER DEFAULT 1 NOT NULL,'
            '"added" INTEGER DEFAULT 0 NOT NULL,'
            '"fileorder" INTEGER DEFAULT -1 NOT NULL, '
            '"url" TEXT DEFAULT "" NOT NULL, '
            '"plugin" TEXT DEFAULT "" NOT NULL, '
            '"hash" TEXT DEFAULT "" NOT NULL, '
            '"dlstatus" INTEGER DEFAULT 0 NOT NULL, '
            '"error" TEXT DEFAULT "" NOT NULL, '
            '"package" INTEGER NOT NULL, '
            '"owner" INTEGER NOT NULL, '
            'FOREIGN KEY(owner) REFERENCES users(uid), '
            'FOREIGN KEY(package) REFERENCES packages(id)'
            ')')

        self.c.execute(
            'CREATE INDEX IF NOT EXISTS "file_index" ON '
            'files(package, owner)')
        self.c.execute(
            'CREATE INDEX IF NOT EXISTS "file_owner" ON files(owner)')
        self.c.execute(
            'CREATE INDEX IF NOT EXISTS "file_plugin" ON files(plugin)')

        self.c.execute(
            'CREATE TRIGGER IF NOT EXISTS "insert_file" '
            'AFTER INSERT ON "files" '
            'BEGIN '
            'UPDATE files SET added = strftime("%s", "now"), '
            'fileorder = (SELECT max(f.fileorder) + 1 FROM files f '
            'WHERE f.package=new.package) '
            'WHERE rowid = new.rowid;'
            'END')

        self.c.execute(
            'CREATE TABLE IF NOT EXISTS "collector" ('
            '"owner" INTEGER NOT NULL, '
            '"data" TEXT NOT NULL, '
            'FOREIGN KEY(owner) REFERENCES users(uid), '
            'PRIMARY KEY(owner) ON CONFLICT REPLACE'
            ') ')

        self.c.execute(
            'CREATE TABLE IF NOT EXISTS "storage" ('
            '"identifier" TEXT NOT NULL, '
            '"key" TEXT NOT NULL, '
            '"value" TEXT DEFAULT "", '
            'PRIMARY KEY (identifier, key) ON CONFLICT REPLACE'
            ')')

        self.c.execute(
            'CREATE TABLE IF NOT EXISTS "users" ('
            '"uid" INTEGER PRIMARY KEY AUTOINCREMENT, '
            '"name" TEXT NOT NULL UNIQUE, '
            '"email" TEXT DEFAULT "" NOT NULL, '
            '"password" TEXT NOT NULL, '
            '"role" INTEGER DEFAULT 0 NOT NULL, '
            '"permission" INTEGER DEFAULT 0 NOT NULL, '
            '"folder" TEXT DEFAULT "" NOT NULL, '
            '"traffic" INTEGER DEFAULT -1 NOT NULL, '
            '"dllimit" INTEGER DEFAULT -1 NOT NULL, '
            '"dlquota" TEXT DEFAULT "" NOT NULL, '
            '"hddquota" INTEGER DEFAULT -1 NOT NULL, '
            '"template" TEXT DEFAULT "default" NOT NULL, '
            '"user" INTEGER DEFAULT -1 NOT NULL, '  # set by trigger to self
            'FOREIGN KEY(user) REFERENCES users(uid)'
            ')')

        self.c.execute(
            'CREATE INDEX IF NOT EXISTS "username_index" ON users(name)')

        self.c.execute(
            'CREATE TRIGGER IF NOT EXISTS "insert_user" AFTER '
            'INSERT ON "users" '
            'BEGIN '
            'UPDATE users SET user = new.uid, folder=new.name '
            'WHERE rowid = new.rowid;'
            'END')

        self.c.execute(
            'CREATE TABLE IF NOT EXISTS "settings" ('
            '"plugin" TEXT NOT NULL, '
            '"user" INTEGER DEFAULT -1 NOT NULL, '
            '"config" TEXT NOT NULL, '
            'FOREIGN KEY(user) REFERENCES users(uid), '
            'PRIMARY KEY (plugin, user) ON CONFLICT REPLACE'
            ')')

        self.c.execute(
            'CREATE TABLE IF NOT EXISTS "accounts" ('
            '"aid" INTEGER PRIMARY KEY AUTOINCREMENT, '
            '"plugin" TEXT NOT NULL, '
            '"loginname" TEXT NOT NULL, '
            '"owner" INTEGER NOT NULL, '
            '"activated" INTEGER NOT NULL DEFAULT 1, '
            '"password" TEXT DEFAULT "", '
            '"shared" INTEGER NOT NULL DEFAULT 0, '
            '"options" TEXT DEFAULT "", '
            'FOREIGN KEY(owner) REFERENCES users(uid)'
            ')')

        self.c.execute(
            'CREATE INDEX IF NOT EXISTS "accounts_login" ON '
            'accounts(plugin, loginname)')

        self.c.execute(
            'CREATE TABLE IF NOT EXISTS "stats" ('
            '"id" INTEGER PRIMARY KEY AUTOINCREMENT, '
            '"user" INTEGER NOT NULL, '
            '"plugin" TEXT NOT NULL, '
            '"time" INTEGER NOT NULL, '
            '"premium" INTEGER DEFAULT 0 NOT NULL, '
            '"amount" INTEGER DEFAULT 0 NOT NULL, '
            'FOREIGN KEY(user) REFERENCES users(uid)'
            ')')

        self.c.execute(
            'CREATE INDEX IF NOT EXISTS "stats_time" ON stats(user, time)')

        # try to lower ids
        self.c.execute('SELECT max(fid) FROM files')
        fid = self.c.fetchone()[0]
        fid = int(fid) if fid else 0
        self.c.execute('UPDATE SQLITE_SEQUENCE SET seq=? WHERE name=?',
                       (fid, 'files'))
        self.c.execute('SELECT max(pid) FROM packages')
        pid = self.c.fetchone()[0]
        pid = int(pid) if pid else 0
        self.c.execute('UPDATE SQLITE_SEQUENCE SET seq=? WHERE name=?',
                       (pid, 'packages'))

        self.c.execute('VACUUM')

    def create_cursor(self):
        return self.conn.cursor()

    # NOTE: ``async`` became a reserved word in Python 3.7, so the decorator
    # and the dispatching method below are spelled ``async_`` here; the
    # module-level decorator (defined outside this class) must use the
    # matching name.
    @async_
    def commit(self):
        self.conn.commit()

    @queue
    def sync_save(self):
        self.conn.commit()

    @async_
    def rollback(self):
        self.conn.rollback()

    def async_(self, f, *args, **kwargs):
        args = (self,) + args
        job = DatabaseJob(f, *args, **kwargs)
        self.jobs.put(job)

    def queue(self, f, *args, **kwargs):
        # Raise previous error of initialization
        if isinstance(self.error, Exception):
            raise self.error
        args = (self,) + args
        job = DatabaseJob(f, *args, **kwargs)
        self.jobs.put(job)
        # only wait when db is running
        if self.running:
            job.wait()
        return job.result

    @classmethod
    def register_sub(cls, klass):
        cls.subs.append(klass)

    @classmethod
    def unregister_sub(cls, klass):
        cls.subs.remove(klass)

    def __getattr__(self, attr):
        for sub in DatabaseBackend.subs:
            if hasattr(sub, attr):
                return getattr(sub, attr)
        raise AttributeError(attr)
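# --- Illustrative sketch (not part of the original module): how a mix-in
# database class is registered.  The ``queue`` decorator below is a stand-in
# for the module-level decorator that forwards calls into the backend's job
# queue via the global ``DB`` set by set_db(); the real project defines its
# own version before the class above.
def queue(f):
    def wrapper(*args, **kwargs):
        return DB.queue(f, *args, **kwargs)   # DB is the active backend
    return wrapper


class StorageMethods(object):

    @queue
    def get_storage(db, identifier, key):
        # runs on the backend thread; ``db`` is prepended by DatabaseBackend.queue
        db.c.execute(
            'SELECT value FROM storage WHERE identifier=? AND key=?',
            (identifier, key))
        row = db.c.fetchone()
        return row[0] if row else None


DatabaseBackend.register_sub(StorageMethods)
# afterwards core.db.get_storage('plugin', 'some_key') resolves through
# DatabaseBackend.__getattr__ and is executed by the worker thread.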
class Core(object):

    DEFAULT_CONFIGNAME = 'config.ini'
    DEFAULT_LANGUAGE = 'english'
    DEFAULT_USERNAME = '******'
    DEFAULT_PASSWORD = '******'
    DEFAULT_STORAGENAME = 'downloads'

    @property
    def version(self):
        return __version__

    @property
    def version_info(self):
        return __version_info__

    @property
    def running(self):
        return self.__running.is_set()

    def __init__(self, cfgdir, tmpdir, debug=None, restore=False):
        self.__running = Event()
        self.__do_restart = False
        self.__do_exit = False
        self._ = lambda x: x

        self.cfgdir = fullpath(cfgdir)
        self.tmpdir = fullpath(tmpdir)

        os.chdir(self.cfgdir)

        # if self.tmpdir not in sys.path:
        #     sys.path.append(self.tmpdir)

        # if refresh:
        #     cleanpy(PACKDIR)

        self.config = ConfigParser(self.DEFAULT_CONFIGNAME)
        self.debug = self.config.get(
            'log', 'debug') if debug is None else debug
        self.log = LoggerFactory(self, self.debug)

        self._init_database(restore)
        self._init_managers()

        self.request = self.req = RequestFactory(self)

        self._init_api()

        atexit.register(self.exit)

    def _init_api(self):
        from pyload.api import Api
        self.api = Api(self)

    def _init_database(self, restore):
        from pyload.core.database import DatabaseBackend
        from pyload.core.datatype import Permission, Role

        # TODO: Move inside DatabaseBackend
        newdb = not os.path.isfile(DatabaseBackend.DB_FILE)
        self.db = DatabaseBackend(self)
        self.db.setup()

        if restore or newdb:
            self.db.add_user(self.DEFAULT_USERNAME, self.DEFAULT_PASSWORD,
                             Role.Admin, Permission.All)
        if restore:
            self.log.warning(
                self._('Restored default login credentials `admin|pyload`'))

    def _init_managers(self):
        from pyload.core.manager import (
            AccountManager, AddonManager, EventManager, ExchangeManager,
            FileManager, InfoManager, PluginManager, TransferManager)

        self.scheduler = sched.scheduler(time.time, time.sleep)
        self.filemanager = self.files = FileManager(self)
        self.pluginmanager = self.pgm = PluginManager(self)
        self.exchangemanager = self.exm = ExchangeManager(self)
        self.eventmanager = self.evm = EventManager(self)
        self.accountmanager = self.acm = AccountManager(self)
        self.infomanager = self.iom = InfoManager(self)
        self.transfermanager = self.tsm = TransferManager(self)
        # TODO: Remove builtins.ADDONMANAGER
        builtins.ADDONMANAGER = self.addonmanager = self.adm = AddonManager(
            self)
        # self.remotemanager = self.rem = RemoteManager(self)
        # self.servermanager = self.svm = ServerManager(self)

        self.db.manager = self.files  # ugly?

    def _setup_permissions(self):
        self.log.debug('Setup permissions...')

        if os.name == 'nt':
            return

        change_group = self.config.get('permission', 'change_group')
        change_user = self.config.get('permission', 'change_user')

        if change_group:
            try:
                group = self.config.get('permission', 'group')
                set_process_group(group)
            except Exception as exc:
                self.log.error(self._('Unable to change gid'))
                self.log.error(exc, exc_info=self.debug)

        if change_user:
            try:
                user = self.config.get('permission', 'user')
                set_process_user(user)
            except Exception as exc:
                self.log.error(self._('Unable to change uid'))
                self.log.error(exc, exc_info=self.debug)

    def set_language(self, lang):
        domain = 'core'
        localedir = resource_filename(__package__, 'locale')
        languages = (locale.locale_alias[lang.lower()].split('_', 1)[0],)
        self._set_language(domain, localedir, languages)

    def _set_language(self, *args, **kwargs):
        trans = gettext.translation(*args, **kwargs)
        try:
            self._ = trans.ugettext
        except AttributeError:
            self._ = trans.gettext

    def _setup_language(self):
        self.log.debug('Setup language...')
        lang = self.config.get('general', 'language')
        if not lang:
            lc = locale.getlocale()[0] or locale.getdefaultlocale()[0]
            lang = lc.split('_', 1)[0] if lc else 'en'
        try:
            self.set_language(lang)
        except IOError as exc:
            self.log.error(exc, exc_info=self.debug)
            self._set_language('core', fallback=True)

    # def _setup_niceness(self):
    #     niceness = self.config.get('general', 'niceness')
    #     renice(niceness=niceness)
    #     ioniceness = int(self.config.get('general', 'ioniceness'))
    #     ionice(niceness=ioniceness)

    def _setup_storage(self):
        self.log.debug('Setup storage...')

        storage_folder = self.config.get('general', 'storage_folder')
        if storage_folder is None:
            storage_folder = os.path.join(
                builtins.USERDIR, self.DEFAULT_STORAGENAME)
        self.log.info(self._('Storage: {0}').format(storage_folder))
        makedirs(storage_folder, exist_ok=True)

        avail_space = format.size(availspace(storage_folder))
        self.log.info(
            self._('Available storage space: {0}').format(avail_space))

    def _setup_network(self):
        self.log.debug('Setup network...')

        # TODO: Move to accountmanager
        self.log.info(self._('Activating accounts...'))
        self.acm.load_accounts()
        # self.scheduler.enter(0, 0, self.acm.load_accounts)
        self.adm.activate_addons()

    def run(self):
        self.log.info('Welcome to pyLoad v{0}'.format(self.version))
        if self.debug:
            self.log.warning('*** DEBUG MODE ***')
        try:
            self.log.debug('Starting pyLoad...')
            self.evm.fire('pyload:starting')
            self.__running.set()

            self._setup_language()
            self._setup_permissions()

            self.log.info(self._('Config directory: {0}').format(self.cfgdir))
            self.log.info(self._('Cache directory: {0}').format(self.tmpdir))

            self._setup_storage()
            self._setup_network()
            # self._setup_niceness()

            # # some memory stats
            # from guppy import hpy
            # hp = hpy()
            # print(hp.heap())
            # import objgraph
            # objgraph.show_most_common_types(limit=30)
            # import memdebug
            # memdebug.start(8002)
            # from meliae import scanner
            # scanner.dump_all_objects(os.path.join(PACKDIR, 'objs.json'))

            self.log.debug('pyLoad is up and running')
            self.evm.fire('pyload:started')

            self.tsm.pause = False  # NOTE: Recheck...
            while True:
                self.__running.wait()
                self.tsm.work()
                self.iom.work()
                self.exm.work()
                if self.__do_restart:
                    raise Restart
                if self.__do_exit:
                    raise Exit
                self.scheduler.run()
                time.sleep(1)

        except Restart:
            self.restart()
        except (Exit, KeyboardInterrupt, SystemExit):
            self.exit()
        except Exception as exc:
            self.log.critical(exc, exc_info=True)
            self.exit()

    def _remove_loggers(self):
        for handler in self.log.handlers:
            with closing(handler) as hdlr:
                self.log.removeHandler(hdlr)

    def restart(self):
        self.stop()
        self.log.info(self._('Restarting pyLoad...'))
        self.evm.fire('pyload:restarting')
        self.run()

    def exit(self):
        self.stop()
        self.log.info(self._('Exiting pyLoad...'))
        self.tsm.exit()
        self.db.exit()  # NOTE: Why here?
        self._remove_loggers()
        # if cleanup:
        #     self.log.info(self._("Deleting temp files..."))
        #     remove(self.tmpdir, ignore_errors=True)

    def stop(self):
        try:
            self.log.debug('Stopping pyLoad...')
            self.evm.fire('pyload:stopping')
            self.adm.deactivate_addons()
            self.api.stop_all_downloads()
        finally:
            self.files.sync_save()
            self.__running.clear()
            self.evm.fire('pyload:stopped')
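# --- Illustrative sketch (not part of pyLoad): how a launcher script would
# drive the Core object above.  The directory paths are placeholders.
if __name__ == '__main__':
    core = Core(cfgdir='/home/user/.pyload', tmpdir='/tmp/pyload', debug=True)
    core.run()   # blocks in the main work loop; Restart/Exit are handled inside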