def remove(self, nzo_id, add_to_history=True, save=True, cleanup=True, keep_basic=False, del_files=False):
    """Remove job `nzo_id` from the queue.

    Pops the job from the internal table and list. When `add_to_history`
    is set the job is written to the history database; otherwise, when
    `cleanup` is set, its working files are removed. On-disk queue data
    is always removed for a found job.

    Returns the removed nzo_id, or None when the id was not in the queue.
    """
    if nzo_id in self.__nzo_table:
        nzo = self.__nzo_table.pop(nzo_id)
        nzo.deleted = True
        # Jobs that already reached a terminal state keep their status
        if cleanup and nzo.status not in (Status.COMPLETED, Status.FAILED):
            nzo.status = Status.DELETED
        self.__nzo_list.remove(nzo)

        if add_to_history:
            # Create the history DB instance
            history_db = database.HistoryDB()
            # Add the nzo to the database. Only the path, script and time
            # taken is passed; other information is obtained from the nzo
            history_db.add_history_db(nzo, '', '', 0, '', '')
            history_db.close()
        elif cleanup:
            self.cleanup_nzo(nzo, keep_basic, del_files)

        sabnzbd.remove_data(nzo_id, nzo.workpath)
        if save:
            self.save(nzo)
    else:
        nzo_id = None

    # Update the last check time, since history was updated
    # NOTE(review): this runs even when nzo_id was not found — confirm
    # whether callers rely on the unconditional timestamp refresh
    sabnzbd.LAST_HISTORY_UPDATE = time.time()
    return nzo_id
def remove(self, nzo_id, add_to_history=True, save=True, cleanup=True, keep_basic=False, del_files=False):
    """Remove job `nzo_id` from the queue.

    Pops the job from the internal table and list. When `add_to_history`
    is set the job is written to the history database and the history
    watchers are notified; otherwise, when `cleanup` is set, its working
    files are removed. On-disk queue data is always removed for a found
    job.

    Returns the removed nzo_id, or None when the id was not in the queue.
    """
    if nzo_id in self.__nzo_table:
        nzo = self.__nzo_table.pop(nzo_id)
        nzo.deleted = True
        # Jobs that are already gone keep their status
        if cleanup and not nzo.is_gone():
            nzo.status = Status.DELETED
        self.__nzo_list.remove(nzo)

        if add_to_history:
            # Create the history DB instance
            history_db = database.HistoryDB()
            # Add the nzo to the database. Only the path, script and time
            # taken is passed; other information is obtained from the nzo
            history_db.add_history_db(nzo, '', '', 0, '', '')
            history_db.close()
            sabnzbd.history_updated()
        elif cleanup:
            self.cleanup_nzo(nzo, keep_basic, del_files)

        sabnzbd.remove_data(nzo_id, nzo.workpath)
        logging.info('[%s] Removed job %s', caller_name(), nzo.final_name)
        if save:
            self.save(nzo)
    else:
        nzo_id = None
    return nzo_id
def remove(self, nzo_id, add_to_history=True, save=True, cleanup=True, keep_basic=False, del_files=False):
    """Remove job `nzo_id` from the queue.

    Pops the job from the internal table and list. When `add_to_history`
    is set the job is written to the history database and the history
    watchers are notified; otherwise, when `cleanup` is set, its working
    files are removed. On-disk queue data is always removed for a found
    job.

    Returns the removed nzo_id, or None when the id was not in the queue.
    """
    if nzo_id in self.__nzo_table:
        nzo = self.__nzo_table.pop(nzo_id)
        nzo.deleted = True
        # Jobs that are already gone keep their status
        if cleanup and not nzo.is_gone():
            nzo.status = Status.DELETED
        self.__nzo_list.remove(nzo)

        if add_to_history:
            # Create the history DB instance
            history_db = database.HistoryDB()
            # Add the nzo to the database. Only the path, script and time
            # taken is passed; other information is obtained from the nzo
            history_db.add_history_db(nzo, '', '', 0, '', '')
            history_db.close()
            sabnzbd.history_updated()
        elif cleanup:
            self.cleanup_nzo(nzo, keep_basic, del_files)

        sabnzbd.remove_data(nzo_id, nzo.workpath)
        logging.info('Removed job %s', nzo.final_name)
        if save:
            self.save(nzo)
    else:
        nzo_id = None
    return nzo_id
def remove(self, nzo_id, add_to_history=True, save=True, cleanup=True, keep_basic=False, del_files=False):
    """Remove job `nzo_id` from the queue.

    Pops the job from the internal table and list and removes its on-disk
    queue data. When `add_to_history` is set the job is written to the
    history database; otherwise, when `cleanup` is set, its working files
    are removed.

    Returns None (unlike other variants, this one returns no id).
    Does nothing when the id is not in the queue.
    """
    if nzo_id in self.__nzo_table:
        nzo = self.__nzo_table.pop(nzo_id)
        nzo.deleted = True
        self.__nzo_list.remove(nzo)
        sabnzbd.remove_data(nzo_id, nzo.workpath)

        if add_to_history:
            # Create the history DB instance
            history_db = database.get_history_handle()
            # Add the nzo to the database. Only the path, script and time
            # taken is passed; other information is obtained from the nzo
            history_db.add_history_db(nzo, '', '', 0, '', '')
            history_db.close()
        elif cleanup:
            self.cleanup_nzo(nzo, keep_basic, del_files)

        if save:
            self.save(nzo)
def purge_articles(self, articles):
    """Remove the given articles from the in-memory cache and from disk.

    Cached article data is dropped from the table/list and the cache-size
    counter is decreased; any article that was saved to disk (has an
    art_id) also has its data file removed.
    """
    for article in articles:
        if article in self.__article_list:
            self.__article_list.remove(article)
            data = self.__article_table.pop(article)
            self.__cache_size -= len(data)
        # Disk copy is removed regardless of cache membership
        if article.art_id:
            sabnzbd.remove_data(article.art_id, article.nzf.nzo.workpath)
def purge_articles(self, articles):
    """ Remove all saved articles, from memory and disk """
    for article in articles:
        if article in self.__article_list:
            self.__article_list.remove(article)
            data = self.__article_table.pop(article)
            # Give the freed bytes back to the cache budget
            self.free_reserved_space(len(data))
        # Disk copy is removed regardless of cache membership
        if article.art_id:
            sabnzbd.remove_data(article.art_id, article.nzf.nzo.workpath)
def add(self, nzo, save=True, quiet=False):
    """Add an NzbObject to the queue at a position determined by its priority.

    Jobs with no files left are handed straight to post-processing and
    return ''. Otherwise the job is inserted into the priority-ordered
    queue list, optionally saved to disk, a notification is sent (unless
    `quiet` or the job is still fetching), and the nzo_id is returned.
    """
    assert isinstance(nzo, NzbObject)
    if not nzo.nzo_id:
        nzo.nzo_id = sabnzbd.get_new_id('nzo', nzo.workpath, self.__nzo_table)

    # If no files are to be downloaded anymore, send to postproc
    if not nzo.files and not nzo.futuretype:
        sabnzbd.remove_data(nzo.nzo_id, nzo.workpath)
        sabnzbd.proxy_postproc(nzo)
        return ''

    # Reset try_lists
    nzo.reset_try_list()
    self.reset_try_list()

    if nzo.nzo_id:
        nzo.deleted = False
        priority = nzo.priority
        self.__nzo_table[nzo.nzo_id] = nzo
        if priority > HIGH_PRIORITY:
            # Top and repair priority items are added to the top of the queue
            self.__nzo_list.insert(0, nzo)
        elif priority == LOW_PRIORITY:
            self.__nzo_list.append(nzo)
        else:
            # For high priority we need to add the item at the bottom of any
            # other high priority items, above the normal priority items.
            # For normal priority we need to add the item at the bottom of
            # the normal priority items, above the low priority ones.
            added = False
            for pos, queued in enumerate(self.__nzo_list):
                if queued.priority < priority:
                    self.__nzo_list.insert(pos, nzo)
                    added = True
                    break
            if not added:
                # No lower-priority item found (or queue empty):
                # append to the bottom of the queue
                self.__nzo_list.append(nzo)

    if save:
        self.save(nzo)

    if not (quiet or nzo.status in ('Fetching',)):
        growler.send_notification(T('NZB added to queue'), nzo.filename, 'download')

    if cfg.auto_sort():
        self.sort_by_avg_age()
    return nzo.nzo_id
def purge_articles(self, articles):
    """Remove the given articles from the in-memory cache and from disk.

    Cached article data is dropped from the table/list and the cache-size
    counter is decreased; any article that was saved to disk (has an
    art_id) also has its data file removed.
    """
    if sabnzbd.LOG_ALL:
        logging.debug("Purgable articles -> %s", articles)
    for article in articles:
        if article in self.__article_list:
            self.__article_list.remove(article)
            data = self.__article_table.pop(article)
            self.__cache_size -= len(data)
        # Disk copy is removed regardless of cache membership
        if article.art_id:
            sabnzbd.remove_data(article.art_id, article.nzf.nzo.workpath)
def add(self, nzo, save=True, quiet=False):
    """Add an NzbObject to the queue at a position determined by its priority.

    Jobs with no files left are handed straight to post-processing and
    return ''. Otherwise the job is inserted into the priority-ordered
    queue list, optionally saved to disk, a notification is sent (unless
    `quiet` or the job is still fetching), and the nzo_id is returned.
    """
    assert isinstance(nzo, NzbObject)
    if not nzo.nzo_id:
        nzo.nzo_id = sabnzbd.get_new_id('nzo', nzo.workpath, self.__nzo_table)

    # If no files are to be downloaded anymore, send to postproc
    if not nzo.files and not nzo.futuretype:
        sabnzbd.remove_data(nzo.nzo_id, nzo.workpath)
        sabnzbd.proxy_postproc(nzo)
        return ''

    # Reset try_lists
    nzo.reset_try_list()
    self.reset_try_list()

    if nzo.nzo_id:
        nzo.deleted = False
        priority = nzo.priority
        self.__nzo_table[nzo.nzo_id] = nzo
        if priority > HIGH_PRIORITY:
            # Top and repair priority items are added to the top of the queue
            self.__nzo_list.insert(0, nzo)
        elif priority == LOW_PRIORITY:
            self.__nzo_list.append(nzo)
        else:
            # For high priority we need to add the item at the bottom of any
            # other high priority items, above the normal priority items.
            # For normal priority we need to add the item at the bottom of
            # the normal priority items, above the low priority ones.
            added = False
            for pos, queued in enumerate(self.__nzo_list):
                if queued.priority < priority:
                    self.__nzo_list.insert(pos, nzo)
                    added = True
                    break
            if not added:
                # No lower-priority item found (or queue empty):
                # append to the bottom of the queue
                self.__nzo_list.append(nzo)

    if save:
        self.save(nzo)

    if not (quiet or nzo.status in ('Fetching',)):
        growler.send_notification(T('NZB added to queue'), nzo.filename, 'download')

    if cfg.auto_sort():
        self.sort_by_avg_age()
    return nzo.nzo_id
def remove_all(self):
    """Remove every job from the queue, deleting its data and working files."""
    # Snapshot the ids first: the table is mutated while removing
    for nzo_id in list(self.__nzo_table):
        nzo = self.__nzo_table.pop(nzo_id)
        nzo.deleted = True
        self.__nzo_list.remove(nzo)
        sabnzbd.remove_data(nzo_id, nzo.workpath)
        self.cleanup_nzo(nzo)
    self.save()
def purge_articles(self, articles: List[Article]):
    """ Remove all saved articles, from memory and disk """
    logging.debug("Purging %s articles from the cache/disk", len(articles))
    for article in articles:
        if article in self.__article_table:
            # The pop can still raise even after the membership check,
            # presumably because another thread flushes concurrently —
            # hence the extra try/except (TODO confirm threading model)
            try:
                data = self.__article_table.pop(article)
                self.free_reserved_space(len(data))
            except KeyError:
                # Could fail if already deleted by flush_articles or load_data
                logging.debug("Failed to flush %s from cache, probably already deleted or written to disk", article)
        elif article.art_id:
            # Not cached in memory, but saved to disk: remove the data file
            sabnzbd.remove_data(article.art_id, article.nzf.nzo.admin_path)
def remove_all(self, search=None):
    """Remove all jobs from the queue, or only those matching `search`.

    `search` is matched case-insensitively against each job's cleaned
    final name. Returns the list of removed nzo_ids.
    """
    if search:
        search = search.lower()
    removed = []
    # Snapshot the keys: popping from the table while iterating a live
    # keys() view raises RuntimeError on Python 3
    for nzo_id in list(self.__nzo_table.keys()):
        if (not search) or search in self.__nzo_table[nzo_id].final_name_pw_clean.lower():
            nzo = self.__nzo_table.pop(nzo_id)
            nzo.deleted = True
            self.__nzo_list.remove(nzo)
            sabnzbd.remove_data(nzo_id, nzo.workpath)
            self.cleanup_nzo(nzo)
            removed.append(nzo_id)
    self.save()
    return removed
def insert_future(self, future, filename, msgid, data, pp=None, script=None, cat=None, priority=NORMAL_PRIORITY, nzbname=None, nzo_info=None):
    """ Refresh a placeholder nzo with an actual nzo """
    assert isinstance(future, NzbObject)
    if nzo_info is None:
        nzo_info = {}
    nzo_id = future.nzo_id
    if nzo_id in self.__nzo_table:
        try:
            sabnzbd.remove_data(nzo_id, future.workpath)
            logging.info("Regenerating item: %s", nzo_id)

            # Options stored on the placeholder win over the arguments
            r, u, d = future.repair_opts
            if r is not None:
                pp = sabnzbd.opts_to_pp(r, u, d)
            scr = future.script
            if scr is None:
                scr = script
            categ = future.cat
            if categ is None:
                categ = cat
            categ, pp, script, priority = cat_to_opts(categ, pp, script, priority)

            # Remember old priority
            old_prio = future.priority

            try:
                # Re-initialize the placeholder in place with the real NZB data
                future.__init__(filename, msgid, pp, scr, nzb=data, futuretype=False, cat=categ, priority=priority, nzbname=nzbname, nzo_info=nzo_info)
                future.nzo_id = nzo_id
                self.save(future)
            except (ValueError, TypeError):
                # Bad NZB data: drop the job without adding it to history
                self.remove(nzo_id, False)

            # Make sure the priority is changed now that we know the category
            if old_prio != priority:
                future.priority = None
                self.set_priority(future.nzo_id, priority)

            if cfg.auto_sort():
                self.sort_by_avg_age()

            self.reset_try_list()
        except Exception:
            logging.error(Ta('Error while adding %s, removing'), nzo_id)
            logging.info("Traceback: ", exc_info=True)
            self.remove(nzo_id, False)
    else:
        logging.info("Item %s no longer in queue, omitting", nzo_id)
def remove(self, nzo_id, add_to_history=True, save=True, cleanup=True, keep_basic=False, del_files=False):
    """Remove job `nzo_id` from the queue.

    Pops the job from the internal table and list and removes its on-disk
    queue data. When `add_to_history` is set the job is written to the
    history database; otherwise, when `cleanup` is set, its working files
    are removed. Does nothing when the id is not in the queue; always
    returns None.
    """
    if nzo_id in self.__nzo_table:
        nzo = self.__nzo_table.pop(nzo_id)
        nzo.deleted = True
        self.__nzo_list.remove(nzo)
        sabnzbd.remove_data(nzo_id, nzo.workpath)

        if add_to_history:
            # Create the history DB instance
            history_db = database.get_history_handle()
            # Add the nzo to the database. Only the path, script and time
            # taken is passed; other information is obtained from the nzo
            history_db.add_history_db(nzo, '', '', 0, '', '')
            history_db.close()
        elif cleanup:
            self.cleanup_nzo(nzo, keep_basic, del_files)

        if save:
            self.save(nzo)
def nzbfile_parser(raw_data, nzo):
    """Parse NZB file contents (`raw_data`) into the job object `nzo`.

    Fills nzo.meta, nzo.groups, nzo.files/files_table/bytes, the average
    date/stamp and the md5sum used for duplicate detection. Invalid files
    are skipped (with their on-disk data removed) and a warning is logged
    when any were skipped.
    """
    # Strip the DTD so ElementTree does not namespace-qualify the tags
    raw_data = raw_data.replace("http://www.newzbin.com/DTD/2003/nzb", "", 1)
    nzb_tree = xml.etree.ElementTree.fromstring(raw_data)

    # Hash for dupe-checking
    md5sum = hashlib.md5()

    # Average date
    avg_age_sum = 0

    # In case of failing timestamps and failing files
    time_now = time.time()
    skipped_files = 0
    valid_files = 0

    # Parse the header
    # Use "is not None": Element truthiness is based on child count and
    # is deprecated in ElementTree
    if nzb_tree.find("head") is not None:
        for meta in nzb_tree.find("head").iter("meta"):
            meta_type = meta.attrib.get("type")
            if meta_type and meta.text:
                # Meta tags can occur multiple times
                if meta_type not in nzo.meta:
                    nzo.meta[meta_type] = []
                nzo.meta[meta_type].append(meta.text)
    logging.debug("NZB file meta-data = %s", nzo.meta)

    # Parse the files
    for file in nzb_tree.iter("file"):
        # Get subject and date
        file_name = ""
        if file.attrib.get("subject"):
            file_name = file.attrib.get("subject")

        # Don't fail if no date present
        try:
            file_date = datetime.datetime.fromtimestamp(int(file.attrib.get("date")))
            file_timestamp = int(file.attrib.get("date"))
        except Exception:
            file_date = datetime.datetime.fromtimestamp(time_now)
            file_timestamp = time_now

        # Get group
        for group in file.iter("group"):
            if group.text not in nzo.groups:
                nzo.groups.append(group.text)

        # Get segments
        raw_article_db = {}
        file_bytes = 0
        if file.find("segments") is not None:
            for segment in file.find("segments").iter("segment"):
                try:
                    article_id = segment.text
                    segment_size = int(segment.attrib.get("bytes"))
                    partnum = int(segment.attrib.get("number"))

                    # Update hash
                    md5sum.update(utob(article_id))

                    # Duplicate parts?
                    if partnum in raw_article_db:
                        if article_id != raw_article_db[partnum][0]:
                            logging.info(
                                "Duplicate part %s, but different ID-s (%s // %s)",
                                partnum,
                                raw_article_db[partnum][0],
                                article_id,
                            )
                            nzo.increase_bad_articles_counter("duplicate_articles")
                        else:
                            logging.info("Skipping duplicate article (%s)", article_id)
                    elif segment_size <= 0 or segment_size >= 2**23:
                        # Perform sanity check (not negative, 0 or larger than 8MB) on article size
                        # We use this value later to allocate memory in cache and sabyenc
                        logging.info("Skipping article %s due to strange size (%s)", article_id, segment_size)
                        nzo.increase_bad_articles_counter("bad_articles")
                    else:
                        raw_article_db[partnum] = (article_id, segment_size)
                        file_bytes += segment_size
                except Exception:
                    # In case of missing attributes
                    pass

        # Sort the articles by part number, compatible with Python 3.5
        raw_article_db_sorted = [raw_article_db[partnum] for partnum in sorted(raw_article_db)]

        # Create NZF
        nzf = sabnzbd.nzbstuff.NzbFile(file_date, file_name, raw_article_db_sorted, file_bytes, nzo)

        # Check if we already have this exact NZF (see custom eq-checks)
        if nzf in nzo.files:
            logging.info("File %s occured twice in NZB, skipping", nzf.filename)
            continue

        # Add valid NZF's
        if file_name and nzf.valid and nzf.nzf_id:
            logging.info("File %s added to queue", nzf.filename)
            nzo.files.append(nzf)
            nzo.files_table[nzf.nzf_id] = nzf
            nzo.bytes += nzf.bytes
            valid_files += 1
            avg_age_sum += file_timestamp
        else:
            logging.info("Error importing %s, skipping", file_name)
            if nzf.nzf_id:
                sabnzbd.remove_data(nzf.nzf_id, nzo.admin_path)
            skipped_files += 1

    # Final bookkeeping: guard against division by zero when nothing imported
    nr_files = max(1, valid_files)
    nzo.avg_stamp = avg_age_sum / nr_files
    nzo.avg_date = datetime.datetime.fromtimestamp(avg_age_sum / nr_files)
    nzo.md5sum = md5sum.hexdigest()

    if skipped_files:
        logging.warning(T("Failed to import %s files from %s"), skipped_files, nzo.filename)