def __init__(self, status: bool, cookies: "dict | str", **kwargs):
    """Initialise a site reseeder model.

    :param status: Whether this model is enabled in the user settings.
    :param cookies: Either an already-built cookie mapping (dict) or a raw
                    cookie header string, which is parsed via ``cookies_raw2jar``.
                    (Bug fix: the original annotation ``dict or str`` evaluates
                    to just ``dict`` at runtime — replaced with a string
                    annotation that documents both accepted types.)
    :param kwargs: Optional overrides for the enhanced-feature defaults below.
    """
    self.name = type(self).__name__

    # -*- Assign the based information -*-
    self.status = status
    try:
        self.cookies = cookies_raw2jar(cookies) if isinstance(cookies, str) else cookies
    except ValueError:  # Empty raw_cookies will raise ValueError (,see utils.cookie )
        Logger.critical("Empty cookies, Not allowed to active Model \"{}\"".format(self.name))
        self.status = False

    # -*- Assign Enhanced Features : Site -*-
    """
    Enhance Feature for `base` Reseeder.
    Those key-values will be set as default value unless you change it in your user-settings.
    The name of those key should be start with "_" and upper.

    Included:
    1. _EXTEND_DESCR_* : default True, Enable to Enhanced the description of the reseed torrent,
       And its priority is higher than setting.extend_descr_raw[key]["status"].
    2. _ASSIST_ONLY : default False, Enable to only assist the exist same torrent but not to reseed.
    """
    self._EXTEND_DESCR_BEFORE = kwargs.setdefault("extend_descr_before", True)
    self._EXTEND_DESCR_THUMBNAILS = kwargs.setdefault("extend_descr_thumbnails", True)
    self._EXTEND_DESCR_MEDIAINFO = kwargs.setdefault("extend_descr_mediainfo", True)
    self._EXTEND_DESCR_CLONEINFO = kwargs.setdefault("extend_descr_cloneinfo", True)
    self._ASSIST_ONLY = kwargs.setdefault("assist_only", False)
    self._ASSIST_DELAY_TIME = kwargs.setdefault("assist_delay_time", 0)

    # Check Site Online Status
    if self.status:
        Logger.debug("Model \"{}\" is activation now.".format(self.name))
        self.online_check()
    else:
        Logger.info("Model \"{}\" isn't active due to your settings.".format(self.name))
def update_torrent_info_from_rpc_to_db(self, last_id_db=None, force_clean_check=False):
    """
    Sync torrent's id from transmission to database,
    List Start on last check id,and will return the max id as the last check id.

    :param last_id_db: Highest torrent id recorded in the database; fetched
                       from the db when None.
    :param force_clean_check: When True, perform a consistency check between
                              transmission and the `seed_list` table instead of
                              the normal incremental upsert.
    :return: self.last_id_check — the highest transmission torrent id seen so far.
    """
    torrent_list = tc.get_torrents()  # Cache the torrent list
    # Only torrents added after the last processed id need attention.
    new_torrent_list = [t for t in torrent_list if t.id > self.last_id_check]
    if new_torrent_list:
        last_id_now = max([t.id for t in new_torrent_list])
        if last_id_db is None:
            last_id_db = db.get_max_in_seed_list(column_list=db.col_seed_list[2:])
        Logger.debug("Max tid, transmission: {tr}, database: {db}".format(tr=last_id_now, db=last_id_db))
        if not force_clean_check:  # Normal Update
            Logger.info("Some new torrents were add to transmission, Sync to db~")
            for i in new_torrent_list:  # Upsert the new torrent
                db.upsert_seed_list(self._get_torrent_info(i))
            self.last_id_check = last_id_now
        elif last_id_now != last_id_db:  # Check the torrent 's record between tr and db
            # NOTE(review): names are deduplicated here because one logical
            # torrent can exist under several tracker-specific ids — confirm.
            total_num_in_tr = len(set([t.name for t in torrent_list]))
            total_num_in_db = db.exec(sql="SELECT COUNT(*) FROM `seed_list`")[0]
            if int(total_num_in_tr) >= int(total_num_in_db):
                db.cache_torrent_list()
                Logger.info("Upsert the whole torrent id to database.")
                for t in torrent_list:  # Upsert the whole torrent
                    db.upsert_seed_list(self._get_torrent_info(t))
            else:
                # More db rows than live torrents: records are stale, rebuild
                # from scratch via a single recursive call with last_id_db=0.
                Logger.error(
                    "The torrent list didn't match with db-records, Clean the whole \"seed_list\" for safety.")
                db.exec(sql="DELETE FROM `seed_list` WHERE 1")  # Delete all line from seed_list
                self.update_torrent_info_from_rpc_to_db(last_id_db=0)
    else:
        Logger.debug("No new torrent(s), Return with nothing to do.")
    return self.last_id_check
def sort_title_info(raw_title, raw_type, raw_sec_type) -> dict:
    """
    the function (sort_title_info) will sort title to post_data due to clone_torrent's category
    But some may wrong,Due to inappropriate search_title

    :param raw_title: The bracket-tagged torrent title, e.g. "[tag1][tag2]...".
    :param raw_type: Key into the module-level type_dict.
    :param raw_sec_type: Key into type_dict[raw_type]["sec_type"].
    :return: dict mapping each split-field name to its (validated) tag value,
             plus the raw/resolved category fields.
    """
    split = type_dict[raw_type]["split"]
    raw_title_group = re.findall(r"\[[^\]]*\]", raw_title)
    return_dict = {
        "raw_type": raw_type,
        "raw_second_type": raw_sec_type,
        "type": type_dict[raw_type]["cat"],
        "second_type": type_dict[raw_type]["sec_type"][raw_sec_type],
    }

    len_split = len(split)  # reuse the list fetched above instead of re-indexing type_dict
    if len_split != len(raw_title_group):
        Logger.warning("The raw title \"{raw}\" may lack of tag (now: {no},ask: {co}),"
                       "The split may wrong.".format(raw=raw_title, no=len(raw_title_group), co=len_split))
        # Pad with empty tags so every expected field gets a value.
        while len_split > len(raw_title_group):
            raw_title_group.append("")
    raw_title_group.reverse()

    for i in split:
        j = raw_title_group.pop()
        # Bug fix: patterns/replacements were non-raw strings ("\[", "\g<in>"),
        # which are invalid escape sequences — use raw strings.
        title_split = re.sub(r"\[(?P<in>.*)\]", r"\g<in>", j)
        if i in type_dict[raw_type]["limit"]:
            if title_split not in type_dict[raw_type]["limit"][i]:
                # Tag is not in the allowed set for this field: blank it and
                # push the raw tag back for the next field to consume.
                title_split = ""  # type_dict[raw_type]["limit"][i][0]
                raw_title_group.append(j)
        return_dict.update({i: title_split})

    Logger.debug("the title split success.The title dict:{dic}".format(dic=return_dict))
    return return_dict
def session_check(self):
    """Verify the login session by probing /usercp.php for the #info_block element.

    Sets self.status accordingly and returns it.
    """
    page_usercp_bs = self.get_data(url=self.url_host + "/usercp.php", bs=True)
    self.status = bool(page_usercp_bs.find(id="info_block"))
    if self.status:
        Logger.debug("Through authentication in Site: {}".format(self.name))
    else:
        # Bug fix: the original template had no "{mo}" placeholder, so the
        # .format(mo=...) call was a no-op and the site name was never logged.
        Logger.error("Can not verify identity in Site: {mo}. Please Check your Cookies".format(mo=self.name))
    return self.status
def torrent_reseed(self, torrent):
    """Try to reseed (or assist) the given transmission torrent on this site.

    :param torrent: A transmission torrent (or an id resolvable by self._get_torrent_ptn).
    :return: flag from torrent_upload / torrent_download.
    :raises NoMatchPatternError: torrent name matches no known naming pattern.
    :raises NoCloneTorrentError: no usable clone source, or its category is not allowed.
    :raises CannotAssistError: a dupe exists but differs from the pre-reseed torrent.
    """
    name_pattern = self._get_torrent_ptn(torrent)
    if name_pattern:
        key_raw = re.sub(r"[_\-.']", " ", name_pattern.group("search_name"))
        key_with_gp = "{gr} {search_key}".format(search_key=key_raw, gr=name_pattern.group("group"))
        key_with_gp_ep = "{ep} {gp_key}".format(gp_key=key_with_gp, ep=name_pattern.group("episode"))
    else:
        raise NoMatchPatternError("No match pattern. Will Mark \"{}\" As Un-reseed torrent.".format(torrent.name))

    search_tag = self.exist_judge(key_with_gp_ep, torrent.name)
    if search_tag == 0 and not self._ASSIST_ONLY:
        # Non-existent repetition torrent (by local judge plugins), prepare to reseed
        torrent_raw_info_dict = None
        try:
            if self._GET_CLONE_ID_FROM_DB:
                clone_id = db.get_data_clone_id(key=key_raw, site=self.db_column)
                if clone_id in [None, 0]:
                    raise KeyError("The db-record is not return the correct clone id.")
                # Bug fix: "clone_id is not -1" compared identity, not value —
                # CPython int caching made it work by accident. Use "!=".
                elif clone_id != -1:  # -1 means: never re-seed for this site.
                    torrent_raw_info_dict = self.torrent_clone(clone_id)
                    if not torrent_raw_info_dict:
                        raise ValueError("The clone torrent for tid in db-record is not exist.")
                    Logger.debug("Get clone torrent info from \"DataBase\" OK, Which id: {}".format(clone_id))
            else:
                raise KeyError("Set not get clone torrent id from \"Database.\"")
        except (KeyError, ValueError) as e:
            Logger.warning("{}, Try to search the clone info from site, it may not correct".format(e.args[0]))
            clone_id = self._DEFAULT_CLONE_TORRENT if self._DEFAULT_CLONE_TORRENT else 0  # USE Default clone id
            for key in [key_with_gp, key_raw]:  # USE The same group to search firstly and Then non-group tag
                search_id = self.first_tid_in_search_list(key=key)
                if search_id != 0:  # was "is not 0" — identity check on int literal
                    clone_id = search_id  # The search result will cover the default setting.
                    break
            if clone_id != 0:  # was "is not 0"
                torrent_raw_info_dict = self.torrent_clone(clone_id)
                Logger.info("Get clone torrent info from \"Reseed-Site\" OK, Which id: {cid}".format(cid=clone_id))

        if torrent_raw_info_dict:
            if self._ALLOW_CAT:
                pre_reseed_cat = torrent_raw_info_dict.get("type")
                if int(pre_reseed_cat) not in self._ALLOW_CAT:
                    raise NoCloneTorrentError("The clone torrent's category is not allowed.")
            Logger.info("Begin post The torrent {0},which name: {1}".format(torrent.id, torrent.name))
            new_dict = self.date_raw_update(torrent_name_search=name_pattern, raw_info=torrent_raw_info_dict)
            multipart_data = self.data_raw2tuple(torrent, raw_info=new_dict)
            flag = self.torrent_upload(torrent=torrent, data=multipart_data)
        else:
            raise NoCloneTorrentError("Can't find any clone torrent to used.".format(self.name))
    elif search_tag == -1:
        # IF the torrents are present, but not consistent (When FORCE_JUDGE_DUPE_LOC is True)
        raise CannotAssistError("Find dupe, and the exist torrent is not same as pre-reseed torrent. Stop Posting~")
    else:
        # IF the torrent is already released and can be assist
        Logger.warning("Find dupe torrent,which id: {0}, Automatically assist it~".format(search_tag))
        flag = self.torrent_download(tid=search_tag, thanks=False)
    return flag
def search_list(self, key) -> list:
    """Search for *key* through the ptboard JSON API, falling back to the
    parent's HTML search when the original search is forced by settings.
    Returns a list of torrent ids (may be empty)."""
    if self._ORIGINAL_SEARCH:
        return super().search_list(key)

    response = self.page_search(key=key, bs=False)
    found = []
    if response["success"] and response["total"] > 0:
        found = [row["sid"] for row in response["rows"]]
    Logger.debug("USE key: {key} to search through ptboard API, "
                 "With the Return tid-list: {list}".format(key=key, list=found))
    return found
def _del_torrent_with_db(self, rid=None):
    """Delete torrent(both download and reseed) with data from transmission and database.

    :param rid: When given, force-delete only the db row with that id;
                otherwise scan every row and delete sets of torrents that
                have all reached the configured stop conditions.
    """
    Logger.debug("Begin torrent's status check. If reach condition you set, You will get a warning.")
    if rid:
        sql = "SELECT * FROM `seed_list` WHERE `id`={}".format(rid)
    else:
        sql = "SELECT * FROM `seed_list`"
    time_now = time.time()
    for cow in db.exec(sql=sql, r_dict=True, fetch_all=True):
        sid = cow.pop("id")
        s_title = cow.pop("title")
        err = 0
        reseed_list = []
        # Remaining columns are tracker -> torrent id; >0 means "reseeded there".
        torrent_id_list = [tid for tracker, tid in cow.items() if tid > 0]
        for tid in torrent_id_list:
            try:  # Ensure torrent exist
                reseed_list.append(tc.get_torrent(torrent_id=tid))
            except KeyError:  # Mark err when the torrent is not exist.
                err += 1

        delete = False
        if rid:
            delete = True
            Logger.warning("Force Delete. Which name: {}, Affect torrents: {}".format(s_title, torrent_id_list))
        # Bug fix: "err is 0" compared object identity with an int literal;
        # use a value comparison instead.
        elif err == 0:  # All torrents in this cow exist; check their status.
            reseed_stop_list = []
            for t in reseed_list:
                if int(time_now - t.addedDate) > TIME_TORRENT_KEEP_MIN:  # At least seed time
                    if t.status == "stopped":  # Mark the stopped torrent
                        reseed_stop_list.append(t)
                    elif setting.pre_delete_judge(torrent=t):
                        _tid, _tname, _tracker = self._get_torrent_info(t)
                        tc.stop_torrent(t.id)
                        Logger.warning(
                            "Reach Target you set, Torrent({tid}) \"{name}\" in Tracker \"{tracker}\" now stop, "
                            "With Uploaded {si:.2f} MiB, Ratio {ro:.2f} , Keep time {ho:.2f} h."
                            "".format(tid=_tid, name=_tname, tracker=_tracker,
                                      si=t.uploadedEver / 1024 / 1024, ro=t.uploadRatio,
                                      ho=(time.time() - t.startDate) / 60 / 60)
                        )
            if len(reseed_list) == len(reseed_stop_list):
                delete = True
                Logger.info("All torrents of \"{0}\" reach target, Will DELETE them soon.".format(s_title))
        else:
            delete = True
            Logger.error("Some Torrents (\"{name}\", {er} of {co}) may not found, "
                         "Delete all it's records from db".format(name=s_title, er=err, co=len(torrent_id_list)))

        if delete:  # Delete torrents with it's data and db-records
            for tid in torrent_id_list:
                tc.remove_torrent(tid, delete_data=True)
            db.exec(sql="DELETE FROM `seed_list` WHERE `id` = {0}".format(sid))
def search_list(self, key) -> list:
    """Parse the search-result page for *key* and return the torrent ids
    extracted from every download anchor that matches the tid pattern."""
    page = self.page_search(key=key, bs=True)
    found_ids = []
    for anchor in page.find_all("a", href=self._pat_search_torrent_id):
        match = re.search(self._pat_search_torrent_id, anchor["href"])
        found_ids.append(int(match.group(1)))
    Logger.debug(
        "USE key: {key} to search, With the Return tid-list: {list}".
        format(key=key, list=found_ids))
    return found_ids
def session_check(self):
    """Verify the login session by probing /usercp.php for the #info_block element.

    Sets self.status accordingly and returns it.
    """
    page_usercp_bs = self.get_data(url=self.url_host + "/usercp.php", bs=True)
    self.status = bool(page_usercp_bs.find(id="info_block"))
    if self.status:
        Logger.debug("Through authentication in Site: {}".format(self.name))
    else:
        # Bug fix: the original template had no "{mo}" placeholder, so the
        # .format(mo=...) call was a no-op and the site name was never logged.
        Logger.error(
            "Can not verify identity in Site: {mo}. Please Check your Cookies".format(
                mo=self.name))
    return self.status
def _get_torrent_ptn(self, torrent):
    """Match the torrent's name against each pattern in search_ptn and
    return the first re.Match found, or None when nothing matches."""
    resolved = self._get_torrent(torrent)
    name = resolved.name
    for pattern in search_ptn:
        matched = re.search(pattern, name)
        if matched:
            Logger.debug("The search group dict of Torrent: {tn} is {gr}".format(tn=name, gr=matched.groupdict()))
            return matched
    return None
def torrent_clone(self, tid) -> dict:
    """
    Reconstruction from BYRBT Info Clone by Deparsoul version 20170400,thx
    This function will return a dict include (split_title,small_title,imdb_url,db_url,descr,before_torrent_id).

    Returns an empty dict when the detail page title does not match
    (torrent missing or connection error).
    """
    return_dict = {}
    details_bs = self.page_torrent_detail(tid=tid, bs=True)
    # Bug fix: regex patterns/replacements below were non-raw strings
    # containing escapes like \" and \g<raw> — use raw strings throughout.
    title_search = re.search(r"种子详情 \"(?P<title>.*)\" - Powered", str(details_bs.title))
    if title_search:
        title = unescape(title_search.group("title"))
        Logger.info("Get clone torrent's info,id: {tid},title: \"{ti}\"".format(tid=tid, ti=title))
        title_dict = sort_title_info(raw_title=title,
                                     raw_type=details_bs.find("span", id="type").text.strip(),
                                     raw_sec_type=details_bs.find("span", id="sec_type").text.strip())
        return_dict.update(title_dict)

        body = details_bs.body
        imdb_url = dburl = ""
        if body.find(class_="imdbRatingPlugin"):
            imdb_url = 'http://www.imdb.com/title/' + body.find(class_="imdbRatingPlugin")["data-title"]
            Logger.debug("Found imdb link:{link} for this torrent.".format(link=imdb_url))
        if body.find("a", href=re.compile("://movie.douban.com/subject")):
            dburl = body.find("a", href=re.compile("://movie.douban.com/subject")).text
            Logger.debug("Found douban link:{link} for this torrent.".format(link=dburl))

        # Update description
        descr = body.find(id="kdescr")
        # Restore the image link (undo pagespeed rewriting)
        for img_tag in descr.find_all("img"):
            del img_tag["onload"]
            del img_tag["data-pagespeed-url-hash"]
            img_tag["src"] = unquote(re.sub(r"images/(?:(?:\d+x)+|x)(?P<raw>.*)\.pagespeed\.ic.*",
                                            r"images/\g<raw>", img_tag["src"]))
        # Remove unnecessary description (class: autoseed, byrbt_info_clone_ignore, byrbt_info_clone)
        for tag in descr.find_all(class_=pat_tag_pass_by_class):
            tag.extract()
        descr_out = re.search(r"<div id=\"kdescr\">(?P<in>.+)</div>$", str(descr), re.S).group("in")

        return_dict.update({
            "small_descr": body.find(id="subtitle").find("li").text,
            "url": imdb_url,
            "dburl": dburl,
            "descr": descr_out,
            "clone_id": tid
        })
    else:
        Logger.error("Error,this torrent may not exist or ConnectError")
    return return_dict
def session_check(self):
    """Verify the login session via /usercp.php; optionally try to renew
    cookies by re-login when the check fails and auto-renew is enabled.

    Sets self.status and returns it.
    """
    page_usercp_bs = self.get_data(url=self.url_host + "/usercp.php", bs=True)
    self.status = bool(page_usercp_bs.find(id="info_block"))
    if not self.status and self._AUTO_RENEW_COOKIES:
        Logger.info('Update your cookies by login method in Site: {}'.format(self.name))
        # NOTE(review): update_cookies() presumably refreshes self.status on
        # success — confirm, otherwise the error below logs even after renewal.
        self.update_cookies()
    if self.status:
        Logger.debug("Through authentication in Site: {}".format(self.name))
    else:
        # Bug fix: the original template had no "{mo}" placeholder, so the
        # .format(mo=...) call was a no-op and the site name was never logged.
        Logger.error(
            "Can not verify identity in Site: {mo}. Please Check your Cookies".format(
                mo=self.name))
    return self.status
def main():
    """Run the endless Autoseed loop: sync transmission with the database,
    judge reseeds, then sleep (shorter naps during configured busy hours)."""
    rootLogger.info(
        "Autoseed start~,will check database record at the First time.")
    round_no = 0
    while True:
        controller.update_torrent_info_from_rpc_to_db()  # sync table with rpc
        controller.reseeders_update()  # main reseed judgement pass
        current_hour = int(time.strftime("%H", time.localtime()))
        if setting.busy_start_hour <= current_hour < setting.busy_end_hour:
            sleep_time = setting.sleep_busy_time
        else:
            sleep_time = setting.sleep_free_time
        rootLogger.debug("Check time {ti} OK, Reach check id {cid},"
                         " Will Sleep for {slt} seconds.".format(
                             ti=round_no, cid=controller.last_id_check,
                             slt=sleep_time))
        round_no += 1
        time.sleep(sleep_time)
def sort_title_info(raw_title, raw_type, raw_sec_type) -> dict:
    """
    the function (sort_title_info) will sort title to post_data due to clone_torrent's category
    But some may wrong,Due to inappropriate search_title

    :param raw_title: The bracket-tagged torrent title, e.g. "[tag1][tag2]...".
    :param raw_type: Key into the module-level type_dict.
    :param raw_sec_type: Key into type_dict[raw_type]["sec_type"].
    :return: dict mapping each split-field name to its (validated) tag value,
             plus the raw/resolved category fields.
    """
    split = type_dict[raw_type]["split"]
    raw_title_group = re.findall(r"\[[^\]]*\]", raw_title)
    return_dict = {
        "raw_type": raw_type,
        "raw_second_type": raw_sec_type,
        "type": type_dict[raw_type]["cat"],
        "second_type": type_dict[raw_type]["sec_type"][raw_sec_type],
    }

    len_split = len(split)  # reuse the list fetched above instead of re-indexing type_dict
    if len_split != len(raw_title_group):
        Logger.warning(
            "The raw title \"{raw}\" may lack of tag (now: {no},ask: {co}),"
            "The split may wrong.".format(raw=raw_title,
                                          no=len(raw_title_group),
                                          co=len_split))
        # Pad with empty tags so every expected field gets a value.
        while len_split > len(raw_title_group):
            raw_title_group.append("")
    raw_title_group.reverse()

    for i in split:
        j = raw_title_group.pop()
        # Bug fix: patterns/replacements were non-raw strings ("\[", "\g<in>"),
        # which are invalid escape sequences — use raw strings.
        title_split = re.sub(r"\[(?P<in>.*)\]", r"\g<in>", j)
        if i in type_dict[raw_type]["limit"]:
            if title_split not in type_dict[raw_type]["limit"][i]:
                # Tag not allowed for this field: blank it and push the raw
                # tag back for the next field to consume.
                title_split = ""  # type_dict[raw_type]["limit"][i][0]
                raw_title_group.append(j)
        return_dict.update({i: title_split})

    Logger.debug(
        "the title split success.The title dict:{dic}".format(dic=return_dict))
    return return_dict
def search_list(self, key) -> list:
    """Return torrent ids scraped from the search page for *key*."""
    result_page = self.page_search(key=key, bs=True)
    anchors = result_page.find_all("a", href=self._pat_search_torrent_id)

    def _extract_tid(anchor):
        # The href is guaranteed to match because find_all filtered on it.
        return int(re.search(self._pat_search_torrent_id, anchor["href"]).group(1))

    tid_list = list(map(_extract_tid, anchors))
    Logger.debug("USE key: {key} to search, With the Return tid-list: {list}".format(key=key, list=tid_list))
    return tid_list
def _del_torrent_with_db(self):
    """Delete torrent(both download and reseed) with data from transmission and database

    Groups live transmission torrents by name; stops torrents that hit a
    tracker error or the user-configured pre-delete target, and once every
    torrent of a name is stopped, removes them (with data) and their db row.
    """
    Logger.debug(
        "Begin torrent's status check. If reach condition you set, You will get a warning."
    )
    time_now = time.time()
    t_all_list = tc.get_torrents()
    # One logical torrent may be seeded under several tracker-specific entries
    # sharing the same name — handle them as a group.
    t_name_list = set(map(lambda x: x.name, t_all_list))
    for t_name in t_name_list:
        t_list = list(filter(lambda x: x.name == t_name, t_all_list))
        t_list_len = len(t_list)
        t_list_stop = 0  # count of entries already stopped in this group
        for t in t_list:
            if t.status == "stopped":
                t_list_stop += 1
                continue
            _tid, _tname, _tracker = self._get_torrent_info(t)
            # 0 means OK, 1 means tracker warning, 2 means tracker error, 3 means local error.
            if t.error > 1:
                tc.stop_torrent(t.id)
                Logger.warning(
                    "Torrent Error, Torrent({tid}) \"{name}\" in Tracker \"{tracker}\" now stop, "
                    "Error code : {code} {msg}."
                    "With Uploaded {si:.2f} MiB, Ratio {ro:.2f} , Keep time {ho:.2f} h."
                    "".format(tid=_tid, name=_tname, tracker=_tracker,
                              si=t.uploadedEver / 1024 / 1024,
                              ro=t.uploadRatio,
                              ho=(time.time() - t.startDate) / 60 / 60,
                              code=t.error, msg=t.errorString))
            # NOTE(review): a torrent stopped in this same pass is not counted
            # in t_list_stop until the next run — deletion is deferred one cycle.
            if int(time_now - t.addedDate
                   ) > TIME_TORRENT_KEEP_MIN:  # At least seed time
                if setting.pre_delete_judge(torrent=t):
                    tc.stop_torrent(t.id)
                    Logger.warning(
                        "Reach Target you set, Torrent({tid}) \"{name}\" in Tracker \"{tracker}\" now stop, "
                        "With Uploaded {si:.2f} MiB, Ratio {ro:.2f} , Keep time {ho:.2f} h."
                        "".format(tid=_tid, name=_tname, tracker=_tracker,
                                  si=t.uploadedEver / 1024 / 1024,
                                  ro=t.uploadRatio,
                                  ho=(time.time() - t.startDate) / 60 / 60))
        if t_list_stop == t_list_len:  # Delete torrents with it's data and db-records
            Logger.info(
                "All torrents of \"{0}\" reach target, Will DELETE them soon."
                .format(t_name))
            tid_list = map(lambda x: x.id, t_list)
            for tid in tid_list:
                tc.remove_torrent(tid, delete_data=True)
            db.exec("DELETE FROM `seed_list` WHERE `title` = %s", (t_name, ))
def torrent_reseed(self, torrent):
    """Try to reseed (or assist) the given transmission torrent on this site.

    :param torrent: A transmission torrent (or an id resolvable by self._get_torrent_ptn).
    :return: flag from torrent_upload / torrent_download.
    :raises NoMatchPatternError: torrent name matches no known naming pattern.
    :raises NoCloneTorrentError: no usable clone source, or its category is not allowed.
    :raises CannotAssistError: a dupe exists but differs from the pre-reseed torrent.
    """
    name_pattern = self._get_torrent_ptn(torrent)
    if name_pattern:
        key_raw = re.sub(r"[_\-.']", " ", name_pattern.group("search_name"))
        key_with_gp = "{gr} {search_key}".format(
            search_key=key_raw, gr=name_pattern.group("group"))
        key_with_gp_ep = "{ep} {gp_key}".format(
            gp_key=key_with_gp, ep=name_pattern.group("episode"))
    else:
        raise NoMatchPatternError(
            "No match pattern. Will Mark \"{}\" As Un-reseed torrent.".
            format(torrent.name))

    search_tag = self.exist_judge(key_with_gp_ep, torrent.name)
    if search_tag == 0 and not self._ASSIST_ONLY:
        # Non-existent repetition torrent (by local judge plugins), prepare to reseed
        torrent_raw_info_dict = None
        try:
            if self._GET_CLONE_ID_FROM_DB:
                clone_id = db.get_data_clone_id(key=key_raw, site=self.db_column)
                if clone_id in [None, 0]:
                    raise KeyError(
                        "The db-record is not return the correct clone id.")
                # Bug fix: "clone_id is not -1" compared identity, not value —
                # CPython int caching made it work by accident. Use "!=".
                elif clone_id != -1:  # -1 means: never re-seed for this site.
                    torrent_raw_info_dict = self.torrent_clone(clone_id)
                    if not torrent_raw_info_dict:
                        raise ValueError(
                            "The clone torrent for tid in db-record is not exist.")
                    Logger.debug(
                        "Get clone torrent info from \"DataBase\" OK, Which id: {}"
                        .format(clone_id))
            else:
                raise KeyError(
                    "Set not get clone torrent id from \"Database.\"")
        except (KeyError, ValueError) as e:
            Logger.warning(
                "{}, Try to search the clone info from site, it may not correct"
                .format(e.args[0]))
            clone_id = self._DEFAULT_CLONE_TORRENT if self._DEFAULT_CLONE_TORRENT else 0  # USE Default clone id
            # USE The same group to search firstly and Then non-group tag
            for key in [key_with_gp, key_raw]:
                search_id = self.first_tid_in_search_list(key=key)
                if search_id != 0:  # was "is not 0" — identity check on int literal
                    clone_id = search_id  # The search result will cover the default setting.
                    break
            if clone_id != 0:  # was "is not 0"
                torrent_raw_info_dict = self.torrent_clone(clone_id)
                Logger.info(
                    "Get clone torrent info from \"Reseed-Site\" OK, Which id: {cid}"
                    .format(cid=clone_id))

        if torrent_raw_info_dict:
            if self._ALLOW_CAT:
                pre_reseed_cat = torrent_raw_info_dict.get("type")
                if int(pre_reseed_cat) not in self._ALLOW_CAT:
                    raise NoCloneTorrentError(
                        "The clone torrent's category is not allowed.")
            Logger.info(
                "Begin post The torrent {0},which name: {1}".format(
                    torrent.id, torrent.name))
            new_dict = self.date_raw_update(
                torrent_name_search=name_pattern, raw_info=torrent_raw_info_dict)
            multipart_data = self.data_raw2tuple(torrent, raw_info=new_dict)
            flag = self.torrent_upload(torrent=torrent, data=multipart_data)
        else:
            raise NoCloneTorrentError(
                "Can't find any clone torrent to used.".format(self.name))
    elif search_tag == -1:
        # IF the torrents are present, but not consistent (When FORCE_JUDGE_DUPE_LOC is True)
        raise CannotAssistError(
            "Find dupe, and the exist torrent is not same as pre-reseed torrent. Stop Posting~"
        )
    else:
        # IF the torrent is already released and can be assist
        Logger.warning(
            "Find dupe torrent,which id: {0}, Automatically assist it~".
            format(search_tag))
        flag = self.torrent_download(tid=search_tag, thanks=False)
    return flag
def torrent_clone(self, tid) -> dict:
    """
    Reconstruction from BYRBT Info Clone by Deparsoul version 20170400,thx
    This function will return a dict include (split_title,small_title,imdb_url,db_url,descr,before_torrent_id).

    Returns an empty dict when the detail page title does not match
    (torrent missing or connection error).
    """
    return_dict = {}
    details_bs = self.page_torrent_detail(tid=tid, bs=True)
    # Bug fix: regex patterns/replacements below were non-raw strings
    # containing escapes like \" and \g<raw> — use raw strings.
    title_search = re.search(r"种子详情 \"(?P<title>.*)\" - Powered",
                             str(details_bs.title))
    if title_search:
        title = unescape(title_search.group("title"))
        Logger.info(
            "Get clone torrent's info,id: {tid},title: \"{ti}\"".format(
                tid=tid, ti=title))
        title_dict = sort_title_info(
            raw_title=title,
            raw_type=details_bs.find("span", id="type").text.strip(),
            raw_sec_type=details_bs.find("span", id="sec_type").text.strip())
        return_dict.update(title_dict)

        body = details_bs.body
        imdb_url = dburl = ""
        if body.find(class_="imdbRatingPlugin"):
            imdb_url = 'http://www.imdb.com/title/' + body.find(
                class_="imdbRatingPlugin")["data-title"]
            Logger.debug("Found imdb link:{link} for this torrent.".format(
                link=imdb_url))
        if body.find("a", href=re.compile("://movie.douban.com/subject")):
            dburl = body.find(
                "a", href=re.compile("://movie.douban.com/subject")).text
            Logger.debug(
                "Found douban link:{link} for this torrent.".format(
                    link=dburl))

        # Update description
        descr = body.find(id="kdescr")
        # Restore the image link (undo pagespeed rewriting)
        for img_tag in descr.find_all("img"):
            del img_tag["onload"]
            del img_tag["data-pagespeed-url-hash"]
            img_tag["src"] = unquote(
                re.sub(
                    r"images/(?:(?:\d+x)+|x)(?P<raw>.*)\.pagespeed\.ic.*",
                    r"images/\g<raw>", img_tag["src"]))
        # Remove unnecessary description (class: autoseed, byrbt_info_clone_ignore, byrbt_info_clone)
        for tag in descr.find_all(class_=pat_tag_pass_by_class):
            tag.extract()
        descr_out = re.search(r"<div id=\"kdescr\">(?P<in>.+)</div>$",
                              str(descr), re.S).group("in")

        return_dict.update({
            "small_descr": body.find(id="subtitle").find("li").text,
            "url": imdb_url,
            "dburl": dburl,
            "descr": descr_out,
            "clone_id": tid
        })
    else:
        Logger.error("Error,this torrent may not exist or ConnectError")
    return return_dict
def _online_check(self):
    """Ask every active reseeder object to refresh its online status."""
    Logger.debug("The reseeder online check now start.")
    for reseeder in self.active_obj_list:
        reseeder.online_check()
def _shut_unreseeder_db():
    """Flag every inactive tracker column in `seed_list` as 'do not reseed' (-1)."""
    Logger.debug("Set un-reseeder's column into -1.")
    update_template = "UPDATE `seed_list` SET `{cow}` = -1 WHERE `{cow}` = 0 "
    for inactive_tracker in unactive_tracker_list:
        db.exec(sql=update_template.format(cow=inactive_tracker))