def changePosterFilterByStatus(filter):
    """Toggle a show-status value in the poster filter list and persist it.

    :param filter: one of 'paused', 'continuing', 'ended'; present values are
                   removed, absent values are added. Any other value is
                   ignored, but the config is still saved and the resulting
                   filter list is still logged (matching the original flow,
                   where the invalid-value branch was a ``pass``).
    """
    # NOTE: the parameter shadows the builtin `filter`, but renaming it would
    # break keyword callers, so it is kept.
    if filter in ('paused', 'continuing', 'ended'):
        # Toggle: remove when already selected, add when not.
        if filter in sickbeard.POSTER_FILTERBY:
            sickbeard.POSTER_FILTERBY.remove(str(filter))
        else:
            sickbeard.POSTER_FILTERBY.append(str(filter))
    sickbeard.save_config()
    # Level-first argument order, consistent with every other logger.log call
    # in this module (the original passed (message, level) here).
    logger.log(logger.DEBUG, ','.join(sickbeard.POSTER_FILTERBY))
def mass_upsert(self, table_name, query_list, log_transaction=False):
    # type: (str, List[Union[tuple, list]], bool) -> None
    """
    Upsert multiple (values, control) pairs into a table.

    :param table_name: name of table to upsert
    :param query_list: list of (values, control) pairs, each forwarded to self.upsert
    :param log_transaction: when True, log at the louder DEBUG level instead of logger.DB
    :return: None
    """
    log_level = (logger.DB, logger.DEBUG)[log_transaction]
    for values, control in query_list:
        # Include the database filename in the log line: the original format
        # string read "(unknown): ..." and silently dropped the `filename`
        # keyword it was being passed (str.format ignores unused kwargs).
        # noinspection PyUnresolvedReferences
        logger.log(log_level, _("{filename}: {query} [{control}]").format(filename=self.filename, query=values, control=control))
        self.upsert(table_name, values, control)
def _error_log_helper(self, exception, severity, local_variables, attempts, called_method):
    """Log a database error in a consistent format.

    Only attempt 0 and attempt self.MAX_ATTEMPTS are reported, so errors that
    are retried and then recover do not flood the log.

    :param exception: the exception raised by the failed query
    :param severity: logger level to emit at; logger.ERROR marks a fatal failure
    :param local_variables: locals() captured by the caller, dumped to help debugging
    :param attempts: zero-based retry counter from the calling retry loop
    :param called_method: name of the db method that failed (e.g. "db.action")
    """
    if attempts in (0, self.MAX_ATTEMPTS):  # Only log the first try and the final failure
        # "Fatal" prefix only when the caller has given up (severity == ERROR).
        prefix = ("Database", "Fatal")[severity == logger.ERROR]
        # noinspection PyUnresolvedReferences
        logger.log(
            severity,
            _("{exception_severity} error executing query with {method} in database {db_location}: ").format(
                db_location=self.full_path, method=called_method, exception_severity=prefix
            )
            + str(exception),
        )
        # Lets print out all of the arguments so we can debug this better
        # noinspection PyUnresolvedReferences
        logger.info(_("If this happened in cache.db, you can safely stop SickChill, and delete the cache.db file without losing any data"))
        # noinspection PyUnresolvedReferences
        logger.info(
            _("Here is the arguments that were passed to this function (This is what the developers need to know): {local_variables}").format(
                local_variables=local_variables
            )
        )
def db_safe(self):
    """Check whether the remote update's database version allows a safe update.

    Fix: the original was declared ``def db_safe():`` with no ``self``, yet the
    body calls ``self.compare_db_version()`` — invoking it as a method failed.

    :return: True when the comparison result is 'equal' (same DB schema),
             False for 'upgrade', 'downgrade', unknown results, or any error.
    """
    # Map each comparison outcome to a log level and a human-readable reason.
    message = {
        'equal': {
            'type': logger.DEBUG,
            'text': "We can proceed with the update. New update has same DB version"},
        'upgrade': {
            'type': logger.WARNING,
            'text': "We can't proceed with the update. New update has a new DB version. Please manually update"},
        'downgrade': {
            'type': logger.ERROR,
            'text': "We can't proceed with the update. New update has a old DB version. It's not possible to downgrade"},
    }
    try:
        result = self.compare_db_version()
        if result in message:
            logger.log(message[result]['type'], message[result]['text'])  # unpack the result message into a log entry
        else:
            logger.warning("We can't proceed with the update. Unable to check remote DB version. Error: {0}".format(result))
        return result in ['equal']  # add future True results to the list
    except Exception as error:
        # compare_db_version can fail for many reasons (network, parse, ...);
        # treat any failure as "not safe to update".
        logger.warning("We can't proceed with the update. Unable to compare DB version. Error: {0}".format(repr(error)))
        return False
def mass_action(self, query_list=None, log_transaction=False, fetchall=False):
    # type: (list, bool, bool) -> List[sqlite3.Row]
    """
    Execute multiple queries, committed as one transaction with lock-retry.

    Each entry in query_list is a 1-tuple ``(sql,)`` or a 2-tuple
    ``(sql, args)``. On a lock/denied error the whole batch is rolled back and
    retried (up to self.MAX_ATTEMPTS, sleeping 1s between tries); any other
    error, or the final failed attempt, is re-raised.

    :param query_list: list of queries
    :param log_transaction: Boolean to wrap all in one transaction
    :param fetchall: Boolean, when using a select query force returning all results
    :return: list of results
    """
    # noinspection PyUnresolvedReferences
    assert hasattr(query_list, "__iter__"), _("You passed a non-iterable to mass_action: {0!r}").format(query_list)

    # remove None types
    query_list = [i for i in query_list if i]

    sql_results = []
    attempt = 0

    with db_locks[self.filename]:
        self._set_row_factory()
        # <= so the final attempt (attempt == MAX_ATTEMPTS) still runs; the
        # severity logic below turns that attempt's lock errors into a raise.
        while attempt <= self.MAX_ATTEMPTS:
            try:
                log_level = (logger.DB, logger.DEBUG)[log_transaction]
                for qu in query_list:
                    if len(qu) == 1:
                        # query without bound arguments
                        # noinspection PyUnresolvedReferences
                        logger.log(log_level, _("(unknown): {query}").format(filename=self.filename, query=qu[0]))
                        sql_results.append(self._execute(qu[0], fetchall=fetchall))
                    elif len(qu) > 1:
                        # query with bound arguments in qu[1]
                        # noinspection PyUnresolvedReferences
                        logger.log(log_level, _("(unknown): {query} with args {args}").format(filename=self.filename, query=qu[0], args=qu[1]))
                        sql_results.append(self._execute(qu[0], qu[1], fetchall=fetchall))
                self.connection.commit()
                # noinspection PyUnresolvedReferences
                logger.log(log_level, _("Transaction with {count:d} of queries executed successfully").format(count=len(query_list)))
                # finished
                break
            except (sqlite3.OperationalError, sqlite3.DatabaseError) as e:
                sql_results = []  # Reset results because of rollback
                if self.connection:
                    self.connection.rollback()
                # lock/denied errors are WARNING while retries remain; anything
                # else, or the final attempt, escalates to ERROR and re-raises
                severity = (logger.ERROR, logger.WARNING)[self._is_locked_or_denied(e) and attempt < self.MAX_ATTEMPTS]
                self._error_log_helper(e, severity, locals(), attempt, "db.mass_action")
                if severity == logger.ERROR:
                    raise
                time.sleep(1)
            except Exception as e:
                sql_results = []
                if self.connection:
                    self.connection.rollback()
                self._error_log_helper(e, logger.ERROR, locals(), attempt, "db.mass_action")
                raise
            attempt += 1

    return sql_results
def _log(self, message, level=logging.INFO):
    """Write ``message`` to the regular logfile and buffer it for the PP script log."""
    logger.log(level, message)
    self.log = self.log + message + "\n"
def action(self, query, args=None, fetchall=False, fetchone=False):
    """
    Execute single query, with commit and lock-retry.

    :param query: Query string
    :param args: Arguments to query string
    :param fetchall: Boolean to indicate all results must be fetched
    :param fetchone: Boolean to indicate one result must be fetched (to walk results for instance)
    :return: query results
    """
    if query is None:
        return

    # noinspection PyUnresolvedReferences
    assert not (fetchall and fetchone), _("Cannot fetch all and only one at the same time!")

    sql_results = []
    attempt = 0

    with db_locks[self.filename]:
        self._set_row_factory()
        # Fix: use <= (matching mass_action) so the final attempt
        # (attempt == MAX_ATTEMPTS) actually runs. With the old `<`, the
        # severity check `attempt < self.MAX_ATTEMPTS` was always true, so the
        # loop could exhaust all lock retries and silently return [] without
        # ever raising, and _error_log_helper (which logs only attempts 0 and
        # MAX_ATTEMPTS) never logged the final failure.
        while attempt <= self.MAX_ATTEMPTS:
            try:
                if settings.DBDEBUG:
                    if args is None:
                        logger.log(logger.DB, self.filename + ": " + query)
                    else:
                        # Fix: the old format string read "(unknown): ..." and
                        # silently dropped the `filename` kwarg; include it so
                        # this matches the args-less branch above.
                        logger.log(
                            logger.DB,
                            "{filename}: {query} with args {args}".format(
                                filename=self.filename, query=query, args=args))
                sql_results = self._execute(query, args, fetchall=fetchall, fetchone=fetchone)
                self.connection.commit()
                # get out of the connection attempt loop since we were successful
                break
            except (sqlite3.OperationalError, sqlite3.DatabaseError) as e:
                sql_results = []  # Reset results because of rollback
                if self.connection:
                    self.connection.rollback()
                # lock/denied errors are WARNING while retries remain; anything
                # else, or the final attempt, escalates to ERROR and re-raises
                severity = (
                    logger.ERROR,
                    logger.WARNING)[self._is_locked_or_denied(e) and attempt < self.MAX_ATTEMPTS]
                self._error_log_helper(e, severity, locals(), attempt, "db.action")
                if severity == logger.ERROR:
                    raise
                time.sleep(1)
            except Exception as e:
                sql_results = []
                if self.connection:
                    self.connection.rollback()
                self._error_log_helper(e, logger.ERROR, locals(), attempt, "db.action")
                raise
            attempt += 1

    return sql_results
def log_helper(message, level=logging.INFO):
    """Log ``message`` at ``level`` and return it with a trailing newline for buffering."""
    logger.log(level, message)
    return "{0}\n".format(message)