def show_all_values(obj, select, session, where=None):
    """Fetch rows of *obj* matching *where* and resolve referenced rows.

    For every column named in *select*, each returned row gets two extra
    attributes: ``<column>_refer`` (the referenced row object) and
    ``<column>_ref_label`` (the referenced row's label attribute, named by
    the column's ``referrer_name`` metadata).

    :param obj: mapped class produced by ``get_class``
    :param select: columns whose references should be resolved (list/tuple)
    :param session: SQLAlchemy session used for all queries
    :param where: equality filters passed to ``filter_by`` (must be a dict)
    :raises DatabaseError: if *where* is not a dict or *select* is not a
        list/tuple
    :return: list of result rows with the extra reference attributes set
    """
    warnings.warn('deprecated', DeprecationWarning)
    # TODO Refactoring, optimization
    # TODO Realize it like method of `bill_classes` class
    if not isinstance(where, dict):
        # NOTE: the default where=None also lands here - callers must pass a
        # dict. Message fixed to report the type actually received.
        raise DatabaseError(
            'WHERE statement must be dict, got {}'.format(type(where)))
    if not isinstance(select, (tuple, list)):
        raise DatabaseError(
            'SELECT statement must be list or tuple, got {}'.format(type(select)))

    # Map each selected column to the mapped class of the object it refers to.
    select_objects = {
        column: get_class(class_id=getattr(obj, column).referrer)
        for column in select
    }

    result_list = session.query(obj).filter_by(**where).all()
    for result in result_list:
        for column, ref_class in select_objects.items():
            # Skip NULL / falsy foreign keys - nothing to resolve.
            if not getattr(result, column):
                continue
            ref = session.query(ref_class).filter_by(
                i_id=getattr(result, column)).one()
            setattr(result, column + '_refer', ref)
            # The label attribute name comes from the column's metadata.
            setattr(result, column + '_ref_label',
                    getattr(ref, getattr(obj, column).referrer_name))
    return result_list
def rollback(self, savepoint=None):
    """
    Function: rollback
    ------------------
    Rolls back a database transaction, if currently in one. If a savepoint
    is named, rolls back to the named savepoint, otherwise, does a normal
    rollback which will remove all savepoints.

    savepoint: The savepoint to rollback to, if specified.
    """
    if self.db.in_transaction:
        # Roll back to the named savepoint. All savepoints created after this
        # savepoint are deleted.
        if savepoint and savepoint in self.savepoints:
            # If rolling back a savepoint failed, then a commit must have
            # occurred at some point. Rollback as far as we can just to be safe.
            try:
                self.execute_sql("ROLLBACK TO %s" % savepoint)
            except mysql.connector.errors.Error:
                # Narrowed from a bare `except:` so that non-database
                # exceptions (KeyboardInterrupt, SystemExit) still propagate,
                # consistent with the handler in the else-branch below.
                self.db.rollback()
            # Keep the named savepoint itself; drop everything created after it.
            self.savepoints = self.savepoints[0:self.savepoints.index(savepoint) + 1]
        else:
            try:
                self.db.rollback()
            except mysql.connector.errors.Error as e:
                raise DatabaseError(e)
            self.savepoints = []
def commit(self):
    """
    Function: commit
    ----------------
    Commits the current transaction and discards every recorded savepoint.
    Database-level failures are re-raised as DatabaseError.
    """
    try:
        self.db.commit()
    except mysql.connector.errors.Error as db_err:
        raise DatabaseError(db_err)
    self.savepoints = []
def start_transaction(self):
    """
    Function: start_transaction
    ---------------------------
    Starts a database transaction, if not already in one.
    """
    # Flush any pending implicit work before (possibly) opening a transaction.
    self.db.commit()
    if self.db.in_transaction:
        return
    try:
        self.db.start_transaction()
    except mysql.connector.errors.Error as db_err:
        raise DatabaseError(db_err)
def add_message(self, path, tags=None, afterwards=None):
    """
    Adds a file to the notmuch index.

    :param path: path to the file
    :type path: str
    :param tags: tagstrings to add (defaults to no tags)
    :type tags: list of str or None
    :param afterwards: callback to trigger after adding
    :type afterwards: callable or None
    """
    # Fix: the original used a mutable default argument (tags=[]), which is
    # shared across calls; None is translated to a fresh empty list instead.
    tags = [] if tags is None else tags
    if self.ro:
        raise DatabaseROError()
    if not is_subdir_of(path, self.path):
        msg = 'message path %s ' % path
        msg += ' is not below notmuchs '
        msg += 'root path (%s)' % self.path
        raise DatabaseError(msg)
    else:
        # Queue the add; it is applied to the index on the next flush().
        self.writequeue.append(('add', afterwards, path, tags))
def get_lots_from_db(cursor):
    """Get information about selected lots from database.

    :param cursor: database cursor
    :type cursor: <class 'MySQLdb.cursors.DictCursor'>
    :raises DatabaseError: if there are no selected lots for creation in
        database
    :rtype: tuple of dicts
    :return: lots, e.g. ({'id': 5, 'title': '', 'category': '',
        'subcategory': '', 'image': '5.jpg', 'action': 1}, {...}, ...)
    """
    query = """SELECT * FROM newauction WHERE action=1"""
    cursor.execute(query)
    lots = cursor.fetchall()
    # An empty result set means nothing is marked for creation (action=1).
    if not lots:
        raise DatabaseError('You have no selected lots for creation!')
    print(f'{len(lots)} lots to go.')
    return lots
def get_date_from_db(cursor):
    """Get auction start time from database and convert it to the view -
    2021-05-12 23:58:00.

    :param cursor: database cursor
    :type cursor: <class 'MySQLdb.cursors.DictCursor'>
    :raises DatabaseError: if auction start time has been passed
    :rtype: <class 'datetime.datetime'>
    :return: auction start time, e.g. 2021-05-12 23:58:00
    """
    cursor.execute("""SELECT * FROM start_date WHERE id=1""")
    row = cursor.fetchone()
    # Assemble a datetime from the individual column values of the row.
    convert_date = datetime(
        row['year'], row['month'], row['day'], row['hour'], row['minute'])
    if convert_date < datetime.now():
        raise DatabaseError('Auction start time has been passed!')
    print(f'Start date is {convert_date}')
    return convert_date
def get_db_connection(self, timeout=None, close=True):
    """
    Function: get_db_connection
    ---------------------------
    Get a new database connection with a specified timeout (defaults to
    CONNECTION_TIMEOUT specified in the CONFIG file). Closes the old
    connection if there was one.

    timeout: The connection timeout.
    close: Whether or not to close the old database connection beforehand.
           Should set to False if a timeout occurred just before the call
           to this function.

    returns: The existing connection object (self.db) when it can be reused,
             otherwise this manager object (self) after reconnecting.
             NOTE(review): the two code paths return different objects
             (self.db vs self) - confirm which one callers expect.
    """
    if self.db and self.db.is_connected():
        # If timeout isn't specified, check if we're already at the default.
        if timeout is None and self.timeout == CONNECTION_TIMEOUT:
            return self.db
        # If the timeout is the same as before, then don't change anything.
        if timeout is not None and timeout == self.timeout:
            return self.db
    # Close any old connections and make another one with the new setting.
    if close:
        self.close_db_connection()
    # NOTE(review): `timeout or CONNECTION_TIMEOUT` also replaces an explicit
    # timeout of 0 with the default - confirm 0 is not a meaningful value.
    self.timeout = timeout or CONNECTION_TIMEOUT
    log("New timeout: %d" % self.timeout)
    try:
        # Reconnect with the new timeout; credentials/host come from the
        # module-level CONFIG constants (LOGIN, HOST, PORT).
        self.db = mysql.connector.connect(user=self.user,
                                          password=LOGIN[self.user],
                                          host=HOST,
                                          database=self.database,
                                          port=PORT,
                                          connection_timeout=self.timeout,
                                          autocommit=False)
        # Buffered cursor so result sets are fetched eagerly on execute.
        self.cursor = self.db.cursor(buffered=True)
    except mysql.connector.errors.Error as e:
        raise DatabaseError(e)
    return self
def flush(self):
    """
    write out all queued write-commands in order, each one in a separate
    :meth:`atomic <notmuch.Database.begin_atomic>` transaction. If this
    fails the current action is rolled back, stays in the write queue and
    an exception is raised. You are responsible to retry flushing at a
    later time if you want to ensure that the cached changes are applied
    to the database.

    :exception: :exc:`~errors.DatabaseROError` if db is opened read-only
    :exception: :exc:`~errors.DatabaseLockedError` if db is locked
    """
    if self.ro:
        raise DatabaseROError()
    if self.writequeue:
        # read notmuch's config regarding imap flag synchronization
        sync = settings.get_notmuch_setting('maildir', 'synchronize_flags')
        # go through writequeue entries
        while self.writequeue:
            current_item = self.writequeue.popleft()
            logging.debug('write-out item: %s' % str(current_item))
            # watch out for notmuch errors to re-insert current_item
            # to the queue on errors
            try:
                # the first two coordinates are the command name and the
                # post-write callback
                cmd, afterwards = current_item[:2]
                logging.debug('cmd created')
                # acquire a writeable db handler
                try:
                    mode = Database.MODE.READ_WRITE
                    db = Database(path=self.path, mode=mode)
                except NotmuchError:
                    raise DatabaseLockedError()
                logging.debug('got write lock')
                # make this a transaction
                db.begin_atomic()
                logging.debug('got atomic')
                if cmd == 'add':
                    # queue item layout: ('add', callback, path, tags)
                    logging.debug('add')
                    path, tags = current_item[2:]
                    msg, status = db.add_message(path, sync_maildir_flags=sync)
                    logging.debug('added msg')
                    # freeze/thaw batches the tag changes on the message
                    msg.freeze()
                    logging.debug('freeze')
                    for tag in tags:
                        msg.add_tag(tag.encode(DB_ENC), sync_maildir_flags=sync)
                    logging.debug('added tags ')
                    msg.thaw()
                    logging.debug('thaw')
                elif cmd == 'remove':
                    # queue item layout: ('remove', callback, path)
                    path = current_item[2]
                    db.remove_message(path)
                else:  # tag/set/untag
                    # queue item layout: (cmd, callback, querystring, tags);
                    # the tag change is applied to every message matching
                    # the query
                    querystring, tags = current_item[2:]
                    query = db.create_query(querystring)
                    for msg in query.search_messages():
                        msg.freeze()
                        if cmd == 'tag':
                            for tag in tags:
                                msg.add_tag(tag.encode(DB_ENC),
                                            sync_maildir_flags=sync)
                        if cmd == 'set':
                            # 'set' replaces all existing tags with the
                            # given ones
                            msg.remove_all_tags()
                            for tag in tags:
                                msg.add_tag(tag.encode(DB_ENC),
                                            sync_maildir_flags=sync)
                        elif cmd == 'untag':
                            for tag in tags:
                                msg.remove_tag(tag.encode(DB_ENC),
                                               sync_maildir_flags=sync)
                        msg.thaw()
                logging.debug('ended atomic')
                # end transaction and reinsert queue item on error
                if db.end_atomic() != notmuch.STATUS.SUCCESS:
                    raise DatabaseError('end_atomic failed')
                # NOTE(review): this debug message duplicates the one just
                # before end_atomic() - probably one should read differently.
                logging.debug('ended atomic')
                # close db
                db.close()
                logging.debug('closed db')
                # call post-callback
                if callable(afterwards):
                    logging.debug(str(afterwards))
                    afterwards()
                    logging.debug('called callback')
            # re-insert item to the queue upon Xapian/NotmuchErrors
            except (XapianError, NotmuchError) as e:
                logging.exception(e)
                self.writequeue.appendleft(current_item)
                # NOTE(review): `unicode` is a Python 2 builtin - this line
                # raises NameError on Python 3; confirm target interpreter.
                raise DatabaseError(unicode(e))
            except DatabaseLockedError as e:
                # index is temporarily locked by another process; keep the
                # item queued so a later flush can retry it
                logging.debug('index temporarily locked')
                self.writequeue.appendleft(current_item)
                raise e
        logging.debug('flush finished')
def get_class(class_name=None, class_id=None):
    """Dynamically build and return a SQLAlchemy mapped class for the
    Object row identified by *class_name* or *class_id*.

    Exactly one of the two lookup keys must be provided. The returned type
    derives from (Base, BaseBill) and carries one Column per property of
    the Object, plus bookkeeping attributes (__attr_list__,
    __referrer_list__, classname, object_id).

    :raises DatabaseError: if the name lookup matches zero or multiple rows
    :raises AttributeError: if neither class_name nor class_id is given
    """
    session = get_session('ekomobile')

    def _getattr(attribute):
        # Translate one Object property row into a SQLAlchemy Column,
        # annotated with extra metadata used elsewhere in this module.
        types = {
            'varchar': VARCHAR(length=255),
            'int': BIGINT,
            'date': DATETIME,
            'dec': DECIMAL,
            'text': LONGTEXT,
            'raw': VARCHAR(length=255)
        }
        out_attrib = Column(attribute.name, types[attribute.data_type])
        # NOTE(review): Column.label() returns a new Label object, which is
        # discarded here - this call looks like a no-op; confirm intent.
        out_attrib.label(attribute.name+'_')
        if attribute.required:
            out_attrib.nullable = False
        else:
            out_attrib.nullable = True
        # referrer/referrer_name: which Object this column points to, and
        # the attribute on that Object used as a human-readable label.
        if attribute.ref_object:
            out_attrib.referrer = attribute.ref_object
            out_attrib.referrer_name = attribute.ref_object_label
        else:
            out_attrib.referrer = None
            out_attrib.referrer_name = None
        # values_to_select: 'k1:v1|k2:v2'-encoded choice list -> dict.
        if attribute.values_to_select:
            out_attrib.values_to_select = dict([
                el.split(':') for el in attribute.values_to_select.split('|')
            ])
        else:
            out_attrib.values_to_select = None
        return out_attrib

    if class_name:
        # search by class name
        try:
            result = session.query(Object).filter(Object.name == class_name).one()
        except MultipleResultsFound:
            raise DatabaseError('Objects with class name="{}" > 1'.format(class_name))
        except NoResultFound:
            raise DatabaseError('No objects with class name="{}"'.format(class_name))
    elif class_id:
        # search by class id
        result = session.query(Object).filter(Object.object_id == class_id).one()
    else:
        raise AttributeError('Must have className or classId')

    # Base attribute dict for the generated class: table mapping, audit
    # columns, and the primary key mapped onto the Object's id_field.
    attributes = {
        '__tablename__': result.table,
        '__attr_list__': ['user_id', 'date_in', 'date_ch', 'i_id'],
        "__referrer_list__": dict(),
        'i_id': Column(result.id_field, INTEGER, nullable=False, primary_key=True),
        'date_in': Column('date_in', DATETIME, nullable=False),
        'date_ch': Column('date_ch', DATETIME, nullable=False),
        '__table_args__': {'extend_existing': True},
        'user_id': Column('user_id', INTEGER, nullable=False),
        'classname': result.name,
        'object_id': result.object_id
    }
    # Give the fixed audit columns the same extra metadata slots that
    # _getattr() sets on property columns, so consumers can treat all
    # columns uniformly.
    for attr in attributes['__attr_list__']:
        attributes[attr].values_to_select = None
        attributes[attr].referrer = None
    for attrib in result.properties_t:
        attributes[attrib.name] = _getattr(attrib)
        # list += tuple appends the element to __attr_list__
        attributes['__attr_list__'] += (attrib.name,)
        if attributes[attrib.name].referrer:
            attributes['__referrer_list__'][attrib.name] = attributes[attrib.name].referrer
    classname = result.name
    session.close()
    return type(classname, (Base, BaseBill), attributes)  # returns new class with Properties of Object instance
def run_multi(self, queries, cached=False):
    """
    Function: run_multi
    -------------------
    Runs multiple SQL statements at once.

    queries: A string containing one or more semicolon-separated statements.
    cached: Whether to serve and store per-statement results via the Cache.
    returns: The Result of the last non-empty statement executed.
    """
    # Consume old results if needed. The rows are discarded; this only
    # drains the cursor so the next execute() does not fail. (Replaces a
    # side-effect-only list comprehension.)
    for _ in self.cursor:
        pass
    sql_list = split(queries)

    result = Result()
    for sql in sql_list:
        sql = sql.rstrip().rstrip(";")
        if len(sql) == 0:
            continue
        query_results = Cache.get(sql)

        # Results are not to be cached or are not in the cache and needs to
        # be cached. Run the query.
        if not query_results or not cached:
            try:
                self.clear_cursor()
                self.cursor.execute(sql)
            # If the query times out.
            except mysql.connector.errors.OperationalError as e:
                raise TimeoutError(e)
            # If something is wrong with their query.
            except mysql.connector.errors.ProgrammingError as e:
                if 'already exists' in str(e):
                    log("[warning: %s]" % str(e))
                else:
                    raise DatabaseError(e)
            # Any other error while executing the SQL.
            except mysql.connector.errors.Error as e:
                print("ERROR while executing SQL: %s" % sql)
                print(str(e))
                raise DatabaseError(e)
            query_results = self.get_results()
            if cached:
                Cache.put(sql, query_results)
        result = query_results

    # If no longer in a transaction, remove all savepoints.
    if not self.db.in_transaction:
        self.savepoints = []
    return result