async def test_database_error(client: MagicMock):
    q = MagicMock()
    q.put.side_effect = DatabaseError()
    client.publish.return_value.set_exception(TimeoutError())
    _, response = await call_submit(client, q)
    assert response.status == 507
    assert response.body == b""
def delete(self, nomenclature):
    sql_statement = f"DELETE FROM NOMENCLATURE WHERE ID='{nomenclature.id}'"
    self.cursor.execute(sql_statement)
    try:
        self.connection.commit()
    except Exception as e:
        raise DatabaseError(e.args)
def labels_from_db(self, sample_ids):
    '''
    Given a list of sample ids, return a list of ele yes/no labels.

    @param sample_ids: np.array of sample ids whose labels are to be retrieved.
    @type sample_ids: np.array
    '''
    # Need to turn the sample_ids from ints to strings so that
    # they are usable as a comma-separated list in the query:
    sample_id_list = ','.join([str(el) for el in list(sample_ids)])
    cmd = f'''SELECT label
                FROM Samples
               WHERE sample_id in ({sample_id_list})
               ORDER BY sample_id;
           '''
    try:
        rows = self.db.execute(cmd)
    except Exception as e:
        raise DatabaseError(f"Could not retrieve labels: {repr(e)}") from e
    labels = [row['label'] for row in rows]
    return labels
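# Illustrative call only (a minimal sketch): 'dataset' stands in for an object
# of the class that owns labels_from_db() and an open sqlite connection; the
# concrete ids and labels below are hypothetical.
import numpy as np

sample_ids = np.array([3, 7, 12])
labels = dataset.labels_from_db(sample_ids)   # e.g. [0, 1, 1]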
def h_update_status(data, connection):
    logging.info("h_update_status: {}".format(data))
    status = data.pop(0)
    conn = db.insert_state(status)
    if conn is None:
        raise DatabaseError("Could not get connection")
    return '_'.join(data)
def socker_bind_connection():
    # Bind the socket_connection to the port
    global connected
    connected = False
    server_address = ('', 4662)
    logging.info('waiter_receive_socket - Starting receiver socket_connection up on %s port %s' % server_address)
    slave_socket_connection = TCPsocket()
    slave_socket_connection.bind(server_address)
    # Listen for incoming connections
    while 1:
        data, client_address = slave_socket_connection.wait_connection()
        # condition.acquire()
        logging.info('Received connection from ' + client_address[0])
        # Receive the data in small chunks and retransmit it
        logging.info('Received %s' % data)
        if "IP" in data:
            logging.warning("IP received %s" % client_address[0])
            connected = False
            with open(ip_filename, "w") as ip_file:
                ip_file.write(client_address[0])
            connected = True
        elif "ST" in data:
            logging.warning("Receiving state %s" % data)
            status = data.split("_")[3]
            if status == "True" or status == "False":
                conn = db.insert_state(status)
                if conn is None:
                    raise DatabaseError("Could not get connection")
        else:
            logging.error("Error not implemented - status missing")
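# Minimal sketch of the message shapes the receiver above appears to expect.
# Only the "IP"/"ST" prefixes and the underscore-separated fields come from the
# code itself; the exact wire layout is an assumption inferred from data.split("_")[3].
ip_message = "IP"                       # sender announces itself; its address gets written to ip_filename
state_message = "ST_node_42_True"       # hypothetical layout: prefix_source_id_status
status = state_message.split("_")[3]    # -> "True", later passed to db.insert_state()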
def insert(self, nomenclature):
    sql_statement = f"INSERT INTO NOMENCLATURE (NAME, DESCRIPTION) VALUES \
        ('{nomenclature.name}', '{nomenclature.description}')"
    self.cursor.execute(sql_statement)
    try:
        self.connection.commit()
    except Exception as e:
        raise DatabaseError(e.args)
def update(self, nomenclature):
    sql_statement = f"UPDATE NOMENCLATURE SET NAME='{nomenclature.name}', \
        DESCRIPTION='{nomenclature.description}' \
        WHERE ID='{nomenclature.id}'"
    self.cursor.execute(sql_statement)
    try:
        self.connection.commit()
    except Exception as e:
        raise DatabaseError(e.args)
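# A minimal alternative sketch for the delete/insert/update methods above, using
# DB-API parameter binding instead of f-string interpolation (which is open to
# SQL injection). Assumes a sqlite3-style cursor with '?' placeholders; other
# drivers use a different placeholder style.
def update_parameterized(self, nomenclature):
    sql_statement = "UPDATE NOMENCLATURE SET NAME=?, DESCRIPTION=? WHERE ID=?"
    self.cursor.execute(sql_statement,
                        (nomenclature.name, nomenclature.description, nomenclature.id))
    try:
        self.connection.commit()
    except Exception as e:
        raise DatabaseError(e.args)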
def _set_error(rc, errmsg=None):
    if rc == SQLITE_OK:
        return
    if errmsg is None:
        errmsg = sqlite3_errstr(rc).decode()
    if rc in (SQLITE_INTERNAL, SQLITE_NOTFOUND):
        raise InternalError(errmsg)
    elif rc == SQLITE_NOMEM:
        raise MemoryError()
    elif rc in (SQLITE_ERROR, SQLITE_PERM, SQLITE_ABORT, SQLITE_BUSY,
                SQLITE_LOCKED, SQLITE_READONLY, SQLITE_INTERRUPT, SQLITE_IOERR,
                SQLITE_FULL, SQLITE_CANTOPEN, SQLITE_PROTOCOL, SQLITE_EMPTY,
                SQLITE_SCHEMA):
        raise OperationalError(errmsg)
    elif rc == SQLITE_CORRUPT:
        raise DatabaseError(errmsg)
    elif rc == SQLITE_TOOBIG:
        raise DataError(errmsg)
    elif rc in (SQLITE_CONSTRAINT, SQLITE_MISMATCH):
        raise IntegrityError(errmsg)
    elif rc == SQLITE_MISUSE:
        raise ProgrammingError(errmsg)
    else:
        raise DatabaseError(errmsg)
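# Illustrative call site only: 'lib' and 'stmt' are hypothetical handles from a
# low-level sqlite3 C-API binding. SQLITE_ROW/SQLITE_DONE are real SQLite result
# codes that a step loop handles itself before delegating error codes to _set_error().
rc = lib.sqlite3_step(stmt)
if rc not in (SQLITE_ROW, SQLITE_DONE):
    _set_error(rc)   # maps the result code to the matching DB-API exception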
def save_entity(self, obj: object) -> int:
    if isinstance(obj, User):
        u = self.get_user_by_login_id(obj.login_id)
        if u:
            self.users.remove(u)
        self.users.append(obj)
        rows_affected = 1
    elif isinstance(obj, KnownFace):
        kf = self.get_known_face_by_id(obj.kf_id)
        if kf:
            self.faces.remove(kf)
        self.faces.append(obj)
        rows_affected = 1
    else:
        raise DatabaseError("Unsupported entity type: " + str(type(obj)))
    return rows_affected
def buggy_db(database, *args, **kwargs):
    if database.name.startswith("manager"):
        database.unlink()
        raise DatabaseError("Mock")
def ex_single(self, query):
    self.cursor.execute(query)
    result = self.cursor.fetchone()
    if result is None:
        raise DatabaseError(query)
    return result
def add_snippet_to_db(self,
                      snippet_file_name_template,
                      label,
                      snippet_xtick_interval,
                      snippet_time_interval,
                      freq_band_energies,
                      curr_file_family):
    '''
    Adds a record for a spectrogram snippet into the Sqlite db.
    The record includes the snippet_id, the file where the snippet
    resides, the recording site as derived from the filename in
    curr_file_family, as well as X-axis time slice start/stop index
    and start/stop times of the snippet relative to the start of the
    parent spectrogram.

    Trick is that we use Sqlite's automatic ROWID generation for
    snippet ids. And we only know those after inserting the record.
    That id is part of the snippet's future file name, which itself
    has the snippet id in it. Since that file name needs to be part
    of the snippet's record, though, we have to do an INSERT,
    followed by an UPDATE. Obtaining the ROWID (i.e. the snippet id)
    after the insert is free. The update of the ultimate file name
    is not.

    @param snippet_file_name_template: partially constructed file name
        where the snippet dataframe will be stored on disk:
        "foo_???_spectrogram.pickle". The question marks are replaced
        in this method with the ROWID of the newly created record.
    @type snippet_file_name_template: str
    @param label: the snippet's label: 1/0
    @type label: int
    @param snippet_xtick_interval: interval of x slots in spectrogram
    @type snippet_xtick_interval: Pandas Interval
    @param snippet_time_interval: interval of true times since start of
        parent spectrogram
    @type snippet_time_interval: Pandas Interval
    @param freq_band_energies: mean energy in three frequency bands of
        parent 24hr-spectrogram, and of this snippet
    @type freq_band_energies: {str : float}
    @param curr_file_family: info about the snippet's file family
        (see dsp_utils.file_family).
    @type curr_file_family: FileFamily
    '''
    recording_site = curr_file_family.file_root
    insertion = f'''
        INSERT INTO Samples (recording_site,
                             label,
                             start_time_tick,
                             end_time_tick,
                             start_time,
                             end_time,
                             parent_low_freqs_energy,
                             parent_med_freqs_energy,
                             parent_high_freqs_energy,
                             snippet_low_freqs_energy,
                             snippet_med_freqs_energy,
                             snippet_high_freqs_energy
                             )
                VALUES ('{recording_site}',
                        {label},
                        {snippet_xtick_interval.left},
                        {snippet_xtick_interval.right},
                        {snippet_time_interval.left},
                        {snippet_time_interval.right},
                        {freq_band_energies['parent_low_freqs_energy']},
                        {freq_band_energies['parent_med_freqs_energy']},
                        {freq_band_energies['parent_high_freqs_energy']},
                        {freq_band_energies['snippet_low_freqs_energy']},
                        {freq_band_energies['snippet_med_freqs_energy']},
                        {freq_band_energies['snippet_high_freqs_energy']}
                        );
        '''
    try:
        # The Python API to the Sqlite3 db
        # automatically begins a transaction:
        cur = self.db.execute(insertion)
    except Exception as e:
        self.db.rollback()
        # Raise DatabaseError but with original stacktrace:
        raise DatabaseError(repr(e)) from e

    # Get the ROWID that was assigned to the row
    # we just wrote above:
    db_snippet_id = cur.lastrowid

    # Safe to commit the partially filled in snippet record
    # now that we have its ROWID:
    self.db.commit()

    # Use this ultimate sample id to finalize the file name
    # where the caller will write the spectrogram snippet:
    snippet_file_name = snippet_file_name_template.replace('???', str(db_snippet_id))

    # Finally: update the snippet_filename column
    # of the just-written entry:
    self.db.execute(f'''UPDATE Samples
                           SET snippet_filename = '{snippet_file_name}'
                         WHERE sample_id = {db_snippet_id}
                    ''')
    self.db.commit()
    return (db_snippet_id, snippet_file_name)
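# Illustrative call only (a minimal sketch): 'dataset' and 'family' stand in for
# the owning object and a FileFamily instance; every concrete value below is
# hypothetical, only the parameter order comes from the method above.
import pandas as pd

snippet_id, snippet_path = dataset.add_snippet_to_db(
    'site_a_???_spectrogram.pickle',         # '???' is replaced with the new ROWID
    1,                                       # label: call present
    pd.Interval(left=0, right=400),          # x-tick slice of the parent spectrogram
    pd.Interval(left=0.0, right=9.6),        # corresponding times since spectrogram start
    {'parent_low_freqs_energy': 0.1, 'parent_med_freqs_energy': 0.2,
     'parent_high_freqs_energy': 0.3, 'snippet_low_freqs_energy': 0.4,
     'snippet_med_freqs_energy': 0.5, 'snippet_high_freqs_energy': 0.6},
    family)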
def __init__(self,
             dirs_or_spect_files=None,
             sqlite_db_path=None,
             recurse=False,
             chop=False,
             snippet_outdir=None,
             testing=False,
             test_db=None):
    '''
    @param dirs_or_spect_files: list of files and/or directories where
        spectrograms reside. These may be 24-hr spectrograms, or snippets.
    @type dirs_or_spect_files: {str|[str]}
    @param sqlite_db_path: fully qualified path to the sqlite db that holds
        info of already existing snippets. If None, such a db will be created.
    @type sqlite_db_path: str
    @param recurse: whether or not to search for spectrograms in subtrees
        of dirs_or_spect_files
    @type recurse: bool
    @param chop: whether or not to perform any chopping of 24-hr spectrograms.
        If all spectrograms in dirs_or_spect_files and their subtrees are
        snippets, set this value to False.
    @type chop: bool
    @param snippet_outdir: if chopping is requested: where to place the
        resulting snippets
    @type snippet_outdir: str
    @param testing: whether caller is a unittest
    @type testing: bool
    @param test_db: in case of testing, a db created by the unittest.
    @type test_db: sqlite3.Connection
    '''
    #***** if snippet_outdir is None, snippets go where spectrogram is.
    self.snippet_outdir = snippet_outdir

    # Allow unittests to create an instance and
    # then call methods selectively:
    if testing:
        if test_db is None:
            raise ValueError("If testing, must provide an Sqlite db instance")
        self.db = test_db
    self.testing = testing

    # Indicators that a new fold was just
    # loaded as part of a __next__() call:
    self.new_train_fold_len = None
    self.new_validate_fold_len = None

    if not testing:
        if dirs_or_spect_files is None and sqlite_db_path is None:
            raise ValueError("Directories and sqlite_db_path args must not both be None")
        self.log = LoggingService()
        if sqlite_db_path is None:
            sqlite_db_path = os.path.join(os.path.dirname(__file__), 'spectrograms.sqlite')
        self.db = SpectrogramDataset.get_db(sqlite_db_path)
        if chop:
            # Get already processed dirs. The 'list()' pulls all hits from
            # the db at once (like any other iterator)
            try:
                processed_dirs = list(self.db.execute('''
                    SELECT dir_or_file_name FROM DirsAndFiles;
                    '''))
            except DatabaseError as e:
                raise DatabaseError(f"Could not check for already processed work: {repr(e)}") from e
            if dirs_or_spect_files is not None:
                # Process those of the given dirs_or_spect_files that
                # are not already in the db:
                dirs_or_files_to_do = set(dirs_or_spect_files) - set(processed_dirs)
            else:
                dirs_or_files_to_do = set()
            if len(dirs_or_files_to_do) > 0:
                # Chop spectrograms:
                self.process_spectrograms(dirs_or_files_to_do, recurse=recurse)

    num_samples_row = next(self.db.execute('''SELECT COUNT(*) AS num_samples from Samples'''))
    # Total number of samples in the db:
    self.num_samples = num_samples_row['num_samples']

    # Our sample ids go from 0 to n. List of all sample ids:
    self.sample_ids = list(range(self.num_samples))

    # So far, folds*() was not called, so we are using
    # the entire dataset:
    self.num_folds = 0

    # The following is only needed in case the class is used without
    # either the split_dataset() or the kfold*() facilities:
    # Make a preliminary train queue with all the sample ids.
    # If split_dataset() is called later, this queue will be replaced:
    self.train_queue = deque(self.sample_ids)
    self.curr_queue = self.train_queue
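# Illustrative construction only (a minimal sketch): the paths are hypothetical;
# parameter names come from the __init__ above, and the class is assumed to be
# the SpectrogramDataset referenced there.
dataset = SpectrogramDataset(
    dirs_or_spect_files=['/data/spectrograms/2019'],   # hypothetical input directory
    sqlite_db_path='/data/spectrograms.sqlite',        # hypothetical db path
    recurse=True,
    chop=True,                                         # chop 24-hr spectrograms into snippets
    snippet_outdir='/data/snippets')                   # hypothetical output directory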
def close(self):
    try:
        self.db.close()
    except Exception as e:
        raise DatabaseError(f"Could not close sqlite db: {repr(e)}") from e
def _new_library(config):
    from ckcache import new_cache
    from database import LibraryDb
    from sqlalchemy.exc import OperationalError

    cache = new_cache(config['filesystem'])
    database = LibraryDb(**dict(config['database']))

    try:
        database.create()
    except OperationalError as e:
        from ..dbexceptions import DatabaseError
        raise DatabaseError('Failed to create {} : {}'.format(database.dsn, e.message))

    if 'upstream' in config:
        raise DeprecationWarning("Upstream no longer allowed in configuration")

    root = config['root']

    remotes = {name: new_cache(remote) for name, remote in config.get('remotes', {}).items()}

    for i, remote in enumerate(remotes.values()):
        remote.set_priority(i)

    source_dir = config.get('source', None)

    hostport = config.get('host', None)
    if hostport:
        if ':' in hostport:
            host, port = hostport.split(':')
        else:
            host = hostport
            port = 80
    else:
        host = None
        port = 80

    if 'documentation' in config:
        doc_cache = new_cache(config['documentation'])
    else:
        doc_cache = cache.subcache('_doc')

    if 'warehouses' in config:
        warehouse_cache = new_cache(config['warehouses'])
    else:
        warehouse_cache = cache.subcache('warehouses')

    l = Library(cache=cache,
                doc_cache=doc_cache,
                warehouse_cache=warehouse_cache,
                database=database,
                name=config['_name'] if '_name' in config else 'NONE',
                remotes=remotes,
                require_upload=config.get('require_upload', None),
                source_dir=source_dir,
                host=host,
                port=port,
                urlhost=config.get('urlhost', None))

    return l
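# Sketch of a configuration mapping that _new_library() appears to read. The keys
# mirror the lookups above; every concrete value (and the shape of the 'database'
# sub-dict) is a hypothetical example, not the library's documented format.
config = {
    '_name': 'example-library',
    'filesystem': '/var/library/cache',                 # passed to new_cache()
    'database': {'driver': 'sqlite', 'dbname': '/var/library/library.db'},
    'root': '/var/library',
    'remotes': {'origin': 'http://remote.example.com/cache'},
    'source': '/var/library/source',
    'host': 'localhost:8080',                           # split into host and port above
}
library = _new_library(config)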