def generate_dotcode():
    """Build graphviz dotcode (a list of strings, one per line) describing
    the suite database schema.
    """
    # get markdown for the schema from a throw-away database file
    with tempfile.NamedTemporaryFile() as tf_db:
        # is_public=False triggers the creation of tables
        CylcSuiteDAO(db_file_name=tf_db.name, is_public=False)
        schema, orphans = schema_to_markdown(db_name=tf_db.name)

    # graph prefix
    lines = [
        'graph {',
        'node [label = "\\N", shape = plaintext];',
        'edge [color = gray50, minlen = 2, style = dashed];',
        'rankdir = "LR";',
    ]

    # the database graph
    tables, relationships = all_to_intermediary(schema)
    for table in tables:
        lines.append(table.to_dot())
    for relationship in relationships:
        lines.append(relationship.to_dot())

    # group orphan nodes to cut down on clutter
    lines.extend(group_nodes(orphans))

    # use invisible graph edges to change the graph layout
    lines.append('"task_pool_checkpoints" -- "inheritance"[style=invis];')

    # graph suffix
    lines.append('}')
    return lines
def test_remove_columns():
    """Test workaround for dropping columns in sqlite3."""
    with create_temp_db() as (temp_db, conn):
        # build a three-column table with one row
        conn.execute(
            r'''
                CREATE TABLE foo (
                    bar,
                    baz,
                    pub
                )
            '''
        )
        conn.execute(
            r'''
                INSERT INTO foo
                VALUES (?,?,?)
            ''',
            ['BAR', 'BAZ', 'PUB'],
        )
        conn.commit()
        conn.close()

        # drop two of the three columns via the DAO workaround
        dao = CylcSuiteDAO(temp_db)
        dao.remove_columns('foo', ['bar', 'baz'])

        # only the remaining column's data should survive
        conn = dao.connect()
        rows = list(conn.execute(r'SELECT * from foo'))
        assert rows == [('PUB',)]
def on_suite_start(self, is_restart):
    """Initialise data access objects.

    Ensure that:
    * private database file is private
    * public database is in sync with private database

    Args:
        is_restart: when falsy (a fresh start, presumably — confirm
            against caller), any pre-existing private database file is
            removed so it can be created anew.
    """
    if not is_restart:
        try:
            # remove any stale private database file from a previous run
            os.unlink(self.pri_path)
        except OSError:
            # Just in case the path is a directory!
            rmtree(self.pri_path, ignore_errors=True)
    self.pri_dao = self.get_pri_dao()
    # restrict the private database file to owner read/write only
    os.chmod(self.pri_path, 0o600)
    self.pub_dao = CylcSuiteDAO(self.pub_path, is_public=True)
    # bring the public database in line with the private one
    self.copy_pri_to_pub()
def get_task_job_attrs(suite_name, point, task, submit_num):
    """Return job (platform, job_runner_name, live_job_id).

    live_job_id is the job ID if job is running, else None.
    """
    dao = CylcSuiteDAO(
        get_suite_run_pub_db_name(suite_name), is_public=True)
    job_data = dao.select_task_job(point, task, submit_num)
    dao.close()
    if job_data is None:
        return (None, None, None)
    job_runner_name = job_data["job_runner_name"]
    job_id = job_data["job_id"]
    # the job is live only if it has a runner and an ID, has started
    # running, and has not yet recorded an exit time
    is_live = bool(
        job_runner_name
        and job_id
        and job_data["time_run"]
        and not job_data["time_run_exit"]
    )
    return (
        job_data["platform_name"],
        job_runner_name,
        job_id if is_live else None,
    )
def get_task_job_attrs(suite_name, point, task, submit_num):
    """Return job (user_at_host, batch_sys_name, live_job_id).

    live_job_id is batch system job ID if job is running, else None.
    """
    dao = CylcSuiteDAO(
        get_suite_run_pub_db_name(suite_name), is_public=True)
    job_data = dao.select_task_job(point, task, submit_num)
    dao.close()
    if job_data is None:
        return (None, None, None)
    batch_sys_name = job_data["batch_sys_name"]
    batch_sys_job_id = job_data["batch_sys_job_id"]
    # the job is live only if it has a batch system and an ID, has started
    # running, and has not yet recorded an exit time
    is_live = bool(
        batch_sys_name
        and batch_sys_job_id
        and job_data["time_run"]
        and not job_data["time_run_exit"]
    )
    return (
        job_data["user_at_host"],
        batch_sys_name,
        batch_sys_job_id if is_live else None,
    )
def _get_dao(suite):
    """Return the DAO (public) for suite."""
    pub_db_name = get_suite_run_pub_db_name(suite)
    return CylcSuiteDAO(pub_db_name, is_public=True)
def get_pri_dao(self):
    """Return the primary DAO."""
    pri_dao = CylcSuiteDAO(self.pri_path)
    return pri_dao
def setUp(self):
    """Create an in-memory DAO whose connect() yields a mock connection."""
    mocked_connection = mock.Mock()
    self.dao = CylcSuiteDAO(':memory:')
    self.dao.connect = mock.MagicMock(return_value=mocked_connection)
    self.mocked_connection = mocked_connection
def test_upgrade_to_platforms(mock_glbl_cfg):
    """Test upgrader logic for platforms in the database.
    """
    # Set up the global config
    mock_glbl_cfg('cylc.flow.rundb.glbl_cfg', GLOBAL_CONFIG)
    # task name, cycle, user_at_host, batch_system
    initial_data = [
        ('hpc_with_pbs', '1', 'hpcl1', 'pbs'),
        ('desktop_with_bg', '1', 'desktop01', 'background'),
        ('slurm_no_host', '1', '', 'slurm'),
        ('hpc_bg', '1', 'hpcl1', 'background'),
        ('username_given', '1', 'slartibartfast@hpcl1', 'pbs'),
    ]
    # task name, cycle, user, platform
    expected_data = [
        ('hpc_with_pbs', '1', '', 'hpc'),
        ('desktop_with_bg', '1', '', 'desktop01'),
        ('slurm_no_host', '1', '', 'sugar'),
        ('hpc_bg', '1', '', 'hpcl1-bg'),
        ('username_given', '1', 'slartibartfast', 'hpc'),
    ]
    with create_temp_db() as (temp_db, conn):
        # populate a pre-upgrade task_jobs table
        conn.execute(
            rf'''
                CREATE TABLE {CylcSuiteDAO.TABLE_TASK_JOBS} (
                    name varchar(255),
                    cycle varchar(255),
                    user_at_host varchar(255),
                    batch_system varchar(255)
                )
            '''
        )
        conn.executemany(
            rf'''
                INSERT INTO {CylcSuiteDAO.TABLE_TASK_JOBS}
                VALUES (?,?,?,?)
            ''',
            initial_data,
        )
        # close database
        conn.commit()
        conn.close()

        # open database as cylc dao
        dao = CylcSuiteDAO(temp_db)
        conn = dao.connect()

        # check the initial data was correctly inserted
        rows = list(
            conn.execute(rf'SELECT * FROM {CylcSuiteDAO.TABLE_TASK_JOBS}')
        )
        assert rows == initial_data

        # Upgrade function returns True?
        assert dao.upgrade_to_platforms()

        # check the data was correctly upgraded
        rows = list(
            conn.execute(
                r'SELECT name, cycle, user, platform_name FROM task_jobs'
            )
        )
        assert rows == expected_data

        # make sure the upgrade is skipped on future runs
        assert not dao.upgrade_to_platforms()
def test_upgrade_hold_swap():
    """Pre Cylc8 DB upgrade compatibility test.

    Checks that held/hold_swap rows are converted to status + is_held in
    both the task pool and task pool checkpoints tables.
    """
    # test data
    initial_data = [
        # (name, cycle, status, hold_swap)
        ('foo', '1', 'waiting', ''),
        ('bar', '1', 'held', 'waiting'),
        ('baz', '1', 'held', 'running'),
        ('pub', '1', 'waiting', 'held')
    ]
    expected_data = [
        # (name, cycle, status, is_held)
        ('foo', '1', 'waiting', 0),
        ('bar', '1', 'waiting', 1),
        ('baz', '1', 'running', 1),
        ('pub', '1', 'waiting', 1)
    ]
    tables = [
        CylcSuiteDAO.TABLE_TASK_POOL,
        CylcSuiteDAO.TABLE_TASK_POOL_CHECKPOINTS
    ]
    with create_temp_db() as (temp_db, conn):
        # initialise tables
        for table in tables:
            conn.execute(
                rf'''
                    CREATE TABLE {table} (
                        name varchar(255),
                        cycle varchar(255),
                        status varchar(255),
                        hold_swap varchar(255)
                    )
                '''
            )
            conn.executemany(
                rf'''
                    INSERT INTO {table}
                    VALUES (?,?,?,?)
                ''',
                initial_data
            )
        # close database
        conn.commit()
        conn.close()

        # open database as cylc dao
        dao = CylcSuiteDAO(temp_db)
        conn = dao.connect()

        # check the initial data was correctly inserted
        for table in tables:
            dump = [x for x in conn.execute(rf'SELECT * FROM {table}')]
            assert dump == initial_data

        # upgrade
        assert dao.upgrade_is_held()

        # check the data was correctly upgraded
        # (fix: query each table in turn — previously this hardcoded
        # "task_pool" so the checkpoints table was never verified)
        for table in tables:
            dump = [x for x in conn.execute(rf'SELECT * FROM {table}')]
            assert dump == expected_data

        # make sure the upgrade is skipped on future runs
        assert not dao.upgrade_is_held()
def test_upgrade_retry_state():
    """Pre Cylc8 DB upgrade compatibility test.

    Checks that (submit-)retrying tasks are converted to waiting in both
    the task pool and task pool checkpoints tables, and that the upgrader
    reports the rows it changed.
    """
    initial_data = [
        # (name, cycle, status)
        ('foo', '1', 'waiting'),
        ('bar', '1', 'running'),
        ('baz', '1', 'retrying'),
        ('pub', '1', 'submit-retrying')
    ]
    expected_data = [
        # (name, cycle, status)
        ('foo', '1', 'waiting'),
        ('bar', '1', 'running'),
        ('baz', '1', 'waiting'),
        ('pub', '1', 'waiting')
    ]
    tables = [
        CylcSuiteDAO.TABLE_TASK_POOL,
        CylcSuiteDAO.TABLE_TASK_POOL_CHECKPOINTS
    ]
    with create_temp_db() as (temp_db, conn):
        # initialise tables
        for table in tables:
            conn.execute(
                rf'''
                    CREATE TABLE {table} (
                        name varchar(255),
                        cycle varchar(255),
                        status varchar(255)
                    )
                '''
            )
            conn.executemany(
                rf'''
                    INSERT INTO {table}
                    VALUES (?,?,?)
                ''',
                initial_data
            )
        # close database
        conn.commit()
        conn.close()

        # open database as cylc dao
        dao = CylcSuiteDAO(temp_db)
        conn = dao.connect()

        # check the initial data was correctly inserted
        for table in tables:
            dump = [x for x in conn.execute(rf'SELECT * FROM {table}')]
            assert dump == initial_data

        # upgrade
        assert dao.upgrade_retry_state() == [
            ('1', 'baz', 'retrying'),
            ('1', 'pub', 'submit-retrying')
        ]

        # check the data was correctly upgraded
        # (fix: query each table in turn — previously this hardcoded
        # "task_pool" so the checkpoints table was never verified)
        for table in tables:
            dump = [x for x in conn.execute(rf'SELECT * FROM {table}')]
            assert dump == expected_data