def _add_run_to_runs_table(
    dataset: DataSetProtocol,
    target_conn: ConnectionPlus,
    target_exp_id: int,
    create_run_table: bool = True,
) -> Optional[str]:
    """Insert a row for ``dataset`` into the runs table of the target DB.

    The run is created under experiment ``target_exp_id``, immediately
    marked complete, and the source dataset's original timestamps are
    written back over the freshly generated ones.

    Args:
        dataset: The dataset whose run metadata should be copied.
        target_conn: Connection to the target database.
        target_exp_id: exp_id of the target experiment to insert into.
        create_run_table: Whether a results table should also be created.

    Returns:
        The name of the (possibly not created) results table for the run.
    """
    _, new_run_id, new_table_name = create_run(
        target_conn,
        target_exp_id,
        name=dataset.name,
        guid=dataset.guid,
        metadata=dataset.metadata,
        captured_run_id=dataset.captured_run_id,
        captured_counter=dataset.captured_counter,
        parent_dataset_links=links_to_str(dataset.parent_dataset_links),
        create_run_table=create_run_table,
        snapshot_raw=dataset._snapshot_raw,
        description=dataset.description,
    )
    mark_run_complete(target_conn, new_run_id)
    # Preserve the source run's original start/completion times rather than
    # the time at which this copy was made.
    _rewrite_timestamps(
        target_conn,
        new_run_id,
        dataset.run_timestamp_raw,
        dataset.completed_timestamp_raw,
    )
    return new_table_name
def _extract_single_dataset_into_db(
    dataset: DataSet, target_conn: ConnectionPlus, target_exp_id: int
) -> None:
    """
    NB: This function should only be called from within
    :meth:extract_runs_into_db

    Insert the given dataset into the specified database file as the latest
    run. Trying to insert a run already in the DB is a NOOP.

    Args:
        dataset: A dataset representing the run to be copied
        target_conn: connection to the DB. Must be atomically guarded
        target_exp_id: The exp_id of the (target DB) experiment in which to
            insert the run
    """
    if not dataset.completed:
        raise ValueError('Dataset not completed. An incomplete dataset '
                         'can not be copied. The incomplete dataset has '
                         f'GUID: {dataset.guid} and run_id: {dataset.run_id}')

    source_conn = dataset.conn

    # A run with this GUID already in the target DB makes the copy a NOOP.
    if get_runid_from_guid(target_conn, dataset.guid) != -1:
        return

    # Resolve paramspecs in the dataset's own parameter ordering.
    param_names = (
        dataset.parameters.split(',') if dataset.parameters is not None else []
    )
    specs_by_name = {
        spec.name: spec for spec in new_to_old(dataset._interdeps).paramspecs
    }
    parspecs = [specs_by_name[name] for name in param_names]

    snapshot_raw = dataset.snapshot_raw

    _, target_run_id, target_table_name = create_run(
        target_conn,
        target_exp_id,
        name=dataset.name,
        guid=dataset.guid,
        parameters=parspecs,
        metadata=dataset.metadata,
    )
    _populate_results_table(
        source_conn, target_conn, dataset.table_name, target_table_name
    )
    mark_run_complete(target_conn, target_run_id)
    # Keep the source run's original timestamps, not the copy time.
    _rewrite_timestamps(
        target_conn,
        target_run_id,
        dataset.run_timestamp_raw,
        dataset.completed_timestamp_raw,
    )
    if snapshot_raw is not None:
        add_meta_data(target_conn, target_run_id, {'snapshot': snapshot_raw})
def _complete(self, value: bool) -> None:
    """Mark the run complete in the DB when ``value`` is truthy.

    A connection to the database at ``self._path_to_db`` is opened and
    closed in all cases; the completion timestamp is recorded and written
    only when ``value`` is true.
    """
    conn = conn_from_dbpath_or_conn(conn=None, path_to_db=self._path_to_db)
    with contextlib.closing(conn):
        if not value:
            return
        self._completed_timestamp_raw = time.time()
        mark_run_complete(conn, self.run_id, self._completed_timestamp_raw)
def test_mark_run_complete(dataset):
    """Completing a run stamps a completion time later than the start."""
    assert dataset.run_timestamp_raw is None
    assert dataset.completed_timestamp_raw is None

    before = time.time()
    mut_queries.set_run_timestamp(dataset.conn, dataset.run_id)
    time.sleep(1)  # for slower test platforms
    mut_queries.mark_run_complete(dataset.conn, dataset.run_id)

    assert dataset.completed_timestamp_raw > before
def completed(self, value):
    """Record the completion flag; when set, close out the run in the DB."""
    self._completed = value
    if not value:
        return
    mark_run_complete(self.conn, self.run_id)