def create_child_tables(self, source_connection, target_engine, table_name):
    for child_table in self.parent_child_mapping[table_name]:
        with target_engine.begin() as target_conn:
            if not table_exists(target_conn, child_table):
                insp = sqlinspect(source_connection)
                # only interested in the child table's own constraints, not inherited ones
                constraints = [
                    constraint for constraint in insp.get_check_constraints(child_table)
                    if constraint['name'].startswith(child_table)
                ]
                constraints_sql = [
                    'CHECK ({})'.format(constraint['sqltext'])
                    for constraint in constraints
                ]
                sql = """
                    CREATE TABLE IF NOT EXISTS "{tablename}" (
                        {constraints},
                        LIKE "{parent_tablename}"
                            INCLUDING DEFAULTS
                            INCLUDING CONSTRAINTS
                            INCLUDING INDEXES
                    ) INHERITS ("{parent_tablename}")
                """.format(
                    parent_tablename=table_name,
                    tablename=child_table,
                    constraints=', '.join(constraints_sql),
                )
                self.stdout.write(sql)
                if not self.dry_run:
                    target_conn.execute(sql)
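For context, a minimal standalone sketch (not part of the snippet above) of the reflection call it relies on: sqlinspect(connection) returns a SQLAlchemy Inspector whose get_check_constraints() yields dicts with 'name' and 'sqltext' keys. The connection URL and table name below are placeholders.

from sqlalchemy import create_engine, inspect as sqlinspect

engine = create_engine("postgresql://localhost/example_db")  # placeholder DSN
with engine.begin() as conn:
    insp = sqlinspect(conn)
    # each entry is a dict like {'name': ..., 'sqltext': ...}
    for constraint in insp.get_check_constraints("example_child_table"):
        print(constraint['name'], constraint['sqltext'])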
def generate_dump_script(self, source_engine_id):
    self.seen_tables = set()
    source_engine = connection_manager.get_engine(source_engine_id)
    # direct dump and load from parent + child tables
    with source_engine.begin() as source_conn:
        insp = sqlinspect(source_conn)
        for table in keep_child_tables + plain_tables:
            if table in self.all_tables:
                for line in self.get_table_date_target(insp, table):
                    self.insert_row(line)

        # direct dump and load from parent
        # dump from all child tables into parent table
        for table in drop_child_tables:
            if table in self.all_tables:
                for line in self.get_table_date_target(insp, table, all_in_parent=True):
                    self.insert_row(line)

        for datasource in StaticDataSourceConfiguration.by_domain(DASHBOARD_DOMAIN):
            if source_engine_id == datasource.engine_id or source_engine_id in datasource.mirrored_engine_ids:
                adapter = get_indicator_adapter(datasource)
                table_name = adapter.get_table().name
                # direct dump and load from parent
                # dump from all child tables into parent table
                # - if the table is distributed, citus will distribute the data
                # - if the table is partitioned, the triggers on the parent will distribute the data
                for line in self.get_table_date_target(insp, table_name, all_in_parent=True):
                    self.insert_row(line)

    remaining_tables = self.all_tables - self.seen_tables - IGNORE_TABLES
    icds_ucr_prefix = '{}{}_'.format(UCR_TABLE_PREFIX, DASHBOARD_DOMAIN)

    def keep_table(table):
        root_table = self.child_parent_mapping.get(table, table)
        return not root_table.startswith(UCR_TABLE_PREFIX) or root_table.startswith(icds_ucr_prefix)

    remaining_tables = list(filter(keep_table, remaining_tables))

    if remaining_tables:
        self.stderr.write("Some tables not seen:")
        for t in remaining_tables:
            parent = self.child_parent_mapping.get(t)
            if parent:
                self.stderr.write("\t{} (parent: {})".format(t, parent))
            else:
                self.stderr.write("\t{}".format(t))
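A minimal sketch, under the assumption that sqlinspect is SQLAlchemy's inspect(), of how an Inspector can enumerate table names and apply a prefix filter like keep_table() above; the prefixes and DSN are placeholder values, not the real project constants.

from sqlalchemy import create_engine, inspect as sqlinspect

engine = create_engine("postgresql://localhost/example_db")  # placeholder DSN
UCR_TABLE_PREFIX = "ucr_"          # placeholder prefix
icds_ucr_prefix = "ucr_icds-cas_"  # placeholder prefix for the dashboard domain

with engine.begin() as conn:
    insp = sqlinspect(conn)
    all_tables = set(insp.get_table_names())
    # keep non-UCR tables plus UCR tables belonging to the dashboard domain
    kept = {
        t for t in all_tables
        if not t.startswith(UCR_TABLE_PREFIX) or t.startswith(icds_ucr_prefix)
    }
    print(sorted(kept))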
def _fill_exportable_(self):
    # all db columns
    columns = [c.key for c in sqlinspect(self).attrs]
    # all dynamic properties
    properties = [
        k[0] for k in inspect.getmembers(
            self.__class__, lambda o: isinstance(o, property))
    ]
    # minus blacklist
    blacklist = getattr(self, 'export_blacklist', [])
    self._exportable_ = [
        i for i in columns + properties if i not in blacklist
    ]
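A self-contained sketch of the same pattern, assuming SQLAlchemy 1.4+: inspect(instance).attrs lists the mapped attributes, inspect.getmembers() finds plain Python properties, and an optional export_blacklist removes unwanted keys. The Example model is hypothetical.

import inspect as py_inspect
from sqlalchemy import Column, Integer, String, inspect as sqlinspect
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Example(Base):  # hypothetical model for illustration
    __tablename__ = "example"
    id = Column(Integer, primary_key=True)
    name = Column(String)
    export_blacklist = ['id']

    @property
    def display_name(self):
        return (self.name or "").title()

obj = Example(name="demo")
columns = [a.key for a in sqlinspect(obj).attrs]
properties = [
    k for k, _ in py_inspect.getmembers(type(obj), lambda o: isinstance(o, property))
]
exportable = [i for i in columns + properties if i not in obj.export_blacklist]
print(exportable)  # e.g. ['name', 'display_name']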
def set(self, state):
    self.state = state
    db.session.commit()
    if not sqlinspect(self).transient and getattr(self, 'bind', None):
        self.bind.handler = TaskRunRelationalSchema().dump(self)
    base_msg = f"TASK {state} - RUN TIME: {round(self.run_time.total_seconds(), 3)}"
    if state == "RETRYING":
        retry_count = f"{(self.retries + 1)}/{self.max_retries}"
        logger.warning(
            f"{base_msg}: [{retry_count} - COUNTDOWN: {self.retries_countdown}s]"
        )
    elif state == "REVOKED":
        logger.info(
            f"{base_msg}: [{self.celery_id}/{self.task.key} - {self.revoke_msg}]"
        )
    elif state in ["SUCCESSFUL", "FAILED"]:
        logger.info(f"{base_msg}: RUN DETAILS, {self.bind.handler}")
    else:
        logger.info(base_msg)
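A minimal standalone sketch of the transient check used above, assuming SQLAlchemy 1.4+; the TaskRun model and in-memory SQLite engine are placeholders. inspect(instance) returns an InstanceState whose .transient and .persistent flags track where the object is in the session lifecycle.

from sqlalchemy import Column, Integer, create_engine, inspect as sqlinspect
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class TaskRun(Base):  # hypothetical model for illustration
    __tablename__ = "task_run"
    id = Column(Integer, primary_key=True)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

run = TaskRun()
print(sqlinspect(run).transient)       # True: not yet added to a session

with Session(engine) as session:
    session.add(run)
    session.commit()
    print(sqlinspect(run).persistent)  # True: flushed and attached to the session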