def _do_clone(self):
    """
    Cloning operation itself. Assumes that @see self.to_clone was populated before.
    """
    # Get the mappings in the source project, in order to determine the useful columns
    custom_mapping = ProjectMapping().load_from_project(self.prj)
    obj_mapping = custom_mapping.object_mappings
    used_columns = set(obj_mapping.real_cols_to_tsv.keys())
    used_columns.add("orig_id")  # For safety
    # Create a DB writer
    writer = DBWriter(self.session)
    # Narrow the writes in ObjectFields thanks to mappings of the original project
    writer.generators({"obj_field": used_columns})
    # Use import helpers
    dest_prj_id = self.dest_prj.projid
    import_how = ImportHow(prj_id=dest_prj_id,
                           update_mode="No",
                           custom_mapping=ProjectMapping(),
                           skip_object_duplicates=False,
                           loaded_files=[])
    # Get parent (enclosing) Sample, Acquisition, Process. There should be 0 in this context...
    import_how.existing_parents = InBundle.fetch_existing_parents(
        self.session, prj_id=dest_prj_id)
    self._clone_all(import_how, writer)
    # Copy mappings to destination. We could narrow them to the minimum?
    custom_mapping.write_to_project(self.dest_prj)
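# A minimal illustration of the column narrowing above, under an assumed mapping
# (the free-column names are hypothetical, not from any real project): if the
# source project maps TSV columns 'area' and 'esd' onto ObjectFields free
# columns n01 and n02, then real_cols_to_tsv.keys() is {'n01', 'n02'}, so
# used_columns becomes {'n01', 'n02', 'orig_id'} and the DBWriter emits only
# these obj_field columns during the clone.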
def do_real(self) -> None:
    """
    Do the real job, i.e. write everywhere (DB/filesystem)
    """
    loaded_files = none_to_empty(self.prj.fileloaded).splitlines()
    logger.info("Previously loaded files: %s", loaded_files)
    found_users, taxo_found, col_mapping_dict, \
        nb_rows, source_path = self._load_vars_from_state(self.STATE_KEYS)
    # Save mappings straight away
    col_mapping = ProjectMapping().load_from_dict(col_mapping_dict)
    col_mapping.write_to_project(self.prj)
    self.session.commit()
    # TODO: Duplicated code
    source_bundle = InBundle(
        source_path, Path(self.temp_for_jobs.data_dir_for(self.job_id)))
    # Configure the import to come, destination
    db_writer = DBWriter(self.session)
    import_where = ImportWhere(
        db_writer, self.vault,
        self.temp_for_jobs.base_dir_for(self.job_id))
    # Configure the import to come, directives
    import_how = ImportHow(self.prj_id, self.req.update_mode, col_mapping,
                           self.req.skip_existing_objects, loaded_files)
    import_how.taxo_mapping = self.req.taxo_mappings
    import_how.found_taxa = taxo_found
    import_how.found_users = found_users
    if self.req.skip_loaded_files:
        import_how.compute_skipped(source_bundle, logger)
    if self.req.skip_existing_objects:
        # If we must skip existing objects then do an inventory of what's in already
        with CodeTimer("run: Existing images for %d: " % self.prj_id, logger):
            import_how.objects_and_images_to_skip = Image.fetch_existing_images(
                self.session, self.prj_id)
    import_how.do_thumbnail_above(int(self.config['THUMBSIZELIMIT']))
    # Do the bulk job of import
    rowcount_from_validate = nb_rows
    row_count = source_bundle.do_import(import_where, import_how,
                                        rowcount_from_validate,
                                        self.report_progress)
    # Update loaded files in DB, removing duplicates
    self.prj.fileloaded = "\n".join(set(import_how.loaded_files))
    self.session.commit()
    # Recompute stats
    ProjectBO.do_after_load(self.session, self.prj_id)
    self.session.commit()
    msg = "Total of %d rows loaded" % row_count
    logger.info(msg)
    self.set_job_result(errors=[], infos={"rowcount": row_count})
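# A note on the dedup in do_real above, with hypothetical file names:
#   "\n".join(set("a.zip\nb.zip".splitlines() + ["a.zip", "c.zip"]))
# keeps each name exactly once ('a.zip', 'b.zip', 'c.zip') but in arbitrary
# order, which seems acceptable here as fileloaded is only consulted for
# membership when skipping already-loaded files.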
class MergeService(Service, LogEmitter):
    """
    Merge operation, move everything from source into destination project.
    """

    def __init__(self, prj_id: int, src_prj_id: int, dry_run: bool):
        super().__init__()
        # params
        self.prj_id = prj_id
        self.src_prj_id = src_prj_id
        self.dry_run = dry_run
        # work vars
        self.remap_operations: Dict[MappedTableTypeT, List[RemapOp]] = {}
        self.dest_augmented_mappings = ProjectMapping()

    def log_file_path(self) -> str:
        return "merge_%d_in_%d.log" % (self.prj_id, self.src_prj_id)

    def run(self, current_user_id: int) -> MergeRsp:
        with LogsSwitcher(self):
            return self.do_run(current_user_id)

    def do_run(self, current_user_id: int) -> MergeRsp:
        """
        Run the service, merge the projects.
        :return:
        """
        # Security check
        RightsBO.user_wants(self.session, current_user_id,
                            Action.ADMINISTRATE, self.prj_id)
        RightsBO.user_wants(self.session, current_user_id,
                            Action.ADMINISTRATE, self.src_prj_id)
        # OK
        prj = self.session.query(Project).get(self.prj_id)
        assert prj is not None
        src_prj = self.session.query(Project).get(self.src_prj_id)
        assert src_prj is not None

        logger.info("Validating Merge of '%s'", prj.title)
        ret = MergeRsp()
        errs = self._verify_possible(prj, src_prj)
        ret.errors = errs
        # Exit if errors or dry run
        if self.dry_run or len(errs) > 0:
            return ret

        logger.info("Remaps: %s", self.remap_operations)
        # Go for real, now that we know it's neither a dry run nor in error
        logger.info("Starting Merge of '%s'", prj.title)
        self._do_merge(prj)
        self.session.commit()
        # Recompute stats and so on
        ProjectBO.do_after_load(self.session, prj_id=self.prj_id)
        self.session.commit()
        return ret

    def _verify_possible(self, dest_prj: Project, src_prj: Project) -> List[str]:
        """
        Verify that the merge would not mean a loss of information.
        The mappings of src project should be preserved and copied into dest project.
        Augmented mappings should fit in the allowed maximum size for each entity.
        :param dest_prj:
        :param src_prj:
        :return: a list of problems, empty means we can proceed.
        """
        ret: List[str] = []
        dest_mappings = ProjectMapping().load_from_project(dest_prj)
        src_mappings = ProjectMapping().load_from_project(src_prj)
        a_tbl: MappedTableTypeT
        for a_tbl in MAPPED_TABLES:
            mappings_for_dest_tbl = dest_mappings.by_table[a_tbl]
            mappings_for_src_tbl = src_mappings.by_table[a_tbl]
            # Compute the new mapping and any transformations needed to get there
            aug, remaps, errs = mappings_for_dest_tbl.augmented_with(
                mappings_for_src_tbl)
            ret.extend(errs)
            if len(remaps) > 0:
                self.remap_operations[a_tbl] = remaps
            # Load future mapping
            self.dest_augmented_mappings.by_table[a_tbl].load_from(aug)

        # Also check for consistency problems around unique orig_id
        dest_parents = InBundle.fetch_existing_parents(self.ro_session,
                                                       prj_id=self.prj_id)
        src_parents = InBundle.fetch_existing_parents(self.ro_session,
                                                      prj_id=self.src_prj_id)
        for an_orig_id_container in [Sample.__tablename__,
                                     Acquisition.__tablename__]:
            # key=orig_id, value=full record
            dest_orig_ids = dest_parents[an_orig_id_container]
            src_orig_ids = src_parents[an_orig_id_container]
            common_orig_ids = set(dest_orig_ids.keys()).intersection(
                src_orig_ids.keys())
            if len(common_orig_ids) != 0:
                logger.info("Common %s orig_ids: %s", an_orig_id_container,
                            common_orig_ids)
            for common_orig_id in common_orig_ids:
                orm_diff = orm_equals(dest_orig_ids[common_orig_id],
                                      src_orig_ids[common_orig_id])
                if orm_diff:
                    msg = ("Data conflict: %s record with orig_id '%s' "
                           "is different in destination project: %s"
                           % (an_orig_id_container, common_orig_id,
                              str(orm_diff)))
                    # TODO: Should be an error?
                    logger.warning(msg)
        return ret

    def _do_merge(self, dest_prj: Project):
        """
        Real merge operation.
        """
        # Loop over involved tables and remap free columns
        for a_mapped_tbl in MAPPED_TABLES:
            remaps = self.remap_operations.get(a_mapped_tbl)
            # Do the remappings if any
            if remaps is not None:
                logger.info("Doing re-mapping in %s: %s",
                            a_mapped_tbl.__tablename__, remaps)
                ProjectBO.remap(self.session, self.src_prj_id, a_mapped_tbl,
                                remaps)

        # Collect orig_id from both projects
        dest_parents = InBundle.fetch_existing_parents(self.ro_session,
                                                       prj_id=self.prj_id)
        src_parents = InBundle.fetch_existing_parents(self.ro_session,
                                                      prj_id=self.src_prj_id)

        # Compute needed projections in order to keep orig_id uniqueness
        common_samples = self.get_ids_for_common_orig_id(
            Sample, dest_parents, src_parents)
        common_acquisitions = self.get_ids_for_common_orig_id(
            Acquisition, dest_parents, src_parents)

        # Align foreign keys, to Project, Sample and Acquisition
        for a_fk_to_proj_tbl in [Sample, Acquisition, ObjectHeader,
                                 ParticleProject]:
            upd: Query = self.session.query(a_fk_to_proj_tbl)
            if a_fk_to_proj_tbl == Sample:
                # Move (i.e. change project of) samples which are 'new' in the merged project,
                # so take all of them from the src project...
                upd = upd.filter(
                    a_fk_to_proj_tbl.projid == self.src_prj_id)  # type: ignore
                # ...but not the ones with the same orig_id, which are presumably equal.
                upd = upd.filter(
                    Sample.sampleid != all_(list(common_samples.keys())))
                # And update the column
                upd_values = {'projid': self.prj_id}
            elif a_fk_to_proj_tbl == Acquisition:
                # Acquisitions which were created, in source, under new samples, will 'follow'
                # them during the above move, thanks to the FK on acq_sample_id.
                # BUT some acquisitions were potentially created in the source project, inside
                # forked samples. They need to be attached to the corresponding dest (self) sample.
                if len(common_samples) > 0:
                    # Build a CTE with values for the update
                    smp_cte = values_cte("upd_smp", ("src_id", "dst_id"),
                                         [(k, v) for k, v in common_samples.items()])
                    smp_subqry = self.session.query(smp_cte.c.column2).filter(
                        smp_cte.c.column1 == Acquisition.acq_sample_id)
                    upd_values = {
                        'acq_sample_id':
                            func.coalesce(
                                smp_subqry.scalar_subquery(),  # type: ignore
                                Acquisition.acq_sample_id)
                    }
                    upd = upd.filter(Acquisition.acq_sample_id == any_(
                        list(common_samples.keys())))
                    # upd = upd.filter(Acquisition.acquisid != all_(list(common_acquisitions.keys())))
                if len(common_samples) == 0:
                    # Nothing to do. There were only new samples, all of them moved to self.
                    continue
            elif a_fk_to_proj_tbl == ObjectHeader:
                # Generated SQL looks like:
                #   with upd_acq (src_id, dst_id) as (values (5,6), (7,8))
                #   update obj_head
                #      set acquisid = coalesce((select dst_id from upd_acq where acquisid=src_id), acquisid)
                #    where acquisid in (select src_id from upd_acq)
                if len(common_acquisitions) > 0:
                    # Object must follow its acquisition
                    acq_cte = values_cte("upd_acq", ("src_id", "dst_id"),
                                         [(k, v) for k, v in common_acquisitions.items()])
                    acq_subqry = self.session.query(acq_cte.c.column2).filter(
                        acq_cte.c.column1 == ObjectHeader.acquisid)
                    upd_values = {
                        'acquisid':
                            func.coalesce(
                                acq_subqry.scalar_subquery(),  # type:ignore
                                ObjectHeader.acquisid)
                    }
                    upd = upd.filter(ObjectHeader.acquisid == any_(
                        list(common_acquisitions.keys())))
                if len(common_acquisitions) == 0:
                    # Nothing to do. There were only new acquisitions, all of them moved to self.
                    continue
            else:
                # For Particle project
                upd = upd.filter(
                    ParticleProject.projid == self.src_prj_id)  # type: ignore
                upd_values = {'projid': self.prj_id}
            rowcount = upd.update(values=upd_values, synchronize_session=False)
            table_name = a_fk_to_proj_tbl.__tablename__  # type: ignore
            logger.info("Update in %s: %s rows", table_name, rowcount)
        # Acquisition & twin Process have followed their enclosing Sample

        # Remove the parents which are duplicates from the orig_id point of view
        for a_fk_to_proj_tbl in [Acquisition, Sample]:
            to_del: Query = self.session.query(a_fk_to_proj_tbl)
            if a_fk_to_proj_tbl == Acquisition:
                # Remove conflicting acquisitions, they should be empty?
                to_del = to_del.filter(Acquisition.acquisid == any_(
                    list(common_acquisitions.keys())))  # type: ignore
            elif a_fk_to_proj_tbl == Sample:
                # Remove conflicting samples
                to_del = to_del.filter(Sample.sampleid == any_(
                    list(common_samples.keys())))  # type: ignore
            rowcount = to_del.delete(synchronize_session=False)
            table_name = a_fk_to_proj_tbl.__tablename__  # type: ignore
            logger.info("Delete in %s: %s rows", table_name, rowcount)

        self.dest_augmented_mappings.write_to_project(dest_prj)
        ProjectPrivilegeBO.generous_merge_into(self.session, self.prj_id,
                                               self.src_prj_id)
        # Completely erase the source project
        ProjectBO.delete(self.session, self.src_prj_id)

    @staticmethod
    def get_ids_for_common_orig_id(a_parent_class, dest_parents, src_parents) -> \
            Dict[Union[SampleIDT, AcquisitionIDT], Union[SampleIDT, AcquisitionIDT]]:
        """
        Return a mapping between IDs, for resolving colliding orig_id.
        E.g. sample 'moose2015_ge_leg2_026' is present in source with ID 15482
        and also in destination with ID 84678 -> return {15482: 84678},
        to read as 15482->84678.
        :param a_parent_class: Sample/Acquisition
        :param dest_parents:
        :param src_parents:
        :return:
        """
        ret = {}
        dst_orig_ids = dest_parents[a_parent_class.__tablename__]
        src_orig_ids = src_parents[a_parent_class.__tablename__]
        common_orig_ids = set(dst_orig_ids.keys()).intersection(
            src_orig_ids.keys())
        for a_common_orig_id in common_orig_ids:
            src_orig_id = src_orig_ids[a_common_orig_id].pk()
            dst_orig_id = dst_orig_ids[a_common_orig_id].pk()
            ret[src_orig_id] = dst_orig_id
        return ret
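# Hypothetical usage sketch (the IDs and user are made up; MergeRsp.errors drives
# the flow): a dry run first collects conflicts without touching the DB, then the
# real merge runs only if the dry run reported no errors.
#
#   rsp = MergeService(prj_id=1234, src_prj_id=5678, dry_run=True).run(current_user_id=42)
#   if not rsp.errors:
#       MergeService(prj_id=1234, src_prj_id=5678, dry_run=False).run(current_user_id=42)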