def get_schema(self):
    """Build the schema of this resource from the ORM class and the translator.

    Combines the basic schema of the corresponding AiiDA ORM class with the
    column order and additional per-field info declared on this translator
    (``self._schema_projections``).

    :return: dict with keys ``fields`` (field name -> schema info) and
        ``ordering`` (list of field names in display order)
    :raises KeyError: if a projected field is missing from the ORM (or User)
        schema, or has no entry in the translator's ``additional_info``
    :raises ConfigurationError: if no column order is defined for this resource
    """
    # Construct the full class string
    class_string = 'aiida.orm.' + self._aiida_type
    # Load the corresponding ORM class
    orm_class = get_object_from_string(class_string)
    # Construct the json object to be returned
    basic_schema = orm_class.get_schema()
    schema = {}
    ordering = []
    # Get additional info and column order from the translator class
    # and combine it with the basic schema
    if len(self._schema_projections["column_order"]) > 0:
        for field in self._schema_projections["column_order"]:
            # basic schema
            if field in basic_schema.keys():
                schema[field] = basic_schema[field]
            else:
                # Note: if the column name starts with user_*, get the schema
                # information from the User class. Added mainly to handle the
                # user_email case.
                # TODO need to improve
                field_parts = field.split("_")
                if field_parts[0] == "user" and field != "user_id" and len(
                        field_parts) > 1:
                    from aiida.orm.user import User
                    user_schema = User.get_schema()
                    if field_parts[1] in user_schema.keys():
                        schema[field] = user_schema[field_parts[1]]
                    else:
                        raise KeyError(
                            "{} is not present in user schema".format(
                                field))
                else:
                    raise KeyError(
                        "{} is not present in ORM basic schema".format(
                            field))
            # additional info defined in the translator class; every projected
            # field is required to have an entry here
            if field in self._schema_projections["additional_info"]:
                schema[field].update(
                    self._schema_projections["additional_info"][field])
            else:
                raise KeyError(
                    "{} is not present in default projection additional info"
                    .format(field))
        # order
        ordering = self._schema_projections["column_order"]
    else:
        raise ConfigurationError(
            "Define column order to get schema for {}".format(
                self._aiida_type))
    return dict(fields=schema, ordering=ordering)
def _get_config(config_file):
    """Return the caching configuration for the current profile.

    Reads ``config_file`` (YAML), returns ``DEFAULT_CONFIG`` when the file or
    the profile section is missing, validates all keys against
    ``DEFAULT_CONFIG``, fills in defaults for absent keys and resolves the
    class strings of the enabled/disabled lists into actual objects.

    :param config_file: path to the ``cache_config.yml`` file
    :return: validated configuration dictionary
    :raises ValueError: if the configuration contains an unknown key
    :raises ConfigurationError: if a class listed in the config cannot be loaded
    """
    try:
        with open(config_file, 'r') as f:
            # Use safe_load: the cache config must never be able to construct
            # arbitrary Python objects (yaml.load without an explicit Loader
            # is also deprecated since PyYAML 5.1).
            config = yaml.safe_load(f)[get_current_profile()]
    # no config file, or no config for this profile
    except (OSError, IOError, KeyError):
        return DEFAULT_CONFIG

    # validate configuration
    for key in config:
        if key not in DEFAULT_CONFIG:
            raise ValueError(
                "Configuration error: Invalid key '{}' in cache_config.yml".
                format(key))

    # add defaults where config is missing
    for key, default_config in DEFAULT_CONFIG.items():
        config[key] = config.get(key, default_config)

    # load classes
    try:
        for key in [config_keys.enabled, config_keys.disabled]:
            config[key] = [get_object_from_string(c) for c in config[key]]
    except ValueError as err:
        raise_from(
            ConfigurationError(
                "Unknown class given in 'cache_config.yml': '{}'".format(err)),
            err)
    return config
def setup_pseudo_family(command_name, folder, group_name, group_description):
    """Invoke the given upload-family CLI command to create a pseudo family.

    The command is resolved from its import path and executed with stdout
    silenced; existing-family checks and dry-run mode are disabled.
    """
    upload_family_command = get_object_from_string(command_name)
    # Discard everything the command prints by redirecting stdout to /dev/null.
    with open(os.devnull, 'w') as sink:
        with redirect_stdout(sink):
            upload_family_command.callback(
                path=folder,
                name=group_name,
                description=group_description,
                stop_if_existing=False,
                dry_run=False,
            )
def on_next_step_starting(self, workchain):
    """
    Assigns the result stored in the action in the key of the workchain context

    :param workchain: instance of WorkChain whose context should be updated
    """
    # Resolve the stored callable from its import path and evaluate it on the
    # pid recorded in the action's running info; store the result in the
    # context under this listener's key.
    callback = get_object_from_string(self._action.fn)
    workchain.ctx[self._key] = callback(self._action.running_info.pid)
def load_workchain(self):
    """Loads the workchain and sets up additional attributes."""
    # pylint: disable=attribute-defined-outside-init
    aiida.try_load_dbenv()
    # The directive argument may look like 'ClassName(...)'; keep the bare name.
    self.module_name = self.options['module']
    self.class_name = self.arguments[0].split('(')[0]
    self.workchain_name = '{}.{}'.format(self.module_name, self.class_name)
    self.workchain = get_object_from_string(self.workchain_name)
    self.workchain_spec = self.workchain.spec()
def get_schema(self):
    """Build the column schema of this resource from the DB columns.

    Starts from the ORM class's DB-column description, restricts it to the
    default projections (unless they are ``['**']``, meaning "all columns"),
    merges in custom per-field info, stringifies the Python types and turns
    foreign-key targets into REST resource names.

    NOTE(review): uses ``dict.iteritems`` — this code is Python 2 only.

    :return: dict with a single key ``columns`` mapping column name -> info
    :raises ConfigurationError: if the backend is unset or unknown
    """
    # Construct the full class string
    class_string = 'aiida.orm.' + self._aiida_type
    # Load the corresponding ORM class
    orm_class = get_object_from_string(class_string)
    # Construct the json object to be returned
    basic_schema = orm_class.get_db_columns()
    if self._default_projections == ['**']:
        schema = basic_schema  # No custom schema, take the basic one
    else:
        schema = dict([(k, basic_schema[k])
                       for k in self._default_projections
                       if k in basic_schema.keys()])

    # Convert the related_table values to the RESTAPI resources
    # (orm class/db table ==> RESTapi resource)
    def table2resource(table_name):
        # TODO Consider ways to make this function backend independent (one
        # idea would be to go from table name to aiida class name which is
        # univoque)
        # The slicing strips the backend-specific table-name prefix
        # ('db' for Django, 'db_' + lowercase for SQLA) before singularizing.
        if BACKEND == BACKEND_DJANGO:
            (spam, resource_name) = issingular(table_name[2:].lower())
        elif BACKEND == BACKEND_SQLA:
            (spam, resource_name) = issingular(table_name[5:])
        elif BACKEND is None:
            raise ConfigurationError("settings.BACKEND has not been set.\n"
                                     "Hint: Have you called "
                                     "aiida.load_dbenv?")
        else:
            raise ConfigurationError("Unknown settings.BACKEND: {}".format(
                BACKEND))
        return resource_name

    for k, v in schema.iteritems():
        # Add custom fields to the column dictionaries
        if 'fields' in self.custom_schema:
            if k in self.custom_schema['fields'].keys():
                schema[k].update(self.custom_schema['fields'][k])
        # Convert python type values into strings; the slice strips the
        # surrounding "<type '...'>" repr decoration
        schema[k]['type'] = str(schema[k]['type'])[7:-2]
        # Construct the 'related resource' field from the 'related_table'
        # field
        if v['is_foreign_key'] == True:
            schema[k]['related_resource'] = table2resource(
                schema[k].pop('related_table'))
    return dict(columns=schema)
def _apply_single_strain(structure: orm.StructureData, strain_kind: orm.Str,
                         strain_parameters: orm.Str,
                         strength_value: orm.Float) -> orm.StructureData:
    """
    Applies a specific strain (kind, parameters, and value) to the given
    structure, and returns the strained structure.
    """
    # Resolve the strain class and its parameter set from their registry paths.
    strain_cls = get_object_from_string('strain.structure.' +
                                        strain_kind.value)
    strain_kwargs = get_object_from_string('strain.parameter.' +
                                           strain_parameters.value)
    strain = strain_cls(**strain_kwargs)

    # Strain is applied on the pymatgen representation of the structure.
    strained_pmg_structure = strain.apply(structure.get_pymatgen_structure(),
                                          strength_value.value)

    strained_structure = orm.StructureData()
    strained_structure.set_pymatgen(strained_pmg_structure)
    return strained_structure
def initialize(self):
    """Set internal attributes of the class.

    Includes importing the process class.
    """
    # pylint: disable=attribute-defined-outside-init
    from aiida.manage.configuration import load_profile
    load_profile()

    # The directive argument may carry a '(...)' suffix; keep the bare name.
    self.module_name = self.options['module']
    self.class_name = self.arguments[0].split('(')[0]
    self.process_name = '{}.{}'.format(self.module_name, self.class_name)

    process_class = get_object_from_string(self.process_name)
    self.process = process_class
    self.process_spec = process_class.spec()
def on_next_step_starting(self, workchain):
    """
    Appends the result stored in the action in the key of the workchain context

    :param workchain: instance of WorkChain whose context should be updated
    """
    callback = get_object_from_string(self._action.fn)
    result = callback(self._action.running_info.pid)

    key = self._key
    # Appending only makes sense if any pre-existing entry is list-like.
    if key in workchain.ctx:
        if not isinstance(workchain.ctx[key], MutableSequence):
            raise TypeError(
                "You are trying to append to an existing key that is not a list"
            )
    workchain.ctx.setdefault(key, []).append(result)
def initialize(self):
    """Set internal attributes of the class.

    Includes importing the process class.
    """
    # pylint: disable=attribute-defined-outside-init
    load_profile()

    # The directive argument may carry a '(...)' suffix; keep the bare name.
    self.module_name = self.options['module']
    self.class_name = self.arguments[0].split('(')[0]
    self.process_name = f'{self.module_name}.{self.class_name}'

    process_class = get_object_from_string(self.process_name)
    self.process = process_class
    try:
        self.process_spec = process_class.spec()
    except Exception as exc:
        # Re-raise with the offending process name for easier debugging.
        raise RuntimeError(
            f"Error while building the spec for process '{self.process_name}': '{exc!r}.'"
        ) from exc
def initialize(self):
    """Set internal attributes of the class.

    Includes importing the process class.
    """
    # pylint: disable=attribute-defined-outside-init
    from aiida.manage.configuration import load_profile
    load_profile()

    # The directive argument may carry a '(...)' suffix; keep the bare name.
    self.module_name = self.options['module']
    self.class_name = self.arguments[0].split('(')[0]
    self.process_name = self.module_name + '.' + self.class_name

    process_class = get_object_from_string(self.process_name)
    self.process = process_class
    try:
        self.process_spec = process_class.spec()
    except Exception as exc:
        # Re-raise with the offending process name for easier debugging.
        raise RuntimeError(
            "Error while building the spec for process '{}': '{!r}.'".
            format(self.process_name, exc)) from exc
def _store_entity_data(*, reader: ArchiveReaderAbstract, entity_name: str,
                       comment_mode: str, extras_mode_existing: str,
                       new_entries: Dict[str, Dict[str, dict]],
                       existing_entries: Dict[str, Dict[str, dict]],
                       foreign_ids_reverse_mappings: Dict[str, Dict[str, int]],
                       import_unique_ids_mappings: Dict[str, Dict[int, str]],
                       ret_dict: dict, session: Session):
    """Store the entity data on the AiiDA profile.

    For one entity type: records existing archive entries (merging Comments
    per ``comment_mode``), creates SQLA model instances for new entries, for
    Nodes also copies repository files and merges extras of existing nodes
    (per ``extras_mode_existing``), flushes everything through ``session``,
    and finally records the new PKs in ``foreign_ids_reverse_mappings`` and
    ``ret_dict``. ``new_entries``, ``existing_entries``, ``ret_dict`` and
    ``foreign_ids_reverse_mappings`` are mutated in place.
    """
    from aiida.backends.sqlalchemy.utils import flag_modified
    from aiida.backends.sqlalchemy.models.node import DbNode

    entity = entity_names_to_entities[entity_name]
    fields_info = reader.metadata.all_fields_info.get(entity_name, {})
    unique_identifier = reader.metadata.unique_identifiers.get(
        entity_name, None)

    pbar_base_str = f'{entity_name}s - '

    # EXISTING ENTRIES: already in the DB, only bookkeeping (and Comment
    # merging) is needed.
    if existing_entries[entity_name]:
        with get_progress_reporter()(
                total=len(existing_entries[entity_name]),
                desc=f'{pbar_base_str} existing entries') as progress:
            for import_entry_pk, entry_data in existing_entries[
                    entity_name].items():
                progress.update()
                unique_id = entry_data[unique_identifier]
                existing_entry_pk = foreign_ids_reverse_mappings[entity_name][
                    unique_id]
                import_data = dict(
                    deserialize_field(
                        k,
                        v,
                        fields_info=fields_info,
                        import_unique_ids_mappings=import_unique_ids_mappings,
                        foreign_ids_reverse_mappings=
                        foreign_ids_reverse_mappings)
                    for k, v in entry_data.items())
                # TODO COMPARE, AND COMPARE ATTRIBUTES
                if entity_name == COMMENT_ENTITY_NAME:
                    # A merged Comment may get a fresh UUID, in which case it
                    # is re-routed to the "new entries" bucket.
                    new_entry_uuid = merge_comment(import_data, comment_mode)
                    if new_entry_uuid is not None:
                        entry_data[unique_identifier] = new_entry_uuid
                        new_entries[entity_name][import_entry_pk] = entry_data

                if entity_name not in ret_dict:
                    ret_dict[entity_name] = {'new': [], 'existing': []}
                ret_dict[entity_name]['existing'].append(
                    (import_entry_pk, existing_entry_pk))
                # print(' `-> WARNING: NO DUPLICITY CHECK DONE!')
                # CHECK ALSO FILES!

    # Store all objects for this model in a list, and store them all in once
    # at the end.
    objects_to_create = []
    # In the following list we add the objects to be updated
    objects_to_update = []
    # This is needed later to associate the import entry with the new pk
    import_new_entry_pks = {}

    # NEW ENTRIES
    for import_entry_pk, entry_data in new_entries[entity_name].items():
        unique_id = entry_data[unique_identifier]
        import_data = dict(
            deserialize_field(
                k,
                v,
                fields_info=fields_info,
                import_unique_ids_mappings=import_unique_ids_mappings,
                foreign_ids_reverse_mappings=foreign_ids_reverse_mappings)
            for k, v in entry_data.items())

        # We convert the Django fields to SQLA. Note that some of
        # the Django fields were converted to SQLA compatible
        # fields by the deserialize_field method. This was done
        # for optimization reasons in Django but makes them
        # compatible with the SQLA schema and they don't need any
        # further conversion.
        if entity_name in file_fields_to_model_fields:
            for file_fkey in file_fields_to_model_fields[entity_name]:
                # This is an exception because the DbLog model defines the
                # `_metadata` column instead of the `metadata` column used in
                # the Django model. This is because the SqlAlchemy model base
                # class already has a metadata attribute that cannot be
                # overridden. For consistency, the `DbLog` class however
                # expects the `metadata` keyword in its constructor, so we
                # should ignore the mapping here
                if entity_name == LOG_ENTITY_NAME and file_fkey == 'metadata':
                    continue

                model_fkey = file_fields_to_model_fields[entity_name][
                    file_fkey]
                if model_fkey in import_data:
                    continue
                import_data[model_fkey] = import_data[file_fkey]
                import_data.pop(file_fkey, None)

        db_entity = get_object_from_string(
            entity_names_to_sqla_schema[entity_name])
        objects_to_create.append(db_entity(**import_data))
        import_new_entry_pks[unique_id] = import_entry_pk

    if entity_name == NODE_ENTITY_NAME:
        # Before storing entries in the DB, I store the files (if these are
        # nodes). Note: only for new entries!
        uuids_to_create = [obj.uuid for obj in objects_to_create]
        _copy_node_repositories(uuids_to_create=uuids_to_create,
                                reader=reader)

        # For the existing nodes that are also in the imported list we also
        # update their extras if necessary
        if existing_entries[entity_name]:
            with get_progress_reporter()(
                    total=len(existing_entries[entity_name]),
                    desc='Updating existing node extras') as progress:
                import_existing_entry_pks = {
                    entry_data[unique_identifier]: import_entry_pk
                    for import_entry_pk, entry_data in
                    existing_entries[entity_name].items()
                }
                for node in session.query(DbNode).filter(
                        DbNode.uuid.in_(import_existing_entry_pks)).all():
                    import_entry_uuid = str(node.uuid)
                    import_entry_pk = import_existing_entry_pks[
                        import_entry_uuid]

                    pbar_node_base_str = f"{pbar_base_str}UUID={import_entry_uuid.split('-')[0]} - "
                    progress.set_description_str(f'{pbar_node_base_str}Extras',
                                                 refresh=False)
                    progress.update()

                    old_extras = node.extras.copy()
                    extras = existing_entries[entity_name][str(
                        import_entry_pk)].get('extras', {})

                    new_extras = merge_extras(node.extras, extras,
                                              extras_mode_existing)
                    if new_extras != old_extras:
                        node.extras = new_extras
                        # Tell SQLAlchemy the (mutable JSON) column changed so
                        # it is included in the flush.
                        flag_modified(node, 'extras')
                        objects_to_update.append(node)

    # Store them all in once; However, the PK are not set in this way...
    if objects_to_create:
        session.add_all(objects_to_create)
    if objects_to_update:
        session.add_all(objects_to_update)

    session.flush()

    if not import_new_entry_pks:
        return

    with get_progress_reporter()(
            total=len(import_new_entry_pks),
            desc=f'{pbar_base_str} storing new') as progress:
        just_saved = {}

        # Query back the freshly flushed rows to learn their assigned PKs.
        builder = QueryBuilder()
        builder.append(entity,
                       filters={
                           unique_identifier: {
                               'in': list(import_new_entry_pks.keys())
                           }
                       },
                       project=[unique_identifier, 'id'])
        for entry in builder.iterall():
            progress.update()
            just_saved.update({entry[0]: entry[1]})

        # Now I have the PKs, print the info
        # Moreover, add newly created Nodes to foreign_ids_reverse_mappings
        for unique_id, new_pk in just_saved.items():
            from uuid import UUID
            if isinstance(unique_id, UUID):
                unique_id = str(unique_id)
            import_entry_pk = import_new_entry_pks[unique_id]
            foreign_ids_reverse_mappings[entity_name][unique_id] = new_pk
            if entity_name not in ret_dict:
                ret_dict[entity_name] = {'new': [], 'existing': []}
            ret_dict[entity_name]['new'].append((import_entry_pk, new_pk))
def import_data_sqla(in_path, group=None, ignore_unknown_nodes=False, extras_mode_existing='kcl', extras_mode_new='import', comment_mode='newest', silent=False, **kwargs): """Import exported AiiDA archive to the AiiDA database and repository. Specific for the SQLAlchemy backend. If ``in_path`` is a folder, calls extract_tree; otherwise, tries to detect the compression format (zip, tar.gz, tar.bz2, ...) and calls the correct function. :param in_path: the path to a file or folder that can be imported in AiiDA. :type in_path: str :param group: Group wherein all imported Nodes will be placed. :type group: :py:class:`~aiida.orm.groups.Group` :param extras_mode_existing: 3 letter code that will identify what to do with the extras import. The first letter acts on extras that are present in the original node and not present in the imported node. Can be either: 'k' (keep it) or 'n' (do not keep it). The second letter acts on the imported extras that are not present in the original node. Can be either: 'c' (create it) or 'n' (do not create it). The third letter defines what to do in case of a name collision. Can be either: 'l' (leave the old value), 'u' (update with a new value), 'd' (delete the extra), or 'a' (ask what to do if the content is different). :type extras_mode_existing: str :param extras_mode_new: 'import' to import extras of new nodes or 'none' to ignore them. :type extras_mode_new: str :param comment_mode: Comment import modes (when same UUIDs are found). Can be either: 'newest' (will keep the Comment with the most recent modification time (mtime)) or 'overwrite' (will overwrite existing Comments with the ones from the import file). :type comment_mode: str :param silent: suppress progress bar and summary. :type silent: bool :return: New and existing Nodes and Links. :rtype: dict :raises `~aiida.tools.importexport.common.exceptions.ImportValidationError`: if parameters or the contents of `metadata.json` or `data.json` can not be validated. 
:raises `~aiida.tools.importexport.common.exceptions.CorruptArchive`: if the provided archive at ``in_path`` is corrupted. :raises `~aiida.tools.importexport.common.exceptions.IncompatibleArchiveVersionError`: if the provided archive's export version is not equal to the export version of AiiDA at the moment of import. :raises `~aiida.tools.importexport.common.exceptions.ArchiveImportError`: if there are any internal errors when importing. :raises `~aiida.tools.importexport.common.exceptions.ImportUniquenessError`: if a new unique entity can not be created. """ from aiida.backends.sqlalchemy.models.node import DbNode, DbLink from aiida.backends.sqlalchemy.utils import flag_modified # This is the export version expected by this function expected_export_version = StrictVersion(EXPORT_VERSION) # The returned dictionary with new and existing nodes and links ret_dict = {} # Initial check(s) if group: if not isinstance(group, Group): raise exceptions.ImportValidationError( 'group must be a Group entity') elif not group.is_stored: group.store() if silent: logging.disable(level=logging.CRITICAL) ################ # EXTRACT DATA # ################ # The sandbox has to remain open until the end with SandboxFolder() as folder: if os.path.isdir(in_path): extract_tree(in_path, folder) else: if tarfile.is_tarfile(in_path): extract_tar(in_path, folder, silent=silent, nodes_export_subfolder=NODES_EXPORT_SUBFOLDER, **kwargs) elif zipfile.is_zipfile(in_path): extract_zip(in_path, folder, silent=silent, nodes_export_subfolder=NODES_EXPORT_SUBFOLDER, **kwargs) else: raise exceptions.ImportValidationError( 'Unable to detect the input file format, it is neither a ' 'tar file, nor a (possibly compressed) zip file.') if not folder.get_content_list(): raise exceptions.CorruptArchive( 'The provided file/folder ({}) is empty'.format(in_path)) try: IMPORT_LOGGER.debug('CACHING metadata.json') with open(folder.get_abs_path('metadata.json'), encoding='utf8') as fhandle: metadata = 
json.load(fhandle) IMPORT_LOGGER.debug('CACHING data.json') with open(folder.get_abs_path('data.json'), encoding='utf8') as fhandle: data = json.load(fhandle) except IOError as error: raise exceptions.CorruptArchive( 'Unable to find the file {} in the import file or folder'. format(error.filename)) ###################### # PRELIMINARY CHECKS # ###################### export_version = StrictVersion(str(metadata['export_version'])) if export_version != expected_export_version: msg = 'Export file version is {}, can import only version {}'\ .format(metadata['export_version'], expected_export_version) if export_version < expected_export_version: msg += "\nUse 'verdi export migrate' to update this export file." else: msg += '\nUpdate your AiiDA version in order to import this file.' raise exceptions.IncompatibleArchiveVersionError(msg) start_summary(in_path, comment_mode, extras_mode_new, extras_mode_existing) ################################################################### # CREATE UUID REVERSE TABLES AND CHECK IF # # I HAVE ALL NODES FOR THE LINKS # ################################################################### IMPORT_LOGGER.debug( 'CHECKING IF NODES FROM LINKS ARE IN DB OR ARCHIVE...') linked_nodes = set( chain.from_iterable( (l['input'], l['output']) for l in data['links_uuid'])) group_nodes = set(chain.from_iterable(data['groups_uuid'].values())) # Check that UUIDs are valid linked_nodes = set(x for x in linked_nodes if validate_uuid(x)) group_nodes = set(x for x in group_nodes if validate_uuid(x)) import_nodes_uuid = set() for value in data['export_data'].get(NODE_ENTITY_NAME, {}).values(): import_nodes_uuid.add(value['uuid']) unknown_nodes = linked_nodes.union(group_nodes) - import_nodes_uuid if unknown_nodes and not ignore_unknown_nodes: raise exceptions.DanglingLinkError( 'The import file refers to {} nodes with unknown UUID, therefore it cannot be imported. Either first ' 'import the unknown nodes, or export also the parents when exporting. 
The unknown UUIDs are:\n' ''.format(len(unknown_nodes)) + '\n'.join('* {}'.format(uuid) for uuid in unknown_nodes)) ################################### # DOUBLE-CHECK MODEL DEPENDENCIES # ################################### # The entity import order. It is defined by the database model relationships. entity_order = [ USER_ENTITY_NAME, COMPUTER_ENTITY_NAME, NODE_ENTITY_NAME, GROUP_ENTITY_NAME, LOG_ENTITY_NAME, COMMENT_ENTITY_NAME ] # I make a new list that contains the entity names: # eg: ['User', 'Computer', 'Node', 'Group'] for import_field_name in metadata['all_fields_info']: if import_field_name not in entity_order: raise exceptions.ImportValidationError( "You are trying to import an unknown model '{}'!".format( import_field_name)) for idx, entity_name in enumerate(entity_order): dependencies = [] # for every field, I checked the dependencies given as value for key requires for field in metadata['all_fields_info'][entity_name].values(): try: dependencies.append(field['requires']) except KeyError: # (No ForeignKey) pass for dependency in dependencies: if dependency not in entity_order[:idx]: raise exceptions.ArchiveImportError( 'Entity {} requires {} but would be loaded first; stopping...' .format(entity_name, dependency)) ################################################### # CREATE IMPORT DATA DIRECT UNIQUE_FIELD MAPPINGS # ################################################### # This is nested dictionary of entity_name:{id:uuid} # to map one id (the pk) to a different one. 
# One of the things to remove for v0.4 # { # 'Node': {2362: '82a897b5-fb3a-47d7-8b22-c5fe1b4f2c14', # 2363: 'ef04aa5d-99e7-4bfd-95ef-fe412a6a3524', 2364: '1dc59576-af21-4d71-81c2-bac1fc82a84a'}, # 'User': {1: 'aiida@localhost'} # } IMPORT_LOGGER.debug('CREATING PK-2-UUID/EMAIL MAPPING...') import_unique_ids_mappings = {} # Export data since v0.3 contains the keys entity_name for entity_name, import_data in data['export_data'].items(): # Again I need the entity_name since that's what's being stored since 0.3 if entity_name in metadata['unique_identifiers']: # I have to reconvert the pk to integer import_unique_ids_mappings[entity_name] = { int(k): v[metadata['unique_identifiers'][entity_name]] for k, v in import_data.items() } ############### # IMPORT DATA # ############### # DO ALL WITH A TRANSACTION import aiida.backends.sqlalchemy session = aiida.backends.sqlalchemy.get_scoped_session() try: foreign_ids_reverse_mappings = {} new_entries = {} existing_entries = {} IMPORT_LOGGER.debug('GENERATING LIST OF DATA...') # Instantiate progress bar progress_bar = get_progress_bar(total=1, leave=False, disable=silent) pbar_base_str = 'Generating list of data - ' # Get total entities from data.json # To be used with progress bar number_of_entities = 0 # I first generate the list of data for entity_name in entity_order: entity = entity_names_to_entities[entity_name] # I get the unique identifier, since v0.3 stored under entity_name unique_identifier = metadata['unique_identifiers'].get( entity_name, None) # so, new_entries. 
Also, since v0.3 it makes more sense to use the entity_name new_entries[entity_name] = {} existing_entries[entity_name] = {} foreign_ids_reverse_mappings[entity_name] = {} # Not necessarily all models are exported if entity_name in data['export_data']: IMPORT_LOGGER.debug(' %s...', entity_name) progress_bar.set_description_str(pbar_base_str + entity_name, refresh=False) number_of_entities += len(data['export_data'][entity_name]) if unique_identifier is not None: import_unique_ids = set( v[unique_identifier] for v in data['export_data'][entity_name].values()) relevant_db_entries = {} if import_unique_ids: builder = QueryBuilder() builder.append(entity, filters={ unique_identifier: { 'in': import_unique_ids } }, project='*') if builder.count(): progress_bar = get_progress_bar( total=builder.count(), disable=silent) for object_ in builder.iterall(): progress_bar.update() relevant_db_entries.update({ getattr(object_[0], unique_identifier): object_[0] }) foreign_ids_reverse_mappings[entity_name] = { k: v.pk for k, v in relevant_db_entries.items() } IMPORT_LOGGER.debug(' GOING THROUGH ARCHIVE...') imported_comp_names = set() for key, value in data['export_data'][ entity_name].items(): if entity_name == GROUP_ENTITY_NAME: # Check if there is already a group with the same name, # and if so, recreate the name orig_label = value['label'] dupl_counter = 0 while QueryBuilder().append( entity, filters={ 'label': { '==': value['label'] } }).count(): # Rename the new group value[ 'label'] = orig_label + DUPL_SUFFIX.format( dupl_counter) dupl_counter += 1 if dupl_counter == 100: raise exceptions.ImportUniquenessError( 'A group of that label ( {} ) already exists and I could not create a new ' 'one'.format(orig_label)) elif entity_name == COMPUTER_ENTITY_NAME: # The following is done for compatibility # reasons in case the export file was generated # with the Django export method. 
In Django the # metadata and the transport parameters are # stored as (unicode) strings of the serialized # JSON objects and not as simple serialized # JSON objects. if isinstance(value['metadata'], (str, bytes)): value['metadata'] = json.loads( value['metadata']) # Check if there is already a computer with the # same name in the database builder = QueryBuilder() builder.append( entity, filters={'name': { '==': value['name'] }}, project=['*'], tag='res') dupl = builder.count( ) or value['name'] in imported_comp_names dupl_counter = 0 orig_name = value['name'] while dupl: # Rename the new computer value[ 'name'] = orig_name + DUPL_SUFFIX.format( dupl_counter) builder = QueryBuilder() builder.append(entity, filters={ 'name': { '==': value['name'] } }, project=['*'], tag='res') dupl = builder.count( ) or value['name'] in imported_comp_names dupl_counter += 1 if dupl_counter == 100: raise exceptions.ImportUniquenessError( 'A computer of that name ( {} ) already exists and I could not create a ' 'new one'.format(orig_name)) imported_comp_names.add(value['name']) if value[unique_identifier] in relevant_db_entries: # Already in DB # again, switched to entity_name in v0.3 existing_entries[entity_name][key] = value else: # To be added new_entries[entity_name][key] = value else: new_entries[entity_name] = data['export_data'][ entity_name] # Progress bar - reset for import progress_bar = get_progress_bar(total=number_of_entities, disable=silent) reset_progress_bar = {} # I import data from the given model for entity_name in entity_order: entity = entity_names_to_entities[entity_name] fields_info = metadata['all_fields_info'].get(entity_name, {}) unique_identifier = metadata['unique_identifiers'].get( entity_name, '') # Progress bar initialization - Model if reset_progress_bar: progress_bar = get_progress_bar( total=reset_progress_bar['total'], disable=silent) progress_bar.n = reset_progress_bar['n'] reset_progress_bar = {} pbar_base_str = '{}s - '.format(entity_name) 
progress_bar.set_description_str(pbar_base_str + 'Initializing', refresh=True) # EXISTING ENTRIES if existing_entries[entity_name]: # Progress bar update - Model progress_bar.set_description_str( pbar_base_str + '{} existing entries'.format( len(existing_entries[entity_name])), refresh=True) for import_entry_pk, entry_data in existing_entries[ entity_name].items(): unique_id = entry_data[unique_identifier] existing_entry_pk = foreign_ids_reverse_mappings[ entity_name][unique_id] import_data = dict( deserialize_field(k, v, fields_info=fields_info, import_unique_ids_mappings= import_unique_ids_mappings, foreign_ids_reverse_mappings= foreign_ids_reverse_mappings) for k, v in entry_data.items()) # TODO COMPARE, AND COMPARE ATTRIBUTES if entity_name == COMMENT_ENTITY_NAME: new_entry_uuid = merge_comment(import_data, comment_mode) if new_entry_uuid is not None: entry_data[unique_identifier] = new_entry_uuid new_entries[entity_name][ import_entry_pk] = entry_data if entity_name not in ret_dict: ret_dict[entity_name] = {'new': [], 'existing': []} ret_dict[entity_name]['existing'].append( (import_entry_pk, existing_entry_pk)) IMPORT_LOGGER.debug('Existing %s: %s (%s->%s)', entity_name, unique_id, import_entry_pk, existing_entry_pk) # Store all objects for this model in a list, and store them # all in once at the end. 
objects_to_create = list() # In the following list we add the objects to be updated objects_to_update = list() # This is needed later to associate the import entry with the new pk import_new_entry_pks = dict() # NEW ENTRIES if new_entries[entity_name]: # Progress bar update - Model progress_bar.set_description_str( pbar_base_str + '{} new entries'.format(len(new_entries[entity_name])), refresh=True) for import_entry_pk, entry_data in new_entries[ entity_name].items(): unique_id = entry_data[unique_identifier] import_data = dict( deserialize_field(k, v, fields_info=fields_info, import_unique_ids_mappings= import_unique_ids_mappings, foreign_ids_reverse_mappings= foreign_ids_reverse_mappings) for k, v in entry_data.items()) # We convert the Django fields to SQLA. Note that some of # the Django fields were converted to SQLA compatible # fields by the deserialize_field method. This was done # for optimization reasons in Django but makes them # compatible with the SQLA schema and they don't need any # further conversion. if entity_name in file_fields_to_model_fields: for file_fkey in file_fields_to_model_fields[ entity_name]: # This is an exception because the DbLog model defines the `_metadata` column instead of the # `metadata` column used in the Django model. This is because the SqlAlchemy model base # class already has a metadata attribute that cannot be overridden. 
For consistency, the # `DbLog` class however expects the `metadata` keyword in its constructor, so we should # ignore the mapping here if entity_name == LOG_ENTITY_NAME and file_fkey == 'metadata': continue model_fkey = file_fields_to_model_fields[ entity_name][file_fkey] if model_fkey in import_data: continue import_data[model_fkey] = import_data[file_fkey] import_data.pop(file_fkey, None) db_entity = get_object_from_string( entity_names_to_sqla_schema[entity_name]) objects_to_create.append(db_entity(**import_data)) import_new_entry_pks[unique_id] = import_entry_pk if entity_name == NODE_ENTITY_NAME: IMPORT_LOGGER.debug( 'STORING NEW NODE REPOSITORY FILES & ATTRIBUTES...') # NEW NODES for object_ in objects_to_create: import_entry_uuid = object_.uuid import_entry_pk = import_new_entry_pks[ import_entry_uuid] # Progress bar initialization - Node progress_bar.update() pbar_node_base_str = pbar_base_str + 'UUID={} - '.format( import_entry_uuid.split('-')[0]) # Before storing entries in the DB, I store the files (if these are nodes). # Note: only for new entries! subfolder = folder.get_subfolder( os.path.join(NODES_EXPORT_SUBFOLDER, export_shard_uuid(import_entry_uuid))) if not subfolder.exists(): raise exceptions.CorruptArchive( 'Unable to find the repository folder for Node with UUID={} in the exported ' 'file'.format(import_entry_uuid)) destdir = RepositoryFolder( section=Repository._section_name, uuid=import_entry_uuid) # Replace the folder, possibly destroying existing previous folders, and move the files # (faster if we are on the same filesystem, and in any case the source is a SandboxFolder) progress_bar.set_description_str(pbar_node_base_str + 'Repository', refresh=True) destdir.replace_with_folder(subfolder.abspath, move=True, overwrite=True) # For Nodes, we also have to store Attributes! 
IMPORT_LOGGER.debug('STORING NEW NODE ATTRIBUTES...') progress_bar.set_description_str(pbar_node_base_str + 'Attributes', refresh=True) # Get attributes from import file try: object_.attributes = data['node_attributes'][str( import_entry_pk)] except KeyError: raise exceptions.CorruptArchive( 'Unable to find attribute info for Node with UUID={}' .format(import_entry_uuid)) # For DbNodes, we also have to store extras if extras_mode_new == 'import': IMPORT_LOGGER.debug('STORING NEW NODE EXTRAS...') progress_bar.set_description_str( pbar_node_base_str + 'Extras', refresh=True) # Get extras from import file try: extras = data['node_extras'][str( import_entry_pk)] except KeyError: raise exceptions.CorruptArchive( 'Unable to find extra info for Node with UUID={}' .format(import_entry_uuid)) # TODO: remove when aiida extras will be moved somewhere else # from here extras = { key: value for key, value in extras.items() if not key.startswith('_aiida_') } if object_.node_type.endswith('code.Code.'): extras = { key: value for key, value in extras.items() if not key == 'hidden' } # till here object_.extras = extras elif extras_mode_new == 'none': IMPORT_LOGGER.debug('SKIPPING NEW NODE EXTRAS...') else: raise exceptions.ImportValidationError( "Unknown extras_mode_new value: {}, should be either 'import' or 'none'" ''.format(extras_mode_new)) # EXISTING NODES (Extras) IMPORT_LOGGER.debug('UPDATING EXISTING NODE EXTRAS...') import_existing_entry_pks = { entry_data[unique_identifier]: import_entry_pk for import_entry_pk, entry_data in existing_entries[entity_name].items() } for node in session.query(DbNode).filter( DbNode.uuid.in_(import_existing_entry_pks)).all(): import_entry_uuid = str(node.uuid) import_entry_pk = import_existing_entry_pks[ import_entry_uuid] # Progress bar initialization - Node pbar_node_base_str = pbar_base_str + 'UUID={} - '.format( import_entry_uuid.split('-')[0]) progress_bar.set_description_str(pbar_node_base_str + 'Extras', refresh=False) 
progress_bar.update() # Get extras from import file try: extras = data['node_extras'][str(import_entry_pk)] except KeyError: raise exceptions.CorruptArchive( 'Unable to find extra info for Node with UUID={}' .format(import_entry_uuid)) old_extras = node.extras.copy() # TODO: remove when aiida extras will be moved somewhere else # from here extras = { key: value for key, value in extras.items() if not key.startswith('_aiida_') } if node.node_type.endswith('code.Code.'): extras = { key: value for key, value in extras.items() if not key == 'hidden' } # till here new_extras = merge_extras(node.extras, extras, extras_mode_existing) if new_extras != old_extras: node.extras = new_extras flag_modified(node, 'extras') objects_to_update.append(node) else: # Update progress bar with new non-Node entries progress_bar.update(n=len(existing_entries[entity_name]) + len(new_entries[entity_name])) progress_bar.set_description_str(pbar_base_str + 'Storing', refresh=True) # Store them all in once; However, the PK are not set in this way... 
if objects_to_create: session.add_all(objects_to_create) if objects_to_update: session.add_all(objects_to_update) session.flush() just_saved = {} if import_new_entry_pks.keys(): reset_progress_bar = { 'total': progress_bar.total, 'n': progress_bar.n } progress_bar = get_progress_bar( total=len(import_new_entry_pks), disable=silent) builder = QueryBuilder() builder.append(entity, filters={ unique_identifier: { 'in': list(import_new_entry_pks.keys()) } }, project=[unique_identifier, 'id']) for entry in builder.iterall(): progress_bar.update() just_saved.update({entry[0]: entry[1]}) progress_bar.set_description_str(pbar_base_str + 'Done!', refresh=True) # Now I have the PKs, print the info # Moreover, add newly created Nodes to foreign_ids_reverse_mappings for unique_id, new_pk in just_saved.items(): from uuid import UUID if isinstance(unique_id, UUID): unique_id = str(unique_id) import_entry_pk = import_new_entry_pks[unique_id] foreign_ids_reverse_mappings[entity_name][ unique_id] = new_pk if entity_name not in ret_dict: ret_dict[entity_name] = {'new': [], 'existing': []} ret_dict[entity_name]['new'].append( (import_entry_pk, new_pk)) IMPORT_LOGGER.debug('N %s: %s (%s->%s)', entity_name, unique_id, import_entry_pk, new_pk) IMPORT_LOGGER.debug('STORING NODE LINKS...') import_links = data['links_uuid'] if import_links: progress_bar = get_progress_bar(total=len(import_links), disable=silent) pbar_base_str = 'Links - ' for link in import_links: # Check for dangling Links within the, supposed, self-consistent archive progress_bar.set_description_str( pbar_base_str + 'label={}'.format(link['label']), refresh=False) progress_bar.update() try: in_id = foreign_ids_reverse_mappings[NODE_ENTITY_NAME][ link['input']] out_id = foreign_ids_reverse_mappings[NODE_ENTITY_NAME][ link['output']] except KeyError: if ignore_unknown_nodes: continue raise exceptions.ImportValidationError( 'Trying to create a link with one or both unknown nodes, stopping (in_uuid={}, out_uuid={}, ' 
'label={}, type={})'.format(link['input'], link['output'], link['label'], link['type'])) # Since backend specific Links (DbLink) are not validated upon creation, we will now validate them. source = QueryBuilder().append(Node, filters={ 'id': in_id }, project='*').first()[0] target = QueryBuilder().append(Node, filters={ 'id': out_id }, project='*').first()[0] link_type = LinkType(link['type']) # Check for existence of a triple link, i.e. unique triple. # If it exists, then the link already exists, continue to next link, otherwise, validate link. if link_triple_exists(source, target, link_type, link['label']): continue try: validate_link(source, target, link_type, link['label']) except ValueError as why: raise exceptions.ImportValidationError( 'Error occurred during Link validation: {}'.format( why)) # New link session.add( DbLink(input_id=in_id, output_id=out_id, label=link['label'], type=link['type'])) if 'Link' not in ret_dict: ret_dict['Link'] = {'new': []} ret_dict['Link']['new'].append((in_id, out_id)) IMPORT_LOGGER.debug(' (%d new links...)', len(ret_dict.get('Link', {}).get('new', []))) IMPORT_LOGGER.debug('STORING GROUP ELEMENTS...') import_groups = data['groups_uuid'] if import_groups: progress_bar = get_progress_bar(total=len(import_groups), disable=silent) pbar_base_str = 'Groups - ' for groupuuid, groupnodes in import_groups.items(): # # TODO: cache these to avoid too many queries qb_group = QueryBuilder().append( Group, filters={'uuid': { '==': groupuuid }}) group_ = qb_group.first()[0] progress_bar.set_description_str( pbar_base_str + 'label={}'.format(group_.label), refresh=False) progress_bar.update() nodes_ids_to_add = [ foreign_ids_reverse_mappings[NODE_ENTITY_NAME][node_uuid] for node_uuid in groupnodes ] qb_nodes = QueryBuilder().append( Node, filters={'id': { 'in': nodes_ids_to_add }}) # Adding nodes to group avoiding the SQLA ORM to increase speed nodes_to_add = [n[0].backend_entity for n in qb_nodes.all()] 
group_.backend_entity.add_nodes(nodes_to_add, skip_orm=True) ###################################################### # Put everything in a specific group ###################################################### existing = existing_entries.get(NODE_ENTITY_NAME, {}) existing_pk = [ foreign_ids_reverse_mappings[NODE_ENTITY_NAME][v['uuid']] for v in existing.values() ] new = new_entries.get(NODE_ENTITY_NAME, {}) new_pk = [ foreign_ids_reverse_mappings[NODE_ENTITY_NAME][v['uuid']] for v in new.values() ] pks_for_group = existing_pk + new_pk # So that we do not create empty groups if pks_for_group: # If user specified a group, import all things into it if not group: from aiida.backends.sqlalchemy.models.group import DbGroup # Get an unique name for the import group, based on the current (local) time basename = timezone.localtime( timezone.now()).strftime('%Y%m%d-%H%M%S') counter = 0 group_label = basename while session.query(DbGroup).filter( DbGroup.label == group_label).count() > 0: counter += 1 group_label = '{}_{}'.format(basename, counter) if counter == 100: raise exceptions.ImportUniquenessError( "Overflow of import groups (more than 100 import groups exists with basename '{}')" ''.format(basename)) group = ImportGroup(label=group_label) session.add(group.backend_entity._dbmodel) # Adding nodes to group avoiding the SQLA ORM to increase speed builder = QueryBuilder().append( Node, filters={'id': { 'in': pks_for_group }}) progress_bar = get_progress_bar(total=len(pks_for_group), disable=silent) progress_bar.set_description_str( 'Creating import Group - Preprocessing', refresh=True) first = True nodes = [] for entry in builder.iterall(): if first: progress_bar.set_description_str( 'Creating import Group', refresh=False) first = False progress_bar.update() nodes.append(entry[0].backend_entity) group.backend_entity.add_nodes(nodes, skip_orm=True) progress_bar.set_description_str('Done (cleaning up)', refresh=True) else: IMPORT_LOGGER.debug( 'No Nodes to import, so no Group 
created, if it did not already exist' ) IMPORT_LOGGER.debug('COMMITTING EVERYTHING...') session.commit() # Finalize Progress bar close_progress_bar(leave=False) # Summarize import result_summary(ret_dict, getattr(group, 'label', None)) except: # Finalize Progress bar close_progress_bar(leave=False) result_summary({}, None) IMPORT_LOGGER.debug('Rolling back') session.rollback() raise # Reset logging level if silent: logging.disable(level=logging.NOTSET) return ret_dict
def import_data_dj(in_path,
                   group=None,
                   ignore_unknown_nodes=False,
                   extras_mode_existing='kcl',
                   extras_mode_new='import',
                   comment_mode='newest',
                   silent=False):
    """Import exported AiiDA archive to the AiiDA database and repository.

    Specific for the Django backend.
    If ``in_path`` is a folder, calls extract_tree; otherwise, tries to detect the compression format
    (zip, tar.gz, tar.bz2, ...) and calls the correct function.

    :param in_path: the path to a file or folder that can be imported in AiiDA.
    :type in_path: str

    :param group: Group wherein all imported Nodes will be placed.
    :type group: :py:class:`~aiida.orm.groups.Group`

    :param extras_mode_existing: 3 letter code that will identify what to do with the extras import.
        The first letter acts on extras that are present in the original node and not present in the imported node.
        Can be either:
        'k' (keep it) or
        'n' (do not keep it).
        The second letter acts on the imported extras that are not present in the original node.
        Can be either:
        'c' (create it) or
        'n' (do not create it).
        The third letter defines what to do in case of a name collision.
        Can be either:
        'l' (leave the old value),
        'u' (update with a new value),
        'd' (delete the extra), or
        'a' (ask what to do if the content is different).
    :type extras_mode_existing: str

    :param extras_mode_new: 'import' to import extras of new nodes or 'none' to ignore them.
    :type extras_mode_new: str

    :param comment_mode: Comment import modes (when same UUIDs are found).
        Can be either:
        'newest' (will keep the Comment with the most recent modification time (mtime)) or
        'overwrite' (will overwrite existing Comments with the ones from the import file).
    :type comment_mode: str

    :param silent: suppress prints.
    :type silent: bool

    :return: New and existing Nodes and Links.
    :rtype: dict

    :raises `~aiida.tools.importexport.common.exceptions.ImportValidationError`: if parameters or the contents of
        `metadata.json` or `data.json` can not be validated.
    :raises `~aiida.tools.importexport.common.exceptions.CorruptArchive`: if the provided archive at ``in_path`` is
        corrupted.
    :raises `~aiida.tools.importexport.common.exceptions.IncompatibleArchiveVersionError`: if the provided archive's
        export version is not equal to the export version of AiiDA at the moment of import.
    :raises `~aiida.tools.importexport.common.exceptions.ArchiveImportError`: if there are any internal errors when
        importing.
    :raises `~aiida.tools.importexport.common.exceptions.ImportUniquenessError`: if a new unique entity can not be
        created.
    """
    from django.db import transaction  # pylint: disable=import-error,no-name-in-module
    from aiida.backends.djsite.db import models

    # This is the export version expected by this function
    expected_export_version = StrictVersion(EXPORT_VERSION)

    # The returned dictionary with new and existing nodes and links
    ret_dict = {}

    # Initial check(s)
    if group:
        if not isinstance(group, Group):
            raise exceptions.ImportValidationError('group must be a Group entity')
        elif not group.is_stored:
            group.store()

    ################
    # EXTRACT DATA #
    ################
    # The sandbox has to remain open until the end
    with SandboxFolder() as folder:
        if os.path.isdir(in_path):
            extract_tree(in_path, folder)
        else:
            if tarfile.is_tarfile(in_path):
                extract_tar(in_path, folder, silent=silent, nodes_export_subfolder=NODES_EXPORT_SUBFOLDER)
            elif zipfile.is_zipfile(in_path):
                try:
                    extract_zip(in_path, folder, silent=silent, nodes_export_subfolder=NODES_EXPORT_SUBFOLDER)
                except ValueError as exc:
                    # NOTE(fix): corrected typo "occured" -> "occurred" in the user-facing message
                    print('The following problem occurred while processing the provided file: {}'.format(exc))
                    return
            else:
                raise exceptions.ImportValidationError(
                    'Unable to detect the input file format, it is neither a '
                    '(possibly compressed) tar file, nor a zip file.')

        if not folder.get_content_list():
            raise exceptions.CorruptArchive('The provided file/folder ({}) is empty'.format(in_path))
        try:
            with open(folder.get_abs_path('metadata.json'), 'r', encoding='utf8') as fhandle:
                metadata = json.load(fhandle)

            with open(folder.get_abs_path('data.json'), 'r', encoding='utf8') as fhandle:
                data = json.load(fhandle)
        except IOError as error:
            raise exceptions.CorruptArchive(
                'Unable to find the file {} in the import file or folder'.format(error.filename))

        ######################
        # PRELIMINARY CHECKS #
        ######################
        export_version = StrictVersion(str(metadata['export_version']))
        if export_version != expected_export_version:
            msg = 'Export file version is {}, can import only version {}'\
                .format(metadata['export_version'], expected_export_version)
            if export_version < expected_export_version:
                msg += "\nUse 'verdi export migrate' to update this export file."
            else:
                msg += '\nUpdate your AiiDA version in order to import this file.'

            raise exceptions.IncompatibleArchiveVersionError(msg)

        ##########################################################################
        # CREATE UUID REVERSE TABLES AND CHECK IF I HAVE ALL NODES FOR THE LINKS #
        ##########################################################################
        linked_nodes = set(chain.from_iterable((l['input'], l['output']) for l in data['links_uuid']))
        group_nodes = set(chain.from_iterable(data['groups_uuid'].values()))

        if NODE_ENTITY_NAME in data['export_data']:
            import_nodes_uuid = set(v['uuid'] for v in data['export_data'][NODE_ENTITY_NAME].values())
        else:
            import_nodes_uuid = set()

        # the combined set of linked_nodes and group_nodes was obtained from looking at all the links
        # the set of import_nodes_uuid was received from the stuff actually referred to in export_data
        unknown_nodes = linked_nodes.union(group_nodes) - import_nodes_uuid

        if unknown_nodes and not ignore_unknown_nodes:
            raise exceptions.DanglingLinkError(
                'The import file refers to {} nodes with unknown UUID, therefore it cannot be imported. Either first '
                'import the unknown nodes, or export also the parents when exporting. The unknown UUIDs are:\n'
                ''.format(len(unknown_nodes)) + '\n'.join('* {}'.format(uuid) for uuid in unknown_nodes))

        ###################################
        # DOUBLE-CHECK MODEL DEPENDENCIES #
        ###################################
        # The entity import order. It is defined by the database model relationships.
        model_order = (USER_ENTITY_NAME, COMPUTER_ENTITY_NAME, NODE_ENTITY_NAME, GROUP_ENTITY_NAME, LOG_ENTITY_NAME,
                       COMMENT_ENTITY_NAME)

        for import_field_name in metadata['all_fields_info']:
            if import_field_name not in model_order:
                raise exceptions.ImportValidationError(
                    "You are trying to import an unknown model '{}'!".format(import_field_name))

        for idx, model_name in enumerate(model_order):
            dependencies = []
            # Every field carrying a 'requires' key is a ForeignKey to another entity;
            # that entity must appear earlier in model_order or the import cannot proceed.
            for field in metadata['all_fields_info'][model_name].values():
                try:
                    dependencies.append(field['requires'])
                except KeyError:
                    # (No ForeignKey)
                    pass
            for dependency in dependencies:
                if dependency not in model_order[:idx]:
                    raise exceptions.ArchiveImportError(
                        'Model {} requires {} but would be loaded first; stopping...'.format(model_name, dependency))

        ###################################################
        # CREATE IMPORT DATA DIRECT UNIQUE_FIELD MAPPINGS #
        ###################################################
        import_unique_ids_mappings = {}
        for model_name, import_data in data['export_data'].items():
            if model_name in metadata['unique_identifiers']:
                # I have to reconvert the pk to integer
                import_unique_ids_mappings[model_name] = {
                    int(k): v[metadata['unique_identifiers'][model_name]] for k, v in import_data.items()
                }

        ###############
        # IMPORT DATA #
        ###############
        # DO ALL WITH A TRANSACTION

        # batch size for bulk create operations
        batch_size = get_config_option('db.batch_size')

        with transaction.atomic():
            foreign_ids_reverse_mappings = {}
            new_entries = {}
            existing_entries = {}

            # I first generate the list of data
            for model_name in model_order:
                cls_signature = entity_names_to_signatures[model_name]
                model = get_object_from_string(cls_signature)
                fields_info = metadata['all_fields_info'].get(model_name, {})
                unique_identifier = metadata['unique_identifiers'].get(model_name, None)

                new_entries[model_name] = {}
                existing_entries[model_name] = {}

                foreign_ids_reverse_mappings[model_name] = {}

                # Not necessarily all models are exported
                if model_name in data['export_data']:

                    # skip nodes that are already present in the DB
                    if unique_identifier is not None:
                        import_unique_ids = set(v[unique_identifier] for v in data['export_data'][model_name].values())

                        relevant_db_entries_result = model.objects.filter(
                            **{'{}__in'.format(unique_identifier): import_unique_ids})
                        # Note: uuids need to be converted to strings
                        relevant_db_entries = {str(getattr(n, unique_identifier)): n for n in relevant_db_entries_result}

                        foreign_ids_reverse_mappings[model_name] = {k: v.pk for k, v in relevant_db_entries.items()}
                        for key, value in data['export_data'][model_name].items():
                            if value[unique_identifier] in relevant_db_entries.keys():
                                # Already in DB
                                existing_entries[model_name][key] = value
                            else:
                                # To be added
                                new_entries[model_name][key] = value
                    else:
                        new_entries[model_name] = data['export_data'][model_name].copy()

            # Show Comment mode if not silent
            if not silent:
                print('Comment mode: {}'.format(comment_mode))

            # I import data from the given model
            for model_name in model_order:
                cls_signature = entity_names_to_signatures[model_name]
                model = get_object_from_string(cls_signature)
                fields_info = metadata['all_fields_info'].get(model_name, {})
                unique_identifier = metadata['unique_identifiers'].get(model_name, None)

                # EXISTING ENTRIES
                for import_entry_pk, entry_data in existing_entries[model_name].items():
                    unique_id = entry_data[unique_identifier]
                    existing_entry_id = foreign_ids_reverse_mappings[model_name][unique_id]
                    import_data = dict(
                        deserialize_field(
                            k,
                            v,
                            fields_info=fields_info,
                            import_unique_ids_mappings=import_unique_ids_mappings,
                            foreign_ids_reverse_mappings=foreign_ids_reverse_mappings) for k, v in entry_data.items())
                    # TODO COMPARE, AND COMPARE ATTRIBUTES

                    if model is models.DbComment:
                        # Comments whose merge produced a new UUID are re-queued as new entries
                        new_entry_uuid = merge_comment(import_data, comment_mode)
                        if new_entry_uuid is not None:
                            entry_data[unique_identifier] = new_entry_uuid
                            new_entries[model_name][import_entry_pk] = entry_data

                    if model_name not in ret_dict:
                        ret_dict[model_name] = {'new': [], 'existing': []}
                    ret_dict[model_name]['existing'].append((import_entry_pk, existing_entry_id))

                    if not silent:
                        print('existing %s: %s (%s->%s)' % (model_name, unique_id, import_entry_pk, existing_entry_id))
                        # print("   `-> WARNING: NO DUPLICITY CHECK DONE!")
                        # CHECK ALSO FILES!

                # Store all objects for this model in a list, and store them all in once at the end.
                objects_to_create = []
                # This is needed later to associate the import entry with the new pk
                import_new_entry_pks = {}
                imported_comp_names = set()

                # NEW ENTRIES
                for import_entry_pk, entry_data in new_entries[model_name].items():
                    unique_id = entry_data[unique_identifier]
                    import_data = dict(
                        deserialize_field(
                            k,
                            v,
                            fields_info=fields_info,
                            import_unique_ids_mappings=import_unique_ids_mappings,
                            foreign_ids_reverse_mappings=foreign_ids_reverse_mappings) for k, v in entry_data.items())

                    if model is models.DbGroup:
                        # Check if there is already a group with the same name
                        dupl_counter = 0
                        orig_label = import_data['label']
                        while model.objects.filter(label=import_data['label']):
                            import_data['label'] = orig_label + DUPL_SUFFIX.format(dupl_counter)
                            dupl_counter += 1
                            if dupl_counter == 100:
                                raise exceptions.ImportUniquenessError(
                                    'A group of that label ( {} ) already exists and I could not create a new one'
                                    ''.format(orig_label))

                    elif model is models.DbComputer:
                        # Check if there is already a computer with the same name in the database
                        dupl = (
                            model.objects.filter(name=import_data['name']) or
                            import_data['name'] in imported_comp_names)
                        orig_name = import_data['name']
                        dupl_counter = 0
                        while dupl:
                            # Rename the new computer
                            import_data['name'] = (orig_name + DUPL_SUFFIX.format(dupl_counter))
                            dupl = (
                                model.objects.filter(name=import_data['name']) or
                                import_data['name'] in imported_comp_names)
                            dupl_counter += 1
                            if dupl_counter == 100:
                                raise exceptions.ImportUniquenessError(
                                    'A computer of that name ( {} ) already exists and I could not create a new one'
                                    ''.format(orig_name))

                        imported_comp_names.add(import_data['name'])

                    objects_to_create.append(model(**import_data))
                    import_new_entry_pks[unique_id] = import_entry_pk

                if model_name == NODE_ENTITY_NAME:
                    if not silent:
                        print('STORING NEW NODE REPOSITORY FILES...')

                    # NEW NODES
                    for object_ in objects_to_create:
                        import_entry_uuid = object_.uuid
                        import_entry_pk = import_new_entry_pks[import_entry_uuid]

                        # Before storing entries in the DB, I store the files (if these are nodes).
                        # Note: only for new entries!
                        subfolder = folder.get_subfolder(
                            os.path.join(NODES_EXPORT_SUBFOLDER, export_shard_uuid(import_entry_uuid)))
                        if not subfolder.exists():
                            raise exceptions.CorruptArchive(
                                'Unable to find the repository folder for Node with UUID={} in the exported '
                                'file'.format(import_entry_uuid))
                        destdir = RepositoryFolder(section=Repository._section_name, uuid=import_entry_uuid)
                        # Replace the folder, possibly destroying existing previous folders, and move the files
                        # (faster if we are on the same filesystem, and in any case the source is a SandboxFolder)
                        destdir.replace_with_folder(subfolder.abspath, move=True, overwrite=True)

                        # For DbNodes, we also have to store its attributes
                        if not silent:
                            print('STORING NEW NODE ATTRIBUTES...')

                        # Get attributes from import file
                        try:
                            object_.attributes = data['node_attributes'][str(import_entry_pk)]
                        except KeyError:
                            raise exceptions.CorruptArchive(
                                'Unable to find attribute info for Node with UUID={}'.format(import_entry_uuid))

                        # For DbNodes, we also have to store its extras
                        if extras_mode_new == 'import':
                            if not silent:
                                print('STORING NEW NODE EXTRAS...')

                            # Get extras from import file
                            try:
                                extras = data['node_extras'][str(import_entry_pk)]
                            except KeyError:
                                raise exceptions.CorruptArchive(
                                    'Unable to find extra info for Node with UUID={}'.format(import_entry_uuid))
                            # TODO: remove when aiida extras will be moved somewhere else
                            # from here
                            extras = {key: value for key, value in extras.items() if not key.startswith('_aiida_')}
                            if object_.node_type.endswith('code.Code.'):
                                extras = {key: value for key, value in extras.items() if not key == 'hidden'}
                            # till here
                            object_.extras = extras
                        elif extras_mode_new == 'none':
                            if not silent:
                                print('SKIPPING NEW NODE EXTRAS...')
                        else:
                            raise exceptions.ImportValidationError(
                                "Unknown extras_mode_new value: {}, should be either 'import' or 'none'"
                                ''.format(extras_mode_new))

                    # EXISTING NODES (Extras)
                    # For the existing nodes that are also in the imported list we also update their extras if necessary
                    if not silent:
                        print('UPDATING EXISTING NODE EXTRAS (mode: {})'.format(extras_mode_existing))

                    import_existing_entry_pks = {
                        entry_data[unique_identifier]: import_entry_pk
                        for import_entry_pk, entry_data in existing_entries[model_name].items()
                    }
                    for node in models.DbNode.objects.filter(uuid__in=import_existing_entry_pks).all():  # pylint: disable=no-member
                        import_entry_uuid = str(node.uuid)
                        import_entry_pk = import_existing_entry_pks[import_entry_uuid]

                        # Get extras from import file
                        try:
                            extras = data['node_extras'][str(import_entry_pk)]
                        except KeyError:
                            # NOTE(fix): corrected typo "ode" -> "Node" in the error message, for consistency
                            # with the identical message raised for attributes and for new-node extras above
                            raise exceptions.CorruptArchive(
                                'Unable to find extra info for Node with UUID={}'.format(import_entry_uuid))

                        # TODO: remove when aiida extras will be moved somewhere else
                        # from here
                        extras = {key: value for key, value in extras.items() if not key.startswith('_aiida_')}
                        if node.node_type.endswith('code.Code.'):
                            extras = {key: value for key, value in extras.items() if not key == 'hidden'}
                        # till here
                        node.extras = merge_extras(node.extras, extras, extras_mode_existing)

                        # Already saving existing node here to update its extras
                        node.save()

                # If there is an mtime in the field, disable the automatic update
                # to keep the mtime that we have set here
                if 'mtime' in [field.name for field in model._meta.local_fields]:
                    with models.suppress_auto_now([(model, ['mtime'])]):
                        # Store them all in once; however, the PK are not set in this way...
                        model.objects.bulk_create(objects_to_create, batch_size=batch_size)
                else:
                    model.objects.bulk_create(objects_to_create, batch_size=batch_size)

                # Get back the just-saved entries
                just_saved_queryset = model.objects.filter(
                    **{'{}__in'.format(unique_identifier): import_new_entry_pks.keys()}).values_list(
                        unique_identifier, 'pk')
                # note: convert uuids from type UUID to strings
                just_saved = {str(key): value for key, value in just_saved_queryset}

                # Now I have the PKs, print the info
                # Moreover, add newly created Nodes to foreign_ids_reverse_mappings
                for unique_id, new_pk in just_saved.items():
                    import_entry_pk = import_new_entry_pks[unique_id]
                    foreign_ids_reverse_mappings[model_name][unique_id] = new_pk
                    if model_name not in ret_dict:
                        ret_dict[model_name] = {'new': [], 'existing': []}
                    ret_dict[model_name]['new'].append((import_entry_pk, new_pk))

                    if not silent:
                        print('NEW %s: %s (%s->%s)' % (model_name, unique_id, import_entry_pk, new_pk))

            if not silent:
                print('STORING NODE LINKS...')

            import_links = data['links_uuid']
            links_to_store = []

            # Needed, since QueryBuilder does not yet work for recently saved Nodes
            existing_links_raw = models.DbLink.objects.all().values_list('input', 'output', 'label', 'type')
            existing_links = {(l[0], l[1], l[2], l[3]) for l in existing_links_raw}
            existing_outgoing_unique = {(l[0], l[3]) for l in existing_links_raw}
            existing_outgoing_unique_pair = {(l[0], l[2], l[3]) for l in existing_links_raw}
            existing_incoming_unique = {(l[1], l[3]) for l in existing_links_raw}
            existing_incoming_unique_pair = {(l[1], l[2], l[3]) for l in existing_links_raw}

            calculation_node_types = 'process.calculation.'
            workflow_node_types = 'process.workflow.'
            data_node_types = 'data.'

            # For each LinkType: (valid source type prefix, valid target type prefix, outdegree, indegree)
            link_mapping = {
                LinkType.CALL_CALC: (workflow_node_types, calculation_node_types, 'unique_triple', 'unique'),
                LinkType.CALL_WORK: (workflow_node_types, workflow_node_types, 'unique_triple', 'unique'),
                LinkType.CREATE: (calculation_node_types, data_node_types, 'unique_pair', 'unique'),
                LinkType.INPUT_CALC: (data_node_types, calculation_node_types, 'unique_triple', 'unique_pair'),
                LinkType.INPUT_WORK: (data_node_types, workflow_node_types, 'unique_triple', 'unique_pair'),
                LinkType.RETURN: (workflow_node_types, data_node_types, 'unique_pair', 'unique_triple'),
            }

            for link in import_links:
                # Check for dangling Links within the, supposed, self-consistent archive
                try:
                    in_id = foreign_ids_reverse_mappings[NODE_ENTITY_NAME][link['input']]
                    out_id = foreign_ids_reverse_mappings[NODE_ENTITY_NAME][link['output']]
                except KeyError:
                    if ignore_unknown_nodes:
                        continue
                    raise exceptions.ImportValidationError(
                        'Trying to create a link with one or both unknown nodes, stopping (in_uuid={}, out_uuid={}, '
                        'label={}, type={})'.format(link['input'], link['output'], link['label'], link['type']))

                # Check if link already exists, skip if it does
                # This is equivalent to an existing triple link (i.e. unique_triple from below)
                if (in_id, out_id, link['label'], link['type']) in existing_links:
                    continue

                # Since backend specific Links (DbLink) are not validated upon creation, we will now validate them.
                try:
                    validate_link_label(link['label'])
                except ValueError as why:
                    raise exceptions.ImportValidationError('Error during Link label validation: {}'.format(why))

                source = models.DbNode.objects.get(id=in_id)
                target = models.DbNode.objects.get(id=out_id)

                if source.uuid == target.uuid:
                    raise exceptions.ImportValidationError('Cannot add a link to oneself')

                link_type = LinkType(link['type'])
                type_source, type_target, outdegree, indegree = link_mapping[link_type]

                # Check if source Node is a valid type
                if not source.node_type.startswith(type_source):
                    raise exceptions.ImportValidationError(
                        'Cannot add a {} link from {} to {}'.format(link_type, source.node_type, target.node_type))

                # Check if target Node is a valid type
                if not target.node_type.startswith(type_target):
                    raise exceptions.ImportValidationError(
                        'Cannot add a {} link from {} to {}'.format(link_type, source.node_type, target.node_type))

                # If the outdegree is `unique` there cannot already be any other outgoing link of that type,
                # i.e., the source Node may not have a LinkType of current LinkType, going out, existing already.
                if outdegree == 'unique' and (in_id, link['type']) in existing_outgoing_unique:
                    raise exceptions.ImportValidationError(
                        'Node<{}> already has an outgoing {} link'.format(source.uuid, link_type))

                # If the outdegree is `unique_pair`,
                # then the link labels for outgoing links of this type should be unique,
                # i.e., the source Node may not have a LinkType of current LinkType, going out,
                # that also has the current Link label, existing already.
                elif outdegree == 'unique_pair' and \
                        (in_id, link['label'], link['type']) in existing_outgoing_unique_pair:
                    raise exceptions.ImportValidationError(
                        'Node<{}> already has an outgoing {} link with label "{}"'.format(
                            source.uuid, link_type, link['label']))

                # If the indegree is `unique` there cannot already be any other incoming links of that type,
                # i.e., the target Node may not have a LinkType of current LinkType, coming in, existing already.
                if indegree == 'unique' and (out_id, link['type']) in existing_incoming_unique:
                    raise exceptions.ImportValidationError(
                        'Node<{}> already has an incoming {} link'.format(target.uuid, link_type))

                # If the indegree is `unique_pair`,
                # then the link labels for incoming links of this type should be unique,
                # i.e., the target Node may not have a LinkType of current LinkType, coming in
                # that also has the current Link label, existing already.
                elif indegree == 'unique_pair' and \
                        (out_id, link['label'], link['type']) in existing_incoming_unique_pair:
                    raise exceptions.ImportValidationError(
                        'Node<{}> already has an incoming {} link with label "{}"'.format(
                            target.uuid, link_type, link['label']))

                # New link
                links_to_store.append(
                    models.DbLink(input_id=in_id, output_id=out_id, label=link['label'], type=link['type']))
                if 'Link' not in ret_dict:
                    ret_dict['Link'] = {'new': []}
                ret_dict['Link']['new'].append((in_id, out_id))

                # Add new Link to sets of existing Links 'input PK', 'output PK', 'label', 'type'
                existing_links.add((in_id, out_id, link['label'], link['type']))
                existing_outgoing_unique.add((in_id, link['type']))
                existing_outgoing_unique_pair.add((in_id, link['label'], link['type']))
                existing_incoming_unique.add((out_id, link['type']))
                existing_incoming_unique_pair.add((out_id, link['label'], link['type']))

            # Store new links
            if links_to_store:
                if not silent:
                    print(' ({} new links...)'.format(len(links_to_store)))
                models.DbLink.objects.bulk_create(links_to_store, batch_size=batch_size)
            else:
                if not silent:
                    print(' (0 new links...)')

            if not silent:
                print('STORING GROUP ELEMENTS...')
            import_groups = data['groups_uuid']
            for groupuuid, groupnodes in import_groups.items():
                # TODO: cache these to avoid too many queries
                group_ = models.DbGroup.objects.get(uuid=groupuuid)
                nodes_to_store = [foreign_ids_reverse_mappings[NODE_ENTITY_NAME][node_uuid] for node_uuid in groupnodes]
                if nodes_to_store:
                    group_.dbnodes.add(*nodes_to_store)

            ######################################################
            # Put everything in a specific group
            ######################################################
            existing = existing_entries.get(NODE_ENTITY_NAME, {})
            existing_pk = [foreign_ids_reverse_mappings[NODE_ENTITY_NAME][v['uuid']] for v in existing.values()]
            new = new_entries.get(NODE_ENTITY_NAME, {})
            new_pk = [foreign_ids_reverse_mappings[NODE_ENTITY_NAME][v['uuid']] for v in new.values()]

            pks_for_group = existing_pk + new_pk

            # So that we do not create empty groups
            if pks_for_group:
                # If user specified a group, import all things into it
                if not group:
                    # Get an unique name for the import group, based on the current (local) time
                    basename = timezone.localtime(timezone.now()).strftime('%Y%m%d-%H%M%S')
                    counter = 0
                    group_label = basename

                    while Group.objects.find(filters={'label': group_label}):
                        counter += 1
                        group_label = '{}_{}'.format(basename, counter)

                        if counter == 100:
                            raise exceptions.ImportUniquenessError(
                                "Overflow of import groups (more than 100 import groups exists with basename '{}')"
                                ''.format(basename))
                    group = ImportGroup(label=group_label).store()

                # Add all the nodes to the new group
                # TODO: decide if we want to return the group label
                nodes = [
                    entry[0] for entry in QueryBuilder().append(Node, filters={
                        'id': {
                            'in': pks_for_group
                        }
                    }).all()
                ]
                group.add_nodes(nodes)

                if not silent:
                    print("IMPORTED NODES ARE GROUPED IN THE IMPORT GROUP LABELED '{}'".format(group.label))
            else:
                if not silent:
                    print('NO NODES TO IMPORT, SO NO GROUP CREATED, IF IT DID NOT ALREADY EXIST')

    if not silent:
        print('DONE.')

    return ret_dict
def import_data(in_path, ignore_unknown_nodes=False, silent=False):
    """
    Import exported AiiDA environment to the AiiDA database.

    If the 'in_path' is a folder, calls export_tree; otherwise, tries to
    detect the compression format (zip, tar.gz, tar.bz2, ...) and calls the
    correct function.

    :param in_path: the path to a file or folder that can be imported in AiiDA
    :param ignore_unknown_nodes: if True, links/groups referring to nodes that
        are neither in the archive nor in the database are silently skipped
        instead of raising
    :param silent: if True, suppress all progress printouts
    :return: dict mapping model names to {'new': [...], 'existing': [...]}
        lists of (import pk, db pk) pairs
    """
    import json
    import os
    import tarfile
    import zipfile
    from itertools import chain

    from django.db import transaction
    from aiida.utils import timezone

    from aiida.orm.node import Node
    from aiida.orm.group import Group
    from aiida.common.exceptions import UniquenessError
    from aiida.common.folders import SandboxFolder, RepositoryFolder
    from aiida.backends.djsite.db import models
    from aiida.common.utils import get_class_string, get_object_from_string
    from aiida.common.datastructures import calc_states

    # This is the export version expected by this function
    expected_export_version = '0.1'

    # The name of the subfolder in which the node files are stored
    nodes_export_subfolder = 'nodes'

    # The returned dictionary with new and existing nodes and links
    ret_dict = {}

    ################
    # EXTRACT DATA #
    ################
    # The sandbox has to remain open until the end, because the node
    # repository files are moved out of it only at storing time below.
    with SandboxFolder() as folder:
        if os.path.isdir(in_path):
            extract_tree(in_path, folder, silent=silent)
        else:
            if tarfile.is_tarfile(in_path):
                extract_tar(in_path, folder, silent=silent,
                            nodes_export_subfolder=nodes_export_subfolder)
            elif zipfile.is_zipfile(in_path):
                extract_zip(in_path, folder, silent=silent,
                            nodes_export_subfolder=nodes_export_subfolder)
            else:
                raise ValueError("Unable to detect the input file format, it "
                                 "is neither a (possibly compressed) tar file, "
                                 "nor a zip file.")

        try:
            with open(folder.get_abs_path('metadata.json')) as f:
                metadata = json.load(f)

            with open(folder.get_abs_path('data.json')) as f:
                data = json.load(f)
        except IOError as e:
            raise ValueError("Unable to find the file {} in the import "
                             "file or folder".format(e.filename))

        ######################
        # PRELIMINARY CHECKS #
        ######################
        if metadata['export_version'] != expected_export_version:
            raise ValueError("File export version is {}, but I can import only "
                             "version {}".format(metadata['export_version'],
                                                 expected_export_version))

        ##########################################################################
        # CREATE UUID REVERSE TABLES AND CHECK IF I HAVE ALL NODES FOR THE LINKS #
        ##########################################################################
        # All node UUIDs referenced by links or group membership in the archive
        linked_nodes = set(chain.from_iterable((l['input'], l['output'])
                                               for l in data['links_uuid']))
        group_nodes = set(chain.from_iterable(
            data['groups_uuid'].itervalues()))

        # I preload the nodes, I need to check each of them later, and I also
        # store them in a reverse table
        # I break up the query due to SQLite limitations..
        relevant_db_nodes = {}
        for group in grouper(999, linked_nodes):
            relevant_db_nodes.update({n.uuid: n for n in
                                      models.DbNode.objects.filter(uuid__in=group)})

        db_nodes_uuid = set(relevant_db_nodes.keys())
        dbnode_model = get_class_string(models.DbNode)
        import_nodes_uuid = set(v['uuid'] for v in
                                data['export_data'][dbnode_model].values())

        # Every referenced node must exist either in the DB or in the archive
        unknown_nodes = linked_nodes.union(group_nodes) - db_nodes_uuid.union(
            import_nodes_uuid)

        if unknown_nodes and not ignore_unknown_nodes:
            raise ValueError(
                "The import file refers to {} nodes with unknown UUID, therefore "
                "it cannot be imported. Either first import the unknown nodes, "
                "or export also the parents when exporting. The unknown UUIDs "
                "are:\n".format(len(unknown_nodes)) +
                "\n".join('* {}'.format(uuid) for uuid in unknown_nodes))

        ###################################
        # DOUBLE-CHECK MODEL DEPENDENCIES #
        ###################################
        # I hardcode here the model order, for simplicity; in any case, this is
        # fixed by the export version
        model_order = [get_class_string(m) for m in
                       (models.DbUser,
                        models.DbComputer,
                        models.DbNode,
                        models.DbGroup,
                        )]

        # Models that do appear in the import file, but whose import is
        # managed manually
        model_manual = [get_class_string(m) for m in
                        (models.DbLink,
                         models.DbAttribute,
                         )]

        all_known_models = model_order + model_manual

        for import_field_name in metadata['all_fields_info']:
            if import_field_name not in all_known_models:
                raise NotImplementedError("Apparently, you are importing a "
                                          "file with a model '{}', but this does not appear in "
                                          "all_known_models!".format(import_field_name))

        # Verify that each model's foreign-key targets appear earlier in
        # model_order, so rows can be created in a single forward pass.
        for idx, model_name in enumerate(model_order):
            dependencies = []
            for field in metadata['all_fields_info'][model_name].values():
                try:
                    dependencies.append(field['requires'])
                except KeyError:
                    # (No ForeignKey)
                    pass
            for dependency in dependencies:
                if dependency not in model_order[:idx]:
                    raise ValueError("Model {} requires {} but would be loaded "
                                     "first; stopping...".format(model_name,
                                                                 dependency))

        ###################################################
        # CREATE IMPORT DATA DIRECT UNIQUE_FIELD MAPPINGS #
        ###################################################
        # Maps model name -> {import pk (int) -> unique identifier value}
        import_unique_ids_mappings = {}
        for model_name, import_data in data['export_data'].iteritems():
            if model_name in metadata['unique_identifiers']:
                # I have to reconvert the pk to integer
                import_unique_ids_mappings[model_name] = {
                    int(k): v[metadata['unique_identifiers'][model_name]]
                    for k, v in import_data.iteritems()}

        ###############
        # IMPORT DATA #
        ###############
        # DO ALL WITH A TRANSACTION
        with transaction.commit_on_success():
            foreign_ids_reverse_mappings = {}
            new_entries = {}
            existing_entries = {}

            # I first generate the list of data: for each model, partition the
            # archive entries into those already in the DB (matched on the
            # unique identifier) and those still to be created.
            for model_name in model_order:
                Model = get_object_from_string(model_name)
                fields_info = metadata['all_fields_info'].get(model_name, {})
                unique_identifier = metadata['unique_identifiers'].get(
                    model_name, None)

                new_entries[model_name] = {}
                existing_entries[model_name] = {}
                foreign_ids_reverse_mappings[model_name] = {}

                # Not necessarily all models are exported
                if model_name in data['export_data']:
                    if unique_identifier is not None:
                        import_unique_ids = set(
                            v[unique_identifier]
                            for v in data['export_data'][model_name].values())

                        relevant_db_entries = {
                            getattr(n, unique_identifier): n
                            for n in Model.objects.filter(
                                **{'{}__in'.format(unique_identifier):
                                       import_unique_ids})}

                        foreign_ids_reverse_mappings[model_name] = {
                            k: v.pk for k, v in relevant_db_entries.iteritems()}
                        for k, v in data['export_data'][model_name].iteritems():
                            if v[unique_identifier] in relevant_db_entries.keys():
                                # Already in DB
                                existing_entries[model_name][k] = v
                            else:
                                # To be added
                                new_entries[model_name][k] = v
                    else:
                        # No unique identifier: everything is treated as new
                        new_entries[model_name] = data['export_data'][
                            model_name].copy()

            # I import data from the given model
            for model_name in model_order:
                Model = get_object_from_string(model_name)
                fields_info = metadata['all_fields_info'].get(model_name, {})
                unique_identifier = metadata['unique_identifiers'].get(
                    model_name, None)

                # Existing entries are only recorded in ret_dict, not modified
                for import_entry_id, entry_data in existing_entries[
                        model_name].iteritems():
                    unique_id = entry_data[unique_identifier]
                    existing_entry_id = foreign_ids_reverse_mappings[
                        model_name][unique_id]
                    # TODO COMPARE, AND COMPARE ATTRIBUTES
                    if model_name not in ret_dict:
                        ret_dict[model_name] = {'new': [], 'existing': []}
                    ret_dict[model_name]['existing'].append(
                        (import_entry_id, existing_entry_id))
                    if not silent:
                        print "existing %s: %s (%s->%s)" % (
                            model_name, unique_id,
                            import_entry_id, existing_entry_id)
                        # print "  `-> WARNING: NO DUPLICITY CHECK DONE!"
                        # CHECK ALSO FILES!

                # Store all objects for this model in a list, and store them
                # all in once at the end.
                objects_to_create = []
                # This is needed later to associate the import entry with the new pk
                import_entry_ids = {}
                for import_entry_id, entry_data in new_entries[
                        model_name].iteritems():
                    unique_id = entry_data[unique_identifier]
                    import_data = dict(deserialize_field(
                        k, v, fields_info=fields_info,
                        import_unique_ids_mappings=import_unique_ids_mappings,
                        foreign_ids_reverse_mappings=foreign_ids_reverse_mappings)
                        for k, v in entry_data.iteritems())

                    objects_to_create.append(Model(**import_data))
                    import_entry_ids[unique_id] = import_entry_id

                # Before storing entries in the DB, I store the files (if these
                # are nodes). Note: only for new entries!
                if model_name == get_class_string(models.DbNode):
                    if not silent:
                        print "STORING NEW NODE FILES..."
                    for o in objects_to_create:
                        subfolder = folder.get_subfolder(os.path.join(
                            nodes_export_subfolder, export_shard_uuid(o.uuid)))
                        if not subfolder.exists():
                            raise ValueError("Unable to find the repository "
                                             "folder for node with UUID={} in the exported "
                                             "file".format(o.uuid))
                        destdir = RepositoryFolder(
                            section=Node._section_name, uuid=o.uuid)
                        # Replace the folder, possibly destroying existing
                        # previous folders, and move the files (faster if we
                        # are on the same filesystem, and
                        # in any case the source is a SandboxFolder)
                        destdir.replace_with_folder(subfolder.abspath,
                                                    move=True, overwrite=True)

                # Store them all in once; however, the PK are not set in this way...
                Model.objects.bulk_create(objects_to_create)

                # Get back the just-saved entries (bulk_create does not fill
                # in the pk attribute, so we re-query by unique identifier)
                just_saved = dict(Model.objects.filter(
                    **{"{}__in".format(unique_identifier):
                           import_entry_ids.keys()}).values_list(
                    unique_identifier, 'pk'))

                imported_states = []
                if model_name == get_class_string(models.DbNode):
                    if not silent:
                        print "SETTING THE IMPORTED STATES FOR NEW NODES..."
                    # I set for all nodes, even if I should set it only
                    # for calculations
                    for unique_id, new_pk in just_saved.iteritems():
                        imported_states.append(
                            models.DbCalcState(dbnode_id=new_pk,
                                               state=calc_states.IMPORTED))
                    models.DbCalcState.objects.bulk_create(imported_states)

                # Now I have the PKs, print the info
                # Moreover, set the foreing_ids_reverse_mappings
                for unique_id, new_pk in just_saved.iteritems():
                    import_entry_id = import_entry_ids[unique_id]
                    foreign_ids_reverse_mappings[model_name][
                        unique_id] = new_pk
                    if model_name not in ret_dict:
                        ret_dict[model_name] = {'new': [], 'existing': []}
                    ret_dict[model_name]['new'].append(
                        (import_entry_id, new_pk))

                    if not silent:
                        print "NEW %s: %s (%s->%s)" % (model_name, unique_id,
                                                       import_entry_id, new_pk)

                # For DbNodes, we also have to store Attributes!
                if model_name == get_class_string(models.DbNode):
                    if not silent:
                        print "STORING NEW NODE ATTRIBUTES..."
                    for unique_id, new_pk in just_saved.iteritems():
                        import_entry_id = import_entry_ids[unique_id]
                        # Get attributes from import file
                        try:
                            attributes = data['node_attributes'][
                                str(import_entry_id)]
                            attributes_conversion = data[
                                'node_attributes_conversion'][
                                str(import_entry_id)]
                        except KeyError:
                            raise ValueError("Unable to find attribute info "
                                             "for DbNode with UUID = {}".format(
                                unique_id))

                        # Here I have to deserialize the attributes
                        deserialized_attributes = deserialize_attributes(
                            attributes, attributes_conversion)
                        models.DbAttribute.reset_values_for_node(
                            dbnode=new_pk,
                            attributes=deserialized_attributes,
                            with_transaction=False)

            if not silent:
                print "STORING NODE LINKS..."
            ## TODO: check that we are not creating input links of an already
            ## existing node...
            import_links = data['links_uuid']
            links_to_store = []

            # Needed for fast checks of existing links
            existing_links_raw = models.DbLink.objects.all().values_list(
                'input', 'output', 'label')
            existing_links_labels = {(l[0], l[1]): l[2]
                                     for l in existing_links_raw}
            existing_input_links = {(l[1], l[2]): l[0]
                                    for l in existing_links_raw}

            dbnode_reverse_mappings = foreign_ids_reverse_mappings[
                get_class_string(models.DbNode)]
            for link in import_links:
                try:
                    in_id = dbnode_reverse_mappings[link['input']]
                    out_id = dbnode_reverse_mappings[link['output']]
                except KeyError:
                    if ignore_unknown_nodes:
                        continue
                    else:
                        raise ValueError("Trying to create a link with one "
                                         "or both unknown nodes, stopping "
                                         "(in_uuid={}, out_uuid={}, "
                                         "label={})".format(
                            link['input'], link['output'], link['label']))

                try:
                    existing_label = existing_links_labels[in_id, out_id]
                    if existing_label != link['label']:
                        raise ValueError("Trying to rename an existing link name, "
                                         "stopping (in={}, out={}, old_label={}, "
                                         "new_label={})".format(
                            in_id, out_id, existing_label, link['label']))
                    # Do nothing, the link is already in place and has the correct
                    # name
                except KeyError:
                    try:
                        # Lookup used only as an existence check: an input link
                        # with this label into out_id must not already exist.
                        existing_input = existing_input_links[out_id,
                                                              link['label']]
                        # If existing_input were the correct one, I would have found
                        # it already in the previous step!
                        raise ValueError("There exists already an input link to "
                                         "node {} with label {} but it does not "
                                         "come the expected input {}".format(
                            out_id, link['label'], in_id))
                    except KeyError:
                        # New link
                        links_to_store.append(models.DbLink(
                            input_id=in_id, output_id=out_id,
                            label=link['label']))
                        if 'aiida.backends.djsite.db.models.DbLink' not in ret_dict:
                            ret_dict[
                                'aiida.backends.djsite.db.models.DbLink'] = {
                                'new': []}
                        ret_dict['aiida.backends.djsite.db.models.DbLink'][
                            'new'].append((in_id, out_id))

            # Store new links
            if links_to_store:
                if not silent:
                    print " ({} new links...)".format(len(links_to_store))
                models.DbLink.objects.bulk_create(links_to_store)
            else:
                if not silent:
                    print " (0 new links...)"

            if not silent:
                print "STORING GROUP ELEMENTS..."
            import_groups = data['groups_uuid']
            for groupuuid, groupnodes in import_groups.iteritems():
                # TODO: cache these to avoid too many queries
                group = models.DbGroup.objects.get(uuid=groupuuid)
                nodes_to_store = [dbnode_reverse_mappings[node_uuid]
                                  for node_uuid in groupnodes]
                if nodes_to_store:
                    group.dbnodes.add(*nodes_to_store)

            ######################################################
            # Put everything in a specific group
            dbnode_model_name = get_class_string(models.DbNode)
            existing = existing_entries.get(dbnode_model_name, {})
            existing_pk = [foreign_ids_reverse_mappings[
                               dbnode_model_name][v['uuid']]
                           for v in existing.itervalues()]
            new = new_entries.get(dbnode_model_name, {})
            new_pk = [foreign_ids_reverse_mappings[
                          dbnode_model_name][v['uuid']]
                      for v in new.itervalues()]

            pks_for_group = existing_pk + new_pk

            # So that we do not create empty groups
            if pks_for_group:
                # Get an unique name for the import group, based on the
                # current (local) time
                basename = timezone.localtime(timezone.now()).strftime(
                    "%Y%m%d-%H%M%S")
                counter = 0
                created = False
                while not created:
                    if counter == 0:
                        group_name = basename
                    else:
                        group_name = "{}_{}".format(basename, counter)
                    try:
                        group = Group(name=group_name,
                                      type_string=IMPORTGROUP_TYPE).store()
                        created = True
                    except UniquenessError:
                        # Name collision with an existing group: retry with an
                        # incremented suffix
                        counter += 1

                # Add all the nodes to the new group
                # TODO: decide if we want to return the group name
                group.add_nodes(
                    models.DbNode.objects.filter(pk__in=pks_for_group))

                if not silent:
                    print "IMPORTED NODES GROUPED IN IMPORT GROUP NAMED '{}'".format(
                        group.name)
            else:
                if not silent:
                    print "NO DBNODES TO IMPORT, SO NO GROUP CREATED"

    if not silent:
        print "*** WARNING: MISSING EXISTING UUID CHECKS!!"
        print "*** WARNING: TODO: UPDATE IMPORT_DATA WITH DEFAULT VALUES! (e.g. calc status, user pwd, ...)"
        print "DONE."

    return ret_dict
def _store_entity_data(*, reader: ArchiveReaderAbstract, entity_name: str,
                       comment_mode: str, extras_mode_existing: str,
                       new_entries: Dict[str, Dict[str, dict]],
                       existing_entries: Dict[str, Dict[str, dict]],
                       foreign_ids_reverse_mappings: Dict[str, Dict[str, int]],
                       import_unique_ids_mappings: Dict[str, Dict[int, str]],
                       ret_dict: dict, batch_size: int):
    """Store the entity data on the AiiDA profile.

    Records already-existing entries in ``ret_dict``, bulk-creates the new
    ones, and updates ``foreign_ids_reverse_mappings`` with the pks of the
    newly created rows. For nodes it additionally copies repository files,
    merges extras of existing nodes, and (for comments) applies
    ``comment_mode`` merging. ``new_entries``, ``foreign_ids_reverse_mappings``
    and ``ret_dict`` are mutated in place.

    :param reader: open archive reader providing metadata and entity fields
    :param entity_name: name of the entity type being stored
    :param comment_mode: strategy passed to ``merge_comment`` for clashing comments
    :param extras_mode_existing: strategy passed to ``merge_extras`` for existing nodes
    :param new_entries: import-pk -> fields for entries to create
    :param existing_entries: import-pk -> fields for entries already in the DB
    :param foreign_ids_reverse_mappings: unique-id -> db pk, per entity
    :param import_unique_ids_mappings: import pk -> unique id, per entity
    :param ret_dict: accumulator of ('new'/'existing', (import pk, db pk)) records
    :param batch_size: batch size for Django ``bulk_create``
    """
    from aiida.backends.djsite.db import models

    cls_signature = entity_names_to_signatures[entity_name]
    model = get_object_from_string(cls_signature)
    fields_info = reader.metadata.all_fields_info.get(entity_name, {})
    unique_identifier = reader.metadata.unique_identifiers.get(
        entity_name, None)

    pbar_base_str = f'{entity_name}s - '

    # EXISTING ENTRIES: only record them; comments may be re-queued as new
    # entries when merge_comment decides a fresh row is needed.
    if existing_entries[entity_name]:
        with get_progress_reporter()(
                total=len(existing_entries[entity_name]),
                desc=f'{pbar_base_str} existing entries') as progress:

            for import_entry_pk, entry_data in existing_entries[
                    entity_name].items():
                progress.update()

                unique_id = entry_data[unique_identifier]
                existing_entry_id = foreign_ids_reverse_mappings[entity_name][
                    unique_id]
                import_data = dict(
                    deserialize_field(
                        k, v, fields_info=fields_info,
                        import_unique_ids_mappings=import_unique_ids_mappings,
                        foreign_ids_reverse_mappings=foreign_ids_reverse_mappings)
                    for k, v in entry_data.items())
                # TODO COMPARE, AND COMPARE ATTRIBUTES

                if model is models.DbComment:
                    # merge_comment may return a fresh uuid, meaning the
                    # comment should be stored as a new entry after all.
                    new_entry_uuid = merge_comment(import_data, comment_mode)
                    if new_entry_uuid is not None:
                        entry_data[unique_identifier] = new_entry_uuid
                        new_entries[entity_name][import_entry_pk] = entry_data

                if entity_name not in ret_dict:
                    ret_dict[entity_name] = {'new': [], 'existing': []}
                ret_dict[entity_name]['existing'].append(
                    (import_entry_pk, existing_entry_id))

                # print('  `-> WARNING: NO DUPLICITY CHECK DONE!')
                # CHECK ALSO FILES!

    # Store all objects for this model in a list, and store them all in once at the end.
    objects_to_create = []
    # This is needed later to associate the import entry with the new pk
    import_new_entry_pks = {}

    # NEW ENTRIES
    for import_entry_pk, entry_data in new_entries[entity_name].items():
        unique_id = entry_data[unique_identifier]
        import_data = dict(
            deserialize_field(
                k, v, fields_info=fields_info,
                import_unique_ids_mappings=import_unique_ids_mappings,
                foreign_ids_reverse_mappings=foreign_ids_reverse_mappings)
            for k, v in entry_data.items())
        objects_to_create.append(model(**import_data))
        import_new_entry_pks[unique_id] = import_entry_pk

    if entity_name == NODE_ENTITY_NAME:
        # Before storing entries in the DB, I store the files (if these are nodes).
        # Note: only for new entries!
        uuids_to_create = [obj.uuid for obj in objects_to_create]
        _copy_node_repositories(uuids_to_create=uuids_to_create, reader=reader)

        # For the existing nodes that are also in the imported list we also update their extras if necessary
        if existing_entries[entity_name]:
            with get_progress_reporter()(
                    total=len(existing_entries[entity_name]),
                    desc='Updating existing node extras') as progress:

                # uuid -> import pk, to map DB rows back to archive entries
                import_existing_entry_pks = {
                    entry_data[unique_identifier]: import_entry_pk
                    for import_entry_pk, entry_data in
                    existing_entries[entity_name].items()
                }
                for node in models.DbNode.objects.filter(
                        uuid__in=import_existing_entry_pks).all():  # pylint: disable=no-member
                    import_entry_uuid = str(node.uuid)
                    import_entry_pk = import_existing_entry_pks[
                        import_entry_uuid]

                    pbar_node_base_str = f"{pbar_base_str}UUID={import_entry_uuid.split('-')[0]} - "
                    progress.set_description_str(f'{pbar_node_base_str}Extras',
                                                 refresh=False)
                    progress.update()

                    old_extras = node.extras.copy()
                    extras = existing_entries[entity_name][str(
                        import_entry_pk)].get('extras', {})

                    new_extras = merge_extras(node.extras, extras,
                                              extras_mode_existing)
                    if new_extras != old_extras:
                        # Already saving existing node here to update its extras
                        node.extras = new_extras
                        node.save()

    if not objects_to_create:
        return

    with get_progress_reporter()(
            total=len(objects_to_create),
            desc=f'{pbar_base_str} storing new') as progress:

        # If there is an mtime in the field, disable the automatic update
        # to keep the mtime that we have set here
        if 'mtime' in [field.name for field in model._meta.local_fields]:
            with models.suppress_auto_now([(model, ['mtime'])]):
                # Store them all in once; however, the PK are not set in this way...
                model.objects.bulk_create(objects_to_create,
                                          batch_size=batch_size)
        else:
            model.objects.bulk_create(objects_to_create,
                                      batch_size=batch_size)

        # Get back the just-saved entries (bulk_create does not populate pks)
        just_saved_queryset = model.objects.filter(
            **{
                f'{unique_identifier}__in': import_new_entry_pks.keys()
            }).values_list(unique_identifier, 'pk')
        # note: convert uuids from type UUID to strings
        just_saved = {str(key): value for key, value in just_saved_queryset}

        # Now I have the PKs, print the info
        # Moreover, add newly created Nodes to foreign_ids_reverse_mappings
        for unique_id, new_pk in just_saved.items():
            import_entry_pk = import_new_entry_pks[unique_id]
            foreign_ids_reverse_mappings[entity_name][unique_id] = new_pk
            if entity_name not in ret_dict:
                ret_dict[entity_name] = {'new': [], 'existing': []}
            ret_dict[entity_name]['new'].append((import_entry_pk, new_pk))

            progress.update()
def get_schema(self):
    """
    Build the API schema for this resource.

    Determine the API schema (partially overlapping with the ORM/database
    one) starting from the database columns of the corresponding ORM class.
    When the ORM is based on django, attributes and extras are not columns
    of the database but are nevertheless valid projections, so they are
    added by hand to the schema here.

    :return: dict with keys ``fields`` (mapping projection name to its
        column description) and ``ordering`` (list of projection names)
    """
    # Construct the full class string and load the correspondent orm class
    class_string = 'aiida.orm.' + self._aiida_type
    orm_class = get_object_from_string(class_string)

    # Construct the json object to be returned
    # TODO change the get_db_columns method to include also relationships
    # such as attributes, extras, input, and outputs in order to have a more
    # complete definition of the schema.
    basic_schema = orm_class.get_db_columns()

    if self._default_projections == ['**']:
        schema = basic_schema  # No custom schema, take the basic one
    else:
        # Non-schema possible projections (only for nodes when django is backend)
        non_schema_projs = ('attributes', 'extras')
        # Sub-projections of JSON fields (applies to both SQLA and Django)
        non_schema_proj_prefix = ('attributes.', 'extras.')

        schema = {}
        for k in self._default_projections:
            if k in basic_schema:
                schema[k] = basic_schema[k]
            elif k in non_schema_projs:
                # Catches 'attributes' and 'extras': whole JSON fields
                schema[k] = dict(type=dict, is_foreign_key=False)
            elif k.startswith(non_schema_proj_prefix):
                # Catches 'attributes.<key>' and 'extras.<key>': the type of
                # a single JSON sub-key cannot be known in advance
                schema[k] = dict(type=None, is_foreign_key=False)

    def table2resource(table_name):
        """
        Convert the related_table values to the RESTAPI resources
        (orm class/db table ==> RESTapi resource)

        :param table_name (str): name of the table (in SQLA is __tablename__)
        :return: resource_name (str): name of the API resource
        """
        # TODO Consider ways to make this function backend independent (one
        # idea would be to go from table name to aiida class name which is
        # unique)
        if BACKEND == BACKEND_DJANGO:
            (_, resource_name) = issingular(table_name[2:].lower())
        elif BACKEND == BACKEND_SQLA:
            (_, resource_name) = issingular(table_name[5:])
        elif BACKEND is None:
            raise ConfigurationError("settings.BACKEND has not been set.\n"
                                     "Hint: Have you called "
                                     "aiida.load_dbenv?")
        else:
            raise ConfigurationError(
                "Unknown settings.BACKEND: {}".format(BACKEND))
        return resource_name

    # Use items() instead of iteritems() so this also runs on Python 3;
    # only the column dicts (values) are mutated, never the keys.
    for k, v in schema.items():
        # Add custom fields to the column dictionaries
        if 'fields' in self.custom_schema:
            if k in self.custom_schema['fields'].keys():
                schema[k].update(self.custom_schema['fields'][k])

        # Convert python types values into strings
        schema[k]['type'] = str(schema[k]['type'])[7:-2]

        # Construct the 'related resource' field from the 'related_table'
        # field
        if v['is_foreign_key']:
            schema[k]['related_resource'] = table2resource(
                schema[k].pop('related_table'))

    # TODO Construct the ordering (all these things have to be moved in
    # matcloud_backend)
    if self._default_projections != ['**']:
        ordering = self._default_projections
    else:
        # No explicit projections: fall back to the (arbitrary) key order
        ordering = list(schema.keys())

    return dict(fields=schema, ordering=ordering)
def setup_pseudo_family(command_name, folder, group_name, group_description): pseudo_cmd = get_object_from_string(command_name)() with open(os.devnull, 'w') as devnull, redirect_stdout(devnull): pseudo_cmd.uploadfamily(folder, group_name, group_description)
def on_next_step_starting(self, workchain): for key, action in self._to_assign.iteritems(): fn = get_object_from_string(action.fn) workchain.ctx[key] = fn(action.running_info.pid)
def _select_entity_data(*, entity_name: str, reader: ArchiveReaderAbstract,
                        new_entries: Dict[str, Dict[str, dict]],
                        existing_entries: Dict[str, Dict[str, dict]],
                        foreign_ids_reverse_mappings: Dict[str, Dict[str, int]],
                        extras_mode_new: str):
    """Select the data to import by comparing the AiiDA database to the archive contents.

    Partitions the archive entries for ``entity_name`` into
    ``existing_entries`` (already in the DB, matched on the unique
    identifier) and ``new_entries`` (to be created), filling
    ``foreign_ids_reverse_mappings`` with the pks of the matched DB rows.
    Groups and computers whose label/name clashes with the DB (or with a
    computer imported earlier in this run) are renamed with an incremented
    ``DUPL_SUFFIX``. All three dict arguments are mutated in place.

    :param entity_name: name of the entity type to select
    :param reader: open archive reader
    :param new_entries: out-parameter, import pk -> fields of entries to create
    :param existing_entries: out-parameter, import pk -> fields of entries found in the DB
    :param foreign_ids_reverse_mappings: out-parameter, unique id -> db pk
    :param extras_mode_new: if not 'import', extras of new nodes are dropped
    :raises ImportUniquenessError: if no free label/name is found within
        MAX_GROUPS / MAX_COMPUTERS attempts
    """
    cls_signature = entity_names_to_signatures[entity_name]
    model = get_object_from_string(cls_signature)
    unique_identifier = reader.metadata.unique_identifiers.get(
        entity_name, None)

    # Not necessarily all models are present in the archive
    if entity_name not in reader.entity_names:
        return

    existing_entries.setdefault(entity_name, {})
    new_entries.setdefault(entity_name, {})

    if unique_identifier is None:
        # Without a unique identifier nothing can be matched: everything is new
        new_entries[entity_name] = {
            str(pk): fields
            for pk, fields in reader.iter_entity_fields(entity_name)
        }
        return

    # skip nodes that are already present in the DB
    import_unique_ids = set(
        f[unique_identifier]
        for _, f in reader.iter_entity_fields(entity_name,
                                              fields=(unique_identifier,)))

    relevant_db_entries = {}
    if import_unique_ids:
        relevant_db_entries_result = model.objects.filter(
            **{f'{unique_identifier}__in': import_unique_ids})

        if relevant_db_entries_result.count():
            with get_progress_reporter()(
                    desc=f'Finding existing entities - {entity_name}',
                    total=relevant_db_entries_result.count()) as progress:
                # Imitating QueryBuilder.iterall() with default settings
                for object_ in relevant_db_entries_result.iterator(
                        chunk_size=100):
                    progress.update()
                    # Note: UUIDs need to be converted to strings
                    relevant_db_entries.update(
                        {str(getattr(object_, unique_identifier)): object_})

    foreign_ids_reverse_mappings[entity_name] = {
        k: v.pk for k, v in relevant_db_entries.items()
    }

    entity_count = reader.entity_count(entity_name)
    if not entity_count:
        return

    with get_progress_reporter()(
            desc=f'Reading archived entities - {entity_name}',
            total=entity_count) as progress:

        # Computer names already claimed earlier in this very import run
        imported_comp_names = set()

        for pk, fields in reader.iter_entity_fields(entity_name):
            progress.update()

            if entity_name == GROUP_ENTITY_NAME:
                # Check if there is already a group with the same name
                dupl_counter = 0
                orig_label = fields['label']
                while model.objects.filter(label=fields['label']):
                    fields['label'] = orig_label + DUPL_SUFFIX.format(
                        dupl_counter)
                    dupl_counter += 1
                    if dupl_counter == MAX_GROUPS:
                        raise exceptions.ImportUniquenessError(
                            f'A group of that label ( {orig_label} ) already exists and I could not create a new one'
                        )

            elif entity_name == COMPUTER_ENTITY_NAME:
                # Check if there is already a computer with the same name in the database
                dupl = (model.objects.filter(name=fields['name'])
                        or fields['name'] in imported_comp_names)
                orig_name = fields['name']
                dupl_counter = 0
                while dupl:
                    # Rename the new computer
                    fields['name'] = orig_name + DUPL_SUFFIX.format(
                        dupl_counter)
                    dupl = (model.objects.filter(name=fields['name'])
                            or fields['name'] in imported_comp_names)
                    dupl_counter += 1
                    if dupl_counter == MAX_COMPUTERS:
                        raise exceptions.ImportUniquenessError(
                            f'A computer of that name ( {orig_name} ) already exists and I could not create a new one'
                        )

                imported_comp_names.add(fields['name'])

            if fields[unique_identifier] in relevant_db_entries:
                # Already in DB
                existing_entries[entity_name][str(pk)] = fields
            else:
                # To be added
                if entity_name == NODE_ENTITY_NAME:
                    # format extras
                    fields = _sanitize_extras(fields)
                    if extras_mode_new != 'import':
                        fields.pop('extras', None)
                new_entries[entity_name][str(pk)] = fields