def get_crossing_and_lowgap_points(bands_data, gap_threshold):
    """Extract the low-gap points and crossings from the output of a `bands` calculation.

    :param bands_data: `orm.BandsData` node produced by a `bands` calculation
        (its `creator` is used to recover the gap array and the kpoint-generation inputs).
    :param gap_threshold: `orm.Float`; gaps at or below this value are classified as
        crossings ('found'), larger-but-small gaps as 'pinned'.
    :return: `orm.ArrayData` with array 'pinned' (cartesian kpoints to refine further)
        and 'found' (crystal kpoints where the gap is below the threshold).
    :raises InputValidationError: if the inputs are not of the expected node types.
    """
    if not isinstance(bands_data, orm.BandsData):
        raise InputValidationError('Invalid type {} for parameter `bands_data`'.format(
            type(bands_data)))
    if not isinstance(gap_threshold, orm.Float):
        raise InputValidationError('Invalid type {} for parameter `gap_threshold`'.format(
            type(gap_threshold)))

    calculation = bands_data.creator
    gaps = get_gap_array_from_PwCalc(calculation)

    kpt_cryst = bands_data.get_kpoints()
    kpt_cart = bands_data.get_kpoints(cartesian=True)
    gap_thr = gap_threshold.value

    try:
        kki = calculation.inputs.kpoints.creator.inputs
        last_pinned = kki.centers.get_array('pinned')
        dist = kki.distance.value
    except Exception:
        # First iteration (or kpoints not produced by the expected calcfunction):
        # fall back to a single center at Gamma with a huge radius so that the
        # ball query below covers every kpoint.
        dist = 200
        last_pinned = np.array([[0., 0., 0.]])

    # For every previous pinned center, collect the kpoints within ~half the
    # center-to-center distance (r = dist * sqrt(3)/2 approximately).
    centers = KDTree(last_pinned)
    kpt_tree = KDTree(kpt_cart)
    query = centers.query_ball_tree(kpt_tree, r=dist * 1.74 / 2)  # ~sqrt(3) / 2

    # Limiting fermi velocity to ~ v_f[graphene] * 3
    # GAP ~< dK * 10 / (#PT - 1)
    pinned_thr = dist * 4.00

    # Limit the number of new points per low-gap center based on the distance
    # between points (fewer new points as the mesh gets finer).
    lim = max(-5 // np.log10(dist), 1) if dist < 1 else 200
    if dist < 0.01:
        lim = 1

    where_pinned = []
    where_found = []
    for q in query:
        # NOTE: `np.int` was removed in NumPy >= 1.24; the builtin `int` is the
        # documented replacement and is what `np.int` aliased.
        q = np.array(q, dtype=int)
        if len(q) == 0:
            continue
        min_gap = gaps[q].min()

        # Shrink `scale` geometrically until at most `lim` kpoints fall below
        # min_gap * scale; bail out at a ~0.01% window to guarantee termination.
        app = None
        scale = 2.5 if lim > 1 else 1.001
        if dist == 200:
            scale = 0.25 / min_gap
        while app is None or len(app) > lim:
            app = np.where(gaps[q] < min_gap * scale)[0]
            scale *= 0.98
            if scale < 1.0001:
                app = np.where(gaps[q] < min_gap * 1.0001)[0]
                break

        where_found.extend([q[i] for i in app if gaps[q[i]] <= gap_thr])
        where_pinned.extend([q[i] for i in app if gap_thr < gaps[q[i]] < pinned_thr])

    # Remove duplicates; np.unique also sorts and handles the empty-list case.
    where_pinned = np.unique(np.array(where_pinned, dtype=int))
    where_found = np.unique(np.array(where_found, dtype=int))

    res = orm.ArrayData()
    res.set_array('pinned', kpt_cart[where_pinned])
    res.set_array('found', kpt_cryst[where_found])
    return res
def update_params(node, nodename=None, nodedesc=None, **kwargs):
    """
    Update parameter node given with the values given as kwargs. Returns new node.

    :param node: Input parameter node (needs to be valid KKR input parameter node).
    :param nodename: optional label for the new node (defaults to 'updated KKR parameters').
    :param nodedesc: optional description for the new node (defaults to the list of changes).
    :param **kwargs: Input keys with values as in kkrparams.
    :return: parameter node (the input node itself if nothing changed).

    :example usage: OutputNode = KkrCalculation.update_params(InputNode, EMIN=-1, NSTEPS=30)

    :note: Keys are set as in kkrparams class. Check documentation of kkrparams
        for further information.
    """
    # check if node is a valid KKR parameters node
    if not isinstance(node, ParameterData):
        print('Input node is not a valid ParameterData node')
        raise InputValidationError('update_params needs valid parameter node as input')

    # initialize temporary kkrparams instance containing all possible KKR parameters
    params = kkrparams()

    # extract input dict from node
    inp_params = node.get_dict()

    # check if input dict contains only values for KKR parameters
    for key in inp_params:
        if key not in params.values.keys() and key not in _ignored_keys:
            print('Input node contains invalid key "{}"'.format(key))
            raise InputValidationError('invalid key "{}" in input parameter node'.format(key))

    # copy values from input node
    for key in inp_params:
        value = inp_params[key]
        params.set_value(key, value, silent=True)

    # to keep track of changed values:
    changed_params = {}

    # check if values are given as **kwargs (otherwise return input node)
    if len(kwargs) == 0:
        print('No additional input keys given, return input node')
        return node

    for key in kwargs:
        # Use .get() so a key that is not yet present in the node counts as a
        # change; indexing inp_params[key] directly raised KeyError for new keys.
        if kwargs[key] != inp_params.get(key):
            params.set_value(key, kwargs[key], silent=True)
            changed_params[key] = kwargs[key]

    if len(changed_params.keys()) == 0:
        print('No keys have been changed, return input node')
        return node

    # set label/description with input or default value
    if nodename is None or type(nodename) is not str:
        nodename = 'updated KKR parameters'
    if nodedesc is None or type(nodedesc) is not str:
        nodedesc = 'changed parameters: {}'.format(changed_params)

    # create new node
    ParaNode = ParameterData(dict=params.values)
    ParaNode.label = nodename
    ParaNode.description = nodedesc

    return ParaNode
def _validate_keys(self, input_dict):
    """
    Validates the keys otherwise raise ValidationError.

    Does basic validation from the parent followed by validations for the
    quantum numbers. Raises exceptions should the input_dict fail the
    validation or if it contains any unsupported keywords.

    :param input_dict: the dictionary of keys to be validated
    :return validated_dict: a validated dictionary
    """
    validated_dict = super(RealhydrogenOrbital, self)._validate_keys(input_dict)
    # removes all validated items from input_dict
    input_dict = {x: input_dict[x] for x in input_dict if x not in validated_dict}

    # get quantum numbers
    quantum_number_dict = {}
    for key in RealhydrogenOrbital._default_quantum_number_fields:
        quantum_number_dict[key] = input_dict.pop(key, None)

    # state lower and upper limits
    accepted_range = {}
    accepted_range["angular_momentum"] = [-5, 3]
    accepted_range["radial_nodes"] = [0, 2]
    accepted_range["spin"] = [-1, 1]
    l = quantum_number_dict["angular_momentum"]
    # Fail early with the intended message; previously a missing value crashed
    # with a TypeError on the arithmetic below before this could be reported.
    if l is None:
        raise InputValidationError("Must supply angular_momentum")
    if l >= 0:
        accepted_range["magnetic_number"] = [0, 2 * l]
    else:
        accepted_range["magnetic_number"] = [0, -l]

    # Here the tests with the limits defined above
    for key in RealhydrogenOrbital._default_quantum_number_fields:
        validated_number = self._quantum_number_validator(
            quantum_number_dict[key], key, accepted_range[key])
        validated_dict[key] = validated_number

    if validated_dict["angular_momentum"] is None:
        raise InputValidationError("Must supply angular_momentum")
    if validated_dict["magnetic_number"] is None:
        raise InputValidationError("Must supply magnetic_number")
    if validated_dict["radial_nodes"] is None:
        validated_dict["radial_nodes"] = 0
    try:
        self.get_name_from_quantum_numbers(
            validated_dict['angular_momentum'],
            magnetic_number=validated_dict['magnetic_number'])
    except InputValidationError:
        raise InputValidationError("Invalid angular momentum magnetic"
                                   " number combination.")

    # Finally checks optional fields.
    # NOTE: isinstance(x, (basestring, None)) raised
    # "TypeError: isinstance() arg 2 must be a type"; None is a value, not a
    # type, and the enclosing `if` already excludes None.
    KindName = input_dict.pop('kind_name', None)
    if KindName:
        if not isinstance(KindName, basestring):
            raise ValidationError('If kind_name is provided must be string')
        validated_dict['kind_name'] = KindName

    Kind_index = input_dict.pop('kind_index', None)
    if Kind_index:
        if not isinstance(Kind_index, int):
            raise ValidationError('If kind_index is provided must be int')
        validated_dict['kind_index'] = Kind_index

    if input_dict:
        raise ValidationError("Some unrecognized keys: {}".format(input_dict.keys()))
    return validated_dict
def _prepare_for_submission(self, tempfolder, inputdict):
    """
    This is the routine to be called when you want to create
    the input files and related stuff with a plugin.

    :param tempfolder: a aiida.common.folders.Folder subclass where
                       the plugin should put all its files.
    :param inputdict: a dictionary with the input nodes, as they would
            be returned by get_inputdata_dict (without the Code!)
    :raises InputValidationError: for missing/mistyped inputs, blocked
            namelist keys supplied explicitly, unexpected namelists, or
            unrecognized settings keys.
    :return: a CalcInfo instance describing files to copy/retrieve and the code to run.
    """
    local_copy_list = []
    remote_copy_list = []

    # Pop mandatory inputs; each pop also removes the node from inputdict so
    # that any leftover node can be flagged as unrecognized at the end.
    try:
        code = inputdict.pop(self.get_linkname('code'))
    except KeyError:
        raise InputValidationError("No code specified for this calculation")
    try:
        parameters = inputdict.pop(self.get_linkname('parameters'))
    except KeyError:
        raise InputValidationError("No parameters specified for this calculation")
    if not isinstance(parameters, ParameterData):
        raise InputValidationError("parameters is not of type ParameterData")

    # Settings can be undefined, and defaults to an empty dictionary
    settings = inputdict.pop(self.get_linkname('settings'), None)
    if settings is None:
        settings_dict = {}
    else:
        if not isinstance(settings, ParameterData):
            raise InputValidationError("settings, if specified, must be of "
                                       "type ParameterData")
        # Settings converted to uppercase
        settings_dict = _uppercase_dict(settings.get_dict(), dict_name='settings')

    # Optional parent folder; its accepted type(s) are configured per subclass
    # via self._parent_folder_type (a single type or a tuple of types).
    parent_calc_folder = inputdict.pop(self.get_linkname('parent_folder'), None)
    if parent_calc_folder is not None:
        if not isinstance(parent_calc_folder, self._parent_folder_type):
            if not isinstance(self._parent_folder_type, tuple):
                possible_types = [self._parent_folder_type.__name__]
            else:
                possible_types = [t.__name__ for t in self._parent_folder_type]
            raise InputValidationError("parent_calc_folder, if specified,"
                                       "must be of type {}".format(
                                           " or ".join(possible_types)))

    # Hook for subclasses to append free text after the namelists.
    following_text = self._get_following_text(inputdict, settings)

    # Here, there should be no more parameters...
    if inputdict:
        raise InputValidationError("The following input data nodes are "
                                   "unrecognized: {}".format(inputdict.keys()))

    ##############################
    # END OF INITIAL INPUT CHECK #
    ##############################

    # I put the first-level keys as uppercase (i.e., namelist and card names)
    # and the second-level keys as lowercase
    # (deeper levels are unchanged)
    input_params = _uppercase_dict(parameters.get_dict(), dict_name='parameters')
    input_params = {
        k: _lowercase_dict(v, dict_name=k)
        for k, v in input_params.iteritems()
    }

    # set default values. NOTE: this is different from PW/CP
    # Each blocked keyword is a (namelist, key, value) triple forced by the
    # plugin; the user must not set it explicitly.
    for blocked in self._blocked_keywords:
        namelist = blocked[0].upper()
        key = blocked[1].lower()
        value = blocked[2]
        if namelist in input_params:
            if key in input_params[namelist]:
                raise InputValidationError(
                    "You cannot specify explicitly the '{}' key in the '{}' "
                    "namelist.".format(key, namelist))
        # set to a default
        # NOTE(review): if `namelist` is absent from input_params this lookup
        # raises KeyError; a `namelist not in input_params` guard looks
        # intended here — confirm against the upstream plugin.
        if not input_params[namelist]:
            input_params[namelist] = {}
        input_params[namelist][key] = value

    # =================== NAMELISTS AND CARDS ========================
    try:
        namelists_toprint = settings_dict.pop('NAMELISTS')
        if not isinstance(namelists_toprint, list):
            raise InputValidationError(
                "The 'NAMELISTS' value, if specified in the settings input "
                "node, must be a list of strings")
    except KeyError:  # list of namelists not specified; do automatic detection
        namelists_toprint = self._default_namelists

    input_filename = tempfolder.get_abs_path(self._INPUT_FILE_NAME)

    with open(input_filename, 'w') as infile:
        for namelist_name in namelists_toprint:
            infile.write("&{0}\n".format(namelist_name))
            # namelist content; set to {} if not present, so that we leave an
            # empty namelist
            namelist = input_params.pop(namelist_name, {})
            for k, v in sorted(namelist.iteritems()):
                infile.write(convert_input_to_namelist_entry(k, v))
            infile.write("/\n")

        # Write remaning text now, if any
        infile.write(following_text)

    # Check for specified namelists that are not expected
    # (anything the loop above did not pop is not a valid namelist here).
    if input_params:
        raise InputValidationError(
            "The following namelists are specified in input_params, but are "
            "not valid namelists for the current type of calculation: "
            "{}".format(",".join(input_params.keys())))

    # copy remote output dir, if specified
    if parent_calc_folder is not None:
        if isinstance(parent_calc_folder, RemoteData):
            parent_calc_out_subfolder = settings_dict.pop(
                'PARENT_CALC_OUT_SUBFOLDER', self._INPUT_SUBFOLDER)
            remote_copy_list.append(
                (parent_calc_folder.get_computer().uuid,
                 os.path.join(parent_calc_folder.get_remote_path(),
                              parent_calc_out_subfolder),
                 self._OUTPUT_SUBFOLDER))
        elif isinstance(parent_calc_folder, FolderData):
            local_copy_list.append(
                (parent_calc_folder.get_abs_path(self._INPUT_SUBFOLDER),
                 self._OUTPUT_SUBFOLDER))
        elif isinstance(parent_calc_folder, SinglefileData):
            filename = parent_calc_folder.get_file_abs_path()
            local_copy_list.append((filename, os.path.basename(filename)))

    calcinfo = CalcInfo()
    calcinfo.uuid = self.uuid
    # Empty command line by default
    calcinfo.local_copy_list = local_copy_list
    calcinfo.remote_copy_list = remote_copy_list

    codeinfo = CodeInfo()
    codeinfo.cmdline_params = settings_dict.pop('CMDLINE', [])
    codeinfo.stdin_name = self._INPUT_FILE_NAME
    codeinfo.stdout_name = self._OUTPUT_FILE_NAME
    codeinfo.code_uuid = code.uuid
    calcinfo.codes_info = [codeinfo]

    # Retrieve by default the output file and the xml file
    calcinfo.retrieve_list = []
    calcinfo.retrieve_list.append(self._OUTPUT_FILE_NAME)
    settings_retrieve_list = settings_dict.pop('ADDITIONAL_RETRIEVE_LIST', [])
    calcinfo.retrieve_list += settings_retrieve_list
    calcinfo.retrieve_list += self._internal_retrieve_list

    calcinfo.retrieve_singlefile_list = self._retrieve_singlefile_list

    # settings_dict has been consumed by pops above; anything left is either
    # the parser-settings key (tolerated) or an unknown key (error).
    # NOTE(review): if parser_opts pops successfully but OTHER keys remain,
    # no error is raised — verify this is intentional.
    if settings_dict:
        try:
            Parserclass = self.get_parserclass()
            parser = Parserclass(self)
            parser_opts = parser.get_parser_settings_key()
            settings_dict.pop(parser_opts)
        except (KeyError, AttributeError):
            # the key parser_opts isn't inside the dictionary, or it is set to None
            raise InputValidationError(
                "The following keys have been found in "
                "the settings input node, but were not understood: {}".format(
                    ",".join(settings_dict.keys())))

    return calcinfo
def prepare_for_submission(self, folder):
    """
    This is the routine to be called when you want to create
    the input files for the inpgen with the plug-in.

    :param folder: a aiida.common.folders.Folder subclass where
                   the plugin should put all its files.
    :raises InputValidationError: for unsupported namelists/parameters, a
            user-supplied lattice namelist (not implemented), or leftover
            namelists after writing.
    :return: a CalcInfo instance describing files to retrieve and the code to run.
    """
    # Get the connection between coordination number and element symbol
    _atomic_numbers = {
        data['symbol']: num
        for num, data in six.iteritems(PeriodicTableElements)
    }

    possible_namelists = self._possible_namelists
    possible_params = self._possible_params
    local_copy_list = []
    remote_copy_list = []
    remote_symlink_list = []
    bulk = True
    film = False

    # convert these 'booleans' to the inpgen format.
    replacer_values_bool = [
        True, False, 'True', 'False', 't', 'T', 'F', 'f'
    ]
    # some keywords require a string " around them in the input file.
    string_replace = ['econfig', 'lo', 'element', 'name', 'xctyp']

    # of some keys only the values are written to the file, specify them here.
    val_only_namelist = ['soc', 'qss']

    # Scaling comes from the Structure
    # but we have to convert from Angstrom to a.u (bohr radii)
    scaling_factors = [1.0, 1.0, 1.0]
    scaling_lat = 1.  # /bohr_to_ang = 0.52917720859
    scaling_pos = 1. / BOHR_A  # Angstrom to atomic
    own_lattice = False  # not self._use_aiida_structure

    ##########################################
    ############# INPUT CHECK ################
    ##########################################

    # first check existence of structure and if 1D, 2D, 3D
    structure = self.inputs.structure
    pbc = structure.pbc
    if False in pbc:
        bulk = False
        film = True

    # check existence of parameters (optional)
    if 'parameters' in self.inputs:
        parameters = self.inputs.parameters
    else:
        parameters = None

    if parameters is None:  # use default
        parameters_dict = {}
    else:
        parameters_dict = _lowercase_dict(parameters.get_dict(),
                                          dict_name='parameters')

    # we write always out rel coordinates, because thats the way FLEUR uses
    # them best. we have to convert them from abs, because thats how they
    # are stored in a Structure node. cartesian=F is default
    if 'input' in parameters_dict:
        parameters_dict['input']['cartesian'] = False
        if film:
            parameters_dict['input']['film'] = True
    else:
        if bulk:
            parameters_dict['input'] = {'cartesian': False}
        elif film:
            parameters_dict['input'] = {'cartesian': False, 'film': True}

    namelists_toprint = possible_namelists

    input_params = parameters_dict

    if 'title' in list(input_params.keys()):
        self._inp_title = input_params.pop('title')
    # TODO validate type of values of the input parameter keys ?

    # check input_parameters
    for namelist, paramdic in six.iteritems(input_params):
        if 'atom' in namelist:  # this namelist can be specified more often
            # special atom namelist needs to be set for writing,
            # but insert it in the right spot!
            index = namelists_toprint.index('atom') + 1
            namelists_toprint.insert(index, namelist)
            namelist = 'atom'
        if namelist not in possible_namelists:
            raise InputValidationError(
                "The namelist '{0}' is not supported by the fleur"
                " inputgenerator. Check on the fleur website or add '{0}'"
                'to _possible_namelists.'.format(namelist))
        for para in paramdic.keys():
            if para not in possible_params[namelist]:
                raise InputValidationError(
                    "The property '{}' is not supported by the "
                    "namelist '{}'. "
                    'Check the fleur website, or if it really is,'
                    ' update _possible_params. '.format(para, namelist))
            if para in string_replace:
                # TODO check if its in the parameter dict
                paramdic[para] = convert_to_fortran_string(paramdic[para])
                # things that are in string replace can never be a bool
                # Otherwise input where someone given the title 'F' would fail...
            elif paramdic[para] in replacer_values_bool:
                # because 1/1.0 == True, and 0/0.0 == False
                # maybe change in convert_to_fortran that no error occurs
                if isinstance(paramdic[para], (int, float)):
                    if isinstance(paramdic[para], bool):
                        paramdic[para] = convert_to_fortran_bool(paramdic[para])
                else:
                    paramdic[para] = convert_to_fortran_bool(paramdic[para])

    # in fleur it is possible to give a lattice namelist
    if 'lattice' in list(input_params.keys()):
        own_lattice = True
        # NOTE(review): `structure in self.inputs` tests node membership;
        # `'structure' in self.inputs` (the key) looks intended — confirm.
        if structure in self.inputs:  # two structures given?
            # which one should be prepared? TODO: log warning or even error
            if self._use_aiida_structure:
                input_params.pop('lattice', {})
                own_lattice = False
    #TODO check if input parameter dict is consistent to given structure.
    # if not issue warnings.

    # TODO allow only usual kpt meshes and use therefore Aiida kpointData
    # if self._use_kpoints:
    #     try:
    #         kpoints = inputdict.pop(self.get_linkname('kpoints'))
    #     except KeyError:
    #         raise InputValidationError("No kpoints specified for this"
    #                                    " calculation")
    #     if not isinstance(kpoints, KpointsData):
    #         raise InputValidationError("kpoints is not of type KpointsData")

    code = self.inputs.code

    # check existence of settings (optional)
    if 'settings' in self.inputs:
        settings = self.inputs.settings
    else:
        settings = None

    if settings is None:
        settings_dict = {}
    else:
        settings_dict = settings.get_dict()

    # check for for allowed keys, ignore unknown keys but warn.
    for key in settings_dict.keys():
        if key not in self._settings_keys:
            # TODO warning
            self.logger.info(
                'settings dict key %s for Fleur calculation'
                'not recognized, only %s are allowed.', key,
                str(self._settings_keys))

    ##############################
    # END OF INITIAL INPUT CHECK #
    ##############################

    #######################################################
    ######### PREPARE PARAMETERS FOR INPUT FILE ###########
    #######################################################

    #### STRUCTURE_PARAMETERS ####

    scaling_factor_card = ''
    cell_parameters_card = ''
    # We allow to set the significant figures format, because sometimes
    # inpgen has numerical problems which are not there with less precise formatting
    sf_c = str(settings_dict.get('significant_figures_cell', 9))
    sf_p = str(settings_dict.get('significant_figure_positions', 10))
    if not own_lattice:
        cell = structure.cell
        for vector in cell:
            scaled = [a * scaling_pos for a in vector]  # scaling_pos=1./bohr_to_ang
            reg_string = '{0:18.' + sf_c + 'f} {1:18.' + sf_c + 'f} {2:18.' + sf_c + 'f}\n'
            cell_parameters_card += (reg_string.format(
                scaled[0], scaled[1], scaled[2]))
        reg_string = '{0:18.' + sf_c + 'f} {1:18.' + sf_c + 'f} {2:18.' + sf_c + 'f}\n'
        scaling_factor_card += (reg_string.format(scaling_factors[0],
                                                  scaling_factors[1],
                                                  scaling_factors[2]))

    #### ATOMIC_POSITIONS ####

    # TODO: be careful with units
    atomic_positions_card_list = ['']
    atomic_positions_card_listtmp = ['']

    if not own_lattice:
        natoms = len(structure.sites)

        # for FLEUR true, general not, because you could put several
        # atoms on a site
        # TODO: test that only one atom at site?

        # TODO this feature might change in Fleur, do different. that in inpgen kind gets a name, which will also be the name in fleur inp.xml.
        # now user has to make kind_name = atom id.
        for site in structure.sites:
            kind_name = site.kind_name
            kind = structure.get_kind(kind_name)
            if kind.has_vacancies:
                # then we do not at atoms with weights smaller one
                if kind.weights[0] < 1.0:
                    natoms = natoms - 1
                    # Log message?
                    continue
            # TODO: list I assume atoms therefore I just get the first one...
            site_symbol = kind.symbols[0]
            atomic_number = _atomic_numbers[site_symbol]
            atomic_number_name = atomic_number

            # per default we use relative coordinates in Fleur
            # we have to scale back to atomic units from angstrom
            pos = site.position
            if bulk:
                vector_rel = abs_to_rel(pos, cell)
            elif film:
                vector_rel = abs_to_rel_f(pos, cell, structure.pbc)
                vector_rel[2] = vector_rel[2] * scaling_pos

            if site_symbol != kind_name:  # This is an important fact, if user renames it becomes a new atomtype or species!
                try:
                    # Kind names can be more then numbers now, this might need to be reworked
                    head = kind_name.rstrip('0123456789')
                    kind_namet = int(kind_name[len(head):])
                    #if int(kind_name[len(head)]) > 4:
                    #    raise InputValidationError('New specie name/label should start with a digit smaller than 4')
                except ValueError:
                    # NOTE(review): on this path kind_namet is never assigned,
                    # so the reg_string.format(...) below would raise
                    # NameError (or reuse a stale value) — confirm and fix upstream.
                    self.report(
                        'Warning: Kind name {} will be ignored by the FleurinputgenCalculation and not set a charge number.'
                        .format(kind_name))
                else:
                    atomic_number_name = '{}.{}'.format(atomic_number, kind_namet)
                # append a label to the detached atom
                reg_string = ' {0:7} {1:18.' + sf_p + 'f} {2:18.' + sf_p + 'f} {3:18.' + sf_p + 'f} {4}\n'
                atomic_positions_card_listtmp.append(
                    reg_string.format(atomic_number_name, vector_rel[0],
                                      vector_rel[1], vector_rel[2],
                                      kind_namet))
            else:
                reg_string = ' {0:7} {1:18.' + sf_p + 'f} {2:18.' + sf_p + 'f} {3:18.' + sf_p + 'f}\n'
                atomic_positions_card_listtmp.append(
                    reg_string.format(atomic_number_name, vector_rel[0],
                                      vector_rel[1], vector_rel[2]))
        # TODO check format
        # we write it later, since we do not know what natoms is before the loop...
        atomic_positions_card_list.append(' {0:3}\n'.format(natoms))
        for card in atomic_positions_card_listtmp:
            atomic_positions_card_list.append(card)
    else:
        # TODO with own lattice atomic positions have to come from somewhere
        # else.... User input? 
        raise InputValidationError('fleur lattice needs also the atom '
                                   ' position as input,'
                                   ' not implemented yet, sorry!')
    atomic_positions_card = ''.join(atomic_positions_card_list)
    del atomic_positions_card_list  # Free memory

    #### Kpts ####
    # TODO: kpts
    # kpoints_card = ""#.join(kpoints_card_list)
    #del kpoints_card_list

    #######################################
    #### WRITE ALL CARDS IN INPUT FILE ####

    input_filename = folder.get_abs_path(self._INPUT_FILE_NAME)

    with open(input_filename, 'w') as infile:

        # first write title
        infile.write('{0}\n'.format(self._inp_title))

        # then write &input namelist
        infile.write('&{0}'.format('input'))

        # namelist content; set to {} if not present, so that we leave an
        # empty namelist
        namelist = input_params.pop('input', {})
        for k, val in sorted(six.iteritems(namelist)):
            infile.write(get_input_data_text(k, val, False, mapping=None))
        infile.write('/\n')

        # Write lattice information now
        infile.write(cell_parameters_card)
        infile.write('{0:18.10f}\n'.format(scaling_lat))
        infile.write(scaling_factor_card)
        infile.write('\n')

        # Write Atomic positons
        infile.write(atomic_positions_card)

        # Write namelists after atomic positions
        for namels_name in namelists_toprint:
            namelist = input_params.pop(namels_name, {})
            if namelist:
                if 'atom' in namels_name:
                    namels_name = 'atom'
                infile.write('&{0}\n'.format(namels_name))
                if namels_name in val_only_namelist:
                    # 'soc' values are written in reverse sorted key order
                    make_reversed = False
                    if namels_name == 'soc':
                        make_reversed = True
                    for k, val in sorted(six.iteritems(namelist),
                                         reverse=make_reversed):
                        infile.write(
                            get_input_data_text(k, val, True, mapping=None))
                else:
                    for k, val in sorted(six.iteritems(namelist)):
                        infile.write(
                            get_input_data_text(k, val, False, mapping=None))
                infile.write('/\n')
        # infile.write(kpoints_card)

    # Anything not popped while writing is an unsupported namelist.
    if input_params:
        raise InputValidationError(
            'input_params leftover: The following namelists are specified'
            ' in input_params, but are '
            'not valid namelists for the current type of calculation: '
            '{}'.format(','.join(list(input_params.keys()))))

    calcinfo = CalcInfo()
    calcinfo.uuid = self.uuid

    calcinfo.local_copy_list = local_copy_list
    calcinfo.remote_copy_list = remote_copy_list
    calcinfo.remote_symlink_list = remote_symlink_list

    # Retrieve per default only out file and inp.xml file?
    retrieve_list = []
    retrieve_list.append(self._INPXML_FILE_NAME)
    retrieve_list.append(self._OUTPUT_FILE_NAME)
    retrieve_list.append(self._SHELLOUT_FILE_NAME)
    retrieve_list.append(self._ERROR_FILE_NAME)
    retrieve_list.append(self._STRUCT_FILE_NAME)
    retrieve_list.append(self._INPUT_FILE_NAME)

    # user specific retrieve
    add_retrieve = settings_dict.get('additional_retrieve_list', [])
    for file1 in add_retrieve:
        retrieve_list.append(file1)

    remove_retrieve = settings_dict.get('remove_from_retrieve_list', [])
    for file1 in remove_retrieve:
        if file1 in retrieve_list:
            retrieve_list.remove(file1)

    calcinfo.retrieve_list = []
    for file1 in retrieve_list:
        calcinfo.retrieve_list.append(file1)

    codeinfo = CodeInfo()
    # , "-electronConfig"] # TODO? let the user decide -electronconfig?
    #cmdline_params = ['-explicit', '-inc', '+all', '-f', '{}'.format(self._INPUT_FILE_NAME)]
    cmdline_params = ['-explicit']

    # user specific commandline_options
    for command in settings_dict.get('cmdline', []):
        cmdline_params.append(command)
    codeinfo.cmdline_params = (list(cmdline_params))

    codeinfo.code_uuid = code.uuid
    codeinfo.stdin_name = self._INPUT_FILE_NAME
    codeinfo.stdout_name = self._SHELLOUT_FILE_NAME  # shell output will be piped in file
    codeinfo.stderr_name = self._ERROR_FILE_NAME  # std error too

    calcinfo.codes_info = [codeinfo]

    return calcinfo
def run_process(self):
    """
    Merge the inputs namespace and added inputs, and launch the
    sub-process.

    Keys in ``added_input_keys`` use the form
    ``port.path.port_name[:attr.path.attr_name]``: the part before the
    optional ``:`` addresses a (possibly nested) input port, the part
    after it addresses a key inside a Dict node stored at that port.
    """
    self.report("Merging inputs for the sub-process.")
    # Normalize (keys, values) to parallel lists, whether given as a single
    # Str/BaseType pair or as two Lists.
    if isinstance(self.inputs.added_input_keys, orm.Str):
        added_input_keys = [self.inputs.added_input_keys.value]
        if not isinstance(self.inputs.added_input_values, orm.BaseType):
            raise InputValidationError(
                "When 'added_input_keys' is given as 'Str', 'added_input_values'"
                " must be a 'BaseType' instance.")
        added_input_values = [self.inputs.added_input_values.value]
    else:
        added_input_keys = self.inputs.added_input_keys.get_list()
        if not isinstance(self.inputs.added_input_values, orm.List):
            raise InputValidationError(
                "When 'added_input_keys' is given as 'List', 'added_input_values'"
                " must also be a 'List'.")
        added_input_values = self.inputs.added_input_values.get_list()

    if len(added_input_values) != len(added_input_keys):
        raise InputValidationError(
            "Lengths of 'added_input_values' and 'added_input_keys' do not match."
        )

    inputs = AttributeDict(self.inputs.inputs)

    def _get_or_create_sub_dict(in_dict, name):
        # Walk one level into a plain dict, creating the level if missing.
        try:
            return in_dict[name]
        except KeyError:
            res = {}
            in_dict[name] = res
            return res

    def _get_or_create_port(in_attr_dict, name):
        # Walk one level into an AttributeDict namespace, creating it if missing.
        try:
            return getattr(in_attr_dict, name)
        except AttributeError:
            res = AttributeDict()
            setattr(in_attr_dict, name, res)
            return res

    for key, value in zip(added_input_keys, added_input_values):
        # Split the port path (dot-separated) from the optional attribute
        # path inside a Dict node (after ':').
        full_port_path, *full_attr_path = key.split(':')
        *port_path, port_name = full_port_path.split('.')
        namespace = reduce(_get_or_create_port, port_path, inputs)

        if not full_attr_path:
            # Plain port: wrap the raw value in the matching AiiDA type.
            res_value = to_aiida_type(value)
        else:
            assert len(full_attr_path) == 1

            # Get or create the top-level dictionary.
            try:
                res_dict = getattr(namespace, port_name).get_dict()
            except AttributeError:
                res_dict = {}

            *sub_dict_path, attr_name = full_attr_path[0].split('.')
            sub_dict = reduce(_get_or_create_sub_dict, sub_dict_path, res_dict)
            sub_dict[attr_name] = value
            res_value = orm.Dict(dict=res_dict).store()

        setattr(namespace, port_name, res_value)

    self.report("Launching the sub-process.")
    return ToContext(sub_process=self.run_or_submit(
        load_object(self.inputs.sub_process.value), **inputs))
def _prepare_for_submission(self, tempfolder, inputdict):
    """Prepare the CIF-deposition calculation.

    Copies the input CIF into the temp folder, writes the stdin file and
    the config file from the 'parameters' node, and builds the CalcInfo.

    :param tempfolder: a aiida.common.folders.Folder subclass where the
        plugin should put all its files.
    :param inputdict: a dictionary with the input nodes (cif, parameters, code).
    :raises InputValidationError: on missing/mistyped cif, parameters or code.
    :return: a CalcInfo instance.
    """
    from aiida.orm.data.cif import CifData
    from aiida.orm.data.parameter import ParameterData
    from aiida.orm.calculation.job.codtools import commandline_params_from_dict
    import shutil

    try:
        cif = inputdict.pop(self.get_linkname('cif'))
    except KeyError:
        raise InputValidationError("no CIF file is specified for deposition")
    if not isinstance(cif, CifData):
        raise InputValidationError("cif is not of type CifData")

    # Parameters default to an empty dict if not supplied.
    parameters = inputdict.pop(self.get_linkname('parameters'), None)
    if parameters is None:
        parameters = ParameterData(dict={})
    if not isinstance(parameters, ParameterData):
        raise InputValidationError("parameters is not of type ParameterData")

    code = inputdict.pop(self.get_linkname('code'), None)
    if code is None:
        raise InputValidationError("No code found in input")

    parameters_dict = parameters.get_dict()

    deposit_file_rel = "deposit.cif"
    deposit_file_abs = tempfolder.get_abs_path(deposit_file_rel)
    shutil.copy(cif.get_file_abs_path(), deposit_file_abs)

    # stdin just names the CIF file to deposit.
    input_filename = tempfolder.get_abs_path(self._DEFAULT_INPUT_FILE)
    with open(input_filename, 'w') as f:
        f.write("{}\n".format(deposit_file_rel))
        f.flush()

    # Config keys are popped from parameters_dict so that only the remaining
    # keys are turned into command-line options below.
    config_file_abs = tempfolder.get_abs_path(self._CONFIG_FILE)
    with open(config_file_abs, 'w') as f:
        for k in self._config_keys:
            if k in parameters_dict.keys():
                f.write("{}={}\n".format(k, parameters_dict.pop(k)))
        f.flush()

    # Copy before extending: extending self._default_commandline_params in
    # place would mutate the shared class-level list, leaking options into
    # every subsequent calculation of this class.
    commandline_params = list(self._default_commandline_params)
    commandline_params.extend(commandline_params_from_dict(parameters_dict))

    calcinfo = CalcInfo()
    calcinfo.uuid = self.uuid
    # The command line parameters should be generated from 'parameters'
    calcinfo.local_copy_list = []
    calcinfo.remote_copy_list = []
    calcinfo.retrieve_list = [self._DEFAULT_OUTPUT_FILE, self._DEFAULT_ERROR_FILE]
    calcinfo.retrieve_singlefile_list = []

    codeinfo = CodeInfo()
    codeinfo.cmdline_params = commandline_params
    codeinfo.stdin_name = self._DEFAULT_INPUT_FILE
    codeinfo.stdout_name = self._DEFAULT_OUTPUT_FILE
    codeinfo.stderr_name = self._DEFAULT_ERROR_FILE
    codeinfo.code_uuid = code.uuid
    calcinfo.codes_info = [codeinfo]

    return calcinfo
def get_filter_expr_from_attributes(self, operator, value, attr_key, column=None, column_name=None, alias=None):
    """Build an SQLAlchemy filter expression for a JSONB attribute.

    :param operator: the filter operator ('==', '>', 'like', 'of_type', ...).
    :param value: the Python value to compare against; its type determines
        how the JSON entity is cast.
    :param attr_key: path of keys addressing the attribute inside the JSONB column.
    :param column: the JSONB column (or pass column_name + alias to resolve it).
    :raises InputValidationError: for an unknown operator or invalid of_type value.
    :raises TypeError: for a value of unsupported type.
    :return: an SQLAlchemy expression usable in a filter.
    """
    # Too many everything!
    # pylint: disable=too-many-branches, too-many-arguments, too-many-statements

    def cast_according_to_type(path_in_json, value):
        """Cast the value according to the type.

        Returns (type_filter, casted_entity): a jsonb_typeof guard matching
        the Python type of `value`, and the JSON entity cast to the
        corresponding SQL type.
        """
        # bool must be tested before int/float since bool is an int subclass.
        if isinstance(value, bool):
            type_filter = jsonb_typeof(path_in_json) == 'boolean'
            casted_entity = path_in_json.astext.cast(Boolean)
        elif isinstance(value, (int, float)):
            type_filter = jsonb_typeof(path_in_json) == 'number'
            casted_entity = path_in_json.astext.cast(Float)
        elif isinstance(value, str):
            type_filter = jsonb_typeof(path_in_json) == 'string'
            casted_entity = path_in_json.astext
        elif isinstance(value, dict):
            type_filter = jsonb_typeof(path_in_json) == 'object'
            casted_entity = path_in_json.astext.cast(JSONB)  # BOOLEANS?
        # Fixed: the original tested `isinstance(value, dict)` twice, so list
        # values fell through to TypeError and the 'array'/'null' branches
        # were unreachable (None was swallowed by the object branch).
        elif isinstance(value, (list, tuple)):
            type_filter = jsonb_typeof(path_in_json) == 'array'
            casted_entity = path_in_json.astext.cast(JSONB)  # BOOLEANS?
        elif value is None:
            type_filter = jsonb_typeof(path_in_json) == 'null'
            casted_entity = path_in_json.astext.cast(JSONB)  # BOOLEANS?
        else:
            raise TypeError(f'Unknown type {type(value)}')
        return type_filter, casted_entity

    if column is None:
        column = self.get_column(column_name, alias)

    database_entity = column[tuple(attr_key)]
    if operator == '==':
        type_filter, casted_entity = cast_according_to_type(database_entity, value)
        expr = case([(type_filter, casted_entity == value)], else_=False)
    elif operator == '>':
        type_filter, casted_entity = cast_according_to_type(database_entity, value)
        expr = case([(type_filter, casted_entity > value)], else_=False)
    elif operator == '<':
        type_filter, casted_entity = cast_according_to_type(database_entity, value)
        expr = case([(type_filter, casted_entity < value)], else_=False)
    elif operator in ('>=', '=>'):
        type_filter, casted_entity = cast_according_to_type(database_entity, value)
        expr = case([(type_filter, casted_entity >= value)], else_=False)
    elif operator in ('<=', '=<'):
        type_filter, casted_entity = cast_according_to_type(database_entity, value)
        expr = case([(type_filter, casted_entity <= value)], else_=False)
    elif operator == 'of_type':
        # http://www.postgresql.org/docs/9.5/static/functions-json.html
        # Possible types are object, array, string, number, boolean, and null.
        valid_types = ('object', 'array', 'string', 'number', 'boolean', 'null')
        if value not in valid_types:
            raise InputValidationError(
                f'value {value} for of_type is not among valid types\n{valid_types}'
            )
        expr = jsonb_typeof(database_entity) == value
    elif operator == 'like':
        type_filter, casted_entity = cast_according_to_type(database_entity, value)
        expr = case([(type_filter, casted_entity.like(value))], else_=False)
    elif operator == 'ilike':
        type_filter, casted_entity = cast_according_to_type(database_entity, value)
        expr = case([(type_filter, casted_entity.ilike(value))], else_=False)
    elif operator == 'in':
        # Cast according to the first element; assumes a homogeneous collection.
        type_filter, casted_entity = cast_according_to_type(database_entity, value[0])
        expr = case([(type_filter, casted_entity.in_(value))], else_=False)
    elif operator == 'contains':
        expr = database_entity.cast(JSONB).contains(value)
    elif operator == 'has_key':
        expr = database_entity.cast(JSONB).has_key(value)  # noqa
    elif operator == 'of_length':
        expr = case(
            [(jsonb_typeof(database_entity) == 'array',
              jsonb_array_length(database_entity.cast(JSONB)) == value)],
            else_=False)
    elif operator == 'longer':
        expr = case(
            [(jsonb_typeof(database_entity) == 'array',
              jsonb_array_length(database_entity.cast(JSONB)) > value)],
            else_=False)
    elif operator == 'shorter':
        expr = case(
            [(jsonb_typeof(database_entity) == 'array',
              jsonb_array_length(database_entity.cast(JSONB)) < value)],
            else_=False)
    else:
        raise InputValidationError(f'Unknown operator {operator} for filters in JSON field')
    return expr
def merge_parameter(Dict1, Dict2, overwrite=True, merge=True):
    """Combine two AiiDA Dict nodes into a single new Dict node.

    All namelists of both inputs are kept. For namelists present in both,
    Dict2 wins by default; set ``overwrite=False`` to keep values from Dict1
    instead (with ``merge=True`` the two are merged recursively, otherwise
    Dict1 simply overwrites key-by-key). Namelists whose key contains
    ``'atom'`` are collected from both inputs and re-labelled ``atom0``,
    ``atom1``, ... in the result.

    :param Dict1: AiiDA Dict node
    :param Dict2: AiiDA Dict node
    :param overwrite: bool, if True (default) entries of Dict2 replace those of Dict1
    :param merge: bool, only relevant when overwrite is False; if True merge recursively
    :returns: a new AiiDA Dict node with the combined content

    # TODO be more careful how to merge ids in atom namelists, i.e. species labels
    """
    from aiida.common.exceptions import InputValidationError
    from aiida_fleur.tools.dict_util import recursive_merge
    # layout: check input, extract dictionaries, merge them, wrap in a new Dict

    if not isinstance(Dict1, Dict):
        raise InputValidationError('Dict1, must be of type Dict')
    if not isinstance(Dict2, Dict):
        raise InputValidationError('Dict2, must be of type Dict')

    params1 = Dict1.get_dict()
    params2 = Dict2.get_dict()

    # Identical content: short-circuit with a copy of the first dict.
    if params1 == params2:
        return Dict(dict=params1)

    # Pull every 'atom...' namelist out of both inputs; these may occur
    # multiple times and are therefore handled separately.
    collected_atoms = []
    for source in (params1, params2):
        for key in list(source.keys()):
            if 'atom' in key:
                collected_atoms.append(source.pop(key))

    # TODO do something about duplicates on the atom list:
    # check element and id? keep first ones?
    atom_namelists = {
        'atom{}'.format(position): entry
        for position, entry in enumerate(collected_atoms)
    }

    # Merge the remaining (non-atom) namelists.
    if overwrite:
        combined = params1.copy()
        combined.update(params2)
    elif merge:
        combined = recursive_merge(params2.copy(), params1)
    else:
        combined = params2.copy()
        combined.update(params1)

    # TODO merging does not make sense for all namelist keys; be more specific.
    combined.update(atom_namelists)  # be careful with the atom namelists
    return Dict(dict=combined)
def get_filter_expr_from_attributes(self,
                                    operator,
                                    value,
                                    attr_key,
                                    column=None,
                                    column_name=None,
                                    alias=None):
    """Build a filter expression for a filter on an attribute/extra table.

    Django-style backend: attributes are stored in a separate table with one
    typed value column per dtype (tval/bval/fval/ival/dval), so the value's
    Python type determines which column(s) to compare against.

    :param operator: the comparison operator (already stripped of negation)
    :param value: the right-hand-side value to compare with
    :param attr_key: list of key parts leading to the attribute
    :param column: the (already resolved) relationship column, if available
    :param column_name: name of the column, resolved against ``alias`` when
        ``column`` is not given
    :param alias: the aliased ORM class being filtered
    :returns: a SQLAlchemy expression usable in a query filter
    :raises NotImplementedError: for operators this backend does not support
        (of_length/shorter/longer, of_type, contains)
    """

    def get_attribute_db_column(mapped_class, dtype, castas=None):
        # Select the typed value column of the attribute table that matches
        # ``dtype``; optionally cast it (e.g. ival -> float so ints and
        # floats compare consistently).
        if dtype == 't':
            mapped_entity = mapped_class.tval
        elif dtype == 'b':
            mapped_entity = mapped_class.bval
            # ~ mapped_entity = cast(mapped_class.value_str, Boolean)
        elif dtype == 'f':
            mapped_entity = mapped_class.fval
            # ~ mapped_entity = cast(mapped_class.value_str, Float)
        elif dtype == 'i':
            mapped_entity = mapped_class.ival
            # ~ mapped_entity = cast(mapped_class.value_str, Integer)
        elif dtype == 'd':
            mapped_entity = mapped_class.dval
        else:
            raise InputValidationError(
                "I don't know what to do with dtype {}".format(dtype))
        if castas == 't':
            mapped_entity = cast(mapped_entity, String)
        elif castas == 'f':
            mapped_entity = cast(mapped_entity, Float)
        return mapped_entity

    # Resolve the relationship column and the mapped attribute class.
    if column:
        mapped_class = column.prop.mapper.class_
    else:
        column = getattr(alias, column_name)
        mapped_class = column.prop.mapper.class_

    # Ok, so we have an attribute key here.
    # Unless cast is specified, will try to infer myself where the value
    # is stored:
    #   Datetime -> dval
    #   bool     -> bval
    #   string   -> tval
    #   integer  -> ival, fval (cast ival to float)
    #   float    -> ival, fval (cast ival to float)
    # If the user specified of_type, that is basically a query for where
    # the value is sitting (which db_column in the dbattributes).
    # If the user specified in what to cast, he wants an operation to
    # be performed to cast the value to a different type.
    if isinstance(value, (list, tuple)):
        # For list-valued filters (e.g. 'in') all elements must share a type,
        # and the first element decides which typed column to use.
        value_type_set = set([type(i) for i in value])
        if len(value_type_set) > 1:
            raise InputValidationError(
                '{} contains more than one type'.format(value))
        elif len(value_type_set) == 0:
            raise InputValidationError(
                'Given list is empty, cannot determine type')
        else:
            value_to_consider = value[0]
    else:
        value_to_consider = value

    # First cases: operators this backend cannot express on the
    # attribute table at all.
    if operator in ('of_length', 'shorter', 'longer'):
        raise NotImplementedError(
            "Filtering by lengths of arrays or lists is not implemented\n"
            "in the Django-Backend")
    elif operator == 'of_type':
        raise NotImplementedError("Filtering by type is not implemented\n"
                                  "in the Django-Backend")
    elif operator == 'contains':
        raise NotImplementedError(
            "Contains is not implemented in the Django-backend")
    elif operator == 'has_key':
        # has_key translates to: an attribute/extra row exists whose full
        # dotted key is attr_key + the requested key.
        if issubclass(mapped_class, dummy_model.DbAttribute):
            expr = alias.attributes.any(
                mapped_class.key == '.'.join(attr_key + [value]))
        elif issubclass(mapped_class, dummy_model.DbExtra):
            expr = alias.extras.any(mapped_class.key == '.'.join(attr_key +
                                                                 [value]))
        else:
            raise Exception(
                "I was given {} as an attribute base class".format(
                    mapped_class))
    else:
        # Infer the candidate (dtype, cast) pairs from the value's type.
        # NOTE: the bool check is unreachable here because bool is a
        # subclass of... actually basestring is checked first, then bool
        # BEFORE (int, float), which is the correct order since
        # isinstance(True, int) is True.
        # NOTE(review): ``basestring`` is Python-2 only — consistent with
        # the rest of this (legacy) module.
        types_n_casts = []
        if isinstance(value_to_consider, basestring):
            types_n_casts.append(('t', None))
        elif isinstance(value_to_consider, bool):
            types_n_casts.append(('b', None))
        elif isinstance(value_to_consider, (int, float)):
            # Numbers may sit in either fval or ival; compare against both
            # (ival cast to float so 1 == 1.0 holds).
            types_n_casts.append(('f', None))
            types_n_casts.append(('i', 'f'))
        elif isinstance(value_to_consider, datetime):
            types_n_casts.append(('d', None))

        # Build one sub-expression per candidate column; any match counts.
        expressions = []
        for dtype, castas in types_n_casts:
            try:
                expressions.append(
                    self.get_filter_expr(operator,
                                         value,
                                         attr_key=[],
                                         column=get_attribute_db_column(
                                             mapped_class,
                                             dtype,
                                             castas=castas),
                                         is_attribute=False))
            except InputValidationError as e:
                raise e

        actual_attr_key = '.'.join(attr_key)
        # Row must match the dotted key AND any of the typed comparisons.
        expr = column.any(
            and_(mapped_class.key == actual_attr_key, or_(*expressions)))
    return expr
def get_filter_expr(self,
                    operator,
                    value,
                    attr_key,
                    is_attribute,
                    alias=None,
                    column=None,
                    column_name=None):
    """
    Applies a filter on the alias given.
    Expects the alias of the ORM-class on which to filter, and filter_spec.
    Filter_spec contains the specification on the filter.
    Expects:

    :param operator: The operator to apply, see below for further details
    :param value:
        The value for the right side of the expression,
        the value you want to compare with.
    :param attr_key: The path (list of keys) leading to the value inside
        the attributes/extras; empty when filtering a plain column
    :param is_attribute: Boolean, whether the value is in a json-column,
        or in an attribute like table.
    :param alias: the aliased ORM class, used to resolve ``column_name``
        when ``column`` is not given
    :param column: the (already resolved) column to filter on
    :param column_name: name of the column, looked up on ``alias``

    Implemented and valid operators:

    * for any type:
        * ==  (compare single value, eg: '==':5.0)
        * in    (compare whether in list, eg: 'in':[5, 6, 34]
    * for floats and integers:
        * >
        * <
        * <=
        * >=
    * for strings:
        * like  (case - sensitive), for example
          'like':'node.calc.%'  will match
          node.calc.relax and
          node.calc.RELAX and
          node.calc. but
          not node.CALC.relax
        * ilike (case - unsensitive)
          will also match node.CaLc.relax in the above example

        .. note::
            The character % is a reserved special character in SQL,
            and acts as a wildcard. If you specifically
            want to capture a ``%`` in the string, use: ``_%``

    * for arrays and dictionaries (only for the
      SQLAlchemy implementation):

        * contains: pass a list with all the items that
          the array should contain, or that should be among
          the keys, eg: 'contains': ['N', 'H'])
        * has_key: pass an element that the list has to contain
          or that has to be a key, eg: 'has_key':'N')

    * for arrays only (SQLAlchemy version):
        * of_length
        * longer
        * shorter

    All the above filters invoke a negation of the
    expression if preceded by **~**::

        # first example:
        filter_spec = {
            'name' : {
                '~in':[
                    'halle',
                    'lujah'
                ]
            } # Name not 'halle' or 'lujah'
        }

        # second example:
        filter_spec = {
            'id' : {
                '~==': 2
            }
        } # id is not 2
    """
    # pylint: disable=too-many-branches,too-many-arguments
    expr = None
    # A leading '~' or '!' negates the whole expression (applied at the end).
    if operator.startswith('~'):
        negation = True
        operator = operator.lstrip('~')
    elif operator.startswith('!'):
        negation = True
        operator = operator.lstrip('!')
    else:
        negation = False
    # Early validation for operators that constrain the value's type.
    if operator in ('longer', 'shorter', 'of_length'):
        if not isinstance(value, int):
            raise InputValidationError(
                'You have to give an integer when comparing to a length')
    elif operator in ('like', 'ilike'):
        if not isinstance(value, str):
            raise InputValidationError(
                f'Value for operator {operator} has to be a string (you gave {value})'
            )
    elif operator == 'in':
        try:
            value_type_set = set(type(i) for i in value)
        except TypeError:
            raise TypeError('Value for operator `in` could not be iterated')
        if not value_type_set:
            raise InputValidationError(
                'Value for operator `in` is an empty list')
        if len(value_type_set) > 1:
            raise InputValidationError(
                f'Value for operator `in` contains more than one type: {value}'
            )
    elif operator in ('and', 'or'):
        # Logical conjunction/disjunction: recurse into every sub-filter
        # and combine the resulting expressions.
        expressions_for_this_path = []
        for filter_operation_dict in value:
            for newoperator, newvalue in filter_operation_dict.items():
                expressions_for_this_path.append(
                    self.get_filter_expr(newoperator,
                                         newvalue,
                                         attr_key=attr_key,
                                         is_attribute=is_attribute,
                                         alias=alias,
                                         column=column,
                                         column_name=column_name))
        if operator == 'and':
            expr = and_(*expressions_for_this_path)
        elif operator == 'or':
            expr = or_(*expressions_for_this_path)

    # Not an and/or combination: delegate to the attribute- or
    # column-specific expression builder.
    if expr is None:
        if is_attribute:
            expr = self.get_filter_expr_from_attributes(
                operator,
                value,
                attr_key,
                column=column,
                column_name=column_name,
                alias=alias)
        else:
            if column is None:
                if (alias is None) and (column_name is None):
                    raise Exception(
                        'I need to get the column but do not know the alias and the column name'
                    )
                column = self.get_column(column_name, alias)
            expr = self.get_filter_expr_from_column(operator, value, column)
    if negation:
        return not_(expr)
    return expr
def _prepare_for_submission(self, tempfolder, inputdict):
    """
    This is the routine to be called when you want to create
    the input files for the inpgen with the plug-in.

    :param tempfolder: a aiida.common.folders.Folder subclass where
                       the plugin should put all its files.
    :param inputdict: a dictionary with the input nodes, as they would
                      be returned by get_inputdata_dict (without the Code!)
    :returns: a CalcInfo instance describing copy lists, retrieve list
        and the code invocation
    :raises InputValidationError: on missing/mistyped input nodes or on
        namelists/parameters not supported by inpgen
    """
    #from aiida.common.utils import get_unique_filename, get_suggestion
    #import re

    # Get the connection between coordination number and element symbol
    # maybe do in a differnt way
    _atomic_numbers = {
        data['symbol']: num
        for num, data in PeriodicTableElements.iteritems()
    }

    possible_namelists = self._possible_namelists
    possible_params = self._possible_params
    local_copy_list = []
    remote_copy_list = []
    remote_symlink_list = []
    bulk = True
    film = False

    # convert these 'booleans' to the inpgen format.
    replacer_values_bool = [
        True, False, 'True', 'False', 't', 'T', 'F', 'f'
    ]
    # some keywords require a string " around them in the input file.
    string_replace = ['econfig', 'lo', 'element', 'name']
    # of some keys only the values are writen to the file, specify them here.
    val_only_namelist = ['soc', 'qss']

    # Scaling comes from the Structure
    # but we have to convert from Angstroem to a.u (bohr radii)
    scaling_factors = [1.0, 1.0, 1.0]
    scaling_lat = 1.  # /bohr_a
    scaling_pos = 1. / bohr_a  # Angstrom to atomic
    own_lattice = False  # not self._use_aiida_structure

    # The inpfile gen is run in serial TODO: How to do this by default?
    #self.set_withmpi(False)

    ##########################################
    ############# INPUT CHECK ################
    ##########################################

    # first check existence of structure and if 1D, 2D, 3D
    try:
        structure = inputdict.pop(self.get_linkname('structure'))
    except KeyError:
        raise InputValidationError("No structure specified for this"
                                   " calculation")
    if not isinstance(structure, StructureData):
        raise InputValidationError(
            "structure is not of type StructureData")

    # Any non-periodic direction means we deal with a film, not bulk.
    pbc = structure.pbc
    if False in pbc:
        bulk = False
        film = True

    # check existence of parameters (optional)
    parameters = inputdict.pop(self.get_linkname('parameters'), None)
    if parameters is None:  # use default
        parameters_dict = {}
    else:
        if not isinstance(parameters, ParameterData):
            raise InputValidationError(
                "parameters, if specified, must be of "
                "type ParameterData")
        parameters_dict = _lowercase_dict(parameters.get_dict(),
                                          dict_name='parameters')

    # we write always out rel coordinates, because thats the way FLEUR uses
    # them best. we have to convert them from abs, becauses thats how they
    # are stored in a Structure node. cartesian=F is default
    if 'input' in parameters_dict:
        parameters_dict['input']['cartesian'] = False
        if film:
            parameters_dict['input']['film'] = True
    else:
        if bulk:
            parameters_dict['input'] = {'cartesian': False}
        elif film:
            parameters_dict['input'] = {'cartesian': False, 'film': True}

    namelists_toprint = possible_namelists

    # check parameters keys TODO: values needed, or keep plug-in as stupid as possible?
    #if parameters_dict:# TODO remove, unnesseary now?
    input_params = parameters_dict

    #TODO:?make everything lowercase in the database, and change it to inpgen format?
    #_lowercase_dict(parameters.get_dict(),
    #dict_name='parameters')
    #input_params = {k: _lowercase_dict(val, dict_name=k)
    #                for k, val in input_params.iteritems()}
    #input_params_keys = input_params.keys()

    # The 'title' goes on the first line of the input file, not in a
    # namelist. NOTE(review): if absent, self._inp_title presumably keeps
    # a class-level default — confirm on the class definition.
    if 'title' in input_params.keys():
        self._inp_title = input_params.pop('title')
    #TODO validate type of values of the input parameter keys ?

    # check input_parameters
    for namelist, paramdic in input_params.iteritems():
        if 'atom' in namelist:  # this namelist can be specified more often
            # special atom namelist needs to be set for writing,
            # but insert it in the right spot!
            index = namelists_toprint.index('atom') + 1
            namelists_toprint.insert(index, namelist)
            namelist = 'atom'
        if namelist not in possible_namelists:
            raise InputValidationError(
                "The namelist '{}' is not supported by the fleur"
                " inputgenerator. Check on the fleur website or add '{}'"
                "to _possible_namelists.".format(namelist, namelist))
        for para in paramdic.keys():
            if para not in possible_params[namelist]:
                raise InputValidationError(
                    "The property '{}' is not supported by the "
                    "namelist '{}'. "
                    "Check the fleur website, or if it really is,"
                    " update _possible_params. ".format(para, namelist))
            if paramdic[para] in replacer_values_bool:
                # because 1/1.0 == True, and 0/0.0 == False
                # maybe change in convert_to_fortran that no error occurs
                # NOTE(review): a numeric value only matches the list via
                # 1 == True / 0 == False; convert it only if it really IS
                # a bool, otherwise (string spellings) always convert.
                if isinstance(paramdic[para], (int, float)):
                    if isinstance(paramdic[para], bool):
                        paramdic[para] = convert_to_fortran_bool(
                            paramdic[para])
                else:
                    paramdic[para] = convert_to_fortran_bool(
                        paramdic[para])
            if para in string_replace:
                # TODO check if its in the parameter dict
                #print para
                paramdic[para] = convert_to_fortran_string(paramdic[para])
                #print "{}".format(paramdic[para])

    # in fleur it is possible to give a lattice namelist
    if 'lattice' in input_params.keys():
        own_lattice = True
        structure = inputdict.pop(self.get_linkname('structure'), None)
        if structure is not None:  # two structures given?
            # which one should be prepared? TODO: print warning or even error
            if self._use_aiida_structure:
                if not isinstance(structure, StructureData):
                    raise InputValidationError("structure is not of type"
                                               " StructureData")
                input_params.pop('lattice', {})
                own_lattice = False
    '''
    # TODO allow only usual kpt meshes and use therefore Aiida kpointData
    if self._use_kpoints:
        try:
            kpoints = inputdict.pop(self.get_linkname('kpoints'))
        except KeyError:
            raise InputValidationError("No kpoints specified for this"
                                       " calculation")
        if not isinstance(kpoints, KpointsData):
            raise InputValidationError("kpoints is not of type KpointsData")
    '''

    #TODO I think the code should not be in the input dict. check local,
    # verus remote, codeinfos, one several codes..
    try:
        code = inputdict.pop(self.get_linkname('code'))
    except KeyError:
        raise InputValidationError("No code specified for this "
                                   "calculation")

    # check existence of settings (optional)
    settings = inputdict.pop(self.get_linkname('settings'), None)
    #print('settings: {}'.format(settings))
    if settings is None:
        settings_dict = {}
    else:
        if not isinstance(settings, ParameterData):
            raise InputValidationError(
                "settings, if specified, must be of "
                "type ParameterData")
        else:
            settings_dict = settings.get_dict()

    # check for for allowed keys, ignor unknown keys but warn.
    for key in settings_dict.keys():
        if key not in self._settings_keys:
            # TODO warrning
            self.logger.info("settings dict key {} for Fleur calculation"
                             "not reconized, only {} are allowed."
                             "".format(key, self._settings_keys))

    # Here, there should be no more parameters...
    if inputdict:
        raise InputValidationError("The following input data nodes are "
                                   "unrecognized: {}".format(
                                       inputdict.keys()))

    ##############################
    # END OF INITIAL INPUT CHECK #
    ##############################

    #######################################################
    ######### PREPARE PARAMETERS FOR INPUT FILE ###########
    #######################################################

    #### STRUCTURE_PARAMETERS ####

    scaling_factor_card = ""
    cell_parameters_card = ""

    if not own_lattice:
        # Cell vectors are stored in Angstrom; convert to Bohr radii.
        cell = structure.cell
        for vector in cell:
            scaled = [a * scaling_pos for a in vector]  # scaling_pos=1./bohr_a
            cell_parameters_card += ("{0:18.10f} {1:18.10f} {2:18.10f}"
                                     "\n".format(scaled[0], scaled[1],
                                                 scaled[2]))
        scaling_factor_card += ("{0:18.10f} {1:18.10f} {2:18.10f}"
                                "\n".format(scaling_factors[0],
                                            scaling_factors[1],
                                            scaling_factors[2]))

    #### ATOMIC_POSITIONS ####
    # TODO: be careful with units
    atomic_positions_card_list = [""]

    # Fleur does not have any keyword before the atomic species.
    # first the number of atoms then the form nuclear charge, postion
    # Fleur hast the option of nuclear charge as floats,
    # allows the user to distinguish two atoms and break the symmetry.
    if not own_lattice:
        natoms = len(structure.sites)
        # for FLEUR true, general not, because you could put several
        # atoms on a site
        # TODO: test that only one atom at site?
        atomic_positions_card_list.append(" {0:3}\n".format(natoms))
        # TODO this feature might change in Fleur, do different. that in
        # inpgen kind gets a name, which will also be the name in fleur
        # inp.xml. now user has to make kind_name = atom id.
        for site in structure.sites:
            kind_name = site.kind_name
            site_symbol = structure.get_kind(kind_name).symbols[
                0]  # TODO: list; I assume atoms therefore I just get the first one...
            atomic_number = _atomic_numbers[site_symbol]
            atomic_number_name = atomic_number
            # This is an important fact: if the user renames a kind it
            # becomes a new species! Encode the trailing integer of the
            # kind name as a fractional nuclear charge (e.g. 26.1 for Fe1).
            if site_symbol != kind_name:
                suc = True
                try:
                    head = kind_name.rstrip('0123456789')
                    kind_namet = int(kind_name[len(head):])
                except ValueError:
                    suc = False
                if suc:
                    atomic_number_name = '{}.{}'.format(
                        atomic_number, kind_namet)
            # per default we use relative coordinates in Fleur
            # we have to scale back to atomic units from angstrom
            pos = site.position
            #print 'pos {}'.format(pos)
            if bulk:
                vector_rel = abs_to_rel(pos, cell)
            elif film:
                # Films: z stays absolute (in Bohr), x/y relative.
                vector_rel = abs_to_rel_f(pos, cell, structure.pbc)
                vector_rel[2] = vector_rel[2] * scaling_pos
            atomic_positions_card_list.append(
                " {0:3} {1:18.10f} {2:18.10f} {3:18.10f}"
                "\n".format(atomic_number_name, vector_rel[0],
                            vector_rel[1], vector_rel[2]))
            #print atomic_positions_card_list
        #TODO check format
    else:
        # TODO with own lattice atomic positions have to come from somewhere
        # else.... User input?
        raise InputValidationError("fleur lattice needs also the atom "
                                   " position as input,"
                                   " not implemented yet, sorry!")
    atomic_positions_card = "".join(atomic_positions_card_list)
    del atomic_positions_card_list  # Free memory

    #### Kpts ####
    # TODO: kpts
    #kpoints_card = ""#.join(kpoints_card_list)
    #del kpoints_card_list

    #######################################
    #### WRITE ALL CARDS IN INPUT FILE ####
    #######################################

    input_filename = tempfolder.get_abs_path(self._INPUT_FILE_NAME)
    with open(input_filename, 'w') as infile:
        # first write title
        infile.write("{0}\n".format(self._inp_title))
        # then write &input namelist
        infile.write("&{0}".format('input'))

        # namelist content; set to {} if not present, so that we leave an
        # empty namelist
        namelist = input_params.pop('input', {})
        for k, val in sorted(namelist.iteritems()):
            infile.write(get_input_data_text(k, val, False, mapping=None))
        infile.write("/\n")

        # Write lattice information now
        infile.write(cell_parameters_card)
        infile.write("{0:18.10f}\n".format(scaling_lat))
        infile.write(scaling_factor_card)
        infile.write("\n")

        # Write Atomic positons
        infile.write(atomic_positions_card)

        # Write namelists after atomic positions; popping each namelist
        # means whatever remains in input_params afterwards was invalid.
        for namels_name in namelists_toprint:
            namelist = input_params.pop(namels_name, {})
            if namelist:
                if 'atom' in namels_name:
                    namels_name = 'atom'
                infile.write("&{0}\n".format(namels_name))
                if namels_name in val_only_namelist:
                    # e.g. soc/qss: only values are written, no keys.
                    for k, val in sorted(namelist.iteritems()):
                        infile.write(
                            get_input_data_text(k, val, True, mapping=None))
                else:
                    for k, val in sorted(namelist.iteritems()):
                        infile.write(
                            get_input_data_text(k, val, False, mapping=None))
                infile.write("/\n")
        #infile.write(kpoints_card)

    if input_params:
        raise InputValidationError(
            "input_params leftover: The following namelists are specified"
            " in input_params, but are "
            "not valid namelists for the current type of calculation: "
            "{}".format(",".join(input_params.keys())))

    calcinfo = CalcInfo()
    calcinfo.uuid = self.uuid
    calcinfo.local_copy_list = local_copy_list
    calcinfo.remote_copy_list = remote_copy_list
    calcinfo.remote_symlink_list = remote_symlink_list

    # Retrieve per default only out file and inp.xml file?
    retrieve_list = []
    # TODO: let the user specify?
    #settings_retrieve_list = settings_dict.pop(
    #    'ADDITIONAL_RETRIEVE_LIST', [])
    retrieve_list.append(self._INPXML_FILE_NAME)
    retrieve_list.append(self._OUTPUT_FILE_NAME)
    retrieve_list.append(self._SHELLOUT_FILE_NAME)
    retrieve_list.append(self._ERROR_FILE_NAME)
    retrieve_list.append(self._STRUCT_FILE_NAME)
    retrieve_list.append(self._INPUT_FILE_NAME)
    #calcinfo.retrieve_list += settings_retrieve_list
    #calcinfo.retrieve_list += self._internal_retrieve_list

    # user specific retrieve
    add_retrieve = settings_dict.get('additional_retrieve_list', [])
    #print('add_retrieve: {}'.format(add_retrieve))
    for file1 in add_retrieve:
        retrieve_list.append(file1)
    remove_retrieve = settings_dict.get('remove_from_retrieve_list', [])
    for file1 in remove_retrieve:
        if file1 in retrieve_list:
            retrieve_list.remove(file1)

    calcinfo.retrieve_list = []
    for file1 in retrieve_list:
        calcinfo.retrieve_list.append(file1)

    codeinfo = CodeInfo()
    cmdline_params = ["-explicit"]  # TODO? let the user decide -econfig?
    # user specific commandline_options
    for command in settings_dict.get('cmdline', []):
        cmdline_params.append(command)
    codeinfo.cmdline_params = (list(cmdline_params))
    codeinfo.code_uuid = code.uuid
    codeinfo.stdin_name = self._INPUT_FILE_NAME
    codeinfo.stdout_name = self._SHELLOUT_FILE_NAME  # shell output will be piped in file
    codeinfo.stderr_name = self._ERROR_FILE_NAME  # std error too
    calcinfo.codes_info = [codeinfo]
    '''
    if settings_dict:
        try:
            Parserclass = self.get_parserclass()
            parser = Parserclass(self)
            parser_opts = parser.get_parser_settings_key()
            settings_dict.pop(parser_opts)
        except (KeyError, AttributeError):  # the key parser_opts isn't
            # inside the dictionary
            raise InputValidationError(
                "The following keys have been found in the settings "
                "input node, but were not understood: {}"
                "".format(",".join(settings_dict.keys())))
    '''
    return calcinfo
def get_ormclass(self, cls, ormclasstype): """ Return the valid ormclass for the connections """ # Checks whether valid cls and ormclasstype are done before # If it is a class: if cls: # Nodes: if issubclass(cls, self.Node): # If something pass an ormclass node # Users wouldn't do that, by why not... ormclasstype = self.AiidaNode._plugin_type_string query_type_string = self.AiidaNode._query_type_string ormclass = cls elif issubclass(cls, self.AiidaNode): ormclasstype = cls._plugin_type_string query_type_string = cls._query_type_string ormclass = self.Node # Groups: elif issubclass(cls, self.Group): ormclasstype = 'group' query_type_string = None ormclass = cls elif issubclass(cls, self.AiidaGroup): ormclasstype = 'group' query_type_string = None ormclass = self.Group # Computers: elif issubclass(cls, self.Computer): ormclasstype = 'computer' query_type_string = None ormclass = cls elif issubclass(cls, self.AiidaComputer): ormclasstype = 'computer' query_type_string = None ormclass = self.Computer # Users elif issubclass(cls, self.User): ormclasstype = 'user' query_type_string = None ormclass = cls elif issubclass(cls, self.AiidaUser): ormclasstype = 'user' query_type_string = None ormclass = self.User else: raise InputValidationError("\n\n\n" "I do not know what to do with {}" "\n\n\n".format(cls)) # If it is not a class else: if ormclasstype.lower() == 'group': ormclasstype = ormclasstype.lower() query_type_string = None ormclass = self.Group elif ormclasstype.lower() == 'computer': ormclasstype = ormclasstype.lower() query_type_string = None ormclass = self.Computer elif ormclasstype.lower() == 'user': ormclasstype = ormclasstype.lower() query_type_string = None ormclass = self.User else: # At this point, it has to be a node. 
# The only valid string at this point is a string # that matches exactly the _plugin_type_string # of a node class from aiida.common.old_pluginloader import from_type_to_pluginclassname from aiida.common.pluginloader import load_plugin ormclass = self.Node try: pluginclassname = from_type_to_pluginclassname( ormclasstype) # I want to check at this point if that is a valid class, # so I use the load_plugin to load the plugin class # and use the classes _plugin_type_string attribute # In the future, assuming the user knows what he or she is doing # we could remove that check # The query_type_string we can get from # the aiida.common.old_pluginloader function get_query_type_string PluginClass = load_plugin(self.AiidaNode, 'aiida.orm', pluginclassname) except (DbContentError, MissingPluginError) as e: raise InputValidationError( "\nYou provide a vertice of the path with\n" "type={}\n" "But that string is not a valid type string\n" "Exception raise during check\n" "{}".format(ormclasstype, e)) ormclasstype = PluginClass._plugin_type_string query_type_string = PluginClass._query_type_string return ormclass, ormclasstype, query_type_string
def query_jobcalculations_by_computer_user_state(
        self,
        state,
        computer=None,
        user=None,
        only_computer_user_pairs=False,
        only_enabled=True,
        limit=None):
    # Here I am overriding the implementation using the QueryBuilder:
    """Return job calculations in a given state, optionally restricted
    by computer and/or user.

    :param state: the state used to filter; must be one of the valid
        states defined in aiida.common.datastructures.calc_states
    :type state: str
    :param computer: restrict to this computer; accepts a Django
        DbComputer entry, a Computer object, or a hostname string
    :param user: restrict to this user (a Django DbUser entry or its pk)
    :param bool only_computer_user_pairs: if False (default) return the
        matching calculation nodes; if True return instead a list of
        distinct (Computer, user) tuples
    :param bool only_enabled: if True (default) only consider
        calculations on enabled computers
    :param limit: when given (and pairs were not requested), cap the
        number of returned results
    :return: a list of calculation objects (or tuples, see above)
        matching the filters
    """
    # Calculation states are stored as string attributes, hence the
    # dbattributes__tval filter below; update it if that ever changes.
    from aiida.orm import Computer, User
    from aiida.common.exceptions import InputValidationError
    from aiida.orm.implementation.django.calculation.job import JobCalculation
    from aiida.common.datastructures import calc_states
    from aiida.backends.djsite.db.models import DbUser

    if state not in calc_states:
        raise InputValidationError(
            "querying for calculation state='{}', but it "
            "is not a valid calculation state".format(state))

    # Translate the optional restrictions into Django queryset kwargs.
    extra_filters = {}
    if computer is not None:
        # Normalize string / DbComputer / Computer inputs to a DbComputer.
        extra_filters['dbcomputer'] = Computer.get(computer).dbcomputer
    if user is not None:
        extra_filters['user'] = user
    if only_enabled:
        extra_filters['dbcomputer__enabled'] = True

    matches = JobCalculation.query(dbattributes__key='state',
                                   dbattributes__tval=state,
                                   **extra_filters)

    if only_computer_user_pairs:
        id_pairs = matches.values_list('dbcomputer__id',
                                       'user__id').distinct()
        #return cls(dbcomputer=DbComputer.get_dbcomputer(computer))DbNode.objects.get(pk=pk).get_aiida_class()
        return [(Computer.get(comp_id),
                 DbUser.objects.get(pk=usr_id).get_aiida_class())
                for comp_id, usr_id in id_pairs]

    if limit is not None:
        return matches[:limit]
    return matches
def _prepare_for_submission(self, tempfolder, inputdict):
    """
    This is the routine to be called when you want to create
    the input files and related stuff with a plugin.

    Validates and consumes (pops) every expected node from ``inputdict``,
    writes the main input file (and optionally an ENVIRON input and an
    ``.EXIT`` file) into ``tempfolder``, and assembles the CalcInfo/CodeInfo
    describing files to copy, symlink and retrieve.

    :param tempfolder: a aiida.common.folders.Folder subclass where
        the plugin should put all its files.
    :param inputdict: a dictionary with the input nodes, as they would
        be returned by get_inputs_dict (without the Code!)
    :return: a CalcInfo instance describing the calculation.
    :raises InputValidationError: if a required node is missing, a node has
        the wrong type, or unknown input nodes / settings keys remain.
    """
    local_copy_list = []
    remote_copy_list = []
    remote_symlink_list = []

    # --- Input validation: each expected node is popped so that, at the
    # end, anything left in inputdict is an unrecognized input. ---
    try:
        parameters = inputdict.pop(self.get_linkname('parameters'))
    except KeyError:
        raise InputValidationError(
            "No parameters specified for this calculation")
    if not isinstance(parameters, ParameterData):
        raise InputValidationError(
            "parameters is not of type ParameterData")

    try:
        structure = inputdict.pop(self.get_linkname('structure'))
    except KeyError:
        raise InputValidationError(
            "No structure specified for this calculation")
    if not isinstance(structure, StructureData):
        raise InputValidationError(
            "structure is not of type StructureData")

    # kpoints are only required for calculation types that use them.
    if self._use_kpoints:
        try:
            kpoints = inputdict.pop(self.get_linkname('kpoints'))
        except KeyError:
            raise InputValidationError(
                "No kpoints specified for this calculation")
        if not isinstance(kpoints, KpointsData):
            raise InputValidationError(
                "kpoints is not of type KpointsData")
    else:
        kpoints = None

    # Settings can be undefined, and defaults to an empty dictionary
    settings = inputdict.pop(self.get_linkname('settings'), None)
    if settings is None:
        settings_dict = {}
    else:
        if not isinstance(settings, ParameterData):
            raise InputValidationError(
                "settings, if specified, must be of "
                "type ParameterData")
        # Settings converted to uppercase
        settings_dict = _uppercase_dict(settings.get_dict(),
                                        dict_name='settings')

    pseudos = {}
    # I create here a dictionary that associates each kind name to a pseudo.
    # A single pseudo link may cover several kinds; the kind names are
    # encoded in the link name after the pseudo prefix, joined by '_'.
    for link in inputdict.keys():
        if link.startswith(self._get_linkname_pseudo_prefix()):
            kindstring = link[len(self._get_linkname_pseudo_prefix()):]
            kinds = kindstring.split('_')
            the_pseudo = inputdict.pop(link)
            if not isinstance(the_pseudo, UpfData):
                raise InputValidationError(
                    "Pseudo for kind(s) {} is not of "
                    "type UpfData".format(",".join(kinds)))
            for kind in kinds:
                if kind in pseudos:
                    raise InputValidationError(
                        "Pseudo for kind {} passed "
                        "more than one time".format(kind))
                pseudos[kind] = the_pseudo

    # Optional nodes: parent folder (for restarts), vdW table, Hubbard file.
    parent_calc_folder = inputdict.pop(self.get_linkname('parent_folder'),
                                       None)
    if parent_calc_folder is not None:
        if not isinstance(parent_calc_folder, RemoteData):
            raise InputValidationError("parent_calc_folder, if specified, "
                                       "must be of type RemoteData")

    vdw_table = inputdict.pop(self.get_linkname('vdw_table'), None)
    if vdw_table is not None:
        if not isinstance(vdw_table, SinglefileData):
            raise InputValidationError("vdw_table, if specified, "
                                       "must be of type SinglefileData")

    hubbard_file = inputdict.pop(self.get_linkname('hubbard_file'), None)
    if hubbard_file is not None:
        if not isinstance(hubbard_file, SinglefileData):
            raise InputValidationError(
                'hubbard_file, if specified, must be of type SinglefileData'
            )

    try:
        code = inputdict.pop(self.get_linkname('code'))
    except KeyError:
        raise InputValidationError(
            "No code specified for this calculation")

    # Here, there should be no more parameters...
    if inputdict:
        raise InputValidationError("The following input data nodes are "
                                   "unrecognized: {}".format(
                                       inputdict.keys()))

    # Check structure, get species, check peudos:
    # every kind in the structure needs exactly one pseudo, and vice versa.
    kindnames = [k.name for k in structure.kinds]
    if set(kindnames) != set(pseudos.keys()):
        err_msg = ("Mismatch between the defined pseudos and the list of "
                   "kinds of the structure. Pseudos: {}; kinds: {}".format(
                       ",".join(pseudos.keys()), ",".join(list(kindnames))))
        raise InputValidationError(err_msg)

    ##############################
    # END OF INITIAL INPUT CHECK #
    ##############################

    # I create the subfolder that will contain the pseudopotentials
    tempfolder.get_subfolder(self._PSEUDO_SUBFOLDER, create=True)
    # I create the subfolder with the output data (sometimes Quantum
    # Espresso codes crash if an empty folder is not already there
    tempfolder.get_subfolder(self._OUTPUT_SUBFOLDER, create=True)

    # If present, add also the Van der Waals table to the pseudo dir
    # Note that the name of the table is not checked but should be the
    # one expected by QE.
    if vdw_table:
        src_path = vdw_table.get_file_abs_path()
        dst_path = os.path.join(
            self._PSEUDO_SUBFOLDER,
            os.path.split(vdw_table.get_file_abs_path())[1])
        local_copy_list.append((src_path, dst_path))

    if hubbard_file:
        src_path = hubbard_file.get_file_abs_path()
        dst_path = self.input_file_name_hubbard_file
        local_copy_list.append((src_path, dst_path))

    # Generate the main input file content; the helper also tells us which
    # pseudo files must be copied into the sandbox.
    input_filecontent, local_copy_pseudo_list = self._generate_PWCPinputdata(
        parameters, settings_dict, pseudos, structure, kpoints)
    local_copy_list += local_copy_pseudo_list

    input_filename = tempfolder.get_abs_path(self._INPUT_FILE_NAME)
    with open(input_filename, 'w') as infile:
        infile.write(input_filecontent)

    # operations for restart
    symlink = settings_dict.pop('PARENT_FOLDER_SYMLINK',
                                self._default_symlink_usage)  # a boolean
    if symlink:
        if parent_calc_folder is not None:
            # I put the symlink to the old parent ./out folder
            remote_symlink_list.append(
                (parent_calc_folder.get_computer().uuid,
                 os.path.join(parent_calc_folder.get_remote_path(),
                              self._restart_copy_from),
                 self._restart_copy_to))
    else:
        # copy remote output dir, if specified
        if parent_calc_folder is not None:
            remote_copy_list.append(
                (parent_calc_folder.get_computer().uuid,
                 os.path.join(parent_calc_folder.get_remote_path(),
                              self._restart_copy_from),
                 self._restart_copy_to))

    # here we may create an aiida.EXIT file
    # (ONLY_INITIALIZATION makes QE stop right after initialization).
    create_exit_file = settings_dict.pop('ONLY_INITIALIZATION', False)
    if create_exit_file:
        exit_filename = tempfolder.get_abs_path('{}.EXIT'.format(
            self._PREFIX))
        with open(exit_filename, 'w') as f:
            f.write('\n')

    # Check if specific inputs for the ENVIRON module where specified
    environ_namelist = settings_dict.pop('ENVIRON', None)
    if environ_namelist is not None:
        if not isinstance(environ_namelist, dict):
            raise InputValidationError(
                "ENVIRON namelist should be specified as a dictionary")
        # We first add the environ flag to the command-line options (if not already present)
        try:
            if '-environ' not in settings_dict['CMDLINE']:
                settings_dict['CMDLINE'].append('-environ')
        except KeyError:
            settings_dict['CMDLINE'] = ['-environ']
        # To create a mapping from the species to an incremental fortran 1-based index
        # we use the alphabetical order as in the inputdata generation
        mapping_species = {
            sp_name: (idx + 1)
            for idx, sp_name in enumerate(
                sorted([kind.name for kind in structure.kinds]))
        }
        environ_input_filename = tempfolder.get_abs_path(
            self._ENVIRON_INPUT_FILE_NAME)
        with open(environ_input_filename, 'w') as environ_infile:
            environ_infile.write("&ENVIRON\n")
            # Sorted for a deterministic, reproducible input file.
            for k, v in sorted(environ_namelist.iteritems()):
                environ_infile.write(
                    convert_input_to_namelist_entry(
                        k, v, mapping=mapping_species))
            environ_infile.write("/\n")

    # Check for the deprecated 'ALSO_BANDS' setting and if present fire a deprecation log message
    also_bands = settings_dict.pop('ALSO_BANDS', None)
    if also_bands:
        import logging
        from aiida.common.log import get_dblogger_extra

        logger = logging.LoggerAdapter(logger=self.logger,
                                       extra=get_dblogger_extra(self))
        logger.warning(
            "The '{}' setting is deprecated as bands are now parsed by default. "
            "If you do not want the bands to be parsed set the '{}' to True {}. "
            "Note that the eigenvalue.xml files are also no longer stored in the repository"
            .format('also_bands', 'no_bands', type(self)))

    calcinfo = CalcInfo()

    calcinfo.uuid = self.uuid
    # Empty command line by default
    cmdline_params = settings_dict.pop('CMDLINE', [])
    # we commented calcinfo.stin_name and added it here in cmdline_params
    # in this way the mpirun ... pw.x ... < aiida.in
    # is replaced by mpirun ... pw.x ... -in aiida.in
    # in the scheduler, _get_run_line, if cmdline_params is empty, it
    # simply uses < calcinfo.stin_name
    calcinfo.cmdline_params = (list(cmdline_params) +
                               ["-in", self._INPUT_FILE_NAME])

    codeinfo = CodeInfo()
    codeinfo.cmdline_params = (list(cmdline_params) +
                               ["-in", self._INPUT_FILE_NAME])
    codeinfo.stdout_name = self._OUTPUT_FILE_NAME
    codeinfo.code_uuid = code.uuid
    calcinfo.codes_info = [codeinfo]

    calcinfo.local_copy_list = local_copy_list
    calcinfo.remote_copy_list = remote_copy_list
    calcinfo.remote_symlink_list = remote_symlink_list

    # Retrieve by default the output file and the xml file
    calcinfo.retrieve_list = []
    calcinfo.retrieve_list.append(self._OUTPUT_FILE_NAME)
    calcinfo.retrieve_list.append(self._DATAFILE_XML)
    calcinfo.retrieve_list += settings_dict.pop('ADDITIONAL_RETRIEVE_LIST',
                                                [])
    calcinfo.retrieve_list += self._internal_retrieve_list

    # Retrieve the k-point directories with the xml files to the temporary folder
    # to parse the band eigenvalues and occupations but not to have to save the raw files
    # if and only if the 'no_bands' key was not set to true in the settings
    no_bands = settings_dict.pop('NO_BANDS', False)
    if no_bands is False:
        xmlpaths = os.path.join(self._OUTPUT_SUBFOLDER,
                                self._PREFIX + '.save', 'K*[0-9]',
                                'eigenval*.xml')
        calcinfo.retrieve_temporary_list = [[xmlpaths, '.', 2]]

    # Pop the parser-specific settings key (if any) so it does not trigger
    # the "not understood" error below.
    try:
        Parserclass = self.get_parserclass()
        parser = Parserclass(self)
        parser_opts = parser.get_parser_settings_key().upper()
        settings_dict.pop(parser_opts)
    except (KeyError, AttributeError):
        # the key parser_opts isn't inside the dictionary
        pass

    # Any key still left in settings_dict was not consumed by any of the
    # pops above, i.e. it is unknown to this plugin.
    if settings_dict:
        raise InputValidationError(
            "The following keys have been found in "
            "the settings input node, but were not understood: {}".format(
                ",".join(settings_dict.keys())))

    return calcinfo
def _generate_NEBinputdata(self, neb_parameters, settings_dict):
    """
    Build the text of the NEB-specific input file: the &PATH namelist
    followed, when climbing images are requested, by a CLIMBING_IMAGES card.

    :param neb_parameters: ParameterData with the NEB input parameters
    :param settings_dict: settings dictionary; the CLIMBING_IMAGES key is
        popped from it when a manual climbing-image scheme is used
    :return: the full input file content as a string
    :raises InputValidationError: for missing/invalid climbing images or
        unknown namelists
    """
    # First-level keys (namelist names) uppercase, second-level keys
    # (namelist variables) lowercase; deeper levels are left untouched.
    raw_params = _uppercase_dict(neb_parameters.get_dict(),
                                 dict_name='parameters')
    input_params = {
        namelist_name: _lowercase_dict(namelist_body,
                                       dict_name=namelist_name)
        for namelist_name, namelist_body in raw_params.iteritems()
    }

    # 'PATH' is the only (compulsory) namelist; an absent one is treated
    # as empty. Popping it here also removes it from the leftover check.
    path_namelist = input_params.pop('PATH', {})

    # In case of climbing image, we need the corresponding card.
    climbing_image_card = None
    if path_namelist.get('ci_scheme', 'no-ci').lower() in ['manual']:
        try:
            climbing_image_list = settings_dict.pop("CLIMBING_IMAGES")
        except KeyError:
            raise InputValidationError(
                "No climbing image specified for this calculation")
        if not isinstance(climbing_image_list, list):
            raise InputValidationError(
                "Climbing images should be provided as a list")

        # Valid climbing images lie strictly between the first and the
        # last image: 2 <= i <= num_of_images - 1.
        num_of_images = path_namelist.get('num_of_images', 2)
        out_of_range = [
            image for image in climbing_image_list
            if image < 2 or image >= num_of_images
        ]
        if out_of_range:
            raise InputValidationError(
                "The climbing images should be in the range between the first "
                "and the last image")

        climbing_image_card = (
            "CLIMBING_IMAGES\n" +
            ", ".join(str(image) for image in climbing_image_list) + "\n")

    # Assemble the file: &PATH namelist (keys sorted for reproducibility),
    # closing slash, then the optional climbing-image card.
    pieces = ["&PATH\n"]
    for key, value in sorted(path_namelist.iteritems()):
        pieces.append(get_input_data_text(key, value))
    pieces.append("/\n")
    if climbing_image_card is not None:
        pieces.append(climbing_image_card)

    # Anything still left over is a namelist this calculation cannot use.
    if input_params:
        raise InputValidationError(
            "The following namelists are specified in input_params, but are "
            "not valid namelists for the current type of calculation: "
            "{}".format(",".join(input_params.keys())))

    return "".join(pieces)
def create_restart(self,
                   force_restart=False,
                   parent_folder_symlink=None,
                   use_output_structure=False,
                   restart_from_beginning=False):
    """
    Create a new, unstored calculation that restarts this one.

    Intended for calculations that did not complete (e.g. max walltime
    reached), not for genuinely FAILED ones. The returned calculation c2
    has all links prepared but is not stored in the DB; submit it with:
    c2.store_all()
    c2.submit()

    .. deprecated:: 3.0
        Use the helper method
        :py:func:`aiida_quantumespresso.utils.restart.create_restart_cp` or
        :py:func:`aiida_quantumespresso.utils.restart.create_restart_pw`
        instead, that returns a calculation builder rather than a new,
        unstored calculation.

    :param bool force_restart: restart also if parent is not in FINISHED
        state (e.g. FAILED, IMPORTED, etc.). Default=False.
    :param bool parent_folder_symlink: if True, symlinks are used instead
        of hard copies of the files. Default given by
        self._default_symlink_usage.
    :param bool use_output_structure: if True, the output structure of the
        restarted calculation is used if available, rather than its input
        structure. Useful for nscf or bands calculations, but it SHOULD NOT
        be used for the restart of a relaxation run. Default=False.
    :param bool restart_from_beginning: If set to True, creates a copy of
        the parent calculation, without using the scratch for the restart.
        Useful to restart calculations that have crashed on the cluster for
        external reasons. Default=False.
    """
    from aiida_quantumespresso.utils.restart import clone_calculation
    import warnings
    warnings.warn(
        'This method has been deprecated, use instead '
        'aiida_quantumespresso.utils.restart.create_restart_pw() or '
        'aiida_quantumespresso.utils.restart.create_restart_cp()',
        DeprecationWarning)
    from aiida.common.datastructures import calc_states

    # ``from_attribute=True`` correctly handles IMPORTED calculations.
    if self.get_state(from_attribute=True) != calc_states.FINISHED \
            and not force_restart:
        raise InputValidationError(
            "Calculation to be restarted must be "
            "in the {} state. Otherwise, use the force_restart "
            "flag".format(calc_states.FINISHED))

    if parent_folder_symlink is None:
        parent_folder_symlink = self._default_symlink_usage

    inputs = self.get_inputs_dict()

    if restart_from_beginning:
        # Reuse the parent's parameters node unchanged.
        restart_parameters = inputs[self.get_linkname('parameters')]
    else:
        # Restart from the parent scratch: flag restart_mode in CONTROL.
        parameters_dict = inputs[self.get_linkname(
            'parameters')].get_dict()
        parameters_dict['CONTROL']['restart_mode'] = 'restart'
        restart_parameters = ParameterData(dict=parameters_dict)

    remote_folders = self.get_outputs(node_type=RemoteData)
    if len(remote_folders) != 1:
        raise InputValidationError("More than one output RemoteData found "
                                   "in calculation {}".format(self.pk))
    remote_folder = remote_folders[0]

    restart = clone_calculation(self)

    # Re-create the input links on the clone.
    restart.use_parameters(restart_parameters)

    structure_node = inputs[self.get_linkname('structure')]
    if use_output_structure:
        # Prefer the relaxed/output structure when the parent produced one.
        try:
            structure_node = self.out.output_structure
        except AttributeError:
            pass
    restart.use_structure(structure_node)

    if self._use_kpoints:
        restart.use_kpoints(inputs[self.get_linkname('kpoints')])
    restart.use_code(inputs[self.get_linkname('code')])

    try:
        settings = inputs[self.get_linkname('settings')].get_dict()
    except KeyError:
        settings = {}
    if parent_folder_symlink is not None:
        settings['PARENT_FOLDER_SYMLINK'] = parent_folder_symlink
    if settings:
        # Only attach a settings node when there is something to pass on.
        restart.use_settings(ParameterData(dict=settings))

    restart._set_parent_remotedata(remote_folder)

    # Re-link every pseudopotential of the parent under its original label.
    for linkname, node in self.get_inputs_dict().iteritems():
        if isinstance(node, UpfData):
            restart.add_link_from(node, label=linkname)

    # Forward the Van der Waals table, when the parent had one.
    try:
        vdw_table_node = inputs[self.get_linkname('vdw_table')]
    except KeyError:
        pass  # No VdW table
    else:
        restart.use_vdw_table(vdw_table_node)

    return restart
def _prepare_for_submission(self, tempfolder, inputdict):
    """
    This is the routine to be called when you want to create
    the input files and related stuff with a plugin.

    NEB variant: writes the NEB input file plus one PW input file per
    image (initial and final), and assembles the CalcInfo/CodeInfo with
    the files to copy/symlink/retrieve.

    :param tempfolder: a aiida.common.folders.Folder subclass where
        the plugin should put all its files.
    :param inputdict: a dictionary with the input nodes, as they would
        be returned by get_inputdata_dict (without the Code!)
    :return: a CalcInfo instance describing the calculation.
    :raises InputValidationError: for missing/mistyped nodes, inconsistent
        first/last images, pseudo filename clashes, or unknown settings.
    """
    import numpy as np

    local_copy_list = []
    remote_copy_list = []
    remote_symlink_list = []

    # --- Input validation: expected nodes are popped from inputdict so
    # that any leftover at the end is an unrecognized input. ---
    try:
        code = inputdict.pop(self.get_linkname('code'))
    except KeyError:
        raise InputValidationError(
            "No code specified for this calculation")

    try:
        pw_parameters = inputdict.pop(self.get_linkname('pw_parameters'))
    except KeyError:
        raise InputValidationError(
            "No PW parameters specified for this calculation")
    if not isinstance(pw_parameters, ParameterData):
        raise InputValidationError(
            "PW parameters is not of type ParameterData")

    try:
        neb_parameters = inputdict.pop(self.get_linkname('neb_parameters'))
    except KeyError:
        raise InputValidationError(
            "No NEB parameters specified for this calculation")
    if not isinstance(neb_parameters, ParameterData):
        raise InputValidationError(
            "NEB parameters is not of type ParameterData")

    try:
        first_structure = inputdict.pop(
            self.get_linkname('first_structure'))
    except KeyError:
        raise InputValidationError(
            "No initial structure specified for this calculation")
    if not isinstance(first_structure, StructureData):
        raise InputValidationError(
            "Initial structure is not of type StructureData")

    try:
        last_structure = inputdict.pop(self.get_linkname('last_structure'))
    except KeyError:
        raise InputValidationError(
            "No final structure specified for this calculation")
    if not isinstance(last_structure, StructureData):
        raise InputValidationError(
            "Final structure is not of type StructureData")

    try:
        kpoints = inputdict.pop(self.get_linkname('kpoints'))
    except KeyError:
        raise InputValidationError(
            "No kpoints specified for this calculation")
    if not isinstance(kpoints, KpointsData):
        raise InputValidationError("kpoints is not of type KpointsData")

    # Settings can be undefined, and defaults to an empty dictionary
    settings = inputdict.pop(self.get_linkname('settings'), None)
    if settings is None:
        settings_dict = {}
    else:
        if not isinstance(settings, ParameterData):
            raise InputValidationError(
                "settings, if specified, must be of "
                "type ParameterData")
        # Settings converted to uppercase
        settings_dict = _uppercase_dict(settings.get_dict(),
                                        dict_name='settings')

    pseudos = {}
    # I create here a dictionary that associates each kind name to a pseudo.
    # A single pseudo link may cover several kinds, encoded in the link
    # name after the prefix, joined by '_'.
    for link in inputdict.keys():
        if link.startswith(self._get_linkname_pseudo_prefix()):
            kindstring = link[len(self._get_linkname_pseudo_prefix()):]
            kinds = kindstring.split('_')
            the_pseudo = inputdict.pop(link)
            if not isinstance(the_pseudo, UpfData):
                raise InputValidationError(
                    "Pseudo for kind(s) {} is not of "
                    "type UpfData".format(",".join(kinds)))
            for kind in kinds:
                if kind in pseudos:
                    raise InputValidationError(
                        "Pseudo for kind {} passed "
                        "more than one time".format(kind))
                pseudos[kind] = the_pseudo

    parent_calc_folder = inputdict.pop(self.get_linkname('parent_folder'),
                                       None)
    if parent_calc_folder is not None:
        if not isinstance(parent_calc_folder, RemoteData):
            raise InputValidationError("parent_calc_folder, if specified, "
                                       "must be of type RemoteData")

    vdw_table = inputdict.pop(self.get_linkname('vdw_table'), None)
    if vdw_table is not None:
        if not isinstance(vdw_table, SinglefileData):
            raise InputValidationError("vdw_table, if specified, "
                                       "must be of type SinglefileData")

    # Here, there should be no more parameters...
    if inputdict:
        raise InputValidationError("The following input data nodes are "
                                   "unrecognized: {}".format(
                                       inputdict.keys()))

    # Check that the first and last image have the same cell
    if abs(np.array(first_structure.cell) -
           np.array(last_structure.cell)).max() > 1.e-4:
        raise InputValidationError(
            "Different cell in the fist and last image")

    # Check that the first and last image have the same number of sites
    if len(first_structure.sites) != len(last_structure.sites):
        raise InputValidationError(
            "Different number of sites in the fist and last image")

    # Check that sites in the initial and final structure have the same kinds
    if not first_structure.get_site_kindnames(
    ) == last_structure.get_site_kindnames():
        raise InputValidationError(
            "Mismatch between the kind names and/or oder between "
            "the first and final image")

    # Check structure, get species, check peudos
    kindnames = [k.name for k in first_structure.kinds]
    if set(kindnames) != set(pseudos.keys()):
        err_msg = ("Mismatch between the defined pseudos and the list of "
                   "kinds of the structure. Pseudos: {}; kinds: {}".format(
                       ",".join(pseudos.keys()), ",".join(list(kindnames))))
        raise InputValidationError(err_msg)

    ##############################
    # END OF INITIAL INPUT CHECK #
    ##############################

    # I create the subfolder that will contain the pseudopotentials
    tempfolder.get_subfolder(self._PSEUDO_SUBFOLDER, create=True)
    # I create the subfolder with the output data (sometimes Quantum
    # Espresso codes crash if an empty folder is not already there
    tempfolder.get_subfolder(self._OUTPUT_SUBFOLDER, create=True)

    # We first prepare the NEB-specific input file
    input_filecontent = self._generate_NEBinputdata(
        neb_parameters, settings_dict)

    input_filename = tempfolder.get_abs_path(self._INPUT_FILE_NAME)
    with open(input_filename, 'w') as infile:
        infile.write(input_filecontent)

    # We now generate the PW input files for each input structure
    local_copy_pseudo_list = []
    for i, structure in enumerate([first_structure, last_structure]):
        # We need to a pass a copy of the settings_dict for each structure,
        # because the generator consumes (pops) keys from it.
        this_settings_dict = copy.deepcopy(settings_dict)
        input_filecontent, this_local_copy_pseudo_list = self._generate_PWCPinputdata(
            pw_parameters, this_settings_dict, pseudos, structure, kpoints)
        local_copy_pseudo_list += this_local_copy_pseudo_list
        input_filename = tempfolder.get_abs_path('pw_{}.in'.format(i + 1))
        with open(input_filename, 'w') as infile:
            infile.write(input_filecontent)

    # We need to pop the settings that were used in the PW calculations:
    # whatever the PW generator consumed from its copy is dropped from the
    # shared settings_dict too. (this_settings_dict is the copy from the
    # last loop iteration.)
    for key in settings_dict.keys():
        if key not in this_settings_dict.keys():
            settings_dict.pop(key)

    # We avoid to copy twice the same pseudopotential to the same filename
    local_copy_pseudo_list = set(local_copy_pseudo_list)
    # We check that two different pseudopotentials are not copied
    # with the same name (otherwise the first is overwritten)
    if len(
            set([
                pseudoname
                for local_path, pseudoname in local_copy_pseudo_list
            ])) < len(local_copy_pseudo_list):
        raise InputValidationError(
            "Same filename for two different pseudopotentials")

    local_copy_list += local_copy_pseudo_list

    # If present, add also the Van der Waals table to the pseudo dir
    # Note that the name of the table is not checked but should be the
    # one expected by QE.
    if vdw_table:
        local_copy_list.append(
            (vdw_table.get_file_abs_path(),
             os.path.join(self._PSEUDO_SUBFOLDER,
                          os.path.split(
                              vdw_table.get_file_abs_path())[1])))

    # operations for restart
    symlink = settings_dict.pop('PARENT_FOLDER_SYMLINK',
                                self._default_symlink_usage)  # a boolean
    if symlink:
        if parent_calc_folder is not None:
            # I put the symlink to the old parent ./out folder
            remote_symlink_list.append(
                (parent_calc_folder.get_computer().uuid,
                 os.path.join(parent_calc_folder.get_remote_path(),
                              self._OUTPUT_SUBFOLDER, '*'),
                 self._OUTPUT_SUBFOLDER))
            # and to the old parent prefix.path
            remote_symlink_list.append(
                (parent_calc_folder.get_computer().uuid,
                 os.path.join(parent_calc_folder.get_remote_path(),
                              '{}.path'.format(self._PREFIX)),
                 '{}.path'.format(self._PREFIX)))
    else:
        # copy remote output dir and .path file, if specified
        if parent_calc_folder is not None:
            remote_copy_list.append(
                (parent_calc_folder.get_computer().uuid,
                 os.path.join(parent_calc_folder.get_remote_path(),
                              self._OUTPUT_SUBFOLDER, '*'),
                 self._OUTPUT_SUBFOLDER))
            # and to the old parent prefix.path
            remote_copy_list.append(
                (parent_calc_folder.get_computer().uuid,
                 os.path.join(parent_calc_folder.get_remote_path(),
                              '{}.path'.format(self._PREFIX)),
                 '{}.path'.format(self._PREFIX)))

    # here we may create an aiida.EXIT file
    create_exit_file = settings_dict.pop('ONLY_INITIALIZATION', False)
    if create_exit_file:
        exit_filename = tempfolder.get_abs_path('{}.EXIT'.format(
            self._PREFIX))
        with open(exit_filename, 'w') as f:
            f.write('\n')

    calcinfo = CalcInfo()
    codeinfo = CodeInfo()

    calcinfo.uuid = self.uuid
    # Empty command line by default
    cmdline_params = settings_dict.pop('CMDLINE', [])
    # For the time-being we only have the initial and final image
    calcinfo.local_copy_list = local_copy_list
    calcinfo.remote_copy_list = remote_copy_list
    calcinfo.remote_symlink_list = remote_symlink_list
    # In neb calculations there is no input read from standard input!!
    codeinfo.cmdline_params = (["-input_images", "2"] +
                               list(cmdline_params))
    codeinfo.stdout_name = self._OUTPUT_FILE_NAME
    codeinfo.code_uuid = code.uuid
    calcinfo.codes_info = [codeinfo]

    # Retrieve by default the output file and ...
    calcinfo.retrieve_list = []
    calcinfo.retrieve_list.append(self._OUTPUT_FILE_NAME)
    calcinfo.retrieve_list.append([
        os.path.join(self._OUTPUT_SUBFOLDER, self._PREFIX + '_*[0-9]',
                     'PW.out'), '.', 2
    ])
    calcinfo.retrieve_list.append([
        os.path.join(self._OUTPUT_SUBFOLDER, self._PREFIX + '_*[0-9]',
                     self._PREFIX + '.save', self._DATAFILE_XML_BASENAME),
        '.', 3
    ])
    settings_retrieve_list = settings_dict.pop('ADDITIONAL_RETRIEVE_LIST',
                                               [])
    calcinfo.retrieve_list += settings_retrieve_list
    calcinfo.retrieve_list += self._internal_retrieve_list

    # NOTE(review): unlike the PW _prepare_for_submission, this only raises
    # for leftover settings when the parser-key pop itself fails; if the
    # parser key is popped but other unknown keys remain, they are silently
    # ignored — confirm whether that asymmetry is intended.
    if settings_dict:
        try:
            Parserclass = self.get_parserclass()
            parser = Parserclass(self)
            parser_opts = parser.get_parser_settings_key()
            settings_dict.pop(parser_opts)
        except (KeyError, AttributeError
                ):  # the key parser_opts isn't inside the dictionary
            raise InputValidationError(
                "The following keys have been found in "
                "the settings input node, but were not understood: {}".
                format(",".join(settings_dict.keys())))

    return calcinfo
def _prepare_for_submission(self, tempfolder, inputdict):
    """
    Create input files.

    Voronoi/KKR variant: validates the input nodes (structure may come
    either directly or from a parent KKR calculation), writes the
    inputcard, optionally schedules a potential file to be copied in as
    overwrite potential, and assembles the CalcInfo/CodeInfo.

    :param tempfolder: aiida.common.folders.Folder subclass where
        the plugin should put all its files.
    :param inputdict: dictionary of the input nodes as they would
        be returned by get_inputs_dict
    :return: a CalcInfo instance describing the calculation.
    :raises InputValidationError: for missing/mistyped/inconsistent inputs.
    :raises ValidationError: if unrecognized input nodes remain.
    """
    # Check inputdict
    try:
        parameters = inputdict.pop(self.get_linkname('parameters'))
    except KeyError:
        raise InputValidationError("No parameters specified for this "
                                   "calculation")
    if not isinstance(parameters, ParameterData):
        raise InputValidationError("parameters not of type "
                                   "ParameterData")
    # The structure is optional here: it can instead be extracted from a
    # parent KKR calculation (see below).
    try:
        structure = inputdict.pop(self.get_linkname('structure'))
        found_structure = True
    except KeyError:
        found_structure = False

    vca_structure = False
    if found_structure:
        if not isinstance(structure, StructureData):
            raise InputValidationError("structure not of type "
                                       "StructureData")
        # for VCA: check if input structure and parameter node define VCA structure
        vca_structure = vca_check(structure, parameters)

    try:
        code = inputdict.pop(self.get_linkname('code'))
    except KeyError:
        raise InputValidationError("No code specified for this "
                                   "calculation")

    # check if a parent folder containing a potential file (out_potential) is given
    try:
        parent_calc_folder = inputdict.pop(self.get_linkname('parent_KKR'))
        found_parent = True
    except KeyError:
        found_parent = False

    if found_parent:
        if not isinstance(parent_calc_folder, RemoteData):
            raise InputValidationError(
                "parent_KKR must be of type RemoteData")

        # check if parent is either Voronoi or previous KKR calculation
        overwrite_potential, parent_calc = self._check_valid_parent(
            parent_calc_folder)

        #cross check if no structure was given and extract structure from parent
        # (an explicit structure together with parent_KKR is only allowed
        # in the VCA case, where the input structure carries extra info)
        if found_structure and not vca_structure:
            raise InputValidationError(
                "parent_KKR and structure found in input. "
                "Can only use either parent_KKR or structure in input.")
        else:
            structure_remote_KKR, voro_parent = self.find_parent_structure(
                parent_calc)
            if not vca_structure:
                structure = structure_remote_KKR
            else:
                # check consistency of input vca structure and structure from remote KKR folder
                # TODO check consistency
                pass
    else:
        overwrite_potential = False
        if not found_structure:
            raise InputValidationError(
                "Neither structure nor parent_KKR specified for this "
                "calculation")

    # check if overwrite potential is given explicitly
    try:
        potfile_overwrite = inputdict.pop(
            self.get_linkname('potential_overwrite'))
        has_potfile_overwrite = True
    except KeyError:
        has_potfile_overwrite = False

    if has_potfile_overwrite:
        overwrite_potential = True
        if not isinstance(potfile_overwrite, SingleFileData):
            raise InputValidationError(
                "potfile_overwrite must be of type SingleFileData")
        if not found_structure:
            raise InputValidationError(
                "Input structure needed for this calculation "
                "(using 'potential_overwrite' input node)")

    # finally check if something else was given as input (should not be the case)
    # NOTE(review): this raises ValidationError while all other checks in
    # this method raise InputValidationError — confirm whether callers rely
    # on that distinction.
    if inputdict:
        raise ValidationError("Unknown inputs: {}".format(inputdict))

    ###################################

    # Check for 2D case
    twoDimcheck, msg = check_2Dinput_consistency(structure, parameters)
    if not twoDimcheck:
        raise InputValidationError(msg)

    # Prepare inputcard from Structure and input parameter data
    input_filename = tempfolder.get_abs_path(self._INPUT_FILE_NAME)
    try:
        use_alat_input = parameters.get_dict().get('use_input_alat', False)
        natom, nspin, newsosol = generate_inputcard_from_structure(
            parameters,
            structure,
            input_filename,
            isvoronoi=True,
            vca_structure=vca_structure,
            use_input_alat=use_alat_input)
    except ValueError as e:
        raise InputValidationError(
            "Input ParameterData not consistent: {}".format(e))

    # Decide what files to copy
    local_copy_list = []
    if overwrite_potential:
        # copy the right files #TODO check first if file, exists and throw
        # warning, now this will throw an error
        if found_parent and self._is_KkrCalc(parent_calc):
            outfolderpath = parent_calc.out.retrieved.folder.abspath
            self.logger.info("out folder path {}".format(outfolderpath))
            filename = os.path.join(outfolderpath, 'path',
                                    parent_calc._OUT_POTENTIAL)
            copylist = [filename]
        elif has_potfile_overwrite:
            copylist = [potfile_overwrite.get_file_abs_path()]
        else:
            copylist = []

        # The first file in the copylist is renamed to the canonical
        # overwrite-potential filename expected by the code.
        for file1 in copylist:
            filename = file1
            if (found_parent
                    or has_potfile_overwrite) and file1 == copylist[0]:
                filename = self._POTENTIAL_IN_OVERWRITE
            local_copy_list.append((file1, filename))

    # Prepare CalcInfo to be returned to aiida
    calcinfo = CalcInfo()
    calcinfo.uuid = self.uuid
    calcinfo.local_copy_list = local_copy_list
    calcinfo.remote_copy_list = []
    calcinfo.retrieve_list = [
        self._OUTPUT_FILE_NAME, self._ATOMINFO, self._RADII,
        self._SHAPEFUN, self._VERTICES, self._INPUT_FILE_NAME
    ]

    # pass on overwrite potential if this was given in input
    # (KkrCalculation checks if this file is there and takes this file instead of _OUT_POTENTIAL_voronoi
    # if given)
    if overwrite_potential:
        calcinfo.retrieve_list += [self._POTENTIAL_IN_OVERWRITE]
    else:
        calcinfo.retrieve_list += [self._OUT_POTENTIAL_voronoi]

    codeinfo = CodeInfo()
    codeinfo.cmdline_params = []
    codeinfo.stdout_name = self._OUTPUT_FILE_NAME
    codeinfo.code_uuid = code.uuid
    calcinfo.codes_info = [codeinfo]

    return calcinfo
def create_restart(self, force_restart=False, parent_folder_symlink=None):
    """
    Function to restart a calculation that was not completed before
    (like max walltime reached...) i.e. not to restart a really FAILED calculation.
    Returns a calculation c2, with all links prepared but not stored in DB.
    To submit it simply:
    c2.store_all()
    c2.submit()

    :param bool force_restart: restart also if parent is not in FINISHED
       state (e.g. FAILED, IMPORTED, etc.). Default=False.
    :param bool parent_folder_symlink: if True, symlinks are used
       instead of hard copies of the files. Default given by
       self._default_symlink_usage.
    :return: an unstored calculation with all input links set
        (NEB/PW parameters, structures, kpoints, code, settings,
        pseudos, vdw table) and the parent remote folder attached.
    :raises InputValidationError: if the parent is not FINISHED (and
        ``force_restart`` is False) or the number of output RemoteData
        nodes is not exactly one.
    """
    from aiida.common.datastructures import calc_states

    # Check the calculation's state using ``from_attribute=True`` to
    # correctly handle IMPORTED calculations.
    if self.get_state(from_attribute=True) != calc_states.FINISHED:
        if not force_restart:
            raise InputValidationError(
                "Calculation to be restarted must be "
                "in the {} state. Otherwise, use the force_restart "
                "flag".format(calc_states.FINISHED))

    if parent_folder_symlink is None:
        parent_folder_symlink = self._default_symlink_usage

    calc_inp = self.get_inputs_dict()

    # Rebuild the NEB parameters with the restart flag switched on.
    old_inp_dict = calc_inp[self.get_linkname('neb_parameters')].get_dict()
    # add the restart flag
    old_inp_dict['PATH']['restart_mode'] = 'restart'
    inp_dict = ParameterData(dict=old_inp_dict)

    # NOTE(review): the message below is also raised when ZERO RemoteData
    # outputs exist, not only when there is more than one — confirm intent.
    remote_folders = self.get_outputs(type=RemoteData)
    if len(remote_folders) != 1:
        raise InputValidationError("More than one output RemoteData found "
                                   "in calculation {}".format(self.pk))
    remote_folder = remote_folders[0]

    c2 = self.copy()

    #if not 'Restart' in c2.label:
    #    labelstring = c2.label + " Restart of {} {}.".format(
    #        self.__class__.__name__,self.pk)
    #else:
    #    labelstring = " Restart of {} {}.".format(self.__class__.__name__,self.pk)
    #c2.label = labelstring.lstrip()

    # set the new links
    c2.use_neb_parameters(inp_dict)

    c2.use_pw_parameters(calc_inp[self.get_linkname('pw_parameters')])

    c2.use_first_structure(calc_inp[self.get_linkname('first_structure')])
    c2.use_last_structure(calc_inp[self.get_linkname('last_structure')])

    if self._use_kpoints:
        c2.use_kpoints(calc_inp[self.get_linkname('kpoints')])
    c2.use_code(calc_inp[self.get_linkname('code')])

    # Settings are optional; merge the symlink preference into them.
    try:
        old_settings_dict = calc_inp[self.get_linkname(
            'settings')].get_dict()
    except KeyError:
        old_settings_dict = {}

    if parent_folder_symlink is not None:
        old_settings_dict['PARENT_FOLDER_SYMLINK'] = parent_folder_symlink

    if old_settings_dict:  # if not empty dictionary
        settings = ParameterData(dict=old_settings_dict)
        c2.use_settings(settings)

    c2._set_parent_remotedata(remote_folder)
    # set links for pseudos
    for linkname, input_node in self.get_inputs_dict().iteritems():
        if isinstance(input_node, UpfData):
            c2._add_link_from(input_node, label=linkname)

    # Add also the vdw table, if the parent had one
    try:
        old_vdw_table = calc_inp[self.get_linkname('vdw_table')]
    except KeyError:
        # No VdW table
        pass
    else:
        c2.use_vdw_table(old_vdw_table)

    return c2
def presubmit(self, folder):
    """Prepares the calculation folder with all inputs, ready to be copied to the cluster.

    :param folder: a SandboxFolder that can be used to write calculation input files and the scheduling script.
    :type folder: :class:`aiida.common.folders.Folder`

    :return calcinfo: the CalcInfo object containing the information needed by the daemon to handle operations.
    :rtype calcinfo: :class:`aiida.common.CalcInfo`
    """
    # pylint: disable=too-many-locals,too-many-statements,too-many-branches
    import os

    from aiida.common.exceptions import PluginInternalError, ValidationError, InvalidOperation, InputValidationError
    from aiida.common import json
    from aiida.common.utils import validate_list_of_string_tuples
    from aiida.common.datastructures import CodeInfo, CodeRunMode
    from aiida.orm import load_node, Code, Computer
    from aiida.plugins import DataFactory
    from aiida.schedulers.datastructures import JobTemplate

    computer = self.node.computer
    inputs = self.node.get_incoming(link_type=LinkType.INPUT_CALC)

    if not self.inputs.metadata.dry_run and self.node.has_cached_links():
        raise InvalidOperation('calculation node has unstored links in cache')

    codes = [_ for _ in inputs.all_nodes() if isinstance(_, Code)]

    for code in codes:
        if not code.can_run_on(computer):
            raise InputValidationError('The selected code {} for calculation {} cannot run on computer {}'.format(
                code.pk, self.node.pk, computer.name))

        if code.is_local() and code.get_local_executable() in folder.get_content_list():
            raise PluginInternalError('The plugin created a file {} that is also the executable name!'.format(
                code.get_local_executable()))

    # Let the plugin write its input files and collect its CalcInfo.
    calc_info = self.prepare_for_submission(folder)
    calc_info.uuid = str(self.node.uuid)
    scheduler = computer.get_scheduler()

    # I create the job template to pass to the scheduler
    job_tmpl = JobTemplate()
    job_tmpl.shebang = computer.get_shebang()
    job_tmpl.submit_as_hold = False
    job_tmpl.rerunnable = False
    job_tmpl.job_environment = {}
    # 'email', 'email_on_started', 'email_on_terminated',
    job_tmpl.job_name = 'aiida-{}'.format(self.node.pk)
    job_tmpl.sched_output_path = self.options.scheduler_stdout
    if self.options.scheduler_stderr == self.options.scheduler_stdout:
        job_tmpl.sched_join_files = True
    else:
        job_tmpl.sched_error_path = self.options.scheduler_stderr
        job_tmpl.sched_join_files = False

    # Set retrieve path, add also scheduler STDOUT and STDERR
    retrieve_list = (calc_info.retrieve_list if calc_info.retrieve_list is not None else [])
    if (job_tmpl.sched_output_path is not None and job_tmpl.sched_output_path not in retrieve_list):
        retrieve_list.append(job_tmpl.sched_output_path)
    if not job_tmpl.sched_join_files:
        if (job_tmpl.sched_error_path is not None and job_tmpl.sched_error_path not in retrieve_list):
            retrieve_list.append(job_tmpl.sched_error_path)
    self.node.set_retrieve_list(retrieve_list)

    retrieve_singlefile_list = (calc_info.retrieve_singlefile_list
                                if calc_info.retrieve_singlefile_list is not None else [])
    # a validation on the subclasses of retrieve_singlefile_list
    for _, subclassname, _ in retrieve_singlefile_list:
        file_sub_class = DataFactory(subclassname)
        if not issubclass(file_sub_class, orm.SinglefileData):
            raise PluginInternalError(
                '[presubmission of calc {}] retrieve_singlefile_list subclass problem: {} is '
                'not subclass of SinglefileData'.format(self.node.pk, file_sub_class.__name__))
    if retrieve_singlefile_list:
        self.node.set_retrieve_singlefile_list(retrieve_singlefile_list)

    # Handle the retrieve_temporary_list
    retrieve_temporary_list = (calc_info.retrieve_temporary_list
                               if calc_info.retrieve_temporary_list is not None else [])
    self.node.set_retrieve_temporary_list(retrieve_temporary_list)

    # the if is done so that if the method returns None, this is
    # not added. This has two advantages:
    # - it does not add too many \n\n if most of the prepend_text are empty
    # - most importantly, skips the cases in which one of the methods
    #   would return None, in which case the join method would raise
    #   an exception
    prepend_texts = [computer.get_prepend_text()] + \
        [code.get_prepend_text() for code in codes] + \
        [calc_info.prepend_text, self.node.get_option('prepend_text')]
    job_tmpl.prepend_text = '\n\n'.join(prepend_text for prepend_text in prepend_texts if prepend_text)

    append_texts = [self.node.get_option('append_text'), calc_info.append_text] + \
        [code.get_append_text() for code in codes] + \
        [computer.get_append_text()]
    job_tmpl.append_text = '\n\n'.join(append_text for append_text in append_texts if append_text)

    # Set resources, also with get_default_mpiprocs_per_machine
    resources = self.node.get_option('resources')
    def_cpus_machine = computer.get_default_mpiprocs_per_machine()
    if def_cpus_machine is not None:
        resources['default_mpiprocs_per_machine'] = def_cpus_machine
    job_tmpl.job_resource = scheduler.create_job_resource(**resources)

    subst_dict = {'tot_num_mpiprocs': job_tmpl.job_resource.get_tot_num_mpiprocs()}

    for key, value in job_tmpl.job_resource.items():
        subst_dict[key] = value
    mpi_args = [arg.format(**subst_dict) for arg in computer.get_mpirun_command()]
    extra_mpirun_params = self.node.get_option('mpirun_extra_params')  # same for all codes in the same calc

    # set the codes_info
    if not isinstance(calc_info.codes_info, (list, tuple)):
        raise PluginInternalError('codes_info passed to CalcInfo must be a list of CalcInfo objects')

    codes_info = []
    for code_info in calc_info.codes_info:

        if not isinstance(code_info, CodeInfo):
            raise PluginInternalError('Invalid codes_info, must be a list of CodeInfo objects')

        if code_info.code_uuid is None:
            raise PluginInternalError('CalcInfo should have '
                                      'the information of the code '
                                      'to be launched')
        this_code = load_node(code_info.code_uuid, sub_classes=(Code,))

        this_withmpi = code_info.withmpi  # to decide better how to set the default
        if this_withmpi is None:
            if len(calc_info.codes_info) > 1:
                raise PluginInternalError('For more than one code, it is '
                                          'necessary to set withmpi in '
                                          'codes_info')
            else:
                this_withmpi = self.node.get_option('withmpi')

        if this_withmpi:
            this_argv = (mpi_args + extra_mpirun_params + [this_code.get_execname()] +
                         (code_info.cmdline_params if code_info.cmdline_params is not None else []))
        else:
            this_argv = [this_code.get_execname()] + (code_info.cmdline_params
                                                     if code_info.cmdline_params is not None else [])

        # overwrite the old cmdline_params and add codename and mpirun stuff
        code_info.cmdline_params = this_argv

        codes_info.append(code_info)
    job_tmpl.codes_info = codes_info

    # set the codes execution mode
    if len(codes) > 1:
        try:
            job_tmpl.codes_run_mode = calc_info.codes_run_mode
        except KeyError:
            raise PluginInternalError('Need to set the order of the code execution (parallel or serial?)')
    else:
        job_tmpl.codes_run_mode = CodeRunMode.SERIAL
    ########################################################################

    custom_sched_commands = self.node.get_option('custom_scheduler_commands')
    if custom_sched_commands:
        job_tmpl.custom_scheduler_commands = custom_sched_commands

    job_tmpl.import_sys_environment = self.node.get_option('import_sys_environment')

    job_tmpl.job_environment = self.node.get_option('environment_variables')

    queue_name = self.node.get_option('queue_name')
    account = self.node.get_option('account')
    qos = self.node.get_option('qos')
    if queue_name is not None:
        job_tmpl.queue_name = queue_name
    if account is not None:
        job_tmpl.account = account
    if qos is not None:
        job_tmpl.qos = qos
    priority = self.node.get_option('priority')
    if priority is not None:
        job_tmpl.priority = priority
    # Fix: max_memory_kb was fetched and assigned twice (once before and once
    # after max_wallclock_seconds); the redundant second copy was removed.
    max_memory_kb = self.node.get_option('max_memory_kb')
    if max_memory_kb is not None:
        job_tmpl.max_memory_kb = max_memory_kb
    max_wallclock_seconds = self.node.get_option('max_wallclock_seconds')
    if max_wallclock_seconds is not None:
        job_tmpl.max_wallclock_seconds = max_wallclock_seconds

    # Write the submission script and debug copies of the job template and
    # CalcInfo into the sandbox folder.
    script_filename = '_aiidasubmit.sh'
    script_content = scheduler.get_submit_script(job_tmpl)
    folder.create_file_from_filelike(io.StringIO(script_content), script_filename, 'w', encoding='utf8')

    subfolder = folder.get_subfolder('.aiida', create=True)
    subfolder.create_file_from_filelike(io.StringIO(json.dumps(job_tmpl)), 'job_tmpl.json', 'w', encoding='utf8')
    subfolder.create_file_from_filelike(io.StringIO(json.dumps(calc_info)), 'calcinfo.json', 'w', encoding='utf8')

    if calc_info.local_copy_list is None:
        calc_info.local_copy_list = []

    if calc_info.remote_copy_list is None:
        calc_info.remote_copy_list = []

    # Some validation
    this_pk = self.node.pk if self.node.pk is not None else '[UNSTORED]'
    local_copy_list = calc_info.local_copy_list
    try:
        validate_list_of_string_tuples(local_copy_list, tuple_length=3)
    except ValidationError as exc:
        raise PluginInternalError('[presubmission of calc {}] '
                                  'local_copy_list format problem: {}'.format(this_pk, exc))

    remote_copy_list = calc_info.remote_copy_list
    try:
        validate_list_of_string_tuples(remote_copy_list, tuple_length=3)
    except ValidationError as exc:
        raise PluginInternalError('[presubmission of calc {}] '
                                  'remote_copy_list format problem: {}'.format(this_pk, exc))

    for (remote_computer_uuid, _, dest_rel_path) in remote_copy_list:
        try:
            Computer.objects.get(uuid=remote_computer_uuid)  # pylint: disable=unused-variable
        except exceptions.NotExistent:
            # Fix: added the missing space between 'UUID={}' and 'but' in the
            # concatenated message.
            raise PluginInternalError('[presubmission of calc {}] '
                                      'The remote copy requires a computer with UUID={} '
                                      'but no such computer was found in the '
                                      'database'.format(this_pk, remote_computer_uuid))
        if os.path.isabs(dest_rel_path):
            raise PluginInternalError('[presubmission of calc {}] '
                                      'The destination path of the remote copy '
                                      'is absolute! ({})'.format(this_pk, dest_rel_path))

    return calc_info, script_filename
def paginate(self, page, perpage, total_count):
    """
    Calculates limit and offset for the results of a query,
    given the page and the number of results per page.
    Moreover, calculates the last available page and raises an exception
    if the required page exceeds that limit.
    If number of rows==0, only page 1 exists

    :param page: integer number of the page that has to be viewed
    :param perpage: integer defining how many results a page contains;
        when None, ``self.perpage_default`` is used
    :param total_count: the total number of rows retrieved by the query
    :return: integers: limit, offset, rel_pages
    :raises InputValidationError: if page/perpage/total_count are not integers
    :raises RestInputValidationError: if the requested page is out of range
    """
    from math import ceil

    ## Type checks
    # Mandatory params
    # Fix: int(None) raises TypeError, which the previous
    # ``except ValueError`` did not catch; both are now handled.
    try:
        page = int(page)
    except (ValueError, TypeError):
        raise InputValidationError('page number must be an integer')
    try:
        total_count = int(total_count)
    except (ValueError, TypeError):
        raise InputValidationError('total_count must be an integer')
    # Non-mandatory params
    if perpage is not None:
        try:
            perpage = int(perpage)
        except (ValueError, TypeError):
            raise InputValidationError('perpage must be an integer')
    else:
        perpage = self.perpage_default

    ## First_page is anyway 1
    first_page = 1

    ## Calculate last page
    if total_count == 0:
        last_page = 1
    else:
        # Fix: use float division so that a partially-filled last page is
        # counted (plain ``/`` truncates under Python 2, making e.g.
        # 95 rows at 10 per page yield 9 pages instead of 10).
        last_page = int(ceil(float(total_count) / perpage))

    ## Check validity of required page and calculate limit, offset,
    ## previous, and next page
    if page > last_page or page < 1:
        raise RestInputValidationError('Non existent page requested. The '
                                       'page range is [{} : {}]'.format(
                                           first_page, last_page))

    limit = perpage
    offset = (page - 1) * perpage
    prev_page = None
    if page > 1:
        prev_page = page - 1

    next_page = None
    if page < last_page:
        next_page = page + 1

    rel_pages = dict(prev=prev_page, next=next_page, first=first_page, last=last_page)

    return (limit, offset, rel_pages)
def _prepare_for_submission(self, tempfolder, inputdict):
    """
    This is the routine to be called when you want to create
    the input files and related stuff with a plugin.

    :param tempfolder: a aiida.common.folders.Folder subclass where
                       the plugin should put all its files.
    :param inputdict: a dictionary with the input nodes, as they would
                      be returned by get_inputdata_dict (without the Code!)
    :raises InputValidationError: if any mandatory input node is missing
        or ``parameters`` has the wrong type.
    """
    try:
        parameters_data = inputdict.pop(self.get_linkname("parameters"))
    except KeyError:
        # Fix: this branch previously ``pass``-ed and the function then
        # crashed below with a NameError on ``parameters_data``; raise a
        # clear validation error instead.
        raise InputValidationError("No parameters specified for this "
                                   "calculation")

    if not isinstance(parameters_data, ParameterData):
        raise InputValidationError("parameters is not of type "
                                   "ParameterData")

    try:
        structure = inputdict.pop(self.get_linkname("structure"))
    except KeyError:
        raise InputValidationError(
            "no structure is specified for this calculation")

    try:
        trajectory = inputdict.pop(self.get_linkname("trajectory"))
    except KeyError:
        # Fix: message used to read "trajectory is specified for this
        # calculation", the opposite of the actual condition.
        raise InputValidationError(
            "no trajectory is specified for this calculation")

    try:
        force_constants = inputdict.pop(
            self.get_linkname("force_constants"))
    except KeyError:
        raise InputValidationError(
            "no force_constants is specified for this calculation")

    try:
        code = inputdict.pop(self.get_linkname("code"))
    except KeyError:
        raise InputValidationError(
            "no code is specified for this calculation")

    # MD time step taken from the first interval of the trajectory
    # (assumes uniform spacing -- TODO confirm with trajectory producer).
    time_step = trajectory.get_times()[1] - trajectory.get_times()[0]

    ##############################
    # END OF INITIAL INPUT CHECK #
    ##############################

    # =================== prepare the python input files =====================

    cell_txt = get_poscar_txt(structure)
    input_txt = parameters_to_input_file(parameters_data)
    force_constants_txt = get_force_constants(force_constants)
    trajectory_txt = get_trajectory_txt(trajectory)

    # =========================== dump to file =============================

    input_filename = tempfolder.get_abs_path(self._INPUT_FILE_NAME)
    with open(input_filename, "w") as infile:
        infile.write(input_txt)

    cell_filename = tempfolder.get_abs_path(self._INPUT_CELL)
    with open(cell_filename, "w") as infile:
        infile.write(cell_txt)

    force_constants_filename = tempfolder.get_abs_path(
        self._INPUT_FORCE_CONSTANTS)
    with open(force_constants_filename, "w") as infile:
        infile.write(force_constants_txt)

    trajectory_filename = tempfolder.get_abs_path(self._INPUT_TRAJECTORY)
    with open(trajectory_filename, "w") as infile:
        infile.write(trajectory_txt)

    # ============================ calcinfo ================================

    local_copy_list = []
    remote_copy_list = []
    # additional_retrieve_list = settings_dict.pop("ADDITIONAL_RETRIEVE_LIST",[])

    calcinfo = CalcInfo()

    calcinfo.uuid = self.uuid
    # Empty command line by default
    calcinfo.local_copy_list = local_copy_list
    calcinfo.remote_copy_list = remote_copy_list

    # Retrieve files
    calcinfo.retrieve_list = [
        self._OUTPUT_FILE_NAME,
        self._OUTPUT_FORCE_CONSTANTS,
        self._OUTPUT_QUASIPARTICLES,
    ]

    codeinfo = CodeInfo()
    codeinfo.cmdline_params = [
        self._INPUT_FILE_NAME,
        self._INPUT_TRAJECTORY,
        "-ts",
        "{}".format(time_step),
        "--silent",
        "-sfc",
        self._OUTPUT_FORCE_CONSTANTS,
        "-thm",  # '--resolution 0.01',
        "-psm",
        "2",
        "--normalize_dos",
        "-sdata",
    ]

    if "temperature" in parameters_data.get_dict():
        codeinfo.cmdline_params.append("--temperature")
        codeinfo.cmdline_params.append("{}".format(
            parameters_data.dict.temperature))

    if "md_commensurate" in parameters_data.get_dict():
        if parameters_data.dict.md_commensurate:
            codeinfo.cmdline_params.append("--MD_commensurate")

    codeinfo.stdout_name = self._OUTPUT_FILE_NAME
    codeinfo.code_uuid = code.uuid
    codeinfo.withmpi = False
    calcinfo.codes_info = [codeinfo]
    return calcinfo
def build_headers(self, rel_pages=None, url=None, total_count=None):
    """
    Construct the header dictionary for an HTTP response. It includes related
    pages, total count of results (before pagination).

    :param rel_pages: a dictionary defining related pages (first, prev, next, last)
    :param url: (string) the full url, i.e. the url that the client uses to get Rest resources
    :param total_count: (int) total number of results before pagination; mandatory
    :return: dict of HTTP headers (X-Total-Count, optionally Link, and
        Access-Control-Expose-Headers)
    :raises InputValidationError: on invalid/missing parameters
    """
    ## Type validation
    # mandatory parameters
    # Fix: int(None) raises TypeError, which ``except ValueError`` missed,
    # so omitting the mandatory total_count crashed with an unhandled error.
    try:
        total_count = int(total_count)
    except (ValueError, TypeError):
        raise InputValidationError('total_count must be a long integer')

    # non mandatory parameters
    if rel_pages is not None and not isinstance(rel_pages, dict):
        raise InputValidationError('rel_pages must be a dictionary')

    if url is not None:
        try:
            url = str(url)
        except ValueError:
            raise InputValidationError('url must be a string')

    ## Input consistency
    # rel_pages cannot be defined without url
    if rel_pages is not None and url is None:
        raise InputValidationError(
            "'rel_pages' parameter requires 'url' parameter to be defined")

    headers = {}

    ## Setting mandatory headers
    # set X-Total-Count
    headers['X-Total-Count'] = total_count
    expose_header = ['X-Total-Count']

    ## Two auxiliary functions
    def split_url(url):
        """ Split url into path and query string """
        if '?' in url:
            [path, query_string] = url.split('?')
            question_mark = '?'
        else:
            path = url
            query_string = ''
            question_mark = ''
        return (path, query_string, question_mark)

    def make_rel_url(rel, page):
        new_path_elems = path_elems + ['page', str(page)]
        return '<' + '/'.join(new_path_elems) + \
            question_mark + query_string + '>; rel={}, '.format(rel)

    ## Setting non-mandatory parameters
    # set links to related pages
    if rel_pages is not None:
        (path, query_string, question_mark) = split_url(url)
        path_elems = self.split_path(path)

        # Deliberate short-circuit double pop: strip the trailing 'page'
        # element, or (for '/page/<n>' urls) first strip the page number and
        # then check/strip 'page'. If neither popped element is 'page' the
        # url has no page component and no Link header is emitted.
        if path_elems.pop(-1) == 'page' or path_elems.pop(-1) == 'page':
            links = []
            for (rel, page) in rel_pages.items():
                if page is not None:
                    links.append(make_rel_url(rel, page))
            headers['Link'] = ''.join(links)
            expose_header.append('Link')
        else:
            pass

    # to expose header access in cross-domain requests
    headers['Access-Control-Expose-Headers'] = ','.join(expose_header)

    return headers
def generate_vasp_params(structure, settings, type=None, pressure=0.0):
    """
    Generate the input parameters needed to run a calculation for VASP

    :param structure: StructureData object containing the crystal structure
    :param settings: ParametersData object containing a dictionary with the
        INCAR parameters
    :param type: calculation flavour ('optimize', 'optimize_constant_volume',
        'forces' or 'born_charges'); also used to select per-type code and
        k-points entries from ``settings``. NOTE: shadows the builtin
        ``type`` -- name kept for backward compatibility with callers
        passing ``type=`` as a keyword.
    :param pressure: pressure used as PSTRESS for 'optimize' (unit: kb -> kB)
    :return: Calculation process object, input dictionary
    :raises InputValidationError: if no k-point definition is found in
        ``settings``
    """
    # Fix: replaced bare ``except:`` (which also swallowed KeyboardInterrupt/
    # SystemExit) with the lookup failures expected when no per-type code
    # mapping is defined in the settings.
    try:
        code = settings.dict.code[type]
    except (KeyError, IndexError, TypeError, AttributeError):
        code = settings.dict.code

    plugin = Code.get_from_string(code).get_attr('input_plugin')

    VaspCalculation = CalculationFactory(plugin)

    inputs = VaspCalculation.process().get_inputs_template()

    # code
    inputs.code = Code.get_from_string(code)

    # structure
    inputs.structure = structure

    # machine
    inputs._options.resources = settings.dict.machine['resources']
    inputs._options.max_wallclock_seconds = settings.dict.machine[
        'max_wallclock_seconds']
    # inputs._options._parser_name = 'vasp.pymatgen'
    # Use for all the set functions in calculation.
    # inputs._options = dict(inputs._options)
    # inputs._options['_parser_name'] = 'vasp.pymatgen'

    # INCAR (parameters)
    incar = dict(settings.dict.parameters)

    if type == 'optimize':
        incar.update({
            'PREC': 'Accurate',
            'ISTART': 0,
            'IBRION': 2,
            'ISIF': 3,
            'LWAVE': '.FALSE.',
            'LCHARG': '.FALSE.',
            'ADDGRID': '.TRUE.',
            'LREAL': '.FALSE.',
            'PSTRESS': pressure
        })  # unit: kb -> kB

        if 'NSW' not in incar:
            incar.update({'NSW': 300})

    elif type == 'optimize_constant_volume':
        incar.update({
            'PREC': 'Accurate',
            'ISTART': 0,
            'IBRION': 2,
            'ISIF': 4,
            'LWAVE': '.FALSE.',
            'LCHARG': '.FALSE.',
            'ADDGRID': '.TRUE.',
            'LREAL': '.FALSE.'
        })

        if 'NSW' not in incar:
            incar.update({'NSW': 300})

    elif type == 'forces':
        incar.update({
            'PREC': 'Accurate',
            'ISYM': 0,
            'ISTART': 0,
            'IBRION': -1,
            'NSW': 0,
            'LWAVE': '.FALSE.',
            'LCHARG': '.FALSE.',
            'ADDGRID': '.TRUE.',
            'LREAL': '.FALSE.'
        })

    elif type == 'born_charges':
        incar.update({
            'PREC': 'Accurate',
            'LEPSILON': '.TRUE.',
            'ISTART': 0,
            'IBRION': -1,
            'NSW': 0,
            'LWAVE': '.FALSE.',
            'LCHARG': '.FALSE.',
            'ADDGRID': '.TRUE.',
            'LREAL': '.FALSE.'
        })

        if 'NPAR' in incar:
            del incar[
                'NPAR']  #DFPT in vasp, use default NPAR = number of cores

    if 'EDIFF' not in incar:
        incar.update({'EDIFF': 1.0E-8})
    if 'EDIFFG' not in incar:
        incar.update({'EDIFFG': -1.0E-6})

    inputs.parameters = ParameterData(dict=incar)

    # POTCAR (pseudo potentials)
    inputs.potential = get_potential_vasp(structure,
                                          settings.dict.pot_family)

    # KPOINTS
    kpoints = KpointsData()
    kpoints.set_cell_from_structure(structure)

    if 'kpoints_density_{}'.format(type) in settings.get_dict():
        kpoints.set_kpoints_mesh_from_density(
            settings.get_dict()['kpoints_density_{}'.format(type)])

    elif 'kpoints_density' in settings.get_dict():
        kpoints.set_kpoints_mesh_from_density(settings.dict.kpoints_density)

    elif 'kpoints_mesh_{}'.format(type) in settings.get_dict():
        if 'kpoints_offset' in settings.get_dict():
            kpoints_offset = settings.dict.kpoints_offset
        else:
            kpoints_offset = [0.0, 0.0, 0.0]

        kpoints.set_kpoints_mesh(
            settings.get_dict()['kpoints_mesh_{}'.format(type)],
            offset=kpoints_offset)

    elif 'kpoints_mesh' in settings.get_dict():
        if 'kpoints_offset' in settings.get_dict():
            kpoints_offset = settings.dict.kpoints_offset
        else:
            kpoints_offset = [0.0, 0.0, 0.0]

        kpoints.set_kpoints_mesh(settings.dict.kpoints_mesh,
                                 offset=kpoints_offset)
    else:
        raise InputValidationError(
            'no kpoint definition in input. Define either kpoints_density or kpoints_mesh'
        )

    inputs.kpoints = kpoints

    return VaspCalculation.process(), inputs
def _prepare_for_submission(self, tempfolder, inputdict):
    """
    This is the routine to be called when you want to create
    the input files and related stuff with a plugin.

    :param tempfolder: a aiida.common.folders.Folder subclass where
                       the plugin should put all its files.
    :param inputdict: a dictionary with the input nodes, as they would
                      be returned by get_inputdata_dict (without the Code!)
    """

    def pop_required(linkname):
        # Pop a mandatory input node; complain uniformly when it is absent.
        try:
            return inputdict.pop(self.get_linkname(linkname))
        except KeyError:
            raise InputValidationError(
                "No {} specified for this calculation".format(linkname))

    ### ------------------------------------------------------
    ### Input check
    code = pop_required('code')

    parameters = pop_required('parameters')
    if not isinstance(parameters, ParameterData):
        raise InputValidationError("parameters is not of type ParameterData")

    parent_calc_folder = pop_required('parent_calc_folder')
    if not isinstance(parent_calc_folder, RemoteData):
        raise InputValidationError("parent_calc_folder is not of type RemoteData")

    settings = pop_required('settings')
    if not isinstance(settings, ParameterData):
        raise InputValidationError("settings is not of type ParameterData")
    settings_dict = settings.get_dict()

    # Here, there should be no more parameters...
    if inputdict:
        raise InputValidationError("The following input data nodes are "
                                   "unrecognized: {}".format(inputdict.keys()))
    ### End of input check
    ### ------------------------------------------------------

    # create code info
    codeinfo = CodeInfo()
    codeinfo.code_uuid = code.uuid

    # Flatten the parameters into a command line: every key, followed by its
    # value(s) unless the value is the empty string.
    cmdline = []
    for key in parameters.dict:
        value = parameters.dict[key]
        cmdline.append(key)
        if value != '':
            cmdline.extend(value if isinstance(value, list) else [value])
    codeinfo.cmdline_params = cmdline

    # create calc info
    calcinfo = CalcInfo()
    calcinfo.uuid = self.uuid
    calcinfo.cmdline_params = codeinfo.cmdline_params
    calcinfo.codes_info = [codeinfo]

    # file lists
    calcinfo.remote_symlink_list = []
    calcinfo.local_copy_list = []
    calcinfo.remote_copy_list = []
    calcinfo.retrieve_list = settings_dict.pop('additional_retrieve_list', [])

    # symlinks: expose the parent calculation's remote folder in the
    # working directory under the conventional name.
    if parent_calc_folder is not None:
        calcinfo.remote_symlink_list.append(
            (parent_calc_folder.get_computer().uuid,
             parent_calc_folder.get_remote_path(),
             self._PARENT_CALC_FOLDER_NAME))

    return calcinfo
def neworder_potential_wf(settings_node, parent_calc_folder,
                          **kwargs):  #, parent_calc_folder2=None):
    """
    Workfunction to create database structure for aiida_kkr.tools.modify_potential.neworder_potential function
    A temporary file is written in a Sandbox folder on the computer specified via the input computer node before the output potential is stored as SingleFileData in the Database.

    :param settings_node: settings for the neworder_potentail function (ParameterData)
    :param parent_calc_folder: parent calculation remote folder node where the input
        potential is retreived from (RemoteData)
    :param parent_calc_folder2: *optional*, parent calculation remote folder node where
        the second input potential is retreived from in case 'pot2' and 'replace_newpos'
        are also set in settings_node (RemoteData)

    :returns: output_potential node (SingleFileData)

    .. note::
        The settings_node dictionary needs to be of the following form::

            settings_dict = {'pot1': '<filename_input_potential>',  'out_pot': '<filename_output_potential>', 'neworder': [list of intended order in output potential]}

        Optional entries are::

            'pot2': '<filename_second_input_file>'
            'replace_newpos': [[position in neworder list which is replace with potential from pot2, position in pot2 that is chosen for replacement]]
            'label': 'label_for_output_node'
            'description': 'longer_description_for_output_node'
    """
    import os
    from aiida_kkr.tools.tools_kkrimp import modify_potential
    from aiida.common.folders import SandboxFolder
    from aiida.common.exceptions import UniquenessError
    from aiida.orm.calculation.job import JobCalculation
    from aiida.orm import DataFactory

    # parent_calc_folder2 is optional and only relevant together with the
    # 'pot2'/'replace_newpos' settings entries.
    if 'parent_calc_folder2' in kwargs.keys():
        parent_calc_folder2 = kwargs.get('parent_calc_folder2', None)
    else:
        parent_calc_folder2 = None

    # get aiida data types used here
    ParameterData = DataFactory('parameter')
    RemoteData = DataFactory('remote')
    SingleFileData = DataFactory('singlefile')

    # check input consistency
    if not isinstance(settings_node, ParameterData):
        raise InputValidationError(
            'settings_node needs to be a valid aiida ParameterData node')
    if not isinstance(parent_calc_folder, RemoteData):
        raise InputValidationError(
            'parent_calc_folder needs to be a valid aiida RemoteData node')
    if parent_calc_folder2 is not None and not isinstance(
            parent_calc_folder2, RemoteData):
        raise InputValidationError(
            'parent_calc_folder2 needs to be a valid aiida RemoteData node')

    # Mandatory settings entries: pot1, out_pot, neworder.
    settings_dict = settings_node.get_dict()
    pot1 = settings_dict.get('pot1', None)
    if pot1 is None:
        raise InputValidationError(
            'settings_node_dict needs to have key "pot1" containing the filename of the input potential'
        )
    out_pot = settings_dict.get('out_pot', None)
    if out_pot is None:
        raise InputValidationError(
            'settings_node_dict needs to have key "out_pot" containing the filename of the input potential'
        )
    neworder = settings_dict.get('neworder', None)
    if neworder is None:
        raise InputValidationError(
            'settings_node_dict needs to have key "neworder" containing the list of new positions'
        )
    # Optional settings entries.
    pot2 = settings_dict.get('pot2', None)
    replace_newpos = settings_dict.get('replace_newpos', None)

    # Create Sandbox folder for generation of output potential file
    # and construct output potential
    with SandboxFolder() as tempfolder:
        # Get abolute paths of input files from parent calc and filename
        # (the RemoteData must descend from exactly one JobCalculation).
        parent_calcs = parent_calc_folder.get_inputs(node_type=JobCalculation)
        n_parents = len(parent_calcs)
        if n_parents != 1:
            raise UniquenessError(
                "Input RemoteData is child of {} "
                "calculation{}, while it should have a single parent"
                "".format(n_parents, "" if n_parents == 0 else "s"))
        else:
            parent_calc = parent_calcs[0]
        remote_path = parent_calc.out.retrieved.get_abs_path('')
        pot1_path = os.path.join(remote_path, pot1)

        # extract nspin from parent calc's input parameter node
        # and expand the atom-index reorder list to one entry per spin
        # channel (neworder is given per atom, potentials are per spin).
        nspin = parent_calc.inp.parameters.get_dict().get('NSPIN')
        neworder_spin = []
        for iatom in neworder:
            for ispin in range(nspin):
                neworder_spin.append(iatom * nspin + ispin)
        neworder = neworder_spin

        # Copy optional files?
        if pot2 is not None and parent_calc_folder2 is not None:
            parent_calcs = parent_calc_folder2.get_inputs(
                node_type=JobCalculation)
            n_parents = len(parent_calcs)
            if n_parents != 1:
                raise UniquenessError(
                    "Input RemoteData of parent_calc_folder2 is child of {} "
                    "calculation{}, while it should have a single parent"
                    "".format(n_parents, "" if n_parents == 0 else "s"))
            else:
                parent_calc = parent_calcs[0]
            remote_path = parent_calc.out.retrieved.get_abs_path('')
            pot2_path = os.path.join(remote_path, pot2)
        else:
            pot2_path = None

        # change file path to Sandbox folder accordingly
        out_pot_path = tempfolder.get_abs_path(out_pot)

        # run neworder_potential function
        modify_potential().neworder_potential(
            pot1_path,
            out_pot_path,
            neworder,
            potfile_2=pot2_path,
            replace_from_pot2=replace_newpos)

        # store output potential to SingleFileData
        output_potential_sfd_node = SingleFileData(file=out_pot_path)

        # Optional label/description taken from the settings dictionary.
        lbl = settings_dict.get('label', None)
        if lbl is not None:
            output_potential_sfd_node.label = lbl
        desc = settings_dict.get('description', None)
        if desc is not None:
            output_potential_sfd_node.description = desc

        #TODO create shapefun sfd node accordingly
        """
        out_shape_path =

        output_shapefun_sfd_node = SingleFileData(file=out_shape_path)

        lbl2 = settings_dict.get('label_shape', None)
        if lbl2 is None and lbl is not None:
            lbl2 = lbl
        if lbl2 is not None:
            output_shapefun_sfd_node.label = lbl2
        desc2 = settings_dict.get('description_shape', None)
        if desc2 is None and desc is not None:
            desc2 = desc
        if desc2 is not None:
            output_shapefun_sfd_node.description = desc2

        return output_potential_sfd_node, output_shapefun_sfd_node
        """

        return output_potential_sfd_node
def _generate_PWCPinputdata(self,
                            parameters,
                            settings_dict,
                            pseudos,
                            structure,
                            kpoints=None):
    """Create the content of a Quantum ESPRESSO PW/CP input file.

    :param parameters: ParameterData-like node whose dict holds the
        namelists (first-level keys, e.g. CONTROL/SYSTEM) and their flags
    :param settings_dict: plain dict of extra settings; recognized keys
        (FIXED_COORDS, ATOMIC_FORCES, ATOMIC_VELOCITIES, FORCE_KPOINTS_LIST,
        GAMMA_ONLY, NAMELISTS) are popped as they are consumed
    :param pseudos: mapping kind name -> pseudopotential node
    :param structure: AiiDA structure providing cell, kinds and sites
    :param kpoints: kpoints node (mesh or explicit list); only used when
        ``self._use_kpoints`` is True
    :return: tuple ``(inputfile, local_copy_list_to_append)`` where
        ``inputfile`` is the full input-file text and the second element
        lists ``(abs_path, dest_relpath)`` pairs of pseudo files to copy
    :raise InputValidationError: on any inconsistency between the inputs
    """
    from aiida.common.utils import get_unique_filename, get_suggestion
    import re

    local_copy_list_to_append = []

    # I put the first-level keys as uppercase (i.e., namelist and card names)
    # and the second-level keys as lowercase
    # (deeper levels are unchanged)
    input_params = _uppercase_dict(parameters.get_dict(),
                                   dict_name='parameters')
    input_params = {
        k: _lowercase_dict(v, dict_name=k)
        for k, v in input_params.items()  # .items() works on both Py2 and Py3
    }

    # I remove unwanted elements (for the moment, instead, I stop; to change when
    # we setup a reasonable logging)
    for blocked in self._blocked_keywords:
        nl = blocked[0].upper()
        flag = blocked[1].lower()
        defaultvalue = None
        if len(blocked) >= 3:
            defaultvalue = blocked[2]
        if nl in input_params:
            # The following lines is meant to avoid putting in input the
            # parameters like celldm(*)
            stripped_inparams = [
                re.sub("[(0-9)]", "", _) for _ in input_params[nl].keys()
            ]
            if flag in stripped_inparams:
                raise InputValidationError(
                    "You cannot specify explicitly the '{}' flag in the '{}' "
                    "namelist or card.".format(flag, nl))
        if defaultvalue is not None:
            if nl not in input_params:
                input_params[nl] = {}
            input_params[nl][flag] = defaultvalue

    # Set some variables (look out at the case! NAMELISTS should be uppercase,
    # internal flag names must be lowercase)
    if 'CONTROL' not in input_params:
        input_params['CONTROL'] = {}
    input_params['CONTROL']['pseudo_dir'] = self._PSEUDO_SUBFOLDER
    input_params['CONTROL']['outdir'] = self._OUTPUT_SUBFOLDER
    input_params['CONTROL']['prefix'] = self._PREFIX
    input_params['CONTROL']['verbosity'] = input_params['CONTROL'].get(
        'verbosity', self._default_verbosity)  # Set to high if not specified

    # ============ I prepare the input site data =============
    # ------------ CELL_PARAMETERS -----------
    cell_parameters_card = "CELL_PARAMETERS angstrom\n"
    for vector in structure.cell:
        cell_parameters_card += ("{0:18.10f} {1:18.10f} {2:18.10f}"
                                 "\n".format(*vector))

    # ------------- ATOMIC_SPECIES ------------
    atomic_species_card_list = []

    # Keep track of the filenames to avoid to overwrite files
    # I use a dictionary where the key is the pseudo PK and the value
    # is the filename I used. In this way, I also use the same filename
    # if more than one kind uses the same pseudo.
    pseudo_filenames = {}

    # I keep track of the order of species
    kind_names = []

    # I add the pseudopotential files to the list of files to be copied
    for kind in structure.kinds:
        # This should not give errors, I already checked before that
        # the list of keys of pseudos and kinds coincides
        ps = pseudos[kind.name]
        if kind.is_alloy() or kind.has_vacancies():
            raise InputValidationError(
                "Kind '{}' is an alloy or has "
                "vacancies. This is not allowed for pw.x input structures."
                "".format(kind.name))
        try:
            # It it is the same pseudopotential file, use the same filename
            filename = pseudo_filenames[ps.pk]
        except KeyError:
            # The pseudo was not encountered yet; use a new name and
            # also add it to the local copy list
            filename = get_unique_filename(ps.filename,
                                           pseudo_filenames.values())
            pseudo_filenames[ps.pk] = filename
            # I add this pseudo file to the list of files to copy
            local_copy_list_to_append.append(
                (ps.get_file_abs_path(),
                 os.path.join(self._PSEUDO_SUBFOLDER, filename)))
        kind_names.append(kind.name)
        atomic_species_card_list.append("{} {} {}\n".format(
            kind.name.ljust(6), kind.mass, filename))

    # I join the lines, but I resort them using the alphabetical order of
    # species, given by the kind_names list. I also store the mapping_species
    # list, with the order of species used in the file
    mapping_species, sorted_atomic_species_card_list = zip(
        *sorted(zip(kind_names, atomic_species_card_list)))
    # The format of mapping_species required later is a dictionary, whose
    # values are the indices, so I convert to this format
    # Note the (idx+1) to convert to fortran 1-based lists
    mapping_species = {
        sp_name: (idx + 1)
        for idx, sp_name in enumerate(mapping_species)
    }
    # I add the first line
    sorted_atomic_species_card_list = (
        ["ATOMIC_SPECIES\n"] + list(sorted_atomic_species_card_list))
    atomic_species_card = "".join(sorted_atomic_species_card_list)
    # Free memory
    del sorted_atomic_species_card_list
    del atomic_species_card_list

    # ------------ ATOMIC_POSITIONS -----------
    atomic_positions_card_list = ["ATOMIC_POSITIONS angstrom\n"]

    # Check on validity of FIXED_COORDS
    fixed_coords_strings = []
    fixed_coords = settings_dict.pop('FIXED_COORDS', None)
    if fixed_coords is None:
        # No fixed_coords specified: I store a list of empty strings
        fixed_coords_strings = [""] * len(structure.sites)
    else:
        if len(fixed_coords) != len(structure.sites):
            raise InputValidationError(
                "Input structure contains {:d} sites, but "
                "fixed_coords has length {:d}".format(
                    len(structure.sites), len(fixed_coords)))
        for i, this_atom_fix in enumerate(fixed_coords):
            if len(this_atom_fix) != 3:
                raise InputValidationError(
                    "fixed_coords({:d}) has not length three"
                    "".format(i + 1))
            for fixed_c in this_atom_fix:
                if not isinstance(fixed_c, bool):
                    raise InputValidationError(
                        "fixed_coords({:d}) has non-boolean "
                        "elements".format(i + 1))
            if_pos_values = [self._if_pos(_) for _ in this_atom_fix]
            fixed_coords_strings.append(
                " {:d} {:d} {:d}".format(*if_pos_values))

    for site, fixed_coords_string in zip(structure.sites,
                                         fixed_coords_strings):
        atomic_positions_card_list.append(
            "{0} {1:18.10f} {2:18.10f} {3:18.10f} {4}\n".format(
                site.kind_name.ljust(6), site.position[0], site.position[1],
                site.position[2], fixed_coords_string))
    atomic_positions_card = "".join(atomic_positions_card_list)
    del atomic_positions_card_list

    # Optional ATOMIC_FORCES card
    atomic_forces = settings_dict.pop('ATOMIC_FORCES', None)
    if atomic_forces is not None:
        # Checking that there are as many forces defined as there are sites in the structure
        if len(atomic_forces) != len(structure.sites):
            raise InputValidationError(
                'Input structure contains {:d} sites, but atomic forces has length {:d}'
                .format(len(structure.sites), len(atomic_forces)))
        lines = ['ATOMIC_FORCES\n']
        for site, vector in zip(structure.sites, atomic_forces):
            # Checking that all 3 dimensions are specified:
            if len(vector) != 3:
                raise InputValidationError(
                    'Forces({}) for {} has not length three'.format(
                        vector, site))
            lines.append('{0} {1:18.10f} {2:18.10f} {3:18.10f}\n'.format(
                site.kind_name.ljust(6), *vector))
        # Append to atomic_positions_card so that this card will be printed directly after
        atomic_positions_card += ''.join(lines)
        del lines

    # Optional ATOMIC_VELOCITIES card
    atomic_velocities = settings_dict.pop('ATOMIC_VELOCITIES', None)
    if atomic_velocities is not None:
        # Checking that there are as many velocities defined as there are sites in the structure
        if len(atomic_velocities) != len(structure.sites):
            raise InputValidationError(
                'Input structure contains {:d} sites, but atomic velocities has length {:d}'
                .format(len(structure.sites), len(atomic_velocities)))
        lines = ['ATOMIC_VELOCITIES\n']
        for site, vector in zip(structure.sites, atomic_velocities):
            # Checking that all 3 dimensions are specified:
            if len(vector) != 3:
                raise InputValidationError(
                    'Velocities({}) for {} has not length three'.format(
                        vector, site))
            lines.append('{0} {1:18.10f} {2:18.10f} {3:18.10f}\n'.format(
                site.kind_name.ljust(6), *vector))
        # Append to atomic_positions_card so that this card will be printed directly after
        atomic_positions_card += ''.join(lines)
        del lines

    # I set the variables that must be specified, related to the system
    # Set some variables (look out at the case! NAMELISTS should be
    # uppercase, internal flag names must be lowercase)
    if 'SYSTEM' not in input_params:
        input_params['SYSTEM'] = {}
    input_params['SYSTEM']['ibrav'] = 0
    input_params['SYSTEM']['nat'] = len(structure.sites)
    input_params['SYSTEM']['ntyp'] = len(structure.kinds)

    # ============ I prepare the k-points =============
    if self._use_kpoints:
        try:
            mesh, offset = kpoints.get_kpoints_mesh()
            has_mesh = True
            force_kpoints_list = settings_dict.pop('FORCE_KPOINTS_LIST',
                                                   False)
            if force_kpoints_list:
                kpoints_list = kpoints.get_kpoints_mesh(print_list=True)
                num_kpoints = len(kpoints_list)
                has_mesh = False
                weights = [1.] * num_kpoints
        except AttributeError:
            try:
                kpoints_list = kpoints.get_kpoints()
                num_kpoints = len(kpoints_list)
                has_mesh = False
                if num_kpoints == 0:
                    raise InputValidationError(
                        "At least one k point must be "
                        "provided for non-gamma calculations")
            except AttributeError:
                raise InputValidationError(
                    "No valid kpoints have been found")
            try:
                _, weights = kpoints.get_kpoints(also_weights=True)
            except AttributeError:
                weights = [1.] * num_kpoints

        gamma_only = settings_dict.pop("GAMMA_ONLY", False)
        if gamma_only:
            if has_mesh:
                if tuple(mesh) != (1, 1, 1) or tuple(offset) != (0., 0., 0.):
                    raise InputValidationError(
                        "If a gamma_only calculation is requested, the "
                        "kpoint mesh must be (1,1,1),offset=(0.,0.,0.)")
            else:
                # BUGFIX: the original used `tuple(0., 0., 0.)`, which is a
                # TypeError (tuple() takes a single iterable); the intended
                # comparison is against the literal tuple (0., 0., 0.)
                if (len(kpoints_list) != 1
                        or tuple(kpoints_list[0]) != (0., 0., 0.)):
                    raise InputValidationError(
                        "If a gamma_only calculation is requested, the "
                        "kpoints coordinates must only be (0.,0.,0.)")
            kpoints_type = "gamma"
        elif has_mesh:
            kpoints_type = "automatic"
        else:
            kpoints_type = "crystal"

        kpoints_card_list = ["K_POINTS {}\n".format(kpoints_type)]
        if kpoints_type == "automatic":
            if any((i != 0. and i != 0.5) for i in offset):
                raise InputValidationError("offset list must only be made "
                                           "of 0 or 0.5 floats")
            the_offset = [0 if i == 0. else 1 for i in offset]
            the_6_integers = list(mesh) + the_offset
            kpoints_card_list.append("{:d} {:d} {:d} {:d} {:d} {:d}\n"
                                     "".format(*the_6_integers))
        elif kpoints_type == "gamma":
            # nothing to be written in this case
            pass
        else:
            kpoints_card_list.append("{:d}\n".format(num_kpoints))
            for kpoint, weight in zip(kpoints_list, weights):
                kpoints_card_list.append(
                    " {:18.10f} {:18.10f} {:18.10f} {:18.10f}"
                    "\n".format(kpoint[0], kpoint[1], kpoint[2], weight))
        kpoints_card = "".join(kpoints_card_list)
        del kpoints_card_list

    # =================== NAMELISTS AND CARDS ========================
    try:
        namelists_toprint = settings_dict.pop('NAMELISTS')
        if not isinstance(namelists_toprint, list):
            raise InputValidationError(
                "The 'NAMELISTS' value, if specified in the settings input "
                "node, must be a list of strings")
    except KeyError:  # list of namelists not specified; do automatic detection
        try:
            control_nl = input_params['CONTROL']
            calculation_type = control_nl['calculation']
        except KeyError:
            raise InputValidationError(
                "No 'calculation' in CONTROL namelist."
                "It is required for automatic detection of the valid list "
                "of namelists. Otherwise, specify the list of namelists "
                "using the NAMELISTS key inside the 'settings' input node")
        try:
            namelists_toprint = self._automatic_namelists[calculation_type]
        except KeyError:
            sugg_string = get_suggestion(calculation_type,
                                         self._automatic_namelists.keys())
            raise InputValidationError(
                "Unknown 'calculation' value in "
                "CONTROL namelist {}. Otherwise, specify the list of "
                "namelists using the NAMELISTS inside the 'settings' input "
                "node".format(sugg_string))

    inputfile = ""
    for namelist_name in namelists_toprint:
        inputfile += "&{0}\n".format(namelist_name)
        # namelist content; set to {} if not present, so that we leave an
        # empty namelist
        namelist = input_params.pop(namelist_name, {})
        for k, v in sorted(namelist.items()):  # .items() works on Py2 and Py3
            inputfile += convert_input_to_namelist_entry(
                k, v, mapping=mapping_species)
        inputfile += "/\n"

    # Write cards now
    inputfile += atomic_species_card
    inputfile += atomic_positions_card
    if self._use_kpoints:
        inputfile += kpoints_card
    inputfile += cell_parameters_card
    #TODO: write CONSTRAINTS
    #TODO: write OCCUPATIONS

    if input_params:
        raise InputValidationError(
            "The following namelists are specified in input_params, but are "
            "not valid namelists for the current type of calculation: "
            "{}".format(",".join(input_params.keys())))

    return inputfile, local_copy_list_to_append
def process_test(self,
                 entity_type,
                 url,
                 full_list=False,
                 empty_list=False,
                 expected_list_ids=None,
                 expected_range=None,
                 expected_errormsg=None,
                 uuid=None,
                 result_node_type=None,
                 result_name=None):
    # pylint: disable=too-many-arguments
    """Issue a GET request against the REST API and verify the response.

    Exactly one of ``full_list``, ``empty_list``, ``expected_list_ids`` or
    ``expected_range`` selects which subset of the dummy data the response
    is compared against; alternatively ``expected_errormsg`` asserts an
    error reply instead of a data payload.

    :param entity_type: url requested fot the type of the node
    :param url: web url
    :param full_list: if url is requested to get full list
    :param empty_list: if the response list is empty
    :param expected_list_ids: list of expected ids from data
    :param expected_range: [start, stop] range of expected ids from data
    :param expected_errormsg: expected error message in response
    :param uuid: url requested for the node pk
    :param result_node_type: node type in response data
    :param result_name: result name in response e.g. incoming, outgoing
    """
    if expected_list_ids is None:
        expected_list_ids = []
    if expected_range is None:
        expected_range = []
    if result_node_type is None and result_name is None:
        # default both to the requested entity type
        result_node_type = result_name = entity_type

    url = self._url_prefix + url
    with self.app.test_client() as client:
        raw_response = client.get(url)
        response = json.loads(raw_response.data)

        if expected_errormsg:
            # error path: only the message is checked
            self.assertEqual(response['message'], expected_errormsg)
        else:
            dummy = self._dummy_data[result_node_type]
            if full_list:
                expected_data = dummy
            elif empty_list:
                expected_data = []
            elif expected_list_ids:
                expected_data = [dummy[idx] for idx in expected_list_ids]
            elif expected_range != []:
                start, stop = expected_range
                expected_data = dummy[start:stop]
            else:
                from aiida.common.exceptions import InputValidationError
                raise InputValidationError(
                    'Pass the expected range of the dummydata')

            # compare by uuid, preserving order
            self.assertEqual(
                [node['uuid'] for node in expected_data],
                [node['uuid'] for node in response['data'][result_name]])

        self.compare_extra_response_data(entity_type, url, response, uuid)
def _prepare_for_submission(self, tempfolder, inputdict):
    """Write the NWChem input file and build the calculation info.

    :param tempfolder: sandbox folder in which the input file is created
    :param inputdict: dict of input nodes; must contain a ``structure``
        (StructureData) and a ``code``, and may contain ``parameters``
        (ParameterData) with keys abbreviation/title/basis/task/add_cell
    :return: a :class:`CalcInfo` instance describing the job
    :raise InputValidationError: if required inputs are missing or of the
        wrong type
    """
    import numpy as np

    try:
        struct = inputdict.pop(self.get_linkname('structure'))
    except KeyError:
        raise InputValidationError(
            "no structure is specified for this calculation")
    if not isinstance(struct, StructureData):
        raise InputValidationError("struct is not of type StructureData")

    try:
        code = inputdict.pop(self.get_linkname('code'))
    except KeyError:
        raise InputValidationError(
            "no code is specified for this calculation")

    atoms = struct.get_ase()

    # Cell-vector lengths (Euclidean norms of the three lattice vectors)
    lat_lengths = [
        (atoms.cell[0]**2).sum()**0.5,
        (atoms.cell[1]**2).sum()**0.5,
        (atoms.cell[2]**2).sum()**0.5,
    ]

    # Cell angles alpha/beta/gamma in degrees, from the dot products of
    # the lattice vectors (angle between vectors j and k opposite to i)
    lat_angles = np.arccos([
        np.vdot(atoms.cell[1], atoms.cell[2]) / lat_lengths[1] /
        lat_lengths[2],
        np.vdot(atoms.cell[0], atoms.cell[2]) / lat_lengths[0] /
        lat_lengths[2],
        np.vdot(atoms.cell[0], atoms.cell[1]) / lat_lengths[0] /
        lat_lengths[1],
    ]) / np.pi * 180

    parameters = inputdict.pop(self.get_linkname('parameters'), None)
    if parameters is None:
        parameters = ParameterData(dict={})
    if not isinstance(parameters, ParameterData):
        raise InputValidationError(
            "parameters is not of type ParameterData")
    par = parameters.get_dict()

    abbreviation = par.pop('abbreviation', 'aiida_calc')
    title = par.pop('title', 'AiiDA NWChem calculation')
    basis = par.pop('basis', None)
    task = par.pop('task', 'scf')
    add_cell = par.pop('add_cell', True)

    if basis is None:
        # Default basis: 6-31g library basis for every chemical species
        basis = dict()
        for atom_type in set(atoms.get_chemical_symbols()):
            basis[atom_type] = 'library 6-31g'

    input_filename = tempfolder.get_abs_path(self._DEFAULT_INPUT_FILE)
    with open(input_filename, 'w') as f:
        f.write('start {}\ntitle "{}"\n\n'.format(abbreviation, title))
        f.write('geometry units au\n')
        if add_cell:
            f.write('  system crystal\n'.replace('  ', ' '))
            f.write(' lat_a {}\n lat_b {}\n lat_c {}\n'.format(
                *lat_lengths))
            f.write(' alpha {}\n beta {}\n gamma {}\n'.format(
                *lat_angles))
            f.write(' end\n')
        # Hoist the positions array out of the loop: get_positions()
        # rebuilds the full array, so calling it per coordinate was
        # wasteful (3N calls instead of 1)
        positions = atoms.get_positions()
        for atom_type, position in zip(atoms.get_chemical_symbols(),
                                       positions):
            f.write(' {} {} {} {}\n'.format(atom_type, position[0],
                                            position[1], position[2]))
        f.write('end\nbasis\n')
        # .items() instead of Py2-only .iteritems() (works on both)
        for atom_type, b in basis.items():
            f.write(' {} {}\n'.format(atom_type, b))
        f.write('end\ntask {}\n'.format(task))
        f.flush()

    commandline_params = self._default_commandline_params

    calcinfo = CalcInfo()
    calcinfo.uuid = self.uuid
    calcinfo.local_copy_list = []
    calcinfo.remote_copy_list = []
    calcinfo.retrieve_list = [
        self._DEFAULT_OUTPUT_FILE, self._DEFAULT_ERROR_FILE
    ]
    calcinfo.retrieve_singlefile_list = []

    codeinfo = CodeInfo()
    codeinfo.cmdline_params = commandline_params
    codeinfo.stdout_name = self._DEFAULT_OUTPUT_FILE
    codeinfo.stderr_name = self._DEFAULT_ERROR_FILE
    codeinfo.code_uuid = code.uuid
    calcinfo.codes_info = [codeinfo]

    return calcinfo