def gp_plot(seedname, data_curves, cwd='.'):
    """Write Gnuplot (.gp) scripts plotting the convergence curves.

    For each scanned X parameter with data and an existing
    <seedname>_<xtype>_conv.dat file, writes a matching
    <seedname>_<xtype>_conv.gp script in cwd plotting every non-flat
    Y quantity (energy/forces/stress), rescaled for readability.

    Parameters:
    |   seedname (str): seedname of the CASTEP job
    |   data_curves (OrderedDict): convergence curves, one entry per X
    |       parameter, each with 'values', 'labels' and 'Ys' keys
    |   cwd (str): directory containing the .dat files and where the
    |       .gp files are written
    """
    # Per-(X, Y) scaling factors so curves share a readable range
    scales = find_scale(data_curves)
    for xtype, xdata in data_curves.items():
        if len(xdata['values']) == 0:
            # No data
            continue
        # Do we have the corresponding .dat file?
        dataf = '{0}_{1}_conv.dat'.format(seedname, xtype)
        if not os.path.isfile(os.path.join(cwd, dataf)):
            utils.warn('Data file {0} not found, skipping.'.format(dataf))
            continue
        # Plot lines
        plines = []
        for ytype, yvals in xdata['Ys'].items():
            args = dict(_y_types[ytype])
            args['file'] = dataf
            # Reference value: last (presumably best converged) point
            args['ref'] = yvals[-1]
            args['scale'] = scales[xtype][ytype]
            # Human-readable power-of-ten label for the legend
            lsc = '10^{{{0}}}'.format(-int(np.log10(scales[xtype][ytype])))
            args['legend'] = args['legend'].format(scale=lsc)
            if (max(yvals)-min(yvals)) == 0:
                # Flat curve: nothing meaningful to plot for this quantity
                continue
            plines.append(_gp_plot_template.format(**args))
        args = dict(_x_types[xtype])
        if xtype == 'kpn':
            # K-point grids are plotted against total k-point count, with
            # the grid label ("a b c") as a rotated tic label
            xvals = np.prod(xdata['values'], axis=1)
            args['xticsblock'] = ('set xtics (' +
                                  ', '.join(['"{1}" {0}'.format(v, l)
                                             for v, l in
                                             zip(xvals, xdata['labels'])]
                                            ) + ') rotate by 45 right')
        else:
            args['xticsblock'] = ''
        args['plot'] = ', '.join(plines)
        gp_file = _gp_template.format(**args)
        with open(os.path.join(cwd,
                               '{0}_{1}_conv.gp'.format(seedname, xtype)),
                  'w') as f:
            f.write(gp_file)
    return
def write_report(seedname, data_curves, Etol=1e-3, Ftol=1e-1, Stol=1e-1,
                 cwd='.'):
    """Write a plain text convergence report to <seedname>_report.txt.

    For every scanned X parameter and every Y quantity (energy, forces,
    stress), suggests the smallest X value from which all subsequent
    errors (w.r.t. the final point) stay below the given tolerance.

    Parameters:
    |   seedname (str): seedname of the CASTEP job
    |   data_curves (OrderedDict): convergence curves, one entry per X
    |       parameter, each with 'values', 'labels' and 'Ys' keys
    |   Etol (float): tolerance on total energy
    |   Ftol (float): tolerance on maximum force norm
    |   Stol (float): tolerance on stress norm
    |   cwd (str): directory in which to write the report
    """
    tols = OrderedDict(zip(['E', 'F', 'S'], [Etol, Ftol, Stol]))
    report = ''
    for ytype, tol in tols.items():
        report += ('Tolerance on {Y}: {tol} {unit}\n'
                   ).format(Y=_y_types[ytype]['name'], tol=tol,
                            unit=_y_types[ytype]['unit'])
    for xtype, xdata in data_curves.items():
        if len(xdata['values']) == 0:
            utils.warn('No data available for convergence of ' +
                       _x_types[xtype]['name'])
            continue
        for ytype, yvals in xdata['Ys'].items():
            tol = tols[ytype]
            # Error with respect to the last (best converged) point
            yerr = np.abs(yvals-yvals[-1])
            if (max(yerr) == min(yerr)):
                # Since yerr[-1] is 0, max == min means all errors are
                # zero: the quantity never changed, nothing to report
                continue
            # Find converged streak: first index from which *every*
            # following error is below tolerance (cumprod of the
            # reversed boolean mask keeps only the trailing run)
            conv_i = np.where(np.cumprod(yerr[::-1] < tol)[::-1] == 1)[0]
            try:
                conv_i = conv_i[0]
            except IndexError:
                conv_i = -1  # No convergence
            if conv_i == -1:
                # FIX: added the missing space between the two sentences;
                # the adjacent literals used to concatenate into
                # "not found.Increase"
                report += ('Convergence of {X} with {Y} not found. '
                           'Increase tolerance or extend tested range.\n'
                           ).format(
                    X=_x_types[xtype]['name'],
                    Y=_y_types[ytype]['name'])
            else:
                conv_x = xdata['labels'][conv_i]
                report += ('Based on {Y}, suggested value for {X} is {conv} '
                           '{unit}\n').format(X=_x_types[xtype]['name'],
                                              Y=_y_types[ytype]['name'],
                                              conv=conv_x,
                                              unit=_x_types[xtype]['unit'])
    print('')
    print(report)
    with open(os.path.join(cwd, '{0}_report.txt'.format(seedname)),
              'w') as f:
        f.write(report)
def read_data(self):
    """Collect results from all completed jobs and arrange them as
    convergence curves, one per scanned parameter.

    Returns:
    |   data_curves (OrderedDict): for each X parameter, a dict with
    |       'values' (array of parameter values), 'labels' (list of
    |       human-readable labels) and 'Ys' (OrderedDict of E/F/S
    |       arrays), restricted to jobs that actually completed.
    """
    jobstate = self.check()
    jobdata = OrderedDict([('E', {}), ('F', {}), ('S', {})])

    # Harvest energy, max force norm and stress norm from each
    # finished calculation
    for name, job in self._worktree.items():
        if jobstate[name] != C_COMPLETE:
            # Job isn't finished
            utils.warn('Results for {0} missing, skipping.'.format(name))
            continue
        calc = Castep(keyword_tolerance=3)
        calc.read(job.castep)
        jobdata['E'][name] = calc._energy_total
        jobdata['F'][name] = max(np.linalg.norm(calc._forces, axis=1))
        jobdata['S'][name] = np.linalg.norm(calc._stress)

    # Organise it by ranges, keeping only completed jobs
    wtree = self._worktree
    data_curves = OrderedDict()
    for X, jobrange in self._ranges.items():
        done = [j for j in jobrange if jobstate[j] == C_COMPLETE]
        curve = {
            'values': np.array([wtree[j].values[X] for j in done]),
            'labels': [wtree[j].labels[X] for j in done],
            'Ys': OrderedDict()
        }
        for Y, ydata in jobdata.items():
            curve['Ys'][Y] = np.array([ydata[j] for j in done])
        data_curves[X] = curve

    return data_curves
def param_check(params):
    """Check the values of the parameters for internal contradictions.

    Validates and normalises *params* in place: verifies cutoff and
    k-point ranges, fills in default fine-grid bounds when fgmmode is
    set, and warns about suspicious running/submission scripts.

    Parameters:
    |   params (dict): parsed convergence parameters (mutated in place
    |       for 'fgmmin'/'fgmmax' defaults)

    Raises:
    |   ConvError: if a range is contradictory or the submission
    |       script file does not exist.
    """
    if params['cutmax'] < params['cutmin']:
        raise ConvError('Invalid parameter - must be cutmax > cutmin')
    if params['kpnmax'] < params['kpnmin']:
        raise ConvError('Invalid parameter - must be kpnmax > kpnmin')
    # Fine grid max checks
    if params['fgmmode'] is not None:
        gscale = 1.75  # This is a CASTEP default
        # Lower bound for the fine grid scale, tied to the relevant cutoff
        lbound = gscale**2 * (params['cutmin'] if params['fgmmode'] == 'min'
                              else params['cutmax'])
        params['fgmmin'] = (lbound if params['fgmmin'] is None
                            else params['fgmmin'])
        params['fgmmax'] = (params['fgmmin'] + 3 * params['fgmstep']
                            if params['fgmmax'] is None
                            else params['fgmmax'])
        if any([
                params['fgmmax'] <= lbound, params['fgmmin'] < lbound,
                params['fgmmin'] > params['fgmmax']
        ]):
            raise ConvError('Invalid parameter - must be fgmmax > fgmmin >= '
                            '{0}*cutoff_{1}'.format(gscale**2,
                                                    params['fgmmode']))
    if '<seedname>' not in params['rcmd']:
        utils.warn('Running command does not contain a <seedname> tag.'
                   ' This is likely erroneous and needs checking.')
    if params['subs'] is not None:
        try:
            with open(params['subs']) as f:
                if '<seedname>' not in f.read():
                    # FIX: added missing space before '<seedname>'; the
                    # adjacent literals used to concatenate into
                    # "a<seedname> tag"
                    utils.warn('Submission script does not contain a '
                               '<seedname> tag. This is likely erroneous and'
                               ' needs checking.')
        except IOError:
            raise ConvError('Submission script file does not exist')
def main(seedname, cmdline_task):
    """Entry point: run the requested convergence task for a seedname.

    Reads <seedname>.conv (falling back to defaults), the base .cell and
    .param files, then performs the chosen task: 'clear' (remove
    generated files), 'input'/'inputrun'/'all' (generate and optionally
    run the job tree), 'output'/'all' (collect data and write
    reports/plots).

    Parameters:
    |   seedname (str): seedname (possibly with path) of the CASTEP job
    |   cmdline_task (str or None): task from the command line; overrides
    |       the one in the .conv file when not None
    """
    seedpath, basename = os.path.split(seedname)

    utils.check_pyversion()

    print(__intromsg__)

    print('Reading {0}.conv'.format(seedname))
    try:
        with open('{0}.conv'.format(seedname)) as f:
            convpars = parse_convfile(f.read())
    except IOError:
        utils.warn('.conv file not found - using default parameters')
        convpars = parse_convfile()

    task = cmdline_task if cmdline_task is not None else convpars['ctsk']

    # Now open the base cell and param files
    cname = '{0}.cell'.format(seedname)
    print('Reading ' + cname)
    # Necessary because of ASE's annoying messages...
    # FIX: the file handle used to be opened inline and never closed
    # (resource leak); use a with block instead
    with open(cname) as cf:
        cfile = read_castep_cell(cf,
                                 calculator_args={'keyword_tolerance': 3})

    pname = '{0}.param'.format(seedname)
    print('Reading ' + pname)
    try:
        read_param(pname, calc=cfile.calc)
    except FileNotFoundError:
        print('File {0} not found, skipping'.format(pname))

    print('')

    # Now go for clearing
    if task == 'clear':
        to_del = [basename + f for f in [_in_dir, _out_dir, _json_file]]
        print('The following files and folders will be removed:\n\t' +
              '\n\t'.join(to_del))
        ans = utils.safe_input('Continue (y/N)? ')
        if ans.lower() == 'y':
            for f in to_del:
                if not os.path.exists(f):
                    continue
                try:
                    os.remove(f)
                except OSError:
                    # Not a file: remove the whole directory tree
                    shutil.rmtree(f)
        return

    # Strip the files
    cfile.calc.param.task = 'SinglePoint'
    cfile.calc.param.calculate_stress = convpars['cnvstr']
    # Clean up all references to kpoints
    kclean = [
        'kpoints_mp_grid', 'kpoint_mp_grid', 'kpoints_mp_spacing',
        'kpoint_mp_spacing', 'kpoints_list', 'kpoint_list'
    ]
    # These, clean up only in the presence of the relevant option
    if convpars['gamma']:
        kclean += ['kpoints_mp_offset', 'kpoint_mp_offset']
    for k in kclean:
        # FIX: idiomatic setattr instead of calling __setattr__ directly
        setattr(cfile.calc.cell, k, None)

    # Get the kpoint basis
    invcell = cfile.get_reciprocal_cell()
    kpnbase = np.linalg.norm(invcell, axis=1)
    kpnbase = kpnbase / min(kpnbase)

    # Convergence ranges?
    convranges = make_ranges(convpars, kpnbase)

    # Ask for confirmation (only when the directory already exists and
    # files may be overwritten)
    if task in ('input', 'inputrun', 'all'):
        try:
            os.mkdir(basename + _in_dir)
        except OSError:
            utils.warn('Input directory existing - some files could be '
                       'overwritten.')
            ans = utils.safe_input('Continue (y/N)? ')
            if ans.lower() != 'y':
                return
    if task in ('output', 'all'):
        try:
            os.mkdir(basename + _out_dir)
        except OSError:
            utils.warn('Output directory existing - some files could be '
                       'overwritten.')
            ans = utils.safe_input('Continue (y/N)? ')
            if ans.lower() != 'y':
                return

    # The Worktree object is useful for a number of things in all tasks
    wtree = Worktree(basename, convpars, convranges)

    ### PHASE 1: Input ###
    if task in ('input', 'inputrun', 'all'):
        # Now look for pseudopotentials
        find_pspots(cfile, basename, seedpath)
        add_castepopts(cfile, convpars['c8plus'])
        # If required, rattle the atoms
        if convpars['displ'] != 0:
            cfile.rattle(abs(convpars['displ']))
            # Rattling breaks symmetry, so remove any symmetry keywords
            cfile.calc.cell.symmetry_generate = None
            cfile.calc.cell.symmetry_ops = None
        wtree.write(cfile)

    ### PHASE 2: Running ###
    if task in ('inputrun', 'all'):
        # Not waiting only makes sense for inputrun
        wait = convpars['jwait'] or (task == 'all')
        wtree.run(convpars['rcmd'], wait)

    ### PHASE 3: Output processing ###
    if task in ('output', 'all'):
        data_curves = wtree.read_data()
        print('Writing output to ' + basename + _out_dir)
        write_dat(basename, data_curves, basename + _out_dir)
        write_report(basename, data_curves, convpars['nrgtol'],
                     convpars['fortol'], convpars['strtol'],
                     basename + _out_dir)
        if convpars['outp'] == 'gnuplot':
            gp_plot(basename, data_curves, basename + _out_dir)
        elif convpars['outp'] == 'grace':
            agr_plot(basename, data_curves, basename + _out_dir)