def init(self, timeStamp=0.0):
    ScreenWriter.screen_output(self, 'verbose', 'ml_train_init: init')

    # Get config filenames.
    current_ml_train_data = self.services.get_config_param('ML_TRAIN_DATA')
    current_ml_train_state = self.services.get_config_param('CURRENT_ML_TRAIN_STATE')

    # Remove old inputs.
    for file in os.listdir('.'):
        os.remove(file)

    # Stage input files and set up the initial state.
    self.services.stage_input_files(self.INPUT_FILES)

    # Create the plasma state from files. Input files can either be a new plasma
    # state, a training data file or both. If both files were staged, replace the
    # training data input file. If the training data file is present, flag the
    # plasma state as needing to be updated.
    with ZipState.ZipState(current_ml_train_state, 'a') as zip_ref:
        if os.path.exists(current_ml_train_data):
            os.rename(current_ml_train_data, 'training_data.dat')
            zip_ref.write('training_data.dat')
            zip_ref.set_state(state='needs_update')

    self.services.update_plasma_state()
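
# The init components in this section hand work downstream by setting a state
# flag inside the zip archive. A minimal sketch of that handshake, using only
# the ZipState calls that appear in this section (write, set_state, get_state);
# the file names below are hypothetical examples, not part of any component.
def _sketch_state_flag_roundtrip():
    # Producer side: stage a new input and mark the state as stale.
    with ZipState.ZipState('example_state.zip', 'a') as zip_ref:
        zip_ref.write('training_data.dat')
        zip_ref.set_state(state='needs_update')

    # Consumer side: only rerun the expensive step when the flag says so, then
    # mark the state as up to date for the next component.
    with ZipState.ZipState('example_state.zip', 'a') as zip_ref:
        flags = zip_ref.get_state()
        if 'state' in flags and flags['state'] == 'needs_update':
            # ... run the component's step here ...
            zip_ref.set_state(state='updated')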
def init(self, timeStamp=0.0):
    ScreenWriter.screen_output(self, 'verbose', 'siesta_init: init')

    # Get config filenames.
    current_vmec_namelist = self.services.get_config_param('VMEC_NAMELIST_INPUT')
    current_vmec_state = self.services.get_config_param('CURRENT_VMEC_STATE')
    current_siesta_namelist = self.services.get_config_param('SIESTA_NAMELIST_INPUT')
    current_siesta_state = self.services.get_config_param('CURRENT_SIESTA_STATE')

    # Remove old inputs. Stage input files.
    for file in os.listdir('.'):
        os.remove(file)

    self.services.stage_input_files(self.INPUT_FILES)

    # Create a vmec state. If the vmec namelist file exists, add the namelist
    # input file.
    with ZipState.ZipState(current_vmec_state, 'a') as zip_ref:
        if os.path.exists(current_vmec_namelist):
            zip_ref.write(current_vmec_namelist)
            zip_ref.set_state(state='needs_update')

    # Create state from files. Input files can either be a new state, a namelist
    # input file or both. If both files were staged, replace the namelist input
    # file. If the namelist file is present, flag the state as needing to be
    # updated. Sub states will automatically merge.
    with ZipState.ZipState(current_siesta_state, 'a') as zip_ref:
        if os.path.exists(current_siesta_namelist):
            zip_ref.write(current_siesta_namelist)
            zip_ref.set_state(state='needs_update')

        # The vmec state will be merged with any existing vmec state in the
        # siesta state.
        zip_ref.write(current_vmec_state)

    self.services.update_state()
def eval_jacobian(self, timeStamp=0.0):
    ScreenWriter.screen_output(self, 'verbose', 'quasi_newton_driver: eval_jacobian')

    # Set the model to a known state.
    shutil.copy2(self.current_model_state, 'model_inputs')
    for worker in self.model_workers:
        worker['wait'] = self.services.call_nonblocking(worker['init'], 'init',
                                                        timeStamp)

    # Perturb the parameters.
    for worker in self.model_workers:
        keywords = {worker['name']: worker['value'] + worker['vrnc']}
        self.services.wait_call(worker['wait'], True)
        worker['wait'] = self.services.call_nonblocking(worker['driver'], 'init',
                                                        timeStamp, **keywords)

    # Recompute the model.
    for worker in self.model_workers:
        self.services.wait_call(worker['wait'], True)
        worker['wait'] = self.services.call_nonblocking(worker['driver'], 'step',
                                                        timeStamp,
                                                        result_file=worker['result'])

    # Collect the results.
    for worker in self.model_workers:
        self.services.wait_call(worker['wait'], True)
        worker['wait'] = self.services.call_nonblocking(worker['init'], 'init',
                                                        timeStamp)

    self.services.stage_subflow_output_files()

    # Compute the normalized jacobian A.
    #
    #     A_ij = d e_i/d a_j                                                  (1)
    #
    # where e is the error vector
    #
    #     e_i = sqrt(W_i)*(S_i - M_i)/sigma_i                                 (2)
    #
    # so that chi^2 = sum_i e_i^2 = sum_i W_i*((S_i - M_i)/sigma_i)^2.
    #
    # Note that, because of how the memory is laid out, the Jacobian is stored
    # transposed.
    for i, worker in enumerate(self.model_workers):
        with ZipState.ZipState(worker['output'], 'a') as zip_ref:
            zip_ref.extract(worker['result'])
            with open(worker['result'], 'r') as result_file:
                self.jacobian[i] = self.e - self.get_e(result_file)

    with open('jacobian.log', 'a') as jacobian_ref:
        jacobian_ref.write('Jacobian step {}\n'.format(timeStamp))
        for j in range(len(self.e)):
            self.jacobian[:, j].tofile(jacobian_ref, sep=',', format='%12.5e')
            jacobian_ref.write('\n')
        jacobian_ref.write('\n')
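
# The loop above builds each Jacobian row as self.e - self.get_e(result_file),
# i.e. a one-sided finite difference of the error vector with one parameter
# displaced by its variance vrnc (one unit in normalized parameter space). A
# standalone numpy sketch of that construction; the toy error function and the
# argument names are illustrative only, not part of the component.
import numpy

def _sketch_normalized_jacobian(error, a, vrnc):
    """error(a) returns the error vector e for parameter values a; vrnc holds
    the normalizing scales. Row j mirrors the convention above:
    e(a) - e(a + vrnc_j * unit_j)."""
    e0 = error(a)
    jacobian = numpy.empty((len(a), len(e0)), dtype=float)
    for j in range(len(a)):
        a_pert = numpy.array(a, dtype=float)
        a_pert[j] += vrnc[j]
        jacobian[j] = e0 - error(a_pert)
    return jacobian

# Example with a linear toy model, so the finite difference is exact:
# _sketch_normalized_jacobian(lambda a: numpy.array([2.0*a[0], a[0] + a[1]]),
#                             [1.0, 2.0], [0.1, 0.5])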
def init(self, timeStamp=0.0, **keywords):
    ScreenWriter.screen_output(self, 'verbose', 'ml_train: init')

    current_ml_train_state = self.services.get_config_param('CURRENT_ML_TRAIN_STATE')

    # Stage state.
    self.services.stage_state()

    # Unzip files from the current state. Use mode 'a' so files can be read
    # from and written to.
    self.zip_ref = ZipState.ZipState(current_ml_train_state, 'a')
    self.zip_ref.extract('training_data.dat')
def init(self, timeStamp=0.0, **keywords):
    ScreenWriter.screen_output(self, 'verbose', 'solps_iter: init')

    # Set the top of the SOLPS source tree as an environment variable.
    if timeStamp == 0.0:
        self.eirene_database_path = self.services.get_config_param('EIRENE_DATABASE_PATH')
        self.current_solps_state = self.services.get_config_param('CURRENT_SOLPS_STATE')

        # The diagnostic parameters are optional.
        try:
            self.diag_geometry = self.services.get_config_param('DIAGNOSTIC_GEOMETRY')
            self.diag_state = self.services.get_config_param('DIAGNOSTIC_STATE')
        except Exception:
            pass

        os.environ['SOLPSTOP'] = self.services.get_config_param('SOLPSTOP')

    # Remove existing files.
    for file in os.listdir('.'):
        os.remove(file)

    self.services.stage_state()

    self.zip_ref = ZipState.ZipState(self.current_solps_state, 'a')
    self.zip_ref.extractall()

    # Create eirene symbolic links.
    os.symlink(os.path.join(self.eirene_database_path, 'graphite_ext.dat'), 'graphite_ext.dat')
    os.symlink(os.path.join(self.eirene_database_path, 'mo_ext.dat'), 'mo_ext.dat')
    os.symlink(os.path.join(self.eirene_database_path, 'AMJUEL'), 'AMJUEL')
    os.symlink(os.path.join(self.eirene_database_path, 'H2VIBR'), 'H2VIBR')
    os.symlink(os.path.join(self.eirene_database_path, 'HYDHEL'), 'HYDHEL')
    os.symlink(os.path.join(self.eirene_database_path, 'METHANE'), 'METHANE')
    os.symlink(os.path.join(self.eirene_database_path, 'PHOTON'), 'PHOTON')
    os.symlink(os.path.join(self.eirene_database_path, 'Surfacedata', 'SPUTTER'), 'SPUTTER')
    os.symlink(os.path.join(self.eirene_database_path, 'Surfacedata', 'TRIM', 'trim.dat'), 'fort.21')
    os.symlink(os.path.join(self.eirene_database_path, 'Surfacedata', 'TRIM', 'marlow.dat'), 'fort.22')

    # Update parameters in the namelist.
    self.set_namelists(**keywords)
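
# The block of os.symlink calls above could equally be driven by a small table
# of (source parts, link name) pairs. A sketch of that alternative, assuming
# the same eirene database layout as the calls above; offered only as a design
# note, not as the component's implementation.
import os

def _sketch_make_eirene_links(eirene_database_path):
    links = [
        (('graphite_ext.dat',), 'graphite_ext.dat'),
        (('mo_ext.dat',), 'mo_ext.dat'),
        (('AMJUEL',), 'AMJUEL'),
        (('H2VIBR',), 'H2VIBR'),
        (('HYDHEL',), 'HYDHEL'),
        (('METHANE',), 'METHANE'),
        (('PHOTON',), 'PHOTON'),
        (('Surfacedata', 'SPUTTER'), 'SPUTTER'),
        (('Surfacedata', 'TRIM', 'trim.dat'), 'fort.21'),
        (('Surfacedata', 'TRIM', 'marlow.dat'), 'fort.22')
    ]
    for parts, link_name in links:
        os.symlink(os.path.join(eirene_database_path, *parts), link_name)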
def init(self, timeStamp=0.0):
    ScreenWriter.screen_output(self, 'verbose', 'quasi_newton_init: init')

    # Get config filenames.
    current_model_state = self.services.get_config_param('MODEL_INPUT')
    quasi_newton_config = self.services.get_config_param('QUASI_NEWTON_CONFIG')
    current_quasi_newton_state = self.services.get_config_param('CURRENT_QUASI_NEWTON_STATE')

    # Remove old inputs. Stage input files.
    for file in os.listdir('.'):
        os.remove(file)

    self.services.stage_input_files(self.INPUT_FILES)

    # Create state from files.
    with ZipState.ZipState(current_quasi_newton_state, 'a') as zip_ref:
        zip_ref.write(quasi_newton_config)
        zip_ref.write(current_model_state)

    self.services.update_state()
def init(self, timeStamp=0.0):
    ScreenWriter.screen_output(self, 'verbose', 'v3fit_init: init')

    # Get config filenames.
    current_vmec_namelist = self.services.get_config_param('VMEC_NAMELIST_INPUT')
    current_vmec_state = self.services.get_config_param('CURRENT_VMEC_STATE')
    current_siesta_namelist = self.services.get_config_param('SIESTA_NAMELIST_INPUT')
    current_siesta_state = self.services.get_config_param('CURRENT_SIESTA_STATE')
    current_v3fit_namelist = self.services.get_config_param('V3FIT_NAMELIST_INPUT')
    current_v3fit_state = self.services.get_config_param('CURRENT_V3FIT_STATE')

    # Remove old inputs. Stage input files.
    for file in os.listdir('.'):
        os.remove(file)

    self.services.stage_input_files(self.INPUT_FILES)

    # All v3fit runs require at least a vmec state. Create a vmec state. If the
    # vmec namelist file exists, add the namelist input file.
    with ZipState.ZipState(current_vmec_state, 'a') as zip_ref:
        if os.path.exists(current_vmec_namelist):
            zip_ref.write(current_vmec_namelist)
            zip_ref.set_state(state='needs_update')

    # A siesta state is optional. If a siesta state or namelist exists, create a
    # siesta state. If the siesta namelist or vmec state files exist, add them
    # to the siesta state.
    if os.path.exists(current_siesta_state) or os.path.exists(current_siesta_namelist):
        with ZipState.ZipState(current_siesta_state, 'a') as zip_ref:
            if os.path.exists(current_siesta_namelist):
                zip_ref.write(current_siesta_namelist)
                zip_ref.set_state(state='needs_update')

            # The vmec state will be merged with any existing vmec state in the
            # siesta state.
            zip_ref.write(current_vmec_state)

    # Create state from files. Input files can either be a new state, a namelist
    # input file or both. If both files were staged, replace the namelist input
    # file. If the namelist file is present, flag the state as needing to be
    # updated.
    with ZipState.ZipState(current_v3fit_state, 'a') as zip_ref:
        if os.path.exists(current_v3fit_namelist):
            zip_ref.write(current_v3fit_namelist)
            zip_ref.set_state(state='needs_update')

        # If a siesta state exists at this point, add it to the archive.
        # Otherwise add the vmec state.
        if os.path.exists(current_siesta_state):
            zip_ref.write(current_siesta_state)
        else:
            zip_ref.write(current_vmec_state)

    self.services.update_state()
def init(self, timeStamp=0.0, **keywords):
    ScreenWriter.screen_output(self, 'verbose', 'vmec: init')

    self.current_vmec_namelist = self.services.get_config_param('VMEC_NAMELIST_INPUT')
    self.current_wout_file = 'wout_{}.nc'.format(
        self.current_vmec_namelist.replace('input.', '', 1))
    current_vmec_state = self.services.get_config_param('CURRENT_VMEC_STATE')

    # Stage state.
    self.services.stage_state()

    # Unzip files from the state. Use mode 'a' so files can be read from and
    # written to.
    self.zip_ref = ZipState.ZipState(current_vmec_state, 'a')
    self.zip_ref.extract(self.current_vmec_namelist)

    if len(keywords) > 0:
        self.zip_ref.set_state(state='needs_update')

    # Update parameters in the namelist.
    namelist = OMFITnamelist(
        self.current_vmec_namelist,
        collect_arrays={
            'ns_array':    {'default': 0, 'shape': (100,),     'offset': (1,),      'sparray': True},
            'niter_array': {'default': 0, 'shape': (100,),     'offset': (1,),      'sparray': True},
            'rbs':         {'default': 0, 'shape': (203, 101), 'offset': (-101, 0), 'sparray': True},
            'rbc':         {'default': 0, 'shape': (203, 101), 'offset': (-101, 0), 'sparray': True},
            'zbs':         {'default': 0, 'shape': (203, 101), 'offset': (-101, 0), 'sparray': True},
            'zbc':         {'default': 0, 'shape': (203, 101), 'offset': (-101, 0), 'sparray': True},
            'am':          {'default': 0, 'shape': (21,),      'offset': (0,),      'sparray': True},
            'ai':          {'default': 0, 'shape': (21,),      'offset': (0,),      'sparray': True},
            'ac':          {'default': 0, 'shape': (21,),      'offset': (0,),      'sparray': True},
            'am_aux_s':    {'default': 0, 'shape': (10001,),   'offset': (1,),      'sparray': True},
            'am_aux_f':    {'default': 0, 'shape': (10001,),   'offset': (1,),      'sparray': True},
            'ai_aux_s':    {'default': 0, 'shape': (10001,),   'offset': (1,),      'sparray': True},
            'ai_aux_f':    {'default': 0, 'shape': (10001,),   'offset': (1,),      'sparray': True},
            'ac_aux_s':    {'default': 0, 'shape': (10001,),   'offset': (1,),      'sparray': True},
            'ac_aux_f':    {'default': 0, 'shape': (10001,),   'offset': (1,),      'sparray': True},
            'raxis':       {'default': 0, 'shape': (102,),     'offset': (0,),      'sparray': True},
            'zaxis':       {'default': 0, 'shape': (102,),     'offset': (0,),      'sparray': True},
            'raxis_cc':    {'default': 0, 'shape': (102,),     'offset': (0,),      'sparray': True},
            'raxis_cs':    {'default': 0, 'shape': (102,),     'offset': (0,),      'sparray': True},
            'zaxis_cc':    {'default': 0, 'shape': (102,),     'offset': (0,),      'sparray': True},
            'zaxis_cs':    {'default': 0, 'shape': (102,),     'offset': (0,),      'sparray': True},
            'ftol_array':  {'default': 0, 'shape': (100,),     'offset': (1,),      'sparray': True},
            'extcur':      {'default': 0, 'shape': (300,),     'offset': (1,),      'sparray': True}
        })

    for key, value in keywords.items():
        NamelistItem.set(namelist['indata'], key, value)

    namelist.save()
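
# The component above applies keyword updates through the project's
# NamelistItem.set helper. For reference, a hedged sketch of a direct update,
# assuming OMFITnamelist exposes dict-style access to namelist groups (as the
# subscripting namelist['indata'] above and namelist['siesta_info'] later in
# this section suggest). The keys and values here are hypothetical examples.
def _sketch_update_indata(namelist_file):
    namelist = OMFITnamelist(namelist_file)
    namelist['indata']['phiedge'] = 0.5      # hypothetical scalar update
    namelist['indata']['curtor'] = -1.0e5    # hypothetical scalar update
    namelist.save()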
def init(self, timeStamp=0.0):
    ScreenWriter.screen_output(self, 'verbose', 'quasi_newton_driver: init')

    # Get config filenames.
    self.current_model_state = self.services.get_config_param('MODEL_INPUT')
    self.quasi_newton_config_file = self.services.get_config_param('QUASI_NEWTON_CONFIG')
    self.current_quasi_newton_state = self.services.get_config_param('CURRENT_QUASI_NEWTON_STATE')
    ips_model_config = self.services.get_config_param('MODEL_SIM_CONFIG')

    # Stage state and extract all files.
    self.services.stage_state()
    with ZipState.ZipState(self.current_quasi_newton_state, 'a') as zip_ref:
        zip_ref.extractall()

    # Load the quasi-newton json file.
    with open(self.quasi_newton_config_file, 'r') as config_file:
        quasi_newton_config = json.load(config_file)

    self.signal_sigma = numpy.absolute(numpy.array(quasi_newton_config['signal_sigma']))
    self.signal_observed = numpy.array(quasi_newton_config['signal_observed'])
    self.signal_weights = numpy.sqrt(numpy.array(quasi_newton_config['signal_weights']))
    self.dchi2_tol = quasi_newton_config['dchi2_tol']

    # Singular value step controls.
    # Cutoff value for relative singular values.
    self.cut_svd = quasi_newton_config.get('cut_svd', 0.0)
    # Cutoff value for expected step efficiency.
    self.cut_eff = quasi_newton_config.get('cut_eff', 0.0)
    # Cutoff value for expected marginal step efficiency.
    self.cut_marg_eff = quasi_newton_config.get('cut_marg_eff', 0.0)
    # Cutoff value for expected step size.
    self.cut_delta_a = quasi_newton_config.get('cut_delta_a', 0.0)
    # Cutoff value for expected change in g^2.
    self.cut_dg2 = quasi_newton_config.get('cut_dg2', 0.0)

    # Minimization controls.
    # Maximum step size.
    self.max_step = quasi_newton_config.get('max_step', 100.0)
    # Maximum number of reconstruction steps.
    self.max_recon_steps = quasi_newton_config.get('max_recon_steps', 20)
    # Maximum number of tries a step can take to reduce g^2.
    self.max_step_try = quasi_newton_config.get('max_step_try', 10)

    # Set keys for the subworkflows.
    keys = {
        'PWD': self.services.get_config_param('PWD'),
        'USER_INPUT_FILES': self.current_model_state,
        'OUTPUT_LEVEL': self.services.get_config_param('OUTPUT_LEVEL')
    }

    # Copy the model state to the input file staging directory. Since all the
    # model instances start from the same state, we only need this in one
    # place.
    if os.path.exists('model_inputs'):
        shutil.rmtree('model_inputs')
    os.mkdir('model_inputs')
    shutil.copy2(self.current_model_state, 'model_inputs')

    keywords = {}
    for i, param in enumerate(quasi_newton_config['params']):
        self.model_workers.append({
            'sim_name': None,
            'init': None,
            'driver': None,
            'result': '{}_result.json'.format(param['name']),
            'output': '{}_model_state.zip'.format(param['name']),
            'name': param['name'],
            'vrnc': param['vrnc'],
            'value': param['init'],
            'scale': (float(i) + 1.0)/float(len(quasi_newton_config['params']))
        })

        keys['SIM_NAME'] = param['name']
        keys['LOG_FILE'] = 'log.{}'.format(param['name'])
        keys['USER_OUTPUT_FILES'] = self.model_workers[i]['output']

        (self.model_workers[i]['sim_name'],
         self.model_workers[i]['init'],
         self.model_workers[i]['driver']) = self.services.create_sub_workflow(
            param['name'], ips_model_config, keys, 'model_inputs')

        self.model_workers[i]['wait'] = self.services.call_nonblocking(
            self.model_workers[i]['init'], 'init', timeStamp)

        keywords[param['name']] = param['init']

    # Run the initial convergence. Only one model needs to run, but the staging
    # expects values from each sub workflow, so we need to launch them all.
    for worker in self.model_workers:
        self.services.wait_call(worker['wait'], True)
        worker['wait'] = self.services.call_nonblocking(worker['driver'], 'init',
                                                        timeStamp, **keywords)

    for worker in self.model_workers:
        self.services.wait_call(worker['wait'], True)
        worker['wait'] = self.services.call_nonblocking(worker['driver'], 'step',
                                                        timeStamp,
                                                        result_file=worker['result'])

    for worker in self.model_workers:
        self.services.wait_call(worker['wait'], True)

    self.services.stage_subflow_output_files()

    with ZipState.ZipState(self.model_workers[0]['output'], 'a') as zip_ref:
        zip_ref.extract(self.model_workers[0]['result'])
        with open(self.model_workers[0]['result'], 'r') as result_file:
            self.e = self.get_e(result_file)

    # The initial model state may have changed, so reset it from the one of the
    # workers.
    os.rename(self.model_workers[0]['output'], self.current_model_state)
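
# A hypothetical example of the quasi-newton JSON configuration consumed by the
# init method above. Only keys that init actually reads are shown; every value
# is made up for illustration, and the parameter names are placeholders.
import json

def _sketch_write_example_config(path='quasi_newton_example.json'):
    example = {
        'signal_observed': [1.2, 0.8, 0.5],
        'signal_sigma': [0.1, 0.1, 0.05],
        'signal_weights': [1.0, 1.0, 1.0],
        'dchi2_tol': 0.1,
        'cut_svd': 1.0e-6,        # optional, defaults to 0.0
        'max_step': 100.0,        # optional, defaults to 100.0
        'max_recon_steps': 20,    # optional, defaults to 20
        'max_step_try': 10,       # optional, defaults to 10
        'params': [
            {'name': 'param_a', 'init': 1.0, 'vrnc': 0.1},
            {'name': 'param_b', 'init': 2.0, 'vrnc': 0.2}
        ]
    }
    with open(path, 'w') as config_file:
        json.dump(example, config_file, indent=4)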
def try_step(self, timeStamp=0.0):
    ScreenWriter.screen_output(self, 'verbose', 'quasi_newton_driver: try_step')

    self.k_use = self.get_k_svd()

    # Try different Levenberg-Marquardt step sizes.
    new_max = min(self.delta_a_len[self.k_use], self.max_step)

    step_use = numpy.empty(len(self.model_workers), dtype=float)
    delta_try = numpy.empty((len(self.model_workers), len(self.model_workers)), dtype=float)
    e_try = numpy.empty((len(self.model_workers), len(self.signal_observed)), dtype=float)
    chi2try = numpy.empty(len(self.model_workers), dtype=float)

    num_trys = 0
    while num_trys < self.max_step_try:
        num_trys += 1

        for i, worker in enumerate(self.model_workers):
            step_use[i] = new_max - i*new_max/(2.0*len(self.model_workers))
            delta_try[i] = self.lm_step(step_use[i])

            # Set new parameters.
            keywords = {}
            for j, worker2 in enumerate(self.model_workers):
                keywords[worker2['name']] = worker2['value'] + delta_try[i, j]*worker2['vrnc']

            self.services.wait_call(worker['wait'], True)
            worker['wait'] = self.services.call_nonblocking(worker['driver'], 'init',
                                                            timeStamp, **keywords)

        # Recompute the model.
        for worker in self.model_workers:
            self.services.wait_call(worker['wait'], True)
            worker['wait'] = self.services.call_nonblocking(worker['driver'], 'step',
                                                            timeStamp,
                                                            result_file=worker['result'])

        # Collect the results.
        for worker in self.model_workers:
            self.services.wait_call(worker['wait'], True)

        self.services.stage_subflow_output_files()

        # Compute chi^2 for each attempted step and keep the smallest.
        with open('chi.log', 'a') as chi_ref:
            chi_ref.write('Chi step {}\n'.format(timeStamp))
            for i, worker in enumerate(self.model_workers):
                with ZipState.ZipState(worker['output'], 'a') as zip_ref:
                    zip_ref.extract(worker['result'])
                    with open(worker['result'], 'r') as result_file:
                        e_try[i] = self.get_e(result_file)
                        chi2try[i] = numpy.dot(e_try[i], e_try[i])
                        chi_ref.write('chi2 = {} : '.format(chi2try[i]))
                        e_try[i].tofile(chi_ref, sep=',', format='%12.5e')
                        chi_ref.write('\n')
            chi_ref.write('\n')

        i_min = numpy.argmin(chi2try)

        if chi2try[i_min] <= self.chi2:
            # Chi^2 decreased. Set the best case to the current model.
            os.rename(self.model_workers[i_min]['output'], self.current_model_state)
            shutil.copy2(self.current_model_state, 'model_inputs')

            self.e = e_try[i_min]

            # Set the new parameter values.
            current_values = {}
            for i, worker in enumerate(self.model_workers):
                worker['value'] += delta_try[i_min, i]*worker['vrnc']
                current_values[worker['name']] = worker['value']

            # Dump current values to a json file. This can be used for
            # restarting a reconstruction.
            with open('current_values.json', 'w') as current_values_file:
                json.dump(current_values, current_values_file)

            self.norm_len = numpy.sqrt(numpy.dot(delta_try[i_min], delta_try[i_min]))

            return True
        else:
            # Cut the step size in half and reset the model.
            new_max /= 2.0
            for worker in self.model_workers:
                worker['wait'] = self.services.call_nonblocking(worker['init'], 'init',
                                                                timeStamp)

    return False
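
# try_step asks lm_step for a parameter delta whose length matches a requested
# step size, with the number of retained singular values presumably chosen by
# get_k_svd from the cutoffs loaded in init. As background only, a generic
# sketch of one way a truncated-SVD step of a given length can be formed; this
# is not the driver's lm_step, and its conventions may differ (the driver, for
# instance, stores its Jacobian transposed).
import numpy

def _sketch_svd_step(jacobian, e, k_use, step_len):
    """jacobian: de/da with rows = signals, columns = normalized parameters;
    e: error vector; k_use: number of singular values to keep; step_len:
    requested step length in normalized parameter space."""
    u, s, vt = numpy.linalg.svd(jacobian, full_matrices=False)

    # Pseudo-inverse step using the first k_use singular values.
    coef = numpy.zeros(len(s))
    coef[:k_use] = numpy.dot(u.T, e)[:k_use]/s[:k_use]
    delta_a = numpy.dot(vt.T, coef)

    # Limit the step to the requested length, mimicking a trust-region cap.
    norm = numpy.sqrt(numpy.dot(delta_a, delta_a))
    if norm > step_len:
        delta_a *= step_len/norm
    return delta_a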
def init(self, timeStamp=0.0):
    ScreenWriter.screen_output(self, 'verbose', 'solps_iter_init: init')

    # Get config filenames.
    current_solps_state = self.services.get_config_param('CURRENT_SOLPS_STATE')
    eirene_input_dat = self.services.get_config_param('EIRENE_INPUT_DAT')
    eirene_nodes = self.services.get_config_param('EIRENE_NODES')
    eirene_cells = self.services.get_config_param('EIRENE_CELLS')
    eirene_links = self.services.get_config_param('EIRENE_LINKS')

    # Remove old inputs. Stage input files.
    for file in os.listdir('.'):
        os.remove(file)

    self.services.stage_input_files(self.INPUT_FILES)

    # Create state zip file.
    with ZipState.ZipState(current_solps_state, 'a') as zip_ref:
        # b2 and namelist files.
        for file_name in ('b2fgmtry', 'b2fpardf', 'b2frates', 'b2fstati',
                          'b2mn.dat', 'b2.transport.parameters',
                          'b2.numerics.parameters', 'b2.neutrals.parameters',
                          'b2.boundary.parameters', 'b2.transport.inputfile'):
            if os.path.exists(file_name):
                zip_ref.write(file_name)
                zip_ref.set_state(state='needs_update')

        # eirene files. The eirene input files need to be renamed to the fort.*
        # names eirene expects.
        for src, dst in ((eirene_input_dat, 'fort.1'),
                         (eirene_nodes, 'fort.33'),
                         (eirene_cells, 'fort.34'),
                         (eirene_links, 'fort.35')):
            if os.path.exists(src):
                os.rename(src, dst)
                zip_ref.write(dst)
                zip_ref.set_state(state='needs_update')

    self.services.update_state()

#-------------------------------------------------------------------------------
#
#  SOLPS-ITER_init Component step method. Not used.
#
#-------------------------------------------------------------------------------
def step(self, timeStamp=0.0):
    ScreenWriter.screen_output(self, 'verbose', 'solps_iter_init: step')

#-------------------------------------------------------------------------------
#
#  SOLPS-ITER_init Component finalize method. This cleans up afterwards.
#  Not used.
#
#-------------------------------------------------------------------------------
def finalize(self, timeStamp=0.0):
    ScreenWriter.screen_output(self, 'verbose', 'solps_iter_init: finalize')
def init(self, timeStamp=0.0, **keywords):
    ScreenWriter.screen_output(self, 'verbose', 'siesta_driver: init')

    # Separate out the siesta and vmec keywords.
    siesta_keywords = {}
    vmec_keywords = {}
    for key, value in keywords.items():
        if 'vmec__' in key:
            vmec_keywords[key] = value
        if 'siesta__' in key:
            siesta_keywords[key.replace('siesta__', '', 1)] = value

    # Get config filenames.
    current_vmec_state = self.services.get_config_param('CURRENT_VMEC_STATE')
    self.current_siesta_state = self.services.get_config_param('CURRENT_SIESTA_STATE')

    # We need to pass the inputs to the VMEC child workflow.
    self.services.stage_state()

    zip_ref = ZipState.ZipState(self.current_siesta_state, 'a')
    zip_ref.extract(current_vmec_state)

    # If this is the first call, set up the VMEC sub workflow.
    if timeStamp == 0.0:
        # Get the siesta port.
        self.siesta_port = self.services.get_port('SIESTA')

        # Get keys for the sub workflow.
        keys = {
            'PWD': self.services.get_config_param('PWD'),
            'SIM_NAME': '{}_vmec'.format(self.services.get_config_param('SIM_NAME')),
            'USER_INPUT_FILES': current_vmec_state,
            'LOG_FILE': 'log.vmec.warning',
            'OUTPUT_LEVEL': self.services.get_config_param('OUTPUT_LEVEL')
        }

        if os.path.exists('vmec_input_dir'):
            shutil.rmtree('vmec_input_dir')
        os.mkdir('vmec_input_dir')

        vmec_config = self.services.get_config_param('VMEC_CONFIG')

        self.vmec_worker = {'sim_name': None, 'init': None, 'driver': None}
        (self.vmec_worker['sim_name'],
         self.vmec_worker['init'],
         self.vmec_worker['driver']) = self.services.create_sub_workflow(
            'vmec', vmec_config, keys, 'vmec_input_dir')

    shutil.copy2(current_vmec_state, 'vmec_input_dir')

    # Initialize and run VMEC. Replace values in the siesta state.
    self.services.call(self.vmec_worker['init'], 'init', timeStamp)
    self.services.call(self.vmec_worker['driver'], 'init', timeStamp, **vmec_keywords)
    self.services.call(self.vmec_worker['driver'], 'step', timeStamp)

    # After VMEC has run, update the VMEC state.
    self.services.stage_subflow_output_files()
    zip_ref.write(current_vmec_state)
    zip_ref.close()
    self.services.update_state()

    # Initialize SIESTA.
    self.wait = self.services.call_nonblocking(self.siesta_port, 'init',
                                               timeStamp, **siesta_keywords)
def init(self, timeStamp=0.0, **keywords):
    ScreenWriter.screen_output(self, 'verbose', 'ml_gen_data: init')

    # Perform the initial setup.
    current_ml_model_state = ''
    if timeStamp == 0.0:
        os.environ['PWD'] = os.getcwd()

        self.current_ml_train_state = self.services.get_config_param('CURRENT_ML_TRAIN_STATE')
        self.current_ml_train_data = self.services.get_config_param('CURRENT_ML_TRAIN_DATA')
        self.current_ml_train_new_data = self.services.get_config_param('CURRENT_ML_TRAIN_NEW_DATA')
        current_ml_model_state = self.services.get_config_param('CURRENT_ML_MODEL_STATE')
        self.model_sim_config = self.services.get_config_param('MODEL_SIM_CONFIG')

        if not os.path.exists('inputs'):
            os.makedirs('inputs')

    # Stage state.
    self.services.stage_state()

    # Unzip files from the current state. Use mode 'a' so files can be read
    # from and written to.
    self.zip_ref = ZipState.ZipState(self.current_ml_train_state, 'a')

    if self.current_ml_train_new_data in self.zip_ref:
        self.zip_ref.set_state(state='needs_update')
        self.zip_ref.extract(self.current_ml_train_new_data)

        with open(self.current_ml_train_new_data, 'r') as new_data_ref:
            new_data = json.load(new_data_ref)

        with open('massive_input', 'w') as massive_ref:
            for param in keywords['input_params']:
                massive_ref.write('{prefix}:{name}:{type} '.format(**param))
            massive_ref.write('\n')

            permutation_size = min(keywords['batch_size'],
                                   len(new_data[keywords['input_params'][0]['name']]))
            permutation = numpy.random.permutation(permutation_size)

            for i in permutation:
                for param in keywords['input_params']:
                    massive_ref.write('{} '.format(new_data[param['name']][i]))
                massive_ref.write('\n')

    elif self.current_ml_train_data in self.zip_ref:
        self.zip_ref.set_state(state='updated')

    else:
        self.zip_ref.set_state(state='needs_update')

        with open('massive_input', 'w') as massive_ref:
            for param in keywords['input_params']:
                massive_ref.write('{prefix}:{name}:{type} '.format(**param))
            massive_ref.write('\n')

            for i in range(keywords['batch_size']):
                for param in keywords['input_params']:
                    massive_ref.write('{} '.format(random.uniform(param['lower_range'],
                                                                  param['upper_range'])))
                massive_ref.write('\n')

    self.zip_ref.extract(current_ml_model_state)

    if keywords['extract'] and timeStamp == 0.0:
        with ZipState.ZipState(current_ml_model_state, 'a') as model_ref:
            model_ref.extractall('inputs')
    elif timeStamp == 0.0:
        os.rename(current_ml_model_state, 'inputs/{}'.format(current_ml_model_state))
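
# Shape of the 'massive_input' file written above, for a hypothetical pair of
# input parameters (prefixes, names and values are made up):
#
#   indata:param_a:float indata:param_b:float
#   0.123 4.56
#   0.789 1.23
#
# First line: one 'prefix:name:type' token per parameter. Following lines: one
# sampled value per parameter, one row per batch entry (drawn from the new
# training data when it exists, otherwise uniformly from each parameter's
# lower_range/upper_range).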
def init(self, timeStamp=0.0, **keywords):
    ScreenWriter.screen_output(self, 'verbose', 'v3fit_driver: init')

    # Separate out the siesta, vmec and v3fit keywords.
    eq_keywords = {}
    v3fit_keywords = {}
    for key, value in keywords.items():
        if 'vmec__' in key or 'siesta__' in key:
            eq_keywords[key] = value
        if 'v3fit__' in key:
            v3fit_keywords[key.replace('v3fit__', '', 1)] = value

    # Get config filenames.
    current_vmec_state = self.services.get_config_param('CURRENT_VMEC_STATE')
    current_siesta_state = self.services.get_config_param('CURRENT_SIESTA_STATE')
    self.current_v3fit_state = self.services.get_config_param('CURRENT_V3FIT_STATE')

    # We need to pass the inputs to the SIESTA or VMEC child workflow.
    self.services.stage_state()

    zip_ref = ZipState.ZipState(self.current_v3fit_state, 'a')

    # If this is the first call, set up the VMEC or SIESTA sub workflow.
    if timeStamp == 0.0:
        if os.path.exists('eq_input_dir'):
            shutil.rmtree('eq_input_dir')
        os.mkdir('eq_input_dir')

        self.v3fit_port = self.services.get_port('V3FIT')

        if current_siesta_state in zip_ref:
            # Get keys for the SIESTA sub workflow.
            keys = {
                'PWD': self.services.get_config_param('PWD'),
                'USER_INPUT_FILES': current_siesta_state,
                'SIM_NAME': '{}_siesta'.format(self.services.get_config_param('SIM_NAME')),
                'LOG_FILE': 'log.{}_siesta.warning'.format(self.services.get_config_param('SIM_NAME')),
                'OUTPUT_LEVEL': self.services.get_config_param('OUTPUT_LEVEL')
            }

            siesta_config = self.services.get_config_param('SIESTA_CONFIG')

            (self.eq_worker['sim_name'],
             self.eq_worker['init'],
             self.eq_worker['driver']) = self.services.create_sub_workflow(
                'siesta', siesta_config, keys, 'eq_input_dir')
        else:
            # Get keys for the VMEC sub workflow.
            keys = {
                'PWD': self.services.get_config_param('PWD'),
                'USER_INPUT_FILES': current_vmec_state,
                'SIM_NAME': '{}_vmec'.format(self.services.get_config_param('SIM_NAME')),
                'LOG_FILE': 'log.{}_vmec.warning'.format(self.services.get_config_param('SIM_NAME')),
                'OUTPUT_LEVEL': self.services.get_config_param('OUTPUT_LEVEL')
            }

            vmec_config = self.services.get_config_param('VMEC_CONFIG')

            (self.eq_worker['sim_name'],
             self.eq_worker['init'],
             self.eq_worker['driver']) = self.services.create_sub_workflow(
                'vmec', vmec_config, keys, 'eq_input_dir')

    # Copy new subworkflow inputs to the input directory.
    if current_siesta_state in zip_ref:
        zip_ref.extract(current_siesta_state)
        shutil.copy2(current_siesta_state, 'eq_input_dir')
    else:
        zip_ref.extract(current_vmec_state)
        shutil.copy2(current_vmec_state, 'eq_input_dir')

    # Initialize and run the equilibrium. Replace values in the V3FIT state.
    self.services.call(self.eq_worker['init'], 'init', timeStamp)
    self.services.call(self.eq_worker['driver'], 'init', timeStamp, **eq_keywords)
    self.services.call(self.eq_worker['driver'], 'step', timeStamp)

    # After the equilibrium has run, update the state.
    self.services.stage_subflow_output_files()

    if current_siesta_state in zip_ref:
        zip_ref.write(current_siesta_state)
    else:
        zip_ref.write(current_vmec_state)

    zip_ref.close()
    self.services.update_state()

    # Initialize V3FIT.
    self.wait = self.services.call_nonblocking(self.v3fit_port, 'init',
                                               timeStamp, **v3fit_keywords)
def init(self, timeStamp=0.0, **keywords):
    ScreenWriter.screen_output(self, 'verbose', 'v3fit: init')

    # Get config filenames.
    self.current_v3fit_namelist = self.services.get_config_param('V3FIT_NAMELIST_INPUT')
    self.current_v3fit_state = self.services.get_config_param('CURRENT_V3FIT_STATE')
    self.result_file = 'result.{}.nc'.format(self.current_v3fit_namelist)

    current_siesta_namelist = self.services.get_config_param('SIESTA_NAMELIST_INPUT')
    current_siesta_state = self.services.get_config_param('CURRENT_SIESTA_STATE')

    current_vmec_namelist = self.services.get_config_param('VMEC_NAMELIST_INPUT')
    current_vmec_state = self.services.get_config_param('CURRENT_VMEC_STATE')
    current_wout_file = 'wout_{}.nc'.format(current_vmec_namelist.replace('input.', '', 1))

    # Stage state.
    self.services.stage_state()

    # Unzip files from the state. Use mode 'a' so files can be read from and
    # written to.
    self.zip_ref = ZipState.ZipState(self.current_v3fit_state, 'a')
    self.zip_ref.extract(self.current_v3fit_namelist)

    if self.result_file in self.zip_ref:
        self.zip_ref.extract(self.result_file)

    if current_siesta_state in self.zip_ref:
        self.zip_ref.extract(current_siesta_state)

        with ZipState.ZipState(current_siesta_state, 'r') as siesta_zip_ref:
            siesta_zip_ref.extract(current_siesta_namelist)
            namelist = OMFITnamelist(current_siesta_namelist)
            current_restart_file = 'siesta_{}.nc'.format(namelist['siesta_info']['restart_ext'])
            siesta_zip_ref.extract(current_restart_file)

            flags = siesta_zip_ref.get_state()
            if 'state' in flags and flags['state'] == 'updated':
                self.zip_ref.set_state(state='needs_update')

            siesta_zip_ref.extract(current_vmec_state)
            with ZipState.ZipState(current_vmec_state, 'r') as vmec_zip_ref:
                vmec_zip_ref.extract(current_wout_file)

                flags = vmec_zip_ref.get_state()
                if 'state' in flags and flags['state'] == 'updated':
                    self.zip_ref.set_state(state='needs_update')

        keywords['siesta_nli_filename'] = current_siesta_namelist
        keywords['siesta_restart_filename'] = current_restart_file
        keywords['vmec_nli_filename'] = current_vmec_namelist
        keywords['vmec_wout_input'] = current_wout_file
        keywords['model_eq_type'] = 'siesta'
    else:
        self.zip_ref.extract(current_vmec_state)

        with ZipState.ZipState(current_vmec_state, 'r') as vmec_zip_ref:
            vmec_zip_ref.extract(current_wout_file)
            vmec_zip_ref.extract(current_vmec_namelist)

            flags = vmec_zip_ref.get_state()
            if 'state' in flags and flags['state'] == 'updated':
                self.zip_ref.set_state(state='needs_update')

        keywords['vmec_nli_filename'] = current_vmec_namelist
        keywords['vmec_wout_input'] = current_wout_file
        keywords['model_eq_type'] = 'vmec'

    # Update parameters in the namelist.
    self.set_namelist(**keywords)