Example #1
    def init(self, timeStamp=0.0, **keywords):
        ScreenWriter.screen_output(self, 'verbose', 'siesta: init')

        self.current_siesta_namelist = self.services.get_config_param(
            'SIESTA_NAMELIST_INPUT')
        self.current_siesta_state = self.services.get_config_param(
            'CURRENT_SIESTA_STATE')
        current_vmec_state = self.services.get_config_param(
            'CURRENT_VMEC_STATE')
        current_vmec_namelist = self.services.get_config_param(
            'VMEC_NAMELIST_INPUT')
        current_wout_file = 'wout_{}.nc'.format(
            current_vmec_namelist.replace('input.', '', 1))

        #  Stage state.
        self.services.stage_state()

        #  Unzip files from the state. Use mode a so files can be read and written to.
        self.zip_ref = ZipState.ZipState(self.current_siesta_state, 'a')
        self.zip_ref.extract(self.current_siesta_namelist)
        self.zip_ref.extract(current_vmec_state)

        with ZipState.ZipState(current_vmec_state, 'r') as vmec_zip_ref:
            vmec_zip_ref.extract(current_wout_file)
            flags = vmec_zip_ref.get_state()
            if 'state' in flags and flags['state'] == 'updated':
                self.zip_ref.set_state(state='needs_update')

#  Update parameters in the namelist.
        self.set_namelist(wout_file=current_wout_file, **keywords)
Example #2
    def get_magnetic_axis(self):
        with ZipState.ZipState(self.current_v3fit_state, 'r') as v3fit_ref:
            if self.current_siesta_state in v3fit_ref:
                v3fit_ref.extract(self.current_siesta_state)
                with ZipState.ZipState(self.current_siesta_state,
                                       'r') as siesta_ref:
                    siesta_ref.extract(self.current_vmec_state)
            else:
                v3fit_ref.extract(self.current_vmec_state)

        with ZipState.ZipState(self.current_vmec_state, 'r') as vmec_ref:
            vmec_ref.extract(self.current_wout_file)

        return netCDF4.Dataset(self.current_wout_file).variables['rmnc'][0, 0]
Example #3
    def init(self, timeStamp=0.0):
        ScreenWriter.screen_output(self, 'verbose', 'ml_train_init: init')

#  Get config filenames.
        if timeStamp == 0.0:
            self.current_ml_train_state = self.services.get_config_param('CURRENT_ML_TRAIN_STATE')

            self.data_gen_config = self.services.get_config_param('DATA_GEN_CONFIG')
            self.data_gen_state = self.services.get_config_param('DATA_GEN_STATE')

            self.training_data = self.services.get_config_param('TRAINING_DATA')
            self.new_data = self.services.get_config_param('NEW_DATA')
            self.prediction_data = self.services.get_config_param('PREDICTION_DATA')

            self.nn_model_config = self.services.get_config_param('NN_MODEL_CONFIG')
            self.nn_model_matrix = self.services.get_config_param('NN_MODEL_MATRIX')
            self.nn_model = self.services.get_config_param('NN_MODEL')
            self.ml_train_args = self.services.get_config_param('ML_TRAIN_ARGS')

#  Remove old inputs. Stage input files.
        for file in os.listdir('.'):
            os.remove(file)

#  Stage input files and set up the initial state.
        self.services.stage_input_files(self.INPUT_FILES)

#  Create the plasma state from files. Input files can either be a new plasma
#  state, a training data file, or both. If both files were staged, replace the
#  training data input file. If the training data file is present, flag the
#  plasma state as needing to be updated.
        with ZipState.ZipState(self.current_ml_train_state, 'a') as zip_ref:
            zip_ref.write_or_check(self.data_gen_config)
            zip_ref.write_or_check(self.data_gen_state)

            zip_ref.write_optional(self.training_data)
            zip_ref.write_optional(self.new_data)
            zip_ref.write_optional(self.prediction_data)

            zip_ref.write_or_check(self.nn_model_config)
            zip_ref.write_optional(self.nn_model_matrix)

            if os.path.exists(self.nn_model):
                with ZipState.ZipState('{}.zip'.format(self.nn_model), 'w') as nn_ref:
                    nn_ref.write(self.nn_model)
            zip_ref.write_optional('{}.zip'.format(self.nn_model))
            zip_ref.write_or_check(self.ml_train_args)

            zip_ref.set_state(state='needs_update')

        self.services.update_state()
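
#  The write_or_check() and write_optional() helpers used above come from the
#  project's ZipState module, whose implementation is not shown in this
#  listing. Below is a minimal sketch of the assumed semantics, written as
#  stand-alone stand-ins rather than the real ZipState methods: write_optional()
#  adds a file to the zip state only when it exists on disk, while
#  write_or_check() additionally requires the file to be available either on
#  disk or already inside the state.
import os

def write_optional(zip_ref, file_name):
    #  Only add the file when it was staged as an input file.
    if os.path.exists(file_name):
        zip_ref.write(file_name)

def write_or_check(zip_ref, file_name):
    #  Add the file when it was staged; otherwise it must already be in the state.
    if os.path.exists(file_name):
        zip_ref.write(file_name)
    elif file_name not in zip_ref:
        raise Exception('{} was not staged and is not in the state.'.format(file_name))
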
Example #4
    def get_updated_substate(self, timeStamp=0.0):
        self.services.stage_subflow_output_files()

        with ZipState.ZipState(self.current_v3fit_state, 'r') as v3fit_ref:
            flags = v3fit_ref.get_state()

        if 'state' in flags and flags['state'] != 'unchanged':
            self.zip_ref.write(self.current_v3fit_state)
            self.zip_ref.set_state(state='updated')
            self.zip_ref.close()
            self.services.update_state()
            self.services.stage_output_files(timeStamp,
                                             self.current_cariddi_state)
            self.zip_ref = ZipState.ZipState(self.current_cariddi_state, 'a')
Example #5
    def step(self, timeStamp=0.0):
        ScreenWriter.screen_output(self, 'verbose', 'ml_train_driver: step')

        with ZipState.ZipState(self.current_ml_train_state, 'r') as zip_ref:
            flags = zip_ref.get_state()

#  Adaptive training loop. Hard code the number of iterations for now.
        for i in range(self.max_iterations):

#  Train the NN model.
            self.services.call(self.ml_train_port, 'init', timeStamp)
            self.services.call(self.ml_train_port, 'step', timeStamp)

#  Get the new data batch and add it to the gen data state.
            self.services.stage_state()
            with ZipState.ZipState(self.current_ml_train_state, 'a') as zip_ref:
                zip_ref.extract_or_check(self.new_data)
                zip_ref.extract_or_check(self.training_data)

                with ZipState.ZipState(self.data_gen_state, 'a') as model_state_ref:
                    model_state_ref.write(self.new_data)

#  Generate new training data. Advance the time stamp since the first data
#  batch was generated in the init method.
                timeStamp = timeStamp + 1.0
                shutil.copy2(self.data_gen_state, 'data_gen_input_dir')
                self.services.call(self.data_gen['init'], 'init', timeStamp)
                self.services.call(self.data_gen['driver'], 'init', timeStamp)
                self.services.call(self.data_gen['driver'], 'step', timeStamp)

                self.services.stage_subflow_output_files()

#  Append the new data to the training data.
                with ZipState.ZipState(self.data_gen_state, 'r') as model_state_ref:
                    model_state_ref.extract_or_check(self.new_data)

                if os.path.exists(self.training_data):
                    self.append_data()
                else:
                    os.rename(self.new_data, self.training_data)
                zip_ref.write(self.training_data)

                zip_ref.set_state(state='needs_update')
                flags = zip_ref.get_state()

            self.services.update_state()
Example #6
    def init(self, timeStamp=0.0, **keywords):
        ScreenWriter.screen_output(self, 'verbose', 'ml_train: init')

        if timeStamp == 0.0:
            self.current_ml_train_state = self.services.get_config_param(
                'CURRENT_ML_TRAIN_STATE')
            self.training_data = self.services.get_config_param(
                'TRAINING_DATA')
            self.new_data = self.services.get_config_param('NEW_DATA')
            self.prediction_data = self.services.get_config_param(
                'PREDICTION_DATA')

            self.nn_model_config = self.services.get_config_param(
                'NN_MODEL_CONFIG')
            self.nn_model_matrix = self.services.get_config_param(
                'NN_MODEL_MATRIX')
            self.nn_model = self.services.get_config_param('NN_MODEL')
            self.batch_size = self.services.get_config_param('BATCH_SIZE')

            self.constraint_path = self.services.get_config_param(
                'MODULE_PATH')
            self.constraint_name = self.services.get_config_param(
                'MODULE_NAME')

#  Stage state.
        self.services.stage_state()

        #  Unzip files from the current state. Use mode a so files can be read and
        #  written to.
        self.zip_ref = ZipState.ZipState(self.current_ml_train_state, 'a')

        if timeStamp == 0.0:
            ml_train_args = self.services.get_config_param('ML_TRAIN_ARGS')
            self.zip_ref.extract_or_check(ml_train_args)
            self.zip_ref.extract_or_check(self.nn_model_config)

            with open(ml_train_args, 'r') as args_ref:
                self.task_args = json.load(args_ref)

        self.zip_ref.extract_or_check(self.training_data)
        self.zip_ref.extract_optional('{}.zip'.format(self.nn_model))

        if os.path.exists('{}.zip'.format(self.nn_model)):
            with ZipState.ZipState('{}.zip'.format(self.nn_model),
                                   'r') as nn_ref:
                nn_ref.extractall()
Example #7
    def init(self, timeStamp=0.0):
        ScreenWriter.screen_output(self, 'verbose', 'cariddi: init')

#  Stage state.
        self.services.stage_state()

#  Get config filenames.
        if timeStamp == 0.0:
            self.current_cariddi_matrix = self.services.get_config_param('CARIDDI_MATRIX_FILE')
            self.current_cariddi_geometry = self.services.get_config_param('CARIDDI_GEOMETRY_FILE')
            self.cariddi_matrix_path = self.services.get_config_param('CARIDDI_MATRIX_PATH')
            self.current_cariddi_state = self.services.get_config_param('CURRENT_CARIDDI_STATE')

            self.current_v3fit_state = self.services.get_config_param('CURRENT_V3FIT_STATE')

            self.current_siesta_state = self.services.get_config_param('CURRENT_SIESTA_STATE')

            self.current_vmec_state = self.services.get_config_param('CURRENT_VMEC_STATE')
            current_vmec_namelist = self.services.get_config_param('VMEC_NAMELIST_INPUT')
            self.current_wout_file = 'wout_{}.nc'.format(current_vmec_namelist.replace('input.','',1))

            self.mgrid_file = self.services.get_config_param('MGRID_FILE')
            self.current_vmec_profile = self.services.get_config_param('CURRENT_VMEC_PROFILE')

            self.zip_ref = ZipState.ZipState(self.current_cariddi_state, 'a')
            if 'A1.nc' in self.zip_ref:
                self.zip_ref.extract('A1.nc')
        else:
            self.zip_ref = ZipState.ZipState(self.current_cariddi_state, 'a')

# Extract input files.
        self.flags = self.zip_ref.get_state()

        if 'state' in self.flags and self.flags['state'] != 'unchanged':
            self.zip_ref.extract(self.current_v3fit_state)

            with ZipState.ZipState(self.current_v3fit_state, 'r') as v3fit_zip_ref:
                if self.current_siesta_state in v3fit_zip_ref:
                    v3fit_zip_ref.extract(self.current_siesta_state)
                    with ZipState.ZipState(self.current_siesta_state, 'r') as siesta_zip_ref:
                        siesta_zip_ref.extract(self.current_vmec_state)
                else:
                    v3fit_zip_ref.extract(self.current_vmec_state)

            with ZipState.ZipState(self.current_vmec_state, 'r') as vmec_zip_ref:
                if self.current_wout_file in vmec_zip_ref:
                    vmec_zip_ref.extract(self.current_wout_file)
Example #8
    def eval_jacobian(self, timeStamp=0.0):
        ScreenWriter.screen_output(self, 'verbose',
                                   'quasi_newton_driver: eval_jacobian')

        #  Set the model to a known state.
        shutil.copy2(self.current_model_state, 'model_inputs')
        for worker in self.model_workers:
            worker['wait'] = self.services.call_nonblocking(
                worker['init'], 'init', timeStamp)

#  Perturb the parameters.
        for worker in self.model_workers:
            keywords = {worker['name']: worker['value'] + worker['vrnc']}
            self.services.wait_call(worker['wait'], True)
            worker['wait'] = self.services.call_nonblocking(
                worker['driver'], 'init', timeStamp, **keywords)

#  Recompute the model.
        for worker in self.model_workers:
            self.services.wait_call(worker['wait'], True)
            worker['wait'] = self.services.call_nonblocking(
                worker['driver'],
                'step',
                timeStamp,
                result_file=worker['result'])

#  Collect the results.
        for worker in self.model_workers:
            self.services.wait_call(worker['wait'], True)
            worker['wait'] = self.services.call_nonblocking(
                worker['init'], 'init', timeStamp)

        self.services.stage_subflow_output_files()

        #  Compute the normalized jacobian A.
        #
        #    A_ij = d e_i/d a_j                                                      (1)
        #
        #  Where e is the error vector.
        #
        #    e_i = W_i*((S_i - M_i)/sigma_i)^2                                       (2)
        #
        #  Note: due to how the memory is laid out, the Jacobian is transposed.
        for i, worker in enumerate(self.model_workers):
            with ZipState.ZipState(worker['output'], 'a') as zip_ref:
                zip_ref.extract(worker['result'])
                with open(worker['result'], 'r') as result_file:
                    self.jacobian[i] = self.e - self.get_e(result_file)

        with open('jacobian.log', 'a') as jacobian_ref:
            jacobian_ref.write('Jacobian step {}\n'.format(timeStamp))
            for j in range(len(self.e)):
                self.jacobian[:, j].tofile(jacobian_ref,
                                           sep=',',
                                           format='%12.5e')
                jacobian_ref.write('\n')
            jacobian_ref.write('\n')
Example #9
    def init(self, timeStamp=0.0):
        ScreenWriter.screen_output(self, 'verbose', 'siesta_init: init')

        #  Get config filenames.
        current_vmec_namelist = self.services.get_config_param(
            'VMEC_NAMELIST_INPUT')
        current_vmec_state = self.services.get_config_param(
            'CURRENT_VMEC_STATE')
        current_siesta_namelist = self.services.get_config_param(
            'SIESTA_NAMELIST_INPUT')
        current_siesta_state = self.services.get_config_param(
            'CURRENT_SIESTA_STATE')

        #  Remove old inputs. Stage input files.
        for file in os.listdir('.'):
            os.remove(file)

        self.services.stage_input_files(self.INPUT_FILES)

        #  Create a vmec state. If the vmec namelist file exists, add the namelist
        #  input file.
        with ZipState.ZipState(current_vmec_state, 'a') as zip_ref:
            if os.path.exists(current_vmec_namelist):
                zip_ref.write(current_vmec_namelist)
                zip_ref.set_state(state='needs_update')

#  Create state from files. Input files can either be a new state, a namelist
#  input file, or both. If both files were staged, replace the namelist input
#  file. If the namelist file is present, flag the state as needing to be
#  updated. Sub states will automatically merge.
        with ZipState.ZipState(current_siesta_state, 'a') as zip_ref:
            if os.path.exists(current_siesta_namelist):
                zip_ref.write(current_siesta_namelist)
                zip_ref.set_state(state='needs_update')

#  The vmec state will be merged with any existing vmec state in the siesta
#  state.
            zip_ref.write(current_vmec_state)

        self.services.update_state()
Example #10
    def init(self, timeStamp=0.0, **keywords):
        ScreenWriter.screen_output(self, 'verbose', 'vmec: init')

        self.current_vmec_namelist = self.services.get_config_param('VMEC_NAMELIST_INPUT')
        self.current_wout_file = 'wout_{}.nc'.format(self.current_vmec_namelist.replace('input.','',1))
        current_vmec_state = self.services.get_config_param('CURRENT_VMEC_STATE')
        
#  Stage state.
        self.services.stage_state()

#  Unzip files from the state. Use mode a so files can be read and written to.
        self.zip_ref = ZipState.ZipState(current_vmec_state, 'a')
        self.zip_ref.extract(self.current_vmec_namelist)

        if len(keywords) > 0:
            self.zip_ref.set_state(state='needs_update')
        
#  Update parameters in the namelist.
            namelist = OMFITnamelist(self.current_vmec_namelist,
                                     collect_arrays={
                                     'ns_array'    : {'default' : 0, 'shape' : (100,),    'offset' : (1,),     'sparray' : True},
                                     'niter_array' : {'default' : 0, 'shape' : (100,),    'offset' : (1,),     'sparray' : True},
                                     'rbs'         : {'default' : 0, 'shape' : (203,101), 'offset' : (-101,0), 'sparray' : True},
                                     'rbc'         : {'default' : 0, 'shape' : (203,101), 'offset' : (-101,0), 'sparray' : True},
                                     'zbs'         : {'default' : 0, 'shape' : (203,101), 'offset' : (-101,0), 'sparray' : True},
                                     'zbc'         : {'default' : 0, 'shape' : (203,101), 'offset' : (-101,0), 'sparray' : True},
                                     'am'          : {'default' : 0, 'shape' : (21,),     'offset' : (0,),     'sparray' : True},
                                     'ai'          : {'default' : 0, 'shape' : (21,),     'offset' : (0,),     'sparray' : True},
                                     'ac'          : {'default' : 0, 'shape' : (21,),     'offset' : (0,),     'sparray' : True},
                                     'am_aux_s'    : {'default' : 0, 'shape' : (10001,),  'offset' : (1,),     'sparray' : True},
                                     'am_aux_f'    : {'default' : 0, 'shape' : (10001,),  'offset' : (1,),     'sparray' : True},
                                     'ai_aux_s'    : {'default' : 0, 'shape' : (10001,),  'offset' : (1,),     'sparray' : True},
                                     'ai_aux_f'    : {'default' : 0, 'shape' : (10001,),  'offset' : (1,),     'sparray' : True},
                                     'ac_aux_s'    : {'default' : 0, 'shape' : (10001,),  'offset' : (1,),     'sparray' : True},
                                     'ac_aux_f'    : {'default' : 0, 'shape' : (10001,),  'offset' : (1,),     'sparray' : True},
                                     'raxis'       : {'default' : 0, 'shape' : (102,),    'offset' : (0,),     'sparray' : True},
                                     'zaxis'       : {'default' : 0, 'shape' : (102,),    'offset' : (0,),     'sparray' : True},
                                     'raxis_cc'    : {'default' : 0, 'shape' : (102,),    'offset' : (0,),     'sparray' : True},
                                     'raxis_cs'    : {'default' : 0, 'shape' : (102,),    'offset' : (0,),     'sparray' : True},
                                     'zaxis_cc'    : {'default' : 0, 'shape' : (102,),    'offset' : (0,),     'sparray' : True},
                                     'zaxis_cs'    : {'default' : 0, 'shape' : (102,),    'offset' : (0,),     'sparray' : True},
                                     'ftol_array'  : {'default' : 0, 'shape' : (100,),    'offset' : (1,),     'sparray' : True},
                                     'extcur'      : {'default' : 0, 'shape' : (300,),    'offset' : (1,),     'sparray' : True}
                                     })

            for key, value in keywords.items():
                NamelistItem.set(namelist['indata'], key, value)

            namelist.save()
Example #11
    def step(self, timeStamp=0.0, **keywords):
        ScreenWriter.screen_output(self, 'verbose', 'v3fit: step')

        flags = self.zip_ref.get_state()

        if ('state' in flags and flags['state'] == 'needs_update') or 'force_update' in keywords:
            task_wait = self.services.launch_task(self.NPROC,
                                                  self.services.get_working_dir(),
                                                  self.V3FIT_EXE,
                                                  self.current_v3fit_namelist,
                                                  logfile = 'v3fit_{}.log'.format(timeStamp))

#  Update flags.
            self.zip_ref.set_state(state='updated')

#  Wait for V3FIT to finish.
            if (self.services.wait_task(task_wait) and not os.path.exists(self.result_file)):
                self.services.error('v3fit: step failed.')

            if 'force_update' in keywords:
                with ZipState.ZipState(self.current_vmec_state, 'a') as vmec_zip_ref:
                    vmec_zip_ref.write(self.current_wout_file)
                self.zip_ref.write(self.current_vmec_state)

#  Add the result file to the state.
            self.zip_ref.write([self.current_v3fit_namelist, self.result_file])

        else:
#  Update flags.
            self.zip_ref.set_state(state='unchanged')

        if 'result_file' in keywords:
            result_nc = OMFITnc(self.result_file)
            nsteps = result_nc['nsteps']['data']
            result = {'signal_model': result_nc['signal_model_value']['data'][nsteps,:,0].tolist()}
            with open(keywords['result_file'], 'w') as result_ref:
                json.dump(result, result_ref)
            self.zip_ref.write(keywords['result_file'])

        self.zip_ref.close()
        self.services.update_state()
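
#  The get_e() method used by the quasi_newton_driver examples (eval_jacobian
#  and try_step) is not shown in this listing. Below is a minimal sketch of
#  what it might do, written as a free function and assuming the result file
#  holds the JSON written by the v3fit step() method above
#  ({'signal_model': [...]}) and that signal_observed, signal_sigma and
#  signal_weights are the arrays loaded in the quasi_newton_driver init example
#  further down. The exact weighting is an assumption.
import json

import numpy

def get_e(result_file, signal_observed, signal_sigma, signal_weights):
    #  Parse the modeled signals produced by the model worker.
    signal_model = numpy.array(json.load(result_file)['signal_model'])

    #  Weighted, normalized residuals so that chi^2 = numpy.dot(e, e).
    return signal_weights*(signal_observed - signal_model)/signal_sigma
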
Example #12
    def init(self, timeStamp=0.0):
        ScreenWriter.screen_output(self, 'verbose', 'quasi_newton_init: init')

        #  Get config filenames.
        current_model_state = self.services.get_config_param('MODEL_INPUT')
        quasi_newton_config = self.services.get_config_param(
            'QUASI_NEWTON_CONFIG')
        current_quasi_newton_state = self.services.get_config_param(
            'CURRENT_QUASI_NEWTON_STATE')

        #  Remove old inputs. Stage input files.
        for file in os.listdir('.'):
            os.remove(file)

        self.services.stage_input_files(self.INPUT_FILES)

        #  Create state from files.
        with ZipState.ZipState(current_quasi_newton_state, 'a') as zip_ref:
            zip_ref.write(quasi_newton_config)
            zip_ref.write(current_model_state)

        self.services.update_state()
Example #13
    def init(self, timeStamp=0.0, **keywords):
        ScreenWriter.screen_output(self, 'verbose', 'v3fit_driver: init')

        #  Separate out the siesta, vmec and v3fit keywords.
        eq_keywords = {}
        v3fit_keywords = {}
        for key, value in keywords.items():
            if 'vmec__' in key or 'siesta__' in key:
                eq_keywords[key] = value
            if 'v3fit__' in key:
                v3fit_keywords[key.replace('v3fit__', '', 1)] = value

#  Get config filenames.
        current_vmec_state = self.services.get_config_param(
            'CURRENT_VMEC_STATE')
        current_siesta_state = self.services.get_config_param(
            'CURRENT_SIESTA_STATE')
        self.current_v3fit_state = self.services.get_config_param(
            'CURRENT_V3FIT_STATE')

        #  We need to pass the inputs to the SIESTA or VMEC child workflow.
        self.services.stage_state()

        zip_ref = ZipState.ZipState(self.current_v3fit_state, 'a')

        #  If this is the first call, set up the VMEC or SIESTA sub workflow.
        if timeStamp == 0.0:
            if os.path.exists('eq_input_dir'):
                shutil.rmtree('eq_input_dir')
            os.mkdir('eq_input_dir')

            self.v3fit_port = self.services.get_port('V3FIT')

            if current_siesta_state in zip_ref:
                #  Get keys for the SIESTA sub workflow.
                keys = {
                    'PWD':
                    self.services.get_config_param('PWD'),
                    'USER_INPUT_FILES':
                    current_siesta_state,
                    'SIM_NAME':
                    '{}_siesta'.format(
                        self.services.get_config_param('SIM_NAME')),
                    'LOG_FILE':
                    'log.{}_siesta.warning'.format(
                        self.services.get_config_param('SIM_NAME')),
                    'OUTPUT_LEVEL':
                    self.services.get_config_param('OUTPUT_LEVEL')
                }

                try:
                    self.services.get_config_param('VMEC_NAMELIST_INPUT')
                except:
                    pass
                else:
                    keys[
                        'VMEC_NAMELIST_INPUT'] = self.services.get_config_param(
                            'VMEC_NAMELIST_INPUT')

                try:
                    self.services.get_config_param('SIESTA_NAMELIST_INPUT')
                except:
                    pass
                else:
                    keys[
                        'SIESTA_NAMELIST_INPUT'] = self.services.get_config_param(
                            'SIESTA_NAMELIST_INPUT')

                siesta_config = self.services.get_config_param('SIESTA_CONFIG')

                (self.eq_worker['sim_name'], self.eq_worker['init'],
                 self.eq_worker['driver']) = self.services.create_sub_workflow(
                     'siesta', siesta_config, keys, 'eq_input_dir')

            else:
                #  Get keys for the VMEC sub workflow.
                keys = {
                    'PWD':
                    self.services.get_config_param('PWD'),
                    'USER_INPUT_FILES':
                    current_vmec_state,
                    'SIM_NAME':
                    '{}_vmec'.format(
                        self.services.get_config_param('SIM_NAME')),
                    'LOG_FILE':
                    'log.{}_vmec.warning'.format(
                        self.services.get_config_param('SIM_NAME')),
                    'OUTPUT_LEVEL':
                    self.services.get_config_param('OUTPUT_LEVEL')
                }

                try:
                    self.services.get_config_param('VMEC_NAMELIST_INPUT')
                except:
                    pass
                else:
                    keys[
                        'VMEC_NAMELIST_INPUT'] = self.services.get_config_param(
                            'VMEC_NAMELIST_INPUT')

                vmec_config = self.services.get_config_param('VMEC_CONFIG')

                (self.eq_worker['sim_name'], self.eq_worker['init'],
                 self.eq_worker['driver']) = self.services.create_sub_workflow(
                     'vmec', vmec_config, keys, 'eq_input_dir')

#  Copy new subworkflow inputs to the input directory.
        if current_siesta_state in zip_ref:
            zip_ref.extract(current_siesta_state)
            shutil.copy2(current_siesta_state, 'eq_input_dir')
        else:
            zip_ref.extract(current_vmec_state)
            shutil.copy2(current_vmec_state, 'eq_input_dir')

#  Initialize and run the equilibrium. Replace values in the V3FIT state.
        self.services.call(self.eq_worker['init'], 'init', timeStamp)
        self.services.call(self.eq_worker['driver'], 'init', timeStamp,
                           **eq_keywords)
        self.services.call(self.eq_worker['driver'], 'step', timeStamp)

        #  After the equilibrium has run update the state.
        self.services.stage_subflow_output_files()
        if current_siesta_state in zip_ref:
            zip_ref.write(current_siesta_state)
        else:
            zip_ref.write(current_vmec_state)
        zip_ref.close()
        self.services.update_state()

        #  Initialize V3FIT.
        self.wait = self.services.call_nonblocking(self.v3fit_port, 'init',
                                                   timeStamp, **v3fit_keywords)
Example #14
    def step(self, timeStamp=0.0):
        ScreenWriter.screen_output(self, 'verbose', 'ml_train: step')

        flags = self.zip_ref.get_state()

        if 'state' in flags and flags['state'] == 'needs_update':
            if os.path.exists(self.nn_model):
                task_wait = self.services.launch_task(
                    self.NPROC,
                    self.services.get_working_dir(),
                    self.ML_TRAIN_EXE,
                    '--config={}'.format(self.nn_model_config),
                    '--model={}'.format(self.nn_model),
                    '--training_data={}'.format(self.training_data),
                    '--supplemental_data={}'.format(self.new_data),
                    '--prediction_data={}'.format(self.prediction_data),
                    '--batch_size={}'.format(self.batch_size),
                    '--iterations={}'.format(self.task_args['--iterations']),
                    '--epochs={}'.format(self.task_args['--epochs']),
                    '--param_covar_matrix={}'.format(self.nn_model_matrix),
                    '--validation_split={}'.format(
                        self.task_args['--validation_split']),
                    '--module_path={}'.format(self.constraint_path),
                    '--module={}'.format(self.constraint_name),
                    '--locations={}'.format(self.task_args['--locations']),
                    '--adaptive_percentage={}'.format(
                        self.task_args['--adaptive_percentage']),
                    logfile='ml_train_{}.log'.format(timeStamp))
            else:
                task_wait = self.services.launch_task(
                    self.NPROC,
                    self.services.get_working_dir(),
                    self.ML_TRAIN_EXE,
                    '--config={}'.format(self.nn_model_config),
                    '--model={}'.format(self.nn_model),
                    '--activation={}'.format(self.task_args['--activation']),
                    '--training_data={}'.format(self.training_data),
                    '--supplemental_data={}'.format(self.new_data),
                    '--prediction_data={}'.format(self.prediction_data),
                    '--batch_size={}'.format(self.batch_size),
                    '--iterations={}'.format(self.task_args['--iterations']),
                    '--epochs={}'.format(self.task_args['--epochs']),
                    '--num_layers={}'.format(self.task_args['--num_layers']),
                    '--layer_width={}'.format(self.task_args['--layer_width']),
                    '--param_covar_matrix={}'.format(self.nn_model_matrix),
                    '--l1_factor={}'.format(self.task_args['--l1_factor']),
                    '--l2_factor={}'.format(self.task_args['--l2_factor']),
                    '--validation_split={}'.format(
                        self.task_args['--validation_split']),
                    '--module_path={}'.format(self.constraint_path),
                    '--module={}'.format(self.constraint_name),
                    '--locations={}'.format(self.task_args['--locations']),
                    '--adaptive_percentage={}'.format(
                        self.task_args['--adaptive_percentage']),
                    logfile='ml_train_{}.log'.format(timeStamp))

#  Update flags.
            self.zip_ref.set_state(state='updated')

            #  Wait for training to finish. FIXME: Need to check that the outputs exist
            #  to check for errors.
            if (self.services.wait_task(task_wait) and False):
                self.services.error('ml_train: step failed.')

#  NN models may be a directory. Zip them first before adding them to the state.
            with ZipState.ZipState('{}.zip'.format(self.nn_model),
                                   'w') as nn_ref:
                nn_ref.write(self.nn_model)
            self.zip_ref.write('{}.zip'.format(self.nn_model))

#  Add outputs to state.
            self.zip_ref.write(self.new_data)
            self.zip_ref.write(self.prediction_data)
            self.zip_ref.write(self.nn_model_matrix)

        else:
            #  Update flags.
            self.zip_ref.set_state(state='unchanged')

        self.zip_ref.close()

        self.services.update_state()
        self.services.stage_output_files(timeStamp,
                                         self.current_ml_train_state)
Example #15
    def init(self, timeStamp=0.0):
        ScreenWriter.screen_output(self, 'verbose',
                                   'massive_serial_runner_init: init')

        #  Get config filenames.
        if timeStamp == 0.0:
            self.current_state = self.services.get_config_param(
                'CURRENT_MSR_STATE')
            self.msr_global_config = self.services.get_config_param(
                'MSR_GLOBAL_CONFIG')
            self.current_batch = self.services.get_config_param(
                'CURRENT_BATCH')
            self.database_config = self.services.get_config_param(
                'DATABASE_CONFIG')
            self.inscan_config_file = self.services.get_config_param(
                'INSCAN_CONFIG')
            self.batch_size = self.services.get_config_param('BATCH_SIZE')

#  Remove old inputs.
        if os.path.exists(self.current_batch):
            os.remove(self.current_batch)
        if os.path.exists('inscan'):
            os.remove('inscan')

#  Stage input files and set up the initial state.
        self.services.stage_input_files(self.INPUT_FILES)

        #  Load or create a massive serial runner zip state.
        with ZipState.ZipState(self.current_state, 'a') as zip_ref:

            if timeStamp == 0.0:
                #  Overwrite the database_config file if it was staged as an input file.
                #  Overwrite inscan_config if it was staged, otherwise extract it. These
                #  files are not expected to change so we only need to do this once.
                zip_ref.write_or_extract(self.inscan_config_file)
                zip_ref.write_or_check(self.database_config)
                zip_ref.write_or_check(self.msr_global_config)
                zip_ref.write_or_check(
                    self.services.get_config_param('MSR_SERIAL_STATE'))

#  Batch files are optional. If a batch file was not staged as an input, extract
#  it from the plasma state if one exists inside it.
            extract_if_needed(zip_ref, self.current_batch)

            #  Check if a new batch of data exists. If it does, create the new inscan file.
            if os.path.exists(self.current_batch):
                task_wait = self.services.launch_task(
                    self.NPROC,
                    self.services.get_working_dir(),
                    self.SAMPLE_EXE,
                    '--input={}'.format(self.inscan_config_file),
                    '--output=inscan',
                    '--nscan={}'.format(self.batch_size),
                    '--new={}'.format(self.current_batch),
                    logfile='sample_{}.log'.format(timeStamp))

                if self.services.wait_task(task_wait):
                    self.services.error(
                        'massive_serial_runner_init: failed to generate inscan sample'
                    )

#  There may be an existing inscan file in the state file. If one doesn't exist,
#  create a new one.
            elif 'inscan' not in zip_ref:
                task_wait = self.services.launch_task(
                    self.NPROC,
                    self.services.get_working_dir(),
                    self.SAMPLE_EXE,
                    '--input={}'.format(self.inscan_config_file),
                    '--output=inscan',
                    '--nscan={}'.format(self.batch_size),
                    logfile='sample_{}.log'.format(timeStamp))

                if self.services.wait_task(task_wait):
                    self.services.error(
                        'massive_serial_runner_init: failed to generate inscan sample'
                    )

            else:
                raise Exception('Expected inscan or {} file not found.'.format(
                    self.current_batch))

            zip_ref.write('inscan')
            zip_ref.set_state(batch_size=self.batch_size)

            zip_ref.set_state(state='needs_update')

        self.services.update_state()
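
#  The extract_if_needed() helper called above is not defined anywhere in this
#  listing. A minimal sketch of the behavior described by the comment in the
#  example: pull the batch file out of the zip state only when it was not
#  already staged as an input file and a copy exists inside the state.
import os

def extract_if_needed(zip_ref, file_name):
    #  A staged input file takes precedence over the copy stored in the state.
    if not os.path.exists(file_name) and file_name in zip_ref:
        zip_ref.extract(file_name)
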
Example #16
    def init(self, timeStamp=0.0, **keywords):
        ScreenWriter.screen_output(self, 'verbose', 'v3fit: init')

#  Get config filenames.
        self.current_v3fit_namelist = self.services.get_config_param('V3FIT_NAMELIST_INPUT')
        self.current_v3fit_state = self.services.get_config_param('CURRENT_V3FIT_STATE')
        self.result_file = 'result.{}.nc'.format(self.current_v3fit_namelist)
        current_siesta_namelist = self.services.get_config_param('SIESTA_NAMELIST_INPUT')
        current_siesta_state = self.services.get_config_param('CURRENT_SIESTA_STATE')
        current_vmec_namelist = self.services.get_config_param('VMEC_NAMELIST_INPUT')
        self.current_vmec_state = self.services.get_config_param('CURRENT_VMEC_STATE')
        self.current_wout_file = 'wout_{}.nc'.format(current_vmec_namelist.replace('input.','',1))

#  Stage state.
        self.services.stage_state()

#  Unzip files from the state. Use mode a so files can be read and written to.
        self.zip_ref = ZipState.ZipState(self.current_v3fit_state, 'a')
        self.zip_ref.extract(self.current_v3fit_namelist)
        if self.result_file in self.zip_ref:
            self.zip_ref.extract(self.result_file)

        if current_siesta_state in self.zip_ref:
            self.zip_ref.extract(current_siesta_state)

            with ZipState.ZipState(current_siesta_state, 'r') as siesta_zip_ref:
                siesta_zip_ref.extract(current_siesta_namelist)
                namelist = OMFITnamelist(current_siesta_namelist)
                current_restart_file = 'siesta_{}.nc'.format(namelist['siesta_info']['restart_ext'])

                siesta_zip_ref.extract(current_restart_file)
                flags = siesta_zip_ref.get_state()
                if 'state' in flags and flags['state'] == 'updated':
                    self.zip_ref.set_state(state='needs_update')

                siesta_zip_ref.extract(self.current_vmec_state)

                with ZipState.ZipState(self.current_vmec_state, 'r') as vmec_zip_ref:
                    vmec_zip_ref.extract(self.current_wout_file)
                    flags = vmec_zip_ref.get_state()
                    if 'state' in flags and flags['state'] == 'updated':
                        self.zip_ref.set_state(state='needs_update')

                keywords['siesta_nli_filename'] = current_siesta_namelist
                keywords['siesta_restart_filename'] = current_restart_file
                keywords['vmec_nli_filename'] = current_vmec_namelist
                keywords['vmec_wout_input'] = self.current_wout_file
                keywords['model_eq_type'] = 'siesta'
        else:
            self.zip_ref.extract(self.current_vmec_state)

            with ZipState.ZipState(self.current_vmec_state, 'r') as vmec_zip_ref:
                vmec_zip_ref.extract(self.current_wout_file)
                vmec_zip_ref.extract(current_vmec_namelist)
                flags = vmec_zip_ref.get_state()
                if 'state' in flags and flags['state'] == 'updated':
                    self.zip_ref.set_state(state='needs_update')

            keywords['vmec_nli_filename'] = current_vmec_namelist
            keywords['vmec_wout_input'] = self.current_wout_file
            keywords['model_eq_type'] = 'vmec'

#  Update parameters in the namelist.
        self.set_namelist(**keywords)
Example #17
    def init(self, timeStamp=0.0, **keywords):
        ScreenWriter.screen_output(self, 'verbose', 'massive_serial_runner: init')

#  Stage state.
        self.services.stage_state()

#  Unzip files from the state. Use mode a so files can be read and written to.
        if timeStamp == 0.0:
            self.current_state = self.services.get_config_param('CURRENT_MSR_STATE')

        self.zip_ref = ZipState.ZipState(self.current_state, 'a')

#  Get config filenames.
        if timeStamp == 0.0:
            self.database_config = self.services.get_config_param('DATABASE_CONFIG')
            self.zip_ref.extract(self.database_config)

            self.current_batch = self.services.get_config_param('CURRENT_BATCH')

            self.constraint_path = self.services.get_config_param('MODULE_PATH')
            self.constraint_name = self.services.get_config_param('MODULE_NAME')

#  Keys for the massive serial subworkflow.
            keys = {
                'PWD'            : self.services.get_config_param('PWD'),
                'SIM_NAME'       : 'massive_serial_runner_sub',
                'LOG_FILE'       : 'log.massive_serial_runner',
                'NNODES'         : self.services.get_config_param('MSR_NNODES'),
                'INPUT_DIR_SIM'  : 'massive_serial_runner_input_dir',
                'OUTPUT_DIR_SIM' : '{}/massive_serial_runner_output_dir'.format(os.getcwd())
            }

            if os.path.exists('massive_serial_runner_input_dir'):
                shutil.rmtree('massive_serial_runner_input_dir')
            os.mkdir('massive_serial_runner_input_dir')

            self.massive_serial_worker = {
                'sim_name' : None,
                'init'     : None,
                'driver'   : None
            }

            msr_global = self.services.get_config_param('MSR_GLOBAL_CONFIG')
            self.zip_ref.extract(msr_global)

            (self.massive_serial_worker['sim_name'],
             self.massive_serial_worker['init'],
             self.massive_serial_worker['driver']) = self.services.create_sub_workflow('massive_serial',
                                                                                       msr_global,
                                                                                       keys,
                                                                                       'massive_serial_runner_input_dir')

        self.zip_ref.extract('inscan')
        shutil.copy2('inscan', 'massive_serial_runner_input_dir')

#  These files should never change so only extract them once.
        if timeStamp == 0.0:
            ms_state = self.services.get_config_param('MSR_SERIAL_STATE')

            self.zip_ref.extract(ms_state)
            shutil.copy2(ms_state, 'massive_serial_runner_input_dir')

            os.chdir('massive_serial_runner_input_dir')

#  We need the input directory to exist in a directory called input, so we must
#  make that directory first and then extract the files. Remember to change back
#  to the original working directory after extraction.
            with ZipState.ZipState(ms_state, 'r') as zip_ref:
                zip_ref.extractall()
            with ZipState.ZipState('input.zip', 'r') as input_ref:
                input_ref.extractall()

            override = ConfigObj(infile=self.services.get_config_param('MSR_SERIAL_NODE_CONFIG'), interpolation='template', file_error=True)
            override['INPUT_DIR_SIM'] = os.getcwd()
            override.write()

            override2 = ConfigObj(infile=self.services.get_config_param('MSR_MODEL_CONFIG'), interpolation='template', file_error=True)
            override2['INPUT_DIR_SIM'] = os.getcwd()
            override2.write()

            os.chdir('../')
Example #18
    def init(self, timeStamp=0.0):
        ScreenWriter.screen_output(self, 'verbose', 'cariddi_init: init')

#  Get config filenames.
        current_cariddi_state = self.services.get_config_param('CURRENT_CARIDDI_STATE')

        current_vmec_namelist = self.services.get_config_param('VMEC_NAMELIST_INPUT')
        current_vmec_state = self.services.get_config_param('CURRENT_VMEC_STATE')

        current_siesta_namelist = self.services.get_config_param('SIESTA_NAMELIST_INPUT')
        current_siesta_state = self.services.get_config_param('CURRENT_SIESTA_STATE')

        current_v3fit_namelist = self.services.get_config_param('V3FIT_NAMELIST_INPUT')
        current_v3fit_state = self.services.get_config_param('CURRENT_V3FIT_STATE')

#  Remove old inputs. Stage input files.
        for file in os.listdir('.'):
            os.remove(file)

        self.services.stage_input_files(self.INPUT_FILES)

#  All v3fit runs require a vmec state at a minimum. Create a vmec state. If
#  the vmec namelist file exists, add the namelist input file.
        with ZipState.ZipState(current_vmec_state, 'a') as zip_ref:
            if os.path.exists(current_vmec_namelist):
                zip_ref.write(current_vmec_namelist)
                zip_ref.set_state(state='needs_update')

#  A siesta state is optional. If a siesta state or namelist exists, create a
#  siesta state. If the siesta namelist or vmec state files exist, add them to
#  the siesta state.
        if os.path.exists(current_siesta_state) or os.path.exists(current_siesta_namelist):
            with ZipState.ZipState(current_siesta_state, 'a') as zip_ref:
                if os.path.exists(current_siesta_namelist):
                    zip_ref.write(current_siesta_namelist)
                    zip_ref.set_state(state='needs_update')

#  The vmec state will be merged with any existing vmec state in the siesta
#  state.
                zip_ref.write(current_vmec_state)

#  Create state from files. Input files can either be a new state, a namelist
#  input file, or both. If both files were staged, replace the namelist input
#  file. If the namelist file is present, flag the state as needing to be
#  updated.
        with ZipState.ZipState(current_v3fit_state, 'a') as zip_ref:
            if os.path.exists(current_v3fit_namelist):
                zip_ref.write(current_v3fit_namelist)
                zip_ref.set_state(state='needs_update')

#  If a siesta state exists at this point, add it to the archive. Otherwise add
#  the vmec state.
            if os.path.exists(current_siesta_state):
                zip_ref.write(current_siesta_state)
            else:
                zip_ref.write(current_vmec_state)

#  Create state from files. Input files can either be a new state, an input file,
#  or both. If both files were staged, replace the input file. If the input file
#  is present, flag the state as needing to be updated.
        with ZipState.ZipState(current_cariddi_state, 'a') as zip_ref:
            zip_ref.write(current_v3fit_state)

        self.services.update_state()
Example #19
    def init(self, timeStamp=0.0, **keywords):
        ScreenWriter.screen_output(self, 'verbose', 'ml_train_driver: init')

        if timeStamp == 0.0:
            self.current_ml_train_state = self.services.get_config_param('CURRENT_ML_TRAIN_STATE')
            self.new_data = self.services.get_config_param('NEW_DATA')
            self.training_data = self.services.get_config_param('TRAINING_DATA')
            self.data_gen_state = self.services.get_config_param('DATA_GEN_STATE')
            self.max_iterations = int(self.services.get_config_param('MAX_ITERATIONS'))
            self.ml_train_port = self.services.get_port('ML_TRAIN')

        self.services.stage_state()

#  Extract files needed to set up the data gen model.
        with ZipState.ZipState(self.current_ml_train_state, 'a') as zip_ref:
            zip_ref.extract_optional(self.new_data)
            zip_ref.extract_or_check(self.data_gen_state)

            with ZipState.ZipState(self.data_gen_state, 'a') as model_state_ref:
                model_state_ref.write_optional(self.new_data)

#  Setup the data generation subworkflow.
            if timeStamp == 0.0:
                data_gen_config = self.services.get_config_param('DATA_GEN_CONFIG')
                zip_ref.extract_or_check(data_gen_config)

#  Get keys for the sub workflow.
                keys = {
                    'PWD'              : self.services.get_config_param('PWD'),
                    'SIM_NAME'         : '{}_gen_data'.format(self.services.get_config_param('SIM_NAME')),
                    'LOG_FILE'         : 'log.gen_data.warning',
                    'OUTPUT_LEVEL'     : self.services.get_config_param('OUTPUT_LEVEL'),
                    'CURRENT_BATCH'    : self.new_data,
                    'USER_INPUT_FILES' : self.data_gen_state,
                    'MODULE_PATH'      : self.services.get_config_param('MODULE_PATH'),
                    'MODULE_NAME'      : self.services.get_config_param('MODULE_NAME'),
                    'BATCH_SIZE'       : self.services.get_config_param('BATCH_SIZE'),
                    'MSR_NNODES'       : self.services.get_config_param('MSR_NNODES')
                }

                if os.path.exists('data_gen_input_dir'):
                    shutil.rmtree('data_gen_input_dir')
                os.mkdir('data_gen_input_dir')

                self.data_gen = {
                    'sim_name' : None,
                    'init'     : None,
                    'driver'   : None
                }
                (self.data_gen['sim_name'],
                 self.data_gen['init'],
                 self.data_gen['driver']) = self.services.create_sub_workflow('data_gen',
                                                                              data_gen_config,
                                                                              keys,
                                                                              'data_gen_input_dir')

            shutil.copy2(self.data_gen_state, 'data_gen_input_dir')

#  If new data exists or training data does not exist, generate a data batch.
            if os.path.exists(self.new_data) or self.training_data not in zip_ref:
                self.services.call(self.data_gen['init'], 'init', timeStamp)
                self.services.call(self.data_gen['driver'], 'init', timeStamp)
                self.services.call(self.data_gen['driver'], 'step', timeStamp)

                self.services.stage_subflow_output_files()

                with ZipState.ZipState(self.data_gen_state, 'r') as model_state_ref:
                    model_state_ref.extract_or_check(self.new_data)

                if self.training_data not in zip_ref:
                    os.rename(self.new_data, self.training_data)
                else:
                    zip_ref.extract(self.training_data)
                    self.append_data()

                zip_ref.write(self.training_data)
                zip_ref.write(self.data_gen_state)
                zip_ref.set_state(state='needs_update')

        self.services.update_state()
Example #20
    def init(self, timeStamp=0.0, **keywords):
        ScreenWriter.screen_output(self, 'verbose', 'siesta_driver: init')

        #  Separate out the siesta and vmec keywords.
        siesta_keywords = {}
        vmec_keywords = {}
        for key, value in keywords.items():
            if 'vmec__' in key:
                vmec_keywords[key] = value
            if 'siesta__' in key:
                siesta_keywords[key.replace('siesta__', '', 1)] = value

#  Get config filenames.
        current_vmec_state = self.services.get_config_param(
            'CURRENT_VMEC_STATE')
        self.current_siesta_state = self.services.get_config_param(
            'CURRENT_SIESTA_STATE')

        #  We need to pass the inputs to the VMEC child workflow.
        self.services.stage_state()

        zip_ref = ZipState.ZipState(self.current_siesta_state, 'a')
        zip_ref.extract(current_vmec_state)

        #  If this is the first call, set up the VMEC sub workflow.
        if timeStamp == 0.0:
            #  Get the siesta port
            self.siesta_port = self.services.get_port('SIESTA')

            #  Get keys for the sub workflow.
            keys = {
                'PWD':
                self.services.get_config_param('PWD'),
                'SIM_NAME':
                '{}_vmec'.format(self.services.get_config_param('SIM_NAME')),
                'USER_INPUT_FILES':
                current_vmec_state,
                'LOG_FILE':
                'log.vmec.warning',
                'OUTPUT_LEVEL':
                self.services.get_config_param('OUTPUT_LEVEL')
            }

            if os.path.exists('vmec_input_dir'):
                shutil.rmtree('vmec_input_dir')
            os.mkdir('vmec_input_dir')

            vmec_config = self.services.get_config_param('VMEC_CONFIG')

            self.vmec_worker = {'sim_name': None, 'init': None, 'driver': None}
            (self.vmec_worker['sim_name'], self.vmec_worker['init'],
             self.vmec_worker['driver']) = self.services.create_sub_workflow(
                 'vmec', vmec_config, keys, 'vmec_input_dir')

        shutil.copy2(current_vmec_state, 'vmec_input_dir')

        #  Initialize and run VMEC. Replace values in the siesta state.
        self.services.call(self.vmec_worker['init'], 'init', timeStamp)
        self.services.call(self.vmec_worker['driver'], 'init', timeStamp,
                           **vmec_keywords)
        self.services.call(self.vmec_worker['driver'], 'step', timeStamp)

        #  After VMEC has run update the VMEC state.
        self.services.stage_subflow_output_files()
        zip_ref.write(current_vmec_state)
        zip_ref.close()
        self.services.update_state()

        #  Initialize SIESTA.
        self.wait = self.services.call_nonblocking(self.siesta_port, 'init',
                                                   timeStamp,
                                                   **siesta_keywords)
Example #21
    def try_step(self, timeStamp=0.0):
        ScreenWriter.screen_output(self, 'verbose',
                                   'quasi_newton_driver: try_step')

        self.k_use = self.get_k_svd()

        #  Try different Levenberg-Marquardt step sizes.
        new_max = min(self.delta_a_len[self.k_use], self.max_step)
        step_use = numpy.empty(len(self.model_workers), dtype=float)
        delta_try = numpy.empty(
            (len(self.model_workers), len(self.model_workers)), dtype=float)
        e_try = numpy.empty(
            (len(self.model_workers), len(self.signal_observed)), dtype=float)
        chi2try = numpy.empty(len(self.model_workers), dtype=float)

        num_trys = 0

        while num_trys < self.max_step_try:
            num_trys += 1

            for i, worker in enumerate(self.model_workers):
                step_use[i] = new_max - i * new_max / (2.0 *
                                                       len(self.model_workers))
                delta_try[i] = self.lm_step(step_use[i])

                #  Set new parameters.
                keywords = {}
                for j, worker2 in enumerate(self.model_workers):
                    keywords[worker2['name']] = worker2['value'] + delta_try[
                        i, j] * worker2['vrnc']
                self.services.wait_call(worker['wait'], True)
                worker['wait'] = self.services.call_nonblocking(
                    worker['driver'], 'init', timeStamp, **keywords)

#  Recompute the model.
            for worker in self.model_workers:
                self.services.wait_call(worker['wait'], True)
                worker['wait'] = self.services.call_nonblocking(
                    worker['driver'],
                    'step',
                    timeStamp,
                    result_file=worker['result'])

#  Collect the results.
            for worker in self.model_workers:
                self.services.wait_call(worker['wait'], True)

            self.services.stage_subflow_output_files()

            #  Compute chi^2 for each attempted step and keep the smallest.
            with open('chi.log', 'a') as chi_ref:
                chi_ref.write('Chi step {}\n'.format(timeStamp))
                for i, worker in enumerate(self.model_workers):
                    with ZipState.ZipState(worker['output'], 'a') as zip_ref:
                        zip_ref.extract(worker['result'])
                    with open(worker['result'], 'r') as result_file:
                        e_try[i] = self.get_e(result_file)
                        chi2try[i] = numpy.dot(e_try[i], e_try[i])

                    chi_ref.write('chi2 = {} : '.format(chi2try[i]))
                    e_try[i].tofile(chi_ref, sep=',', format='%12.5e')
                    chi_ref.write('\n')
                chi_ref.write('\n')

            i_min = numpy.argmin(chi2try)
            if chi2try[i_min] <= self.chi2:
                #  Chi^2 decreased. Set the best case to the current model.
                os.rename(self.model_workers[i_min]['output'],
                          self.current_model_state)
                shutil.copy2(self.current_model_state, 'model_inputs')
                self.e = e_try[i_min]

                #  Set the new parameter values.
                current_values = {}
                for i, worker in enumerate(self.model_workers):
                    worker['value'] += delta_try[i_min, i] * worker['vrnc']
                    current_values[worker['name']] = worker['value']

                #  Dump current values to a JSON file. This can be used for
                #  restarting a reconstruction.
                with open('current_values.json', 'w') as current_values_file:
                    json.dump(current_values, current_values_file)

                self.norm_len = numpy.sqrt(
                    numpy.dot(delta_try[i_min], delta_try[i_min]))

                return True
            else:
                #  Cut the step size in half and reset the model.
                new_max /= 2.0
                for worker in self.model_workers:
                    worker['wait'] = self.services.call_nonblocking(
                        worker['init'], 'init', timeStamp)

        return False
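
The try_step method above evaluates a batch of trial Levenberg-Marquardt step sizes in parallel, keeps the trial with the lowest chi^2, and halves the step sizes whenever no trial improves on the current chi^2. A framework-free sketch of that retry logic, where the evaluate(step) callable is a hypothetical stand-in for running one model sub-workflow and extracting its residual vector:

    import numpy


    def best_trial_step(evaluate, steps, chi2_current, max_tries=10):
        """Return (step, chi2) of the best trial, halving the candidate steps
        until a trial beats chi2_current or max_tries is exhausted."""
        for _ in range(max_tries):
            #  chi^2 of a trial is the dot product of its residual vector e.
            chi2_trials = [numpy.dot(e, e) for e in
                           (numpy.asarray(evaluate(s)) for s in steps)]
            i_min = int(numpy.argmin(chi2_trials))
            if chi2_trials[i_min] <= chi2_current:
                return steps[i_min], chi2_trials[i_min]
            #  No trial improved chi^2: halve every candidate and try again.
            steps = [0.5 * s for s in steps]
        return None, chi2_current

In the driver above that evaluation is spread across the model_workers sub-workflows and the get_e helper; the sketch only captures the choose-the-minimum and halve-on-failure control flow.
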
Example #22
    def init(self, timeStamp=0.0):
        ScreenWriter.screen_output(self, 'verbose',
                                   'quasi_newton_driver: init')

        #  Get config filenames.
        self.current_model_state = self.services.get_config_param(
            'MODEL_INPUT')
        self.quasi_newton_config_file = self.services.get_config_param(
            'QUASI_NEWTON_CONFIG')
        self.current_quasi_newton_state = self.services.get_config_param(
            'CURRENT_QUASI_NEWTON_STATE')
        ips_model_config = self.services.get_config_param('MODEL_SIM_CONFIG')

        #  Stage state and extract all files.
        self.services.stage_state()
        with ZipState.ZipState(self.current_quasi_newton_state,
                               'a') as zip_ref:
            zip_ref.extractall()

        #  Load the quasi-Newton JSON file.
        with open(self.quasi_newton_config_file, 'r') as config_file:
            quasi_newton_config = json.load(config_file)
            self.signal_sigma = numpy.absolute(
                numpy.array(quasi_newton_config['signal_sigma']))
            self.signal_observed = numpy.array(
                quasi_newton_config['signal_observed'])
            self.signal_weights = numpy.sqrt(
                numpy.array(quasi_newton_config['signal_weights']))
            self.dchi2_tol = quasi_newton_config['dchi2_tol']

            #  Singular value step controls.
            #  Cutoff value for relative singular values.
            if 'cut_svd' in quasi_newton_config:
                self.cut_svd = quasi_newton_config['cut_svd']
            else:
                self.cut_svd = 0.0
            #  Cutoff value for expected step efficiency.
            if 'cut_eff' in quasi_newton_config:
                self.cut_eff = quasi_newton_config['cut_eff']
            else:
                self.cut_eff = 0.0
            #  Cutoff value for expected marginal step efficiency.
            if 'cut_marg_eff' in quasi_newton_config:
                self.cut_marg_eff = quasi_newton_config['cut_marg_eff']
            else:
                self.cut_marg_eff = 0.0
            #  Cutoff value for expected step size.
            if 'cut_delta_a' in quasi_newton_config:
                self.cut_delta_a = quasi_newton_config['cut_delta_a']
            else:
                self.cut_delta_a = 0.0
            #  Cutoff value for expected change in g^2.
            if 'cut_dg2' in quasi_newton_config:
                self.cut_dg2 = quasi_newton_config['cut_dg2']
            else:
                self.cut_dg2 = 0.0

            #  Minimization controls.
            if 'max_step' in quasi_newton_config:
                self.max_step = quasi_newton_config['max_step']
            else:
                self.max_step = 100.0

            #  Maximum number of reconstruction steps.
            if 'max_recon_steps' in quasi_newton_config:
                self.max_recon_steps = quasi_newton_config['max_recon_steps']
            else:
                self.max_recon_steps = 20

            #  Maximum number of tries a step can take to reduce g^2.
            if 'max_step_try' in quasi_newton_config:
                self.max_step_try = quasi_newton_config['max_step_try']
            else:
                self.max_step_try = 10

            #  Set keys for the sub-workflows.
            keys = {
                'PWD': self.services.get_config_param('PWD'),
                'USER_INPUT_FILES': self.current_model_state,
                'OUTPUT_LEVEL': self.services.get_config_param('OUTPUT_LEVEL')
            }

            #  Copy the model state to the input file staging directory. Since all the
            #  model instances start from the same state, we only need this in one place.
            if os.path.exists('model_inputs'):
                shutil.rmtree('model_inputs')
            os.mkdir('model_inputs')
            shutil.copy2(self.current_model_state, 'model_inputs')

            keywords = {}

            for i, param in enumerate(quasi_newton_config['params']):
                self.model_workers.append({
                    'sim_name': None,
                    'init': None,
                    'driver': None,
                    'result': '{}_result.json'.format(param['name']),
                    'output': '{}_model_state.zip'.format(param['name']),
                    'name': param['name'],
                    'vrnc': param['vrnc'],
                    'value': param['init'],
                    'scale': (float(i) + 1.0) /
                             float(len(quasi_newton_config['params']))
                })

                keys['SIM_NAME'] = param['name']
                keys['LOG_FILE'] = 'log.{}'.format(param['name'])
                keys['USER_OUTPUT_FILES'] = self.model_workers[i]['output']

                (self.model_workers[i]['sim_name'],
                 self.model_workers[i]['init'], self.model_workers[i]['driver']
                 ) = self.services.create_sub_workflow(param['name'],
                                                       ips_model_config, keys,
                                                       'model_inputs')

                self.model_workers[i]['wait'] = self.services.call_nonblocking(
                    self.model_workers[i]['init'], 'init', timeStamp)

                keywords[param['name']] = param['init']

            #  Run the initial convergence. Only one model needs to run, but the
            #  staging expects values from each sub-workflow, so launch them all.
            for worker in self.model_workers:
                self.services.wait_call(worker['wait'], True)
                worker['wait'] = self.services.call_nonblocking(
                    worker['driver'], 'init', timeStamp, **keywords)

            for worker in self.model_workers:
                self.services.wait_call(worker['wait'], True)
                worker['wait'] = self.services.call_nonblocking(
                    worker['driver'],
                    'step',
                    timeStamp,
                    result_file=worker['result'])

            for worker in self.model_workers:
                self.services.wait_call(worker['wait'], True)

            self.services.stage_subflow_output_files()

            with ZipState.ZipState(self.model_workers[0]['output'],
                                   'a') as zip_ref:
                zip_ref.extract(self.model_workers[0]['result'])
                with open(self.model_workers[0]['result'], 'r') as result_file:
                    self.e = self.get_e(result_file)

            #  The initial model state may have changed; reset it from one of
            #  the workers.
            os.rename(self.model_workers[0]['output'],
                      self.current_model_state)
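
The init method above reads its controls from a quasi-Newton configuration JSON file whose schema is not shown in these examples. A hypothetical minimal file, limited to the keys read above and using made-up values (the file name and the parameter names 'pres_scale' and 'phiedge' are placeholders), could be written like this:

    import json

    #  All values are illustrative only.
    quasi_newton_config = {
        'signal_observed': [1.2e-3, -4.5e-4],
        'signal_sigma': [1.0e-4, 1.0e-4],
        'signal_weights': [1.0, 1.0],
        'dchi2_tol': 0.1,
        'cut_svd': 1.0e-10,      # optional; treated as 0.0 when omitted
        'max_step': 100.0,       # optional; init above supplies defaults
        'max_recon_steps': 20,
        'max_step_try': 10,
        'params': [
            {'name': 'pres_scale', 'init': 1.0, 'vrnc': 0.1},
            {'name': 'phiedge', 'init': 0.05, 'vrnc': 0.005}
        ]
    }

    with open('quasi_newton.json', 'w') as config_file:
        json.dump(quasi_newton_config, config_file, indent=4)
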
Example #23
    def get_updated_state(self):
        #  Close the archive so pending changes are flushed before staging.
        self.zip_ref.close()
        self.services.stage_state()
        #  Reopen in append mode so files can be read and written again.
        self.zip_ref = ZipState.ZipState(self.current_cariddi_state, 'a')
Example #24
    def init(self, timeStamp=0.0, **keywords):
        ScreenWriter.screen_output(self, 'verbose', 'cariddi_driver: init')

        #  Get config filenames.
        self.current_cariddi_state = self.services.get_config_param(
            'CURRENT_CARIDDI_STATE')

        #  We need to pass the inputs to the V3FIT child workflow.
        self.services.stage_state()

        self.zip_ref = ZipState.ZipState(self.current_cariddi_state, 'a')

        #  If this is the first call, set up the V3FIT sub workflow.
        if timeStamp == 0.0:
            self.current_v3fit_state = self.services.get_config_param(
                'CURRENT_V3FIT_STATE')
            self.current_siesta_state = self.services.get_config_param(
                'CURRENT_SIESTA_STATE')
            self.current_vmec_state = self.services.get_config_param(
                'CURRENT_VMEC_STATE')
            self.current_vmec_profile = self.services.get_config_param(
                'CURRENT_VMEC_PROFILE')
            current_vmec_namelist = self.services.get_config_param(
                'VMEC_NAMELIST_INPUT')
            self.current_wout_file = 'wout_{}.nc'.format(
                current_vmec_namelist.replace('input.', '', 1))

            self.time_steps = int(
                self.services.get_config_param('NUMBER_OF_TIME_STEPS'))
            self.time_sub_steps = int(
                self.services.get_config_param('NUMBER_OF_SUB_TIME_STEPS'))

            if os.path.exists('eq_input_dir'):
                shutil.rmtree('eq_input_dir')
            os.mkdir('eq_input_dir')

            self.zip_ref.extract(self.current_v3fit_state)
            shutil.copy2(self.current_v3fit_state, 'eq_input_dir')

            self.cariddi_port = self.services.get_port('CARIDDI')

            #  Get keys for the V3FIT sub workflow.
            keys = {
                'PWD': self.services.get_config_param('PWD'),
                'USER_INPUT_FILES': self.current_v3fit_state,
                'SIM_NAME': '{}_v3fit'.format(
                    self.services.get_config_param('SIM_NAME')),
                'LOG_FILE': 'log.{}_v3fit.warning'.format(
                    self.services.get_config_param('SIM_NAME')),
                'OUTPUT_LEVEL': self.services.get_config_param('OUTPUT_LEVEL'),
                'VMEC_NAMELIST_INPUT':
                    self.services.get_config_param('VMEC_NAMELIST_INPUT'),
                'V3FIT_NAMELIST_INPUT':
                    self.services.get_config_param('V3FIT_NAMELIST_INPUT')
            }

            v3fit_config = self.services.get_config_param('V3FIT_CONFIG')

            (self.eq_worker['sim_name'], self.eq_worker['init'],
             self.eq_worker['driver']) = self.services.create_sub_workflow(
                 'v3fit', v3fit_config, keys, 'eq_input_dir')

            eq_keywords = {
                'vmec__mgrid_file':
                    self.services.get_config_param('MGRID_FILE')
            }

            #  Initialize the equilibrium.
            self.services.call(self.eq_worker['init'], 'init', timeStamp)

            self.cariddi_port = self.services.get_port('CARIDDI')