Example #1
    def step(self, timeStamp=0.0):
        ScreenWriter.screen_output(self, 'verbose', 'vmec: step')

        flags = self.zip_ref.get_state()

        if 'state' in flags and flags['state'] == 'needs_update':
            task_wait = self.services.launch_task(
                self.NPROC,
                self.services.get_working_dir(),
                self.VMEC_EXE,
                self.current_vmec_namelist,
                logfile='vmec.log')

            #  Update flags.
            self.zip_ref.set_state(state='updated')

            #  Wait for VMEC to finish.
            if (self.services.wait_task(task_wait)
                    and not os.path.exists(self.current_wout_file)):
                self.services.error('vmec: step failed.')

#  Add the wout file to the state.
            self.zip_ref.write(
                [self.current_vmec_namelist, self.current_wout_file])

        else:
            #  Update flags.
            self.zip_ref.set_state(state='unchanged')

        self.zip_ref.close()

        self.services.update_state()
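
For reference, every component in these examples drives its work through a ZipState.ZipState object: a ZIP archive that also carries a small set of flags ('needs_update', 'updated', 'unchanged') read with get_state() and written with set_state(). The real class ships with the surrounding workflow framework and is not shown on this page. The sketch below is only an illustration of the interface the examples call, under the assumption that the class lives in a module of the same name and that the flags can be kept in a JSON member of the archive; it is not the real implementation.

import json
import zipfile

#  Illustrative stand-in for the ZipState class used above. The real class is
#  part of the workflow framework; this sketch only mimics the calls made in
#  the examples (get_state, set_state, write, extract, extractall, close) by
#  storing the flags in a '__state__.json' archive member.
class ZipState:
    STATE_MEMBER = '__state__.json'

    def __init__(self, path, mode='a'):
        self.zip_file = zipfile.ZipFile(path, mode)

    def get_state(self):
        #  Return the stored flags, or an empty dict for a fresh archive.
        if self.STATE_MEMBER in self.zip_file.namelist():
            return json.loads(self.zip_file.read(self.STATE_MEMBER))
        return {}

    def set_state(self, **flags):
        #  Merge the new flags and append an updated state member. zipfile
        #  tolerates duplicate names and reads back the most recent entry.
        state = self.get_state()
        state.update(flags)
        self.zip_file.writestr(self.STATE_MEMBER, json.dumps(state))

    def write(self, files):
        #  Accept a single file name or a list of file names.
        if isinstance(files, str):
            files = [files]
        for name in files:
            self.zip_file.write(name)

    def extract(self, name):
        self.zip_file.extract(name)

    def extractall(self):
        self.zip_file.extractall()

    def close(self):
        self.zip_file.close()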
Example #2
    def step(self, timeStamp=0.0):
        ScreenWriter.screen_output(self, 'verbose', 'ml_train: step')

        flags = self.zip_ref.get_state()

        if 'state' in flags and flags['state'] == 'needs_update':
            task_wait = self.services.launch_task(
                self.NPROC,
                self.services.get_working_dir(),
                'python',
                self.ML_TRAIN_EXE,
                logfile='ml_train.log')

            #  Update flags.
            self.zip_ref.set_state(state='updated')

            #  Wait for training to finish. FIXME: Need to check that the output
            #  files exist in order to detect errors.
            if (self.services.wait_task(task_wait) and False):
                self.services.error('ml_train: step failed.')

#  Add outputs to state.
        else:
            #  Update flags.
            self.zip_ref.set_state(state='unchanged')

        self.zip_ref.close()

        self.services.update_state()
Example #3
    def init(self, timeStamp=0.0):
        ScreenWriter.screen_output(self, 'verbose', 'ml_train_init: init')

#  Get config filenames.
        current_ml_train_data = self.services.get_config_param('ML_TRAIN_DATA')
        current_ml_train_state = self.services.get_config_param('CURRENT_ML_TRAIN_STATE')

#  Remove old inputs. Stage input files.
        for file in os.listdir('.'):
            os.remove(file)

#  Stage input files and set up the initial state.
        self.services.stage_input_files(self.INPUT_FILES)

#  Create the plasma state from files. The input files can be a new plasma
#  state, a training data file, or both. If both files were staged, replace the
#  training data input file. If the training data file is present, flag the
#  plasma state as needing to be updated.
        with ZipState.ZipState(current_ml_train_state, 'a') as zip_ref:
            if os.path.exists(current_ml_train_data):
                os.rename(current_ml_train_data, 'training_data.dat')
                zip_ref.write('training_data.dat')
                zip_ref.set_state(state='needs_update')

        self.services.update_plasma_state()
Example #4
    def init(self, timeStamp=0.0, **keywords):
        ScreenWriter.screen_output(self, 'verbose', 'ml_train_driver: init')

        current_ml_train_state = self.services.get_config_param('CURRENT_ML_TRAIN_STATE')
        
#  Initialize ml_train.
        self.ml_train_port = self.services.get_port('ML_TRAIN')
        self.wait = self.services.call_nonblocking(self.ml_train_port, 'init',
                                                   timeStamp, **keywords)

        self.stage_state()
        with ZipState.ZipState(current_ml_train_state, 'a') as zip_ref:
Example #5
    def eval_jacobian(self, timeStamp=0.0):
        ScreenWriter.screen_output(self, 'verbose', 'quasi_newton_driver: eval_jacobian')

#  Set the model to a known state.
        shutil.copy2(self.current_model_state, 'model_inputs')
        for worker in self.model_workers:
            worker['wait'] = self.services.call_nonblocking(worker['init'],
                                                            'init', timeStamp)
                
#  Perturb the parameters.
        for worker in self.model_workers:
            keywords = {worker['name'] : worker['value'] + worker['vrnc']}
            self.services.wait_call(worker['wait'], True)
            worker['wait'] = self.services.call_nonblocking(worker['driver'],
                                                            'init', timeStamp,
                                                            **keywords)
                                                            
#  Recompute the model.
        for worker in self.model_workers:
            self.services.wait_call(worker['wait'], True)
            worker['wait'] = self.services.call_nonblocking(worker['driver'], 'step', timeStamp,
                                                            result_file=worker['result'])
                                                                    
#  Collect the results.
        for worker in self.model_workers:
            self.services.wait_call(worker['wait'], True)
            worker['wait'] = self.services.call_nonblocking(worker['init'],
                                                            'init', timeStamp)

        self.services.stage_subflow_output_files()
                                                                            
#  Compute the normalized Jacobian A.
#
#    A_ij = d e_i/d a_j                                                      (1)
#
#  where e is the error vector.
#
#    e_i = W_i*((S_i - M_i)/sigma_i)^2                                       (2)
#
#  Note: due to the way the memory is laid out, the Jacobian is stored
#  transposed.
        for i, worker in enumerate(self.model_workers):
            with ZipState.ZipState(worker['output'], 'a') as zip_ref:
                zip_ref.extract(worker['result'])
                with open(worker['result'], 'r') as result_file:
                    self.jacobian[i] = self.e - self.get_e(result_file)

        with open('jacobian.log', 'a') as jacobian_ref:
            jacobian_ref.write('Jacobian step {}\n'.format(timeStamp))
            for j in range(len(self.e)):
                self.jacobian[:,j].tofile(jacobian_ref, sep=',', format='%12.5e')
                jacobian_ref.write('\n')
            jacobian_ref.write('\n')
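
The Jacobian evaluation above is spread across non-blocking calls into the model sub-workflows, which can obscure the underlying finite-difference construction. As a plain, self-contained restatement, with a made-up error function standing in for the sub-workflows (the function and variable names here are illustrative only):

import numpy

def finite_difference_jacobian(error_of, a0, vrnc):
    #  error_of : callable returning the error vector e(a) for parameters a
    #  a0       : baseline parameter values
    #  vrnc     : per-parameter scale, used both as the perturbation size and
    #             as the normalization of the parameter axes
    e0 = numpy.asarray(error_of(a0), dtype=float)
    jacobian = numpy.empty((len(a0), len(e0)))
    for j in range(len(a0)):
        a = numpy.array(a0, dtype=float)
        a[j] += vrnc[j]
        #  One row per parameter, matching the transposed storage used above:
        #  the difference of the baseline and perturbed error vectors for a
        #  perturbation of one normalized unit (vrnc).
        jacobian[j] = e0 - numpy.asarray(error_of(a), dtype=float)
    return jacobian

#  Toy usage with a quadratic model in place of the real sub-workflows.
observed = numpy.array([1.0, 2.0, 3.0])
model = lambda a: numpy.array([a[0], a[0] + a[1], a[1]**2])
error_of = lambda a: observed - model(a)
print(finite_difference_jacobian(error_of, [0.5, 1.0], [0.1, 0.1]))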
Example #6
    def init(self, timeStamp=0.0, **keywords):
        ScreenWriter.screen_output(self, 'verbose', 'ml_train: init')

        current_ml_train_state = self.services.get_config_param(
            'CURRENT_ML_TRAIN_STATE')

        #  Stage state.
        self.services.stage_state()

        #  Unzip files from the current state. Use mode a so files can be read and
        #  written to.
        self.zip_ref = ZipState.ZipState(current_ml_train_state, 'a')
        self.zip_ref.extract('training_data.dat')
Example #7
    def lm_step(self, step_size):
        ScreenWriter.screen_output(self, 'verbose', 'quasi_newton_driver: lm_step')

        if step_size > 0.0 and self.delta_a_len[self.k_use] > step_size:
            ut_dot_e = numpy.matmul(self.e, self.j_svd_u)
        
#  Find the L-M parameter lambda that corresponds to a step length of step_size.
            _lambda = self.lm_get_lambda(step_size, ut_dot_e)

#  Find the step.
            return numpy.matmul(ut_dot_e[0:self.k_use]*self.j_svd_w[0:self.k_use]/(self.j_svd_w[0:self.k_use]**2.0 + _lambda), self.j_svd_vt[0:self.k_use])
        else:
            return self.delta_a[self.k_use]
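
Written out, the step assembled above from the singular value decomposition of the Jacobian (get_k_svd applies numpy.linalg.svd to the transpose of the stored array, i.e. to the Jacobian A itself, giving A = U W V^T) and the condition that lm_get_lambda solves for the Levenberg-Marquardt parameter lambda are, in LaTeX form (this is only a restatement of what the code does, not an addition to it):

    \delta a(\lambda) = \sum_{i=1}^{k} \frac{w_i \,(u_i^{T} e)}{w_i^{2} + \lambda}\, v_i,
    \qquad
    \lVert \delta a(\lambda) \rVert^{2}
        = \sum_{i=1}^{k} \left( \frac{w_i \,(u_i^{T} e)}{w_i^{2} + \lambda} \right)^{2}
        = \mathrm{step\_size}^{2}

Here k is the number of retained singular values (k_use), w_i are the singular values, u_i and v_i the corresponding left and right singular vectors, and e the current error vector.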
Example #8
    def finalize(self, timeStamp=0.0):
        ScreenWriter.screen_output(self, 'verbose', 'siesta_driver: finalize')

        self.wait = [
            self.services.call_nonblocking(self.vmec_worker['init'],
                                           'finalize', timeStamp),
            self.services.call_nonblocking(self.vmec_worker['driver'],
                                           'finalize', timeStamp),
            self.services.call_nonblocking(self.siesta_port, 'finalize',
                                           timeStamp)
        ]

        self.services.wait_call_list(self.wait, True)
Example #9
    def init(self, timeStamp=0.0, **keywords):
        ScreenWriter.screen_output(self, 'verbose', 'vmec_driver: init')

        #  Separate out the vmec keywords.
        vmec_keywords = {}
        for key, value in keywords.items():
            if 'vmec__' in key:
                vmec_keywords[key.replace('vmec__', '', 1)] = value

#  Initialize vmec.
        self.vmec_port = self.services.get_port('VMEC')
        self.wait = self.services.call_nonblocking(self.vmec_port, 'init',
                                                   timeStamp, **vmec_keywords)
Example #10
    def init(self, timeStamp=0.0, **keywords):
        ScreenWriter.screen_output(self, 'verbose', 'solps_iter: init')

        #  Set the top of the SOLPS source tree as an environment variable.
        if timeStamp == 0.0:
            self.eirene_database_path = self.services.get_config_param(
                'EIRENE_DATABASE_PATH')
            self.current_solps_state = self.services.get_config_param(
                'CURRENT_SOLPS_STATE')
            try:
                self.diag_geometry = self.services.get_config_param(
                    'DIAGNOSTIC_GEOMETRY')
                self.diag_state = self.services.get_config_param(
                    'DIAGNOSTIC_STATE')
            except:
                pass

            os.environ['SOLPSTOP'] = self.services.get_config_param('SOLPSTOP')

#  Remove existing files.
        for file in os.listdir('.'):
            os.remove(file)

        self.services.stage_state()

        self.zip_ref = ZipState.ZipState(self.current_solps_state, 'a')
        self.zip_ref.extractall()

        #  Create eirene symbolic links.
        os.symlink(os.path.join(self.eirene_database_path, 'graphite_ext.dat'),
                   'graphite_ext.dat')
        os.symlink(os.path.join(self.eirene_database_path, 'mo_ext.dat'),
                   'mo_ext.dat')
        os.symlink(os.path.join(self.eirene_database_path, 'AMJUEL'), 'AMJUEL')
        os.symlink(os.path.join(self.eirene_database_path, 'H2VIBR'), 'H2VIBR')
        os.symlink(os.path.join(self.eirene_database_path, 'HYDHEL'), 'HYDHEL')
        os.symlink(os.path.join(self.eirene_database_path, 'METHANE'),
                   'METHANE')
        os.symlink(os.path.join(self.eirene_database_path, 'PHOTON'), 'PHOTON')
        os.symlink(
            os.path.join(self.eirene_database_path, 'Surfacedata', 'SPUTTER'),
            'SPUTTER')
        os.symlink(
            os.path.join(self.eirene_database_path, 'Surfacedata', 'TRIM',
                         'trim.dat'), 'fort.21')
        os.symlink(
            os.path.join(self.eirene_database_path, 'Surfacedata', 'TRIM',
                         'marlow.dat'), 'fort.22')

        #  Update parameters in the namelist.
        self.set_namelists(**keywords)
Example #11
    def init(self, timeStamp=0.0, **keywords):
        ScreenWriter.screen_output(self, 'verbose', 'solps_iter_driver: init')

#  Separate out the solps_iter keywords.
        solps_keywords = {}
        for key, value in keywords.items():
            if 'solps_iter__' in key:
                solps_keywords[key.replace('solps_iter__','',1)] = value

        self.solps_port = self.services.get_port('SOLPS')
        self.wait = self.services.call_nonblocking(self.solps_port, 'init',
                                                   timeStamp, **solps_keywords)

        if timeStamp == 0.0:
            self.current_solps_state = self.services.get_config_param('CURRENT_SOLPS_STATE')
Example #12
    def step(self, timeStamp=0.0, **keywords):
        ScreenWriter.screen_output(self, 'verbose', 'solps_iter_driver: step')

#  Run SOLPS.
        self.services.wait_call(self.wait, True)
        self.services.call(self.solps_port, 'step', timeStamp, **keywords)

#  Prepare the output files for a super workflow. Any old output files need to
#  be removed before staging the state.
        if os.path.exists(self.OUTPUT_FILES):
            os.remove(self.OUTPUT_FILES)
        self.services.stage_state()

#  The super workflow may need to rename the output file. Check if the current
#  state matches the output file. If it does not, rename the state so it can be
#  staged.
        if not os.path.exists(self.OUTPUT_FILES):
            os.rename(self.current_solps_state, self.OUTPUT_FILES)
Example #13
    def get_exp_dg2(self, delta):
        ScreenWriter.screen_output(self, 'verbose', 'quasi_newton_driver: get_exp_dg2')
        
#  Linear part.
#
#    2e * A * da                                                             (1)
        exp_dg2_lin = 2.0*numpy.dot(self.e, numpy.matmul(numpy.transpose(self.jacobian), delta))
        
#  Quadratic part.
#
#    da * A^T * A * da = da * alpha * da                                     (2)
#
#  Alpha is the Hessian matrix. Equation 14 in Hanson et al.
#  doi: 10.1088/0029-5515/49/7/075031
        exp_dg2_quad = numpy.dot(delta, numpy.matmul(self.hessian, delta))
        
        return exp_dg2_lin - exp_dg2_quad
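
Combining equations (1) and (2) above, the value returned is the expected decrease in g^2 for a proposed normalized step delta; the comments in get_k_svd cite equation 22 of Hanson et al. (doi: 10.1088/0029-5515/49/7/075031) for this estimate. As a restatement of the code in LaTeX form, with alpha = A^T A the approximate Hessian:

    \Delta g^{2}_{\mathrm{exp}}
        = 2\, e^{T} A\, \delta a \;-\; \delta a^{T} A^{T} A\, \delta a
        = 2\, e^{T} A\, \delta a \;-\; \delta a^{T} \alpha\, \delta a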
Example #14
    def step(self, timeStamp=0.0):
        ScreenWriter.screen_output(self, 'verbose', 'vmec_driver: step')

        #  Run vmec.
        self.services.wait_call(self.wait, True)
        self.services.call(self.vmec_port, 'step', timeStamp)

        #  Prepare the output files for a super workflow. Any old output files
        #  need to be removed before staging the state.
        if os.path.exists(self.OUTPUT_FILES):
            os.remove(self.OUTPUT_FILES)
        self.services.stage_state()

        #  The super workflow may need to rename the output file. Check if the
        #  current state matches the output file. If it does not, rename the
        #  state so it can be staged.
        if not os.path.exists(self.OUTPUT_FILES):
            os.rename(self.services.get_config_param('CURRENT_VMEC_STATE'),
                      self.OUTPUT_FILES)
Example #15
    def step(self, timeStamp=0.0, **keywords):
        ScreenWriter.screen_output(self, 'verbose', 'v3fit: step')

        flags = self.zip_ref.get_state()

        if 'state' in flags and flags['state'] == 'needs_update':
            self.set_namelist(my_task='v3post')
            self.task_wait = self.services.launch_task(
                self.NPROC,
                self.services.get_working_dir(),
                self.V3FIT_EXE,
                self.current_v3fit_namelist,
                logfile='v3fit.log')

            #  Update flags.
            self.zip_ref.set_state(state='updated')

            #  Wait for V3FIT to finish.
            if (self.services.wait_task(self.task_wait)
                    and not os.path.exists(self.result_file)):
                self.services.error('v3fit: step failed.')

#  Add the result file to the state.
            self.zip_ref.write([self.current_v3fit_namelist, self.result_file])

        else:
            #  Update flags.
            self.zip_ref.set_state(state='unchanged')

        if 'result_file' in keywords:
            result_nc = OMFITnc(self.result_file)
            nsteps = result_nc['nsteps']['data']
            result = {
                'signal_model':
                result_nc['signal_model_value']['data'][nsteps, :, 0].tolist()
            }
            with open(keywords['result_file'], 'w') as result_ref:
                json.dump(result, result_ref)
            self.zip_ref.write(keywords['result_file'])

        self.zip_ref.close()
        self.services.update_state()
Example #16
    def init(self, timeStamp=0.0):
        ScreenWriter.screen_output(self, 'verbose', 'siesta_init: init')

        #  Get config filenames.
        current_vmec_namelist = self.services.get_config_param(
            'VMEC_NAMELIST_INPUT')
        current_vmec_state = self.services.get_config_param(
            'CURRENT_VMEC_STATE')
        current_siesta_namelist = self.services.get_config_param(
            'SIESTA_NAMELIST_INPUT')
        current_siesta_state = self.services.get_config_param(
            'CURRENT_SIESTA_STATE')

        #  Remove old inputs. Stage input files.
        for file in os.listdir('.'):
            os.remove(file)

        self.services.stage_input_files(self.INPUT_FILES)

        #  Create a vmec state. If the vmec namelist file exists add the namelist input
        #  file.
        with ZipState.ZipState(current_vmec_state, 'a') as zip_ref:
            if os.path.exists(current_vmec_namelist):
                zip_ref.write(current_vmec_namelist)
                zip_ref.set_state(state='needs_update')

#  Create state from files. Input files can either be a new state, namelist
#  input file or both. If both files were staged, replace the namelist input
#  file. If the namelist file is present flag the state as needing to be
#  updated. Sub states will automatically merge.
        with ZipState.ZipState(current_siesta_state, 'a') as zip_ref:
            if os.path.exists(current_siesta_namelist):
                zip_ref.write(current_siesta_namelist)
                zip_ref.set_state(state='needs_update')

#  The vmec state will be merged with any existing vmec state in the siesta
#  state.
            zip_ref.write(current_vmec_state)

        self.services.update_state()
Example #17
    def init(self, timeStamp=0.0):
        ScreenWriter.screen_output(self, 'verbose', 'quasi_newton_init: init')

        #  Get config filenames.
        current_model_state = self.services.get_config_param('MODEL_INPUT')
        quasi_newton_config = self.services.get_config_param(
            'QUASI_NEWTON_CONFIG')
        current_quasi_newton_state = self.services.get_config_param(
            'CURRENT_QUASI_NEWTON_STATE')

        #  Remove old inputs. Stage input files.
        for file in os.listdir('.'):
            os.remove(file)

        self.services.stage_input_files(self.INPUT_FILES)

        #  Create state from files.
        with ZipState.ZipState(current_quasi_newton_state, 'a') as zip_ref:
            zip_ref.write(quasi_newton_config)
            zip_ref.write(current_model_state)

        self.services.update_state()
Example #18
    def lm_get_lambda(self, step_size, ut_dot_e):
        ScreenWriter.screen_output(self, 'verbose', 'quasi_newton_driver: lm_get_lambda')

#  Define a default value in case the root find fails. Note: lambda is a Python
#  builtin, so add an underscore to avoid shadowing it.
        _lambda = (self.j_svd_w[0]*self.delta_a_len[self.k_use]/step_size)**2.0
        
        f_sqrd = ut_dot_e[0:self.k_use]**2.0
        step_size_sqrd = step_size**2.0

#  Define a lambda function for the root finder.
        f = lambda x: numpy.sum(f_sqrd*(self.j_svd_w[0:self.k_use]/(self.j_svd_w[0:self.k_use]**2.0 + x))**2.0) - step_size_sqrd

#  Find values of lambda that bracket the root. A small value of lambda should
#  give a positive function value; f(0) should be greater than zero since the
#  step size at k_use is larger than step_size.
        f_a = f(0)
        if f_a < 0.0:
            return _lambda

        lambda_b = self.j_svd_w[0]**2.0
        f_b = f(lambda_b)
        for i in range(0, 20):
            if f_b <= 0.0:
                break
            lambda_b = 4.0*lambda_b
            f_b = f(lambda_b)

        if f_b > 0.0:
            return _lambda

        if f_a*f_b > 0.0:
            return _lambda

#  Found an interval that brackets the root. Now find the root.
        return optimize.brentq(f, 0.0, lambda_b)
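
The bracketing strategy used above (start from lambda = 0, then grow an upper bound geometrically until the function changes sign) is exactly the precondition scipy's brentq needs. A stripped-down, self-contained version of the same pattern, with made-up singular values and projections standing in for the real SVD quantities:

import numpy
from scipy import optimize

#  Toy stand-ins for the SVD quantities used above.
w = numpy.array([3.0, 1.0, 0.3])          # singular values
ut_dot_e = numpy.array([2.0, 1.5, 0.5])   # U^T * e
step_size = 0.4

f_sqrd = ut_dot_e**2.0
f = lambda x: numpy.sum(f_sqrd*(w/(w**2.0 + x))**2.0) - step_size**2.0

#  Grow the upper bracket until the function changes sign, then root find. The
#  function decreases monotonically toward -step_size^2, so this terminates.
lambda_b = w[0]**2.0
while f(lambda_b) > 0.0:
    lambda_b *= 4.0
_lambda = optimize.brentq(f, 0.0, lambda_b)

#  The resulting step length should match the requested step_size.
print(_lambda, numpy.sqrt(numpy.sum(f_sqrd*(w/(w**2.0 + _lambda))**2.0)))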
Example #19
    def init(self, timeStamp=0.0):
        ScreenWriter.screen_output(self, 'verbose', 'quasi_newton_driver: init')
    
#  Get config filenames.
        self.current_model_state = self.services.get_config_param('MODEL_INPUT')
        self.quasi_newton_config_file = self.services.get_config_param('QUASI_NEWTON_CONFIG')
        self.current_quasi_newton_state = self.services.get_config_param('CURRENT_QUASI_NEWTON_STATE')
        ips_model_config = self.services.get_config_param('MODEL_SIM_CONFIG')

#  Stage state and extract all files.
        self.services.stage_state()
        with ZipState.ZipState(self.current_quasi_newton_state, 'a') as zip_ref:
            zip_ref.extractall()

#  Load the quasi-newton json file.
        with open(self.quasi_newton_config_file, 'r') as config_file:
            quasi_newton_config = json.load(config_file)
            self.signal_sigma = numpy.absolute(numpy.array(quasi_newton_config['signal_sigma']))
            self.signal_observed = numpy.array(quasi_newton_config['signal_observed'])
            self.signal_weights = numpy.sqrt(numpy.array(quasi_newton_config['signal_weights']))
            self.dchi2_tol = quasi_newton_config['dchi2_tol']
        
#  Singular value step controls.
#  Cutoff value for relative singular values.
            if 'cut_svd' in quasi_newton_config:
                self.cut_svd = quasi_newton_config['cut_svd']
            else:
                self.cut_svd = 0.0
#  Cutoff value for expected step efficiency.
            if 'cut_eff' in quasi_newton_config:
                self.cut_eff = quasi_newton_config['cut_eff']
            else:
                self.cut_eff = 0.0
#  Cutoff value for expected marginal step efficiency.
            if 'cut_marg_eff' in quasi_newton_config:
                self.cut_marg_eff = quasi_newton_config['cut_marg_eff']
            else:
                self.cut_marg_eff = 0.0
#  Cutoff value for expected step size.
            if 'cut_delta_a' in quasi_newton_config:
                self.cut_delta_a = quasi_newton_config['cut_delta_a']
            else:
                self.cut_delta_a = 0.0
#  Cutoff value for expected change in g^2.
            if 'cut_dg2' in quasi_newton_config:
                self.cut_dg2 = quasi_newton_config['cut_dg2']
            else:
                self.cut_dg2 = 0.0

#  Minimization controls
            if 'max_step' in quasi_newton_config:
                self.max_step = quasi_newton_config['max_step']
            else:
                self.max_step = 100.0

#  Maximum reconstruction steps
            if 'max_recon_steps' in quasi_newton_config:
                self.max_recon_steps = quasi_newton_config['max_recon_steps']
            else:
                self.max_recon_steps = 20

#  Maximum number of tries a step can take to reduce g^2
            if 'max_step_try' in quasi_newton_config:
                self.max_step_try = quasi_newton_config['max_step_try']
            else:
                self.max_step_try = 10

#  Set keys for the subworkflows.
            keys = {'PWD'              : self.services.get_config_param('PWD'),
                    'USER_INPUT_FILES' : self.current_model_state,
                    'OUTPUT_LEVEL'     : self.services.get_config_param('OUTPUT_LEVEL')}

#  Copy the model state to the input file staging directory. Since all the
#  model instances start from the same state, we only need this in one place.
            if os.path.exists('model_inputs'):
                shutil.rmtree('model_inputs')
            os.mkdir('model_inputs')
            shutil.copy2(self.current_model_state, 'model_inputs')

            keywords = {}

            for i, param in enumerate(quasi_newton_config['params']):
            
                self.model_workers.append({'sim_name': None, 'init': None, 'driver': None,
                                           'result'  : '{}_result.json'.format(param['name']),
                                           'output'  : '{}_model_state.zip'.format(param['name']),
                                           'name'    : param['name'],
                                           'vrnc'    : param['vrnc'],
                                           'value'   : param['init'],
                                           'scale'   : (float(i) + 1.0)/float(len(quasi_newton_config['params']))})

                keys['SIM_NAME'] = param['name']
                keys['LOG_FILE'] = 'log.{}'.format(param['name'])
                keys['USER_OUTPUT_FILES'] = self.model_workers[i]['output']

                (self.model_workers[i]['sim_name'],
                 self.model_workers[i]['init'],
                 self.model_workers[i]['driver']) = self.services.create_sub_workflow(param['name'], ips_model_config,
                                                                                      keys, 'model_inputs')
                
                self.model_workers[i]['wait'] = self.services.call_nonblocking(self.model_workers[i]['init'],
                                                                               'init', timeStamp)

                keywords[param['name']] = param['init']

#  Run the initial convergence. Only one model needs to run, but the staging
#  expects output from each sub-workflow, so we need to launch them all.
            for worker in self.model_workers:
                self.services.wait_call(worker['wait'], True)
                worker['wait'] = self.services.call_nonblocking(worker['driver'], 'init', timeStamp,
                                                                **keywords)

            for worker in self.model_workers:
                self.services.wait_call(worker['wait'], True)
                worker['wait'] = self.services.call_nonblocking(worker['driver'], 'step', timeStamp,
                                                                result_file=worker['result'])

            for worker in self.model_workers:
                self.services.wait_call(worker['wait'], True)
                            
            self.services.stage_subflow_output_files()
                
            with ZipState.ZipState(self.model_workers[0]['output'], 'a') as zip_ref:
                zip_ref.extract(self.model_workers[0]['result'])
                with open(self.model_workers[0]['result'], 'r') as result_file:
                    self.e = self.get_e(result_file)

#  The initial model state may have changed. Reset it from the output of one of
#  the workers.
            os.rename(self.model_workers[0]['output'], self.current_model_state)
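
Everything this init method reads from the QUASI_NEWTON_CONFIG file follows from the keys accessed above. As an illustration only, a script that writes a configuration with that shape might look like the following; the file name, parameter names, and all values here are placeholders, and the optional cutoff and minimization keys can be omitted to fall back on the defaults coded above.

import json

quasi_newton_config = {
    #  Observed signals, their uncertainties, and their weights.
    'signal_observed': [1.2e-3, 4.5e-3, -2.1e-3],
    'signal_sigma':    [1.0e-4, 1.0e-4,  1.0e-4],
    'signal_weights':  [1.0, 1.0, 1.0],

    #  Convergence tolerance on the change in chi^2.
    'dchi2_tol': 0.1,

    #  Optional singular value step controls.
    'cut_svd':      0.0,
    'cut_eff':      0.0,
    'cut_marg_eff': 0.0,
    'cut_delta_a':  0.0,
    'cut_dg2':      0.0,

    #  Optional minimization controls.
    'max_step':        100.0,
    'max_recon_steps': 20,
    'max_step_try':    10,

    #  Reconstruction parameters: name, initial value, and normalization
    #  (perturbation) scale.
    'params': [
        {'name': 'param_a', 'init': 0.5, 'vrnc': 0.1},
        {'name': 'param_b', 'init': 1.0, 'vrnc': 0.2}
    ]
}

with open('quasi_newton_config.json', 'w') as config_file:
    json.dump(quasi_newton_config, config_file, indent=4)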
Example #20
    def init(self, timeStamp=0.0):
        ScreenWriter.screen_output(self, 'verbose', 'v3fit_init: init')

        #  Get config filenames.
        current_vmec_namelist = self.services.get_config_param(
            'VMEC_NAMELIST_INPUT')
        current_vmec_state = self.services.get_config_param(
            'CURRENT_VMEC_STATE')
        current_siesta_namelist = self.services.get_config_param(
            'SIESTA_NAMELIST_INPUT')
        current_siesta_state = self.services.get_config_param(
            'CURRENT_SIESTA_STATE')
        current_v3fit_namelist = self.services.get_config_param(
            'V3FIT_NAMELIST_INPUT')
        current_v3fit_state = self.services.get_config_param(
            'CURRENT_V3FIT_STATE')

        #  Remove old inputs. Stage input files.
        for file in os.listdir('.'):
            os.remove(file)

        self.services.stage_input_files(self.INPUT_FILES)

        #  All v3fit runs require a vmec state at the minimum. Create a vmec state. If
        #  the vmec namelist file exists add the namelist input file.
        with ZipState.ZipState(current_vmec_state, 'a') as zip_ref:
            if os.path.exists(current_vmec_namelist):
                zip_ref.write(current_vmec_namelist)
                zip_ref.set_state(state='needs_update')

#  A siesta state is optional. If a siesta state or namelist exist, create a
#  siesta state. If the siesta namelist or vmec state files exists add
#  them to the siesta state.
        if os.path.exists(current_siesta_state) or os.path.exists(
                current_siesta_namelist):
            with ZipState.ZipState(current_siesta_state, 'a') as zip_ref:
                if os.path.exists(current_siesta_namelist):
                    zip_ref.write(current_siesta_namelist)
                    zip_ref.set_state(state='needs_update')

#  The vmec state will be merged with any existing vmec state in the siesta
#  state.
                zip_ref.write(current_vmec_state)

#  Create state from files. Input files can either be a new state, namelist
#  input file or both. If both files were staged, replace the namelist input
#  file. If the namelist file is present flag the state as needing to be
#  updated.
        with ZipState.ZipState(current_v3fit_state, 'a') as zip_ref:
            if os.path.exists(current_v3fit_namelist):
                zip_ref.write(current_v3fit_namelist)
                zip_ref.set_state(state='needs_update')

#  If a siesta state exists at this point add it to the archive. Otherwise add
#  the vmec state.
            if os.path.exists(current_siesta_state):
                zip_ref.write(current_siesta_state)
            else:
                zip_ref.write(current_vmec_state)

        self.services.update_state()
Example #21
    def finalize(self, timeStamp=0.0):
        ScreenWriter.screen_output(self, 'verbose', 'ml_train_driver: finalize')
        self.services.call(self.ml_train_port, 'finalize', timeStamp)
Example #22
    def init(self, timeStamp=0.0, **keywords):
        ScreenWriter.screen_output(self, 'verbose', 'vmec: init')

        self.current_vmec_namelist = self.services.get_config_param(
            'VMEC_NAMELIST_INPUT')
        self.current_wout_file = 'wout_{}.nc'.format(
            self.current_vmec_namelist.replace('input.', '', 1))
        current_vmec_state = self.services.get_config_param(
            'CURRENT_VMEC_STATE')

        #  Stage state.
        self.services.stage_state()

        #  Unzip files from the state. Use mode a so files can be read and written to.
        self.zip_ref = ZipState.ZipState(current_vmec_state, 'a')
        self.zip_ref.extract(self.current_vmec_namelist)

        if len(keywords) > 0:
            self.zip_ref.set_state(state='needs_update')

            #  Update parameters in the namelist.
            namelist = OMFITnamelist(self.current_vmec_namelist,
                                     collect_arrays={
                                         'ns_array': {
                                             'default': 0,
                                             'shape': (100, ),
                                             'offset': (1, ),
                                             'sparray': True
                                         },
                                         'niter_array': {
                                             'default': 0,
                                             'shape': (100, ),
                                             'offset': (1, ),
                                             'sparray': True
                                         },
                                         'rbs': {
                                             'default': 0,
                                             'shape': (203, 101),
                                             'offset': (-101, 0),
                                             'sparray': True
                                         },
                                         'rbc': {
                                             'default': 0,
                                             'shape': (203, 101),
                                             'offset': (-101, 0),
                                             'sparray': True
                                         },
                                         'zbs': {
                                             'default': 0,
                                             'shape': (203, 101),
                                             'offset': (-101, 0),
                                             'sparray': True
                                         },
                                         'zbc': {
                                             'default': 0,
                                             'shape': (203, 101),
                                             'offset': (-101, 0),
                                             'sparray': True
                                         },
                                         'am': {
                                             'default': 0,
                                             'shape': (21, ),
                                             'offset': (0, ),
                                             'sparray': True
                                         },
                                         'ai': {
                                             'default': 0,
                                             'shape': (21, ),
                                             'offset': (0, ),
                                             'sparray': True
                                         },
                                         'ac': {
                                             'default': 0,
                                             'shape': (21, ),
                                             'offset': (0, ),
                                             'sparray': True
                                         },
                                         'am_aux_s': {
                                             'default': 0,
                                             'shape': (10001, ),
                                             'offset': (1, ),
                                             'sparray': True
                                         },
                                         'am_aux_f': {
                                             'default': 0,
                                             'shape': (10001, ),
                                             'offset': (1, ),
                                             'sparray': True
                                         },
                                         'ai_aux_s': {
                                             'default': 0,
                                             'shape': (10001, ),
                                             'offset': (1, ),
                                             'sparray': True
                                         },
                                         'ai_aux_f': {
                                             'default': 0,
                                             'shape': (10001, ),
                                             'offset': (1, ),
                                             'sparray': True
                                         },
                                         'ac_aux_s': {
                                             'default': 0,
                                             'shape': (10001, ),
                                             'offset': (1, ),
                                             'sparray': True
                                         },
                                         'ac_aux_f': {
                                             'default': 0,
                                             'shape': (10001, ),
                                             'offset': (1, ),
                                             'sparray': True
                                         },
                                         'raxis': {
                                             'default': 0,
                                             'shape': (102, ),
                                             'offset': (0, ),
                                             'sparray': True
                                         },
                                         'zaxis': {
                                             'default': 0,
                                             'shape': (102, ),
                                             'offset': (0, ),
                                             'sparray': True
                                         },
                                         'raxis_cc': {
                                             'default': 0,
                                             'shape': (102, ),
                                             'offset': (0, ),
                                             'sparray': True
                                         },
                                         'raxis_cs': {
                                             'default': 0,
                                             'shape': (102, ),
                                             'offset': (0, ),
                                             'sparray': True
                                         },
                                         'zaxis_cc': {
                                             'default': 0,
                                             'shape': (102, ),
                                             'offset': (0, ),
                                             'sparray': True
                                         },
                                         'zaxis_cs': {
                                             'default': 0,
                                             'shape': (102, ),
                                             'offset': (0, ),
                                             'sparray': True
                                         },
                                         'ftol_array': {
                                             'default': 0,
                                             'shape': (100, ),
                                             'offset': (1, ),
                                             'sparray': True
                                         },
                                         'extcur': {
                                             'default': 0,
                                             'shape': (300, ),
                                             'offset': (1, ),
                                             'sparray': True
                                         }
                                     })

            for key, value in keywords.items():
                NamelistItem.set(namelist['indata'], key, value)

            namelist.save()
Example #23
    def finalize(self, timeStamp=0.0):
        ScreenWriter.screen_output(self, 'verbose', 'vmec: finalize')
Example #24
    def get_k_svd(self):
        ScreenWriter.screen_output(self, 'verbose', 'quasi_newton_driver: get_k_svd')
    
#  Approximate the hessian.
#
#    alpha = A^T * A                                                         (1)
#
#  The gradient
#
#    beta = A^T * e                                                          (2)
#
        self.hessian = numpy.matmul(self.jacobian, numpy.transpose(self.jacobian))
        self.gradient = numpy.dot(self.jacobian, self.e)

#  Compute the steepest descent step size in normalized parameter space.
#
#    da = beta * beta * beta / (beta * alpha * beta)                         (3)
        self.delta_a[0,:] = self.gradient*numpy.dot(self.gradient, self.gradient)/numpy.dot(self.gradient, numpy.matmul(self.hessian, self.gradient))
        
#  Singular value decomposition of the Jacobian.
        self.j_svd_u, self.j_svd_w, self.j_svd_vt = numpy.linalg.svd(numpy.transpose(self.jacobian))
        
#  Define Inverse singular values.
        temp_work1 = numpy.where(self.j_svd_w > 0, 1.0/self.j_svd_w, 0)
#    U^T * e                                                                 (4)
        temp_work2 = numpy.dot(numpy.transpose(self.j_svd_u), self.e)
        
#  Perform pseudo-inversion for successive numbers of singular values retained.
        temp_work3 = numpy.zeros(len(self.model_workers))
        for i in range(0, len(self.j_svd_w)):
            temp_work3[i] = temp_work1[i]*temp_work2[i]
            self.delta_a[i + 1,:] = numpy.matmul(numpy.transpose(self.j_svd_vt), temp_work3)
    
#  Estimate the expected changes in g^2. Equation 22 in Hanson et al.
#  doi: 10.1088/0029-5515/49/7/075031
        exp_dg2 = numpy.empty(len(self.j_svd_w) + 1, dtype=float)
        self.delta_a_len = numpy.empty(len(self.j_svd_w) + 1, dtype=float)
        exp_eff = numpy.empty(len(self.j_svd_w) + 1, dtype=float)
        for i in range(0, len(exp_dg2)):
            exp_dg2[i] = self.get_exp_dg2(self.delta_a[i,:])
            self.delta_a_len[i] = math.sqrt(numpy.dot(self.delta_a[i,:], self.delta_a[i,:]))
            
#  Equation 23 in Hanson et al. doi: 10.1088/0029-5515/49/7/075031
            exp_eff[i] = abs(exp_dg2[i])/self.delta_a_len[i]
        
        
#  Although marginal efficiencies are not strictly defined for the Steepest
#  Descent (index 0) and single singular value (index 1) cases, for convenience
#  define them here.
        marg_exp_eff = numpy.empty(len(self.j_svd_w) + 1, dtype=float)
        marg_exp_eff[0:2] = exp_eff[0:2]
        for i in range(2, len(exp_dg2)):
            d_len = max(self.delta_a_len[i] - self.delta_a_len[i - 1], 1.0e-10)
            marg_exp_eff[i] = (abs(exp_dg2[i]) - abs(exp_dg2[i - 1]))/d_len

#  Check for cutoffs.
        largest_w = 1.0
        if self.j_svd_w[0] > 0:
            largest_w = self.j_svd_w[0]

#  Find the largest number of singular values to use.
        for i in range(len(self.j_svd_w), 1, -1):
            meets_cut =               (self.j_svd_w[i - 1]/largest_w >= self.cut_svd)
            meets_cut = meets_cut and (exp_eff[i]                    >= self.cut_eff)
            meets_cut = meets_cut and (marg_exp_eff[i]               >= self.cut_marg_eff)
            meets_cut = meets_cut and (self.delta_a_len[i]           >= self.cut_delta_a)
            meets_cut = meets_cut and (numpy.abs(exp_dg2[i])         >= self.cut_dg2)
            if meets_cut:
                return i

        #  No candidate met all the cutoff criteria; use no singular values.
        return 0
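
The core of the method above is the family of candidate steps built by retaining successively more singular values: index 0 is the steepest descent step with the optimal quadratic-model length, and index k is the pseudo-inverse solution using the k largest singular values. A compact, self-contained restatement of just that construction, without the cutoff logic (names here are illustrative):

import numpy

def candidate_steps(jacobian_t, e):
    #  jacobian_t : Jacobian stored transposed (n_params x n_signals), as above
    #  e          : current error vector (n_signals)
    n_params = jacobian_t.shape[0]
    hessian = numpy.matmul(jacobian_t, numpy.transpose(jacobian_t))
    gradient = numpy.dot(jacobian_t, e)

    delta_a = numpy.zeros((min(len(e), n_params) + 1, n_params))

    #  Index 0: steepest descent with the optimal quadratic-model length,
    #  da = (beta.beta) beta / (beta.alpha.beta).
    delta_a[0] = gradient*numpy.dot(gradient, gradient) \
                 / numpy.dot(gradient, numpy.matmul(hessian, gradient))

    #  Indices 1..k: pseudo-inverse retaining the k largest singular values.
    u, w, vt = numpy.linalg.svd(numpy.transpose(jacobian_t))
    ut_e = numpy.dot(numpy.transpose(u), e)
    work = numpy.zeros(n_params)
    for i in range(len(w)):
        work[i] = ut_e[i]/w[i] if w[i] > 0 else 0.0
        delta_a[i + 1] = numpy.matmul(numpy.transpose(vt), work)
    return delta_a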
Example #25
    def init(self, timeStamp=0.0):
        ScreenWriter.screen_output(self, 'verbose', 'solps_iter_init: init')

        #  Get config filenames.
        current_solps_state = self.services.get_config_param(
            'CURRENT_SOLPS_STATE')
        eirene_input_dat = self.services.get_config_param('EIRENE_INPUT_DAT')
        eirene_nodes = self.services.get_config_param('EIRENE_NODES')
        eirene_cells = self.services.get_config_param('EIRENE_CELLS')
        eirene_links = self.services.get_config_param('EIRENE_LINKS')

        #  Remove old inputs. Stage input files.
        for file in os.listdir('.'):
            os.remove(file)

        self.services.stage_input_files(self.INPUT_FILES)

        #  Create state zip file.
        with ZipState.ZipState(current_solps_state, 'a') as zip_ref:
            #  b2 files
            if os.path.exists('b2fgmtry'):
                zip_ref.write('b2fgmtry')
                zip_ref.set_state(state='needs_update')
            if os.path.exists('b2fpardf'):
                zip_ref.write('b2fpardf')
                zip_ref.set_state(state='needs_update')
            if os.path.exists('b2frates'):
                zip_ref.write('b2frates')
                zip_ref.set_state(state='needs_update')
            if os.path.exists('b2fstati'):
                zip_ref.write('b2fstati')
                zip_ref.set_state(state='needs_update')
            if os.path.exists('b2mn.dat'):
                zip_ref.write('b2mn.dat')
                zip_ref.set_state(state='needs_update')

#  Namelist files.
            if os.path.exists('b2.transport.parameters'):
                zip_ref.write('b2.transport.parameters')
                zip_ref.set_state(state='needs_update')
            if os.path.exists('b2.numerics.parameters'):
                zip_ref.write('b2.numerics.parameters')
                zip_ref.set_state(state='needs_update')
            if os.path.exists('b2.neutrals.parameters'):
                zip_ref.write('b2.neutrals.parameters')
                zip_ref.set_state(state='needs_update')
            if os.path.exists('b2.boundary.parameters'):
                zip_ref.write('b2.boundary.parameters')
                zip_ref.set_state(state='needs_update')
            if os.path.exists('b2.transport.inputfile'):
                zip_ref.write('b2.transport.inputfile')
                zip_ref.set_state(state='needs_update')

#  eirene files. We need to rename the eirene input files.
            if os.path.exists(eirene_input_dat):
                os.rename(eirene_input_dat, 'fort.1')
                zip_ref.write('fort.1')
                zip_ref.set_state(state='needs_update')
            if os.path.exists(eirene_nodes):
                os.rename(eirene_nodes, 'fort.33')
                zip_ref.write('fort.33')
                zip_ref.set_state(state='needs_update')
            if os.path.exists(eirene_cells):
                os.rename(eirene_cells, 'fort.34')
                zip_ref.write('fort.34')
                zip_ref.set_state(state='needs_update')
            if os.path.exists(eirene_links):
                os.rename(eirene_links, 'fort.35')
                zip_ref.write('fort.35')
                zip_ref.set_state(state='needs_update')

        self.services.update_state()

#-------------------------------------------------------------------------------
#
#  SOLPS-ITER_init init Component step method. Not used.
#
#-------------------------------------------------------------------------------
    def step(self, timeStamp=0.0):
        ScreenWriter.screen_output(self, 'verbose', 'solps_iter_init: step')

#-------------------------------------------------------------------------------
#
#  SOLPS-ITER_init init Component finalize method. This cleans up afterwards.
#  Not used.
#
#-------------------------------------------------------------------------------
    def finalize(self, timeStamp=0.0):
        ScreenWriter.screen_output(self, 'verbose', 'solps_iter_init: finalize')
Example #26
    def step(self, timeStamp=0.0):
        ScreenWriter.screen_output(self, 'verbose', 'ml_train_init: step')
Example #27
    def finalize(self, timeStamp=0.0):
        ScreenWriter.screen_output(self, 'verbose', 'quasi_newton_driver: finalize')

        for worker in self.model_workers:
            worker['wait'] = [
                              self.services.call_nonblocking(worker['init'], 'finalize', timeStamp),
                              self.services.call_nonblocking(worker['driver'], 'finalize', timeStamp)
                             ]

        for worker in self.model_workers:
            self.services.wait_call_list(worker['wait'], True)

        ScreenWriter.screen_output(self, 'quiet', '-------------------------------------------------------------------------------------------------')
        ScreenWriter.screen_output(self, 'quiet', '{:<4} : {:<12} : {:<12} : {:<6} : {:<12}'.format('Step', 'chi^2', 'dchi^2', 'Num SV', 'Norm Size'))
        ScreenWriter.screen_output(self, 'quiet', '')
        for step in self.history:
            if "DChi2" in step:
                ScreenWriter.screen_output(self, 'quiet', '{Time:>4.0f} : {Chi2:>12.5e} : {DChi2:>12.5e} : {Num SV:>6} : {Size:>12.5e}'.format(**step))
            else:
                ScreenWriter.screen_output(self, 'quiet', '{Time:>4.0f} : {Chi2:>12.5e}'.format(**step))
        ScreenWriter.screen_output(self, 'quiet', '-------------------------------------------------------------------------------------------------')

        correlation_matrix = numpy.linalg.inv(self.hessian)
        ScreenWriter.screen_output(self, 'quiet', '{:<50} {:<12} {:<12}'.format('Parameter', 'Value', 'Sigma'))
        ScreenWriter.screen_output(self, 'quiet', '')
        for i, worker in enumerate(self.model_workers):
            worker['sigma'] = math.sqrt(correlation_matrix[i,i]*worker['vrnc']**2.0)
            ScreenWriter.screen_output(self, 'quiet', '{name:<50} {value:>12.5e} {sigma:>12.5e}'.format(**worker))
        ScreenWriter.screen_output(self, 'quiet', '-------------------------------------------------------------------------------------------------')
Example #28
    def step(self, timeStamp=0.0):
        ScreenWriter.screen_output(self, 'verbose', 'quasi_newton_driver: step')

#  Compute chi^2. Set the initial change in chi^2 higher than the tolerance to
#  ensure at least one iteration of the while loop is performed.
        self.chi2 = numpy.dot(self.e, self.e)
        dchi2 = self.dchi2_tol + 1.0
        ScreenWriter.screen_output(self, 'quiet',   '-------------------------------------------------------------------------------------------------')
        ScreenWriter.screen_output(self, 'quiet',   'Step {:>4.0f} : chi^2 = {:12.5e}'.format(timeStamp, self.chi2))
        ScreenWriter.screen_output(self, 'verbose', '-------------------------------------------------------------------------------------------------')

        self.jacobian = numpy.empty((len(self.model_workers), self.e.size), dtype=float)
        self.hessian = numpy.empty((len(self.model_workers), len(self.model_workers)), dtype=float)
        self.gradient = numpy.empty(len(self.model_workers), dtype=float)
        self.delta_a = numpy.empty((min(len(self.e), len(self.model_workers)) + 1, len(self.model_workers)), dtype=float)

        self.history.append({'Time' : timeStamp, 'Chi2' : self.chi2})

#  Perform a quasi-newton minimization.
        while dchi2 > self.dchi2_tol and timeStamp < self.max_recon_steps:
            timeStamp += 1.0

            self.eval_jacobian(timeStamp)
            if self.try_step(timeStamp):
                new_chi2 = numpy.dot(self.e, self.e)
                dchi2 = self.chi2 - new_chi2
                self.chi2 = new_chi2
                self.history.append({'Time' : timeStamp, 'Chi2' : self.chi2, 'DChi2' : dchi2, 'Num SV' : self.k_use, 'Size' : self.norm_len})
                ScreenWriter.screen_output(self, 'verbose', '-------------------------------------------------------------------------------------------------')
                ScreenWriter.screen_output(self, 'quiet',   'Step {:>4.0f} : chi^2 = {:12.5e} : dchi^2 = {:12.5e} : Num SV = {:4} : Norm Size {:12.5e}'.format(timeStamp, self.chi2, dchi2, self.k_use, self.norm_len))
                ScreenWriter.screen_output(self, 'verbose', '-------------------------------------------------------------------------------------------------')
            else:
                break
Example #29
    def finalize(self, timeStamp=0.0):
        ScreenWriter.screen_output(self, 'verbose', 'ml_train_init: finalize')
Example #30
    def try_step(self, timeStamp=0.0):
        ScreenWriter.screen_output(self, 'verbose', 'quasi_newton_driver: try_step')

        self.k_use = self.get_k_svd()

#  Try different Levenberg-Marquardt step sizes.
        new_max = min(self.delta_a_len[self.k_use], self.max_step)
        step_use = numpy.empty(len(self.model_workers), dtype=float)
        delta_try = numpy.empty((len(self.model_workers), len(self.model_workers)), dtype=float)
        e_try = numpy.empty((len(self.model_workers), len(self.signal_observed)), dtype=float)
        chi2try = numpy.empty(len(self.model_workers), dtype=float)
        
        num_trys = 0
        
        while num_trys < self.max_step_try:
            num_trys += 1
            
            for i, worker in enumerate(self.model_workers):
                step_use[i] = new_max - i*new_max/(2.0*len(self.model_workers))
                delta_try[i] = self.lm_step(step_use[i])

#  Set new parameters.
                keywords = {}
                for j, worker2 in enumerate(self.model_workers):
                    keywords[worker2['name']] = worker2['value'] + delta_try[i,j]*worker2['vrnc']
                self.services.wait_call(worker['wait'], True)
                worker['wait'] = self.services.call_nonblocking(worker['driver'],
                                                                'init', timeStamp,
                                                                **keywords)

#  Recompute the model.
            for worker in self.model_workers:
                self.services.wait_call(worker['wait'], True)
                worker['wait'] = self.services.call_nonblocking(worker['driver'], 'step', timeStamp,
                                                                result_file=worker['result'])

#  Collect the results.
            for worker in self.model_workers:
                self.services.wait_call(worker['wait'], True)

            self.services.stage_subflow_output_files()

#  Compute chi^2 for each attempted step and keep the smallest.
            with open('chi.log', 'a') as chi_ref:
                chi_ref.write('Chi step {}\n'.format(timeStamp))
                for i, worker in enumerate(self.model_workers):
                    with ZipState.ZipState(worker['output'], 'a') as zip_ref:
                        zip_ref.extract(worker['result'])
                    with open(worker['result'], 'r') as result_file:
                        e_try[i] = self.get_e(result_file)
                        chi2try[i] = numpy.dot(e_try[i], e_try[i])

                    chi_ref.write('chi2 = {} : '.format(chi2try[i]))
                    e_try[i].tofile(chi_ref, sep=',', format='%12.5e')
                    chi_ref.write('\n')
                chi_ref.write('\n')

            i_min = numpy.argmin(chi2try)
            if chi2try[i_min] <= self.chi2:
#  Chi^2 decreased. Set the best case to the current model.
                os.rename(self.model_workers[i_min]['output'], self.current_model_state)
                shutil.copy2(self.current_model_state, 'model_inputs')
                self.e = e_try[i_min]
                
#  Set the new parameter values.
                current_values = {}
                for i, worker in enumerate(self.model_workers):
                    worker['value'] += delta_try[i_min,i]*worker['vrnc']
                    current_values[worker['name']] = worker['value']
                
#  Dump current values to a json file. This can be used for restarting a
#  reconstruction.
                with open('current_values.json', 'w') as current_values_file:
                    json.dump(current_values, current_values_file)

                self.norm_len = numpy.sqrt(numpy.dot(delta_try[i_min], delta_try[i_min]))

                return True
            else:
#  Cut the step size in half and reset the model.
                new_max /= 2.0
                for worker in self.model_workers:
                    worker['wait'] = self.services.call_nonblocking(worker['init'],
                                                                    'init', timeStamp)

        return False
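
For reference, the grid of trial Levenberg-Marquardt step lengths above follows directly from the loop over workers: with N model workers and a current maximum length s_max (new_max in the code), worker i tries

    s_i = s_{\max} \left( 1 - \frac{i}{2N} \right), \qquad i = 0, \dots, N - 1,

so the trial lengths span s_max down to just above s_max/2. Whenever none of them reduces chi^2, the maximum itself is halved (new_max /= 2.0) and the models are reset, for up to max_step_try attempts.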