# Ejemplo n.º 1 ("Example no. 1" — scraped snippet boundary marker)
def run_compare(next_folder, this_folder, err_type):
    """Run the Chombo compare utility between the final plot files of two runs.

    The final plot file in this_folder is compared against the final plot
    file of the higher-resolution run in next_folder (treated as "exact"),
    writing results into an 'error-<err_type>' subdirectory of this_folder.

    :param next_folder: folder containing the higher-resolution ("exact") run
    :param this_folder: folder containing the run to compute errors for
    :param err_type: label used to name the error folder and output files
    """

    print('Running compare between this folder %s \n and next folder %s' %
          (this_folder, next_folder))

    # Check both folders contain a final plot file; bail out otherwise.
    # (Previously an unused `valid_files` list was also built here.)
    final_computed_plt_file = get_final_plot_file(this_folder)
    if not final_computed_plt_file:
        print('No plot files found in this folder')
        return

    final_exact_plt_file = get_final_plot_file(next_folder)
    if not final_exact_plt_file:
        print('No plot files found in higher resolution folder')
        return

    # Locate the ChomboCompare executable via $CHOMBO_HOME
    chombo_dir = os.environ['CHOMBO_HOME']
    compare_dir = os.path.join(chombo_dir, 'lib', 'util', 'ChomboCompare')
    compare_exec = get_executable_name(compare_dir, 'compare2d')
    compare_exec = os.path.join(compare_dir, compare_exec)

    this_err_folder = os.path.join(this_folder, 'error-' + err_type)

    if not os.path.exists(this_err_folder):
        os.makedirs(this_err_folder)

    compare_params_file = os.path.join(this_err_folder,
                                       'compare-' + err_type + '.inputs')
    error_file = os.path.join(this_err_folder, 'err-' + err_type + '.2d.hdf5')

    computed_file = os.path.join(this_folder, final_computed_plt_file)
    exact_file = os.path.join(next_folder, final_exact_plt_file)
    # sameSize=0: the two runs are at different resolutions, so compare
    # must coarsen/average the exact solution onto the computed grid
    compare_params = {
        'compare.sameSize': 0,
        'compare.exactRoot': exact_file,
        'compare.computedRoot': computed_file,
        'compare.errorRoot': error_file,
        'compare.doPlots': 1,
        'compare.HOaverage': 0,
        'compare.no_average_var': 'T err'
    }

    write_inputs(compare_params_file, compare_params)
    cmd = 'cd %s \n %s %s \n \n' % (this_err_folder, compare_exec,
                                    compare_params_file)

    print(cmd)

    os.system(cmd)
def create_refined_restart(argv):
    """Create a refined restart (checkpoint) file for a new, finer run.

    Reads the final checkpoint of a previous run, then invokes the
    'setupnewrun' executable to produce a refined restart.2d.hdf5 in the
    new directory.

    :param argv: command line arguments, e.g.
                 ['-n', new_dir, '-p', prev_dir, '-r', '2']
    """
    usage = ('create_refined_restart.py -n<new folder> -p<previous dir>'
             ' -r<refinement>')

    # Default vals:
    old_dir = ''
    new_dir = ''
    refinement = None

    try:
        opts, args = getopt.getopt(argv, "n:p:r:")
    except getopt.GetoptError as err:
        print(str(err))
        print(usage)
        sys.exit(2)

    for opt, arg in opts:
        if opt == "-n":
            new_dir = str(arg)
        elif opt == "-p":
            old_dir = str(arg)
        elif opt == "-r":
            refinement = int(arg)

    # All three options are required. Previously a missing -r left
    # refinement == '' and int(max_grid_size) * '' silently produced an
    # empty string for the box size instead of failing.
    if not old_dir or not new_dir or refinement is None:
        print(usage)
        sys.exit(2)

    prev_chk_file = os.path.join(old_dir, get_final_chk_file(old_dir))
    old_inputs_loc = os.path.join(old_dir, 'inputs')
    old_inputs = read_inputs(old_inputs_loc)
    # Scale the box size with the refinement so the grid layout is preserved
    new_box_size = int(old_inputs['main.max_grid_size']) * refinement

    out_file = os.path.join(new_dir, 'restart.2d.hdf5')

    new_inputs = {'inFile': prev_chk_file,
                  'run_inputs': old_inputs_loc,
                  'outFile': out_file,
                  'box_size': new_box_size,
                  'refinement': refinement}

    new_inputs_loc = os.path.join(new_dir, 'inputsRefine')
    write_inputs(new_inputs_loc, new_inputs)

    # Run refine code
    exec_file = os.path.join(get_mushy_layer_dir(),
                             'setupNewRun', get_executable('setupnewrun'))

    cmd = 'cd %s; %s %s' % (new_dir, exec_file, new_inputs_loc)
    print(cmd)
    os.system(cmd)
    def make_simulation(self):
        """Create this simulation's folder and write its inputs file.

        Prints a message and does nothing if the folder already exists or
        no inputs have been defined; otherwise marks the simulation as
        UNSTARTED.
        """
        # Guard clause: never clobber an existing run directory
        if os.path.exists(self.folder):
            print('Unable to make simulation as folder exists: %s' %
                  self.folder)
            return

        # Guard clause: nothing to write without inputs
        if self.inputs is None:
            print('Unable to make simulation as no inputs have been defined')
            return

        os.makedirs(self.folder)          # make the run folder
        write_inputs(self.inputs_file_loc, self.inputs)  # persist inputs

        self.status = SimulationStatus.UNSTARTED
    # NOTE(review): this is the tail of a function whose definition is not
    # visible here; `inputs`, `st`, `opt`, `T_max`, `cr` and
    # `full_output_folder` are defined earlier in that function.
    # Fill in physical parameters and boundary conditions for the run.
    inputs['parameters.stefan'] = st
    inputs['bc.temperatureHiVal'] = [0, opt['F']]
    inputs['bc.aHi'] = [0, opt['a']]
    inputs['bc.bHi'] = [0, opt['b']]
    inputs['bc.TRefHi'] = [0, opt['Tref']]
    # Enthalpy BCs: presumably liquid enthalpy = T + Stefan number — confirm
    inputs['bc.enthalpyLoVal'] = [0.0, T_max + st]
    inputs['bc.enthalpyHiVal'] = [0.0, T_max + st]
    inputs['parameters.nonDimVel'] = opt['V']
    inputs['parameters.compositionRatio'] = cr

    inputs['bc.HC_noflux'] = True

    inputs['main.output_folder'] = full_output_folder

    # Write the completed inputs file into the output folder
    new_inputs_loc = os.path.join(full_output_folder, 'inputs')
    mushyLayerRunUtils.write_inputs(new_inputs_loc, inputs)

    exec_loc = mushyLayerRunUtils.get_executable_name(return_full_path=True)

    # Run the simulation from inside the output folder, blocking until done
    cmd = 'cd %s; %s inputs' % (full_output_folder, exec_loc)

    print(cmd)
    print('Running... (usually takes ~ 30 seconds) ')

    process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
    process.wait()
    print(process.returncode)

    print('Finished')

    # Run comparison
# Ejemplo n.º 5 ("Example no. 5" — scraped snippet boundary marker)
    def single_run(self, run_name):
        """Prepare and submit a single simulation run.

        Creates the next available output directory, applies default
        parameters, merges in any params file, writes the final inputs
        file and submits the slurm job.

        :param run_name: base name for the run's output directory
        :return: -1 if the run directory already exists (run skipped),
                 otherwise 1.
        """
        this_run_directory = self.make_next_dir(
            join(self.base_output_dir, run_name))
        if this_run_directory is None:
            print('Run already done, skipping \n \n')
            return -1

        output_folder = join(self.base_output_dir, this_run_directory)

        # Perform various checks on the inputs

        # Remove trailing slash from output_folder
        if output_folder[-1] == "/":
            output_folder = output_folder[:-1]

        self.parameters['main.output_folder'] = output_folder

        # If this isn't specified, don't output plots for each iteration
        # (in case an input file has this option left in)
        # don't want to generate loads of output unnecessarily
        if 'main.iteration_plot_interval' not in self.parameters:
            self.parameters['main.iteration_plot_interval'] = '-1'

        # Unless a max step has been explicitly specified, set it to something
        # big so we hit a steady state
        if 'main.max_step' not in self.parameters:
            self.parameters['main.max_step'] = '999999'

        # Bug fix: the default was previously written to 'main.maxTime'
        # while the guard checked 'main.max_time', so the default landed
        # under a different key than the one tested.
        # NOTE(review): assumes 'main.max_time' is the key the solver
        # reads — confirm against the C++ parameter parsing.
        if 'main.max_time' not in self.parameters:
            self.parameters['main.max_time'] = '9999'

        # Finished checking inputs

        # Now need to pass all our parameters to the code. The easy way is to
        # do it with arguments, the hard way is to create a new inputs file
        # and edit it. The latter means we have a record of all parameters
        # used, which is preferable.
        new_input_file = output_folder + '/inputs'

        params_key = 'main.params_file'

        # Placeholder for parameters read from a base inputs file (the
        # read is currently disabled, so this stays empty).
        inputs_params = {}

        extra_params = {}
        if params_key in self.parameters:
            extra_params = read_inputs(self.parameters[params_key])
        elif params_key in inputs_params:
            extra_params = read_inputs(inputs_params[params_key])

        # Merge all dictionaries.
        # Important that it's done in this order, so that parameters
        # are overwritten in the correct order. Priority is:
        # 1) params specified in this python script
        # 2) params in main.params_file
        # 3) params in the inputs file
        for key in extra_params.keys():
            if key not in self.parameters:
                self.parameters[key] = extra_params[key]

        for key in inputs_params.keys():
            if key not in self.parameters:
                self.parameters[key] = inputs_params[key]

        # If we have Kozeny-Carman permeability, make sure we have a
        # hele-shaw cell to limit the max permeability.
        # Use .get() so a missing key is a no-op rather than a KeyError.
        if self.parameters.get('parameters.permeabilityFunction') == 2:
            self.parameters['parameters.heleShaw'] = 'true'

        # Record provenance: code repository revision and host machine
        self.parameters['gitRevision'] = get_current_vcs_revision()
        self.parameters['machineCodeIsRunOn'] = socket.gethostname()

        # Write out final params file
        write_inputs(new_input_file, self.parameters)

        # Set remaining parameters on the slurm job and submit it
        self.slurm_job.folder = output_folder
        self.slurm_job.set_exec_file(
            os.path.join(self.exec_dir, self.program_name))
        print('Exec dir: %s, program name: %s, exec file: %s' %
              (self.exec_dir, self.program_name,
               os.path.join(self.exec_dir, self.program_name)))

        self.slurm_job.run_task()

        exit_status = 1

        return exit_status
def test_folder(test_directory, verbose_output=False):
    """
    Run the regression test contained within a folder. Expects the folder to have the following files:
    - inputs
    - properties.json
    - at least one file ending with .expected to compare with the computed output,
        e.g. pout.0.expected, or plt000010.2d.hdf5.expected
    :param test_directory: folder containing the test
    :param verbose_output: if True, log details of individual field errors
    :return: (success, status) - whether the test passed, and 'OK'/'Failed'/'Void'
    """

    logger.log('Processing folder "%s"' % test_directory)

    # Clear diffs from any previous run so stale results can't affect us
    remove_existing_diffs_cmd = 'rm %s/diff-*' % test_directory
    logger.log('Remove existing diffs: ' + remove_existing_diffs_cmd)
    os.system(remove_existing_diffs_cmd)

    test_files = os.listdir(test_directory)

    logger.log('Initial files in folder ' + str(test_files))

    # Check the required files exist
    # if not, skip this test
    if not all(elem in test_files for elem in REQUIRED):
        test_name = test_directory.split('/')[-1]
        logger.logl('%-25s    ' % test_name, console_display=True)

        logger.log(
            '  Required files for conducting a test (%s) not found in %s' %
            (REQUIRED, test_directory))
        logger.log('  Skipping test \n')

        logger.log_void()
        return False, 'Void'

    # Load test properties (name, dim, proc count, ...)
    with open(os.path.join(test_directory, PROPERTIES_FILE)) as json_file:
        properties = json.load(json_file)

    logger.logl('%-25s    ' % properties['name'], console_display=True)

    # Bug fix: the executable is 'mpirun', not 'mpiruna' — the typo meant
    # MPI was never detected and every parallel test was silently skipped.
    try:
        mpi = subprocess.check_output(['which', 'mpirun'])
        mpi_path = str(mpi.decode()).strip()
    except subprocess.CalledProcessError:
        mpi = None
        mpi_path = None

    # Skip if parallel test and no mpirun
    if properties['proc'] > 1 and mpi is None:
        logger.log_void()
        return False, 'Void'

    # Get the correct executable for this test's dimensionality
    exec_name = 'mushyLayer%dd' % properties['dim']
    exec_dir = os.path.join(mushyLayerRunUtils.get_mushy_layer_dir(),
                            'execSubcycle')
    mushy_layer_exec_path = mushyLayerRunUtils.get_executable_name(
        exec_dir, exec_name=exec_name, return_full_path=True)

    if not os.path.exists(mushy_layer_exec_path):
        logger.log('\n**Could not find mushy layer executable: %s' %
                   mushy_layer_exec_path)
        logger.log(
            '**Have you compiled the code for the right number of dimensions?')
        logger.logl('**Use \'make all DIM=3\' to compile in 3D    ')

        logger.log_failed()
        return False, 'Failed'

    # Run the test, under mpirun when available
    if mpi is not None:
        cmd = 'cd %s; %s -np %d %s inputs' % (test_directory, mpi_path,
                                              properties['proc'],
                                              mushy_layer_exec_path)
        os.system(cmd)
    else:
        cmd = 'cd %s; %s inputs > pout.0' % (test_directory,
                                             mushy_layer_exec_path)
        logger.log(cmd)
        res = subprocess.check_output(cmd, shell=True)

        logger.logl('Response: "%s"' % str(res.decode()))

    # Compare output against the expected output
    expected_files = [f for f in test_files if EXPECTED in f and DIFF not in f]
    if not expected_files:
        logger.log('No expected files to compare against')
        logger.log_void()
        return False, 'Void'

    failed_test = False
    for expected_file in expected_files:
        logger.log('Expected file: %s' % expected_file)

        # Check an output file to compare against exists
        test_output_filename = expected_file.replace(EXPECTED, '')
        test_output_file_path = os.path.join(test_directory,
                                             test_output_filename)
        if not os.path.exists(test_output_file_path):
            logger.log('No output file generated to compare against: %s' %
                       test_output_file_path)
            logger.log_failed()
            return False, 'Failed'

        if '.hdf5' in expected_file:
            # HDF5 output: use the ChomboCompare utility

            # Make diffs folder if it doesn't exist
            diffs_folder = os.path.join(test_directory, DIFF_FOLDER)
            if not os.path.exists(diffs_folder):
                os.makedirs(diffs_folder)
                logger.log('Making folder %s' % diffs_folder)

            chombo_dir = os.environ['CHOMBO_HOME']
            compare_dir = os.path.join(chombo_dir, 'util', 'ChomboCompare')
            compare_exec = mushyLayerRunUtils.get_executable_name(
                compare_dir, 'compare%dd' % properties['dim'])
            compare_exec = os.path.join(compare_dir, compare_exec)

            if not os.path.exists(compare_exec):
                logger.log('Could not find Chombo compare executable %s' %
                           compare_exec)
                logger.log('So cannot compare hdf5 output files')

            compare_params_file = os.path.join(test_directory,
                                               'compare.inputs')

            computed_file = os.path.join(test_directory, test_output_file_path)
            exact_file = os.path.join(test_directory, expected_file)
            error_file = os.path.join(diffs_folder,
                                      DIFF + test_output_filename)
            # sameSize=1: computed and expected outputs share a grid
            compare_params = {
                'compare.sameSize': 1,
                'compare.exactRoot': exact_file,
                'compare.computedRoot': computed_file,
                'compare.errorRoot': error_file,
                'compare.doPlots': 1,
                'compare.HOaverage': 0,
                'compare.no_average_var': 'T err'
            }

            mushyLayerRunUtils.write_inputs(compare_params_file,
                                            compare_params)
            cmd = 'cd %s ; %s %s > pout.0' % (diffs_folder, compare_exec,
                                              compare_params_file)

            logger.log('Executing: %s' % cmd)
            res = subprocess.check_call(cmd,
                                        shell=True,
                                        stderr=subprocess.STDOUT)
            logger.log('Compare command run, response: %s' % res)

            # Rename pout.0 in case we make lots
            old_pout_name = os.path.join(diffs_folder, 'pout.0')
            new_pout_name = os.path.join(diffs_folder,
                                         'pout-%s.0' % test_output_filename)
            if not os.path.exists(old_pout_name):
                logger.log('Cannot find %s' % old_pout_name,
                           console_display=True)

            os.rename(old_pout_name, new_pout_name)

            # Now check the output diff for any field error over tolerance
            errs = chcompare.load_error_file(new_pout_name)

            for field in errs.keys():
                field_errs = errs[field]
                for err_type in field_errs.keys():
                    this_field_err = field_errs[err_type]
                    if abs(this_field_err) > err_tolerance:
                        logger.log('Error in field %s is non-zero' % field)
                        logger.log('See %s and %s for more info' %
                                   (new_pout_name, error_file))
                        # Keep checking remaining fields/files, but the
                        # test has now failed
                        failed_test = True

        elif '.csv' in expected_file:
            # CSV output: compare the first row field-by-field
            data = pd.read_csv(
                os.path.join(test_directory, test_output_file_path))
            data_expected = pd.read_csv(
                os.path.join(test_directory, expected_file))

            data_keys = data.keys()

            diff = data.copy()

            for key in data_expected.keys():
                if key not in data_keys:
                    logger.log('Key not found: %s' % key, verbose_output)
                    failed_test = True
                    break

                tolerance = 1e-5
                expected_value = data_expected[key].iloc[0]
                # Bug fix: compare scalar to scalar. Previously the whole
                # expected Series was subtracted, relying on it having
                # exactly one row.
                value_diff = float(data[key].iloc[0] - expected_value)

                # Compute relative difference for large values
                if abs(expected_value) > 1.0:
                    value_diff = value_diff / expected_value

                diff[key] = value_diff
                if abs(value_diff) > tolerance:
                    logger.log('Error in field %s = %.2g' % (key, value_diff),
                               verbose_output)
                    failed_test = True

            diff.to_csv(os.path.join(test_directory,
                                     'diff-%s' % expected_file))

        else:
            # Assume text file - do a diff (after filtering volatile lines)
            text1 = open(os.path.join(test_directory,
                                      expected_file)).readlines()
            text2 = open(os.path.join(test_directory,
                                      test_output_file_path)).readlines()

            text1 = filter_pout(text1)
            text2 = filter_pout(text2)

            diff = difflib.unified_diff(text1, text2)
            differences = [line for line in diff]

            diff_out_file = os.path.join(test_directory,
                                         DIFF + test_output_filename)

            with open(diff_out_file, 'w') as diff_file:
                diff_file.writelines(differences)

            logger.log('Diff: %s' % differences)

            # NOTE(review): text differences are reported but deliberately
            # do not fail the test (failed_test is not set here)
            if differences:
                logger.log('Differences found in %s' % test_output_file_path)
                logger.log('For details, see %s' % diff_out_file)

    if failed_test:
        logger.log_failed()
        return False, 'Failed'
    else:
        logger.log_ok()
        return True, 'OK'
# Ejemplo n.º 7 ("Example no. 7" — scraped snippet boundary marker)
import mushyLayerRunUtils as util
import os
from valgrind_parser import ValgrindLogParser
import shutil

# Script: run a short mushy-layer simulation under valgrind and turn the
# valgrind log into an HTML report.

# Where the memory-leak test lives, and where valgrind's text log will go
mem_leak_path = os.path.join(util.get_mushy_layer_dir(), 'test', 'memory_leak')
VALGRIND_LOG = os.path.join(mem_leak_path, 'valgrind_log.txt')

# HACK: overwrite the regex data shipped with the installed valgrind_parser
# package with a local patched copy. The destination is a machine-specific
# virtualenv path (python3.6) — TODO confirm it matches the active env.
shutil.copy('valgrind_regexes.json', '../../env/lib/python3.6/site-packages/valgrind_parser/data/')

# Create inputs file and directory
inputs_base = os.path.join(util.get_mushy_layer_dir(), 'test', 'regression', 'AMRDarcyBrinkman', 'inputs')
inputs = util.read_inputs(inputs_base)

# Keep the run short: 5 steps is enough for leak detection
inputs['main.max_step'] = 5
util.write_inputs('inputs', inputs)

exec_file = util.get_executable_name(exec_name='mushyLayer2d', return_full_path=True)

# , os.path.join(mem_leak_path, 'inputs')
#  --log-file="%s"
# Run under valgrind: XML report to valgrind.xml, stdout to VALGRIND_LOG
cmd = 'valgrind --child-silent-after-fork=yes --xml=yes --xml-file=valgrind.xml --leak-check=full ' \
      '--show-reachable=yes %s inputs > %s ' % (exec_file, VALGRIND_LOG)
# print(cmd)
os.system(cmd)

# Parse the log and emit an HTML summary of any leaks found
vlp = ValgrindLogParser(VALGRIND_LOG, html_report_location='valgrind.html')
vlp.generate_html_report()