Example #1
 def GenerateOutput(self, **kwargs):
     '''
     Generate contents of output file
     Outputs:
         contents(str): output contents
     '''
     _contents = ''
     for key, value in self.layout.items():
         if key == 'case_name':
             _contents += '# %s\n' % self.case_name
         else:
             _contents += '# %s\n\n' % key
             if type(value) == str:
                 try:
                     _contents += (ConvertMediaMKD(value, self.media[value]) + '\n')
                 except KeyError:
                     _contents += 'None\n'
             elif type(value) == list:
                 for _sub_value in value:
                     my_assert(type(_sub_value) == str, TypeError, "value must be a string or a list of strings")
                     _contents += "## %s \n\n" % _sub_value
                     try:
                         _contents += (ConvertMediaMKD(_sub_value, self.media[_sub_value]) + '\n')
                     except KeyError:
                         _contents += 'None\n\n'
         _contents += '\n'
     return _contents
Example #2
    def GetStepMT(self, filename, step):
        '''
        get the total core hours spent before reaching a specific step
        Inputs:
            filename(str): path of the machine time output file to read
            step(int): a specified step
        Returns:
            machine_time(float): total machine time (core hrs) spent before reaching this step
            number_of_cpu(float): number of cores used
        '''
        # Read header and data
        self.ReadHeader(filename)
        state = self.ReadData(filename)
        if state == 1:
            # empty file, return None
            return None

        # get steps and machine time
        col_step = self.header['Time_step_number']['col']
        col_mt = self.header['Machine_time']['col']
        steps = self.data[:, col_step]
        machine_times = self.data[:, col_mt]

        # check step is in range
        my_assert(
            type(step) == int, TypeError,
            "GetStepMT: step must be an int value")
        my_assert(step <= np.max(steps), ValueError,
                  "GetStepMT: step given is bigger than maximum step")

        # interp for step
        machine_time_at_step = np.interp(step, steps, machine_times)

        # compute total time
        col_cpu = self.header['CPU_number']['col']
        number_of_cpu = self.data[0, col_cpu]
        return machine_time_at_step * number_of_cpu, number_of_cpu
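The lookup above relies on numpy's one-dimensional linear interpolation; a self-contained sketch of the same np.interp call, with invented sample data:

import numpy as np

# hypothetical monotonically increasing steps and cumulative machine times
steps = np.array([0, 10, 20, 30])
machine_times = np.array([0.0, 1.5, 3.5, 6.0])

# np.interp interpolates linearly between the two bracketing steps
machine_time_at_step = np.interp(15, steps, machine_times)
print(machine_time_at_step)  # 2.5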
Example #3
def ExtractNav(_lines, **kwargs):
    '''
    extract the nav message from plain text
    Inputs:
        _lines(list of str): lines of plain text contents
        kwargs:
            previous(int): indent level of the previous line
            at(int): index of the line to start from
        (todo: fix the naming with the Produce function)
    Returns:
        _odict(dict): nav structure extracted
        i(int): index of the last line processed
    '''
    _previous = kwargs.get('previous', 0)
    _at = kwargs.get('at', 0)
    assert(_at <= len(_lines))
    _odict = {}
    i = -1
    for i in range(_at, len(_lines)):
        _line = _lines[i]
        my_assert('-' in _line, TypeError, "Each line must contain '-'")
        _now = re_count_indent(_line)
        if _now < _previous:
            # this level is all done
            break
        elif _now > _previous:
            # came to a lower level, which is already handled by the recursion
            continue
        else:
            # still on this level, add a new member to the dictionary
            key, value = SeparateNavPattern(_line)
            if value == '':
                # going to the next level, recurse
                _next = re_count_indent(_lines[i+1])
                my_assert(_next > _now, ValueError, 'A group with no cases or a Case with no files are not supported')
                _odict[key], i = ExtractNav(_lines, previous=_next, at=i+1)
            else:
                # add key and value 
                _odict[key] = value
    return _odict, i
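A minimal usage sketch, assuming ExtractNav and its helpers (re_count_indent, SeparateNavPattern) are importable; the import path and the nav lines below are hypothetical:

from shilofue.Doc import ExtractNav  # hypothetical import path

# nav section of a mkdocs.yml, already split into lines
_lines = [
    "    - Home: index.md",
    "    - case_1:",
    "        - Summary: case_1/summary.md",
]
_odict, _ = ExtractNav(_lines, previous=4)
# expected: {'Home': 'index.md', 'case_1': {'Summary': 'case_1/summary.md'}}
print(_odict)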
Example #4
def GetGroupCaseFromDict(_idict):
    '''
    Get a list for names and a list for parameters from a dictionary read from a json file
    Inputs:
    _idict(dict):
        input dictionary
    Returns:
        _names(list):
            list of names, each member is a list itself
        _parameters(list):
            list of parameters, each member is a list itself
    '''
    my_assert(type(_idict) == dict, TypeError, 'Input is not a dictionary')
    _parameters = []  # initialize a list to hold parameters
    _names = []  # initialize a list to hold names of parameters
    for key, value in sorted(_idict.items(), key=lambda item: item[0]):
        if type(value) is dict:
            # a nested hierarchy: append the name and recurse
            _sub_names, _sub_parameters = GetGroupCaseFromDict(value)
            for i in range(len(_sub_names)):
                # concatenate names and append
                _names.append([key] + _sub_names[i])
            _parameters += _sub_parameters  # append parameters
        elif type(value) is list:
            _names.append([key])  # concatenate names
            _parameters.append(value)
        elif type(value) in [int, float, str]:
            _names.append([key])  # concatenate names
            _parameters.append([value])
        else:
            raise TypeError('%s is not int, float or str' % str(type(value)))
    return _names, _parameters
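A worked example of the flattening, assuming GetGroupCaseFromDict is importable (module path hypothetical):

from shilofue.Parse import GetGroupCaseFromDict  # hypothetical import path

_idict = {'a': {'b': [1, 2]}, 'c': 3.0}
_names, _parameters = GetGroupCaseFromDict(_idict)
print(_names)       # [['a', 'b'], ['c']]
print(_parameters)  # [[1, 2], [3.0]]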
Example #5
def UpdateProjectDoc(_project_dict, _project_dir, **kwargs):
    '''
    Update doc for all cases in this project
    Inputs:
        _project_dict(dict): dictionary of cases and groups in this project
        _project_dir(str): directory of this project
        kwargs(dict): options
            mkdocs(str): name of the mkdocs directory
            images(list): images to include
            analysis(dict): a dictionary of analyses
    '''
    _mkdocs = kwargs.get('mkdocs', 'mkdocs_project')
    _imgs = kwargs.get('images', [])
    myMkdoc = MKDOC(os.path.join(_project_dir, _mkdocs))

    # deal with case and group
    for key, value in _project_dict.items():
        if key == 'cases':
            for _case in value:
                myMkdoc(_case, os.path.join(_project_dir, _case), append_prm=True, update=True, images=_imgs)
        else:
            my_assert(type(value) == list and value != [], TypeError, 'Input group must have a \'case\' list and it cannot be []')
            myMkdoc(key, os.path.join(_project_dir, key), append_prm=True, update=True, type='group', case_names=value, images=_imgs)
    
    # deal with analysis
    analysis_dict = kwargs.get('analysis', {})
    for key, value in analysis_dict.items():
        case_dirs = value['case_dirs']
        images = value.get('images', [])
        extra_analysis = value.get('extra_analysis', {})
        myMkdoc(key, _project_dir, append_prm=True, update=True, type='analysis', case_dirs=case_dirs, images=images, extra_analysis=extra_analysis)
Example #6
 def __init__(self, _case_name, _dir, **kwargs):
     '''
     Inputs:
         _case_name(str) - case name
         _dir(str) - directory of case data
          kwargs:
             layout(dict):
                 layout of doc
             layout_json(str):
                 file name to read layout from
     '''
     self.case_name = _case_name
     self.idir = _dir
     self.media = {}
     # get layout
     try:
         _layout = kwargs['layout']
         my_assert(type(_layout) == dict, TypeError, "layout must be a dictionary")
         self.layout = _layout
     except KeyError:
         _layout_json = kwargs.get('layout_json', 'DocLayout.json')
         with resources.open_text(shilofue.json, _layout_json) as fin:
             _layout = json.load(fin)
         my_assert(type(_layout) == dict, TypeError, "layout must be a dictionary")
         self.layout = _layout
Example #7
def GetGroupCaseFromDict1(_idict):
    '''
    second (simpler) method of getting a list of names and a list of parameters from a dictionary read from a json file
    Inputs:
        _idict(dict):
            input dictionary
    Returns:
        _config_tests(list of dict):
            list of configurations and tests for cases
    '''
    my_assert(type(_idict) == dict, TypeError, 'Input is not a dictionary')
    _config_tests = []
    _configs = _idict['config']
    _tests = _idict.get('test', {})
    # get total number of cases
    _total = 1
    _totals = [1]
    for key, value in sorted(_configs.items(), key=lambda item: item[0]):
        # if re.match("sub_group", key):
        #   continue
        if type(value) == list:
            _total *= len(value)
        _totals.append(_total)
    for key, value in sorted(_tests.items(), key=lambda item: item[0]):
        if type(value) == list:
            _total *= len(value)
        _totals.append(_total)
    # get case configuration and test
    for j in range(_total):
        # loop for every case
        _config_test = {'config': {}, 'test': {}}  # initiate dict
        i = 0  # current index of parameters
        for key, value in sorted(_configs.items(), key=lambda item: item[0]):
            # loop for configurations
            # derive index number by mod
            _ind = j
            _ind = int(_ind // _totals[i])
            try:
                _ind = _ind % len(value)
                # indexing by _ind and append value to key
                _config_test['config'][key] = value[_ind]
            except TypeError:
                # only one value
                _config_test['config'][key] = value
            i += 1
        for key, value in sorted(_tests.items(), key=lambda item: item[0]):
            # loop for tests
            _ind = j
            _ind = int(_ind // _totals[i])
            try:
                _ind = _ind % len(value)
                # indexing by _ind and append value to key
                _config_test['test'][key] = value[_ind]
            except TypeError:
                # only one value
                _config_test['test'][key] = value
            i += 1
        _config_tests.append(_config_test)
    return _config_tests
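The integer division and modulo above enumerate the Cartesian product of all list-valued options; a self-contained sketch of the same mixed-radix indexing, with invented values:

# enumerate all combinations of two option lists via // and %
configs = {'a': [1, 2], 'b': ['x', 'y', 'z']}
keys = sorted(configs)
totals = [1]
for key in keys:
    totals.append(totals[-1] * len(configs[key]))
for j in range(totals[-1]):
    combo = {key: configs[key][(j // totals[i]) % len(configs[key])]
             for i, key in enumerate(keys)}
    print(j, combo)
# prints all 6 combinations of 'a' and 'b'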
Example #8
    def GenerateAnalysisMkd(self, _target_dir, _case_dirs, **kwargs):
        '''
        Generate markdown file of an analysis
        Inputs:
            _target_dir(str): directory of this case
            kwargs:
                filename(str): name of the file
        Returns:
            _filename(str): file generated
        '''
        # get filename
        _filename = kwargs.get('filename', 'summary.md')
        _filename = os.path.join(_target_dir, _filename)

        # write file header
        contents = ''
        _name = os.path.basename(_target_dir)
        contents += '# Analysis %s\n\n' % _name
        
        # append names of cases
        contents += '## This includes cases: \n\n'
        for _case_dir in _case_dirs:
            _case_link = os.path.join('..', _case_dir, 'summary.md')
            contents += '* %s\n' % ConvertLinkMKD(_case_dir, _case_link)
        contents += '\n'
       
        # append imgs
        imgs_ = kwargs.get('images', [[]]*len(self.imgs))
        my_assert(len(self.imgs) == len(imgs_), ValueError,
                  "GenerateAnalysisMkd: images input must be a list with the same length as self.imgs")
        for i in range(len(self.imgs)):
            img_root = self.imgs[i]
            contents += '## Image %s\n\n' % img_root
            for img_ in imgs_[i]:
                basename_ = os.path.basename(img_)
                relative_route = os.path.join('img', os.path.basename(basename_))
                contents += '%s\n' % basename_
                contents += '%s\n\n' % ConvertMediaMKD(basename_, relative_route)

        # append extra analysis
        extra_analysis = kwargs.get('extra_analysis', {})
        for key, value in extra_analysis.items():
            contents += '## %s\n\n' % key
            if key == 'machine_time':
                contents += 'Here we show machine time (core hrs) for each case\n'
                contents += '%s\n\n' % ConvertMediaMKD("MachineTimeAnalysis.png", "img/MachineTimeAnalysis.png")
            if key == 'newton_solver':
                contents += 'Here we show solver output for each case\n'
                contents += '%s\n\n' % ConvertMediaMKD("NewtonSolverAnalysis.png", "img/NewtonSolverAnalysis.png")

        # write
        with open(_filename, 'w') as fout:
            fout.write(contents)

        return _filename
Example #9
def SlabMorph(case_dir, kwargs=None):
    """
    Slab morphology
    Inputs:
        case_dir(str): directory of case
        kwargs(dict): options
    """
    kwargs = kwargs or {}  # avoid a mutable default argument
    case_output_dir = os.path.join(case_dir, 'output')
    case_morph_dir = os.path.join(case_output_dir, 'slab_morphs')

    # Initiation
    Visit_Xyz = VISIT_XYZ()

    # a header for interpreting file format
    # note that 'col' starts from 0
    header = {
        'x': {
            'col': 1,
            'unit': 'm'
        },
        'y': {
            'col': 2,
            'unit': 'm'
        },
        'id': {
            'col': 4
        }
    }

    # depth range
    # this is for computing dip angles with different ranges
    depth_ranges = kwargs.get('depth_ranges',
                              [[0, 100e3], [100e3, 400e3], [400e3, 6371e3]])
    my_assert(
        type(depth_ranges) == list, TypeError, "depth_ranges must be a list")

    # remove older results
    ofile = os.path.join(case_output_dir, 'slab_morph')
    if os.path.isfile(ofile):
        os.remove(ofile)

    # loop over every snapshot and call the function
    snaps, times, _ = Parse.GetSnapsSteps(case_dir, 'particle')

    for i in snaps:
        visit_xyz_file = os.path.join(case_morph_dir,
                                      'visit_particles_%06d.xyz' % i)
        Visit_Xyz(visit_xyz_file,
                  header=header,
                  ofile=ofile,
                  depth_ranges=depth_ranges,
                  time=times[i])
Example #10
def ParsePhaseInput(inputs):
    '''
    parse input of phases to an ASPECT input form
    todo
    '''
    output = ""
   
    # read in density of the base phase
    rho_base = inputs.get("rho_base", 3300.0)
    my_assert(type(rho_base) == float, TypeError, "base value for density must be a float")
    # read in the density change with each transition
    drho = inputs.get("drho", [0.0])
    my_assert(type(drho) == list, TypeError, "value of density change must be a list")
    # read in the fraction with each transition
    xc = inputs.get("xc", [1.0])
    my_assert(type(xc) == list, TypeError, "value of fraction with transition must be a list")
    # number of transitions
    total = len(drho)
    my_assert(len(xc) == total, ValueError, "length of xc and drho must be the same")
    
    # compute density
    rho = rho_base * np.ones(total + 1)
    for i in range(total):
        rho[i+1:] += drho[i] * xc[i]
    
    # generate output
    output += "%.1f" % rho[0]
    for i in range(1, total+1):
        output += "|%.1f" % rho[i]
    
    return output
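A standalone recomputation of the density column for invented inputs rho_base=3300.0, drho=[200.0, 100.0], xc=[1.0, 0.5]:

import numpy as np

rho = 3300.0 * np.ones(3)
rho[1:] += 200.0 * 1.0  # first transition raises every later phase
rho[2:] += 100.0 * 0.5  # second transition raises the last phase
print("|".join("%.1f" % v for v in rho))  # 3300.0|3500.0|3550.0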
Example #11
 def AnalyzeExtra(self, _name, _project_dir, _case_dirs, _target_dir, extra_analysis, kwargs):
      '''
      extra procedures in an analysis
      Inputs:
          extra_analysis(dict): types of extra analysis and their settings
          kwargs(dict): dictionary of options
      '''
      for key, value in extra_analysis.items():
         if key == 'machine_time':
             # todo change to a class 
              my_assert(type(value) == dict, TypeError, "AnalyzeExtra: settings for an option (a key in the analysis dict) must be a dict")
             AnalyzeMachineTime = ANALYZEMACHINETIME(_project_dir, _case_dirs, _target_dir, value)
             AnalyzeMachineTime(value)
         if key == 'newton_solver':
              my_assert(type(value) == dict, TypeError, "AnalyzeExtra: settings for an option (a key in the analysis dict) must be a dict")
             self.AnalyzeNewtonSolver(_project_dir, _case_dirs, _target_dir, value)
Example #12
 def LowerMantle(self, Inputs, _config):
     """
     calculate flow law parameters
     """
     _type = _config.get('model_type', 0)
     my_assert(
         type(_type) == int, TypeError,
         "Type of input \'model_type\' must be int")
      if _type == 0:
          # when the phase transition only happens in the mantle composition
          self.LowerMantle0(Inputs, _config)
      if _type == 1:
          # when phase transitions happen in all compositions;
          # there is an eclogite transition of the crustal layer
          self.LowerMantle1(Inputs, _config)
Example #13
    def __call__(self, kwargs):
        '''
        Analysis and plot
        '''
        my_assert(type(kwargs) == dict, TypeError, "ANALYZEMACHINETIME: __call__: kwargs must be a dictionary")

        # read data
        step = kwargs.get('step', 1)
        self.ReadData(step)

        # do analysis
        type_ = kwargs.get('type', 'strong_scaling')
        if type_ == 'strong_scaling':
            self.StrongScaling(step, kwargs)
        else:
            raise ValueError("type_ could only be one of: 'strong_scaling'")
Example #14
    def __call__(self, _name, _dir, **kwargs):
        '''
        Call function and write to files that will be used by mkdocs
        Inputs:
            kwargs:
                update(bool) - whether to update an existing case
                append_prm(bool) - whether to append a prm file
                prm(str) - name of a prm file
                type(str) - 'case', 'group' or 'analysis'
                images(list) - names of images to append
                case_names(list) - for a group, names of the cases it contains
                case_dirs(list) - for an analysis, relative directories of cases
        '''
        self.new_files = {}
        self.imgs = kwargs.get('images', [])
        update = kwargs.get('update', False)
        append_prm = kwargs.get('append_prm', False)
        _prm = kwargs.get('prm', 'case.prm')
        _type = kwargs.get('type', 'case')

        # These directory and files need to be preexist
        assert(os.path.isdir(self.odir))
        assert(os.path.isdir(os.path.join(self.odir, 'docs')))
        _mkdocs_file = os.path.join(self.odir, 'mkdocs.yml')
        assert(os.path.isfile(_mkdocs_file))
        _index_file = os.path.join(self.odir, 'docs', 'index.md')

        # deal with different types of entry
        _target_dir = os.path.join(self.odir, 'docs', _name)
        if _type == 'case':
            # append a case
            self.AppendCase(_name, _dir, _target_dir, update=update, append_prm=append_prm)
        elif _type == 'group':
            # append a group
            _case_names = kwargs.get('case_names', None)
            my_assert(_case_names is not None, ValueError, 'For a group, case names cannot be None. Valid names must be given')
            self.AppendGroup(_name, _dir, _case_names, _target_dir, update=update, append_prm=append_prm)
        elif _type == 'analysis':
            # append an analysis
            _case_dirs = kwargs.get('case_dirs', [])
            # here the _dir is the project directory and case_dirs are relative directories to that
            self.AppendAnalysis(_name, _dir, _case_dirs, _target_dir, kwargs)
        else:
            raise ValueError("Type must be 'case', 'group', 'analysis' ")
        if self.new_files != {}:
            self.RenewMkdocsYml(_name)
Example #15
def GetSubCases(_dir):
    '''
    get cases in a folder, recursively
    Inputs:
        _dir(str): directory to search
    Returns:
        case_dirs(list): absolute paths of the cases found
    '''
    my_assert(os.path.isdir(_dir), TypeError, "_dir must be a directory")
    dir_abs = os.path.abspath(_dir)

    # initiate
    case_dirs = []

    # look for case.prm
    if 'case.prm' in os.listdir(dir_abs):
        case_dirs.append(dir_abs)

    # recurse into subdirectories
    for _subname in os.listdir(dir_abs):
        _fullsubname = os.path.join(dir_abs, _subname)
        if os.path.isdir(_fullsubname):
            case_dirs += GetSubCases(_fullsubname)
    
    return case_dirs
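For comparison, the same scan can be written without explicit recursion; a self-contained sketch using os.walk:

import os

def find_case_dirs(root):
    '''Collect every directory under root that contains a case.prm'''
    return [d for d, _, files in os.walk(root) if 'case.prm' in files]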
Example #16
    def ReadData(self, filein):
        """
        read date form file
        Args:
            filein(str): file input
        """
        # assert file
        my_assert(os.access(filein, os.R_OK), FileNotFoundError,
                  'VISIT_XYZ.ReadData: visit xyz file - %s cannot be read' % filein)

        # construct cols
        cols = []
        i = 0
        for key, value in self.header.items():
            cols.append(value['col'])
            # record column in self.data
            self.column_indexes[key] = i
            i += 1

        # read data
        self.data = np.loadtxt(filein, usecols=cols, skiprows=2)
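A self-contained sketch of the column selection np.loadtxt performs here, with an invented two-line header and rows mimicking a visit .xyz particle file:

import io
import numpy as np

text = ("header line 1\n"
        "header line 2\n"
        "A 1.0 2.0 B 7\n"
        "A 3.0 4.0 B 8\n")
# read only columns 1, 2 and 4 (x, y, id), skipping the two header rows
data = np.loadtxt(io.StringIO(text), usecols=(1, 2, 4), skiprows=2)
print(data)  # [[1. 2. 7.] [3. 4. 8.]]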
Example #17
def main():
    '''
    main function of this module
    Inputs:
        sys.argv[1](str):
            command
        sys.argv[2:](str):
            options
    '''
    _commend = sys.argv[1]
    # parse options
    parser = argparse.ArgumentParser(description='Parse parameters')
    parser.add_argument('-j', '--json_file', type=str,
                        default='./config_case.json',
                        help='Filename for json file')
    _options = sys.argv[2:]  # slicing never raises IndexError, so no try/except is needed
    arg = parser.parse_args(_options)

    # commands

    if _commend == 'phase_input':
        # example:
        #   python -m shilofue.Parse phase_input -j ./files/TwoDSubduction/phases_1_0.json
        my_assert(os.access(arg.json_file, os.R_OK), FileNotFoundError, "Json file doesn't exist.")
        with open(arg.json_file) as fin:
            inputs = json.load(fin)

        # get the outputs
        outputs = "density = "
        for key, value in inputs.items():
            if type(value) == dict:
                output = ParsePhaseInput(value)
                outputs += "%s: %s, " % (key, output)

        # print the output 
        print(outputs)
Example #18
def ExpandNamesParameters(_names, _parameters):
    '''
    Inputs:
        _names(list):
            list of names, each member is a list itself
        _parameters(list):
            list of parameters, each member is a list itself
    Returns:
        _cases_config(list of dict):
            a list of dictionaries; each dictionary is the config for one case
    '''
    my_assert(type(_names) == list, TypeError, 'First Entry is not a list')
    my_assert(type(_parameters) == list, TypeError, 'Second Entry is not a list')
    my_assert(len(_names) == len(_parameters), ValueError, 'Length of first and second entry is not equal')
    _total = 1
    for _sub_parameters in _parameters:
        # take the value of total of all lengths multiplied
        _total *= len(_sub_parameters)
    # initialize this list of dictionaries for configurations
    _cases_config = []
    for i in range(_total):
        _cases_config.append({'names': [], 'values': []})
    # fill in all entries
    for j in range(len(_cases_config)):
        _cases_config[j]['names'] = _names.copy()
        for i in range(len(_names)):
            _ind = j  # get the index in _parameters[i]
            for k in range(len(_names)-1, i, -1):
                _ind = int(_ind // len(_parameters[k]))
            _ind = _ind % len(_parameters[i])
            _cases_config[j]['values'].append(_parameters[i][_ind])
    return _cases_config
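A worked trace of the index arithmetic with two parameters of lengths 2 and 3 (values invented): for parameter i, j is first divided by the sizes of all later parameters, then wrapped with modulo.

_names = [['a'], ['b']]
_parameters = [[1, 2], ['x', 'y', 'z']]
for j in range(6):
    values = []
    for i in range(len(_names)):
        _ind = j
        for k in range(len(_names) - 1, i, -1):
            _ind //= len(_parameters[k])
        values.append(_parameters[i][_ind % len(_parameters[i])])
    print(j, values)
# 0 [1, 'x'], 1 [1, 'y'], 2 [1, 'z'], 3 [2, 'x'], 4 [2, 'y'], 5 [2, 'z']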
Example #19
 def RenewMkdocsYml(self, _name):
     '''
     Renew the mkdocs.yml file in the project directory
     '''
     _filename = os.path.join(self.odir, 'mkdocs.yml')
     _start = None
     _end = None
     with open(_filename, 'r') as fin:
         # read in the old file
         _lines = fin.read().split('\n')
     for i in range(len(_lines)):
         # read in the nav part
         _line = _lines[i]
         if _start is None and re.match('^nav', _line):
             _start = i + 1
             _previous_indent = re_count_indent(_lines[i])
             _indent = re_count_indent(_lines[i+1])
         elif _start is not None and re_count_indent(_line) == _previous_indent:
             _end = i
             break
      my_assert(_start is not None and _end is not None, ValueError, 'Cannot find start and end of the nav part')
      _nav_dict, _temp = ExtractNav(_lines[_start: _end], previous=_indent)
      assert(_temp == _end - _start - 1)
     try:
         # this case is already in the nav part of the yml file
         value = _nav_dict[_name]
         my_assert(type(value) == dict, TypeError,
                   'entry for a case must be a single dict, prepared to include dictionary in the future')
         _nav_dict[_name] = {**value, **self.new_files}  # merge and substitute value in first dict with value in second dict
     except KeyError:
         # this case is new to the yml file
         _nav_dict[_name] = self.new_files
     _new_lines = _lines[0: _start]
     _new_lines += ProduceNav(_nav_dict)
     _new_lines += _lines[_end: -1]
     with open(_filename, 'w') as fout:
         for _line in _new_lines:
             fout.write(_line + '\n')
Example #20
 def process_particle_data(self):
     '''
      process the coordinates of particles; the base class version does nothing.
      Overridden here to add particles in the crust
     '''
     # all configurations
     _config = {**self.config, **self.test, **self.extra}
     # import values
     slab_phi_c = _config.get("slab_phi_c", 0.628319)
     R0 = _config.get("R0", 6.371e6)
     Rc = _config.get("Rc", 4.0e5)
     slab_to = _config.get("slab_to", 2.0e5)
     depth_particle_in_slab = _config.get("depth_particle_in_slab", 100.0)
     number_particle_in_slab = _config.get("number_particle_in_slab", 1000)
     my_assert(
         type(number_particle_in_slab) == int, TypeError,
         "number_particle_in_slab must be an int value")
     # initiate particle_data
     self.particle_data = np.zeros((number_particle_in_slab, 2))
     # get phi value at the tip of initial slab
     phi_st = slab_phi_c + (2 * Rc * slab_to - slab_to**2.0)**0.5 / R0
     for i in range(number_particle_in_slab):
         # get particle coordinates
         # First, angle is divided uniformly.
         # Then, radius is computed accordingly.
         phi = i * phi_st / number_particle_in_slab
         if phi < slab_phi_c:
             r = R0 - depth_particle_in_slab
         else:
             r = R0 + (
                 Rc**2.0 - R0**2.0 *
                 (phi - slab_phi_c)**2.0)**0.5 - Rc - depth_particle_in_slab
          # apply the transform to cartesian coordinates
         x, y = ggr2cart2(phi, r)
         # assign value in particle_data
         self.particle_data[i, 0] = x
         self.particle_data[i, 1] = y
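ggr2cart2 is presumably a polar-to-Cartesian transform; a minimal equivalent under that assumption (not verified against the repo):

import numpy as np

def ggr2cart2_sketch(phi, r):
    # standard polar-to-Cartesian conversion (assumed behavior of ggr2cart2)
    return r * np.cos(phi), r * np.sin(phi)

x, y = ggr2cart2_sketch(0.628319, 6.371e6 - 100.0)
print(x, y)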
Example #21
    def __init__(self, case_dir):
        """
        Initiation
        Args:
            case_dir(str): directory of case
        """
        # check directory
        self._case_dir = case_dir
        my_assert(os.path.isdir(self._case_dir), FileNotFoundError,
                  'BASH_OPTIONS.__init__: case directory - %s doesn\'t exist' % self._case_dir)
        self._output_dir = os.path.join(case_dir, 'output')
        my_assert(os.path.isdir(self._output_dir), FileNotFoundError,
                  'BASH_OPTIONS.__init__: case output directory - %s doesn\'t exist' % self._output_dir)
        self._visit_file = os.path.join(self._output_dir, 'solution.visit')
        my_assert(os.access(self._visit_file, os.R_OK), FileNotFoundError,
                  'BASH_OPTIONS.__init__: case visit file - %s cannot be read' % self._visit_file)
        # img dir
        self._img_dir = os.path.join(case_dir, 'img')
        if not os.path.isdir(self._img_dir):
            os.mkdir(self._img_dir)
        
        # get inputs from .prm file
        prm_file = os.path.join(self._case_dir, 'case.prm')
        my_assert(os.access(prm_file, os.R_OK), FileNotFoundError,
                  'BASH_OPTIONS.__init__: case prm file - %s cannot be read' % prm_file)
        with open(prm_file, 'r') as fin:
            self.idict = ParseFromDealiiInput(fin)

        # initiate a dictionary
        self.odict = {}

        # initiate a statistic data
        self.Statistics = Plot.STATISTICS_PLOT('Statistics')
        statistic_file = os.path.join(self._output_dir, 'statistics')
        self.Statistics.ReadHeader(statistic_file)
        self.Statistics.ReadData(statistic_file)
Example #22
 def __init__(self, _idict, **kwargs):
     '''
     initiate from a dictionary
     Inputs:
         _idict(dict):
             dictionary import from a base file
          kwargs:
              config(dict) - a dictionary that contains the configuration
              test(dict) - a dictionary that contains the configuration to test
              extra(dict) - a dictionary of extra configurations
     '''
      my_assert(type(_idict) == dict, TypeError, "First entry must be a dictionary")
     self.case_name = ''
     self.idict = _idict
     self.config = kwargs.get('config', {})
     # configurations
     my_assert(type(self.config)==dict, TypeError, 'Config must be a dictionary')
     self.extra = kwargs.get('extra', {})
     my_assert(type(self.extra)==dict, TypeError, 'extra must be a dictionary')
     self.test = kwargs.get('test', {})
     my_assert(type(self.test)==dict, TypeError, 'Test must be a dictionary')
     # list of particle coordinates
     self.particle_data = None
Example #23
def ChangeDiscValues(_idict, _names, _values):
    '''
    Change values in a complex dictionary with names and values
    Inputs:
        _idict(dict):
            Dictionary of parameters from a .prm file
        _names(list):
            list of parameters, each member is a list itself,
            which contains the path to the variable
        _values(list):
            list of values of variables
    '''
    my_assert(type(_idict) == dict, TypeError, 'First Entry needs to be a dict')
    my_assert(type(_names) == list, TypeError, 'Second Entry needs to be a list')
    my_assert(type(_values) == list, TypeError, 'Third Entry needs to be a list')
    my_assert(len(_names) == len(_values), ValueError, 'Length of second and third entries must match')
    for i in range(len(_names)):
        _name = _names[i]
        _sub_dict = _idict
        for _key in _name[0: len(_name)-1]:
            _sub_dict = _sub_dict[_key]
        _sub_dict[_name[-1]] = _values[i]
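A self-contained trace of the path walk; the nested dictionary and keys below are invented for illustration:

# walk into a nested dict along a path, then overwrite the leaf
_idict = {'Material model': {'Visco Plastic': {'Reference viscosity': '1e20'}}}
_name = ['Material model', 'Visco Plastic', 'Reference viscosity']
_sub_dict = _idict
for _key in _name[0: len(_name)-1]:
    _sub_dict = _sub_dict[_key]
_sub_dict[_name[-1]] = '1e21'
print(_idict)  # the leaf now reads '1e21'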
Example #24
def GetSnapsSteps(case_dir, type_='graphical'):
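    '''
    Get snapshots, times and steps of the outputs for a case
    Inputs:
        case_dir(str): directory of case
        type_(str): type of output, 'graphical' or 'particle'
    Returns:
        snaps, times, steps(lists): snapshot numbers, output times and time steps
    '''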
    case_output_dir = os.path.join(case_dir, 'output')
    
    # import parameters 
    prm_file = os.path.join(case_dir, 'case.prm')
    my_assert(os.access(prm_file, os.R_OK), FileNotFoundError,
              'case prm file - %s cannot be read' % prm_file)
    with open(prm_file, 'r') as fin:
        idict = ParseFromDealiiInput(fin)

    # import statistics file
    Statistics = Plot.STATISTICS_PLOT('Statistics')
    statistic_file = os.path.join(case_output_dir, 'statistics')
    my_assert(os.access(statistic_file, os.R_OK), FileNotFoundError,
              'case statistic file - %s cannot be read' % statistic_file)
    Statistics.ReadHeader(statistic_file)
    Statistics.ReadData(statistic_file)
    col_time = Statistics.header['Time']['col']
    
    # final time
    final_time = Statistics.data[-1, col_time]
    
    # time interval
    # graphical
    try:
        time_between_graphical_output = float(idict['Postprocess']['Visualization']['Time between graphical output'])
    except KeyError:
        time_between_graphical_output = 1e8
    total_graphical_outputs = int(final_time / time_between_graphical_output) + 1
    graphical_times = [i*time_between_graphical_output for i in range(total_graphical_outputs)]
    graphical_steps = [Statistics.GetStep(time) for time in graphical_times]
    # particle
    try:
        time_between_particles_output = float(idict['Postprocess']['Particles']['Time between data output'])
    except KeyError:
        time_between_particles_output = 1e8
    total_particles_outputs = int(final_time / time_between_particles_output) + 1
    particle_times = [i*time_between_particles_output for i in range(total_particles_outputs)]
    particle_steps = [Statistics.GetStep(time) for time in particle_times]
    
    # initial_snap
    try:
        initial_snap = int(idict['Mesh refinement']['Initial adaptive refinement'])
    except KeyError:
        initial_snap = 6

    # select the snapshot range for the requested output type
    snaps, times, steps = [], [], []
    if type_ == 'graphical':
        start_ = initial_snap
        end_ = total_graphical_outputs + initial_snap
        snaps = list(range(start_, end_))
        times = graphical_times
        steps = graphical_steps
    elif type_ == 'particle':
        start_ = 0
        end_ = total_particles_outputs
        snaps = list(range(start_, end_))
        times = particle_times
        steps = particle_steps
    
    return snaps, times, steps
Example #25
    def __call__(self, parse_operations, **kwargs):
        '''
        Create a .prm file
        inputs:
            parse_operations(class): operations to do
            kwargs:
                method: (str) - method of generating files
                dirname: (str) - output directory, used with the 'auto' method
                basename: (str) - base for case name, used with the 'auto' method
                filename: (str) - output file, used with the 'manual' method
        '''
        # assign file name with a method defined
        _method = kwargs.get('method', 'auto')
        if _method == 'auto':
            _dirname = kwargs.get('dirname', '.')
            _basename = kwargs.get('basename', '')
            _extra = kwargs.get('extra', {})  # extra dictionary, passed to Intepret
            _extra_files = kwargs.get('extra_file', {})  # extra files, also passed along
            # Process particle data
            self.process_particle_data()
            # First intepret the configurations and update prm
            my_assert(self.config is not None, ValueError,
                      'With the \'auto\' method, the config must exist')
            self.Intepret(parse_operations, extra=_extra)
            
            # Next generate a case name
            self.case_name = _basename + self.CaseName()
            
            # After that, make a directory with case name
            _case_dir = os.path.join(_dirname, self.case_name)
            # By default, we don't update
            update_ = kwargs.get('update', 0)
            if not update_:
                my_assert(os.path.isdir(_case_dir) is False, ValueError, 'Going to update a pre-existing case, but update is not included in the options')
            if not os.path.isdir(_case_dir):
                os.mkdir(_case_dir)            

            # write configs to _json
            _json_outputs = {'basename': _basename, 'config': self.config, 'test': self.test, 'extra': _extra, 'extra_file': _extra_files}
            _json_ofile = os.path.join(_case_dir, 'config.json')
            with open(_json_ofile, 'w') as fout:
                json.dump(_json_outputs, fout)

            # At last, export a .prm file
            _filename = os.path.join(_case_dir, 'case.prm')
            with open(_filename, 'w') as fout:
                ParseToDealiiInput(fout, self.idict)

            # output particle data to an ascii file
            if self.particle_data is not None:
                _filename = os.path.join(_case_dir, 'particle.dat')
                with open(_filename, 'w') as fout:
                    self.output_particle_ascii(fout)

            # also copy the extra files
            if type(_extra_files) is str:
                _extra_files = [_extra_files]
            for _extra_file in _extra_files:
                shutil.copy2(_extra_file, _case_dir)

        elif _method == 'manual':
            # export a .prm file
            _filename = kwargs.get('filename', None)
            with open(_filename, 'w') as fout:
                ParseToDealiiInput(fout, self.idict)
            pass
        return self.case_name
Example #26
    def AppendAnalysis(self, _name, _project_dir, _case_dirs, _target_dir, kwargs):
        '''
        Append an analysis to doc
        Inputs:
            _name(str): name of analysis
            _project_dir(str): directory of the project
            _case_dirs(list): list of directories of cases to include
            _target_dir(str): directory to put outputs
            kwargs(dict): options (e.g. update, extra_analysis, basename)
        '''
        update = kwargs.get('update', False)

        # check on target directory 
        if not os.path.isdir(_target_dir):
            os.mkdir(_target_dir)
        
        # create hard link for images
        _target_img_dir = os.path.join(_target_dir, 'img')
        if not os.path.isdir(_target_img_dir):
            os.mkdir(_target_img_dir)
        # loop over imgs first to create a two-dimensional list
        # (note: [[]] * len(self.imgs) would alias a single inner list)
        _imgs_list = []
        for i in range(len(self.imgs)):
            _imgs_list.append([])
        for i in range(len(self.imgs)):
            img = self.imgs[i]
            for _dir in _case_dirs:
                _img_dir = os.path.join(_project_dir, _dir, 'img')
                _imgs = ReturnFileList(_img_dir, [img])
                # transfer _dir to a name to append
                _dir_transfered = re.sub(os.sep, '-', _dir)
                #_dir_transfered = os.path.basename(_dir)
                # create hard links in target_dir
                for _img in _imgs:
                    _file = os.path.join(_img_dir, _img)
                    _target_file = os.path.join(_target_img_dir, "%s_%s" %(_dir_transfered, _img))
                    if not os.path.isfile(_target_file):
                        os.link(_file, _target_file)
                    elif filecmp.cmp(_file, _target_file) is False:
                        os.remove(_target_file)
                        os.link(_file, _target_file)
                    _imgs_list[i].append(_target_file)

        # deal with extra analysis
        extra_analysis = kwargs.get('extra_analysis', {})
        my_assert(type(extra_analysis)==dict, TypeError, "AppendAnalysis: extra_analysis must be a dict")
        self.AnalyzeExtra(_name, _project_dir, _case_dirs, _target_dir, extra_analysis, kwargs)
        
        # Append a summary.md
        # append image information
        _base_name = kwargs.get('basename', None)
        if os.path.isfile(os.path.join(_target_dir, 'summary.md')):
            if update:
                _filename = self.GenerateAnalysisMkd(_target_dir, _case_dirs, images=_imgs_list, extra_analysis=extra_analysis)
        else:
            _filename = self.GenerateAnalysisMkd(_target_dir, _case_dirs, images=_imgs_list, extra_analysis=extra_analysis)
            # in a mkdocs file, files are listed as 'name/_filename'
            _summary = os.path.join(_name, os.path.basename(_filename))
            if _base_name is None:
                self.new_files['Summary'] = _summary
            else:
                # a subcase of a group
                self.new_files[_name]['Summary'] = os.path.join(_base_name, _summary)
Example #27
def analyze_affinity_test_results(test_results_dir, output_dir):
    '''
    analyze affinity test results
    Inputs:
        test_results_dir(str): directory of test results
        output_dir(str): directory for the output figure
    '''
    total_wall_clock = []
    assemble_stokes_system = []
    solve_stokes_system = []
    cores = []
    resolutions = []
    setups = []
    # go into sub dirs
    temp_file = os.path.join(ASPECT_LAB_DIR, 'temp')  # file to save partial results
    path_obj = pathlib.Path(test_results_dir).rglob("output*")
    i = 0
    for _path in path_obj:
        i += 1
        output_file = str(_path)
        output_path = os.path.join(test_results_dir, output_file)
        patterns = output_file.split('_')
        print("Output file found: %s" % output_path)
        # append data
        subprocess.run("%s/bash_scripts/parse_block_output.sh  analyze_affinity_test_results %s %s" 
                       % (ASPECT_LAB_DIR, output_file, temp_file), shell=True)
        try:
            data = np.genfromtxt(temp_file)
            total_wall_clock.append(data[0, -1])
            assemble_stokes_system.append(data[1, -1])
            solve_stokes_system.append(data[2, -1])
        except Exception:
            pass
        else:
            setups.append(int(patterns[-1]))
            resolutions.append(int(patterns[-2]))
            cores.append(int(patterns[-3]))
    my_assert(i > 0, AssertionError, "There is no output* file in the folder %s" % test_results_dir)

    setups = np.array(setups)
    resolutions = np.array(resolutions)
    cores = np.array(cores)
    total_wall_clock = np.array(total_wall_clock)
    assemble_stokes_system = np.array(assemble_stokes_system)
    solve_stokes_system = np.array(solve_stokes_system)
    # rearrange and sort data
    print("Affinity Test Results:")
    print('setups:', setups)
    print('resolutions:', resolutions)
    print("cores:", cores)
    print("total wall clocks:", total_wall_clock)
    print("assemble_stokes_system:", assemble_stokes_system)
    print("solve_stokes_system:", solve_stokes_system)
    
    # plot via matplotlib; collect the unique resolutions (sorted)
    resolution_options = np.unique(resolutions)

    fig, axs = plt.subplots(1, 2, figsize=(10, 5))
    for i in range(len(resolution_options)):
        resolution = resolution_options[i]
        plot_indexes = (resolutions == resolution) 
        xs = cores[plot_indexes]
        sequential_indexes = np.argsort(xs)
        ys1 = total_wall_clock[plot_indexes]
        # labels
        _label0 = 'Total Wall Clock(resolution=%d)' % resolution
        _label1 = 'Assemble Stokes System(resolution=%d)' % resolution
        _label2 = 'Solve Stokes System(resolution=%d)' % resolution
        # plot
        axs[0].loglog(xs[sequential_indexes], ys1[sequential_indexes], ".-", color=cm.gist_rainbow(1.0*i/len(resolution_options)),
                label=_label0)
        ys2 = assemble_stokes_system[plot_indexes]
        axs[0].loglog(xs[sequential_indexes], ys2[sequential_indexes], ".--", color=cm.gist_rainbow(1.0*i/len(resolution_options)),
                label=_label1)
        ys3 = ys2 / ys1
        axs[1].semilogx(xs[sequential_indexes], ys3[sequential_indexes], ".--", color=cm.gist_rainbow(1.0*i/len(resolution_options)),
                label=_label1)
        ys4 = solve_stokes_system[plot_indexes]
        axs[0].loglog(xs[sequential_indexes], ys4[sequential_indexes], ".-.", color=cm.gist_rainbow(1.0*i/len(resolution_options)),
                label=_label2)
        ys5 = ys4 / ys1
        axs[1].semilogx(xs[sequential_indexes], ys5[sequential_indexes], ".-.", color=cm.gist_rainbow(1.0*i/len(resolution_options)),
                label=_label2)
    axs[0].set_xlabel('Cores')
    axs[0].set_ylabel('Time [s]')
    axs[0].grid()
    axs[0].set_title('Wall Clock')
    axs[0].legend(fontsize='x-small')
    axs[1].set_xlabel('Cores')
    axs[1].set_ylabel('Percentage')
    axs[1].grid()
    axs[1].set_title('Percentage of Each Part')
    # title and save path 
    basename = os.path.basename(test_results_dir)
    fig.tight_layout()
    filepath = '%s/%s.png' % (output_dir, basename)
    print("output file generated: ", filepath)
    plt.savefig(filepath)

Example #28
def main():
    '''
    main function of this module
    Inputs:
        sys.argv[1](str):
            command
        sys.argv[2:](str):
            options
    '''
    _commend = sys.argv[1]
    # parse options
    parser = argparse.ArgumentParser(description='TwoDSubduction Project')
    parser.add_argument('-b',
                        '--base_file',
                        type=str,
                        default='./files/TwoDSubduction/base.prm',
                        help='Filename for base file')
    parser.add_argument('-U',
                        '--use_basename_as_base_file',
                        type=int,
                        default=1,
                        help='Whether we use basename as base file')
    parser.add_argument('-j',
                        '--json_file',
                        type=str,
                        default='./config_case.json',
                        help='Filename for json file')
    parser.add_argument('-o',
                        '--output_dir',
                        type=str,
                        default='../TwoDSubduction/',
                        help='Directory for output')
    parser.add_argument(
        '-e',
        '--operations_file',
        type=str,
        default=None,
        help=
        'A file that has a list of operations, if not given, do all the available operations'
    )
    parser.add_argument('-i',
                        '--input_dir',
                        type=str,
                        default=shilofue_DIR,
                        help='A directory that contains the input')
    parser.add_argument('-s', '--step', type=int, default=0, help='timestep')
    parser.add_argument('-ex',
                        '--extension',
                        type=str,
                        default='png',
                        help='extension for output')
    _options = sys.argv[2:]  # slicing never raises IndexError, so no try/except is needed
    arg = parser.parse_args(_options)

    # execute commend
    if _commend == 'create_group':
        # create a group of cases
        # example usage:
        #    python -m shilofue.TwoDSubduction create_group -j config_group.json 2>&1 > .temp
        # read configuration
        with open(arg.json_file, 'r') as fin:
            _config = json.load(fin)
        _base_name = _config.get('basename', '')
        # read base file
        if arg.use_basename_as_base_file == 1:
            _filename = './files/TwoDSubduction/%s.prm' % _base_name
        else:
            _filename = arg.base_file
        with open(_filename, 'r') as fin:
            _inputs = Parse.ParseFromDealiiInput(fin)
        if not os.path.isdir(arg.output_dir):
            os.mkdir(arg.output_dir)
            print('Now we create a group of cases:')  # screen output
        else:
            print('Now we update a group of cases:')  # screen output

        # create a directory under the name of the group
        _group_name = _config.get('name', 'foo')
        _odir = os.path.join(arg.output_dir, _group_name)
        # By default, we don't update
        update_ = _config.get('update', 0)
        if not update_:
            my_assert(
                os.path.isdir(_odir) is False, ValueError,
                'Going to update a pre-existing group, but update is not included in the options'
            )
        if not os.path.isdir(_odir):
            os.mkdir(_odir)

        # initiate a class instance
        MyGroup = Parse.GROUP_CASE(MYCASE, _inputs, _config)
        # call __call__ function to generate
        _extra = _config.get('extra', {})
        # add an entry for parse_operations
        parse_operations = MY_PARSE_OPERATIONS()
        _case_names = MyGroup(parse_operations,
                              _odir,
                              extra=_extra,
                              basename=_base_name,
                              update=update_)
        # generate auto.md
        # check if there is already a preexisting group
        Parse.AutoMarkdownGroup(_group_name, _config, dirname=_odir)
        for _case_name in _case_names:
            _case_dir = os.path.join(_odir, _case_name)
            _case_json_file = os.path.join(_case_dir, 'config.json')
            with open(_case_json_file, 'r') as fin:
                _case_config = json.load(fin)
            Parse.AutoMarkdownCase(_case_name, _case_config, dirname=_case_dir)
        print(_group_name)
        for _case_name in _case_names:
            # output to screen
            print(_case_name)

    elif _commend == 'create':
        print('Now we create a single case:')  # screen output
        # create a case
        # read files
        # read configuration
        with open(arg.json_file, 'r') as fin:
            _config = json.load(fin)
        _base_name = _config.get('basename', '')
        # read base file
        if arg.use_basename_as_base_file == 1:
            _filename = './files/TwoDSubduction/%s.prm' % _base_name
        else:
            _filename = arg.base_file
        with open(_filename, 'r') as fin:
            _inputs = Parse.ParseFromDealiiInput(fin)
        if not os.path.isdir(arg.output_dir):
            os.mkdir(arg.output_dir)
        # initialize a case
        MyCase = MYCASE(_inputs,
                        config=_config['config'],
                        test=_config['test'])
        # call __call__ function to generate
        _extra = _config.get('extra', {})
        # also add extra files
        _extra_files = _config.get('extra_file', {})
        # add an entry for parse_operations
        parse_operations = MY_PARSE_OPERATIONS()
        _case_name = MyCase(parse_operations,
                            dirname=arg.output_dir,
                            extra=_config['extra'],
                            basename=_base_name,
                            extra_file=_extra_files)
        # generate markdown file
        _case_dir = os.path.join(arg.output_dir, _case_name)
        Parse.AutoMarkdownCase(_case_name, _config, dirname=_case_dir)
        print(_case_name)
        # check this group exists
        my_assert(os.path.isdir(arg.output_dir), FileNotFoundError,
                  "%s doesn't exist" % arg.output_dir)
        # initial class instance, future
        # MyCase = MYCASE(_inputs, config=_config['config'], test=_config['test'])
        # call function to return case names
        # check that these cases exit

        pass

    elif _commend == 'update_docs':
        # update the contents of the mkdocs
        # example usage:
        #   python -m shilofue.TwoDSubduction update_docs -o /home/lochy/ASPECT_PROJECT/TwoDSubduction -j post_process.json
        _project_dir = arg.output_dir
        _project_dict = Parse.UpdateProjectJson(
            _project_dir)  # update project json file

        # load options for post_process
        # load the project level configuration as default
        project_pp_json = os.path.join(ASPECT_LAB_DIR, 'files', project,
                                       'post_process.json')
        with open(project_pp_json, 'r') as fin:
            pdict = json.load(fin)
        # load explicitly defined parameters
        with open(arg.json_file, 'r') as fin:
            pdict1 = json.load(fin)
        pdict.update(pdict1)

        # append analysis
        analysis_file = os.path.join(ASPECT_LAB_DIR, 'analysis.json')
        if os.path.isfile(analysis_file):
            with open(analysis_file, 'r') as fin:
                analysis_dict = json.load(fin)
        else:
            analysis_dict = {}

        # update docs
        docs_dict = pdict.get('docs', {})
        imgs = docs_dict.get('imgs', [])
        Doc.UpdateProjectDoc(_project_dict,
                             _project_dir,
                             images=imgs,
                             analysis=analysis_dict)

    elif _commend == 'update':
        # update a case
        # example usage:
        #   python -m shilofue.TwoDSubduction update -o /home/lochy/ASPECT_PROJECT/TwoDSubduction -j post_process.json
        _project_dir = arg.output_dir
        _project_dict = Parse.UpdateProjectJson(
            _project_dir)  # update project json file

        # load options for post_process
        # load the project level configuration as default
        project_pp_json = os.path.join(ASPECT_LAB_DIR, 'files', project,
                                       'post_process.json')
        with open(project_pp_json, 'r') as fin:
            pdict = json.load(fin)
        # load explicitly defined parameters
        with open(arg.json_file, 'r') as fin:
            pdict1 = json.load(fin)
        # update every entry in pdict1
        for key, value in pdict1.items():
            if type(value) == dict:
                try:
                    _ = pdict[key]
                    pdict[key].update(value)
                except KeyError:
                    pdict[key] = value
            else:
                pdict[key] = value

        # update auto.md file for every case
        Parse.UpdateProjectMd(_project_dict, _project_dir)

        # plot figures for every case
        # get sub cases
        pp_source_dirs = pdict.get('dirs', [])
        _format = pdict.get('py_format', 'png')
        for pp_source_dir_base in pp_source_dirs:
            pp_source_dir = os.path.join(_project_dir, pp_source_dir_base)
            pp_case_dirs = Parse.GetSubCases(pp_source_dir)
            Plot.ProjectPlot(pp_case_dirs, _format, update=False, pdict=pdict)
            # deal with project defined plots
            ProjectPlot(pp_case_dirs, _format, update=False, pdict=pdict)

    elif _commend == 'plot_newton_solver_step':
        # Plot one step from Newton solver
        # use -i option as input and -o option as output dir
        # example usage:
        #   python -m shilofue.TwoDSubduction plot_newton_solver_step -i tests/integration/fixtures/test-plot/newton_solver -o .test -s 1 --extension pdf
        filein = arg.input_dir
        output_dir = arg.output_dir
        step = arg.step
        extension = arg.extension
        ofile_route = os.path.join(output_dir,
                                   'NewtonSolverStep.%s' % extension)
        # plot newton solver output
        NewtonSolverStep = Plot.NEWTON_SOLVER_PLOT('NewtonSolverStep')
        # plot step0
        NewtonSolverStep.GetStep(step)
        NewtonSolverStep(filein, fileout=ofile_route)
        pass

    elif _commend == 'plot_newton_solver':
        # plot the whole history outputs from Newton solver
        # use -i option as input and -o option as output dir
        # example usages:
        #   python -m shilofue.TwoDSubduction plot_newton_solver -i tests/integration/fixtures/test-plot/newton_solver -o .test
        filein = arg.input_dir
        output_dir = arg.output_dir
        step = arg.step
        ofile_route = os.path.join(output_dir, 'NewtonSolver.pdf')
        # plot newton solver output
        NewtonSolverStep = Plot.NEWTON_SOLVER_PLOT('NewtonSolver')
        # plot step0
        NewtonSolverStep(filein, fileout=ofile_route)
        pass

    elif _commend == 'plot_machine_time':
        # plot the machine time output
        # use -i option as input and -o option as output dir
        # example usages:
        #   python -m shilofue.TwoDSubduction plot_machine_time -i tests/integration/fixtures/test-plot/machine_time -o .test
        filein = arg.input_dir
        output_dir = arg.output_dir
        ofile = os.path.join(output_dir, 'MachineTime.pdf')
        # plot newton solver output
        MachineTime = Plot.MACHINE_TIME_PLOT('MachineTime')
        # plot step0
        MachineTime(filein, fileout=ofile)
        pass

    elif _commend == 'plot_slab_morph':
        # plot the slab morph output
        # use -i option as input and -o option as output dir
        # example usages:
        #   python -m shilofue.TwoDSubduction plot_slab_morph
        #       -i /home/lochy/ASPECT_PROJECT/TwoDSubduction/non_linear26/cr80w5ULV3.000e+01/output/slab_morph
        #       -o /home/lochy/ASPECT_PROJECT/TwoDSubduction/non_linear26/cr80w5ULV3.000e+01/img
        filein = arg.input_dir
        output_dir = arg.output_dir
        ofile = os.path.join(output_dir, 'slab_morph.png')
        # Init the UnitConvert class
        UnitConvert = UNITCONVERT()
        # Get options
        project_pp_json = os.path.join(ASPECT_LAB_DIR, 'files',
                                       'TwoDSubduction', 'post_process.json')
        with open(project_pp_json, 'r') as fin:
            pdict = json.load(fin)
        plot_options = pdict.get('slab_morph', {})
        Slab_morph_plot = SLAB_MORPH_PLOT('slab_morph',
                                          unit_convert=UnitConvert,
                                          options=plot_options)
        # plot
        Slab_morph_plot(filein, fileout=ofile)

    elif _commend == 'process_slab_morph':
        # process slab morphology from visit particle output
        # generate a file that can be used for plot
        # example usages:
        # python -m shilofue.TwoDSubduction process_slab_morph -i
        #   /home/lochy/ASPECT_PROJECT/TwoDSubduction/non_linear26/cr80w5ULV3.000e+01 -j post_process.json
        case_dir = arg.input_dir
        # process slab morph with extra options
        with open(arg.json_file, 'r') as fin:
            dict_in = json.load(fin)
            extra_options = dict_in.get('slab_morph', {})
        try:
            SlabMorph(case_dir, extra_options)
        except FileNotFoundError:
            warnings.warn(
                'process_slab_morph: file existence requirements are not met')

    elif _commend == 'plot_slab_morph_case':
        # plot the slab morph output for a case
        # first generate slab_morph output
        case_dir = arg.input_dir
        # process slab morph with extra options
        with open(arg.json_file, 'r') as fin:
            dict_in = json.load(fin)
            extra_options = dict_in.get('slab_morph', {})
        try:
            SlabMorph(case_dir, extra_options)
        except FileNotFoundError:
            warnings.warn(
                'process_slab_morph: file existence requirements are not met')
        # then plot the slab morph figure
        filein = os.path.join(case_dir, 'output', 'slab_morph')
        output_dir = os.path.join(case_dir, 'img')
        ofile = os.path.join(output_dir, 'slab_morph.png')
        # Init the UnitConvert class
        UnitConvert = UNITCONVERT()
        # Get options
        project_pp_json = os.path.join(ASPECT_LAB_DIR, 'files',
                                       'TwoDSubduction', 'post_process.json')
        with open(project_pp_json, 'r') as fin:
            pdict = json.load(fin)
        plot_options = pdict.get('slab_morph', {})
        Slab_morph_plot = SLAB_MORPH_PLOT('slab_morph',
                                          unit_convert=UnitConvert,
                                          options=plot_options)
        # plot
        Slab_morph_plot(filein, fileout=ofile)

    elif _commend == 'plot':
        # future
        # plot something
        pass

    elif _commend == 'visit_options':
        # output bash options to a file that can be read by a bash script
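        # example usage (assumed, following the pattern of the other commands):
        #   python -m shilofue.TwoDSubduction visit_options -i <case_dir> -j config_case.json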
        # initiate class object
        case_dir = arg.input_dir

        Visit_Options = VISIT_OPTIONS(case_dir)

        # load extra options
        if arg.json_file == './config_case.json':
            # no json file was given; use empty extra options
            extra_options = {}
        else:
            with open(arg.json_file, 'r') as fin:
                dict_in = json.load(fin)
                extra_options = dict_in.get('visit', {})

        # call function
        ofile = os.path.join(ASPECT_LAB_DIR, 'visit_keys_values')
        Visit_Options(ofile, extra_options)

    elif _commend == 'plot_test_results':
        # plot the result of tests
        # example:
        # python -m shilofue.TwoDSubduction plot_test_results -i
        #  /home/lochy/softwares/aspect/build_TwoDSubduction/tests/ -o $TwoDSubduction_DIR/test_results
        source_dir = arg.input_dir
        # todo
        PlotTestResults(source_dir, output_dir=arg.output_dir)

    else:
        raise ValueError('Command %s is not available.' % _commend)
Example #29
0
def main():
    '''
    main function of this module
    Inputs:
        sys.argv[1](str):
            command
        sys.argv[2:](str):
            options
    '''
    # parse command
    _available_commends = ['create', 'create_group', 'plot', 'query',
                           'update', 'update_doc']  # only these commands are available now
    _commend = sys.argv[1]
    if _commend not in _available_commends:
        raise ValueError('Command %s is not available.' % _commend)
    # parse options
    parser = argparse.ArgumentParser(description='LearnAspect Project')
    parser.add_argument('-b',
                        '--base_file',
                        type=str,
                        default='./files/LearnAspect/base.prm',
                        help='Filename for base file')
    parser.add_argument('-U',
                        '--use_basename_as_base_file',
                        type=int,
                        default=1,
                        help='Whether we use basename as base file')
    parser.add_argument('-j',
                        '--json_file',
                        type=str,
                        default='./config_case.json',
                        help='Filename for json file')
    parser.add_argument('-o',
                        '--output_dir',
                        type=str,
                        default='../LearnAspect/',
                        help='Directory for output')
    parser.add_argument(
        '-e',
        '--operations_file',
        type=str,
        default=None,
        help=
        'A file that has a list of operations, if not given, do all the available operations'
    )
    # note: slicing sys.argv can't raise IndexError; it simply yields [] when empty
    _options = sys.argv[2:]
    arg = parser.parse_args(_options)
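    # example usage (module path assumed here, mirroring the project layout):
    #   python -m shilofue.LearnAspect create -j ./config_case.json -o ../LearnAspect/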
    # execute command
    if _commend == 'create_group':
        print('Now we create a group of cases:')  # screen output
        # create a group of cases
        # read files
        # read configuration
        with open(arg.json_file, 'r') as fin:
            _config = json.load(fin)
        _base_name = _config.get('basename', '')
        # read base prm file
        if arg.use_basename_as_base_file == 1:
            _filename = './files/LearnAspect/%s.prm' % _base_name
        else:
            _filename = arg.base_file
        with open(_filename, 'r') as fin:
            _inputs = ParseFromDealiiInput(fin)
        if not os.path.isdir(arg.output_dir):
            os.mkdir(arg.output_dir)
        # create a directory under the name of the group
        _group_name = _config.get('name', 'foo')
        _odir = os.path.join(arg.output_dir, _group_name)
        my_assert(not os.path.isdir(_odir), ValueError,
                  "The script doesn't support updating a pre-existing group")
        os.mkdir(_odir)
        # initialize a class instance
        MyGroup = GROUP_CASE(MYCASE, _inputs, _config)
        # call __call__ function to generate
        _extra = _config.get('extra', {})
        if arg.operations_file is None:
            # take all available operations
            _operations = _ALL_AVAILABLE_OPERATIONS
        else:
            # read operations from the given file (assumed: one name per line)
            with open(arg.operations_file, 'r') as fin:
                _operations = [_l.strip() for _l in fin if _l.strip()]
        _case_names = MyGroup(_odir,
                              operations=_operations,
                              extra=_extra,
                              basename=_base_name)
        # generate auto.md
        AutoMarkdownCase(_group_name, _config, dirname=_odir)
        for _case_name in _case_names:
            _case_dir = os.path.join(_odir, _case_name)
            _case_json_file = os.path.join(_case_dir, 'config.json')
            with open(_case_json_file, 'r') as fin:
                _case_config = json.load(fin)
            AutoMarkdownCase(_case_name, _case_config, dirname=_case_dir)
        print(_group_name)
        for _case_name in _case_names:
            # output to screen
            print(_case_name)

    elif _commend == 'create':
        print('Now we create a single case:')  # screen output
        # create a case
        # read files
        # read configuration
        with open(arg.json_file, 'r') as fin:
            _config = json.load(fin)
        # read base prm file
        _base_name = _config.get('basename', '')
        if arg.use_basename_as_base_file == 1:
            _filename = './files/LearnAspect/%s.prm' % _base_name
        else:
            _filename = arg.base_file
        with open(_filename, 'r') as fin:
            _inputs = ParseFromDealiiInput(fin)
        if not os.path.isdir(arg.output_dir):
            os.mkdir(arg.output_dir)
        # Initialize a case
        MyCase = MYCASE(_inputs,
                        config=_config['config'],
                        test=_config['test'])
        # call __call__ function to generate
        _extra = _config.get('extra', {})
        if arg.operations_file is None:
            # take all available operations
            _operations = _ALL_AVAILABLE_OPERATIONS
        else:
            # read operations from the given file (assumed: one name per line)
            with open(arg.operations_file, 'r') as fin:
                _operations = [_l.strip() for _l in fin if _l.strip()]
        # also get extra files
        _extra_files = _config.get('extra_file', {})
        _case_name = MyCase(dirname=arg.output_dir,
                            extra=_extra,
                            operations=_operations,
                            basename=_base_name,
                            extra_file=_extra_files)
        # generate markdown file
        _case_dir = os.path.join(arg.output_dir, _case_name)
        AutoMarkdownCase(_case_name, _config, dirname=_case_dir)
        print(_case_name)

    elif _commend == 'query':
        # for now, only output the cases in this group
        print('Now we query into a group')
        _config_file = os.path.join(arg.output_dir, 'config.json')
        # check this group exist
        my_assert(os.path.isdir(arg.output_dir), FileNotFoundError,
                  "%s doesn't exist" % arg.output_dir)
        my_assert(os.path.isfile(_config_file), FileNotFoundError,
                  "%s doesn't exist" % _config_file)
        # initialize a class instance, todo
        # MyCase = MYCASE(_inputs, config=_config['config'], test=_config['test'])
        # call function to return case names
        # check that these cases exist

    elif _commend == 'update_doc':
        # todo_future
        pass

    elif _commend == 'update':
        # update a case
        _project_dir = arg.output_dir
        _project_dict = UpdateProjectJson(
            _project_dir)  # update project json file
        UpdateProjectMd(_project_dict,
                        _project_dir)  # update auto.md file for every case
        ProjectPlot(_project_dict, _project_dir, 'png',
                    update=False)  # plot figures for every case
        UpdateProjectDoc(_project_dict,
                         _project_dir,
                         images=['Statistics', 'DepthAverage', 'visit'])

    elif _commend == 'plot':
        # todo_future
        # plot something
        pass
Example #30
0
    def Analyze(self, kwargs):
        """
        analyze data
        Args:
            kwargs(dict): options
                radius(float): radius of the earth, in m
                depth_ranges(list): [min, max] depth intervals (in m) over which to average the dip angle
        """
        # get options
        col_x = self.column_indexes['x']
        col_y = self.column_indexes['y']
        col_id = self.column_indexes['id']
        radius = kwargs.get("radius", 6371e3)

        # sort by id
        self.data = self.data[self.data[:, col_id].argsort()]

        # transfer to sph
        x = self.data[:, col_x]
        y = self.data[:, col_y]
        r, ph = cart2sph2(x, y)
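        # cart2sph2 presumably maps 2-D Cartesian coordinates to polar ones;
        # a sketch of the assumed behavior, not the actual implementation:
        #   r = np.sqrt(x**2.0 + y**2.0)
        #   ph = np.arctan2(y, x)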
        depth = radius - r

        # get maximum depth
        max_depth = np.max(depth)

        # get trench position
        # a depth for trench, in m
        trench_depth = kwargs.get('trench_depth', 1e3)
        mask_slab = (depth > trench_depth)
        trench_position = ph[mask_slab][0]

        # get length of slab
        # both length and dip have n-1 components because they are computed on
        # the intervals between consecutive points; each interval length follows
        # the polar line element ds = sqrt(dr^2 + r^2 dph^2)
        length = ((r[0:-1] - r[1:])**2.0 + r[0:-1]**2.0 *
                  (ph[0:-1] - ph[1:])**2.0)**0.5
        slab_length = LA.norm(length, 1)  # 1-norm of nonnegative lengths = their sum

        # get dip angle
        dip = SlabDip(r[0:-1], ph[0:-1], r[1:], ph[1:])
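        # SlabDip presumably returns the dip angle of each interval; an assumed
        # form, for orientation only:
        #   dip_i = arctan((r_i - r_{i+1}) / (r_i * |ph_{i+1} - ph_i|))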

        # get average dip angle in each depth range
        depth_ranges = kwargs.get('depth_ranges', [[0.0, 6371e3]])
        my_assert(
            type(depth_ranges) is list, TypeError,
            "VISIT_XYZ.Analyze: depth_ranges must be a list")
        # depth at the midpoint of each interval, matching the n-1 components
        # of dip and length
        depth_mid = 0.5 * (depth[0:-1] + depth[1:])
        dips_in_ranges = np.zeros(len(depth_ranges))
        limit = 1e-6
        for i in range(len(depth_ranges)):
            depth_range = depth_ranges[i]
            # select intervals whose midpoint depth falls in the range
            # (the ranges are depths in m, so the mask is on depth, not on dip)
            mask_range = (depth_mid > depth_range[0]) * (depth_mid < depth_range[1])
            total = dip[mask_range].dot(length[mask_range])
            weight = LA.norm(length[mask_range], 1)
            if weight < limit:
                # i.e. the slab doesn't reach this depth range
                dips_in_ranges[i] = 0.0
            else:
                dips_in_ranges[i] = total / weight
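        # e.g. intervals with dips [0.5, 0.6] rad and lengths [1e5, 3e5] m give a
        # length-weighted mean of (0.5 * 1e5 + 0.6 * 3e5) / 4e5 = 0.575 rad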

        # construct header
        # append time if present
        try:
            _time = kwargs['time']
        except KeyError:
            self.output_header = {
                'Maximum depth': {
                    'col': 0,
                    'unit': self.header['x']['unit']
                },
                'Trench position': {
                    'col': 1,
                    'unit': 'rad'
                },
                'Slab length': {
                    'col': 2,
                    'unit': self.header['x']['unit']
                }
            }
            total_cols = 3
        else:
            _time_unit = kwargs.get('time_unit', 'yr')
            self.output_header = {
                'Time': {
                    'col': 0,
                    'unit': _time_unit
                },
                'Maximum depth': {
                    'col': 1,
                    'unit': self.header['x']['unit']
                },
                'Trench position': {
                    'col': 2,
                    'unit': 'rad'
                },
                'Slab length': {
                    'col': 3,
                    'unit': self.header['x']['unit']
                }
            }
            total_cols = 4
        for i in range(len(depth_ranges)):
            key = 'Dip angle %d_%d (rad)' % (int(
                depth_ranges[i][0]), int(depth_ranges[i][1]))
            self.output_header[key] = {'col': i + total_cols}

        # manage output
        # append time if present
        try:
            _time = kwargs['time']
        except KeyError:
            output_data_temp = [max_depth, trench_position, slab_length]
        else:
            output_data_temp = [_time, max_depth, trench_position, slab_length]
        for i in range(len(depth_ranges)):
            output_data_temp.append(dips_in_ranges[i])
        self.output_data = Make2dArray(output_data_temp)
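        # Make2dArray presumably wraps the flat list into a (1, n) numpy array so
        # that each entry lines up with the 'col' indexes in self.output_header;
        # a sketch of the assumed behavior:
        #   self.output_data = np.array(output_data_temp).reshape(1, -1)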