Example #1
def prepare_output_file(report_info, data):
    if data['report'] == 'fieldComparisonReport':
        run_dir = report_info.run_dir
        fn = simulation_db.json_filename(template_common.OUTPUT_BASE_NAME, run_dir)
        if fn.exists():
            fn.remove()
            simulation_db.write_result(generate_field_comparison_report(data, run_dir), run_dir=run_dir)
Example #2
def prepare_output_file(report_info, data):
    report = data['report']
    if 'bunchReport' in report or 'twissReport' in report:
        fn = simulation_db.json_filename(template_common.OUTPUT_BASE_NAME, report_info.run_dir)
        if fn.exists():
            fn.remove()
            save_report_data(data, report_info.run_dir)
Example #3
def prepare_sequential_output_file(run_dir, data):
    report = data.report
    if 'bunchReport' in report or 'twissReport' in report or 'opticsReport' in report:
        fn = simulation_db.json_filename(template_common.OUTPUT_BASE_NAME, run_dir)
        if fn.exists():
            fn.remove()
            save_sequential_report_data(data, run_dir)
Example #4
def prepare_sequential_output_file(run_dir, sim_in):
    if 'fileColumnReport' not in sim_in.report:
        return
    fn = simulation_db.json_filename(template_common.OUTPUT_BASE_NAME, run_dir)
    if fn.exists():
        fn.remove()
        extract_report_data(run_dir, sim_in)
Example #5
def _load_in_json(run_dir):
    p = simulation_db.json_filename(
        sirepo.template.template_common.INPUT_BASE_NAME, run_dir)
    c = simulation_db.read_json(p)
    return c, c.computeJobCacheKey.computeJobStart if \
        c.get('computeJobCacheKey') else \
        int(p.mtime())
Example #6
def prepare_sequential_output_file(run_dir, data):
    r = data.report
    if r == 'twissReport':
        f = simulation_db.json_filename(template_common.OUTPUT_BASE_NAME, run_dir)
        if f.exists():
            f.remove()
            save_sequential_report_data(data, run_dir)
Example #7
def run_background(cfg_dir):
    res = {}
    data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
    distribution = data['models']['bunch']['distribution']
    run_with_mpi = distribution == 'lattice' or distribution == 'file'
    try:
        with pkio.save_chdir(cfg_dir):
            if run_with_mpi:
                mpi.run_script(
                    pkio.read_text(template_common.PARAMETERS_PYTHON_FILE))
            else:
                #TODO(pjm): MPI doesn't work with rsbeams distributions yet
                exec(pkio.read_text(template_common.PARAMETERS_PYTHON_FILE),
                     locals(), locals())
    except Exception as e:
        res = {
            'error': str(e),
        }
    if run_with_mpi and 'error' in res:
        text = pkio.read_text('mpi_run.out')
        m = re.search(r'^Traceback .*?^\w*Error: (.*?)\n\n', text,
                      re.MULTILINE | re.DOTALL)
        if m:
            res['error'] = m.group(1)
            # remove output file - write_result() will not overwrite an existing error output
            pkio.unchecked_remove(
                simulation_db.json_filename(template_common.OUTPUT_BASE_NAME))
    simulation_db.write_result(res)
Example #8
def prepare_output_file(report_info, data):
    if data['report'] == 'twissReport' or 'bunchReport' in data['report']:
        fn = simulation_db.json_filename(template_common.OUTPUT_BASE_NAME, report_info.run_dir)
        if fn.exists():
            fn.remove()
            output_file = report_info.run_dir.join(_report_output_filename(data['report']))
            if output_file.exists():
                save_report_data(data, report_info.run_dir)
Example #9
def prepare_sequential_output_file(run_dir, data):
    if data.report == 'twissReport' or 'bunchReport' in data.report:
        fn = simulation_db.json_filename(template_common.OUTPUT_BASE_NAME,
                                         run_dir)
        if fn.exists():
            fn.remove()
            output_file = run_dir.join(_report_output_filename(data.report))
            if output_file.exists():
                save_sequential_report_data(data, run_dir)
Example #10
def prepare_output_file(report_info, data):
    if data['report'] == 'twissReport' or 'bunchReport' in data['report']:
        fn = simulation_db.json_filename(template_common.OUTPUT_BASE_NAME,
                                         report_info.run_dir)
        if fn.exists():
            fn.remove()
            output_file = report_info.run_dir.join(
                _report_output_filename(data['report']))
            if output_file.exists():
                save_report_data(data, report_info.run_dir)
Example #11
def prepare_sequential_output_file(run_dir, data):
    report = data['report']
    if 'fileColumnReport' in report or 'partitionColumnReport' in report:
        fn = simulation_db.json_filename(template_common.OUTPUT_BASE_NAME, run_dir)
        if fn.exists():
            fn.remove()
            try:
                save_sequential_report_data(run_dir, data)
            except IOError:
                # the output file isn't readable
                pass
Example #12
def background_percent_complete(report, run_dir, is_running):
    data_path = run_dir.join(template_common.INPUT_BASE_NAME)
    if not os.path.exists(str(simulation_db.json_filename(data_path))):
        return PKDict(
            percentComplete=0,
            frameCount=0,
        )
    return PKDict(
        percentComplete=100,
        # real frame count depends on the series selected
        frameCount=1,
    )
Example #13
def read_sequential_result(run_dir):
    """Read result data file from simulation

    Args:
        run_dir (py.path): where to find output

    Returns:
        dict: result
    """
    from sirepo import simulation_db

    return simulation_db.read_json(
        simulation_db.json_filename(OUTPUT_BASE_NAME, run_dir))
Example #14
def prepare_sequential_output_file(run_dir, data):
    r = data.report
    if r == 'twissReport' or _is_report('bunchReport', r) or _is_report(
            'twissEllipseReport', r):
        f = simulation_db.json_filename(template_common.OUTPUT_BASE_NAME,
                                        run_dir)
        if f.exists():
            f.remove()
            try:
                save_sequential_report_data(data, run_dir)
            except IOError:
                # the output file isn't readable
                pass
Example #15
def background_percent_complete(report, run_dir, is_running):
    data_path = run_dir.join(template_common.INPUT_BASE_NAME)
    if not os.path.exists(str(simulation_db.json_filename(data_path))):
        return {
            'percentComplete': 0,
            'frameCount': 0,
        }
    return {
        'percentComplete': 100,
        # real frame count depends on the series selected
        'frameCount': 1,
        'errors': '',
    }
Example #16
def _reqd(req):
    """Read the run_dir and return cached_data.

    The cache is a hit only if the models in the request and in the cache
    match exactly; otherwise, cached data is still returned if it is present
    and valid.

    Args:
        req (dict): parsed simulation data

    Returns:
        Dict: report parameters and hashes
    """
    res = PKDict(
        cache_hit=False,
        cached_data=None,
        cached_hash=None,
        parameters_changed=False,
        run_dir=simulation_db.simulation_run_dir(req.req_data),
        sim_data=req.sim_data,
    )
    res.pkupdate(
        input_file=simulation_db.json_filename(
            template_common.INPUT_BASE_NAME,
            res.run_dir,
        ),
        is_parallel=res.sim_data.is_parallel(req.req_data),
        jid=res.sim_data.parse_jid(req.req_data),
        job_status=_read_status(res.run_dir),
        model_name=res.sim_data.parse_model(req.req_data.report),
        req_hash=(req.req_data.get('computeJobHash')
                  or res.sim_data.compute_job_hash(req.req_data)),
    )
    if not res.run_dir.check():
        return res
    try:
        c = simulation_db.read_json(res.input_file)
    except Exception as e:
        if pykern.pkio.exception_is_not_found(e):
            return res
        raise
    res.cached_data = c
    # backwards compatibility for old runs that don't have computeJobCacheKey
    res.cached_hash = c.models.pksetdefault(computeJobCacheKey=lambda: PKDict(
        computeJobHash=res.sim_data.compute_job_hash(c),
        computeJobSerial=int(res.input_file.mtime()),
    ), ).computeJobCacheKey.computeJobHash
    if res.req_hash == res.cached_hash:
        res.cache_hit = True
        return res
    res.parameters_changed = True
    return res
Example #17
def prepare_output_file(run_dir, data):
    if data.report == 'fieldComparisonReport' or data.report == 'fieldReport':
        fn = simulation_db.json_filename(template_common.OUTPUT_BASE_NAME,
                                         run_dir)
        if fn.exists():
            fn.remove()
            if data.report == 'fieldComparisonReport':
                simulation_db.write_result(generate_field_comparison_report(
                    data, run_dir),
                                           run_dir=run_dir)
            else:
                simulation_db.write_result(generate_field_report(
                    data, run_dir),
                                           run_dir=run_dir)
Example #18
def write_sequential_result(result, run_dir=None):
    """Write the results of a sequential simulation to disk.

    Args:
        result (dict): The results of the simulation
        run_dir (py.path): Defaults to current dir
    """
    from sirepo import simulation_db

    if not run_dir:
        run_dir = pkio.py_path()
    f = simulation_db.json_filename(OUTPUT_BASE_NAME, run_dir)
    assert not f.exists(), \
        '{} file exists'.format(OUTPUT_BASE_NAME)
    simulation_db.write_json(f, result)
    t = sirepo.template.import_module(
        simulation_db.read_json(
            simulation_db.json_filename(
                INPUT_BASE_NAME,
                run_dir,
            ), ), )
    if hasattr(t, 'clean_run_dir'):
        t.clean_run_dir(run_dir)
Example #19
def run_background(cfg_dir):
    res = {}
    data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
    distribution = data['models']['bunch']['distribution']
    run_with_mpi = distribution == 'lattice' or distribution == 'file'
    try:
        with pkio.save_chdir(cfg_dir):
            if run_with_mpi:
                mpi.run_script(pkio.read_text(template_common.PARAMETERS_PYTHON_FILE))
            else:
                #TODO(pjm): MPI doesn't work with rsbeams distributions yet
                exec(pkio.read_text(template_common.PARAMETERS_PYTHON_FILE), locals(), locals())
    except Exception as e:
        res = {
            'error': str(e),
        }
    if run_with_mpi and 'error' in res:
        text = pkio.read_text('mpi_run.out')
        m = re.search(r'^Traceback .*?^\w*Error: (.*?)\n\n', text, re.MULTILINE|re.DOTALL)
        if m:
            res['error'] = m.group(1)
            # remove output file - write_result() will not overwrite an existing error output
            pkio.unchecked_remove(simulation_db.json_filename(template_common.OUTPUT_BASE_NAME))
    simulation_db.write_result(res)
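
The examples above all follow the same basic pattern: build the path to a run directory's JSON input or output file with simulation_db.json_filename(), then read, remove, or rewrite that file. Below is a minimal sketch of that pattern, not taken from any of the examples: refresh_output() and its regenerate argument are illustrative stand-ins for the template-specific prepare_*_output_file() functions and their save/generate calls.

from sirepo import simulation_db
from sirepo.template import template_common


def refresh_output(run_dir, data, regenerate):
    # Path to run_dir's OUTPUT_BASE_NAME .json file; json_filename() builds
    # the JSON file name and anchors it in run_dir when one is given.
    fn = simulation_db.json_filename(template_common.OUTPUT_BASE_NAME, run_dir)
    if fn.exists():
        # As in the prepare_*_output_file() examples, the stale output is
        # removed and the report regenerated only when a previous output
        # file exists.
        fn.remove()
        regenerate(data, run_dir)

Called as refresh_output(run_dir, data, save_sequential_report_data), this mirrors the body of most of the prepare_sequential_output_file() implementations shown above.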