Example #1
def _output_info(run_dir):
    # cache outputInfo to file, used later for report frames
    info_file = run_dir.join(_OUTPUT_INFO_FILE)
    if os.path.isfile(str(info_file)):
        try:
            res = simulation_db.read_json(info_file)
            if not res or res[0].get('_version', '') == _OUTPUT_INFO_VERSION:
                return res
        except ValueError:
            pass  # unreadable cache; fall through and rebuild below
    data = simulation_db.read_json(
        run_dir.join(template_common.INPUT_BASE_NAME))
    files = _build_filename_map(data)
    res = []
    for k in files.keys_in_order:
        f = files[k]
        if run_dir.join(f.filename).exists():
            res.append(_file_info(f.filename, run_dir, k))
    if LatticeUtil.find_first_command(data, _END_MATCH_COMMAND):
        res.insert(
            0,
            PKDict(
                modelKey='matchAnimation',
                filename='madx.log',
                isHistogram=False,
                plottableColumns=[],
                pageCount=0,
            ))
    if res:
        res[0]['_version'] = _OUTPUT_INFO_VERSION
    simulation_db.write_json(info_file, res)
    return res
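The pattern in Example #1 — read a versioned JSON cache, and rebuild and rewrite it when it is missing, unreadable, or stale — can be isolated in a small sketch. Everything here (the _CACHE_VERSION constant, the rebuild callback, plain json instead of simulation_db) is illustrative, not Sirepo API.

import json
import os

_CACHE_VERSION = 2  # illustrative version tag


def load_or_rebuild(cache_path, rebuild):
    """Return the cached list if present and current, else rebuild and re-cache it."""
    if os.path.isfile(cache_path):
        try:
            with open(cache_path) as f:
                res = json.load(f)
            # an empty cache or a matching version marker is considered valid
            if not res or res[0].get('_version') == _CACHE_VERSION:
                return res
        except ValueError:
            pass  # unreadable cache; fall through and rebuild
    res = rebuild()
    if res:
        res[0]['_version'] = _CACHE_VERSION
    with open(cache_path, 'w') as f:
        json.dump(res, f)
    return res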
Example #2
def background_percent_complete(report, run_dir, is_running):
    #TODO(robnagler) remove duplication in run_dir.exists() (outer level?)
    errors, last_element = parse_elegant_log(run_dir)
    res = {
        'percentComplete': 100,
        'frameCount': 0,
        'errors': errors,
    }
    if is_running:
        data = simulation_db.read_json(
            run_dir.join(template_common.INPUT_BASE_NAME))
        res['percentComplete'] = _compute_percent_complete(data, last_element)
        return res
    if not run_dir.join(_ELEGANT_SEMAPHORE_FILE).exists():
        return res
    data = simulation_db.read_json(
        run_dir.join(template_common.INPUT_BASE_NAME))
    output_info = _output_info(run_dir, data)
    return {
        'percentComplete': 100,
        'frameCount': 1,
        'outputInfo': output_info,
        'lastUpdateTime': output_info[0]['lastUpdateTime'],
        'errors': errors,
    }
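_compute_percent_complete is not shown in Example #2. A hypothetical, minimal version — assuming last_element is an element name and the input data carries an ordered element list under data['models']['elements'] — is sketched below; it only illustrates the progress convention, not the actual elegant template logic.

def _compute_percent_complete(data, last_element):
    # hypothetical: progress is the position of the last logged element
    # within an assumed ordered element list on the input data
    names = [e['name'] for e in data['models'].get('elements', [])]
    if not last_element or last_element not in names:
        return 0
    return 100.0 * (names.index(last_element) + 1) / len(names)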
Example #3
def get_data_file(run_dir, model, frame):
    if frame >= 0:
        data = simulation_db.read_json(run_dir.join(template_common.INPUT_BASE_NAME))
        # ex. elementAnimation17-55
        id = re.sub(r"elementAnimation", "", model).split("-")
        filename = _get_filename_for_element_id(id, data)
        path = str(run_dir.join(filename))
        with open(path) as f:
            return os.path.basename(path), f.read(), "application/octet-stream"

    if model == "animation":
        path = str(run_dir.join(ELEGANT_LOG_FILE))
        with open(path) as f:
            return "elegant-output.txt", f.read(), "text/plain"

    if model == "beamlineReport":
        data = simulation_db.read_json(str(run_dir.join("..", simulation_db.SIMULATION_DATA_FILE)))
        source = generate_parameters_file(data, is_parallel=True)
        return "python-source.py", source, "text/plain"

    for path in glob.glob(str(run_dir.join("elegant.bun"))):
        path = str(py.path.local(path))
        with open(path) as f:
            return os.path.basename(path), f.read(), "application/octet-stream"
    raise RuntimeError("no datafile found in run_dir: {}".format(run_dir))
Example #4
def _optimizer_percent_complete(run_dir, is_running):
    if not run_dir.exists():
        return PKDict(
            percentComplete=0,
            frameCount=0,
        )
    res, best_row = _read_optimizer_output(run_dir)
    summary_data = None
    frame_count = 0
    data = simulation_db.read_json(run_dir.join(template_common.INPUT_BASE_NAME))
    optimizer = data.models.optimizer
    if res is not None:
        frame_count = len(res)
        if not is_running:
            result_file = run_dir.join(_OPTIMIZER_RESULT_FILE)
            if result_file.exists():
                summary_data = simulation_db.read_json(result_file)
        if not summary_data:
            best_row = best_row.tolist()
            summary_data = {
                'fun': best_row[3],
                'x': best_row[4:],
            }
        summary_data['fields'] = optimizer.fields
    if is_running:
        status_file = run_dir.join(_OPTIMIZER_STATUS_FILE)
        if status_file.exists():
            try:
                if not summary_data:
                    summary_data = {}
                rows = np.loadtxt(str(status_file))
                if len(rows.shape) == 1:
                    rows = np.array([rows])
                summary_data['statusRows'] = rows.tolist()
                summary_data['fields'] = optimizer.fields
                summary_data['frameCount'] = frame_count
                summary_data['initialSteps'] = optimizer.initialSteps
                summary_data['optimizerSteps'] = optimizer.optimizerSteps
            except TypeError:
                pass
            except ValueError:
                pass
    if summary_data:
        return PKDict(
            percentComplete=0 if is_running else 100,
            frameCount=frame_count,
            summary=summary_data,
        )
    if is_running:
        return PKDict(
            percentComplete=0,
            frameCount=0,
        )
    #TODO(pjm): determine optimization error
    return PKDict(
        percentComplete=0,
        frameCount=0,
        error='optimizer produced no data',
        state='error',
    )
Example #5
def get_data_file(run_dir, model, frame):
    if frame >= 0:
        data = simulation_db.read_json(
            run_dir.join(template_common.INPUT_BASE_NAME))
        # ex. elementAnimation17-55
        id = re.sub(r'elementAnimation', '', model).split('-')
        filename = _get_filename_for_element_id(id, data)
        path = str(run_dir.join(filename))
        with open(path) as f:
            return os.path.basename(path), f.read(), 'application/octet-stream'

    if model == 'animation':
        path = str(run_dir.join(ELEGANT_LOG_FILE))
        with open(path) as f:
            return 'elegant-output.txt', f.read(), 'text/plain'

    if model == 'beamlineReport':
        data = simulation_db.read_json(
            str(run_dir.join('..', simulation_db.SIMULATION_DATA_FILE)))
        source = generate_parameters_file(data, is_parallel=True)
        return 'python-source.py', source, 'text/plain'

    for path in glob.glob(str(run_dir.join('elegant.bun'))):
        path = str(py.path.local(path))
        with open(path) as f:
            return os.path.basename(path), f.read(), 'application/octet-stream'
    raise RuntimeError('no datafile found in run_dir: {}'.format(run_dir))
Example #6
def get_data_file(run_dir, model, frame, options=None):
    def _sdds(filename):
        path = run_dir.join(filename)
        assert path.check(file=True, exists=True), \
            '{}: not found'.format(path)
        if not options.suffix:
            with open(str(path)) as f:
                return path.basename, f.read(), 'application/octet-stream'
        if options.suffix == 'csv':
            out = elegant_common.subprocess_output(['sddsprintout', '-columns', '-spreadsheet=csv', str(path)])
            assert out, \
                '{}: invalid or empty output from sddsprintout'.format(path)
            return path.purebasename + '.csv', out, 'text/csv'
        raise AssertionError('{}: invalid suffix for download path={}'.format(options.suffix, path))

    if frame >= 0:
        data = simulation_db.read_json(run_dir.join(template_common.INPUT_BASE_NAME))
        # ex. elementAnimation17-55
        i = re.sub(r'elementAnimation', '', model).split(_FILE_ID_SEP)
        return _sdds(_get_filename_for_element_id(i, data))

    if model == 'animation':
        path = run_dir.join(ELEGANT_LOG_FILE)
        if not path.exists():
            return 'elegant-output.txt', '', 'text/plain'
        with open(str(path)) as f:
            return 'elegant-output.txt', f.read(), 'text/plain'

    if model == 'beamlineReport':
        data = simulation_db.read_json(str(run_dir.join('..', simulation_db.SIMULATION_DATA_FILE)))
        source = generate_parameters_file(data, is_parallel=True)
        return 'python-source.py', source, 'text/plain'

    return _sdds(_report_output_filename('bunchReport'))
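The CSV branch in Example #6 shells out to sddsprintout. The same conversion, isolated with the standard library instead of elegant_common.subprocess_output, could look like the sketch below; it assumes the SDDS toolkit is installed and on PATH.

import subprocess


def sdds_to_csv(path):
    # same sddsprintout flags as the example above
    out = subprocess.check_output(
        ['sddsprintout', '-columns', '-spreadsheet=csv', str(path)],
    )
    assert out, '{}: invalid or empty output from sddsprintout'.format(path)
    return out.decode()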
Example #7
def _output_info(run_dir):
    # cache outputInfo to file, used later for report frames
    info_file = run_dir.join(_OUTPUT_INFO_FILE)
    if os.path.isfile(str(info_file)):
        try:
            res = simulation_db.read_json(info_file)
            if len(res) == 0 or res[0].get('_version', '') == _OUTPUT_INFO_VERSION:
                return res
        except ValueError:
            pass
    data = simulation_db.read_json(
        run_dir.join(template_common.INPUT_BASE_NAME))
    res = []
    filename_map = _build_filename_map(data)
    for k in filename_map.keys_in_order:
        filename = filename_map[k]
        id = k.split(_FILE_ID_SEP)
        info = _file_info(filename, run_dir, id[0], id[1])
        if info:
            info.modelKey = 'elementAnimation{}'.format(info.id)
            res.append(info)
    if len(res):
        res[0]['_version'] = _OUTPUT_INFO_VERSION
    simulation_db.write_json(info_file, res)
    return res
Example #8
def run_background(cfg_dir):
    data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
    if data.report == 'setupAnimation':
        return
    mpi.run_program([
        pkio.py_path(cfg_dir).join(_SIM_DATA.flash_exe_basename(data)),
    ])
Example #9
def run(cfg_dir):
    data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
    if data.report == 'dvhReport':
        filename = _SIM_DATA.lib_file_for_sim(data, _DVH_FILE_NAME)
        template_common.write_sequential_result(
            simulation_db.read_json(filename))
    elif data.report == 'dicom3DReport':
        template_common.write_sequential_result({})
    else:
        assert False, 'unknown report: {}'.format(data.report)
Example #10
def compute_field_range(args, compute_range):
    """ Computes the fieldRange values for all parameters across all animation files.
    Caches the value on the animation input file. compute_range() is called to
    read the simulation specific datafiles and extract the ranges by field.
    """
    from sirepo import simulation_db
    run_dir = simulation_db.simulation_run_dir({
        'simulationType': args['simulationType'],
        'simulationId': args['simulationId'],
        'report': 'animation',
    })
    data = simulation_db.read_json(run_dir.join(INPUT_BASE_NAME))
    res = None
    model_name = args['modelName']
    if model_name in data.models:
        if 'fieldRange' in data.models[model_name]:
            res = data.models[model_name].fieldRange
        else:
            res = compute_range(run_dir, data)
            data.models[model_name].fieldRange = res
            simulation_db.write_json(run_dir.join(INPUT_BASE_NAME), data)
    return {
        'fieldRange': res,
    }
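The compute_range callback passed to compute_field_range in Example #10 only needs to return a mapping of field name to [min, max]. A hypothetical callback — _frame_values() is an invented helper yielding per-frame {field: values} dicts — could aggregate like this:

def _compute_range(run_dir, data):
    # hypothetical: track a running [min, max] per field across all frames
    res = {}
    for frame in _frame_values(run_dir):  # invented helper, not Sirepo API
        for field, values in frame.items():
            lo, hi = min(values), max(values)
            if field in res:
                res[field] = [min(res[field][0], lo), max(res[field][1], hi)]
            else:
                res[field] = [lo, hi]
    return res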
Example #11
 def get_application_data(self, args, schema, ignore_array_values=False):
     from sirepo import simulation_db
     if args.method == 'rpn_value':
         if ignore_array_values and re.search(r'^\{.*\}$', args.value):
             # accept array of values enclosed in curly braces
             args.result = ''
             return True
         v, err = self.eval_var(args.value)
         if err:
             args.error = err
         else:
             args.result = v
         return True
     if args.method == 'recompute_rpn_cache_values':
         self.recompute_cache(args.cache)
         return True
     if args.method == 'validate_rpn_delete':
         model_data = simulation_db.read_json(
             simulation_db.sim_data_file(
                 args.simulationType,
                 args.simulationId,
             ))
         args.error = self.validate_var_delete(
             args.name,
             model_data,
             schema,
         )
         return True
     return False
Example #12
def _mpi_core_count(run_dir):
    mpi_file = py.path.local(run_dir).join(MPI_SUMMARY_FILE)
    if mpi_file.exists():
        info = simulation_db.read_json(mpi_file)
        if 'mpiCores' in info:
            return info['mpiCores']
    return 0
Example #13
def run_background(cfg_dir):
    res = {}
    data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
    distribution = data['models']['bunch']['distribution']
    run_with_mpi = distribution == 'lattice' or distribution == 'file'
    try:
        with pkio.save_chdir(cfg_dir):
            if run_with_mpi:
                mpi.run_script(
                    pkio.read_text(template_common.PARAMETERS_PYTHON_FILE))
            else:
                #TODO(pjm): MPI doesn't work with rsbeams distributions yet
                exec(pkio.read_text(template_common.PARAMETERS_PYTHON_FILE),
                     locals(), locals())
    except Exception as e:
        res = {
            'error': str(e),
        }
    if run_with_mpi and 'error' in res:
        text = pkio.read_text('mpi_run.out')
        m = re.search(r'^Traceback .*?^\w*Error: (.*?)\n\n', text,
                      re.MULTILINE | re.DOTALL)
        if m:
            res['error'] = m.group(1)
            # remove output file - write_result() will not overwrite an existing error output
            pkio.unchecked_remove(
                simulation_db.json_filename(template_common.OUTPUT_BASE_NAME))
    simulation_db.write_result(res)
Example #14
    def _create_file():
        from sklearn.preprocessing import LabelEncoder

        # POSIT: Matches logic in package_data.template.ml.scale.py.jinja.read_data_and_encode_output_column()
        data = simulation_db.read_json(
            frame_args.run_dir.join(template_common.INPUT_BASE_NAME),
        )
        v = np.genfromtxt(
            str(simulation_db.simulation_lib_dir(SIM_TYPE).join(
                _filename(data.models.dataFile.file),
            )),
            delimiter=',',
            skip_header=data.models.columnInfo.hasHeaderRow,
            dtype=None,
            encoding='utf-8',
        )
        o = data.models.columnInfo.inputOutput.index('output')
        c = v[f'f{o}']
        e = LabelEncoder().fit(c)
        res = PKDict(
            zip(
                e.transform(e.classes_).astype(float).tolist(),
                e.classes_,
            ),
        )
        pkjson.dump_pretty(
            res,
            filename=_OUTPUT_FILE.classificationOutputColEncodingFile,
        )
        return res
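The LabelEncoder round trip in Example #14 builds a numeric-code-to-label mapping. Stripped of the Sirepo file handling, it reduces to the sketch below (the sample labels are made up):

import numpy as np
from sklearn.preprocessing import LabelEncoder

labels = np.array(['cat', 'dog', 'cat', 'bird'])
e = LabelEncoder().fit(labels)
# classes_ is sorted, so transform(classes_) is simply 0..n-1;
# the zip yields e.g. {0.0: 'bird', 1.0: 'cat', 2.0: 'dog'}
mapping = dict(zip(e.transform(e.classes_).astype(float).tolist(), e.classes_))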
Example #15
def background_percent_complete(report, run_dir, is_running):
    error = ''
    if not is_running:
        show_tunes_report = False
        show_spin_3d = False
        in_file = run_dir.join('{}.json'.format(
            template_common.INPUT_BASE_NAME))
        if in_file.exists():
            data = simulation_db.read_json(
                run_dir.join(template_common.INPUT_BASE_NAME))
            show_tunes_report = _particle_count(data) <= _SCHEMA.constants.maxFilterPlotParticles \
                and data.models.simulationSettings.npass >= 10
            show_spin_3d = data.models.SPNTRK.KSO == '1'
        count = read_frame_count(run_dir)
        if count:
            plt_file = run_dir.join(_ZGOUBI_PLT_DATA_FILE)
            return PKDict(
                hasPlotFile=plt_file.exists(),
                percentComplete=100,
                frameCount=count,
                showTunesReport=show_tunes_report,
                showSpin3d=show_spin_3d,
            )
        else:
            error = _parse_zgoubi_log(run_dir)
    res = PKDict(
        percentComplete=0,
        frameCount=0,
    )
    if error:
        res.error = error
    return res
Example #16
def save_report_data(data, run_dir):
    report_name = data['report']
    error = ''
    if 'twissReport' in report_name or 'opticsReport' in report_name:
        enum_name = _REPORT_ENUM_INFO[report_name]
        report = data['models'][report_name]
        plots = []
        col_names, rows = _read_data_file(
            py.path.local(run_dir).join(_ZGOUBI_TWISS_FILE))
        for f in ('y1', 'y2', 'y3'):
            if report[f] == 'none':
                continue
            points = column_data(report[f], col_names, rows)
            if any(map(lambda x: math.isnan(x), points)):
                error = 'Twiss data could not be computed for {}'.format(
                    template_common.enum_text(_SCHEMA, enum_name, report[f]))
            plots.append({
                'points': points,
                'label': template_common.enum_text(_SCHEMA, enum_name, report[f]),
            })
        #TODO(pjm): use template_common
        x = column_data('sums', col_names, rows)
        res = {
            'title': '',
            'x_range': [min(x), max(x)],
            'y_label': '',
            'x_label': 's [m]',
            'x_points': x,
            'plots': plots,
            'y_range': template_common.compute_plot_color_and_range(plots),
            'summaryData': _read_twiss_header(run_dir),
        }
    elif report_name == 'twissSummaryReport':
        res = {
            #TODO(pjm): x_range required by sirepo-plotting.js
            'x_range': [],
            'summaryData': _read_twiss_header(run_dir),
        }
    elif 'bunchReport' in report_name:
        report = data['models'][report_name]
        col_names, rows = _read_data_file(
            py.path.local(run_dir).join(_ZGOUBI_FAI_DATA_FILE))
        res = _extract_heatmap_data(report, col_names, rows, '')
        summary_file = py.path.local(run_dir).join(BUNCH_SUMMARY_FILE)
        if summary_file.exists():
            res['summaryData'] = {
                'bunch': simulation_db.read_json(summary_file)
            }
    else:
        raise RuntimeError('unknown report: {}'.format(report_name))
    if error:
        res = {
            'error': error,
        }
    simulation_db.write_result(
        res,
        run_dir=run_dir,
    )
Example #17
def get_data_file(run_dir, model, frame, options=None, **kwargs):
    sim_in = simulation_db.read_json(
        run_dir.join(template_common.INPUT_BASE_NAME))
    f = sim_in.models.files
    if 'fileColumnReport' in model:
        source = _input_or_output(
            sim_in,
            int(re.search(r'(\d+)$', model).group(1)),
            'inputs',
            'outputs',
        )[0]
        return _SIM_DATA.lib_file_name_with_model_field(
            'files',
            source,
            sim_in.models.files[source],
        )
    if model == 'partitionSelectionReport' or 'partitionAnimation' in model:
        return _SIM_DATA.lib_file_name_with_model_field(
            'files',
            'inputs',
            sim_in.models.files.inputs,
        )
    if model == 'epochAnimation':
        return _OUTPUT_FILE.fitOutputFile
    if 'fitAnimation' in model:
        return PKDict(
            content=run_dir.join(_OUTPUT_FILE.testOutputFile).read_text() \
                + run_dir.join(_OUTPUT_FILE.predictOutputFile).read_text(),
            uri='test-and-predict.csv',
        )
    raise AssertionError('unknown model: {}'.format(model))
Example #18
 def _load_in_json(run_dir):
     p = simulation_db.json_filename(
         sirepo.template.template_common.INPUT_BASE_NAME, run_dir)
     c = simulation_db.read_json(p)
     return c, c.computeJobCacheKey.computeJobStart if \
         c.get('computeJobCacheKey') else \
         int(p.mtime())
Example #19
def compute_field_range(args, compute_range):
    """ Computes the fieldRange values for all parameters across all animation files.
    Caches the value on the animation input file. compute_range() is called to
    read the simulation specific datafiles and extract the ranges by field.
    """
    from sirepo import simulation_db

    run_dir = simulation_db.simulation_run_dir(
        PKDict(
            simulationType=args['simulationType'],
            simulationId=args['simulationId'],
            report='animation',
        ))
    data = simulation_db.read_json(run_dir.join(INPUT_BASE_NAME))
    res = None
    model_name = args['modelName']
    if model_name in data.models:
        if 'fieldRange' in data.models[model_name]:
            res = data.models[model_name].fieldRange
        else:
            #TODO(pjm): second arg was never used
            res = compute_range(run_dir, None)
            data.models[model_name].fieldRange = res
            simulation_db.write_json(run_dir.join(INPUT_BASE_NAME), data)
    return PKDict(fieldRange=res)
Example #20
def api_simulationFrame(frame_id):
    #TODO(robnagler) startTime is reportParametersHash; need version on URL and/or param names in URL
    keys = [
        'simulationType', 'simulationId', 'modelName', 'animationArgs',
        'frameIndex', 'startTime'
    ]
    data = dict(zip(keys, frame_id.split('*')))
    template = sirepo.template.import_module(data)
    data['report'] = template.get_animation_name(data)
    run_dir = simulation_db.simulation_run_dir(data)
    model_data = simulation_db.read_json(
        run_dir.join(template_common.INPUT_BASE_NAME))
    frame = template.get_simulation_frame(run_dir, data, model_data)
    response = _json_response(frame)
    if 'error' not in frame and template.WANT_BROWSER_FRAME_CACHE:
        now = datetime.datetime.utcnow()
        expires = now + datetime.timedelta(365)
        response.headers['Cache-Control'] = 'public, max-age=31536000'
        response.headers['Expires'] = expires.strftime(
            "%a, %d %b %Y %H:%M:%S GMT")
        response.headers['Last-Modified'] = now.strftime(
            "%a, %d %b %Y %H:%M:%S GMT")
    else:
        _no_cache(response)
    return response
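Example #20 assumes a frame_id whose fields are joined by '*' in a fixed key order; decoding it is just a zip over that key list. The sample frame_id below is fabricated for illustration.

keys = [
    'simulationType', 'simulationId', 'modelName', 'animationArgs',
    'frameIndex', 'startTime',
]
frame_id = 'elegant*abc123*elementAnimation4*x-y*0*1500000000'  # fabricated sample
data = dict(zip(keys, frame_id.split('*')))
# data['modelName'] == 'elementAnimation4', data['frameIndex'] == '0', etc.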
Example #21
def background_percent_complete(report, run_dir, is_running):
    files = _h5_file_list(run_dir)
    if len(files) < 2:
        return PKDict(
            percentComplete=0,
            frameCount=0,
        )
    file_index = len(files) - 1
    last_update_time = int(os.path.getmtime(str(files[file_index])))
    # look at 2nd to last file if running, last one may be incomplete
    if is_running:
        file_index -= 1
    data = simulation_db.read_json(
        run_dir.join(template_common.INPUT_BASE_NAME))
    Fr, info = field_reader.read_field_circ(str(files[file_index]), 'E/r')
    plasma_length = float(data.models.electronPlasma.length) / 1e3
    zmin = float(data.models.simulationGrid.zMin) / 1e6
    percent_complete = (info.imshow_extent[1] / (plasma_length - zmin))
    if percent_complete < 0:
        percent_complete = 0
    elif percent_complete > 1.0:
        percent_complete = 1.0
    return PKDict(
        lastUpdateTime=last_update_time,
        percentComplete=percent_complete * 100,
        frameCount=file_index + 1,
    )
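_h5_file_list is not shown in Example #21. A plausible stand-in, assuming the per-step dumps are plain *.h5 files under the run directory (the 'hdf5' subdirectory name is a guess, not the real layout):

import glob


def _h5_file_list(run_dir):
    # guessed layout: sorted per-step HDF5 dump files under run_dir/hdf5
    return sorted(glob.glob(str(run_dir.join('hdf5', '*.h5'))))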
Example #22
def background_percent_complete(report, run_dir, is_running):
    diag_file = run_dir.join(OUTPUT_FILE['beamEvolutionAnimation'])
    if diag_file.exists():
        particle_file_count = len(_particle_file_list(run_dir))
        # if is_running:
        #     particle_file_count -= 1
        try:
            data = simulation_db.read_json(
                run_dir.join(template_common.INPUT_BASE_NAME))
            with h5py.File(str(diag_file), 'r') as f:
                size = f['emitx'].shape[0]
                turn = int(f['repetition'][-1]) + 1
                complete = 100 * (turn - 0.5) / data['models']['simulationSettings']['turn_count']
                return {
                    'percentComplete': complete if is_running else 100,
                    'frameCount': size,
                    'turnCount': turn,
                    'bunchAnimation.frameCount': particle_file_count,
                }
        except Exception as e:
            # file present but not hdf formatted
            pass
    return {
        'percentComplete': 0,
        'frameCount': 0,
    }
Example #23
def _optimization_values(run_dir):
    opt_file = run_dir.join(OPTIMIZER_RESULT_FILE)
    res = None
    if opt_file.exists():
        res = simulation_db.read_json(opt_file)
        os.remove(str(opt_file))
    return res
Example #24
def background_percent_complete(report, run_dir, is_running):
    files = _h5_file_list(run_dir)
    if len(files) < 2:
        return {
            'percentComplete': 0,
            'frameCount': 0,
        }
    file_index = len(files) - 1
    last_update_time = int(os.path.getmtime(str(files[file_index])))
    # look at 2nd to last file if running, last one may be incomplete
    if is_running:
        file_index -= 1
    data = simulation_db.read_json(run_dir.join(template_common.INPUT_BASE_NAME))
    Fr, info = field_reader.read_field_circ(str(files[file_index]), 'E/r')
    plasma_length = float(data['models']['electronPlasma']['length']) / 1e3
    zmin = float(data['models']['simulationGrid']['zMin']) / 1e6
    percent_complete = (info.imshow_extent[1] / (plasma_length - zmin))
    if percent_complete < 0:
        percent_complete = 0
    elif percent_complete > 1.0:
        percent_complete = 1.0
    return {
        'lastUpdateTime': last_update_time,
        'percentComplete': percent_complete * 100,
        'frameCount': file_index + 1,
    }
Example #25
def background_percent_complete(report, run_dir, is_running):
    diag_file = run_dir.join(OUTPUT_FILE.beamEvolutionAnimation)
    if diag_file.exists():
        particle_file_count = len(_particle_file_list(run_dir))
        # if is_running:
        #     particle_file_count -= 1
        try:
            data = simulation_db.read_json(run_dir.join(template_common.INPUT_BASE_NAME))
            with h5py.File(str(diag_file), 'r') as f:
                size = f['emitx'].shape[0]
                turn = int(f['repetition'][-1]) + 1
                complete = 100 * (turn - 0.5) / data.models.simulationSettings.turn_count
                res = PKDict(
                    percentComplete=complete if is_running else 100,
                    frameCount=size,
                    turnCount=turn,
                )
                res['bunchAnimation.frameCount'] = particle_file_count
                return res

        except Exception:
            # file present but not hdf formatted
            pass
    return PKDict(
        percentComplete=0,
        frameCount=0,
    )
Example #26
def _extract_particle_plot(report, run_dir, page_index):
    xfield = _map_field_name(report['x'])
    yfield = _map_field_name(report['y'])
    bins = report['histogramBins']
    filename = _ion_files(run_dir)[page_index]
    data = simulation_db.read_json(run_dir.join(template_common.INPUT_BASE_NAME))
    settings = data.models.simulationSettings
    time = settings.time / settings.step_number * settings.save_particle_interval * page_index
    if time > settings.time:
        time = settings.time
    x_col = sdds_util.extract_sdds_column(filename, xfield, 0)
    if x_col['err']:
        return x_col['err']
    x = x_col['values']
    y_col = sdds_util.extract_sdds_column(filename, yfield, 0)
    if y_col['err']:
        return y_col['err']
    y = y_col['values']
    particle_animation = data.models.particleAnimation
    range = None
    if report['plotRangeType'] == 'fixed':
        range = [_plot_range(report, 'horizontal'), _plot_range(report, 'vertical')]
    elif report['plotRangeType'] == 'fit' and 'fieldRange' in particle_animation:
        range = [particle_animation.fieldRange[xfield], particle_animation.fieldRange[yfield]]
    hist, edges = np.histogramdd([x, y], template_common.histogram_bins(bins), range=range)
    return {
        'x_range': [float(edges[0][0]), float(edges[0][-1]), len(hist)],
        'y_range': [float(edges[1][0]), float(edges[1][-1]), len(hist[0])],
        'x_label': _field_label(xfield, x_col['column_def']),
        'y_label': _field_label(yfield, y_col['column_def']),
        'title': 'Ions at time {:.2f} [s]'.format(time),
        'z_matrix': hist.T.tolist(),
    }
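The heatmap payload built at the end of Example #26 is just a 2D histogram plus the axis extents; a standalone sketch with synthetic data:

import numpy as np

x = np.random.normal(size=1000)
y = np.random.normal(size=1000)
hist, edges = np.histogramdd([x, y], bins=[50, 50])
plot = {
    'x_range': [float(edges[0][0]), float(edges[0][-1]), len(hist)],
    'y_range': [float(edges[1][0]), float(edges[1][-1]), len(hist[0])],
    'z_matrix': hist.T.tolist(),  # transposed so rows map to the vertical axis, as above
}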
Example #27
def run(cfg_dir):
    """Run code in ``cfg_dir``

    Args:
        cfg_dir (str): directory to run code in
    """
    template = sirepo.template.import_module(pkinspect.module_basename(run))
    with pkio.save_chdir(cfg_dir):
        _run_code()
        data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
        data_file = template.open_data_file(py.path.local())
        model = data['models'][data['report']]

        if data['report'] == 'laserPreviewReport':
            field = model['field']
            coordinate = model['coordinate']
            mode = model['mode']
            if mode != 'all':
                mode = int(mode)
            res = template.extract_field_report(field, coordinate, mode, data_file)
        elif data['report'] == 'beamPreviewReport':
            res = template.extract_particle_report(
                model,
                'beam',
                cfg_dir,
                data_file,
            )

        simulation_db.write_result(res)
Example #28
def background_percent_complete(report, run_dir, is_running):
    errors = ''
    if not is_running:
        out_file = run_dir.join('{}.json'.format(
            template_common.OUTPUT_BASE_NAME))
        count = 0
        if out_file.exists():
            out = simulation_db.read_json(out_file)
            if 'frame_count' in out:
                count = out.frame_count
        if not count:
            count = read_frame_count(run_dir)
        if count:
            return {
                'percentComplete': 100,
                'frameCount': count,
            }
        else:
            errors = _parse_zgoubi_log(run_dir)
    res = {
        'percentComplete': 0,
        'frameCount': 0,
    }
    if errors:
        res['errors'] = errors
    return res
Example #29
def background_percent_complete(report, run_dir, is_running):
    diag_file = run_dir.join(OUTPUT_FILE['beamEvolutionAnimation'])
    if diag_file.exists():
        particle_file_count = len(_particle_file_list(run_dir))
        # if is_running:
        #     particle_file_count -= 1
        try:
            data = simulation_db.read_json(run_dir.join(template_common.INPUT_BASE_NAME))
            with h5py.File(str(diag_file), 'r') as f:
                size = f['emitx'].shape[0]
                turn = int(f['repetition'][-1]) + 1
                complete = 100 * (turn - 0.5) / data['models']['simulationSettings']['turn_count']
                return {
                    'percentComplete': complete if is_running else 100,
                    'frameCount': size,
                    'turnCount': turn,
                    'bunchAnimation.frameCount': particle_file_count,
                }
        except Exception as e:
            # file present but not hdf formatted
            pass
    return {
        'percentComplete': 0,
        'frameCount': 0,
    }
Example #30
def get_data_file(run_dir, model, frame, options=None, **kwargs):
    def _sdds(filename):
        path = run_dir.join(filename)
        assert path.check(file=True, exists=True), \
            '{}: not found'.format(path)
        if not options.suffix:
            return path
        if options.suffix == 'csv':
            out = elegant_common.subprocess_output(
                ['sddsprintout', '-columns', '-spreadsheet=csv',
                 str(path)])
            assert out, \
                '{}: invalid or empty output from sddsprintout'.format(path)
            return PKDict(
                uri=path.purebasename + '.csv',
                content=out,
            )
        raise AssertionError('{}: invalid suffix for download path={}'.format(
            options.suffix, path))

    if frame >= 0:
        data = simulation_db.read_json(
            run_dir.join(template_common.INPUT_BASE_NAME))
        # ex. elementAnimation17-55
        i = re.sub(r'elementAnimation', '', model).split(_FILE_ID_SEP)
        return _sdds(_get_filename_for_element_id(i, data))
    if model == 'animation':
        return ELEGANT_LOG_FILE
    return _sdds(_report_output_filename('bunchReport'))
Example #31
def _extract_bunch_plot(report, frame_index, run_dir):
    filename = _particle_file_list(run_dir)[frame_index]
    with h5py.File(str(filename), 'r') as f:
        x = f['particles'][:, _COORD6.index(report['x'])].tolist()
        y = f['particles'][:, _COORD6.index(report['y'])].tolist()
        data = simulation_db.read_json(run_dir.join(template_common.INPUT_BASE_NAME))
        if 'bunchAnimation' not in data.models:
            # In case the simulation was run before the bunchAnimation was added
            return {
                'error': 'Report not generated',
            }
        bunch_animation = data.models.bunchAnimation
        range = None
        if report['plotRangeType'] == 'fixed':
            range = [_plot_range(report, 'horizontal'), _plot_range(report, 'vertical')]
        elif report['plotRangeType'] == 'fit' and 'fieldRange' in bunch_animation:
            range = [bunch_animation.fieldRange[report['x']], bunch_animation.fieldRange[report['y']]]
        hist, edges = np.histogramdd([x, y], template_common.histogram_bins(report['histogramBins']), range=range)
        tlen = f['tlen'][()]
        s_n = f['s_n'][()]
        rep = 0 if s_n == 0 else int(round(tlen / s_n))
        return {
            'x_range': [float(edges[0][0]), float(edges[0][-1]), len(hist)],
            'y_range': [float(edges[1][0]), float(edges[1][-1]), len(hist[0])],
            'x_label': label(report['x']),
            'y_label': label(report['y']),
            'title': '{}-{} at {:.1f}m, turn {}'.format(report['x'], report['y'], tlen, rep),
            'z_matrix': hist.T.tolist(),
        }
Example #32
def get_application_data(data, **kwargs):
    if data.method == 'compute_particle_ranges':
        return template_common.compute_field_range(
            data, _compute_range_across_frames)
    if data.method == 'rpn_value':
        # accept array of values enclosed in curly braces
        if re.search(r'^\{.*\}$', data.value):
            data.result = ''
            return data
        v, err = _code_var(data.variables).eval_var(data.value)
        if err:
            data.error = err
        else:
            data.result = v
        return data
    if data.method == 'recompute_rpn_cache_values':
        _code_var(data.variables).recompute_cache(data.cache)
        return data
    if data.method == 'validate_rpn_delete':
        model_data = simulation_db.read_json(
            simulation_db.sim_data_file(SIM_TYPE, data.simulationId))
        data.error = _code_var(data.variables).validate_var_delete(
            data.name, model_data, _SCHEMA)
        return data
    raise AssertionError('unknown get_application_data: {}'.format(data))
Example #33
def background_percent_complete(report, run_dir, is_running):
    res = PKDict(
        percentComplete=0,
        frameCount=0,
    )
    if report == 'partitionAnimation':
        if not is_running and run_dir.join('x-train.csv').exists():
            res.percentComplete = 100
            res.frameCount = 1
        return res
    if report == 'elegantAnimation':
        if not is_running and run_dir.join('inputs.csv').exists():
            _compute_elegant_result_columns(run_dir, res)
        return res
    csv_file = run_dir.join(_OUTPUT_FILE.fitOutputFile)
    if csv_file.exists():
        line = _read_last_line(csv_file)
        m = re.search(r'^(\d+)', line)
        if m and int(m.group(1)) > 0:
            data = simulation_db.read_json(
                run_dir.join(template_common.INPUT_BASE_NAME))
            max_frame = data.models.neuralNet.epochs
            res.frameCount = int(m.group(1)) + 1
            res.percentComplete = float(res.frameCount) * 100 / max_frame
    return res
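_read_last_line in Example #33 is not shown; a simple stand-in that streams the CSV and keeps only the final line:

def _read_last_line(path):
    # stand-in, not the Sirepo helper: iterate so only one line is held at a time
    line = ''
    with open(str(path)) as f:
        for line in f:
            pass
    return line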
Example #34
def get_application_data(data):
    if data['method'] == 'get_elegant_sim_list':
        res = []
        for f in pkio.sorted_glob(_elegant_dir().join('*/', _ELEGANT_TWISS_PATH)):
            m = re.match(r'.*?/elegant/(.*?)/animation', str(f))
            if not m:
                continue
            id = m.group(1)
            name = simulation_db.read_json(_elegant_dir().join(id, '/', simulation_db.SIMULATION_DATA_FILE)).models.simulation.name
            res.append({
                'simulationId': id,
                'name': name,
            })
        return {
            'simList': res,
        }
    elif data['method'] == 'compute_particle_ranges':
        run_dir = simulation_db.simulation_run_dir({
            'simulationType': SIM_TYPE,
            'simulationId': data['simulationId'],
            'report': 'animation',
        })
        return {
            'fieldRange': _compute_range_across_files(run_dir),
        }
Example #35
def run(cfg_dir):
    """Run shadow in ``cfg_dir``

    Args:
        cfg_dir (str): directory to run shadow in
    """
    with pkio.save_chdir(cfg_dir):
        beam = _run_shadow()
        data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
        model = data['models'][data['report']]
        column_values = _SCHEMA['enum']['ColumnValue']

        if 'y' in model:
            x_range = None
            y_range = None
            if model['overrideSize'] == '1':
                x_range = (np.array([
                    model['horizontalOffset'] - model['horizontalSize'] / 2,
                    model['horizontalOffset'] + model['horizontalSize'] / 2,
                ]) * _MM_TO_CM).tolist()
                y_range = (np.array([
                    model['verticalOffset'] - model['verticalSize'] / 2,
                    model['verticalOffset'] + model['verticalSize'] / 2,
                ]) * _MM_TO_CM).tolist()
            ticket = beam.histo2(int(model['x']), int(model['y']), nbins=template_common.histogram_bins(model['histogramBins']), ref=int(model['weight']), nolost=1, calculate_widths=0, xrange=x_range, yrange=y_range)
            _scale_ticket(ticket)
            res = {
                'x_range': [ticket['xrange'][0], ticket['xrange'][1], ticket['nbins_h']],
                'y_range': [ticket['yrange'][0], ticket['yrange'][1], ticket['nbins_v']],
                'x_label': _label_with_units(model['x'], column_values),
                'y_label': _label_with_units(model['y'], column_values),
                'z_label': 'Frequency',
                'title': u'{}, {}'.format(_label(model['x'], column_values), _label(model['y'], column_values)),
                'z_matrix': ticket['histogram'].T.tolist(),
                'frameCount': 1,
            }
        else:
            weight = int(model['weight'])
            ticket = beam.histo1(int(model['column']), nbins=template_common.histogram_bins(model['histogramBins']), ref=weight, nolost=1, calculate_widths=0)
            _scale_ticket(ticket)
            res = {
                'title': _label(model['column'], column_values),
                'x_range': [ticket['xrange'][0], ticket['xrange'][1], ticket['nbins']],
                'y_label': u'{}{}'.format(
                    'Number of Rays',
                    u' weighted by {}'.format(_label_for_weight(model['weight'], column_values)) if weight else ''),
                'x_label': _label_with_units(model['column'], column_values),
                'points': ticket['histogram'].T.tolist(),
                'frameCount': 1,
            }
            #pkdp('range amount: {}', res['x_range'][1] - res['x_range'][0])
            #1.55431223448e-15
            dist = res['x_range'][1] - res['x_range'][0]
            #TODO(pjm): only rebalance range if outside of 0
            if dist < 1e-14:
                #TODO(pjm): include offset range for client
                res['x_range'][0] = 0
                res['x_range'][1] = dist
        simulation_db.write_result(res)
Example #36
def _extract_bunch_report():
    data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
    if data['models']['bunchSource']['inputSource'] == 'sdds_beam':
        file = 'bunchFile-sourceFile.{}'.format(data['models']['bunchFile']['sourceFile'])
    else:
        file = 'elegant.bun'
    info = extract_report_data(file, data['models'][data['report']], data['models']['bunch']['p_central_mev'], 0)
    simulation_db.write_result(info)
Example #37
def run(cfg_dir):
    cfg_dir = pkio.py_path(cfg_dir)
    data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
    if data['report'] == 'doseCalculation':
        _run_dose_calculation(data, cfg_dir)
    elif data['report'] == 'dvhReport':
        _run_dvh(data, cfg_dir)
    else:
        raise RuntimeError('unknown report: {}'.format(data['report']))
Example #38
def background_percent_complete(report, run_dir, is_running, schema):
    # TODO(robnagler) remove duplication in run_dir.exists() (outer level?)
    errors, last_element = _parse_elegant_log(run_dir)
    res = {"percentComplete": 100, "frameCount": 0, "errors": errors}
    if is_running:
        data = simulation_db.read_json(run_dir.join(template_common.INPUT_BASE_NAME))
        res["percentComplete"] = _compute_percent_complete(data, last_element)
        return res
    if not run_dir.join(_ELEGANT_SEMAPHORE_FILE).exists():
        return res
    data = simulation_db.read_json(run_dir.join(template_common.INPUT_BASE_NAME))
    output_info = _output_info(run_dir, data, schema)
    return {
        "percentComplete": 100,
        "frameCount": 1,
        "outputInfo": output_info,
        "lastUpdateTime": output_info[0]["lastUpdateTime"],
        "errors": errors,
    }
Example #39
def _run_srw():
    #TODO(pjm): need to properly escape data values, untrusted from client
    data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
    if data['report'] == 'mirrorReport':
        #TODO(pjm): mirror report should use its own jinja template
        _process_output(_mirror_plot(data), data)
        return
    # This defines the main() function:
    exec(pkio.read_text(template_common.PARAMETERS_PYTHON_FILE), locals(), locals())
    main()
    _process_output(get_filename_for_model(data['report']), data)
Example #40
def run_background(cfg_dir):
    res = {}
    data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
    try:
        _bunch_match_twiss(cfg_dir)
        _run_zgoubi(cfg_dir)
    except Exception as e:
        res = {
            'error': str(e),
        }
    simulation_db.write_result(res)
Example #41
def _output_info(run_dir):
    # cache outputInfo to file, used later for report frames
    info_file = run_dir.join(_OUTPUT_INFO_FILE)
    if os.path.isfile(str(info_file)):
        res = simulation_db.read_json(info_file)
        if len(res) == 0 or res[0].get('_version', '') == _OUTPUT_INFO_VERSION:
            return res
    data = simulation_db.read_json(run_dir.join(template_common.INPUT_BASE_NAME))
    res = []
    filename_map = _build_filename_map(data)
    for k in filename_map['keys_in_order']:
        filename = filename_map[k]
        id = k.split(_FILE_ID_SEP)
        info = _file_info(filename, run_dir, id[0], id[1])
        if info:
            info['modelKey'] = 'elementAnimation{}'.format(info['id'])
            res.append(info)
    if len(res):
        res[0]['_version'] = _OUTPUT_INFO_VERSION
    simulation_db.write_json(info_file, res)
    return res
Example #42
def get_simulation_frame(run_dir, data, model_data):
    frame_index = int(data['frameIndex'])
    args = data['animationArgs'].split('_')
    if data['modelName'].startswith('dicomAnimation'):
        plane = args[0]
        res = simulation_db.read_json(_dicom_path(model_data['models']['simulation'], plane, frame_index))
        res['pixel_array'] = _read_pixel_plane(plane, frame_index, model_data)
        return res
    if data['modelName'] == 'dicomDose':
        return {
            'dose_array': _read_dose_frame(frame_index, model_data)
        }
    raise RuntimeError('{}: unknown simulation frame model'.format(data['modelName']))
Example #43
def _generate_rtstruct_file(sim_dir, target_dir):
    models = simulation_db.read_json(sim_dir.join(_ROI_FILE_NAME))['models']
    frame_data = models['dicomFrames']
    roi_data = models['regionsOfInterest']
    plan = _create_dicom_dataset(frame_data['StudyInstanceUID'], 'RT_STRUCT', 'RTSTRUCT')
    plan.StructureSetLabel = '{} Exported'.format(_RADIASOFT_ID)
    plan.StructureSetDate = plan.InstanceCreationDate
    plan.StructureSetTime = plan.InstanceCreationTime
    _generate_dicom_reference_frame_info(plan, frame_data)
    _generate_dicom_roi_info(plan, frame_data, roi_data)
    filename = str(target_dir.join(RTSTRUCT_EXPORT_FILENAME))
    plan.save_as(filename)
    return filename, models
Example #44
def get_data_file(run_dir, model, frame, options=None):
    if model in OUTPUT_FILE:
        path = run_dir.join(OUTPUT_FILE[model])
    elif model == 'bunchAnimation':
        path = py.path.local(_particle_file_list(run_dir)[frame])
    elif model == 'beamlineReport':
        data = simulation_db.read_json(str(run_dir.join('..', simulation_db.SIMULATION_DATA_FILE)))
        source = _generate_parameters_file(data)
        return 'python-source.py', source, 'text/plain'
    else:
        assert False, 'model data file not yet supported: {}'.format(model)
    with open(str(path)) as f:
        return path.basename, f.read(), 'application/octet-stream'
Example #45
def run(cfg_dir):
    data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
    report = data['report']
    if 'bunchReport' in report or report == 'twissReport' or report == 'twissReport2':
        try:
            with pkio.save_chdir(cfg_dir):
                exec(pkio.read_text(template_common.PARAMETERS_PYTHON_FILE), locals(), locals())
            template.save_report_data(data, py.path.local(cfg_dir))
        except Exception as e:
            res = template.parse_error_log(py.path.local(cfg_dir)) or {
                'error': str(e),
            }
            simulation_db.write_result(res)
    else:
        raise RuntimeError('unknown report: {}'.format(report))
Example #46
def _run_srw():
    #TODO(pjm): need to properly escape data values, untrusted from client
    data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
    exec(pkio.read_text(template_common.PARAMETERS_PYTHON_FILE), locals(), locals())
    locals()['main']()
    # special case for importing python code
    if data['report'] == 'backgroundImport':
        sim_id = data['models']['simulation']['simulationId']
        # parsed_data is expected to be defined by the exec'd parameters script above
        parsed_data['models']['simulation']['simulationId'] = sim_id
        #TODO(pjm): assumes the parent directory contains the simulation data,
        # can't call simulation_db.save_simulation_json() because user isn't set for pkcli commands
        simulation_db.write_json('../{}'.format(simulation_db.SIMULATION_DATA_FILE), parsed_data)
        simulation_db.write_result({
            'simulationId': sim_id,
        })
    else:
        simulation_db.write_result(extract_report_data(get_filename_for_model(data['report']), data))
Example #47
def run(cfg_dir):
    """Run Hellweg in ``cfg_dir``

    Args:
        cfg_dir (str): directory to run hellweg in
    """
    _run_hellweg(cfg_dir)
    data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
    report = data['models'][data['report']]
    res = None
    if data['report'] == 'beamReport':
        res = template.extract_beam_report(report, cfg_dir, 0)
    elif data['report'] == 'beamHistogramReport':
        res = template.extract_beam_histrogram(report, cfg_dir, 0)
    else:
        raise RuntimeError('unknown report: {}'.format(data['report']))
    simulation_db.write_result(res)
Example #48
def background_percent_complete(report, run_dir, is_running):
    files = _h5_file_list(run_dir, 'currentAnimation')
    if (is_running and len(files) < 2) or (not run_dir.exists()):
        return {
            'percentComplete': 0,
            'frameCount': 0,
        }
    if len(files) == 0:
        return {
            'percentComplete': 100,
            'frameCount': 0,
            'error': 'simulation produced no frames',
            'state': 'error',
        }
    file_index = len(files) - 1
    res = {
        'lastUpdateTime': int(os.path.getmtime(str(files[file_index]))),
    }
    # look at 2nd to last file if running, last one may be incomplete
    if is_running:
        file_index -= 1
    data = simulation_db.read_json(run_dir.join(template_common.INPUT_BASE_NAME))
    percent_complete = 0
    if data.models.simulation.egun_mode == '1':
        status_file = run_dir.join(_EGUN_STATUS_FILE)
        if status_file.exists():
            with open(str(status_file), 'r') as f:
                m = re.search(r'([\d\.]+)\s*/\s*(\d+)', f.read())
            if m:
                percent_complete = float(m.group(1)) / int(m.group(2))
        egun_current_file = run_dir.join(_EGUN_CURRENT_FILE)
        if egun_current_file.exists():
            v = np.load(str(egun_current_file))
            res['egunCurrentFrameCount'] = len(v)
    else:
        percent_complete = (file_index + 1.0) * _PARTICLE_PERIOD / data.models.simulationGrid.num_steps

    if percent_complete < 0:
        percent_complete = 0
    elif percent_complete > 1.0:
        percent_complete = 1.0
    res['percentComplete'] = percent_complete * 100
    res['frameCount'] = file_index + 1
    return res
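The egun status parsing in Example #48 expects text like "12.5 / 100"; the fraction extraction in isolation (helper name is illustrative):

import re


def _egun_fraction(text):
    m = re.search(r'([\d\.]+)\s*/\s*(\d+)', text)
    return float(m.group(1)) / int(m.group(2)) if m else 0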
Example #49
def run(cfg_dir):
    """Run elegant in ``cfg_dir``

    The files in ``cfg_dir`` must be configured properly.

    Args:
        cfg_dir (str): directory to run elegant in
    """
    with pkio.save_chdir(cfg_dir):
        try:
            _run_elegant(bunch_report=True)
        except Exception as e:
            err = parse_elegant_log(py.path.local(cfg_dir))
            if not err:
                err = ['A server error occurred']
            simulation_db.write_result({
                'error': err[0],
            })
        save_report_data(simulation_db.read_json(template_common.INPUT_BASE_NAME), py.path.local(cfg_dir))
Example #50
def app_simulation_frame(frame_id):
    #TODO(robnagler) startTime is reportParametersHash; need version on URL and/or param names in URL
    keys = ['simulationType', 'simulationId', 'modelName', 'animationArgs', 'frameIndex', 'startTime']
    data = dict(zip(keys, frame_id.split('*')))
    template = sirepo.template.import_module(data)
    data['report'] = template.get_animation_name(data)
    run_dir = simulation_db.simulation_run_dir(data)
    model_data = simulation_db.read_json(run_dir.join(template_common.INPUT_BASE_NAME))
    response = _json_response(template.get_simulation_frame(run_dir, data, model_data))

    if template.WANT_BROWSER_FRAME_CACHE:
        now = datetime.datetime.utcnow()
        expires = now + datetime.timedelta(365)
        response.headers['Cache-Control'] = 'public, max-age=31536000'
        response.headers['Expires'] = expires.strftime("%a, %d %b %Y %H:%M:%S GMT")
        response.headers['Last-Modified'] = now.strftime("%a, %d %b %Y %H:%M:%S GMT")
    else:
        _no_cache(response)
    return response
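The browser frame-cache headers in Example #50 can be factored into a small helper; response here is anything with a dict-like headers attribute, e.g. a flask.Response (helper name is illustrative).

import datetime


def _cache_for_one_year(response):
    now = datetime.datetime.utcnow()
    expires = now + datetime.timedelta(365)
    response.headers['Cache-Control'] = 'public, max-age=31536000'
    response.headers['Expires'] = expires.strftime('%a, %d %b %Y %H:%M:%S GMT')
    response.headers['Last-Modified'] = now.strftime('%a, %d %b %Y %H:%M:%S GMT')
    return response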
Example #51
def _bunch_match_twiss(cfg_dir):
    data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
    bunch = data.models.bunch
    if bunch.match_twiss_parameters == '1' and ('bunchReport' in data.report or data.report == 'animation'):
        report = data['report']
        data['report'] = 'twissReport2'
        template.write_parameters(data, py.path.local(cfg_dir), False, 'twiss.py')
        _run_zgoubi(cfg_dir, python_file='twiss.py')
        col_names, row = template.extract_first_twiss_row(cfg_dir)
        for f in _TWISS_TO_BUNCH_FIELD.keys():
            v = template.column_data(f, col_names, [row])[0]
            bunch[_TWISS_TO_BUNCH_FIELD[f]] = v
            if f == 'btx' or f == 'bty':
                assert v > 0, 'invalid twiss parameter: {} <= 0'.format(f)
        simulation_db.write_json(py.path.local(cfg_dir).join(template.BUNCH_SUMMARY_FILE), bunch)
        data['report'] = report
        # rewrite the original report with original parameters
        template.write_parameters(data, py.path.local(cfg_dir), False)
    return data
Example #52
def _compute_range_across_files(run_dir):
    data = simulation_db.read_json(run_dir.join(template_common.INPUT_BASE_NAME))
    if 'bunchAnimation' not in data.models:
        return None
    if 'fieldRange' in data.models.bunchAnimation:
        return data.models.bunchAnimation.fieldRange
    res = {}
    for v in _SCHEMA.enum.PhaseSpaceCoordinate6:
        res[v[0]] = []
    for filename in _particle_file_list(run_dir):
        with h5py.File(str(filename), 'r') as f:
            for field in res:
                values = f['particles'][:, _COORD6.index(field)].tolist()
                if len(res[field]):
                    res[field][0] = min(min(values), res[field][0])
                    res[field][1] = max(max(values), res[field][1])
                else:
                    res[field] = [min(values), max(values)]
    data.models.bunchAnimation.fieldRange = res
    simulation_db.write_json(run_dir.join(template_common.INPUT_BASE_NAME), data)
    return res
Example #53
def background_percent_complete(report, run_dir, is_running):
    #TODO(robnagler) remove duplication in run_dir.exists() (outer level?)
    errors, last_element = parse_elegant_log(run_dir)
    res = {
        'percentComplete': 100,
        'frameCount': 0,
        'errors': errors,
    }
    if is_running:
        data = simulation_db.read_json(run_dir.join(template_common.INPUT_BASE_NAME))
        res['percentComplete'] = _compute_percent_complete(data, last_element)
        return res
    if not run_dir.join(_ELEGANT_SEMAPHORE_FILE).exists():
        return res
    output_info = _output_info(run_dir)
    return {
        'percentComplete': 100,
        'frameCount': 1,
        'outputInfo': output_info,
        'lastUpdateTime': output_info[0]['lastUpdateTime'],
        'errors': errors,
    }
Example #54
def save_report_data(data, run_dir):
    report_name = data['report']
    if 'twissReport' in report_name or 'opticsReport' in report_name:
        filename, enum_name, x_field = _REPORT_INFO[report_name]
        report = data['models'][report_name]
        plots = []
        col_names, rows = read_data_file(py.path.local(run_dir).join(filename))
        for f in ('y1', 'y2', 'y3'):
            if report[f] == 'none':
                continue
            plots.append({
                'points': column_data(report[f], col_names, rows),
                'label': template_common.enum_text(_SCHEMA, enum_name, report[f]),
            })
        x = column_data(x_field, col_names, rows)
        res = {
            'title': '',
            'x_range': [min(x), max(x)],
            'y_label': '',
            'x_label': 's [m]',
            'x_points': x,
            'plots': plots,
            'y_range': template_common.compute_plot_color_and_range(plots),
        }
    elif 'bunchReport' in report_name:
        report = data['models'][report_name]
        col_names, rows = read_data_file(py.path.local(run_dir).join(_ZGOUBI_DATA_FILE))
        res = _extract_bunch_data(report, col_names, rows, '')
        summary_file = py.path.local(run_dir).join(BUNCH_SUMMARY_FILE)
        if summary_file.exists():
            res['summaryData'] = {
                'bunch': simulation_db.read_json(summary_file)
            }
    else:
        raise RuntimeError('unknown report: {}'.format(report_name))
    simulation_db.write_result(
        res,
        run_dir=run_dir,
    )
Example #55
def run(cfg_dir):
    with pkio.save_chdir(cfg_dir):
        data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
        if data['report'] == 'twissReport':
            simulation_db.write_result(_extract_twiss_report(data))
        elif data['report'] == 'rateCalculationReport':
            text = _run_jspec(data)
            res = {
                #TODO(pjm): x_range is needed for sirepo-plotting.js, need a better valid-data check
                'x_range': [],
                'rate': [],
            }
            for line in text.split("\n"):
                m = re.match(r'^(.*? rate.*?)\:\s+(\S+)\s+(\S+)\s+(\S+)', line)
                if m:
                    row = [m.group(1), [m.group(2), m.group(3), m.group(4)]]
                    row[0] = re.sub(r'\(', '[', row[0])
                    row[0] = re.sub(r'\)', ']', row[0])
                    res['rate'].append(row)
            simulation_db.write_result(res)
        else:
            assert False, 'unknown report: {}'.format(data['report'])
Example #56
def run(cfg_dir):
    with pkio.save_chdir(cfg_dir):
        exec(_script(), locals(), locals())
        data = simulation_db.read_json(template_common.INPUT_BASE_NAME)

        if data['report'] == 'fieldReport':
            if len(potential.shape) == 2:
                values = potential[xl:xu, zl:zu]
            else:
                # 3d results
                values = potential[xl:xu, int(NUM_Y / 2), zl:zu]
            res = _generate_field_report(data, values, {
                'tof_expected': tof_expected,
                'steps_expected': steps_expected,
                'e_cross': e_cross,
            })
        elif data['report'] == 'fieldComparisonReport':
            step(template.COMPARISON_STEP_SIZE)
            res = template.generate_field_comparison_report(data, cfg_dir)
        else:
            raise RuntimeError('unknown report: {}'.format(data['report']))
    simulation_db.write_result(res)
Example #57
def run(cfg_dir):
    """Run warp in ``cfg_dir``

    Args:
        cfg_dir (str): directory to run warp in
    """
    with pkio.save_chdir(cfg_dir):
        _run_warp()
        data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
        data_file = template.open_data_file(py.path.local())
        model = data['models'][data['report']]

        if data['report'] == 'laserPreviewReport':
            field = model['field']
            coordinate = model['coordinate']
            mode = model['mode']
            if mode != 'all':
                mode = int(mode)
            res = template.extract_field_report(field, coordinate, mode, data_file)
        elif data['report'] == 'beamPreviewReport':
            res = template.extract_particle_report([model['x'], model['y'], model['histogramBins']], 'beam', cfg_dir, data_file)

        simulation_db.write_result(res)
Example #58
def run_background(cfg_dir):
    res = {}
    data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
    distribution = data['models']['bunch']['distribution']
    run_with_mpi = distribution == 'lattice' or distribution == 'file'
    try:
        with pkio.save_chdir(cfg_dir):
            if run_with_mpi:
                mpi.run_script(pkio.read_text(template_common.PARAMETERS_PYTHON_FILE))
            else:
                #TODO(pjm): MPI doesn't work with rsbeams distributions yet
                exec(pkio.read_text(template_common.PARAMETERS_PYTHON_FILE), locals(), locals())
    except Exception as e:
        res = {
            'error': str(e),
        }
    if run_with_mpi and 'error' in res:
        text = pkio.read_text('mpi_run.out')
        m = re.search(r'^Traceback .*?^\w*Error: (.*?)\n\n', text, re.MULTILINE|re.DOTALL)
        if m:
            res['error'] = m.group(1)
            # remove output file - write_result() will not overwrite an existing error output
            pkio.unchecked_remove(simulation_db.json_filename(template_common.OUTPUT_BASE_NAME))
    simulation_db.write_result(res)
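The traceback scraping at the end of Example #58 (and Example #13) can be isolated; it surfaces the last "...Error:" message from an MPI run log (helper name is illustrative).

import re


def _last_error_from_log(text):
    m = re.search(r'^Traceback .*?^\w*Error: (.*?)\n\n', text, re.MULTILINE | re.DOTALL)
    return m.group(1) if m else None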