def api_runCancel():
    """Cancel the report job identified by the posted request data.

    Dispatches to the runner daemon when enabled, otherwise kills the
    in-process job directly.

    Returns:
        Response: JSON ``{'state': 'canceled'}`` — always, from the
        client's perspective, even if nothing was running
    """
    data = _parse_data_input()
    jid = simulation_db.job_id(data)
    if feature_config.cfg.runner_daemon:
        # runner-daemon path: delegate the cancel via the runner client
        jhash = template_common.report_parameters_hash(data)
        run_dir = simulation_db.simulation_run_dir(data)
        runner_client.cancel_report_job(run_dir, jhash)
        # Always true from the client's perspective
        return http_reply.gen_json({'state': 'canceled'})
    else:
        # TODO(robnagler) need to have a way of listing jobs
        # Don't bother with cache_hit check. We don't have any way of canceling
        # if the parameters don't match so for now, always kill.
        #TODO(robnagler) mutex required
        if runner.job_is_processing(jid):
            run_dir = simulation_db.simulation_run_dir(data)
            # Write first, since results are write once, and we want to
            # indicate the cancel instead of the termination error that
            # will happen as a result of the kill.
            simulation_db.write_result({'state': 'canceled'}, run_dir=run_dir)
            runner.job_kill(jid)
            # TODO(robnagler) should really be inside the template (t.cancel_simulation()?)
            # the last frame file may not be finished, remove it
            t = sirepo.template.import_module(data)
            if hasattr(t, 'remove_last_frame'):
                t.remove_last_frame(run_dir)
        # Always true from the client's perspective
        return http_reply.gen_json({'state': 'canceled'})
def _extract_bunch_report(data):
    """Extract plot data for the requested bunch report and persist it."""
    report_model = data['models'][data['report']]
    simulation_db.write_result(
        extract_report_data(BUNCH_OUTPUT_FILE, report_model, 0),
    )
def run(cfg_dir):
    """Run code in ``cfg_dir``

    Args:
        cfg_dir (str): directory to run code in

    Raises:
        RuntimeError: if the report named in the input is not supported
    """
    template = sirepo.template.import_module(pkinspect.module_basename(run))
    with pkio.save_chdir(cfg_dir):
        _run_code()
        data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
        data_file = template.open_data_file(py.path.local())
        model = data['models'][data['report']]
        if data['report'] == 'laserPreviewReport':
            field = model['field']
            coordinate = model['coordinate']
            mode = model['mode']
            # 'all' passes through as-is; individual modes are numeric
            if mode != 'all':
                mode = int(mode)
            res = template.extract_field_report(field, coordinate, mode, data_file)
        elif data['report'] == 'beamPreviewReport':
            res = template.extract_particle_report(
                model,
                'beam',
                cfg_dir,
                data_file,
            )
        else:
            # BUG FIX: an unknown report previously fell through to an
            # UnboundLocalError on ``res``; fail with an explicit message
            raise RuntimeError('unknown report: {}'.format(data['report']))
        simulation_db.write_result(res)
def save_report_data(data, run_dir):
    """Extract plot data for the current zgoubi report and write it to ``run_dir``.

    Args:
        data (dict): simulation input; reads ``report`` and ``models``
        run_dir: directory containing the zgoubi output files

    Raises:
        RuntimeError: if the report name is not recognized
    """
    report_name = data['report']
    error = ''
    if 'twissReport' in report_name or 'opticsReport' in report_name:
        enum_name = _REPORT_ENUM_INFO[report_name]
        report = data['models'][report_name]
        plots = []
        col_names, rows = _read_data_file(
            py.path.local(run_dir).join(_ZGOUBI_TWISS_FILE))
        for f in ('y1', 'y2', 'y3'):
            # 'none' means the plot slot is unselected
            if report[f] == 'none':
                continue
            points = column_data(report[f], col_names, rows)
            # a NaN anywhere means twiss could not be computed for this field
            if any(map(lambda x: math.isnan(x), points)):
                error = 'Twiss data could not be computed for {}'.format(
                    template_common.enum_text(_SCHEMA, enum_name, report[f]))
            plots.append({
                'points': points,
                'label': template_common.enum_text(_SCHEMA, enum_name, report[f]),
            })
        #TODO(pjm): use template_common
        x = column_data('sums', col_names, rows)
        res = {
            'title': '',
            'x_range': [min(x), max(x)],
            'y_label': '',
            'x_label': 's [m]',
            'x_points': x,
            'plots': plots,
            'y_range': template_common.compute_plot_color_and_range(plots),
            'summaryData': _read_twiss_header(run_dir),
        }
    elif report_name == 'twissSummaryReport':
        res = {
            #TODO(pjm): x_range required by sirepo-plotting.js
            'x_range': [],
            'summaryData': _read_twiss_header(run_dir),
        }
    elif 'bunchReport' in report_name:
        report = data['models'][report_name]
        col_names, rows = _read_data_file(
            py.path.local(run_dir).join(_ZGOUBI_FAI_DATA_FILE))
        res = _extract_heatmap_data(report, col_names, rows, '')
        summary_file = py.path.local(run_dir).join(BUNCH_SUMMARY_FILE)
        if summary_file.exists():
            res['summaryData'] = {
                'bunch': simulation_db.read_json(summary_file)
            }
    else:
        raise RuntimeError('unknown report: {}'.format(report_name))
    if error:
        # an error result replaces any partially computed plot data
        res = {
            'error': error,
        }
    simulation_db.write_result(
        res,
        run_dir=run_dir,
    )
def prepare_output_file(report_info, data):
    """Regenerate the field comparison report output before the run starts."""
    if data['report'] != 'fieldComparisonReport':
        return
    run_dir = report_info.run_dir
    fn = simulation_db.json_filename(template_common.OUTPUT_BASE_NAME, run_dir)
    if fn.exists():
        fn.remove()
    simulation_db.write_result(
        generate_field_comparison_report(data, run_dir),
        run_dir=run_dir,
    )
def api_runCancel():
    """Cancel a running simulation job, ignoring any errors.

    Cancel is best-effort: any exception is logged and swallowed so the
    client always receives a success response.

    Returns:
        Response: JSON ``{'state': 'canceled'}`` — always, from the
        client's perspective
    """
    jid = None
    try:
        req = http_request.parse_post(id=True, model=True, check_sim_exists=True)
        jid = req.sim_data.parse_jid(req.req_data)
        # TODO(robnagler) need to have a way of listing jobs
        # Don't bother with cache_hit check. We don't have any way of canceling
        # if the parameters don't match so for now, always kill.
        #TODO(robnagler) mutex required
        if runner.job_is_processing(jid):
            run_dir = simulation_db.simulation_run_dir(req.req_data)
            # Write first, since results are write once, and we want to
            # indicate the cancel instead of the termination error that
            # will happen as a result of the kill.
            try:
                simulation_db.write_result({'state': 'canceled'}, run_dir=run_dir)
            except Exception as e:
                if not pykern.pkio.exception_is_not_found(e):
                    raise
                # else: run_dir may have been deleted
            runner.job_kill(jid)
            # TODO(robnagler) should really be inside the template (t.cancel_simulation()?)
            # the last frame file may not be finished, remove it
            t = sirepo.template.import_module(req.req_data)
            if hasattr(t, 'remove_last_frame'):
                t.remove_last_frame(run_dir)
    except Exception as e:
        pkdlog('ignoring exception={} jid={} stack={}', e, jid, pkdexc())
    # Always true from the client's perspective
    return http_reply.gen_json({'state': 'canceled'})
def run_program(cmd, output='mpi_run.out', env=None):
    """Execute python script with mpi.

    Args:
        cmd (list): cmd to run
        output (str): where to write stdout and stderr
        env (dict): what to pass as env
    """
    from sirepo import simulation_db
    launcher = [
        'mpiexec',
        '--bind-to',
        'none',
        '-n',
        str(cfg.cores),
    ]
    try:
        pksubprocess.check_call_with_signals(
            launcher + cmd,
            msg=pkdp,
            output=str(output),
            env=env,
        )
    except Exception as e:
        # record the failure for the client before propagating it
        simulation_db.write_result({'state': 'error', 'error': str(e)})
        raise
def run(cfg_dir):
    """Generate the dog height/weight report plot in ``cfg_dir``."""
    with pkio.save_chdir(cfg_dir):
        exec(pkio.read_text(template_common.PARAMETERS_PYTHON_FILE), locals(), locals())
        data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
        if data['report'] != 'dogReport':
            raise RuntimeError('unknown report: {}'.format(data['report']))
        dog = data.models.dog
        max_age = _max_age(dog.weight)
        ages = np.linspace(0, max_age, int(max_age) + 1).tolist()
        curves = [
            _plot(dog, 'height', ages),
            _plot(dog, 'weight', ages),
        ]
        simulation_db.write_result({
            'title': 'Dog Height and Weight Over Time',
            'x_range': [0, max_age],
            'y_label': '',
            'x_label': 'Age (years)',
            'x_points': ages,
            'plots': curves,
            'y_range': template_common.compute_plot_color_and_range(curves),
        })
def run_background(cfg_dir):
    """Run the simulation in ``cfg_dir``, with MPI when the bunch
    distribution requires it, writing an error result on failure.

    Args:
        cfg_dir (str): directory to run in
    """
    res = {}
    data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
    distribution = data['models']['bunch']['distribution']
    # only lattice/file distributions run under MPI
    run_with_mpi = distribution == 'lattice' or distribution == 'file'
    try:
        with pkio.save_chdir(cfg_dir):
            if run_with_mpi:
                mpi.run_script(
                    pkio.read_text(template_common.PARAMETERS_PYTHON_FILE))
            else:
                #TODO(pjm): MPI doesn't work with rsbeams distributions yet
                exec(pkio.read_text(template_common.PARAMETERS_PYTHON_FILE), locals(), locals())
    except Exception as e:
        res = {
            'error': str(e),
        }
    if run_with_mpi and 'error' in res:
        # prefer the root-cause error parsed from the MPI log over the
        # generic subprocess failure message
        text = pkio.read_text('mpi_run.out')
        m = re.search(r'^Traceback .*?^\w*Error: (.*?)\n\n', text, re.MULTILINE | re.DOTALL)
        if m:
            res['error'] = m.group(1)
        # remove output file - write_result() will not overwrite an existing error output
        pkio.unchecked_remove(
            simulation_db.json_filename(template_common.OUTPUT_BASE_NAME))
    simulation_db.write_result(res)
def run(cfg_dir):
    """Run shadow in ``cfg_dir``

    Extracts either a 2d heatmap (when the model has a ``y`` field) or a
    1d histogram from the ray-trace beam, then writes the plot result.

    Args:
        cfg_dir (str): directory to run shadow in
    """
    with pkio.save_chdir(cfg_dir):
        beam = _run_shadow()
        data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
        model = data['models'][data['report']]
        column_values = _SCHEMA['enum']['ColumnValue']
        # a 'y' field means a 2d histogram (heatmap); otherwise 1d
        if 'y' in model:
            x_range = None
            y_range = None
            if model['overrideSize'] == '1':
                # explicit plot window, converted from mm to cm for shadow
                x_range = (np.array([
                    model['horizontalOffset'] - model['horizontalSize'] / 2,
                    model['horizontalOffset'] + model['horizontalSize'] / 2,
                ]) * _MM_TO_CM).tolist()
                y_range = (np.array([
                    model['verticalOffset'] - model['verticalSize'] / 2,
                    model['verticalOffset'] + model['verticalSize'] / 2,
                ]) * _MM_TO_CM).tolist()
            ticket = beam.histo2(int(model['x']), int(model['y']), nbins=template_common.histogram_bins(model['histogramBins']), ref=int(model['weight']), nolost=1, calculate_widths=0, xrange=x_range, yrange=y_range)
            _scale_ticket(ticket)
            res = {
                'x_range': [ticket['xrange'][0], ticket['xrange'][1], ticket['nbins_h']],
                'y_range': [ticket['yrange'][0], ticket['yrange'][1], ticket['nbins_v']],
                'x_label': _label_with_units(model['x'], column_values),
                'y_label': _label_with_units(model['y'], column_values),
                'z_label': 'Frequency',
                'title': u'{}, {}'.format(_label(model['x'], column_values), _label(model['y'], column_values)),
                'z_matrix': ticket['histogram'].T.tolist(),
                'frameCount': 1,
            }
        else:
            weight = int(model['weight'])
            ticket = beam.histo1(int(model['column']), nbins=template_common.histogram_bins(model['histogramBins']), ref=weight, nolost=1, calculate_widths=0)
            _scale_ticket(ticket)
            res = {
                'title': _label(model['column'], column_values),
                'x_range': [ticket['xrange'][0], ticket['xrange'][1], ticket['nbins']],
                'y_label': u'{}{}'.format(
                    'Number of Rays',
                    u' weighted by {}'.format(_label_for_weight(model['weight'], column_values)) if weight else ''),
                'x_label': _label_with_units(model['column'], column_values),
                'points': ticket['histogram'].T.tolist(),
                'frameCount': 1,
            }
        #pkdp('range amount: {}', res['x_range'][1] - res['x_range'][0]) #1.55431223448e-15
        dist = res['x_range'][1] - res['x_range'][0]
        #TODO(pjm): only rebalance range if outside of 0
        if dist < 1e-14:
            #TODO(pjm): include offset range for client
            res['x_range'][0] = 0
            res['x_range'][1] = dist
        simulation_db.write_result(res)
def save_report_data(data, run_dir):
    """Extract report data (bunch heatmap or twiss-style line plots) and
    write the result to ``run_dir``.

    Args:
        data (PKDict): simulation input; reads ``report`` and ``models``
        run_dir: directory containing the HDF5 output files
    """
    if 'bunchReport' in data.report:
        import synergia.bunch
        # twiss parameters at the first point accompany the bunch plot
        with h5py.File(str(run_dir.join(OUTPUT_FILE.twissReport)), 'r') as f:
            twiss0 = dict(
                map(
                    lambda k: (k, format_float(f[k][0])),
                    ('alpha_x', 'alpha_y', 'beta_x', 'beta_y'),
                ))
        report = data.models[data.report]
        bunch = data.models.bunch
        if bunch.distribution == 'file':
            bunch_file = _SIM_DATA.lib_file_name_with_model_field(
                'bunch', 'particleFile', bunch.particleFile)
        else:
            bunch_file = OUTPUT_FILE.bunchReport
        if not run_dir.join(bunch_file).exists():
            # nothing to plot yet; write no result
            return
        with h5py.File(str(run_dir.join(bunch_file)), 'r') as f:
            # particle columns are indexed via the synergia Bunch coordinate attributes
            x = f['particles'][:, getattr(synergia.bunch.Bunch, report['x'])]
            y = f['particles'][:, getattr(synergia.bunch.Bunch, report['y'])]
            res = template_common.heatmap(
                [x, y], report, {
                    'title': '',
                    'x_label': label(report.x, _SCHEMA.enum.PhaseSpaceCoordinate8),
                    'y_label': label(report.y, _SCHEMA.enum.PhaseSpaceCoordinate8),
                    'summaryData': {
                        'bunchTwiss': twiss0,
                    },
                })
    else:
        report_name = data.report
        x = None
        plots = []
        report = data.models[report_name]
        with h5py.File(str(run_dir.join(OUTPUT_FILE[report_name])), 'r') as f:
            x = f['s'][:].tolist()
            for yfield in ('y1', 'y2', 'y3'):
                # 'none' means the plot slot is unselected
                if report[yfield] == 'none':
                    continue
                name = report[yfield]
                plots.append({
                    'name': name,
                    'label': label(report[yfield], _SCHEMA.enum.TwissParameter),
                    'points': f[name][:].tolist(),
                })
        res = {
            'title': '',
            'x_range': [min(x), max(x)],
            'y_range': template_common.compute_plot_color_and_range(plots),
            'x_label': 's [m]',
            'y_label': '',
            'x_points': x,
            'plots': plots,
        }
    simulation_db.write_result(res, run_dir=run_dir)
def _extract_bunch_report():
    """Write plot data for the bunch report, reading from the SDDS source
    file when the bunch source is an SDDS beam, else the elegant output."""
    data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
    if data['models']['bunchSource']['inputSource'] == 'sdds_beam':
        filename = 'bunchFile-sourceFile.{}'.format(data['models']['bunchFile']['sourceFile'])
    else:
        filename = 'elegant.bun'
    simulation_db.write_result(extract_report_data(
        filename,
        data['models'][data['report']],
        data['models']['bunch']['p_central_mev'],
        0,
    ))
def save_report_data(data, run_dir):
    """Extract plot data for the current report and write it to ``run_dir``."""
    name = data['report']
    report = data['models'][name]
    if name == 'twissReport':
        # twiss always plots the first selected column against s
        report['x'] = 's'
        report['y'] = report['y1']
    outfile = str(run_dir.join(_report_output_filename(name)))
    simulation_db.write_result(
        extract_report_data(outfile, report, 0),
        run_dir=run_dir,
    )
def run_background(cfg_dir):
    """Run srw with mpi in ``cfg_dir``

    Args:
        cfg_dir (str): directory to run srw in
    """
    with pkio.save_chdir(cfg_dir):
        script = pkio.read_text(template_common.PARAMETERS_PYTHON_FILE)
        mpi.run_script(script)
        simulation_db.write_result({})
def run_background(cfg_dir):
    """Execute elegant (MPI-enabled) in ``cfg_dir``; an empty result
    signals success."""
    with pkio.save_chdir(cfg_dir):
        _run_elegant(with_mpi=True)
        simulation_db.write_result({})
def run_background(cfg_dir):
    """Run the generated parameter script under MPI inside ``cfg_dir``."""
    with pkio.save_chdir(cfg_dir):
        mpi.run_script(_script())
        simulation_db.write_result({})
def save_report_data(data, run_dir):
    """Extract report data (bunch heatmap or twiss-style line plots) and
    write the result to ``run_dir``.

    Args:
        data (dict): simulation input; reads ``report`` and ``models``
        run_dir: directory containing the HDF5 output files
    """
    if 'bunchReport' in data['report']:
        import synergia.bunch
        # twiss parameters at the first point accompany the bunch plot
        with h5py.File(str(run_dir.join(OUTPUT_FILE['twissReport'])), 'r') as f:
            twiss0 = dict(map(
                lambda k: (k, format_float(f[k][0])),
                ('alpha_x', 'alpha_y', 'beta_x', 'beta_y'),
            ))
        report = data.models[data['report']]
        bunch = data.models.bunch
        if bunch.distribution == 'file':
            bunch_file = template_common.lib_file_name('bunch', 'particleFile', bunch.particleFile)
        else:
            bunch_file = OUTPUT_FILE['bunchReport']
        if not run_dir.join(bunch_file).exists():
            # nothing to plot yet; write no result
            return
        with h5py.File(str(run_dir.join(bunch_file)), 'r') as f:
            # particle columns are indexed via the synergia Bunch coordinate attributes
            x = f['particles'][:, getattr(synergia.bunch.Bunch, report['x'])]
            y = f['particles'][:, getattr(synergia.bunch.Bunch, report['y'])]
        hist, edges = np.histogramdd([x, y], template_common.histogram_bins(report['histogramBins']))
        res = {
            'title': '',
            'x_range': [float(edges[0][0]), float(edges[0][-1]), len(hist)],
            'y_range': [float(edges[1][0]), float(edges[1][-1]), len(hist[0])],
            'x_label': label(report['x'], _SCHEMA['enum']['PhaseSpaceCoordinate8']),
            'y_label': label(report['y'], _SCHEMA['enum']['PhaseSpaceCoordinate8']),
            'z_matrix': hist.T.tolist(),
            'summaryData': {
                'bunchTwiss': twiss0,
            },
        }
    else:
        report_name = data['report']
        x = None
        plots = []
        report = data['models'][report_name]
        with h5py.File(str(run_dir.join(OUTPUT_FILE[report_name])), 'r') as f:
            x = f['s'][:].tolist()
            for yfield in ('y1', 'y2', 'y3'):
                # 'none' means the plot slot is unselected
                if report[yfield] == 'none':
                    continue
                name = report[yfield]
                plots.append({
                    'name': name,
                    'label': label(report[yfield], _SCHEMA['enum']['TwissParameter']),
                    'points': f[name][:].tolist(),
                })
        res = {
            'title': '',
            'x_range': [min(x), max(x)],
            'y_range': template_common.compute_plot_color_and_range(plots),
            'x_label': 's [m]',
            'y_label': '',
            'x_points': x,
            'plots': plots,
        }
    simulation_db.write_result(res, run_dir=run_dir)
def run_background(cfg_dir):
    """Launch elegant with MPI in ``cfg_dir`` as a background job."""
    with pkio.save_chdir(cfg_dir):
        _run_elegant(with_mpi=True)
        simulation_db.write_result({})
def extract_report_data(run_dir, sim_in):
    """Write output for geometry-type reports; empty result otherwise."""
    if 'geometry' not in sim_in.report:
        simulation_db.write_result(PKDict(), run_dir=run_dir)
        return
    v_type = sim_in.models.magnetDisplay.viewType
    # the field type only applies to field views
    f_type = None
    if v_type == VIEW_TYPE_FIELD:
        f_type = sim_in.models.magnetDisplay.fieldType
    simulation_db.write_result(
        _read_data(sim_in.simulationId, v_type, f_type),
        run_dir=run_dir,
    )
def _run_tunes_report(cfg_dir, data):
    """Run the external tunes program against the animation output and
    write the extracted tunes report.

    Args:
        cfg_dir: directory to run in
        data (dict): simulation input
    """
    with pkio.save_chdir(cfg_dir):
        exec(pkio.read_text(template_common.PARAMETERS_PYTHON_FILE), locals(), locals())
        # NOTE(review): ``tunes_file`` appears to come from the exec'd
        # parameter script's namespace (CPython-specific) — confirm
        pkio.write_text(template.TUNES_INPUT_FILE, tunes_file)
        #TODO(pjm): uses datafile from animation directory
        os.symlink('../animation/zgoubi.fai', 'zgoubi.fai')
        subprocess.call([_TUNES_PATH])
        simulation_db.write_result(template.extract_tunes_report(
            cfg_dir, data))
def _extract_bunch_report():
    """Persist bunch report plot data from the appropriate source file."""
    data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
    source = data['models']['bunchSource']['inputSource']
    if source == 'sdds_beam':
        src = 'bunchFile-sourceFile.{}'.format(
            data['models']['bunchFile']['sourceFile'])
    else:
        src = 'elegant.bun'
    info = extract_report_data(
        src,
        data['models'][data['report']],
        data['models']['bunch']['p_central_mev'],
        0,
    )
    simulation_db.write_result(info)
def _extract_twiss_report(data):
    """Write twiss plot data; x is always s, y is the first column choice."""
    report = data['models'][data['report']]
    report['x'] = 's'
    report['y'] = report['y1']
    simulation_db.write_result(
        extract_report_data('twiss_output.filename.sdds', report, 0),
    )
def run_background(cfg_dir):
    """Run zgoubi, recording any failure as an error result."""
    res = {}
    data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
    try:
        _bunch_match_twiss(cfg_dir)
        _run_zgoubi(cfg_dir)
    except Exception as e:
        res = {'error': str(e)}
    simulation_db.write_result(res)
def run_background(cfg_dir):
    """Run warpvnd in ``cfg_dir`` with mpi

    Args:
        cfg_dir (str): directory to run warpvnd in
    """
    with pkio.save_chdir(cfg_dir):
        #TODO(pjm): disable running with MPI for now
        # mpi.run_script(_script())
        # the generated script runs for its side effects only
        exec(_script(), locals(), locals())
        simulation_db.write_result({})
def save_report_data(data, run_dir):
    """Extract plot data for the current report into ``run_dir``."""
    name = data['report']
    report = data['models'][name]
    if name == 'twissReport':
        # twiss always plots the first selected column against s
        report['x'] = 's'
        report['y'] = report['y1']
    simulation_db.write_result(
        extract_report_data(
            str(run_dir.join(_report_output_filename(name))), report, 0),
        run_dir=run_dir,
    )
def run_background(cfg_dir):
    """Execute the generated parameter script in ``cfg_dir``, writing an
    error result on failure and an empty result on success."""
    res = {}
    try:
        with pkio.save_chdir(cfg_dir):
            # the script runs for its side effects only
            exec(pkio.read_text(template_common.PARAMETERS_PYTHON_FILE), locals(), locals())
    except Exception as e:
        res = {
            'error': str(e),
        }
    simulation_db.write_result(res)
def save_report_data(data, run_dir):
    """Write frame-0 report data for the current report into ``run_dir``."""
    report = copy.deepcopy(data.models[data.report])
    report.frameReport = data.report
    if report.frameReport == 'twissReport':
        # twiss always plots the first selected column against s
        report.x = 's'
        report.y = report.y1
    report.frameIndex = 0
    outfile = str(run_dir.join(_report_output_filename(report.frameReport)))
    simulation_db.write_result(
        _extract_report_data(outfile, report),
        run_dir=run_dir,
    )
def run_background(cfg_dir):
    """Run zgoubi after validating the estimated output size, recording
    the frame count on success or an error message on failure."""
    res = {}
    data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
    _validate_estimate_output_file_size(data, res)
    if 'error' in res:
        simulation_db.write_result(res)
        return
    try:
        _bunch_match_twiss(cfg_dir, data)
        _run_zgoubi(cfg_dir)
        res['frame_count'] = template.read_frame_count(py.path.local(cfg_dir))
    except Exception as e:
        res['error'] = str(e)
    simulation_db.write_result(res)
def _run_dose_calculation(data, cfg_dir):
    """Compute (or fake) the DICOM dose and save it into the sim input data."""
    if feature_config.cfg().rs4pi_dose_calc:
        with pkio.save_chdir(cfg_dir):
            pksubprocess.check_call_with_signals(
                ['bash', str(cfg_dir.join(template.DOSE_CALC_SH))])
        dicom_dose = template.generate_rtdose_file(data, cfg_dir)
    else:
        dicom_dose = _run_dose_calculation_fake(data, cfg_dir)
    data['models']['dicomDose'] = dicom_dose
    # save results into simulation input data file, this is needed for further calls to get_simulation_frame()
    simulation_db.write_json(template_common.INPUT_BASE_NAME, data)
    simulation_db.write_result({'dicomDose': dicom_dose})
def _run_dose_calculation(data, cfg_dir):
    """Compute (or fake) the DICOM dose and save it into the sim input data."""
    if feature_config.cfg.rs4pi_dose_calc:
        with pkio.save_chdir(cfg_dir):
            pksubprocess.check_call_with_signals(
                ['bash', str(cfg_dir.join(template.DOSE_CALC_SH))])
        dicom_dose = template.generate_rtdose_file(data, cfg_dir)
    else:
        dicom_dose = _run_dose_calculation_fake(data, cfg_dir)
    data['models']['dicomDose'] = dicom_dose
    # save results into simulation input data file, this is needed for further calls to get_simulation_frame()
    simulation_db.write_json(template_common.INPUT_BASE_NAME, data)
    simulation_db.write_result({'dicomDose': dicom_dose})
def run_background(cfg_dir):
    """Run zgoubi unless the estimated output would exceed ~50MB."""
    res = {}
    data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
    if _estimated_output_file_size(data) > 5e7:
        res['error'] = 'Estimated output data too large.\nReduce particle count or number of runs,\nor increase diagnostic interval.'
    else:
        try:
            _bunch_match_twiss(cfg_dir)
            _run_zgoubi(cfg_dir)
            res['frame_count'] = template.read_frame_count(py.path.local(cfg_dir))
        except Exception as e:
            res['error'] = str(e)
    simulation_db.write_result(res)
def prepare_output_file(run_dir, data):
    """Regenerate field reports so stale output is replaced before a run."""
    if data.report not in ('fieldComparisonReport', 'fieldReport'):
        return
    fn = simulation_db.json_filename(template_common.OUTPUT_BASE_NAME, run_dir)
    if fn.exists():
        fn.remove()
    if data.report == 'fieldComparisonReport':
        res = generate_field_comparison_report(data, run_dir)
    else:
        res = generate_field_report(data, run_dir)
    simulation_db.write_result(res, run_dir=run_dir)
def run(cfg_dir):
    """Run the simulation for bunch/twiss reports, writing parsed error
    output on failure.

    Args:
        cfg_dir (str): directory to run in

    Raises:
        RuntimeError: if the report is not a bunch or twiss report
    """
    data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
    report = data['report']
    if 'bunchReport' in report or report == 'twissReport' or report == 'twissReport2':
        try:
            with pkio.save_chdir(cfg_dir):
                exec(pkio.read_text(template_common.PARAMETERS_PYTHON_FILE), locals(), locals())
                template.save_report_data(data, py.path.local(cfg_dir))
        except Exception as e:
            # prefer the code's own error log over the raw exception text
            res = template.parse_error_log(py.path.local(cfg_dir)) or {
                'error': str(e),
            }
            simulation_db.write_result(res)
    else:
        raise RuntimeError('unknown report: {}'.format(report))
def run(cfg_dir):
    """Run jspec in ``cfg_dir`` and write the parsed rate table as the result.

    Args:
        cfg_dir (str): directory to run jspec in
    """
    text = _run_jspec(cfg_dir)
    res = {
        #TODO(pjm): x_range is needed for sirepo-plotting.js, need a better valid-data check
        'x_range': [],
        'rate': [],
    }
    for line in text.split("\n"):
        m = re.match(r'^(.*? rate.*?)\:\s+(\S+)\s+(\S+)\s+(\S+)', line)
        if m:
            # FIX: the replacements previously used non-raw regex strings
            # ('\(' / '\)') — invalid escape sequences in modern Python;
            # plain str.replace is equivalent for these literal characters.
            # Present "(...)" in labels as "[...]" for display.
            label = m.group(1).replace('(', '[').replace(')', ']')
            res['rate'].append([label, [m.group(2), m.group(3), m.group(4)]])
    simulation_db.write_result(res)
def run(cfg_dir):
    """Run the generated warp script and write the requested field report.

    Args:
        cfg_dir (str): directory to run in

    Raises:
        RuntimeError: if the report named in the input is not supported
    """
    with pkio.save_chdir(cfg_dir):
        # NOTE(review): ``field_results`` is expected to come from the
        # exec'd script's namespace — confirm
        exec(_script(), locals(), locals())
        data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
        if data['report'] == 'fieldReport':
            res = template.generate_field_report(data, cfg_dir)
            res['tof_expected'] = field_results.tof_expected
            # BUG FIX: a stray trailing comma previously made this value a
            # 1-tuple (res['steps_expected'] = field_results.steps_expected,)
            res['steps_expected'] = field_results.steps_expected
            res['e_cross'] = field_results.e_cross
        elif data['report'] == 'fieldComparisonReport':
            wp.step(template.COMPARISON_STEP_SIZE)
            res = template.generate_field_comparison_report(data, cfg_dir)
        else:
            raise RuntimeError('unknown report: {}'.format(data['report']))
        simulation_db.write_result(res)
def run(cfg_dir):
    """Run Hellweg in ``cfg_dir``

    Args:
        cfg_dir (str): directory to run hellweg in
    """
    _run_hellweg(cfg_dir)
    data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
    report_name = data['report']
    report = data['models'][report_name]
    if report_name == 'beamReport':
        res = template.extract_beam_report(report, cfg_dir, 0)
    elif report_name == 'beamHistogramReport':
        res = template.extract_beam_histrogram(report, cfg_dir, 0)
    else:
        raise RuntimeError('unknown report: {}'.format(report_name))
    simulation_db.write_result(res)
def _run_srw():
    """Execute the generated SRW parameter script and write the report result.

    For the special ``backgroundImport`` report the parsed simulation data
    is written to the parent directory instead of extracting plot data.
    """
    #TODO(pjm): need to properly escape data values, untrusted from client
    data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
    exec(pkio.read_text(template_common.PARAMETERS_PYTHON_FILE), locals(), locals())
    locals()['main']()
    # special case for importing python code
    if data['report'] == 'backgroundImport':
        sim_id = data['models']['simulation']['simulationId']
        # NOTE(review): ``parsed_data`` appears to come from the exec'd
        # script's namespace — it is not defined in this function; confirm
        parsed_data['models']['simulation']['simulationId'] = sim_id
        #TODO(pjm): assumes the parent directory contains the simulation data,
        # can't call simulation_db.save_simulation_json() because user isn't set for pkcli commands
        simulation_db.write_json('../{}'.format(simulation_db.SIMULATION_DATA_FILE), parsed_data)
        simulation_db.write_result({
            'simulationId': sim_id,
        })
    else:
        simulation_db.write_result(extract_report_data(get_filename_for_model(data['report']), data))
def run(cfg_dir):
    """Run elegant in ``cfg_dir``

    The files in ``cfg_dir`` must be configured properly.

    Args:
        cfg_dir (str): directory to run elegant in
    """
    with pkio.save_chdir(cfg_dir):
        try:
            _run_elegant(bunch_report=True)
        except Exception as e:
            # prefer the elegant log's own error; fall back to a generic one
            err = parse_elegant_log(py.path.local(cfg_dir))
            if not err:
                err = ['A server error occurred']
            # results are write-once, so this error is what the client sees
            simulation_db.write_result({
                'error': err[0],
            })
        save_report_data(simulation_db.read_json(template_common.INPUT_BASE_NAME), py.path.local(cfg_dir))
def app_run_cancel():
    """Cancel the queued/processing job for the posted simulation.

    Returns:
        Response: JSON ``{'state': 'canceled'}`` — always, from the
        client's perspective
    """
    data = _parse_data_input()
    jid = simulation_db.job_id(data)
    # TODO(robnagler) need to have a way of listing jobs
    # Don't bother with cache_hit check. We don't have any way of canceling
    # if the parameters don't match so for now, always kill.
    #TODO(robnagler) mutex required
    if cfg.job_queue.is_processing(jid):
        run_dir = simulation_db.simulation_run_dir(data)
        # Write first, since results are write once, and we want to
        # indicate the cancel instead of the termination error that
        # will happen as a result of the kill.
        simulation_db.write_result({'state': 'canceled'}, run_dir=run_dir)
        cfg.job_queue.kill(jid)
        # TODO(robnagler) should really be inside the template (t.cancel_simulation()?)
        # the last frame file may not be finished, remove it
        t = sirepo.template.import_module(data)
        t.remove_last_frame(run_dir)
    # Always true from the client's perspective
    return _json_response({'state': 'canceled'})
def run_background(cfg_dir):
    """Run srw with mpi in ``cfg_dir``

    Appends MPI bookkeeping assignments to the generated parameter script
    before running it under MPI.

    Args:
        cfg_dir (str): directory to run srw in
    """
    with pkio.save_chdir(cfg_dir):
        script = pkio.read_text(template_common.PARAMETERS_PYTHON_FILE)
        p = dict(pkcollections.map_items(cfg))
        if pkconfig.channel_in('dev'):
            # keep dev runs short
            p['particles_per_core'] = 5
        p['cores'] = mpi.cfg.cores
        script += '''
v.wm_na = v.sm_na = {particles_per_core}
# Number of "iterations" per save is best set to num processes
v.wm_ns = v.sm_ns = {cores}
srwl_bl.SRWLBeamline(_name=v.name).calc_all(v, op)
main()
'''.format(**p)
        mpi.run_script(script)
        simulation_db.write_result({})
def run(cfg_dir):
    """Run jspec in ``cfg_dir`` and write the result for the current report.

    Args:
        cfg_dir (str): directory to run jspec in

    Raises:
        AssertionError: if the report named in the input is not supported
    """
    with pkio.save_chdir(cfg_dir):
        data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
        if data['report'] == 'twissReport':
            simulation_db.write_result(_extract_twiss_report(data))
        elif data['report'] == 'rateCalculationReport':
            text = _run_jspec(data)
            res = {
                #TODO(pjm): x_range is needed for sirepo-plotting.js, need a better valid-data check
                'x_range': [],
                'rate': [],
            }
            for line in text.split("\n"):
                m = re.match(r'^(.*? rate.*?)\:\s+(\S+)\s+(\S+)\s+(\S+)', line)
                if m:
                    # FIX: the replacements previously used non-raw regex
                    # strings ('\(' / '\)') — invalid escape sequences in
                    # modern Python; str.replace is equivalent here.
                    # Present "(...)" in labels as "[...]" for display.
                    label = m.group(1).replace('(', '[').replace(')', ']')
                    res['rate'].append([label, [m.group(2), m.group(3), m.group(4)]])
            simulation_db.write_result(res)
        else:
            # FIX: was ``assert False`` which is stripped under python -O;
            # raise the same exception type explicitly
            raise AssertionError('unknown report: {}'.format(data['report']))
def save_report_data(data, run_dir):
    """Extract plot data for the current report and write it to ``run_dir``.

    Args:
        data (dict): simulation input; reads ``report`` and ``models``
        run_dir: directory containing the output data files

    Raises:
        RuntimeError: if the report name is not recognized
    """
    report_name = data['report']
    if 'twissReport' in report_name or 'opticsReport' in report_name:
        filename, enum_name, x_field = _REPORT_INFO[report_name]
        report = data['models'][report_name]
        plots = []
        col_names, rows = read_data_file(py.path.local(run_dir).join(filename))
        for f in ('y1', 'y2', 'y3'):
            # 'none' means the plot slot is unselected
            if report[f] == 'none':
                continue
            plots.append({
                'points': column_data(report[f], col_names, rows),
                'label': template_common.enum_text(_SCHEMA, enum_name, report[f]),
            })
        x = column_data(x_field, col_names, rows)
        res = {
            'title': '',
            'x_range': [min(x), max(x)],
            'y_label': '',
            'x_label': 's [m]',
            'x_points': x,
            'plots': plots,
            'y_range': template_common.compute_plot_color_and_range(plots),
        }
    elif 'bunchReport' in report_name:
        report = data['models'][report_name]
        col_names, rows = read_data_file(py.path.local(run_dir).join(_ZGOUBI_DATA_FILE))
        res = _extract_bunch_data(report, col_names, rows, '')
        summary_file = py.path.local(run_dir).join(BUNCH_SUMMARY_FILE)
        if summary_file.exists():
            # include computed bunch summary values when available
            res['summaryData'] = {
                'bunch': simulation_db.read_json(summary_file)
            }
    else:
        raise RuntimeError('unknown report: {}'.format(report_name))
    simulation_db.write_result(
        res,
        run_dir=run_dir,
    )
def run(cfg_dir):
    """Run the generated script and write the requested field report.

    Args:
        cfg_dir (str): directory to run in

    Raises:
        RuntimeError: if the report named in the input is not supported
    """
    with pkio.save_chdir(cfg_dir):
        # NOTE(review): ``potential``, ``xl``/``xu``/``zl``/``zu``,
        # ``NUM_Y``, ``tof_expected``, ``steps_expected``, ``e_cross``
        # and ``step`` appear to come from the exec'd script's
        # namespace — confirm
        exec(_script(), locals(), locals())
        data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
        if data['report'] == 'fieldReport':
            if len(potential.shape) == 2:
                values = potential[xl:xu, zl:zu]
            else:
                # 3d results
                values = potential[xl:xu, int(NUM_Y / 2), zl:zu]
            res = _generate_field_report(data, values, {
                'tof_expected': tof_expected,
                'steps_expected': steps_expected,
                'e_cross': e_cross,
            })
        elif data['report'] == 'fieldComparisonReport':
            step(template.COMPARISON_STEP_SIZE)
            res = template.generate_field_comparison_report(data, cfg_dir)
        else:
            raise RuntimeError('unknown report: {}'.format(data['report']))
        simulation_db.write_result(res)
def run(cfg_dir):
    """Run warp in ``cfg_dir``

    Args:
        cfg_dir (str): directory to run warp in

    Raises:
        RuntimeError: if the report named in the input is not supported
    """
    with pkio.save_chdir(cfg_dir):
        _run_warp()
        data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
        data_file = template.open_data_file(py.path.local())
        model = data['models'][data['report']]
        if data['report'] == 'laserPreviewReport':
            field = model['field']
            coordinate = model['coordinate']
            mode = model['mode']
            # 'all' passes through as-is; individual modes are numeric
            if mode != 'all':
                mode = int(mode)
            res = template.extract_field_report(field, coordinate, mode, data_file)
        elif data['report'] == 'beamPreviewReport':
            res = template.extract_particle_report(
                [model['x'], model['y'], model['histogramBins']],
                'beam',
                cfg_dir,
                data_file,
            )
        else:
            # BUG FIX: an unknown report previously fell through to an
            # UnboundLocalError on ``res``; fail with an explicit message
            raise RuntimeError('unknown report: {}'.format(data['report']))
        simulation_db.write_result(res)
def run_background(cfg_dir):
    """Run the simulation in ``cfg_dir``, with MPI when the bunch
    distribution requires it, writing an error result on failure.

    Args:
        cfg_dir (str): directory to run in
    """
    res = {}
    data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
    distribution = data['models']['bunch']['distribution']
    # only lattice/file distributions run under MPI
    run_with_mpi = distribution == 'lattice' or distribution == 'file'
    try:
        with pkio.save_chdir(cfg_dir):
            if run_with_mpi:
                mpi.run_script(pkio.read_text(template_common.PARAMETERS_PYTHON_FILE))
            else:
                #TODO(pjm): MPI doesn't work with rsbeams distributions yet
                exec(pkio.read_text(template_common.PARAMETERS_PYTHON_FILE), locals(), locals())
    except Exception as e:
        res = {
            'error': str(e),
        }
    if run_with_mpi and 'error' in res:
        # prefer the root-cause error parsed from the MPI log over the
        # generic subprocess failure message
        text = pkio.read_text('mpi_run.out')
        m = re.search(r'^Traceback .*?^\w*Error: (.*?)\n\n', text, re.MULTILINE|re.DOTALL)
        if m:
            res['error'] = m.group(1)
        # remove output file - write_result() will not overwrite an existing error output
        pkio.unchecked_remove(simulation_db.json_filename(template_common.OUTPUT_BASE_NAME))
    simulation_db.write_result(res)
def _run_dvh(data, cfg_dir):
    """Compute dose-volume histogram plots for the selected ROIs and write
    the result.

    Args:
        data (dict): simulation input; reads ``models.dvhReport``
        cfg_dir: directory whose parent holds the exported DICOM files
    """
    dvh_report = data['models']['dvhReport']
    if not len(dvh_report['roiNumbers']):
        simulation_db.write_result({
            'error': 'No selection',
        })
        # BUG FIX: previously fell through after writing the error and
        # attempted to build/write an empty plot result as well
        return
    y_range = None
    plots = []
    max_x = 0
    for roi_number in data['models']['dvhReport']['roiNumbers']:
        roi_number = int(roi_number)
        dp = dicomparser.DicomParser(_parent_file(cfg_dir, template.RTSTRUCT_EXPORT_FILENAME))
        for roi in dp.ds.ROIContourSequence:
            if roi.ReferencedROINumber == roi_number:
                # ensure every contour has an image sequence for dvhcalc
                for c in roi.ContourSequence:
                    if 'ContourImageSequence' not in c:
                        c.ContourImageSequence = []
        s = dp.GetStructures()[roi_number]
        s['planes'] = dp.GetStructureCoordinates(roi_number)
        s['thickness'] = dp.CalculatePlaneThickness(s['planes'])
        rtdose = dicomparser.DicomParser(_parent_file(cfg_dir, template._DOSE_DICOM_FILE))
        calcdvh = dvhcalc.calculate_dvh(s, rtdose, None, True, None)
        counts = np.append(calcdvh.histogram, 0.0)
        if dvh_report['dvhType'] == 'cumulative':
            counts = counts[::-1].cumsum()[::-1]
        else:
            counts = np.append(abs(np.diff(counts) * -1), [0])
        if dvh_report['dvhVolume'] == 'relative':
            if dvh_report['dvhType'] == 'differential':
                # normalize against the cumulative max, then differentiate again
                counts = counts[::-1].cumsum()[::-1]
            if len(counts) and counts.max() > 0:
                counts = 100 * counts / counts.max()
            if dvh_report['dvhType'] == 'differential':
                counts = np.append(abs(np.diff(counts) * -1), [0])
        else:
            counts /= 10
        max_x = max(max_x, counts.size)
        min_y = np.min(counts)
        max_y = np.max(counts)
        if y_range:
            if min_y < y_range[0]:
                y_range[0] = min_y
            if max_y > y_range[1]:
                y_range[1] = max_y
        else:
            y_range = [min_y, max_y]
        plots.append({
            'points': counts.tolist(),
            # NOTE(review): bytes.encode('hex') is Python 2 only; under
            # Python 3 this needs binascii.hexlify — confirm target runtime
            'color': '#{}'.format(struct.pack('BBB', *s['color']).encode('hex')),
            'label': s['name'],
        })
    res = {
        'title': '',
        'x_range': [0, max_x / 100.0, max_x],
        'y_label': 'Volume [{}]'.format('%' if dvh_report['dvhVolume'] == 'relative' else 'm³'),
        'x_label': 'Dose [gy]',
        'y_range': y_range,
        'plots': sorted(plots, key=lambda v: v['label'].lower()),
    }
    simulation_db.write_result(res)
def run_background(cfg_dir):
    """Run jspec in ``cfg_dir``; an empty result signals success."""
    with pkio.save_chdir(cfg_dir):
        sim_in = simulation_db.read_json(template_common.INPUT_BASE_NAME)
        _run_jspec(sim_in)
        simulation_db.write_result({})
def run_background(cfg_dir):
    """Run hellweg as a background job; an empty result signals success."""
    _run_hellweg(cfg_dir)
    simulation_db.write_result({})
def _process_output(filename, model_data):
    """Extract report data from ``filename`` and persist it as the result."""
    info = extract_report_data(filename, model_data)
    simulation_db.write_result(info)
def run_background(cfg_dir):
    """Write an empty (success) result from within ``cfg_dir``."""
    with pkio.save_chdir(cfg_dir):
        simulation_db.write_result({})