示例#1
0
文件: webcon.py 项目: ahebnl/Sirepo
def _enable_steering(data):
    """Persist the beam steering settings for a simulation, if its EPICS dir exists."""
    d = _epics_dir(data['simulationId'])
    if d.exists():
        #TODO(pjm): use save to tmp followed by mv for atomicity
        simulation_db.write_json(d.join(STEERING_FILE), data['beamSteering'])
    return PKDict()
示例#2
0
def _bunch_match_twiss(cfg_dir, data):
    """Optionally recompute bunch parameters from a twiss run.

    When the bunch requests twiss matching with the MCOBJET3 method and the
    report is a bunch report or animation, runs a twiss computation and
    copies selected twiss columns back onto the bunch model, then restores
    the original report parameters.

    Args:
        cfg_dir: run directory (wrapped with py.path.local)
        data: simulation data; data.models.bunch is read and mutated

    Returns:
        data, with data.models.bunch possibly updated
    """
    bunch = data.models.bunch
    if bunch.match_twiss_parameters == '1' \
       and bunch.method == 'MCOBJET3' \
       and ('bunchReport' in data.report or data.report == 'animation'):
        # temporarily switch the report so the generated parameters run twiss
        report = data['report']
        data['report'] = 'twissReport2'
        template.write_parameters(data, py.path.local(cfg_dir), False,
                                  'twiss.py')
        _run_zgoubi(cfg_dir, python_file='twiss.py')
        col_names, row = template.extract_first_twiss_row(cfg_dir)
        # copy each twiss column onto its corresponding bunch field
        for f in _TWISS_TO_BUNCH_FIELD.keys():
            v = template.column_data(f, col_names, [row])[0]
            if (f == 'btx' or f == 'bty') and v <= 0:
                # beta functions must be positive; fall back to a safe default
                pkdlog('invalid calculated twiss parameter: {} <= 0', f)
                v = 1.0
            bunch[_TWISS_TO_BUNCH_FIELD[f]] = v
        # pull the fitted initial coordinates out of the zgoubi fit file;
        # values are assumed to sit 4 lines below the 'OBJET' marker
        # (scaled by 1e-2 / 1e-3 — presumably cm->m and mrad->rad; TODO confirm)
        found_fit = False
        lines = pkio.read_text(_ZGOUBI_FIT_FILE).split('\n')
        for i in range(len(lines)):
            line = lines[i]
            if re.search(r"^\s*'OBJET'", line):
                values = lines[i + 4].split()
                assert len(values) >= 5
                found_fit = True
                bunch['Y0'] = float(values[0]) * 1e-2
                bunch['T0'] = float(values[1]) * 1e-3
                break
        assert found_fit, 'failed to parse fit parameters'
        simulation_db.write_json(
            py.path.local(cfg_dir).join(template.BUNCH_SUMMARY_FILE), bunch)
        data['report'] = report
        # rewrite the original report with original parameters
        template.write_parameters(data, py.path.local(cfg_dir), False)
    return data
示例#3
0
def write_parameters(data, run_dir, is_parallel):
    """Write parameter/input files for the rs4pi reports.

    For doseCalculation, writes the prescription JSON and renders the dose
    calculation shell script; a dvhReport reuses an already-exported
    rtstruct file when present.

    Args:
        data (dict): simulation data; data['report'] selects behavior
        run_dir (py.path): directory to write into
        is_parallel (bool): unused here; part of the template API
    """
    rtfile = py.path.local(_parent_file(run_dir, RTSTRUCT_EXPORT_FILENAME))
    if data['report'] == 'dvhReport' and rtfile.exists():
        # rtstruct already exported; nothing more to write
        return
    if data['report'] in ('doseCalculation', 'dvhReport'):
        _, roi_models = _generate_rtstruct_file(_parent_dir(run_dir),
                                                _parent_dir(run_dir))
        if data['report'] == 'doseCalculation':
            dose_calc = data.models.doseCalculation
            roi_data = roi_models['regionsOfInterest']
            ptv_name = ''
            oar_names = []
            # map the selected ROI numbers to their names
            for roi_number in roi_data:
                if roi_number == dose_calc.selectedPTV:
                    ptv_name = roi_data[roi_number]['name']
                elif roi_number in dose_calc.selectedOARs:
                    oar_names.append(roi_data[roi_number]['name'])
            prescription = run_dir.join(PRESCRIPTION_FILENAME)
            simulation_db.write_json(prescription, {
                'ptv': ptv_name,
                'oar': oar_names,
            })
            # fix: removed the stray trailing comma that wrapped this call
            # in a discarded 1-tuple
            pkjinja.render_file(
                RESOURCE_DIR.join(DOSE_CALC_SH + '.jinja'),
                {
                    'prescription': prescription,
                    'beamlist': run_dir.join(_BEAMLIST_FILENAME),
                    'dicom_zip': _sim_file(data['simulationId'],
                                           _ZIP_FILE_NAME),
                },
                output=run_dir.join(DOSE_CALC_SH),
                strict_undefined=True,
            )
示例#4
0
文件: rs4pi.py 项目: e-carlin/sirepo
def write_parameters(data, run_dir, is_parallel):
    """Write parameter/input files for the rs4pi reports.

    For doseCalculation, writes the prescription JSON and renders the dose
    calculation shell script; a dvhReport reuses an already-exported
    rtstruct file when present.

    Args:
        data (dict): simulation data; data['report'] selects behavior
        run_dir (py.path): directory to write into
        is_parallel (bool): unused here; part of the template API
    """
    rtfile = py.path.local(_parent_file(run_dir, RTSTRUCT_EXPORT_FILENAME))
    if data['report'] == 'dvhReport' and rtfile.exists():
        # rtstruct already exported; nothing more to write
        return
    if data['report'] in ('doseCalculation', 'dvhReport'):
        _, roi_models = _generate_rtstruct_file(_parent_dir(run_dir), _parent_dir(run_dir))
        if data['report'] == 'doseCalculation':
            dose_calc = data.models.doseCalculation
            roi_data = roi_models['regionsOfInterest']
            ptv_name = ''
            oar_names = []
            # map the selected ROI numbers to their names
            for roi_number in roi_data:
                if roi_number == dose_calc.selectedPTV:
                    ptv_name = roi_data[roi_number]['name']
                elif roi_number in dose_calc.selectedOARs:
                    oar_names.append(roi_data[roi_number]['name'])
            prescription = run_dir.join(PRESCRIPTION_FILENAME)
            simulation_db.write_json(
                prescription,
                {
                    'ptv': ptv_name,
                    'oar': oar_names,
                })
            # fix: removed the stray trailing comma that wrapped this call
            # in a discarded 1-tuple
            pkjinja.render_file(
                RESOURCE_DIR.join(DOSE_CALC_SH + '.jinja'),
                {
                    'prescription': prescription,
                    'beamlist': run_dir.join(_BEAMLIST_FILENAME),
                    'dicom_zip': _sim_file(data['simulationId'], _ZIP_FILE_NAME),
                },
                output=run_dir.join(DOSE_CALC_SH),
                strict_undefined=True,
            )
示例#5
0
def compute_field_range(args, compute_range):
    """Compute the fieldRange values for all parameters across all animation files.

    Caches the value on the animation input file. compute_range() is called to
    read the simulation specific datafiles and extract the ranges by field.
    """
    from sirepo import simulation_db
    run_dir = simulation_db.simulation_run_dir({
        'simulationType': args['simulationType'],
        'simulationId': args['simulationId'],
        'report': 'animation',
    })
    data = simulation_db.read_json(run_dir.join(INPUT_BASE_NAME))
    field_range = None
    name = args['modelName']
    if name in data.models:
        model = data.models[name]
        if 'fieldRange' in model:
            # cached on the input file by a previous call
            field_range = model.fieldRange
        else:
            field_range = compute_range(run_dir, data)
            model.fieldRange = field_range
            simulation_db.write_json(run_dir.join(INPUT_BASE_NAME), data)
    return {
        'fieldRange': field_range,
    }
示例#6
0
文件: madx.py 项目: cchall/sirepo
def _output_info(run_dir):
    """Return output-file info for the run, cached to _OUTPUT_INFO_FILE.

    The cache is used later for report frames; it is rebuilt when missing,
    unreadable, or written by a different _OUTPUT_INFO_VERSION.
    """
    info_file = run_dir.join(_OUTPUT_INFO_FILE)
    if os.path.isfile(str(info_file)):
        try:
            res = simulation_db.read_json(info_file)
            # an empty cache or a version match is considered valid
            if not res or res[0].get('_version', '') == _OUTPUT_INFO_VERSION:
                return res
        except ValueError:
            # corrupt cache file; fall through and rebuild it
            pass
    data = simulation_db.read_json(
        run_dir.join(template_common.INPUT_BASE_NAME))
    files = _build_filename_map(data)
    res = []
    for k in files.keys_in_order:
        f = files[k]
        if run_dir.join(f.filename).exists():
            res.append(_file_info(f.filename, run_dir, k))
    if LatticeUtil.find_first_command(data, _END_MATCH_COMMAND):
        # matching runs get a synthetic entry for the log-based match report
        res.insert(
            0,
            PKDict(
                modelKey='matchAnimation',
                filename='madx.log',
                isHistogram=False,
                plottableColumns=[],
                pageCount=0,
            ))
    if res:
        res[0]['_version'] = _OUTPUT_INFO_VERSION
    simulation_db.write_json(info_file, res)
    return res
示例#7
0
def _output_info(run_dir):
    """Return per-output-file info for the run, cached to _OUTPUT_INFO_FILE."""
    # cache outputInfo to file, used later for report frames
    info_file = run_dir.join(_OUTPUT_INFO_FILE)
    if os.path.isfile(str(info_file)):
        try:
            res = simulation_db.read_json(info_file)
            # an empty cache or a version match is considered valid
            if not res or res[0].get('_version', '') == _OUTPUT_INFO_VERSION:
                return res
        except ValueError:
            # corrupt cache file; fall through and rebuild it
            pass
    data = simulation_db.read_json(
        run_dir.join(template_common.INPUT_BASE_NAME))
    res = []
    filename_map = _build_filename_map(data)
    for k in filename_map.keys_in_order:
        filename = filename_map[k]
        # key encodes "<id><sep><index>"; renamed to avoid shadowing builtin id()
        file_id = k.split(_FILE_ID_SEP)
        info = _file_info(filename, run_dir, file_id[0], file_id[1])
        if info:
            info.modelKey = 'elementAnimation{}'.format(info.id)
            res.append(info)
    if res:
        res[0]['_version'] = _OUTPUT_INFO_VERSION
    simulation_db.write_json(info_file, res)
    return res
示例#8
0
def compute_field_range(args, compute_range):
    """Compute the fieldRange values for all parameters across all animation files.

    Caches the value on the animation input file. compute_range() is called to
    read the simulation specific datafiles and extract the ranges by field.
    """
    from sirepo import simulation_db

    run_dir = simulation_db.simulation_run_dir(
        PKDict(
            simulationType=args['simulationType'],
            simulationId=args['simulationId'],
            report='animation',
        ))
    data = simulation_db.read_json(run_dir.join(INPUT_BASE_NAME))
    field_range = None
    name = args['modelName']
    if name in data.models:
        model = data.models[name]
        if 'fieldRange' in model:
            # cached on the input file by a previous call
            field_range = model.fieldRange
        else:
            #TODO(pjm): second arg was never used
            field_range = compute_range(run_dir, None)
            model.fieldRange = field_range
            simulation_db.write_json(run_dir.join(INPUT_BASE_NAME), data)
    return PKDict(fieldRange=field_range)
示例#9
0
文件: madx.py 项目: mkeilman/sirepo
def _generate_ptc_particles_file(run_dir, data, twiss):
    """Generate the PTC particles file from the bunch model.

    Also writes the bunch-particles JSON (with twiss summary data) when the
    report is a bunch report.

    Args:
        run_dir (py.path): directory to write output files into
        data: simulation data; data.models.bunch supplies particle settings
        twiss: twiss summary attached to the bunch report output
    """
    bunch = data.models.bunch
    p = _ptc_particles(
        PKDict(
            x=_bunch_twiss(bunch, 'x'),
            y=_bunch_twiss(bunch, 'y'),
        ),
        bunch.numberOfParticles,
        bunch.randomSeed,
    )
    v = PKDict(
        x=template.to_floats(p.x.pos),
        px=template.to_floats(p.x.p),
        y=template.to_floats(p.y.pos),
        py=template.to_floats(p.y.p),
        t=template.to_floats(p.t.pos),
        pt=template.to_floats(p.t.p),
    )
    if 'bunchReport' in data.report:
        v.summaryData = twiss
        simulation_db.write_json(run_dir.join(template.BUNCH_PARTICLES_FILE), v)
    # one "ptc_start, x=..., px=..., ...;" line per particle; join() avoids
    # the quadratic string concatenation (and mis-indented loop) of the
    # previous version
    fields = ('x', 'px', 'y', 'py', 't', 'pt')
    r = ''.join(
        'ptc_start'
        + ''.join(f', {f}={v[f][i]}' for f in fields)
        + ';\n'
        for i in range(len(v.x))
    )
    pkio.write_text(run_dir.join(template.PTC_PARTICLES_FILE), r)
示例#10
0
文件: rs4pi.py 项目: mkeilman/sirepo
def _run_dose_calculation(data, cfg_dir):
    """Run (or fake) the dose calculation and record the resulting dose model."""
    if feature_config.cfg().rs4pi_dose_calc:
        pksubprocess.check_call_with_signals(
            ['bash', str(cfg_dir.join(template.DOSE_CALC_SH))])
        dicom_dose = template.generate_rtdose_file(data, cfg_dir)
    else:
        # dose calc feature disabled; produce placeholder output
        dicom_dose = _run_dose_calculation_fake(data, cfg_dir)
    data['models']['dicomDose'] = dicom_dose
    # save results into simulation input data file, this is needed for further calls to get_simulation_frame()
    simulation_db.write_json(template_common.INPUT_BASE_NAME, data)
    template_common.write_sequential_result(PKDict(dicomDose=dicom_dose))
示例#11
0
def _compute_range_across_files(run_dir):
    """Compute (and cache on the input file) per-field ranges across all ion files."""
    data = simulation_db.read_json(run_dir.join(template_common.INPUT_BASE_NAME))
    anim = data.models.particleAnimation
    if 'fieldRange' in anim:
        # already computed; use the cached value
        return anim.fieldRange
    ranges = {_map_field_name(e[0]): [] for e in _SCHEMA.enum.ParticleColumn}
    for f in _ion_files(run_dir):
        sdds_util.process_sdds_page(f, 0, _compute_sdds_range, ranges)
    anim.fieldRange = ranges
    simulation_db.write_json(run_dir.join(template_common.INPUT_BASE_NAME), data)
    return ranges
示例#12
0
文件: rs4pi.py 项目: e-carlin/sirepo
def _run_dose_calculation(data, cfg_dir):
    """Run (or fake) the dose calculation and write the result file."""
    if feature_config.cfg.rs4pi_dose_calc:
        with pkio.save_chdir(cfg_dir):
            pksubprocess.check_call_with_signals(['bash', str(cfg_dir.join(template.DOSE_CALC_SH))])
            dicom_dose = template.generate_rtdose_file(data, cfg_dir)
    else:
        # dose calc feature disabled; produce placeholder output
        dicom_dose = _run_dose_calculation_fake(data, cfg_dir)
    data['models']['dicomDose'] = dicom_dose
    # save results into simulation input data file, this is needed for further calls to get_simulation_frame()
    simulation_db.write_json(template_common.INPUT_BASE_NAME, data)
    simulation_db.write_result({'dicomDose': dicom_dose})
示例#13
0
def _update_roi_file(sim_id, contours):
    """Merge updated contour points into the stored ROI file for a simulation."""
    data = _read_roi_file(sim_id)
    rois = data['models']['regionsOfInterest']
    for number, frames in contours.items():
        if number not in rois:
            # new ROI: store the incoming frames wholesale
            rois[number] = frames
        else:
            contour = rois[number]['contour']
            for frame_id, points in frames.items():
                contour[frame_id] = points
    #TODO(pjm): file locking or atomic update
    simulation_db.write_json(_roi_file(sim_id), data)
    return {}
示例#14
0
文件: rs4pi.py 项目: e-carlin/sirepo
def _update_roi_file(sim_id, contours):
    """Apply contour edits to the persisted ROI file for a simulation."""
    data = _read_roi_file(sim_id)
    rois = data['models']['regionsOfInterest']
    for roi_number in contours:
        new_frames = contours[roi_number]
        if roi_number in rois:
            # merge frame-by-frame into the existing ROI's contour map
            for frame_id in new_frames:
                rois[roi_number]['contour'][frame_id] = new_frames[frame_id]
        else:
            rois[roi_number] = new_frames
    #TODO(pjm): file locking or atomic update
    simulation_db.write_json(_roi_file(sim_id), data)
    return {}
示例#15
0
文件: webcon.py 项目: JiayangY/sirepo
def _run_beam_steering(server_address, steering, periodic_callback):
    """Run the selected steering optimization and record the outcome.

    Args:
        server_address: EPICS server to read/write
        steering: model with a steeringMethod field ('nmead' or 'polyfit')
        periodic_callback: invoked by the optimizers while running
    """
    method = steering.steeringMethod
    try:
        if method == 'nmead':
            res = _optimize_nelder_mead(server_address, periodic_callback)
        elif method == 'polyfit':
            res = _optimize_polyfit(server_address, periodic_callback)
        else:
            # previously an unknown method crashed with NameError on "res";
            # fail with an explicit message instead
            raise AssertionError('unknown steeringMethod: {}'.format(method))
        if 'result' in res:
            template.write_epics_values(server_address, template.CURRENT_FIELDS, res['result'])
        simulation_db.write_json(template.OPTIMIZER_RESULT_FILE, {
            'message': res['message'],
            'success': res['success'],
        })
    except AbortOptimizationException:
        # user aborted the optimization; no result file is written
        pass
示例#16
0
文件: rs4pi.py 项目: e-carlin/sirepo
def _compute_histogram(simulation, frames):
    """Compute the pixel histogram and frame summary and save both to the ROI file."""
    pixels = [frame['pixels'] for frame in frames]
    histogram = _histogram_from_pixels(pixels)
    filename = _roi_file(simulation['simulationId'])
    if os.path.exists(filename):
        roi_data = _read_roi_file(simulation['simulationId'])
    else:
        # no ROI file yet; start from an empty model set
        roi_data = {
            'models': {
                'regionsOfInterest': {},
            },
        }
    roi_data['models']['dicomHistogram'] = histogram
    roi_data['models']['dicomFrames'] = _summarize_frames(frames)
    simulation_db.write_json(filename, roi_data)
示例#17
0
def _compute_histogram(simulation, frames):
    """Build the DICOM histogram/frame summary models and persist them."""
    histogram = _histogram_from_pixels([f['pixels'] for f in frames])
    filename = _roi_file(simulation['simulationId'])
    if not os.path.exists(filename):
        # no ROI file yet; start from an empty model set
        roi_data = {
            'models': {
                'regionsOfInterest': {},
            },
        }
    else:
        roi_data = _read_roi_file(simulation['simulationId'])
    models = roi_data['models']
    models['dicomHistogram'] = histogram
    models['dicomFrames'] = _summarize_frames(frames)
    simulation_db.write_json(filename, roi_data)
示例#18
0
文件: srw.py 项目: e-carlin/sirepo
def _run_srw():
    """Execute the generated SRW parameters script and write the run result.

    NOTE(review): ``parsed_data`` is not defined in this function; it is
    presumably created by the exec'd parameters script. Relying on
    ``exec(..., locals(), locals())`` to populate function locals is fragile
    in Python 3 — verify this still binds ``main`` and ``parsed_data``.
    """
    #TODO(pjm): need to properly escape data values, untrusted from client
    data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
    exec(pkio.read_text(template_common.PARAMETERS_PYTHON_FILE), locals(), locals())
    locals()['main']()
    # special case for importing python code
    if data['report'] == 'backgroundImport':
        sim_id = data['models']['simulation']['simulationId']
        parsed_data['models']['simulation']['simulationId'] = sim_id
        #TODO(pjm): assumes the parent directory contains the simulation data,
        # can't call simulation_db.save_simulation_json() because user isn't set for pkcli commands
        simulation_db.write_json('../{}'.format(simulation_db.SIMULATION_DATA_FILE), parsed_data)
        simulation_db.write_result({
            'simulationId': sim_id,
        })
    else:
        simulation_db.write_result(extract_report_data(get_filename_for_model(data['report']), data))
示例#19
0
文件: warpvnd.py 项目: ahebnl/Sirepo
def run_background(cfg_dir):
    """Run warpvnd in ``cfg_dir`` with mpi

    Args:
        cfg_dir (str): directory to run warpvnd in
    """
    data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
    #TODO(pjm): only run with mpi for 3d case for now
    use_mpi = (
        data.models.simulationGrid.simulation_mode == '3d'
        and data.report != 'optimizerAnimation'
        and data.models.simulation.executionMode == 'parallel'
    )
    if use_mpi:
        simulation_db.write_json(
            py.path.local(cfg_dir).join(template.MPI_SUMMARY_FILE),
            {'mpiCores': mpi.cfg.cores},
        )
        template_common.exec_parameters_with_mpi()
    else:
        template_common.exec_parameters()
示例#20
0
def _output_info(run_dir):
    """Return per-output-file info for the run, cached to _OUTPUT_INFO_FILE."""
    # cache outputInfo to file, used later for report frames
    info_file = run_dir.join(_OUTPUT_INFO_FILE)
    if os.path.isfile(str(info_file)):
        return simulation_db.read_json(info_file)
    data = simulation_db.read_json(
        run_dir.join(template_common.INPUT_BASE_NAME))
    res = []
    filename_map = _build_filename_map(data)
    for k in filename_map['keys_in_order']:
        filename = filename_map[k]
        # key encodes "<id><sep><index>"; renamed to avoid shadowing builtin id()
        file_id = k.split(_FILE_ID_SEP)
        info = _file_info(filename, run_dir, file_id[0], file_id[1])
        if info:
            info['modelKey'] = 'elementAnimation{}'.format(info['id'])
            res.append(info)
    simulation_db.write_json(info_file, res)
    return res
示例#21
0
def run_background(cfg_dir):
    """Run warpvnd in ``cfg_dir`` with mpi

    Args:
        cfg_dir (str): directory to run warpvnd in
    """
    # limit to 1 until we do parallel properly
    mpi.cfg.cores = 1
    result = PKDict()
    simulation_db.write_json(
        py.path.local(cfg_dir).join(template.MPI_SUMMARY_FILE),
        {'mpiCores': mpi.cfg.cores},
    )
    try:
        template_common.exec_parameters_with_mpi()
    except Exception as e:
        # record the failure in the result rather than raising
        result.error = str(e)
    simulation_db.write_result(result)
示例#22
0
文件: zgoubi.py 项目: e-carlin/sirepo
def _bunch_match_twiss(cfg_dir):
    """Optionally recompute bunch fields from a twiss run.

    Reads the simulation input and, when twiss matching is requested for a
    bunch report or animation, runs a twiss computation and copies selected
    twiss columns back onto the bunch model, saving a bunch summary file.

    Args:
        cfg_dir: run directory (wrapped with py.path.local)

    Returns:
        the (possibly updated) simulation data
    """
    data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
    bunch = data.models.bunch
    if bunch.match_twiss_parameters == '1' and ('bunchReport' in data.report or data.report == 'animation'):
        # temporarily switch the report so the generated parameters run twiss
        report = data['report']
        data['report'] = 'twissReport2'
        template.write_parameters(data, py.path.local(cfg_dir), False, 'twiss.py')
        _run_zgoubi(cfg_dir, python_file='twiss.py')
        col_names, row = template.extract_first_twiss_row(cfg_dir)
        # copy each twiss column onto its corresponding bunch field
        for f in _TWISS_TO_BUNCH_FIELD.keys():
            v = template.column_data(f, col_names, [row])[0]
            bunch[_TWISS_TO_BUNCH_FIELD[f]] = v
            if f == 'btx' or f == 'bty':
                # beta functions must be positive
                assert v > 0, 'invalid twiss parameter: {} <= 0'.format(f)
        simulation_db.write_json(py.path.local(cfg_dir).join(template.BUNCH_SUMMARY_FILE), bunch)
        data['report'] = report
        # rewrite the original report with original parameters
        template.write_parameters(data, py.path.local(cfg_dir), False)
    return data
示例#23
0
def _run_srw():
    """Execute the generated SRW parameters script and write the run result.

    NOTE(review): ``parsed_data`` is not defined in this function; it is
    presumably created by the exec'd parameters script. Relying on
    ``exec(..., locals(), locals())`` to populate function locals is fragile
    in Python 3 — verify this still binds ``main`` and ``parsed_data``.
    """
    #TODO(pjm): need to properly escape data values, untrusted from client
    data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
    exec(pkio.read_text(template_common.PARAMETERS_PYTHON_FILE), locals(),
         locals())
    locals()['main']()
    # special case for importing python code
    if data['report'] == 'backgroundImport':
        sim_id = data['models']['simulation']['simulationId']
        parsed_data['models']['simulation']['simulationId'] = sim_id
        #TODO(pjm): assumes the parent directory contains the simulation data,
        # can't call simulation_db.save_simulation_json() because user isn't set for pkcli commands
        simulation_db.write_json(
            '../{}'.format(simulation_db.SIMULATION_DATA_FILE), parsed_data)
        simulation_db.write_result({
            'simulationId': sim_id,
        })
    else:
        simulation_db.write_result(
            extract_report_data(get_filename_for_model(data['report']), data))
示例#24
0
def _output_info(run_dir):
    """Return per-output-file info for the run, rebuilding a stale cache.

    Consistency fix: the cached read is wrapped in try/except ValueError so a
    corrupt cache file is rebuilt instead of raising, matching the other
    _output_info variants in this project.
    """
    # cache outputInfo to file, used later for report frames
    info_file = run_dir.join(_OUTPUT_INFO_FILE)
    if os.path.isfile(str(info_file)):
        try:
            res = simulation_db.read_json(info_file)
            # an empty cache or a version match is considered valid
            if not res or res[0].get('_version', '') == _OUTPUT_INFO_VERSION:
                return res
        except ValueError:
            # corrupt cache file; fall through and rebuild it
            pass
    data = simulation_db.read_json(run_dir.join(template_common.INPUT_BASE_NAME))
    res = []
    filename_map = _build_filename_map(data)
    for k in filename_map['keys_in_order']:
        filename = filename_map[k]
        # key encodes "<id><sep><index>"; renamed to avoid shadowing builtin id()
        file_id = k.split(_FILE_ID_SEP)
        info = _file_info(filename, run_dir, file_id[0], file_id[1])
        if info:
            info['modelKey'] = 'elementAnimation{}'.format(info['id'])
            res.append(info)
    if res:
        res[0]['_version'] = _OUTPUT_INFO_VERSION
    simulation_db.write_json(info_file, res)
    return res
示例#25
0
def _compute_range_across_files(run_dir):
    """Compute min/max per phase-space coordinate across all particle files.

    The result is cached on the simulation input file.

    Returns:
        None when there is no bunchAnimation model; otherwise a dict of
        field name -> [min, max]
    """
    data = simulation_db.read_json(run_dir.join(template_common.INPUT_BASE_NAME))
    if 'bunchAnimation' not in data.models:
        return None
    if 'fieldRange' in data.models.bunchAnimation:
        # already computed; use the cached value
        return data.models.bunchAnimation.fieldRange
    res = {}
    for v in _SCHEMA.enum.PhaseSpaceCoordinate6:
        res[v[0]] = []
    for filename in _particle_file_list(run_dir):
        with h5py.File(str(filename), 'r') as f:
            for field in res:
                values = f['particles'][:, _COORD6.index(field)].tolist()
                if len(res[field]):
                    # widen the existing [min, max] with this file's values
                    res[field][0] = min(min(values), res[field][0])
                    res[field][1] = max(max(values), res[field][1])
                else:
                    # first file seen for this field
                    res[field] = [min(values), max(values)]
    data.models.bunchAnimation.fieldRange = res
    simulation_db.write_json(run_dir.join(template_common.INPUT_BASE_NAME), data)
    return res
示例#26
0
def _summarize_rt_structure(simulation, plan, frame_ids):
    """Extract regions of interest from an RT structure plan and save them to the ROI file.

    Args:
        simulation (dict): used for its 'simulationId'
        plan: dataset with StructureSetROISequence / ROIContourSequence
            (pydicom-style attribute access — presumably DICOM RTSTRUCT)
        frame_ids: known frame z-ids; a contour outside this set raises

    Returns:
        dict: ROI number -> {name, contour, color} for ROIs with contours
    """
    rois = {}
    for roi in plan.StructureSetROISequence:
        rois[roi.ROINumber] = {
            'name': roi.ROIName,
        }
    res = {}
    for roi_contour in plan.ROIContourSequence:
        roi = rois[roi_contour.ReferencedROINumber]
        if 'contour' in roi:
            raise RuntimeError('duplicate contour sequence for roi')
        if not hasattr(roi_contour, 'ContourSequence'):
            continue
        roi['contour'] = {}
        for contour in roi_contour.ContourSequence:
            if contour.ContourGeometricType != 'CLOSED_PLANAR':
                continue
            if len(contour.ContourData):
                # the z index is the key
                ct_id = _frame_id(contour.ContourData[2])
                if ct_id not in frame_ids:
                    raise RuntimeError('contour z not in frames: {}', ct_id)
                contour_data = _float_list(contour.ContourData)
                # every 3rd value is z; all z on a planar contour must match
                if len(contour_data) > 3 and ct_id != _frame_id(
                        contour_data[5]):
                    raise RuntimeError('expected contour data z to be equal')
                # drop the z coordinates, leaving interleaved x,y pairs
                del contour_data[2::3]
                if ct_id not in roi['contour']:
                    roi['contour'][ct_id] = []
                roi['contour'][ct_id].append(contour_data)
        # only keep ROIs that produced at least one contour
        if roi['contour']:
            roi['color'] = _string_list(roi_contour.ROIDisplayColor)
            res[roi_contour.ReferencedROINumber] = roi
    simulation_db.write_json(_roi_file(simulation['simulationId']), {
        'models': {
            'regionsOfInterest': res,
        },
    })
    return res
示例#27
0
def run_background(cfg_dir):
    """Run warpvnd in ``cfg_dir`` with mpi

    Args:
        cfg_dir (str): directory to run warpvnd in
    """
    with pkio.save_chdir(cfg_dir):
        data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
        #TODO(pjm): only run with mpi for 3d case for now
        wants_mpi = (
            data.models.simulationGrid.simulation_mode == '3d'
            and data.report != 'optimizerAnimation'
            and data.models.simulation.executionMode == 'parallel'
        )
        if wants_mpi:
            pkdc('RUNNING MPI')
            simulation_db.write_json(
                py.path.local(cfg_dir).join(template.MPI_SUMMARY_FILE),
                {'mpiCores': mpi.cfg.cores},
            )
            mpi.run_script(_script())
        else:
            pkdc('RUNNING SINGLE PROCESS')
            exec(_script(), locals(), locals())
        simulation_db.write_result({})
示例#28
0
def _bunch_match_twiss(cfg_dir):
    """Optionally recompute bunch fields from a twiss run.

    Reads the simulation input and, when twiss matching is requested for a
    bunch report or animation, runs a twiss computation and copies selected
    twiss columns back onto the bunch model, saving a bunch summary file.

    Returns:
        the (possibly updated) simulation data
    """
    data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
    bunch = data.models.bunch
    if bunch.match_twiss_parameters == '1' and ('bunchReport' in data.report
                                                or data.report == 'animation'):
        # temporarily switch the report so the generated parameters run twiss
        report = data['report']
        data['report'] = 'twissReport2'
        template.write_parameters(data, py.path.local(cfg_dir), False,
                                  'twiss.py')
        _run_zgoubi(cfg_dir, python_file='twiss.py')
        col_names, row = template.extract_first_twiss_row(cfg_dir)
        # copy each twiss column onto its corresponding bunch field
        for f in _TWISS_TO_BUNCH_FIELD.keys():
            v = template.column_data(f, col_names, [row])[0]
            bunch[_TWISS_TO_BUNCH_FIELD[f]] = v
            if f == 'btx' or f == 'bty':
                # beta functions must be positive
                assert v > 0, 'invalid twiss parameter: {} <= 0'.format(f)
        simulation_db.write_json(
            py.path.local(cfg_dir).join(template.BUNCH_SUMMARY_FILE), bunch)
        data['report'] = report
        # rewrite the original report with original parameters
        template.write_parameters(data, py.path.local(cfg_dir), False)
    return data
示例#29
0
文件: rs4pi.py 项目: e-carlin/sirepo
def _summarize_rt_structure(simulation, plan, frame_ids):
    """Extract regions of interest from an RT structure plan and save them to the ROI file.

    Args:
        simulation (dict): used for its 'simulationId'
        plan: dataset with StructureSetROISequence / ROIContourSequence
            (pydicom-style attribute access — presumably DICOM RTSTRUCT)
        frame_ids: known frame z-ids; a contour outside this set raises

    Returns:
        dict: ROI number -> {name, contour, color} for ROIs with contours
    """
    rois = {}
    for roi in plan.StructureSetROISequence:
        rois[roi.ROINumber] = {
            'name': roi.ROIName,
        }
    res = {}
    for roi_contour in plan.ROIContourSequence:
        roi = rois[roi_contour.ReferencedROINumber]
        if 'contour' in roi:
            raise RuntimeError('duplicate contour sequence for roi')
        if not hasattr(roi_contour, 'ContourSequence'):
            continue
        roi['contour'] = {}
        for contour in roi_contour.ContourSequence:
            if contour.ContourGeometricType != 'CLOSED_PLANAR':
                continue
            if len(contour.ContourData):
                # the z index is the key
                ct_id = _frame_id(contour.ContourData[2])
                if ct_id not in frame_ids:
                    raise RuntimeError('contour z not in frames: {}', ct_id)
                contour_data = _float_list(contour.ContourData)
                # every 3rd value is z; all z on a planar contour must match
                if len(contour_data) > 3 and ct_id != _frame_id(contour_data[5]):
                    raise RuntimeError('expected contour data z to be equal')
                # drop the z coordinates, leaving interleaved x,y pairs
                del contour_data[2::3]
                if ct_id not in roi['contour']:
                    roi['contour'][ct_id] = []
                roi['contour'][ct_id].append(contour_data)
        # only keep ROIs that produced at least one contour
        if roi['contour']:
            roi['color'] = _string_list(roi_contour.ROIDisplayColor)
            res[roi_contour.ReferencedROINumber] = roi
    simulation_db.write_json(_roi_file(simulation['simulationId']), {
        'models': {
            'regionsOfInterest': res,
        },
    })
    return res
示例#30
0
def write_sequential_result(result, run_dir=None):
    """Write the results of a sequential simulation to disk.

    Args:
        result (dict): The results of the simulation
        run_dir (py.path): Defaults to current dir
    """
    from sirepo import simulation_db

    run_dir = run_dir or pkio.py_path()
    f = simulation_db.json_filename(OUTPUT_BASE_NAME, run_dir)
    # a pre-existing output file indicates a double write
    assert not f.exists(), \
        '{} file exists'.format(OUTPUT_BASE_NAME)
    simulation_db.write_json(f, result)
    # give the template a chance to clean up after itself
    t = sirepo.template.import_module(
        simulation_db.read_json(
            simulation_db.json_filename(INPUT_BASE_NAME, run_dir),
        ),
    )
    if hasattr(t, 'clean_run_dir'):
        t.clean_run_dir(run_dir)
示例#31
0
def _summarize_dicom_series(simulation, frames):
    """Write per-slice JSON summaries for the 't', 'c' and 's' series.

    Writes one JSON file per original frame (transverse, 't'), then derived
    summaries for the other two orientations ('c' and 's'), and returns the
    pixel spacing as a list of strings.

    NOTE(review): both derived loops iterate range(len(frame0['pixels'][0]))
    even though the 's' shape uses len(frame0['pixels'][1]) — confirm
    whether the second loop should use index 1 for non-square frames.
    """
    idx = 0
    # spacing between adjacent slices along z, from the first two frames
    z_space = abs(
        float(frames[0]['ImagePositionPatient'][2]) -
        float(frames[1]['ImagePositionPatient'][2]))
    os.mkdir(_sim_file(simulation['simulationId'], _DICOM_DIR))
    # 't' series: one summary file per original frame
    for frame in frames:
        res = {
            'shape': frame['shape'],
            'ImagePositionPatient': frame['ImagePositionPatient'],
            'PixelSpacing': frame['PixelSpacing'],
            'domain': _calculate_domain(frame),
            'frameId': frame['frameId'],
        }
        filename = _dicom_path(simulation, 't', idx)
        simulation_db.write_json(filename, res)
        idx += 1

    frame0 = frames[0]
    # 'c' series: slices derived by swapping patient axes 1 and 2
    shape = [
        len(frames),
        len(frame0['pixels'][0]),
    ]
    res = {
        'shape':
        shape,
        'ImagePositionPatient': [
            frame0['ImagePositionPatient'][0],
            frame0['ImagePositionPatient'][2],
            frame0['ImagePositionPatient'][1],
        ],
        'PixelSpacing': [
            frame0['PixelSpacing'][0],
            z_space,
        ],
    }
    for idx in range(len(frame0['pixels'][0])):
        # advance the slice position along the original axis 1
        res['ImagePositionPatient'][2] = str(
            float(frame0['ImagePositionPatient'][1]) +
            idx * float(frame0['PixelSpacing'][0]))
        res['domain'] = _calculate_domain(res)
        filename = _dicom_path(simulation, 'c', idx)
        simulation_db.write_json(filename, res)

    # 's' series: slices derived by swapping patient axes 0 and 2
    shape = [
        len(frames),
        len(frame0['pixels'][1]),
    ]
    res = {
        'shape':
        shape,
        'ImagePositionPatient': [
            frame0['ImagePositionPatient'][1],
            frame0['ImagePositionPatient'][2],
            frame0['ImagePositionPatient'][0],
        ],
        'PixelSpacing': [
            frame0['PixelSpacing'][0],
            z_space,
        ],
    }
    for idx in range(len(frame0['pixels'][0])):
        # advance the slice position along the original axis 0
        res['ImagePositionPatient'][2] = str(
            float(frame0['ImagePositionPatient'][0]) +
            idx * float(frame0['PixelSpacing'][1]))
        res['domain'] = _calculate_domain(res)
        filename = _dicom_path(simulation, 's', idx)
        simulation_db.write_json(filename, res)
    spacing = frame0['PixelSpacing']
    return _string_list([spacing[0], spacing[1], z_space])
示例#32
0
def _output_info(run_dir):
    """Collect plotting metadata for each output file produced by a run.

    The result is cached in ``_OUTPUT_INFO_FILE`` inside ``run_dir`` (tagged
    with ``_OUTPUT_INFO_VERSION``) so later report-frame requests can reuse
    it without re-parsing the SDDS files.

    Args:
        run_dir (py.path.local): directory containing the run's output files

    Returns:
        list: one PKDict of metadata per existing output file
    """
    def _info(filename, run_dir, file_id):
        def _defs(parameters):
            """Convert parameters to useful definitions"""
            return PKDict({
                p: PKDict(
                    zip(
                        [
                            'symbol', 'units', 'description', 'format_string',
                            'type', 'fixed_value'
                        ],
                        sdds.sddsdata.GetParameterDefinition(_SDDS_INDEX, p),
                    ), )
                for p in parameters
            })

        def _fix(v):
            # JSON cannot represent inf/nan; coerce to 0 for serialization
            if isinstance(v, float) and (math.isinf(v) or math.isnan(v)):
                return 0
            return v

        file_path = run_dir.join(filename)
        # The dot must be escaped: r'.sdds$' would also match names merely
        # ending in "sdds" (e.g. "xsdds") and misroute them to the SDDS parser.
        if not re.search(r'\.sdds$', filename, re.IGNORECASE):
            if file_path.exists():
                return PKDict(
                    isAuxFile=True,
                    filename=filename,
                    id=file_id,
                    lastUpdateTime=int(os.path.getmtime(str(file_path))),
                )
            return None
        try:
            if sdds.sddsdata.InitializeInput(_SDDS_INDEX, str(file_path)) != 1:
                return None
            column_names = sdds.sddsdata.GetColumnNames(_SDDS_INDEX)
            plottable_columns = []
            double_column_count = 0
            field_range = PKDict()
            for col in column_names:
                col_type = sdds.sddsdata.GetColumnDefinition(_SDDS_INDEX,
                                                             col)[4]
                if col_type < _SDDS_STRING_TYPE:
                    plottable_columns.append(col)
                if col_type in _SDDS_DOUBLE_TYPES:
                    double_column_count += 1
                field_range[col] = []
            parameter_names = sdds.sddsdata.GetParameterNames(_SDDS_INDEX)
            parameters = PKDict([(p, []) for p in parameter_names])
            page_count = 0
            row_counts = []
            while sdds.sddsdata.ReadPage(_SDDS_INDEX) > 0:
                row_counts.append(sdds.sddsdata.RowCount(_SDDS_INDEX))
                page_count += 1
                for i, p in enumerate(parameter_names):
                    parameters[p].append(
                        _fix(sdds.sddsdata.GetParameter(_SDDS_INDEX, i)))
                # enumerate avoids the O(n^2) column_names.index() lookup
                for col_idx, col in enumerate(column_names):
                    try:
                        values = sdds.sddsdata.GetColumn(_SDDS_INDEX, col_idx)
                    except SystemError:
                        # incorrectly generated sdds file
                        break
                    if not values:
                        pass
                    elif field_range[col]:
                        # extend the running [min, max] across pages
                        field_range[col][0] = min(_fix(min(values)),
                                                  field_range[col][0])
                        field_range[col][1] = max(_fix(max(values)),
                                                  field_range[col][1])
                    else:
                        field_range[col] = [
                            _fix(min(values)),
                            _fix(max(values))
                        ]
            return PKDict(
                # a file with at most one numeric column has nothing to plot
                isAuxFile=double_column_count <= 1,
                filename=filename,
                id=file_id,
                rowCounts=row_counts,
                pageCount=page_count,
                columns=column_names,
                parameters=parameters,
                parameterDefinitions=_defs(parameters),
                plottableColumns=plottable_columns,
                lastUpdateTime=int(os.path.getmtime(str(file_path))),
                isHistogram=_is_histogram_file(filename, column_names),
                fieldRange=field_range,
            )
        finally:
            # always release the shared SDDS slot, even on parse failure
            try:
                sdds.sddsdata.Terminate(_SDDS_INDEX)
            except Exception:
                pass

    # cache outputInfo to file, used later for report frames
    info_file = run_dir.join(_OUTPUT_INFO_FILE)
    if os.path.isfile(str(info_file)):
        try:
            res = simulation_db.read_json(info_file)
            # an empty result or a matching version means the cache is valid
            if not res or res[0].get('_version', '') == _OUTPUT_INFO_VERSION:
                return res
        except ValueError:
            # corrupt cache file; fall through and rebuild it
            pass
    _sdds_init()
    data = simulation_db.read_json(
        run_dir.join(template_common.INPUT_BASE_NAME))
    res = []
    filename_map = _build_filename_map(data)
    for k in filename_map.keys_in_order:
        filename = filename_map[k]
        info = _info(filename, run_dir, k)
        if info:
            info.modelKey = 'elementAnimation{}'.format(info.id)
            res.append(info)
    if res:
        res[0]['_version'] = _OUTPUT_INFO_VERSION
    simulation_db.write_json(info_file, res)
    return res
Example #33
0
File: rs4pi.py Project: e-carlin/sirepo
def _summarize_dicom_series(simulation, frames):
    """Write per-slice JSON summaries for a DICOM series in the transverse,
    coronal and sagittal planes.

    Args:
        simulation (dict): simulation info; must contain 'simulationId'
        frames (list): axial DICOM frames with 'shape', 'pixels',
            'ImagePositionPatient', 'PixelSpacing' and 'frameId'

    Returns:
        list: [x spacing, y spacing, z spacing] as strings
    """
    # slice separation inferred from the first two frames' z positions
    z_space = abs(float(frames[0]['ImagePositionPatient'][2])
                  - float(frames[1]['ImagePositionPatient'][2]))
    os.mkdir(_sim_file(simulation['simulationId'], _DICOM_DIR))
    # transverse (axial) plane: one summary file per original frame
    for idx, frame in enumerate(frames):
        simulation_db.write_json(
            _dicom_path(simulation, 't', idx),
            {
                'shape': frame['shape'],
                'ImagePositionPatient': frame['ImagePositionPatient'],
                'PixelSpacing': frame['PixelSpacing'],
                'domain': _calculate_domain(frame),
                'frameId': frame['frameId'],
            })

    frame0 = frames[0]

    def _write_plane(plane, shape, position, base_index, spacing_index):
        # One summary file per reconstructed slice of the given plane; only
        # the last ImagePositionPatient component advances between slices.
        res = {
            'shape': shape,
            'ImagePositionPatient': position,
            'PixelSpacing': [
                frame0['PixelSpacing'][0],
                z_space,
            ],
        }
        base = float(frame0['ImagePositionPatient'][base_index])
        step = float(frame0['PixelSpacing'][spacing_index])
        # NOTE(review): the slice count uses pixels[0] (length of row 0) for
        # both planes even though the sagittal shape uses pixels[1]; the two
        # lengths are equal for rectangular pixel arrays — confirm intent.
        for idx in range(len(frame0['pixels'][0])):
            res['ImagePositionPatient'][2] = str(base + idx * step)
            res['domain'] = _calculate_domain(res)
            simulation_db.write_json(_dicom_path(simulation, plane, idx), res)

    ipp = frame0['ImagePositionPatient']
    # coronal: x stays first, z second; slices advance along y
    _write_plane(
        'c',
        [len(frames), len(frame0['pixels'][0])],
        [ipp[0], ipp[2], ipp[1]],
        1,
        0,
    )
    # sagittal: y stays first, z second; slices advance along x
    _write_plane(
        's',
        [len(frames), len(frame0['pixels'][1])],
        [ipp[1], ipp[2], ipp[0]],
        0,
        1,
    )
    spacing = frame0['PixelSpacing']
    return _string_list([spacing[0], spacing[1], z_space])