예제 #1
0
def update_rsmanifest(data):
    """Attach the rsmanifest JSON to *data*.

    A missing manifest file is not an error: *data* is simply left
    untouched. Any other failure propagates to the caller.
    """
    try:
        manifest = read_json(_RSMANIFEST_PATH)
    except Exception as exc:
        if not pkio.exception_is_not_found(exc):
            raise
        # Manifest file absent: nothing to attach.
        return
    data.rsmanifest = manifest
예제 #2
0
파일: radia.py 프로젝트: QJohn2017/sirepo
def _read_h5_path(sim_id, h5path):
    """Read the dict stored at *h5path* in the simulation's geometry file.

    Args:
        sim_id (str): simulation id used to locate the geometry file
        h5path (str): path within the HDF5 file

    Returns:
        dict: contents at *h5path*, or None if the file or the path
            within it does not exist (caller is expected to regenerate)

    Raises:
        IOError: for I/O failures other than a missing file
    """
    try:
        with h5py.File(_geom_file(sim_id), 'r') as hf:
            return template_common.h5_to_dict(hf, path=h5path)
    except IOError as e:
        if pkio.exception_is_not_found(e):
            # need to generate file
            return None
        # Bug fix: previously any other IOError fell through and returned
        # None implicitly, making a real I/O failure indistinguishable
        # from "file not generated yet". Re-raise it instead.
        raise
    except KeyError:
        # no such path in file
        return None
예제 #3
0
def api_downloadFile(simulation_type, simulation_id, filename):
    """Serve a simulation library file as an HTTP attachment.

    Responds with not-found when the library file does not exist;
    any other failure propagates.
    """
    #TODO(pjm): simulation_id is an unused argument
    req = http_request.parse_params(type=simulation_type, filename=filename)
    attachment_name = req.sim_data.lib_file_name_without_type(req.filename)
    path = req.sim_data.lib_file_abspath(req.filename)
    try:
        return http_reply.gen_file_as_attachment(path, filename=attachment_name)
    except Exception as exc:
        if not pkio.exception_is_not_found(exc):
            raise
        sirepo.util.raise_not_found('lib_file={} not found', path)
예제 #4
0
def read_result(run_dir):
    """Read result data file from simulation

    Args:
        run_dir (py.path): where to find output

    Returns:
        dict: result or describes error
    """
    fn = json_filename(template_common.OUTPUT_BASE_NAME, run_dir)
    res = None
    err = None
    try:
        res = read_json(fn)
    except Exception as e:
        pkdc('{}: exception={}', fn, e)
        # Default error text is the full traceback; may be replaced below.
        err = pkdexc()
        if pkio.exception_is_not_found(e):
            #TODO(robnagler) change POSIT matches _SUBPROCESS_ERROR_RE
            err = 'ERROR: Terminated unexpectedly'
            # Not found so return run.log as err
            rl = run_dir.join(template_common.RUN_LOG)
            try:
                # Distinct name: the original rebound "e" (the exception)
                # to the log text, which obscured the logic.
                log_text = pkio.read_text(rl)
                if _RUN_LOG_CANCEL_RE.search(log_text):
                    # A cancel marker in the log is not an error.
                    err = None
                elif log_text:
                    err = log_text
            except Exception as log_exc:
                # Best-effort: a missing run.log is expected; log the rest.
                if not pkio.exception_is_not_found(log_exc):
                    pkdlog('{}: error reading log: {}', rl, pkdexc())
        else:
            pkdlog('{}: error reading output: {}', fn, err)
    if err:
        return None, err
    if not res:
        res = {}
    # Idiom fix (matches the other copy of this function): use "not in".
    if 'state' not in res:
        # Old simulation or other error, just say is canceled so restarts
        res = {'state': 'canceled'}
    return res, None
예제 #5
0
def read_result(run_dir):
    """Read result data file from simulation

    Args:
        run_dir (py.path): where to find output

    Returns:
        dict: result or describes error
    """
    fn = json_filename(template_common.OUTPUT_BASE_NAME, run_dir)
    res = None
    err = None
    try:
        res = read_json(fn)
    except Exception as e:
        pkdc('{}: exception={}', fn, e)
        # Default error text is the full traceback; may be replaced below.
        err = pkdexc()
        if pkio.exception_is_not_found(e):
            #TODO(robnagler) change POSIT matches _SUBPROCESS_ERROR_RE
            err = 'ERROR: Terminated unexpectedly'
            # Not found so return run.log as err
            rl = run_dir.join(template_common.RUN_LOG)
            try:
                # NOTE(review): "e" is deliberately rebound here from the
                # exception object to the run.log text.
                e = pkio.read_text(rl)
                if _RUN_LOG_CANCEL_RE.search(e):
                    # A cancel marker in the log is not an error.
                    err = None
                elif e:
                    # Non-empty log becomes the error text.
                    err = e
            except Exception as e:
                # Best-effort: a missing run.log is expected and ignored.
                if not pkio.exception_is_not_found(e):
                    pkdlog('{}: error reading log: {}', rl, pkdexc())
        else:
            pkdlog('{}: error reading output: {}', fn, err)
    if err:
        return None, err
    if not res:
        res = {}
    if 'state' not in res:
        # Old simulation or other error, just say is canceled so restarts
        res = {'state': 'canceled'}
    return res, None
예제 #6
0
def read_status(run_dir):
    """Read status from simulation dir

    Args:
        run_dir (py.path): where to read

    Returns:
        str: status file contents, 'stopped' if the file is missing,
            or 'error' for any other IOError
    """
    try:
        return pkio.read_text(run_dir.join(_STATUS_FILE))
    except IOError as e:
        if pkio.exception_is_not_found(e):
            # simulation may never have been run
            return 'stopped'
        # File exists but could not be read.
        return 'error'
예제 #7
0
def read_status(run_dir):
    """Read status from simulation dir

    Args:
        run_dir (py.path): where to read

    Returns:
        str: status file contents; 'stopped' when the file is absent,
            'error' for any other IOError
    """
    status_path = run_dir.join(_STATUS_FILE)
    try:
        return pkio.read_text(status_path)
    except IOError as exc:
        if not pkio.exception_is_not_found(exc):
            # File present but unreadable.
            return 'error'
        # simulation may never have been run
        return 'stopped'
예제 #8
0
def _read_h5_path(sim_id, filename, h5path, run_dir=_GEOM_DIR):
    """Read the dict stored at *h5path* in *filename* for a simulation.

    Args:
        sim_id (str): simulation id used to locate the result file
        filename (str): name of the HDF5 result file
        h5path (str): path within the HDF5 file
        run_dir (str): directory containing the file (default _GEOM_DIR)

    Returns:
        dict: contents at *h5path*, or None if the file or path is absent

    Raises:
        IOError: for I/O failures other than a missing file
    """
    try:
        with h5py.File(_get_res_file(sim_id, filename, run_dir=run_dir), 'r') as hf:
            return template_common.h5_to_dict(hf, path=h5path)
    except IOError as e:
        if pkio.exception_is_not_found(e):
            # Bug fix: debug message had a literal '(unknown)' where the
            # filename argument belongs.
            pkdc(f'{filename} not found in {run_dir}')
            # need to generate file
            return None
        # Bug fix: other IOErrors previously fell through and returned
        # None implicitly; re-raise so real failures are visible.
        raise
    except KeyError:
        # no such path in file
        pkdc(f'path {h5path} not found in {run_dir}/{filename}')
        return None
예제 #9
0
def add_code(name, version, uri, source_d, virtual_env=None, pyenv=None):
    """Add a new code to ~?rsmanifest.json

    Args:
        name (str): name of the package
        version (str): commit or version
        uri (str): repo, source link
        source_d (str): directory containing
        virtual_env (str): DEPRECATED
        pyenv (str): pyenv version
    """
    from pykern import pkcollections
    from pykern import pkio
    from pykern import pkjson
    import datetime
    import json

    fn = pkio.py_path(USER_FILE)
    try:
        values = pkcollections.json_load_any(fn)
    except Exception as e:
        if not (pkio.exception_is_not_found(e) or isinstance(e, ValueError)):
            raise
        # Missing or unparsable manifest: start from an empty one.
        values = pkcollections.Dict(
            version=FILE_VERSION,
            codes=pkcollections.Dict({_NO_PYENV: pkcollections.Dict()}),
        )
    # pyenv and virtual_env (deprecated alias) are mutually exclusive.
    if pyenv:
        assert not virtual_env, \
            'only one of pyenv or virtual-env (DEPRECATED)'
    elif virtual_env:
        assert not pyenv, \
            'only one of pyenv or virtual-env (DEPRECATED)'
        pyenv = virtual_env
    if not pyenv:
        pyenv = _NO_PYENV
    entry = pkcollections.Dict(
        installed=datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
        name=name,
        source_d=source_d,
        uri=uri,
        version=version,
    )
    env_codes = values.codes.get(pyenv) or pkcollections.Dict()
    env_codes[name.lower()] = entry
    values.codes[pyenv] = env_codes
    pkjson.dump_pretty(values, filename=fn)
예제 #10
0
def start_sbatch():
    """Restart the sbatch agent: kill any previously recorded agent
    process (locally or over ssh), record this process's host/pid in
    _PID_FILE, then run start(); the pid file is removed on exit.
    """
    def get_host():
        # Prefer the short hostname; fall back to the FQDN when the
        # short name has no domain component.
        h = socket.gethostname()
        if '.' not in h:
            h = socket.getfqdn()
        return h

    def kill_agent(pid_file):
        # pid_file is the parsed JSON record with .host and .pid fields.
        if get_host() == pid_file.host:
            os.kill(pid_file.pid, signal.SIGKILL)
        else:
            try:
                subprocess.run(
                    ('ssh', pid_file.host, 'kill', '-KILL', str(pid_file.pid)),
                    capture_output=True,
                    text=True,
                ).check_returncode()
            except subprocess.CalledProcessError as e:
                # "No such process" means the old agent already died;
                # anything else is logged but not fatal.
                if '({}) - No such process'.format(
                        pid_file.pid) not in e.stderr:
                    pkdlog('cmd={cmd} returncode={returncode} stderr={stderr}',
                           **vars(e))

    f = None
    try:
        f = pkjson.load_any(pkio.py_path(_PID_FILE))
    except Exception as e:
        # A missing pid file is the normal first-run case.
        if not pkio.exception_is_not_found(e):
            pkdlog('error={} stack={}', e, pkdexc())
    try:
        if f:
            kill_agent(f)
    except Exception as e:
        # Best-effort kill: log and continue with startup regardless.
        pkdlog('error={} stack={}', e, pkdexc())
    pkjson.dump_pretty(
        PKDict(
            host=get_host(),
            pid=os.getpid(),
        ),
        _PID_FILE,
    )
    try:
        start()
    finally:
        #TODO(robnagler) https://github.com/radiasoft/sirepo/issues/2195
        pkio.unchecked_remove(_PID_FILE)
예제 #11
0
파일: ml.py 프로젝트: cchall/sirepo
def _get_classification_output_col_encoding(frame_args):
    """Create _OUTPUT_FILE.classificationOutputColEncodingFile if not found.

    This file is a "new" addition so "older" runs may not have it.

    Args:
        frame_args (PKDict): provides run_dir for locating files

    Returns:
        PKDict: mapping of encoded label value to original class label
    """
    def _create_file():
        from sklearn.preprocessing import LabelEncoder

        # POSIT: Matches logic in package_data.template.ml.scale.py.jinja.read_data_and_encode_output_column()
        data = simulation_db.read_json(
            frame_args.run_dir.join(template_common.INPUT_BASE_NAME),
        )
        v = np.genfromtxt(
            str(simulation_db.simulation_lib_dir(SIM_TYPE).join(
                _filename(data.models.dataFile.file),
            )),
            delimiter=',',
            skip_header=data.models.columnInfo.hasHeaderRow,
            dtype=None,
            encoding='utf-8',
        )
        o = data.models.columnInfo.inputOutput.index('output')
        c = v[f'f{o}']
        e = LabelEncoder().fit(c)
        res = PKDict(
            zip(
                # Bug fix: np.float was removed in NumPy 1.24; it was
                # always an alias for builtin float, so this is equivalent.
                e.transform(e.classes_).astype(float).tolist(),
                e.classes_,
            ),
        )
        pkjson.dump_pretty(
            res,
            filename=_OUTPUT_FILE.classificationOutputColEncodingFile,
        )
        return res
    try:
        return simulation_db.read_json(
            frame_args.run_dir.join(_OUTPUT_FILE.classificationOutputColEncodingFile),
        )
    except Exception as e:
        if pkio.exception_is_not_found(e):
            return _create_file()
        # Bug fix: bare raise preserves the original traceback;
        # "raise e" restarted it from this line.
        raise
예제 #12
0
    def _create_supervisor_state_file(run_dir):
        """Build and persist a supervisor db record for one run directory.

        Skips the directory entirely when its input JSON is missing,
        and skips persisting when the run is neither completed nor
        canceled.
        """
        try:
            i, t = _load_in_json(run_dir)
        except Exception as e:
            # No input file means there is nothing to migrate.
            if pkio.exception_is_not_found(e):
                return
            raise
        u = sirepo.simulation_db.uid_from_dir_name(run_dir)
        # Subsequent sim_data calls read the logged-in user from cfg.
        sirepo.auth.cfg.logged_in_user = u
        c = sirepo.sim_data.get_class(i.simulationType)
        d = PKDict(
            computeJid=c.parse_jid(i, u),
            computeJobHash=c.compute_job_hash(
                i),  # TODO(e-carlin): Another user cookie problem
            computeJobSerial=t,
            computeJobStart=t,
            computeModel=c.compute_model(i),
            error=None,
            history=[],
            isParallel=c.is_parallel(i),
            simulationId=i.simulationId,
            simulationType=i.simulationType,
            uid=u,
        )
        d.pkupdate(
            jobRunMode=sirepo.job.PARALLEL
            if d.isParallel else sirepo.job.SEQUENTIAL,
            nextRequestSeconds=c.poll_seconds(i),
        )
        # Sets d.status (and related fields) from the run directory.
        _add_compute_status(run_dir, d)
        # Only terminal states are written to the supervisor db.
        if d.status not in (sirepo.job.COMPLETED, sirepo.job.CANCELED):
            return

        if d.isParallel:
            _add_parallel_status(i, c, run_dir, d)
        sirepo.util.json_dump(d, path=_db_file(d.computeJid))
예제 #13
0
파일: radia.py 프로젝트: QJohn2017/sirepo
def get_application_data(data, **kwargs):
    """Dispatch an application-data request by data.method.

    Loads the Radia geometry from the simulation's dump file, then
    handles 'get_field', 'get_field_integrals', 'get_geom' and
    'save_field' (which re-enters this function as 'get_field').

    NOTE(review): data is mutated in place for the 'save_field' path.
    """
    #pkdp('get_application_data from {}', data)
    if 'method' not in data:
        raise RuntimeError('no application data method')
    if data.method not in _METHODS:
        raise RuntimeError('unknown application data method: {}'.format(
            data.method))

    g_id = -1
    sim_id = data.simulationId
    try:
        with open(str(_dmp_file(sim_id)), 'rb') as f:
            b = f.read()
            g_id = radia_tk.load_bin(b)
    except IOError as e:
        if pkio.exception_is_not_found(e):
            # No Radia dump file
            return PKDict(warning='No Radia dump')
        # propagate other errors
        #return PKDict()
    if data.method == 'get_field':
        f_type = data.get('fieldType')
        #pkdp('FT {}', f_type)
        if f_type in radia_tk.POINT_FIELD_TYPES:
            #TODO(mvk): won't work for subsets of available paths, figure that out
            pass
            #try:
            #    res = _read_data(sim_id, data.viewType, f_type)
            #except KeyError:
            #    res = None
            #pkdp('READ RES {}', res)
            #if res:
            #    v = [d.vectors.vertices for d in res.data if 'vectors' in d]
            #    old_pts = [p for a in v for p in a]
            #    new_pts = _build_field_points(data.fieldPaths)
            #pkdp('CHECK FOR CHANGE OLD {} VS NEW {}', old_pts, new_pts)
            #    if len(old_pts) == len(new_pts) and numpy.allclose(new_pts, old_pts):
            #        return res
        #return _read_or_generate(g_id, data)
        res = _generate_field_data(g_id, data.name, f_type,
                                   data.get('fieldPaths', None))
        res.solution = _read_solution(sim_id)
        return res

    if data.method == 'get_field_integrals':
        return _generate_field_integrals(g_id, data.fieldPaths)
    if data.method == 'get_geom':
        g_types = data.get('geomTypes', ['lines', 'polygons'])
        res = _read_or_generate(g_id, data)
        rd = res.data if 'data' in res else []
        # Filter each geometry datum down to the requested type keys.
        res.data = [{k: d[k] for k in d.keys() if k in g_types} for d in rd]
        return res
    if data.method == 'save_field':
        #pkdp('DATA {}', data)
        # Re-enter this function to produce the field data to save.
        data.method = 'get_field'
        res = get_application_data(data)
        file_path = simulation_db.simulation_lib_dir(SIM_TYPE).join(
            sim_id + '_' + res.name + '.' + data.fileType)
        # we save individual field paths, so there will be one item in the list
        vectors = res.data[0].vectors
        #pkdp('DATUM {}', datum)
        if data.fileType == 'sdds':
            return _save_fm_sdds(res.name, vectors,
                                 _BEAM_AXIS_ROTATIONS[data.beamAxis],
                                 file_path)
        elif data.fileType == 'csv':
            return _save_field_csv(data.fieldType, vectors,
                                   _BEAM_AXIS_ROTATIONS[data.beamAxis],
                                   file_path)
        return res
예제 #14
0
파일: radia.py 프로젝트: cchall/sirepo
def get_application_data(data, **kwargs):
    """Dispatch an application-data request by data.method.

    Loads the Radia geometry from the simulation's dump file, then
    handles 'get_field', 'get_field_integrals', 'get_geom' and
    'save_field'. Both 'get_field' and 'save_field' re-enter this
    function recursively and mutate *data* in place.
    """
    if 'method' not in data:
        raise RuntimeError('no application data method')
    if data.method not in _METHODS:
        raise RuntimeError('unknown application data method: {}'.format(data.method))

    g_id = -1
    sim_id = data.simulationId
    try:
        with open(str(_dmp_file(sim_id)), 'rb') as f:
            b = f.read()
            g_id = radia_tk.load_bin(b)
    except IOError as e:
        if pkio.exception_is_not_found(e):
            # No Radia dump file
            return PKDict(warning='No Radia dump')
        # propagate other errors
    id_map = _read_id_map(sim_id)
    if data.method == 'get_field':
        f_type = data.get('fieldType')
        if f_type in radia_tk.POINT_FIELD_TYPES:
            #TODO(mvk): won't work for subsets of available paths, figure that out
            pass
            #try:
            #    res = _read_data(sim_id, data.viewType, f_type)
            #except KeyError:
            #    res = None
            #if res:
            #    v = [d.vectors.vertices for d in res.data if _SCHEMA.constants.geomTypeVectors in d]
            #    old_pts = [p for a in v for p in a]
            #    new_pts = _build_field_points(data.fieldPaths)
            #    if len(old_pts) == len(new_pts) and numpy.allclose(new_pts, old_pts):
            #        return res
        #return _read_or_generate(g_id, data)
        res = _generate_field_data(
            g_id, data.name, f_type, data.get('fieldPaths', None)
        )
        res.solution = _read_solution(sim_id)
        res.idMap = id_map
        # moved addition of lines from client
        # Temporarily retarget *data* to fetch geometry lines via a
        # recursive call, then restore the field type.
        tmp_f_type = data.fieldType
        data.fieldType = None
        data.geomTypes = [_SCHEMA.constants.geomTypeLines]
        data.method = 'get_geom'
        data.viewType = _SCHEMA.constants.viewTypeObjects
        new_res = get_application_data(data)
        res.data += new_res.data
        data.fieldType = tmp_f_type
        return res

    if data.method == 'get_field_integrals':
        return _generate_field_integrals(g_id, data.fieldPaths)
    if data.method == 'get_geom':
        g_types = data.get(
            'geomTypes',
            [_SCHEMA.constants.geomTypeLines, _SCHEMA.constants.geomTypePolys]
        )
        # Always keep positional/identifying keys alongside geometry.
        g_types.extend(['center', 'name', 'size'])
        res = _read_or_generate(g_id, data)
        rd = res.data if 'data' in res else []
        # Filter each geometry datum down to the requested type keys.
        res.data = [{k: d[k] for k in d.keys() if k in g_types} for d in rd]
        res.idMap = id_map
        return res
    if data.method == 'save_field':
        # Re-enter this function to produce the field data to save.
        data.method = 'get_field'
        res = get_application_data(data)
        file_path = simulation_db.simulation_lib_dir(SIM_TYPE).join(
            sim_id + '_' + res.name + '.' + data.fileType
        )
        # we save individual field paths, so there will be one item in the list
        vectors = res.data[0].vectors
        if data.fileType == 'sdds':
            return _save_fm_sdds(
                res.name,
                vectors,
                _BEAM_AXIS_ROTATIONS[data.beamAxis],
                file_path
            )
        elif data.fileType == 'csv':
            return _save_field_csv(
                data.fieldType,
                vectors,
                _BEAM_AXIS_ROTATIONS[data.beamAxis],
                file_path
            )
        return res
예제 #15
0
def get_application_data(data, **kwargs):
    """Dispatch an application-data request by data.method.

    Loads the Radia geometry id for the simulation, then handles
    'get_field', 'get_field_integrals', 'get_kick_map', 'get_geom'
    and 'save_field'. Both 'get_field' and 'save_field' re-enter
    this function recursively and mutate *data* in place.
    """
    if 'method' not in data:
        raise RuntimeError('no application data method')
    if data.method not in _SCHEMA.constants.getDataMethods:
        raise RuntimeError('unknown application data method: {}'.format(data.method))

    g_id = -1
    sim_id = data.simulationId
    try:
        g_id = _get_g_id(sim_id)
    except IOError as e:
        if pkio.exception_is_not_found(e):
            # No Radia dump file
            return PKDict(warning='No Radia dump')
        # propagate other errors
    id_map = _read_id_map(sim_id)
    if data.method == 'get_field':
        f_type = data.get('fieldType')
        res = _generate_field_data(
            g_id, data.name, f_type, data.get('fieldPaths', None)
        )
        res.solution = _read_solution(sim_id)
        res.idMap = id_map
        # Temporarily retarget *data* to fetch geometry lines via a
        # recursive call, then restore the field type.
        tmp_f_type = data.fieldType
        data.fieldType = None
        data.geomTypes = [_SCHEMA.constants.geomTypeLines]
        data.method = 'get_geom'
        data.viewType = _SCHEMA.constants.viewTypeObjects
        new_res = get_application_data(data)
        res.data += new_res.data
        data.fieldType = tmp_f_type
        return res

    if data.method == 'get_field_integrals':
        return _generate_field_integrals(g_id, data.fieldPaths)
    if data.method == 'get_kick_map':
        return _read_or_generate_kick_map(g_id, data)
    if data.method == 'get_geom':
        g_types = data.get(
            'geomTypes',
            [_SCHEMA.constants.geomTypeLines, _SCHEMA.constants.geomTypePolys]
        )
        # Always keep positional/identifying keys alongside geometry.
        g_types.extend(['center', 'name', 'size', 'id'])
        res = _read_or_generate(g_id, data)
        rd = res.data if 'data' in res else []
        # Filter each geometry datum down to the requested type keys.
        res.data = [{k: d[k] for k in d.keys() if k in g_types} for d in rd]
        res.idMap = id_map
        return res
    if data.method == 'save_field':
        # Re-enter this function to produce the field data to save.
        data.method = 'get_field'
        res = get_application_data(data)
        file_path = simulation_db.simulation_lib_dir(SIM_TYPE).join(
            f'{sim_id}_{res.name}.{data.fileExt}'
        )
        # we save individual field paths, so there will be one item in the list
        vectors = res.data[0].vectors
        if data.exportType == 'sdds':
            return _save_fm_sdds(
                res.name,
                vectors,
                _BEAM_AXIS_ROTATIONS[data.beamAxis],
                file_path
            )
        elif data.exportType == 'csv':
            return _save_field_csv(
                data.fieldType,
                vectors,
                _BEAM_AXIS_ROTATIONS[data.beamAxis],
                file_path
            )
        elif data.exportType == 'SRW':
            return _save_field_srw(
                data.fieldType,
                data.gap,
                vectors,
                _BEAM_AXIS_ROTATIONS[data.beamAxis],
                file_path
            )
        return res