Example #1
0
def api_runCancel():
    """Cancel the report job identified by the request payload.

    Two execution paths: when ``feature_config.cfg.runner_daemon`` is set the
    cancel is delegated to the runner daemon; otherwise the local runner's
    process table is consulted and the job is killed directly.

    Returns:
        flask.Response: JSON ``{'state': 'canceled'}`` — always reported as
        canceled from the client's perspective, even if no job was running.
    """
    data = _parse_data_input()
    jid = simulation_db.job_id(data)
    if feature_config.cfg.runner_daemon:
        jhash = template_common.report_parameters_hash(data)
        run_dir = simulation_db.simulation_run_dir(data)
        runner_client.cancel_report_job(run_dir, jhash)
    else:
        # TODO(robnagler) need to have a way of listing jobs
        # Don't bother with cache_hit check. We don't have any way of canceling
        # if the parameters don't match so for now, always kill.
        #TODO(robnagler) mutex required
        if runner.job_is_processing(jid):
            run_dir = simulation_db.simulation_run_dir(data)
            # Write first, since results are write once, and we want to
            # indicate the cancel instead of the termination error that
            # will happen as a result of the kill.
            simulation_db.write_result({'state': 'canceled'}, run_dir=run_dir)
            runner.job_kill(jid)
            # TODO(robnagler) should really be inside the template (t.cancel_simulation()?)
            # the last frame file may not be finished, remove it
            t = sirepo.template.import_module(data)
            if hasattr(t, 'remove_last_frame'):
                t.remove_last_frame(run_dir)
    # Always true from the client's perspective (single shared return replaces
    # the duplicated return that previously ended each branch)
    return http_reply.gen_json({'state': 'canceled'})
Example #2
0
def api_simulationFrame(frame_id):
    """Extract one animation frame and return it as a JSON response.

    Args:
        frame_id (str): ``'*'``-delimited fields, in order: simulationType,
            simulationId, modelName, animationArgs, frameIndex, startTime.

    Returns:
        flask.Response: JSON frame data; browser cache headers are set when
        the template allows frame caching and the frame holds no error.
    """
    #TODO(robnagler) startTime is reportParametersHash; need version on URL and/or param names in URL
    data = dict(zip(
        ['simulationType', 'simulationId', 'modelName', 'animationArgs', 'frameIndex', 'startTime'],
        frame_id.split('*'),
    ))
    template = sirepo.template.import_module(data)
    data['report'] = template.get_animation_name(data)
    run_dir = simulation_db.simulation_run_dir(data)
    model_data = simulation_db.read_json(run_dir.join(template_common.INPUT_BASE_NAME))
    if not feature_config.cfg.runner_daemon:
        frame = template.get_simulation_frame(run_dir, data, model_data)
    else:
        # XX TODO: it would be better if the frontend passed the jhash to this
        # call. Since it doesn't, we have to read it out of the run_dir, which
        # creates a race condition -- we might return a frame from a different
        # version of the report than the one the frontend expects.
        jhash = template_common.report_parameters_hash(model_data)
        frame = runner_client.run_extract_job(
            run_dir, jhash, 'get_simulation_frame', data,
        )
    resp = http_reply.gen_json(frame)
    if 'error' not in frame and template.WANT_BROWSER_FRAME_CACHE:
        # Cacheable for a year (31536000 s == 365 days, matching the Expires
        # header computed below).
        now = datetime.datetime.utcnow()
        resp.headers['Cache-Control'] = 'public, max-age=31536000'
        resp.headers['Expires'] = (now + datetime.timedelta(365)).strftime("%a, %d %b %Y %H:%M:%S GMT")
        resp.headers['Last-Modified'] = now.strftime("%a, %d %b %Y %H:%M:%S GMT")
    else:
        _no_cache(resp)
    return resp
Example #3
0
def report_info(data):
    """Read the run_dir and return cached_data.

    Only a hit if the models between data and cache match exactly. Otherwise,
    return cached data if it's there and valid.

    Args:
        data (dict): parameters identifying run_dir and models or reportParametersHash

    Returns:
        Dict: report parameters and hashes
    """
    rep = pkcollections.Dict(
        cache_hit=False,
        cached_data=None,
        cached_hash=None,
        job_id=job_id(data),
        model_name=data['report'],
        parameters_changed=False,
        run_dir=simulation_run_dir(data),
    )
    run_dir = rep.run_dir
    rep.input_file = json_filename(template_common.INPUT_BASE_NAME, run_dir)
    rep.job_status = read_status(run_dir)
    # Sets data['reportParametersHash']
    rep.req_hash = template_common.report_parameters_hash(data)
    if run_dir.check():
        #TODO(robnagler) Lock
        try:
            cached = read_json(rep.input_file)
            # Hash first: if hashing raises, cached_data stays None.
            rep.cached_hash = template_common.report_parameters_hash(cached)
            rep.cached_data = cached
            if rep.cached_hash == rep.req_hash:
                rep.cache_hit = True
            else:
                rep.parameters_changed = True
        except IOError as e:
            pkdlog('{}: ignore IOError: {} errno={}', run_dir, e, e.errno)
        except Exception as e:
            # No idea if cache is valid or not so throw away
            pkdlog('{}: ignore other error: {}', run_dir, e)
    return rep
Example #4
0
def report_info(data):
    """Read the run_dir and return cached_data.

    Only a hit if the models between data and cache match exactly. Otherwise,
    return cached data if it's there and valid.

    Args:
        data (dict): parameters identifying run_dir and models or reportParametersHash

    Returns:
        Dict: report parameters and hashes
    """
    rep = pkcollections.Dict(
        cache_hit=False,
        cached_data=None,
        cached_hash=None,
        job_id=job_id(data),
        model_name=_report_name(data),
        parameters_changed=False,
        run_dir=simulation_run_dir(data),
    )
    rep.input_file = json_filename(template_common.INPUT_BASE_NAME, rep.run_dir)
    rep.job_status = read_status(rep.run_dir)
    # Sets data['reportParametersHash']
    rep.req_hash = template_common.report_parameters_hash(data)
    if not rep.run_dir.check():
        # Nothing has ever run here, so there is no cache to compare against.
        return rep
    #TODO(robnagler) Lock
    try:
        cd = read_json(rep.input_file)
        # Hash before storing: if hashing raises, cached_data stays None.
        rep.cached_hash = template_common.report_parameters_hash(cd)
        rep.cached_data = cd
        if rep.req_hash != rep.cached_hash:
            rep.parameters_changed = True
        else:
            rep.cache_hit = True
    except IOError as e:
        pkdlog('{}: ignore IOError: {} errno={}', rep.run_dir, e, e.errno)
    except Exception as e:
        pkdlog('{}: ignore other error: {}', rep.run_dir, e)
        # No idea if cache is valid or not so throw away
    return rep
Example #5
0
def api_runSimulation():
    """Start (or re-start) a simulation report and return its current status.

    Runner-daemon path: checks the job status first and only requests a start
    when the job is not already running or completed. Legacy path: decides
    from the cached status/parameters whether a new process must be started.

    Returns:
        flask.Response: JSON status response for the client's polling loop.
    """
    from pykern import pkjson
    data = _parse_data_input(validate=True)
    # if flag is set
    # - check status
    # - if status is bad, rewrite the run dir (XX race condition, to fix later)
    # - then request it be started
    if feature_config.cfg.runner_daemon:
        jhash = template_common.report_parameters_hash(data)
        run_dir = simulation_db.simulation_run_dir(data)
        status = runner_client.report_job_status(run_dir, jhash)
        already_good_status = [
            runner_client.JobStatus.RUNNING, runner_client.JobStatus.COMPLETED
        ]
        if status not in already_good_status:
            data['simulationStatus'] = {
                'startTime': int(time.time()),
                'state': 'pending',
            }
            # Unique scratch dir per start attempt. Note the explicit str():
            # uuid.uuid4() returns a UUID object, which cannot be joined into
            # the path with '+' without conversion.
            tmp_dir = (
                run_dir + '-' + jhash + '-' + str(uuid.uuid4())
                + srdb.TMP_DIR_SUFFIX
            )
            cmd, _ = simulation_db.prepare_simulation(data, tmp_dir=tmp_dir)
            runner_client.start_report_job(run_dir, jhash, cfg.backend, cmd,
                                           tmp_dir)
        res = _simulation_run_status_runner_daemon(data, quiet=True)
        return http_reply.gen_json(res)
    else:
        res = _simulation_run_status(data, quiet=True)
        if ((not res['state'] in _RUN_STATES and
             (res['state'] != 'completed' or data.get('forceRun', False)))
                or res.get('parametersChanged', True)):
            try:
                _start_simulation(data)
            except runner.Collision:
                pkdlog('{}: runner.Collision, ignoring start',
                       simulation_db.job_id(data))
            res = _simulation_run_status(data)
        return http_reply.gen_json(res)
Example #6
0
def _simulation_run_status_runner_daemon(data, quiet=False):
    """Look for simulation status and output

    Args:
        data (dict): request
        quiet (bool): don't write errors to log

    Returns:
        dict: status response
    """
    try:
        run_dir = simulation_db.simulation_run_dir(data)
        jhash = template_common.report_parameters_hash(data)
        status = runner_client.report_job_status(run_dir, jhash)
        is_running = status is runner_client.JobStatus.RUNNING
        rep = simulation_db.report_info(data)
        # Base response; may be wholly replaced by the extract-job result below.
        res = {'state': status.value}

        if not is_running:
            if status is not runner_client.JobStatus.MISSING:
                # Job finished (or failed): fetch its stored result.
                # NOTE(review): this call site unpacks a (result, error) pair
                # while the 'background_percent_complete' call below treats the
                # return as a single dict -- confirm run_extract_job's contract
                # differs per operation name.
                res, err = runner_client.run_extract_job(
                    run_dir,
                    jhash,
                    'result',
                    data,
                )
                if err:
                    return _simulation_error(err, 'error in read_result',
                                             run_dir)
        if simulation_db.is_parallel(data):
            # NOTE(review): passes the bool is_running where the other call
            # sites pass a data dict -- confirm the daemon expects this.
            new = runner_client.run_extract_job(
                run_dir,
                jhash,
                'background_percent_complete',
                is_running,
            )
            # Guarantee the keys the frontend polls on, then merge over res.
            new.setdefault('percentComplete', 0.0)
            new.setdefault('frameCount', 0)
            res.update(new)
        res['parametersChanged'] = rep.parameters_changed
        if res['parametersChanged']:
            pkdlog(
                '{}: parametersChanged=True req_hash={} cached_hash={}',
                rep.job_id,
                rep.req_hash,
                rep.cached_hash,
            )
        #TODO(robnagler) verify serial number to see what's newer
        # setdefault: values from the extract-job result take precedence over
        # the file-mtime fallbacks computed here.
        res.setdefault('startTime', _mtime_or_now(rep.input_file))
        res.setdefault('lastUpdateTime', _mtime_or_now(rep.run_dir))
        res.setdefault('elapsedTime', res['lastUpdateTime'] - res['startTime'])
        if is_running:
            # Tell the client when and how to poll again.
            res['nextRequestSeconds'] = simulation_db.poll_seconds(
                rep.cached_data)
            res['nextRequest'] = {
                'report': rep.model_name,
                'reportParametersHash': rep.cached_hash,
                'simulationId': rep.cached_data['simulationId'],
                'simulationType': rep.cached_data['simulationType'],
            }
        pkdc(
            '{}: processing={} state={} cache_hit={} cached_hash={} data_hash={}',
            rep.job_id,
            is_running,
            res['state'],
            rep.cache_hit,
            rep.cached_hash,
            rep.req_hash,
        )
    except Exception:
        # Any failure above is reported as a simulation error response rather
        # than propagated; quiet suppresses the error logging.
        return _simulation_error(pkdexc(), quiet=quiet)
    return res