Example #1
def run_model(request):
    '''
    Spawns a gevent greenlet that runs the model and writes the output to the
    web socket. Unless interrupted via halt_model(), it runs to completion.
    '''
    print('async_step route hit')
    log_prefix = 'req{0}: run_model()'.format(id(request))
    log.info('>>' + log_prefix)

    sess_id = request.session.session_id
    ns = request.registry.get('sio_ns')

    if ns is None:
        raise ValueError('no namespace associated with session')

    active_model = get_active_model(request)
    sid = ns.get_sockid_from_sessid(request.session.session_id)
    if sid is None:
        raise ValueError('no sock_session associated with pyramid_session')
    with ns.session(sid) as sock_session:
        sock_session['num_sent'] = 0
        if active_model and not ns.active_greenlets.get(sid):
            gl = ns.active_greenlets[sid] = gevent.spawn(
                execute_async_model, active_model, ns, sid, request)
            gl.session_hash = request.session_hash
            return None
        else:
            print("Already started")
            return None
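
For orientation, here is a minimal sketch of a client that consumes the events this greenlet produces. It assumes the python-socketio client package, a placeholder server URL, the default namespace, and the 'step', 'complete' and 'runtimeError' event names used by execute_async_model() (see Example #20 below); the actual namespace and payload shape depend on the server configuration.

# Minimal consumer sketch for the events emitted by the model-run greenlet.
# Assumptions: python-socketio client installed, server reachable at
# SERVER_URL, default namespace, event names as in execute_async_model().
import socketio

SERVER_URL = 'http://localhost:9899'  # placeholder

sio = socketio.Client()

@sio.on('step')
def on_step(output):
    # one step's output dict; keys depend on the model's outputters
    print('step received, keys:', sorted(output))

@sio.on('complete')
def on_complete(msg):
    print('model run complete:', msg)
    sio.disconnect()

@sio.on('runtimeError')
def on_error(msg):
    print('model run failed:', msg)
    sio.disconnect()

sio.connect(SERVER_URL)
sio.wait()

Only a subset of the events emitted by the server ('prepared', 'timeout', 'killed', etc.) is handled here.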
Example #2
def load_location_file(location_file, request):
    '''
        We would like to merge the current active model into the new model
        created by our location file prior to clearing our session
    '''
    if isdir(location_file):
        active_model = get_active_model(request)

        new_model = load(location_file)
        new_model._cache.enabled = False

        if active_model is not None:
            active_model._map = new_model._map
            active_model._time_step = new_model._time_step
            active_model._num_time_steps = new_model._num_time_steps
            active_model.merge(new_model)
        else:
            active_model = new_model

        name = split(location_file)[1]
        if name != '':
            active_model.name = name

        init_session_objects(request, force=True)

        log.debug("model loaded - begin registering objects")
        RegisterObject(active_model, request)

        set_active_model(request, active_model.id)
Example #3
def get_model(request):
    '''
        Returns Model object in JSON.
        - This method varies slightly from the common object method in that
          if we don't specify a model ID, we:
          - return the current active model if it exists or...
          - return the specification.
    '''
    ret = None
    obj_id = obj_id_from_url(request)

    session_lock = acquire_session_lock(request)
    log.info('  session lock acquired (sess:{}, thr_id: {})'.format(
        id(session_lock),
        current_thread().ident))

    try:
        if obj_id is None:
            my_model = get_active_model(request)
            if my_model is not None:
                ret = my_model.serialize(options=web_ser_opts)

        if ret is None:
            ret = get_object(request, implemented_types)
    finally:
        session_lock.release()
        log.info('  session lock released (sess:{}, thr_id: {})'.format(
            id(session_lock),
            current_thread().ident))

    return ret
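
The acquire/log/try/finally/release pattern above recurs in most of these views. A small context manager could factor it out; this is only a sketch, assuming the module-level log and the acquire_session_lock() helper used above, and is not part of the original module.

# Sketch: wrap the repeated session-lock bookkeeping in a context manager.
# acquire_session_lock() and log are the module-level names used above.
from contextlib import contextmanager
from threading import current_thread

@contextmanager
def locked_session(request):
    session_lock = acquire_session_lock(request)
    log.info('  session lock acquired (sess:{}, thr_id: {})'.format(
        id(session_lock), current_thread().ident))
    try:
        yield session_lock
    finally:
        session_lock.release()
        log.info('  session lock released (sess:{}, thr_id: {})'.format(
            id(session_lock), current_thread().ident))

get_model() could then reduce to a `with locked_session(request):` block around the lookup/serialize logic.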
Example #4
def load_location_file(location_file, request):
    '''
        We would like to merge the current active model into the new model
        created by our location file prior to clearing our session
    '''
    if isdir(location_file):
        active_model = get_active_model(request)

        new_model = Model.load(location_file)
        new_model._cache.enabled = False

        if active_model is not None:
            active_model._map = new_model._map
            active_model._time_step = new_model._time_step
            active_model._num_time_steps = new_model._num_time_steps
            active_model.merge(new_model)
        else:
            active_model = new_model

        name = split(location_file)[1]
        if name != '':
            active_model.name = name

        init_session_objects(request, force=True)

        log.debug("model loaded - begin registering objects")

        RegisterObject(active_model, request)

        set_active_model(request, active_model.id)
Example #5
def get_rewind(request):
    '''
        rewinds the current active Model.
    '''
    print('rewinding', request.session.session_id)
    active_model = get_active_model(request)
    ns = sess_namespaces.get(request.session.session_id, None)
    if active_model:
        session_lock = acquire_session_lock(request)
        log.info('  session lock acquired (sess:{}, thr_id: {})'
                 .format(id(session_lock), current_thread().ident))

        try:
            if (ns and ns.active_greenlet):
                ns.active_greenlet.kill(block=False)
                ns.num_sent = 0
            active_model.rewind()
        except Exception:
            raise cors_exception(request, HTTPUnprocessableEntity,
                                 with_stacktrace=True)
        finally:
            session_lock.release()
            log.info('  session lock released (sess:{}, thr_id: {})'
                     .format(id(session_lock), current_thread().ident))
    else:
        raise cors_exception(request, HTTPPreconditionFailed)
Example #6
def get_model(request):
    '''
        Returns Model object in JSON.
        - This method varies slightly from the common object method in that
          if we don't specify a model ID, we:
          - return the current active model if it exists or...
          - return the specification.
    '''
    ret = None
    obj_id = obj_id_from_url(request)
    gnome_sema = request.registry.settings['py_gnome_semaphore']
    gnome_sema.acquire()

    try:
        if not obj_id:
            my_model = get_active_model(request)
            if my_model:
                ret = my_model.serialize()
            else:
                ret = get_specifications(request, implemented_types)
        else:
            obj = get_session_object(obj_id, request)
            if obj:
                if ObjectImplementsOneOf(obj, implemented_types):
                    set_active_model(request, obj.id)
                    ret = obj.serialize()
                else:
                    # we refer to an object, but it is not a Model
                    raise cors_exception(request, HTTPBadRequest)
            else:
                raise cors_exception(request, HTTPNotFound)
    finally:
        gnome_sema.release()

    return ret
Example #7
def get_model(request):
    '''
        Returns Model object in JSON.
        - This method varies slightly from the common object method in that
          if we don't specify a model ID, we:
          - return the current active model if it exists or...
          - return the specification.
    '''
    ret = None
    obj_id = obj_id_from_url(request)
    gnome_sema = request.registry.settings['py_gnome_semaphore']
    gnome_sema.acquire()

    try:
        if not obj_id:
            my_model = get_active_model(request)
            if my_model:
                ret = my_model.serialize()
            else:
                ret = get_specifications(request, implemented_types)
        else:
            obj = get_session_object(obj_id, request)
            if obj:
                if ObjectImplementsOneOf(obj, implemented_types):
                    set_active_model(request, obj.id)
                    ret = obj.serialize()
                else:
                    # we refer to an object, but it is not a Model
                    raise cors_exception(request, HTTPBadRequest)
            else:
                raise cors_exception(request, HTTPNotFound)
    finally:
        gnome_sema.release()

    return ret
Example #8
def get_rewind(request):
    '''
        rewinds the current active Model.
    '''
    print('rewinding', request.session.session_id)
    active_model = get_active_model(request)
    ns = request.registry.get('sio_ns')
    if active_model:
        session_lock = acquire_session_lock(request)
        log.info('  session lock acquired (sess:{}, thr_id: {})'.format(
            id(session_lock),
            current_thread().ident))

        try:
            if ns:
                sio = ns.get_sockid_from_sessid(request.session.session_id)
                if (ns.active_greenlets.get(sio)):
                    with ns.session(sio) as sock_session:
                        ns.active_greenlets.get(sio).kill(block=False)
                        sock_session['num_sent'] = 0
            active_model.rewind()
        except Exception:
            raise cors_exception(request,
                                 HTTPUnprocessableEntity,
                                 with_stacktrace=True)
        finally:
            session_lock.release()
            log.info('  session lock released (sess:{}, thr_id: {})'.format(
                id(session_lock),
                current_thread().ident))
    else:
        raise cors_exception(request, HTTPPreconditionFailed)
Example #9
def update_model(request):
    '''
        Updates the Model and returns the updated Model object in JSON.
        - This method varies slightly from the common object method in that
          if we don't specify a model ID, we:
          - update the current active model if it exists or...
          - generate a 'Not Found' exception.
    '''
    log_prefix = 'req({0}): update_model():'.format(id(request))
    log.info('>>' + log_prefix)

    ret = None
    try:
        json_request = ujson.loads(request.body.decode('utf-8'))
    except Exception:
        raise cors_exception(request, HTTPBadRequest)

    if not JSONImplementsOneOf(json_request, implemented_types):
        raise cors_exception(request, HTTPNotImplemented)

    session_lock = acquire_session_lock(request)
    log.info('  {} session lock acquired (sess:{}, thr_id: {})'.format(
        log_prefix, id(session_lock),
        current_thread().ident))

    obj_id = obj_id_from_req_payload(json_request)
    if obj_id:
        active_model = get_session_object(obj_id, request)
    else:
        active_model = get_active_model(request)

    if active_model:
        try:
            if UpdateObject(active_model, json_request,
                            get_session_objects(request)):
                set_session_object(active_model, request)
            ret = active_model.serialize(options=web_ser_opts)
        except Exception:
            raise cors_exception(request,
                                 HTTPUnsupportedMediaType,
                                 with_stacktrace=True)
        finally:
            session_lock.release()
            log.info('  ' + log_prefix + 'session lock released...')
    else:
        session_lock.release()
        log.info('  {} session lock released (sess:{}, thr_id: {})'.format(
            log_prefix, id(session_lock),
            current_thread().ident))

        msg = ("raising cors_exception() in update_model. "
               "Updating model before it exists.")
        log.warning('  ' + log_prefix + msg)

        raise cors_exception(request, HTTPNotFound)

    log.info('<<' + log_prefix)
    return ret
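
Viewed from the client side, update_model() expects the request body to be a serialized model, typically obtained from get_model(), edited, and sent back. A hypothetical round trip with the requests library might look like this; the route path, the HTTP verb, and the edited field are placeholders, not confirmed API details.

# Hypothetical client round trip for update_model(); '/model', PUT, and the
# 'time_step' field are placeholders.
import requests

BASE = 'http://localhost:9899'   # placeholder server URL

with requests.Session() as http:
    model_json = http.get(BASE + '/model').json()   # fetch serialized model
    model_json['time_step'] = 1800                   # edit some field (example)
    resp = http.put(BASE + '/model', json=model_json)
    resp.raise_for_status()
    updated = resp.json()                            # serialized updated model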
Example #10
def update_model(request):
    '''
        Updates the Model and returns the updated Model object in JSON.
        - This method varies slightly from the common object method in that
          if we don't specify a model ID, we:
          - update the current active model if it exists or...
          - generate a 'Not Found' exception.
    '''
    log_prefix = 'req({0}): update_model():'.format(id(request))
    log.info('>>' + log_prefix)

    ret = None
    try:
        json_request = ujson.loads(request.body)
    except:
        raise cors_exception(request, HTTPBadRequest)

    if not JSONImplementsOneOf(json_request, implemented_types):
        raise cors_exception(request, HTTPNotImplemented)

    gnome_sema = request.registry.settings['py_gnome_semaphore']
    gnome_sema.acquire()
    log.info('  ' + log_prefix + 'semaphore acquired...')

    obj_id = obj_id_from_req_payload(json_request)
    if obj_id:
        active_model = get_session_object(obj_id, request)
    else:
        active_model = get_active_model(request)

    if active_model:
        try:
            if UpdateObject(active_model, json_request,
                            get_session_objects(request)):
                set_session_object(active_model, request)
            ret = active_model.serialize()
        except:
            raise cors_exception(request,
                                 HTTPUnsupportedMediaType,
                                 with_stacktrace=True)
        finally:
            gnome_sema.release()
            log.info('  ' + log_prefix + 'semaphore released...')
    else:
        gnome_sema.release()
        log.info('  ' + log_prefix + 'semaphore released...')

        msg = ("raising cors_exception() in update_model. "
               "Updating model before it exists.")
        log.warning('  ' + log_prefix + msg)

        raise cors_exception(request, HTTPNotFound)

    log.info('<<' + log_prefix)
    return ret
Example #11
def update_model(request):
    '''
        Updates the Model and returns the updated Model object in JSON.
        - This method varies slightly from the common object method in that
          if we don't specify a model ID, we:
          - update the current active model if it exists or...
          - generate a 'Not Found' exception.
    '''
    log_prefix = 'req({0}): update_model():'.format(id(request))
    log.info('>>' + log_prefix)

    ret = None
    try:
        json_request = ujson.loads(request.body)
    except:
        raise cors_exception(request, HTTPBadRequest)

    if not JSONImplementsOneOf(json_request, implemented_types):
        raise cors_exception(request, HTTPNotImplemented)

    gnome_sema = request.registry.settings['py_gnome_semaphore']
    gnome_sema.acquire()
    log.info('  ' + log_prefix + 'semaphore acquired...')

    obj_id = obj_id_from_req_payload(json_request)
    if obj_id:
        active_model = get_session_object(obj_id, request)
    else:
        active_model = get_active_model(request)

    if active_model:
        try:
            if UpdateObject(active_model, json_request,
                            get_session_objects(request)):
                set_session_object(active_model, request)
            ret = active_model.serialize()
        except:
            raise cors_exception(request, HTTPUnsupportedMediaType,
                                 with_stacktrace=True)
        finally:
            gnome_sema.release()
            log.info('  ' + log_prefix + 'semaphore released...')
    else:
        gnome_sema.release()
        log.info('  ' + log_prefix + 'semaphore released...')

        msg = ("raising cors_exception() in update_model. "
               "Updating model before it exists.")
        log.warning('  ' + log_prefix + msg)

        raise cors_exception(request, HTTPNotFound)

    log.info('<<' + log_prefix)
    return ret
Example #12
def get_rewind(request):
    '''
        rewinds the current active Model.
    '''
    active_model = get_active_model(request)
    if active_model:
        gnome_sema = request.registry.settings['py_gnome_semaphore']
        gnome_sema.acquire()

        try:
            active_model.rewind()
        except:
            raise cors_exception(request, HTTPUnprocessableEntity,
                                 with_stacktrace=True)
        finally:
            gnome_sema.release()
    else:
        raise cors_exception(request, HTTPPreconditionFailed)
Example #13
def download_file(request):
    log_prefix = 'req({0}): download_file():'.format(id(request))
    log.info('>>' + log_prefix)

    session_path = get_session_dir(request)
    file_path = sep.join(map(str, request.matchdict['file_path']))
    output_path = join(session_path, file_path)

    log.info('  {} output_path: {}'.format(log_prefix, output_path))

    try:
        model_name = get_active_model(request).name
    except Exception:
        raise cors_response(request, HTTPNotFound('No Active Model!'))

    if isdir(output_path):
        short_dirname = basename(output_path).split('.')[-1]
        output_zip_path = "{0}_{1}.zip".format(model_name, short_dirname)
        zip_path = join(output_path, output_zip_path)
        zf = zipfile.ZipFile(zip_path, "w")

        for dirname, _subdirs, files in walk(output_path):
            for filename in files:
                if (not filename.endswith(output_zip_path)
                        and not isdir(filename)):
                    zipfile_path = join(dirname, filename)
                    zf.write(zipfile_path, basename(zipfile_path))

        zf.close()

        response = FileResponse(zip_path, request)
        response.headers['Content-Disposition'] = (
            "attachment; filename={0}".format(basename(zip_path)))
        log.info('<<' + log_prefix)
        return response
    elif isfile(output_path):
        log.info('<<' + log_prefix)

        return FileResponse(output_path, request)
    else:
        raise cors_response(
            request,
            HTTPNotFound('File(s) requested do not '
                         'exist on the server!'))
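
On the client side, the file or zip served by download_file() can be streamed to disk; a hypothetical example with requests follows, where the URL and path segment are placeholders for whatever file_path the server exposes for the session.

# Hypothetical streamed download against download_file(); the URL is a
# placeholder.
import requests

url = 'http://localhost:9899/download/images'   # placeholder route/path

with requests.get(url, stream=True) as resp:
    resp.raise_for_status()
    with open('model_output.zip', 'wb') as fd:
        for chunk in resp.iter_content(chunk_size=64 * 1024):
            fd.write(chunk)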
Example #14
def get_rewind(request):
    '''
        rewinds the current active Model.
    '''
    active_model = get_active_model(request)
    if active_model:
        gnome_sema = request.registry.settings['py_gnome_semaphore']
        gnome_sema.acquire()

        try:
            active_model.rewind()
        except:
            raise cors_exception(request,
                                 HTTPUnprocessableEntity,
                                 with_stacktrace=True)
        finally:
            gnome_sema.release()
    else:
        raise cors_exception(request, HTTPPreconditionFailed)
Example #15
def download_model(request):
    '''
        Here is where we save the active model as a zipfile and
        download it to the client
    '''
    my_model = get_active_model(request)

    if my_model:
        tf = tempfile.NamedTemporaryFile()
        dir_name, base_name = os.path.split(tf.name)

        my_model.save(saveloc=dir_name, name=base_name)
        response_filename = ('{0}.zip'.format(my_model.name))

        tf.seek(0)

        response = request.response
        response.content_type = 'application/zip'
        response.content_disposition = (
            'attachment; filename={0}'.format(response_filename))
        response.app_iter = FileIter(tf)
        return response
    else:
        raise cors_response(request, HTTPNotFound('No Active Model!'))
Example #16
def download_model(request):
    '''
        Here is where we save the active model as a zipfile and
        download it to the client
    '''
    my_model = get_active_model(request)

    if my_model:
        tf = tempfile.NamedTemporaryFile()
        dir_name, base_name = os.path.split(tf.name)

        my_model.save(saveloc=dir_name, name=base_name)
        response_filename = ('{0}.zip'.format(my_model.name))

        tf.seek(0)

        response = request.response
        response.content_type = 'application/zip'
        response.content_disposition = ('attachment; filename={0}'
                                        .format(response_filename))
        response.app_iter = FileIter(tf)
        return response
    else:
        raise cors_response(request, HTTPNotFound('No Active Model!'))
Example #17
def get_full_run(request):
    '''
        Performs a full run of the current active Model, turning off any
        response options.
        Returns the final step results.
    '''
    active_model = get_active_model(request)
    if active_model:
        gnome_sema = request.registry.settings['py_gnome_semaphore']
        gnome_sema.acquire()

        try:
            weatherer_enabled_flags = [w.on for w in active_model.weatherers]

            for w in active_model.weatherers:
                if isinstance(w, (Skimmer, Burn, ChemicalDispersion)):
                    w.on = False

            active_model.rewind()

            drop_uncertain_models(request)

            if active_model.has_weathering_uncertainty:
                log.info('Model has weathering uncertainty')
                set_uncertain_models(request)
            else:
                log.info('Model does not have weathering uncertainty')

            begin = time.time()

            for step in active_model:
                output = step
                steps = get_uncertain_steps(request)

            end = time.time()

            if steps and 'WeatheringOutput' in output:
                nominal = output['WeatheringOutput']
                aggregate = defaultdict(list)
                low = {}
                high = {}
                full_output = {}

                for idx, step_output in enumerate(steps):
                    for k, v in step_output['WeatheringOutput'].items():
                        aggregate[k].append(v)

                for k, v in aggregate.items():
                    low[k] = min(v)
                    high[k] = max(v)

                full_output = {'time_stamp': nominal['time_stamp'],
                               'nominal': nominal,
                               'low': low,
                               'high': high}
                for idx, step_output in enumerate(steps):
                    full_output[idx] = step_output['WeatheringOutput']

                output['WeatheringOutput'] = full_output
                output['total_response_time'] = end - begin
            elif 'WeatheringOutput' in output:
                nominal = output['WeatheringOutput']
                full_output = {'time_stamp': nominal['time_stamp'],
                               'nominal': nominal,
                               'low': None,
                               'high': None}
                output['WeatheringOutput'] = full_output
                output['total_response_time'] = end - begin
        except:
            raise cors_exception(request, HTTPUnprocessableEntity,
                                 with_stacktrace=True)
        finally:
            for a, w in zip(weatherer_enabled_flags, active_model.weatherers):
                w.on = a
            gnome_sema.release()

        return output
    else:
        raise cors_exception(request, HTTPPreconditionFailed)
Example #18
def get_step(request):
    '''
        Generates and returns an image corresponding to the step.
    '''
    log_prefix = 'req({0}): get_step():'.format(id(request))
    log.info('>>' + log_prefix)

    active_model = get_active_model(request)
    if active_model:
        # generate the next step in the sequence.
        session_lock = acquire_session_lock(request)
        log.info('  {} session lock acquired (sess:{}, thr_id: {})'
                 .format(log_prefix, id(session_lock), current_thread().ident))

        try:
            if active_model.current_time_step == -1:
                # our first step, establish uncertain models
                drop_uncertain_models(request)

                log.info('\thas_weathering_uncertainty {0}'.
                         format(active_model.has_weathering_uncertainty))
                if active_model.has_weathering_uncertainty:
                    set_uncertain_models(request)
                else:
                    log.info('Model does not have weathering uncertainty')

            begin = time.time()
            output = active_model.step()

            begin_uncertain = time.time()
            steps = get_uncertain_steps(request)
            end = time.time()

            if steps and 'WeatheringOutput' in output:
                nominal = output['WeatheringOutput']
                aggregate = defaultdict(list)
                low = {}
                high = {}
                full_output = {}

                for idx, step_output in enumerate(steps):
                    # step_output could contain an exception from one
                    # of our uncertainty worker processes.  If so, then
                    # we should propagate the exception with its original
                    # context.
                    if (isinstance(step_output, tuple) and
                            len(step_output) >= 3 and
                            isinstance(step_output[1], Exception)):
                        raise step_output[1].with_traceback(step_output[2])

                    for k, v in step_output['WeatheringOutput'].items():
                        aggregate[k].append(v)

                for k, v in aggregate.items():
                    low[k] = min(v)
                    high[k] = max(v)

                full_output = {'time_stamp': nominal['time_stamp'],
                               'nominal': nominal,
                               'low': low,
                               'high': high}
                for idx, step_output in enumerate(steps):
                    full_output[idx] = step_output['WeatheringOutput']

                output['WeatheringOutput'] = full_output
                output['uncertain_response_time'] = end - begin_uncertain
                output['total_response_time'] = end - begin
            elif 'WeatheringOutput' in output:
                nominal = output['WeatheringOutput']
                full_output = {'time_stamp': nominal['time_stamp'],
                               'nominal': nominal,
                               'low': None,
                               'high': None}
                output['WeatheringOutput'] = full_output
                output['uncertain_response_time'] = end - begin_uncertain
                output['total_response_time'] = end - begin

        except StopIteration:
            log.info('  ' + log_prefix + 'stop iteration exception...')
            drop_uncertain_models(request)
            raise cors_exception(request, HTTPNotFound)
        except Exception:
            log.info('  ' + log_prefix + 'unknown exception...')
            raise cors_exception(request, HTTPUnprocessableEntity,
                                 with_stacktrace=True)
        finally:
            session_lock.release()
            log.info('  {} session lock released (sess:{}, thr_id: {})'
                     .format(log_prefix, id(session_lock),
                             current_thread().ident))

        return output
    else:
        raise cors_exception(request, HTTPPreconditionFailed,
                             explanation=(b'Your session timed out - the model is no longer active'))
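
The low/high bookkeeping above is just a per-key min/max over the uncertain WeatheringOutput dicts. Isolated, with made-up numbers rather than real model output, the pattern is:

# The per-key min/max aggregation used in get_step(), in isolation.
# The dict keys and values below are made up for illustration.
from collections import defaultdict

uncertain_outputs = [
    {'evaporated': 10.0, 'dispersed': 2.0},
    {'evaporated': 12.5, 'dispersed': 1.5},
    {'evaporated': 11.0, 'dispersed': 2.5},
]

aggregate = defaultdict(list)
for step_output in uncertain_outputs:
    for k, v in step_output.items():
        aggregate[k].append(v)

low = {k: min(v) for k, v in aggregate.items()}
high = {k: max(v) for k, v in aggregate.items()}

assert low == {'evaporated': 10.0, 'dispersed': 1.5}
assert high == {'evaporated': 12.5, 'dispersed': 2.5}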
Example #19
def get_full_run(request):
    '''
        Performs a full run of the current active Model, turning off any
        response options.
        Returns the final step results.
    '''
    log_prefix = 'req({0}): get_full_run():'.format(id(request))
    log.info('>>' + log_prefix)

    response_on = request.json_body['response_on']

    active_model = get_active_model(request)
    if active_model:
        session_lock = acquire_session_lock(request)
        log.info('  session lock acquired (sess:{}, thr_id: {})'
                 .format(id(session_lock), current_thread().ident))

        try:
            weatherer_enabled_flags = [w.on for w in active_model.weatherers]

            if response_on is False:
                for w in active_model.weatherers:
                    if isinstance(w, (Skimmer, Burn, ChemicalDispersion)):
                        w.on = False

            active_model.rewind()

            drop_uncertain_models(request)

            if active_model.has_weathering_uncertainty:
                log.info('Model has weathering uncertainty')
                set_uncertain_models(request)
            else:
                log.info('Model does not have weathering uncertainty')

            begin = time.time()

            for step in active_model:
                output = step
                steps = get_uncertain_steps(request)

            end = time.time()

            if steps and 'WeatheringOutput' in output:
                nominal = output['WeatheringOutput']
                aggregate = defaultdict(list)
                low = {}
                high = {}
                full_output = {}

                for idx, step_output in enumerate(steps):
                    for k, v in step_output['WeatheringOutput'].items():
                        aggregate[k].append(v)

                for k, v in aggregate.items():
                    low[k] = min(v)
                    high[k] = max(v)

                full_output = {'time_stamp': nominal['time_stamp'],
                               'nominal': nominal,
                               'low': low,
                               'high': high}
                for idx, step_output in enumerate(steps):
                    full_output[idx] = step_output['WeatheringOutput']

                output['WeatheringOutput'] = full_output
                output['total_response_time'] = end - begin
            elif 'WeatheringOutput' in output:
                nominal = output['WeatheringOutput']
                full_output = {'time_stamp': nominal['time_stamp'],
                               'nominal': nominal,
                               'low': None,
                               'high': None}
                output['WeatheringOutput'] = full_output
                output['total_response_time'] = end - begin

            active_model.rewind()
        except Exception:
            raise cors_exception(request, HTTPUnprocessableEntity,
                                 with_stacktrace=True)
        finally:
            for a, w in zip(weatherer_enabled_flags, active_model.weatherers):
                w.on = a
            session_lock.release()
            log.info('  session lock released (sess:{}, thr_id: {})'
                     .format(id(session_lock), current_thread().ident))

        log.info('<<' + log_prefix)
        return output
    else:
        raise cors_exception(request, HTTPPreconditionFailed)
Example #20
def run_model(request):
    '''
    Spawns a gevent greenlet that runs the model and writes the output to the
    web socket. Unless interrupted via halt_model(), it runs to completion.
    '''
    print('async_step route hit')
    log_prefix = 'req{0}: run_model()'.format(id(request))
    log.info('>>' + log_prefix)

    sess_id = request.session.session_id
    global sess_namespaces

    ns = sess_namespaces.get(sess_id, None)
    if ns is None:
        raise ValueError('no namespace associated with session')

    def execute_async_model(active_model, socket_namespace, request):
        '''
        Meant to run in a greenlet. This function should take an active model
        and run it, writing each step's output to the socket.
        '''
        print(request.session_hash)
        log = get_greenlet_logger(request)
        try:
            wait_time = 16
            socket_namespace.emit('prepared')

            unlocked = socket_namespace.lock.wait(wait_time)
            if not unlocked:
                socket_namespace.emit('timeout',
                                      'Model not started, timed out after '
                                      '{0} sec'.format(wait_time))
                socket_namespace.on_kill()

            log.info('model run triggered')
            while True:
                output = None
                try:
                    if active_model.current_time_step == -1:
                        # our first step, establish uncertain models
                        drop_uncertain_models(request)

                        if active_model.has_weathering_uncertainty:
                            log.info('Model has weathering uncertainty')
                            set_uncertain_models(request)
                        else:
                            log.info('Model does not have '
                                     'weathering uncertainty')

                    begin = time.time()

                    output = active_model.step()

                    begin_uncertain = time.time()
                    steps = get_uncertain_steps(request)
                    end = time.time()

                    if steps and 'WeatheringOutput' in output:
                        nominal = output['WeatheringOutput']
                        aggregate = defaultdict(list)
                        low = {}
                        high = {}
                        full_output = {}

                        for idx, step_output in enumerate(steps):
                            # step_output could contain an exception from one
                            # of our uncertainty worker processes.  If so, then
                            # we should propagate the exception with its
                            # original context.
                            if (isinstance(step_output, tuple) and
                                    len(step_output) >= 3 and
                                    isinstance(step_output[1], Exception)):
                                raise step_output[1].with_traceback(step_output[2])

                            for k, v in step_output['WeatheringOutput'].items():
                                aggregate[k].append(v)

                        for k, v in aggregate.items():
                            low[k] = min(v)
                            high[k] = max(v)

                        full_output = {'time_stamp': nominal['time_stamp'],
                                       'nominal': nominal,
                                       'low': low,
                                       'high': high}

                        for idx, step_output in enumerate(steps):
                            full_output[idx] = step_output['WeatheringOutput']

                        output['WeatheringOutput'] = full_output
                        output['uncertain_response_time'] = end - begin_uncertain
                        output['total_response_time'] = end - begin
                    elif 'WeatheringOutput' in output:
                        nominal = output['WeatheringOutput']
                        full_output = {'time_stamp': nominal['time_stamp'],
                                       'nominal': nominal,
                                       'low': None,
                                       'high': None}

                        output['WeatheringOutput'] = full_output
                        output['uncertain_response_time'] = end - begin_uncertain
                        output['total_response_time'] = end - begin
                except StopIteration:
                    log.info('  {} stop iteration exception...'
                             .format(log_prefix))
                    drop_uncertain_models(request)
                    break
                except Exception:
                    exc_type, exc_value, _exc_traceback = sys.exc_info()
                    traceback.print_exc()
                    if ('develop_mode' in request.registry.settings.keys() and
                            request.registry.settings['develop_mode'].lower() == 'true'):
                        import pdb
                        pdb.post_mortem(sys.exc_info()[2])

                    msg = ('  {}{}'.format(
                        log_prefix,
                        traceback.format_exception_only(exc_type, exc_value)))
                    log.critical(msg)
                    raise

                if output:
                    socket_namespace.num_sent += 1
                    log.debug(socket_namespace.num_sent)
                    socket_namespace.emit('step', output)

                if not socket_namespace.is_async:
                    socket_namespace.lock.clear()
                    print('lock!')

                # kill greenlet after 100 minutes unless unlocked
                wait_time = 6000
                unlocked = socket_namespace.lock.wait(wait_time)
                if not unlocked:
                    socket_namespace.emit('timeout',
                                          'Model run timed out after {0} sec'
                                          .format(wait_time))
                    socket_namespace.on_kill()

                gevent.sleep(0.001)
        except GreenletExit:
            log.info('Greenlet exiting early')
            socket_namespace.emit('killed', 'Model run terminated early')
            return GreenletExit

        except Exception:
            log.info('Greenlet terminated due to exception')

            json_exc = json_exception(2, True)
            socket_namespace.emit('runtimeError', json_exc['message'])

        socket_namespace.emit('complete', 'Model run completed')

    active_model = get_active_model(request)

    if active_model and not ns.active_greenlet:
        ns.active_greenlet = ns.spawn(execute_async_model, active_model,
                                      ns, request)
        ns.active_greenlet.session_hash = request.session_hash
        return None
    else:
        print "Already started"
        return None
Example #21
def get_step(request):
    '''
        Generates and returns an image corresponding to the step.
    '''
    log_prefix = 'req({0}): get_step():'.format(id(request))
    log.info('>>' + log_prefix)

    active_model = get_active_model(request)
    if active_model:
        # generate the next step in the sequence.
        gnome_sema = request.registry.settings['py_gnome_semaphore']
        gnome_sema.acquire()
        log.info('  ' + log_prefix + 'semaphore acquired...')

        try:
            if active_model.current_time_step == -1:
                # our first step, establish uncertain models
                drop_uncertain_models(request)

                log.info('\thas_weathering_uncertainty {0}'.format(
                    active_model.has_weathering_uncertainty))
                if active_model.has_weathering_uncertainty:
                    set_uncertain_models(request)
                else:
                    log.info('Model does not have weathering uncertainty')

            begin = time.time()
            output = active_model.step()

            begin_uncertain = time.time()
            steps = get_uncertain_steps(request)
            end = time.time()

            if steps and 'WeatheringOutput' in output:
                nominal = output['WeatheringOutput']
                aggregate = defaultdict(list)
                low = {}
                high = {}
                full_output = {}

                for idx, step_output in enumerate(steps):
                    for k, v in step_output['WeatheringOutput'].items():
                        aggregate[k].append(v)

                for k, v in aggregate.items():
                    low[k] = min(v)
                    high[k] = max(v)

                full_output = {
                    'time_stamp': nominal['time_stamp'],
                    'nominal': nominal,
                    'low': low,
                    'high': high
                }
                for idx, step_output in enumerate(steps):
                    full_output[idx] = step_output['WeatheringOutput']

                output['WeatheringOutput'] = full_output
                output['uncertain_response_time'] = end - begin_uncertain
                output['total_response_time'] = end - begin
            elif 'WeatheringOutput' in output:
                nominal = output['WeatheringOutput']
                full_output = {
                    'time_stamp': nominal['time_stamp'],
                    'nominal': nominal,
                    'low': None,
                    'high': None
                }
                output['WeatheringOutput'] = full_output
                output['uncertain_response_time'] = end - begin_uncertain
                output['total_response_time'] = end - begin

        except StopIteration:
            log.info('  ' + log_prefix + 'stop iteration exception...')
            drop_uncertain_models(request)
            raise cors_exception(request, HTTPNotFound)
        except:
            log.info('  ' + log_prefix + 'unknown exception...')
            raise cors_exception(request,
                                 HTTPUnprocessableEntity,
                                 with_stacktrace=True)
        finally:
            gnome_sema.release()
            log.info('  ' + log_prefix + 'semaphore released...')

        return output
    else:
        raise cors_exception(request, HTTPPreconditionFailed)
Example #22
def get_full_run(request):
    '''
        Performs a full run of the current active Model, turning off any
        response options.
        Returns the final step results.
    '''
    active_model = get_active_model(request)
    if active_model:
        gnome_sema = request.registry.settings['py_gnome_semaphore']
        gnome_sema.acquire()

        try:
            weatherer_enabled_flags = [w.on for w in active_model.weatherers]

            for w in active_model.weatherers:
                if isinstance(w, (Skimmer, Burn, ChemicalDispersion)):
                    w.on = False

            active_model.rewind()

            drop_uncertain_models(request)

            if active_model.has_weathering_uncertainty:
                log.info('Model has weathering uncertainty')
                set_uncertain_models(request)
            else:
                log.info('Model does not have weathering uncertainty')

            begin = time.time()

            for step in active_model:
                output = step
                steps = get_uncertain_steps(request)

            end = time.time()

            if steps and 'WeatheringOutput' in output:
                nominal = output['WeatheringOutput']
                aggregate = defaultdict(list)
                low = {}
                high = {}
                full_output = {}

                for idx, step_output in enumerate(steps):
                    for k, v in step_output['WeatheringOutput'].items():
                        aggregate[k].append(v)

                for k, v in aggregate.items():
                    low[k] = min(v)
                    high[k] = max(v)

                full_output = {
                    'time_stamp': nominal['time_stamp'],
                    'nominal': nominal,
                    'low': low,
                    'high': high
                }
                for idx, step_output in enumerate(steps):
                    full_output[idx] = step_output['WeatheringOutput']

                output['WeatheringOutput'] = full_output
                output['total_response_time'] = end - begin
            elif 'WeatheringOutput' in output:
                nominal = output['WeatheringOutput']
                full_output = {
                    'time_stamp': nominal['time_stamp'],
                    'nominal': nominal,
                    'low': None,
                    'high': None
                }
                output['WeatheringOutput'] = full_output
                output['total_response_time'] = end - begin
        except:
            raise cors_exception(request,
                                 HTTPUnprocessableEntity,
                                 with_stacktrace=True)
        finally:
            for a, w in zip(weatherer_enabled_flags, active_model.weatherers):
                w.on = a
            gnome_sema.release()

        return output
    else:
        raise cors_exception(request, HTTPPreconditionFailed)
Example #23
def run_export_model(request):
    '''
    Configures the active model as specified by the request, then
    spawns a gevent greenlet that runs the model, writing only the step
    number to the web socket.

    When the greenlet running the model dies, a linked cleanup function
    removes the outputters that were added.
    '''
    print('async export hit')
    log_prefix = 'req{0}: run_export_model()'.format(id(request))
    log.info('>>' + log_prefix)

    sess_id = request.session.session_id
    ns = request.registry.get('sio_ns')

    if ns is None:
        raise ValueError('no namespace associated with session')

    active_model = get_active_model(request)

    #setup temporary outputters and temporary output directory
    session_path = get_session_dir(request)
    temporary_outputters = []
    payload = ujson.loads(request.body)
    outpjson = payload['outputters']
    model_filename = payload['model_name']
    td = tempfile.mkdtemp()
    for itm in list(outpjson.values()):
        itm['filename'] = os.path.join(td, itm['filename'])
        obj = CreateObject(itm, get_session_objects(request))
        temporary_outputters.append(obj)
    for o in temporary_outputters:
        #separated these just in case an exception occurs when
        #creating an outputter, which may leave a different successfully added
        #outputter behind if one was created before the exception
        active_model.outputters += o
        log.info('attaching export outputter: ' + o.filename)

    sid = ns.get_sockid_from_sessid(request.session.session_id)

    def get_export_cleanup():
        def cleanup(grn):
            try:
                #remove outputters from the model
                num = 0
                for m in temporary_outputters:
                    active_model.outputters.remove(m.id)
                    num += 1
                active_model.rewind()
                log.info(grn.__repr__() + ': cleaned up ' + str(num) +
                         ' outputters')

                end_filename = None
                if (grn.exception or isinstance(grn.value, GreenletExit)):
                    #A cleanly stopped Greenlet may exit with GreenletExit
                    #Do not consider this a 'successful' export run even if files exist
                    ns.emit('export_failed', room=sid)
                else:
                    if len(temporary_outputters) > 1:
                        #need to zip up outputs
                        end_filename = model_filename + '_output.zip'
                        zipfile_ = zipfile.ZipFile(
                            os.path.join(session_path, end_filename),
                            'w',
                            compression=zipfile.ZIP_DEFLATED)
                        for m in temporary_outputters:
                            obj_fn = m.filename
                            if not os.path.exists(obj_fn):
                                obj_fn = obj_fn + '.zip'  #special case for shapefile outputter which strips extensions...
                            zipfile_.write(obj_fn, os.path.basename(obj_fn))
                    else:
                        #only one output file, because one outputter selected
                        obj_fn = temporary_outputters[0].filename
                        if not os.path.exists(obj_fn):
                            obj_fn = obj_fn + '.zip'  #special case for shapefile outputter
                        end_filename = os.path.basename(obj_fn)

                        shutil.move(obj_fn,
                                    os.path.join(session_path, end_filename))

                    ns.emit('export_finished', end_filename, room=sid)

            except Exception:
                if ('develop_mode' in list(request.registry.settings.keys())
                        and request.registry.settings['develop_mode'].lower()
                        == 'true'):
                    import pdb
                    pdb.post_mortem(sys.exc_info()[2])
                raise

        return cleanup

    if sid is None:
        raise ValueError('no sock_session associated with pyramid_session')
    with ns.session(sid) as sock_session:
        sock_session['num_sent'] = 0
        if active_model and not ns.active_greenlets.get(sid):
            gl = ns.active_greenlets[sid] = gevent.spawn(
                execute_async_model, active_model, ns, sid, request)
            gl.session_hash = request.session_hash
            gl.link(get_export_cleanup())
            return None
        else:
            print("Already started")
            return None
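
The gl.link(get_export_cleanup()) call is what guarantees the temporary outputters are removed however the run ends: the linked callback fires on normal completion, on failure, and after a kill(). In isolation, the gevent pattern looks like this (all names below are illustrative only):

# gevent spawn/link sketch: the linked callback receives the finished
# greenlet and can inspect grn.exception / grn.value, as the export
# cleanup above does.
import gevent
from gevent import GreenletExit

def run():
    gevent.sleep(0.1)
    return 'done'

def cleanup(grn):
    # grn.exception is set on failure; a killed greenlet ends with
    # grn.value being a GreenletExit instance.
    if grn.exception or isinstance(grn.value, GreenletExit):
        print('run did not finish cleanly')
    else:
        print('run finished:', grn.value)

gl = gevent.spawn(run)
gl.link(cleanup)
gl.join()
gevent.sleep(0)   # give the hub a turn so the linked callback runs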
Example #24
def get_step(request):
    '''
        Generates and returns an image corresponding to the step.
    '''
    log_prefix = 'req({0}): get_step():'.format(id(request))
    log.info('>>' + log_prefix)

    active_model = get_active_model(request)
    if active_model:
        # generate the next step in the sequence.
        gnome_sema = request.registry.settings['py_gnome_semaphore']
        gnome_sema.acquire()
        log.info('  ' + log_prefix + 'semaphore acquired...')

        try:
            if active_model.current_time_step == -1:
                # our first step, establish uncertain models
                drop_uncertain_models(request)

                log.info('\thas_weathering_uncertainty {0}'.
                         format(active_model.has_weathering_uncertainty))
                if active_model.has_weathering_uncertainty:
                    set_uncertain_models(request)
                else:
                    log.info('Model does not have weathering uncertainty')

            begin = time.time()
            output = active_model.step()

            begin_uncertain = time.time()
            steps = get_uncertain_steps(request)
            end = time.time()

            if steps and 'WeatheringOutput' in output:
                nominal = output['WeatheringOutput']
                aggregate = defaultdict(list)
                low = {}
                high = {}
                full_output = {}

                for idx, step_output in enumerate(steps):
                    for k, v in step_output['WeatheringOutput'].items():
                        aggregate[k].append(v)

                for k, v in aggregate.items():
                    low[k] = min(v)
                    high[k] = max(v)

                full_output = {'time_stamp': nominal['time_stamp'],
                               'nominal': nominal,
                               'low': low,
                               'high': high}
                for idx, step_output in enumerate(steps):
                    full_output[idx] = step_output['WeatheringOutput']

                output['WeatheringOutput'] = full_output
                output['uncertain_response_time'] = end - begin_uncertain
                output['total_response_time'] = end - begin
            elif 'WeatheringOutput' in output:
                nominal = output['WeatheringOutput']
                full_output = {'time_stamp': nominal['time_stamp'],
                               'nominal': nominal,
                               'low': None,
                               'high': None}
                output['WeatheringOutput'] = full_output
                output['uncertain_response_time'] = end - begin_uncertain
                output['total_response_time'] = end - begin

        except StopIteration:
            log.info('  ' + log_prefix + 'stop iteration exception...')
            drop_uncertain_models(request)
            raise cors_exception(request, HTTPNotFound)
        except:
            log.info('  ' + log_prefix + 'unknown exception...')
            raise cors_exception(request, HTTPUnprocessableEntity,
                                 with_stacktrace=True)
        finally:
            gnome_sema.release()
            log.info('  ' + log_prefix + 'semaphore released...')

        return output
    else:
        raise cors_exception(request, HTTPPreconditionFailed)