Example #1
def mark_as_done(job_id, walltime=1, cputime=1):
    """ Marks a job as done without running it; its user object is set
        to None. For now, only used explicitly by the user. """
    set_job_userobject(job_id, None)
    cache = Cache(Cache.DONE)
    cache.captured_stderr = ""
    cache.captured_stdout = ""
    cache.state = Cache.DONE
    cache.timestamp = time()
    cache.walltime_used = walltime  # XXX: use None instead?
    cache.cputime_used = cputime
    cache.host = get_compmake_config('hostname')  # XXX
    set_job_cache(job_id, cache)
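
A minimal usage sketch (the job id is made up; assumes an active compmake session that the storage helpers above write to):

    # Pretend a hypothetical job finished without running it here.
    mark_as_done('stats-report')
    # Record measured times when they are known.
    mark_as_done('stats-report', walltime=12.3, cputime=11.8)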
Example #2
def use_filesystem(directory=None, compress=False):
    db = CompmakeGlobalState.compmake_db     
    
    if directory is None:
        directory = get_compmake_config('path')
    
    if db is not None and isinstance(db, StorageFilesystem):
        # Already using a filesystem backend; we only switch directories.
        # logger.warning('Switching from db %r to %r' % (db, directory))
        pass

    directory = os.path.expandvars(directory)
    directory = os.path.expanduser(directory)
    
    sf = StorageFilesystem(directory, compress=compress)
    set_compmake_db(sf)
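
A usage sketch; the paths are hypothetical, and both "~" and environment variables are accepted because they are expanded above:

    use_filesystem('~/my-project/compmake')
    use_filesystem('$SCRATCH/compmake', compress=True)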
Example #3
def get_readline():
    """
    Returns a reference to the readline (or pyreadline) module if one is
    available and the config "readline" is True, otherwise None.

    :return: Reference to the readline module, or None.
    """
    use_readline = get_compmake_config('readline')
    if not use_readline:
        return None
    try:
        import readline
        return readline
    except BaseException as e:
        try:
            import pyreadline as readline  # @UnresolvedImport
            return readline
        except Exception as e2:
            msg = 'Neither readline nor pyreadline is available.'
            msg += '\n- readline error: %s' % e
            msg += '\n- pyreadline error: %s' % e2
            logger.warning(msg)
            return None
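
A sketch of the intended call pattern; guarding on None follows this function's contract, and parse_and_bind is standard readline API:

    rl = get_readline()
    if rl is not None:
        # Tab completion is enabled only when readline/pyreadline
        # imported and the 'readline' config flag is on.
        rl.parse_and_bind('tab: complete')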
Example #4
def make(job_id):
    """ 
        Makes a single job. Returns the user-object or raises JobFailed
        or JobInterrupted. Also SystemExit, KeyboardInterrupt, MemoryError are 
        captured.
    """
    host = get_compmake_config('hostname')

    setproctitle(job_id)

    # TODO: should we make sure we are up to date?
    up, reason = up_to_date(job_id)  # @UnusedVariable
    cache = get_job_cache(job_id)
    if up:
        # assert is_job_userobject_available(job_id)
        return get_job_userobject(job_id)
    else:
        computation = get_job(job_id)

        assert cache.state in [Cache.NOT_STARTED, Cache.IN_PROGRESS,
                               Cache.BLOCKED,
                               Cache.DONE, Cache.FAILED]

        if cache.state in [Cache.NOT_STARTED, Cache.FAILED, Cache.BLOCKED]:
            cache.state = Cache.IN_PROGRESS
        elif cache.state == Cache.IN_PROGRESS:
            pass
        elif cache.state == Cache.DONE:
            # DONE but not up to date, so we recompute.
            assert not up
        else:
            assert False

        # update state
        cache.time_start = time()
        cpu_start = clock()
        set_job_cache(job_id, cache)

        def progress_callback(stack):
            publish('job-progress-plus', job_id=job_id, host=host, stack=stack)

        init_progress_tracking(progress_callback)

        user_object = None

        capture = OutputCapture(prefix=job_id,
                                echo_stdout=False,
                                echo_stderr=False)

        # TODO: add whether we should just capture and not echo
        old_emit = logging.StreamHandler.emit

        def my_emit(_, log_record):
            # Note that log_record.msg might be an exception.
            msg = colorize_loglevel(log_record.levelno, str(log_record.msg))
            name = log_record.name
            # This will be captured by OutputCapture anyway.
            print('%s:%s' % (name, msg))

        logging.StreamHandler.emit = my_emit

        try:
            user_object = computation.compute()

        except KeyboardInterrupt:
            publish('job-interrupted', job_id=job_id, host=host)
            mark_as_failed(job_id, 'KeyboardInterrupt',
                           traceback.format_exc())
            raise JobInterrupted('Keyboard interrupt')
        except (Exception, SystemExit, MemoryError) as e:
            bt = traceback.format_exc()
            mark_as_failed(job_id, str(e), bt)

            publish('job-failed', job_id=job_id,
                    host=host, reason=str(e), bt=bt)
            raise JobFailed('Job %s failed on host %s: %s' % (job_id, host, e))
        finally:
            capture.deactivate()
            # even if we send an error, let's save the output of the process
            cache = get_job_cache(job_id)

            cache.captured_stderr = \
                capture.stderr_replacement.buffer.getvalue()
            cache.captured_stdout = \
                capture.stdout_replacement.buffer.getvalue()

            # Do not save more than a few lines
            max_lines = 10
            cache.captured_stderr = limit_to_last_lines(cache.captured_stderr,
                                                        max_lines)
            cache.captured_stdout = limit_to_last_lines(cache.captured_stdout,
                                                        max_lines)

            set_job_cache(job_id, cache)

            logging.StreamHandler.emit = old_emit

        set_job_userobject(job_id, user_object)

        cache.state = Cache.DONE
        cache.timestamp = time()
        walltime = cache.timestamp - cache.time_start
        cputime = clock() - cpu_start
        # FIXME walltime/cputime not precise
        cache.walltime_used = walltime
        cache.cputime_used = cputime
        cache.host = get_compmake_config('hostname')  # XXX

        set_job_cache(job_id, cache)

        publish('job-succeeded', job_id=job_id, host=host)

        # TODO: clear these records in other place
        return user_object
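
A hedged sketch of a caller for this version of make(); JobFailed and JobInterrupted are the failure modes named in the docstring, and run_one is a made-up helper:

    def run_one(job_id):
        # Sketch only: a real scheduler would also check up_to_date()
        # and dependency order before calling make().
        try:
            return make(job_id)
        except JobFailed as e:
            print('job failed: %s' % e)
        except JobInterrupted:
            print('interrupted; the job was marked as failed')
            raise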
Example #5
def make(job_id, context, echo=False):  # @UnusedVariable
    """
        Makes a single job.

        Returns a dictionary with fields:
             "user_object"
             "user_object_deps" = set of Promises
             "new_jobs" -> new jobs defined
             "deleted_jobs" -> jobs that were defined but not anymore

        Raises JobFailed
        or JobInterrupted. Also SystemExit, KeyboardInterrupt, MemoryError are
        captured.
    """
    db = context.get_compmake_db()

    int_make = IntervalTimer()

    host = 'hostname'  # XXX

    if get_compmake_config('set_proc_title'):
        setproctitle('cm-%s' % job_id)

    # TODO: should we make sure we are up to date?
    # (A previous version checked up_to_date(job_id, db=db) here and
    # returned early; that could only trigger if another compmake
    # process was using the same DB, and it was not right anyway.)

    job = get_job(job_id, db=db)
    cache = get_job_cache(job_id, db=db)

    if cache.state == Cache.DONE:
        prev_defined_jobs = set(cache.jobs_defined)
        # print('%s had previously defined %s' % (job_id, prev_defined_jobs))
    else:
        # print('%s was not DONE' % job_id)
        prev_defined_jobs = None

    # Note that at this point we save important information in the Cache,
    # so setting cache.state = Cache.IN_PROGRESS here would destroy it:
    # set_job_cache(job_id, cache, db=db)

    # TODO: delete previous user object

    def progress_callback(stack):
        publish(context, 'job-progress-plus', job_id=job_id, host=host,
                stack=stack)

    init_progress_tracking(progress_callback)

    disable_capture = False
    if disable_capture:
        capture = None
    else:
        echo = False
        capture = OutputCapture(context=context, prefix=job_id,
                                # This is instantaneous echo and should be False
                                # They will generate events anyway.
                                echo_stdout=echo,
                                echo_stderr=echo)

    # TODO: add whether we should just capture and not echo
    old_emit = logging.StreamHandler.emit

    from compmake.ui.coloredlog import colorize_loglevel

    FORMAT = "%(name)10s|%(filename)15s:%(lineno)-4s - %(funcName)-15s| %(message)s"

    formatter = Formatter(FORMAT)

    class Store(object):
        nhidden = 0

    def my_emit(_, log_record):
        # Note that log_record.msg might be an exception.
        try:
            try:
                s = str(log_record.msg)
            except UnicodeEncodeError:
                s = unicode(log_record.msg)  # Python 2 fallback
            except BaseException:
                s = 'Could not print log_record %s' % id(log_record)
            log_record.msg = colorize_loglevel(log_record.levelno, s)
            res = formatter.format(log_record)
            # This will be captured by OutputCapture anyway.
            print(res)
        except BaseException:
            Store.nhidden += 1
    logging.StreamHandler.emit = my_emit

    already = set(context.get_jobs_defined_in_this_session())

    def get_deleted_jobs():
        generated = set(context.get_jobs_defined_in_this_session()) - already
        # print('failure: rolling back %s' % generated)

        from compmake.ui.ui import delete_jobs_recurse_definition

        todelete_ = set()
        # delete the jobs that were previously defined
        if prev_defined_jobs:
            todelete_.update(prev_defined_jobs)
        # and also the ones that were generated
        todelete_.update(generated)

        deleted_jobs_ = delete_jobs_recurse_definition(jobs=todelete_, db=db)
        # now we failed, so we need to roll back other changes
        # to the db
        return deleted_jobs_

    try:
        result = job_compute(job=job, context=context)

        assert isinstance(result, dict) and len(result) == 5
        user_object = result['user_object']
        new_jobs = result['new_jobs']
        int_load_results = result['int_load_results']
        int_compute = result['int_compute']
        int_gc = result['int_gc']
        int_gc.stop()

    except KeyboardInterrupt as e:
        bt = traceback.format_exc()
        deleted_jobs = get_deleted_jobs()
        mark_as_failed(job_id, 'KeyboardInterrupt: ' + str(e), backtrace=bt, db=db)

        cache = get_job_cache(job_id, db=db)
        if capture is not None:
            cache.captured_stderr = capture.get_logged_stderr()
            cache.captured_stdout = capture.get_logged_stdout()
        else:
            msg = '(Capture turned off.)'
            cache.captured_stderr = msg
            cache.captured_stdout = msg
        set_job_cache(job_id, cache, db=db)

        raise JobInterrupted(job_id=job_id, deleted_jobs=deleted_jobs)

    except BaseException as e:
        # BaseException already covers SystemExit, MemoryError and the
        # other classes named in the docstring.
        bt = traceback.format_exc()
        s = '%s: %s' % (type(e).__name__, e)
        mark_as_failed(job_id, s, backtrace=bt, db=db)
        deleted_jobs = get_deleted_jobs()

        cache = get_job_cache(job_id, db=db)
        if capture is not None:
            cache.captured_stderr = capture.get_logged_stderr()
            cache.captured_stdout = capture.get_logged_stdout()
        else:
            msg = '(Capture turned off.)'
            cache.captured_stderr = msg
            cache.captured_stdout = msg

        set_job_cache(job_id, cache, db=db)

        raise JobFailed(job_id=job_id, reason=s, bt=bt,
                        deleted_jobs=deleted_jobs)
    finally:
        int_finally = IntervalTimer()
        if capture is not None:
            capture.deactivate()
        # even if we send an error, let's save the output of the process
        logging.StreamHandler.emit = old_emit
        if Store.nhidden > 0:
            msg = 'compmake: There were %d messages hidden due to bugs in logging.' % Store.nhidden
            print(msg)
        int_finally.stop()
    #        print('finally: %s' % int_finally)

    int_save_results = IntervalTimer()

    # print('Now %s has defined %s' % (job_id, new_jobs))
    if prev_defined_jobs is not None:
        # Did we define fewer jobs this time around?
        # Then we need to delete the extra ones.
        todelete = set()
        for x in prev_defined_jobs:
            if x not in new_jobs:
                todelete.add(x)
        from compmake.ui.ui import delete_jobs_recurse_definition
        deleted_jobs = delete_jobs_recurse_definition(jobs=todelete, db=db)
    else:
        deleted_jobs = set()

    # print('Now %s has deleted %s' % (job_id, deleted_jobs))

    set_job_userobject(job_id, user_object, db=db)
    int_save_results.stop()

    #    logger.debug('Save time for %s: %s s' % (job_id, walltime_save_result))

    int_make.stop()
    end_time = time()

    cache = Cache(Cache.DONE)

    #    print('int_make: %s' % int_make)
    #    print('int_load_results: %s' % int_load_results)
    #    print('int_compute: %s' % int_compute)
    if int_gc.get_walltime_used() > 1.0:
        logger.warning('Expensive garbage collection detected at the end of %s: %s' % (job_id, int_gc))
    #    print('int_save_results: %s' % int_save_results)

    cache.int_make = int_make
    cache.int_load_results = int_load_results
    cache.int_compute = int_compute
    cache.int_gc = int_gc
    cache.int_save_results = int_save_results

    cache.timestamp = end_time

    cache.walltime_used = int_make.get_walltime_used()
    cache.cputime_used = int_make.get_cputime_used()
    cache.host = host
    cache.jobs_defined = new_jobs
    set_job_cache(job_id, cache, db=db)

    return dict(user_object=user_object,
                user_object_deps=collect_dependencies(user_object),
                new_jobs=new_jobs,
                deleted_jobs=deleted_jobs)
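
A sketch of consuming the dictionary this newer make() returns; the field names come from the docstring, the job id is made up, and context is assumed to be an existing compmake Context:

    res = make('myjob', context)
    print('result: %s' % res['user_object'])
    print('defines %d new jobs' % len(res['new_jobs']))
    for j in res['deleted_jobs']:
        print('no longer defined: %s' % j)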
Example #6
def make(job_id, context, echo=False):
    """ 
        Makes a single job. 
        
        Returns a dictionary with fields:
             "user_object"
             "user_object_deps" = set of Promises
             "new_jobs" -> new jobs defined 
             "deleted_jobs" -> jobs that were defined but not anymore
        
        Raises JobFailed
        or JobInterrupted. Also SystemExit, KeyboardInterrupt, MemoryError are 
        captured.
    """
    db = context.get_compmake_db()

    host = 'hostname'  # XXX

    if get_compmake_config('set_proc_title'):
        setproctitle('cm-%s' % job_id)

    # TODO: should we make sure we are up to date?
    # (A previous version checked up_to_date(job_id, db=db) here and
    # returned early; that could only trigger if another compmake
    # process was using the same DB, and it was not right anyway.)

    job = get_job(job_id, db=db)
    cache = get_job_cache(job_id, db=db)
    
    if cache.state == Cache.DONE:
        prev_defined_jobs = set(cache.jobs_defined)
        #print('%s had previously defined %s' % (job_id, prev_defined_jobs))
    else:
        #print('%s was not DONE' % job_id)
        prev_defined_jobs = None
      
    # Note that at this point we save important information in the Cache,
    # so setting cache.state = Cache.IN_PROGRESS here would destroy it:
    # set_job_cache(job_id, cache, db=db)

    # TODO: delete previous user object
    
    # update state
    time_start = time()
    cpu_start = clock()

    def progress_callback(stack):
        publish(context, 'job-progress-plus', job_id=job_id, host=host,
                stack=stack)

    init_progress_tracking(progress_callback)

    capture = OutputCapture(context=context, prefix=job_id,
                            # This is instantaneous echo and should be False
                            # They will generate events anyway.
                            echo_stdout=False,
                            echo_stderr=False)

    # TODO: add whether we should just capture and not echo
    old_emit = logging.StreamHandler.emit

    from compmake.ui.coloredlog import colorize_loglevel

    def my_emit(_, log_record):
        # Note that log_record.msg might be an exception.
        msg2 = colorize_loglevel(log_record.levelno, str(log_record.msg))
        name = log_record.name
        # This will be captured by OutputCapture anyway.

    logging.StreamHandler.emit = my_emit

    already = set(context.get_jobs_defined_in_this_session())
    
    def get_deleted_jobs():
        generated = set(context.get_jobs_defined_in_this_session()) - already
        #print('failure: rolling back %s' % generated)
        
        from compmake.ui.ui import delete_jobs_recurse_definition

        todelete = set()
        # delete the jobs that were previously defined
        if prev_defined_jobs:
            todelete.update(prev_defined_jobs)
        # and also the ones that were generated
        todelete.update(generated)
        
        deleted_jobs = delete_jobs_recurse_definition(jobs=todelete, db=db)
        # now we failed, so we need to roll back other changes
        # to the db
        return deleted_jobs
    
    try:
        result = job_compute(job=job, context=context)
        assert isinstance(result, dict) and len(result) == 2
        user_object = result['user_object']
        new_jobs = result['new_jobs']

    except KeyboardInterrupt as e:
        bt = my_format_exc(e)
        deleted_jobs = get_deleted_jobs()
        mark_as_failed(job_id, str(e), backtrace=bt, db=db)
        raise JobInterrupted(job_id=job_id, deleted_jobs=deleted_jobs)
    
    except BaseException as e:
        # BaseException already covers SystemExit, MemoryError and the
        # more specific classes that used to be listed here.
        bt = my_format_exc(e)
        mark_as_failed(job_id, str(e), backtrace=bt, db=db)
        deleted_jobs = get_deleted_jobs()    
        raise JobFailed(job_id=job_id, reason=str(e), bt=bt,
                        deleted_jobs=deleted_jobs)
    finally:
        capture.deactivate()
        # even if we send an error, let's save the output of the process
        cache = get_job_cache(job_id, db=db)
        cache.captured_stderr = capture.get_logged_stderr()
        cache.captured_stdout = capture.get_logged_stdout()
        set_job_cache(job_id, cache, db=db)
        logging.StreamHandler.emit = old_emit

    #print('Now %s has defined %s' % (job_id, new_jobs))
    if prev_defined_jobs is not None:
        # Did we define fewer jobs this time around?
        # Then we need to delete the extra ones.
        todelete = set()
        for x in prev_defined_jobs:
            if x not in new_jobs:
                todelete.add(x)
        from compmake.ui.ui import delete_jobs_recurse_definition
        deleted_jobs = delete_jobs_recurse_definition(jobs=todelete, db=db)
    else:
        deleted_jobs = set()
    
    #print('Now %s has deleted %s' % (job_id, deleted_jobs))
    
    set_job_userobject(job_id, user_object, db=db)
    cache = Cache(Cache.DONE)
    cache.timestamp = time()
    walltime = cache.timestamp - time_start
    cputime = clock() - cpu_start
    cache.walltime_used = walltime
    cache.cputime_used = cputime
    cache.host = host
    cache.jobs_defined = new_jobs
    set_job_cache(job_id, cache, db=db)

    return dict(user_object=user_object,
                user_object_deps=collect_dependencies(user_object),
                new_jobs=new_jobs,
                deleted_jobs=deleted_jobs)
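
In this variant JobFailed is raised with structured keyword arguments; a hedged sketch of a caller, using getattr since it is not shown here whether JobFailed stores them as attributes:

    try:
        res = make('myjob', context)  # hypothetical job id
    except JobFailed as e:
        # job_id, reason, bt, deleted_jobs are passed to the raise above.
        print('failed: %s' % getattr(e, 'reason', e))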
Example #7
def make(job_id, context, echo=False):  # @UnusedVariable
    """
        Makes a single job.

        Returns a dictionary with fields:
             "user_object"
             "user_object_deps" = set of Promises
             "new_jobs" -> new jobs defined
             "deleted_jobs" -> jobs that were defined but not anymore

        Raises JobFailed
        or JobInterrupted. Also SystemExit, KeyboardInterrupt, MemoryError are
        captured.
    """
    db = context.get_compmake_db()

    int_make = IntervalTimer()

    host = 'hostname'  # XXX

    if get_compmake_config('set_proc_title'):
        setproctitle('cm-%s' % job_id)

    # TODO: should we make sure we are up to date?
    # (A previous version checked up_to_date(job_id, db=db) here and
    # returned early; that could only trigger if another compmake
    # process was using the same DB, and it was not right anyway.)

    job = get_job(job_id, db=db)
    cache = get_job_cache(job_id, db=db)

    if cache.state == Cache.DONE:
        prev_defined_jobs = set(cache.jobs_defined)
        # print('%s had previously defined %s' % (job_id, prev_defined_jobs))
    else:
        # print('%s was not DONE' % job_id)
        prev_defined_jobs = None

    # Note that at this point we save important information in the Cache,
    # so setting cache.state = Cache.IN_PROGRESS here would destroy it:
    # set_job_cache(job_id, cache, db=db)

    # TODO: delete previous user object

    def progress_callback(stack):
        publish(context,
                'job-progress-plus',
                job_id=job_id,
                host=host,
                stack=stack)

    init_progress_tracking(progress_callback)

    disable_capture = False
    if disable_capture:
        capture = None
    else:
        echo = False
        capture = OutputCapture(
            context=context,
            prefix=job_id,
            # This is instantaneous echo and should be False
            # They will generate events anyway.
            echo_stdout=echo,
            echo_stderr=echo)

    # TODO: add whether we should just capture and not echo
    old_emit = logging.StreamHandler.emit

    from compmake.ui.coloredlog import colorize_loglevel

    FORMAT = "%(name)10s|%(filename)15s:%(lineno)-4s - %(funcName)-15s| %(message)s"

    formatter = Formatter(FORMAT)

    class Store(object):
        nhidden = 0

    def my_emit(_, log_record):
        # Note that log_record.msg might be an exception.
        try:
            try:
                s = str(log_record.msg)
            except UnicodeEncodeError:
                s = unicode(log_record.msg)  # Python 2 fallback
            except BaseException:
                s = 'Could not print log_record %s' % id(log_record)
            log_record.msg = colorize_loglevel(log_record.levelno, s)
            res = formatter.format(log_record)
            # This will be captured by OutputCapture anyway.
            print(res)
        except BaseException:
            Store.nhidden += 1

    logging.StreamHandler.emit = my_emit

    already = set(context.get_jobs_defined_in_this_session())

    def get_deleted_jobs():
        generated = set(context.get_jobs_defined_in_this_session()) - already
        # print('failure: rolling back %s' % generated)

        from compmake.ui.ui import delete_jobs_recurse_definition

        todelete_ = set()
        # delete the jobs that were previously defined
        if prev_defined_jobs:
            todelete_.update(prev_defined_jobs)
        # and also the ones that were generated
        todelete_.update(generated)

        deleted_jobs_ = delete_jobs_recurse_definition(jobs=todelete_, db=db)
        # now we failed, so we need to roll back other changes
        # to the db
        return deleted_jobs_

    try:
        result = job_compute(job=job, context=context)

        assert isinstance(result, dict) and len(result) == 5
        user_object = result['user_object']
        new_jobs = result['new_jobs']
        int_load_results = result['int_load_results']
        int_compute = result['int_compute']
        int_gc = result['int_gc']
        int_gc.stop()

    except KeyboardInterrupt as e:
        bt = traceback.format_exc()
        deleted_jobs = get_deleted_jobs()
        mark_as_failed(job_id,
                       'KeyboardInterrupt: ' + str(e),
                       backtrace=bt,
                       db=db)

        cache = get_job_cache(job_id, db=db)
        if capture is not None:
            cache.captured_stderr = capture.get_logged_stderr()
            cache.captured_stdout = capture.get_logged_stdout()
        else:
            msg = '(Capture turned off.)'
            cache.captured_stderr = msg
            cache.captured_stdout = msg
        set_job_cache(job_id, cache, db=db)

        raise JobInterrupted(job_id=job_id, deleted_jobs=deleted_jobs)

    except BaseException as e:
        # BaseException already covers SystemExit, MemoryError and the
        # other classes named in the docstring.
        bt = traceback.format_exc()
        s = '%s: %s' % (type(e).__name__, e)
        mark_as_failed(job_id, s, backtrace=bt, db=db)
        deleted_jobs = get_deleted_jobs()

        cache = get_job_cache(job_id, db=db)
        if capture is not None:
            cache.captured_stderr = capture.get_logged_stderr()
            cache.captured_stdout = capture.get_logged_stdout()
        else:
            msg = '(Capture turned off.)'
            cache.captured_stderr = msg
            cache.captured_stdout = msg

        set_job_cache(job_id, cache, db=db)

        raise JobFailed(job_id=job_id,
                        reason=s,
                        bt=bt,
                        deleted_jobs=deleted_jobs)
    finally:
        int_finally = IntervalTimer()
        if capture is not None:
            capture.deactivate()
        # even if we send an error, let's save the output of the process
        logging.StreamHandler.emit = old_emit
        if Store.nhidden > 0:
            msg = 'compmake: There were %d messages hidden due to bugs in logging.' % Store.nhidden
            print(msg)
        int_finally.stop()
    #        print('finally: %s' % int_finally)

    int_save_results = IntervalTimer()

    # print('Now %s has defined %s' % (job_id, new_jobs))
    if prev_defined_jobs is not None:
        # Did we define fewer jobs this time around?
        # Then we need to delete the extra ones.
        todelete = set()
        for x in prev_defined_jobs:
            if x not in new_jobs:
                todelete.add(x)
        from compmake.ui.ui import delete_jobs_recurse_definition
        deleted_jobs = delete_jobs_recurse_definition(jobs=todelete, db=db)
    else:
        deleted_jobs = set()

    # print('Now %s has deleted %s' % (job_id, deleted_jobs))

    set_job_userobject(job_id, user_object, db=db)
    int_save_results.stop()

    #    logger.debug('Save time for %s: %s s' % (job_id, walltime_save_result))

    int_make.stop()
    end_time = time()

    cache = Cache(Cache.DONE)

    #    print('int_make: %s' % int_make)
    #    print('int_load_results: %s' % int_load_results)
    #    print('int_compute: %s' % int_compute)
    if int_gc.get_walltime_used() > 1.0:
        logger.warning(
            'Expensive garbage collection detected at the end of %s: %s' %
            (job_id, int_gc))
    #    print('int_save_results: %s' % int_save_results)

    cache.int_make = int_make
    cache.int_load_results = int_load_results
    cache.int_compute = int_compute
    cache.int_gc = int_gc
    cache.int_save_results = int_save_results

    cache.timestamp = end_time

    cache.walltime_used = int_make.get_walltime_used()
    cache.cputime_used = int_make.get_cputime_used()
    cache.host = host
    cache.jobs_defined = new_jobs
    set_job_cache(job_id, cache, db=db)

    return dict(user_object=user_object,
                user_object_deps=collect_dependencies(user_object),
                new_jobs=new_jobs,
                deleted_jobs=deleted_jobs)
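
In this version the DONE cache also stores IntervalTimer breakdowns; a hedged sketch of reading them back with the get_job_cache helper used above (hypothetical job id; db as obtained from context.get_compmake_db()):

    cache = get_job_cache('myjob', db=db)
    print('wall: %.2fs  cpu: %.2fs' % (cache.walltime_used,
                                       cache.cputime_used))
    # int_make, int_load_results, int_compute, int_gc, int_save_results
    # are the IntervalTimer objects saved a few lines above.
    print('compute: %s' % cache.int_compute)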