Example #1
    def from_dict(res):
        from compmake.jobs.result_dict import result_dict_check

        result_dict_check(res)
        assert 'bug' in res
        e = CompmakeBug(res['bug'])
        return e
Example #2
    def get(self, timeout=0):  # @UnusedVariable
        if not self.told_you_ready:
            msg = 'Should call get() only after ready().'
            raise CompmakeBug(msg)

        res = self._execute()
        result_dict_check(res)
        return res
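
The get() above can only be called after a companion ready() call has set told_you_ready; ready() itself is not part of this example. The toy class below is a self-contained stand-in for that contract, assuming ready() flips the flag once the result is available (it is not compmake's real class):

import time


class ToyAsyncResult:
    """Toy illustration of the ready()/get() contract above; not compmake's real class."""

    def __init__(self, compute):
        self._compute = compute
        self.told_you_ready = False

    def ready(self):
        # A real implementation would poll the worker here; this toy is always done.
        self.told_you_ready = True
        return True

    def get(self, timeout=0):
        if not self.told_you_ready:
            raise RuntimeError('Should call get() only after ready().')
        return self._compute()


r = ToyAsyncResult(lambda: dict(user_object=None, new_jobs=[], deleted_jobs=[]))
while not r.ready():
    time.sleep(0.1)
print(r.get())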
Example #3
    def from_dict(res):
        from compmake.jobs.result_dict import result_dict_check

        result_dict_check(res)
        assert 'interrupted' in res
        e = JobInterrupted(job_id=res['job_id'],
                           deleted_jobs=res['deleted_jobs'])
        return e
Example #4
    def from_dict(res):
        from compmake.jobs.result_dict import result_dict_check

        result_dict_check(res)
        assert 'fail' in res
        e = JobFailed(job_id=res['job_id'],
                      bt=res['bt'],
                      reason=res['reason'],
                      deleted_jobs=res['deleted_jobs'])
        return e
Example #5
    def from_dict(res):
        from compmake.jobs.result_dict import result_dict_check

        result_dict_check(res)
        try:
            res['abort']  # just checking that the key is present; raises KeyError otherwise
            e = HostFailed(host=res['host'],
                           job_id=res['job_id'],
                           bt=res['bt'],
                           reason=res['reason'])
        except KeyError as e:
            raise_wrapped(CompmakeBug, e, 'Incomplete dict', res=res,
                          keys=list(res.keys()))

        return e
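
Examples #1, #3, #4 and #5 all rebuild exceptions from plain dicts whose keys identify the kind of failure, and Examples #6 and #7 below return a success dict through the same result_dict_check. The real check lives in compmake.jobs.result_dict and is not shown here; the sketch below is only a guess at the invariant it enforces, using exactly the keys that appear in these examples:

KNOWN_SHAPES = {
    'bug': ('bug',),
    'job-failed': ('fail', 'job_id', 'reason', 'bt', 'deleted_jobs'),
    'host-failed': ('abort', 'host', 'job_id', 'reason', 'bt'),
    'job-interrupted': ('interrupted', 'job_id', 'deleted_jobs'),
    'done': ('user_object', 'user_object_deps', 'new_jobs', 'deleted_jobs'),
}


def result_dict_check_sketch(res):
    """Hypothetical stand-in for result_dict_check: accept the dict only if it
    matches exactly one of the shapes seen in the surrounding examples."""
    matches = [name for name, keys in KNOWN_SHAPES.items()
               if all(k in res for k in keys)]
    if len(matches) != 1:
        raise ValueError('Unrecognized result dict with keys %s' % sorted(res))
    return matches[0]


print(result_dict_check_sketch({'bug': 'example message'}))
print(result_dict_check_sketch(dict(user_object=None, user_object_deps=set(),
                                    new_jobs=[], deleted_jobs=[])))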
Example #6
def mvac_job(args):
    """
    args = tuple job_id, context,  queue_name, show_events
        
    Returns a dictionary with fields "user_object", "new_jobs", 'delete_jobs'.
    "user_object" is set to None because we do not want to 
    load in our thread if not necessary. Sometimes it is necessary
    because it might contain a Promise. 
   
    """
    job_id, context, event_queue_name, show_output, volumes, cwd = args  # @UnusedVariable
    check_isinstance(job_id, str)
    check_isinstance(event_queue_name, str)
    
    # Disable multyvac logging
    disable_logging_if_config(context)
    
    db = context.get_compmake_db()
    job = get_job(job_id=job_id, db=db)

    if job.needs_context:
        msg = 'Cannot use multyvac for dynamic job.'
        raise CompmakeException(msg)

    time_start = time.time()

    multyvac_job = mvac_instance(db, job_id, volumes, cwd)
    multyvac_job.wait()
    
    errors = [multyvac_job.status_error, multyvac_job.status_killed]
    if multyvac_job.status in errors:
        e = 'Multyvac error (status: %r)' % multyvac_job.status 
        bt = str(multyvac_job.stderr)

        cache = Cache(Cache.FAILED)
        cache.exception = e
        cache.backtrace = bt
        cache.timestamp = time.time()
        cache.captured_stderr = str(multyvac_job.stderr)
        cache.captured_stdout = str(multyvac_job.stdout)
        set_job_cache(job_id, cache, db=db)

        raise JobFailed(job_id=job_id, reason=str(e), bt=bt)
        
    user_object = multyvac_job.result

    user_object_deps = collect_dependencies(user_object)
    set_job_userobject(job_id, user_object, db=db)
    
    cache = get_job_cache(job_id, db=db)
    cache.captured_stderr = str(multyvac_job.stderr)
    cache.captured_stdout = str(multyvac_job.stdout)

    cache.state = Cache.DONE
    cache.timestamp = time.time()
    walltime = cache.timestamp - time_start
    cache.walltime_used = walltime
    cache.cputime_used = multyvac_job.cputime_system
    cache.host = 'multyvac'
    cache.jobs_defined = set()
    set_job_cache(job_id, cache, db=db)
    
    result_dict = dict(user_object=user_object,
                user_object_deps=user_object_deps, 
                new_jobs=[], deleted_jobs=[])
    result_dict_check(result_dict)
    return result_dict
Example #7
def parmake_job2(args):
    """
    args = tuple job_id, context, queue_name, show_events
        
    Returns a dictionary with fields "user_object", "new_jobs", 'delete_jobs'.
    "user_object" is set to None because we do not want to 
    load in our thread if not necessary. Sometimes it is necessary
    because it might contain a Promise. 
   
    """
    job_id, context, event_queue_name, show_output = args  # @UnusedVariable
    check_isinstance(job_id, str)
    check_isinstance(event_queue_name, str)
    from .pmake_manager import PmakeManager

    event_queue = PmakeManager.queues[event_queue_name]

    db = context.get_compmake_db()

    setproctitle('compmake:%s' % job_id)

    class G():
        nlostmessages = 0

    try:
        # We register a handler for the events to be passed back
        # to the main process
        def handler(event):
            try:
                if not CompmakeConstants.disable_interproc_queue:
                    event_queue.put(event, block=False)
            except Full:
                G.nlostmessages += 1
                # Do not write messages here, it might create a recursive
                # problem.
                # sys.stderr.write('job %s: Queue is full, message is lost.\n'
                # % job_id)

        remove_all_handlers()

        if show_output:
            register_handler("*", handler)

        def proctitle(event):
            stat = '[%s/%s %s] (compmake)' % (event.progress, event.goal,
                                              event.job_id)
            setproctitle(stat)

        register_handler("job-progress", proctitle)

        publish(context, 'worker-status', job_id=job_id, status='started')

        # Note that this function is called after the fork.
        # All data is conserved, but resources need to be reopened
        try:
            db.reopen_after_fork()  # @UndefinedVariable
        except:
            pass

        publish(context, 'worker-status', job_id=job_id, status='connected')

        res = make(job_id, context=context)

        publish(context, 'worker-status', job_id=job_id, status='ended')

        res['user_object'] = None
        result_dict_check(res)
        return res

    except KeyboardInterrupt:
        assert False, 'KeyboardInterrupt should be captured by make() (' \
                      'inside Job.compute())'
    except JobInterrupted:
        publish(context, 'worker-status', job_id=job_id, status='interrupted')
        raise
    except JobFailed:
        raise
    except BaseException:
        # XXX
        raise
    except:
        raise
    finally:
        publish(context, 'worker-status', job_id=job_id, status='cleanup')
        setproctitle('compmake-worker-finished %s' % job_id)
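
parmake_job2 ships events back to the parent process by pushing them onto a shared queue and silently counting the messages dropped when the queue is full. Below is a stripped-down version of that forwarding pattern; it uses an in-process queue.Queue so the snippet is self-contained, whereas the real code shares a multiprocessing queue with PmakeManager:

from queue import Full, Queue

event_queue = Queue(maxsize=2)  # tiny queue so the overflow branch triggers


class G:
    nlostmessages = 0


def forward_event(event):
    """Forward an event towards the parent process; drop it if the queue is full."""
    try:
        event_queue.put(event, block=False)
    except Full:
        # Do not write messages here: in a real event handler that could recurse.
        G.nlostmessages += 1


for i in range(5):
    forward_event(dict(name='job-progress', progress=i, goal=5))
print('forwarded %d events, lost %d' % (event_queue.qsize(), G.nlostmessages))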
Example #8
def parmake_job2(args):
    """
    args = tuple job_id, context, queue_name, show_events
        
    Returns a dictionary with fields "user_object", "new_jobs", 'delete_jobs'.
    "user_object" is set to None because we do not want to 
    load in our thread if not necessary. Sometimes it is necessary
    because it might contain a Promise. 
   
    """
    job_id, context, event_queue_name, show_output = args  # @UnusedVariable
    check_isinstance(job_id, str)
    check_isinstance(event_queue_name, str)
    from .pmake_manager import PmakeManager

    event_queue = PmakeManager.queues[event_queue_name]

    db = context.get_compmake_db()

    setproctitle('compmake:%s' % job_id)

    class G():
        nlostmessages = 0

    try:
        # We register a handler for the events to be passed back 
        # to the main process
        def handler(event):
            try:
                if not CompmakeConstants.disable_interproc_queue:
                    event_queue.put(event, block=False)
            except Full:
                G.nlostmessages += 1
                # Do not write messages here, it might create a recursive
                # problem.
                # sys.stderr.write('job %s: Queue is full, message is lost.\n'
                # % job_id)

        remove_all_handlers()

        if show_output:
            register_handler("*", handler)

        def proctitle(event):
            stat = '[%s/%s %s] (compmake)' % (event.progress,
                                              event.goal, event.job_id)
            setproctitle(stat)

        register_handler("job-progress", proctitle)

        publish(context, 'worker-status', job_id=job_id, status='started')

        # Note that this function is called after the fork.
        # All data is conserved, but resources need to be reopened
        try:
            db.reopen_after_fork()  # @UndefinedVariable
        except:
            pass

        publish(context, 'worker-status', job_id=job_id, status='connected')

        res = make(job_id, context=context)

        publish(context, 'worker-status', job_id=job_id, status='ended')

        res['user_object'] = None
        result_dict_check(res)
        return res
        
    except KeyboardInterrupt:
        assert False, 'KeyboardInterrupt should be captured by make() (' \
                      'inside Job.compute())'
    except JobInterrupted:
        publish(context, 'worker-status', job_id=job_id, status='interrupted')
        raise
    except JobFailed:
        raise
    except BaseException:
        # XXX
        raise
    except:
        raise
    finally:
        publish(context, 'worker-status', job_id=job_id, status='cleanup')
        setproctitle('compmake-worker-finished')