Example 1: a Capture helper that wraps OutputCapture and, while active, redirects logging output to the original stderr.
import logging
from contextlib import contextmanager

# OutputCapture and colorize_loglevel are defined elsewhere in the
# surrounding project and are assumed to be importable here.


class Capture:
    """ Captures stdout/stderr through OutputCapture and, while active,
        redirects logging output to the original stderr. """
    def __init__(self, prefix, echo_stdout=True, echo_stderr=True,
                 capture_logging=False):
        self.prefix = prefix
        self.echo_stdout = echo_stdout
        self.echo_stderr = echo_stderr
        self.capture_logging = capture_logging
    
    def start(self):
        self.capture = OutputCapture(prefix=self.prefix,
                                     echo_stdout=self.echo_stdout,
                                     echo_stderr=self.echo_stderr)

        # TODO: add whether we should just capture and not echo
        # Save the original emit so that stop() can restore it.
        self.old_emit = logging.StreamHandler.emit

        # NOTE: logging output is redirected unconditionally; the
        # capture_logging flag is not honored yet.
        def my_emit(_, log_record):
            # The first argument is the StreamHandler instance (unused).
            # Colorize the message according to its level and write it to
            # the original stderr, bypassing the capture.
            msg = colorize_loglevel(log_record.levelno, log_record.msg)
            name = log_record.name
            self.capture.old_stderr.write('>%s:%s\n' % (name, msg))

        logging.StreamHandler.emit = my_emit
    
    @contextmanager
    def go(self):
        self.start()
        try:
            yield
        finally:
            self.stop()
    
    def stop(self):
        self.capture.deactivate() 
        logging.StreamHandler.emit = self.old_emit
        
    def get_logged_stderr(self):
        return self.capture.get_logged_stderr()
    
    def get_logged_stdout(self):
        return self.capture.get_logged_stdout()
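
A minimal usage sketch for the class above (not part of the original source): the prefix, logger name, and messages are illustrative, and it assumes Capture and its OutputCapture dependency are importable.

import logging

logging.basicConfig()  # make sure a StreamHandler exists, so my_emit is invoked

cap = Capture(prefix='worker-1')
with cap.go():
    print('computing...')                                   # goes through OutputCapture
    logging.getLogger('worker').warning('low disk space')   # rerouted by my_emit to the original stderr

print(cap.get_logged_stdout())  # text captured on stdout inside the block
print(cap.get_logged_stderr())  # text captured on stderr inside the block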
Example 2: compmake's make() function, which runs a single job under OutputCapture, publishes progress events, and records the result in the job cache.
def make(job_id, more=False):
    """ Makes a single job. Returns the user-object or raises JobFailed """
    host = compmake_config.hostname #@UndefinedVariable
    
    setproctitle(job_id)
     
    # TODO: should we make sure we are up to date???
    up, reason = up_to_date(job_id) #@UnusedVariable
    cache = get_job_cache(job_id)
    want_more = cache.state == Cache.MORE_REQUESTED
    if up and not (more and want_more):
        # print "%s is up to date" % job_id
        assert is_job_userobject_available(job_id)
        return get_job_userobject(job_id)
    else:
        # XXX review the logic for the case (up and more and want_more),
        # where the reason would be 'want more'.
        computation = get_job(job_id)
        
        assert(cache.state in [Cache.NOT_STARTED, Cache.IN_PROGRESS,
                               Cache.MORE_REQUESTED, Cache.DONE, Cache.FAILED])
        
        # Decide which previous result (if any) the computation should resume
        # from, based on the state recorded in the job cache.
        if cache.state == Cache.NOT_STARTED:
            previous_user_object = None
            cache.state = Cache.IN_PROGRESS
        elif cache.state == Cache.FAILED:
            previous_user_object = None
            cache.state = Cache.IN_PROGRESS
        elif cache.state == Cache.IN_PROGRESS:
            if is_job_tmpobject_available(job_id):
                previous_user_object = get_job_tmpobject(job_id)
            else:
                previous_user_object = None
        elif cache.state == Cache.MORE_REQUESTED:
            assert(is_job_userobject_available(job_id))
            if is_job_tmpobject_available(job_id):
                # resuming more computation
                previous_user_object = get_job_tmpobject(job_id)
            else:
                # starting more computation
                previous_user_object = get_job_userobject(job_id)
        elif cache.state == Cache.DONE:
            # If we are done, it means children have been updated
            assert(not up)
            previous_user_object = None
        else:
            assert(False)
        
        # update state
        cache.time_start = time()
        cpu_start = clock()
        set_job_cache(job_id, cache)
        
        def progress_callback(stack):
            publish('job-progress-plus', job_id=job_id, host=host, stack=stack)
        
        init_progress_tracking(progress_callback)
        
        num, total = 0, None
        user_object = None

        capture = OutputCapture(prefix=job_id,
            echo_stdout=compmake_config.echo_stdout, #@UndefinedVariable
            echo_stderr=compmake_config.echo_stderr) #@UndefinedVariable
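        # Everything the computation prints is captured (and optionally
        # echoed); the captured text is stored in the job cache in the
        # finally block below.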
        try: 
            result = computation.compute(previous_user_object)
            
            if isinstance(result, GeneratorType):
                try:
                    while True:
                        next_value = next(result)
                        if isinstance(next_value, tuple):
                            if len(next_value) != 3:
                                raise CompmakeException(
                                    'If the computation yields a tuple, it '
                                    'must be a tuple with 3 elements. '
                                    'Got: %s' % str(next_value))
                            user_object, num, total = next_value

                            publish('job-progress', job_id=job_id, host=host,
                                    done=None, progress=num, goal=total)
                            if compmake_config.save_progress: #@UndefinedVariable
                                set_job_tmpobject(job_id, user_object)
                            
                except StopIteration:
                    pass
            else:
                publish('job-progress', job_id=job_id, host=host,
                        done=1, progress=1, goal=1)

                user_object = result

        
        except KeyboardInterrupt: 
            # TODO: clear progress cache
            # Save the current progress:
            cache.iterations_in_progress = num
            cache.iterations_goal = total
            if user_object:
                set_job_tmpobject(job_id, user_object)
            
            set_job_cache(job_id, cache)

            # clear progress cache
            publish('job-interrupted', job_id=job_id, host=host)
            raise JobInterrupted('Keyboard interrupt')
        
        except Exception as e:
            sio = StringIO()
            print_exc(file=sio)
            bt = sio.getvalue()
            
            error("Job %s failed: %s" % (job_id, e))
            error(bt)
            
            mark_as_failed(job_id, e, bt)
            
            # clear progress cache
            publish('job-failed', job_id=job_id, host=host, reason=e)
            raise JobFailed('Job %s failed: %s' % (job_id, e))
    
        finally:
            capture.deactivate()
            # even if we send an error, let's save the output of the process
            cache = get_job_cache(job_id)
            cache.captured_stderr = capture.stderr_replacement.buffer.getvalue()
            cache.captured_stdout = capture.stdout_replacement.buffer.getvalue()
            set_job_cache(job_id, cache)
            
        set_job_userobject(job_id, user_object)
                
        if is_job_tmpobject_available(job_id):
            # We only have one with yield
            delete_job_tmpobject(job_id)
        
        cache.state = Cache.DONE
        cache.timestamp = time()
        walltime = cache.timestamp - cache.time_start 
        cputime = clock() - cpu_start
        # FIXME walltime/cputime not precise (especially for "more" computation)
        cache.walltime_used = walltime
        cache.cputime_used = cputime
        cache.done_iterations = num # XXX not true
        cache.host = compmake_config.hostname #@UndefinedVariable
        
        set_job_cache(job_id, cache)
        
        publish('job-succeeded', job_id=job_id, host=host)

        # TODO: clear these records in other place
        return user_object
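
Two illustrative sketches of how the pieces above fit together; the function name, job id, and caller code are made up and are not part of compmake. The first shows the shape of a computation written as a generator, whose yielded (user_object, num, total) tuples make() unpacks to publish progress and save intermediate results; the second shows a caller handling the exceptions make() can raise. How the function is registered as a job is outside this snippet.

# Illustrative computation (not from compmake): a generator that yields
# (partial_result, iterations_done, iterations_goal) tuples, the 3-element
# protocol that make() expects from a yielding computation.
def slow_count(previous_result):
    total = 10
    done = previous_result if previous_result is not None else 0
    while done < total:
        done = done + 1
        yield done, done, total   # user_object, num, total

# Illustrative caller: make() returns the user object (possibly from cache)
# or raises one of the exceptions handled above.
try:
    user_object = make('count-to-ten')
except JobFailed as e:
    # the backtrace was already stored in the job cache via mark_as_failed()
    print('job failed: %s' % e)
except JobInterrupted:
    # partial progress, if any, was saved as a tmp object and can be resumed
    raise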