Example #1
 def __init__(self, nb_workers):
     self._lt = LogsFollower()
     self._logger = WorkflowRunner.get_logger()
     self._pool = None
     if nb_workers == -1:
         self._nb_workers = int((cpu_count() + 1) / 2)
     else:
         self._nb_workers = nb_workers
     self._free_workers = None
     self._completed_processes = []
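With the -1 sentinel the runner picks roughly half the logical cores, rounded up, so it never drops to zero workers. A quick standalone check of the heuristic (illustration only, not part of the original file):

    from multiprocessing import cpu_count

    # int((n + 1) / 2) rounds n / 2 upwards: 8 cores -> 4 workers, 1 core -> 1 worker
    nb_workers = int((cpu_count() + 1) / 2)
    print(nb_workers)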
Example #2
 def test_thread_protection(self):
     """When a section of code using the LogsFollower is complete, the thread should stop"""
     lf = LogsFollower()
     lf.follow_process("stdout", "stderr", "process_id")
     with lf.trace_in_background():
         assert lf._thread.is_alive(), "Background thread isn't running..."
         with open("stdout", "w") as fout, \
              open("stderr", "w") as ferr:
             fout.write("file stdout")
             ferr.write("file stderr")
     assert not lf._thread.is_alive(), "Background thread hasn't stopped!"
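The test above treats trace_in_background() as a context manager that owns a worker thread. A minimal sketch of that pattern (an assumption about the shape of the API, not tuttle's actual implementation): start a polling thread on entry, then signal and join it on exit so the thread is no longer alive afterwards.

    from contextlib import contextmanager
    from threading import Event, Thread

    @contextmanager
    def trace_in_background(poll, interval=0.1):
        stop = Event()

        def loop():
            # keep draining the followed logs until the with-block finishes
            while not stop.is_set():
                poll()
                stop.wait(interval)

        thread = Thread(target=loop)
        thread.start()
        try:
            yield thread
        finally:
            stop.set()     # ask the thread to stop...
            thread.join()  # ...and wait, so is_alive() is False afterwards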
Example #3
    def test_log_format_stdout_stderr(self):
        """logs should display log level and message"""
        with CaptureOutputs() as co:
            lf = LogsFollower()
            lf.follow_process("stdout", "stderr", "process_id")
            with open("stdout", "w") as fout, \
                 open("stderr", "w") as ferr:
                fout.write("file stdout")
                ferr.write("file stderr")
            while lf.trace_logs():
                pass

        assert co.output.find(
            "[process_id::stdout] file stdout") >= 0, co.output
        assert co.output.find(
            "[process_id::stderr] file stderr") >= 0, co.output
Example #4
 def test_log_should_(self):
     """ The last char of the file must be logged even if the 
         file does not finish with CR """
     with CaptureOutputs() as co:
         logger = LogsFollower.get_logger()
         lt = LogTracer(logger, logging.INFO, "test.log")
         with open("test.log", "w") as f:
             f.write("line 1")
         lt.trace()
     output = co.output
     assert output.find("line 1") >= 0, output
Example #5
 def test_log_should_not_double_carriage_return(self):
     """ """
     with CaptureOutputs() as co:
         logger = LogsFollower.get_logger()
         lt = LogTracer(logger, logging.INFO, "test.log")
         with open("test.log", "w") as f:
             f.write("line 1\n")
             f.write("line 2\n")
         lt.trace()
     output = co.output
     assert output.find("\n\n") == -1, output
Example #6
File: workflow.py Project: lexman/tuttle
    def run_pre_processes(self):
        """ Runs all the preprocesses

        :raises ExecutionError: if an error occurs
        """
        TuttleDirectories.create_tuttle_dirs()
        TuttleDirectories.empty_extension_dir()
        if not self.has_preprocesses():
            return
        lt = LogsFollower()
        WorkflowRunner.print_preprocesses_header()
        for process in self.iter_preprocesses():
            TuttleDirectories.prepare_and_assign_paths(process)
            lt.follow_process(process.log_stdout, process.log_stderr, None)

        with lt.trace_in_background(), TuttleEnv():
            for preprocess in self.iter_preprocesses():
                WorkflowRunner.print_preprocess_header(preprocess, lt._logger)
                success = True
                error_msg = None
                try:
                    preprocess.set_start()
                    preprocess.processor.run(preprocess, preprocess._reserved_path,
                                             preprocess.log_stdout, preprocess.log_stderr)
                except TuttleError as e:
                    success = False
                    error_msg = str(e)
                    raise
                except Exception:
                    success = False
                    exc_info = sys.exc_info()
                    stacktrace = "".join(format_exception(*exc_info))
                    error_msg = "An unexpected error has happened in the tuttle preprocessor {}:\n" \
                                "{}\n" \
                                "Preprocess {} will not complete.".format(preprocess._processor.name, stacktrace, preprocess.id)
                finally:
                    preprocess.set_end(success, error_msg)
                    self.create_reports()
        WorkflowRunner.print_preprocesses_footer()
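Note the two phases in this method: every preprocess's log files are registered with the follower first, then all preprocesses run sequentially inside a single trace_in_background() / TuttleEnv() block, so their stdout and stderr are echoed as they are produced and the reports are regenerated after each preprocess ends.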
Example #7
 def test_log_single_file(self):
     """LogTracer should log the content of a file"""
     with CaptureOutputs() as co:
         logger = LogsFollower.get_logger()
         lt = LogTracer(logger, logging.INFO, "test.log")
         with open("test.log", "w") as f:
             f.write("line 1\n")
             f.write("line 2\n")
             f.write("line 3\n")
         lt.trace()
     output = co.output
     assert output.find("line 1") >= 0, output
     assert output.find("line 2") >= 0, output
     assert output.find("line 3") >= 0, output
Example #8
 def test_log_huge_file(self):
     """LogTracer should log the content of a big file in stdout"""
     with CaptureOutputs() as co:
         logger = LogsFollower.get_logger()
         lt = LogTracer(logger, "namespace", "test.log")
         with open("test.log", "w") as f:
             for i in xrange(5000):
                 f.write("line {}\n".format(i))
         while lt.trace():
             pass
     output = co.output
     assert output.find("line 1") >= 0, output
     assert output.find("line 2") >= 0, output
     assert output.find("line 3") >= 0, output
     assert output.find("line 4999") >= 0, output
Example #9
 def test_log_in_background(self):
     """Should log in background ans stop when foreground processing 
        is over"""
     import time
     with CaptureOutputs() as co:
         lf = LogsFollower()
         lf.follow_process("stdout", "stderr", "process_id")
         lf.trace_in_background()
         with open("stdout", "w") as fout, \
              open("stderr", "w") as ferr:
             fout.write("file stdout")
             ferr.write("file stderr")
         lf.terminate()
     assert co.output.find(
         "[process_id::stdout] file stdout") >= 0, co.output
     assert co.output.find(
         "[process_id::stderr] file stderr") >= 0, co.output
Example #10
 def test_log_a_lot_in_background(self):
     """Should log in background ans stop when foreground processing 
        is over even with a lot a data"""
     with CaptureOutputs() as co:
         lf = LogsFollower()
         lf.follow_process("stdout", "stderr", "process_id")
         lf.trace_in_background()
         with open("stdout", "w") as fout, \
              open("stderr", "w") as ferr:
             fout.write("file stdout")
             ferr.write("file stderr")
             for i in xrange(5000):
                 fout.write("stdout - line {}\n".format(i))
                 ferr.write("stderr - line {}\n".format(i))
         lf.terminate()
     assert co.output.find(
         "[process_id::stdout] stdout - line 1") >= 0, co.output
     assert co.output.find(
         "[process_id::stderr] stderr - line 1") >= 0, co.output
     assert co.output.find(
         "[process_id::stdout] stdout - line 4999") >= 0, co.output
     assert co.output.find(
         "[process_id::stderr] stderr - line 4999") >= 0, co.output
Example #11
    def test_log_multiple_files(self):
        """LogTracer should log the content of several files in stdout"""
        with CaptureOutputs() as co:
            lf = LogsFollower()
            lf.follow_process("w1.stdout", "w1.stderr", "process1")
            lf.follow_process("w2.stdout", "w2.stderr", "process2")
            lf.follow_process("w3.stdout", "w3.stderr", "process3")

            with open("w1.stdout", "w") as fo1, \
                 open("w1.stderr", "w") as fe1, \
                 open("w2.stdout", "w") as fo2, \
                 open("w2.stderr", "w") as fe2, \
                 open("w3.stdout", "w") as fo3, \
                 open("w3.stderr", "w") as fe3 :

                for i in xrange(5000):
                    fo1.write("w1.stdout - line {}\n".format(i))
                    fe1.write("w1.stderr - line {}\n".format(i))
                    fo2.write("w2.stdout - line {}\n".format(i))
                    fe2.write("w2.stderr - line {}\n".format(i))
                    fo3.write("w3.stdout - line {}\n".format(i))
                    fe3.write("w3.stderr - line {}\n".format(i))

            while lf.trace_logs():
                pass
        output = co.output
        assert output.find("w1.stderr - line 1") >= 0, output
        assert output.find("w1.stdout - line 1") >= 0, output
        assert output.find("w2.stderr - line 1") >= 0, output
        assert output.find("w2.stdout - line 1") >= 0, output
        assert output.find("w3.stdout - line 1") >= 0, output
        assert output.find("w3.stderr - line 1") >= 0, output

        assert output.find("w1.stderr - line 4999") >= 0, output
        assert output.find("w1.stdout - line 4999") >= 0, output
        assert output.find("w2.stderr - line 4999") >= 0, output
        assert output.find("w2.stdout - line 4999") >= 0, output
        assert output.find("w3.stdout - line 4999") >= 0, output
        assert output.find("w3.stderr - line 4999") >= 0, output
Example #12
 def test_log_format(self):
     """logs should display log level and message"""
     with CaptureOutputs() as co:
         logger = LogsFollower.get_logger()
         logger.info("MESSAGE")
     assert co.output.find("MESSAGE") == 0, co.output
Example #13
class WorkflowRunner:

    @staticmethod
    def resources2list(resources):
        res = "\n".join(("* {}".format(resource.url) for resource in resources))
        return res

    def __init__(self, nb_workers):
        self._lt = LogsFollower()
        self._logger = WorkflowRunner.get_logger()
        self._pool = None
        if nb_workers == -1:
            self._nb_workers = int((cpu_count() + 1) / 2)
        else:
            self._nb_workers = nb_workers
        self._free_workers = None
        self._completed_processes = []

    def start_process_in_background(self, process):
        self.acquire_worker()

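        # Note (an observation about this code, not a comment from the original
        # source): process_run_callback runs on the Pool's result-handler
        # thread, while run_parallel_workflow drains _completed_processes from
        # the main thread; this hand-off relies on CPython's GIL making
        # list.append() and list.pop() atomic.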
        def process_run_callback(result):
            success, error_msg, signatures = result
            process.set_end(success, error_msg)
            self.release_worker()
            self._completed_processes.append((process, signatures))

        process.set_start()
        self._pool.apply_async(run_process_without_exception, [process], callback=process_run_callback)

    def start_processes_on_available_workers(self, runnables):
        started_a_process = False
        while self.workers_available() and runnables:
            # No error
            process = runnables.pop()
            self.start_process_in_background(process)
            started_a_process = True
        return started_a_process

    def handle_completed_process(self, workflow, runnables, success_processes, failure_processes):
        handled_completed_process = False
        while self._completed_processes:
            completed_process, signatures = self._completed_processes.pop()
            if completed_process.success:
                success_processes.append(completed_process)
                workflow.update_signatures(signatures)
                new_runnables = workflow.discover_runnable_processes(completed_process)
                runnables.update(new_runnables)
            else:
                failure_processes.append(completed_process)
            handled_completed_process = True
        return handled_completed_process

    def run_parallel_workflow(self, workflow, keep_going=False):
        """ Runs a workflow by running every process in the right order
        :return: (success_processes, failure_processes): the list of processes that
            ended in success and the list of processes that ended in failure
        """
        for process in workflow.iter_processes():
            if process.start is None:
                # Don't display logs if the process has already run
                self._lt.follow_process(process.log_stdout, process.log_stderr, process.id)

        failure_processes, success_processes = [], []
        with self._lt.trace_in_background():
            runnables = workflow.runnable_processes()
            self.init_workers()
            try:
                while (keep_going or not failure_processes) and \
                        (self.active_workers() or self._completed_processes or runnables):
                    started_a_process = self.start_processes_on_available_workers(runnables)
                    handled_completed_process = self.handle_completed_process(workflow, runnables,
                                                                              success_processes, failure_processes)
                    if handled_completed_process or started_a_process:
                        workflow.export()
                    else:
                        sleep(0.1)
                if failure_processes and not keep_going:
                    self._logger.error("Process {} has failled".format(failure_processes[0].id))
                    self._logger.warn("Waiting for all processes already started to complete")
                while self.active_workers() or self._completed_processes:
                    if self.handle_completed_process(workflow, runnables, success_processes, failure_processes):
                        workflow.export()
                    else:
                        sleep(0.1)
            finally:
                self.terminate_workers_and_clean_subprocesses()
                self.mark_unfinished_processes_as_failure(workflow)

        return success_processes, failure_processes

    def init_workers(self):
        self._pool = Pool(self._nb_workers)
        self._free_workers = self._nb_workers

    def terminate_workers_and_clean_subprocesses(self):
        direct_procs = set(psutil.Process().children())
        all_procs = set(psutil.Process().children(recursive=True))
        sub_procs = all_procs - direct_procs

        # Cleanly terminate the direct child processes instantiated by multiprocessing
        self._pool.terminate()
        self._pool.join()

        # Then terminate the subprocesses that are still running
        for p in sub_procs:
            try:
                p.terminate()
            except NoSuchProcess:
                pass
        gone, still_alive = psutil.wait_procs(sub_procs, timeout=2)
        for p in still_alive:
            p.kill()

    def acquire_worker(self):
        assert self._free_workers > 0
        self._free_workers -= 1

    def release_worker(self):
        self._free_workers += 1

    def workers_available(self):
        return self._free_workers

    def active_workers(self):
        return self._free_workers != self._nb_workers

    @staticmethod
    def mark_unfinished_processes_as_failure(workflow):
        for process in workflow.iter_processes():
            if process.start and not process.end:
                error_msg = "This process was aborted"
                process.set_end(False, error_msg)
        workflow.export()

    @staticmethod
    def print_preprocess_header(process, logger):
        title = "Preprocess : {}".format(process.id)
        logger.info("[" + title + "]")
        #logger.info(underline(title))

    @staticmethod
    def print_preprocesses_header():
        title = "Running preprocesses".upper()
        print("[" + title + "]")
        #print(underline(title, '='))

    @staticmethod
    def print_preprocesses_footer():
        title = "End of preprocesses... Running the workflow".upper()
        #print(underline(title, '='))
        print("[" + title + "]")

    @staticmethod
    def get_logger():
        logger = logging.getLogger(__name__)
        formatter = logging.Formatter("%(message)s")
        handler = logging.StreamHandler(sys.stdout)
        handler.setFormatter(formatter)
        handler.setLevel(logging.INFO)
        logger.setLevel(logging.INFO)
        logger.addHandler(handler)
        return logger
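
One caveat about get_logger(): logging.getLogger(__name__) returns the same logger object on every call, but a new StreamHandler is added each time, so calling it twice makes every record print twice. A common guard (an assumption here, not part of the original code) is to install the handler only once:

    import logging
    import sys

    def get_logger_once():
        """Variant of WorkflowRunner.get_logger that avoids duplicate handlers."""
        logger = logging.getLogger(__name__)
        if not logger.handlers:  # install the stdout handler only once
            handler = logging.StreamHandler(sys.stdout)
            handler.setFormatter(logging.Formatter("%(message)s"))
            handler.setLevel(logging.INFO)
            logger.addHandler(handler)
        logger.setLevel(logging.INFO)
        return logger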