def test_then_it_should_pass_all_logs(self):
        """Every record emitted by the parent and the workers must reach the stream."""
        out = StringIO()
        subject = MultiProcessingHandler('mp-handler',
                                         logging.StreamHandler(stream=out))
        logger = logging.Logger('root')
        logger.addHandler(subject)

        def worker(wid, logger):
            # Each worker emits a fixed number of records.
            for _ in range(10):
                logger.info("Worker %d log.", wid)

        logger.info("Starting workers...")
        workers = []
        for wid in range(2):
            workers.append(mp.Process(target=worker, args=(wid, logger)))
        for w in workers:
            w.start()
        logger.info("Workers started.")

        for w in workers:
            w.join()
        logger.info("Workers done.")

        subject.close()

        out.seek(0)
        lines = out.readlines()
        self.assertIn("Starting workers...\n", lines)
        self.assertIn("Workers started.\n", lines)
        self.assertIn("Workers done.\n", lines)
        # 2 workers x 10 records each, plus the 3 records from the parent.
        self.assertEqual(10 * 2 + 3, len(lines))
    def test_then_it_should_keep_the_last_record_sent(self):
        """A record emitted immediately before close() must still be flushed."""
        buffer = StringIO()
        subject = MultiProcessingHandler('mp-handler',
                                         logging.StreamHandler(stream=buffer))
        logger = logging.Logger('root')
        logger.addHandler(subject)

        logger.info("Last record.")

        # Closing the handler drains its internal queue; nothing may be lost.
        subject.close()

        self.assertEqual('Last record.\n', buffer.getvalue())
    def test_then_records_should_not_be_garbled(self):
        """Concurrent writers must never interleave partial lines in the stream.

        NOTE(review): a method with this exact name is defined again later in
        the file; at class-creation time the later definition shadows this one,
        so one of the two should be removed.
        """
        stream = StringIO()
        subject = MultiProcessingHandler('mp-handler',
                                         logging.StreamHandler(stream=stream))
        logger = logging.Logger('root')
        logger.addHandler(subject)

        def worker(wid, logger):
            logger.info("Worker %d started.", wid)

            # Random delay so worker output interleaves unpredictably.
            time.sleep(random.random())

            logger.info("Worker %d finished processing.", wid)

        logger.info("Starting workers...")
        procs = [
            mp.Process(target=worker, args=(wid, logger)) for wid in range(100)
        ]
        for proc in procs:
            proc.daemon = True
            proc.start()

        logger.info("Workers started.")
        time.sleep(1)

        for proc in procs:
            proc.join(timeout=1)
        logger.info("Workers done.")

        time.sleep(0.5)  # For log records to propagate.

        subject.sub_handler.flush()
        subject.close()
        stream.seek(0)
        lines = stream.readlines()
        self.assertIn("Starting workers...\n", lines)
        self.assertIn("Workers done.\n", lines)

        # Dots are escaped: an unescaped "." matches any character, which
        # would let a garbled line slip past this check.
        valid_line = re.compile(r"(?:Starting workers\.\.\.)"
                                r"|(?:Worker \d+ started\.)"
                                r"|(?:Workers started\.)"
                                r"|(?:Worker \d+ finished processing\.)"
                                r"|(?:Workers done\.)")
        for line in lines[1:-1]:
            self.assertTrue(valid_line.match(line))
    def test_then_records_should_not_be_garbled(self):
        """Concurrent writers must never interleave partial lines in the stream.

        NOTE(review): this is a verbatim duplicate of an earlier method of the
        same name; this later definition is the one that survives on the class,
        and one of the two should be removed.
        """
        stream = StringIO()
        subject = MultiProcessingHandler(
            'mp-handler', logging.StreamHandler(stream=stream))
        logger = logging.Logger('root')
        logger.addHandler(subject)

        def worker(wid, logger):
            logger.info("Worker %d started.", wid)

            # Random delay so worker output interleaves unpredictably.
            time.sleep(random.random())

            logger.info("Worker %d finished processing.", wid)

        logger.info("Starting workers...")
        procs = [mp.Process(target=worker, args=(wid, logger)) for wid in range(100)]
        for proc in procs:
            proc.daemon = True
            proc.start()

        logger.info("Workers started.")
        time.sleep(1)

        for proc in procs:
            proc.join(timeout=1)
        logger.info("Workers done.")

        time.sleep(0.5)  # For log records to propagate.

        subject.sub_handler.flush()
        subject.close()
        stream.seek(0)
        lines = stream.readlines()
        self.assertIn("Starting workers...\n", lines)
        self.assertIn("Workers done.\n", lines)

        # Dots are escaped: an unescaped "." matches any character, which
        # would let a garbled line slip past this check.
        valid_line = re.compile(
            r"(?:Starting workers\.\.\.)"
            r"|(?:Worker \d+ started\.)"
            r"|(?:Workers started\.)"
            r"|(?:Worker \d+ finished processing\.)"
            r"|(?:Workers done\.)"
        )
        for line in lines[1:-1]:
            self.assertTrue(valid_line.match(line))
    def test_when_the_connection_to_the_child_process_breaks_then_it_closes_the_queue(
            self):
        """When the queue raises BrokenPipeError, the handler must close the queue."""
        stream = StringIO()
        with mock.patch(
                'multiprocessing_logging.multiprocessing.Queue',
                autospec=True,
        ) as queue_class:
            # autospec did not yield a usable spec here; use a plain Mock.
            fake_queue = mock.Mock()
            queue_class.return_value = fake_queue
            fake_queue.get.side_effect = queue.Empty()
            fake_queue.empty.side_effect = BrokenPipeError('error on empty')

            logger = logging.Logger('root')
            subject = MultiProcessingHandler(
                'mp-handler', logging.StreamHandler(stream=stream))
            try:
                logger.addHandler(subject)
            finally:
                subject.close()

            fake_queue.close.assert_called_once_with()
# Example #6
    def __init__(self, cases):
        """Set up logging and record the cases to execute.

        :param cases: list of cases to execute
        """
        # Route all records through one handler so output from multiple
        # processes is not garbled.
        # NOTE(review): MultiProcessingHandler elsewhere takes a sub-handler
        # as its second argument, not a file name -- confirm that passing
        # only 'foo.log' is intended.
        mtlog = MultiProcessingHandler('foo.log')
        self.logger = logging.getLogger(__name__)
        self.logger.addHandler(mtlog)

        self.logger.debug("Initializing DOE object.")
        self.cases = cases    # cases scheduled for execution
        self.results = None   # not yet computed
        self.runs = None      # not yet computed