Example #1
    def runChef(self, *args):
        root_dir = self.path('/kitchen')
        chef_args = ['--root', root_dir]
        if self._use_chef_debug:
            chef_args += ['--debug']
        chef_args += list(args)

        import logging
        from piecrust.main import (
            _make_chef_state, _recover_pre_chef_state,
            _pre_parse_chef_args, _run_chef)

        # If py.test added a log handler, remove it because Chef will
        # install its own log handler.
        if self._pytest_log_handler:
            logging.getLogger().removeHandler(
                self._pytest_log_handler)

        state = _make_chef_state()
        pre_args = _pre_parse_chef_args(chef_args, state=state)
        exit_code = _run_chef(pre_args, chef_args)
        _recover_pre_chef_state(state)

        if self._pytest_log_handler:
            logging.getLogger().addHandler(
                self._pytest_log_handler)

        assert exit_code == 0
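The helper above drives Chef's CLI entry points in-process rather than spawning a subprocess, so a test can assert on the exit code directly. A minimal usage sketch, assuming a mock-filesystem fixture that exposes this method (the fixture and test names below are hypothetical):

def test_bake_exits_cleanly(fs):
    # `fs` is assumed to be the mock filesystem object providing runChef();
    # the helper itself asserts that Chef exited with code 0.
    fs.runChef('bake')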
Example #2
    def runChef(self, *args):
        root_dir = self.path('/kitchen')
        chef_args = ['--root', root_dir]
        if self._use_chef_debug:
            chef_args += ['--debug']
        chef_args += list(args)

        import logging
        from piecrust.main import (_make_chef_state, _recover_pre_chef_state,
                                   _pre_parse_chef_args, _run_chef)

        # If py.test added a log handler, remove it because Chef will
        # install its own log handler.
        if self._pytest_log_handler:
            logging.getLogger().removeHandler(self._pytest_log_handler)

        state = _make_chef_state()
        pre_args = _pre_parse_chef_args(chef_args, state=state)
        exit_code = _run_chef(pre_args, chef_args)
        _recover_pre_chef_state(state)

        if self._pytest_log_handler:
            logging.getLogger().addHandler(self._pytest_log_handler)

        assert exit_code == 0
Example #3
    def runtest(self):
        if not ChefTestItem.__initialized_logging__:
            colorama.init()
            hdl = logging.StreamHandler(stream=sys.stdout)
            logging.getLogger().addHandler(hdl)
            logging.getLogger().setLevel(logging.INFO)
            ChefTestItem.__initialized_logging__ = True

        fs = self._prepareMockFs()

        argv = self.spec['args']
        if isinstance(argv, str):
            argv = argv.split(' ')
        if self.is_theme_site:
            argv.insert(0, '--theme')
        if not self.spec.get('no_kitchen', False):
            argv = ['--root', fs.path('/kitchen')] + argv

        with mock_fs_scope(fs, keep=self.mock_debug):
            cwd = os.getcwd()
            memstream = io.StringIO()
            hdl = logging.StreamHandler(stream=memstream)
            logging.getLogger().addHandler(hdl)
            try:
                from piecrust.main import _pre_parse_chef_args, _run_chef
                os.chdir(fs.path('/'))
                pre_args = _pre_parse_chef_args(argv)
                exit_code = _run_chef(pre_args, argv)
            finally:
                logging.getLogger().removeHandler(hdl)
                os.chdir(cwd)

            expected_code = self.spec.get('code', 0)
            if expected_code != exit_code:
                raise UnexpectedChefExitCodeError("Got '%d', expected '%d'." %
                                                  (exit_code, expected_code))

            expected_out = self.spec.get('out', None)
            if expected_out is not None:
                actual_out = memstream.getvalue()
                if not self.spec.get('no_strip'):
                    actual_out = actual_out.rstrip(' \n')
                    expected_out = expected_out.rstrip(' \n')
                if self.spec.get('replace_out_path_sep'):
                    expected_out = expected_out.replace('/', os.sep)
                if expected_out != actual_out:
                    raise UnexpectedChefOutputError(expected_out, actual_out)

            expected_files = self.spec.get('files', None)
            if expected_files is not None:
                for path in expected_files:
                    path = '/' + path.lstrip('/')
                    if not os.path.exists(fs.path(path)):
                        raise MissingChefOutputFileError(fs, path)
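The runner is driven by a `spec` mapping; the keys it consumes are visible in the `self.spec.get(...)` calls above. A hypothetical spec illustrating those keys (all values made up):

spec = {
    'args': 'help',                    # CLI arguments, a string or a list
    'no_kitchen': False,               # when true, skip the implicit --root
    'code': 0,                         # expected exit code (defaults to 0)
    'out': 'The PieCrust chef!',       # expected captured log output
    'no_strip': False,                 # when true, keep trailing blanks in the comparison
    'replace_out_path_sep': False,     # rewrite '/' to os.sep in 'out'
    'files': ['kitchen/config.yml'],   # paths that must exist after the run
}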
Example #4
def _pre_parse_pytest_args():
    # If we are unit-testing, we need to translate our test logging
    # arguments into something Chef can understand.
    import argparse
    parser = argparse.ArgumentParser()
    # This is adapted from our `conftest.py`.
    parser.add_argument('--log-debug', action='store_true')
    parser.add_argument('--log-file')
    res, _ = parser.parse_known_args(sys.argv[1:])

    chef_args = []
    if res.log_debug:
        chef_args.append('--debug')
    if res.log_file:
        chef_args += ['--log', res.log_file]

    root_logger = logging.getLogger()
    while len(root_logger.handlers) > 0:
        root_logger.removeHandler(root_logger.handlers[0])

    from piecrust.main import _pre_parse_chef_args
    _pre_parse_chef_args(chef_args)
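The translation leans on `argparse.parse_known_args`, which collects unrecognized arguments instead of erroring out, so the rest of pytest's command line passes through untouched. A self-contained sketch of that behavior:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--log-debug', action='store_true')
parser.add_argument('--log-file')
# Unknown flags ('-x') and positionals ('tests/') land in `rest`; no error.
res, rest = parser.parse_known_args(
    ['tests/', '--log-debug', '-x', '--log-file', 'out.log'])
assert res.log_debug and res.log_file == 'out.log'
assert rest == ['tests/', '-x']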
Example #5
    def runtest(self):
        if not ChefTestItem.__initialized_logging__:
            colorama.init()
            hdl = logging.StreamHandler(stream=sys.stdout)
            logging.getLogger().addHandler(hdl)
            logging.getLogger().setLevel(logging.INFO)
            ChefTestItem.__initialized_logging__ = True

        fs = self._prepareMockFs()

        argv = self.spec['args']
        if isinstance(argv, str):
            argv = argv.split(' ')
        if self.is_theme_site:
            argv.insert(0, '--theme')
        argv = ['--root', fs.path('/kitchen')] + argv

        expected_code = self.spec.get('code', 0)
        expected_out = self.spec.get('out', None)

        with mock_fs_scope(fs, keep=self.mock_debug):
            memstream = io.StringIO()
            hdl = logging.StreamHandler(stream=memstream)
            logging.getLogger().addHandler(hdl)
            try:
                from piecrust.main import _pre_parse_chef_args, _run_chef
                pre_args = _pre_parse_chef_args(argv)
                exit_code = _run_chef(pre_args, argv)
            finally:
                logging.getLogger().removeHandler(hdl)

            assert expected_code == exit_code

            if expected_out is not None:
                actual_out = memstream.getvalue()
                if self.spec.get('replace_out_path_sep'):
                    expected_out = expected_out.replace('/', os.sep)
                assert expected_out == actual_out
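This older variant already uses the output-capture idiom shared by all the runners: attach a `logging.StreamHandler` backed by a `StringIO` to the root logger, run Chef, then detach. The idiom in isolation (a minimal sketch; the warning call stands in for `_run_chef`):

import io
import logging

memstream = io.StringIO()
hdl = logging.StreamHandler(stream=memstream)
logging.getLogger().addHandler(hdl)
try:
    logging.getLogger().warning("hello from chef")  # placeholder workload
finally:
    logging.getLogger().removeHandler(hdl)          # always detach the handler
assert "hello from chef" in memstream.getvalue()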
Example #6
def _real_worker_func(params):
    # In a context where `multiprocessing` is using the `spawn` forking model,
    # the new process doesn't inherit anything, so we lose all our logging
    # configuration here. Let's set it up again.
    if (hasattr(multiprocessing, 'get_start_method') and
            multiprocessing.get_start_method() == 'spawn'):
        from piecrust.main import _pre_parse_chef_args
        _pre_parse_chef_args(sys.argv[1:])

    wid = params.wid
    logger.debug("Worker %d initializing..." % wid)

    params.inqueue._writer.close()
    params.outqueue._reader.close()

    w = params.worker_class(*params.initargs)
    w.wid = wid
    try:
        w.initialize()
    except Exception as ex:
        logger.error("Working failed to initialize:")
        logger.exception(ex)
        params.outqueue.put(None)
        return

    get = params.inqueue.get
    put = params.outqueue.put

    completed = 0
    while True:
        try:
            task = get()
        except (EOFError, OSError):
            logger.debug("Worker %d encountered connection problem." % wid)
            break

        task_type, task_data = task
        if task_type == TASK_END:
            logger.debug("Worker %d got end task, exiting." % wid)
            try:
                rep = (task_type, True, wid, (wid, w.getReport()))
            except Exception as e:
                if params.wrap_exception:
                    e = multiprocessing.ExceptionWithTraceback(
                            e, e.__traceback__)
                rep = (task_type, False, wid, (wid, e))
            put(rep)
            break

        if task_type == TASK_JOB:
            task_data = (task_data,)

        for t in task_data:
            try:
                res = (TASK_JOB, True, wid, w.process(t))
            except Exception as e:
                if params.wrap_exception:
                    e = multiprocessing.ExceptionWithTraceback(
                            e, e.__traceback__)
                res = (TASK_JOB, False, wid, e)
            put(res)

            completed += 1

    logger.debug("Worker %d completed %d tasks." % (wid, completed))
Example #7
def _real_worker_func(params):
    # In a context where `multiprocessing` is using the `spawn` forking model,
    # the new process doesn't inherit anything, so we lose all our logging
    # configuration here. Let's set it up again.
    if (hasattr(multiprocessing, 'get_start_method')
            and multiprocessing.get_start_method() == 'spawn'):
        from piecrust.main import _pre_parse_chef_args
        _pre_parse_chef_args(sys.argv[1:])

    wid = params.wid
    logger.debug("Worker %d initializing..." % wid)

    params.inqueue._writer.close()
    params.outqueue._reader.close()

    w = params.worker_class(*params.initargs)
    w.wid = wid
    try:
        w.initialize()
    except Exception as ex:
        logger.error("Working failed to initialize:")
        logger.exception(ex)
        params.outqueue.put(None)
        return

    get = params.inqueue.get
    put = params.outqueue.put

    completed = 0
    while True:
        try:
            task = get()
        except (EOFError, OSError):
            logger.debug("Worker %d encountered connection problem." % wid)
            break

        task_type, task_data = task
        if task_type == TASK_END:
            logger.debug("Worker %d got end task, exiting." % wid)
            try:
                rep = (task_type, True, wid, (wid, w.getReport()))
            except Exception as e:
                if params.wrap_exception:
                    e = multiprocessing.ExceptionWithTraceback(
                        e, e.__traceback__)
                rep = (task_type, False, wid, (wid, e))
            put(rep)
            break

        if task_type == TASK_JOB:
            task_data = (task_data, )

        for t in task_data:
            try:
                res = (TASK_JOB, True, wid, w.process(t))
            except Exception as e:
                if params.wrap_exception:
                    e = multiprocessing.ExceptionWithTraceback(
                        e, e.__traceback__)
                res = (TASK_JOB, False, wid, e)
            put(res)

            completed += 1

    logger.debug("Worker %d completed %d tasks." % (wid, completed))
Example #8
def _real_worker_func_unsafe(params):
    init_start_time = time.perf_counter()

    wid = params.wid

    stats = ExecutionStats()
    stats.registerTimer('WorkerInit')

    # In a context where `multiprocessing` is using the `spawn` forking model,
    # the new process doesn't inherit anything, so we lose all our logging
    # configuration here. Let's set it up again.
    if (hasattr(multiprocessing, 'get_start_method') and
            multiprocessing.get_start_method() == 'spawn'):
        if not params.is_unit_testing:
            from piecrust.main import _pre_parse_chef_args
            _pre_parse_chef_args(sys.argv[1:])
        else:
            _pre_parse_pytest_args()
    elif params.is_unit_testing:
        _pre_parse_pytest_args()

    from piecrust.main import ColoredFormatter
    root_logger = logging.getLogger()
    root_logger.handlers[0].setFormatter(ColoredFormatter(
        ('[W-%d]' % wid) + '[%(name)s] %(message)s'))

    logger.debug("Worker %d initializing..." % wid)

    # We don't need those.
    params.inqueue._writer.close()
    params.outqueue._reader.close()

    # Initialize the underlying worker class.
    w = params.worker_class(*params.initargs)
    w.wid = wid
    try:
        w.initialize()
    except Exception as ex:
        logger.error("Worker %d failed to initialize." % wid)
        logger.exception(ex)
        raise

    stats.stepTimerSince('WorkerInit', init_start_time)

    # Start pumping!
    completed = 0
    time_in_get = 0
    time_in_put = 0
    is_first_get = True
    get = params.inqueue.get
    put = params.outqueue.put

    while True:
        get_start_time = time.perf_counter()
        task = get()
        if not is_first_get:
            time_in_get += (time.perf_counter() - get_start_time)
        else:
            is_first_get = False

        task_type, task_data = task

        # Job task(s)... just do it.
        if task_type == TASK_JOB or task_type == TASK_JOB_BATCH:

            task_data_list = task_data
            if task_type == TASK_JOB:
                task_data_list = [task_data]

            result_list = []

            for td in task_data_list:
                try:
                    worker_res = w.process(td)
                    result_list.append((td, worker_res, True))
                except Exception as e:
                    logger.debug(
                        "Error processing job, sending exception to main process:")
                    logger.debug(traceback.format_exc())
                    error_res = _get_worker_exception_data(wid)
                    result_list.append((td, error_res, False))

            res = (task_type, wid, result_list)
            put_start_time = time.perf_counter()
            put(res)
            time_in_put += (time.perf_counter() - put_start_time)

            completed += len(task_data_list)

        # End task... gather stats to send back to the main process.
        elif task_type == TASK_END:
            logger.debug("Worker %d got end task, exiting." % wid)
            stats.registerTimer('Worker_%d_TaskGet' % wid, time=time_in_get)
            stats.registerTimer('Worker_all_TaskGet', time=time_in_get)
            stats.registerTimer('Worker_%d_ResultPut' % wid, time=time_in_put)
            stats.registerTimer('Worker_all_ResultPut', time=time_in_put)
            try:
                stats.mergeStats(w.getStats())
                stats_data = stats.toData()
                rep = (task_type, wid, [(task_data, (wid, stats_data), True)])
            except Exception as e:
                logger.debug(
                    "Error getting report, sending exception to main process:")
                logger.debug(traceback.format_exc())
                we = _get_worker_exception_data(wid)
                rep = (task_type, wid, [(task_data, (wid, we), False)])
            put(rep)
            break

        # Emergency abort.
        elif task_type == _TASK_ABORT_WORKER:
            logger.debug("Worker %d got abort signal." % wid)
            break

        else:
            raise Exception("Unknown task type: %s" % task_type)

    try:
        w.shutdown()
    except Exception as e:
        logger.error("Worker %s failed to shutdown.")
        logger.exception(e)
        raise

    logger.debug("Worker %d completed %d tasks." % (wid, completed))
Example #9
def _real_worker_func(params):
    # In a context where `multiprocessing` is using the `spawn` forking model,
    # the new process doesn't inherit anything, so we lose all our logging
    # configuration here. Let's set it up again.
    if (hasattr(multiprocessing, 'get_start_method') and
            multiprocessing.get_start_method() == 'spawn'):
        from piecrust.main import _pre_parse_chef_args
        _pre_parse_chef_args(sys.argv[1:])

    wid = params.wid
    logger.debug("Worker %d initializing..." % wid)

    # We don't need those.
    params.inqueue._writer.close()
    params.outqueue._reader.close()

    # Initialize the underlying worker class.
    w = params.worker_class(*params.initargs)
    w.wid = wid
    try:
        w.initialize()
    except Exception as ex:
        logger.error("Working failed to initialize:")
        logger.exception(ex)
        params.outqueue.put(None)
        return

    use_threads = False
    if use_threads:
        # Create threads to read/write the jobs and results from/to the
        # main arbitrator process.
        local_job_queue = queue.Queue()
        reader_thread = threading.Thread(
                target=_job_queue_reader,
                args=(params.inqueue.get, local_job_queue),
                name="JobQueueReaderThread")
        reader_thread.start()

        local_result_queue = queue.Queue()
        writer_thread = threading.Thread(
                target=_job_results_writer,
                args=(local_result_queue, params.outqueue.put),
                name="JobResultWriterThread")
        writer_thread.start()

        get = local_job_queue.get
        put = local_result_queue.put_nowait
    else:
        get = params.inqueue.get
        put = params.outqueue.put

    # Start pumping!
    completed = 0
    time_in_get = 0
    time_in_put = 0
    while True:
        get_start_time = time.perf_counter()
        task = get()
        time_in_get += (time.perf_counter() - get_start_time)

        task_type, task_data = task
        if task_type == TASK_END:
            logger.debug("Worker %d got end task, exiting." % wid)
            wprep = {
                    'WorkerTaskGet': time_in_get,
                    'WorkerResultPut': time_in_put}
            try:
                rep = (task_type, True, wid, (wid, w.getReport(wprep)))
            except Exception as e:
                logger.debug("Error getting report: %s" % e)
                if params.wrap_exception:
                    e = multiprocessing.ExceptionWithTraceback(
                            e, e.__traceback__)
                rep = (task_type, False, wid, (wid, e))
            put(rep)
            break

        if task_type == TASK_JOB:
            task_data = (task_data,)

        for t in task_data:
            try:
                res = (TASK_JOB, True, wid, w.process(t))
            except Exception as e:
                if params.wrap_exception:
                    e = multiprocessing.ExceptionWithTraceback(
                            e, e.__traceback__)
                res = (TASK_JOB, False, wid, e)

            put_start_time = time.perf_counter()
            put(res)
            time_in_put += (time.perf_counter() - put_start_time)

            completed += 1

    if use_threads:
        logger.debug("Worker %d waiting for reader/writer threads." % wid)
        local_result_queue.put_nowait(None)
        reader_thread.join()
        writer_thread.join()

    w.shutdown()

    logger.debug("Worker %d completed %d tasks." % (wid, completed))
Example #10
def _real_worker_func_unsafe(params):
    init_start_time = time.perf_counter()

    wid = params.wid

    stats = ExecutionStats()
    stats.registerTimer('WorkerInit')

    # In a context where `multiprocessing` is using the `spawn` forking model,
    # the new process doesn't inherit anything, so we lose all our logging
    # configuration here. Let's set it up again.
    if (hasattr(multiprocessing, 'get_start_method')
            and multiprocessing.get_start_method() == 'spawn'):
        if not params.is_unit_testing:
            from piecrust.main import _pre_parse_chef_args
            _pre_parse_chef_args(sys.argv[1:])
        else:
            _pre_parse_pytest_args()
    elif params.is_unit_testing:
        _pre_parse_pytest_args()

    from piecrust.main import ColoredFormatter
    root_logger = logging.getLogger()
    root_logger.handlers[0].setFormatter(
        ColoredFormatter(('[W-%d]' % wid) + '[%(name)s] %(message)s'))

    logger.debug("Worker %d initializing..." % wid)

    # We don't need those.
    params.inqueue._writer.close()
    params.outqueue._reader.close()

    # Initialize the underlying worker class.
    w = params.worker_class(*params.initargs)
    w.wid = wid
    try:
        w.initialize()
    except Exception as ex:
        logger.error("Worker %d failed to initialize." % wid)
        logger.exception(ex)
        raise

    stats.stepTimerSince('WorkerInit', init_start_time)

    # Start pumping!
    completed = 0
    time_in_get = 0
    time_in_put = 0
    is_first_get = True
    get = params.inqueue.get
    put = params.outqueue.put

    while True:
        get_start_time = time.perf_counter()
        task = get()
        if not is_first_get:
            time_in_get += (time.perf_counter() - get_start_time)
        else:
            is_first_get = False

        task_type, task_data = task

        # Job task(s)... just do it.
        if task_type == TASK_JOB or task_type == TASK_JOB_BATCH:

            task_data_list = task_data
            if task_type == TASK_JOB:
                task_data_list = [task_data]

            result_list = []

            for td in task_data_list:
                try:
                    worker_res = w.process(td)
                    result_list.append((td, worker_res, True))
                except Exception as e:
                    logger.debug(
                        "Error processing job, sending exception to main process:"
                    )
                    logger.debug(traceback.format_exc())
                    error_res = _get_worker_exception_data(wid)
                    result_list.append((td, error_res, False))

            res = (task_type, wid, result_list)
            put_start_time = time.perf_counter()
            put(res)
            time_in_put += (time.perf_counter() - put_start_time)

            completed += len(task_data_list)

        # End task... gather stats to send back to the main process.
        elif task_type == TASK_END:
            logger.debug("Worker %d got end task, exiting." % wid)
            stats.registerTimer('Worker_%d_TaskGet' % wid, time=time_in_get)
            stats.registerTimer('Worker_all_TaskGet', time=time_in_get)
            stats.registerTimer('Worker_%d_ResultPut' % wid, time=time_in_put)
            stats.registerTimer('Worker_all_ResultPut', time=time_in_put)
            try:
                stats.mergeStats(w.getStats())
                stats_data = stats.toData()
                rep = (task_type, wid, [(task_data, (wid, stats_data), True)])
            except Exception as e:
                logger.debug(
                    "Error getting report, sending exception to main process:")
                logger.debug(traceback.format_exc())
                we = _get_worker_exception_data(wid)
                rep = (task_type, wid, [(task_data, (wid, we), False)])
            put(rep)
            break

        # Emergency abort.
        elif task_type == _TASK_ABORT_WORKER:
            logger.debug("Worker %d got abort signal." % wid)
            break

        else:
            raise Exception("Unknown task type: %s" % task_type)

    try:
        w.shutdown()
    except Exception as e:
        logger.error("Worker %s failed to shutdown.")
        logger.exception(e)
        raise

    logger.debug("Worker %d completed %d tasks." % (wid, completed))