Example #1
0
    def test_latin_hypercube_equally_spaced(self):
        """Test that generate_latin_hypercube_points returns properly spaced points.

        Sampling from a latin hypercube results in a set of points that in each dimension are drawn
        uniformly from sub-intervals of the domain; this tests that every sub-interval in each dimension
        contains exactly one point.

        """
        for domain in self.domains_to_test:
            for num_points in self.num_points_to_test:
                domain_bounds = domain._domain_bounds
                points = generate_latin_hypercube_points(
                    num_points, domain_bounds)

                for dim in xrange(domain.dim):
                    # The size of each slice (sub-interval) in this dimension
                    sub_domain_width = domain_bounds[dim].length / float(
                        num_points)
                    # Sort along this dimension; the lambda parameter is named
                    # ``point`` so it does not shadow the ``points`` list.
                    points = sorted(points, key=lambda point: point[dim])
                    for i, point in enumerate(points):
                        # The i-th sorted point must fall inside the i-th
                        # slice, i.e. exactly one point per sub-interval.
                        min_val = domain_bounds[dim].min + sub_domain_width * i
                        max_val = min_val + sub_domain_width
                        T.assert_gte(point[dim], min_val)
                        T.assert_lte(point[dim], max_val)
Example #2
0
 def _test_interface_returns_as_expected(self):
     """Integration test for the bandit endpoints.

     For every bandit subtype of this endpoint and every historical-info
     fixture: POST a payload, then check the response allocates over exactly
     the sampled arms, with each allocation in [0, 1] and the total 1.0.

     """
     for subtype in BANDIT_ENDPOINTS_TO_SUBTYPES[self._endpoint]:
         for historical_info in self._historical_infos:
             json_payload = self._build_json_payload(
                 subtype, historical_info)
             # set(dict) iterates the keys directly; no comprehension needed.
             arm_names = set(historical_info.arms_sampled)
             resp = self.testapp.post(self._moe_route.endpoint,
                                      json_payload)
             resp_schema = BanditResponse()
             resp_dict = resp_schema.deserialize(json.loads(resp.body))
             resp_arm_names = set(resp_dict['arm_allocations'])
             T.assert_sets_equal(arm_names, resp_arm_names)
             # The allocations should be in range [0, 1].
             # The sum of all allocations should be 1.0.
             total_allocation = 0
             for allocation in resp_dict['arm_allocations'].itervalues():
                 T.assert_gte(allocation, 0)
                 T.assert_lte(allocation, 1)
                 total_allocation += allocation
             # NOTE(review): exact float equality on a sum of floats can be
             # fragile; switch to an almost-equal assertion if this flakes.
             T.assert_equal(total_allocation, 1.0)
Example #3
0
    def test_all_constant_liar_methods_function(self):
        """Integration test: each constant liar ``lie_method`` runs to completion."""
        for python_domain, python_gp in self.gp_test_environments:
            python_cov, historical_data = python_gp.get_core_data_copy()

            for lie_method in CONSTANT_LIAR_METHODS:

                num_to_sample = 2
                json_payload = self._build_json_payload(
                        python_domain,
                        python_cov,
                        historical_data,
                        num_to_sample,
                        lie_method=lie_method,
                        )

                resp = self.testapp.post(
                    GP_NEXT_POINTS_CONSTANT_LIAR_ENDPOINT, json_payload)
                resp_dict = GpNextPointsResponse().deserialize(
                    json.loads(resp.body))

                # The endpoint must return the requested number of points,
                # each with the GP's dimensionality.
                T.assert_in('points_to_sample', resp_dict)
                T.assert_equal(len(resp_dict['points_to_sample']), num_to_sample)
                T.assert_equal(len(resp_dict['points_to_sample'][0]), python_gp.dim)

                # Expected improvement is never negative.
                T.assert_in('status', resp_dict)
                T.assert_in('expected_improvement', resp_dict['status'])
                T.assert_gte(resp_dict['status']['expected_improvement'], 0.0)
Example #4
0
    def test_interface_returns_same_as_cpp(self):
        """Integration test for the /gp/next_points/* endpoints."""
        for moe_route in ALL_NEXT_POINTS_MOE_ROUTES:
            for test_case in self.gp_test_environments:
                for num_to_sample in (1, 2, 4):
                    python_domain, python_gp = test_case
                    python_cov, historical_data = python_gp.get_core_data_copy()

                    # Some routes need extra payload kwargs: constant-liar
                    # needs a lie value; EPI with several points exercises
                    # the L-BFGS-B optimizer.
                    extra_kwargs = {}
                    if moe_route.route_name == GP_NEXT_POINTS_CONSTANT_LIAR_ROUTE_NAME:
                        extra_kwargs['lie_value'] = 0.0
                    elif moe_route.route_name == GP_NEXT_POINTS_EPI_ROUTE_NAME and num_to_sample > 1:
                        extra_kwargs['l_bfgs_b'] = True
                    json_payload = self._build_json_payload(
                        python_domain, python_cov, historical_data,
                        num_to_sample, **extra_kwargs)

                    resp = self.testapp.post(moe_route.endpoint, json_payload)
                    resp_dict = GpNextPointsResponse().deserialize(
                        json.loads(resp.body))

                    # The response carries num_to_sample points of dim size.
                    T.assert_in('points_to_sample', resp_dict)
                    T.assert_equal(len(resp_dict['points_to_sample']), num_to_sample)
                    T.assert_equal(len(resp_dict['points_to_sample'][0]), python_gp.dim)

                    # Expected improvement is never negative.
                    T.assert_in('status', resp_dict)
                    T.assert_in('expected_improvement', resp_dict['status'])
                    T.assert_gte(resp_dict['status']['expected_improvement'], 0.0)
Example #5
0
    def test_all_fds_cleaned_up(self):
        """FDs opened by forking a worker pool must all be closed once it finishes.

        Snapshots the open-FD set at three points (before fork, after fork,
        after the pool drains) and diffs the snapshots.
        """
        initial_open_fds = get_open_fds()
        pool = vimap.pool.fork_identical(basic_worker, num_workers=1)
        after_fork_open_fds = get_open_fds()
        # Drain the pool completely so cleanup has a chance to run.
        list(pool.imap([1, 2, 3]).zip_in_out())
        after_finish_open_fds = get_open_fds()

        # Check that some FDs were opened after forking
        after_fork = difference_open_fds(initial_open_fds, after_fork_open_fds)
        # T.assert_equal(after_fork['closed'], [])
        T.assert_gte(len(after_fork['opened']),
                     2)  # NOTE(review): comment said "at least 3" but the bound is 2 — confirm intended minimum
        # All opened files should be FIFOs
        if not all(info.modes == ['fifo']
                   for info in after_fork['opened'].values()):
            print("Infos: {0}".format(after_fork['opened']))
            T.assert_not_reached("Some infos are not FIFOs")

        # Finishing the pool should close (at least) the FIFOs it opened.
        after_cleanup = difference_open_fds(after_fork_open_fds,
                                            after_finish_open_fds)
        T.assert_gte(len(after_cleanup['closed']), 2)

        # Net effect over the whole test: nothing left open.
        left_around = difference_open_fds(initial_open_fds,
                                          after_finish_open_fds)
        if len(left_around['opened']) != 0:
            # Print diagnostics about any leaked queue FDs before failing.
            queue_fds_left_around = dict(item
                                         for item in self.queue_fds.items()
                                         if item[0] in left_around['opened'])
            print("Queue FDs left around: {0}".format(queue_fds_left_around))
        T.assert_equal(len(left_around['opened']), 0)
Example #6
0
    def test_all_fds_cleaned_up(self):
        """Forking a pool opens FIFOs; draining it must close every one."""
        fds_before = get_open_fds()
        pool = vimap.pool.fork_identical(basic_worker, num_workers=1)
        fds_after_fork = get_open_fds()
        list(pool.imap([1, 2, 3]).zip_in_out())
        fds_after_finish = get_open_fds()

        # Forking should have opened some FDs ...
        after_fork = difference_open_fds(fds_before, fds_after_fork)
        T.assert_gte(len(after_fork['opened']),
                     2)  # should have at least 3 open fds
        # ... and every one of them should be a FIFO.
        non_fifos = [info for info in after_fork['opened'].values()
                     if info.modes != ['fifo']]
        if non_fifos:
            print("Infos: {0}".format(after_fork['opened']))
            T.assert_not_reached("Some infos are not FIFOs")

        # Draining the pool should close (at least) the fork-opened FDs.
        after_cleanup = difference_open_fds(fds_after_fork, fds_after_finish)
        T.assert_gte(len(after_cleanup['closed']), 2)

        # Net effect over the whole test: nothing left open.
        left_around = difference_open_fds(fds_before, fds_after_finish)
        if len(left_around['opened']) != 0:
            leaked_queue_fds = dict(
                (fd, info) for fd, info in self.queue_fds.items()
                if fd in left_around['opened'])
            print("Queue FDs left around: {0}".format(leaked_queue_fds))
        T.assert_equal(len(left_around['opened']), 0)
Example #7
0
    def test_all_constant_liar_methods_function(self):
        """Integration test: every constant liar ``lie_method`` completes."""
        for test_case in self.gp_test_environments:
            python_domain, python_gp = test_case
            python_cov, historical_data = python_gp.get_core_data_copy()

            for constant_liar_method in CONSTANT_LIAR_METHODS:

                json_payload = self._build_json_payload(
                    python_domain,
                    python_cov,
                    historical_data,
                    2,  # num_to_sample
                    lie_method=constant_liar_method,
                )

                resp = self.testapp.post(
                    GP_NEXT_POINTS_CONSTANT_LIAR_ENDPOINT, json_payload)
                resp_schema = GpNextPointsResponse()
                resp_dict = resp_schema.deserialize(json.loads(resp.body))

                # Two points requested, each with the GP's dimensionality.
                T.assert_in('points_to_sample', resp_dict)
                points = resp_dict['points_to_sample']
                T.assert_equal(len(points), 2)  # num_to_sample
                T.assert_equal(len(points[0]), python_gp.dim)

                # Expected improvement is never negative.
                T.assert_in('status', resp_dict)
                status = resp_dict['status']
                T.assert_in('expected_improvement', status)
                T.assert_gte(status['expected_improvement'], 0.0)
Example #8
0
    def test_weekly(self):
        """'every monday at 01:00' schedules the next run on a Monday."""
        sch = scheduler_from_config('every monday at 01:00')
        run_time = sch.next_run_time(None)

        assert_gte(run_time, self.now)
        # calendar.weekday() == 0 means Monday.
        weekday = calendar.weekday(run_time.year, run_time.month,
                                   run_time.day)
        assert_equal(weekday, 0)
Example #9
0
    def test_daily(self):
        """'every day' schedules the next run for midnight the next day."""
        sch = scheduler_from_config('every day')
        run_time = sch.next_run_time(None)

        assert_gte(run_time, self.now)
        # Presumably the fixture clock is June 1st, so the next run lands on
        # June 2nd at 00:00 — confirm against self.now in the fixture.
        assert_equal(run_time.month, 6)
        assert_equal(run_time.day, 2)
        assert_equal(run_time.hour, 0)
Example #10
0
    def test_daily(self):
        """'every day' yields a next run at midnight of the following day."""
        scheduler_obj = scheduler_from_config('every day')
        next_run = scheduler_obj.next_run_time(None)

        assert_gte(next_run, self.now)
        # Month/day/hour pin the run to midnight June 2nd (fixture-dependent).
        assert_equal(next_run.month, 6)
        assert_equal(next_run.day, 2)
        assert_equal(next_run.hour, 0)
Example #11
0
    def test_weekly(self):
        """'every monday at 01:00' must produce a Monday run time."""
        scheduler_obj = scheduler_from_config('every monday at 01:00')
        next_run = scheduler_obj.next_run_time(None)

        assert_gte(next_run, self.now)
        # Weekday 0 is Monday in the calendar module.
        assert_equal(
            calendar.weekday(next_run.year, next_run.month, next_run.day),
            0)
Example #12
0
    def test_daily(self):
        """Parsing 'every day' and scheduling gives midnight of the next day."""
        config = parse_daily('every day')
        sch = scheduler.GeneralScheduler(**config._asdict())
        run_time = sch.next_run_time(None)

        assert_gte(run_time, self.now)
        # Fixture-dependent: next run expected at 00:00 on June 2nd.
        assert_equal(run_time.month, 6)
        assert_equal(run_time.day, 2)
        assert_equal(run_time.hour, 0)
Example #13
0
    def test_weekly(self):
        """Parsing 'every monday at 01:00' schedules the run on a Monday."""
        config = parse_daily('every monday at 01:00')
        sch = scheduler.GeneralScheduler(**config._asdict())
        run_time = sch.next_run_time(None)

        assert_gte(run_time, self.now)
        # calendar.weekday() returns 0 for Monday.
        weekday = calendar.weekday(run_time.year, run_time.month,
                                   run_time.day)
        assert_equal(weekday, 0)
Example #14
0
    def test_daily_with_time(self):
        """'every day at 02:00' schedules today at exactly 02:00."""
        sch = scheduler_from_config('every day at 02:00')
        run_time = sch.next_run_time(None)

        assert_gte(run_time, self.now)
        # Fixture-dependent: 02:00 is still ahead on June 1st of this year.
        assert_equal(run_time.year, self.now.year)
        assert_equal(run_time.month, 6)
        assert_equal(run_time.day, 1)
        assert_equal(run_time.hour, 2)
        assert_equal(run_time.minute, 0)
Example #15
0
    def test_daily_with_time(self):
        """A daily schedule with an explicit time runs at that exact time."""
        scheduler_obj = scheduler_from_config('every day at 02:00')
        next_run = scheduler_obj.next_run_time(None)

        assert_gte(next_run, self.now)
        # Pin every component down to the minute (02:00 on June 1st).
        assert_equal(next_run.year, self.now.year)
        assert_equal(next_run.month, 6)
        assert_equal(next_run.day, 1)
        assert_equal(next_run.hour, 2)
        assert_equal(next_run.minute, 0)
Example #16
0
    def test_daily_with_time(self):
        """Parsing 'every day at 02:00' schedules the run at 02:00 sharp."""
        config = parse_daily('every day at 02:00')
        sch = scheduler.GeneralScheduler(**config._asdict())
        run_time = sch.next_run_time(None)

        assert_gte(run_time, self.now)
        # Pin every component down to the minute (fixture-dependent date).
        assert_equal(run_time.year, self.now.year)
        assert_equal(run_time.month, 6)
        assert_equal(run_time.day, 1)
        assert_equal(run_time.hour, 2)
        assert_equal(run_time.minute, 0)
Example #17
0
    def test_weekly_in_month(self):
        """'every monday of january at 00:01' rolls over to next January."""
        sch = scheduler_from_config('every monday of january at 00:01')
        run_time = sch.next_run_time(None)

        assert_gte(run_time, self.now)
        # January has already passed, so the run lands in next year's January.
        assert_equal(run_time.year, self.now.year + 1)
        assert_equal(run_time.month, 1)
        assert_equal(run_time.hour, 0)
        assert_equal(run_time.minute, 1)
        # Weekday 0 is Monday.
        weekday = calendar.weekday(run_time.year, run_time.month,
                                   run_time.day)
        assert_equal(weekday, 0)
Example #18
0
    def test_weekly_in_month(self):
        """A month-restricted weekly schedule waits until that month recurs."""
        scheduler_obj = scheduler_from_config('every monday of january at 00:01')
        next_run = scheduler_obj.next_run_time(None)

        assert_gte(next_run, self.now)
        # January is behind us, so the next run is in January of next year,
        # at 00:01 on a Monday (weekday 0).
        assert_equal(next_run.year, self.now.year + 1)
        assert_equal(next_run.month, 1)
        assert_equal(next_run.hour, 0)
        assert_equal(next_run.minute, 1)
        assert_equal(
            calendar.weekday(next_run.year, next_run.month, next_run.day),
            0)
Example #19
0
    def test_pushes_order(self):
        """Pushes come back 'accepting' first, then ordered by modified time."""
        self.insert_pushes()
        pushes, _ = self.api_call("pushes")
        T.assert_length(pushes, 6)

        previous = None
        for push in pushes:
            if previous is not None:
                if push['state'] == 'accepting':
                    # Every 'accepting' push must follow another 'accepting'.
                    T.assert_equal('accepting', previous['state'])
                elif previous['state'] != 'accepting':
                    # Non-accepting pushes are sorted by modification time.
                    T.assert_gte(push['modified'], previous['modified'])
            previous = push
Example #20
0
    def test_pushes_order(self):
        """The pushes listing keeps 'accepting' entries ahead of the rest."""
        self.insert_pushes()
        pushes, _ = self.api_call("pushes")
        T.assert_length(pushes, 6)

        prev = None
        for cur in pushes:
            if prev is not None:
                if cur['state'] == 'accepting':
                    # An 'accepting' push may only appear after another one.
                    T.assert_equal('accepting', prev['state'])
                elif prev['state'] != 'accepting':
                    # Within the non-accepting tail, order is by modified time.
                    T.assert_gte(cur['modified'], prev['modified'])
            prev = cur
    def test_pushes_order(self):
        """Listing order: all "accepting" pushes first, rest by modified time."""
        self.insert_pushes()
        pushes, _ = self.api_call("pushes")
        T.assert_length(pushes, 6)

        previous = None
        for push in pushes:
            if previous is not None:
                if push["state"] == "accepting":
                    # "accepting" pushes must form a prefix of the listing.
                    T.assert_equal("accepting", previous["state"])
                elif previous["state"] != "accepting":
                    # Remaining pushes are ordered by modification time.
                    T.assert_gte(push["modified"], previous["modified"])
            previous = push
Example #22
0
    def test_weekly_in_month(self):
        """Parsing a month-restricted weekly spec rolls over to next January."""
        config = parse_daily('every monday of january at 00:01')
        sch = scheduler.GeneralScheduler(**config._asdict())
        run_time = sch.next_run_time(None)

        assert_gte(run_time, self.now)
        # January already passed this year, so the run is next year's January,
        # at 00:01 on a Monday (calendar weekday 0).
        assert_equal(run_time.year, self.now.year + 1)
        assert_equal(run_time.month, 1)
        assert_equal(run_time.hour, 0)
        assert_equal(run_time.minute, 1)
        weekday = calendar.weekday(run_time.year, run_time.month,
                                   run_time.day)
        assert_equal(weekday, 0)
Example #23
0
    def test_streaming(self):
        """Cleverish test to check that vimap is really streaming. Essentially
        we make the input generator that emits,

            [0, 1, 2, 3, ..., 99]  # variable inputs_which_must_be_processed

        and then emits [None, None, ...] until each of the numerical inputs
        have been processed (fed through the worker, and retrieved as output).
        """
        inputs_which_must_be_processed = frozenset(xrange(100))
        # Mutated by the consumer loop below; the generator's termination
        # condition reads it, so generator and consumer must interleave.
        already_processed = set()
        num_elements_total = 0

        def input_generator():
            # Emit the required inputs in order, then pad with None until the
            # consumer has recorded every required input as processed.
            for i in sorted(inputs_which_must_be_processed):
                yield i
            while not already_processed.issuperset(
                    inputs_which_must_be_processed):
                yield None

        pool = self.fork_pool()
        for in_, _ in pool.imap(input_generator()).zip_in_out():
            already_processed.add(in_)
            num_elements_total += 1

        # NOTE: streaming_lookahead is the number of None elements emitted by
        # input_generator(). It can be greater than zero, when the worker
        # hasn't finished processing the first 100 numerical inputs, but our
        # main thread wants to enqueue more inputs (to keep the workers busy).
        streaming_lookahead = num_elements_total - len(
            inputs_which_must_be_processed)
        T.assert_gte(
            streaming_lookahead,
            0,
            "Sanity check failed.")

        # Note: This can *very* occasionally flake, since we can feed a bunch
        # of stuff to the input queue, pull a bunch to the temporary output
        # buffer (in the queue manager), but only yield one element from the
        # zip_in_out() function.
        #
        # We may refine streaming properties to make this impossible, but in
        # general vimap works under the assumption that the input may be an
        # infinite stream, but should be something we can do some limited
        # non-blocking read-ahead with.
        T.assert_lte(
            streaming_lookahead,
            pool.qm.max_total_in_flight,
            "max_total_in_flight is a hard upper bound, but was violated.")
Example #24
0
    def test_auto_everything(self):
        """Auto-generated job names encode script name, user, and start time."""
        test_start = datetime.datetime.utcnow()

        os.environ['USER'] = '******'
        runner = MRTwoStepJob(['--no-conf']).make_runner()
        match = JOB_NAME_RE.match(runner.get_job_name())

        # Group 1 is the script name, group 2 the user (fixture-dependent).
        assert_equal(match.group(1), 'mr_two_step_job')
        assert_equal(match.group(2), 'mcp')

        # Groups 3-5 encode the job's start timestamp; it must fall within a
        # few seconds of when this test began.
        timestamp = match.group(3) + match.group(4)
        job_start = datetime.datetime.strptime(timestamp, '%Y%m%d%H%M%S')
        job_start = job_start.replace(microsecond=int(match.group(5)))
        assert_gte(job_start, test_start)
        assert_lte(job_start - test_start, datetime.timedelta(seconds=5))
Example #25
0
    def test_auto_everything(self):
        """The default job name is built from the script, user, and clock."""
        started_at = datetime.datetime.utcnow()

        os.environ['USER'] = '******'
        runner = MRTwoStepJob(['--no-conf']).make_runner()
        match = JOB_NAME_RE.match(runner.get_job_name())

        # First two capture groups: script name and user.
        assert_equal(match.group(1), 'mr_two_step_job')
        assert_equal(match.group(2), 'mcp')

        # Remaining groups form the start timestamp (date, time, microseconds);
        # it should be within five seconds of this test starting.
        job_start = datetime.datetime.strptime(
            match.group(3) + match.group(4), '%Y%m%d%H%M%S')
        job_start = job_start.replace(microsecond=int(match.group(5)))
        assert_gte(job_start, started_at)
        assert_lte(job_start - started_at, datetime.timedelta(seconds=5))
Example #26
0
    def test_all_fds_cleaned_up(self):
        """Every FD opened by forking a pool must be closed when it drains."""
        fds_before = get_open_fds()
        pool = vimap.pool.fork_identical(basic_worker, num_workers=1)
        fds_after_fork = get_open_fds()
        list(pool.imap([1, 2, 3]).zip_in_out())
        fds_after_finish = get_open_fds()

        # Forking should have opened some FDs
        after_fork = difference_open_fds(fds_before, fds_after_fork)
        T.assert_gte(len(after_fork['opened']), 2)  # should have at least 3 open fds
        # Every fork-opened FD should be a FIFO
        all_fifos = all(typ == ['fifo'] for typ in after_fork['opened'].values())
        T.assert_equal(all_fifos, True)

        # Draining the pool closes (at least) the fork-opened FDs
        after_cleanup = difference_open_fds(fds_after_fork, fds_after_finish)
        T.assert_gte(len(after_cleanup['closed']), 2)

        # Net effect over the whole test: nothing left open
        left_around = difference_open_fds(fds_before, fds_after_finish)
        T.assert_equal(len(left_around['opened']), 0)
Example #27
0
 def _test_interface_returns_as_expected(self):
     """Integration test for the bandit endpoints."""
     for subtype in BANDIT_ENDPOINTS_TO_SUBTYPES[self._endpoint]:
         for historical_info in self._historical_infos:
             json_payload = self._build_json_payload(subtype, historical_info)
             arm_names = set(historical_info.arms_sampled.iterkeys())
             resp = self.testapp.post(self._moe_route.endpoint, json_payload)
             resp_dict = BanditResponse().deserialize(json.loads(resp.body))
             resp_arm_names = set(resp_dict['arm_allocations'].iterkeys())
             # The response must allocate over exactly the sampled arms.
             T.assert_sets_equal(arm_names, resp_arm_names)
             # Allocations are probabilities: each in [0, 1], total 1.0.
             total_allocation = 0
             for allocation in resp_dict['arm_allocations'].itervalues():
                 T.assert_gte(allocation, 0)
                 T.assert_lte(allocation, 1)
                 total_allocation += allocation
             T.assert_equal(total_allocation, 1.0)
Example #28
0
    def test_interface_returns_same_as_cpp(self):
        """Integration test for the /gp/next_points/* endpoints."""
        for moe_route in ALL_NEXT_POINTS_MOE_ROUTES:
            for test_case in self.gp_test_environments:
                for num_to_sample in (1, 2, 4):
                    python_domain, python_gp = test_case
                    python_cov, historical_data = python_gp.get_core_data_copy()

                    # Build the REST payload; constant-liar needs a lie value,
                    # and EPI with multiple points uses the L-BFGS-B optimizer.
                    route_name = moe_route.route_name
                    if route_name == GP_NEXT_POINTS_CONSTANT_LIAR_ROUTE_NAME:
                        json_payload = self._build_json_payload(
                            python_domain, python_cov, historical_data,
                            num_to_sample, lie_value=0.0)
                    elif route_name == GP_NEXT_POINTS_EPI_ROUTE_NAME and num_to_sample > 1:
                        json_payload = self._build_json_payload(
                            python_domain, python_cov, historical_data,
                            num_to_sample, l_bfgs_b=True)
                    else:
                        json_payload = self._build_json_payload(
                            python_domain, python_cov, historical_data,
                            num_to_sample)

                    resp = self.testapp.post(moe_route.endpoint, json_payload)
                    resp_schema = GpNextPointsResponse()
                    resp_dict = resp_schema.deserialize(json.loads(resp.body))

                    # The response carries num_to_sample points of dim size.
                    T.assert_in('points_to_sample', resp_dict)
                    points = resp_dict['points_to_sample']
                    T.assert_equal(len(points), num_to_sample)
                    T.assert_equal(len(points[0]), python_gp.dim)

                    # Expected improvement is never negative.
                    T.assert_in('status', resp_dict)
                    T.assert_in('expected_improvement', resp_dict['status'])
                    T.assert_gte(
                        resp_dict['status']['expected_improvement'], 0.0)
Example #29
0
 def test_interface_returns_as_expected(self):
     """Integration test for the /bandit/epsilon endpoint."""
     moe_route = BANDIT_EPSILON_MOE_ROUTE
     for subtype in EPSILON_SUBTYPES:
         for historical_info in self.historical_infos_to_test:
             hyperparameter_info = EPSILON_SUBTYPES_TO_DEFAULT_HYPERPARAMETER_INFOS[subtype]
             json_payload = self._build_json_payload(
                 subtype, historical_info, hyperparameter_info)
             arm_names = set(historical_info.arms_sampled.iterkeys())
             resp = self.testapp.post(moe_route.endpoint, json_payload)
             resp_schema = BanditEpsilonResponse()
             resp_dict = resp_schema.deserialize(json.loads(resp.body))
             resp_arm_names = set(resp_dict['arm_allocations'].iterkeys())
             # The response must allocate over exactly the sampled arms.
             T.assert_sets_equal(arm_names, resp_arm_names)
             # Allocations are probabilities: each in [0, 1], summing to 1.0.
             total_allocation = 0
             for allocation in resp_dict['arm_allocations'].itervalues():
                 T.assert_gte(allocation, 0)
                 T.assert_lte(allocation, 1)
                 total_allocation += allocation
             T.assert_equal(total_allocation, 1.0)
Example #30
0
    def test_latin_hypercube_equally_spaced(self):
        """Test that generate_latin_hypercube_points returns properly spaced points.

        Sampling from a latin hypercube results in a set of points that in each dimension are drawn
        uniformly from sub-intervals of the domain; this tests that every sub-interval in each dimension
        contains exactly one point.

        """
        for domain in self.domains_to_test:
            for num_points in self.num_points_to_test:
                domain_bounds = domain._domain_bounds
                points = generate_latin_hypercube_points(num_points, domain_bounds)

                for dim in xrange(domain.dim):
                    # The size of each slice (sub-interval) in this dimension
                    sub_domain_width = domain_bounds[dim].length / float(num_points)
                    # Sort along this dimension; the lambda parameter is named
                    # ``point`` so it no longer shadows the ``points`` list.
                    points = sorted(points, key=lambda point: point[dim])
                    for i, point in enumerate(points):
                        # The i-th sorted point must fall inside the i-th slice
                        min_val = domain_bounds[dim].min + sub_domain_width * i
                        max_val = min_val + sub_domain_width
                        T.assert_gte(point[dim], min_val)
                        T.assert_lte(point[dim], max_val)
Example #31
0
 def check_died_prematurely_warning(self, print_warning_mock):
     """Assert the premature-death warning was printed at least once.

     Every recorded call must carry the exact warning message as its sole
     positional argument.
     """
     # BUG FIX: the original compared call_args_list (a list) against 1;
     # under Python 2's cross-type ordering that comparison always passes,
     # so the assertion could never fail. Compare the length instead.
     T.assert_gte(len(print_warning_mock.call_args_list), 1)
     for (args, kwargs) in print_warning_mock.call_args_list:
         T.assert_equal(args, ('All processes died prematurely!',))
Example #32
0
 def check_died_prematurely_warning(self, print_warning_mock):
     """Assert the premature-death warning was printed at least once.

     Every recorded call must carry the exact warning message as its sole
     positional argument.
     """
     # BUG FIX: comparing call_args_list (a list) to 1 is always true under
     # Python 2's cross-type ordering, so the original assertion could never
     # fail. The intended check is on the number of recorded calls.
     T.assert_gte(len(print_warning_mock.call_args_list), 1)
     for (args, kwargs) in print_warning_mock.call_args_list:
         T.assert_equal(args, ('All processes died prematurely!', ))