def test_list_path_no_path(self):
        """Tests the behavior of list_path when asking for the root keys (no
		path specified).
		"""
        test_path = None

        expected_paths = [{
            'name': 'src.MajorSource%d' % i,
            'type': 'dir'
        } for i in xrange(len(self.data_source.data_sources))]

        with self._mock_ds_method(
                '_request_paths_from_ds') as mock_request_paths:
            mock_request_paths.side_effect = [[path]
                                              for path in expected_paths]

            actual_paths = self.data_source.list_path(test_path)

            T.assert_equal(mock_request_paths.call_count,
                           len(self.data_source.data_sources))

            for ds in self.data_source.data_sources:
                mock_request_paths.assert_any_call(ds, test_path)

        T.assert_equal(expected_paths, actual_paths)

        for expected_path, expected_data_source in zip(
                expected_paths, self.data_source.data_sources):
            T.assert_in(expected_path['name'],
                        self.data_source.key_mapping_cache)
            T.assert_equal(
                self.data_source.key_mapping_cache[expected_path['name']],
                expected_data_source)
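
The test above and its *_duplicates variant further down exercise a fan-out-and-cache pattern: list_path(None) asks every configured data source for its root keys and remembers which source owns each key. A minimal sketch of what such a method could look like; the class is hypothetical and only the method and attribute names come from the tests themselves:

class AggregatingDataSource(object):
    """Hypothetical aggregator matching the names used in the tests."""

    def __init__(self, data_sources):
        self.data_sources = data_sources
        self.key_mapping_cache = {}

    def _request_paths_from_ds(self, data_source, path):
        raise NotImplementedError  # a network call in the real implementation

    def list_path(self, path):
        all_paths = []
        for ds in self.data_sources:
            for entry in self._request_paths_from_ds(ds, path):
                # The first data source to report a key wins; repeats from
                # later sources are dropped from both the result and the
                # cache, which is what the duplicates test asserts.
                if entry['name'] not in self.key_mapping_cache:
                    self.key_mapping_cache[entry['name']] = ds
                    all_paths.append(entry)
        return all_paths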
Example #2
 def test_overlap_node_and_node_pools(self):
     tron_config = dict(
         nodes=[dict(name="sameName", hostname="localhost")],
         node_pools=[dict(name="sameName", nodes=["sameNode"])])
     expected_msg = "Node and NodePool names must be unique sameName"
     exception = assert_raises(ConfigError, valid_config, tron_config)
     assert_in(expected_msg, str(exception))
 def test_discover_test_with_unknown_import_error(self):
     """Insure that DiscoveryError is raised when a test which raises an unusual exception upon import is discovered."""
     stdout, stderr = cmd_output(
         'python', '-m', 'testify.test_program', self.broken_import_module,
     )
     T.assert_in('DISCOVERY FAILURE', stdout)
     T.assert_in('AttributeError: aaaaa!', stderr)
 def test_overlap_node_and_node_pools(self):
     tron_config = dict(
         nodes=[dict(name="sameName", hostname="localhost")], node_pools=[dict(name="sameName", nodes=["sameNode"])]
     )
     expected_msg = "Node and NodePool names must be unique sameName"
     exception = assert_raises(ConfigError, valid_config, tron_config)
     assert_in(expected_msg, str(exception))
    def test_exception_in_setup_phase(self):
        """If a class_setup method raises an exception, this exception is
        reported as an error in all of the test methods in the test case. The
        methods are then treated as flakes and re-run.
        """
        # Pull and run the test case, thereby causing class_setup to run.
        test_case = get_test(self.server, 'runner')
        assert_equal(len(test_case['methods']), 3)
        # The last method will be the special 'run' method which signals the
        # entire test case is complete (including class_teardown).
        assert_equal(test_case['methods'][-1], 'run')

        self.run_test('runner')

        # 'classTearDown' is a deprecated synonym for 'class_teardown'. We
        # don't especially care about it, but it's in there.
        #
        # Exceptions during execution of class_setup cause test methods to fail
        # and get requeued as flakes. They aren't reported now because they
        # aren't complete.
        expected_methods = set(['classTearDown', 'run'])
        # self.run_test sets us up to collect results submitted at
        # class_teardown completion time. class_setup_teardown methods report
        # the result of their teardown phase at "class_teardown completion"
        # time. So, when testing the setup phase of class_setup_teardown, we
        # will see an "extra" method.
        #
        # Child classes which exercise class_setup_teardown will set
        # self.class_setup_teardown_method_name so we can add it to
        # expected_methods here.
        if hasattr(self, 'class_setup_teardown_method_name'):
            expected_methods.add(self.class_setup_teardown_method_name)
        seen_methods = self.get_seen_methods(self.test_reporter.test_complete.calls)
        # This produces a clearer diff than simply asserting the sets are
        # equal.
        assert_equal(expected_methods.symmetric_difference(seen_methods), set())

        # Verify the failed test case is re-queued for running.
        assert_equal(self.server.test_queue.empty(), False)
        requeued_test_case = get_test(self.server, 'runner2')
        assert_in(self.dummy_test_case.__name__, requeued_test_case['class_path'])

        # Reset reporter.
        self.test_reporter.test_complete = turtle.Turtle()

        # Run tests again.
        self.run_test('runner2')

        # This time, test methods have been re-run as flakes. Now that these
        # methods are complete, they should be reported.
        expected_methods = set(['test1', 'test2', 'classTearDown', 'run'])
        if hasattr(self, 'class_setup_teardown_method_name'):
            expected_methods.add(self.class_setup_teardown_method_name)
        seen_methods = self.get_seen_methods(self.test_reporter.test_complete.calls)
        # This produces a clearer diff than simply asserting the sets are
        # equal.
        assert_equal(expected_methods.symmetric_difference(seen_methods), set())

        # Verify no more test cases have been re-queued for running.
        assert_equal(self.server.test_queue.empty(), True)
    def assert_checklist_for_tags(self, tags, requestid=None):
        num_checks = 0
        checks = []

        # Gather reference checklists from the code
        for tag in tags:
            # While the tag name is 'search-backend', the checklist type
            # is truncated to 'search'.
            if tag == 'search-backend':
                tag = 'search'

            if tag not in checklist_reminders:
                continue

            plain_list = checklist_reminders[tag]
            checks += [(tag, check) for check in plain_list]

            cleanup_tag = '%s-cleanup' % tag
            cleanup_list = checklist_reminders[cleanup_tag]
            checks += [(cleanup_tag, check) for check in cleanup_list]

        num_checks = len(checks)

        reqid = self.make_request_with_tags(tags, requestid)
        checklists = self.get_checklists(reqid)

        T.assert_equal(num_checks, len(checklists))
        for check in checks:
            T.assert_in((reqid, check[0], check[1]), checklists)

        return reqid
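
The helper assumes checklist_reminders maps each tag to a list of reminder strings, with a parallel '<tag>-cleanup' entry. An illustrative shape only; the real entries live in pushmanager and these values are made up:

checklist_reminders = {
    'search': ['Before Certifying - Do In Prod'],
    'search-cleanup': ['After Certifying - Do In Prod'],
    'hoods': ['Notify the requester to deploy Geoservices to stage'],
    'hoods-cleanup': ['Verify Geoservices after the push'],
}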
Example #7
    def test_invalid_node_pool_config(self):
        test_config = textwrap.dedent("""
            nodes:
                - name: node0
                  hostname: node0

            node_pools:
                - name: pool0
                  hostname: node1
                - name: pool1
                  nodes: [node0, pool0]
            jobs:
                - name: somejob
                  node: pool1
                  schedule: "interval 30s"
                  actions:
                    - name: first
                      command: "echo 1"
        """)
        expected_msg = "NodePool pool0 is missing options"
        exception = assert_raises(
            ConfigError,
            valid_config_from_yaml,
            test_config,
        )
        assert_in(expected_msg, str(exception))
    def test_checklist_single_search_tag(self):
        with fake_checklist_request():
            # insert fake data from FakeDataMixin
            fake_pushid = 2
            self.insert_pushes()
            self.insert_requests()
            test1_request = self.get_requests_by_user('testuser1')[0]
            self.insert_pushcontent(test1_request['id'], fake_pushid)

            # insert fake checklist data
            checklist_queries = [
                db.push_checklist.insert({
                    'request': test1_request['id'],
                    'type': 'search',
                    'target': 'prod'
                }),
                db.push_checklist.insert({
                    'request': test1_request['id'],
                    'type': 'search-cleanup',
                    'target': 'post-verify-prod'
                }),
            ]
            db.execute_transaction_cb(checklist_queries, on_db_return)

            uri = "/checklist?id=%d" % fake_pushid
            response = self.fetch(uri)
            T.assert_equal(response.error, None)
            T.assert_not_in("No checklist items for this push", response.body)
            T.assert_not_in("multiple requests", response.body)
            T.assert_in("for testuser1", response.body)
            T.assert_in("Before Certifying - Do In Prod", response.body)
Example #9
    def test_deprecated_mapper_final_positional_arg(self):
        def mapper(k, v):
            pass

        def reducer(k, v):
            pass

        def mapper_final():
            pass

        stderr = StringIO()
        with no_handlers_for_logger():
            log_to_stream('mrjob.job', stderr)
            step = MRJob.mr(mapper, reducer, mapper_final)

        # should be allowed to specify mapper_final as a positional arg,
        # but we log a warning
        assert_equal(step, MRJob.mr(mapper=mapper,
                                    reducer=reducer,
                                    mapper_final=mapper_final))
        assert_in('mapper_final should be specified', stderr.getvalue())

        # can't specify mapper_final as a positional and keyword arg
        assert_raises(
            TypeError,
            MRJob.mr, mapper, reducer, mapper_final, mapper_final=mapper_final)
    def test_hoods_checklists(self):
        with fake_checklist_request():
            # insert fake data from FakeDataMixin
            fake_pushid = 2
            self.insert_pushes()
            self.insert_requests()
            req = self.get_requests_by_user('testuser1')[0]
            self.insert_pushcontent(req['id'], fake_pushid)

            # insert fake checklist data
            checklist_queries = []
            checklist_items = (
                {'request': req['id'], 'type': 'hoods', 'target': 'stage'},
                {'request': req['id'], 'type': 'hoods', 'target': 'prod'},
                {'request': req['id'], 'type': 'hoods-cleanup', 'target': 'post-verify-stage'},
            )
            for checklist_item in checklist_items:
                checklist_queries.append(db.push_checklist.insert(checklist_item))

            db.execute_transaction_cb(checklist_queries, on_db_return)

            uri = "/checklist?id=%d" % fake_pushid
            response = self.fetch(uri)
            T.assert_equal(response.error, None)
            T.assert_not_in("No checklist items for this push", response.body)
            T.assert_in("Notify testuser1 to deploy Geoservices to stage", response.body)
            T.assert_in("Notify testuser1 to deploy Geoservices to prod", response.body)
Example #11
    def test_create_scratch_uri(self):
        # "walrus" bucket will be ignored; it doesn't start with "mrjob-"
        self.add_mock_s3_data({'walrus': {}, 'zebra': {}})

        runner = EMRJobRunner(conf_path=False, s3_sync_wait_time=0.01)

        # bucket name should be mrjob- plus 16 random hex digits
        s3_scratch_uri = runner._opts['s3_scratch_uri']
        assert_equal(s3_scratch_uri[:11], 's3://mrjob-')
        assert_equal(s3_scratch_uri[27:], '/tmp/')

        # bucket shouldn't actually exist yet
        scratch_bucket, _ = parse_s3_uri(s3_scratch_uri)
        assert_not_in(scratch_bucket, self.mock_s3_fs.keys())

        # need to do something to ensure that the bucket actually gets
        # created. let's launch a (mock) job flow
        jfid = runner.make_persistent_job_flow()
        assert_in(scratch_bucket, self.mock_s3_fs.keys())
        runner.make_emr_conn().terminate_jobflow(jfid)

        # once our scratch bucket is created, we should re-use it
        runner2 = EMRJobRunner(conf_path=False)
        assert_equal(runner2._opts['s3_scratch_uri'], s3_scratch_uri)
        s3_scratch_uri = runner._opts['s3_scratch_uri']
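
The naming rule spelled out in the comments above ('s3://mrjob-' is 11 characters, plus 16 hex digits, with '/tmp/' starting at index 27) can be checked in one shot. A hedged equivalent of the two slice assertions, assuming the URI contains nothing else:

import re

SCRATCH_URI_RE = re.compile(r'^s3://mrjob-[0-9a-f]{16}/tmp/$')
assert SCRATCH_URI_RE.match('s3://mrjob-0123456789abcdef/tmp/')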
 def assert_request_buttons(self, tree, button_classes, button_text):
     found_buttons = []
     for button in tree.iter('button'):
         T.assert_in(button.attrib['class'], button_classes)
         T.assert_in(button.text, button_text)
         found_buttons.append(button)
     T.assert_equal(len(button_classes), len(found_buttons))
    def test_invalid_job_collation(self):
        jobs = FrozenDict({'test_collision0': ConfigJob(name='test_collision0',
            node='node0',
            schedule=ConfigIntervalScheduler(timedelta=datetime.timedelta(0,
                                                                          20)),
            actions=FrozenDict({'action0_0': ConfigAction(name='action0_0',
                                                          command='test_command0.0',
                                                          requires=(),
                                                          node=None)}),
            queueing=True,
            run_limit=50,
            all_nodes=False,
            cleanup_action=ConfigCleanupAction(command='test_command0.1',
                                               requires=(),
                                               name='cleanup',
                                               node=None),
            enabled=True,
            allow_overlap=False)})

        services = FrozenDict({'test_collision0': ConfigService(name='test_collision0',
                        node='node0',
                        pid_file='/var/run/%(name)s-%(instance_number)s.pid',
                        command='service_command0',
                        monitor_interval=20,
                        restart_interval=None,
                        count=2)})
        fake_config = mock.Mock()
        fake_config.jobs = jobs
        fake_config.services = services
        expected_message = "Collision found for identifier 'MASTER.test_collision0'"
        exception = assert_raises(ConfigError, collate_jobs_and_services, {'MASTER': fake_config})
        assert_in(expected_message, str(exception))
Example #14
    def test_failing_child_initialized_hook(self):
        def child_initialized_hook(child_pid):
            raise Exception, "child_initialized hook raises exception"

        # When the child_initialized hook fails, the parent process will
        # exit. To test a failing initialization hook we fork and watch
        # the new child.
        pid = os.fork()
        if not pid:
            event_hooks = {"child_initialized": child_initialized_hook}
            with testing.no_stderr():
                # This will fail. redirecting stderr to /dev/null will
                # silence the test output.
                self.run_child_function_in_catbox(event_hooks=event_hooks)
        else:
            status = 0
            wait_pid = 0
            try:
                for _ in range(5):
                    (wait_pid, status, _) = os.wait4(pid, os.WNOHANG)
                    if wait_pid == pid:
                        break
                    time.sleep(.1)
            except OSError, e:
                T.assert_in("No child processes", e)
            else:
Example #16
    def test_traceback_size_limit(self):
        """Insert a failure with a long exception and make sure it gets truncated."""
        conn = self.reporter.conn

        test_case = DummyTestCase()
        result = TestResult(test_case.test_fail)
        result.start()
        result.end_in_failure(
            (AssertionError, AssertionError('A' * 200), None))

        with patch.object(self.reporter.options, 'sql_traceback_size', 50):
            with patch.object(
                    result,
                    'format_exception_info') as mock_format_exception_info:
                mock_format_exception_info.return_value = [
                    "AssertionError: %s" % ('A' * 200), 'A' * 200
                ]

                self.reporter.test_complete(result.to_dict())

            assert self.reporter.report()

        failure = conn.execute(Failures.select()).fetchone()
        assert_equal(len(failure.traceback), 50)
        assert_equal(len(failure.error), 50)
        assert_in('Exception truncated.', failure.traceback)
        assert_in('Exception truncated.', failure.error)
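
A sketch of the truncation rule the two assertions imply: the stored traceback and error fields are capped at sql_traceback_size characters and stamped with a marker. Illustrative only; the real logic lives in testify's SQL reporter:

def truncate_for_db(text, limit, marker='Exception truncated.'):
    # When over the cap, the result is exactly `limit` characters and
    # contains the marker, which is what the assertions above check.
    if len(text) <= limit:
        return text
    return (marker + ' ' + text)[:limit]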
Example #17
    def test_known_match_different_path_prefix(self):
        self._known_sha1_input[0]['file_path'] = '/new_path/Libkern'
        output_blobs = self.run_test(LookupHashesFilter,
                                     self._known_sha1_input)
        T.assert_equal(1, len(output_blobs))

        T.assert_in('osxcollector_shadowserver', output_blobs[0])
Example #18
    def test_checklist_duplicate(self):
        with fake_checklist_request():
            # insert fake data from FakeDataMixin
            fake_pushid = 2
            self.insert_pushes()
            self.insert_requests()
            test1_request = self.get_requests_by_user('testuser1')[0]
            test2_request = self.get_requests_by_user('testuser2')[0]
            self.insert_pushcontent(test1_request['id'], fake_pushid)
            self.insert_pushcontent(test2_request['id'], fake_pushid)

            # insert fake checklist data
            checklist_queries = []
            for req in (test1_request, test2_request):
                checklist_queries.append(
                    db.push_checklist.insert({
                        'request': req['id'],
                        'type': 'search',
                        'target': 'prod'
                    }))
                checklist_queries.append(
                    db.push_checklist.insert({
                        'request': req['id'],
                        'type': 'search-cleanup',
                        'target': 'post-verify-prod'
                    }))
            db.execute_transaction_cb(checklist_queries, on_db_return)

            uri = "/checklist?id=%d" % fake_pushid
            response = self.fetch(uri)
            T.assert_equal(response.error, None)
            T.assert_not_in("No checklist items for this push", response.body)
            T.assert_not_equal(
                re.search(r"for testuser\d,testuser\d", response.body), None)
            T.assert_in("Before Certifying - Do In Prod", response.body)
Example #20
    def test_process_queue_duplicate(self):
        duplicate_req = copy.deepcopy(self.fake_request)
        duplicate_req['id'] = 11
        with nested(
            mock.patch("%s.pushmanager.core.git.GitQueue.verify_branch_failure" % __name__),
            mock.patch("%s.pushmanager.core.git.GitQueue.verify_branch_successful" % __name__),
            # This will fail, stop logging errors
            mock.patch("%s.pushmanager.core.git.logging.error" % __name__),
            mock.patch(
                "%s.pushmanager.core.git.GitQueue._get_request_with_sha" % __name__,
                return_value={'id': 10, 'state': 'requested'}
            ),
            self.mocked_update_request(self.fake_request, duplicate_req)
        ):
            # GitQueue._get_request_with_sha returning a value means
            # we have a duplicated request. This should trigger a
            # failure
            T.assert_equal(pushmanager.core.git.GitQueue.verify_branch_failure.call_count, 1)
            T.assert_equal(pushmanager.core.git.GitQueue.verify_branch_successful.call_count, 0)

            # Match the error message for duplicate revision. error_msg
            # should be the last item of the first call object's *args list
            # (from mock library).
            T.assert_in(
                "another request with the same revision sha",
                pushmanager.core.git.GitQueue.verify_branch_failure.call_args_list[0][0][1]
            )
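
The call_args_list[0][0][1] indexing leans on how mock records calls: each entry behaves like an (args, kwargs) pair. A minimal illustration with made-up values:

import mock

m = mock.Mock()
m('first-positional', 'error message')
call_args, call_kwargs = m.call_args_list[0]  # (positional args, keyword args)
assert call_args[1] == 'error message'        # the same shape as [0][0][1] above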
    def _test_cors(self, capabilities):
        with remote_webdriver(capabilities) as driver:
            with ProxyServer.in_context() as proxy:
                driver.get('http://localhost:{0}/'.format(HTTP_PORT))
                driver.find_element_by_css_selector('.first-name').send_keys(
                    self.SENSITIVE_INFO['first_name'])
                driver.find_element_by_css_selector('.last-name').send_keys(
                    self.SENSITIVE_INFO['last_name'])
                driver.find_element_by_css_selector('.email').send_keys(
                    self.SENSITIVE_INFO['email'])
                driver.find_element_by_css_selector('.phone').send_keys(
                    self.SENSITIVE_INFO['phone'])
                driver.find_element_by_css_selector('.cors-now').click()
                T.assert_equal(
                    {
                        'original_request': self.SENSITIVE_INFO,
                        'success': True,
                    },
                    json.loads(
                        driver.find_element_by_css_selector(
                            '.cors-status div').text))

                # Make sure none of our sensitive values were leaked
                for value in self.SENSITIVE_INFO.values():
                    T.assert_not_in(value, proxy.sniffable_content)
                T.assert_in('jquery', proxy.sniffable_content)
    def test_failing_child_initialized_hook(self):
        def child_initialized_hook(child_pid):
            raise Exception, "child_initialized hook raises exception"

        # When the child_initialized hook fails, the parent process will
        # exit. To test a failing initialization hook we fork and watch
        # the new child.
        pid = os.fork()
        if not pid:
            event_hooks = {"child_initialized" : child_initialized_hook}
            with testing.no_stderr():
                # This will fail. redirecting stderr to /dev/null will
                # silence the test output.
                self.run_child_function_in_catbox(event_hooks=event_hooks)
        else:
            status = 0
            wait_pid = 0
            try:
                for _ in range(5):
                    (wait_pid, status, _) = os.wait4(pid, os.WNOHANG)
                    if wait_pid == pid:
                        break
                    time.sleep(.1)
            except OSError, e:
                T.assert_in("No child processes", e)
            else:
Example #23
    def test_find_counters_0_20(self):
        counters, step_num = parse_hadoop_counters_from_line(
            r'Job JOBID="job_201106092314_0003" FINISH_TIME="1307662284564" JOB_STATUS="SUCCESS" FINISHED_MAPS="2" FINISHED_REDUCES="1" FAILED_MAPS="0" FAILED_REDUCES="0" COUNTERS="{(org\.apache\.hadoop\.mapred\.JobInProgress$Counter)(Job Counters )[(TOTAL_LAUNCHED_REDUCES)(Launched reduce tasks)(1)][(TOTAL_LAUNCHED_MAPS)(Launched map tasks)(2)][(DATA_LOCAL_MAPS)(Data-local map tasks)(2)]}{(FileSystemCounters)(FileSystemCounters)[(FILE_BYTES_READ)(FILE_BYTES_READ)(10547174)][(HDFS_BYTES_READ)(HDFS_BYTES_READ)(49661008)][(FILE_BYTES_WRITTEN)(FILE_BYTES_WRITTEN)(21773078)][(S3_BYTES_WRITTEN)(S3_BYTES_WRITTEN)(49526580)]}{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(REDUCE_INPUT_GROUPS)(Reduce input groups)(18843)][(COMBINE_OUTPUT_RECORDS)(Combine output records)(0)][(MAP_INPUT_RECORDS)(Map input records)(29884)][(REDUCE_SHUFFLE_BYTES)(Reduce shuffle bytes)(11225840)][(REDUCE_OUTPUT_RECORDS)(Reduce output records)(29884)][(SPILLED_RECORDS)(Spilled Records)(59768)][(MAP_OUTPUT_BYTES)(Map output bytes)(50285563)][(MAP_INPUT_BYTES)(Map input bytes)(49645726)][(MAP_OUTPUT_RECORDS)(Map output records)(29884)][(COMBINE_INPUT_RECORDS)(Combine input records)(0)][(REDUCE_INPUT_RECORDS)(Reduce input records)(29884)]}{(profile)(profile)[(reducer time \\(processing\\): 2\.51)(reducer time \\(processing\\): 2\.51)(1)][(mapper time \\(processing\\): 0\.50)(mapper time \\(processing\\): 0\.50)(1)][(mapper time \\(other\\): 3\.78)(mapper time \\(other\\): 3\.78)(1)][(mapper time \\(processing\\): 0\.46)(mapper time \\(processing\\): 0\.46)(1)][(reducer time \\(other\\): 6\.31)(reducer time \\(other\\): 6\.31)(1)][(mapper time \\(other\\): 3\.72)(mapper time \\(other\\): 3\.72)(1)]}" .'
        )

        assert_in('reducer time (processing): 2.51', counters['profile'])
        assert_equal(step_num, 3)
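
The COUNTERS payload in these 0.20-era history lines nests (id)(name)(value) triples inside {...} groups. A rough sketch of pulling the bracket structure apart with non-greedy regexes keyed on the ')(' adjacencies; it leaves backslash-escaped names as-is, while mrjob's real parser in mrjob.parse also unescapes them and recovers the step number asserted above:

import re

GROUP_RE = re.compile(r'{\((?P<id>.*?)\)\((?P<name>.*?)\)(?P<counters>\[.*?\])}')
COUNTER_RE = re.compile(r'\[\((?P<id>.*?)\)\((?P<name>.*?)\)\((?P<value>\d+)\)\]')

def rough_parse_counters(counter_string):
    # Map group name -> {counter name: value}; names keep their escapes.
    counters = {}
    for group in GROUP_RE.finditer(counter_string):
        for c in COUNTER_RE.finditer(group.group('counters')):
            counters.setdefault(group.group('name'), {})[c.group('name')] = \
                int(c.group('value'))
    return counters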
Example #24
    def test_bad_requires(self):
        test_config = (
            BASE_CONFIG
            + """
jobs:
    -
        name: "test_job0"
        node: node0
        schedule: "interval 20s"
        actions:
            -
                name: "action0_0"
                command: "test_command0.0"
            -
                name: "action0_1"
                command: "test_command0.1"

    -
        name: "test_job1"
        node: node0
        schedule: "interval 20s"
        actions:
            -
                name: "action1_0"
                command: "test_command1.0"
                requires: action0_0

        """
        )
        expected_message = "jobs.test_job1.action1_0 has a dependency " '"action0_0" that is not in the same job!'
        exception = assert_raises(ConfigError, load_config, test_config)
        assert_in(expected_message, str(exception))
Example #25
 def test_messy_error(self):
     counter_string = 'Job JOBID="_001" FAILED_REDUCES="0" COUNTERS="THIS IS NOT ACTUALLY A COUNTER"'
     with no_handlers_for_logger(''):
         stderr = StringIO()
         log_to_stream('mrjob.parse', stderr, level=logging.WARN)
         assert_equal((None, None), parse_hadoop_counters_from_line(counter_string))
         assert_in('Cannot parse Hadoop counter line', stderr.getvalue())
Example #27
    def test_bad_requires(self):
        test_config = BASE_CONFIG + """
jobs:
    -
        name: "test_job0"
        node: node0
        schedule: "interval 20s"
        actions:
            -
                name: "action0_0"
                command: "test_command0.0"
            -
                name: "action0_1"
                command: "test_command0.1"

    -
        name: "test_job1"
        node: node0
        schedule: "interval 20s"
        actions:
            -
                name: "action1_0"
                command: "test_command1.0"
                requires: [action0_0]

        """
        expected_message = ('jobs.MASTER.test_job1.action1_0 has a dependency '
                            '"action0_0" that is not in the same job!')
        exception = assert_raises(ConfigError, valid_config_from_yaml,
                                  test_config)
        assert_in(expected_message, str(exception))
    def test_bad_requires(self):
        test_config = BASE_CONFIG + """
jobs:
    -
        name: "test_job0"
        node: node0
        schedule: "interval 20s"
        actions:
            -
                name: "action0_0"
                command: "test_command0.0"
            -
                name: "action0_1"
                command: "test_command0.1"

    -
        name: "test_job1"
        node: node0
        schedule: "interval 20s"
        actions:
            -
                name: "action1_0"
                command: "test_command1.0"
                requires: [action0_0]

        """
        expected_message = ('jobs.MASTER.test_job1.action1_0 has a dependency '
                '"action0_0" that is not in the same job!')
        exception = assert_raises(ConfigError, valid_config_from_yaml, test_config)
        assert_in(expected_message, str(exception))
Example #29
 def test_invalid_named_update(self):
     test_config = """bozray:"""
     test_config = yaml.load(test_config)
     expected_message = "Unknown keys in NamedConfigFragment : bozray"
     exception = assert_raises(ConfigError, validate_fragment, 'foo',
                               test_config)
     assert_in(expected_message, str(exception))
    def test_contains_ancestral(self):
        cd = ChainedDict(**{"the_key": True})
        cd2 = ChainedDict(parent=cd, **{"the_other_key": True})

        T.assert_in("the_key", cd2)
        T.assert_in("the_other_key", cd2)
        T.assert_not_in("the_other_key", cd)
	def test_list_path_no_path_duplicates(self):
		"""Tests that when no path is specified the correct results are returned
		and the repeat key is not cached with the wrong data source.
		"""
		test_path = None

		expected_paths = [{
			'name': 'src.MajorSource%d' % i,
			'type': 'dir'
		} for i in xrange(len(self.data_source.data_sources))]

		with self._mock_ds_method('_request_paths_from_ds') as mock_request_paths:
			mock_request_path_list = [[path] for path in expected_paths]
			mock_request_path_list[-1].append({
				'name': 'src.MajorSource1',
				'type': 'dir'
			})
			mock_request_paths.side_effect = mock_request_path_list

			actual_paths = self.data_source.list_path(test_path)

			T.assert_equal(mock_request_paths.call_count, len(self.data_source.data_sources))

			for ds in self.data_source.data_sources:
				mock_request_paths.assert_any_call(ds, test_path)

		T.assert_equal(expected_paths, actual_paths)

		for expected_path, expected_data_source in zip(expected_paths, self.data_source.data_sources):
			T.assert_in(expected_path['name'], self.data_source.key_mapping_cache)
			T.assert_equal(self.data_source.key_mapping_cache[expected_path['name']], expected_data_source)
    def test_checklist_duplicate(self):
        with fake_checklist_request():
            # insert fake data from FakeDataMixin
            fake_pushid = 2
            self.insert_pushes()
            self.insert_requests()
            test1_request = self.get_requests_by_user('testuser1')[0]
            test2_request = self.get_requests_by_user('testuser2')[0]
            self.insert_pushcontent(test1_request['id'], fake_pushid)
            self.insert_pushcontent(test2_request['id'], fake_pushid)

            # insert fake checklist data
            checklist_queries = []
            for req in (test1_request, test2_request):
                checklist_queries.append(db.push_checklist.insert({
                    'request': req['id'],
                    'type': 'search',
                    'target': 'prod'
                }))
                checklist_queries.append(db.push_checklist.insert({
                    'request': req['id'],
                    'type': 'search-cleanup',
                    'target': 'post-verify-prod'
                }))
            db.execute_transaction_cb(checklist_queries, on_db_return)

            uri = "/checklist?id=%d" % fake_pushid
            response = self.fetch(uri)
            T.assert_equal(response.error, None)
            T.assert_not_in("No checklist items for this push", response.body)
            T.assert_not_equal(re.search("for testuser\d,testuser\d", response.body), None)
            T.assert_in("Before Certifying - Do In Prod", response.body)
	def test_find_data_source_for_stat_key(self):
		"""Tests _find_data_source_for_stat_key when it's provided by one of
		the configured data sources.
		"""

		expected_data_source = {
			'data_server_url': "http://b.com",
			'data_source_hash': util.generate_ds_key("another.data.source"),
			'secret_key': "TEST_SECRET_TWO"
		}

		test_key = 'src.our_key'

		def fake_paths_from_ds(data_source, path):
			if data_source == expected_data_source:
				return [{"name": test_key},]
			else:
				return [{"name": "src.not_our_key"},]

		with mock.patch.object(self.data_source, '_request_paths_from_ds', fake_paths_from_ds):
			actual_ds = self.data_source._find_data_source_for_stat_key(test_key)

		T.assert_equal(expected_data_source, actual_ds)
		T.assert_in(test_key, self.data_source.key_mapping_cache)
		T.assert_equal(expected_data_source, self.data_source.key_mapping_cache[test_key])
	def test_list_path_no_path(self):
		"""Tests the behavior of list_path when asking for the root keys (no
		path specified).
		"""
		test_path = None

		expected_paths = [{
			'name': 'src.MajorSource%d' % i,
			'type': 'dir'
		} for i in xrange(len(self.data_source.data_sources))]

		with self._mock_ds_method('_request_paths_from_ds') as mock_request_paths:
			mock_request_paths.side_effect = [[path] for path in expected_paths]

			actual_paths = self.data_source.list_path(test_path)

			T.assert_equal(mock_request_paths.call_count, len(self.data_source.data_sources))

			for ds in self.data_source.data_sources:
				mock_request_paths.assert_any_call(ds, test_path)

		T.assert_equal(expected_paths, actual_paths)

		for expected_path, expected_data_source in zip(expected_paths, self.data_source.data_sources):
			T.assert_in(expected_path['name'], self.data_source.key_mapping_cache)
			T.assert_equal(self.data_source.key_mapping_cache[expected_path['name']], expected_data_source)
Example #36
    def test_fail_after_a_while(self, print_exc_mock, print_warning_mock):
        processes = vimap.pool.fork(
            (worker_raise_exc_with_curleys.init_args(init=i) for i in xrange(100)), in_queue_size_factor=2
        )
        processes.imap([-1] * 3000 + list(range(50)))

        # Check yielded output.
        res_to_compare = []
        for inp, out, typ in processes.zip_in_out_typ():
            if typ == "exception":
                res_to_compare.append((inp, serialize_error(out.value), typ))
            else:
                res_to_compare.append((inp, out, typ))
        # All the -1s will produce None output.
        expected_res_to_compare = [(-1, None, "output")] * 3000
        # Once we get to the positive numbers, we start causing 50 of
        # the 100 workers to throw exceptions.
        expected_res_to_compare.extend(
            [(i, serialize_error(ValueError("{0} curley braces!")), "exception") for i in range(50)]
        )
        T.assert_sorted_equal(res_to_compare, expected_res_to_compare)

        # Check our exception logging.
        calls = print_exc_mock.call_args_list
        errors = [serialize_error(call_args[0].value) for call_args, _ in calls]
        T.assert_equal(errors, [serialize_error(ValueError("{0} curley braces!"))] * 50)

        # NOTE: Sometimes, the weakref in the pool is deleted, so 'has_exceptions' is
        # not set, and the pool prints warnings we don't actually care about. Make
        # sure that this is the only warning printed.
        if print_warning_mock.call_args_list:
            T.assert_equal(len(print_warning_mock.call_args_list), 1)
            [warning] = print_warning_mock.call_args_list
            T.assert_in("Pool disposed before input was consumed", warning[0][0])
Example #37
 def verify_message_from_child(self, expected_message=None):
     expected_message = expected_message or self.default_expected_message_from_child
     actual_message_from_child = self.poll()
     if actual_message_from_child:
         T.assert_in(expected_message, actual_message_from_child)
     else:
         raise ChildDidNotReportBackException
Example #39
    def test_deprecated_mapper_final_positional_arg(self):
        def mapper(k, v):
            pass

        def reducer(k, v):
            pass

        def mapper_final():
            pass

        stderr = StringIO()
        with no_handlers_for_logger():
            log_to_stream('mrjob.job', stderr)
            step = MRJob.mr(mapper, reducer, mapper_final)

        # should be allowed to specify mapper_final as a positional arg,
        # but we log a warning
        assert_equal(
            step,
            MRJob.mr(mapper=mapper, reducer=reducer,
                     mapper_final=mapper_final))
        assert_in('mapper_final should be specified', stderr.getvalue())

        # can't specify mapper_final as a positional and keyword arg
        assert_raises(TypeError,
                      MRJob.mr,
                      mapper,
                      reducer,
                      mapper_final,
                      mapper_final=mapper_final)
Example #40
 def test_text_test_logger_prints_discovery_failure_message(self):
     runner = TestRunner(
         'does.not.exist',
         test_reporters=[TextTestLogger(self.options, stream=self.stream)],
     )
     runner.run()
     logger_output = self.stream.getvalue()
     assert_in('DISCOVERY FAILURE!', logger_output)
Example #41
 def test_login_post(self):
     request = {"username": "******", "password": "******"}
     with mock.patch.object(logging, "exception"):
         response = self.fetch("/login",
                               method="POST",
                               body=urllib.urlencode(request))
         T.assert_in("Invalid username or password specified.",
                     response.body)
Example #42
 def test_escape(self):
     T.assert_equal(
         [k for k in self.d if self.ed[k] != self.escaped[k]],
         [],
         "EscapedDict values doesn't match with pre-computed valued"
     )
     T.assert_in("&", self.ed['amp'])
     T.assert_not_in(">", self.ed['gt'])
    def verify_tag_rename(self, oldtag, newtag, success, db_results):
        self.check_db_results(success, db_results)

        # id, user, state, repo, branch, revision, *tags*, created, etc...
        tags = [result[6] for result in db_results.fetchall()]

        T.assert_not_in(oldtag, tags)
        T.assert_in(newtag, tags)
Example #45
 def test_text_test_logger_prints_discovery_failure_message(self):
     runner = TestRunner(
         self.broken_import_module,
         test_reporters=[TextTestLogger(self.options, stream=self.stream)],
     )
     runner.run()
     logger_output = self.stream.getvalue()
     assert_in('Discovery failure!', logger_output)
Example #46
    def test_find_weird_counters_0_20(self):
        counters = parse_hadoop_counters_from_line(r'Job JOBID="job_201106132124_0001" FINISH_TIME="1308000435810" JOB_STATUS="SUCCESS" FINISHED_MAPS="2" FINISHED_REDUCES="1" FAILED_MAPS="0" FAILED_REDUCES="0" COUNTERS="{(org\.apache\.hadoop\.mapred\.JobInProgress$Counter)(Job Counters )[(TOTAL_LAUNCHED_REDUCES)(Launched reduce tasks)(1)][(RACK_LOCAL_MAPS)(Rack-local map tasks)(2)][(TOTAL_LAUNCHED_MAPS)(Launched map tasks)(2)]}{(FileSystemCounters)(FileSystemCounters)[(FILE_BYTES_READ)(FILE_BYTES_READ)(1494)][(S3_BYTES_READ)(S3_BYTES_READ)(3726)][(FILE_BYTES_WRITTEN)(FILE_BYTES_WRITTEN)(3459)][(S3_BYTES_WRITTEN)(S3_BYTES_WRITTEN)(1663)]}{(weird counters)(weird counters)[(\\[\\])(\\[\\])(68)][(\\\\)(\\\\)(68)][(\\{\\})(\\{\\})(68)][(\\(\\))(\\(\\))(68)][(\.)(\.)(68)]}{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(REDUCE_INPUT_GROUPS)(Reduce input groups)(154)][(COMBINE_OUTPUT_RECORDS)(Combine output records)(0)][(MAP_INPUT_RECORDS)(Map input records)(68)][(REDUCE_SHUFFLE_BYTES)(Reduce shuffle bytes)(1901)][(REDUCE_OUTPUT_RECORDS)(Reduce output records)(154)][(SPILLED_RECORDS)(Spilled Records)(672)][(MAP_OUTPUT_BYTES)(Map output bytes)(3446)][(MAP_INPUT_BYTES)(Map input bytes)(2483)][(MAP_OUTPUT_RECORDS)(Map output records)(336)][(COMBINE_INPUT_RECORDS)(Combine input records)(0)][(REDUCE_INPUT_RECORDS)(Reduce input records)(336)]}" .')

        assert_in('{}', counters['weird counters'])
        assert_in('()', counters['weird counters'])
        assert_in('.', counters['weird counters'])
        assert_in('[]', counters['weird counters'])
        assert_in('\\', counters['weird counters'])
 def test_item_decryption(self):
     source_data = base64.b64decode("R+JJyjeDfDC49x0XwaW5eJkJhG9COpfzFPSo8P2ZDa6ZYeLRzyjeukgdtDj5Yg7F0l2fMCbHKmOtQUXRQxCfsaCcsTeDR10WGMlzQtJoygmdMreG9joX18JPFWtDo/P94sbn8Wd0Q+Sx18Whdo0lRA==")
     item_data = "b3BkYXRhMDG0BgAAAAAAAJ8/vFjLfpCDOYs0hawjOFkZd6QTUS9A3QQi7IvEgsoBya8JWTRH/TiBsQi7KuzfxoCM1qmpiNgX9+ej8mfiS9SdzLNpZoCCz15ubLWR2vVpHBXs8ESX0ffbX6irvNI3vp+zYKXmnrP0BMCHjOVEOHWuW+8OIvsYSkkVZAYB0t4PaV+nQzlsg47huAI6VA7KGA7ZK/U6dNoCDoHBo/v8BKwEXmVy9Xg3O5b0EBHL0++jWd++d+TpwFuMWwgABEf+qLn8IO0oUww4wxEvpclB1k6Z/+Y+pNnB2aRDTBvATQ4wULPsRxOl9W7pwMpLcI9edwYJ2MmoDeCOUX7lnGg9HfUZKKguWDR/HY5N45r02J/C7N2bROSwkbjO5yPIn/PpTvH7+qUxeYXYxOpge5vYDwo/Mx2AmqRqA7olUWJFsBQSN6ZHGR7hYIXbAWUWfBy8vcZhWl5yGZNQ5HDxXiJ0hlN9aWk/sUyi4Loz09UexlAhj9IrAtEOGDJteiyuv9BsJFIQLqU7Lb8/R7d2IQCFcMHGd+gvKx1B/RjSQirViZHTjgUOE998u8QtEhBt5Bm0/yqi1D8ZKLgWHoRw9KrK/T/2q4i59tf8KWne4/hDSAX2vBVyAoRU/fEuelSSfWfAXmG32mkoHd32SL/nJA+IfvI0TLS+mSHPXkDkwNkaakeU1OBov/3g+1UpGo4yDioxBkn1L5hqmqJl4jf9rjXRnzVdAy3cON1PefhTFfYgYT/LQVgb1L6zoasIoC6FJuvEQuBXYKQFWpOmtEQgcEeBooJh3UnZe/YzsN5dR9EwxsJwAOgpOA0Bq0edSLyJtmW/wlGGkKhw7tHvpjaabBpmcBWbvjPfSbFhGxYQ7joxripEyaM937nZofN/a4vSH3KHvU0JvFd3f5P3wkgif9JkPq2bvcGxcI1tiisABteOXPbGi+KQZHzWFYTKzg9/ZGYhiw5a2p2gaZD+IcT1NjjQKo1o5+/iSWkLQaOOqBN3yY+WYcj9JJSrJ6ZkX+zkROaUClG1i7EWAPiW3SeKKzGLsDOmDJL9N16otP1j6mG3maI2TLoVcG1dZYXUtmhY+2zERStA5e+o78A3nVBGSI8JEo6mVSJdhJZTpEdldS8/PP5YsiMa27FoTQqfqh9aQA+9upKxe+ca7h5O8RgtJrbCeDgvxPsBljM51Y40fGfA9fCZynu+djXlirAFPsexgFRCkq6YILRUqQzS79FH7JCoptpKqApR0C3udsNo4Xhj6G0xEm7FvmvrWKn4ls8mCP225dlaMAu94qRq6BB7UGX0di6YlrhGgMOGThMIZEQrZ3Yt5KFAtPp4tJzhnL4G4691ErwKBVnp1TruXQHYv88gkmK16fEuYOFZlXhIaaVXD2QKRVPoNejA+Liq35FOxMMWJdAknOaUUqBOTSfRQrUPdO348u7XDYM0aH9RF+tio7qtZ9iBh6X1P/WRR20jQwPOHmulW/V6Lk0bKCYy8v7kPOV++IQowkd5B3D4yOgDs8N0EMoCN/N+PDX5xBCXKwa/tMSd5fvcf81SeOlSuZ+DSo0OCoEtZf56EDYg15GuYbT4oez8+0NYYe2MyjP5uG+yb2hEnVg9vuQVC63bMrHCbFNjUfawJnJdu3eLzLtisRZgFnYi6hqzbGDmozmgB0b/FfJBckKCTjs7qJVs9KLxGHmfbI5Yk5wo0POnlN92zL4t/E1WxOiCUzjKyhB4/rd+4na7xxoORB44DKSfLm4h4caGUUEM68Sif9F+U3Hchl62GsRSCXZMtX4CH/g/aKmwuTwqcMGP5e8csAa+/vaua16Y3MT0G5yROpyATZ6vdf5mI6ZUGFFfBj+gUVuvcrOvVH+wMGHqsat35GIz6uA831aVcFfSG43jc4LrfPev9DGjaSf2OUMvALV2pb13CmyNKhjHe3MmczwlrTqh2H0cOv81jPOW2E4GqPMRHCxpmtENvG+OxZcRBmVJwbZj9Zx+3OSdmMqPFoLlpAoDhZuWT7WsjSlHciNqVk3llllt70hinVF+bLL9WL2ELwMB2e26uXp++QWxa1jIGzCyziOby1pA4G7cNOX3hjLIpqnY1AVn7v/kS+kHtGdOuRw249UA4wgSQtSvWYXEmiDxfYLHdzkRnsUlU41Ldbzsvv5l0T2Dv5BdgyippAiStE0N0Xpm56uB5R03EHjuhN1uomYwAxQCTzvs+6dCsEtQ6ZOfVGeqGJ5PcBxJ8D7aEjbacGAYhpPj6aD4S6/mTwJud8u5AGBKPU1nMnIKeCpMXUvuEaaK9Uv0+HkAptrYOLOWm3Hkcy+5XGWPjIAOq8ykYS9YHnwKxejfkkzEqjuArZRJgaVLSD6C0Fy3CctNMNesWTNEiw=="
     item_key, item_hmac = crypt_util.opdata1_decrypt_key(source_data, self.MASTER_KEY, self.MASTER_HMAC)
     plaintext_item = crypt_util.opdata1_decrypt_item(item_data, item_key, item_hmac)
     item_dict = simplejson.loads(plaintext_item)
     T.assert_in('sections', item_dict)
     T.assert_equal(item_dict['sections'][0]['title'], 'set.name')
Example #48
 def test_messy_error(self):
     counter_string = 'Job JOBID="_001" FAILED_REDUCES="0" COUNTERS="THIS IS NOT ACTUALLY A COUNTER"'
     with no_handlers_for_logger(''):
         stderr = StringIO()
         log_to_stream('mrjob.parse', stderr, level=logging.WARN)
         assert_equal((None, None),
                      parse_hadoop_counters_from_line(counter_string))
         assert_in('Cannot parse Hadoop counter line', stderr.getvalue())
Example #49
 def test_both(self):
     nsca_sender = self.nsca_sender()
     nsca_sender.send_host('myhost', 3, 'UNKNOWN')
     nsca_sender.send_service('myhost', 'myservice', 0, 'OK')
     checks = self.expect_checks(2)
     # ordering is unpredictable
     assert_in(HostCheckResult(host_name='myhost', status=3, output='UNKNOWN'), checks)
     assert_in(ServiceCheckResult(host_name='myhost', service_name='myservice', status=0, output='OK'), checks)
 def test_discover_test_with_broken_import(self):
     """Insure that DiscoveryError is raised when a test which imports a
     non-existent module is discovered."""
     try:
         discovered_tests = test_discovery.discover(self.broken_import_module)
         discovered_tests.next()
     except DiscoveryError, exc:
         assert_in('No module named non_existent_module', str(exc))
Example #51
 def test_valid_identity_file_missing_private_key(self):
     exception = assert_raises(
         ConfigError,
         config_parse.valid_identity_file,
         '/file/not/exist',
         self.context,
     )
     assert_in("Private key file", str(exception))
Example #52
 def test_valid_known_hosts_file_missing(self):
     exception = assert_raises(
         ConfigError,
         config_parse.valid_known_hosts_file,
         '/bogus/path',
         self.context,
     )
     assert_in('Known hosts file /bogus/path', str(exception))
    def test_discover_test_with_unknown_import_error(self):
        """Insure that DiscoveryError is raised when a test which raises an unusual exception upon import is discovered."""

        try:
            discovered_tests = test_discovery.discover(self.broken_import_module)
            discovered_tests.next()
        except DiscoveryError, exc:
            assert_in('Got unknown error when trying to import', str(exc))
Example #54
 def test_missing_dir(self):
     exception = assert_raises(
         ConfigError,
         valid_output_stream_dir,
         'bogus-dir',
         NullConfigContext,
     )
     assert_in("is not a directory", str(exception))
Example #56
 def test_mixed_behavior_2(self):
     stderr = StringIO()
     with no_handlers_for_logger():
         log_to_stream('mrjob.job', stderr)
         mr_job = self.MRInconsistentJob2()
         assert_equal(mr_job.options.input_protocol, None)
         assert_equal(mr_job.input_protocol().__class__, ReprProtocol)
         assert_in('custom behavior', stderr.getvalue())
Example #57
    def test_exit_42_job(self):
        mr_job = MRExit42Job(['--no-conf'])
        mr_job.sandbox()

        try:
            mr_job.run_job()
        except Exception, e:
            assert_in('returned non-zero exit status 42', repr(e))
            return
Example #58
 def test_environment_variables_018(self):
     runner = LocalMRJobRunner(hadoop_version='0.18', conf_path=False)
     # clean up after we're done. On Windows, job names only have
     # millisecond resolution, so these two tests end up trying to
     # create the same temp dir
     with runner as runner:
         runner._setup_working_dir()
         assert_in('mapred_cache_localArchives',
                   runner._subprocess_env('M', 0, 0).keys())