def test_walk_error(self):
    """Verify that walk_error() reports OSError details via the logger."""
    from ..census import walk_error
    # Case 1: plain OSError carrying a single filename.
    with patch('desiutil.log.get_logger') as mock_get_logger:
        fake_logger = Mock()
        mock_get_logger.return_value = fake_logger
        try:
            raise OSError(2, 'File not found', 'foo.txt')
        except OSError as err:
            walk_error(err)
        expected = [call.error("[Errno 2] File not found: 'foo.txt'")]
        self.assertListEqual(fake_logger.mock_calls, expected)
    # Case 2: OSError with a second filename (e.g. a rename/copy target).
    with patch('desiutil.log.get_logger') as mock_get_logger:
        fake_logger = Mock()
        mock_get_logger.return_value = fake_logger
        try:
            raise OSError(2, 'File not found', 'foo.txt', None, 'bar.txt')
        except OSError as err:
            walk_error(err)
        expected = [
            call.error("[Errno 2] File not found: 'foo.txt' -> 'bar.txt'")
        ]
        self.assertListEqual(fake_logger.mock_calls, expected)
def test_normal_send(self):
    """Check that Output forwards messages to both the socket and the logger."""
    sock = Mock()
    log = Mock()
    wrapper = Output(sock, log)
    wrapper.info('info(%s)', 'arg1')
    wrapper.debug('debug(%s)', 'arg2')
    wrapper.error('error(%s)', 'arg3')
    wrapper.error('no_args_error(%s)')
    wrapper.log('log(%s)', 'arg4')
    wrapper.say('say(%s)', 'arg5')
    wrapper.say('no_args_say(%s)')
    # debug() is logger-only: nothing for it is written to the socket.
    sock.assert_has_calls([
        call.send(b'{"text": "info(arg1)"}\x00'),
        call.send(b'{"error": "error(arg3)"}\x00'),
        call.send(b'{"error": "no_args_error(%s)"}\x00'),
        call.send(b'{"text": "log(arg4)"}\x00'),
        call.send(b'{"text": "say(arg5)"}\x00'),
        call.send(b'{"text": "no_args_say(%s)"}\x00'),
    ])
    # Every message, including debug, must reach the logger.
    log.assert_has_calls([
        call.info('info(%s)', 'arg1'),
        call.debug('debug(%s)', 'arg2'),
        call.error('error(%s)', 'arg3'),
        call.error('no_args_error(%s)'),
        call.info('log(%s)', 'arg4'),
        call.info('say(%s)', 'arg5'),
        call.info('no_args_say(%s)'),
    ])
def test_send_email_fails(self):
    """A ClientError raised by SES is caught and its message logged."""
    with patch('%s.logger' % pbm, autospec=True) as mock_logger:
        error_response = {
            'Error': {
                'Code': '306',
                'Message': 'Error with Email Address'
            }
        }
        self.mock_boto.send_raw_email = Mock(
            side_effect=ClientError(error_response, 'send_raw_email'))
        self.cls.send_email(
            sender='foo@maheim',
            recipient='*****@*****.**',
            subject='foo',
            body_text='body',
            body_html='<html></html>',
            attachments={'report.html': '<html></html>'})
    # The send was attempted exactly once with the expected envelope...
    assert self.mock_boto.mock_calls == [
        call.send_raw_email(Destinations=['*****@*****.**'],
                            RawMessage={'Data': ANY},
                            Source='foo@maheim')
    ]
    # ...and the failure was reported through the logger.
    assert mock_logger.mock_calls == [
        call.error('Error with Email Address')
    ]
def test_put_metric_catches_error(mock_session):
    """publish_metric() must swallow CloudWatch errors and log them.

    A stubbed CloudWatch client raises InternalServiceError; the publisher
    should not propagate it, and should log the botocore error message.
    """
    client = boto3.client("cloudwatch")
    stubber = Stubber(client)
    stubber.add_client_error("put_metric_data", "InternalServiceError")
    stubber.activate()
    mock_session.client.return_value = client
    publisher = MetricPublisher(mock_session, NAMESPACE)
    dimensions = {
        "DimensionKeyActionType": Action.CREATE.name,
        "DimensionKeyResourceType": RESOURCE_TYPE,
    }
    # BUG FIX: the patch() keyword is ``autospec``; the previous
    # ``auto_spec=True`` was silently passed through as a plain Mock
    # attribute and had no spec-enforcing effect at all.
    with patch("cloudformation_cli_python_lib.metrics.LOG",
               autospec=True) as mock_logger:
        publisher.publish_metric(
            MetricTypes.HandlerInvocationCount,
            dimensions,
            StandardUnit.Count,
            1.0,
            datetime.now(),
        )
    stubber.deactivate()
    expected_calls = [
        call.error(
            "An error occurred while publishing metrics: %s",
            "An error occurred (InternalServiceError) when calling the "
            "PutMetricData operation: ",
        )
    ]
    assert mock_logger.mock_calls == expected_calls
def test_signals():
    """Registered signal callbacks fire in order for warn/error/exception."""
    tracker = make_TestTracker()
    cb = Mock()
    # Wire each tracker signal to a distinct child mock so call order
    # across all three is recorded on the shared parent.
    for signal_name, handler in (('warning', cb.warn),
                                 ('error', cb.error),
                                 ('exception', cb.exception)):
        tracker.signal.register(signal_name, handler)
    tracker.warn('foo')
    assert cb.mock_calls == [call.warn('foo')]
    tracker.error('bar')
    assert cb.mock_calls == [call.warn('foo'), call.error('bar')]
    tracker.exception('baz')
    assert cb.mock_calls == [
        call.warn('foo'),
        call.error('bar'),
        call.exception('baz'),
    ]
def test_invalid_connection_id(self, setup, dependency_mocks):
    """An unknown connection id is logged as an error and no offer is made."""
    payload = {
        "state": "response",
        "connection_id": "random test connection id",
    }
    with dependency_mocks as mocks:
        mocks["connection_invitation_accept"].return_value = None
        response = self.client.post(
            path=f"/{self.path}",
            data=payload,
            format="json",
        )
        returns_status_code_http_200_ok(response)
        # The webhook still returns 200 but must not create an offer.
        mocks["connection_invitation_accept"].assert_called_once_with(
            "random test connection id")
        mocks["credential_offer_create"].assert_not_called()
        mocks["LOGGER"].assert_has_calls([
            call.info(
                "webhook: received: topic: 'connections' - state: 'response' - message: {'state': 'response', 'connection_id': 'random test connection id'}"
            ),
            call.error(
                "webhook: connection_invitation_accept: connection_id: random test connection id not found"
            ),
        ])
def test_poll_jobs_timeout(self):
    """Polling stops at the time limit; the still-running job stays queued."""
    self.poll_num = 0

    def se_poll():
        # First poll reports "still running" (False); subsequent polls
        # return None, which is equally falsy, so job2 never finishes.
        if self.poll_num == 0:
            self.poll_num = 1
            return False

    self.cls._timeout = datetime(2017, 10, 20, 12, 30, 00)
    self.config.get_global.return_value = 3600
    self.cls._timeout = datetime(2017, 10, 20, 13, 30, 00)
    job1 = Mock(name='job1')
    job1.poll.return_value = True
    job2 = Mock(name='job2')
    job2.poll.side_effect = se_poll
    job3 = Mock(name='job3')
    job3.poll.return_value = True
    self.cls._running = [job1, job2, job3]
    self.cls._finished = []
    with patch('%s.sleep' % pbm) as mock_sleep:
        with patch('%s.logger' % pbm) as mock_logger:
            self.cls._poll_jobs()
    # Finished jobs move over; job2 is still pending when the limit hits.
    assert self.cls._finished == [job1, job3]
    assert self.cls._running == [job2]
    assert job1.mock_calls == [call.poll()]
    assert job2.mock_calls == [call.poll(), call.poll()]
    assert job3.mock_calls == [call.poll()]
    assert mock_sleep.mock_calls == [call(3600), call(3600)]
    assert call.error(
        'Time limit reached; not polling any more jobs!'
    ) in mock_logger.mock_calls
def test_HTTPException_server_error(self, LOGGER):
    """A 5xx HTTPException is passed through unchanged and logged as an error."""
    exc = HTTPInternalServerError()
    http_exc = exc_to_http_exc(exc)
    # The very same exception object comes back, still a 500.
    self.assertIs(http_exc, exc)
    self.assertEqual(http_exc.code, 500)
    self.assertEqual(
        LOGGER.mock_calls,
        [call.error(ANY, exc, ANY, 500, exc_info=True)])
def test_other_exception(self, LOGGER):
    """An arbitrary exception maps to a detail-less 500 and is logged."""
    exc = ZeroDivisionError
    http_exc = exc_to_http_exc(exc)
    self.assertIsInstance(http_exc, HTTPInternalServerError)
    self.assertEqual(http_exc.code, 500)
    # No public detail may leak for unexpected errors.
    self.assertIs(http_exc.detail, None)
    self.assertEqual(
        LOGGER.mock_calls,
        [call.error(ANY, exc, exc_info=True)])
def test_other_DataAPIError_2(self, LOGGER):
    """A DataAPIError without a public message yields a 500 with no detail."""
    exc = DataAPIError()
    http_exc = exc_to_http_exc(exc)
    self.assertIsInstance(http_exc, HTTPInternalServerError)
    self.assertEqual(http_exc.code, 500)
    # Without a public message, nothing is exposed to the client.
    self.assertIs(http_exc.detail, None)
    self.assertEqual(
        LOGGER.mock_calls,
        [call.error(ANY, exc, ANY, exc_info=True)])
def test_other_DataAPIError(self, LOGGER):
    """A DataAPIError's custom public message becomes the 500's detail."""
    exc = DataAPIError(public_message='FOO')
    http_exc = exc_to_http_exc(exc)
    self.assertIsInstance(http_exc, HTTPInternalServerError)
    self.assertEqual(http_exc.code, 500)
    # The public message is the only thing exposed to the client.
    self.assertEqual(http_exc.detail, 'FOO')
    self.assertEqual(
        LOGGER.mock_calls,
        [call.error(ANY, exc, ANY, exc_info=True)])
async def test_reader_worker():
    """reader_worker logs an API error for a bad key and reads with a good one."""
    rq_queue, w_queue, rst_queue = (asyncio.Queue(), asyncio.Queue(),
                                    asyncio.Queue())
    # First run: empty API key -> the worker should log the HTTP 404 error.
    rq_queue.put_nowait(0)
    main.logging = Mock()
    await main.reader_worker(0, '', 1, rq_queue, w_queue, rst_queue, None,
                             leader=True, only_one_loop=True)
    main.logging.assert_has_calls([
        call.debug('reader0: reading 1 items from offset 1...'),
        call.error('Mailchimp api-read: HTTP Error 404: Not Found')
    ], any_order=True)
    # Second run: valid-looking key -> one item written, more requests queued.
    rq_queue.put_nowait(0)
    main.logging = Mock()
    await main.reader_worker(0, '1a2d7ebf82', 1, rq_queue, w_queue, rst_queue,
                             None, leader=True, only_one_loop=True)
    main.logging.assert_has_calls(
        [call.debug('reader0: reading 1 items from offset 1...')],
        any_order=True)
    assert w_queue.qsize() == 1
    assert rq_queue.qsize() > 0
    # BUG FIX: method_calls yields ``call`` objects, not strings.  The old
    # code invoked ``s.startswith(...)`` directly on the call object, which
    # merely builds a new (truthy) call, so the first branch always matched
    # and the whitelist below was never really checked; likewise the string
    # equality could never succeed.  Compare against str(call) instead.
    for logged in main.logging.method_calls:
        rep = str(logged)
        if rep.startswith('call.info(\'reader0: items [1, 1] of'):
            assert True
        elif rep.startswith('call.info(\'reader0: adding all remaining'):
            assert True
        elif rep == 'call.debug(\'reader0: reading 1 items from offset 1...\')':
            assert True
        else:
            assert False, 'unexpected log call: %s' % rep
def test_broken_config(self):
    """An account file with an unknown type is logged, then deleted cleanly."""
    cp = configparser.ConfigParser()
    cp['DEFAULT']['account_type'] = 'unknown_type'
    with open(self.accounts_dir + '/error_account.ini', 'w') as f:
        cp.write(f)
    error_conf = mbm.config.Global(self.conf_file, self.accounts_dir)
    error_conf.delete_account("error_account")
    self.assertListEqual(mbm.config.log.mock_calls,
                         [call.error("Could not instantiate account 'error"
                                     "_account': Unknown account type 'unk"
                                     "nown_type'"),
                          call.info("Deleted errornous account 'error_acco"
                                    "unt'")])
    # BUG FIX: Mock's reset method is reset_mock(); the previous
    # mock_reset() call just created (and invoked) an auto-generated child
    # mock, leaving the recorded calls in place for later tests.
    mbm.config.log.reset_mock()
def test_broken_config(self):
    """An account file with an unknown type is logged, then deleted cleanly."""
    cp = configparser.ConfigParser()
    cp['DEFAULT']['account_type'] = 'unknown_type'
    with open(self.accounts_dir + '/error_account.ini', 'w') as f:
        cp.write(f)
    error_conf = mbm.config.Global(self.conf_file, self.accounts_dir)
    error_conf.delete_account("error_account")
    self.assertListEqual(mbm.config.log.mock_calls, [
        call.error("Could not instantiate account 'error"
                   "_account': Unknown account type 'unk"
                   "nown_type'"),
        call.info("Deleted errornous account 'error_acco"
                  "unt'")
    ])
    # BUG FIX: Mock's reset method is reset_mock(); the previous
    # mock_reset() call just created (and invoked) an auto-generated child
    # mock, leaving the recorded calls in place for later tests.
    mbm.config.log.reset_mock()
def test__given_config_with_non_existing_file_to_copy__when_running__should_print_to_ui(
        osfs_type_mock):
    """A missing copy source surfaces as a FileNotFoundError message in the UI."""
    opts = launch_options(copy=[
        CopyInstruction("myfile.txt", "mycopy.txt"),
        CopyInstruction("otherfile.gif", "copy.gif"),
    ])
    # Only the first source file exists in the fake filesystem.
    osfs_type_mock.return_value.create("myfile.txt")
    ui_spy = Mock()
    sut = make_sut(opts, ui_spy)
    sut.run(opts)
    expected = call.error("FileNotFoundError: otherfile.gif")
    assert expected in ui_spy.method_calls
def test_skill_load_blacklisted(self):
    """Skill should not be loaded if it is blacklisted."""
    self.loader.config['skills']['blacklisted_skills'] = ['test_skill']
    with patch(self.mock_package + 'SettingsMetaUploader'):
        self.loader.load()
    # Loading was attempted but refused, and the failure message emitted.
    self.assertTrue(self.loader.load_attempted)
    self.assertFalse(self.loader.loaded)
    self.assertListEqual(['mycroft.skills.loading_failure'],
                         self.message_bus_mock.message_types)
    expected_log = [
        call.info('ATTEMPTING TO LOAD SKILL: test_skill'),
        call.info('Skill test_skill is blacklisted - it will not be loaded'),
        call.error('Skill test_skill failed to load'),
    ]
    self.assertListEqual(expected_log, self.log_mock.method_calls)
def test_calling_tracker_with_payload_indicating_failed_informs_user_with_error(
        self, emissary_class_mock, *args, **mocks):
    """A 'failed' job status reported to the tracker is relayed as an error."""
    emissary_mock = emissary_class_mock()
    # Build a GET request simulating the conversion service's callback.
    request = APIRequestFactory().get(
        reverse('job_progress:tracker',
                kwargs=dict(export_id=self.export.id)),
        data=dict(status='failed',
                  job='http://localhost:8901/api/conversion_job/1/'))
    views.tracker(request, export_id=str(self.export.id))
    assert_that(
        emissary_mock.mock_calls,
        contains_in_any_order(
            call.error(
                'Export #{export_id} "Neverland" to Esri File Geodatabase has failed.'
                .format(export_id=self.export.id),
            ),
        ))
def test_skipping_ResultCleaningError_if_flag_is_false(self, LOGGER):
    """With break_on_result_cleaning_error=False a cleaning failure is
    logged and its result dropped, while the remaining results survive."""
    self.cleaned_list[1] = ResultCleaningError
    self.cls.break_on_result_cleaning_error = False
    self._do_call()
    # Exactly one error logged for the failed cleaning, no exception
    # adjustment performed.
    self.assertEqual(LOGGER.mock_calls, [call.error(ANY, ANY)])
    self.assertEqual(self.adjust_exc.call_count, 0)
    self.cls.get_clean_result_dict_kwargs.assert_called_once_with()
    self.cls.call_api_method.assert_called_once_with(sen.api_method)
    # All three raw results still pass through the cleaner...
    self.assertEqual(self.data_spec.clean_result_dict.mock_calls, [
        call(sen.result_dict_1, kwarg=sen.kwarg),
        call(sen.result_dict_2, kwarg=sen.kwarg),
        call(sen.result_dict_3, kwarg=sen.kwarg),
    ])
    # ...but the second (failed) one is absent from the output.
    self.assertEqual(self.results, [
        sen.cleaned_result_dict_1,
        sen.cleaned_result_dict_3,
    ])
def test_run_jobs_timeout(self):
    """Once a job run pushes past the deadline, remaining jobs are not started."""

    def se_run():
        # Simulate the clock passing the deadline while job2 runs.
        self.cls._timeout = datetime(2017, 10, 20, 12, 20, 00)
        return None

    job1 = Mock(name='job1')
    job1.run.return_value = True
    type(job1).skip = PropertyMock(return_value=None)
    job2 = Mock(name='job2')
    job2.run.side_effect = se_run
    type(job2).skip = PropertyMock(return_value=None)
    job3 = Mock(name='job3')
    job3.run.return_value = False
    type(job3).skip = PropertyMock(return_value=None)
    job4 = Mock(name='job4')
    type(job4).error_repr = PropertyMock(return_value='j4erepr')
    exc = RuntimeError('foo')
    job4.run.side_effect = exc
    type(job4).skip = PropertyMock(return_value=None)
    self.config.get_global.return_value = 3600
    self.cls._finished = ['a']
    self.cls._running = ['b']
    self.cls._run_exceptions['foo'] = 6
    with patch('%s._poll_jobs' % pb, autospec=True) as mock_poll:
        with patch('%s._report' % pb, autospec=True) as mock_report:
            with patch('%s.logger' % pbm) as mock_logger:
                with patch('%s.format_exc' % pbm) as m_fmt_exc:
                    m_fmt_exc.return_value = 'm_traceback'
                    self.cls._run_jobs([job1, job2, job3, job4])
    assert self.cls._finished == [job1]
    assert self.cls._running == [job2, job3, job4]
    assert self.cls._run_exceptions == {}
    assert mock_poll.mock_calls == [call(self.cls)]
    assert mock_report.mock_calls == [call(self.cls)]
    assert self.config.jobs_for_schedules.mock_calls == []
    # job1 finished; job2 ran and tripped the deadline; job3/job4 never ran.
    assert job1.mock_calls == [call.run()]
    assert job2.mock_calls == [call.run()]
    assert job3.mock_calls == []
    assert job4.mock_calls == []
    assert call.error(
        'Time limit reached; not running any more jobs!'
    ) in mock_logger.mock_calls
    assert m_fmt_exc.mock_calls == []
def test_archive_feed_fail(self):
    """A 500 from the archive endpoint raises HTTPError and logs the failure."""
    with patch('oe_utils.scripts.archive_feed.log') as log_mock:
        responses.add(
            responses.POST,
            url="https://host.be/bron/atomfeed/archive",
            status=500)
        feed_list = [{
            'atom_feed_folder': fixture_directory,
            'atom_feed_manager': TestAtomFeedManager,
            'max_entries': 2,
            'archive_feed_url': 'https://host.be/bron/atomfeed/archive'
        }]
        with self.assertRaises(HTTPError):
            archive_feed.archive_entries(feed_list=feed_list,
                                         session=self.db,
                                         system_token='test_token')
        # The normal progress messages were emitted before the failure...
        expected_debug = [
            call.debug('Number of entries: 2'),
            call.debug('Archive current feed')
        ]
        self.assertEqual(log_mock.debug.mock_calls, expected_debug)
        # ...and the archive failure itself was reported.
        expected_error = [
            call.error('Failed to archive on url https://host.be/bron/atomfeed/archive'),
        ]
        self.assertEqual(log_mock.error.mock_calls, expected_error)
def test_show_errors(self):
    """show() with color='error' routes the message to log.error()."""
    common.show("Oops", color='error', log=self.log)
    expected = [call.error("Oops")]
    expect(self.log.mock_calls) == expected
write_mock.assert_has_calls(expected_write_calls, any_order=True) # makedirs used makedirs_mock.assert_has_calls(expected_makedirs) # setuid used (at least number of times) expected_setuid_calls = [call.__enter__()] * expected_setuid_calls_count set_effective_root_uid_mock.assert_has_calls(expected_setuid_calls, any_order=True) @patch('wca.resctrl.SetEffectiveRootUid') @patch('os.makedirs') @pytest.mark.parametrize('side_effect, log_call', [ (OSError(errno.E2BIG, 'other'), call.error('Could not write pid to resctrl (%r): Unexpected errno %r.', '/sys/fs/resctrl/tasks', 7)), (OSError(errno.ESRCH, 'no such proc'), call.warning( 'Could not write pid to resctrl (%r): Process probably does not exist. ', '/sys/fs/resctrl/tasks')), (OSError(errno.EINVAL, 'no such proc'), call.error('Could not write pid to resctrl (%r): Invalid argument %r.', '/sys/fs/resctrl/tasks')), ]) def test_resgroup_add_pids_invalid(makedirs_mock, set_effective_root_uid_mock, side_effect, log_call): resgroup = ResGroup(name='') writes_mock = { '/sys/fs/resctrl/tasks': Mock(return_value=Mock(write=Mock(side_effect=side_effect))), '/sys/fs/resctrl/mon_groups/c1/tasks':