def test_patch_multiple(self):
    """Verify patch.multiple patches several attributes at once.

    Exercises both the string-target and direct-object forms, the
    explicit start()/stop() API, and the decorator form.
    """
    # Capture originals so restoration can be asserted after stop().
    original_foo = Foo
    original_f = Foo.f
    original_g = Foo.g
    # The same patch expressed two ways: dotted-name string and object.
    patcher1 = patch.multiple(foo_name, f=1, g=2)
    patcher2 = patch.multiple(Foo, f=1, g=2)
    for patcher in patcher1, patcher2:
        patcher.start()
        try:
            # The class object itself is untouched; only attributes change.
            self.assertIs(Foo, original_foo)
            self.assertEqual(Foo.f, 1)
            self.assertEqual(Foo.g, 2)
        finally:
            patcher.stop()
        # stop() must restore the original attribute values.
        self.assertIs(Foo, original_foo)
        self.assertEqual(Foo.f, original_f)
        self.assertEqual(Foo.g, original_g)

    # Decorator form: no DEFAULT values, so no mocks are passed to test().
    @patch.multiple(foo_name, f=3, g=4)
    def test():
        self.assertIs(Foo, original_foo)
        self.assertEqual(Foo.f, 3)
        self.assertEqual(Foo.g, 4)

    test()
def test_add_existant_nin(self):
    """Adding a NIN that already exists must be rejected.

    The second submit should re-render the form (200 OK) with a danger
    alert instead of adding a duplicate NIN.
    """
    from eduiddashboard.msgrelay import MsgRelay
    self.set_logged(user='******')
    response_form = self.testapp.get('/profile/nins/')
    form = response_form.forms[self.formname]
    nin = '200010100001'
    # First we add a nin...
    # exists_by_filter is patched so the backend reports the NIN as taken;
    # the MsgRelay helpers are stubbed to always succeed.
    with patch.object(UserDB, 'exists_by_filter', clear=True):
        with patch.multiple(MsgRelay, nin_validator=return_true,
                            nin_reachable=return_true):
            UserDB.exists_by_filter.return_value = True
            form['norEduPersonNIN'].value = nin
            form.submit('add')
    # ...and then we try to add it again.
    with patch.object(UserDB, 'exists_by_filter', clear=True):
        with patch.multiple(MsgRelay, nin_validator=return_true,
                            nin_reachable=return_true):
            UserDB.exists_by_filter.return_value = True
            form['norEduPersonNIN'].value = nin
            response = form.submit('add')
            # Duplicate add: page re-renders with an error alert.
            self.assertEqual(response.status, '200 OK')
            self.assertIn(nin, response.body)
            self.assertIn('alert-danger', response.body)
            self.assertIsNotNone(getattr(response, 'form', None))
def test_get_glottolog_data_download(self):
    """get_glottolog_data must download on cache miss and surface failures.

    A failing URL opener is reported as ValueError; a working one yields
    usable data.
    """
    data_dir = os.path.join(self.tmp.as_posix(), 'data')

    class URLopener(object):
        # Fake opener that "downloads" a tiny, valid newick tree.
        def retrieve(self, url, fname):
            with io.open(fname, 'w', encoding='utf8') as fp:
                fp.write('(B [abcd1234],C [abcd1234])A [abcd1234];')

    class URLopenerError(object):
        # Fake opener that simulates a network failure.
        def retrieve(self, url, fname):
            raise IOError()

    # Download failure must surface as ValueError to the caller.
    with patch.multiple(
        'beastling.configuration',
        user_data_dir=Mock(return_value=data_dir),
        URLopener=URLopenerError,
    ):
        with self.assertRaises(ValueError):
            get_glottolog_data('newick', '2.5')

    # Successful download returns truthy data.
    with patch.multiple(
        'beastling.configuration',
        user_data_dir=Mock(return_value=data_dir),
        URLopener=URLopener,
    ):
        assert get_glottolog_data('newick', '2.5')
def test_remove_from_persistent(self):
    """remove_from_persistent: refuse 'new' items, delete when the
    leaderboard score 's' is 0, re-save with s=0 when it is 1."""
    from MMApp.entities.project import ProjectService
    existingItem = {"qid" : 1, "pid": "52812ee5a6844e30fc4adcac",
                    "lid" : "52812fd8a6844e327e4adcac",
                    "model_type": "Gradient Boosted Trees"}
    newItem = {"qid" : 1, "pid": "52812ee5a6844e30fc4adcac",
               "lid" : "new", "model_type": "Gradient Boosted Trees"}
    update_vals = {'s': 0}
    # test remove_from_persistent when lb 'lid' key is 'new'
    with patch.multiple(ProjectService,
                        get_leaderboard_item=DEFAULT,
                        delete_leaderboard_item=DEFAULT,
                        save_leaderboard_item=DEFAULT) as mock_new:
        # make sure a lid of 'new' returns false
        self.assertFalse(self.q.remove_from_persistent(newItem))
    # test remove_from_persistent when lb 's' key is 0
    with patch.multiple(ProjectService,
                        get_leaderboard_item=DEFAULT,
                        delete_leaderboard_item=DEFAULT,
                        save_leaderboard_item=DEFAULT) as mock_zero:
        output = self.q.remove_from_persistent(existingItem)
        mock_zero['delete_leaderboard_item'].assert_called_once_with(
            existingItem['lid'])
        self.assertTrue(output)
    # test remove_from_persistent when lb 's' key is 1
    existingItem.update({'s': 1})
    with patch.multiple(ProjectService,
                        get_leaderboard_item=DEFAULT,
                        delete_leaderboard_item=DEFAULT,
                        save_leaderboard_item=DEFAULT) as mock_one:
        output = self.q.remove_from_persistent(existingItem)
        mock_one['save_leaderboard_item'].assert_called_once_with(
            existingItem['lid'], update_vals)
        self.assertTrue(output)
def test_main(self):
    """Smoke-test the suite CLI: main() must wire parsed args into Run."""
    suite_name = 'SUITE'
    throttle = '3'
    machine_type = 'burnupi'

    # Stand-in for Run.prepare_and_schedule; checks the parsed CLI args
    # actually reached the Run object.
    def prepare_and_schedule(obj):
        assert obj.base_config.suite == suite_name
        assert obj.args.throttle == throttle

    def fake_str(*args, **kwargs):
        return 'fake'

    def fake_bool(*args, **kwargs):
        return True

    # Stub out every git/network-touching helper in suite.run.util.
    with patch.multiple(
        'teuthology.suite.run.util',
        fetch_repos=DEFAULT,
        package_version_for_hash=fake_str,
        git_branch_exists=fake_bool,
        git_ls_remote=fake_str,
    ):
        with patch.multiple(
            'teuthology.suite.run.Run',
            prepare_and_schedule=prepare_and_schedule,
        ):
            main([
                '--suite', suite_name,
                '--throttle', throttle,
                '--machine-type', machine_type,
            ])
def test_reset_password_mina(self):
    """Reset-password view: e-mail, e-mail with NIN notification, and
    phone-number flows all redirect; bad input re-renders with a hint."""
    response_form = self.testapp.get('/profile/reset-password/mina/')
    form = response_form.forms['resetpasswordninview-form']
    # Plain e-mail reset just redirects.
    form['email_or_username'].value = '*****@*****.**'
    response = form.submit('reset')
    self.assertEqual(response.status, '302 Found')
    self.db.reset_passwords.remove()
    # Same flow with the MsgRelay NIN helpers stubbed to succeed.
    form['email_or_username'].value = '*****@*****.**'
    from eduiddashboard.msgrelay import MsgRelay
    with patch.multiple(MsgRelay, nin_validator=return_true,
                        nin_reachable=return_true,
                        nin_reset_password=return_true):
        response = form.submit('reset')
        self.assertEqual(response.status, '302 Found')
    self.db.reset_passwords.remove()
    # A phone number is accepted too and stores exactly one reset entry.
    form['email_or_username'].value = '0701234567'
    with patch.multiple(MsgRelay, nin_validator=return_true,
                        nin_reachable=return_true,
                        nin_reset_password=return_true):
        response = form.submit('reset')
        self.assertEqual(response.status, '302 Found')
    reset_passwords_after = list(self.db.reset_passwords.find())
    self.assertEqual(len(reset_passwords_after), 1)
    # Unknown input re-renders the form with a validation hint.
    form['email_or_username'].value = 'notexistingmail@foodomain'
    response = form.submit('reset')
    self.assertIn('Valid input formats are:', response.body)
def test_Providers_from_config__files(sysdir, userdir, dsdir): """Test configuration file precedence Ensure that provider precedence works in the correct order: datalad defaults < dataset defaults < system defaults < user defaults """ # Test the default, this is an arbitrary provider used from another # test providers = Providers.from_config_files(reload=True) provider = providers.get_provider('https://crcns.org/data....') assert_equal(provider.name, 'crcns') # Test that the dataset provider overrides the datalad # default with chpwd(dsdir): providers = Providers.from_config_files(reload=True) provider = providers.get_provider('https://crcns.org/data....') assert_equal(provider.name, 'dscrcns') # Test that the system defaults take precedence over the dataset # defaults (we're still within the dsdir) with patch.multiple("appdirs.AppDirs", site_config_dir=sysdir, user_config_dir=None): providers = Providers.from_config_files(reload=True) provider = providers.get_provider('https://crcns.org/data....') assert_equal(provider.name, 'syscrcns') # Test that the user defaults take precedence over the system # defaults with patch.multiple("appdirs.AppDirs", site_config_dir=sysdir, user_config_dir=userdir): providers = Providers.from_config_files(reload=True) provider = providers.get_provider('https://crcns.org/data....') assert_equal(provider.name, 'usercrcns')
def test_run_error(self):
    """run() must exit with code 3 when no active vault node is found."""
    self.cls.reactor = Mock(spec_set=reactor)
    with patch.multiple(
        pbm,
        logger=DEFAULT,
        Site=DEFAULT,
        LoopingCall=DEFAULT,
        VaultRedirectorSite=DEFAULT
    ) as mod_mocks:
        with patch.multiple(
            pb,
            get_active_node=DEFAULT,
            run_reactor=DEFAULT,
            listentcp=DEFAULT,
            add_update_loop=DEFAULT
        ) as cls_mocks:
            # Simulate Consul returning no active node.
            cls_mocks['get_active_node'].return_value = None
            with pytest.raises(SystemExit) as excinfo:
                self.cls.run()
    assert excinfo.value.code == 3
    # Only the critical log should have happened; nothing was started.
    assert mod_mocks['logger'].mock_calls == [
        call.critical("ERROR: Could not get active vault node from "
                      "Consul. Exiting.")
    ]
    assert mod_mocks['VaultRedirectorSite'].mock_calls == []
    assert mod_mocks['Site'].mock_calls == []
    assert self.cls.reactor.mock_calls == []
    assert cls_mocks['run_reactor'].mock_calls == []
    assert mod_mocks['LoopingCall'].mock_calls == []
def test_flavor(self):
    """flavor() raises NoFlavorException when nothing fits the hint,
    otherwise returns the matching flavor's name."""
    # A single flavor that is too small for the hint.
    def get_sorted_flavors(self, arch, select):
        return [
            {
                'Name': 'too_small',
                'RAM': 2048,
                'Disk': 50,
                'VCPUs': 1,
            },
        ]

    with patch.multiple(
        OpenStack,
        get_sorted_flavors=get_sorted_flavors,
    ):
        with pytest.raises(NoFlavorException):
            hint = {
                'ram': 1000,
                'disk': 40,
                'cpus': 2
            }
            OpenStack().flavor(hint, 'arch', None)

    # A flavor that satisfies the hint: its name is returned.
    flavor = 'good-flavor'

    def get_sorted_flavors(self, arch, select):
        return [
            {
                'Name': flavor,
                'RAM': 2048,
                'Disk': 50,
                'VCPUs': 2,
            },
        ]

    with patch.multiple(
        OpenStack,
        get_sorted_flavors=get_sorted_flavors,
    ):
        hint = {
            'ram': 1000,
            'disk': 40,
            'cpus': 2
        }
        assert flavor == OpenStack().flavor(hint, 'arch', None)
def setUp(self):
    """Wire up every patch the Redis restore tests rely on."""
    super(RedisRestoreTests, self).setUp()
    # Pretend redis.conf points at a known data dir and dump file.
    self.conf_man_patch = patch.object(
        configuration.ConfigurationManager, 'parse_configuration',
        mock.Mock(return_value={'dir': '/var/lib/redis',
                                'dbfilename': 'dump.rdb'}))
    self.conf_man_patch.start()
    self.addCleanup(self.conf_man_patch.stop)
    # Avoid touching the real filesystem / ownership.
    self.os_patch = patch.multiple(operating_system,
                                   chown=DEFAULT,
                                   create_directory=DEFAULT)
    self.os_patch.start()
    self.addCleanup(self.os_patch.stop)
    self.restore_runner = utils.import_class(
        RESTORE_REDIS_CLS)('swift', location='http://some.where',
                           checksum='True_checksum',
                           restore_location='/var/lib/somewhere')
    # Stub the runner's own restore hooks.
    self.restore_runner_patch = patch.multiple(
        self.restore_runner, _run_restore=DEFAULT,
        pre_restore=DEFAULT, post_restore=DEFAULT)
    self.restore_runner_mocks = self.restore_runner_patch.start()
    self.expected_content_length = 123
    # NOTE: this re-assignment replaces the mock installed by
    # patch.multiple above so the content length can be controlled.
    self.restore_runner._run_restore = mock.Mock(
        return_value=self.expected_content_length)
    self.addCleanup(self.restore_runner_patch.stop)
def test_git_update(rmtree_mock):
    """GitRepo.update(): clone when missing, wipe+clone when broken,
    otherwise pull and propagate pull()'s return value."""
    g = git.GitRepo('.', 'https://example.com')

    # Path does not exist -> fresh clone, no pull.
    with patch.multiple(g, clone=DEFAULT, path=DEFAULT, diff=DEFAULT,
                        pull=DEFAULT) as git_mock:
        git_mock['path'].is_dir.return_value = False
        g.update()
        assert git_mock['clone'].called
        git_mock['pull'].assert_not_called()

    rmtree_mock.reset_mock()
    # Path exists but is not a git checkout -> remove it and clone again.
    with patch.multiple(g, clone=DEFAULT, path=DEFAULT, diff=DEFAULT,
                        pull=DEFAULT) as git_mock:
        git_mock['path'].is_dir.return_value = True
        git_mock['path'].joinpath().is_dir.return_value = False
        g.update()
        rmtree_mock.assert_called_with(g.path_str, ignore_errors=True)
        assert git_mock['clone'].called
        git_mock['pull'].assert_not_called()

    rmtree_mock.reset_mock()
    # Healthy checkout -> pull only, returning pull()'s value.
    with patch.multiple(g, clone=DEFAULT, path=DEFAULT, diff=DEFAULT,
                        pull=DEFAULT) as git_mock:
        git_mock['path'].is_dir.return_value = True
        git_mock['path'].joinpath().is_dir.return_value = True
        val = g.update()
        rmtree_mock.assert_not_called()
        git_mock['clone'].assert_not_called()
        assert git_mock['pull'].called
        assert val == git_mock['pull'].return_value
def setUp(self):
    """Isolate logging, timing and pid handling for virt-who tests."""
    # Clear the queue
    while True:
        try:
            self.queue.get(block=False)
        except Empty:
            break
    self.server.data_version = 0
    # Logger patching: write all logs into a throwaway directory.
    self.tmp_dir = mkdtemp()
    logger_patcher = patch.multiple('virtwho.log.Logger',
                                    _log_dir=self.tmp_dir,
                                    _stream_handler=None,
                                    _queue_logger=None)
    logger_patcher.start()
    self.addCleanup(logger_patcher.stop)
    self.addCleanup(rmtree, self.tmp_dir)

    log_patcher = patch.multiple('virtwho.log',
                                 DEFAULT_LOG_DIR=self.tmp_dir)
    log_patcher.start()
    self.addCleanup(log_patcher.stop)

    rhsm_log_patcher = patch('rhsm.connection.log')
    rhsm_log_patcher.start()
    self.addCleanup(rhsm_log_patcher.stop)

    # Reduce minimum send interval to allow for faster test completion
    minimum_patcher = patch('virtwho.config.MinimumSendInterval', 2)
    minimum_patcher.start()
    self.addCleanup(minimum_patcher.stop)

    # Mock PIDFILE (so we can run tests as an unprivledged user).
    # FIX: the original concatenated tmp_dir + 'virt-who.pid' without a
    # path separator, which placed the pid file *next to* the temp dir
    # instead of inside it, so the rmtree cleanup never removed it.
    pid_file_name = self.tmp_dir + '/virt-who.pid'
    pid_file_patcher = patch('virtwho.main.PIDFILE', pid_file_name)
    pid_file_patcher.start()
    self.addCleanup(pid_file_patcher.stop)
def patch_requests(mapping=None, allowed_domains=None, allowed_methods=None):  # pylint: disable=too-complex
    """
    mapping is a dict of str => data so that
    "toto" => {"response" => {"success" : 1}, "json" => True/False}
    means that any url called with *toto* will return {"success" : 1}
    json part is optional

    allowed_domains can be used in place of a mapping if you don't care
    about specifying specific return values but is required so as to ensure
    that you are only patching the specific domains that your test expects
    to hit.

    allowed_methods limits the methods that can be called on requests
    """
    if mapping is None:
        if allowed_domains is None:
            raise ValueError('patch_requests(): you must specify a mapping or a list of allowed_domains')
        # No explicit return values: every allowed domain maps to defaults.
        mapping = {domain: {} for domain in allowed_domains}

    # Session.request receives an extra leading argument compared to the
    # module-level verb helpers, hence the two thin adapters below.
    def _request_response_from_query(_, url, **kwargs):  # pylint: disable=C0111,W0613
        return _response(url)

    def _other_response_from_query(url, **kwargs):  # pylint: disable=C0111,W0613
        return _response(url)

    def _response(url):
        """
        If the requested URL is found in the mapping, returns the mocked
        response as configured
        """
        logging.debug("mocking %s", url)
        for (token, config) in mapping.iteritems():
            if token in url:
                resp = requests.Response()
                resp.url = config.get('url', url)
                resp.status_code = config.get('http_code', 200)
                if config.get("json", True) and 'response' in config:
                    resp._content = json.dumps(config["response"])  # pylint: disable=W0212
                elif config.get("stream", False):
                    resp.raw = MagicMock(
                        stream=MagicMock(return_value=config["response"])
                    )
                else:
                    # str: Requests uses str as bytes internally, at least on Python 2
                    resp._content = str(config.get("response", ''))  # pylint: disable=W0212
                if config.get('headers'):
                    resp.headers = config.get('headers')
                return resp
        raise Exception("Requests mock called with unexpected URL, nothing in the mapping for %s" % url)

    if allowed_methods is None:
        allowed_methods = ['get', 'post', 'put', 'head', 'patch', 'options', 'delete']
    methods_map = {method: MagicMock(side_effect=_other_response_from_query)
                   for method in allowed_methods}
    # Session.request needs the adapter that accepts the extra argument.
    methods_map['request'] = MagicMock(side_effect=_request_response_from_query)
    # Patch both the module-level helpers and Session so either entry point
    # hits the mocks; yield the mocks so callers can inspect call records.
    with patch.multiple('requests', **methods_map):
        with patch.multiple('requests.Session', **methods_map):
            yield {k: getattr(requests, k) for k in methods_map}
def test_list_multipath(self):
    """list_devices() must flag multipath data and journal partitions."""
    #
    # multipath data partition
    #
    if platform.system() == "FreeBSD":
        return
    partition_uuid = "56244cf5-83ef-4984-888a-2d8b8e0e04b2"
    disk = "Xda"
    partition = "Xda1"

    def get_partition_type(dev):
        return main.PTYPE['mpath']['osd']['ready']

    with patch.multiple(
        main,
        list_all_partitions=lambda: {disk: [partition]},
        get_partition_uuid=lambda dev: partition_uuid,
        get_partition_type=get_partition_type,
        is_partition=lambda dev: True,
    ):
        expect = [{'path': '/dev/' + disk,
                   'partitions': [{
                       'dmcrypt': {},
                       'fs_type': None,
                       'is_partition': True,
                       'mount': None,
                       'multipath': True,
                       'path': '/dev/' + partition,
                       'ptype': main.PTYPE['mpath']['osd']['ready'],
                       'state': 'unprepared',
                       'type': 'data',
                       'uuid': partition_uuid,
                   }]}]
        assert expect == main.list_devices()
    #
    # multipath journal partition
    #
    journal_partition_uuid = "2cc40457-259e-4542-b029-785c7cc37871"

    def get_partition_type(dev):
        return main.PTYPE['mpath']['journal']['ready']

    with patch.multiple(
        main,
        list_all_partitions=lambda: {disk: [partition]},
        get_partition_uuid=lambda dev: journal_partition_uuid,
        get_partition_type=get_partition_type,
        is_partition=lambda dev: True,
    ):
        expect = [{'path': '/dev/' + disk,
                   'partitions': [{
                       'dmcrypt': {},
                       'is_partition': True,
                       'multipath': True,
                       'path': '/dev/' + partition,
                       'ptype': main.PTYPE['mpath']['journal']['ready'],
                       'type': 'journal',
                       'uuid': journal_partition_uuid,
                   }]}]
        assert expect == main.list_devices()
def testDownload2(self):
    """Exercise the download adapters (CLDF, CSV, N3, RDF/XML dumps)."""
    from clld.web.adapters.download import CsvDump, N3Dump, RdfXmlDump
    from clld.web.adapters.cldf import CldfDownload
    tmp = mktemp()

    class Path(MagicMock, UnicodeMixin):
        # Minimal path stand-in: all writes are redirected into `tmp`.
        @property
        def stem(self):
            return 'a'

        @property
        def parent(self):
            return Mock(exists=Mock(return_value=False))

        def open(self, mode):
            # `open` here resolves to the builtin (class-level names are
            # not in scope inside methods), not this method.
            return open(tmp, mode)

    with patch.multiple(
        'clld.web.adapters.cldf',
        ZipFile=MagicMock(),
        Path=MagicMock(return_value=Path()),
        move=Mock(),
        remove=Mock(),
    ):
        with patch(
            'clld.web.adapters.download.Path',
            new=MagicMock(return_value=Path()),
        ):
            dl = CldfDownload(Dataset, 'clld')
            dl.create(self.env['request'], verbose=False)

    with patch.multiple(
        'clld.web.adapters.download',
        ZipFile=MagicMock(),
        Path=MagicMock(return_value=Path()),
        move=Mock(),
        remove=Mock(),
    ):
        dl = CsvDump(Language, 'clld')
        dl.create(self.env['request'], verbose=False)
        dl.create(self.env['request'], filename='name.n3', verbose=False)
        dl = N3Dump(Language, 'clld')
        dl.create(self.env['request'], verbose=False)
        # Each dump must actually have written the temp file.
        if os.path.exists(tmp):
            os.remove(tmp)
        else:  # pragma: no cover
            raise ValueError
        dl = RdfXmlDump(Language, 'clld')
        dl.create(self.env['request'], verbose=False)
        # The RDF/XML dump is gzip'ed and must parse as XML.
        with closing(gzip.open(tmp, 'rb')) as fp:
            assert et.fromstring(fp.read())
        if os.path.exists(tmp):
            os.remove(tmp)
        else:  # pragma: no cover
            raise ValueError
def test_stale_openstack_volumes(self):
    """Only volumes older than the staleness threshold are deleted."""
    ctx = Mock()
    ctx.teuthology_config = config
    ctx.dry_run = False
    now = datetime.datetime.strftime(datetime.datetime.now(),
                                     "%Y-%m-%dT%H:%M:%S.000000")
    id = '4bee3af9-febb-40c1-a17e-ff63edb415c5'
    name = 'target1-0'
    volume_list = json.loads(
        '[{'
        ' "ID": "' + id + '"'
        '}]'
    )
    #
    # A volume created a second ago is left untouched
    #
    volume_show = (
        '['
        ' {"Field": "id", "Value": "' + id + '"},'
        ' {"Field": "created_at", "Value": "' + now + '"},'
        ' {"Field": "display_name", "Value": "' + name + '"}'
        ']'
    )

    # sh() closes over volume_show, so the fake CLI answer is swapped by
    # rebinding volume_show before the second scenario.
    def sh(cmd):
        if 'volume show' in cmd:
            return volume_show

    with patch.multiple(
        nuke,
        sh=sh,
        openstack_delete_volume=DEFAULT,
    ) as m:
        nuke.stale_openstack_volumes(ctx, volume_list)
        m['openstack_delete_volume'].assert_not_called()
    #
    # A volume created long ago is destroyed
    #
    ancient = "2000-11-02T15:43:12.000000"
    volume_show = (
        '['
        ' {"Field": "id", "Value": "' + id + '"},'
        ' {"Field": "created_at", "Value": "' + ancient + '"},'
        ' {"Field": "display_name", "Value": "' + name + '"}'
        ']'
    )

    def sh(cmd):
        if 'volume show' in cmd:
            return volume_show

    with patch.multiple(
        nuke,
        sh=sh,
        openstack_delete_volume=DEFAULT,
    ) as m:
        nuke.stale_openstack_volumes(ctx, volume_list)
        m['openstack_delete_volume'].assert_called_with(id)
def test_exists(self):
    """exists() is True when ``sh`` succeeds and False when it raises."""
    def fake_sh(cmd):
        # Successful 'openstack ... show' -> instance data is returned.
        return self.teuthology_instance

    with patch.multiple(misc, sh=fake_sh):
        instance = OpenStackInstance("NAME")
        assert instance.exists()

    def failing_sh(cmd):
        # Failed 'openstack ... show' -> CalledProcessError bubbles up.
        raise subprocess.CalledProcessError("FAIL", "BAD")

    with patch.multiple(misc, sh=failing_sh):
        instance = OpenStackInstance("NAME")
        assert not instance.exists()
def test_patch_multiple_create(self):
    """patch.multiple on a missing attribute fails unless create=True."""
    # Without create=True, patching a nonexistent attribute is an error.
    bad_patcher = patch.multiple(Foo, blam='blam')
    self.assertRaises(AttributeError, bad_patcher.start)

    # With create=True the attribute exists for the patch's duration...
    good_patcher = patch.multiple(Foo, blam='blam', create=True)
    good_patcher.start()
    try:
        self.assertEqual(Foo.blam, 'blam')
    finally:
        good_patcher.stop()
    # ...and is removed again once the patch is undone.
    self.assertFalse(hasattr(Foo, 'blam'))
def test_create_abstract_method_method(self):
    """Manager.create can be invoked once the ABC machinery is disabled."""
    path_mock = MagicMock()
    git_mock = MagicMock()
    module_patch = patch.multiple(
        'pyolite.managers.manager',
        Path=MagicMock(return_value=path_mock),
        Git=MagicMock(return_value=git_mock),
    )
    # Clearing __abstractmethods__ allows instantiating the abstract class.
    abstract_patch = patch.multiple('pyolite.managers.manager.Manager',
                                    __abstractmethods__=set())
    with module_patch, abstract_patch:
        Manager('/path/to/admin/repo').create('entity')
def test_if_admin_repository_is_not_dir_it_should_raise_ValueError():
    """Manager must refuse an admin repo path that is not a directory."""
    path_mock = MagicMock()
    git_mock = MagicMock()
    # The path reports it is not a directory -> constructor must fail.
    path_mock.isdir.return_value = False
    module_patch = patch.multiple(
        'pyolite.managers.manager',
        Path=MagicMock(return_value=path_mock),
        Git=MagicMock(return_value=git_mock),
    )
    abstract_patch = patch.multiple('pyolite.managers.manager.Manager',
                                    __abstractmethods__=set())
    with module_patch, abstract_patch:
        with pytest.raises(ValueError):
            Manager('/path/to/repo')
def test_list_multipath(self): args = ceph_disk.parse_args(['list']) # # multipath data partition # partition_uuid = "56244cf5-83ef-4984-888a-2d8b8e0e04b2" disk = "Xda" partition = "Xda1" with patch.multiple( ceph_disk, list_all_partitions=lambda names: { disk: [partition] }, get_partition_uuid=lambda dev: partition_uuid, get_partition_type=lambda dev: ceph_disk.MPATH_OSD_UUID, is_partition=lambda dev: True, ): expect = [{'path': '/dev/' + disk, 'partitions': [{ 'dmcrypt': {}, 'fs_type': None, 'is_partition': True, 'mount': None, 'multipath': True, 'path': '/dev/' + partition, 'ptype': ceph_disk.MPATH_OSD_UUID, 'state': 'unprepared', 'type': 'data', 'uuid': partition_uuid, }]}] assert expect == ceph_disk.list_devices(args) # # multipath journal partition # journal_partition_uuid = "2cc40457-259e-4542-b029-785c7cc37871" with patch.multiple( ceph_disk, list_all_partitions=lambda names: { disk: [partition] }, get_partition_uuid=lambda dev: journal_partition_uuid, get_partition_type=lambda dev: ceph_disk.MPATH_JOURNAL_UUID, is_partition=lambda dev: True, ): expect = [{'path': '/dev/' + disk, 'partitions': [{ 'dmcrypt': {}, 'is_partition': True, 'multipath': True, 'path': '/dev/' + partition, 'ptype': ceph_disk.MPATH_JOURNAL_UUID, 'type': 'journal', 'uuid': journal_partition_uuid, }]}] assert expect == ceph_disk.list_devices(args)
def testDetectNotificationReaderThreadTargetCallFailed(
        self, _slotAgentClassMock, _modelSwapperInterfaceClassMock):
    """SwapController.run must raise when the notification-reader thread
    target cannot be invoked.

    Reproduce failure to invoke the SwapController's input-reader thread
    target and verify that SwapController's event loop raises the expected
    exception.
    """
    sc = SwapController(concurrency=1)
    self.assertEqual(len(sc._slotAgents), 1)

    # Patch SwapController's input-thread object with one whose target has
    # an incompatible signature, so invoking it fails immediately.
    def expectTwoArgs(_a, _b):
        pass

    t = threading.Thread(target=expectTwoArgs)
    t.daemon = True  # attribute form instead of deprecated setDaemon()
    # FIX: keep a handle on the patcher and undo it even if the test
    # fails; the original started the patch and never stopped it, leaking
    # the patched attribute into subsequent tests.
    patcher = patch.multiple(sc, _notificationReaderThread=t)
    patcher.start()
    self.addCleanup(patcher.stop)

    # Attempt to run it in a thread
    def runSwapControllerThread(sc, runResultQ):
        try:
            g_logger.info("Swap Controller run-thread is running")
            try:
                r = sc.run()
            except Exception as e:
                runResultQ.put(e)
            else:
                runResultQ.put(r)
        finally:
            g_logger.info("Swap Controller run-thread is exiting")

    runResultQ = Queue.Queue()
    scThread = threading.Thread(
        target=runSwapControllerThread,
        name="runSwapControllerThread",
        args=(sc, runResultQ))
    scThread.daemon = True
    scThread.start()

    # Wait for the run-thread to stop
    g_logger.info("Waiting for SwapController run-thread to stop")
    scThread.join(timeout=5)
    # is_alive() replaces the deprecated isAlive() spelling.
    self.assertFalse(scThread.is_alive())
    g_logger.info("SwapController run-thread stopped")

    # Confirm the expected exception
    runResult = runResultQ.get_nowait()
    self.assertIsInstance(runResult, AssertionError)
    self.assertIn("Notification-reader thread failed to start in time",
                  runResult.args[0])
def test_exists(self):
    """exists() reflects whether the 'openstack ... show' call succeeds."""
    # sh succeeding (returning instance data) -> exists() is True.
    with patch.multiple(
        misc,
        sh=lambda cmd: self.teuthology_instance,
    ):
        o = OpenStackInstance('NAME')
        assert o.exists()

    # sh raising CalledProcessError -> exists() is False.
    def sh_raises(cmd):
        raise subprocess.CalledProcessError('FAIL', 'BAD')

    with patch.multiple(
        misc,
        sh=sh_raises,
    ):
        o = OpenStackInstance('NAME')
        assert not o.exists()
def test_rename(self):
    """CurrentView.rename must re-stage the file under its new name."""
    mocked_re = MagicMock()
    mocked_index = MagicMock()
    mocked_os = MagicMock()
    mocked_result = MagicMock()

    mocked_result.rename.return_value = True
    mocked_re.sub.return_value = "new"
    mocked_os.path.split.return_value = [1, 1]

    with patch.multiple('gitfs.views.current', re=mocked_re, os=mocked_os):
        from gitfs.views import current as current_view

        # Stub out the parent class' rename.  FIX: restore it in a
        # finally block — the original restored it only on success, so a
        # failing assertion leaked the stub into every later test.
        old_rename = current_view.PassthroughView.rename
        current_view.PassthroughView.rename = lambda self, old, new: True
        try:
            current = CurrentView(regex="regex", repo="repo",
                                  repo_path="repo_path",
                                  ignore=CachedIgnore())
            current._stage = mocked_index

            result = current.rename("old", "new")

            assert result is True
            mocked_index.assert_called_once_with(**{
                'remove': 1,
                'add': "new",
                "message": "Rename old to new"
            })
            mocked_os.path.split.assert_called_once_with("old")
        finally:
            current_view.PassthroughView.rename = old_rename
def test_first_select_as(self):
    """'select as FOO' followed by 'first' publishes the selected entity."""
    with patch.multiple("sys", exit=self.sys_exit,
                        stdout=StreamCapturing(sys.stdout)):
        tmpfile = tempfile.NamedTemporaryFile('w').name
        try:
            self.exec_pipeline("""
- load:
   - file://%s/metadata/test01.xml
- select as FOO
- first
- publish: %s
""" % (self.datadir, tmpfile))
            t1 = parse_xml(resource_filename("metadata/test01.xml",
                                             self.datadir))
            assert t1 is not None
            entity_id = 'https://idp.example.com/saml2/idp/metadata.php'
            # The published document must be the same single entity.
            t2 = parse_xml(tmpfile)
            assert t2 is not None
            assert root(t1).get('entityID') == root(t2).get('entityID')
            assert root(t2).get('entityID') == entity_id
        except PipeException:
            pass
        except IOError:
            raise Skip
        finally:
            try:
                os.unlink(tmpfile)
            except OSError:
                # FIX: was a bare `except:` which swallowed everything,
                # including KeyboardInterrupt/SystemExit; only a missing
                # file is expected here.
                pass
def test_store_and_retrieve(self):
    """The 'store' pipe writes each entity to <dir>/<sha1(entityID)>.xml
    and recreates the target directory if needed."""
    with patch.multiple("sys", exit=self.sys_exit,
                        stdout=StreamCapturing(sys.stdout)):
        tmpdir = tempfile.mkdtemp()
        os.rmdir(tmpdir)  # lets make sure 'store' can recreate it
        try:
            self.exec_pipeline("""
- load:
   - file://%s/metadata/test01.xml
- select
- store:
   directory: %s
""" % (self.datadir, tmpdir))
            t1 = parse_xml(resource_filename("metadata/test01.xml",
                                             self.datadir))
            assert t1 is not None
            entity_id = 'https://idp.example.com/saml2/idp/metadata.php'
            # The stored filename is the sha1 of the entityID.
            sha1id = hash_id(entity_id, prefix=False)
            fn = "%s/%s.xml" % (tmpdir, sha1id)
            assert os.path.exists(fn)
            t2 = parse_xml(fn)
            assert t2 is not None
            assert root(t1).get('entityID') == root(t2).get('entityID')
            assert root(t2).get('entityID') == entity_id
        except IOError:
            raise Skip
        finally:
            shutil.rmtree(tmpdir)
def test_reap_container(self):
    """reap_container deletes each listed object then the container.

    With three account nodes, the single listed object must be deleted
    once per node (3 delete_object calls) carrying the listing's storage
    policy index, followed by 3 delete_container calls.
    """
    policy = random.choice(list(POLICIES))
    r = self.init_reaper({}, fakelogger=True)
    with patch.multiple('swift.account.reaper',
                        direct_get_container=DEFAULT,
                        direct_delete_object=DEFAULT,
                        direct_delete_container=DEFAULT) as mocks:
        headers = {'X-Backend-Storage-Policy-Index': policy.idx}
        obj_listing = [{'name': 'o'}]

        def fake_get_container(*args, **kwargs):
            # Serve the single object once, then an empty listing so the
            # reaper's listing loop terminates.
            try:
                obj = obj_listing.pop(0)
            except IndexError:
                obj_list = []
            else:
                obj_list = [obj]
            return headers, obj_list

        mocks['direct_get_container'].side_effect = fake_get_container
        r.reap_container('a', 'partition', acc_nodes, 'c')
        mock_calls = mocks['direct_delete_object'].call_args_list
        self.assertEqual(3, len(mock_calls))
        for call_args in mock_calls:
            _args, kwargs = call_args
            self.assertEqual(kwargs['headers']
                             ['X-Backend-Storage-Policy-Index'],
                             policy.idx)
        # FIX: assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(mocks['direct_delete_container'].call_count, 3)
    self.assertEqual(r.stats_objects_deleted, 3)
def test_getattr(self):
    """getattr() must map os.lstat results and force the view's uid/gid."""
    mocked_full = MagicMock()
    mocked_os = MagicMock()
    mocked_stat = MagicMock()
    mocked_repo = MagicMock()

    mocked_stat.simple = "stat"
    mocked_os.lstat.return_value = mocked_stat
    mocked_full.return_value = "full_path"
    mocked_repo._full_path = mocked_full

    # STATS is patched so only the 'simple' attribute is copied over.
    with patch.multiple('gitfs.views.current', os=mocked_os,
                        STATS=['simple']):
        current = CurrentView(repo=mocked_repo, uid=1, gid=1,
                              repo_path="repo_path",
                              ignore=CachedIgnore())
        current._full_path = mocked_full

        result = current.getattr("path")
        # uid/gid come from the view, 'simple' from the lstat result.
        asserted_result = {
            'st_uid': 1,
            'st_gid': 1,
            'simple': "stat"
        }
        assert result == asserted_result

        mocked_os.lstat.assert_called_once_with("full_path")
        mocked_full.assert_called_once_with("path")
def test_find_usage(self):
    """find_usage() connects once, flips _have_usage, and calls every
    per-resource helper exactly once without touching the connection."""
    helper_names = [
        '_find_usage_vpcs',
        '_find_usage_subnets',
        '_find_usage_ACLs',
        '_find_usage_route_tables',
        '_find_usage_gateways',
    ]
    mock_conn = Mock()
    with patch('%s.connect' % self.pb) as mock_connect:
        with patch.multiple(
            self.pb,
            **{name: DEFAULT for name in helper_names}
        ) as mocks:
            cls = _VpcService(21, 43)
            cls.conn = mock_conn
            assert cls._have_usage is False
            cls.find_usage()
            assert mock_connect.mock_calls == [call()]
            assert cls._have_usage is True
            assert mock_conn.mock_calls == []
            for name in helper_names:
                assert mocks[name].mock_calls == [call()]
def test_fetch_transaction_status_transactions_filtering(self):
    """--transactions must restrict fetching to the listed transaction pks."""
    payment_method = PaymentMethodFactory.create(
        payment_processor=triggered_processor
    )
    transactions = TransactionFactory.create_batch(
        5, payment_method=payment_method, state=Transaction.States.Pending
    )
    # Pick an arbitrary subset to pass on the command line.
    filtered_transactions = [
        transactions[0], transactions[2], transactions[4]
    ]

    mock_fetch_status = MagicMock()
    with patch.multiple(TriggeredProcessor,
                        fetch_transaction_status=mock_fetch_status):
        transactions_arg = [
            str(transaction.pk) for transaction in filtered_transactions
        ]
        call_command('fetch_transactions_status',
                     '--transactions=%s' % ','.join(transactions_arg))

        # Exactly the filtered transactions are fetched, each once.
        for transaction in filtered_transactions:
            self.assertIn(call(transaction),
                          mock_fetch_status.call_args_list)
        self.assertEqual(mock_fetch_status.call_count,
                         len(filtered_transactions))
def test_find_usage(self):
    """test find usage method calls other methods"""
    snapshot_helpers = (
        '_find_cluster_manual_snapshots',
        '_find_cluster_subnet_groups',
    )
    mock_conn = Mock()
    with patch('%s.connect' % pb) as mock_connect:
        with patch.multiple(
            pb,
            **{helper: DEFAULT for helper in snapshot_helpers}
        ) as mocks:
            cls = _RedshiftService(21, 43)
            cls.conn = mock_conn
            assert cls._have_usage is False
            cls.find_usage()
            # connect() once, usage flag set, connection untouched, and
            # every helper invoked exactly once.
            assert mock_connect.mock_calls == [call()]
            assert cls._have_usage is True
            assert mock_conn.mock_calls == []
            for helper in snapshot_helpers:
                assert mocks[helper].mock_calls == [call()]
def test_it_should_be_possible_to_retrieve_by_name_a_repo(self):
    """Repository.get_by_name scans config files and builds a Repository."""
    mocked_users = MagicMock()
    mocked_file = MagicMock()
    mocked_dir = MagicMock()
    mocked_path = MagicMock()

    # Simulate a walk yielding one config file and one directory; only
    # the file should be inspected for the repo name.
    mocked_dir.isdir.return_value = True
    mocked_file.isdir.return_value = False
    mocked_file.__str__ = lambda x: 'tests/fixtures/get_repo_by_name.conf'
    mocked_path.walk.return_value = [mocked_file, mocked_dir]

    with patch.multiple('models.repository',
                        Path=MagicMock(return_value=mocked_path),
                        ListUsers=MagicMock(return_value=mocked_users)):
        repo = Repository.get_by_name('new_one', 'simple_path', 'git')

        eq_(repo.name, 'new_one')
        eq_(repo.path, 'simple_path')
        eq_(repo.git, 'git')
        eq_(repo.users, mocked_users)
def test_generate_request_header_step_error(self):
    """A failing GSS client step must surface as KerberosExchangeError."""
    with patch.multiple(kerberos_module_name,
                        authGSSClientInit=clientInit_complete,
                        authGSSClientResponse=clientResponse,
                        authGSSClientStep=clientStep_error):
        response = requests.Response()
        response.url = "http://www.example.org/"
        response.headers = {'www-authenticate': 'negotiate token'}
        host = urlparse(response.url).hostname
        auth = requests_kerberos.HTTPKerberosAuth()
        self.assertRaises(
            requests_kerberos.exceptions.KerberosExchangeError,
            auth.generate_request_header, response, host)

        # Init succeeded with the expected flags, the step failed with
        # the server token, and no response was ever extracted.
        clientInit_complete.assert_called_with(
            "*****@*****.**",
            gssflags=(
                kerberos.GSS_C_MUTUAL_FLAG |
                kerberos.GSS_C_SEQUENCE_FLAG),
            mech_oid=kerberos.GSS_MECH_OID_KRB5,
            principal=None)
        clientStep_error.assert_called_with("CTX", "token")
        self.assertFalse(clientResponse.called)
def test_get_user_stack_status_launch_pending_timeout(self):
    """A launch still pending past the timeout must report LAUNCH_ERROR."""
    self.init_block()
    self.update_stack({
        "status": "LAUNCH_PENDING",
        "launch_task_id": "bogus_task_id"
    })

    # A task result that never becomes ready.
    pending_result = Mock()
    pending_result.id = "bogus_task_id"
    pending_result.ready.return_value = False
    result_getter = Mock(return_value=pending_result)

    # A negative timeout means the launch is already overdue.
    self.block.launch_timeout = -1

    with patch.multiple(self.block,
                        launch_stack_task_result=result_getter):
        response = self.call_handler(
            "get_user_stack_status",
            {"initialize": False, "reset": False})

    self.assertTrue(result_getter.called)
    self.assertEqual(response["status"], "LAUNCH_ERROR")
def test_get_user_stack_status_reset_failure(self):
    """After RESUME_FAILED, a reset whose task fails reports LAUNCH_ERROR."""
    self.init_block()
    self.update_stack({"status": "RESUME_FAILED"})

    # A task result that completed, but unsuccessfully.
    failed_result = Mock()
    failed_result.id = 'bogus_task_id'
    failed_result.ready.return_value = True
    failed_result.successful.return_value = False
    launcher = Mock(return_value=failed_result)

    with patch.multiple(self.block, launch_stack_task=launcher):
        response = self.call_handler(
            "get_user_stack_status",
            {"initialize": True, "reset": True})

    self.assertTrue(launcher.called)
    self.assertEqual(response["status"], "LAUNCH_ERROR")
def test_upgrade_charm_with_nrpe_relation_installs_dependencies(
        self, mock_config, mock_notify_client, mock_ceph):
    """upgrade-charm must install NRPE dependencies when the relation exists."""
    config = copy.deepcopy(CHARM_CONFIG)
    mock_config.side_effect = lambda key: config[key]
    with patch.multiple(
            ceph_hooks,
            apt_install=DEFAULT,
            rsync=DEFAULT,
            log=DEFAULT,
            write_file=DEFAULT,
            nrpe=DEFAULT,
            emit_cephconf=DEFAULT,
            mon_relation_joined=DEFAULT,
            is_relation_made=DEFAULT) as mocks, patch(
                "charmhelpers.contrib.hardening.harden.config"):
        # Pretend the nrpe relation is present.
        mocks["is_relation_made"].return_value = True
        ceph_hooks.upgrade_charm()
    # The NRPE helper dependencies were installed during the upgrade,
    # and the client/monfs hooks ran exactly once each.
    mocks["apt_install"].assert_called_with(
        ["python-dbus", "lockfile-progs"])
    mock_notify_client.assert_called_once_with()
    mock_ceph.update_monfs.assert_called_once_with()
def test_destroy(self, ssh_command):
    """Check if destroy runs the required ssh commands"""
    self.configure_provisoning_server()
    image_dir = '/opt/robottelo/images'
    vm = VirtualMachine()
    # Pretend the VM was created so destroy() actually tears it down.
    with patch.multiple(vm, image_dir=image_dir, _created=True):
        vm.destroy()

    self.assertEqual(ssh_command.call_count, 3)
    # destroy + undefine the libvirt domain, then remove its image.
    ssh_command_args_list = [
        call('virsh destroy {0}'.format(vm.hostname),
             hostname=self.provisioning_server),
        call('virsh undefine {0}'.format(vm.hostname),
             hostname=self.provisioning_server),
        call('rm {0}/{1}.img'.format(image_dir, vm.hostname),
             hostname=self.provisioning_server),
    ]
    self.assertListEqual(ssh_command.call_args_list,
                         ssh_command_args_list)
def test_new_active_sub_no_trial_w_consolidated_billing(self):
    """First billing of an active no-trial sub with consolidated billing is only allowed on/after start_date."""
    plan = PlanFactory.create(generate_after=120)
    subscription = SubscriptionFactory.create(
        plan=plan, state=Subscription.STATES.ACTIVE,
        start_date=datetime.date(2015, 8, 12))
    # Day before start_date must not be billable; start_date itself must be.
    incorrect_billing_date = datetime.date(2015, 8, 11)
    correct_billing_date = datetime.date(2015, 8, 12)
    true_property = PropertyMock(return_value=True)
    mocked_bucket_end_date = MagicMock(
        return_value=datetime.date(2015, 8, 31))
    with patch.multiple(
        Subscription,
        _has_existing_customer_with_consolidated_billing=true_property,
        is_billed_first_time=true_property,
        bucket_end_date=mocked_bucket_end_date,
    ):
        assert subscription.should_be_billed(correct_billing_date) is True
        assert subscription.should_be_billed(
            incorrect_billing_date) is False
def test__server_maintenance_idle_time(self):
    """Maintenance disconnects sessions idle past _IDLE_TIMEOUT, sparing
    sessions whose account access check allows the idle override."""
    with patch.multiple(
        "evennia.server.server",
        LoopingCall=DEFAULT,
        Evennia=DEFAULT,
        _FLUSH_CACHE=DEFAULT,
        connection=DEFAULT,
        _IDMAPPER_CACHE_MAXSIZE=1000,
        _MAINTENANCE_COUNT=(3600 * 7) - 1,
        SESSIONS=DEFAULT,
        _IDLE_TIMEOUT=10,
        time=DEFAULT,
        ServerConfig=DEFAULT,
    ) as mocks:
        sess1 = MagicMock()
        sess2 = MagicMock()
        sess3 = MagicMock()
        sess4 = MagicMock()
        sess1.cmd_last = 100  # should time out
        sess2.cmd_last = 999  # should not time out
        sess3.cmd_last = 100  # should not time out (account access allows idling)
        sess4.cmd_last = 100  # should time out (account access denied)
        sess1.account = None
        sess2.account = None
        sess3.account = MagicMock()
        # BUG FIX: the original assigned sess3.account twice; the second
        # assignment was clearly meant for sess4, whose access check is
        # configured on the next line to deny the idle override.
        sess4.account = MagicMock()
        sess4.account.access = MagicMock(return_value=False)

        mocks["time"].time = MagicMock(return_value=1000)
        mocks["ServerConfig"].objects.conf = MagicMock(return_value=100)
        mocks["SESSIONS"].values = MagicMock(
            return_value=[sess1, sess2, sess3, sess4])
        mocks["SESSIONS"].disconnect = MagicMock()

        self.server._server_maintenance()
        reason = "idle timeout exceeded"
        # Only the idle sessions without an allowing account are dropped.
        calls = [call(sess1, reason=reason), call(sess4, reason=reason)]
        mocks["SESSIONS"].disconnect.assert_has_calls(calls, any_order=True)
def test_start_fuse(self):
    """start_fuse parses CLI args, prepares components, and mounts FUSE with the expected options."""
    mocked_parse_args = MagicMock()
    mocked_prepare = MagicMock()
    mocked_argp = MagicMock()
    mocked_fuse = MagicMock()
    mocked_args = MagicMock()
    mocked_merge = MagicMock()
    mocked_fetch = MagicMock()
    mocked_router = MagicMock()

    mocked_prepare.return_value = (mocked_merge, mocked_fetch,
                                   mocked_router)
    mocked_argp.ArgumentParser.return_value = "args"
    mocked_parse_args.return_value = mocked_args

    with patch.multiple('gitfs.mounter', argparse=mocked_argp,
                        parse_args=mocked_parse_args,
                        prepare_components=mocked_prepare,
                        FUSE=mocked_fuse):
        start_fuse()

        mocked_argp.ArgumentParser.assert_called_once_with(prog='GitFS')
        mocked_parse_args.assert_called_once_with("args")
        mocked_prepare.assert_called_once_with(mocked_args)

        excepted_call = {
            'foreground': mocked_args.foreground,
            'allow_root': mocked_args.allow_root,
            'allow_other': mocked_args.allow_other,
            'subtype': 'gitfs',
            'fsname': mocked_args.remote_url
        }
        # macOS FUSE does not accept the 'nonempty' mount option.
        if sys.platform != 'darwin':
            excepted_call['nonempty'] = True
        mocked_fuse.assert_called_once_with(mocked_router,
                                            mocked_args.mount_point,
                                            **excepted_call)
def test_delegation(self):
    """With delegate=True, authenticate_user initialises GSSAPI with delegation flags and retries the 401 request."""
    with patch.multiple("gssapi.SecurityContext", __init__=fake_init,
                        step=fake_resp):
        # Successful follow-up response returned by the retried request.
        response_ok = requests.Response()
        response_ok.url = "http://www.example.org/"
        response_ok.status_code = 200
        response_ok.headers = {'www-authenticate': b64_negotiate_server}

        connection = Mock()
        connection.send = Mock(return_value=response_ok)

        raw = Mock()
        raw.release_conn = Mock(return_value=None)

        # Original 401 challenge that triggers authentication.
        request = requests.Request()
        response = requests.Response()
        response.request = request
        response.url = "http://www.example.org/"
        response.headers = {'www-authenticate': b64_negotiate_token}
        response.status_code = 401
        response.connection = connection
        response._content = ""
        response.raw = raw

        auth = requests_gssapi.HTTPKerberosAuth(service="HTTP",
                                                delegate=True)
        r = auth.authenticate_user(response)

        self.assertTrue(response in r.history)
        self.assertEqual(r, response_ok)
        self.assertEqual(request.headers['Authorization'],
                         b64_negotiate_response)
        connection.send.assert_called_with(request)
        raw.release_conn.assert_called_with()
        # Delegation flags must reach the GSSAPI context constructor.
        fake_init.assert_called_with(
            name=gssapi_name("*****@*****.**"),
            usage="initiate", flags=gssdelegflags, creds=None)
        fake_resp.assert_called_with(b"token")
def test_init_thresholds(self):
    """Constructor with custom thresholds passes them to every service class and to TrustedAdvisor."""
    mock_svc1 = Mock(spec_set=_AwsService)
    mock_svc2 = Mock(spec_set=_AwsService)
    mock_foo = Mock(spec_set=_AwsService)
    mock_bar = Mock(spec_set=_AwsService)
    mock_ta = Mock(spec_set=TrustedAdvisor)
    mock_foo.return_value = mock_svc1
    mock_bar.return_value = mock_svc2
    svcs = {'SvcFoo': mock_foo, 'SvcBar': mock_bar}
    with patch.dict('%s._services' % pbm, values=svcs, clear=True):
        with patch.multiple(
                'awslimitchecker.checker',
                logger=DEFAULT,
                _get_version_info=DEFAULT,
                TrustedAdvisor=DEFAULT,
                autospec=True,
        ) as mocks:
            mock_version = mocks['_get_version_info']
            mock_version.return_value = self.mock_ver_info
            mock_ta_constr = mocks['TrustedAdvisor']
            mocks['TrustedAdvisor'].return_value = mock_ta
            cls = AwsLimitChecker(
                warning_threshold=5,
                critical_threshold=22,
            )
    # dict should be of _AwsService instances
    services = {'SvcFoo': mock_svc1, 'SvcBar': mock_svc2}
    assert cls.services == services
    # _AwsService instances should exist, but have no other calls
    assert mock_foo.mock_calls == [call(5, 22, {'region_name': None})]
    assert mock_bar.mock_calls == [call(5, 22, {'region_name': None})]
    assert mock_ta_constr.mock_calls == [
        call(services, {'region_name': None},
             ta_refresh_mode=None, ta_refresh_timeout=None)
    ]
    assert mock_svc1.mock_calls == []
    assert mock_svc2.mock_calls == []
    # BUG FIX: the last two assertions previously referenced
    # self.mock_version / self.cls, which this test never sets up; the
    # locals created above are the objects actually under test.
    assert mock_version.mock_calls == [call()]
    assert cls.vinfo == self.mock_ver_info
def test_dont_suspend_deleted_stack(self):
    """A stack that Heat reports missing is marked deleted, never suspended."""
    timeout = self.configuration.get("suspend_timeout")
    overdue = timezone.now() - timezone.timedelta(seconds=timeout + 1)
    Stack(student_id=self.student_id,
          course_id=self.course_id,
          suspend_timestamp=overdue,
          name=self.stack_name,
          status='RESUME_COMPLETE').save()
    # Heat claims the stack no longer exists.
    heat = Mock()
    heat.stacks.get.side_effect = [HTTPNotFound]
    job = SuspenderJob(self.configuration)
    with patch.multiple(
            job, get_heat_client=Mock(return_value=heat)):
        job.run()
    heat.actions.suspend.assert_not_called()
    refreshed = Stack.objects.get(name=self.stack_name)
    self.assertEqual(refreshed.status, DELETED_STATE)
def test_dont_try_to_delete_certain_stack_states(self):
    """Stacks already in a deletion-related state are left alone by the reaper."""
    delete_age = self.configuration.get("delete_age")
    expired = timezone.now() - timezone.timedelta(days=delete_age + 1)
    # (name, status) pairs covering every terminal deletion state.
    fixtures = [
        ('bogus_stack_1', DELETE_STATE),
        ('bogus_stack_2', DELETE_IN_PROGRESS_STATE),
        ('bogus_stack_3', DELETED_STATE),
    ]
    for stack_name, stack_state in fixtures:
        Stack(student_id=self.student_id,
              course_id=self.course_id,
              name=stack_name,
              suspend_timestamp=expired,
              status=stack_state).save()
    heat = Mock()
    job = ReaperJob(self.configuration)
    with patch.multiple(
            job, get_heat_client=Mock(return_value=heat)):
        job.run()
    heat.stacks.delete.assert_not_called()
    # None of the stacks may have changed state.
    for stack_name, stack_state in fixtures:
        self.assertEqual(
            Stack.objects.get(name=stack_name).status, stack_state)
def test_dont_delete_if_age_is_zero(self):
    """A delete_age of zero disables reaping entirely, however old the stack is."""
    self.configuration["delete_age"] = 0
    old_enough = timezone.now() - timezone.timedelta(days=15)
    initial_state = 'RESUME_COMPLETE'
    name = 'bogus_stack'
    Stack(student_id=self.student_id,
          course_id=self.course_id,
          name=name,
          suspend_timestamp=old_enough,
          status=initial_state).save()
    heat = Mock()
    job = ReaperJob(self.configuration)
    with patch.multiple(
            job, get_heat_client=Mock(return_value=heat)):
        job.run()
    heat.stacks.delete.assert_not_called()
    self.assertEqual(Stack.objects.get(name=name).status, initial_state)
def start_patchers(self):
    """Install the patches shared by these tests.

    The patcher handles are stored on self so the matching stop_patchers
    (or addCleanup) can undo them.
    """
    # NOTE: an unused 'm_fetch_repo' MagicMock was removed here; it was
    # configured but never passed to any patcher.

    def fake_get_scratch_devices(remote):
        # One fake scratch device derived from the remote's short name.
        return ['/dev/%s' % remote.shortname]

    self.patcher_get_scratch_devices = patch(
        'teuthology.task.ceph_ansible.get_scratch_devices',
        fake_get_scratch_devices,
    )
    self.patcher_get_scratch_devices.start()

    def fake_set_iface_and_cidr(self):
        # Avoid probing real network interfaces during tests.
        self._interface = 'eth0'
        self._cidr = '172.21.0.0/20'

    self.patcher_remote = patch.multiple(
        Remote,
        _set_iface_and_cidr=fake_set_iface_and_cidr,
    )
    self.patcher_remote.start()
def setUp(self, *args, **kwargs):
    """Common fixture: stub out the Cassandra connection/status machinery and
    record the original callables so tearDown can restore them."""
    super(GuestAgentCassandraDBManagerTest, self).setUp('cassandra')

    # Keep CassandraConnection from ever touching a real cluster.
    conn_patcher = patch.multiple(cass_service.CassandraConnection,
                                  _connect=DEFAULT,
                                  is_active=Mock(return_value=True))
    self.addCleanup(conn_patcher.stop)
    conn_patcher.start()

    self.real_status = cass_service.CassandraAppStatus.set_status

    class FakeInstanceServiceStatus(object):
        # Minimal stand-in for the status model: fixed NEW status, no-op save.
        status = ServiceStatuses.NEW

        def save(self):
            pass

    cass_service.CassandraAppStatus.set_status = MagicMock(
        return_value=FakeInstanceServiceStatus())
    self.context = trove_testtools.TroveTestContext(self)

    self.manager = cass_manager.Manager()
    self.manager._app = cass_service.CassandraApp()
    self.manager._admin = cass_service.CassandraAdmin(
        models.CassandraUser('Test'))
    self.admin = self.manager._admin
    # Replace the admin's private driver client (name-mangled attribute).
    self.admin._CassandraAdmin__client = MagicMock()
    self.conn = self.admin._CassandraAdmin__client
    self.pkg = cass_service.packager
    # Originals saved so monkey-patched callables can be restored later.
    self.origin_os_path_exists = os.path.exists
    self.origin_format = volume.VolumeDevice.format
    self.origin_migrate_data = volume.VolumeDevice.migrate_data
    self.origin_mount = volume.VolumeDevice.mount
    self.origin_mount_points = volume.VolumeDevice.mount_points
    self.origin_stop_db = cass_service.CassandraApp.stop_db
    self.origin_start_db = cass_service.CassandraApp.start_db
    self.origin_install_db = cass_service.CassandraApp._install_db
    self.original_get_ip = netutils.get_my_ipv4
    self.orig_make_host_reachable = (
        cass_service.CassandraApp.apply_initial_guestagent_configuration)
def test_schedule_suite_noverify(self):
    """With suite_verify_ceph_hash disabled, scheduling must never query gitbuilder."""
    suite_name = 'noop'
    throttle = '3'
    machine_type = 'burnupi'

    # Stub scheduling side effects; lambdas pin deterministic arch/hash values.
    with patch.multiple(
        suite,
        fetch_repos=DEFAULT,
        teuthology_schedule=DEFAULT,
        sleep=DEFAULT,
        get_arch=lambda x: 'x86_64',
        get_gitbuilder_hash=DEFAULT,
        git_ls_remote=lambda *args: '1234',
        package_version_for_hash=lambda *args: 'fake-9.5',
    ) as m:
        config.suite_verify_ceph_hash = False
        main([
            '--suite', suite_name,
            '--suite-dir', 'teuthology/test',
            '--throttle', throttle,
            '--machine-type', machine_type
        ])
        m['sleep'].assert_called_with(int(throttle))
        m['get_gitbuilder_hash'].assert_not_called()
def test_build_log_file_name_with_dir(self):
    """With an explicit datastore dir, the log file is built inside it with the expected name."""
    current_owner = getpass.getuser()
    log_dir = '/tmp'
    # Stub every filesystem helper so nothing is actually created on disk.
    with patch.multiple(operating_system,
                        exists=MagicMock(return_value=False),
                        write_file=DEFAULT,
                        create_directory=DEFAULT,
                        chown=DEFAULT,
                        chmod=DEFAULT) as os_mocks:
        log_file = self.manager.build_log_file_name(self.log_name_sys,
                                                    current_owner,
                                                    datastore_dir=log_dir)
        expected_filename = '%s/%s-%s.log' % (
            log_dir, self.manager.manager, self.log_name_sys)
        # Each filesystem helper should have been invoked exactly once.
        expected_call_counts = {'exists': 1,
                                'write_file': 1,
                                'create_directory': 1,
                                'chown': 1,
                                'chmod': 1}
        self.assert_build_log_file_name(expected_filename, log_file,
                                        os_mocks, expected_call_counts)
def test_join_with_password(self):
    """Joining a password-protected MUC sends presence containing the password element."""
    expected = xmpp.domish.Element(
        ('jabber:client', 'presence'),
        attribs={
            'to': '[email protected]/{0}'.format(self.client.nickname),
            'from': self.client.jid.full(),
        })
    muc = xmpp.domish.Element(('http://jabber.org/protocol/muc', 'x'))
    # Request no room history on join.
    hist = xmpp.domish.Element(('', 'history'), attribs={
        'maxchars': '0',
        'maxstanzas': '0',
    })
    muc.addChild(hist)
    muc.addElement('password', content='foobar')
    expected.addChild(muc)
    with patch.multiple(self.client, stream=Mock(), joined=Mock()):
        self.client.join('*****@*****.**', 'foobar')
        # Compare serialized XML since Element equality is not structural.
        assert self.client.stream.send.call_args[0][0].toXml(
        ) == expected.toXml()
        assert self.client.joined.called
def test_sync(self):
    """A successful sync cycle toggles the worker events and pushes to the upstream branch."""
    upstream = "origin"
    branch = "master"
    mocked_repo = MagicMock()
    mocked_merge = MagicMock()
    mocked_sync_done = MagicMock()
    mocked_syncing = MagicMock()
    mocked_push_successful = MagicMock()
    mocked_fetch = MagicMock()
    mocked_strategy = MagicMock()
    mocked_repo.behind = True
    # Raising from set() stops the sync after a single pass.
    mocked_push_successful.set.side_effect = ValueError

    with patch.multiple('gitfs.worker.sync',
                        sync_done=mocked_sync_done,
                        syncing=mocked_syncing,
                        push_successful=mocked_push_successful,
                        fetch=mocked_fetch):
        worker = SyncWorker("name", "email", "name", "email",
                            repository=mocked_repo,
                            strategy=mocked_strategy,
                            upstream=upstream,
                            branch=branch)
        worker.merge = mocked_merge
        worker.sync()

        # Event choreography: clear syncing/push/sync_done, then signal done
        # and request a new fetch; finally the push-successful flag is set.
        assert mocked_syncing.clear.call_count == 1
        assert mocked_push_successful.clear.call_count == 1
        assert mocked_sync_done.clear.call_count == 1
        assert mocked_sync_done.set.call_count == 1
        assert mocked_fetch.set.call_count == 1
        assert mocked_push_successful.set.call_count == 1
        assert mocked_repo.behind is False
        mocked_repo.push.assert_called_once_with(upstream, branch)
def test_other_type(self):
    """Non-paginatable result types are returned unchanged, with a warning logged."""
    func = Mock()
    with patch.multiple(
        pbm,
        invoke_with_throttling_retries=DEFAULT,
        _paginate_resultset=DEFAULT,
        _paginate_dict=DEFAULT,
        logger=DEFAULT,
    ) as mocks:
        # Return a plain string, which paginate_query cannot paginate.
        mocks['invoke_with_throttling_retries'].return_value = 'foobar'
        res = paginate_query(func, 'foo', bar='barval')
        assert res == 'foobar'
        assert mocks['invoke_with_throttling_retries'].mock_calls == [
            call(func, 'foo', bar='barval')
        ]
        # Neither pagination helper should run for a plain string result.
        assert mocks['_paginate_resultset'].mock_calls == []
        assert mocks['_paginate_dict'].mock_calls == []
        assert mocks['logger'].mock_calls == [
            call.warning("Query result of type %s cannot be paginated",
                         type('foo'))
        ]
def test_blacklist(self):
    """A blacklisted entity must not be resolvable after the pipeline runs."""
    with patch.multiple("sys", exit=self.sys_exit,
                        stdout=StreamCapturing(sys.stdout)):
        tmpfile = tempfile.NamedTemporaryFile('w').name
        # NOTE(review): tmpfile appears unused below -- confirm before removing.
        try:
            res, md = self.exec_pipeline("""
- when batch:
    - load:
        - %s/metadata via blacklist_example
    - loadstats
- when blacklist_example:
    - fork merge remove:
        - filter:
            - https://idp.example.com/saml2/idp/metadata.php
""" % self.datadir)
        except IOError:
            # Environment without the fixture data: skip rather than fail.
            raise Skip
        print(md.lookup('https://idp.example.com/saml2/idp/metadata.php'))
        assert (
            not md.lookup('https://idp.example.com/saml2/idp/metadata.php')
        )
def test_apply_post_restore_updates(self, conf_mock, _):
    """Post-restore updates rename the cluster, reset the admin password, and stop the DB again."""
    # Status reporting that the database is not running.
    fake_status = MagicMock()
    fake_status.is_running = False
    test_app = cass_service.CassandraApp()
    test_app.status = fake_status
    with patch.multiple(
            test_app,
            start_db=DEFAULT,
            stop_db=DEFAULT,
            _update_cluster_name_property=DEFAULT,
            _reset_admin_password=DEFAULT,
            change_cluster_name=DEFAULT) as calls:
        backup_info = {'instance_id': 'old_id'}
        conf_mock.guest_id = 'new_id'
        test_app._apply_post_restore_updates(backup_info)
        # Old cluster name comes from the backup; new one from the guest id.
        calls['_update_cluster_name_property'].assert_called_once_with(
            'old_id')
        calls['_reset_admin_password'].assert_called_once_with()
        calls['start_db'].assert_called_once_with(update_db=False)
        calls['change_cluster_name'].assert_called_once_with('new_id')
        calls['stop_db'].assert_called_once_with()
def test_list_data(self):
    #
    # a data partition that fails to mount is silently
    # ignored
    #
    partition_uuid = "56244cf5-83ef-4984-888a-2d8b8e0e04b2"
    disk = "Xda"
    partition = "Xda1"
    fs_type = "ext4"

    def get_partition_type(dev):
        # Every partition looks like a ready regular OSD data partition.
        return main.PTYPE['regular']['osd']['ready']

    # Stub out all device probing; mount deliberately fails (fail_to_mount)
    # so the partition's mount point stays None.
    with patch.multiple(
            main,
            list_all_partitions=lambda: {disk: [partition]},
            get_partition_uuid=lambda dev: partition_uuid,
            get_partition_type=get_partition_type,
            get_dev_fs=lambda dev: fs_type,
            mount=fail_to_mount,
            unmount=DEFAULT,
            is_partition=lambda dev: True,
    ):
        expect = [{
            'path': '/dev/' + disk,
            'partitions': [{
                'dmcrypt': {},
                'fs_type': fs_type,
                'is_partition': True,
                'mount': None,
                'path': '/dev/' + partition,
                'ptype': main.PTYPE['regular']['osd']['ready'],
                'state': 'unprepared',
                'type': 'data',
                'uuid': partition_uuid,
            }]
        }]
        assert expect == main.list_devices()
def test_show(self, get_newsletters, mock_basket_request):
    # Newsletters are only listed if the user is subscribed to them,
    # or they are marked 'show' and 'active' in the settings
    get_newsletters.return_value = newsletters
    # Find a newsletter without 'show' and subscribe the user to it
    # (Python 2 dict API: iteritems()).
    for newsletter, data in newsletters.iteritems():
        if not data.get('show', False):
            self.user['newsletters'] = [newsletter]
            break
    url = reverse('newsletter.existing.token', args=(self.token, ))
    # Stub all basket API calls and the template render so we can inspect
    # the formset context handed to the template.
    with patch.multiple('basket',
                        update_user=DEFAULT,
                        subscribe=DEFAULT,
                        unsubscribe=DEFAULT,
                        user=DEFAULT) as basket_patches:
        with patch('lib.l10n_utils.render') as render:
            basket_patches['user'].return_value = self.user
            render.return_value = HttpResponse('')
            self.client.get(url)
            request, template_name, context = render.call_args[0]
            forms = context['formset'].initial_forms
            shown = set([form.initial['newsletter'] for form in forms])
            inactive = set([
                newsletter for newsletter, data in newsletters.iteritems()
                if not data.get('active', False)
            ])
            to_show = set([
                newsletter for newsletter, data in newsletters.iteritems()
                if data.get('show', False)
            ]) - inactive
            subscribed = set(self.user['newsletters'])
            # All subscribed newsletters except inactive ones are shown
            self.assertEqual(set(), subscribed - inactive - shown)
            # All 'show' newsletters are shown
            self.assertEqual(set(), to_show - shown)
            # No other newsletters are shown
            self.assertEqual(set(), shown - subscribed - to_show)
def test_release(self):
    """release() closes the file handle, stages the dirty path with its message, and returns 0."""
    commit_message = "I need to stage this"
    fake_os = MagicMock()
    fake_os.close.return_value = 0
    stage_mock = MagicMock()
    with patch.multiple('gitfs.views.current', os=fake_os):
        view = CurrentView(repo="repo", repo_path="repo_path",
                           ignore=CachedIgnore())
        view._stage = stage_mock
        # Mark fd 4 as dirty with a pending commit message.
        view.dirty = {
            4: {
                'message': commit_message
            }
        }
        assert view.release("/path", 4) == 0
        fake_os.close.assert_called_once_with(4)
        stage_mock.assert_called_once_with(add="/path",
                                           message=commit_message)
def test_reap_container_non_exist_policy_index(self):
    """Reaping a container with an unknown storage policy index logs an error instead of deleting."""
    r = self.init_reaper({}, fakelogger=True)
    with patch.multiple('swift.account.reaper',
                        direct_get_container=DEFAULT,
                        direct_delete_object=DEFAULT,
                        direct_delete_container=DEFAULT) as mocks:
        # Policy index 2 does not exist in the test policy configuration.
        headers = {'X-Backend-Storage-Policy-Index': 2}
        obj_listing = [{'name': 'o'}]

        def fake_get_container(*args, **kwargs):
            # First call yields the one object; later calls an empty listing.
            try:
                obj = obj_listing.pop(0)
            except IndexError:
                obj_list = []
            else:
                obj_list = [obj]
            return headers, obj_list

        mocks['direct_get_container'].side_effect = fake_get_container
        r.reap_container('a', 'partition', acc_nodes, 'c')
    self.assertEqual(r.logger.get_lines_for_level('error'),
                     ['ERROR: invalid storage policy index: 2'])
def test_generate_request_header(self):
    """generate_request_header returns the Negotiate token produced by the GSS client round-trip."""
    with patch.multiple(kerberos_module_name,
                        authGSSClientInit=clientInit_complete,
                        authGSSClientResponse=clientResponse,
                        authGSSClientStep=clientStep_continue):
        response = requests.Response()
        response.url = "http://www.example.org/"
        response.headers = {'www-authenticate': 'negotiate token'}
        host = urlparse(response.url).hostname
        auth = requests_kerberos.HTTPKerberosAuth()
        self.assertEqual(
            auth.generate_request_header(response, host),
            "Negotiate GSSRESPONSE"
        )
        # Default auth: mutual + sequence flags, no explicit principal.
        clientInit_complete.assert_called_with(
            "*****@*****.**",
            gssflags=(
                kerberos.GSS_C_MUTUAL_FLAG |
                kerberos.GSS_C_SEQUENCE_FLAG),
            principal=None)
        clientStep_continue.assert_called_with("CTX", "token")
        clientResponse.assert_called_with("CTX")
def test_fetch_in_idle_mode(self):
    """In idle mode the worker waits with the idle timeout and propagates fetch errors."""
    mocked_peasant = MagicMock()
    # fetch raising ValueError ends the work loop after one iteration.
    mocked_fetch = MagicMock(side_effect=ValueError)
    mocked_fetch_event = MagicMock()
    mocked_idle = MagicMock()
    mocked_idle.is_set.return_value = True

    with patch.multiple('gitfs.worker.fetch',
                        Peasant=mocked_peasant,
                        fetch=mocked_fetch_event,
                        idle=mocked_idle):
        worker = FetchWorker()
        worker.fetch = mocked_fetch
        worker.timeout = 5
        worker.idle_timeout = 20
        with pytest.raises(ValueError):
            worker.work()

    assert mocked_fetch.call_count == 1
    # Idle mode must wait with idle_timeout (20), not the normal timeout (5).
    mocked_fetch_event.wait.assert_called_once_with(20)