def test_valid_v2_request(self):
    """A valid v2 auth request builds a v3 Password plugin and succeeds.

    The middleware should turn the X_AUTH_* request headers into a
    keystone v3 Password auth plugin and place the resulting token info
    into the WSGI environment.
    """
    mock_auth = self.m.CreateMock(ks_v3_auth.Password)
    self.m.StubOutWithMock(ks_v3_auth, 'Password')
    # Expect the plugin to be built from the headers supplied below.
    # NOTE(review): the '******' values look redacted/masked — confirm
    # against the real fixture before relying on them.
    ks_v3_auth.Password(auth_url=self.config['auth_uri'],
                        password='******',
                        project_id='tenant_id1',
                        user_domain_id='domain1',
                        username='******').AndReturn(mock_auth)
    m = mock_auth.get_access(mox.IsA(ks_session.Session))
    m.AndReturn(TOKEN_V2_RESPONSE)
    self.app.expected_env['keystone.token_info'] = TOKEN_V2_RESPONSE
    self.m.ReplayAll()
    req = webob.Request.blank('/tenant_id1/')
    req.headers['X_AUTH_USER'] = '******'
    req.headers['X_AUTH_KEY'] = 'goodpassword'
    req.headers['X_AUTH_URL'] = self.config['auth_uri']
    req.headers['X_USER_DOMAIN_ID'] = 'domain1'
    self.middleware(req.environ, self._start_fake_response)
    self.m.VerifyAll()
def test_update_pool_with_references_to_health_monitors(self):
    """Updating a pool's monitor list detaches the removed monitor."""
    self._create_pool_with_health_monitors()
    # Expect one monitor to be disassociated (matched loosely by type,
    # since the exact id removed is not pinned here).
    neutronclient.Client.disassociate_health_monitor(
        '5678', mox.IsA(six.string_types))
    self.m.ReplayAll()
    snippet = template_format.parse(pool_with_health_monitors_template)
    self.stack = utils.parse_stack(snippet)
    self.stack.create()
    self.assertEqual((self.stack.CREATE, self.stack.COMPLETE),
                     self.stack.state)
    # Restrict the pool's monitor list to monitor1 only, then update.
    snippet['Resources']['pool']['Properties']['monitors'] = [
        {u'Ref': u'monitor1'}]
    updated_stack = utils.parse_stack(snippet)
    self.stack.update(updated_stack)
    self.assertEqual((self.stack.UPDATE, self.stack.COMPLETE),
                     self.stack.state)
    self.m.VerifyAll()
def test_what_provides_codec(self): """Test searching for package providing a codec.""" # invalid query self._catch_callbacks() self.backend.error("not-supported", mox.StrContains("search term is invalid"), True) self.backend.finished() self.mox.ReplayAll() self.backend._open_cache() self.backend.dispatch_command("what-provides", ["None", enums.PROVIDES_CODEC, "audio/mpeg"]) self._catch_callbacks("package") self.backend.package("gstreamer0.10-silly;0.1-0;all;", enums.INFO_AVAILABLE, mox.IsA(str)) self.backend.finished() self.mox.ReplayAll() self.backend._open_cache() self.backend.dispatch_command("what-provides", ["None", enums.PROVIDES_CODEC, "gstreamer0.10(decoder-audio/ac3)"])
def _create_pool_with_health_monitors(self, stack_name):
    """Record the neutron expectations for a pool with two monitors.

    Registers mox expectations only; the caller is responsible for
    ReplayAll/VerifyAll.

    :param stack_name: stack name used to derive the pool's physical name.
    """
    # Two identical HTTP health monitors with distinct ids.
    neutronclient.Client.create_health_monitor({
        'health_monitor': {
            'delay': 3, 'max_retries': 5, 'type': u'HTTP',
            'timeout': 10, 'admin_state_up': True}}
    ).AndReturn({'health_monitor': {'id': '5555'}})
    neutronclient.Client.create_health_monitor({
        'health_monitor': {
            'delay': 3, 'max_retries': 5, 'type': u'HTTP',
            'timeout': 10, 'admin_state_up': True}}
    ).AndReturn({'health_monitor': {'id': '6666'}})
    self.stub_SubnetConstraint_validate()
    neutronV20.find_resourceid_by_name_or_id(
        mox.IsA(neutronclient.Client),
        'subnet',
        'sub123',
        cmd_resource=None,
    ).MultipleTimes().AndReturn('sub123')
    neutronclient.Client.create_pool({
        'pool': {
            'subnet_id': 'sub123', 'protocol': u'HTTP',
            'name': utils.PhysName(stack_name, 'pool'),
            'lb_method': 'ROUND_ROBIN', 'admin_state_up': True}}
    ).AndReturn({'pool': {'id': '5678'}})
    # Association order of the two monitors is not deterministic.
    neutronclient.Client.associate_health_monitor(
        '5678', {'health_monitor': {'id': '5555'}}).InAnyOrder()
    neutronclient.Client.associate_health_monitor(
        '5678', {'health_monitor': {'id': '6666'}}).InAnyOrder()
    neutronclient.Client.create_vip({
        'vip': {
            'protocol': u'HTTP', 'name': 'pool.vip',
            'admin_state_up': True, 'subnet_id': u'sub123',
            'pool_id': '5678', 'protocol_port': 80}}
    ).AndReturn({'vip': {'id': 'xyz'}})
    neutronclient.Client.show_pool('5678').AndReturn(
        {'pool': {'status': 'ACTIVE'}})
    neutronclient.Client.show_vip('xyz').AndReturn(
        {'vip': {'status': 'ACTIVE'}})
def testInsertPayloadSignatures(self):
    """Test inserting payload signatures.

    Verifies the exact delta_generator command line used to embed the
    signatures, and that the metadata size file is read afterwards.
    """
    gen = self._GetStdGenerator(payload=self.delta_payload)
    signatures = ('0' * 256,)

    # Stub out the required functions.
    self.mox.StubOutWithMock(paygen_payload_lib._PaygenPayload,
                             '_RunGeneratorCmd')
    self.mox.StubOutWithMock(gen, '_ReadMetadataSizeFile')

    # Record the expected generator invocation and metadata read.
    expected_cmd = [
        'delta_generator',
        '-in_file=' + gen.payload_file,
        mox.IsA(str),
        '-out_file=' + gen.signed_payload_file,
        '-out_metadata_size_file=' + gen.metadata_size_file,
    ]
    gen._RunGeneratorCmd(expected_cmd)
    gen._ReadMetadataSizeFile()

    # Run the test.
    self.mox.ReplayAll()
    gen._InsertPayloadSignatures(signatures)
def test_create_failed_unexpected_vip_status(self):
    """An unrecognised vip status during create fails the pool resource."""
    neutronV20.find_resourceid_by_name_or_id(
        mox.IsA(neutronclient.Client),
        'subnet',
        'sub123'
    ).MultipleTimes().AndReturn('sub123')
    neutronclient.Client.create_pool({
        'pool': {
            'subnet_id': 'sub123', 'protocol': u'HTTP',
            'name': utils.PhysName('test_stack', 'pool'),
            'lb_method': 'ROUND_ROBIN', 'admin_state_up': True}}
    ).AndReturn({'pool': {'id': '5678'}})
    neutronclient.Client.create_vip({
        'vip': {
            'protocol': u'HTTP', 'name': 'pool.vip',
            'admin_state_up': True, 'subnet_id': u'sub123',
            'pool_id': '5678', 'protocol_port': 80}}
    ).AndReturn({'vip': {'id': 'xyz'}})
    neutronclient.Client.show_pool('5678').MultipleTimes().AndReturn(
        {'pool': {'status': 'ACTIVE'}})
    # The vip reports a status the resource does not know how to handle.
    neutronclient.Client.show_vip('xyz').AndReturn(
        {'vip': {'status': 'SOMETHING', 'name': 'xyz'}})
    snippet = template_format.parse(pool_template)
    self.stack = utils.parse_stack(snippet)
    resource_defns = self.stack.t.resource_definitions(self.stack)
    rsrc = loadbalancer.Pool(
        'pool', resource_defns['pool'], self.stack)
    self.m.ReplayAll()
    error = self.assertRaises(exception.ResourceFailure,
                              scheduler.TaskRunner(rsrc.create))
    self.assertEqual('ResourceUnknownStatus: resources.pool: '
                     'Pool creation failed due to '
                     'vip - Unknown status SOMETHING due to "Unknown"',
                     six.text_type(error))
    self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
    self.m.VerifyAll()
def test_create_failed_error_status(self):
    """An ERROR pool status fails create without retrying."""
    # Disable retries so the first ERROR is fatal.
    cfg.CONF.set_override('action_retry_limit', 0)
    neutronV20.find_resourceid_by_name_or_id(
        mox.IsA(neutronclient.Client),
        'subnet',
        'sub123'
    ).MultipleTimes().AndReturn('sub123')
    neutronclient.Client.create_pool({
        'pool': {
            'subnet_id': 'sub123', 'protocol': u'HTTP',
            'name': utils.PhysName('test_stack', 'pool'),
            'lb_method': 'ROUND_ROBIN', 'admin_state_up': True}}
    ).AndReturn({'pool': {'id': '5678'}})
    neutronclient.Client.create_vip({
        'vip': {
            'protocol': u'HTTP', 'name': 'pool.vip',
            'admin_state_up': True, 'subnet_id': u'sub123',
            'pool_id': '5678', 'protocol_port': 80}}
    ).AndReturn({'vip': {'id': 'xyz'}})
    neutronclient.Client.show_pool('5678').AndReturn(
        {'pool': {'status': 'ERROR', 'name': '5678'}})
    snippet = template_format.parse(pool_template)
    self.stack = utils.parse_stack(snippet)
    resource_defns = self.stack.t.resource_definitions(self.stack)
    rsrc = loadbalancer.Pool(
        'pool', resource_defns['pool'], self.stack)
    self.m.ReplayAll()
    error = self.assertRaises(exception.ResourceFailure,
                              scheduler.TaskRunner(rsrc.create))
    self.assertEqual(
        'ResourceInError: resources.pool: '
        'Went to status ERROR due to "error in pool"',
        six.text_type(error))
    self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
    self.m.VerifyAll()
def test_create_pending(self):
    """Create polls pool and vip through PENDING_CREATE to ACTIVE."""
    clients.OpenStackClients.keystone().AndReturn(
        fakes.FakeKeystoneClient())
    neutron_utils.neutronV20.find_resourceid_by_name_or_id(
        mox.IsA(neutronclient.Client),
        'subnet',
        'sub123'
    ).AndReturn('sub123')
    neutronclient.Client.create_pool({
        'pool': {
            'subnet_id': 'sub123', 'protocol': u'HTTP',
            'name': utils.PhysName('test_stack', 'pool'),
            'lb_method': 'ROUND_ROBIN', 'admin_state_up': True}}
    ).AndReturn({'pool': {'id': '5678'}})
    neutronclient.Client.create_vip({
        'vip': {
            'protocol': u'HTTP', 'name': 'pool.vip',
            'admin_state_up': True, 'subnet_id': u'sub123',
            'pool_id': '5678', 'protocol_port': 80}}
    ).AndReturn({'vip': {'id': 'xyz'}})
    # Both resources report PENDING_CREATE once before going ACTIVE,
    # exercising the resource's status-polling loop.
    neutronclient.Client.show_pool('5678').AndReturn(
        {'pool': {'status': 'PENDING_CREATE'}})
    neutronclient.Client.show_pool('5678').MultipleTimes().AndReturn(
        {'pool': {'status': 'ACTIVE'}})
    neutronclient.Client.show_vip('xyz').AndReturn(
        {'vip': {'status': 'PENDING_CREATE'}})
    neutronclient.Client.show_vip('xyz').AndReturn(
        {'vip': {'status': 'ACTIVE'}})
    snippet = template_format.parse(pool_template)
    stack = utils.parse_stack(snippet)
    rsrc = loadbalancer.Pool(
        'pool', snippet['Resources']['pool'], stack)
    self.m.ReplayAll()
    scheduler.TaskRunner(rsrc.create)()
    self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
    self.m.VerifyAll()
def test_upload_data_to_store_not_found_after_upload(self):
    """NotFound from the registry after upload triggers cleanup and 412.

    When updating image metadata raises NotFound, the stored data must
    be deleted, the image safely killed, an error notification sent, and
    HTTPPreconditionFailed raised to the caller.
    """
    req = unit_test_utils.get_fake_request()
    store_location = "file://foo/bar"
    image_size = 10
    image_checksum = "checksum"
    image_meta = {'id': unit_test_utils.UUID1, 'size': image_size}
    image_data = "blah"

    notifier = self.mox.CreateMockAnything()
    store = self.mox.CreateMockAnything()
    store.add(image_meta['id'], mox.IgnoreArg(),
              image_meta['size']).AndReturn(
                  (store_location, image_size, image_checksum, {}))

    # The registry update fails after the data was stored.
    self.mox.StubOutWithMock(registry, "update_image_metadata")
    update_data = {'checksum': image_checksum, 'size': image_size}
    registry.update_image_metadata(
        req.context, image_meta['id'], update_data).AndRaise(
            exception.NotFound)

    # Expect the cleanup path: delete stored data, kill the image,
    # and emit an error notification.
    self.mox.StubOutWithMock(upload_utils, "initiate_deletion")
    upload_utils.initiate_deletion(req, store_location, image_meta['id'],
                                   mox.IsA(bool))
    self.mox.StubOutWithMock(upload_utils, "safe_kill")
    upload_utils.safe_kill(req, image_meta['id'])
    notifier.error('image.upload', mox.IgnoreArg())

    self.mox.ReplayAll()
    self.assertRaises(webob.exc.HTTPPreconditionFailed,
                      upload_utils.upload_data_to_store,
                      req, image_meta, image_data, store, notifier)
    self.mox.VerifyAll()
def _test_router_interface_with_port(self, resolve_port=True):
    """Create/delete a router interface given by port; delete twice.

    The second delete hits a 404 from neutron, which the resource must
    treat as already-gone rather than an error.

    :param resolve_port: when True, also expect the port name/id lookup
        (the template uses the 'port' property instead of 'port_id').
    """
    port_key = 'port_id'
    neutronclient.Client.add_interface_router(
        'ae478782-53c0-4434-ab16-49900c88016c',
        {'port_id': '9577cafd-8e98-4059-a2e6-8a771b4d318e'}
    ).AndReturn(None)
    if resolve_port:
        port_key = 'port'
        neutronV20.find_resourceid_by_name_or_id(
            mox.IsA(neutronclient.Client),
            'port',
            '9577cafd-8e98-4059-a2e6-8a771b4d318e'
        ).AndReturn('9577cafd-8e98-4059-a2e6-8a771b4d318e')
    # First delete succeeds; the second returns 404 (already removed).
    neutronclient.Client.remove_interface_router(
        'ae478782-53c0-4434-ab16-49900c88016c',
        {'port_id': '9577cafd-8e98-4059-a2e6-8a771b4d318e'}
    ).AndReturn(None)
    neutronclient.Client.remove_interface_router(
        'ae478782-53c0-4434-ab16-49900c88016c',
        {'port_id': '9577cafd-8e98-4059-a2e6-8a771b4d318e'}
    ).AndRaise(qe.NeutronClientException(status_code=404))
    self.stub_PortConstraint_validate()
    self.stub_RouterConstraint_validate()
    self.m.ReplayAll()
    t = template_format.parse(neutron_template)
    stack = utils.parse_stack(t)
    rsrc = self.create_router_interface(
        t, stack, 'router_interface', properties={
            'router_id': 'ae478782-53c0-4434-ab16-49900c88016c',
            port_key: '9577cafd-8e98-4059-a2e6-8a771b4d318e'
        })
    scheduler.TaskRunner(rsrc.delete)()
    # Reset state so the resource can be deleted a second time.
    rsrc.state_set(rsrc.CREATE, rsrc.COMPLETE, 'to delete again')
    scheduler.TaskRunner(rsrc.delete)()
    self.m.VerifyAll()
def test_run_instance_non_admin(self):
    """The scheduler fetches host states with an elevated admin context."""
    self.was_admin = False

    def fake_get(context, *args, **kwargs):
        # make sure this is called with admin context, even though
        # we're using user context below
        self.was_admin = context.is_admin
        return {}

    sched = fakes.FakeFilterScheduler()
    self.stubs.Set(sched.host_manager, 'get_all_host_states', fake_get)
    fake_context = context.RequestContext('user', 'project')
    uuid = 'fake-uuid1'
    instance_properties = {'project_id': 1, 'os_type': 'Linux'}
    request_spec = {'instance_type': {'memory_mb': 1, 'local_gb': 1},
                    'instance_properties': instance_properties,
                    'instance_uuids': [uuid]}
    self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
    self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
    # No host satisfies the request, so the instance is put in ERROR
    # and a NoValidHost fault is recorded.
    old_ref, new_ref = db.instance_update_and_get_original(
        fake_context, uuid,
        {'vm_state': vm_states.ERROR,
         'task_state': None}).AndReturn(({}, {}))
    compute_utils.add_instance_fault_from_exc(
        fake_context, new_ref, mox.IsA(exception.NoValidHost),
        mox.IgnoreArg())
    self.mox.ReplayAll()
    sched.schedule_run_instance(
        fake_context, request_spec, None, None, None, None, {}, False)
    self.assertTrue(self.was_admin)
def test_prep_resize_exception_host_in_error_state_and_raise(self):
    """Unexpected prep_resize failures set ERROR state and re-raise."""
    fake_instance_uuid = 'fake-instance-id'
    self._mox_schedule_method_helper('schedule_prep_resize')
    self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
    self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
    request_spec = {'instance_properties': {'uuid': fake_instance_uuid}}
    kwargs = {
        'context': self.context,
        'image': 'fake_image',
        'request_spec': request_spec,
        'filter_properties': 'fake_props',
        'instance': 'fake_instance',
        'instance_type': 'fake_type',
        'reservations': list('fake_res'),
    }
    # The driver raises an unanticipated exception.
    self.manager.driver.schedule_prep_resize(**kwargs).AndRaise(
        test.TestingException('something happened'))
    inst = {
        "vm_state": "",
        "task_state": "",
    }
    # The instance goes to ERROR (unlike the NoValidHost case) and a
    # fault is recorded before the exception propagates.
    db.instance_update_and_get_original(
        self.context, fake_instance_uuid,
        {"vm_state": vm_states.ERROR,
         "task_state": None}).AndReturn((inst, inst))
    compute_utils.add_instance_fault_from_exc(
        self.context, fake_instance_uuid,
        mox.IsA(test.TestingException), mox.IgnoreArg())
    self.mox.ReplayAll()
    self.assertRaises(test.TestingException, self.manager.prep_resize,
                      **kwargs)
def test_create_failed_unexpected_status(self):
    """An ERROR pool status during create fails the resource."""
    clients.OpenStackClients.keystone().AndReturn(
        fakes.FakeKeystoneClient())
    neutron_utils.neutronV20.find_resourceid_by_name_or_id(
        mox.IsA(neutronclient.Client),
        'subnet',
        'sub123'
    ).AndReturn('sub123')
    neutronclient.Client.create_pool({
        'pool': {
            'subnet_id': 'sub123', 'protocol': u'HTTP',
            'name': utils.PhysName('test_stack', 'pool'),
            'lb_method': 'ROUND_ROBIN', 'admin_state_up': True}}
    ).AndReturn({'pool': {'id': '5678'}})
    neutronclient.Client.create_vip({
        'vip': {
            'protocol': u'HTTP', 'name': 'pool.vip',
            'admin_state_up': True, 'subnet_id': u'sub123',
            'pool_id': '5678', 'protocol_port': 80}}
    ).AndReturn({'vip': {'id': 'xyz'}})
    neutronclient.Client.show_pool('5678').AndReturn(
        {'pool': {'status': 'ERROR', 'name': '5678'}})
    snippet = template_format.parse(pool_template)
    stack = utils.parse_stack(snippet)
    resource_defns = stack.t.resource_definitions(stack)
    rsrc = loadbalancer.Pool(
        'pool', resource_defns['pool'], stack)
    self.m.ReplayAll()
    error = self.assertRaises(exception.ResourceFailure,
                              scheduler.TaskRunner(rsrc.create))
    # NOTE(review): sibling tests compare via six.text_type(error);
    # plain str() works here only while the message stays ASCII — confirm.
    self.assertEqual(
        'Error: neutron reported unexpected pool '
        'resource[5678] status[ERROR]',
        str(error))
    self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
    self.m.VerifyAll()
def test_port_security_enabled(self):
    """A port with port_security_enabled=False is created and goes ACTIVE."""
    neutronV20.find_resourceid_by_name_or_id(
        mox.IsA(neutronclient.Client),
        'network',
        'abcd1234',
        cmd_resource=None,
    ).MultipleTimes().AndReturn('abcd1234')
    # The port_security_enabled flag must be forwarded to create_port.
    neutronclient.Client.create_port({'port': {
        'network_id': u'abcd1234',
        'port_security_enabled': False,
        'name': utils.PhysName('test_stack', 'port'),
        'admin_state_up': True}}
    ).AndReturn({'port': {
        "status": "BUILD",
        "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
    }})
    neutronclient.Client.show_port(
        'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
    ).AndReturn({'port': {
        "status": "ACTIVE",
        "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
    }})
    self.m.ReplayAll()
    t = template_format.parse(neutron_port_security_template)
    stack = utils.parse_stack(t)
    port = stack['port']
    scheduler.TaskRunner(port.create)()
    self.m.VerifyAll()
def test_missing_subnet_id(self):
    """A fixed IP without a subnet still yields a valid port create."""
    neutronV20.find_resourceid_by_name_or_id(
        mox.IsA(neutronclient.Client),
        'network',
        'net1234').MultipleTimes().AndReturn('net1234')
    # The fixed_ips entry carries only ip_address — no subnet_id key.
    neutronclient.Client.create_port({'port': {
        'network_id': u'net1234',
        'fixed_ips': [{'ip_address': u'10.0.3.21'}],
        'name': utils.PhysName('test_stack', 'port'),
        'admin_state_up': True,
        'device_owner': u'network:dhcp'}}
    ).AndReturn({'port': {
        "status": "BUILD",
        "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
    }})
    neutronclient.Client.show_port(
        'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
    ).AndReturn({'port': {
        "status": "ACTIVE",
        "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
    }})
    self.m.ReplayAll()
    t = template_format.parse(neutron_port_template)
    # Remove the subnet from the template's fixed_ips entry.
    t['resources']['port']['properties']['fixed_ips'][0].pop('subnet')
    stack = utils.parse_stack(t)
    port = stack['port']
    scheduler.TaskRunner(port.create)()
    self.m.VerifyAll()
def _setup_test_migrate_disk_and_power_off_mocks(self, same_host=False,
                                                 with_exception=False):
    """Record the Hyper-V mocks for a migrate_disk_and_power_off run.

    :param same_host: if True, source and destination are the same host,
        which adds an extra local makedirs/rename of the root disk.
    :param with_exception: if True, the disk copy raises shutil.Error to
        exercise the error path.
    :returns: tuple of (instance, fake_dest_ip, network_info).
    """
    self._instance_data = self._get_instance_data()
    instance = db.instance_create(self._context, self._instance_data)
    network_info = fake_network.fake_get_instance_nw_info(
        self.stubs, spectacular=True)

    fake_local_ip = '10.0.0.1'
    if same_host:
        fake_dest_ip = fake_local_ip
    else:
        fake_dest_ip = '10.0.0.2'

    fake_root_vhd_path = 'C:\\FakePath\\root.vhd'
    fake_revert_path = ('C:\\FakeInstancesPath\\%s\\_revert' %
                        instance['name'])

    # The VM is powered off before its storage is inspected.
    func = mox.Func(self._check_instance_name)
    vmutils.VMUtils.set_vm_state(func,
                                 constants.HYPERV_VM_STATE_DISABLED)
    m = vmutils.VMUtils.get_vm_storage_paths(func)
    m.AndReturn(([fake_root_vhd_path], []))

    m = hostutils.HostUtils.get_local_ips()
    m.AndReturn([fake_local_ip])
    m = pathutils.PathUtils.get_instance_migr_revert_dir(
        instance['name'], remove_dir=True)
    m.AndReturn(fake_revert_path)

    if same_host:
        fake.PathUtils.makedirs(mox.IsA(str))

    m = fake.PathUtils.copy(fake_root_vhd_path, mox.IsA(str))
    if with_exception:
        m.AndRaise(shutil.Error('Simulated copy error'))
    else:
        # NOTE(review): nesting of the trailing statements was ambiguous
        # in the collapsed source — this reading (extra rename only on
        # same_host, destroy mocks always recorded) should be confirmed.
        fake.PathUtils.rename(mox.IsA(str), mox.IsA(str))
        if same_host:
            fake.PathUtils.rename(mox.IsA(str), mox.IsA(str))

    self._setup_destroy_mocks()

    return (instance, fake_dest_ip, network_info)
def testGetTipOfTrunkVersion(self):
    """Tests if we get the latest version from TOT."""
    ARBITRARY_URL = 'Pratooey'
    path = os.path.join(
        cros_mark_chrome_as_stable._GetSvnUrl(ARBITRARY_URL),
        'src', 'chrome', 'VERSION')
    self.mox.StubOutWithMock(cros_build_lib, 'RunCommand')
    # 'svn info' yields the revision; 'svn cat' then fetches the VERSION
    # file contents at that revision.
    cros_build_lib.RunCommand(
        ['svn', 'info', ARBITRARY_URL],
        redirect_stdout=True).AndReturn(
            _StubCommandResult(
                'Some Junk 2134\nRevision: %s\nOtherInfo: test_data' %
                fake_svn_rev))
    cros_build_lib.RunCommand(
        ['svn', 'cat', '-r', fake_svn_rev, path],
        redirect_stdout=True,
        error_message=mox.IsA(str)).AndReturn(
            _StubCommandResult('A=8\nB=0\nC=256\nD=0'))
    self.mox.ReplayAll()
    version = cros_mark_chrome_as_stable._GetSpecificVersionUrl(
        ARBITRARY_URL, fake_svn_rev)
    self.mox.VerifyAll()
    # The A/B/C/D components become a dotted version string.
    self.assertEquals(version, '8.0.256.0')
def testCloseListener(self):
    """A close listener fires once per event and never after removal."""
    listener = self.mox.CreateMock(window.ICloseListener)

    # Expect exactly one close notification.
    listener.windowClose(mox.IsA(window.CloseEvent))
    mox.Replay(listener)

    # Register the listener and deliver a close event; it must be
    # received exactly once.
    self._window.addListener(listener, window.ICloseListener)
    self.sendClose(self._window)
    mox.Verify(listener)

    # After removal, a further close event must not reach the listener,
    # so the recorded expectation count stays at one.
    self._window.removeListener(listener, window.ICloseListener)
    self.sendClose(self._window)
    mox.Verify(listener)
def setUp(self):
    """Stub network fetch and shuffle, swap in MockPerson, capture logs."""
    self.mox = mox.Mox()
    self.mox.StubOutWithMock(urlfetch, 'fetch')
    self.mox.StubOutWithMock(random, 'shuffle')
    # Make shuffling a no-op so result ordering is deterministic.
    random.shuffle(mox.IsA(list))
    self.orig_person = model.Person
    self.orig_person = model.Person if False else self.orig_person
    model.Person = MockPerson
    logger = logging.getLogger()
    # Don't log to stderr...
    self.original_handlers = logger.handlers
    logger.handlers = []
    # ...instead log to our mock logging handler.
    self.mock_logging_handler = MockLoggingHandler()
    logger.addHandler(self.mock_logging_handler)
    logger.setLevel(logging.INFO)
    # The first two calls of utils.get_utcnow_seconds() at line 45 and 49 in
    # external_search.py consult the following date setting for debug.
    utils.set_utcnow_for_test(datetime.datetime(2011, 1, 1))
def test_retrieve_for_sanity(self):
    """Simulate a retrieve call!

    Verifies the backend populates all occi.compute.* attributes and the
    action list from the stubbed nova state/VM/image lookups.
    """
    res = core_model.Resource('/foo/bar', infrastructure.COMPUTE,
                              [self.os_template])
    res.attributes = {'occi.core.id': 'bar'}
    self.mox.StubOutWithMock(nova_glue.vm, 'get_occi_state')
    nova_glue.vm.get_occi_state(mox.IsA(object), mox.IsA(object)).\
        AndReturn(('active', [infrastructure.STOP,
                              infrastructure.SUSPEND,
                              infrastructure.RESTART]))
    self.mox.StubOutWithMock(nova_glue.vm, 'get_vm')
    nova_glue.vm.get_vm(mox.IsA(object), mox.IsA(object)).AndReturn(
        {'hostname': 'bar', 'vcpus': 1, 'memory_mb': 256})
    self.mox.StubOutWithMock(nova_glue.storage, 'get_image_architecture')
    nova_glue.storage.get_image_architecture(
        mox.IsA(object), mox.IsA(object)).AndReturn('foo')
    self.mox.ReplayAll()
    self.backend.retrieve(res, self.sec_obj)
    # check if all attrs are there!
    self.assertIn('occi.compute.hostname', res.attributes)
    self.assertIn('occi.compute.architecture', res.attributes)
    self.assertIn('occi.compute.cores', res.attributes)
    self.assertIn('occi.compute.speed', res.attributes)
    self.assertIn('occi.compute.memory', res.attributes)
    self.assertIn('occi.compute.state', res.attributes)
    self.assertIn('occi.core.id', res.attributes)
    self.assertEqual('active', res.attributes['occi.compute.state'])
    self.assertListEqual([infrastructure.STOP, infrastructure.SUSPEND,
                          infrastructure.RESTART], res.actions)
    self.mox.VerifyAll()
def test_create_failed(self):
    """A NeutronClientException during create fails the VPN service."""
    neutronV20.find_resourceid_by_name_or_id(
        mox.IsA(neutronclient.Client),
        'subnet',
        'sub123'
    ).AndReturn('sub123')
    neutronclient.Client.create_vpnservice(self.VPN_SERVICE_CONF).AndRaise(
        exceptions.NeutronClientException())
    self.m.ReplayAll()
    snippet = template_format.parse(vpnservice_template)
    self.stack = utils.parse_stack(snippet)
    resource_defns = self.stack.t.resource_definitions(self.stack)
    rsrc = vpnservice.VPNService('vpnservice',
                                 resource_defns['VPNService'],
                                 self.stack)
    error = self.assertRaises(exception.ResourceFailure,
                              scheduler.TaskRunner(rsrc.create))
    self.assertEqual(
        'NeutronClientException: An unknown exception occurred.',
        six.text_type(error))
    self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
    self.m.VerifyAll()
def test_floatingip_create_specify_ip_address(self):
    """A floating IP created with an explicit address exposes it."""
    t = template_format.parse(neutron_floating_template)
    props = t['resources']['floating_ip']['properties']
    # Request a specific address rather than an allocated one.
    props['floating_ip_address'] = '172.24.4.98'
    stack = utils.parse_stack(t)
    self.stub_NetworkConstraint_validate()
    neutronV20.find_resourceid_by_name_or_id(
        mox.IsA(neutronclient.Client),
        'network', 'abcd1234').AndReturn('xyz1234')
    # The requested address must be forwarded to create_floatingip.
    neutronclient.Client.create_floatingip({
        'floatingip': {
            'floating_network_id': u'xyz1234',
            'floating_ip_address': '172.24.4.98'
        }
    }).AndReturn({
        'floatingip': {
            'status': 'ACTIVE',
            'id': 'fc68ea2c-b60b-4b4f-bd82-94ec81110766',
            'floating_ip_address': '172.24.4.98'
        }
    })
    neutronclient.Client.show_floatingip(
        'fc68ea2c-b60b-4b4f-bd82-94ec81110766').MultipleTimes().AndReturn({
            'floatingip': {
                'status': 'ACTIVE',
                'id': 'fc68ea2c-b60b-4b4f-bd82-94ec81110766',
                'floating_ip_address': '172.24.4.98'
            }
        })
    self.m.ReplayAll()
    fip = stack['floating_ip']
    scheduler.TaskRunner(fip.create)()
    self.assertEqual((fip.CREATE, fip.COMPLETE), fip.state)
    self.assertEqual('172.24.4.98', fip.FnGetAtt('floating_ip_address'))
    self.m.VerifyAll()
def testRecomputeDerivedFields_LotsOfIssues(self):
    """Servlet should enqueue multiple work items."""
    saved_flag = settings.recompute_derived_fields_in_worker
    settings.recompute_derived_fields_in_worker = True
    self.services.issue.next_id = 12345
    # One task per BLOCK-sized range of issue ids.
    num_calls = (self.services.issue.next_id // self.BLOCK + 1)
    for _ in range(num_calls):
        taskqueue.add(
            params=mox.IsA(dict),
            url='/_task/recomputeDerivedFields.do').WithSideEffects(
                self.mock_task_queue.add)
    self.mox.ReplayAll()
    filterrules_helpers.RecomputeAllDerivedFields(
        self.cnxn, self.services, self.project, self.config)
    # No direct recomputation happened in this request — it was all
    # deferred to the task queue.
    self.assertFalse(self.services.issue.get_all_issues_in_project_called)
    self.assertFalse(self.services.issue.update_issues_called)
    self.assertFalse(self.services.issue.enqueue_issues_called)

    work_items = self.mock_task_queue.work_items
    self.assertEqual(num_calls, len(work_items))
    # First item covers the highest id range...
    url, params = work_items[0]['url'], work_items[0]['params']
    self.assertEqual(urls.RECOMPUTE_DERIVED_FIELDS_TASK + '.do', url)
    self.assertEqual(self.project.project_id, params['project_id'])
    self.assertEqual(12345 // self.BLOCK * self.BLOCK + 1,
                     params['lower_bound'])
    self.assertEqual(12345, params['upper_bound'])
    # ...and the last item covers the lowest.
    url, params = work_items[-1]['url'], work_items[-1]['params']
    self.assertEqual(urls.RECOMPUTE_DERIVED_FIELDS_TASK + '.do', url)
    self.assertEqual(self.project.project_id, params['project_id'])
    self.assertEqual(1, params['lower_bound'])
    self.assertEqual(self.BLOCK + 1, params['upper_bound'])
    self.mox.VerifyAll()
    # Restore the global flag so other tests are unaffected.
    settings.recompute_derived_fields_in_worker = saved_flag
def test_create_with_session_persistence(self):
    """A pool whose vip uses APP_COOKIE session persistence creates OK."""
    clients.OpenStackClients.keystone().AndReturn(
        fakes.FakeKeystoneClient())
    neutron_utils.neutronV20.find_resourceid_by_name_or_id(
        mox.IsA(neutronclient.Client),
        'subnet',
        'sub123'
    ).AndReturn('sub123')
    neutronclient.Client.create_pool({
        'pool': {
            'subnet_id': 'sub123', 'protocol': u'HTTP',
            'name': utils.PhysName('test_stack', 'pool'),
            'lb_method': 'ROUND_ROBIN', 'admin_state_up': True}}
    ).AndReturn({'pool': {'id': '5678'}})
    # The session_persistence settings must be forwarded to create_vip.
    neutronclient.Client.create_vip({
        'vip': {
            'protocol': u'HTTP', 'name': 'pool.vip',
            'admin_state_up': True, 'subnet_id': u'sub123',
            'pool_id': '5678', 'protocol_port': 80,
            'session_persistence': {
                'type': 'APP_COOKIE',
                'cookie_name': 'cookie'}}}
    ).AndReturn({'vip': {'id': 'xyz'}})
    neutronclient.Client.show_pool('5678').AndReturn(
        {'pool': {'status': 'ACTIVE'}})
    neutronclient.Client.show_vip('xyz').AndReturn(
        {'vip': {'status': 'ACTIVE'}})
    snippet = template_format.parse(pool_with_session_persistence_template)
    stack = utils.parse_stack(snippet)
    resource_defns = stack.t.resource_definitions(stack)
    rsrc = loadbalancer.Pool(
        'pool', resource_defns['pool'], stack)
    self.m.ReplayAll()
    scheduler.TaskRunner(rsrc.create)()
    self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
    self.m.VerifyAll()
def test_create_pool_with_provider(self):
    """A pool created with a provider exposes it through FnGetAtt."""
    snippet = template_format.parse(pool_template_with_provider)
    self.stub_ProviderConstraint_validate()
    self.stack = utils.parse_stack(snippet)
    neutronV20.find_resourceid_by_name_or_id(
        mox.IsA(neutronclient.Client),
        'subnet',
        'sub123',
        cmd_resource=None,
    ).MultipleTimes().AndReturn('sub123')
    # The provider from the template must be passed to create_pool.
    neutronclient.Client.create_pool({
        'pool': {
            'subnet_id': 'sub123', 'protocol': u'HTTP',
            'name': utils.PhysName(self.stack.name, 'pool'),
            'lb_method': 'ROUND_ROBIN', 'admin_state_up': True,
            'provider': 'test_prov'}}
    ).AndReturn({'pool': {'id': '5678'}})
    neutronclient.Client.create_vip({
        'vip': {
            'protocol': u'HTTP', 'name': 'pool.vip',
            'admin_state_up': True, 'subnet_id': u'sub123',
            'pool_id': '5678', 'protocol_port': 80}}
    ).AndReturn({'vip': {'id': 'xyz'}})
    neutronclient.Client.show_pool('5678').MultipleTimes().AndReturn(
        {'pool': {'status': 'ACTIVE', 'provider': 'test_prov'}})
    neutronclient.Client.show_vip('xyz').AndReturn(
        {'vip': {'status': 'ACTIVE'}})
    resource_defns = self.stack.t.resource_definitions(self.stack)
    rsrc = loadbalancer.Pool(
        'pool', resource_defns['pool'], self.stack)
    self.m.ReplayAll()
    scheduler.TaskRunner(rsrc.create)()
    self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
    self.assertEqual("test_prov", rsrc.FnGetAtt("provider"))
    self.m.VerifyAll()
def create_stack(self, stack_name='test_stack2', params=None):
    """Build, store and create a waitcondition test stack.

    Stubs out the stack UUID, the task-runner sleep, the waitcondition
    status/keystone/identifier hooks, then runs stack.create().

    :param stack_name: name for the stack (default 'test_stack2').
    :param params: optional template parameters dict.
    :returns: the created parser.Stack.
    """
    # Avoid the shared-mutable-default-argument pitfall: default to a
    # fresh dict per call instead of a module-lifetime {}.
    if params is None:
        params = {}
    temp = template_format.parse(test_template_waitcondition)
    template = parser.Template(temp)
    parameters = parser.Parameters(stack_name, template, params)
    ctx = context.get_admin_context()
    ctx.tenant_id = 'test_tenant'
    stack = parser.Stack(ctx, stack_name, template, parameters,
                         disable_rollback=True)

    # Stub out the UUID for this test, so we can get an expected signature
    self.m.StubOutWithMock(uuid, 'uuid4')
    uuid.uuid4().AndReturn('STACKABCD1234')
    self.m.ReplayAll()
    stack.store()

    self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
    scheduler.TaskRunner._sleep(mox.IsA(int)).AndReturn(None)

    # Stub waitcondition status so all goes CREATE_COMPLETE
    self.m.StubOutWithMock(wc.WaitConditionHandle, 'get_status')
    wc.WaitConditionHandle.get_status().AndReturn(['SUCCESS'])

    # Stub keystone() with fake client
    self.m.StubOutWithMock(wc.WaitConditionHandle, 'keystone')
    wc.WaitConditionHandle.keystone().MultipleTimes().AndReturn(self.fc)

    # Renamed from 'id', which shadowed the builtin.
    handle_id = identifier.ResourceIdentifier('test_tenant', stack.name,
                                              stack.id, '', 'WaitHandle')
    self.m.StubOutWithMock(wc.WaitConditionHandle, 'identifier')
    wc.WaitConditionHandle.identifier().MultipleTimes().AndReturn(
        handle_id)

    self.m.ReplayAll()
    stack.create()

    return stack
def test_prep_resize_no_valid_host_back_in_active_state(self):
    """NoValidHost from prep_resize returns the instance to ACTIVE."""
    fake_instance_uuid = 'fake-instance-id'
    inst = {"vm_state": "", "task_state": ""}
    self._mox_schedule_method_helper('schedule_prep_resize')
    self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
    self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
    request_spec = {
        'instance_type': 'fake_type',
        'instance_uuids': [fake_instance_uuid],
        'instance_properties': {'uuid': fake_instance_uuid}}
    kwargs = {
        'context': self.context,
        'image': 'fake_image',
        'request_spec': request_spec,
        'filter_properties': 'fake_props',
        'instance': 'fake_instance',
        'instance_type': 'fake_type',
        'reservations': list('fake_res'),
    }
    self.manager.driver.schedule_prep_resize(**kwargs).AndRaise(
        exception.NoValidHost(reason=""))
    # Unlike a generic exception, NoValidHost puts the instance back to
    # ACTIVE (not ERROR) and records a fault; no exception propagates.
    db.instance_update_and_get_original(
        self.context, fake_instance_uuid,
        {"vm_state": vm_states.ACTIVE,
         "task_state": None}).AndReturn((inst, inst))
    compute_utils.add_instance_fault_from_exc(
        self.context, fake_instance_uuid,
        mox.IsA(exception.NoValidHost), mox.IgnoreArg())
    self.mox.ReplayAll()
    self.manager.prep_resize(**kwargs)
def test_url_with_query_string(self):
    """Signing a GET with a query string preserves the query parameters."""
    self.mox.StubOutWithMock(httplib2.Http, 'request')
    uri = 'http://example.com/foo/bar/?show=thundercats&character=snarf'
    client = oauth.Client(self.consumer, None)
    expected_kwargs = {
        'method': 'GET',
        'body': None,
        'redirections': httplib2.DEFAULT_MAX_REDIRECTS,
        'connection_type': None,
        'headers': mox.IsA(dict),
    }

    def oauth_verifier(url):
        # Build a reference signed request and compare its query params
        # with the url actually passed to httplib2, ignoring the
        # nondeterministic oauth fields (signature, nonce, timestamp).
        req = oauth.Request.from_consumer_and_token(
            self.consumer, None, http_method='GET', http_url=uri,
            parameters={})
        req.sign_request(oauth.SignatureMethod_HMAC_SHA1(),
                         self.consumer, None)
        expected = parse_qsl(urlparse.urlparse(req.to_url()).query)
        actual = parse_qsl(urlparse.urlparse(url).query)
        if len(expected) != len(actual):
            return False
        actual = dict(actual)
        for key, value in expected:
            if key not in ('oauth_signature', 'oauth_nonce',
                           'oauth_timestamp'):
                if actual[key] != value:
                    return False
        return True

    httplib2.Http.request(client, mox.Func(oauth_verifier),
                          **expected_kwargs)
    self.mox.ReplayAll()
    client.request(uri, 'GET')
    self.mox.VerifyAll()
def test_run_instance_no_hosts(self):
    """With zero compute nodes the instance is put in ERROR state."""
    def _fake_empty_call_zone_method(*args, **kwargs):
        return []

    sched = fakes.FakeFilterScheduler()

    instance_uuid = 'fake-uuid1'
    fake_context = context.RequestContext('user', 'project')
    instance_properties = {'project_id': 1, 'os_type': 'Linux'}
    request_spec = {
        'instance_type': {'memory_mb': 1, 'root_gb': 1,
                          'ephemeral_gb': 0},
        'instance_properties': instance_properties,
        'instance_uuids': [instance_uuid],
    }

    self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
    self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
    # Scheduling fails with NoValidHost, so the instance goes to ERROR
    # and a fault is recorded against the updated ref.
    old_ref, new_ref = db.instance_update_and_get_original(
        fake_context, instance_uuid,
        {'vm_state': vm_states.ERROR,
         'task_state': None}).AndReturn(({}, {}))
    compute_utils.add_instance_fault_from_exc(
        fake_context, new_ref, mox.IsA(exception.NoValidHost),
        mox.IgnoreArg())

    # There are no compute nodes at all.
    self.mox.StubOutWithMock(db, 'compute_node_get_all')
    db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])

    self.mox.ReplayAll()
    sched.schedule_run_instance(
        fake_context, request_spec, None, None, None, None, {}, False)
def test_start_servers(self):
    """start_servers creates and starts one RPC server per message type."""
    self.flags(rpc_driver_queue_base='cells.intercell42', group='cells')
    fake_msg_runner = fakes.get_message_runner('api-cell')

    class FakeInterCellRPCDispatcher(object):
        # '_self' keeps the test's 'self' visible for the assertion.
        def __init__(_self, msg_runner):
            self.assertEqual(fake_msg_runner, msg_runner)

    self.stubs.Set(rpc_driver, 'InterCellRPCDispatcher',
                   FakeInterCellRPCDispatcher)
    self.mox.StubOutWithMock(rpc, 'get_server')

    # Expect one get_server/start pair per message type, each on a topic
    # derived from the configured queue base.
    for message_type in messaging.MessageRunner.get_message_types():
        topic = 'cells.intercell42.' + message_type
        target = oslo_messaging.Target(topic=topic, server=CONF.host)
        endpoints = [mox.IsA(FakeInterCellRPCDispatcher)]
        rpcserver = self.mox.CreateMockAnything()
        rpc.get_server(target, endpoints=endpoints).AndReturn(rpcserver)
        rpcserver.start()

    self.mox.ReplayAll()
    self.driver.start_servers(fake_msg_runner)