def test_update_monitors(self):
    """Updating the pool's ``monitors`` property associates the newly
    added monitor and disassociates the removed one.

    Create the pool with monitors mon123/mon456, then update to
    mon123/mon789: mon789 must be associated and mon456 disassociated.
    """
    snippet = template_format.parse(pool_template)
    self.stack = utils.parse_stack(snippet)
    self.mock_create.return_value = {'pool': {'id': '5678'}}
    self.mock_create_vip.return_value = {'vip': {'id': 'xyz'}}
    # Expected request bodies for the pool and VIP create calls.
    pool_create_snippet = {
        'pool': {
            'subnet_id': 'sub123',
            'protocol': u'HTTP',
            'name': utils.PhysName(self.stack.name, 'pool'),
            'lb_method': 'ROUND_ROBIN',
            'admin_state_up': True
        }
    }
    vip_create_snippet = {
        'vip': {
            'protocol': u'HTTP',
            'name': 'pool.vip',
            'admin_state_up': True,
            'subnet_id': u'sub123',
            'pool_id': '5678',
            'protocol_port': 80
        }
    }
    # Report both the pool and the VIP as ACTIVE so create() completes.
    self.mock_show.return_value = {
        'pool': {
            'status': 'ACTIVE',
            'name': '5678'
        }
    }
    self.mock_show_vip.return_value = {
        'vip': {
            'status': 'ACTIVE',
            'name': 'xyz'
        }
    }
    snippet['resources']['pool']['properties']['monitors'] = [
        'mon123', 'mon456'
    ]
    resource_defns = self.stack.t.resource_definitions(self.stack)
    rsrc = loadbalancer.Pool('pool', resource_defns['pool'], self.stack)
    scheduler.TaskRunner(rsrc.create)()

    # Update phase: replace mon456 with mon789.
    props = snippet['resources']['pool']['properties'].copy()
    props['monitors'] = ['mon123', 'mon789']
    update_template = rsrc.t.freeze(properties=props)
    scheduler.TaskRunner(rsrc.update, update_template)()

    # Two associations from create plus exactly one from the update.
    associate_calls = [
        mock.call('5678', {'health_monitor': {
            'id': 'mon123'
        }}),
        mock.call('5678', {'health_monitor': {
            'id': 'mon456'
        }}),
        mock.call('5678', {'health_monitor': {
            'id': 'mon789'
        }})
    ]
    self.mock_associate.assert_has_calls(associate_calls)
    self.assertEqual(3, self.mock_associate.call_count)
    # The monitor dropped by the update must be disassociated once.
    self.mock_disassociate.assert_called_once_with('5678', 'mon456')
    self.mock_create.assert_called_once_with(pool_create_snippet)
    self.mock_create_vip.assert_called_once_with(vip_create_snippet)
    self.mock_show.assert_called_once_with('5678')
    self.mock_show_vip.assert_called_once_with('xyz')
def _test_create(self, resolve_neutron=True, with_vip_subnet=False):
    """Run pool creation and verify it ends in (CREATE, COMPLETE)."""
    resource = self.create_pool(resolve_neutron, with_vip_subnet)
    self.m.ReplayAll()

    scheduler.TaskRunner(resource.create)()

    self.assertEqual((resource.CREATE, resource.COMPLETE), resource.state)
    self.m.VerifyAll()
def handle_create(self):
    """Boot the Nova server backing this instance resource.

    Resolves the image and flavor names to IDs, builds scheduler hints
    and NICs from the resource properties, creates the server, and
    returns a (server, volume-attach TaskRunner) tuple for the create
    completion check to poll.
    """
    security_groups = self._get_security_groups()
    userdata = self.properties[self.USER_DATA] or ''
    flavor = self.properties[self.INSTANCE_TYPE]
    availability_zone = self.properties[self.AVAILABILITY_ZONE]

    image_name = self.properties[self.IMAGE_ID]
    image_id = nova_utils.get_image_id(self.nova(), image_name)
    flavor_id = nova_utils.get_flavor_id(self.nova(), flavor)

    scheduler_hints = {}
    if self.properties[self.NOVA_SCHEDULER_HINTS]:
        for tm in self.properties[self.NOVA_SCHEDULER_HINTS]:
            # adopted from novaclient shell: a key that appears more
            # than once collects all its values into a list.
            hint = tm[self.TAG_KEY]
            hint_value = tm[self.TAG_VALUE]
            if hint in scheduler_hints:
                if isinstance(scheduler_hints[hint], six.string_types):
                    scheduler_hints[hint] = [scheduler_hints[hint]]
                scheduler_hints[hint].append(hint_value)
            else:
                scheduler_hints[hint] = hint_value
    else:
        scheduler_hints = None

    nics = self._build_nics(self.properties[self.NETWORK_INTERFACES],
                            security_groups=security_groups,
                            subnet_id=self.properties[self.SUBNET_ID])
    server = None

    # FIXME(shadower): the instance_user config option is deprecated. Once
    # it's gone, we should always use ec2-user for compatibility with
    # CloudFormation.
    if cfg.CONF.instance_user:
        instance_user = cfg.CONF.instance_user
    else:
        # NOTE(review): this literal looks redacted ('******'); given
        # the comment above it is presumably meant to be the default
        # user name (e.g. 'ec2-user') -- confirm against upstream.
        instance_user = '******'

    try:
        server = self.nova().servers.create(
            name=self.physical_resource_name(),
            image=image_id,
            flavor=flavor_id,
            key_name=self.properties[self.KEY_NAME],
            security_groups=security_groups,
            userdata=nova_utils.build_userdata(self, userdata,
                                               instance_user),
            meta=self._get_nova_metadata(self.properties),
            scheduler_hints=scheduler_hints,
            nics=nics,
            availability_zone=availability_zone)
    finally:
        # Avoid a race condition where the thread could be cancelled
        # before the ID is stored
        if server is not None:
            self.resource_id_set(server.id)

    return server, scheduler.TaskRunner(self._attach_volumes_task())
def handle_delete(self):
    """Synchronously detach the volume from its server."""
    instance_id = self.properties[self._instance_property]
    vol_id = self.properties[self._volume_property]
    task = VolumeDetachTask(self.stack, instance_id, vol_id)
    scheduler.TaskRunner(task)()
def test_update_monitors(self):
    """mox variant: updating ``monitors`` from mon123/mon456 to
    mon123/mon789 disassociates mon456 and associates mon789.

    All neutron calls are recorded in strict order before ReplayAll().
    """
    # --- record the expected create-phase calls ---
    neutronV20.find_resourceid_by_name_or_id(mox.IsA(neutronclient.Client),
                                             'subnet',
                                             'sub123').AndReturn('sub123')
    neutronclient.Client.create_pool({
        'pool': {
            'subnet_id': 'sub123',
            'protocol': u'HTTP',
            'name': utils.PhysName('test_stack', 'pool'),
            'lb_method': 'ROUND_ROBIN',
            'admin_state_up': True
        }
    }).AndReturn({'pool': {
        'id': '5678'
    }})
    neutronclient.Client.associate_health_monitor(
        '5678', {'health_monitor': {
            'id': 'mon123'
        }})
    neutronclient.Client.associate_health_monitor(
        '5678', {'health_monitor': {
            'id': 'mon456'
        }})
    neutronclient.Client.create_vip({
        'vip': {
            'protocol': u'HTTP',
            'name': 'pool.vip',
            'admin_state_up': True,
            'subnet_id': u'sub123',
            'pool_id': '5678',
            'protocol_port': 80
        }
    }).AndReturn({'vip': {
        'id': 'xyz'
    }})
    neutronclient.Client.show_pool('5678').AndReturn(
        {'pool': {
            'status': 'ACTIVE'
        }})
    neutronclient.Client.show_vip('xyz').AndReturn(
        {'vip': {
            'status': 'ACTIVE'
        }})
    # --- record the expected update-phase calls ---
    neutronclient.Client.disassociate_health_monitor('5678', 'mon456')
    neutronclient.Client.associate_health_monitor(
        '5678', {'health_monitor': {
            'id': 'mon789'
        }})

    snippet = template_format.parse(pool_template)
    stack = utils.parse_stack(snippet)
    snippet['Resources']['pool']['Properties']['monitors'] = [
        'mon123', 'mon456'
    ]
    resource_defns = stack.t.resource_definitions(stack)
    rsrc = loadbalancer.Pool('pool', resource_defns['pool'], stack)
    self.m.ReplayAll()
    scheduler.TaskRunner(rsrc.create)()

    # Apply the monitor change as an update.
    update_template = copy.deepcopy(rsrc.t)
    update_template['Properties']['monitors'] = ['mon123', 'mon789']
    scheduler.TaskRunner(rsrc.update, update_template)()

    self.m.VerifyAll()
def test_start_cancelled(self):
    """start() on an already-cancelled runner raises AssertionError."""
    task_runner = scheduler.TaskRunner(DummyTask())
    task_runner.cancel()
    self.assertRaises(AssertionError, task_runner.start)
def create(self):
    '''
    Create the resource.

    Subclasses should provide a handle_create() method to customise
    creation.

    This is a task (generator).  A failed create whose exception is a
    ResourceInError is retried up to cfg.CONF.action_retry_limit times,
    deleting the broken resource and backing off between attempts.
    '''
    action = self.CREATE
    if (self.action, self.status) != (self.INIT, self.COMPLETE):
        exc = exception.Error(
            _('State %s invalid for create') % six.text_type(self.state))
        raise exception.ResourceFailure(exc, self, action)

    # This method can be called when we replace a resource, too. In that
    # case, a hook has already been dealt with in `Resource.update` so we
    # shouldn't do it here again:
    if self.stack.action == self.stack.CREATE:
        yield self._break_if_required(self.CREATE,
                                      environment.HOOK_PRE_CREATE)

    LOG.info(_LI('creating %s'), six.text_type(self))

    # Re-resolve the template, since if the resource Ref's
    # the StackId pseudo parameter, it will change after
    # the parser.Stack is stored (which is after the resources
    # are __init__'d, but before they are create()'d)
    self.reparse()
    self._update_stored_properties()

    def pause():
        # Sub-task that yields until its runner's timeout fires; used
        # purely as a retry back-off delay.
        try:
            while True:
                yield
        except scheduler.Timeout:
            return

    count = {self.CREATE: 0, self.DELETE: 0}
    retry_limit = max(cfg.CONF.action_retry_limit, 0)
    first_failure = None

    while (count[self.CREATE] <= retry_limit
           and count[self.DELETE] <= retry_limit):
        if count[action]:
            # Exponential back-off (with jitter) before retrying.
            delay = timeutils.retry_backoff_delay(count[action],
                                                  jitter_max=2.0)
            waiter = scheduler.TaskRunner(pause)
            waiter.start(timeout=delay)
            while not waiter.step():
                yield
        try:
            yield self._do_action(action, self.properties.validate)
            if action == self.CREATE:
                return
            else:
                # The cleanup DELETE succeeded; try creating again.
                action = self.CREATE
        except exception.ResourceFailure as failure:
            # Only ResourceInError failures are retried; anything else
            # propagates immediately.
            if not isinstance(failure.exc, ResourceInError):
                raise failure

            count[action] += 1
            if action == self.CREATE:
                # Delete the failed resource before the next attempt.
                action = self.DELETE
                count[action] = 0

            if first_failure is None:
                # Save the first exception
                first_failure = failure

    if first_failure:
        raise first_failure
def test_delete(self):
    """Deleting the resource removes the webhook via the pyrax fake."""
    self._setup_test_stack(self.webhook_template)
    webhook = self.stack['my_webhook']

    scheduler.TaskRunner(webhook.delete)()

    self.assertEqual({}, self.fake_auto_scale.webhooks)
def _create_resource(self, name, snippet, stack):
    """Build a DataSource resource, stub its backend id and create it."""
    data_src = data_source.DataSource(name, snippet, stack)
    self.client.data_sources.create.return_value = mock.MagicMock(
        id='12345')
    scheduler.TaskRunner(data_src.create)()
    return data_src
def test_delete(self):
    """Deleting a ScalingGroup resource removes the group via pyrax."""
    self._setup_test_stack()
    group = self.stack['my_group']

    scheduler.TaskRunner(group.delete)()

    self.assertEqual({}, self.fake_auto_scale.groups)
def test_delete(self):
    """Deleting the resource removes the policy via the pyrax fake."""
    self._setup_test_stack(self.policy_template)
    policy = self.stack['my_policy']

    scheduler.TaskRunner(policy.delete)()

    self.assertEqual({}, self.fake_auto_scale.policies)
def update_with_template(self, child_template, user_params=None,
                         timeout_mins=None):
    """Update the nested stack with the new template.

    Handles the rollback short-circuit, lazily creates an empty nested
    stack if the original create never produced one, then issues the
    update over RPC.  Returns a cookie recording the nested stack's
    pre-update state for the completion check.
    """
    if self.id is None:
        self.store()

    if self.stack.action == self.stack.ROLLBACK:
        if self._try_rollback():
            LOG.info('Triggered nested stack %s rollback',
                     self.physical_resource_name())
            return {'target_action': self.stack.ROLLBACK}

    if self.resource_id is None:
        # if the create failed for some reason and the nested
        # stack was not created, we need to create an empty stack
        # here so that the update will work.
        def _check_for_completion():
            while not self.check_create_complete():
                yield

        empty_temp = template_format.parse(
            "heat_template_version: '2013-05-23'")
        self.create_with_template(empty_temp, {})
        checker = scheduler.TaskRunner(_check_for_completion)
        checker(timeout=self.stack.timeout_secs())

    if timeout_mins is None:
        timeout_mins = self.stack.timeout_mins

    try:
        status_data = stack_object.Stack.get_status(self.context,
                                                    self.resource_id)
    except exception.NotFound:
        # Nested stack vanished: force replacement of this resource.
        raise resource.UpdateReplace(self)

    action, status, status_reason, updated_time = status_data
    kwargs = self._stack_kwargs(user_params, child_template)
    # Remember the pre-update state so the completion check can tell
    # whether the nested stack has actually started updating.
    cookie = {
        'previous': {
            'updated_at': updated_time,
            'state': (action, status)
        }
    }
    kwargs.update({
        'stack_identity': dict(self.nested_identifier()),
        'args': {
            rpc_api.PARAM_TIMEOUT: timeout_mins,
            rpc_api.PARAM_CONVERGE: self.converge
        }
    })
    with self.translate_remote_exceptions:
        try:
            self.rpc_client()._update_stack(self.context, **kwargs)
        except exception.HeatException:
            # The engine never took ownership of the template; delete
            # it so it is not leaked, then re-raise.
            with excutils.save_and_reraise_exception():
                raw_template.RawTemplate.delete(self.context,
                                                kwargs['template_id'])
    return cookie
def _create_test_server(self, name, override_name=False):
    """Set up a test server resource and run its create to completion."""
    srv = self._setup_test_server(name, override_name)
    scheduler.TaskRunner(srv.create)()
    return srv
def test_create(self):
    """Creating a metering label ends in (CREATE, COMPLETE)."""
    label = self.create_metering_label()
    self.m.ReplayAll()

    scheduler.TaskRunner(label.create)()

    self.assertEqual((label.CREATE, label.COMPLETE), label.state)
    self.m.VerifyAll()
def test_sleep_none(self):
    """With wait_time=None the runner never calls eventlet.sleep."""
    # Stub sleep with no recorded calls: any call would fail VerifyAll.
    self.m.StubOutWithMock(eventlet, 'sleep')
    self.m.ReplayAll()
    task_runner = scheduler.TaskRunner(DummyTask())
    task_runner(wait_time=None)
def handle_create(self):
    """Start polling the wait-condition handle; return the runner."""
    handle = self._get_handle_resource()
    waiter = scheduler.TaskRunner(self._wait, handle)
    waiter.start(timeout=float(self.properties[self.TIMEOUT]))
    return waiter
def test_double_start(self):
    """A second start() on a running runner raises AssertionError."""
    task_runner = scheduler.TaskRunner(DummyTask())
    task_runner.start()
    self.assertRaises(AssertionError, task_runner.start)
def test_instance_create(self):
    """A DB instance creates cleanly and reports the right entity."""
    tmpl = template_format.parse(db_template)
    db_instance = self._setup_test_instance('dbinstance_create', tmpl)

    scheduler.TaskRunner(db_instance.create)()

    self.assertEqual((db_instance.CREATE, db_instance.COMPLETE),
                     db_instance.state)
    self.assertEqual('instances', db_instance.entity)
def test_call_double_start(self):
    """start() after the runner already ran to completion must fail."""
    task_runner = scheduler.TaskRunner(DummyTask())
    task_runner(wait_time=None)
    self.assertRaises(AssertionError, task_runner.start)
def test_instance_check(self):
    """check() on a DB instance finishes in (CHECK, COMPLETE)."""
    instance = self._get_db_instance()
    scheduler.TaskRunner(instance.check)()
    self.assertEqual((instance.CHECK, instance.COMPLETE), instance.state)
def test_scaling_group_suspend(self):
    """Suspending a scaling group moves it to (SUSPEND, COMPLETE)."""
    group = self.create_stack(self.parsed)['my-group']
    self.assertEqual(1, len(group.get_instances()))
    self.assertEqual((group.CREATE, group.COMPLETE), group.state)

    scheduler.TaskRunner(group.suspend)()

    self.assertEqual((group.SUSPEND, group.COMPLETE), group.state)
def test_create(self):
    """Creating a providernet range ends in (CREATE, COMPLETE)."""
    pn_range = self.create_providernet_range()
    self.m.ReplayAll()

    scheduler.TaskRunner(pn_range.create)()

    self.assertEqual((pn_range.CREATE, pn_range.COMPLETE), pn_range.state)
    self.m.VerifyAll()
def test_properties_are_prepared_for_session_persistence(self):
    """mox variant: HTTP_COOKIE persistence needs no cookie_name.

    The property schema still materialises cookie_name (as None) but it
    must not be sent to neutron in the VIP create request.
    """
    # --- record the expected neutron calls, in order ---
    neutronV20.find_resourceid_by_name_or_id(mox.IsA(neutronclient.Client),
                                             'subnet',
                                             'sub123').AndReturn('sub123')
    neutronclient.Client.create_pool({
        'pool': {
            'subnet_id': 'sub123',
            'protocol': u'HTTP',
            'name': utils.PhysName('test_stack', 'pool'),
            'lb_method': 'ROUND_ROBIN',
            'admin_state_up': True
        }
    }).AndReturn({'pool': {
        'id': '5678'
    }})
    # Note: session_persistence carries only the type -- no cookie_name.
    neutronclient.Client.create_vip({
        'vip': {
            'protocol': u'HTTP',
            'name': 'pool.vip',
            'admin_state_up': True,
            'subnet_id': u'sub123',
            'pool_id': '5678',
            'protocol_port': 80,
            'session_persistence': {
                'type': 'HTTP_COOKIE'
            }
        }
    }).AndReturn({'vip': {
        'id': 'xyz'
    }})
    neutronclient.Client.show_pool('5678').AndReturn(
        {'pool': {
            'status': 'ACTIVE'
        }})
    neutronclient.Client.show_vip('xyz').AndReturn(
        {'vip': {
            'status': 'ACTIVE'
        }})

    snippet = template_format.parse(pool_with_session_persistence_template)
    pool = snippet['Resources']['pool']
    persistence = pool['Properties']['vip']['session_persistence']

    # change persistence type to HTTP_COOKIE that not require cookie_name
    persistence['type'] = 'HTTP_COOKIE'
    del persistence['cookie_name']
    stack = utils.parse_stack(snippet)

    resource_defns = stack.t.resource_definitions(stack)
    resource = loadbalancer.Pool('pool', resource_defns['pool'], stack)

    # assert that properties contain cookie_name property with None value
    persistence = resource.properties['vip']['session_persistence']
    self.assertIn('cookie_name', persistence)
    self.assertIsNone(persistence['cookie_name'])

    self.m.ReplayAll()
    scheduler.TaskRunner(resource.create)()
    self.assertEqual((resource.CREATE, resource.COMPLETE), resource.state)
    self.m.VerifyAll()
def _test_create(self, resolve_neutron=True, resolve_router=True):
    """Run VPN service creation and verify (CREATE, COMPLETE)."""
    vpn = self.create_vpnservice(resolve_neutron, resolve_router)
    self.m.ReplayAll()

    scheduler.TaskRunner(vpn.create)()

    self.assertEqual((vpn.CREATE, vpn.COMPLETE), vpn.state)
    self.m.VerifyAll()
def test_create(self):
    """Creating a health monitor ends in (CREATE, COMPLETE)."""
    monitor = self.create_health_monitor()
    self.m.ReplayAll()

    scheduler.TaskRunner(monitor.create)()

    self.assertEqual((monitor.CREATE, monitor.COMPLETE), monitor.state)
    self.m.VerifyAll()
def test_create(self):
    """Creating an IPsec site connection ends in (CREATE, COMPLETE)."""
    conn = self.create_ipsec_site_connection()
    self.m.ReplayAll()

    scheduler.TaskRunner(conn.create)()

    self.assertEqual((conn.CREATE, conn.COMPLETE), conn.state)
    self.m.VerifyAll()
def test_create_with_vip_subnet(self):
    """Pool creation with an explicit VIP subnet completes cleanly."""
    pool = self.create_pool(with_vip_subnet=True)
    self.m.ReplayAll()

    scheduler.TaskRunner(pool.create)()

    self.assertEqual((pool.CREATE, pool.COMPLETE), pool.state)
    self.m.VerifyAll()
def test_subnet(self):
    """Full lifecycle test for a subnet resource.

    Covers create (including tag replacement), attribute resolution,
    dependency on ports, update (tags, with/without name, empty diff)
    and idempotent double delete.
    """
    # Properties used later for the update phase.
    update_props = {
        'subnet': {
            'dns_nameservers': ['8.8.8.8', '192.168.1.254'],
            'name': 'mysubnet',
            'enable_dhcp': True,
            'host_routes': [{
                'destination': '192.168.1.0/24',
                'nexthop': '194.168.1.2'
            }],
            'gateway_ip': '10.0.3.105',
            'tags': ['tag2', 'tag3'],
            'allocation_pools': [{
                'start': '10.0.3.20',
                'end': '10.0.3.100'
            }, {
                'start': '10.0.3.110',
                'end': '10.0.3.200'
            }]
        }
    }
    t, stack = self._setup_mock(tags=['tag1', 'tag2'])
    # Expected request body for the create call.
    create_props = {
        'subnet': {
            'name': utils.PhysName(stack.name, 'test_subnet'),
            'network_id': 'fc68ea2c-b60b-4b4f-bd82-94ec81110766',
            'dns_nameservers': [u'8.8.8.8'],
            'allocation_pools': [{
                'start': u'10.0.3.20',
                'end': u'10.0.3.150'
            }],
            'host_routes': [{
                'destination': u'10.0.4.0/24',
                'nexthop': u'10.0.3.20'
            }],
            'ip_version': 4,
            'cidr': u'10.0.3.0/24',
            'tenant_id': 'c1210485b2424d48804aad5d39c61b8f',
            'enable_dhcp': True
        }
    }
    self.patchobject(stack['net'], 'FnGetRefId',
                     return_value='fc68ea2c-b60b-4b4f-bd82-94ec81110766')
    set_tag_mock = self.patchobject(neutronclient.Client, 'replace_tag')
    rsrc = self.create_subnet(t, stack, 'sub_net')
    scheduler.TaskRunner(rsrc.create)()
    self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
    self.create_mock.assert_called_once_with(create_props)
    set_tag_mock.assert_called_once_with('subnets', rsrc.resource_id,
                                         {'tags': ['tag1', 'tag2']})
    rsrc.validate()
    ref_id = rsrc.FnGetRefId()
    self.assertEqual('91e47a57-7508-46fe-afc9-fc454e8580e1', ref_id)
    # NOTE(review): the first FnGetAtt call apparently yields None and
    # the second yields the network id -- presumably the attribute is
    # resolved lazily on the second access; confirm against the mocked
    # show behaviour in _setup_mock.
    self.assertIsNone(rsrc.FnGetAtt('network_id'))
    self.assertEqual('fc68ea2c-b60b-4b4f-bd82-94ec81110766',
                     rsrc.FnGetAtt('network_id'))
    self.assertEqual('8.8.8.8', rsrc.FnGetAtt('dns_nameservers')[0])

    # assert the dependency (implicit or explicit) between the ports
    # and the subnet
    self.assertIn(stack['port'], stack.dependencies[stack['sub_net']])
    self.assertIn(stack['port2'], stack.dependencies[stack['sub_net']])

    update_snippet = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(),
                                                  update_props['subnet'])
    rsrc.handle_update(update_snippet, {}, update_props['subnet'])
    self.update_mock.assert_called_once_with(
        '91e47a57-7508-46fe-afc9-fc454e8580e1', update_props)
    set_tag_mock.assert_called_with('subnets', rsrc.resource_id,
                                    {'tags': ['tag2', 'tag3']})
    # with name None
    del update_props['subnet']['name']
    rsrc.handle_update(update_snippet, {}, update_props['subnet'])
    self.update_mock.assert_called_with(
        '91e47a57-7508-46fe-afc9-fc454e8580e1', update_props)
    # with no prop_diff
    rsrc.handle_update(update_snippet, {}, {})

    # Delete twice: the second delete must also succeed (idempotent).
    self.assertIsNone(scheduler.TaskRunner(rsrc.delete)())
    rsrc.state_set(rsrc.CREATE, rsrc.COMPLETE, 'to delete again')
    self.assertIsNone(scheduler.TaskRunner(rsrc.delete)())
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
    """Apply updates to Metadata, Tags, InstanceType and
    NetworkInterfaces.

    Returns a list of TaskRunner checkers (with the first already
    started) for check_update_complete to step through sequentially.
    """
    if 'Metadata' in tmpl_diff:
        self.metadata = tmpl_diff['Metadata']
    checkers = []
    server = None
    if self.TAGS in prop_diff:
        server = self.nova().servers.get(self.resource_id)
        nova_utils.meta_update(self.nova(), server,
                               self._get_nova_metadata(prop_diff))

    if self.INSTANCE_TYPE in prop_diff:
        flavor = prop_diff[self.INSTANCE_TYPE]
        flavor_id = nova_utils.get_flavor_id(self.nova(), flavor)
        if not server:
            server = self.nova().servers.get(self.resource_id)
        # Resize is long-running; run it as a deferred checker task.
        checker = scheduler.TaskRunner(nova_utils.resize, server, flavor,
                                       flavor_id)
        checkers.append(checker)
    if self.NETWORK_INTERFACES in prop_diff:
        new_network_ifaces = prop_diff.get(self.NETWORK_INTERFACES)
        old_network_ifaces = self.properties.get(self.NETWORK_INTERFACES)
        subnet_id = (prop_diff.get(self.SUBNET_ID) or
                     self.properties.get(self.SUBNET_ID))
        security_groups = self._get_security_groups()
        if not server:
            server = self.nova().servers.get(self.resource_id)
        # if there is entrys in old_network_ifaces and new_network_ifaces,
        # remove the same entrys from old and new ifaces
        if old_network_ifaces and new_network_ifaces:
            # there are four situations:
            # 1.old includes new, such as: old = 2,3, new = 2
            # 2.new includes old, such as: old = 2,3, new = 1,2,3
            # 3.has overlaps, such as: old = 2,3, new = 1,2
            # 4.different, such as: old = 2,3, new = 1,4
            # detach unmatched ones in old, attach unmatched ones in new
            self._remove_matched_ifaces(old_network_ifaces,
                                        new_network_ifaces)
            if old_network_ifaces:
                old_nics = self._build_nics(old_network_ifaces)
                for nic in old_nics:
                    checker = scheduler.TaskRunner(
                        server.interface_detach, nic['port-id'])
                    checkers.append(checker)
            if new_network_ifaces:
                new_nics = self._build_nics(new_network_ifaces)
                for nic in new_nics:
                    checker = scheduler.TaskRunner(
                        server.interface_attach, nic['port-id'],
                        None, None)
                    checkers.append(checker)
        # if the interfaces not come from property 'NetworkInterfaces',
        # the situation is somewhat complex, so to detach the old ifaces,
        # and then attach the new ones.
        else:
            interfaces = server.interface_list()
            for iface in interfaces:
                checker = scheduler.TaskRunner(server.interface_detach,
                                               iface.port_id)
                checkers.append(checker)
            nics = self._build_nics(new_network_ifaces,
                                    security_groups=security_groups,
                                    subnet_id=subnet_id)
            # 'SubnetId' property is empty(or None) and
            # 'NetworkInterfaces' property is empty(or None),
            # _build_nics() will return nics = None,we should attach
            # first free port, according to similar behavior during
            # instance creation
            if not nics:
                checker = scheduler.TaskRunner(server.interface_attach,
                                               None, None, None)
                checkers.append(checker)
            else:
                for nic in nics:
                    checker = scheduler.TaskRunner(
                        server.interface_attach, nic['port-id'],
                        None, None)
                    checkers.append(checker)

    # Kick off the first checker; the rest are started as each
    # predecessor completes.
    if checkers:
        checkers[0].start()
    return checkers
def test_properties_are_prepared_for_session_persistence(self):
    """mock variant: HTTP_COOKIE persistence needs no cookie_name.

    The property schema still materialises cookie_name (as None) but it
    must not appear in the VIP create request sent to neutron.
    """
    snippet = template_format.parse(pool_with_session_persistence_template)
    pool = snippet['resources']['pool']
    persistence = pool['properties']['vip']['session_persistence']

    # change persistence type to HTTP_COOKIE that not require cookie_name
    persistence['type'] = 'HTTP_COOKIE'
    del persistence['cookie_name']
    self.stack = utils.parse_stack(snippet)
    self.mock_create.return_value = {'pool': {'id': '5678'}}
    self.mock_create_vip.return_value = {'vip': {'id': 'xyz'}}
    # Report both the pool and the VIP as ACTIVE so create() completes.
    self.mock_show.return_value = {
        'pool': {
            'status': 'ACTIVE',
            'name': '5678'
        }
    }
    self.mock_show_vip.return_value = {
        'vip': {
            'status': 'ACTIVE',
            'name': 'xyz'
        }
    }
    # Expected request bodies; session_persistence carries only the
    # type -- no cookie_name.
    pool_create_snippet = {
        'pool': {
            'subnet_id': 'sub123',
            'protocol': u'HTTP',
            'name': utils.PhysName(self.stack.name, 'pool'),
            'lb_method': 'ROUND_ROBIN',
            'admin_state_up': True
        }
    }
    vip_create_snippet = {
        'vip': {
            'protocol': u'HTTP',
            'name': 'pool.vip',
            'admin_state_up': True,
            'subnet_id': u'sub123',
            'pool_id': '5678',
            'protocol_port': 80,
            'session_persistence': {
                'type': 'HTTP_COOKIE'
            }
        }
    }
    resource_defns = self.stack.t.resource_definitions(self.stack)
    resource = loadbalancer.Pool('pool', resource_defns['pool'],
                                 self.stack)

    # assert that properties contain cookie_name property with None value
    persistence = resource.properties['vip']['session_persistence']
    self.assertIn('cookie_name', persistence)
    self.assertIsNone(persistence['cookie_name'])

    scheduler.TaskRunner(resource.create)()
    self.assertEqual((resource.CREATE, resource.COMPLETE), resource.state)
    self.mock_create.assert_called_once_with(pool_create_snippet)
    self.mock_create_vip.assert_called_once_with(vip_create_snippet)
    self.mock_show.assert_called_once_with('5678')
    self.mock_show_vip.assert_called_once_with('xyz')