def obj_load_attr(self, attrname):
    if attrname not in REQUEST_SPEC_OPTIONAL_ATTRS:
        raise exception.ObjectActionError(
            action='obj_load_attr',
            reason='attribute %s not lazy-loadable' % attrname)

    if attrname == 'security_groups':
        self.security_groups = objects.SecurityGroupList(objects=[])
        return

    if attrname == 'network_metadata':
        self.network_metadata = objects.NetworkMetadata(
            physnets=set(), tunneled=False)
        return

    # NOTE(sbauza): In case the primitive was not providing that field
    # because of a previous RequestSpec version, we want to default
    # that field in order to have the same behaviour.
    self.obj_set_defaults(attrname)
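# A minimal, standalone sketch (not Nova code) of the lazy-load idea used
# above: only whitelisted optional attributes may be defaulted on first
# access, anything else raises. All names here are illustrative assumptions.
OPTIONAL_ATTRS = ('security_groups', 'network_metadata')


class LazySpec(object):
    def __getattr__(self, attrname):
        if attrname not in OPTIONAL_ATTRS:
            raise AttributeError('attribute %s not lazy-loadable' % attrname)
        # Default the missing optional attribute to an empty value, mirroring
        # what obj_load_attr does with SecurityGroupList(objects=[]).
        default = [] if attrname == 'security_groups' else {}
        setattr(self, attrname, default)
        return default


spec = LazySpec()
assert spec.security_groups == []   # lazily defaulted on first access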
def make_secgroup_list(security_groups):
    """A helper to make security group objects from a list of names or uuids.

    Note that this does not make them save-able or have the rest of the
    attributes they would normally have, but provides a quick way to fill,
    for example, an instance object during create.
    """
    secgroups = objects.SecurityGroupList()
    secgroups.objects = []
    for sg in security_groups:
        secgroup = objects.SecurityGroup()
        if uuidutils.is_uuid_like(sg):
            # This is a neutron security group uuid so store in the uuid
            # field.
            secgroup.uuid = sg
        else:
            # This is neutron's special 'default' security group
            secgroup.name = sg
        secgroups.objects.append(secgroup)
    return secgroups
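# Hedged usage sketch for the helper above: is_uuid_like() from oslo.utils
# decides whether an entry lands in the uuid field or the name field. The
# input values below are made up for illustration only.
from oslo_utils import uuidutils

requested = ['default', '0e0a76b2-7c52-4bc0-9a60-d83017e42c1a']
for sg in requested:
    field = 'uuid' if uuidutils.is_uuid_like(sg) else 'name'
    print('%s -> stored in the %s field' % (sg, field))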
def test_unfilter_instance_undefines_nwfilter(self, mock_secrule,
                                              mock_instlist, mock_define,
                                              mock_lookup):
    fakefilter = NWFilterFakes()
    mock_lookup.side_effect = fakefilter.nwfilterLookupByName
    mock_define.side_effect = fakefilter.filterDefineXMLMock
    instance_ref = self._create_instance_ref()
    instance_ref.security_groups = objects.SecurityGroupList()
    mock_secrule.return_value = objects.SecurityGroupRuleList()

    network_info = _fake_network_info(self, 1)
    self.fw.setup_basic_filtering(instance_ref, network_info)
    self.fw.prepare_instance_filter(instance_ref, network_info)
    self.fw.apply_instance_filter(instance_ref, network_info)
    original_filter_count = len(fakefilter.filters)
    self.fw.unfilter_instance(instance_ref, network_info)

    # should undefine just the instance filter
    self.assertEqual(original_filter_count - len(fakefilter.filters), 1)
def test_unfilter_instance_undefines_nwfilter(self, mock_secgroup,
                                              mock_secrule, mock_instlist):
    fakefilter = NWFilterFakes()
    _xml_mock = fakefilter.filterDefineXMLMock
    self.fw.nwfilter._conn.nwfilterDefineXML = _xml_mock
    _lookup_name = fakefilter.nwfilterLookupByName
    self.fw.nwfilter._conn.nwfilterLookupByName = _lookup_name
    instance_ref = self._create_instance_ref()

    mock_secgroup.return_value = objects.SecurityGroupList()

    network_info = _fake_network_info(self.stubs, 1)
    self.fw.setup_basic_filtering(instance_ref, network_info)
    self.fw.prepare_instance_filter(instance_ref, network_info)
    self.fw.apply_instance_filter(instance_ref, network_info)
    original_filter_count = len(fakefilter.filters)
    self.fw.unfilter_instance(instance_ref, network_info)

    # should undefine just the instance filter
    self.assertEqual(original_filter_count - len(fakefilter.filters), 1)
def _create_fake_instance_obj(self, params=None, type_name='m1.tiny',
                              services=False, context=None):
    flavor = flavors.get_flavor_by_name(type_name)
    inst = objects.Instance(context=context or self.context)
    inst.cell_name = 'api!child'
    inst.vm_state = vm_states.ACTIVE
    inst.task_state = None
    inst.power_state = power_state.RUNNING
    inst.image_ref = FAKE_IMAGE_REF
    inst.reservation_id = 'r-fakeres'
    inst.user_id = self.user_id
    inst.project_id = self.project_id
    inst.host = self.compute.host
    inst.node = NODENAME
    inst.instance_type_id = flavor.id
    inst.ami_launch_index = 0
    inst.memory_mb = 0
    inst.vcpus = 0
    inst.root_gb = 0
    inst.ephemeral_gb = 0
    inst.architecture = obj_fields.Architecture.X86_64
    inst.os_type = 'Linux'
    inst.system_metadata = (params and params.get('system_metadata', {})
                            or {})
    inst.locked = False
    inst.created_at = timeutils.utcnow()
    inst.updated_at = timeutils.utcnow()
    inst.launched_at = timeutils.utcnow()
    inst.security_groups = objects.SecurityGroupList(objects=[])
    inst.flavor = flavor
    inst.old_flavor = None
    inst.new_flavor = None
    if params:
        inst.flavor.update(params.pop('flavor', {}))
        inst.update(params)
    inst.create()

    return inst
def test_from_components(self):
    ctxt = context.RequestContext('fake-user', 'fake-project')
    destination = objects.Destination(host='foo')
    instance = fake_instance.fake_instance_obj(ctxt)
    image = {'id': uuids.image_id, 'properties': {'mappings': []},
             'status': 'fake-status', 'location': 'far-away'}
    flavor = fake_flavor.fake_flavor_obj(ctxt)
    filter_properties = {'requested_destination': destination}
    instance_group = None

    spec = objects.RequestSpec.from_components(
        ctxt, instance.uuid, image, flavor, instance.numa_topology,
        instance.pci_requests, filter_properties, instance_group,
        instance.availability_zone, objects.SecurityGroupList())
    # Make sure that all fields are set using that helper method
    for field in [f for f in spec.obj_fields if f != 'id']:
        self.assertTrue(spec.obj_attr_is_set(field),
                        'Field: %s is not set' % field)
    # just making sure that the context is set by the method
    self.assertEqual(ctxt, spec._context)
    self.assertEqual(destination, spec.requested_destination)
def test_static_filters(self, mock_secrule, mock_instlist):
    UUID = "2674993b-6adb-4733-abd9-a7c10cc1f146"
    SRC_UUID = "0e0a76b2-7c52-4bc0-9a60-d83017e42c1a"
    instance_ref = self._create_instance_ref(UUID)
    src_instance_ref = self._create_instance_ref(SRC_UUID)

    secgroup = objects.SecurityGroup(id=1,
                                     user_id='fake',
                                     project_id='fake',
                                     name='testgroup',
                                     description='test group')

    src_secgroup = objects.SecurityGroup(id=2,
                                         user_id='fake',
                                         project_id='fake',
                                         name='testsourcegroup',
                                         description='src group')

    r1 = objects.SecurityGroupRule(parent_group_id=secgroup.id,
                                   protocol='icmp',
                                   from_port=-1,
                                   to_port=-1,
                                   cidr='192.168.11.0/24',
                                   grantee_group=None)

    r2 = objects.SecurityGroupRule(parent_group_id=secgroup.id,
                                   protocol='icmp',
                                   from_port=8,
                                   to_port=-1,
                                   cidr='192.168.11.0/24',
                                   grantee_group=None)

    r3 = objects.SecurityGroupRule(parent_group_id=secgroup.id,
                                   protocol='tcp',
                                   from_port=80,
                                   to_port=81,
                                   cidr='192.168.10.0/24',
                                   grantee_group=None)

    r4 = objects.SecurityGroupRule(parent_group_id=secgroup.id,
                                   protocol='tcp',
                                   from_port=80,
                                   to_port=81,
                                   cidr=None,
                                   grantee_group=src_secgroup,
                                   group_id=src_secgroup.id)

    r5 = objects.SecurityGroupRule(parent_group_id=secgroup.id,
                                   protocol=None,
                                   cidr=None,
                                   grantee_group=src_secgroup,
                                   group_id=src_secgroup.id)

    secgroup_list = objects.SecurityGroupList()
    secgroup_list.objects.append(secgroup)
    src_secgroup_list = objects.SecurityGroupList()
    src_secgroup_list.objects.append(src_secgroup)
    instance_ref.security_groups = secgroup_list
    src_instance_ref.security_groups = src_secgroup_list
    mock_secrule.return_value = objects.SecurityGroupRuleList(
        objects=[r1, r2, r3, r4, r5])

    def _fake_instlist(ctxt, id):
        if id == src_secgroup.id:
            insts = objects.InstanceList()
            insts.objects.append(src_instance_ref)
            return insts
        else:
            insts = objects.InstanceList()
            insts.objects.append(instance_ref)
            return insts

    mock_instlist.side_effect = _fake_instlist

    def fake_iptables_execute(*cmd, **kwargs):
        process_input = kwargs.get('process_input', None)
        if cmd == ('ip6tables-save', '-c'):
            return '\n'.join(self.in6_filter_rules), None
        if cmd == ('iptables-save', '-c'):
            return '\n'.join(self.in_rules), None
        if cmd == ('iptables-restore', '-c'):
            lines = process_input.split('\n')
            if '*filter' in lines:
                self.out_rules = lines
            return '', ''
        if cmd == ('ip6tables-restore', '-c',):
            lines = process_input.split('\n')
            if '*filter' in lines:
                self.out6_rules = lines
            return '', ''

    network_model = _fake_network_info(self, 1)

    linux_net.iptables_manager.execute = fake_iptables_execute
    self.stubs.Set(compute_utils, 'get_nw_info_for_instance',
                   lambda instance: network_model)

    self.fw.prepare_instance_filter(instance_ref, network_model)
    self.fw.apply_instance_filter(instance_ref, network_model)

    in_rules = filter(lambda l: not l.startswith('#'), self.in_rules)
    for rule in in_rules:
        if 'nova' not in rule:
            self.assertIn(rule, self.out_rules,
                          'Rule went missing: %s' % rule)

    instance_chain = None
    for rule in self.out_rules:
        # This is pretty crude, but it'll do for now
        # last two octets change
        if re.search('-d 192.168.[0-9]{1,3}.[0-9]{1,3} -j', rule):
            instance_chain = rule.split(' ')[-1]
            break
    self.assertTrue(instance_chain, "The instance chain wasn't added")

    security_group_chain = None
    for rule in self.out_rules:
        # This is pretty crude, but it'll do for now
        if '-A %s -j' % instance_chain in rule:
            security_group_chain = rule.split(' ')[-1]
            break
    self.assertTrue(security_group_chain,
                    "The security group chain wasn't added")

    regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp '
                       '-s 192.168.11.0/24')
    self.assertGreater(len(filter(regex.match, self.out_rules)), 0,
                       "ICMP acceptance rule wasn't added")

    regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp -m icmp '
                       '--icmp-type 8 -s 192.168.11.0/24')
    self.assertGreater(len(filter(regex.match, self.out_rules)), 0,
                       "ICMP Echo Request acceptance rule wasn't added")

    for ip in network_model.fixed_ips():
        if ip['version'] != 4:
            continue
        regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp -m multiport '
                           '--dports 80:81 -s %s' % ip['address'])
        self.assertGreater(len(filter(regex.match, self.out_rules)), 0,
                           "TCP port 80/81 acceptance rule wasn't added")
        regex = re.compile('\[0\:0\] -A .* -j ACCEPT -s '
                           '%s' % ip['address'])
        self.assertGreater(len(filter(regex.match, self.out_rules)), 0,
                           "Protocol/port-less acceptance rule"
                           " wasn't added")

    regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp '
                       '-m multiport --dports 80:81 -s 192.168.10.0/24')
    self.assertGreater(len(filter(regex.match, self.out_rules)), 0,
                       "TCP port 80/81 acceptance rule wasn't added")
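# Small self-contained check of the kind of regex the test above relies on:
# iptables-save -c prefixes each rule with a "[packets:bytes]" counter, which
# is what the '\[0\:0\]' anchor matches. The sample rule line is made up.
import re

sample_rule = '[0:0] -A inst-1 -j ACCEPT -p icmp -s 192.168.11.0/24'
regex = re.compile(r'\[0\:0\] -A .* -j ACCEPT -p icmp -s 192.168.11.0/24')
assert regex.match(sample_rule) is not None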
def populate_security_groups(self, instance, security_groups):
    # Setting to empty list since we do not want to populate this field
    # in the nova database if using the neutron driver
    instance['security_groups'] = objects.SecurityGroupList()
    instance['security_groups'].objects = []
def _from_db_object(context, instance, db_inst, expected_attrs=None):
    """Method to help with migration to objects.

    Converts a database entity to a formal object.
    """
    instance._context = context
    if expected_attrs is None:
        expected_attrs = []
    # Most of the field names match right now, so be quick
    for field in instance.fields:
        if field in INSTANCE_OPTIONAL_ATTRS:
            continue
        elif field == 'deleted':
            instance.deleted = db_inst['deleted'] == db_inst['id']
        elif field == 'cleaned':
            instance.cleaned = db_inst['cleaned'] == 1
        else:
            instance[field] = db_inst[field]

    # NOTE(danms): We can be called with a dict instead of a
    # SQLAlchemy object, so we have to be careful here
    if hasattr(db_inst, '__dict__'):
        have_extra = 'extra' in db_inst.__dict__ and db_inst['extra']
    else:
        have_extra = 'extra' in db_inst and db_inst['extra']

    if 'metadata' in expected_attrs:
        instance['metadata'] = utils.instance_meta(db_inst)
    if 'system_metadata' in expected_attrs:
        instance['system_metadata'] = utils.instance_sys_meta(db_inst)
    if 'fault' in expected_attrs:
        instance['fault'] = (
            objects.InstanceFault.get_latest_for_instance(
                context, instance.uuid))
    if 'numa_topology' in expected_attrs:
        if have_extra:
            instance._load_numa_topology(
                db_inst['extra'].get('numa_topology'))
        else:
            instance.numa_topology = None
    if 'pci_requests' in expected_attrs:
        if have_extra:
            instance._load_pci_requests(
                db_inst['extra'].get('pci_requests'))
        else:
            instance.pci_requests = None
    if 'vcpu_model' in expected_attrs:
        if have_extra:
            instance._load_vcpu_model(
                db_inst['extra'].get('vcpu_model'))
        else:
            instance.vcpu_model = None
    if 'ec2_ids' in expected_attrs:
        instance._load_ec2_ids()
    if 'migration_context' in expected_attrs:
        if have_extra:
            instance._load_migration_context(
                db_inst['extra'].get('migration_context'))
        else:
            instance.migration_context = None
    if 'info_cache' in expected_attrs:
        if db_inst.get('info_cache') is None:
            instance.info_cache = None
        elif not instance.obj_attr_is_set('info_cache'):
            # TODO(danms): If this ever happens on a backlevel instance
            # passed to us by a backlevel service, things will break
            instance.info_cache = objects.InstanceInfoCache(context)
        if instance.info_cache is not None:
            instance.info_cache._from_db_object(context,
                                                instance.info_cache,
                                                db_inst['info_cache'])

    if any([x in expected_attrs for x in ('flavor', 'old_flavor',
                                          'new_flavor')]):
        if have_extra and db_inst['extra'].get('flavor'):
            instance._flavor_from_db(db_inst['extra']['flavor'])

    # TODO(danms): If we are updating these on a backlevel instance,
    # we'll end up sending back new versions of these objects (see
    # above note for new info_caches)
    if 'pci_devices' in expected_attrs:
        pci_devices = base.obj_make_list(
            context, objects.PciDeviceList(context),
            objects.PciDevice, db_inst['pci_devices'])
        instance['pci_devices'] = pci_devices
    if 'security_groups' in expected_attrs:
        sec_groups = base.obj_make_list(
            context, objects.SecurityGroupList(context),
            objects.SecurityGroup, db_inst.get('security_groups', []))
        instance['security_groups'] = sec_groups
    if 'tags' in expected_attrs:
        tags = base.obj_make_list(
            context, objects.TagList(context),
            objects.Tag, db_inst['tags'])
        instance['tags'] = tags

    instance.obj_reset_changes()
    return instance
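# A toy, standalone illustration (not Nova code) of the expected_attrs
# pattern above: scalar columns are copied eagerly, while joined/expensive
# attributes are only hydrated when the caller explicitly asked for them.
# All names here are hypothetical.
class ToyInstance(object):
    fields = ('uuid', 'host', 'vm_state')
    optional_attrs = ('metadata', 'security_groups')

    @classmethod
    def from_db_row(cls, db_row, expected_attrs=None):
        expected_attrs = expected_attrs or []
        obj = cls()
        for field in cls.fields:
            setattr(obj, field, db_row[field])
        for attr in cls.optional_attrs:
            if attr in expected_attrs:
                setattr(obj, attr, db_row.get(attr))
        return obj


row = {'uuid': 'fake-uuid', 'host': 'compute1', 'vm_state': 'active',
       'metadata': {'foo': 'bar'}, 'security_groups': []}
inst = ToyInstance.from_db_row(row, expected_attrs=['metadata'])
assert inst.metadata == {'foo': 'bar'}
assert not hasattr(inst, 'security_groups')  # not requested, so not loaded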
def populate_security_groups(self, security_groups):
    # Returning an empty list since we do not want to populate this field
    # in the nova database if using the neutron driver
    return objects.SecurityGroupList()
def test_provider_firewall_rules(self, mock_lock, mock_secgroup,
                                 mock_fwrules):
    mock_lock.return_value = threading.Semaphore()
    mock_secgroup.return_value = objects.SecurityGroupList()
    # setup basic instance data
    instance_ref = self._create_instance_ref()
    # FRAGILE: peeks at how the firewall names chains
    chain_name = 'inst-%s' % instance_ref['id']

    # create a firewall via setup_basic_filtering like libvirt_conn.spawn
    # should have a chain with 0 rules
    network_info = _fake_network_info(self.stubs, 1)
    self.fw.setup_basic_filtering(instance_ref, network_info)
    self.assertIn('provider', self.fw.iptables.ipv4['filter'].chains)
    rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
             if rule.chain == 'provider']
    self.assertEqual(0, len(rules))

    # add a rule and send the update message, check for 1 rule
    mock_fwrules.return_value = [{'protocol': 'tcp',
                                  'cidr': '10.99.99.99/32',
                                  'from_port': 1,
                                  'to_port': 65535}]
    self.fw.refresh_provider_fw_rules()
    rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
             if rule.chain == 'provider']
    self.assertEqual(1, len(rules))

    # Add another, refresh, and make sure number of rules goes to two
    mock_fwrules.return_value = [{'protocol': 'tcp',
                                  'cidr': '10.99.99.99/32',
                                  'from_port': 1,
                                  'to_port': 65535},
                                 {'protocol': 'udp',
                                  'cidr': '10.99.99.99/32',
                                  'from_port': 1,
                                  'to_port': 65535}]
    self.fw.refresh_provider_fw_rules()
    rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
             if rule.chain == 'provider']
    self.assertEqual(2, len(rules))

    # create the instance filter and make sure it has a jump rule
    self.fw.prepare_instance_filter(instance_ref, network_info)
    self.fw.apply_instance_filter(instance_ref, network_info)
    inst_rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
                  if rule.chain == chain_name]
    jump_rules = [rule for rule in inst_rules if '-j' in rule.rule]
    provjump_rules = []
    # IptablesTable doesn't make rules unique internally
    for rule in jump_rules:
        if 'provider' in rule.rule and rule not in provjump_rules:
            provjump_rules.append(rule)
    self.assertEqual(1, len(provjump_rules))

    # remove a rule from the db, cast to compute to refresh rule
    mock_fwrules.return_value = [{'protocol': 'udp',
                                  'cidr': '10.99.99.99/32',
                                  'from_port': 1,
                                  'to_port': 65535}]
    self.fw.refresh_provider_fw_rules()
    rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
             if rule.chain == 'provider']
    self.assertEqual(1, len(rules))
def _from_db_object(context, instance, db_inst, expected_attrs=None):
    """Method to help with migration to objects.

    Converts a database entity to a formal object.
    """
    instance._context = context
    if expected_attrs is None:
        expected_attrs = []
    # Most of the field names match right now, so be quick
    for field in instance.fields:
        if field in INSTANCE_OPTIONAL_ATTRS:
            continue
        elif field == 'deleted':
            instance.deleted = db_inst['deleted'] == db_inst['id']
        elif field == 'cleaned':
            instance.cleaned = db_inst['cleaned'] == 1
        else:
            instance[field] = db_inst[field]

    if 'metadata' in expected_attrs:
        instance['metadata'] = utils.instance_meta(db_inst)
    if 'system_metadata' in expected_attrs:
        instance['system_metadata'] = utils.instance_sys_meta(db_inst)
    if 'fault' in expected_attrs:
        instance['fault'] = (
            objects.InstanceFault.get_latest_for_instance(
                context, instance.uuid))
    if 'numa_topology' in expected_attrs:
        instance._load_numa_topology()
    if 'pci_requests' in expected_attrs:
        instance._load_pci_requests()

    if 'info_cache' in expected_attrs:
        if db_inst['info_cache'] is None:
            instance.info_cache = None
        elif not instance.obj_attr_is_set('info_cache'):
            # TODO(danms): If this ever happens on a backlevel instance
            # passed to us by a backlevel service, things will break
            instance.info_cache = objects.InstanceInfoCache(context)
        if instance.info_cache is not None:
            instance.info_cache._from_db_object(context,
                                                instance.info_cache,
                                                db_inst['info_cache'])

    # TODO(danms): If we are updating these on a backlevel instance,
    # we'll end up sending back new versions of these objects (see
    # above note for new info_caches)
    if 'pci_devices' in expected_attrs:
        pci_devices = base.obj_make_list(
            context, objects.PciDeviceList(context),
            objects.PciDevice, db_inst['pci_devices'])
        instance['pci_devices'] = pci_devices
    if 'security_groups' in expected_attrs:
        sec_groups = base.obj_make_list(
            context, objects.SecurityGroupList(context),
            objects.SecurityGroup, db_inst['security_groups'])
        instance['security_groups'] = sec_groups

    instance.obj_reset_changes()
    return instance
def fake_db_req(**updates):
    instance_uuid = uuidutils.generate_uuid()
    info_cache = objects.InstanceInfoCache()
    info_cache.instance_uuid = instance_uuid
    info_cache.network_info = network_model.NetworkInfo()
    req_spec = fake_request_spec.fake_spec_obj(
        context.RequestContext('fake-user', 'fake-project'))
    req_spec.id = 42
    req_spec.obj_reset_changes()
    db_build_request = {
        'id': 1,
        'project_id': 'fake-project',
        'instance_uuid': None,
        'user_id': 'fake-user',
        'display_name': '',
        'instance_metadata': jsonutils.dumps({'foo': 'bar'}),
        'progress': 0,
        'vm_state': vm_states.BUILDING,
        'task_state': task_states.SCHEDULING,
        'image_ref': None,
        'access_ip_v4': '1.2.3.4',
        'access_ip_v6': '::1',
        'info_cache': jsonutils.dumps(info_cache.obj_to_primitive()),
        'security_groups': jsonutils.dumps(
            objects.SecurityGroupList().obj_to_primitive()),
        'config_drive': False,
        'key_name': None,
        'locked_by': None,
        'request_spec': _req_spec_to_db_format(req_spec),
        'instance': None,
        'created_at': datetime.datetime(2016, 1, 16),
        'updated_at': datetime.datetime(2016, 1, 16),
    }

    for name, field in objects.BuildRequest.fields.items():
        if name in db_build_request:
            continue
        if field.nullable:
            db_build_request[name] = None
        elif field.default != fields.UnspecifiedDefault:
            db_build_request[name] = field.default
        else:
            raise Exception('fake_db_req needs help with %s' % name)

    if updates:
        db_build_request.update(updates)

    return db_build_request
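# Hedged usage sketch for the fake above (requires the same Nova test
# fixtures to actually run): any keyword argument overrides the canned
# column value, while unmentioned BuildRequest fields fall back to None or
# the field default.
db_req = fake_db_req(project_id='other-project')
assert db_req['project_id'] == 'other-project'
assert db_req['vm_state'] == vm_states.BUILDING  # untouched default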