def test_notify_usage_exists(self):
    """Ensure 'exists' notification generates appropriate usage data."""
    instance_id = self._create_instance()
    instance = db.instance_get(self.context, instance_id)
    compute_utils.notify_usage_exists(instance)
    self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
    msg = test_notifier.NOTIFICATIONS[0]
    self.assertEquals(msg['priority'], 'INFO')
    self.assertEquals(msg['event_type'], 'compute.instance.exists')
    payload = msg['payload']
    self.assertEquals(payload['tenant_id'], self.project_id)
    self.assertEquals(payload['user_id'], self.user_id)
    self.assertEquals(payload['instance_id'], instance.uuid)
    self.assertEquals(payload['instance_type'], 'm1.tiny')
    type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
    self.assertEquals(str(payload['instance_type_id']), str(type_id))
    for attr in ('display_name', 'created_at', 'launched_at',
                 'state', 'state_description', 'fixed_ips',
                 'bandwidth', 'audit_period_beginning',
                 'audit_period_ending'):
        self.assertTrue(attr in payload,
                        msg="Key %s not in payload" % attr)
    image_ref_url = "%s/images/1" % utils.generate_tank_url()
    self.assertEquals(payload['image_ref_url'], image_ref_url)
    self.compute.terminate_instance(self.context, instance['uuid'])
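For reference, a sketch of the notification shape this test expects; every value below is an illustrative placeholder inferred from the assertions above, not captured output.

# Illustrative 'exists' notification (keys follow the assertions above;
# all values are made up):
example_msg = {
    'priority': 'INFO',
    'event_type': 'compute.instance.exists',
    'payload': {
        'tenant_id': 'fake-project',
        'user_id': 'fake-user',
        'instance_id': 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee',
        'instance_type': 'm1.tiny',
        'instance_type_id': '1',
        'display_name': 'test-instance',
        'created_at': '2011-01-01 00:00:00',
        'launched_at': '2011-01-01 00:00:05',
        'state': 'active',
        'state_description': '',
        'fixed_ips': [],
        'bandwidth': {},
        'audit_period_beginning': '2011-01-01 00:00:00',
        'audit_period_ending': '2011-01-02 00:00:00',
        'image_ref_url': 'http://127.0.0.1:9292/images/1',
    },
}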
def schedule_start_instance(self, context, instance_id, *_args, **_kwargs):
    instance_ref = db.instance_get(context, instance_id)
    host = self._schedule_instance(context, instance_ref, *_args, **_kwargs)
    driver.cast_to_compute_host(context, host, 'start_instance',
                                instance_id=instance_id, **_kwargs)
def test_add_console(self):
    instance_id = self._create_instance()
    self.console.add_console(self.context, instance_id)
    instance = db.instance_get(self.context, instance_id)
    pool = db.console_pool_get_by_host_type(self.context,
                                            instance['host'],
                                            self.console.host,
                                            self.console.driver.console_type)
    console_instances = [con['instance_id'] for con in pool.consoles]
    self.assert_(instance_id in console_instances)
    db.instance_destroy(self.context, instance_id)
def _test_spawn(self, image_ref, kernel_id, ramdisk_id,
                instance_type_id="3", os_type="linux",
                hostname="test", architecture="x86-64", instance_id=1,
                check_injection=False,
                create_record=True, empty_dns=False):
    stubs.stubout_loopingcall_start(self.stubs)
    if create_record:
        instance_values = {'id': instance_id,
                           'project_id': self.project_id,
                           'user_id': self.user_id,
                           'image_ref': image_ref,
                           'kernel_id': kernel_id,
                           'ramdisk_id': ramdisk_id,
                           'local_gb': 20,
                           'instance_type_id': instance_type_id,
                           'os_type': os_type,
                           'hostname': hostname,
                           'architecture': architecture}
        instance = db.instance_create(self.context, instance_values)
    else:
        instance = db.instance_get(self.context, instance_id)
    network_info = [({'bridge': 'fa0', 'id': 0, 'injected': True},
                     {'broadcast': '192.168.0.255',
                      'dns': ['192.168.0.1'],
                      'gateway': '192.168.0.1',
                      'gateway_v6': 'dead:beef::1',
                      'ip6s': [{'enabled': '1',
                                'ip': 'dead:beef::dcad:beff:feef:0',
                                'netmask': '64'}],
                      'ips': [{'enabled': '1',
                               'ip': '192.168.0.100',
                               'netmask': '255.255.255.0'}],
                      'label': 'fake',
                      'mac': 'DE:AD:BE:EF:00:00',
                      'rxtx_cap': 3})]
    if empty_dns:
        network_info[0][1]['dns'] = []
    image_meta = {'id': tank_stubs.FakeTank.IMAGE_VHD,
                  'disk_format': 'vhd'}
    self.conn.spawn(self.context, instance, image_meta, network_info)
    self.create_vm_record(self.conn, os_type, instance_id)
    self.check_vm_record(self.conn, check_injection)
    self.assertTrue(instance.os_type)
    self.assertTrue(instance.architecture)
def schedule_live_migration(self, context, instance_id, dest,
                            block_migration=False):
    """Live migration scheduling method.

    :param context:
    :param instance_id:
    :param dest: destination host
    :return: The host where the instance is currently running;
             the scheduler then sends the request to that host.

    """
    # Whether instance exists and is running.
    instance_ref = db.instance_get(context, instance_id)

    # Checking instance.
    self._live_migration_src_check(context, instance_ref)

    # Checking destination host.
    self._live_migration_dest_check(context, instance_ref,
                                    dest, block_migration)

    # Common checking.
    self._live_migration_common_check(context, instance_ref,
                                      dest, block_migration)

    # Changing instance_state.
    values = {"vm_state": vm_states.MIGRATING}
    db.instance_update(context, instance_id, values)

    # Changing volume state
    for volume_ref in instance_ref['volumes']:
        db.volume_update(context,
                         volume_ref['id'],
                         {'status': 'migrating'})

    src = instance_ref['host']
    cast_to_compute_host(context, src, 'live_migration',
                         update_db=False,
                         instance_id=instance_id,
                         dest=dest,
                         block_migration=block_migration)
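A minimal caller sketch for this method; the scheduler handle, context, instance id, and destination host below are assumptions for illustration, not taken from the source.

# Hypothetical invocation (names are placeholders):
ctxt = context.get_admin_context()
scheduler.schedule_live_migration(ctxt, instance_id=42,
                                  dest='compute-2',
                                  block_migration=False)
# By the time the cast is sent, the instance's vm_state is MIGRATING and
# each of its volumes is marked 'migrating'; the source host then runs
# the actual 'live_migration' step.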
def get_instance_nw_info(self, context, instance_id,
                         instance_type_id, host):
    """This method is used by compute to fetch all network data
       that should be used when creating the VM.

       The method simply loops through all virtual interfaces
       stored in the engine DB and queries the IPAM lib to get
       the associated IP data.

       The format of returned data is 'defined' by the initial
       set of NetworkManagers found in engine/network/manager.py.
       Ideally this 'interface' will be more formally defined in
       the future.
    """
    network_info = []
    instance = db.instance_get(context, instance_id)
    project_id = instance.project_id

    admin_context = context.elevated()
    vifs = db.virtual_interface_get_by_instance(admin_context,
                                                instance_id)
    for vif in vifs:
        net = db.network_get(admin_context, vif['network_id'])
        net_id = net['uuid']

        if not net_id:
            # TODO(bgh): We need to figure out a way to tell if we
            # should actually be raising this exception or not.
            # In the case that a VM spawn failed it may not have
            # attached the vif and raising the exception here
            # prevents deletion of the VM.  In that case we should
            # probably just log, continue, and move on.
            raise Exception(_("No network for virtual interface %s") %
                            vif['uuid'])
        ipam_tenant_id = self.ipam.get_tenant_id_by_net_id(context,
            net_id, vif['uuid'], project_id)
        v4_subnet, v6_subnet = \
            self.ipam.get_subnets_by_net_id(context, ipam_tenant_id,
                                            net_id, vif['uuid'])
        v4_ips = self.ipam.get_v4_ips_by_interface(context,
                                        net_id, vif['uuid'],
                                        project_id=ipam_tenant_id)
        v6_ips = self.ipam.get_v6_ips_by_interface(context,
                                        net_id, vif['uuid'],
                                        project_id=ipam_tenant_id)

        def ip_dict(ip, subnet):
            return {
                "ip": ip,
                "netmask": subnet["netmask"],
                "enabled": "1"}

        network_dict = {
            'cidr': v4_subnet['cidr'],
            'injected': True,
            'bridge': net['bridge'],
            'multi_host': False}

        q_tenant_id = project_id or FLAGS.quantum_default_tenant_id
        info = {
            'label': self.q_conn.get_network_name(q_tenant_id, net_id),
            'gateway': v4_subnet['gateway'],
            'dhcp_server': v4_subnet['gateway'],
            'broadcast': v4_subnet['broadcast'],
            'mac': vif['address'],
            'vif_uuid': vif['uuid'],
            'dns': [],
            'ips': [ip_dict(ip, v4_subnet) for ip in v4_ips]}

        if v6_subnet:
            if v6_subnet['cidr']:
                network_dict['cidr_v6'] = v6_subnet['cidr']
                info['ip6s'] = [ip_dict(ip, v6_subnet) for ip in v6_ips]

            if v6_subnet['gateway']:
                info['gateway_v6'] = v6_subnet['gateway']

        dns_dict = {}
        for s in [v4_subnet, v6_subnet]:
            for k in ['dns1', 'dns2']:
                if s and s[k]:
                    dns_dict[s[k]] = None
        info['dns'] = [d for d in dns_dict.keys()]

        network_info.append((network_dict, info))
    return network_info
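To make the returned format concrete, here is a sketch of a single (network, info) tuple as assembled above; all values are illustrative placeholders.

# Illustrative network_info entry (values made up, keys as built above):
example_network_info = [
    ({'cidr': '192.168.0.0/24',    # network_dict
      'injected': True,
      'bridge': 'br100',
      'multi_host': False},
     {'label': 'private',          # info
      'gateway': '192.168.0.1',
      'dhcp_server': '192.168.0.1',
      'broadcast': '192.168.0.255',
      'mac': 'DE:AD:BE:EF:00:00',
      'vif_uuid': 'aaaa-bbbb-cccc',
      'dns': ['192.168.0.1'],
      'ips': [{'ip': '192.168.0.100',
               'netmask': '255.255.255.0',
               'enabled': '1'}]})]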
def allocate_for_instance(self, context, **kwargs):
    """Called by compute when it is creating a new VM.

       There are three key tasks:
            - Determine the number and order of vNICs to create
            - Allocate IP addresses
            - Create ports on a Quantum network and attach vNICs.

       We support two approaches to determining vNICs:
            - By default, a VM gets a vNIC for any network belonging
              to the VM's project, and a vNIC for any "global" network
              that has a NULL project_id.  vNIC order is determined
              by the network's 'priority' field.
            - If the 'os-create-server-ext' extension was used to
              create the VM, only the networks in 'requested_networks'
              are used to create vNICs, and the vNIC order is
              determined by the order in the requested_networks array.

       For each vNIC, use the FlatManager to create the entries
       in the virtual_interfaces table, contact Quantum to
       create a port and attach the vNIC, and use the IPAM
       lib to allocate IP addresses.
    """
    instance_id = kwargs.pop('instance_id')
    instance_type_id = kwargs['instance_type_id']
    host = kwargs.pop('host')
    project_id = kwargs.pop('project_id')
    LOG.debug(_("network allocations for instance %s"), project_id)
    requested_networks = kwargs.get('requested_networks')

    if requested_networks:
        net_proj_pairs = [(net_id, project_id)
                          for (net_id, _i) in requested_networks]
    else:
        net_proj_pairs = self.ipam.get_project_and_global_net_ids(
            context, project_id)

    # Quantum may also know about networks that aren't in the networks
    # table so we need to query Quantum for any tenant networks and add
    # them to net_proj_pairs.
    qnets = self.q_conn.get_networks(project_id)
    for qn in qnets['networks']:
        pair = (qn['id'], project_id)
        if pair not in net_proj_pairs:
            net_proj_pairs.append(pair)

    # Create a port via quantum and attach the vif
    for (quantum_net_id, project_id) in net_proj_pairs:
        # FIXME(danwent): We'd like to have the manager be
        # completely decoupled from the engine networks table.
        # However, other parts of engine sometimes go behind our
        # back and access network data directly from the DB.  So
        # for now, the quantum manager knows that there is an engine
        # networks DB table and accesses it here.  Updating the
        # virtual_interfaces table to use UUIDs would be one
        # solution, but this would require significant work
        # elsewhere.
        admin_context = context.elevated()

        # We may not be able to get a network_ref here if this network
        # isn't in the database (i.e. it came from Quantum).
        network_ref = db.network_get_by_uuid(admin_context,
                                             quantum_net_id)
        if network_ref is None:
            network_ref = {"uuid": quantum_net_id,
                           "project_id": project_id,
                           # NOTE(bgh): We need to document this
                           # somewhere but since we don't know the
                           # priority of any networks we get from
                           # quantum we just give them a priority of
                           # 0.  If it's necessary to specify the
                           # order of the vifs and what network they
                           # map to then the user will have to use the
                           # OSCreateServer extension and specify them
                           # explicitly.
                           #
                           # In the future users will be able to tag
                           # quantum networks with a priority .. and
                           # at that point we can update the code here
                           # to reflect that.
                           "priority": 0,
                           "id": 'NULL',
                           "label": "quantum-net-%s" % quantum_net_id}

        vif_rec = self.add_virtual_interface(context,
                                             instance_id,
                                             network_ref['id'])

        # talk to Quantum API to create and attach port.
        instance = db.instance_get(context, instance_id)
        instance_type = instance_types.get_instance_type(instance_type_id)
        rxtx_factor = instance_type['rxtx_factor']
        engine_id = self._get_engine_id(context)
        q_tenant_id = project_id or FLAGS.quantum_default_tenant_id
        self.q_conn.create_and_attach_port(q_tenant_id, quantum_net_id,
                                           vif_rec['uuid'],
                                           vm_id=instance['uuid'],
                                           rxtx_factor=rxtx_factor,
                                           engine_id=engine_id)
        # Tell melange to allocate an IP
        ip = self.ipam.allocate_fixed_ip(context, project_id,
                                         quantum_net_id, vif_rec)
        # Set up/start the dhcp server for this network if necessary
        if FLAGS.quantum_use_dhcp:
            self.enable_dhcp(context, quantum_net_id, network_ref,
                             vif_rec, project_id)
    return self.get_instance_nw_info(context, instance_id,
                                     instance_type_id, host)
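A short sketch of the two vNIC-ordering paths the docstring describes; the network and project ids below are hypothetical.

# Default path: one vNIC per project network plus each global
# (NULL-project) network, ordered by the network 'priority' field, e.g.:
#     net_proj_pairs == [('net-uuid-a', 'proj-1'), ('net-uuid-g', None)]
#
# os-create-server-ext path: exactly the requested networks, in the
# order the caller listed them:
requested_networks = [('net-uuid-b', None), ('net-uuid-a', None)]
net_proj_pairs = [(net_id, 'proj-1')
                  for (net_id, _i) in requested_networks]
# -> [('net-uuid-b', 'proj-1'), ('net-uuid-a', 'proj-1')]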
def get_metadata(self, address):
    ctxt = context.get_admin_context()
    search_opts = {'fixed_ip': address, 'deleted': False}
    try:
        instance_ref = self.compute_api.get_all(ctxt,
                                                search_opts=search_opts)
    except exception.NotFound:
        instance_ref = None
    if not instance_ref:
        return None

    # This ensures that all attributes of the instance
    # are populated.
    instance_ref = db.instance_get(ctxt, instance_ref[0]['id'])

    mpi = self._get_mpi_data(ctxt, instance_ref['project_id'])
    hostname = "%s.%s" % (instance_ref['hostname'], FLAGS.dhcp_domain)
    host = instance_ref['host']
    services = db.service_get_all_by_host(ctxt.elevated(), host)
    availability_zone = ec2utils.get_availability_zone_by_host(
        services, host)
    ip_info = ec2utils.get_ip_info_for_instance(ctxt, instance_ref)
    floating_ips = ip_info['floating_ips']
    floating_ip = floating_ips and floating_ips[0] or ''
    ec2_id = ec2utils.id_to_ec2_id(instance_ref['id'])
    image_ec2_id = ec2utils.image_ec2_id(instance_ref['image_ref'])
    security_groups = db.security_group_get_by_instance(
        ctxt, instance_ref['id'])
    security_groups = [x['name'] for x in security_groups]
    mappings = self._format_instance_mapping(ctxt, instance_ref)
    data = {
        'user-data': base64.b64decode(instance_ref['user_data']),
        'meta-data': {
            'ami-id': image_ec2_id,
            'ami-launch-index': instance_ref['launch_index'],
            'ami-manifest-path': 'FIXME',
            'block-device-mapping': mappings,
            'hostname': hostname,
            'instance-action': 'none',
            'instance-id': ec2_id,
            'instance-type': instance_ref['instance_type']['name'],
            'local-hostname': hostname,
            'local-ipv4': address,
            'placement': {'availability-zone': availability_zone},
            'public-hostname': hostname,
            'public-ipv4': floating_ip,
            'reservation-id': instance_ref['reservation_id'],
            'security-groups': security_groups,
            'mpi': mpi}}

    # public-keys should be in meta-data only if user specified one
    if instance_ref['key_name']:
        data['meta-data']['public-keys'] = {
            '0': {'_name': instance_ref['key_name'],
                  'openssh-key': instance_ref['key_data']}}

    for image_type in ['kernel', 'ramdisk']:
        if instance_ref.get('%s_id' % image_type):
            ec2_id = ec2utils.image_ec2_id(
                instance_ref['%s_id' % image_type],
                ec2utils.image_type(image_type))
            data['meta-data']['%s-id' % image_type] = ec2_id

    if False:  # TODO(vish): store ancestor ids
        data['ancestor-ami-ids'] = []
    if False:  # TODO(vish): store product codes
        data['product-codes'] = []
    return data
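Roughly, an EC2-style metadata consumer sees a tree like the following; every value here is an illustrative placeholder, not real output.

# Illustrative return value (values made up):
example_metadata = {
    'user-data': '#!/bin/sh\necho hello',
    'meta-data': {
        'ami-id': 'ami-00000001',
        'instance-id': 'i-0000002a',
        'instance-type': 'm1.tiny',
        'hostname': 'vm1.example.com',
        'local-ipv4': '10.0.0.3',
        'public-ipv4': '',
        'placement': {'availability-zone': 'zone-1'},
        'security-groups': ['default'],
    },
}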
def test_get_test_instance(self):
    """get_test_instance's return value looks like an instance_ref"""
    instance_ref = test_utils.get_test_instance()
    ctxt = test_utils.get_test_admin_context()
    db.instance_get(ctxt, instance_ref['id'])