def validate_ec2_id(val):
    """Return True when *val* is a string that parses as an EC2 id."""
    if not validator.validate_str()(val):
        return False
    try:
        ec2utils.ec2_id_to_id(val)
    except exception.InvalidEc2Id:
        return False
    else:
        return True
def reboot_instances(self, context, instance_id, **kwargs):
    """Reboot every instance in *instance_id*, a list of ec2 ids."""
    LOG.audit(_("Reboot instance %r"), instance_id, context=context)
    for ec2_id in instance_id:
        internal_id = ec2utils.ec2_id_to_id(ec2_id)
        self.compute_api.reboot(context, instance_id=internal_id)
    return True
def associate_address(self, context, instance_id, public_ip, **kwargs):
    """Associate a floating IP address with an instance."""
    msg = _("Associate address %(public_ip)s to"
            " instance %(instance_id)s") % locals()
    LOG.audit(msg, context=context)
    internal_id = ec2utils.ec2_id_to_id(instance_id)
    self.compute_api.associate_floating_ip(context,
                                           instance_id=internal_id,
                                           address=public_ip)
    return {'associateResponse': ["Address associated."]}
def terminate_instances(self, context, instance_id, **kwargs):
    """Terminate each instance in instance_id, which is a list of ec2 ids.

    instance_id is a kwarg so its name cannot be modified.
    """
    LOG.debug(_("Going to start terminating instances"))
    for ec2_id in instance_id:
        internal_id = ec2utils.ec2_id_to_id(ec2_id)
        self.compute_api.delete(context, instance_id=internal_id)
    return True
def attach_volume(self, context, volume_id, instance_id, device, **kwargs):
    """Attach a volume to an instance at the given device name."""
    volume_id = ec2utils.ec2_id_to_id(volume_id)
    instance_id = ec2utils.ec2_id_to_id(instance_id)
    LOG.audit(_("Attach volume %(volume_id)s to instance %(instance_id)s"
                " at %(device)s") % locals(), context=context)
    self.compute_api.attach_volume(context,
                                   instance_id=instance_id,
                                   volume_id=volume_id,
                                   device=device)
    # Re-read the volume so the response reflects the fresh attach state.
    volume = self.volume_api.get(context, volume_id)
    response = {}
    response['attachTime'] = volume['attach_time']
    response['device'] = volume['mountpoint']
    response['instanceId'] = ec2utils.id_to_ec2_id(instance_id)
    response['requestId'] = context.request_id
    response['status'] = volume['attach_status']
    response['volumeId'] = ec2utils.id_to_ec2_id(volume_id, 'vol-%08x')
    return response
def _get_image(self, context, ec2_id):
    """Look up an image by ec2 id, falling back to lookup by name."""
    try:
        return self.image_service.show(context,
                                       ec2utils.ec2_id_to_id(ec2_id))
    except (exception.InvalidEc2Id, exception.ImageNotFound):
        # The id form failed; maybe the caller passed an image name.
        try:
            return self.image_service.show_by_name(context, ec2_id)
        except exception.NotFound:
            raise exception.ImageNotFound(image_id=ec2_id)
def update_volume(self, context, volume_id, **kwargs):
    """Update mutable display fields of a volume.

    Only 'display_name' and 'display_description' may be changed;
    all other keyword arguments are ignored. Always returns True.
    """
    volume_id = ec2utils.ec2_id_to_id(volume_id)
    updatable_fields = ['display_name', 'display_description']
    changes = {}
    for field in updatable_fields:
        if field in kwargs:
            changes[field] = kwargs[field]
    if changes:
        # BUG FIX: previously passed the raw ``kwargs`` through, which
        # defeated the whitelist built above; pass only the vetted changes.
        self.volume_api.update(context, volume_id, changes)
    return True
def update_instance(self, context, instance_id, **kwargs):
    """Update mutable display fields of an instance.

    Only 'display_name' and 'display_description' may be changed;
    all other keyword arguments are ignored. Always returns True.
    """
    updatable_fields = ['display_name', 'display_description']
    changes = {}
    for field in updatable_fields:
        if field in kwargs:
            changes[field] = kwargs[field]
    if changes:
        instance_id = ec2utils.ec2_id_to_id(instance_id)
        # BUG FIX: previously forwarded ``**kwargs`` instead of the
        # whitelisted ``**changes``, ignoring the filtering above.
        self.compute_api.update(context, instance_id=instance_id, **changes)
    return True
def describe_volumes(self, context, volume_id=None, **kwargs):
    """Describe all volumes, or only the ec2 ids listed in *volume_id*."""
    if volume_id:
        volumes = [self.volume_api.get(context,
                                       ec2utils.ec2_id_to_id(ec2_id))
                   for ec2_id in volume_id]
    else:
        volumes = self.volume_api.get_all(context)
    formatted = [self._format_volume(context, v) for v in volumes]
    return {'volumeSet': formatted}
def detach_volume(self, context, volume_id, **kwargs):
    """Detach a volume and describe its (pre-detach) attachment."""
    internal_id = ec2utils.ec2_id_to_id(volume_id)
    LOG.audit(_("Detach volume %s"), internal_id, context=context)
    # Snapshot the volume record before the detach is issued.
    volume = self.volume_api.get(context, internal_id)
    instance = self.compute_api.detach_volume(context,
                                              volume_id=internal_id)
    result = {}
    result['attachTime'] = volume['attach_time']
    result['device'] = volume['mountpoint']
    result['instanceId'] = ec2utils.id_to_ec2_id(instance['id'])
    result['requestId'] = context.request_id
    result['status'] = volume['attach_status']
    result['volumeId'] = ec2utils.id_to_ec2_id(internal_id, 'vol-%08x')
    return result
def get_console_output(self, context, instance_id, **kwargs):
    """Return the base64-encoded console output of one instance.

    instance_id may be a single ec2 id or a list of ids; only the
    first element of a list is used.
    """
    LOG.audit(_("Get console output for instance %s"), instance_id,
              context=context)
    # instance_id may be passed in as a list of instances
    # IDIOM FIX: isinstance instead of type(...) == list.
    if isinstance(instance_id, list):
        ec2_id = instance_id[0]
    else:
        ec2_id = instance_id
    instance_id = ec2utils.ec2_id_to_id(ec2_id)
    output = self.compute_api.get_console_output(
        context, instance_id=instance_id)
    now = datetime.datetime.utcnow()
    return {"InstanceId": ec2_id,
            "Timestamp": now,
            "output": base64.b64encode(output)}
def test_describe_volumes(self):
    """Makes sure describe_volumes works and filters results."""
    created = [db.volume_create(self.context, {}) for _ in range(2)]
    vol1, vol2 = created
    result = self.cloud.describe_volumes(self.context)
    self.assertEqual(len(result['volumeSet']), 2)
    # Filtering on the second volume's ec2 id must return only it.
    ec2_vol_id = ec2utils.id_to_ec2_id(vol2['id'], 'vol-%08x')
    result = self.cloud.describe_volumes(self.context,
                                         volume_id=[ec2_vol_id])
    self.assertEqual(len(result['volumeSet']), 1)
    returned_id = result['volumeSet'][0]['volumeId']
    self.assertEqual(ec2utils.ec2_id_to_id(returned_id), vol2['id'])
    for vol in created:
        db.volume_destroy(self.context, vol['id'])
def test_ec2_id_to_id(self):
    """Any prefix is stripped and the hex suffix parsed to an int."""
    for ec2_id, expected in (('i-0000001e', 30), ('ami-1d', 29)):
        self.assertEqual(ec2utils.ec2_id_to_id(ec2_id), expected)
def update_image(self, context, image_id, **kwargs):
    """Apply the keyword updates to the image named by *image_id*."""
    internal_id = ec2utils.ec2_id_to_id(image_id)
    return self.image_service.update(context, internal_id, dict(kwargs))
def _translate_dependent_image_id(image_key, image_id):
    # Resolve an ec2 image id to an image uuid and store it under
    # *image_key*.
    # NOTE(review): this is a closure fragment -- it reads `self`,
    # `context` and `properties` from an enclosing scope not visible
    # here; it only works when defined inside a method that provides
    # all three. TODO confirm against the enclosing definition.
    image_id = ec2utils.ec2_id_to_id(image_id)
    image_uuid = self.get_image_uuid(context, image_id)
    properties[image_key] = image_uuid
def unrescue_instance(self, context, instance_id, **kwargs):
    """Take an instance out of rescue mode (extension to the ec2 api)."""
    internal_id = ec2utils.ec2_id_to_id(instance_id)
    self.compute_api.unrescue(context, instance_id=internal_id)
    return True
def _get_image(self, context, ec2_id):
    """Fetch image metadata by ec2 id, then by name if the id fails."""
    try:
        return self.image_service.show(context,
                                       ec2utils.ec2_id_to_id(ec2_id))
    except exception.NotFound:
        return self.image_service.show_by_name(context, ec2_id)
def _s3_parse_manifest(self, context, metadata, manifest):
    """Parse an S3 bundle manifest and register a pending image.

    Returns a (parsed_manifest_element, created_image) tuple. A
    kernel/ramdisk id of "true" in the manifest marks the image itself
    as a kernel/ramdisk rather than a machine image.
    """
    manifest = ElementTree.fromstring(manifest)
    image_format = "ami"
    # NOTE(review): image_type is computed but never used in this
    # variant of the method -- possibly vestigial; verify.
    image_type = "machine"
    try:
        kernel_id = manifest.find("machine_configuration/kernel_id").text
        if kernel_id == "true":
            image_format = "aki"
            image_type = "kernel"
            kernel_id = None
    except Exception:
        # Missing element -> no separate kernel.
        kernel_id = None
    try:
        ramdisk_id = manifest.find("machine_configuration/ramdisk_id").text
        if ramdisk_id == "true":
            image_format = "ari"
            image_type = "ramdisk"
            ramdisk_id = None
    except Exception:
        ramdisk_id = None
    try:
        arch = manifest.find("machine_configuration/architecture").text
    except Exception:
        # Default architecture when the manifest omits it.
        arch = "x86_64"
    # NOTE(yamahata):
    # EC2 ec2-budlne-image --block-device-mapping accepts
    # <virtual name>=<device name> where
    # virtual name = {ami, root, swap, ephemeral<N>}
    # where N is no negative integer
    # device name = the device name seen by guest kernel.
    # They are converted into
    # block_device_mapping/mapping/{virtual, device}
    #
    # Do NOT confuse this with ec2-register's block device mapping
    # argument.
    mappings = []
    try:
        block_device_mapping = manifest.findall("machine_configuration/"
                                                "block_device_mapping/"
                                                "mapping")
        for bdm in block_device_mapping:
            mappings.append({"virtual": bdm.find("virtual").text,
                             "device": bdm.find("device").text})
    except Exception:
        mappings = []
    properties = metadata["properties"]
    properties["project_id"] = context.project_id
    properties["architecture"] = arch
    # Dependent kernel/ramdisk references are stored as internal ids.
    if kernel_id:
        properties["kernel_id"] = ec2utils.ec2_id_to_id(kernel_id)
    if ramdisk_id:
        properties["ramdisk_id"] = ec2utils.ec2_id_to_id(ramdisk_id)
    if mappings:
        properties["mappings"] = mappings
    metadata.update(
        {
            "disk_format": image_format,
            "container_format": image_format,
            "status": "queued",
            "is_public": False,
            "properties": properties,
        }
    )
    # Marked pending until the actual image data is processed.
    metadata["properties"]["image_state"] = "pending"
    image = self.service.create(context, metadata)
    return manifest, image
def _s3_create(self, context, metadata):
    """Get a manifest from S3 and make an image from it.

    Registers the image immediately in 'pending' state and spawns a
    green thread that downloads, decrypts, untars and uploads the
    image data, advancing image_state as it goes.
    """
    image_path = tempfile.mkdtemp(dir=FLAGS.image_decryption_dir)
    image_location = metadata['properties']['image_location']
    # image_location is "<bucket>/<path/to/manifest>".
    bucket_name = image_location.split("/")[0]
    manifest_path = image_location[len(bucket_name) + 1:]
    bucket = self._conn(context).get_bucket(bucket_name)
    key = bucket.get_key(manifest_path)
    manifest = key.get_contents_as_string()
    manifest = ElementTree.fromstring(manifest)
    image_format = 'ami'
    image_type = 'machine'
    try:
        kernel_id = manifest.find("machine_configuration/kernel_id").text
        if kernel_id == 'true':
            # "true" means this image IS a kernel, not that it has one.
            image_format = 'aki'
            image_type = 'kernel'
            kernel_id = None
    except Exception:
        kernel_id = None
    try:
        ramdisk_id = manifest.find("machine_configuration/ramdisk_id").text
        if ramdisk_id == 'true':
            image_format = 'ari'
            image_type = 'ramdisk'
            ramdisk_id = None
    except Exception:
        ramdisk_id = None
    try:
        arch = manifest.find("machine_configuration/architecture").text
    except Exception:
        arch = 'x86_64'
    properties = metadata['properties']
    properties['owner_id'] = context.project_id
    properties['architecture'] = arch
    if kernel_id:
        properties['kernel_id'] = ec2utils.ec2_id_to_id(kernel_id)
    if ramdisk_id:
        properties['ramdisk_id'] = ec2utils.ec2_id_to_id(ramdisk_id)
    properties['is_public'] = False
    properties['type'] = image_type
    metadata.update({'disk_format': image_format,
                     'container_format': image_format,
                     'status': 'queued',
                     'is_public': True,
                     'properties': properties})
    metadata['properties']['image_state'] = 'pending'
    image = self.service.create(context, metadata)
    image_id = image['id']

    def delayed_create():
        """This handles the fetching and decrypting of the part files."""
        parts = []
        for fn_element in manifest.find("image").getiterator("filename"):
            part = self._download_file(bucket, fn_element.text, image_path)
            parts.append(part)
        # NOTE(vish): this may be suboptimal, should we use cat?
        encrypted_filename = os.path.join(image_path, 'image.encrypted')
        with open(encrypted_filename, 'w') as combined:
            for filename in parts:
                with open(filename) as part:
                    shutil.copyfileobj(part, combined)
        metadata['properties']['image_state'] = 'decrypting'
        self.service.update(context, image_id, metadata)
        hex_key = manifest.find("image/ec2_encrypted_key").text
        encrypted_key = binascii.a2b_hex(hex_key)
        hex_iv = manifest.find("image/ec2_encrypted_iv").text
        encrypted_iv = binascii.a2b_hex(hex_iv)
        # FIXME(vish): grab key from common service so this can run on
        # any host.
        cloud_pk = os.path.join(FLAGS.ca_path, "private/cakey.pem")
        decrypted_filename = os.path.join(image_path, 'image.tar.gz')
        self._decrypt_image(encrypted_filename, encrypted_key,
                            encrypted_iv, cloud_pk, decrypted_filename)
        metadata['properties']['image_state'] = 'untarring'
        self.service.update(context, image_id, metadata)
        unz_filename = self._untarzip_image(image_path, decrypted_filename)
        metadata['properties']['image_state'] = 'uploading'
        with open(unz_filename) as image_file:
            self.service.update(context, image_id, metadata, image_file)
        metadata['properties']['image_state'] = 'available'
        self.service.update(context, image_id, metadata)
        shutil.rmtree(image_path)

    # Runs asynchronously; callers see the image in 'pending' state.
    eventlet.spawn_n(delayed_create)
    return image
def _s3_parse_manifest(self, context, metadata, manifest):
    """Parse an S3 bundle manifest and register a pending image.

    Returns (parsed_manifest_element, created_image, image_uuid);
    the returned image dict carries a freshly generated int id while
    image_uuid is the service-side identifier.
    """
    manifest = ElementTree.fromstring(manifest)
    image_format = 'ami'
    image_type = 'machine'
    try:
        kernel_id = manifest.find('machine_configuration/kernel_id').text
        if kernel_id == 'true':
            # "true" means this image IS a kernel, not that it has one.
            image_format = 'aki'
            image_type = 'kernel'
            kernel_id = None
    except Exception:
        kernel_id = None
    try:
        ramdisk_id = manifest.find('machine_configuration/ramdisk_id').text
        if ramdisk_id == 'true':
            image_format = 'ari'
            image_type = 'ramdisk'
            ramdisk_id = None
    except Exception:
        ramdisk_id = None
    try:
        arch = manifest.find('machine_configuration/architecture').text
    except Exception:
        arch = 'x86_64'
    # NOTE(yamahata):
    # EC2 ec2-budlne-image --block-device-mapping accepts
    # <virtual name>=<device name> where
    # virtual name = {ami, root, swap, ephemeral<N>}
    # where N is no negative integer
    # device name = the device name seen by guest kernel.
    # They are converted into
    # block_device_mapping/mapping/{virtual, device}
    #
    # Do NOT confuse this with ec2-register's block device mapping
    # argument.
    mappings = []
    try:
        block_device_mapping = manifest.findall('machine_configuration/'
                                                'block_device_mapping/'
                                                'mapping')
        for bdm in block_device_mapping:
            mappings.append({'virtual': bdm.find('virtual').text,
                             'device': bdm.find('device').text})
    except Exception:
        mappings = []
    properties = metadata['properties']
    properties['project_id'] = context.project_id
    properties['architecture'] = arch
    if kernel_id:
        # BUG FIX: was ``ec2_utils.ec2_id_to_id`` -- the module is
        # imported as ``ec2utils`` (as used two lines below for the
        # ramdisk), so the old spelling raised NameError at runtime.
        kernel_id = ec2utils.ec2_id_to_id(kernel_id)
        kernel_uuid = self._get_image_uuid(context, kernel_id)
        properties['kernel_id'] = kernel_uuid
    if ramdisk_id:
        ramdisk_id = ec2utils.ec2_id_to_id(ramdisk_id)
        ramdisk_uuid = self._get_image_uuid(context, ramdisk_id)
        properties['ramdisk_id'] = ramdisk_uuid
    if mappings:
        properties['mappings'] = mappings
    metadata.update({'disk_format': image_format,
                     'container_format': image_format,
                     'status': 'queued',
                     'is_public': False,
                     'properties': properties})
    metadata['properties']['image_state'] = 'pending'
    #TODO(bcwaldon): right now, this removes user-defined ids.
    # We need to re-enable this.
    image_id = metadata.pop('id', None)
    image = self.service.create(context, metadata)
    # extract the new uuid and generate an int id to present back to user
    image_uuid = image['id']
    image['id'] = self._create_image_id(context, image_uuid)
    # return image_uuid so the caller can still make use of image_service
    return manifest, image, image_uuid
def test_ec2_id_to_id(self):
    """Every resource prefix decodes to the same hex-suffix int."""
    cases = (('i-0000001e', 30),
             ('ami-1d', 29),
             ('snap-0000001c', 28),
             ('vol-0000001b', 27))
    for ec2_id, expected in cases:
        self.assertEqual(ec2utils.ec2_id_to_id(ec2_id), expected)
def get_vnc_console(self, context, instance_id, **kwargs):
    """Returns vnc browser url. Used by OS dashboard."""
    internal_id = ec2utils.ec2_id_to_id(instance_id)
    return self.compute_api.get_vnc_console(context,
                                            instance_id=internal_id)
def _format_instances(self, context, instance_id=None, **kwargs):
    """Build EC2 DescribeInstances-style reservation structures.

    Fetches instances (optionally filtered by a list of ec2 ids in
    *instance_id*) and groups the formatted entries by reservation id.
    """
    # TODO(termie): this method is poorly named as its name does not imply
    # that it will be making a variety of database calls
    # rather than simply formatting a bunch of instances that
    # were handed to it
    reservations = {}
    # NOTE(vish): instance_id is an optional list of ids to filter by
    if instance_id:
        instances = []
        for ec2_id in instance_id:
            internal_id = ec2utils.ec2_id_to_id(ec2_id)
            instance = self.compute_api.get(context,
                                            instance_id=internal_id)
            instances.append(instance)
    else:
        instances = self.compute_api.get_all(context, **kwargs)
    for instance in instances:
        # Hide the VPN instance from non-admin users.
        if not context.is_admin:
            if instance['image_id'] == FLAGS.vpn_image_id:
                continue
        i = {}
        instance_id = instance['id']
        ec2_id = ec2utils.id_to_ec2_id(instance_id)
        i['instanceId'] = ec2_id
        i['imageId'] = self._image_ec2_id(instance['image_id'])
        i['instanceState'] = {
            'code': instance['state'],
            'name': instance['state_description']}
        fixed_addr = None
        floating_addr = None
        if instance['fixed_ip']:
            fixed_addr = instance['fixed_ip']['address']
            # First floating IP (if any) is reported as the public address.
            if instance['fixed_ip']['floating_ips']:
                fixed = instance['fixed_ip']
                floating_addr = fixed['floating_ips'][0]['address']
            if instance['fixed_ip']['network'] and 'use_v6' in kwargs:
                i['dnsNameV6'] = utils.to_global_ipv6(
                    instance['fixed_ip']['network']['cidr_v6'],
                    instance['mac_address'])
        i['privateDnsName'] = fixed_addr
        i['publicDnsName'] = floating_addr
        i['dnsName'] = i['publicDnsName'] or i['privateDnsName']
        i['keyName'] = instance['key_name']
        if context.is_admin:
            # Admins also see owner project and host inline.
            i['keyName'] = '%s (%s, %s)' % (i['keyName'],
                                            instance['project_id'],
                                            instance['host'])
        i['productCodesSet'] = self._convert_to_set([], 'product_codes')
        i['instanceType'] = instance['instance_type']
        i['launchTime'] = instance['created_at']
        i['amiLaunchIndex'] = instance['launch_index']
        i['displayName'] = instance['display_name']
        i['displayDescription'] = instance['display_description']
        host = instance['host']
        zone = self._get_availability_zone_by_host(context, host)
        i['placement'] = {'availabilityZone': zone}
        # Lazily create the reservation entry on first instance seen.
        if instance['reservation_id'] not in reservations:
            r = {}
            r['reservationId'] = instance['reservation_id']
            r['ownerId'] = instance['project_id']
            security_group_names = []
            if instance.get('security_groups'):
                for security_group in instance['security_groups']:
                    security_group_names.append(security_group['name'])
            r['groupSet'] = self._convert_to_set(security_group_names,
                                                 'groupId')
            r['instancesSet'] = []
            reservations[instance['reservation_id']] = r
        reservations[instance['reservation_id']]['instancesSet'].append(i)
    return list(reservations.values())
def delete_volume(self, context, volume_id, **kwargs):
    """Delete the volume named by its ec2 id."""
    internal_id = ec2utils.ec2_id_to_id(volume_id)
    self.volume_api.delete(context, volume_id=internal_id)
    return True
def _format_instances(self, context, instance_id=None, **kwargs):
    """Build EC2 DescribeInstances-style reservation structures.

    Fetches instances (optionally filtered by a list of ec2 ids in
    *instance_id*) and groups the formatted entries by reservation id.
    """
    # TODO(termie): this method is poorly named as its name does not imply
    # that it will be making a variety of database calls
    # rather than simply formatting a bunch of instances that
    # were handed to it
    reservations = {}
    # NOTE(vish): instance_id is an optional list of ids to filter by
    if instance_id:
        instances = []
        for ec2_id in instance_id:
            internal_id = ec2utils.ec2_id_to_id(ec2_id)
            instance = self.compute_api.get(context,
                                            instance_id=internal_id)
            instances.append(instance)
    else:
        instances = self.compute_api.get_all(context, **kwargs)
    for instance in instances:
        # Hide the VPN instance from non-admin users.
        if not context.is_admin:
            if instance['image_id'] == str(FLAGS.vpn_image_id):
                continue
        i = {}
        instance_id = instance['id']
        ec2_id = ec2utils.id_to_ec2_id(instance_id)
        i['instanceId'] = ec2_id
        i['imageId'] = self.image_ec2_id(instance['image_id'])
        i['instanceState'] = {
            'code': instance['state'],
            'name': instance['state_description']
        }
        fixed_addr = None
        floating_addr = None
        if instance['fixed_ip']:
            fixed_addr = instance['fixed_ip']['address']
            # First floating IP (if any) is reported as the public address.
            if instance['fixed_ip']['floating_ips']:
                fixed = instance['fixed_ip']
                floating_addr = fixed['floating_ips'][0]['address']
            if instance['fixed_ip']['network'] and 'use_v6' in kwargs:
                i['dnsNameV6'] = utils.to_global_ipv6(
                    instance['fixed_ip']['network']['cidr_v6'],
                    instance['mac_address'])
        i['privateDnsName'] = fixed_addr
        i['privateIpAddress'] = fixed_addr
        i['publicDnsName'] = floating_addr
        i['ipAddress'] = floating_addr or fixed_addr
        i['dnsName'] = i['publicDnsName'] or i['privateDnsName']
        i['keyName'] = instance['key_name']
        if context.is_admin:
            # Admins also see owner project and host inline.
            i['keyName'] = '%s (%s, %s)' % (
                i['keyName'],
                instance['project_id'],
                instance['host'])
        i['productCodesSet'] = self._convert_to_set([], 'product_codes')
        # instance_type is a dict (or None) in this variant.
        if instance['instance_type']:
            i['instanceType'] = instance['instance_type'].get('name')
        else:
            i['instanceType'] = None
        i['launchTime'] = instance['created_at']
        i['amiLaunchIndex'] = instance['launch_index']
        i['displayName'] = instance['display_name']
        i['displayDescription'] = instance['display_description']
        host = instance['host']
        zone = self._get_availability_zone_by_host(context, host)
        i['placement'] = {'availabilityZone': zone}
        # Lazily create the reservation entry on first instance seen.
        if instance['reservation_id'] not in reservations:
            r = {}
            r['reservationId'] = instance['reservation_id']
            r['ownerId'] = instance['project_id']
            security_group_names = []
            if instance.get('security_groups'):
                for security_group in instance['security_groups']:
                    security_group_names.append(security_group['name'])
            r['groupSet'] = self._convert_to_set(security_group_names,
                                                 'groupId')
            r['instancesSet'] = []
            reservations[instance['reservation_id']] = r
        reservations[instance['reservation_id']]['instancesSet'].append(i)
    return list(reservations.values())
def test_stop_with_attached_volume(self):
    """Make sure attach info is reflected to block device mapping"""
    availability_zone = 'zone1:host1'
    vol1 = self.cloud.create_volume(self.context,
                                    size=1,
                                    availability_zone=availability_zone)
    vol2 = self.cloud.create_volume(self.context,
                                    size=1,
                                    availability_zone=availability_zone)
    vol1_uuid = ec2utils.ec2_vol_id_to_uuid(vol1['volumeId'])
    vol2_uuid = ec2utils.ec2_vol_id_to_uuid(vol2['volumeId'])
    # enforce periodic tasks run in short time to avoid wait for 60s.
    self._restart_compute_service(periodic_interval=0.3)
    # vol1 is attached at boot via the block device mapping.
    kwargs = {
        'image_id': 'ami-1',
        'instance_type': FLAGS.default_instance_type,
        'max_count': 1,
        'block_device_mapping': [{
            'device_name': '/dev/vdb',
            'volume_id': vol1_uuid,
            'delete_on_termination': True
        }]
    }
    ec2_instance_id = self._run_instance(**kwargs)
    instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
    instance_uuid = ec2utils.ec2_instance_id_to_uuid(
        self.context, ec2_instance_id)
    vols = self.volume_api.get_all(self.context)
    vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
    self.assertEqual(len(vols), 1)
    for vol in vols:
        self.assertEqual(vol['id'], vol1_uuid)
        self._assert_volume_attached(vol, instance_uuid, '/dev/vdb')
    vol = self.volume_api.get(self.context, vol2_uuid)
    self._assert_volume_detached(vol)
    # Hot-attach vol2, then detach vol1 before stopping.
    instance = db.instance_get(self.context, instance_id)
    self.cloud.compute_api.attach_volume(self.context,
                                         instance,
                                         volume_id=vol2_uuid,
                                         device='/dev/vdc')
    vol1 = self.volume_api.get(self.context, vol1_uuid)
    self._assert_volume_attached(vol1, instance_uuid, '/dev/vdb')
    vol2 = self.volume_api.get(self.context, vol2_uuid)
    self._assert_volume_attached(vol2, instance_uuid, '/dev/vdc')
    self.cloud.compute_api.detach_volume(self.context,
                                         volume_id=vol1_uuid)
    vol1 = self.volume_api.get(self.context, vol1_uuid)
    self._assert_volume_detached(vol1)
    result = self.cloud.stop_instances(self.context, [ec2_instance_id])
    self.assertTrue(result)
    # vol2's attachment must survive the stop/start cycle.
    vol2 = self.volume_api.get(self.context, vol2_uuid)
    self._assert_volume_attached(vol2, instance_uuid, '/dev/vdc')
    self.cloud.start_instances(self.context, [ec2_instance_id])
    vols = self.volume_api.get_all(self.context)
    vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
    self.assertEqual(len(vols), 1)
    # NOTE(review): asserts on the stale vol1 snapshot, then refreshes
    # and asserts again -- presumably intentional redundancy; verify.
    self._assert_volume_detached(vol1)
    vol1 = self.volume_api.get(self.context, vol1_uuid)
    self._assert_volume_detached(vol1)
    self.cloud.terminate_instances(self.context, [ec2_instance_id])
def _s3_parse_manifest(self, context, metadata, manifest):
    """Parse an S3 bundle manifest and register a pending image.

    Returns a (parsed_manifest_element, created_image) tuple. A
    kernel/ramdisk id of 'true' in the manifest marks the image itself
    as a kernel/ramdisk rather than a machine image.
    """
    manifest = ElementTree.fromstring(manifest)
    image_format = 'ami'
    # NOTE(review): image_type is computed but never used in this
    # variant of the method -- possibly vestigial; verify.
    image_type = 'machine'
    try:
        kernel_id = manifest.find('machine_configuration/kernel_id').text
        if kernel_id == 'true':
            image_format = 'aki'
            image_type = 'kernel'
            kernel_id = None
    except Exception:
        # Missing element -> no separate kernel.
        kernel_id = None
    try:
        ramdisk_id = manifest.find('machine_configuration/ramdisk_id').text
        if ramdisk_id == 'true':
            image_format = 'ari'
            image_type = 'ramdisk'
            ramdisk_id = None
    except Exception:
        ramdisk_id = None
    try:
        arch = manifest.find('machine_configuration/architecture').text
    except Exception:
        # Default architecture when the manifest omits it.
        arch = 'x86_64'
    # NOTE(yamahata):
    # EC2 ec2-budlne-image --block-device-mapping accepts
    # <virtual name>=<device name> where
    # virtual name = {ami, root, swap, ephemeral<N>}
    # where N is no negative integer
    # device name = the device name seen by guest kernel.
    # They are converted into
    # block_device_mapping/mapping/{virtual, device}
    #
    # Do NOT confuse this with ec2-register's block device mapping
    # argument.
    mappings = []
    try:
        block_device_mapping = manifest.findall('machine_configuration/'
                                                'block_device_mapping/'
                                                'mapping')
        for bdm in block_device_mapping:
            mappings.append({'virtual': bdm.find('virtual').text,
                             'device': bdm.find('device').text})
    except Exception:
        mappings = []
    properties = metadata['properties']
    properties['project_id'] = context.project_id
    properties['architecture'] = arch
    # Dependent kernel/ramdisk references are stored as internal ids.
    if kernel_id:
        properties['kernel_id'] = ec2utils.ec2_id_to_id(kernel_id)
    if ramdisk_id:
        properties['ramdisk_id'] = ec2utils.ec2_id_to_id(ramdisk_id)
    if mappings:
        properties['mappings'] = mappings
    metadata.update({'disk_format': image_format,
                     'container_format': image_format,
                     'status': 'queued',
                     'is_public': False,
                     'properties': properties})
    # Marked pending until the actual image data is processed.
    metadata['properties']['image_state'] = 'pending'
    image = self.service.create(context, metadata)
    return manifest, image
def get_ajax_console(self, context, instance_id, **kwargs):
    """Return an ajax console for the first instance in the id list."""
    internal_id = ec2utils.ec2_id_to_id(instance_id[0])
    return self.compute_api.get_ajax_console(context,
                                             instance_id=internal_id)
def test_stop_with_attached_volume(self):
    """Make sure attach info is reflected to block device mapping"""
    availability_zone = 'zone1:host1'
    vol1 = self.cloud.create_volume(self.context,
                                    size=1,
                                    availability_zone=availability_zone)
    vol2 = self.cloud.create_volume(self.context,
                                    size=1,
                                    availability_zone=availability_zone)
    vol1_uuid = ec2utils.ec2_vol_id_to_uuid(vol1['volumeId'])
    vol2_uuid = ec2utils.ec2_vol_id_to_uuid(vol2['volumeId'])
    # enforce periodic tasks run in short time to avoid wait for 60s.
    self._restart_compute_service(periodic_interval=0.3)
    # vol1 is attached at boot via the block device mapping.
    kwargs = {'image_id': 'ami-1',
              'instance_type': FLAGS.default_instance_type,
              'max_count': 1,
              'block_device_mapping': [{'device_name': '/dev/vdb',
                                        'volume_id': vol1_uuid,
                                        'delete_on_termination': True}]}
    ec2_instance_id = self._run_instance(**kwargs)
    instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
    instance_uuid = ec2utils.ec2_instance_id_to_uuid(self.context,
                                                     ec2_instance_id)
    vols = self.volume_api.get_all(self.context)
    vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
    self.assertEqual(len(vols), 1)
    for vol in vols:
        self.assertEqual(vol['id'], vol1_uuid)
        self._assert_volume_attached(vol, instance_uuid, '/dev/vdb')
    vol = self.volume_api.get(self.context, vol2_uuid)
    self._assert_volume_detached(vol)
    # Hot-attach vol2, then detach vol1 before stopping.
    instance = db.instance_get(self.context, instance_id)
    self.cloud.compute_api.attach_volume(self.context,
                                         instance,
                                         volume_id=vol2_uuid,
                                         device='/dev/vdc')
    vol1 = self.volume_api.get(self.context, vol1_uuid)
    self._assert_volume_attached(vol1, instance_uuid, '/dev/vdb')
    vol2 = self.volume_api.get(self.context, vol2_uuid)
    self._assert_volume_attached(vol2, instance_uuid, '/dev/vdc')
    self.cloud.compute_api.detach_volume(self.context,
                                         volume_id=vol1_uuid)
    vol1 = self.volume_api.get(self.context, vol1_uuid)
    self._assert_volume_detached(vol1)
    result = self.cloud.stop_instances(self.context, [ec2_instance_id])
    self.assertTrue(result)
    # vol2's attachment must survive the stop/start cycle.
    vol2 = self.volume_api.get(self.context, vol2_uuid)
    self._assert_volume_attached(vol2, instance_uuid, '/dev/vdc')
    self.cloud.start_instances(self.context, [ec2_instance_id])
    vols = self.volume_api.get_all(self.context)
    vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
    self.assertEqual(len(vols), 1)
    # NOTE(review): asserts on the stale vol1 snapshot, then refreshes
    # and asserts again -- presumably intentional redundancy; verify.
    self._assert_volume_detached(vol1)
    vol1 = self.volume_api.get(self.context, vol1_uuid)
    self._assert_volume_detached(vol1)
    self.cloud.terminate_instances(self.context, [ec2_instance_id])
def _s3_create(self, context, metadata):
    """Get a manifest from S3 and make an image from it.

    Registers the image immediately in 'pending' state and spawns a
    green thread that downloads, decrypts, untars and uploads the
    image data, advancing image_state as it goes.
    """
    image_path = tempfile.mkdtemp(dir=FLAGS.image_decryption_dir)
    image_location = metadata['properties']['image_location']
    # image_location is "<bucket>/<path/to/manifest>".
    bucket_name = image_location.split('/')[0]
    manifest_path = image_location[len(bucket_name) + 1:]
    bucket = self._conn(context).get_bucket(bucket_name)
    key = bucket.get_key(manifest_path)
    manifest = key.get_contents_as_string()
    manifest = ElementTree.fromstring(manifest)
    image_format = 'ami'
    image_type = 'machine'
    try:
        kernel_id = manifest.find('machine_configuration/kernel_id').text
        if kernel_id == 'true':
            # 'true' means this image IS a kernel, not that it has one.
            image_format = 'aki'
            image_type = 'kernel'
            kernel_id = None
    except Exception:
        kernel_id = None
    try:
        ramdisk_id = manifest.find('machine_configuration/ramdisk_id').text
        if ramdisk_id == 'true':
            image_format = 'ari'
            image_type = 'ramdisk'
            ramdisk_id = None
    except Exception:
        ramdisk_id = None
    try:
        arch = manifest.find('machine_configuration/architecture').text
    except Exception:
        arch = 'x86_64'
    properties = metadata['properties']
    properties['project_id'] = context.project_id
    properties['architecture'] = arch
    if kernel_id:
        properties['kernel_id'] = ec2utils.ec2_id_to_id(kernel_id)
    if ramdisk_id:
        properties['ramdisk_id'] = ec2utils.ec2_id_to_id(ramdisk_id)
    # NOTE(review): image_type is computed but not stored in this
    # variant of the method -- possibly vestigial; verify.
    metadata.update({
        'disk_format': image_format,
        'container_format': image_format,
        'status': 'queued',
        'is_public': True,
        'properties': properties
    })
    metadata['properties']['image_state'] = 'pending'
    image = self.service.create(context, metadata)
    image_id = image['id']

    def delayed_create():
        """This handles the fetching and decrypting of the part files."""
        parts = []
        for fn_element in manifest.find('image').getiterator('filename'):
            part = self._download_file(bucket, fn_element.text, image_path)
            parts.append(part)
        # NOTE(vish): this may be suboptimal, should we use cat?
        encrypted_filename = os.path.join(image_path, 'image.encrypted')
        with open(encrypted_filename, 'w') as combined:
            for filename in parts:
                with open(filename) as part:
                    shutil.copyfileobj(part, combined)
        metadata['properties']['image_state'] = 'decrypting'
        self.service.update(context, image_id, metadata)
        hex_key = manifest.find('image/ec2_encrypted_key').text
        encrypted_key = binascii.a2b_hex(hex_key)
        hex_iv = manifest.find('image/ec2_encrypted_iv').text
        encrypted_iv = binascii.a2b_hex(hex_iv)
        # FIXME(vish): grab key from common service so this can run on
        # any host.
        cloud_pk = crypto.key_path(context.project_id)
        decrypted_filename = os.path.join(image_path, 'image.tar.gz')
        self._decrypt_image(encrypted_filename, encrypted_key,
                            encrypted_iv, cloud_pk, decrypted_filename)
        metadata['properties']['image_state'] = 'untarring'
        self.service.update(context, image_id, metadata)
        unz_filename = self._untarzip_image(image_path, decrypted_filename)
        metadata['properties']['image_state'] = 'uploading'
        with open(unz_filename) as image_file:
            self.service.update(context, image_id, metadata, image_file)
        metadata['properties']['image_state'] = 'available'
        self.service.update(context, image_id, metadata)
        shutil.rmtree(image_path)

    # Runs asynchronously; callers see the image in 'pending' state.
    eventlet.spawn_n(delayed_create)
    return image
def test_ec2_id_to_id(self):
    """Every resource prefix decodes to the same hex-suffix int."""
    cases = (("i-0000001e", 30),
             ("ami-1d", 29),
             ("snap-0000001c", 28),
             ("vol-0000001b", 27))
    for ec2_id, expected in cases:
        self.assertEqual(ec2utils.ec2_id_to_id(ec2_id), expected)
def test_stop_with_attached_volume(self):
    """Make sure attach info is reflected to block device mapping"""
    availability_zone = "zone1:host1"
    vol1 = self.cloud.create_volume(self.context,
                                    size=1,
                                    availability_zone=availability_zone)
    vol2 = self.cloud.create_volume(self.context,
                                    size=1,
                                    availability_zone=availability_zone)
    vol1_uuid = ec2utils.ec2_vol_id_to_uuid(vol1["volumeId"])
    vol2_uuid = ec2utils.ec2_vol_id_to_uuid(vol2["volumeId"])
    # enforce periodic tasks run in short time to avoid wait for 60s.
    self._restart_compute_service(periodic_interval=0.3)
    # vol1 is attached at boot via the block device mapping.
    kwargs = {
        "image_id": "ami-1",
        "instance_type": FLAGS.default_instance_type,
        "max_count": 1,
        "block_device_mapping": [
            {"device_name": "/dev/vdb",
             "volume_id": vol1_uuid,
             "delete_on_termination": True}
        ],
    }
    ec2_instance_id = self._run_instance(**kwargs)
    instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
    instance_uuid = ec2utils.ec2_instance_id_to_uuid(self.context,
                                                     ec2_instance_id)
    vols = self.volume_api.get_all(self.context)
    vols = [v for v in vols if v["instance_uuid"] == instance_uuid]
    self.assertEqual(len(vols), 1)
    for vol in vols:
        self.assertEqual(vol["id"], vol1_uuid)
        self._assert_volume_attached(vol, instance_uuid, "/dev/vdb")
    vol = self.volume_api.get(self.context, vol2_uuid)
    self._assert_volume_detached(vol)
    # Hot-attach vol2, then detach vol1 before stopping.
    instance = db.instance_get(self.context, instance_id)
    self.cloud.compute_api.attach_volume(self.context,
                                         instance,
                                         volume_id=vol2_uuid,
                                         device="/dev/vdc")
    vol1 = self.volume_api.get(self.context, vol1_uuid)
    self._assert_volume_attached(vol1, instance_uuid, "/dev/vdb")
    vol2 = self.volume_api.get(self.context, vol2_uuid)
    self._assert_volume_attached(vol2, instance_uuid, "/dev/vdc")
    self.cloud.compute_api.detach_volume(self.context,
                                         volume_id=vol1_uuid)
    vol1 = self.volume_api.get(self.context, vol1_uuid)
    self._assert_volume_detached(vol1)
    result = self.cloud.stop_instances(self.context, [ec2_instance_id])
    self.assertTrue(result)
    # vol2's attachment must survive the stop/start cycle.
    vol2 = self.volume_api.get(self.context, vol2_uuid)
    self._assert_volume_attached(vol2, instance_uuid, "/dev/vdc")
    self.cloud.start_instances(self.context, [ec2_instance_id])
    vols = self.volume_api.get_all(self.context)
    vols = [v for v in vols if v["instance_uuid"] == instance_uuid]
    self.assertEqual(len(vols), 1)
    # NOTE(review): asserts on the stale vol1 snapshot, then refreshes
    # and asserts again -- presumably intentional redundancy; verify.
    self._assert_volume_detached(vol1)
    vol1 = self.volume_api.get(self.context, vol1_uuid)
    self._assert_volume_detached(vol1)
    self.cloud.terminate_instances(self.context, [ec2_instance_id])