def _get_modified_chunks(metafile):
    """Read an overlay metadata file and return the modified chunk sets.

    Parameters:
        metafile: path to a msgpack-encoded overlay metadata file.

    Returns:
        (disk_chunks_set, memory_chunks_set): sets of modified disk and
        memory chunk indexes aggregated over every overlay-file entry.

    Raises:
        Exception: if any chunk index appears more than once (metadata is
        expected to list each modified chunk exactly once).
    """
    # BUGFIX: open in binary mode -- msgpack payloads are binary and text
    # mode corrupts them on platforms that translate line endings.  The
    # original also leaked the file handle; 'with' guarantees closure.
    with open(metafile, "rb") as meta_fd:
        meta1 = msgpack.unpackb(meta_fd.read())
    overlay_files = meta1[Const.META_OVERLAY_FILES]
    disk_chunks = list()
    memory_chunks = list()
    for each_file in overlay_files:
        disk_chunks += each_file[Const.META_OVERLAY_FILE_DISK_CHUNKS]
        memory_chunks += each_file[Const.META_OVERLAY_FILE_MEMORY_CHUNKS]
    disk_chunks_set = set(disk_chunks)
    memory_chunks_set = set(memory_chunks)
    # A size mismatch means a chunk was listed twice -> corrupt metadata.
    if len(disk_chunks_set) != len(disk_chunks):
        raise Exception("Have duplicated data while converting from list to set")
    if len(memory_chunks_set) != len(memory_chunks):
        raise Exception("Have duplicated data while converting from list to set")
    return disk_chunks_set, memory_chunks_set
def _get_modified_chunks(metafile):
    """Return the sets of disk and memory chunks recorded in *metafile*.

    Parameters:
        metafile: path to a msgpack-encoded overlay metadata file.

    Returns:
        Tuple (disk_chunks_set, memory_chunks_set).

    Raises:
        Exception: when the metadata lists a duplicated chunk index.
    """
    # BUGFIX: binary mode ("rb") is required for msgpack data, and the
    # original leaked the open file handle -- use a context manager.
    with open(metafile, "rb") as fp:
        meta1 = msgpack.unpackb(fp.read())
    overlay_files = meta1[Const.META_OVERLAY_FILES]
    disk_chunks = list()
    memory_chunks = list()
    for each_file in overlay_files:
        disk_chunks += each_file[Const.META_OVERLAY_FILE_DISK_CHUNKS]
        memory_chunks += each_file[Const.META_OVERLAY_FILE_MEMORY_CHUNKS]
    disk_chunks_set = set(disk_chunks)
    memory_chunks_set = set(memory_chunks)
    # Duplicates indicate inconsistent metadata -- fail loudly.
    if len(disk_chunks_set) != len(disk_chunks):
        raise Exception(
            "Have duplicated data while converting from list to set")
    if len(memory_chunks_set) != len(memory_chunks):
        raise Exception(
            "Have duplicated data while converting from list to set")
    return disk_chunks_set, memory_chunks_set
def request_synthesis(server_address, token, end_point, key_name=None,
                      server_name=None, overlay_url=None):
    """Request VM synthesis from a VM overlay via the OpenStack API.

    Reads the overlay metadata to obtain its base-VM SHA-256, locates the
    matching registered base image and a flavor large enough for it, then
    POSTs a server-create request.

    Parameters:
        server_address: OpenStack API host, passed through to get_list().
        token: keystone auth token.
        end_point: parsed service endpoint; end_point[1] is host[:port],
            end_point[2] is the URL path prefix.
        key_name: optional keypair name for the new instance.
        server_name: name for the synthesized instance.
        overlay_url: URL of the VM overlay package.

    Returns:
        Parsed JSON response (dict) from the server-create call.

    Raises:
        CloudletClientError: when no matching base VM, base-VM resource
            info, or flavor can be found.
    """
    # read meta data from vm overlay URL
    from elijah.provisioning.package import VMOverlayPackage
    try:
        from elijah.provisioning import msgpack
    except ImportError:
        # fall back to the system-wide msgpack ('e' was unused)
        import msgpack

    overlay_package = VMOverlayPackage(overlay_url)
    meta_raw = overlay_package.read_meta()
    meta_info = msgpack.unpackb(meta_raw)
    requested_basevm_id = meta_info['base_vm_sha256']

    # find matching base VM
    image_list = get_list(server_address, token, end_point, "images")
    basevm_uuid = None
    basevm_xml = None
    basevm_name = None
    basevm_disk = 0
    for image in image_list:
        properties = image.get("metadata", None)
        if properties is None or len(properties) == 0:
            continue
        if properties.get(CLOUDLET_TYPE.PROPERTY_KEY_CLOUDLET_TYPE) != \
                CLOUDLET_TYPE.IMAGE_TYPE_BASE_DISK:
            continue
        base_sha256_uuid = properties.get(CLOUDLET_TYPE.PROPERTY_KEY_BASE_UUID)
        if base_sha256_uuid == requested_basevm_id:
            basevm_uuid = image['id']
            basevm_name = image['name']
            basevm_xml = properties.get(
                CLOUDLET_TYPE.PROPERTY_KEY_BASE_RESOURCE, None)
            basevm_disk = image.get('minDisk', 0)
            break
    if basevm_uuid is None:
        raise CloudletClientError("Cannot find matching Base VM with (%s)" %
                                  str(requested_basevm_id))

    # find matching flavor.
    if basevm_xml is None:
        msg = "Cannot find resource allocation information of base VM (%s)" %\
            str(requested_basevm_id)
        raise CloudletClientError(msg)
    cpu_count, memory_mb = get_resource_size(basevm_xml)
    flavor_list = get_list(server_address, token, end_point, "flavors")
    flavor_ref, flavor_id = find_matching_flavor(flavor_list, cpu_count,
                                                 memory_mb, basevm_disk)
    # BUGFIX: identity comparison with None ('is None', not '== None').
    if flavor_ref is None or flavor_id is None:
        msg = "Cannot find matching flavor: vcpu (%d), ram (%d MB), disk (%d GB)\n" % (
            cpu_count, memory_mb, basevm_disk)
        msg += "Please create the matching at your OpenStack"
        raise CloudletClientError(msg)

    # generate request
    meta_data = {"overlay_url": overlay_url}
    s = {
        "server": {
            "name": server_name,
            "imageRef": str(basevm_uuid),
            "flavorRef": flavor_id,
            "metadata": meta_data,
            "min_count": "1",
            "max_count": "1",
            "key_name": key_name,
        }
    }
    params = json.dumps(s)
    headers = {"X-Auth-Token": token, "Content-type": "application/json"}
    conn = httplib.HTTPConnection(end_point[1])
    try:
        conn.request("POST", "%s/servers" % end_point[2], params, headers)
        sys.stdout.write("request new server: %s/servers\n" % (end_point[2]))
        response = conn.getresponse()
        data = response.read()
        dd = json.loads(data)
    finally:
        # BUGFIX: close the connection even when the request/parse fails.
        conn.close()
    return dd
def piping_synthesis(overlay_url, base_path): # check_base VM start_time = time.time() meta_stream = urllib2.urlopen(overlay_url) meta_raw = read_all(meta_stream) meta_info = msgpack.unpackb(meta_raw) url_manager = Manager() overlay_urls = url_manager.list() url_prefix = os.path.dirname(overlay_url) for blob in meta_info[Const.META_OVERLAY_FILES]: blob_filename = os.path.basename(blob[Const.META_OVERLAY_FILE_NAME]) url = os.path.join(url_prefix, blob_filename) overlay_urls.append(url) (base_diskmeta, base_mem, base_memmeta) = \ Const.get_basepath(base_path, check_exist=True) # read overlay files # create named pipe to convert queue to stream time_transfer = Queue() time_decomp = Queue() time_delta = Queue() time_fuse = Queue() tmp_dir = tempfile.mkdtemp() temp_overlay_filepath = os.path.join(tmp_dir, "overlay_file") temp_overlay_file = open(temp_overlay_filepath, "w+b") overlay_pipe = os.path.join(tmp_dir, 'overlay_pipe') os.mkfifo(overlay_pipe) # overlay demanding_queue = Queue() download_queue = JoinableQueue() download_process = Process(target=synthesis_server.network_worker, args=( overlay_urls, demanding_queue, download_queue, time_transfer, CHUNK_SIZE, )) decomp_process = Process(target=synthesis_server.decomp_worker, args=( download_queue, overlay_pipe, time_decomp, temp_overlay_file, )) modified_img, modified_mem, fuse, delta_proc, fuse_thread = \ synthesis.recover_launchVM(base_path, meta_info, overlay_pipe, log=sys.stdout, demanding_queue=demanding_queue) delta_proc.time_queue = time_delta fuse_thread.time_queue = time_fuse # start processes download_process.start() decomp_process.start() delta_proc.start() fuse_thread.start() # wait for end delta_proc.join() fuse_thread.join() # printout result end_time = time.time() total_time = (end_time - start_time) synthesis_server.SynthesisTCPHandler.print_statistics(start_time, end_time, \ time_transfer, time_decomp, time_delta, time_fuse, \ print_out=sys.stdout) delta_proc.finish() if 
os.path.exists(overlay_pipe): os.unlink(overlay_pipe) shutil.rmtree(tmp_dir) print "\n[Time] Total Time for synthesis(including download) : %f" % ( total_time) return fuse
def decoding(data):
    """Deserialize a msgpack-encoded byte string into Python objects."""
    unpacked = msgpack.unpackb(data)
    return unpacked
def _spawn_using_synthesis(self, context, instance, xml, image_meta, overlay_url):
    """Spawn a nova instance by VM synthesis from a VM overlay URL.

    Reads the overlay metadata, verifies it matches the requested
    OpenStack base disk, caches every base-VM artifact, recovers the
    launch disk/memory, and resumes the synthesized VM.

    Returns:
        The resumed SynthesizedVM object.

    Raises:
        exception.ImageNotFound: when the image has no cloudlet metadata
            or the overlay's base VM does not match the image.
    """
    # download vm overlay
    overlay_package = VMOverlayPackage(overlay_url)
    meta_raw = overlay_package.read_meta()
    meta_info = msgpack.unpackb(meta_raw)
    basevm_sha256 = meta_info.get(Cloudlet_Const.META_BASE_VM_SHA256, None)

    image_properties = image_meta.get("properties", None)
    if image_properties is None:
        msg = "image does not have properties for cloudlet metadata"
        raise exception.ImageNotFound(msg)
    image_sha256 = image_properties.get(CloudletAPI.PROPERTY_KEY_BASE_UUID)

    # check basevm
    if basevm_sha256 != image_sha256:
        msg = "requested base vm is not compatible with openstack base disk %s != %s" \
            % (basevm_sha256, image_sha256)
        raise exception.ImageNotFound(msg)

    # resolve and cache all base-VM artifacts (memory image, hash files)
    memory_snap_id = str(
        image_properties.get(CloudletAPI.IMAGE_TYPE_BASE_MEM))
    diskhash_snap_id = str(
        image_properties.get(CloudletAPI.IMAGE_TYPE_BASE_DISK_HASH))
    memhash_snap_id = str(
        image_properties.get(CloudletAPI.IMAGE_TYPE_BASE_MEM_HASH))
    basedisk_path = self._get_cache_image(context, instance, image_meta['id'])
    basemem_path = self._get_cache_image(context, instance, memory_snap_id)
    diskhash_path = self._get_cache_image(context, instance, diskhash_snap_id)
    memhash_path = self._get_cache_image(context, instance, memhash_snap_id)

    # download blob
    fileutils.ensure_tree(libvirt_utils.get_instance_path(instance))
    decomp_overlay = os.path.join(
        libvirt_utils.get_instance_path(instance), 'decomp_overlay')
    meta_info = compression.decomp_overlayzip(overlay_url, decomp_overlay)

    # recover VM
    launch_disk, launch_mem, fuse, delta_proc, fuse_proc = \
        synthesis.recover_launchVM(basedisk_path, meta_info, decomp_overlay,
                                   base_mem=basemem_path,
                                   base_diskmeta=diskhash_path,
                                   base_memmeta=memhash_path)

    # resume VM
    LOG.info(_("Starting VM synthesis"), instance=instance)
    synthesized_vm = synthesis.SynthesizedVM(launch_disk, launch_mem, fuse,
                                             disk_only=False,
                                             qemu_args=False,
                                             nova_xml=xml,
                                             nova_conn=self._conn,
                                             nova_util=libvirt_utils)
    # testing non-thread resume
    delta_proc.start()
    fuse_proc.start()
    delta_proc.join()
    fuse_proc.join()
    LOG.info(_("Finish VM synthesis"), instance=instance)
    synthesized_vm.resume()

    # rettach NIC
    synthesis.rettach_nic(synthesized_vm.machine,
                          synthesized_vm.old_xml_str, xml)
    return synthesized_vm
def request_synthesis(server_address, token, end_point, key_name=None,
                      server_name=None, overlay_url=None):
    """Request synthesis of a new server from a VM overlay.

    Looks up the base VM matching the overlay's recorded SHA-256, picks a
    flavor that fits the base VM's resources, and issues the OpenStack
    server-create request.

    Parameters:
        server_address: OpenStack API host, forwarded to get_list().
        token: keystone auth token.
        end_point: parsed endpoint; [1] is host[:port], [2] the path prefix.
        key_name: optional keypair name.
        server_name: name of the instance to create.
        overlay_url: URL of the VM overlay package.

    Returns:
        dict parsed from the JSON response of the create call.

    Raises:
        CloudletClientError: if no matching base VM or flavor is found.
    """
    # read meta data from vm overlay URL
    from elijah.provisioning.package import VMOverlayPackage
    try:
        from elijah.provisioning import msgpack
    except ImportError:
        # use the system msgpack when the bundled one is absent
        import msgpack

    overlay_package = VMOverlayPackage(overlay_url)
    meta_raw = overlay_package.read_meta()
    meta_info = msgpack.unpackb(meta_raw)
    requested_basevm_id = meta_info['base_vm_sha256']

    # find matching base VM
    image_list = get_list(server_address, token, end_point, "images")
    basevm_uuid = None
    basevm_xml = None
    basevm_name = None
    basevm_disk = 0
    for image in image_list:
        properties = image.get("metadata", None)
        if properties is None or len(properties) == 0:
            continue
        if properties.get(CLOUDLET_TYPE.PROPERTY_KEY_CLOUDLET_TYPE) != \
                CLOUDLET_TYPE.IMAGE_TYPE_BASE_DISK:
            continue
        base_sha256_uuid = properties.get(CLOUDLET_TYPE.PROPERTY_KEY_BASE_UUID)
        if base_sha256_uuid == requested_basevm_id:
            basevm_uuid = image['id']
            basevm_name = image['name']
            basevm_xml = properties.get(
                CLOUDLET_TYPE.PROPERTY_KEY_BASE_RESOURCE, None)
            basevm_disk = image.get('minDisk', 0)
            break
    if basevm_uuid is None:
        raise CloudletClientError("Cannot find matching Base VM with (%s)" %
                                  str(requested_basevm_id))

    # find matching flavor.
    if basevm_xml is None:
        msg = "Cannot find resource allocation information of base VM (%s)" %\
            str(requested_basevm_id)
        raise CloudletClientError(msg)
    cpu_count, memory_mb = get_resource_size(basevm_xml)
    flavor_list = get_list(server_address, token, end_point, "flavors")
    flavor_ref, flavor_id = find_matching_flavor(flavor_list, cpu_count,
                                                 memory_mb, basevm_disk)
    # BUGFIX: 'is None' instead of '== None' for None checks.
    if flavor_ref is None or flavor_id is None:
        msg = "Cannot find matching flavor: vcpu (%d), ram (%d MB), disk (%d GB)\n" % (
            cpu_count, memory_mb, basevm_disk)
        msg += "Please create the matching at your OpenStack"
        raise CloudletClientError(msg)

    # generate request
    meta_data = {"overlay_url": overlay_url}
    s = {
        "server": {
            "name": server_name,
            "imageRef": str(basevm_uuid),
            "flavorRef": flavor_id,
            "metadata": meta_data,
            "min_count": "1",
            "max_count": "1",
            "key_name": key_name,
        }}
    params = json.dumps(s)
    headers = {"X-Auth-Token": token, "Content-type": "application/json"}
    conn = httplib.HTTPConnection(end_point[1])
    try:
        conn.request("POST", "%s/servers" % end_point[2], params, headers)
        sys.stdout.write("request new server: %s/servers\n" % (end_point[2]))
        response = conn.getresponse()
        data = response.read()
        dd = json.loads(data)
    finally:
        # BUGFIX: guarantee the HTTP connection is released on error paths.
        conn.close()
    return dd
class SetSynthesizeDetailsAction(workflows.Action):
    """Horizon workflow action collecting the inputs for VM synthesis.

    Validates the VM overlay URL (format, reachability, and that a base
    VM matching the overlay's SHA-256 is registered) before launch.
    """
    overlay_url = forms.CharField(max_length=200,
                                  required=True,
                                  label=_("URL for VM overlay"),
                                  initial="http://")
    name = forms.CharField(max_length=80,
                           label=_("Instance Name"),
                           initial="synthesized_vm")
    flavor = forms.ChoiceField(label=_("Flavor"),
                               required=True,
                               help_text=_("Size of image to launch."))

    class Meta:
        name = _("VM overlay Info")
        help_text_template = ("project/cloudlet/instance/"
                              "_synthesis_details_help.html")

    def clean(self):
        """Validate the overlay URL and resolve its associated base VM.

        Returns cleaned_data with 'image_id' set to the matching base
        VM's id.  Raises forms.ValidationError on any failure.
        """
        cleaned_data = super(SetSynthesizeDetailsAction, self).clean()
        overlay_url = cleaned_data.get('overlay_url', None)
        if overlay_url is None:
            raise forms.ValidationError(_("Need URL to fetch VM overlay"))

        # check url format
        val = URLValidator()
        try:
            val(overlay_url)
        except ValidationError:
            # BUGFIX: modern 'except' syntax replaces the removed
            # 'except ValidationError, e' form ('e' was unused anyway).
            raise forms.ValidationError(_("Malformed URL for VM overlay"))

        # check url accessibility
        try:
            header_ret = requests.head(overlay_url)
            # BUGFIX: raise an explicit exception rather than a bare
            # 'raise' with no active exception (which raises TypeError).
            if not header_ret.ok:
                raise Exception("HEAD request returned %s" %
                                header_ret.status_code)
        except Exception:
            msg = "URL is not accessible : %s" % overlay_url
            raise forms.ValidationError(_(msg))

        if cleaned_data.get('name', None) is None:
            raise forms.ValidationError(_("Need name for the synthesized VM"))

        # finally check the header file of VM overlay
        # to make sure that associated Base VM exists
        matching_image = None
        # BUGFIX: this initialization was commented out; the except
        # handler below formats the name and would raise NameError if
        # VMOverlayPackage/read_meta failed before the try-body assigned it.
        requested_basevm_sha256 = ''
        try:
            overlay_package = VMOverlayPackage(overlay_url)
            metadata = overlay_package.read_meta()
            overlay_meta = msgpack.unpackb(metadata)
            requested_basevm_sha256 = overlay_meta.get(
                Cloudlet_Const.META_BASE_VM_SHA256, None)
            basevms = utils.BaseVMs()
            matching_image = basevms.is_exist(self.request,
                                              requested_basevm_sha256)
        except Exception:
            msg = "Error while finding matching Base VM with %s" % (
                requested_basevm_sha256)
            raise forms.ValidationError(_(msg))
        if matching_image is None:
            msg = "Cannot find matching base VM with UUID(%s)" % (
                requested_basevm_sha256)
            raise forms.ValidationError(_(msg))
        else:
            # specify associated base VM from the metadata
            cleaned_data['image_id'] = str(matching_image.id)
            return cleaned_data
def _spawn_using_synthesis(self, context, instance, xml, image_meta, overlay_url):
    """Spawn an instance through VM synthesis.

    Fetches the overlay metadata from *overlay_url*, checks that its base
    VM matches the given glance image, caches the base artifacts, recovers
    the launch disk and memory, then resumes the synthesized VM and
    reattaches its NIC.

    Returns:
        The resumed SynthesizedVM.

    Raises:
        exception.ImageNotFound: missing cloudlet metadata or a base-VM
            SHA-256 mismatch between overlay and image.
    """
    # download vm overlay
    overlay_package = VMOverlayPackage(overlay_url)
    meta_info = msgpack.unpackb(overlay_package.read_meta())
    basevm_sha256 = meta_info.get(Cloudlet_Const.META_BASE_VM_SHA256, None)

    image_properties = image_meta.get("properties", None)
    if image_properties is None:
        raise exception.ImageNotFound(
            "image does not have properties for cloudlet metadata")
    image_sha256 = image_properties.get(CloudletAPI.PROPERTY_KEY_BASE_UUID)

    # check basevm
    if basevm_sha256 != image_sha256:
        msg = "requested base vm is not compatible with openstack base disk %s != %s" % (
            basevm_sha256,
            image_sha256,
        )
        raise exception.ImageNotFound(msg)

    # cache every base-VM artifact locally before recovery
    memory_snap_id = str(image_properties.get(CloudletAPI.IMAGE_TYPE_BASE_MEM))
    diskhash_snap_id = str(
        image_properties.get(CloudletAPI.IMAGE_TYPE_BASE_DISK_HASH))
    memhash_snap_id = str(
        image_properties.get(CloudletAPI.IMAGE_TYPE_BASE_MEM_HASH))
    basedisk_path = self._get_cache_image(context, instance, image_meta["id"])
    basemem_path = self._get_cache_image(context, instance, memory_snap_id)
    diskhash_path = self._get_cache_image(context, instance, diskhash_snap_id)
    memhash_path = self._get_cache_image(context, instance, memhash_snap_id)

    # download blob
    instance_path = libvirt_utils.get_instance_path(instance)
    fileutils.ensure_tree(instance_path)
    decomp_overlay = os.path.join(instance_path, "decomp_overlay")
    meta_info = compression.decomp_overlayzip(overlay_url, decomp_overlay)

    # recover VM
    recovered = synthesis.recover_launchVM(basedisk_path,
                                           meta_info,
                                           decomp_overlay,
                                           base_mem=basemem_path,
                                           base_diskmeta=diskhash_path,
                                           base_memmeta=memhash_path)
    launch_disk, launch_mem, fuse, delta_proc, fuse_proc = recovered

    # resume VM
    LOG.info(_("Starting VM synthesis"), instance=instance)
    synthesized_vm = synthesis.SynthesizedVM(
        launch_disk,
        launch_mem,
        fuse,
        disk_only=False,
        qemu_args=False,
        nova_xml=xml,
        nova_conn=self._conn,
        nova_util=libvirt_utils,
    )
    # testing non-thread resume
    delta_proc.start()
    fuse_proc.start()
    delta_proc.join()
    fuse_proc.join()
    LOG.info(_("Finish VM synthesis"), instance=instance)
    synthesized_vm.resume()

    # rettach NIC
    synthesis.rettach_nic(synthesized_vm.machine,
                          synthesized_vm.old_xml_str,
                          xml)
    return synthesized_vm
def piping_synthesis(overlay_url, base_path): # check_base VM start_time = time.time() meta_stream = urllib2.urlopen(overlay_url) meta_raw = read_all(meta_stream) meta_info = msgpack.unpackb(meta_raw) url_manager = Manager() overlay_urls = url_manager.list() url_prefix = os.path.dirname(overlay_url) for blob in meta_info[Const.META_OVERLAY_FILES]: blob_filename = os.path.basename(blob[Const.META_OVERLAY_FILE_NAME]) url = os.path.join(url_prefix, blob_filename) overlay_urls.append(url) (base_diskmeta, base_mem, base_memmeta) = \ Const.get_basepath(base_path, check_exist=True) # read overlay files # create named pipe to convert queue to stream time_transfer = Queue(); time_decomp = Queue(); time_delta = Queue(); time_fuse = Queue(); tmp_dir = tempfile.mkdtemp() temp_overlay_filepath = os.path.join(tmp_dir, "overlay_file") temp_overlay_file = open(temp_overlay_filepath, "w+b") overlay_pipe = os.path.join(tmp_dir, 'overlay_pipe') os.mkfifo(overlay_pipe) # overlay demanding_queue = Queue() download_queue = JoinableQueue() download_process = Process(target=synthesis_server.network_worker, args=( overlay_urls, demanding_queue, download_queue, time_transfer, CHUNK_SIZE, ) ) decomp_process = Process(target=synthesis_server.decomp_worker, args=( download_queue, overlay_pipe, time_decomp, temp_overlay_file, ) ) modified_img, modified_mem, fuse, delta_proc, fuse_thread = \ synthesis.recover_launchVM(base_path, meta_info, overlay_pipe, log=sys.stdout, demanding_queue=demanding_queue) delta_proc.time_queue = time_delta fuse_thread.time_queue = time_fuse # start processes download_process.start() decomp_process.start() delta_proc.start() fuse_thread.start() # wait for end delta_proc.join() fuse_thread.join() # printout result end_time = time.time() total_time = (end_time-start_time) synthesis_server.SynthesisTCPHandler.print_statistics(start_time, end_time, \ time_transfer, time_decomp, time_delta, time_fuse, \ print_out=sys.stdout) delta_proc.finish() if 
os.path.exists(overlay_pipe): os.unlink(overlay_pipe) shutil.rmtree(tmp_dir) print "\n[Time] Total Time for synthesis(including download) : %f" % (total_time) return fuse
class SetSynthesizeDetailsAction(workflows.Action):
    """Horizon workflow action collecting details for VM synthesis.

    Gathers the overlay URL, instance name, security groups, and flavor,
    and validates that the overlay is reachable and references a
    registered base VM.
    """
    overlay_url = forms.CharField(max_length=200,
                                  required=True,
                                  label=_("URL for VM overlay"),
                                  initial="http://")
    name = forms.CharField(max_length=80,
                           label=_("Instance Name"),
                           initial="synthesized_vm")
    security_group_ids = forms.MultipleChoiceField(
        label=_("Security Groups"),
        required=True,
        initial=["default"],
        widget=forms.CheckboxSelectMultiple(),
        help_text=_("Launch instance in these "
                    "security groups."))
    flavor = forms.ChoiceField(label=_("Flavor"),
                               required=True,
                               help_text=_("Size of image to launch."))

    class Meta:
        name = _("VM overlay Info")
        help_text_template = ("project/cloudlet/instance/"
                              "_synthesis_details_help.html")

    def clean(self):
        """Validate the overlay URL and resolve the matching base VM.

        Returns cleaned_data with 'image_id' pointing at the base VM.
        Raises forms.ValidationError on any validation failure.
        """
        cleaned_data = super(SetSynthesizeDetailsAction, self).clean()
        overlay_url = cleaned_data.get('overlay_url', None)
        if overlay_url is None:
            raise forms.ValidationError(_("Need URL to fetch VM overlay"))

        # check url format
        val = URLValidator()
        try:
            val(overlay_url)
        except ValidationError:
            # BUGFIX: 'except X, e' is Python-2-only syntax; the file
            # already uses the 'as' form elsewhere and 'e' was unused.
            raise forms.ValidationError(_("Malformed URL for VM overlay"))

        # check url accessibility
        try:
            header_ret = requests.head(overlay_url)
            # BUGFIX: raise an explicit exception instead of a bare
            # 'raise' with no active exception in flight.
            if not header_ret.ok:
                raise Exception("HEAD request returned %s" %
                                header_ret.status_code)
        except Exception:
            msg = "URL is not accessible : %s" % overlay_url
            raise forms.ValidationError(_(msg))

        if cleaned_data.get('name', None) is None:
            raise forms.ValidationError(_("Need name for the synthesized VM"))

        # finally check the header file of VM overlay
        # to make sure that associated Base VM exists
        from elijah.provisioning.package import VMOverlayPackage
        matching_image = None
        requested_basevm_sha256 = ''
        try:
            overlay_package = VMOverlayPackage(overlay_url)
            metadata = overlay_package.read_meta()
            overlay_meta = msgpack.unpackb(metadata)
            requested_basevm_sha256 = overlay_meta.get(
                Cloudlet_Const.META_BASE_VM_SHA256, None)
            matching_image = find_basevm_by_sha256(self.request,
                                                   requested_basevm_sha256)
        except Exception:
            msg = "Error while finding matching Base VM with %s" % (
                requested_basevm_sha256)
            raise forms.ValidationError(_(msg))
        # BUGFIX: 'is None' instead of '== None'.
        if matching_image is None:
            msg = "Cannot find matching base VM with UUID(%s)" % (
                requested_basevm_sha256)
            raise forms.ValidationError(_(msg))
        else:
            # specify associated base VM from the metadata
            cleaned_data['image_id'] = str(matching_image.id)
            return cleaned_data