def handle(self):
    """Handle an adaptive VM-handoff request from the client.

    Each request follows this format:

    | header size | header | blob header size | blob header | blob data  |
    |  (4 bytes)  | (var)  |    (4 bytes)     | (var bytes) | (var bytes)|

    Raises:
        StreamSynthesisError: on a short/failed header read, a blob header
            without a size field, or when no matching base VM is found.
    """
    # Byte counters consumed by _recv_all() for progress accounting.
    self.total_recved_size_cur = 0
    self.total_recved_size_prev = 0

    # --- receive and decode the handoff metadata header ---
    data = self._recv_all(4)
    if data is None or len(data) != 4:
        raise StreamSynthesisError(
            "Failed to receive first byte of header")
    message_size = struct.unpack("!I", data)[0]
    msgpack_data = self._recv_all(message_size)
    metadata = NetworkUtil.decoding(msgpack_data)
    launch_disk_size = metadata[Cloudlet_Const.META_RESUME_VM_DISK_SIZE]
    launch_memory_size = metadata[
        Cloudlet_Const.META_RESUME_VM_MEMORY_SIZE]

    # Side-channel process that records/report handoff progress messages.
    analysis_mq = multiprocessing.Queue()
    analysis_proc = HandoffAnalysisProc(handoff_url=self.client_address[0],
                                        message_queue=analysis_mq,
                                        disk_size=launch_disk_size,
                                        mem_size=launch_memory_size)
    analysis_proc.start()

    analysis_mq.put("=" * 50)
    analysis_mq.put("Adaptive VM Handoff Initiated")
    # client_address is a tuple (ip, port)
    analysis_mq.put(
        "Client Connection - %s:%d" %
        (self.client_address[0], self.client_address[1]))
    if self.server.handoff_data is not None:
        analysis_mq.put("Handoff via OpenStack")
        via_openstack = True
    else:
        analysis_mq.put("Handoff via cloudlet CLI")
        via_openstack = False

    synthesis_option, base_diskpath = self._check_validity(metadata)
    if base_diskpath is None:
        raise StreamSynthesisError("No matching base VM")
    if via_openstack:
        # NOTE(review): unpack order (disk, mem, diskmeta, memmeta) differs
        # from the other handler variants in this file — confirm against
        # the producer of handoff_data.base_vm_paths.
        base_diskpath, base_mempath, base_diskmeta, base_memmeta = \
            self.server.handoff_data.base_vm_paths
    else:
        (base_diskmeta, base_mempath, base_memmeta) = \
            Cloudlet_Const.get_basepath(base_diskpath, check_exist=True)
    analysis_mq.put("Synthesis Options %s" %
                    str(pformat(self.synthesis_option)))
    analysis_mq.put("Base VM Path: %s" % base_diskpath)
    analysis_mq.put("Image Disk Size: %d" % launch_disk_size)
    analysis_mq.put("Image Memory Size: %d" % launch_memory_size)
    analysis_mq.put("=" * 50)

    # --- paths handed to FUSE for the resumed disk/memory images ---
    if via_openstack:
        launch_disk = self.server.handoff_data.launch_diskpath
        launch_mem = self.server.handoff_data.launch_memorypath
    else:
        temp_synthesis_dir = tempfile.mkdtemp(prefix="cloudlet-comp-")
        launch_disk = os.path.join(temp_synthesis_dir, "launch-disk")
        launch_mem = os.path.join(temp_synthesis_dir, "launch-mem")
    memory_chunk_all = set()
    disk_chunk_all = set()

    # --- start the pipelined decompress -> delta-recover processes ---
    network_out_queue = multiprocessing.Queue()
    decomp_queue = multiprocessing.Queue()
    fuse_info_queue = multiprocessing.Queue()
    decomp_proc = DecompProc(network_out_queue, decomp_queue, num_proc=4,
                             analysis_queue=analysis_mq)
    decomp_proc.start()
    analysis_mq.put("Starting (%d) decompression processes..." %
                    (decomp_proc.num_proc))
    delta_proc = RecoverDeltaProc(base_diskpath, base_mempath,
                                  decomp_queue,
                                  launch_mem,
                                  launch_disk,
                                  Cloudlet_Const.CHUNK_SIZE,
                                  fuse_info_queue,
                                  analysis_mq)
    delta_proc.start()
    analysis_mq.put("Starting delta recovery process...")

    # --- receive each overlay blob until the zero-size end marker ---
    recv_blob_counter = 0
    while True:
        data = self._recv_all(4)
        if data is None or len(data) != 4:
            raise StreamSynthesisError(
                "Failed to receive first byte of header")
        blob_header_size = struct.unpack("!I", data)[0]
        blob_header_raw = self._recv_all(blob_header_size)
        blob_header = NetworkUtil.decoding(blob_header_raw)
        blob_size = blob_header.get(Cloudlet_Const.META_OVERLAY_FILE_SIZE)
        if blob_size is None:
            raise StreamSynthesisError("Failed to receive blob")
        if blob_size == 0:
            # A zero-size blob is the client's end-of-stream marker.
            analysis_mq.put("End of stream received from client at %f)" %
                            (time.time()))
            break
        blob_comp_type = blob_header.get(
            Cloudlet_Const.META_OVERLAY_FILE_COMPRESSION)
        blob_disk_chunk = blob_header.get(
            Cloudlet_Const.META_OVERLAY_FILE_DISK_CHUNKS)
        blob_memory_chunk = blob_header.get(
            Cloudlet_Const.META_OVERLAY_FILE_MEMORY_CHUNKS)

        # send ack right before getting the blob
        # (sendall, not send: send() may short-write even for 8 bytes)
        ack_data = struct.pack("!Q", 0x01)
        self.request.sendall(ack_data)
        compressed_blob = self._recv_all(blob_size, ack_size=200 * 1024)
        # send ack right after getting the blob
        ack_data = struct.pack("!Q", 0x02)
        self.request.sendall(ack_data)
        network_out_queue.put((blob_comp_type, compressed_blob))
        # TODO: remove the interweaving of the valid bit here
        # TODO: and change the code path in cloudlet_driver.py so that
        # TODO: it uses the chunk sets in favor of the tuples
        if via_openstack:
            memory_chunk_set = set(
                ["%ld:1" % item for item in blob_memory_chunk])
            disk_chunk_set = set(
                ["%ld:1" % item for item in blob_disk_chunk])
            memory_chunk_all.update(memory_chunk_set)
            disk_chunk_all.update(disk_chunk_set)
        else:
            memory_chunk_all.update(blob_memory_chunk)
            disk_chunk_all.update(blob_disk_chunk)
        recv_blob_counter += 1
        analysis_mq.put("B,R,%d" % (recv_blob_counter))

    # Signal the pipeline that the stream ended, then drain it.
    network_out_queue.put(Cloudlet_Const.QUEUE_SUCCESS_MESSAGE)
    delta_proc.join()
    LOG.debug("%f\tdeltaproc join" % (time.time()))
    analysis_mq.put("Adaptive VM Handoff Complete!")
    analysis_mq.put("=" * 50)
    analysis_mq.put("!E_O_Q!")
    analysis_proc.join()

    if via_openstack:
        ack_data = struct.pack("!Qd", 0x10, time.time())
        LOG.info("send ack to client: %d" % len(ack_data))
        self.request.sendall(ack_data)
        disk_overlay_map = ','.join(disk_chunk_all)
        memory_overlay_map = ','.join(memory_chunk_all)
        # NOTE: fuse and synthesis take place in cloudlet_driver.py when
        # launched from openstack but this data must be written to stdout
        # so the pipe connected to cloudlet_driver.py can finish the handoff
        # TODO: instead of sending this stdout buffer over the pipe to
        # TODO: cloudlet_driver.py, we should probably move to
        # TODO: multiprocessing.Pipe or Queue to avoid issues with other
        # TODO: items being dumped to stdout and causing problems with this
        # TODO: data being sent back; i.e. anything written via LOG after
        # TODO: this will end up in stdout because the logger has a
        # TODO: StreamHandler configured to use stdout
        sys.stdout.write("openstack\t%s\t%s\t%s\t%s" %
                         (launch_disk_size, launch_memory_size,
                          disk_overlay_map, memory_overlay_map))
    else:
        # We told to FUSE that we have everything ready, so we need to wait
        # until delta_proc finishes; we cannot start VM before delta_proc
        # finishes, because we don't know what will be modified in the future
        time_fuse_start = time.time()
        fuse = run_fuse(Cloudlet_Const.CLOUDLETFS_PATH,
                        Cloudlet_Const.CHUNK_SIZE,
                        base_diskpath, launch_disk_size,
                        base_mempath, launch_memory_size,
                        resumed_disk=launch_disk,
                        disk_chunks=disk_chunk_all,
                        resumed_memory=launch_mem,
                        memory_chunks=memory_chunk_all,
                        valid_bit=1)
        time_fuse_end = time.time()

        synthesized_vm = SynthesizedVM(launch_disk, launch_mem, fuse)
        synthesized_vm.start()
        synthesized_vm.join()

        # since libvirt does not return immediately after resuming VM, we
        # measure resume time directly from QEMU
        # (with-block fixes the original's leaked file handle)
        actual_resume_time = 0
        with open("/tmp/qemu_debug_messages", "r") as qemu_log:
            splited_log = qemu_log.read().split("\n")
        for line in splited_log:
            if line.startswith("INCOMING_FINISH"):
                actual_resume_time = float(line.split(" ")[-1])
        LOG.info("[time] non-pipelined time %f (%f ~ %f ~ %f)" % (
            actual_resume_time - time_fuse_start,
            time_fuse_start,
            time_fuse_end,
            actual_resume_time,
        ))
        ack_data = struct.pack("!Qd", 0x10, actual_resume_time)
        LOG.info("send ack to client: %d" % len(ack_data))
        self.request.sendall(ack_data)
        connect_vnc(synthesized_vm.machine, True)
        # Block until SIGUSR1 tells us to tear the VM down.
        signal.signal(signal.SIGUSR1, handlesig)
        signal.pause()
        synthesized_vm.monitor.terminate()
        synthesized_vm.monitor.join()
        synthesized_vm.terminate()
def handle(self):
    """Handle a VM-synthesis request from the client.

    Each request follows this format:

    | header size | header | blob header size | blob header | blob data  |
    |  (4 bytes)  | (var)  |    (4 bytes)     | (var bytes) | (var bytes)|

    Raises:
        StreamSynthesisError: on a short/failed header read, a blob header
            without a size field, or when no matching base VM is found.
    """
    if self.server.handoff_data is not None:
        LOG.debug("VM synthesis using OpenStack")
    else:
        LOG.debug("VM synthesis as standalone")

    # Byte counters consumed by _recv_all() for progress accounting.
    self.total_recved_size_cur = 0
    self.total_recved_size_prev = 0

    # --- receive and decode the synthesis metadata header ---
    data = self._recv_all(4)
    if data is None or len(data) != 4:
        raise StreamSynthesisError(
            "Failed to receive first byte of header")
    message_size = struct.unpack("!I", data)[0]
    msgpack_data = self._recv_all(message_size)
    metadata = NetworkUtil.decoding(msgpack_data)
    launch_disk_size = metadata[Cloudlet_Const.META_RESUME_VM_DISK_SIZE]
    launch_memory_size = metadata[
        Cloudlet_Const.META_RESUME_VM_MEMORY_SIZE]

    synthesis_option, base_diskpath = self._check_validity(metadata)
    if base_diskpath is None:
        raise StreamSynthesisError("No matching base VM")
    if self.server.handoff_data:
        base_diskpath, base_diskmeta, base_mempath, base_memmeta =\
            self.server.handoff_data.base_vm_paths
    else:
        (base_diskmeta, base_mempath, base_memmeta) = \
            Cloudlet_Const.get_basepath(base_diskpath, check_exist=True)
    LOG.info(" - %s" % str(pformat(self.synthesis_option)))
    LOG.info(" - Base VM : %s" % base_diskpath)

    # --- temp paths handed to FUSE for the resumed disk/memory images ---
    temp_synthesis_dir = tempfile.mkdtemp(prefix="cloudlet-comp-")
    launch_disk = os.path.join(temp_synthesis_dir, "launch-disk")
    launch_mem = os.path.join(temp_synthesis_dir, "launch-mem")
    memory_chunk_all = set()
    disk_chunk_all = set()

    # --- start the pipelined decompress -> delta-recover processes ---
    network_out_queue = multiprocessing.Queue()
    decomp_queue = multiprocessing.Queue()
    fuse_info_queue = multiprocessing.Queue()
    decomp_proc = DecompProc(network_out_queue, decomp_queue, num_proc=4)
    decomp_proc.start()
    LOG.info("Start Decompression process")
    delta_proc = RecoverDeltaProc(base_diskpath, base_mempath,
                                  decomp_queue,
                                  launch_mem,
                                  launch_disk,
                                  Cloudlet_Const.CHUNK_SIZE,
                                  fuse_info_queue)
    delta_proc.start()
    LOG.info("Start Synthesis process")

    # --- receive each overlay blob until the zero-size end marker ---
    recv_blob_counter = 0
    while True:
        data = self._recv_all(4)
        if data is None or len(data) != 4:
            # (original had an unreachable `break` after this raise)
            raise StreamSynthesisError(
                "Failed to receive first byte of header")
        blob_header_size = struct.unpack("!I", data)[0]
        blob_header_raw = self._recv_all(blob_header_size)
        blob_header = NetworkUtil.decoding(blob_header_raw)
        blob_size = blob_header.get(Cloudlet_Const.META_OVERLAY_FILE_SIZE)
        if blob_size is None:
            raise StreamSynthesisError("Failed to receive blob")
        if blob_size == 0:
            # A zero-size blob is the client's end-of-stream marker.
            LOG.debug("%f\tend of stream" % (time.time()))
            break
        blob_comp_type = blob_header.get(
            Cloudlet_Const.META_OVERLAY_FILE_COMPRESSION)
        blob_disk_chunk = blob_header.get(
            Cloudlet_Const.META_OVERLAY_FILE_DISK_CHUNKS)
        blob_memory_chunk = blob_header.get(
            Cloudlet_Const.META_OVERLAY_FILE_MEMORY_CHUNKS)

        # send ack right before getting the blob
        # (sendall, not send: send() may short-write even for 8 bytes)
        ack_data = struct.pack("!Q", 0x01)
        self.request.sendall(ack_data)
        compressed_blob = self._recv_all(blob_size, ack_size=200 * 1024)
        # send ack right after getting the blob
        ack_data = struct.pack("!Q", 0x02)
        self.request.sendall(ack_data)
        network_out_queue.put((blob_comp_type, compressed_blob))
        # "chunk:1" strings carry the valid bit expected by run_fuse.
        memory_chunk_set = set(
            ["%ld:1" % item for item in blob_memory_chunk])
        disk_chunk_set = set(["%ld:1" % item for item in blob_disk_chunk])
        memory_chunk_all.update(memory_chunk_set)
        disk_chunk_all.update(disk_chunk_set)
        LOG.debug("%f\treceive one blob" % (time.time()))
        recv_blob_counter += 1

    # Signal the pipeline that the stream ended, then drain it.
    network_out_queue.put(Cloudlet_Const.QUEUE_SUCCESS_MESSAGE)
    delta_proc.join()
    LOG.debug("%f\tdeltaproc join" % (time.time()))

    # We told to FUSE that we have everything ready, so we need to wait
    # until delta_proc finishes; we cannot start VM before delta_proc
    # finishes, because we don't know what will be modified in the future
    time_fuse_start = time.time()
    disk_overlay_map = ','.join(disk_chunk_all)
    memory_overlay_map = ','.join(memory_chunk_all)
    fuse = run_fuse(Cloudlet_Const.CLOUDLETFS_PATH,
                    Cloudlet_Const.CHUNK_SIZE,
                    base_diskpath, launch_disk_size,
                    base_mempath, launch_memory_size,
                    resumed_disk=launch_disk,
                    disk_overlay_map=disk_overlay_map,
                    resumed_memory=launch_mem,
                    memory_overlay_map=memory_overlay_map)
    time_fuse_end = time.time()

    if self.server.handoff_data:
        synthesized_vm = SynthesizedVM(
            launch_disk, launch_mem, fuse,
            disk_only=False, qemu_args=None,
            nova_xml=self.server.handoff_data.libvirt_xml,
            nova_conn=self.server.handoff_data._conn,
            nova_util=self.server.handoff_data._libvirt_utils)
    else:
        synthesized_vm = SynthesizedVM(launch_disk, launch_mem, fuse)
    synthesized_vm.start()
    synthesized_vm.join()

    # since libvirt does not return immediately after resuming VM, we
    # measure resume time directly from QEMU
    # (with-block fixes the original's leaked file handle)
    actual_resume_time = 0
    with open("/tmp/qemu_debug_messages", "r") as qemu_log:
        splited_log = qemu_log.read().split("\n")
    for line in splited_log:
        if line.startswith("INCOMING_FINISH"):
            actual_resume_time = float(line.split(" ")[-1])
    LOG.info("[time] non-pipelined time %f (%f ~ %f ~ %f)" % (
        actual_resume_time - time_fuse_start,
        time_fuse_start,
        time_fuse_end,
        actual_resume_time,
    ))

    if self.server.handoff_data is None:
        # for a standalone version, terminate a VM for the next testing
        #connect_vnc(synthesized_vm.machine)
        LOG.debug("Finishing VM in 3 seconds")
        time.sleep(3)
        synthesized_vm.monitor.terminate()
        synthesized_vm.monitor.join()
        synthesized_vm.terminate()

    # send end message
    ack_data = struct.pack("!Qd", 0x10, actual_resume_time)
    LOG.info("send ack to client: %d" % len(ack_data))
    self.request.sendall(ack_data)
    LOG.info("finished")
def handle(self):
    """Handle a VM-synthesis request from the client.

    Each request follows this format:

    | header size | header | blob header size | blob header | blob data  |
    |  (4 bytes)  | (var)  |    (4 bytes)     | (var bytes) | (var bytes)|

    Raises:
        StreamSynthesisError: on a short/failed header read, a blob header
            without a size field, or when no matching base VM is found.
    """
    if self.server.handoff_data is not None:
        LOG.debug("VM synthesis using OpenStack")
    else:
        LOG.debug("VM synthesis as standalone")

    # Byte counters consumed by _recv_all() for progress accounting.
    self.total_recved_size_cur = 0
    self.total_recved_size_prev = 0

    # --- receive and decode the synthesis metadata header ---
    data = self._recv_all(4)
    if data is None or len(data) != 4:
        raise StreamSynthesisError("Failed to receive first byte of header")
    message_size = struct.unpack("!I", data)[0]
    msgpack_data = self._recv_all(message_size)
    metadata = NetworkUtil.decoding(msgpack_data)
    launch_disk_size = metadata[Cloudlet_Const.META_RESUME_VM_DISK_SIZE]
    launch_memory_size = metadata[Cloudlet_Const.META_RESUME_VM_MEMORY_SIZE]

    synthesis_option, base_diskpath = self._check_validity(metadata)
    if base_diskpath is None:
        raise StreamSynthesisError("No matching base VM")
    if self.server.handoff_data:
        base_diskpath, base_diskmeta, base_mempath, base_memmeta =\
            self.server.handoff_data.base_vm_paths
    else:
        (base_diskmeta, base_mempath, base_memmeta) = \
            Cloudlet_Const.get_basepath(base_diskpath, check_exist=True)
    LOG.info(" - %s" % str(pformat(self.synthesis_option)))
    LOG.info(" - Base VM : %s" % base_diskpath)

    # --- temp paths handed to FUSE for the resumed disk/memory images ---
    temp_synthesis_dir = tempfile.mkdtemp(prefix="cloudlet-comp-")
    launch_disk = os.path.join(temp_synthesis_dir, "launch-disk")
    launch_mem = os.path.join(temp_synthesis_dir, "launch-mem")
    memory_chunk_all = set()
    disk_chunk_all = set()

    # --- start the pipelined decompress -> delta-recover processes ---
    network_out_queue = multiprocessing.Queue()
    decomp_queue = multiprocessing.Queue()
    fuse_info_queue = multiprocessing.Queue()
    decomp_proc = DecompProc(network_out_queue, decomp_queue, num_proc=4)
    decomp_proc.start()
    LOG.info("Start Decompression process")
    delta_proc = RecoverDeltaProc(base_diskpath, base_mempath,
                                  decomp_queue, launch_mem, launch_disk,
                                  Cloudlet_Const.CHUNK_SIZE,
                                  fuse_info_queue)
    delta_proc.start()
    LOG.info("Start Synthesis process")

    # --- receive each overlay blob until the zero-size end marker ---
    recv_blob_counter = 0
    while True:
        data = self._recv_all(4)
        if data is None or len(data) != 4:
            # (original had an unreachable `break` after this raise)
            raise StreamSynthesisError("Failed to receive first byte of header")
        blob_header_size = struct.unpack("!I", data)[0]
        blob_header_raw = self._recv_all(blob_header_size)
        blob_header = NetworkUtil.decoding(blob_header_raw)
        blob_size = blob_header.get(Cloudlet_Const.META_OVERLAY_FILE_SIZE)
        if blob_size is None:
            raise StreamSynthesisError("Failed to receive blob")
        if blob_size == 0:
            # A zero-size blob is the client's end-of-stream marker.
            LOG.debug("%f\tend of stream" % (time.time()))
            break
        blob_comp_type = blob_header.get(Cloudlet_Const.META_OVERLAY_FILE_COMPRESSION)
        blob_disk_chunk = blob_header.get(Cloudlet_Const.META_OVERLAY_FILE_DISK_CHUNKS)
        blob_memory_chunk = blob_header.get(Cloudlet_Const.META_OVERLAY_FILE_MEMORY_CHUNKS)

        # send ack right before getting the blob
        # (sendall, not send: send() may short-write even for 8 bytes)
        ack_data = struct.pack("!Q", 0x01)
        self.request.sendall(ack_data)
        compressed_blob = self._recv_all(blob_size, ack_size=200*1024)
        # send ack right after getting the blob
        ack_data = struct.pack("!Q", 0x02)
        self.request.sendall(ack_data)
        network_out_queue.put((blob_comp_type, compressed_blob))
        # "chunk:1" strings carry the valid bit expected by run_fuse.
        memory_chunk_set = set(["%ld:1" % item for item in blob_memory_chunk])
        disk_chunk_set = set(["%ld:1" % item for item in blob_disk_chunk])
        memory_chunk_all.update(memory_chunk_set)
        disk_chunk_all.update(disk_chunk_set)
        LOG.debug("%f\treceive one blob" % (time.time()))
        recv_blob_counter += 1

    # Signal the pipeline that the stream ended, then drain it.
    network_out_queue.put(Cloudlet_Const.QUEUE_SUCCESS_MESSAGE)
    delta_proc.join()
    LOG.debug("%f\tdeltaproc join" % (time.time()))

    # We told to FUSE that we have everything ready, so we need to wait
    # until delta_proc finishes; we cannot start VM before delta_proc
    # finishes, because we don't know what will be modified in the future
    time_fuse_start = time.time()
    disk_overlay_map = ','.join(disk_chunk_all)
    memory_overlay_map = ','.join(memory_chunk_all)
    fuse = run_fuse(Cloudlet_Const.CLOUDLETFS_PATH,
                    Cloudlet_Const.CHUNK_SIZE,
                    base_diskpath, launch_disk_size,
                    base_mempath, launch_memory_size,
                    resumed_disk=launch_disk,
                    disk_overlay_map=disk_overlay_map,
                    resumed_memory=launch_mem,
                    memory_overlay_map=memory_overlay_map)
    time_fuse_end = time.time()

    if self.server.handoff_data:
        synthesized_vm = SynthesizedVM(
            launch_disk, launch_mem, fuse,
            disk_only=False, qemu_args=None,
            nova_xml=self.server.handoff_data.libvirt_xml,
            nova_conn=self.server.handoff_data._conn,
            nova_util=self.server.handoff_data._libvirt_utils
        )
    else:
        synthesized_vm = SynthesizedVM(launch_disk, launch_mem, fuse)
    synthesized_vm.start()
    synthesized_vm.join()

    # since libvirt does not return immediately after resuming VM, we
    # measure resume time directly from QEMU
    # (with-block fixes the original's leaked file handle)
    actual_resume_time = 0
    with open("/tmp/qemu_debug_messages", "r") as qemu_log:
        splited_log = qemu_log.read().split("\n")
    for line in splited_log:
        if line.startswith("INCOMING_FINISH"):
            actual_resume_time = float(line.split(" ")[-1])
    LOG.info("[time] non-pipelined time %f (%f ~ %f ~ %f)" % (
        actual_resume_time - time_fuse_start,
        time_fuse_start,
        time_fuse_end,
        actual_resume_time,
    ))

    if self.server.handoff_data is None:
        # for a standalone version, terminate a VM for the next testing
        #connect_vnc(synthesized_vm.machine)
        LOG.debug("Finishing VM in 3 seconds")
        time.sleep(3)
        synthesized_vm.monitor.terminate()
        synthesized_vm.monitor.join()
        synthesized_vm.terminate()

    # send end message
    ack_data = struct.pack("!Qd", 0x10, actual_resume_time)
    LOG.info("send ack to client: %d" % len(ack_data))
    self.request.sendall(ack_data)
    LOG.info("finished")
def handle(self):
    """Handle an adaptive VM-handoff request (with per-iteration stats).

    Each request follows this format:

    | header size | header | blob header size | blob header | blob data  |
    |  (4 bytes)  | (var)  |    (4 bytes)     | (var bytes) | (var bytes)|

    After the blob stream, the client sends one extra 4-byte big-endian
    integer (the iteration count) that is forwarded to the analysis process.

    Raises:
        StreamSynthesisError: on a short/failed read of any length-prefixed
            field, a blob header without a size field, or when no matching
            base VM is found.
    """
    # Byte counters consumed by _recv_all() for progress accounting.
    self.total_recved_size_cur = 0
    self.total_recved_size_prev = 0

    # --- receive and decode the handoff metadata header ---
    data = self._recv_all(4)
    if data is None or len(data) != 4:
        raise StreamSynthesisError("Failed to receive first byte of header")
    message_size = struct.unpack("!I", data)[0]
    msgpack_data = self._recv_all(message_size)
    metadata = NetworkUtil.decoding(msgpack_data)
    launch_disk_size = metadata[Cloudlet_Const.META_RESUME_VM_DISK_SIZE]
    launch_memory_size = metadata[Cloudlet_Const.META_RESUME_VM_MEMORY_SIZE]

    # Side-channel process that records/report handoff progress messages.
    analysis_mq = multiprocessing.Queue()
    analysis_proc = HandoffAnalysisProc(handoff_url=self.client_address[0],
                                        message_queue=analysis_mq,
                                        disk_size=launch_disk_size,
                                        mem_size=launch_memory_size)
    analysis_proc.start()

    analysis_mq.put("=" * 50)
    analysis_mq.put("Adaptive VM Handoff Initiated")
    # client_address is a tuple (ip, port)
    analysis_mq.put("Client Connection - %s:%d" %
                    (self.client_address[0], self.client_address[1]))
    if self.server.handoff_data is not None:
        analysis_mq.put("Handoff via OpenStack")
        via_openstack = True
    else:
        analysis_mq.put("Handoff via cloudlet CLI")
        via_openstack = False

    synthesis_option, base_diskpath = self._check_validity(metadata)
    if base_diskpath is None:
        raise StreamSynthesisError("No matching base VM")
    if via_openstack:
        # NOTE(review): unpack order (disk, mem, diskmeta, memmeta) differs
        # from the other handler variants in this file — confirm against
        # the producer of handoff_data.base_vm_paths.
        base_diskpath, base_mempath, base_diskmeta, base_memmeta = \
            self.server.handoff_data.base_vm_paths
    else:
        (base_diskmeta, base_mempath, base_memmeta) = \
            Cloudlet_Const.get_basepath(base_diskpath, check_exist=True)
    analysis_mq.put("Synthesis Options %s" %
                    str(pformat(self.synthesis_option)))
    analysis_mq.put("Base VM Path: %s" % base_diskpath)
    analysis_mq.put("Image Disk Size: %d" % launch_disk_size)
    analysis_mq.put("Image Memory Size: %d" % launch_memory_size)
    analysis_mq.put("=" * 50)

    # --- paths handed to FUSE for the resumed disk/memory images ---
    if via_openstack:
        launch_disk = self.server.handoff_data.launch_diskpath
        launch_mem = self.server.handoff_data.launch_memorypath
    else:
        temp_synthesis_dir = tempfile.mkdtemp(prefix="cloudlet-comp-")
        launch_disk = os.path.join(temp_synthesis_dir, "launch-disk")
        launch_mem = os.path.join(temp_synthesis_dir, "launch-mem")
    memory_chunk_all = set()
    disk_chunk_all = set()

    # --- start the pipelined decompress -> delta-recover processes ---
    network_out_queue = multiprocessing.Queue()
    decomp_queue = multiprocessing.Queue()
    fuse_info_queue = multiprocessing.Queue()
    decomp_proc = DecompProc(network_out_queue, decomp_queue, num_proc=4,
                             analysis_queue=analysis_mq)
    decomp_proc.start()
    analysis_mq.put("Starting (%d) decompression processes..." %
                    (decomp_proc.num_proc))
    delta_proc = RecoverDeltaProc(base_diskpath, base_mempath,
                                  decomp_queue,
                                  launch_mem,
                                  launch_disk,
                                  Cloudlet_Const.CHUNK_SIZE,
                                  fuse_info_queue,
                                  analysis_mq)
    delta_proc.start()
    analysis_mq.put("Starting delta recovery process...")

    # --- receive each overlay blob until the zero-size end marker ---
    recv_blob_counter = 0
    while True:
        data = self._recv_all(4)
        if data is None or len(data) != 4:
            raise StreamSynthesisError("Failed to receive first byte of header")
        blob_header_size = struct.unpack("!I", data)[0]
        blob_header_raw = self._recv_all(blob_header_size)
        blob_header = NetworkUtil.decoding(blob_header_raw)
        blob_size = blob_header.get(Cloudlet_Const.META_OVERLAY_FILE_SIZE)
        if blob_size is None:
            raise StreamSynthesisError("Failed to receive blob")
        if blob_size == 0:
            # A zero-size blob is the client's end-of-stream marker.
            analysis_mq.put("End of stream received from client at %f)" %
                            (time.time()))
            break
        blob_comp_type = blob_header.get(Cloudlet_Const.META_OVERLAY_FILE_COMPRESSION)
        blob_disk_chunk = blob_header.get(Cloudlet_Const.META_OVERLAY_FILE_DISK_CHUNKS)
        blob_memory_chunk = blob_header.get(Cloudlet_Const.META_OVERLAY_FILE_MEMORY_CHUNKS)

        # send ack right before getting the blob
        # (sendall, not send: send() may short-write even for 8 bytes)
        ack_data = struct.pack("!Q", 0x01)
        self.request.sendall(ack_data)
        compressed_blob = self._recv_all(blob_size, ack_size=200*1024)
        # send ack right after getting the blob
        ack_data = struct.pack("!Q", 0x02)
        self.request.sendall(ack_data)
        network_out_queue.put((blob_comp_type, compressed_blob))
        # TODO: remove the interweaving of the valid bit here
        # TODO: and change the code path in cloudlet_driver.py so that
        # TODO: it uses the chunk sets in favor of the tuples
        if via_openstack:
            memory_chunk_set = set(["%ld:1" % item for item in blob_memory_chunk])
            disk_chunk_set = set(["%ld:1" % item for item in blob_disk_chunk])
            memory_chunk_all.update(memory_chunk_set)
            disk_chunk_all.update(disk_chunk_set)
        else:
            memory_chunk_all.update(blob_memory_chunk)
            disk_chunk_all.update(blob_disk_chunk)
        recv_blob_counter += 1
        analysis_mq.put("B,R,%d" % (recv_blob_counter))

    # Trailing 4-byte iteration count from the client, forwarded to the
    # analysis process. Validate the read like every other length-prefixed
    # field (the original unpacked it unchecked, so a short read raised a
    # bare TypeError/struct.error). Also renamed the original local `iter`,
    # which shadowed the builtin.
    data = self._recv_all(4)
    if data is None or len(data) != 4:
        raise StreamSynthesisError("Failed to receive iteration count")
    iteration = struct.unpack("!I", data)[0]
    analysis_mq.put("iter,%d" % (iteration))

    # Signal the pipeline that the stream ended, then drain it.
    network_out_queue.put(Cloudlet_Const.QUEUE_SUCCESS_MESSAGE)
    delta_proc.join()
    LOG.debug("%f\tdeltaproc join" % (time.time()))
    analysis_mq.put("Adaptive VM Handoff Complete!")
    analysis_mq.put("=" * 50)
    analysis_mq.put("!E_O_Q!")
    analysis_proc.join()

    if via_openstack:
        ack_data = struct.pack("!Qd", 0x10, time.time())
        LOG.info("send ack to client: %d" % len(ack_data))
        self.request.sendall(ack_data)
        disk_overlay_map = ','.join(disk_chunk_all)
        memory_overlay_map = ','.join(memory_chunk_all)
        # NOTE: fuse and synthesis take place in cloudlet_driver.py when
        # launched from openstack but this data must be written to stdout
        # so the pipe connected to cloudlet_driver.py can finish the handoff
        # TODO: instead of sending this stdout buffer over the pipe to
        # TODO: cloudlet_driver.py, we should probably move to
        # TODO: multiprocessing.Pipe or Queue to avoid issues with other
        # TODO: items being dumped to stdout and causing problems with this
        # TODO: data being sent back; i.e. anything written via LOG after
        # TODO: this will end up in stdout because the logger has a
        # TODO: StreamHandler configured to use stdout
        sys.stdout.write("openstack\t%s\t%s\t%s\t%s" %
                         (launch_disk_size, launch_memory_size,
                          disk_overlay_map, memory_overlay_map))
    else:
        # We told to FUSE that we have everything ready, so we need to wait
        # until delta_proc finishes; we cannot start VM before delta_proc
        # finishes, because we don't know what will be modified in the future
        time_fuse_start = time.time()
        fuse = run_fuse(Cloudlet_Const.CLOUDLETFS_PATH,
                        Cloudlet_Const.CHUNK_SIZE,
                        base_diskpath, launch_disk_size,
                        base_mempath, launch_memory_size,
                        resumed_disk=launch_disk,
                        disk_chunks=disk_chunk_all,
                        resumed_memory=launch_mem,
                        memory_chunks=memory_chunk_all,
                        valid_bit=1)
        time_fuse_end = time.time()

        synthesized_vm = SynthesizedVM(launch_disk, launch_mem, fuse)
        synthesized_vm.start()
        synthesized_vm.join()

        # since libvirt does not return immediately after resuming VM, we
        # measure resume time directly from QEMU
        # (with-block fixes the original's leaked file handle)
        actual_resume_time = 0
        with open("/tmp/qemu_debug_messages", "r") as qemu_log:
            splited_log = qemu_log.read().split("\n")
        for line in splited_log:
            if line.startswith("INCOMING_FINISH"):
                actual_resume_time = float(line.split(" ")[-1])
        LOG.info("[time] non-pipelined time %f (%f ~ %f ~ %f)" % (
            actual_resume_time - time_fuse_start,
            time_fuse_start,
            time_fuse_end,
            actual_resume_time,
        ))
        ack_data = struct.pack("!Qd", 0x10, actual_resume_time)
        LOG.info("send ack to client: %d" % len(ack_data))
        self.request.sendall(ack_data)
        connect_vnc(synthesized_vm.machine, True)
        # Block until SIGUSR1 tells us to tear the VM down.
        signal.signal(signal.SIGUSR1, handlesig)
        signal.pause()
        synthesized_vm.monitor.terminate()
        synthesized_vm.monitor.join()
        synthesized_vm.terminate()