def _handle_synthesis_url(self, message):
    """Handle a client request to synthesize a VM from an overlay URL.

    Validates the overlay meta info referenced by *message*, registers an
    OverlayVM row for the session, then runs the synthesis pipeline:
    URL fetch -> decompress -> named pipe -> delta/FUSE recovery, resuming
    the synthesized VM either before (EARLY_START) or after the overlay
    transfer completes. Finally reports timing statistics and records all
    allocated resources under the session id.

    :param message: request dict; keys read here are
        Protocol.KEY_OVERLAY_URL and Protocol.KEY_SESSION_ID (full schema
        assumed from the client protocol -- confirm against the sender).
    :return: None. Outcome is reported to the client via ret_success /
        ret_fail / send_synthesis_done.
    """
    LOG.info("\n\n----------------------- New Connection --------------")

    # check overlay meta info
    start_time = time.time()
    header_start_time = time.time()
    base_path, meta_info = self._check_url_validity(message)
    if meta_info is None:
        self.ret_fail("cannot access overlay URL")
        return
    if base_path is None:
        self.ret_fail("No matching Base VM")
        return
    if meta_info.get(Cloudlet_Const.META_OVERLAY_FILES, None) is None:
        self.ret_fail("No overlay files are listed")
        return

    # return success get overlay URL
    self.ret_success(Protocol.MESSAGE_COMMAND_SEND_META)
    overlay_url = message.get(Protocol.KEY_OVERLAY_URL)
    overlay_package = VMOverlayPackage(overlay_url)

    # update DB
    session_id = message.get(Protocol.KEY_SESSION_ID, None)
    new_overlayvm = OverlayVM(session_id, base_path)
    self.server.dbconn.add_item(new_overlayvm)

    # start synthesis process: the Manager-backed list/dict are shared
    # with the URL-fetch subprocess started below
    url_manager = Manager()
    overlay_urls = url_manager.list()
    overlay_urls_size = url_manager.dict()
    for blob in meta_info[Cloudlet_Const.META_OVERLAY_FILES]:
        url = blob[Cloudlet_Const.META_OVERLAY_FILE_NAME]
        size = blob[Cloudlet_Const.META_OVERLAY_FILE_SIZE]
        overlay_urls.append(url)
        overlay_urls_size[url] = size
    LOG.info(" - %s" % str(pformat(self.synthesis_option)))
    LOG.info(" - Base VM : %s" % base_path)
    LOG.info(" - Blob count : %d" % len(overlay_urls))
    # BUGFIX: the original guard was `overlay_urls == None`, which can
    # never be true for a Manager().list() proxy, making the check dead.
    # Test for emptiness instead so a meta file listing zero blobs fails
    # fast here rather than stalling the pipeline later.
    if not overlay_urls:
        self.ret_fail("No overlay info listed")
        return
    (base_diskmeta, base_mem, base_memmeta) = \
        Cloudlet_Const.get_basepath(base_path, check_exist=True)
    header_end_time = time.time()
    LOG.info("Meta header processing time: %f" % (header_end_time-header_start_time))

    # read overlay files
    # create named pipe to convert queue to stream
    time_transfer = Queue()
    time_decomp = Queue()
    time_delta = Queue()
    time_fuse = Queue()
    self.tmp_overlay_dir = tempfile.mkdtemp()
    self.overlay_pipe = os.path.join(self.tmp_overlay_dir, 'overlay_pipe')
    os.mkfifo(self.overlay_pipe)

    # save overlay decomp result for measurement
    # NOTE(review): temp_overlay_file is handed to DecompStepProc and never
    # explicitly closed in this handler -- presumably relies on process
    # teardown; confirm before adding a close() here.
    temp_overlay_file = None
    if self.synthesis_option.get(Protocol.SYNTHESIS_OPTION_SHOW_STATISTICS):
        temp_overlay_filepath = os.path.join(self.tmp_overlay_dir, "overlay_file")
        temp_overlay_file = open(temp_overlay_filepath, "w+b")

    # overlay pipeline: fetch -> download_queue -> decompress -> named pipe
    demanding_queue = Queue()
    download_queue = JoinableQueue()
    download_process = URLFetchStep(overlay_package, overlay_urls,
                                    overlay_urls_size, demanding_queue,
                                    download_queue, time_transfer,
                                    Synthesis_Const.TRANSFER_SIZE)
    decomp_process = DecompStepProc(download_queue, self.overlay_pipe,
                                    time_decomp, temp_overlay_file)
    modified_img, modified_mem, self.fuse, self.delta_proc, self.fuse_proc = \
        synthesis.recover_launchVM(base_path, meta_info, self.overlay_pipe,
                                   log=sys.stdout,
                                   demanding_queue=demanding_queue)
    self.delta_proc.time_queue = time_delta  # for measurement
    self.fuse_proc.time_queue = time_fuse  # for measurement

    if self.synthesis_option.get(Protocol.SYNTHESIS_OPTION_EARLY_START, False):
        # 1. resume VM
        self.resumed_VM = synthesis.SynthesizedVM(modified_img, modified_mem,
                                                  self.fuse)
        time_start_resume = time.time()
        self.resumed_VM.start()
        time_end_resume = time.time()

        # 2. start processes
        download_process.start()
        decomp_process.start()
        self.delta_proc.start()
        self.fuse_proc.start()

        # 3. return success right after resuming VM
        # before receiving all chunks
        self.resumed_VM.join()
        self.send_synthesis_done()

        # 4. then wait fuse end
        self.fuse_proc.join()
    else:
        # 1. start processes
        download_process.start()
        decomp_process.start()
        self.delta_proc.start()
        self.fuse_proc.start()

        # 2. resume VM
        self.resumed_VM = synthesis.SynthesizedVM(modified_img, modified_mem,
                                                  self.fuse)
        self.resumed_VM.start()

        # 3. wait for fuse end
        self.fuse_proc.join()

        # 4. return success to client
        time_start_resume = time.time()  # measure pure resume time
        self.resumed_VM.join()
        time_end_resume = time.time()
        self.send_synthesis_done()

    end_time = time.time()

    # printout result
    SynthesisHandler.print_statistics(start_time, end_time,
                                      time_transfer, time_decomp,
                                      time_delta, time_fuse,
                                      resume_time=(time_end_resume-time_start_resume))

    if self.synthesis_option.get(Protocol.SYNTHESIS_OPTION_DISPLAY_VNC, False):
        synthesis.connect_vnc(self.resumed_VM.machine, no_wait=True)

    # save all the resource to the session resource
    global session_resources
    s_resource = SessionResource(session_id)
    s_resource.add(SessionResource.DELTA_PROCESS, self.delta_proc)
    s_resource.add(SessionResource.RESUMED_VM, self.resumed_VM)
    s_resource.add(SessionResource.FUSE, self.fuse)
    s_resource.add(SessionResource.OVERLAY_PIPE, self.overlay_pipe)
    s_resource.add(SessionResource.OVERLAY_DIR, self.tmp_overlay_dir)
    s_resource.add(SessionResource.OVERLAY_DB_ENTRY, new_overlayvm)
    session_resources[session_id] = s_resource
    LOG.info("Resource is allocated for Session: %s" % str(session_id))

    # printout synthesis statistics
    if self.synthesis_option.get(Protocol.SYNTHESIS_OPTION_SHOW_STATISTICS):
        mem_access_list = self.resumed_VM.monitor.mem_access_chunk_list
        disk_access_list = self.resumed_VM.monitor.disk_access_chunk_list
        synthesis.synthesis_statistics(meta_info, temp_overlay_filepath,
                                       mem_access_list, disk_access_list)
    LOG.info("[SOCKET] waiting for client exit message")
def _handle_synthesis_url(self, message):
    """Handle a client request to synthesize a VM from an overlay URL.

    Validates the overlay meta info referenced by *message*, registers an
    OverlayVM row for the session, then runs the synthesis pipeline
    (URL fetch -> decompress -> named pipe -> delta/FUSE recovery),
    resuming the synthesized VM either before (EARLY_START) or after the
    overlay transfer completes, and finally records timing statistics and
    the per-session resources.

    NOTE(review): this re-declares ``_handle_synthesis_url`` already
    defined earlier in this file; at class-creation time the later
    definition wins. Confirm which copy is intended and drop the other.

    :param message: request dict; keys read here are
        Protocol.KEY_OVERLAY_URL and Protocol.KEY_SESSION_ID (full schema
        assumed from the client protocol -- confirm against the sender).
    :return: None. Outcome is reported to the client via ret_success /
        ret_fail / send_synthesis_done.
    """
    LOG.info("\n\n----------------------- New Connection --------------")

    # check overlay meta info
    start_time = time.time()
    header_start_time = time.time()
    base_path, meta_info = self._check_url_validity(message)
    if meta_info is None:
        self.ret_fail("cannot access overlay URL")
        return
    if base_path is None:
        self.ret_fail("No matching Base VM")
        return
    if meta_info.get(Cloudlet_Const.META_OVERLAY_FILES, None) is None:
        self.ret_fail("No overlay files are listed")
        return

    # return success get overlay URL
    self.ret_success(Protocol.MESSAGE_COMMAND_SEND_META)
    overlay_url = message.get(Protocol.KEY_OVERLAY_URL)
    overlay_package = VMOverlayPackage(overlay_url)

    # update DB
    session_id = message.get(Protocol.KEY_SESSION_ID, None)
    new_overlayvm = OverlayVM(session_id, base_path)
    self.server.dbconn.add_item(new_overlayvm)

    # start synthesis process: the Manager-backed list/dict are shared
    # with the URL-fetch subprocess started below
    url_manager = Manager()
    overlay_urls = url_manager.list()
    overlay_urls_size = url_manager.dict()
    for blob in meta_info[Cloudlet_Const.META_OVERLAY_FILES]:
        url = blob[Cloudlet_Const.META_OVERLAY_FILE_NAME]
        size = blob[Cloudlet_Const.META_OVERLAY_FILE_SIZE]
        overlay_urls.append(url)
        overlay_urls_size[url] = size
    LOG.info(" - %s" % str(pformat(self.synthesis_option)))
    LOG.info(" - Base VM : %s" % base_path)
    LOG.info(" - Blob count : %d" % len(overlay_urls))
    # NOTE(review): `overlay_urls == None` can never be true -- it is a
    # Manager().list() proxy -- so this guard is dead; an emptiness test
    # (`not overlay_urls`) looks like the intent. Verify and fix.
    if overlay_urls == None:
        self.ret_fail("No overlay info listed")
        return
    (base_diskmeta, base_mem, base_memmeta) = \
        Cloudlet_Const.get_basepath(base_path, check_exist=True)
    header_end_time = time.time()
    LOG.info("Meta header processing time: %f" % (header_end_time - header_start_time))

    # read overlay files
    # create named pipe to convert queue to stream
    time_transfer = Queue()
    time_decomp = Queue()
    time_delta = Queue()
    time_fuse = Queue()
    self.tmp_overlay_dir = tempfile.mkdtemp()
    self.overlay_pipe = os.path.join(self.tmp_overlay_dir, 'overlay_pipe')
    os.mkfifo(self.overlay_pipe)

    # save overlay decomp result for measurement
    # NOTE(review): temp_overlay_file is handed to DecompStepProc and is
    # never explicitly closed in this handler -- presumably relies on
    # process teardown; confirm before adding a close() here.
    temp_overlay_file = None
    if self.synthesis_option.get(
            Protocol.SYNTHESIS_OPTION_SHOW_STATISTICS):
        temp_overlay_filepath = os.path.join(self.tmp_overlay_dir, "overlay_file")
        temp_overlay_file = open(temp_overlay_filepath, "w+b")

    # overlay pipeline: fetch -> download_queue -> decompress -> named pipe
    demanding_queue = Queue()
    download_queue = JoinableQueue()
    download_process = URLFetchStep(
        overlay_package, overlay_urls, overlay_urls_size,
        demanding_queue, download_queue, time_transfer,
        Synthesis_Const.TRANSFER_SIZE, )
    decomp_process = DecompStepProc(
        download_queue, self.overlay_pipe, time_decomp,
        temp_overlay_file, )
    modified_img, modified_mem, self.fuse, self.delta_proc, self.fuse_proc = \
        synthesis.recover_launchVM(base_path, meta_info, self.overlay_pipe,
                                   log=sys.stdout,
                                   demanding_queue=demanding_queue)
    self.delta_proc.time_queue = time_delta  # for measurement
    self.fuse_proc.time_queue = time_fuse  # for measurement

    if self.synthesis_option.get(Protocol.SYNTHESIS_OPTION_EARLY_START, False):
        # 1. resume VM
        self.resumed_VM = synthesis.SynthesizedVM(modified_img, modified_mem,
                                                  self.fuse)
        time_start_resume = time.time()
        self.resumed_VM.start()
        time_end_resume = time.time()

        # 2. start processes
        download_process.start()
        decomp_process.start()
        self.delta_proc.start()
        self.fuse_proc.start()

        # 3. return success right after resuming VM
        # before receiving all chunks
        self.resumed_VM.join()
        self.send_synthesis_done()

        # 4. then wait fuse end
        self.fuse_proc.join()
    else:
        # 1. start processes
        download_process.start()
        decomp_process.start()
        self.delta_proc.start()
        self.fuse_proc.start()

        # 2. resume VM
        self.resumed_VM = synthesis.SynthesizedVM(modified_img, modified_mem,
                                                  self.fuse)
        self.resumed_VM.start()

        # 3. wait for fuse end
        self.fuse_proc.join()

        # 4. return success to client
        time_start_resume = time.time()  # measure pure resume time
        self.resumed_VM.join()
        time_end_resume = time.time()
        self.send_synthesis_done()

    end_time = time.time()

    # printout result
    SynthesisHandler.print_statistics(start_time, end_time, \
            time_transfer, time_decomp, time_delta, time_fuse, \
            resume_time=(time_end_resume-time_start_resume))

    if self.synthesis_option.get(Protocol.SYNTHESIS_OPTION_DISPLAY_VNC, False):
        synthesis.connect_vnc(self.resumed_VM.machine, no_wait=True)

    # save all the resource to the session resource
    global session_resources
    s_resource = SessionResource(session_id)
    s_resource.add(SessionResource.DELTA_PROCESS, self.delta_proc)
    s_resource.add(SessionResource.RESUMED_VM, self.resumed_VM)
    s_resource.add(SessionResource.FUSE, self.fuse)
    s_resource.add(SessionResource.OVERLAY_PIPE, self.overlay_pipe)
    s_resource.add(SessionResource.OVERLAY_DIR, self.tmp_overlay_dir)
    s_resource.add(SessionResource.OVERLAY_DB_ENTRY, new_overlayvm)
    session_resources[session_id] = s_resource
    LOG.info("Resource is allocated for Session: %s" % str(session_id))

    # printout synthesis statistics
    if self.synthesis_option.get(
            Protocol.SYNTHESIS_OPTION_SHOW_STATISTICS):
        mem_access_list = self.resumed_VM.monitor.mem_access_chunk_list
        disk_access_list = self.resumed_VM.monitor.disk_access_chunk_list
        synthesis.synthesis_statistics(meta_info, temp_overlay_filepath, \
                mem_access_list, disk_access_list)
    LOG.info("[SOCKET] waiting for client exit message")