def _handle_synthesis(self, message):
    """Serve one VM-synthesis session over the connected socket.

    Validates the overlay meta header in *message*, records the session in
    the server DB, streams+decompresses the overlay blobs through a named
    pipe into the launch VM, resumes the VM (optionally before all chunks
    arrive, when EARLY_START is set), then waits for the client's FINISH
    command and reports statistics.

    :param message: msgpack-decoded request dict keyed by Protocol constants
    :raises RapidSynthesisError: if the client sends anything other than
        MESSAGE_COMMAND_FINISH at the end of streaming
    """
    Log.write("\n\n----------------------- New Connection --------------\n")

    # check overlay meta info
    start_time = time.time()
    header_start_time = time.time()
    base_path, meta_info = self._check_validity(message)
    session_id = message.get(Protocol.KEY_SESSIOIN_ID, None)
    if base_path and meta_info and meta_info.get(Cloudlet_Const.META_OVERLAY_FILES, None):
        self.ret_success(Protocol.MESSAGE_COMMAND_SEND_META)
    else:
        self.ret_fail("No matching Base VM")
        return

    # update DB
    new_overlayvm = OverlayVM(session_id, base_path)
    self.server.dbconn.add_item(new_overlayvm)

    # start synthesis process
    # Manager-backed list/dict so the download worker (thread/process)
    # shares the URL bookkeeping with this handler.
    url_manager = Manager()
    overlay_urls = url_manager.list()
    overlay_urls_size = url_manager.dict()
    for blob in meta_info[Cloudlet_Const.META_OVERLAY_FILES]:
        url = blob[Cloudlet_Const.META_OVERLAY_FILE_NAME]
        size = blob[Cloudlet_Const.META_OVERLAY_FILE_SIZE]
        overlay_urls.append(url)
        overlay_urls_size[url] = size
    Log.write(" - %s\n" % str(pformat(self.synthesis_option)))
    Log.write(" - Base VM : %s\n" % base_path)
    Log.write(" - Blob count : %d\n" % len(overlay_urls))
    # BUGFIX: was `overlay_urls == None`, which can never be true for a
    # Manager().list(); test emptiness instead so a meta header with an
    # empty blob list is rejected.
    if not overlay_urls:
        self.ret_fail("No overlay info listed")
        return
    (base_diskmeta, base_mem, base_memmeta) = \
            Cloudlet_Const.get_basepath(base_path, check_exist=True)
    header_end_time = time.time()
    Log.write("Meta header processing time: %f\n" % (header_end_time-header_start_time))

    # read overlay files
    # create named pipe to convert queue to stream
    time_transfer = Queue()
    time_decomp = Queue()
    time_delta = Queue()
    time_fuse = Queue()
    self.tmp_overlay_dir = tempfile.mkdtemp()
    temp_overlay_filepath = os.path.join(self.tmp_overlay_dir, "overlay_file")
    temp_overlay_file = open(temp_overlay_filepath, "w+b")
    self.overlay_pipe = os.path.join(self.tmp_overlay_dir, 'overlay_pipe')
    os.mkfifo(self.overlay_pipe)

    # overlay: downloader feeds download_queue; decompressor drains it into
    # the FIFO that recover_launchVM reads from.
    demanding_queue = Queue()
    download_queue = JoinableQueue()
    import threading
    download_process = threading.Thread(target=network_worker,
            args=(
                self,
                overlay_urls, overlay_urls_size, demanding_queue,
                download_queue, time_transfer, Synthesis_Const.TRANSFER_SIZE,
                ))
    decomp_process = Process(target=decomp_worker,
            args=(
                download_queue, self.overlay_pipe, time_decomp,
                temp_overlay_file,
                ))
    modified_img, modified_mem, self.fuse, self.delta_proc, self.fuse_thread = \
            cloudlet.recover_launchVM(base_path, meta_info, self.overlay_pipe,
                    log=sys.stdout, demanding_queue=demanding_queue)
    self.delta_proc.time_queue = time_delta
    self.fuse_thread.time_queue = time_fuse

    if self.synthesis_option.get(Protocol.SYNTHESIS_OPTION_EARLY_START, False):
        # 1. resume VM
        self.resumed_VM = cloudlet.ResumedVM(modified_img, modified_mem, self.fuse)
        time_start_resume = time.time()
        self.resumed_VM.start()
        time_end_resume = time.time()

        # 2. start processes
        download_process.start()
        decomp_process.start()
        self.delta_proc.start()
        self.fuse_thread.start()

        # 3. return success right after resuming VM
        # before receiving all chunks
        self.resumed_VM.join()
        self.send_synthesis_done()

        # 4. then wait fuse end
        self.fuse_thread.join()
    else:
        # 1. start processes
        download_process.start()
        decomp_process.start()
        self.delta_proc.start()
        self.fuse_thread.start()

        # 2. resume VM
        self.resumed_VM = cloudlet.ResumedVM(modified_img, modified_mem, self.fuse)
        self.resumed_VM.start()

        # 3. wait for fuse end
        self.fuse_thread.join()

        # 4. return success to client
        time_start_resume = time.time()     # measure pure resume time
        self.resumed_VM.join()
        time_end_resume = time.time()
        self.send_synthesis_done()

    end_time = time.time()

    # printout result
    SynthesisHandler.print_statistics(start_time, end_time, \
            time_transfer, time_decomp, time_delta, time_fuse, \
            print_out=Log, resume_time=(time_end_resume-time_start_resume))

    if self.synthesis_option.get(Protocol.SYNTHESIS_OPTION_DISPLAY_VNC, False):
        cloudlet.connect_vnc(self.resumed_VM.machine, no_wait=True)

    # wait for finish message from client
    Log.write("[SOCKET] waiting for client exit message\n")
    # BUGFIX: recv(4) may return fewer than 4 bytes on a stream socket;
    # loop until the full length prefix is read (same pattern as the
    # payload loop below).
    data = self.request.recv(4)
    while len(data) < 4:
        data += self.request.recv(4 - len(data))
    msgpack_size = struct.unpack("!I", data)[0]
    msgpack_data = self.request.recv(msgpack_size)
    while len(msgpack_data) < msgpack_size:
        msgpack_data += self.request.recv(msgpack_size - len(msgpack_data))
    finish_message = NetworkUtil.decoding(msgpack_data)
    command = finish_message.get(Protocol.KEY_COMMAND, None)
    if command != Protocol.MESSAGE_COMMAND_FINISH:
        # BUGFIX: was "%d", which raises TypeError when command is None or
        # a non-integer, hiding the intended RapidSynthesisError.
        msg = "Unexpected command while streaming overlay VM: %s" % command
        raise RapidSynthesisError(msg)
    self.ret_success(Protocol.MESSAGE_COMMAND_FINISH)
    Log.write(" - %s" % str(pformat(finish_message)))
    Log.write("\n")

    # printout synthesis statistics
    if self.synthesis_option.get(Protocol.SYNTHESIS_OPTION_SHOW_STATISTICS):
        mem_access_list = self.resumed_VM.monitor.mem_access_chunk_list
        disk_access_list = self.resumed_VM.monitor.disk_access_chunk_list
        cloudlet.synthesis_statistics(meta_info, temp_overlay_filepath, \
                mem_access_list, disk_access_list, \
                print_out=Log)

    # update DB
    new_overlayvm.terminate()
def piping_synthesis(overlay_url, base_path):
    """Synthesize a VM by streaming an overlay directly from a URL.

    Downloads the overlay meta file at *overlay_url*, derives the blob URLs
    from it, pipes download -> decompression -> delta recovery through a
    named FIFO, and blocks until recovery finishes.

    :param overlay_url: URL of the msgpack-encoded overlay meta file; blob
        URLs are resolved relative to its directory
    :param base_path: path of the matching base VM image
    :return: the FUSE object backing the recovered launch VM
    """
    # check base VM
    start_time = time.time()
    meta_stream = urllib2.urlopen(overlay_url)
    meta_raw = read_all(meta_stream)
    # BUGFIX: close the HTTP response once the meta file is fully read
    # (was leaked for the whole synthesis).
    meta_stream.close()
    meta_info = msgpack.unpackb(meta_raw)
    url_manager = Manager()
    overlay_urls = url_manager.list()
    url_prefix = os.path.dirname(overlay_url)
    for blob in meta_info[Const.META_OVERLAY_FILES]:
        blob_filename = os.path.basename(blob[Const.META_OVERLAY_FILE_NAME])
        url = os.path.join(url_prefix, blob_filename)
        overlay_urls.append(url)
    (base_diskmeta, base_mem, base_memmeta) = \
            Const.get_basepath(base_path, check_exist=True)

    # read overlay files
    # create named pipe to convert queue to stream
    time_transfer = Queue()
    time_decomp = Queue()
    time_delta = Queue()
    time_fuse = Queue()
    tmp_dir = tempfile.mkdtemp()
    temp_overlay_filepath = os.path.join(tmp_dir, "overlay_file")
    temp_overlay_file = open(temp_overlay_filepath, "w+b")
    overlay_pipe = os.path.join(tmp_dir, 'overlay_pipe')
    os.mkfifo(overlay_pipe)

    # overlay: downloader feeds download_queue; decompressor drains it into
    # the FIFO consumed by recover_launchVM.
    demanding_queue = Queue()
    download_queue = JoinableQueue()
    download_process = Process(target=synthesis.network_worker,
            args=(
                overlay_urls, demanding_queue, download_queue,
                time_transfer, CHUNK_SIZE,
                ))
    decomp_process = Process(target=synthesis.decomp_worker,
            args=(
                download_queue, overlay_pipe, time_decomp, temp_overlay_file,
                ))
    modified_img, modified_mem, fuse, delta_proc, fuse_thread = \
            cloudlet.recover_launchVM(base_path, meta_info, overlay_pipe,
                    log=sys.stdout, demanding_queue=demanding_queue)
    delta_proc.time_queue = time_delta
    fuse_thread.time_queue = time_fuse

    # start processes
    download_process.start()
    decomp_process.start()
    delta_proc.start()
    fuse_thread.start()

    # wait for end
    delta_proc.join()
    fuse_thread.join()

    # printout result
    end_time = time.time()
    total_time = (end_time-start_time)
    synthesis.SynthesisTCPHandler.print_statistics(start_time, end_time, \
            time_transfer, time_decomp, time_delta, time_fuse, \
            print_out=sys.stdout)

    delta_proc.finish()

    # BUGFIX: close the parent's handle to the temp overlay file before
    # removing the directory (the decomp child has its own descriptor).
    temp_overlay_file.close()
    if os.path.exists(overlay_pipe):
        os.unlink(overlay_pipe)
    shutil.rmtree(tmp_dir)
    # Single parenthesized argument: identical output under Python 2,
    # also valid Python 3 syntax.
    print("\n[Time] Total Time for synthesis(including download) : %f" % (total_time))
    return fuse