def check_basevm(self):
    """Validate every Base VM registered in the DB and return the usable ones.

    A Base VM is usable when both its disk image and its memory snapshot
    exist on disk; entries with missing files are skipped with a warning.
    Exits the process with status 2 when no valid Base VM remains.

    :return: list of BaseVM DB items whose disk image and memory snapshot exist
    """
    basevm_list = self.dbconn.list_item(BaseVM)
    ret_list = list()
    LOG.info("-" * 50)
    LOG.info("* Base VM Configuration")
    for index, item in enumerate(basevm_list):
        # derive companion file paths (disk hash, memory snapshot, memory hash)
        (base_diskmeta, base_mempath, base_memmeta) = \
            Cloudlet_Const.get_basepath(item.disk_path)
        if not os.path.exists(item.disk_path):
            # fix: original message read "is not exist"
            LOG.warning("disk image (%s) does not exist" % (item.disk_path))
            continue
        if not os.path.exists(base_mempath):
            LOG.warning("memory snapshot (%s) does not exist" % (base_mempath))
            continue
        # both files exist -> usable Base VM
        ret_list.append(item)
        LOG.info(" %d : %s (Disk %d MB, Memory %d MB)" % \
                (index, item.disk_path,
                 os.path.getsize(item.disk_path) / 1024 / 1024,
                 os.path.getsize(base_mempath) / 1024 / 1024))
    LOG.info("-" * 50)

    if not ret_list:
        LOG.error("[Error] NO valid Base VM")
        sys.exit(2)
    return ret_list
def export_basevm(name, basevm_path, basevm_hashvalue):
    """Package a Base VM into a zip archive in the current directory.

    :param name: output archive name; ".zip" is appended when missing
    :param basevm_path: path to the Base VM disk image
    :param basevm_hashvalue: hash value identifying the Base VM
    :return: path of the created archive, or None when the user declines
        to overwrite an existing file
    """
    (base_diskmeta, base_mempath, base_memmeta) = \
        Const.get_basepath(basevm_path)

    output_path = os.path.join(os.curdir, name)
    if not output_path.endswith(".zip"):
        output_path += ".zip"
    if os.path.exists(output_path):
        # confirm before clobbering an existing archive
        # (fix: prompt originally misspelled "Overwirte")
        is_overwrite = raw_input("%s exists. Overwrite it? (y/N) " % output_path)
        if is_overwrite != 'y':
            return None

    BaseVMPackage.create(output_path, basevm_hashvalue, basevm_path,
                         base_mempath, base_diskmeta, base_memmeta)
    return output_path
def import_basevm(filename):
    """Import a packaged Base VM (zip archive) and register it in the DB.

    Extracts the package into a temporary directory, moves the disk image
    and its companion files (memory snapshot, disk/memory hashes) into the
    per-hash Base VM directory, and adds a BaseVM row to the DB.

    :param filename: path to the Base VM package (zip archive)
    :return: (disk_target_path, base_hashvalue) on success, or None when a
        matching Base VM is already registered
    """
    filename = os.path.abspath(filename)
    (base_hashvalue, disk_name, memory_name, diskhash_name, memoryhash_name) = \
        PackagingUtil._get_basevm_attribute(filename)

    # each Base VM lives in a directory named after its hash value
    base_vm_dir = os.path.join(os.path.dirname(Const.BASE_VM_DIR),
                               base_hashvalue)
    disk_target_path = os.path.join(base_vm_dir, disk_name)
    dbconn, matching_basevm = PackagingUtil._get_matching_basevm(
        disk_target_path)
    if matching_basevm is not None:
        LOG.info("Base VM is already exists")
        LOG.info("Delete existing Base VM using command")
        LOG.info("See more 'cloudlet --help'")
        return None
    if not os.path.exists(base_vm_dir):
        LOG.info("create directory for base VM")
        os.makedirs(base_vm_dir)

    # decompress into a temp directory, then move files into place.
    # fix: create the temp dir only after the early-return above, and
    # always remove it afterwards (the original leaked it).
    temp_dir = mkdtemp(prefix="cloudlet-base-")
    try:
        LOG.info("Decompressing Base VM to temp directory at %s" % temp_dir)
        zipbase = zipfile.ZipFile(_FileFile("file:///%s" % filename), 'r')
        zipbase.extractall(temp_dir)
        shutil.move(os.path.join(temp_dir, disk_name), disk_target_path)

        (target_diskhash, target_memory, target_memoryhash) = \
            Const.get_basepath(disk_target_path, check_exist=False)
        path_list = {
            os.path.join(temp_dir, memory_name): target_memory,
            os.path.join(temp_dir, diskhash_name): target_diskhash,
            os.path.join(temp_dir, memoryhash_name): target_memoryhash,
        }
        LOG.info("Place base VM to the right directory")
        for (src, dest) in path_list.iteritems():
            shutil.move(src, dest)
    finally:
        shutil.rmtree(temp_dir, ignore_errors=True)

    # add to DB, reusing the connection returned by _get_matching_basevm
    # (fix: the original discarded it and opened a second DBConnector)
    LOG.info("Register New Base to DB")
    LOG.info("ID for the new Base VM: %s" % base_hashvalue)
    new_basevm = BaseVM(disk_target_path, base_hashvalue)
    LOG.info("Success")
    dbconn.add_item(new_basevm)
    return disk_target_path, base_hashvalue
def import_basevm(filename):
    """Import a packaged Base VM (zip archive) and register it in the DB.

    Extracts the package into a temporary directory, moves the disk image
    and its companion files (memory snapshot, disk/memory hashes) into the
    per-hash Base VM directory, and adds a BaseVM row to the DB.

    :param filename: path to the Base VM package (zip archive)
    :return: (disk_target_path, base_hashvalue) on success, or None when a
        matching Base VM is already registered
    """
    filename = os.path.abspath(filename)
    (base_hashvalue, disk_name, memory_name, diskhash_name, memoryhash_name) = \
        PackagingUtil._get_basevm_attribute(filename)

    # each Base VM lives in a directory named after its hash value
    base_vm_dir = os.path.join(os.path.dirname(Const.BASE_VM_DIR),
                               base_hashvalue)
    disk_target_path = os.path.join(base_vm_dir, disk_name)
    dbconn, matching_basevm = PackagingUtil._get_matching_basevm(
        disk_target_path)
    if matching_basevm is not None:
        LOG.info("Base VM is already exists")
        LOG.info("Delete existing Base VM using command")
        LOG.info("See more 'cloudlet --help'")
        return None
    if not os.path.exists(base_vm_dir):
        LOG.info("create directory for base VM")
        os.makedirs(base_vm_dir)

    # decompress into a temp directory, then move files into place.
    # fix: create the temp dir only after the early-return above, and
    # always remove it afterwards (the original leaked it).
    temp_dir = mkdtemp(prefix="cloudlet-base-")
    try:
        LOG.info("Decompressing Base VM to temp directory at %s" % temp_dir)
        zipbase = zipfile.ZipFile(_FileFile("file:///%s" % filename), 'r')
        zipbase.extractall(temp_dir)
        shutil.move(os.path.join(temp_dir, disk_name), disk_target_path)

        (target_diskhash, target_memory, target_memoryhash) = \
            Const.get_basepath(disk_target_path, check_exist=False)
        path_list = {
            os.path.join(temp_dir, memory_name): target_memory,
            os.path.join(temp_dir, diskhash_name): target_diskhash,
            os.path.join(temp_dir, memoryhash_name): target_memoryhash,
        }
        LOG.info("Place base VM to the right directory")
        for (src, dest) in path_list.iteritems():
            shutil.move(src, dest)
    finally:
        shutil.rmtree(temp_dir, ignore_errors=True)

    # add to DB, reusing the connection returned by _get_matching_basevm
    # (fix: the original discarded it and opened a second DBConnector)
    LOG.info("Register New Base to DB")
    LOG.info("ID for the new Base VM: %s" % base_hashvalue)
    new_basevm = BaseVM(disk_target_path, base_hashvalue)
    LOG.info("Success")
    dbconn.add_item(new_basevm)
    return disk_target_path, base_hashvalue
def _handle_synthesis_url(self, message):
    """Synthesize and resume a VM from overlay blobs fetched via URL.

    Validates the overlay meta info referenced by *message*, registers an
    OverlayVM row in the DB, then wires up a pipeline of
    fetch -> decompress -> delta/fuse processes feeding a named pipe, and
    resumes the synthesized VM.  Resources are parked in the global
    ``session_resources`` map keyed by session id for later cleanup.

    NOTE(review): behavior depends on the exact start/join ordering of the
    worker processes below — do not reorder.
    """
    LOG.info("\n\n----------------------- New Connection --------------")

    # check overlay meta info
    start_time = time.time()
    header_start_time = time.time()
    base_path, meta_info = self._check_url_validity(message)
    if meta_info is None:
        self.ret_fail("cannot access overlay URL")
        return
    if base_path is None:
        self.ret_fail("No matching Base VM")
        return
    if meta_info.get(Cloudlet_Const.META_OVERLAY_FILES, None) is None:
        self.ret_fail("No overlay files are listed")
        return
    # return success get overlay URL
    self.ret_success(Protocol.MESSAGE_COMMAND_SEND_META)
    overlay_url = message.get(Protocol.KEY_OVERLAY_URL)
    overlay_package = VMOverlayPackage(overlay_url)

    # update DB
    session_id = message.get(Protocol.KEY_SESSION_ID, None)
    new_overlayvm = OverlayVM(session_id, base_path)
    self.server.dbconn.add_item(new_overlayvm)

    # start synthesis process
    # Manager-backed list/dict so worker processes can share the URL queue
    url_manager = Manager()
    overlay_urls = url_manager.list()
    overlay_urls_size = url_manager.dict()
    for blob in meta_info[Cloudlet_Const.META_OVERLAY_FILES]:
        url = blob[Cloudlet_Const.META_OVERLAY_FILE_NAME]
        size = blob[Cloudlet_Const.META_OVERLAY_FILE_SIZE]
        overlay_urls.append(url)
        overlay_urls_size[url] = size
    LOG.info(" - %s" % str(pformat(self.synthesis_option)))
    LOG.info(" - Base VM : %s" % base_path)
    LOG.info(" - Blob count : %d" % len(overlay_urls))
    if overlay_urls == None:
        self.ret_fail("No overlay info listed")
        return
    (base_diskmeta, base_mem, base_memmeta) = \
        Cloudlet_Const.get_basepath(base_path, check_exist=True)
    header_end_time = time.time()
    LOG.info("Meta header processing time: %f" % (header_end_time - header_start_time))

    # read overlay files
    # create named pipe to convert queue to stream
    time_transfer = Queue(); time_decomp = Queue()
    time_delta = Queue(); time_fuse = Queue()
    self.tmp_overlay_dir = tempfile.mkdtemp()
    self.overlay_pipe = os.path.join(self.tmp_overlay_dir, 'overlay_pipe')
    os.mkfifo(self.overlay_pipe)

    # save overlay decomp result for measurement
    temp_overlay_file = None
    if self.synthesis_option.get(Protocol.SYNTHESIS_OPTION_SHOW_STATISTICS):
        temp_overlay_filepath = os.path.join(self.tmp_overlay_dir, "overlay_file")
        temp_overlay_file = open(temp_overlay_filepath, "w+b")

    # overlay: fetch stage feeds download_queue; decomp stage feeds the fifo
    demanding_queue = Queue()
    download_queue = JoinableQueue()
    download_process = URLFetchStep(overlay_package, overlay_urls,
                                    overlay_urls_size, demanding_queue,
                                    download_queue, time_transfer,
                                    Synthesis_Const.TRANSFER_SIZE, )
    decomp_process = DecompStepProc(
        download_queue, self.overlay_pipe, time_decomp, temp_overlay_file, )
    modified_img, modified_mem, self.fuse, self.delta_proc, self.fuse_proc = \
        synthesis.recover_launchVM(base_path, meta_info, self.overlay_pipe,
                                   log=sys.stdout,
                                   demanding_queue=demanding_queue)
    self.delta_proc.time_queue = time_delta  # for measurement
    self.fuse_proc.time_queue = time_fuse  # for measurement

    if self.synthesis_option.get(Protocol.SYNTHESIS_OPTION_EARLY_START, False):
        # early start: resume the VM first, then stream the overlay in
        # 1. resume VM
        self.resumed_VM = synthesis.SynthesizedVM(modified_img, modified_mem,
                                                  self.fuse)
        time_start_resume = time.time()
        self.resumed_VM.start()
        time_end_resume = time.time()
        # 2. start processes
        download_process.start()
        decomp_process.start()
        self.delta_proc.start()
        self.fuse_proc.start()
        # 3. return success right after resuming VM
        # before receiving all chunks
        self.resumed_VM.join()
        self.send_synthesis_done()
        # 4. then wait fuse end
        self.fuse_proc.join()
    else:
        # normal path: stream the whole overlay, then resume
        # 1. start processes
        download_process.start()
        decomp_process.start()
        self.delta_proc.start()
        self.fuse_proc.start()
        # 2. resume VM
        self.resumed_VM = synthesis.SynthesizedVM(modified_img, modified_mem,
                                                  self.fuse)
        self.resumed_VM.start()
        # 3. wait for fuse end
        self.fuse_proc.join()
        # 4. return success to client
        time_start_resume = time.time()  # measure pure resume time
        self.resumed_VM.join()
        time_end_resume = time.time()
        self.send_synthesis_done()

    end_time = time.time()

    # printout result
    SynthesisHandler.print_statistics(start_time, end_time, \
            time_transfer, time_decomp, time_delta, time_fuse, \
            resume_time=(time_end_resume - time_start_resume))

    if self.synthesis_option.get(Protocol.SYNTHESIS_OPTION_DISPLAY_VNC, False):
        synthesis.connect_vnc(self.resumed_VM.machine, no_wait=True)

    # save all the resource to the session resource so they can be
    # released when the session ends
    global session_resources
    s_resource = SessionResource(session_id)
    s_resource.add(SessionResource.DELTA_PROCESS, self.delta_proc)
    s_resource.add(SessionResource.RESUMED_VM, self.resumed_VM)
    s_resource.add(SessionResource.FUSE, self.fuse)
    s_resource.add(SessionResource.OVERLAY_PIPE, self.overlay_pipe)
    s_resource.add(SessionResource.OVERLAY_DIR, self.tmp_overlay_dir)
    s_resource.add(SessionResource.OVERLAY_DB_ENTRY, new_overlayvm)
    session_resources[session_id] = s_resource
    LOG.info("Resource is allocated for Session: %s" % str(session_id))

    # printout synthesis statistics
    # NOTE(review): temp_overlay_filepath is only bound when the
    # SHOW_STATISTICS option was set above, which matches this guard
    if self.synthesis_option.get(Protocol.SYNTHESIS_OPTION_SHOW_STATISTICS):
        mem_access_list = self.resumed_VM.monitor.mem_access_chunk_list
        disk_access_list = self.resumed_VM.monitor.disk_access_chunk_list
        synthesis.synthesis_statistics(meta_info, temp_overlay_filepath, \
                mem_access_list, disk_access_list)
    LOG.info("[SOCKET] waiting for client exit message")
def _handle_synthesis(self, message):
    """Synthesize and resume a VM from overlay blobs streamed over the socket.

    Legacy socket-based variant of the URL handler: validates the overlay
    meta info, streams overlay blobs via a network thread plus a decompress
    process into a named pipe, resumes the synthesized VM, then waits for
    the client's FINISH message before marking the OverlayVM DB row
    terminated.

    NOTE(review): behavior depends on the exact start/join ordering of the
    worker thread/processes below — do not reorder.
    """
    Log.write("\n\n----------------------- New Connection --------------\n")

    # check overlay meta info
    start_time = time.time()
    header_start_time = time.time()
    base_path, meta_info = self._check_validity(message)
    # NOTE(review): KEY_SESSIOIN_ID spelling matches the Protocol constant
    session_id = message.get(Protocol.KEY_SESSIOIN_ID, None)
    if base_path and meta_info and meta_info.get(Cloudlet_Const.META_OVERLAY_FILES, None):
        self.ret_success(Protocol.MESSAGE_COMMAND_SEND_META)
    else:
        self.ret_fail("No matching Base VM")
        return

    # update DB
    new_overlayvm = OverlayVM(session_id, base_path)
    self.server.dbconn.add_item(new_overlayvm)

    # start synthesis process
    # Manager-backed list/dict so worker processes can share the URL queue
    url_manager = Manager()
    overlay_urls = url_manager.list()
    overlay_urls_size = url_manager.dict()
    for blob in meta_info[Cloudlet_Const.META_OVERLAY_FILES]:
        url = blob[Cloudlet_Const.META_OVERLAY_FILE_NAME]
        size = blob[Cloudlet_Const.META_OVERLAY_FILE_SIZE]
        overlay_urls.append(url)
        overlay_urls_size[url] = size
    Log.write(" - %s\n" % str(pformat(self.synthesis_option)))
    Log.write(" - Base VM : %s\n" % base_path)
    Log.write(" - Blob count : %d\n" % len(overlay_urls))
    if overlay_urls == None:
        self.ret_fail("No overlay info listed")
        return
    (base_diskmeta, base_mem, base_memmeta) = \
        Cloudlet_Const.get_basepath(base_path, check_exist=True)
    header_end_time = time.time()
    Log.write("Meta header processing time: %f\n" % (header_end_time - header_start_time))

    # read overlay files
    # create named pipe to convert queue to stream
    time_transfer = Queue(); time_decomp = Queue()
    time_delta = Queue(); time_fuse = Queue()
    self.tmp_overlay_dir = tempfile.mkdtemp()
    temp_overlay_filepath = os.path.join(self.tmp_overlay_dir, "overlay_file")
    temp_overlay_file = open(temp_overlay_filepath, "w+b")
    self.overlay_pipe = os.path.join(self.tmp_overlay_dir, 'overlay_pipe')
    os.mkfifo(self.overlay_pipe)

    # overlay: network thread feeds download_queue; decomp process feeds
    # the fifo consumed by recover_launchVM
    demanding_queue = Queue()
    download_queue = JoinableQueue()
    import threading
    download_process = threading.Thread(target=network_worker, args=(
        self, overlay_urls, overlay_urls_size, demanding_queue,
        download_queue, time_transfer, Synthesis_Const.TRANSFER_SIZE, ) )
    decomp_process = Process(target=decomp_worker, args=(
        download_queue, self.overlay_pipe, time_decomp, temp_overlay_file, ) )
    modified_img, modified_mem, self.fuse, self.delta_proc, self.fuse_thread = \
        cloudlet.recover_launchVM(base_path, meta_info, self.overlay_pipe,
                                  log=sys.stdout,
                                  demanding_queue=demanding_queue)
    self.delta_proc.time_queue = time_delta  # for measurement
    self.fuse_thread.time_queue = time_fuse  # for measurement

    if self.synthesis_option.get(Protocol.SYNTHESIS_OPTION_EARLY_START, False):
        # early start: resume the VM first, then stream the overlay in
        # 1. resume VM
        self.resumed_VM = cloudlet.ResumedVM(modified_img, modified_mem,
                                             self.fuse)
        time_start_resume = time.time()
        self.resumed_VM.start()
        time_end_resume = time.time()
        # 2. start processes
        download_process.start()
        decomp_process.start()
        self.delta_proc.start()
        self.fuse_thread.start()
        # 3. return success right after resuming VM
        # before receiving all chunks
        self.resumed_VM.join()
        self.send_synthesis_done()
        # 4. then wait fuse end
        self.fuse_thread.join()
    else:
        # normal path: stream the whole overlay, then resume
        # 1. start processes
        download_process.start()
        decomp_process.start()
        self.delta_proc.start()
        self.fuse_thread.start()
        # 2. resume VM
        self.resumed_VM = cloudlet.ResumedVM(modified_img, modified_mem,
                                             self.fuse)
        self.resumed_VM.start()
        # 3. wait for fuse end
        self.fuse_thread.join()
        # 4. return success to client
        time_start_resume = time.time()  # measure pure resume time
        self.resumed_VM.join()
        time_end_resume = time.time()
        self.send_synthesis_done()

    end_time = time.time()

    # printout result
    SynthesisHandler.print_statistics(start_time, end_time, \
            time_transfer, time_decomp, time_delta, time_fuse, \
            print_out=Log, resume_time=(time_end_resume - time_start_resume))

    if self.synthesis_option.get(Protocol.SYNTHESIS_OPTION_DISPLAY_VNC, False):
        cloudlet.connect_vnc(self.resumed_VM.machine, no_wait=True)

    # wait for finish message from client:
    # 4-byte big-endian length prefix, then a msgpack-encoded message
    Log.write("[SOCKET] waiting for client exit message\n")
    data = self.request.recv(4)
    msgpack_size = struct.unpack("!I", data)[0]
    msgpack_data = self.request.recv(msgpack_size)
    while len(msgpack_data) < msgpack_size:
        # recv may return fewer bytes than requested; loop until complete
        msgpack_data += self.request.recv(msgpack_size - len(msgpack_data))
    finish_message = NetworkUtil.decoding(msgpack_data)
    command = finish_message.get(Protocol.KEY_COMMAND, None)
    if command != Protocol.MESSAGE_COMMAND_FINISH:
        msg = "Unexpected command while streaming overlay VM: %d" % command
        raise RapidSynthesisError(msg)
    self.ret_success(Protocol.MESSAGE_COMMAND_FINISH)
    Log.write(" - %s" % str(pformat(finish_message)))
    Log.write("\n")

    # TO BE DELETED - save execution pattern
    '''
    app_url = str(overlay_urls[0])
    mem_access_list = self.resumed_VM.monitor.mem_access_chunk_list
    mem_access_str = [str(item) for item in mem_access_list]
    filename = "exec_patter_%s" % (app_url.split("/")[-2])
    open(filename, "w+a").write('\n'.join(mem_access_str))
    '''

    # printout synthesis statistics
    if self.synthesis_option.get(Protocol.SYNTHESIS_OPTION_SHOW_STATISTICS):
        mem_access_list = self.resumed_VM.monitor.mem_access_chunk_list
        disk_access_list = self.resumed_VM.monitor.disk_access_chunk_list
        cloudlet.synthesis_statistics(meta_info, temp_overlay_filepath, \
                mem_access_list, disk_access_list, \
                print_out=Log)

    # update DB: mark this overlay session finished
    new_overlayvm.terminate()
def piping_synthesis(overlay_url, base_path):
    """Download an overlay from *overlay_url* and synthesize a VM over *base_path*.

    Fetches the msgpack meta file, derives per-blob URLs from the meta's
    file names, then runs the download -> decompress -> delta/fuse pipeline
    through a named pipe and waits for it to finish.  Prints timing
    statistics and the total elapsed time.

    :param overlay_url: URL of the overlay meta file; blob URLs are built
        from the same directory prefix
    :param base_path: path to the Base VM disk image
    :return: the fuse object of the synthesized VM

    NOTE(review): behavior depends on the exact start/join ordering of the
    worker processes below — do not reorder.
    """
    # check_base VM
    start_time = time.time()
    meta_stream = urllib2.urlopen(overlay_url)
    meta_raw = read_all(meta_stream)
    meta_info = msgpack.unpackb(meta_raw)
    # Manager-backed list so worker processes can share the URL queue
    url_manager = Manager()
    overlay_urls = url_manager.list()
    url_prefix = os.path.dirname(overlay_url)
    for blob in meta_info[Const.META_OVERLAY_FILES]:
        blob_filename = os.path.basename(blob[Const.META_OVERLAY_FILE_NAME])
        url = os.path.join(url_prefix, blob_filename)
        overlay_urls.append(url)
    (base_diskmeta, base_mem, base_memmeta) = \
        Const.get_basepath(base_path, check_exist=True)

    # read overlay files
    # create named pipe to convert queue to stream
    time_transfer = Queue(); time_decomp = Queue()
    time_delta = Queue(); time_fuse = Queue()
    tmp_dir = tempfile.mkdtemp()
    temp_overlay_filepath = os.path.join(tmp_dir, "overlay_file")
    temp_overlay_file = open(temp_overlay_filepath, "w+b")
    overlay_pipe = os.path.join(tmp_dir, 'overlay_pipe')
    os.mkfifo(overlay_pipe)

    # overlay: download process feeds download_queue; decomp process feeds
    # the fifo consumed by recover_launchVM
    demanding_queue = Queue()
    download_queue = JoinableQueue()
    download_process = Process(target=synthesis.network_worker, args=(
        overlay_urls, demanding_queue, download_queue, time_transfer,
        CHUNK_SIZE, ) )
    decomp_process = Process(target=synthesis.decomp_worker, args=(
        download_queue, overlay_pipe, time_decomp, temp_overlay_file, ) )
    modified_img, modified_mem, fuse, delta_proc, fuse_thread = \
        cloudlet.recover_launchVM(base_path, meta_info, overlay_pipe,
                                  log=sys.stdout,
                                  demanding_queue=demanding_queue)
    delta_proc.time_queue = time_delta  # for measurement
    fuse_thread.time_queue = time_fuse  # for measurement

    # start processes
    download_process.start()
    decomp_process.start()
    delta_proc.start()
    fuse_thread.start()

    # wait for end
    delta_proc.join()
    fuse_thread.join()

    # printout result
    end_time = time.time()
    total_time = (end_time - start_time)
    synthesis.SynthesisTCPHandler.print_statistics(start_time, end_time, \
            time_transfer, time_decomp, time_delta, time_fuse, \
            print_out=sys.stdout)

    delta_proc.finish()

    # clean up the fifo and temp dir; fuse is returned still mounted
    if os.path.exists(overlay_pipe):
        os.unlink(overlay_pipe)
    shutil.rmtree(tmp_dir)

    print "\n[Time] Total Time for synthesis(including download) : %f" % (total_time)
    return fuse