Example #1
    def check_basevm(self):
        basevm_list = self.dbconn.list_item(BaseVM)
        ret_list = list()
        LOG.info("-" * 50)
        LOG.info("* Base VM Configuration")
        for index, item in enumerate(basevm_list):
            # check file location
            (base_diskmeta, base_mempath, base_memmeta) = \
                    Cloudlet_Const.get_basepath(item.disk_path)
            if not os.path.exists(item.disk_path):
                LOG.warning("disk image (%s) does not exist" % item.disk_path)
                continue
            if not os.path.exists(base_mempath):
                LOG.warning("memory snapshot (%s) does not exist" %
                            base_mempath)
                continue

            # add to list
            ret_list.append(item)
            LOG.info(" %d : %s (Disk %d MB, Memory %d MB)" % \
                    (index, item.disk_path, os.path.getsize(item.disk_path)/1024/1024, \
                    os.path.getsize(base_mempath)/1024/1024))
        LOG.info("-" * 50)

        if len(ret_list) == 0:
            LOG.error("[Error] NO valid Base VM")
            sys.exit(2)
        return ret_list
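
For context, Cloudlet_Const.get_basepath is what derives the memory snapshot and metadata paths checked above from the disk image path. A minimal sketch of such a helper, assuming hypothetical suffix conventions (the real suffixes are defined by Cloudlet_Const in the project):

import os

def get_basepath(disk_path, check_exist=False):
    # Hypothetical sketch: derive the companion file paths from the disk
    # image path. The actual suffixes live in Cloudlet_Const.
    prefix = os.path.splitext(disk_path)[0]
    base_diskmeta = prefix + ".base-img-meta"
    base_mempath = prefix + ".base-mem"
    base_memmeta = prefix + ".base-mem-meta"
    if check_exist:
        for path in (base_diskmeta, base_mempath, base_memmeta):
            if not os.path.exists(path):
                raise IOError("missing base VM file: %s" % path)
    return base_diskmeta, base_mempath, base_memmeta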
Example #4
    def _handle_synthesis_url(self, message):
        LOG.info("\n\n----------------------- New Connection --------------")
        # check overlay meta info
        start_time = time.time()
        header_start_time = time.time()
        base_path, meta_info = self._check_url_validity(message)
        if meta_info is None:
            self.ret_fail("cannot access overlay URL")
            return
        if base_path is None:
            self.ret_fail("No matching Base VM")
            return
        if meta_info.get(Cloudlet_Const.META_OVERLAY_FILES, None) is None:
            self.ret_fail("No overlay files are listed")
            return

        # report success in retrieving the overlay URL
        self.ret_success(Protocol.MESSAGE_COMMAND_SEND_META)
        overlay_url = message.get(Protocol.KEY_OVERLAY_URL)
        overlay_package = VMOverlayPackage(overlay_url)

        # update DB
        session_id = message.get(Protocol.KEY_SESSION_ID, None)
        new_overlayvm = OverlayVM(session_id, base_path)
        self.server.dbconn.add_item(new_overlayvm)

        # start synthesis process
        url_manager = Manager()
        overlay_urls = url_manager.list()
        overlay_urls_size = url_manager.dict()
        for blob in meta_info[Cloudlet_Const.META_OVERLAY_FILES]:
            url = blob[Cloudlet_Const.META_OVERLAY_FILE_NAME]
            size = blob[Cloudlet_Const.META_OVERLAY_FILE_SIZE]
            overlay_urls.append(url)
            overlay_urls_size[url] = size
        LOG.info("  - %s" % str(pformat(self.synthesis_option)))
        LOG.info("  - Base VM     : %s" % base_path)
        LOG.info("  - Blob count  : %d" % len(overlay_urls))
        if overlay_urls is None:
            self.ret_fail("No overlay info listed")
            return
        (base_diskmeta, base_mem, base_memmeta) = \
                Cloudlet_Const.get_basepath(base_path, check_exist=True)
        header_end_time = time.time()
        LOG.info("Meta header processing time: %f" %
                 (header_end_time - header_start_time))

        # read overlay files
        # create named pipe to convert queue to stream
        time_transfer = Queue()
        time_decomp = Queue()
        time_delta = Queue()
        time_fuse = Queue()
        self.tmp_overlay_dir = tempfile.mkdtemp()
        self.overlay_pipe = os.path.join(self.tmp_overlay_dir, 'overlay_pipe')
        os.mkfifo(self.overlay_pipe)

        # save overlay decomp result for measurement
        temp_overlay_file = None
        if self.synthesis_option.get(
                Protocol.SYNTHESIS_OPTION_SHOW_STATISTICS):
            temp_overlay_filepath = os.path.join(self.tmp_overlay_dir,
                                                 "overlay_file")
            temp_overlay_file = open(temp_overlay_filepath, "w+b")

        # overlay fetch and decompression pipeline
        demanding_queue = Queue()
        download_queue = JoinableQueue()
        download_process = URLFetchStep(
            overlay_package,
            overlay_urls,
            overlay_urls_size,
            demanding_queue,
            download_queue,
            time_transfer,
            Synthesis_Const.TRANSFER_SIZE,
        )
        decomp_process = DecompStepProc(
            download_queue,
            self.overlay_pipe,
            time_decomp,
            temp_overlay_file,
        )
        modified_img, modified_mem, self.fuse, self.delta_proc, self.fuse_proc = \
            synthesis.recover_launchVM(base_path, meta_info, self.overlay_pipe,
                                       log=sys.stdout,
                                       demanding_queue=demanding_queue)
        self.delta_proc.time_queue = time_delta  # for measurement
        self.fuse_proc.time_queue = time_fuse  # for measurement

        if self.synthesis_option.get(Protocol.SYNTHESIS_OPTION_EARLY_START,
                                     False):
            # 1. resume VM
            self.resumed_VM = synthesis.SynthesizedVM(modified_img,
                                                      modified_mem, self.fuse)
            time_start_resume = time.time()
            self.resumed_VM.start()
            time_end_resume = time.time()

            # 2. start processes
            download_process.start()
            decomp_process.start()
            self.delta_proc.start()
            self.fuse_proc.start()

            # 3. return success right after resuming VM
            # before receiving all chunks
            self.resumed_VM.join()
            self.send_synthesis_done()

            # 4. then wait fuse end
            self.fuse_proc.join()
        else:
            # 1. start processes
            download_process.start()
            decomp_process.start()
            self.delta_proc.start()
            self.fuse_proc.start()

            # 2. resume VM
            self.resumed_VM = synthesis.SynthesizedVM(modified_img,
                                                      modified_mem, self.fuse)
            self.resumed_VM.start()

            # 3. wait for fuse end
            self.fuse_proc.join()

            # 4. return success to client
            time_start_resume = time.time()  # measure pure resume time
            self.resumed_VM.join()
            time_end_resume = time.time()
            self.send_synthesis_done()

        end_time = time.time()

        # print out the result
        SynthesisHandler.print_statistics(
            start_time, end_time,
            time_transfer, time_decomp, time_delta, time_fuse,
            resume_time=(time_end_resume - time_start_resume))

        if self.synthesis_option.get(Protocol.SYNTHESIS_OPTION_DISPLAY_VNC,
                                     False):
            synthesis.connect_vnc(self.resumed_VM.machine, no_wait=True)

        # save all the resources to the session resource
        global session_resources
        s_resource = SessionResource(session_id)
        s_resource.add(SessionResource.DELTA_PROCESS, self.delta_proc)
        s_resource.add(SessionResource.RESUMED_VM, self.resumed_VM)
        s_resource.add(SessionResource.FUSE, self.fuse)
        s_resource.add(SessionResource.OVERLAY_PIPE, self.overlay_pipe)
        s_resource.add(SessionResource.OVERLAY_DIR, self.tmp_overlay_dir)
        s_resource.add(SessionResource.OVERLAY_DB_ENTRY, new_overlayvm)
        session_resources[session_id] = s_resource
        LOG.info("Resource is allocated for Session: %s" % str(session_id))

        # print out synthesis statistics
        if self.synthesis_option.get(
                Protocol.SYNTHESIS_OPTION_SHOW_STATISTICS):
            mem_access_list = self.resumed_VM.monitor.mem_access_chunk_list
            disk_access_list = self.resumed_VM.monitor.disk_access_chunk_list
            synthesis.synthesis_statistics(meta_info, temp_overlay_filepath,
                                           mem_access_list, disk_access_list)
        LOG.info("[SOCKET] waiting for client exit message")
    def handle(self):
        '''Handle request from the client
        Each request follows this format:

        | header size | header | blob header size | blob header | blob data  |
        |  (4 bytes)  | (var)  | (4 bytes)        | (var bytes) | (var bytes)|
        '''
        # variable
        self.total_recved_size_cur = 0
        self.total_recved_size_prev = 0

        # get header
        data = self._recv_all(4)
        if data is None or len(data) != 4:
            raise StreamSynthesisError(
                "Failed to receive first byte of header")
        message_size = struct.unpack("!I", data)[0]
        msgpack_data = self._recv_all(message_size)
        metadata = NetworkUtil.decoding(msgpack_data)
        launch_disk_size = metadata[Cloudlet_Const.META_RESUME_VM_DISK_SIZE]
        launch_memory_size = metadata[
            Cloudlet_Const.META_RESUME_VM_MEMORY_SIZE]

        analysis_mq = multiprocessing.Queue()
        analysis_proc = HandoffAnalysisProc(handoff_url=self.client_address[0],
                                            message_queue=analysis_mq,
                                            disk_size=launch_disk_size,
                                            mem_size=launch_memory_size)
        analysis_proc.start()

        analysis_mq.put("=" * 50)
        analysis_mq.put("Adaptive VM Handoff Initiated")
        analysis_mq.put(
            "Client Connection - %s:%d" %
            (self.client_address[0],
             self.client_address[1]))  # client_address is a (ip, port) tuple

        if self.server.handoff_data is not None:
            analysis_mq.put("Handoff via OpenStack")
            via_openstack = True
        else:
            analysis_mq.put("Handoff via cloudlet CLI")
            via_openstack = False

        synthesis_option, base_diskpath = self._check_validity(metadata)
        if base_diskpath is None:
            raise StreamSynthesisError("No matching base VM")
        if via_openstack:
            base_diskpath, base_mempath, base_diskmeta, base_memmeta = self.server.handoff_data.base_vm_paths
        else:
            (base_diskmeta, base_mempath,
             base_memmeta) = Cloudlet_Const.get_basepath(base_diskpath,
                                                         check_exist=True)
        analysis_mq.put("Synthesis Options %s" %
                        str(pformat(self.synthesis_option)))
        analysis_mq.put("Base VM Path: %s" % base_diskpath)
        analysis_mq.put("Image Disk Size: %d" % launch_disk_size)
        analysis_mq.put("Image Memory Size: %d" % launch_memory_size)
        analysis_mq.put("=" * 50)
        # variables for FUSE
        if via_openstack:
            launch_disk = self.server.handoff_data.launch_diskpath
            launch_mem = self.server.handoff_data.launch_memorypath
        else:
            temp_synthesis_dir = tempfile.mkdtemp(prefix="cloudlet-comp-")
            launch_disk = os.path.join(temp_synthesis_dir, "launch-disk")
            launch_mem = os.path.join(temp_synthesis_dir, "launch-mem")
        memory_chunk_all = set()
        disk_chunk_all = set()

        # start pipelining processes
        network_out_queue = multiprocessing.Queue()
        decomp_queue = multiprocessing.Queue()
        fuse_info_queue = multiprocessing.Queue()
        decomp_proc = DecompProc(network_out_queue,
                                 decomp_queue,
                                 num_proc=4,
                                 analysis_queue=analysis_mq)
        decomp_proc.start()
        analysis_mq.put("Starting (%d) decompression processes..." %
                        (decomp_proc.num_proc))
        delta_proc = RecoverDeltaProc(base_diskpath, base_mempath,
                                      decomp_queue, launch_mem, launch_disk,
                                      Cloudlet_Const.CHUNK_SIZE,
                                      fuse_info_queue, analysis_mq)
        delta_proc.start()
        analysis_mq.put("Starting delta recovery process...")

        # get each blob
        recv_blob_counter = 0
        while True:
            data = self._recv_all(4)
            if data is None or len(data) != 4:
                raise StreamSynthesisError(
                    "Failed to receive first byte of header")

            blob_header_size = struct.unpack("!I", data)[0]
            blob_header_raw = self._recv_all(blob_header_size)
            blob_header = NetworkUtil.decoding(blob_header_raw)
            blob_size = blob_header.get(Cloudlet_Const.META_OVERLAY_FILE_SIZE)
            if blob_size is None:
                raise StreamSynthesisError("Failed to receive blob")
            if blob_size == 0:
                analysis_mq.put("End of stream received from client at %f)" %
                                (time.time()))
                break
            blob_comp_type = blob_header.get(
                Cloudlet_Const.META_OVERLAY_FILE_COMPRESSION)
            blob_disk_chunk = blob_header.get(
                Cloudlet_Const.META_OVERLAY_FILE_DISK_CHUNKS)
            blob_memory_chunk = blob_header.get(
                Cloudlet_Const.META_OVERLAY_FILE_MEMORY_CHUNKS)

            # send ack right before getting the blob
            ack_data = struct.pack("!Q", 0x01)
            self.request.send(ack_data)
            compressed_blob = self._recv_all(blob_size, ack_size=200 * 1024)
            # send ack right after getting the blob
            ack_data = struct.pack("!Q", 0x02)
            self.request.send(ack_data)

            network_out_queue.put((blob_comp_type, compressed_blob))
            # TODO: remove the interweaving of the valid bit here
            # TODO: and change the code path in cloudlet_driver.py so that
            # TODO: it uses the chunk sets instead of the tuples
            if via_openstack:
                memory_chunk_set = set(
                    ["%ld:1" % item for item in blob_memory_chunk])
                disk_chunk_set = set(
                    ["%ld:1" % item for item in blob_disk_chunk])
                memory_chunk_all.update(memory_chunk_set)
                disk_chunk_all.update(disk_chunk_set)
            else:
                memory_chunk_all.update(blob_memory_chunk)
                disk_chunk_all.update(blob_disk_chunk)
            recv_blob_counter += 1
            analysis_mq.put("B,R,%d" % (recv_blob_counter))

        network_out_queue.put(Cloudlet_Const.QUEUE_SUCCESS_MESSAGE)
        delta_proc.join()
        LOG.debug("%f\tdeltaproc join" % (time.time()))

        analysis_mq.put("Adaptive VM Handoff Complete!")
        analysis_mq.put("=" * 50)
        analysis_mq.put("!E_O_Q!")
        analysis_proc.join()

        if via_openstack:
            ack_data = struct.pack("!Qd", 0x10, time.time())
            LOG.info("send ack to client: %d" % len(ack_data))
            self.request.sendall(ack_data)

            disk_overlay_map = ','.join(disk_chunk_all)
            memory_overlay_map = ','.join(memory_chunk_all)
            # NOTE: when launched from OpenStack, FUSE and synthesis take place
            # in cloudlet_driver.py, but this data must be written to stdout so
            # the pipe connected to cloudlet_driver.py can finish the handoff.
            # TODO: instead of sending this buffer to cloudlet_driver.py over
            # stdout, move to multiprocessing.Pipe or Queue so that other items
            # dumped to stdout cannot corrupt this data; i.e. anything written
            # via LOG after this point ends up in stdout because the logger has
            # a StreamHandler configured to use stdout
            sys.stdout.write("openstack\t%s\t%s\t%s\t%s" %
                             (launch_disk_size, launch_memory_size,
                              disk_overlay_map, memory_overlay_map))

        else:
            # We told FUSE that we have everything ready, so we must wait until
            # delta_proc finishes; we cannot start the VM before then because
            # we don't know what will still be modified
            time_fuse_start = time.time()
            fuse = run_fuse(Cloudlet_Const.CLOUDLETFS_PATH,
                            Cloudlet_Const.CHUNK_SIZE,
                            base_diskpath,
                            launch_disk_size,
                            base_mempath,
                            launch_memory_size,
                            resumed_disk=launch_disk,
                            disk_chunks=disk_chunk_all,
                            resumed_memory=launch_mem,
                            memory_chunks=memory_chunk_all,
                            valid_bit=1)
            time_fuse_end = time.time()

            synthesized_vm = SynthesizedVM(launch_disk, launch_mem, fuse)

            synthesized_vm.start()
            synthesized_vm.join()

            # since libvirt does not return immediately after resuming VM, we
            # measure resume time directly from QEMU
            actual_resume_time = 0
            with open("/tmp/qemu_debug_messages", "r") as qemu_log:
                splited_log = qemu_log.read().split("\n")
            for line in splited_log:
                if line.startswith("INCOMING_FINISH"):
                    actual_resume_time = float(line.split(" ")[-1])

            LOG.info("[time] non-pipelined time %f (%f ~ %f ~ %f)" % (
                actual_resume_time - time_fuse_start,
                time_fuse_start,
                time_fuse_end,
                actual_resume_time,
            ))

            ack_data = struct.pack("!Qd", 0x10, actual_resume_time)
            LOG.info("send ack to client: %d" % len(ack_data))
            self.request.sendall(ack_data)

            connect_vnc(synthesized_vm.machine, True)

            signal.signal(signal.SIGUSR1, handlesig)
            signal.pause()

            synthesized_vm.monitor.terminate()
            synthesized_vm.monitor.join()
            synthesized_vm.terminate()
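
Every message in this protocol is framed the same way: a 4-byte big-endian length ("!I") followed by an encoded header, with fixed-size acks packed as "!Q" or "!Qd". A minimal client-side sketch of that framing, assuming NetworkUtil encodes headers with msgpack (an assumption, not confirmed by this snippet):

import struct
import msgpack  # assumption: NetworkUtil.decoding is msgpack-based

def send_framed(sock, header):
    # Prefix the encoded header with its 4-byte big-endian length.
    payload = msgpack.packb(header)
    sock.sendall(struct.pack("!I", len(payload)) + payload)

def recv_ack(sock):
    # The server acks with an 8-byte unsigned long long ("!Q").
    data = sock.recv(8)
    return struct.unpack("!Q", data)[0]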
Example #7
    def handle(self):
        '''Handle request from the client
        Each request follows this format:

        | header size | header | blob header size | blob header | blob data  |
        |  (4 bytes)  | (var)  | (4 bytes)        | (var bytes) | (var bytes)|
        '''
        if self.server.handoff_data is not None:
            LOG.debug("VM synthesis using OpenStack")
        else:
            LOG.debug("VM synthesis as standalone")

        # variable
        self.total_recved_size_cur = 0
        self.total_recved_size_prev = 0

        # get header
        data = self._recv_all(4)
        if data is None or len(data) != 4:
            raise StreamSynthesisError(
                "Failed to receive first byte of header")
        message_size = struct.unpack("!I", data)[0]
        msgpack_data = self._recv_all(message_size)
        metadata = NetworkUtil.decoding(msgpack_data)
        launch_disk_size = metadata[Cloudlet_Const.META_RESUME_VM_DISK_SIZE]
        launch_memory_size = metadata[
            Cloudlet_Const.META_RESUME_VM_MEMORY_SIZE]

        synthesis_option, base_diskpath = self._check_validity(metadata)
        if base_diskpath is None:
            raise StreamSynthesisError("No matching base VM")
        if self.server.handoff_data:
            base_diskpath, base_diskmeta, base_mempath, base_memmeta =\
                self.server.handoff_data.base_vm_paths
        else:
            (base_diskmeta, base_mempath, base_memmeta) = \
                    Cloudlet_Const.get_basepath(base_diskpath, check_exist=True)
        LOG.info("  - %s" % str(pformat(self.synthesis_option)))
        LOG.info("  - Base VM     : %s" % base_diskpath)

        # variables for FUSE
        temp_synthesis_dir = tempfile.mkdtemp(prefix="cloudlet-comp-")
        launch_disk = os.path.join(temp_synthesis_dir, "launch-disk")
        launch_mem = os.path.join(temp_synthesis_dir, "launch-mem")
        memory_chunk_all = set()
        disk_chunk_all = set()

        # start pipelining processes
        network_out_queue = multiprocessing.Queue()
        decomp_queue = multiprocessing.Queue()
        fuse_info_queue = multiprocessing.Queue()
        decomp_proc = DecompProc(network_out_queue, decomp_queue, num_proc=4)
        decomp_proc.start()
        LOG.info("Start Decompression process")
        delta_proc = RecoverDeltaProc(base_diskpath, base_mempath,
                                      decomp_queue, launch_mem, launch_disk,
                                      Cloudlet_Const.CHUNK_SIZE,
                                      fuse_info_queue)
        delta_proc.start()
        LOG.info("Start Synthesis process")

        # get each blob
        recv_blob_counter = 0
        while True:
            data = self._recv_all(4)
            if data is None or len(data) != 4:
                raise StreamSynthesisError(
                    "Failed to receive first byte of header")
            blob_header_size = struct.unpack("!I", data)[0]
            blob_header_raw = self._recv_all(blob_header_size)
            blob_header = NetworkUtil.decoding(blob_header_raw)
            blob_size = blob_header.get(Cloudlet_Const.META_OVERLAY_FILE_SIZE)
            if blob_size is None:
                raise StreamSynthesisError("Failed to receive blob")
            if blob_size == 0:
                LOG.debug("%f\tend of stream" % (time.time()))
                break
            blob_comp_type = blob_header.get(
                Cloudlet_Const.META_OVERLAY_FILE_COMPRESSION)
            blob_disk_chunk = blob_header.get(
                Cloudlet_Const.META_OVERLAY_FILE_DISK_CHUNKS)
            blob_memory_chunk = blob_header.get(
                Cloudlet_Const.META_OVERLAY_FILE_MEMORY_CHUNKS)

            # send ack right before getting the blob
            ack_data = struct.pack("!Q", 0x01)
            self.request.send(ack_data)
            compressed_blob = self._recv_all(blob_size, ack_size=200 * 1024)
            # send ack right after getting the blob
            ack_data = struct.pack("!Q", 0x02)
            self.request.send(ack_data)

            network_out_queue.put((blob_comp_type, compressed_blob))
            memory_chunk_set = set(
                ["%ld:1" % item for item in blob_memory_chunk])
            disk_chunk_set = set(["%ld:1" % item for item in blob_disk_chunk])
            memory_chunk_all.update(memory_chunk_set)
            disk_chunk_all.update(disk_chunk_set)
            LOG.debug("%f\treceive one blob" % (time.time()))
            recv_blob_counter += 1

        network_out_queue.put(Cloudlet_Const.QUEUE_SUCCESS_MESSAGE)
        delta_proc.join()
        LOG.debug("%f\tdeltaproc join" % (time.time()))

        # We told FUSE that we have everything ready, so we must wait until
        # delta_proc finishes; we cannot start the VM before then because
        # we don't know what will still be modified
        time_fuse_start = time.time()
        disk_overlay_map = ','.join(disk_chunk_all)
        memory_overlay_map = ','.join(memory_chunk_all)
        fuse = run_fuse(Cloudlet_Const.CLOUDLETFS_PATH,
                        Cloudlet_Const.CHUNK_SIZE,
                        base_diskpath,
                        launch_disk_size,
                        base_mempath,
                        launch_memory_size,
                        resumed_disk=launch_disk,
                        disk_overlay_map=disk_overlay_map,
                        resumed_memory=launch_mem,
                        memory_overlay_map=memory_overlay_map)
        time_fuse_end = time.time()
        memory_path = os.path.join(fuse.mountpoint, 'memory', 'image')

        if self.server.handoff_data:
            synthesized_vm = SynthesizedVM(
                launch_disk,
                launch_mem,
                fuse,
                disk_only=False,
                qemu_args=None,
                nova_xml=self.server.handoff_data.libvirt_xml,
                nova_conn=self.server.handoff_data._conn,
                nova_util=self.server.handoff_data._libvirt_utils)
        else:
            synthesized_vm = SynthesizedVM(launch_disk, launch_mem, fuse)

        synthesized_vm.start()
        synthesized_vm.join()

        # to be deleted
        #libvirt_xml = synthesized_vm.new_xml_str
        #vmpaths = [base_diskpath, base_diskmeta, base_mempath, base_memmeta]
        #base_hashvalue = metadata.get(Cloudlet_Const.META_BASE_VM_SHA256, None)
        #ds = HandoffDataRecv()
        #ds.save_data(vmpaths, base_hashvalue, libvirt_xml, "qemu:///session")
        #ds.to_file("/home/stack/cloudlet/provisioning/handff_recv_data")

        # since libvirt does not return immediately after resuming VM, we
        # measure resume time directly from QEMU
        actual_resume_time = 0
        with open("/tmp/qemu_debug_messages", "r") as qemu_log:
            splited_log = qemu_log.read().split("\n")
        for line in splited_log:
            if line.startswith("INCOMING_FINISH"):
                actual_resume_time = float(line.split(" ")[-1])
        time_resume_end = time.time()
        LOG.info("[time] non-pipelined time %f (%f ~ %f ~ %f)" % (
            actual_resume_time - time_fuse_start,
            time_fuse_start,
            time_fuse_end,
            actual_resume_time,
        ))
        if self.server.handoff_data is None:
            # for a standalone version, terminate a VM for the next testing
            #connect_vnc(synthesized_vm.machine)
            LOG.debug("Finishing VM in 3 seconds")
            time.sleep(3)
            synthesized_vm.monitor.terminate()
            synthesized_vm.monitor.join()
            synthesized_vm.terminate()

        # send end message
        ack_data = struct.pack("!Qd", 0x10, actual_resume_time)
        LOG.info("send ack to client: %d" % len(ack_data))
        self.request.sendall(ack_data)
        LOG.info("finished")
    def handle(self):
        '''Handle request from the client
        Each request follows this format:

        | header size | header | blob header size | blob header | blob data  |
        |  (4 bytes)  | (var)  | (4 bytes)        | (var bytes) | (var bytes)|
        '''
        # variable
        self.total_recved_size_cur = 0
        self.total_recved_size_prev = 0

        # get header
        data = self._recv_all(4)
        if data is None or len(data) != 4:
            raise StreamSynthesisError("Failed to receive first byte of header")
        message_size = struct.unpack("!I", data)[0]
        msgpack_data = self._recv_all(message_size)
        metadata = NetworkUtil.decoding(msgpack_data)
        launch_disk_size = metadata[Cloudlet_Const.META_RESUME_VM_DISK_SIZE]
        launch_memory_size = metadata[Cloudlet_Const.META_RESUME_VM_MEMORY_SIZE]

        analysis_mq = multiprocessing.Queue()
        analysis_proc = HandoffAnalysisProc(handoff_url=self.client_address[0],
                                            message_queue=analysis_mq,
                                            disk_size=launch_disk_size,
                                            mem_size=launch_memory_size)
        analysis_proc.start()

        analysis_mq.put("=" * 50)
        analysis_mq.put("Adaptive VM Handoff Initiated")
        analysis_mq.put("Client Connection - %s:%d" % (self.client_address[0], self.client_address[1])) #client_address is a tuple (ip, port)

        if self.server.handoff_data is not None:
            analysis_mq.put("Handoff via OpenStack")
            via_openstack = True
        else:
            analysis_mq.put("Handoff via cloudlet CLI")
            via_openstack = False

        synthesis_option, base_diskpath = self._check_validity(metadata)
        if base_diskpath is None:
            raise StreamSynthesisError("No matching base VM")
        if via_openstack:
            base_diskpath, base_mempath, base_diskmeta, base_memmeta = self.server.handoff_data.base_vm_paths
        else:
            (base_diskmeta, base_mempath, base_memmeta) = Cloudlet_Const.get_basepath(base_diskpath, check_exist=True)
        analysis_mq.put("Synthesis Options %s" % str(pformat(self.synthesis_option)))
        analysis_mq.put("Base VM Path: %s" % base_diskpath)
        analysis_mq.put("Image Disk Size: %d" % launch_disk_size)
        analysis_mq.put("Image Memory Size: %d" % launch_memory_size)
        analysis_mq.put("=" * 50)
        # variables for FUSE
        if via_openstack:
            launch_disk = self.server.handoff_data.launch_diskpath
            launch_mem = self.server.handoff_data.launch_memorypath
        else:
            temp_synthesis_dir = tempfile.mkdtemp(prefix="cloudlet-comp-")
            launch_disk = os.path.join(temp_synthesis_dir, "launch-disk")
            launch_mem = os.path.join(temp_synthesis_dir, "launch-mem")
        memory_chunk_all = set()
        disk_chunk_all = set()

        # start pipelining processes
        network_out_queue = multiprocessing.Queue()
        decomp_queue = multiprocessing.Queue()
        fuse_info_queue = multiprocessing.Queue()
        decomp_proc = DecompProc(network_out_queue, decomp_queue, num_proc=4, analysis_queue=analysis_mq)
        decomp_proc.start()
        analysis_mq.put("Starting (%d) decompression processes..." % (decomp_proc.num_proc))
        delta_proc = RecoverDeltaProc(base_diskpath, base_mempath,
                                    decomp_queue,
                                    launch_mem,
                                    launch_disk,
                                    Cloudlet_Const.CHUNK_SIZE,
                                    fuse_info_queue,
                                    analysis_mq)
        delta_proc.start()
        analysis_mq.put("Starting delta recovery process...")

        # get each blob
        recv_blob_counter = 0
        while True:
            data = self._recv_all(4)
            if data is None or len(data) != 4:
                raise StreamSynthesisError("Failed to receive first byte of header")

            blob_header_size = struct.unpack("!I", data)[0]
            blob_header_raw = self._recv_all(blob_header_size)
            blob_header = NetworkUtil.decoding(blob_header_raw)
            blob_size = blob_header.get(Cloudlet_Const.META_OVERLAY_FILE_SIZE)
            if blob_size is None:
                raise StreamSynthesisError("Failed to receive blob")
            if blob_size == 0:
                analysis_mq.put("End of stream received from client at %f)" % (time.time()))
                break
            blob_comp_type = blob_header.get(Cloudlet_Const.META_OVERLAY_FILE_COMPRESSION)
            blob_disk_chunk = blob_header.get(Cloudlet_Const.META_OVERLAY_FILE_DISK_CHUNKS)
            blob_memory_chunk = blob_header.get(Cloudlet_Const.META_OVERLAY_FILE_MEMORY_CHUNKS)

            # send ack right before getting the blob
            ack_data = struct.pack("!Q", 0x01)
            self.request.send(ack_data)
            compressed_blob = self._recv_all(blob_size, ack_size=200*1024)
            # send ack right after getting the blob
            ack_data = struct.pack("!Q", 0x02)
            self.request.send(ack_data)

            network_out_queue.put((blob_comp_type, compressed_blob))
            # TODO: remove the interweaving of the valid bit here
            # TODO: and change the code path in cloudlet_driver.py so that
            # TODO: it uses the chunk sets instead of the tuples
            if via_openstack:
                memory_chunk_set = set(["%ld:1" % item for item in blob_memory_chunk])
                disk_chunk_set = set(["%ld:1" % item for item in blob_disk_chunk])
                memory_chunk_all.update(memory_chunk_set)
                disk_chunk_all.update(disk_chunk_set)
            else:
                memory_chunk_all.update(blob_memory_chunk)
                disk_chunk_all.update(blob_disk_chunk)
            recv_blob_counter += 1
            analysis_mq.put("B,R,%d" % (recv_blob_counter))
            data = self._recv_all(4)
            iteration = struct.unpack("!I", data)[0]  # avoid shadowing builtin iter
            analysis_mq.put("iter,%d" % iteration)

        network_out_queue.put(Cloudlet_Const.QUEUE_SUCCESS_MESSAGE)
        delta_proc.join()
        LOG.debug("%f\tdeltaproc join" % (time.time()))


        analysis_mq.put("Adaptive VM Handoff Complete!")
        analysis_mq.put("=" * 50)
        analysis_mq.put("!E_O_Q!")
        analysis_proc.join()

        if via_openstack:
            ack_data = struct.pack("!Qd", 0x10, time.time())
            LOG.info("send ack to client: %d" % len(ack_data))
            self.request.sendall(ack_data)

            disk_overlay_map = ','.join(disk_chunk_all)
            memory_overlay_map = ','.join(memory_chunk_all)
            # NOTE: when launched from OpenStack, FUSE and synthesis take place
            # in cloudlet_driver.py, but this data must be written to stdout so
            # the pipe connected to cloudlet_driver.py can finish the handoff.
            # TODO: instead of sending this buffer to cloudlet_driver.py over
            # stdout, move to multiprocessing.Pipe or Queue so that other items
            # dumped to stdout cannot corrupt this data; i.e. anything written
            # via LOG after this point ends up in stdout because the logger has
            # a StreamHandler configured to use stdout
            sys.stdout.write("openstack\t%s\t%s\t%s\t%s" %
                             (launch_disk_size, launch_memory_size,
                              disk_overlay_map, memory_overlay_map))

        else:
            # We told FUSE that we have everything ready, so we must wait until
            # delta_proc finishes; we cannot start the VM before then because
            # we don't know what will still be modified
            time_fuse_start = time.time()
            fuse = run_fuse(Cloudlet_Const.CLOUDLETFS_PATH, Cloudlet_Const.CHUNK_SIZE,
                            base_diskpath, launch_disk_size, base_mempath, launch_memory_size,
                            resumed_disk=launch_disk, disk_chunks=disk_chunk_all,
                            resumed_memory=launch_mem, memory_chunks=memory_chunk_all,
                            valid_bit=1)
            time_fuse_end = time.time()

            synthesized_vm = SynthesizedVM(launch_disk, launch_mem, fuse)

            synthesized_vm.start()
            synthesized_vm.join()

            # since libvirt does not return immediately after resuming VM, we
            # measure resume time directly from QEMU
            actual_resume_time = 0
            with open("/tmp/qemu_debug_messages", "r") as qemu_log:
                splited_log = qemu_log.read().split("\n")
            for line in splited_log:
                if line.startswith("INCOMING_FINISH"):
                    actual_resume_time = float(line.split(" ")[-1])

            LOG.info("[time] non-pipelined time %f (%f ~ %f ~ %f)" % (
                actual_resume_time-time_fuse_start,
                time_fuse_start,
                time_fuse_end,
                actual_resume_time,
            ))

            ack_data = struct.pack("!Qd", 0x10, actual_resume_time)
            LOG.info("send ack to client: %d" % len(ack_data))
            self.request.sendall(ack_data)

            connect_vnc(synthesized_vm.machine, True)

            signal.signal(signal.SIGUSR1, handlesig)
            signal.pause()

            synthesized_vm.monitor.terminate()
            synthesized_vm.monitor.join()
            synthesized_vm.terminate()
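
The standalone path above parks the handler with signal.pause until an operator sends SIGUSR1, then tears the VM down. A minimal sketch of that wait-for-signal pattern; handlesig is defined elsewhere in the module, so this stand-in handler just logs:

import os
import signal

def handlesig(signum, frame):
    # Stand-in for the module's handlesig: just note the wake-up.
    print("received signal %d, resuming teardown" % signum)

if __name__ == "__main__":
    signal.signal(signal.SIGUSR1, handlesig)
    print("pid %d waiting for SIGUSR1 (kill -USR1 %d)" % (os.getpid(), os.getpid()))
    signal.pause()  # blocks until a signal with a handler arrives
    print("teardown continues here")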