Example #1
0
    def check_basevm(self):
        """Validate registered Base VMs and return the usable ones.

        Iterates over every BaseVM record in the DB, skips entries whose
        disk image or memory snapshot file is missing on disk (logging a
        warning for each), and logs a summary line for each valid entry.

        Returns:
            list: BaseVM items whose disk image and memory snapshot exist.

        Raises:
            SystemExit: exits with status 2 when no valid Base VM remains.
        """
        basevm_list = self.dbconn.list_item(BaseVM)
        ret_list = list()
        LOG.info("-"*50)
        LOG.info("* Base VM Configuration")
        for index, item in enumerate(basevm_list):
            # derive companion file paths (disk meta, memory snapshot,
            # memory meta) from the disk image path
            (base_diskmeta, base_mempath, base_memmeta) = \
                    Cloudlet_Const.get_basepath(item.disk_path)
            # skip DB entries whose backing files are gone
            if not os.path.exists(item.disk_path):
                LOG.warning("disk image (%s) does not exist" % (item.disk_path))
                continue
            if not os.path.exists(base_mempath):
                LOG.warning("memory snapshot (%s) does not exist" % (base_mempath))
                continue

            # add to list
            ret_list.append(item)
            LOG.info(" %d : %s (Disk %d MB, Memory %d MB)" % \
                    (index, item.disk_path, os.path.getsize(item.disk_path)/1024/1024, \
                    os.path.getsize(base_mempath)/1024/1024))
        LOG.info("-"*50)

        if not ret_list:
            LOG.error("[Error] NO valid Base VM")
            sys.exit(2)
        return ret_list
Example #2
0
def get_indexed_delta_list(base_disk, overlay_metapath):
    """Reconstruct the memory delta list of an overlay and index it.

    Decompresses the overlay at *overlay_metapath* into a temporary
    file, rebuilds the memory delta list against *base_disk*, and builds
    an index->item mapping for O(1) lookup by chunk index.

    Args:
        base_disk: path to the base VM disk image (companion memory
            snapshot path is derived from it).
        overlay_metapath: path to the compressed overlay meta file.

    Returns:
        tuple: (delta_list, indexed_delta_list) where the second element
        maps each item's ``index`` attribute to the item itself.
    """
    from cloudlet.tool import decomp_overlay
    temp_overlay = NamedTemporaryFile(prefix="cloudlet-overlay-file-")
    # decomp_overlay presumably writes the decompressed overlay into
    # temp_overlay (its path is consumed below); the returned meta dict
    # is not needed here, so the binding was dropped.
    decomp_overlay(overlay_metapath, temp_overlay.name)
    (base_diskmeta, base_mem, base_memmeta) = \
            Const.get_basepath(base_disk, check_exist=True)
    delta_list = synthesis._reconstruct_mem_deltalist( \
            base_disk, base_mem, temp_overlay.name)
    indexed_delta_list = {item.index: item for item in delta_list}
    return delta_list, indexed_delta_list
def get_indexed_delta_list(base_disk, overlay_metapath):
    """Decompress an overlay and rebuild its memory delta list.

    Returns the pair (delta_list, indexed_delta_list); the second
    element maps each delta item's ``index`` attribute to the item.

    NOTE(review): this definition duplicates an identical sibling in
    this file; consider removing one of the two.
    """
    from cloudlet.tool import decomp_overlay

    decompressed = NamedTemporaryFile(prefix="cloudlet-overlay-file-")
    meta = decomp_overlay(overlay_metapath, decompressed.name)
    base_diskmeta, base_mem, base_memmeta = Const.get_basepath(
            base_disk, check_exist=True)
    delta_list = synthesis._reconstruct_mem_deltalist(
            base_disk, base_mem, decompressed.name)
    # index every reconstructed delta item by its chunk index
    by_index = dict()
    for entry in delta_list:
        by_index[entry.index] = entry
    return delta_list, by_index
def piping_synthesis(overlay_url, base_path):
    """Download an overlay VM over HTTP and synthesize it on a base VM.

    Pipeline: a download process fetches overlay blobs into a queue, a
    decompression process drains that queue into a named pipe, and
    synthesis.recover_launchVM() reconstructs the launch VM from the
    pipe.  Per-stage timings are collected via queues and printed at
    the end.

    Args:
        overlay_url: URL of the overlay meta file; per-blob URLs are
            derived from the same directory prefix.
        base_path: path to the base VM disk image (companion memory
            snapshot path is derived from it).

    Returns:
        The FUSE object backing the synthesized VM.
    """
    # check_base VM
    start_time = time.time()
    # fetch and decode the msgpack-encoded overlay metadata
    meta_stream = urllib2.urlopen(overlay_url)
    meta_raw = read_all(meta_stream)
    meta_info = msgpack.unpackb(meta_raw)
    # the blob URL list is shared with the download process via a
    # multiprocessing Manager proxy
    url_manager = Manager()
    overlay_urls = url_manager.list()
    url_prefix = os.path.dirname(overlay_url)
    for blob in meta_info[Const.META_OVERLAY_FILES]:
        blob_filename = os.path.basename(blob[Const.META_OVERLAY_FILE_NAME])
        url = os.path.join(url_prefix, blob_filename)
        overlay_urls.append(url)
    (base_diskmeta, base_mem, base_memmeta) = \
            Const.get_basepath(base_path, check_exist=True)

    # read overlay files
    # create named pipe to convert queue to stream
    # (the four queues collect per-stage timing samples)
    time_transfer = Queue(); time_decomp = Queue();
    time_delta = Queue(); time_fuse = Queue();
    tmp_dir = tempfile.mkdtemp()
    temp_overlay_filepath = os.path.join(tmp_dir, "overlay_file")
    temp_overlay_file = open(temp_overlay_filepath, "w+b")
    overlay_pipe = os.path.join(tmp_dir, 'overlay_pipe')
    os.mkfifo(overlay_pipe)

    # overlay: network_worker downloads blobs into download_queue;
    # decomp_worker decompresses them into the named pipe consumed by
    # recover_launchVM() below
    demanding_queue = Queue()
    download_queue = JoinableQueue()
    download_process = Process(target=synthesis_server.network_worker, 
            args=(
                overlay_urls, demanding_queue, download_queue, time_transfer, CHUNK_SIZE,
                )
            )
    decomp_process = Process(target=synthesis_server.decomp_worker,
            args=(
                download_queue, overlay_pipe, time_decomp, temp_overlay_file,
                )
            )
    modified_img, modified_mem, fuse, delta_proc, fuse_thread = \
            synthesis.recover_launchVM(base_path, meta_info, overlay_pipe, 
                    log=sys.stdout, demanding_queue=demanding_queue)
    delta_proc.time_queue = time_delta
    fuse_thread.time_queue = time_fuse

    # start processes
    download_process.start()
    decomp_process.start()
    delta_proc.start()
    fuse_thread.start()

    # wait for end
    delta_proc.join()
    fuse_thread.join()

    # printout result
    end_time = time.time()
    total_time = (end_time-start_time)
    synthesis_server.SynthesisTCPHandler.print_statistics(start_time, end_time, \
            time_transfer, time_decomp, time_delta, time_fuse, \
            print_out=sys.stdout)

    delta_proc.finish()

    # clean up the FIFO and temp directory; the FUSE mount stays alive
    # and is returned to the caller
    if os.path.exists(overlay_pipe):
        os.unlink(overlay_pipe)
    shutil.rmtree(tmp_dir)

    print "\n[Time] Total Time for synthesis(including download) : %f" % (total_time)
    return fuse
Example #5
0
    def _handle_synthesis(self, message):
        """Serve one VM-synthesis request from a connected client.

        Validates the request's overlay metadata against known base VMs,
        records the session in the DB, runs the download/decompress/
        delta/fuse pipeline, resumes the synthesized VM (before or after
        the pipeline completes depending on the EARLY_START option),
        reports success to the client, and finally waits for the
        client's FINISH message.

        Args:
            message: decoded client request dict (keys per Protocol).
        """
        LOG.info("\n\n----------------------- New Connection --------------")
        # check overlay meta info
        start_time = time.time()
        header_start_time = time.time()
        base_path, meta_info = self._check_validity(message)
        session_id = message.get(Protocol.KEY_SESSION_ID, None)
        # reject the request early when validation failed or the meta
        # lists no overlay blobs
        if base_path and meta_info and meta_info.get(Cloudlet_Const.META_OVERLAY_FILES, None):
            self.ret_success(Protocol.MESSAGE_COMMAND_SEND_META)
        else:
            self.ret_fail("No matching Base VM")
            return

        # update DB
        new_overlayvm = OverlayVM(session_id, base_path)
        self.server.dbconn.add_item(new_overlayvm)

        # start synthesis process
        # blob URLs/sizes are shared with the download thread via
        # multiprocessing Manager proxies
        url_manager = Manager()
        overlay_urls = url_manager.list()
        overlay_urls_size = url_manager.dict()
        for blob in meta_info[Cloudlet_Const.META_OVERLAY_FILES]:
            url = blob[Cloudlet_Const.META_OVERLAY_FILE_NAME]
            size = blob[Cloudlet_Const.META_OVERLAY_FILE_SIZE]
            overlay_urls.append(url)
            overlay_urls_size[url] = size
        LOG.info("  - %s" % str(pformat(self.synthesis_option)))
        LOG.info("  - Base VM     : %s" % base_path)
        LOG.info("  - Blob count  : %d" % len(overlay_urls))
        # NOTE(review): overlay_urls is a Manager list and is never None
        # here (len() above would already have raised) — this guard
        # looks unreachable; verify intent.
        if overlay_urls == None:
            self.ret_fail("No overlay info listed")
            return
        (base_diskmeta, base_mem, base_memmeta) = \
                Cloudlet_Const.get_basepath(base_path, check_exist=True)
        header_end_time = time.time()
        LOG.info("Meta header processing time: %f" % (header_end_time-header_start_time))

        # read overlay files
        # create named pipe to convert queue to stream
        # (the four queues collect per-stage timing samples)
        time_transfer = Queue(); time_decomp = Queue();
        time_delta = Queue(); time_fuse = Queue();
        self.tmp_overlay_dir = tempfile.mkdtemp()
        self.overlay_pipe = os.path.join(self.tmp_overlay_dir, 'overlay_pipe')
        os.mkfifo(self.overlay_pipe)

        # save overlay decomp result for measurement
        temp_overlay_file = None
        if self.synthesis_option.get(Protocol.SYNTHESIS_OPTION_SHOW_STATISTICS):
            temp_overlay_filepath = os.path.join(self.tmp_overlay_dir, "overlay_file")
            temp_overlay_file = open(temp_overlay_filepath, "w+b")

        # overlay pipeline: network download -> download_queue ->
        # decompression -> named pipe -> recover_launchVM()
        demanding_queue = Queue()
        download_queue = JoinableQueue()
        download_process = NetworkStepThread(self, 
                    overlay_urls, overlay_urls_size, demanding_queue, 
                    download_queue, time_transfer, Synthesis_Const.TRANSFER_SIZE, 
                    )
        decomp_process = DecompStepProc(
                download_queue, self.overlay_pipe, time_decomp, temp_overlay_file,
                )
        modified_img, modified_mem, self.fuse, self.delta_proc, self.fuse_proc = \
                synthesis.recover_launchVM(base_path, meta_info, self.overlay_pipe, 
                        log=sys.stdout, demanding_queue=demanding_queue)
        self.delta_proc.time_queue = time_delta # for measurement
        self.fuse_proc.time_queue = time_fuse # for measurement

        if self.synthesis_option.get(Protocol.SYNTHESIS_OPTION_EARLY_START, False):
            # early start: resume the VM first and let it demand-fetch
            # missing chunks while the pipeline is still running
            # 1. resume VM
            self.resumed_VM = synthesis.SynthesizedVM(modified_img, modified_mem, self.fuse)
            time_start_resume = time.time()
            self.resumed_VM.start()
            time_end_resume = time.time()

            # 2. start processes
            download_process.start()
            decomp_process.start()
            self.delta_proc.start()
            self.fuse_proc.start()

            # 3. return success right after resuming VM
            # before receiving all chunks
            self.resumed_VM.join()
            self.send_synthesis_done()

            # 4. then wait fuse end
            self.fuse_proc.join()
        else:
            # normal path: finish the whole pipeline before resuming
            # 1. start processes
            download_process.start()
            decomp_process.start()
            self.delta_proc.start()
            self.fuse_proc.start()

            # 2. resume VM
            self.resumed_VM = synthesis.SynthesizedVM(modified_img, modified_mem, self.fuse)
            self.resumed_VM.start()

            # 3. wait for fuse end
            self.fuse_proc.join()

            # 4. return success to client
            time_start_resume = time.time()     # measure pure resume time
            self.resumed_VM.join()
            time_end_resume = time.time()
            self.send_synthesis_done()

        end_time = time.time()

        # printout result
        SynthesisHandler.print_statistics(start_time, end_time, \
                time_transfer, time_decomp, time_delta, time_fuse, \
                resume_time=(time_end_resume-time_start_resume))

        if self.synthesis_option.get(Protocol.SYNTHESIS_OPTION_DISPLAY_VNC, False):
            synthesis.connect_vnc(self.resumed_VM.machine, no_wait=True)

        # wait for finish message from client
        # frame format: 4-byte big-endian length prefix, then a
        # msgpack-encoded message body
        LOG.info("[SOCKET] waiting for client exit message")
        data = self.request.recv(4)
        msgpack_size = struct.unpack("!I", data)[0]
        msgpack_data = self.request.recv(msgpack_size)
        while len(msgpack_data) < msgpack_size:
            msgpack_data += self.request.recv(msgpack_size- len(msgpack_data))
        finish_message = NetworkUtil.decoding(msgpack_data)
        command = finish_message.get(Protocol.KEY_COMMAND, None)
        if command != Protocol.MESSAGE_COMMAND_FINISH:
            # NOTE(review): "%d" assumes command is an int; if the
            # protocol command can be a string or None this raises
            # TypeError instead of RapidSynthesisError — verify.
            msg = "Unexpected command while streaming overlay VM: %d" % command
            raise RapidSynthesisError(msg)
        self.ret_success(Protocol.MESSAGE_COMMAND_FINISH)
        LOG.info("  - %s" % str(pformat(finish_message)))

        # printout synthesis statistics
        if self.synthesis_option.get(Protocol.SYNTHESIS_OPTION_SHOW_STATISTICS):
            mem_access_list = self.resumed_VM.monitor.mem_access_chunk_list
            disk_access_list = self.resumed_VM.monitor.disk_access_chunk_list
            synthesis.synthesis_statistics(meta_info, temp_overlay_filepath, \
                    mem_access_list, disk_access_list)

        # update DB
        new_overlayvm.terminate()
Example #6
0
def piping_synthesis(overlay_url, base_path):
    """Download an overlay VM over HTTP and synthesize it on a base VM.

    Pipeline: a download process fetches overlay blobs into a queue, a
    decompression process drains that queue into a named pipe, and
    synthesis.recover_launchVM() reconstructs the launch VM from the
    pipe.  Per-stage timings are collected via queues and printed at
    the end.

    Args:
        overlay_url: URL of the overlay meta file; per-blob URLs are
            derived from the same directory prefix.
        base_path: path to the base VM disk image (companion memory
            snapshot path is derived from it).

    Returns:
        The FUSE object backing the synthesized VM.
    """
    # check_base VM
    start_time = time.time()
    # fetch and decode the msgpack-encoded overlay metadata
    meta_stream = urllib2.urlopen(overlay_url)
    meta_raw = read_all(meta_stream)
    meta_info = msgpack.unpackb(meta_raw)
    # the blob URL list is shared with the download process via a
    # multiprocessing Manager proxy
    url_manager = Manager()
    overlay_urls = url_manager.list()
    url_prefix = os.path.dirname(overlay_url)
    for blob in meta_info[Const.META_OVERLAY_FILES]:
        blob_filename = os.path.basename(blob[Const.META_OVERLAY_FILE_NAME])
        url = os.path.join(url_prefix, blob_filename)
        overlay_urls.append(url)
    (base_diskmeta, base_mem, base_memmeta) = \
            Const.get_basepath(base_path, check_exist=True)

    # read overlay files
    # create named pipe to convert queue to stream
    # (the four queues collect per-stage timing samples)
    time_transfer = Queue()
    time_decomp = Queue()
    time_delta = Queue()
    time_fuse = Queue()
    tmp_dir = tempfile.mkdtemp()
    temp_overlay_filepath = os.path.join(tmp_dir, "overlay_file")
    temp_overlay_file = open(temp_overlay_filepath, "w+b")
    overlay_pipe = os.path.join(tmp_dir, 'overlay_pipe')
    os.mkfifo(overlay_pipe)

    # overlay: network_worker downloads blobs into download_queue;
    # decomp_worker decompresses them into the named pipe consumed by
    # recover_launchVM() below
    demanding_queue = Queue()
    download_queue = JoinableQueue()
    download_process = Process(target=synthesis_server.network_worker,
                               args=(
                                   overlay_urls,
                                   demanding_queue,
                                   download_queue,
                                   time_transfer,
                                   CHUNK_SIZE,
                               ))
    decomp_process = Process(target=synthesis_server.decomp_worker,
                             args=(
                                 download_queue,
                                 overlay_pipe,
                                 time_decomp,
                                 temp_overlay_file,
                             ))
    modified_img, modified_mem, fuse, delta_proc, fuse_thread = \
            synthesis.recover_launchVM(base_path, meta_info, overlay_pipe,
                    log=sys.stdout, demanding_queue=demanding_queue)
    delta_proc.time_queue = time_delta
    fuse_thread.time_queue = time_fuse

    # start processes
    download_process.start()
    decomp_process.start()
    delta_proc.start()
    fuse_thread.start()

    # wait for end
    delta_proc.join()
    fuse_thread.join()

    # printout result
    end_time = time.time()
    total_time = (end_time - start_time)
    synthesis_server.SynthesisTCPHandler.print_statistics(start_time, end_time, \
            time_transfer, time_decomp, time_delta, time_fuse, \
            print_out=sys.stdout)

    delta_proc.finish()

    # clean up the FIFO and temp directory; the FUSE mount stays alive
    # and is returned to the caller
    if os.path.exists(overlay_pipe):
        os.unlink(overlay_pipe)
    shutil.rmtree(tmp_dir)

    print "\n[Time] Total Time for synthesis(including download) : %f" % (
        total_time)
    return fuse