def get_modified(self, new_kvm_file):
    # get modified pages, header delta, and footer delta
    modi_header_data, modi_footer_data, hash_list = self._load_file(
        new_kvm_file, diff=True)
    try:
        header_delta = tool.diff_data(self.header_data, modi_header_data,
                                      2 * len(modi_header_data))
        footer_delta = tool.diff_data(self.footer_data, modi_footer_data,
                                      2 * len(modi_footer_data))
    except IOError as e:
        print "[ERROR] xdelta diff of header/footer failed (%s)" % str(e)
        sys.exit(1)
    print "[INFO] header size(%ld->%ld), footer size(%ld->%ld)" % \
            (len(modi_header_data), len(header_delta),
             len(modi_footer_data), len(footer_delta))
    return header_delta, footer_delta, hash_list
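
The try/except pattern above recurs throughout these examples: compute an xdelta patch against the base data and keep it only when it is smaller than the new data; otherwise fall back to storing the data raw. A minimal sketch of that fallback, with the diff_data argument standing in for tool.diff_data (any binary diff routine with the same shape would do):

def delta_or_raw(source, target, diff_data):
    # Try an xdelta-style patch first; keep it only when it is smaller.
    # diff_data(source, target, max_len) is assumed to raise IOError on
    # failure, mirroring tool.diff_data in the examples here.
    try:
        patch = diff_data(source, target, 2 * len(source))
        if len(patch) < len(target):
            return ('xdelta', patch)
    except IOError:
        pass  # diff failed; fall through to raw storage
    return ('raw', target)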
Example #3
    def _get_mem_hash(self, fin, end_offset, hash_list, **kwargs):
        # kwargs:
        #  diff: compare the new hash list against this object's hash list
        #  apply_free_memory: skip pages that the guest reports as free
        #  free_pfn_dict: physical frame numbers of free memory pages,
        #                 as a dictionary {pfn: 1, ...}
        diff = kwargs.get("diff", None)
        apply_free_memory = kwargs.get("apply_free_memory", True)
        free_pfn_dict = kwargs.get("free_pfn_dict", None)
        LOG.info("Get hash list of memory page")
        prog_bar = AnimatedProgressBar(end=100, width=80, stdout=sys.stdout)

        total_size = end_offset
        ram_offset = 0
        freed_page_counter = 0
        base_hashlist_length = len(self.hash_list)
        while total_size != ram_offset:
            data = fin.read(Memory.RAM_PAGE_SIZE)
            if not diff:
                hash_list.append((ram_offset, len(data), sha256(data).digest()))
            else:
                # compare input with hash or corresponding base memory, save only when it is different
                hash_list_index = ram_offset/Memory.RAM_PAGE_SIZE
                if hash_list_index < base_hashlist_length:
                    self_hash_value = self.hash_list[hash_list_index][2]
                else:
                    self_hash_value = None

                if self_hash_value != sha256(data).digest():
                    is_free_memory = False
                    if (free_pfn_dict is not None) and \
                            (free_pfn_dict.get(long(ram_offset/Memory.RAM_PAGE_SIZE), None) == 1):
                        is_free_memory = True

                    if is_free_memory and apply_free_memory:
                        # Do not compare. It is free memory
                        freed_page_counter += 1
                    else:
                        # compute an xdelta patch against the base memory (self.raw)
                        source_data = self.get_raw_data(ram_offset, len(data))
                        # save the patch as a DeltaItem only when it is smaller
                        try:
                            if source_data is None:
                                raise IOError("launch memory snapshot is bigger than base VM")
                            patch = tool.diff_data(source_data, data, 2*len(source_data))
                            if len(patch) < len(data):
                                delta_item = DeltaItem(DeltaItem.DELTA_MEMORY,
                                        ram_offset, len(data),
                                        hash_value=sha256(data).digest(),
                                        ref_id=DeltaItem.REF_XDELTA,
                                        data_len=len(patch),
                                        data=patch)
                            else:
                                raise IOError("xdelta3 patch is bigger than origianl")
                        except IOError as e:
                            # xdelta failed or the patch was not smaller; store the raw page
                            delta_item = DeltaItem(DeltaItem.DELTA_MEMORY,
                                    ram_offset, len(data),
                                    hash_value=sha256(data).digest(),
                                    ref_id=DeltaItem.REF_RAW,
                                    data_len=len(data),
                                    data=data)
                        hash_list.append(delta_item)

                # memory over-usage protection: sanity cap on hash list growth
                if len(hash_list) > Memory.RAM_PAGE_SIZE*1000000:
                    raise MemoryError("possibly comparing with wrong base VM")
            ram_offset += len(data)
            # update the progress bar every 100 pages
            if (ram_offset % (Memory.RAM_PAGE_SIZE*100)) == 0:
                prog_bar.set_percent(100.0*ram_offset/total_size)
                prog_bar.show_progress()
        prog_bar.finish()
        return freed_page_counter
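
For reference, the non-diff branch above simply walks the snapshot one page at a time and records (offset, length, digest) tuples. A standalone sketch of that pass, assuming a 4 KB page size in place of Memory.RAM_PAGE_SIZE:

from hashlib import sha256

PAGE_SIZE = 4096  # assumed stand-in for Memory.RAM_PAGE_SIZE

def build_page_hash_list(fin, end_offset):
    # Walk the stream page by page and record (offset, length, digest),
    # as in the non-diff branch of _get_mem_hash above.
    hash_list = []
    offset = 0
    while offset < end_offset:
        data = fin.read(PAGE_SIZE)
        if not data:
            break  # short file; the original assumes exactly end_offset bytes
        hash_list.append((offset, len(data), sha256(data).digest()))
        offset += len(data)
    return hash_list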
Example #4
def create_disk_deltalist(modified_disk,
                          modified_chunk_dict, chunk_size,
                          basedisk_hashlist=None, basedisk_path=None,
                          trim_dict=None, dma_dict=None,
                          apply_discard=True,
                          used_blocks_dict=None,
                          ret_statistics=None):
    # Build a delta list for the modified disk chunks.
    # modified_disk : path to the modified VM disk
    # modified_chunk_dict : {chunk number: last-modified time} of modified chunks
    # chunk_size : chunk size in bytes
    # basedisk_hashlist : hash list of the base disk (unused here)
    # basedisk_path : path to the base VM disk
    # trim_dict : {chunk number: TRIM time} parsed from the QEMU log
    # dma_dict : DMA information (unused here),
    #           dma_dict[disk_chunk] = {'time': time, 'memory_chunk': memory chunk number, 'read': True if read from disk}
    base_fd = open(basedisk_path, "rb")
    base_mmap = mmap.mmap(base_fd.fileno(), 0, prot=mmap.PROT_READ)
    modified_fd = open(modified_disk, "rb")

    # 0. counters for discard statistics gathered from the QEMU log
    trim_counter = 0
    overwritten_after_trim = 0
    xray_counter = 0

    # TO BE DELETED
    trimed_list = []
    xrayed_list = []

    # 1. get modified page
    LOG.debug("1.get modified disk page")
    delta_list = list()
    for chunk in modified_chunk_dict.keys():
        offset = chunk * chunk_size
        ctime = modified_chunk_dict[chunk]

        # check TRIM discard
        is_discarded = False
        if trim_dict:
            trim_time = trim_dict.get(chunk, None)
            if trim_time:
                if (trim_time > ctime):
                    trimed_list.append(chunk)
                    trim_counter += 1
                    is_discarded = True
                else:
                    overwritten_after_trim += 1

        # check xray discard
        if used_blocks_dict:
            start_sector = offset/512
            if used_blocks_dict.get(start_sector) != True:
                xrayed_list.append(chunk)
                xray_counter +=1
                is_discarded = True

        if is_discarded:
            # skip this chunk only when discard handling is enabled
            if apply_discard:
                continue

        # diff the modified chunk against the corresponding base-disk region
        modified_fd.seek(offset)
        data = modified_fd.read(chunk_size)
        source_data = base_mmap[offset:offset+len(data)]
        try:
            patch = tool.diff_data(source_data, data, 2*len(source_data))
            if len(patch) < len(data):
                delta_item = DeltaItem(DeltaItem.DELTA_DISK,
                        offset, len(data),
                        hash_value=sha256(data).digest(),
                        ref_id=DeltaItem.REF_XDELTA,
                        data_len=len(patch),
                        data=patch)
            else:
                raise IOError("xdelta3 patch is bigger than origianl")
        except IOError as e:
            #LOG.info("xdelta failed, so save it as raw (%s)" % str(e))
            delta_item = DeltaItem(DeltaItem.DELTA_DISK,
                    offset, len(data),
                    hash_value=sha256(data).digest(),
                    ref_id=DeltaItem.REF_RAW,
                    data_len=len(data),
                    data=data)
        delta_list.append(delta_item)
    if ret_statistics is not None:
        ret_statistics['trimed'] = trim_counter
        ret_statistics['xrayed'] = xray_counter
        ret_statistics['trimed_list'] = trimed_list
        ret_statistics['xrayed_list'] = xrayed_list
    LOG.debug("1-1. Trim(%d, overwritten after trim(%d)), Xray(%d)" % \
            (trim_counter, overwritten_after_trim, xray_counter))

    return delta_list
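
The two discard checks inside the loop can be read as a single predicate: a chunk is dropped when it was TRIMmed after its last write, or when its starting 512-byte sector is absent from the used-block map. A condensed sketch with the same semantics as the inline checks above (statistics counters omitted):

def is_chunk_discarded(chunk, ctime, chunk_size, trim_dict=None, used_blocks_dict=None):
    # TRIM discard: the chunk was discarded after its last modification time.
    if trim_dict:
        trim_time = trim_dict.get(chunk, None)
        if trim_time and trim_time > ctime:
            return True
    # X-ray discard: the chunk's starting sector is not marked as used.
    if used_blocks_dict:
        start_sector = (chunk * chunk_size) // 512
        if used_blocks_dict.get(start_sector) != True:
            return True
    return False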
Example #7
    def _load_cont_ram_block(self, f, hash_list, max_size, **kwargs):
        # Load a KVM memory snapshot file and extract a hash list of each
        # memory page while interpreting the snapshot format.
        # f : file object for the snapshot being loaded
        # kwargs:
        #  diff: compare the new hash list against this object's hash list
        #  decomp_stream: write decompressed memory to this stream
        diff = kwargs.get("diff", None)
        decomp_stream = kwargs.get("decomp_stream", None)

        offset = 0
        while True:
            header_flag = struct.unpack(">q", f.read(8))[0]
            comp_flag = header_flag & 0x0fff
            if comp_flag & self.RAM_SAVE_FLAG_EOS:
                print "EOS at %ld" % (offset)
                raise MemoryError(
                    "Change migration speed to unlimited to avoid EOS")

            offset = header_flag & ~0x0fff
            if not comp_flag & self.RAM_SAVE_FLAG_CONTINUE:
                id_length, id_string = struct.unpack(">c%ds" % \
                        self.RAM_ID_LENGTH, f.read(1+self.RAM_ID_LENGTH))

            if comp_flag & self.RAM_SAVE_FLAG_COMPRESS:
                #print "processing (%ld)\tcompressed" % (offset)
                compressed_byte = f.read(1)
                data = compressed_byte * self.RAM_PAGE_SIZE
            elif comp_flag & self.RAM_SAVE_FLAG_PAGE or comp_flag & self.RAM_SAVE_FLAG_RAW:
                #print "processing (%ld)\traw" % (offset)
                data = f.read(self.RAM_PAGE_SIZE)
            else:
                msg = "Invalid header compression flag: \n%s %ld %d" % \
                        (bin(header_flag), offset, comp_flag)
                raise MemoryError(msg)

            # kwargs: diff
            if diff:
                # compare it with self, save only when it is different
                self_hash_value = self.hash_list[offset /
                                                 self.RAM_PAGE_SIZE][2]
                if self_hash_value != sha256(data).digest():
                    # compute an xdelta patch against the base memory (self.raw)
                    source_data = self.get_raw_data(offset, self.RAM_PAGE_SIZE)
                    # save the patch as a DeltaItem only when it is smaller
                    try:
                        patch = tool.diff_data(source_data, data,
                                               2 * len(source_data))
                        if len(patch) < len(data):
                            delta_item = DeltaItem(
                                offset,
                                self.RAM_PAGE_SIZE,
                                hash_value=sha256(data).digest(),
                                ref_id=DeltaItem.REF_XDELTA,
                                data_len=len(patch),
                                data=patch)
                        else:
                            raise IOError(
                                "xdelta3 patch is bigger than original")
                    except IOError as e:
                        # xdelta failed or the patch was not smaller; store the raw page
                        delta_item = DeltaItem(
                            offset,
                            self.RAM_PAGE_SIZE,
                            hash_value=sha256(data).digest(),
                            ref_id=DeltaItem.REF_RAW,
                            data_len=len(data),
                            data=data)
                    hash_list.append(delta_item)

                # memory overusage protection
                if len(hash_list) > 200000:  # 800MB if PAGE_SIZE == 4K
                    raise MemoryError("possibly comparing with wrong base VM")
            else:
                # make new hash list
                hash_list.append(
                    (offset, self.RAM_PAGE_SIZE, sha256(data).digest()))

            # kwargs: decomp_stream
            if decomp_stream:
                decomp_stream.write(data)

            # reading may continue into pc.rom without an EOS flag, so stop at max_size
            if offset + self.RAM_PAGE_SIZE == max_size:
                break

        return offset
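
The per-page header decoded at the top of the loop packs two fields into one big-endian 64-bit word: the page offset in the high bits and the RAM_SAVE_FLAG_* bits in the low 12 bits. A minimal sketch of just that decoding step:

import struct

def read_page_header(f):
    # Read the 8-byte big-endian header that precedes each page.
    # The high bits carry the page offset; the low 12 bits carry the flags.
    header_flag = struct.unpack(">q", f.read(8))[0]
    comp_flag = header_flag & 0x0fff
    offset = header_flag & ~0x0fff
    return offset, comp_flag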