def unpack_stream(stream, with_hashvalue=False):
        if len(stream) == 0:
            return None, 999999  # sentinel offset signaling an empty stream
        offset = 0
        live_seq = None
        data = stream[0:8 + 2 + 1]
        data_len = 0
        offset += (8 + 2 + 1)
        (ram_offset, offset_len, ref_info) = struct.unpack("!QHc", data)
        ref_id = ord(ref_info) & 0xF0
        delta_type = ord(ref_info) & 0x0F

        if ref_id == DeltaItem.REF_RAW or \
                ref_id == DeltaItem.REF_XDELTA or \
                ref_id == DeltaItem.REF_XOR or \
                ref_id == DeltaItem.REF_BSDIFF:
            data_len = struct.unpack("!Q", stream[offset:offset + 8])[0]
            offset += 8
            data = stream[offset:offset + data_len]
            offset += data_len
        elif ref_id == DeltaItem.REF_SELF:
            data = struct.unpack("!Q", stream[offset:offset + 8])[0]
            offset += 8
        elif ref_id == DeltaItem.REF_BASE_DISK or \
                ref_id == DeltaItem.REF_BASE_MEM:
            data = struct.unpack("!Q", stream[offset:offset + 8])[0]
            offset += 8
        elif ref_id == DeltaItem.REF_SELF_HASH:
            #print "unpacking ref_self_hash"
            data = struct.unpack("!32s", stream[offset:offset + 32])[0]
            offset += 32

        if delta_type == DeltaItem.DELTA_DISK_LIVE or\
                delta_type == DeltaItem.DELTA_MEMORY_LIVE:
            live_seq = struct.unpack("!H", stream[offset:offset + 2])[0]
            offset += 2

        # hash_value typically does not exist when recovered because we don't need it
        if with_hashvalue:
            # hash_value is only needed for residue case
            hash_value = struct.unpack("!32s", stream[offset:offset + 32])[0]
            offset += 32
            # pass the unpacked payload here as well, matching the branch below
            item = DeltaItem(delta_type,
                             ram_offset,
                             offset_len,
                             hash_value,
                             ref_id,
                             data_len,
                             data,
                             live_seq=live_seq)
        else:
            item = DeltaItem(delta_type,
                             ram_offset,
                             offset_len,
                             None,
                             ref_id,
                             data_len,
                             data,
                             live_seq=live_seq)
        return item, offset
    def unpack_stream(stream, with_hashvalue=False, offset=0):
        if len(stream) <= offset:
            return None, None

        (ram_offset, offset_len, ref_info) = struct.unpack_from("!QHc", stream, offset)
        offset += struct.calcsize("!QHc")

        ref_id = ord(ref_info) & 0xF0
        delta_type = ord(ref_info) & 0x0F

        data = ''
        data_len = 0
        live_seq = None

        if ref_id == DeltaItem.REF_RAW or \
                ref_id == DeltaItem.REF_XDELTA or \
                ref_id == DeltaItem.REF_XOR or \
                ref_id == DeltaItem.REF_BSDIFF:
            data_len = struct.unpack_from("!Q", stream, offset)[0]
            offset += struct.calcsize("!Q")
            data = stream[offset:offset+data_len]
            offset += data_len

        elif ref_id == DeltaItem.REF_SELF or \
                ref_id == DeltaItem.REF_BASE_DISK or \
                ref_id == DeltaItem.REF_BASE_MEM:
            data = struct.unpack_from("!Q", stream, offset)[0]
            offset += struct.calcsize("!Q")

        elif ref_id == DeltaItem.REF_SELF_HASH:
            #print "unpacking ref_self_hash"
            data = struct.unpack_from("!32s", stream, offset)[0]
            offset += struct.calcsize("!32s")

        if delta_type == DeltaItem.DELTA_DISK_LIVE or\
                delta_type == DeltaItem.DELTA_MEMORY_LIVE:
            live_seq = struct.unpack_from("!H", stream, offset)[0]
            offset += struct.calcsize("!H")

        # hash_value typically does not exist when recovered because we don't need it
        if with_hashvalue:
            # hash_value is only needed for residue case
            hash_value = struct.unpack_from("!32s", stream, offset)[0]
            offset += struct.calcsize("!32s")
            item = DeltaItem(delta_type, ram_offset, offset_len, hash_value, ref_id, data_len, data, live_seq=live_seq)
        else:
            item = DeltaItem(delta_type, ram_offset, offset_len, None, ref_id, data_len, data, live_seq=live_seq)

        return item, offset
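
A minimal usage sketch for the offset-based variant above: walk a packed stream and collect every DeltaItem until the stream runs out. read_all_items is a hypothetical helper name; only unpack_stream comes from the snippet, called here as the plain function the listing presents.

def read_all_items(stream):
    items = []
    offset = 0
    while True:
        # unpack_stream returns (None, None) once offset passes the end
        item, offset = unpack_stream(stream, offset=offset)
        if item is None:
            break
        items.append(item)
    return items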
Example 3
def create_memory_overlay(raw_meta,
                          raw_mem,
                          modified_mem,
                          out_delta,
                          print_out=sys.stdout):
    # get memory delta
    # raw_meta: meta data path of raw memory, e.g. hash_list+header+footer
    # raw_mem: raw memory path
    # modified_mem: modified memory path
    # out_delta: output path of final delta

    # Create Base Memory from meta file
    base = Memory.import_from_metafile(raw_meta, raw_mem)

    # 1.get modified page
    print_out.write("[Debug] 1.get modified page list\n")
    header_delta, footer_delta, original_delta_list = base.get_modified(
        modified_mem)
    delta_list = []
    for item in original_delta_list:
        delta_item = DeltaItem(item.offset,
                               item.offset_len,
                               hash_value=item.hash_value,
                               ref_id=item.ref_id,
                               data_len=item.data_len,
                               data=item.data)
        delta_list.append(delta_item)

    # 2.find shared with base memory
    print_out.write("[Debug] 2.get delta from base Memory\n")
    base.get_delta(delta_list, ref_id=DeltaItem.REF_BASE_MEM)

    # 3.find shared within self
    print_out.write("[Debug] 3.get delta from itself\n")
    DeltaList.get_self_delta(delta_list)

    DeltaList.statistics(delta_list, print_out)
    DeltaList.tofile_with_footer(header_delta, footer_delta, delta_list,
                                 out_delta)
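
A hedged usage sketch for create_memory_overlay; all four paths are hypothetical placeholders, with raw_meta pointing at the metadata file (hash_list + header + footer) exported for the raw snapshot, as the comments above describe.

create_memory_overlay("base.mem.meta",    # metadata of the raw base memory
                      "base.mem",         # raw base memory snapshot
                      "modified.mem",     # memory snapshot after execution
                      "modified.delta")   # output path of the final delta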
Example 4
def create_disk_deltalist(modified_disk,
                          modified_chunk_dict,
                          chunk_size,
                          basedisk_hashlist=None,
                          basedisk_path=None,
                          trim_dict=None,
                          dma_dict=None,
                          apply_discard=True,
                          used_blocks_dict=None,
                          ret_statistics=None):
    # get disk delta
    # modified_disk : path to modified VM disk
    # modified_chunk_dict : chunk dict of modified disk, chunk number -> modified time
    # chunk_size : size of a disk chunk in bytes
    # basedisk_hashlist : hash list of base disk
    # basedisk_path : path to base VM disk
    # trim_dict : TRIM discard information, chunk number -> discard time
    # dma_dict : dma information,
    #           dma_dict[disk_chunk] = {'time':time, 'memory_chunk':memory chunk number, 'read': True if read from disk'}
    base_fd = open(basedisk_path, "rb")
    base_mmap = mmap.mmap(base_fd.fileno(), 0, prot=mmap.PROT_READ)
    modified_fd = open(modified_disk, "rb")

    # 0. get info from qemu log file
    # dictionary : (chunk_%, discarded_time)
    trim_counter = 0
    overwritten_after_trim = 0
    xray_counter = 0

    # TO BE DELETED
    trimed_list = []
    xrayed_list = []

    # 1. get modified page
    LOG.debug("1.get modified disk page")
    delta_list = list()
    for index, chunk in enumerate(modified_chunk_dict.keys()):
        offset = chunk * chunk_size
        ctime = modified_chunk_dict[chunk]

        # check TRIM discard
        is_discarded = False
        if trim_dict:
            trim_time = trim_dict.get(chunk, None)
            if trim_time:
                if (trim_time > ctime):
                    trimed_list.append(chunk)
                    trim_counter += 1
                    is_discarded = True
                else:
                    overwritten_after_trim += 1

        # check xray discard
        if used_blocks_dict:
            start_sector = offset / 512
            if used_blocks_dict.get(start_sector) is not True:
                xrayed_list.append(chunk)
                xray_counter += 1
                is_discarded = True

        if is_discarded:
            # skip the discarded chunk only when discard handling is enabled
            if apply_discard:
                continue

        # get delta between the base disk chunk and the modified chunk
        modified_fd.seek(offset)
        data = modified_fd.read(chunk_size)
        source_data = base_mmap[offset:offset + len(data)]
        try:
            patch = tool.diff_data(source_data, data, 2 * len(source_data))
            if len(patch) < len(data):
                delta_item = DeltaItem(DeltaItem.DELTA_DISK,
                                       offset,
                                       len(data),
                                       hash_value=sha256(data).digest(),
                                       ref_id=DeltaItem.REF_XDELTA,
                                       data_len=len(patch),
                                       data=patch)
            else:
                raise IOError("xdelta3 patch is bigger than origianl")
        except IOError as e:
            #LOG.info("xdelta failed, so save it as raw (%s)" % str(e))
            delta_item = DeltaItem(DeltaItem.DELTA_DISK,
                                   offset,
                                   len(data),
                                   hash_value=sha256(data).digest(),
                                   ref_id=DeltaItem.REF_RAW,
                                   data_len=len(data),
                                   data=data)
        delta_list.append(delta_item)
    if ret_statistics is not None:
        ret_statistics['trimed'] = trim_counter
        ret_statistics['xrayed'] = xray_counter
        ret_statistics['trimed_list'] = trimed_list
        ret_statistics['xrayed_list'] = xrayed_list
    LOG.debug("1-1. Trim(%d, overwritten after trim(%d)), Xray(%d)" % \
            (trim_counter, overwritten_after_trim, xray_counter))

    return delta_list
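
A usage sketch for create_disk_deltalist, assuming 4 KB chunks and a modified_chunk_dict that maps chunk numbers to modification times, which is how the loop above reads it. The paths and chunk numbers are hypothetical placeholders.

import time

stats = dict()
chunk_dict = {0: time.time(), 7: time.time()}  # chunks 0 and 7 were modified
delta_list = create_disk_deltalist("modified.raw", chunk_dict, 4096,
                                   basedisk_path="base.raw",
                                   ret_statistics=stats)
print "%d delta items, %d chunks trimmed" % (len(delta_list), stats['trimed'])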
Example 5
    def _load_cont_ram_block(self, f, hash_list, max_size, **kwargs):
        # Load KVM memory snapshot file and extract the hash list of
        # each memory page while interpreting the format
        # f: file object of the snapshot being loaded
        # kwargs:
        #  diff: compare hash_list with self object
        #  decomp_stream: write decompressed memory to decomp_stream
        diff = kwargs.get("diff", None)
        decomp_stream = kwargs.get("decomp_stream", None)

        offset = 0
        while True:
            header_flag = struct.unpack(">q", f.read(8))[0]
            comp_flag = header_flag & 0x0fff
            if comp_flag & self.RAM_SAVE_FLAG_EOS:
                print "EOS at %ld" % (offset)
                raise MemoryError(
                    "Change migration speed to unlimited to avoid EOS")

            offset = header_flag & ~0x0fff
            if not comp_flag & self.RAM_SAVE_FLAG_CONTINUE:
                id_length, id_string = struct.unpack(">c%ds" % \
                        self.RAM_ID_LENGTH, f.read(1+self.RAM_ID_LENGTH))

            if comp_flag & self.RAM_SAVE_FLAG_COMPRESS:
                #print "processing (%ld)\tcompressed" % (offset)
                compressed_byte = f.read(1)
                data = compressed_byte * self.RAM_PAGE_SIZE
            elif comp_flag & self.RAM_SAVE_FLAG_PAGE or comp_flag & self.RAM_SAVE_FLAG_RAW:
                #print "processing (%ld)\traw" % (offset)
                data = f.read(self.RAM_PAGE_SIZE)
            else:
                msg = "Invalid header compression flag: \n%s %ld %d" % \
                        (bin(header_flag), offset, comp_flag)
                raise MemoryError(msg)

            # kwargs: diff
            if diff:
                # compare it with self, save only when it is different
                self_hash_value = self.hash_list[offset /
                                                 self.RAM_PAGE_SIZE][2]
                if self_hash_value != sha256(data).digest():
                    #get xdelta comparing self.raw
                    source_data = self.get_raw_data(offset, self.RAM_PAGE_SIZE)
                    #save xdelta as DeltaItem only when it gives smaller
                    try:
                        patch = tool.diff_data(source_data, data,
                                               2 * len(source_data))
                        if len(patch) < len(data):
                            delta_item = DeltaItem(
                                offset,
                                self.RAM_PAGE_SIZE,
                                hash_value=sha256(data).digest(),
                                ref_id=DeltaItem.REF_XDELTA,
                                data_len=len(patch),
                                data=patch)
                        else:
                            raise IOError(
                                "xdelta3 patch is bigger than origianl")
                    except IOError as e:
                        #print "[INFO] xdelta failed, so save it as raw (%s)" % str(e)
                        delta_item = DeltaItem(
                            offset,
                            self.RAM_PAGE_SIZE,
                            hash_value=sha256(data).digest(),
                            ref_id=DeltaItem.REF_RAW,
                            data_len=len(data),
                            data=data)
                    hash_list.append(delta_item)

                # memory overusage protection
                if len(hash_list) > 200000:  # 800MB if PAGE_SIZE == 4K
                    raise MemoryError("possibly comparing with wrong base VM")
            else:
                # make new hash list
                hash_list.append(
                    (offset, self.RAM_PAGE_SIZE, sha256(data).digest()))

            # kwargs: decomp_stream
            if decomp_stream:
                decomp_stream.write(data)

            # read can be continued to pc.rom without EOS flag
            if offset + self.RAM_PAGE_SIZE == max_size:
                break

        return offset
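
The loop above packs the page offset and the flag bits into a single big-endian 64-bit header word. A standalone sketch of that split, reusing the 0x0fff mask from the code; split_ram_header is a hypothetical helper name.

import struct

def split_ram_header(raw8):
    # raw8: the first 8 bytes of a RAM block header
    header_flag = struct.unpack(">q", raw8)[0]
    comp_flag = header_flag & 0x0fff     # low 12 bits carry the RAM_SAVE_FLAG_* bits
    page_offset = header_flag & ~0x0fff  # remaining bits give the page-aligned offset
    return page_offset, comp_flag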
Example 6
        raw_path = settings.base_file  # raw memory snapshot path, used below
        meta_path = settings.base_file + EXT_META
        modi_mem_path = settings.mig_file
        out_path = settings.mig_file + ".delta"

        # Create Base Memory from meta file
        base = Memory.import_from_metafile(meta_path, raw_path)

        # 1.get modified page
        print "[Debug] get modified page list"
        header_delta, footer_delta, original_delta_list = base.get_modified(
            modi_mem_path)
        delta_list = []
        for item in original_delta_list:
            delta_item = DeltaItem(item.offset,
                                   item.offset_len,
                                   hash_value=item.hash_value,
                                   ref_id=item.ref_id,
                                   data_len=item.data_len,
                                   data=item.data)
            delta_list.append(delta_item)

        # 2.find shared with base memory
        print "[Debug] get delta from base Memory"
        base.get_delta(delta_list, ref_id=DeltaItem.REF_BASE_MEM)

        # 3.find shared within self
        print "[Debug] get delta from itself"
        DeltaList.get_self_delta(delta_list)

        DeltaList.statistics(delta_list)
        DeltaList.tofile_with_footer(header_delta, footer_delta, delta_list,
                                     out_path)
Example 7
    def _get_mem_hash(self, fin, end_offset, hash_list, **kwargs):
        # kwargs
        #  diff: compare hash_list with self object
        #  free_pfn_dict: free memory physical frame number as a dictionary {'#':1, ... }
        diff = kwargs.get("diff", None)
        apply_free_memory = kwargs.get("apply_free_memory", True)
        free_pfn_dict = kwargs.get("free_pfn_dict", None)
        LOG.info("Get hash list of memory page")
        prog_bar = AnimatedProgressBar(end=100, width=80, stdout=sys.stdout)

        total_size = end_offset
        ram_offset = 0
        freed_page_counter = 0
        base_hashlist_length = len(self.hash_list)
        while total_size != ram_offset:
            data = fin.read(Memory.RAM_PAGE_SIZE)
            if not diff:
                hash_list.append((ram_offset, len(data), sha256(data).digest()))
            else:
                # compare input with hash or corresponding base memory, save only when it is different
                hash_list_index = ram_offset/Memory.RAM_PAGE_SIZE
                if hash_list_index < base_hashlist_length:
                    self_hash_value = self.hash_list[hash_list_index][2]
                else:
                    self_hash_value = None

                if self_hash_value != sha256(data).digest():
                    is_free_memory = False
                    if (free_pfn_dict is not None) and \
                            (free_pfn_dict.get(long(ram_offset/Memory.RAM_PAGE_SIZE), None) == 1):
                        is_free_memory = True

                    if is_free_memory and apply_free_memory:
                        # Do not compare. It is free memory
                        freed_page_counter += 1
                    else:
                        #get xdelta comparing self.raw
                        source_data = self.get_raw_data(ram_offset, len(data))
                        #save xdelta as DeltaItem only when it gives smaller
                        try:
                            if source_data is None:
                                raise IOError("launch memory snapshot is bigger than base vm")
                            patch = tool.diff_data(source_data, data, 2*len(source_data))
                            if len(patch) < len(data):
                                delta_item = DeltaItem(DeltaItem.DELTA_MEMORY,
                                        ram_offset, len(data),
                                        hash_value=sha256(data).digest(),
                                        ref_id=DeltaItem.REF_XDELTA,
                                        data_len=len(patch),
                                        data=patch)
                            else:
                                raise IOError("xdelta3 patch is bigger than origianl")
                        except IOError as e:
                            #LOG.info("xdelta failed, so save it as raw (%s)" % str(e))
                            delta_item = DeltaItem(DeltaItem.DELTA_MEMORY,
                                    ram_offset, len(data),
                                    hash_value=sha256(data).digest(),
                                    ref_id=DeltaItem.REF_RAW,
                                    data_len=len(data),
                                    data=data)
                        hash_list.append(delta_item)

                # memory over-usage protection
                if len(hash_list) > 1000000:  # ~400MB of hash list entries
                    raise MemoryError("possibly comparing with wrong base VM")
            ram_offset += len(data)
            # print progress bar for every 100 page
            if (ram_offset % (Memory.RAM_PAGE_SIZE*100)) == 0:
                prog_bar.set_percent(100.0*ram_offset/total_size)
                prog_bar.show_progress()
        prog_bar.finish()
        return freed_page_counter
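
The try/except pattern in examples 4, 5, and 7 makes the same decision every time: keep the xdelta3 patch only when it is smaller than the raw page, otherwise store the page verbatim. A hedged, factored-out sketch of that decision; make_delta_item is a hypothetical name, while tool.diff_data and DeltaItem are assumed to be importable as in the snippets above.

from hashlib import sha256

def make_delta_item(delta_type, offset, source_data, data):
    try:
        patch = tool.diff_data(source_data, data, 2 * len(source_data))
        if len(patch) >= len(data):
            raise IOError("xdelta3 patch is bigger than original")
        ref_id, payload = DeltaItem.REF_XDELTA, patch
    except IOError:
        # fall back to the raw page when xdelta3 fails or does not help
        ref_id, payload = DeltaItem.REF_RAW, data
    return DeltaItem(delta_type, offset, len(data),
                     hash_value=sha256(data).digest(),
                     ref_id=ref_id,
                     data_len=len(payload),
                     data=payload)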