Exemple #1
0
def search4cave(stream: io.RawIOBase, section_name: str, section_size: int,
                section_info, cave_size: int, virtaddr: int, _bytes: bytes):
    """Scan the next *section_size* bytes of *stream* for code caves.

    A "cave" is a run of at least *cave_size* consecutive bytes equal to
    *_bytes*.  Returns a list of MiningResult objects describing every
    cave found; the stream position is restored before returning.

    Bug fix: a qualifying run that extends to the very end of the section
    is now recorded too — previously caves were only flushed when a
    mismatching byte was seen, so a trailing cave was silently dropped.
    """
    caves = []
    byte_count = 0          # length of the current run of matching bytes
    base = stream.tell()    # file offset where the section starts
    offset = 0              # bytes consumed so far within the section

    def _record(run_length: int, end_offset: int) -> None:
        # Append a MiningResult for a run of run_length bytes ending just
        # before section-relative offset end_offset - 1 (the mismatch byte).
        mr = MiningResult()
        mr.name = section_name
        mr.cave_begin = (base + end_offset) - run_length - 1
        mr.cave_end = (base + end_offset) - 1
        mr.cave_size = run_length
        mr.virtaddr = virtaddr + end_offset - run_length - 1
        mr.info = section_info
        caves.append(mr)

    while section_size > 0:
        rb = stream.read(1)
        section_size -= 1
        offset += 1

        if _bytes not in rb:
            if byte_count >= cave_size:
                _record(byte_count, offset)
            byte_count = 0
            if not rb:
                # EOF before the declared section end: nothing left to read,
                # so stop instead of spinning on empty reads.
                break
            continue
        byte_count += 1

    # Flush a run that reaches the section boundary.  offset + 1 mimics a
    # terminating mismatch byte so the offset arithmetic stays identical.
    if byte_count >= cave_size:
        _record(byte_count, offset + 1)

    stream.seek(base)
    return caves
Exemple #2
0
    def __init__(self, stream: RawIOBase, chunk_size=4096, *args, **kwargs):
        """Wrap *stream*, measuring its total length before delegating to the
        base class.  The stream is rewound to the start afterwards."""
        # Determine the content length by seeking to the end, then rewind.
        stream.seek(0, os.SEEK_END)
        total_length = stream.tell()
        stream.seek(0, os.SEEK_SET)

        self.chunk_size = chunk_size
        super().__init__(stream, content_len=total_length, *args, **kwargs)
Exemple #3
0
def verifycave(stream: io.RawIOBase, cave_size, _byte: bytes):
    """Return True when each of the next *cave_size* bytes of *stream*
    contains *_byte*.  Reads stop at the first mismatch, and the stream
    position is restored before returning."""
    origin = stream.tell()
    # all() short-circuits on the first non-matching byte, just like the
    # original break; an EOF read() of b'' also fails the membership test.
    ok = all(_byte in stream.read(1) for _ in range(cave_size))
    stream.seek(origin)
    return ok
def statistiques(source: io.RawIOBase) -> (Compteur, int):
    """
    Gather byte-occurrence statistics for Huffman compression.

    Reads *source* from the beginning, one byte at a time, and returns a
    tuple of (Compteur of occurrences per byte, total number of bytes read).
    """
    compteur = Compteur()
    total = 0
    source.seek(0)
    while True:
        octet = source.read(1)
        if not octet:  # b'' signals end of stream
            break
        compteur.incrementer(octet)
        total += 1
    return (compteur, total)
Exemple #5
0
def cache_segment_data(input_file: io.RawIOBase, segments: List[Any], segment_id: int, base_file_offset: int=0) -> None:
    """
    Read the raw data of segment *segment_id* into memory and store it as a
    memoryview under SI_CACHED_DATA in the segment entry (None when the
    segment has no file data, or the read fails).

    base_file_offset: when the input file is located within a containing file.
    """
    data = None
    file_offset = get_segment_data_file_offset(segments, segment_id)
    # No data for segments that have no data..
    if file_offset != -1:
        file_length = get_segment_data_length(segments, segment_id)

        input_file.seek(base_file_offset + file_offset, os.SEEK_SET)
        file_data = bytearray(file_length)
        bytes_read = input_file.readinto(file_data)
        if bytes_read == file_length:
            # NOTE(rmtew): Python 2, type(data[0]) is str. Python 3, type(data[0]) is int
            data = memoryview(file_data)
        else:
            # Bug fix: the buffer is pre-sized, so len(file_data) always equals
            # file_length; report the number of bytes actually read instead.
            logger.error("Unable to cache segment %d data, got %d bytes, wanted %d", segment_id, bytes_read or 0, file_length)
    segments[segment_id][SI_CACHED_DATA] = data
Exemple #6
0
def read_wad(f: io.RawIOBase):
    """Parse the 256-byte header of a Zwift WAD file and read its entry
    pointers.

    Returns a dict with the open file object and the entry pointers.
    Raises CommandError when the magic is wrong or the file is truncated.
    """
    f.seek(0)
    header = f.read(256)

    # Robustness: a file shorter than the header would otherwise surface as
    # an unhelpful struct.error below.
    if len(header) < 256:
        raise CommandError(
            f'Truncated wad file: expected a 256 byte header, '
            f'got {len(header)} bytes')

    if header[:4] != WAD_MAGIC:
        raise CommandError(
            f'File does not appear to be a Zwift WAD file, Expected '
            f'magic: {WAD_MAGIC}, actual: {header[:4]}')

    # Body size is a little-endian uint32 at offset 248 of the header.
    body_size = struct.unpack('<I', header[248:252])[0]
    wad_size = 256 + body_size
    actual_size = os.fstat(f.fileno()).st_size

    if actual_size < wad_size:
        raise CommandError(f'Truncated wad file: header implies '
                           f'{wad_size} bytes but file is {actual_size} bytes')
    if actual_size > wad_size:
        # Bug fix: the message previously interpolated actual_size twice;
        # the expected size is wad_size.
        warnings.warn(
            f'wad file is larger than header implies. expected size: '
            f'{wad_size} bytes, actual size: {actual_size} bytes')

    entry_pointers = read_entry_pointers(f)

    return {'file': f, 'entry_pointers': entry_pointers}
def decompresser(destination: io.RawIOBase, source: io.RawIOBase):
    """
    Decompress *source* into *destination* using Huffman's method.

    Generator: yields short progress messages (French strings) as each phase
    starts.  Expected stream layout: 4-byte "HUFF" identifier, 4-byte
    original length, 256 x 4-byte occurrence counts, then the packed bits.
    """
    def naturel_to_list(naturel):
        """
        Convert a natural number (a byte value) into a list of 8 Bits,
        most-significant bit first.
        """
        liste = []
        # Collect bits least-significant first...
        while naturel != 0:
            liste.append(Bit.BIT_0 if naturel%2 == 0 else Bit.BIT_1)
            naturel = naturel // 2
        # ...pad to 8 bits with zeros, then reverse to MSB-first order.
        if len(liste) < 8:
            liste += [Bit.BIT_0 for i in range(8-len(liste))]
        liste.reverse()
        return liste

    def recherche_identifiant():
        """
        Read the 4-character identifier from the source stream.
        """
        identifiant = ""
        for i in range(4):
            identifiant = identifiant + chr(int.from_bytes(source.read(1), sys.byteorder))
        return identifiant

    def recherche_stats():
        """
        Read the occurrence counts of the 256 possible bytes from the
        source stream (4 bytes each; only non-zero counts are stored).
        """
        stat = Compteur()
        for i in range(256):
            occurence = int.from_bytes(source.read(4), sys.byteorder)
            if occurence > 0:
                stat.fixer(i.to_bytes(1, sys.byteorder), occurence)
        return stat

    def reconstruction():
        """
        Rebuild the original content of the source stream by walking the
        Huffman tree built from the statistics, one bit at a time.

        Uses `arbre` and `longueur` from the enclosing scope (bound before
        this helper is called).
        """
        octet = source.read(1)
        arbre_courant = arbre
        longueur_courante = 0
        # Stop once `longueur` symbols have been emitted or the stream ends,
        # so trailing padding bits in the last byte are ignored.
        while longueur_courante < longueur and octet:
            liste_bits = naturel_to_list(int.from_bytes(octet, sys.byteorder))
            for i in liste_bits:
                # Emit a symbol when a leaf is reached, then restart from the
                # root BEFORE consuming the current bit.
                if arbre_courant.est_une_feuille:
                    destination.write(arbre_courant.element)
                    arbre_courant = arbre
                    longueur_courante += 1
                if i == Bit.BIT_0:
                    arbre_courant = arbre_courant.fils_gauche
                else:
                    arbre_courant = arbre_courant.fils_droit
            octet = source.read(1)

    yield "Décompression"
    source.seek(0)
    yield "Cas général"
    identifiant = recherche_identifiant()
    # Only proceed when the stream carries the expected "HUFF" identifier.
    if identifiant == "HUFF":
        longueur = int.from_bytes(source.read(4), sys.byteorder)
        yield "Lecture des stats"
        stat = recherche_stats()
        yield "Création de l'arbre de Huffman"
        arbre = arbre_de_huffman(stat)
        yield "Création du fichier decompressé"
        reconstruction()