Example #1
def put(
        self, namespace: str, metadata: Dict[str, Any], bytes_io: BinaryIO,
    ) -> Any:
        """Store a file."""
        subset = dict_subset(metadata, lambda k, v: k in (
            # We are not storing the 'file_name'
            'image_width', 'image_height', 'original_id', 'version'))
        self._convert_values_to_str(subset)
        if hasattr(bytes_io, 'seekable') and bytes_io.seekable():
            bytes_io.seek(0)

        # When botocore.response.StreamingBody is passed in as bytes_io,
        # the bucket.put_object() call below fails with
        # "AttributeError: 'StreamingBody' object has no attribute 'tell'"
        # so we have to read the stream, getting the bytes:
        if not hasattr(bytes_io, 'tell'):
            bytes_io = bytes_io.read()  # type: ignore

        result = self.bucket.put_object(
            Key=self._get_path(namespace, metadata),
            # done automatically by botocore:  ContentMD5=encoded_md5,
            ContentType=metadata['mime_type'],
            ContentLength=metadata['length'], Body=bytes_io, Metadata=subset)
        return result
Example #2
    def get_index(
        self, archive: BinaryIO, version: Optional[Version] = None
    ) -> Dict[str, ComplexIndexEntry]:
        if not version:
            version = self.version() if self.version else self.detect_version()

        offset = 0
        key: Optional[int] = None
        if self.offset_and_key:
            offset, key = self.offset_and_key
        else:
            offset, key = version.find_offset_and_key(archive)
        archive.seek(offset)
        index: Dict[bytes, IndexEntry] = pickle.loads(
            zlib.decompress(archive.read()), encoding="bytes"
        )
        if key is not None:
            normal_index = UnRPA.deobfuscate_index(key, index)
        else:
            normal_index = UnRPA.normalise_index(index)

        return {
            UnRPA.ensure_str_path(path).replace("/", os.sep): data
            for path, data in normal_index.items()
        }
Example #3
def load(file_handle: typing.BinaryIO) -> TSerializable:
    """load(file) -> object

    This function reads a tnetstring from a file and parses it into a
    python object.  The file must support the read() method, and this
    function promises not to read more data than necessary.
    """
    #  Read the length prefix one char at a time.
    #  Note that the netstring spec explicitly forbids padding zeros.
    c = file_handle.read(1)
    if c == b"":  # we want to detect this special case.
        raise ValueError("not a tnetstring: empty file")
    data_length = b""
    while c.isdigit():
        data_length += c
        if len(data_length) > 9:
            raise ValueError("not a tnetstring: absurdly large length prefix")
        c = file_handle.read(1)
    if c != b":":
        raise ValueError("not a tnetstring: missing or invalid length prefix")

    data = file_handle.read(int(data_length))
    data_type = file_handle.read(1)[0]

    return parse(data_type, data)
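A quick sanity check of the length-prefix handling, assuming the module's parse() follows the tnetstring convention where a trailing ',' marks a plain string payload:

import io

# Hypothetical round trip: b"5:hello," is a 5-byte payload followed by the
# type byte ','; depending on parse(), this yields "hello" or b"hello".
value = load(io.BytesIO(b"5:hello,"))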
Example #4
 def __init__(self, archive: BinaryIO, offset: int, length: int, prefix: bytes):
     archive.seek(offset)
     self.name = archive.name
     self.remaining = length
     self.sources = [cast(io.BufferedIOBase, archive)]
     if prefix:
         self.sources.insert(0, cast(io.BufferedIOBase, io.BytesIO(prefix)))
Example #5
 async def save_to(self, name: str, fd: BinaryIO):
     async with ClientSession() as client:
         async with client.post(self.get_url, data=name.encode("utf8")) as resp:
             assert resp.status == 200
             while True:
                 data = await resp.content.read(2 << 20)
                 if not data:
                     break
                 fd.write(data)
Example #6
def read_offset_array(file: BinaryIO, count: int):
    """Read an array of offsets to null-terminated strings from the file."""
    cdmat_offsets = str_read(str(count) + 'i', file)
    arr = [None] * count  # type: List[str]

    for ind, off in enumerate(cdmat_offsets):
        file.seek(off)
        arr[ind] = read_nullstr(file)
    return arr
Example #7
def download_into(session: requests.Session,
                  url: str, file: BinaryIO, process_func=None) -> None:
  r = session.get(url, stream=True)
  length = int(r.headers.get('Content-Length') or 0)
  received = 0
  for chunk in r.iter_content(CHUNK_SIZE):
    received += len(chunk)
    file.write(chunk)
    if process_func:
      process_func(received, length)
  if not length and process_func:
    process_func(received, received)
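A usage sketch, assuming CHUNK_SIZE is defined alongside download_into; the callback receives the bytes received so far and the Content-Length total (or the final byte count when the header is absent):

import requests

def show_progress(received: int, total: int) -> None:
    print(f"\r{received}/{total} bytes", end="")

with requests.Session() as session, open("archive.zip", "wb") as out:
    download_into(session, "https://example.com/archive.zip", out, show_progress)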
Example #8
 def postprocess(self, source: ArchiveView, sink: BinaryIO) -> None:
     if self.details:
         key, amount = self.details
         parts = []
         while amount > 0:
             part = source.read(amount)
             amount -= len(part)
             parts.append(part)
         sink.write(obfuscation_run(b"".join(parts), key))
     else:
         raise Exception("find_offset_and_key must be called before postprocess")
     for segment in iter(source.read1, b""):
         sink.write(segment)
Example #9
def read_nullstr(file: BinaryIO, pos: Optional[int] = None) -> str:
    """Read a null-terminated string from the file."""
    if pos is not None:
        if pos == 0:
            return ''
        file.seek(pos)

    text = []
    while True:
        char = file.read(1)
        if char == b'\0':
            return b''.join(text).decode('ascii')
        if not char:
            raise ValueError('Fell off end of file!')
        text.append(char)
Example #10
 def add_member_stream( self,
     path: PurePosixPath, mtime: ArchiveMTime,
     content_stream: BinaryIO,
 ) -> None:
     content = content_stream.read()
     assert isinstance(content, bytes), type(content)
     return self.add_member_bytes(path, mtime, content)
Example #11
def read_delimited_chunks(infile: BinaryIO, chunk_size: int) -> Generator[bytes, None, None]:
    """Yield the contents of infile in chunk_size pieces ending at newlines.
    The individual pieces, except for the last one, end in newlines and
    are smaller than chunk_size if possible.

    Params:
        infile: stream to read from
        chunk_size: maximum size of each chunk

    Yields:
        chunk: chunk with maximum size of chunk_size if possible
    """
    leftover = b""

    while True:
        new_chunk = infile.read(chunk_size)
        chunks = split_chunks(leftover + new_chunk, chunk_size)
        leftover = b""
        # the last item in chunks has to be combined with the next chunk
        # read from the file because it may not actually stop at a
        # newline and to avoid very small chunks.
        if chunks:
            leftover = chunks[-1]
            chunks = chunks[:-1]
        for chunk in chunks:
            yield chunk

        if not new_chunk:
            if leftover:
                yield leftover
            break
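A small demonstration with an in-memory stream, assuming the split_chunks helper from the same module is available; every yielded piece except possibly the last ends at a newline:

import io

stream = io.BytesIO(b"alpha\nbeta\ngamma\n")
for piece in read_delimited_chunks(stream, chunk_size=8):
    print(piece)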
Example #12
def generate_reports(report_template: Report, infile: BinaryIO, chunk_size: Optional[int],
                     copy_header_line: bool) -> Generator[Report, None, None]:
    """Generate reports from a template and input file, optionally split into chunks.

    If chunk_size is None, a single report is generated with the entire
    contents of infile as the raw data. Otherwise chunk_size should be
    an integer giving the maximum number of bytes in a chunk. The data
    read from infile is then split into chunks of this size at newline
    characters (see read_delimited_chunks). For each of the chunks, this
    function yields a copy of the report_template with that chunk as the
    value of the raw attribute.

    When splitting the data into chunks, if copy_header_line is true,
    the first line of the file is read before chunking and then prepended
    to each of the chunks. This is particularly useful when splitting
    CSV files.

    The infile should be a file-like object. generate_reports uses only
    two methods, readline and read, with readline only called once and
    only if copy_header_line is true. Both methods should return bytes
    objects.

    Params:
        report_template: report used as template for all yielded copies
        infile: stream to read from
        chunk_size: maximum size of each chunk
        copy_header_line: copy the first line of the infile to each chunk

    Yields:
        report: a Report object holding the chunk in the raw field
    """
    if chunk_size is None:
        report = report_template.copy()
        data = infile.read()
        if data:
            report.add("raw", data, overwrite=True)
            yield report
    else:
        header = b""
        if copy_header_line:
            header = infile.readline()
        for chunk in read_delimited_chunks(infile, chunk_size):
            report = report_template.copy()
            report.add("raw", header + chunk, overwrite=True)
            yield report
Example #13
 def put(
     self, namespace: str, metadata: Dict[str, Any], bytes_io: BinaryIO,
 ) -> None:
     """Store a file (``bytes_io``) inside ``namespace``."""
     if bytes_io.tell():
         bytes_io.seek(0)
     outdir = self._dir_of(namespace)
     if not outdir.exists():
         outdir.mkdir(parents=True)  # Create namespace directory as needed
     outfile = outdir / self._get_filename(metadata)
     with open(str(outfile), mode='wb', buffering=MEGABYTE) as writer:
         while True:
             chunk = bytes_io.read(MEGABYTE)
             if chunk:
                 writer.write(chunk)
             else:
                 break
     assert outfile.lstat().st_size == metadata['length']
Example #14
 def _compute_md5(
     self, bytes_io: BinaryIO, metadata: Dict[str, Any],
 ) -> None:
     from hashlib import md5
     two_megabytes = 1048576 * 2
     the_hash = md5()
     the_length = 0
     bytes_io.seek(0)
     while True:
         segment = bytes_io.read(two_megabytes)
         if segment == b'':
             break
         the_length += len(segment)
         the_hash.update(segment)
     metadata['md5'] = the_hash.hexdigest()
     previous_length = metadata.get('length')
     if previous_length is None:
         metadata['length'] = the_length
     else:
         assert previous_length == the_length, "Bug? File lengths {}, {} " \
             "don't match.".format(previous_length, the_length)
     bytes_io.seek(0)  # ...so it can be read again
Example #15
def mktar_from_dockerfile(fileobject: BinaryIO) -> IO:
    """
    Create a zipped tar archive from a Dockerfile
    **Remember to close the file object**
    Args:
        fileobj: a Dockerfile
    Returns:
        a NamedTemporaryFile() object
    """

    f = tempfile.NamedTemporaryFile()
    t = tarfile.open(mode="w:gz", fileobj=f)

    if isinstance(fileobject, BytesIO):
        dfinfo = tarfile.TarInfo("Dockerfile")
        dfinfo.size = len(fileobject.getvalue())
        fileobject.seek(0)
    else:
        dfinfo = t.gettarinfo(fileobj=fileobject, arcname="Dockerfile")

    t.addfile(dfinfo, fileobject)
    t.close()
    f.seek(0)
    return f
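A usage sketch with an in-memory Dockerfile; the returned NamedTemporaryFile must be closed by the caller:

from io import BytesIO

dockerfile = BytesIO(b'FROM python:3.11-slim\nCMD ["python"]\n')
tar_file = mktar_from_dockerfile(dockerfile)
try:
    context = tar_file.read()  # gzipped tar bytes, e.g. for a Docker build API
finally:
    tar_file.close()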
Example #16
def parse_header(source: BinaryIO) -> Tuple[OFXHeaderType, str]:
    """
    Consume source; feed to appropriate class constructor which performs
    validation/type conversion on OFX header.

    Using header, locate/read/decode (but do not parse) OFX data body.

    Returns a 2-tuple of:
        * instance of OFXHeaderV1/OFXHeaderV2 containing parsed data, and
        * decoded text of OFX data body
    """
    # Skip any empty lines at the beginning
    while True:
        # OFX header is read by nice clean machines, not meatbags -
        # should not contain emoji, 漢字, or what have you.
        line = source.readline().decode("ascii")
        if line.strip():
            break

    # If the first non-empty line contains an XML declaration, it's OFX v2
    xml_match = XML_REGEX.match(line)
    if xml_match:
        # OFXv2 spec doesn't require newlines between XML declaration,
        # OFX declaration, and data elements; `line` may or may not
        # contain the latter two.
        #
        # Just rewind, read the whole file (it must be UTF-8 encoded per
        # the spec) and slice the OFX data body from the end of the
        # OFX declaration
        source.seek(0)
        decoded_source = source.read().decode(OFXHeaderV2.codec)
        header, header_end_index = OFXHeaderV2.parse(decoded_source)
        message = decoded_source[header_end_index:]
    else:
        # OFX v1
        rawheader = line + "\n"
        # First line is OFXHEADER; need to read next 8 lines for a fixed
        # total of 9 fields required by OFX v1 spec.
        for n in range(8):
            rawheader += source.readline().decode("ascii")
        header, header_end_index = OFXHeaderV1.parse(rawheader)

        #  Input source stream position has advanced to the beginning of
        #  the OFX body tag soup, which is where subsequent calls
        #  to read()/readlines() will pick up.
        #
        #  Decode the OFX data body according to the encoding declared
        #  in the OFX header
        message = source.read().decode(header.codec)

    return header, message.strip()
Example #17
def iter_nullstr(file: BinaryIO):
    """Read a null-terminated ASCII string from the file.
    
    This continuously yields strings, with empty strings 
    indicting the end of a section.
    """
    chars = bytearray()
    while True:
        char = file.read(1)
        if char == b'\x00':
            string = chars.decode('ascii')
            chars.clear()
            
            if string == ' ':  # Blank strings are saved as ' '
                yield ''
            elif string == '':
                return  # Actual blanks end the array.
            else:
                yield string
        elif char == b'':
            raise Exception('Reached EOF without null-terminator in {}!'.format(bytes(chars)))
        else:
            chars.extend(char)
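A quick check with an in-memory stream; the terminating bare null byte decodes to an empty string and ends the iterator:

import io

data = io.BytesIO(b"first\x00second\x00\x00")
print(list(iter_nullstr(data)))  # ['first', 'second']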
Example #18
def _rewrite_ownership_v0(
    input_file: BinaryIO, new_file: BinaryIO, header: MdvHeader, uid: int, gid: int
) -> None:
    entries_processed = 0
    entry_size = InodeMetadataV0.FORMAT.size
    for _ in range(header.entry_count):
        entries_processed += 1

        entry_data = input_file.read(entry_size)
        if len(entry_data) != entry_size:
            raise Exception("inode metadata table appears truncated")

        entry = InodeMetadataV0.parse(entry_data)
        entry.uid = uid
        entry.gid = gid
        new_file.write(entry.serialize())

    # Copy the remaining file contents as is.  This is normally all 0-filled data
    # that provides space for new entries to be written in the future.
    padding = input_file.read()
    new_file.write(padding)
Example #19
 def from_reader(cls, r: typing.BinaryIO):
     o = FunctionType()
     assert ord(r.read(1)) == 0x60
     o.args = bin_reader.read_bytes(r)
     o.rets = bin_reader.read_bytes(r)
     return o
Example #20
def dump(value: TSerializable, file_handle: typing.BinaryIO) -> None:
    """
    This function dumps a python object as a tnetstring and
    writes it to the given file.
    """
    file_handle.write(dumps(value))
Example #21
 def from_reader(cls, r: typing.BinaryIO):
     o = CustomSection()
     n = bin_reader.read_count(r, 32)
     o.name = r.read(n).decode()
     o.data = bytearray(r.read(-1))
     return o
Example #22
 def from_reader(cls, r: typing.BinaryIO):
     flag = ord(r.read(1))
     minimum = bin_reader.read_count(r)
     maximum = bin_reader.read_count(r) if flag else None
     return Limits(minimum, maximum)
Example #23
 def save_to(self, name: str, fd: BinaryIO):
     resp = requests.post(self.get_url, auth=(self.http_user, self.http_password), data=name)
     assert resp.status_code == 200
     for chunk in resp.iter_content(chunk_size=2 << 20):
         fd.write(chunk)
Example #24
    def _read_sequences(f: BinaryIO, off, count) -> List[MDLSequence]:
        """Split this off to decrease stack in main parse method."""
        f.seek(off)
        sequences = [None] * count  # type: List[MDLSequence]
        for i in range(count):
            start_pos = f.tell()
            (
                base_ptr,
                label_pos,
                act_name_pos,
                flags,
                _,  # Seems to be a pointer.
                act_weight,
                event_count,
                event_pos,
            ) = str_read('8i', f)
            bbox_min = str_readvec(f)
            bbox_max = str_readvec(f)

            # Skip 20 ints, 9 floats to get to keyvalues = 29*4 bytes
            # Then 8 unused ints.
            (
                keyvalue_pos,
                keyvalue_size,
            ) = str_read('116xii32x', f)
            end_pos = f.tell()

            f.seek(start_pos + event_pos)
            events = [None] * event_count  # type: List[SeqEvent]
            for j in range(event_count):
                event_start = f.tell()
                (
                    event_cycle,
                    event_index,
                    event_flags,
                    event_options,
                    event_nameloc,
                ) = str_read('fii64si', f)
                event_end = f.tell()

                # There are two event systems.
                if event_flags == 1 << 10:
                    # New system, name in the file.
                    event_name = read_nullstr(f, event_start + event_nameloc)
                    if event_name.isdigit():
                        try:
                            event_type = ANIM_EVENT_BY_INDEX[int(event_name)]
                        except KeyError:
                            raise ValueError('Unknown event index!')
                    else:
                        try:
                            event_type = ANIM_EVENT_BY_NAME[event_name]
                        except KeyError:
                            # NPC-specific events, declared dynamically.
                            event_type = event_name
                else:
                    # Old system, index.
                    try:
                        event_type = ANIM_EVENT_BY_INDEX[event_index]
                    except KeyError:
                        # raise ValueError('Unknown event index!')
                        print('Unknown: ', event_index, event_options.rstrip(b'\0'))
                        continue

                f.seek(event_end)
                events[j] = SeqEvent(
                    type=event_type,
                    cycle=event_cycle,
                    options=event_options.rstrip(b'\0').decode('ascii')
                )

            if keyvalue_size:
                keyvalues = read_nullstr(f, start_pos + keyvalue_pos)
            else:
                keyvalues = ''

            sequences[i] = MDLSequence(
                label=read_nullstr(f, start_pos + label_pos),
                act_name=read_nullstr(f, start_pos + act_name_pos),
                flags=flags,
                act_weight=act_weight,
                events=events,
                bbox_min=bbox_min,
                bbox_max=bbox_max,
                keyvalues=keyvalues,
            )

            f.seek(end_pos)

        return sequences
Example #25
 def from_reader(cls, r: typing.BinaryIO):
     o = TableType()
     o.elemtype = ord(r.read(1))
     o.limits = Limits.from_reader(r)
     return o
Example #26
def _pipe(input_: BinaryIO) -> BinaryIO:
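    # Buffer the entire stream into memory so the result is independently
    # seekable, carrying over the source's name attribute (None if absent).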
    bio = io.BytesIO(input_.read())
    bio.name = getattr(input_, "name", None)
    return bio
Example #27
def write_materials(  # pylint: disable=too-many-branches
    f: BinaryIO, array_size: int, materials: List[Material]
) -> None:
    mat_count = len(materials)
    data = MATERIAL_HEADER.pack(array_size, mat_count, mat_count, mat_count - 1)
    f.write(data)

    for i, material in enumerate(materials):
        index1 = i + 1
        if index1 >= mat_count:
            index1 = -1

        index2 = i - 1
        if index2 < 0:
            index2 = -1

        flag = MaterialFlag.Always
        if material.unknown:
            flag |= MaterialFlag.Unknown

        if material.texture is not None:
            flag |= MaterialFlag.Textured
            rgb = 0x7FFF
            red = green = blue = 255.0
            texture = material.texture
        elif material.color is not None:
            rgb = 0x0
            red, green, blue = material.color
            texture = 0
        else:  # pragma: no cover
            raise ValueError("neither texture nor color set")

        if material.cycle:
            flag |= MaterialFlag.Cycled
            cycle_ptr = material.cycle.info_ptr
        else:
            cycle_ptr = 0

        data = MATERIAL_INFO.pack(
            material.unk00,
            int(flag),
            rgb,
            red,
            green,
            blue,
            texture,
            0.0,
            0.5,
            0.5,
            material.unk32,
            cycle_ptr,
            index1,
            index2,
        )
        f.write(data)

    for i in range(mat_count, array_size):
        index1 = i - 1
        if index1 < mat_count:
            index1 = -1

        index2 = i + 1
        if index2 >= array_size:
            index2 = -1

        data = MATERIAL_INFO.pack(
            0,
            int(MaterialFlag.Free),
            0x0,
            0.0,
            0.0,
            0.0,
            0,
            0.0,
            0.0,
            0.0,
            0,
            0,
            index1,
            index2,
        )
        f.write(data)

    for material in materials:
        cycle = material.cycle
        if not cycle:
            continue

        cycle_count = len(cycle.textures)
        data = CYCLE_HEADER.pack(
            1 if cycle.unk00 else 0,
            cycle.unk04,
            0,
            cycle.unk12,
            cycle_count,
            cycle_count,
            cycle.data_ptr,
        )
        f.write(data)

        data = Struct(f"<{cycle_count}I").pack(*cycle.textures)
        f.write(data)
Example #28
 def from_stream(f: BinaryIO,
                 width: int = 16,
                 height: int = 16) -> Image.Image:
     return Image.frombytes("LA", (width * 16, height * 16),
                            f.read(width * height * 64), "sprite")
Example #29
 def from_stream(f: BinaryIO,
                 width: int = 16,
                 height: int = 16) -> Image.Image:
     return Image.frombytes("L", (width * 8, height * 8),
                            f.read(width * height * 8), "tile")
Example #30
 def stuff(a: BinaryIO) -> bytes:
     return a.readline()
Example #31
 def from_reader(cls, r: typing.BinaryIO):
     o = GlobalType()
     o.valtype = ord(r.read(1))
     o.mut = ord(r.read(1)) == 1
     return o
Example #32
def rewrite_rrd(
    output_file: BinaryIO,
    input_file: TextIO,
    requested_step: int,
    requested_heartbeat: int,
):
    # Determine the step of the input RRD dump
    step_re = re.compile(r"<step>(\d*)")
    try:
        for line in input_file:
            match = step_re.search(line)
            if match:
                input_step = int(match.group(1))
                break
        else:
            print("Error, unable to find step in existing RRD dump!",
                  file=sys.stderr)
            sys.exit(-1)
        input_file.seek(0)
    except Exception as err:
        print(f"Exception while finding step in existing RRD dump: {err}",
              file=sys.stderr)
        sys.exit(-2)
    # Check which step (user-supplied or existing) is larger, verify the math
    # checks out with the program's assumptions, and calculate the required
    # amount of row skipping/duplication.
    going_up = input_step > requested_step
    if going_up:
        rowrepeat = input_step // requested_step
        if input_step % requested_step:
            print("Error: Requested step and input step are not factors",
                  file=sys.stderr)
            sys.exit(-3)
    else:
        rowrepeat = requested_step // input_step
        if requested_step % input_step:
            print(
                "Error: Existing step and requested step are not factors",
                file=sys.stderr,
            )
            sys.exit(-4)
    rrd_in_db = False
    skip = 0
    idx = 0
    for line in input_file:
        if rrd_in_db:
            if "</database>" in line:
                output_file.write(line.encode())
                skip = 0
                rrd_in_db = False
                continue
            elif "<row>" in line:
                if going_up:
                    for _ in range(rowrepeat):
                        output_file.write(line.encode())
                else:
                    if skip == rowrepeat:
                        output_file.write(line.encode())
                        skip = 0
                    else:
                        skip += 1
            else:
                output_file.write(line.encode())
        elif "<step>" in line:
            output_file.write(f"<step>{requested_step}</step>\n".encode())
        elif "minimal_heartbeat" in line:
            output_file.write(
                f"<minimal_heartbeat>{requested_heartbeat}</minimal_heartbeat>\n"
                .encode())
        elif "<database>" in line:
            output_file.write(line.encode())
            rrd_in_db = True
            continue
        else:
            output_file.write(line.encode())
Example #33
def str_readvec(file: BinaryIO) -> Vec:
    """Read a vector from a file."""
    return Vec(ST_VEC.unpack(file.read(ST_VEC.size)))
Example #34
def forward_stream(src_stream: BinaryIO, dst_stream: BinaryIO,
                   rank: str) -> None:
    for line in iter(src_stream.readline, b""):
        line = f"[rank={rank}] ".encode() + line
        os.write(dst_stream.fileno(), line)
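A wiring sketch, assuming a POSIX system where src_stream is a binary subprocess pipe and the destination exposes a real file descriptor:

import subprocess
import sys

proc = subprocess.Popen(["echo", "hello"], stdout=subprocess.PIPE)
forward_stream(proc.stdout, sys.stdout.buffer, rank="0")
proc.wait()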
Example #35
    def __init__(self, nand_fp: BinaryIO, g_stat: os.stat_result, dev: bool = False, readonly: bool = False,
                 otp: bytes = None, cid: AnyStr = None):
        self.crypto = CryptoEngine(dev=dev)

        self.g_stat = {'st_ctime': int(g_stat.st_ctime), 'st_mtime': int(g_stat.st_mtime),
                       'st_atime': int(g_stat.st_atime)}

        nand_fp.seek(0x100)  # screw the signature
        ncsd_header = nand_fp.read(0x100)
        if ncsd_header[0:4] != b'NCSD':
            exit('NCSD magic not found, is this a real Nintendo 3DS NAND image?')
        media_id = ncsd_header[0x8:0x10]
        if media_id != b'\0' * 8:
            exit('Media ID not all-zero, is this a real Nintendo 3DS NAND image?')

        # check for essential.exefs
        nand_fp.seek(0x200)
        try:
            exefs = ExeFSReader.load(nand_fp)
        except InvalidExeFSError:
            exefs = None

        otp_data = None
        if otp:
            try:
                with open(otp, 'rb') as f:
                    otp_data = f.read(0x200)
            except Exception:
                print(f'Failed to open and read given OTP ({otp}).\n')
                print_exc()
                exit(1)

        else:
            if exefs is None:
                exit('OTP not found, provide with --otp or embed essentials backup with GodMode9')
            else:
                if 'otp' in exefs.entries:
                    nand_fp.seek(exefs['otp'].offset + 0x400)
                    otp_data = nand_fp.read(exefs['otp'].size)
                else:
                    exit('"otp" not found in essentials backup, update with GodMode9 or provide with --otp')

        self.crypto.setup_keys_from_otp(otp_data)

        def generate_ctr():
            print('Attempting to generate Counter for CTR/TWL areas. If errors occur, provide the CID manually.')

            # -------------------------------------------------- #
            # attempt to generate CTR Counter
            nand_fp.seek(0xB9301D0)
            # these blocks are assumed to be entirely 00, so no need to xor anything
            ctrn_block_0x1d = nand_fp.read(0x10)
            ctrn_block_0x1e = nand_fp.read(0x10)
            for ks in (Keyslot.CTRNANDOld, Keyslot.CTRNANDNew):
                ctr_counter_offs = self.crypto.create_ecb_cipher(ks).decrypt(ctrn_block_0x1d)
                ctr_counter = int.from_bytes(ctr_counter_offs, 'big') - 0xB9301D

                # try the counter
                out = self.crypto.create_ctr_cipher(ks, ctr_counter + 0xB9301E).decrypt(ctrn_block_0x1e)
                if out == b'\0' * 16:
                    print('Counter for CTR area automatically generated.')
                    self.ctr = ctr_counter
                    break
            else:
                print('Counter could not be generated for CTR area. Related virtual files will not appear.')
                self.ctr = None

            # -------------------------------------------------- #
            # attempt to generate TWL Counter
            nand_fp.seek(0x1C0)
            twln_block_0x1c = readbe(nand_fp.read(0x10))
            twl_blk_xored = twln_block_0x1c ^ 0x18000601A03F97000000A97D04000004
            twl_counter_offs = self.crypto.create_ecb_cipher(0x03).decrypt(twl_blk_xored.to_bytes(0x10, 'little'))
            twl_counter = int.from_bytes(twl_counter_offs, 'big') - 0x1C

            # try the counter
            twln_block_0x1d = nand_fp.read(0x10)
            out = self.crypto.create_ctr_cipher(0x03, twl_counter + 0x1D).decrypt(twln_block_0x1d)
            if out == b'\x8e@\x06\x01\xa0\xc3\x8d\x80\x04\x00\xb3\x05\x01\x00\x00\x00':
                print('Counter for TWL area automatically generated.')
                self.ctr_twl = twl_counter
            else:
                print('Counter could not be generated for TWL area. Related virtual files will not appear.')
                self.ctr_twl = None

        cid_data = None
        if cid:
            try:
                with open(cid, 'rb') as f:
                    cid_data = f.read(0x200)
            except Exception:
                print(f'Failed to open and read given CID ({cid}).')
                print('If you want to attempt Counter generation, do not provide a CID path.\n')
                print_exc()
                exit(1)

        else:
            if exefs is None:
                generate_ctr()
            else:
                if 'nand_cid' in exefs.entries:
                    nand_fp.seek(exefs['nand_cid'].offset + 0x400)
                    cid_data = nand_fp.read(exefs['nand_cid'].size)
                else:
                    print('"nand_cid" not found in essentials backup, update with GodMode9 or provide with --cid')
                    generate_ctr()

        if cid_data:
            self.ctr = readbe(sha256(cid_data).digest()[0:16])
            self.ctr_twl = readle(sha1(cid_data).digest()[0:16])

        if not (self.ctr or self.ctr_twl):
            exit("Couldn't generate Counter for both CTR/TWL. "
                 "Make sure the OTP is correct, or provide the CID manually.")

        nand_fp.seek(0, 2)
        raw_nand_size = nand_fp.tell()

        self.real_nand_size = nand_size[readle(ncsd_header[4:8])]

        self.files = {'/nand_hdr.bin': {'size': 0x200, 'offset': 0, 'keyslot': 0xFF, 'type': 'raw'},
                      '/nand.bin': {'size': raw_nand_size, 'offset': 0, 'keyslot': 0xFF, 'type': 'raw'},
                      '/nand_minsize.bin': {'size': self.real_nand_size, 'offset': 0, 'keyslot': 0xFF, 'type': 'raw'}}

        nand_fp.seek(0x12C00)
        keysect_enc = nand_fp.read(0x200)
        if len(set(keysect_enc)) != 1:
            keysect_dec = self.crypto.create_ecb_cipher(0x11).decrypt(keysect_enc)
            # i'm cheating here by putting the decrypted version in memory and
            #   not reading from the image every time. but it's not AES-CTR so
            #   f**k that.
            self.files['/sector0x96.bin'] = {'size': 0x200, 'offset': 0x12C00, 'keyslot': 0x11, 'type': 'keysect',
                                             'content': keysect_dec}

        ncsd_part_fstype = ncsd_header[0x10:0x18]
        ncsd_part_crypttype = ncsd_header[0x18:0x20]
        ncsd_part_raw = ncsd_header[0x20:0x60]
        ncsd_partitions = [[readle(ncsd_part_raw[i:i + 4]) * 0x200,
                            readle(ncsd_part_raw[i + 4:i + 8]) * 0x200] for i in range(0, 0x40, 0x8)]

        # including padding for crypto
        if self.ctr_twl:
            twl_mbr = self.crypto.create_ctr_cipher(Keyslot.TWLNAND,
                                                    self.ctr_twl + 0x1B).decrypt(ncsd_header[0xB0:0x100])[0xE:0x50]
            if twl_mbr[0x40:0x42] == b'\x55\xaa':
                twl_partitions = [[readle(twl_mbr[i + 8:i + 12]) * 0x200,
                                   readle(twl_mbr[i + 12:i + 16]) * 0x200] for i in range(0, 0x40, 0x10)]
            else:
                twl_partitions = None

            self.files['/twlmbr.bin'] = {'size': 0x42, 'offset': 0x1BE, 'keyslot': Keyslot.TWLNAND, 'type': 'twlmbr',
                                         'content': twl_mbr}
        else:
            twl_partitions = None

        # then actually parse the partitions to create files
        firm_idx = 0
        for idx, part in enumerate(ncsd_partitions):
            if ncsd_part_fstype[idx] == 0:
                continue
            print(f'ncsd idx:{idx} fstype:{ncsd_part_fstype[idx]} crypttype:{ncsd_part_crypttype[idx]} '
                  f'offset:{part[0]:08x} size:{part[1]:08x} ', end='')
            if idx == 0:
                if self.ctr_twl:
                    self.files['/twl_full.img'] = {'size': part[1], 'offset': part[0], 'keyslot': Keyslot.TWLNAND,
                                                   'type': 'enc'}
                    print('/twl_full.img')
                    if twl_partitions:
                        twl_part_fstype = 0
                        for t_idx, t_part in enumerate(twl_partitions):
                            if t_part[0] != 0:
                                print(f'twl  idx:{t_idx}                      '
                                      f'offset:{t_part[0]:08x} size:{t_part[1]:08x} ', end='')
                                if twl_part_fstype == 0:
                                    self.files['/twln.img'] = {'size': t_part[1], 'offset': t_part[0],
                                                               'keyslot': Keyslot.TWLNAND, 'type': 'enc'}
                                    print('/twln.img')
                                    twl_part_fstype += 1
                                elif twl_part_fstype == 1:
                                    self.files['/twlp.img'] = {'size': t_part[1], 'offset': t_part[0],
                                                               'keyslot': Keyslot.TWLNAND, 'type': 'enc'}
                                    print('/twlp.img')
                                    twl_part_fstype += 1
                                else:
                                    self.files[f'/twl_unk{twl_part_fstype}.img'] = {'size': t_part[1],
                                                                                    'offset': t_part[0],
                                                                                    'keyslot': Keyslot.TWLNAND,
                                                                                    'type': 'enc'}
                                    print(f'/twl_unk{twl_part_fstype}.img')
                                    twl_part_fstype += 1
                else:
                    print('<ctr_twl not set>')

            elif self.ctr:
                if ncsd_part_fstype[idx] == 3:
                    # boot9 hardcoded this keyslot, i'll do this properly later
                    self.files[f'/firm{firm_idx}.bin'] = {'size': part[1], 'offset': part[0], 'keyslot': Keyslot.FIRM,
                                                          'type': 'enc'}
                    print(f'/firm{firm_idx}.bin')
                    firm_idx += 1

                elif ncsd_part_fstype[idx] == 1 and ncsd_part_crypttype[idx] >= 2:
                    ctrnand_keyslot = Keyslot.CTRNANDOld if ncsd_part_crypttype[idx] == 2 else Keyslot.CTRNANDNew
                    self.files['/ctrnand_full.img'] = {'size': part[1], 'offset': part[0], 'keyslot': ctrnand_keyslot,
                                                       'type': 'enc'}
                    print('/ctrnand_full.img')
                    nand_fp.seek(part[0])
                    iv = self.ctr + (part[0] >> 4)
                    ctr_mbr = self.crypto.create_ctr_cipher(ctrnand_keyslot, iv).decrypt(
                        nand_fp.read(0x200))[0x1BE:0x200]
                    if ctr_mbr[0x40:0x42] == b'\x55\xaa':
                        ctr_partitions = [[readle(ctr_mbr[i + 8:i + 12]) * 0x200,
                                           readle(ctr_mbr[i + 12:i + 16]) * 0x200]
                                          for i in range(0, 0x40, 0x10)]
                        ctr_part_fstype = 0
                        for c_idx, c_part in enumerate(ctr_partitions):
                            if c_part[0] != 0:
                                print(f'ctr  idx:{c_idx}                      offset:{part[0] + c_part[0]:08x} '
                                      f'size:{c_part[1]:08x} ', end='')
                                if ctr_part_fstype == 0:
                                    self.files['/ctrnand_fat.img'] = {'size': c_part[1], 'offset': part[0] + c_part[0],
                                                                      'keyslot': ctrnand_keyslot, 'type': 'enc'}
                                    print('/ctrnand_fat.img')
                                    ctr_part_fstype += 1
                                else:
                                    self.files[f'/ctr_unk{ctr_part_fstype}.img'] = {'size': c_part[1],
                                                                                    'offset': part[0] + c_part[0],
                                                                                    'keyslot': ctrnand_keyslot,
                                                                                    'type': 'enc'}
                                    print(f'/ctr_unk{ctr_part_fstype}.img')
                                    ctr_part_fstype += 1

                elif ncsd_part_fstype[idx] == 4:
                    self.files['/agbsave.bin'] = {'size': part[1], 'offset': part[0], 'keyslot': Keyslot.AGB,
                                                  'type': 'enc'}
                    print('/agbsave.bin')

            else:
                print('<ctr not set>')

        self.readonly = readonly

        # GM9 bonus drive
        if raw_nand_size != self.real_nand_size:
            nand_fp.seek(self.real_nand_size)
            bonus_drive_header = nand_fp.read(0x200)
            if bonus_drive_header[0x1FE:0x200] == b'\x55\xAA':
                self.files['/bonus.img'] = {'size': raw_nand_size - self.real_nand_size, 'offset': self.real_nand_size,
                                            'keyslot': 0xFF, 'type': 'raw'}

        self.f = nand_fp

        if exefs is not None:
            exefs_size = sum(roundup(x.size, 0x200) for x in exefs.entries.values()) + 0x200
            self.files['/essential.exefs'] = {'size': exefs_size, 'offset': 0x200, 'keyslot': 0xFF, 'type': 'raw'}
            try:
                exefs_vfp = _c.VirtualFileWrapper(self, '/essential.exefs', exefs_size)
                self.exefs_fuse = ExeFSMount(exefs_vfp, g_stat=g_stat)
                self.exefs_fuse.init('/')
                self._essentials_mounted = True
            except Exception as e:
                print(f'Failed to mount essential.exefs: {type(e).__name__}: {e}')
Example #36
def read_bytes(buffer: BinaryIO) -> bytes:
    len_ = int32Serializer.read(buffer)
    return buffer.read(len_)
Example #37
    async def build(
        self,
        *,
        remote: str = None,
        fileobj: BinaryIO = None,
        path_dockerfile: str = None,
        tag: str = None,
        quiet: bool = False,
        nocache: bool = False,
        buildargs: Mapping = None,
        pull: bool = False,
        rm: bool = True,
        forcerm: bool = False,
        labels: Mapping = None,
        stream: bool = False,
        encoding: str = None
    ) -> Mapping:
        """
        Build an image given a remote Dockerfile
        or a file object with a Dockerfile inside

        Args:
            path_dockerfile: path within the build context to the Dockerfile
            remote: a Git repository URI or HTTP/HTTPS context URI
            quiet: suppress verbose build output
            nocache: do not use the cache when building the image
            rm: remove intermediate containers after a successful build
            pull: downloads any updates to the FROM image in Dockerfiles
            encoding: set `Content-Encoding` for the file object you send
            forcerm: always remove intermediate containers, even upon failure
            labels: arbitrary key/value labels to set on the image
            fileobj: a tar archive compressed or not
        """

        local_context = None

        headers = {}

        params = {
            "t": tag,
            "rm": rm,
            "q": quiet,
            "pull": pull,
            "remote": remote,
            "nocache": nocache,
            "forcerm": forcerm,
            "dockerfile": path_dockerfile,
        }

        if remote is None and fileobj is None:
            raise ValueError("You need to specify either remote or fileobj")

        if fileobj and remote:
            raise ValueError("You cannot specify both fileobj and remote")

        if fileobj and not encoding:
            raise ValueError("You need to specify an encoding")

        if fileobj:
            local_context = fileobj.read()
            headers["content-type"] = "application/x-tar"

        if fileobj and encoding:
            headers["Content-Encoding"] = encoding

        if buildargs:
            params.update({"buildargs": json.dumps(buildargs)})

        if labels:
            params.update({"labels": json.dumps(labels)})

        response = await self.docker._query(
            "build",
            "POST",
            params=clean_map(params),
            headers=headers,
            data=local_context,
        )

        return await json_stream_result(response, stream=stream)
Example #38
def read_nullable_bytes(buffer: BinaryIO) -> Optional[bytes]:
    len_ = int32Serializer.read(buffer)
    if len_ == -1:
        return None
    return buffer.read(len_)
Example #39
def read_string(buffer: BinaryIO) -> str:
    len_ = int16Serializer.read(buffer)
    return buffer.read(len_).decode("utf-8")
Example #40
def read_nullable_string(buffer: BinaryIO) -> Optional[str]:
    len_ = int16Serializer.read(buffer)
    if len_ == -1:
        return None
    return buffer.read(len_).decode("utf-8")
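These readers share a length-prefix framing; the sketch below assumes the int16/int32 serializers unpack big-endian values (an assumption, the serializers are not shown):

import io
import struct

# 2-byte big-endian length, then the payload; a length of -1 encodes None
# in the nullable variants.
buf = io.BytesIO(struct.pack(">h", 5) + b"hello")
# Under that assumption, read_string(buf) would return "hello".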
Example #41
 def from_reader(cls, r: typing.BinaryIO):
     o = Export()
     o.name = bin_reader.read_bytes(r, 32).decode()
     o.kind = ord(r.read(1))
     o.desc = bin_reader.read_count(r, 32)
     return o
Example #42
 def read(self, buffer: BinaryIO) -> T:
     return self._struct.unpack(buffer.read(self._struct.size))[0]
 def read(cls: Type["InodeMetadataV0"], input_file: BinaryIO) -> "InodeMetadataV0":
     data = input_file.read(cls.FORMAT.size)
     if len(data) != cls.FORMAT.size:
         raise Exception(f"short inode metadata table header: size={len(data)}")
     return cls.parse(data)
Example #44
 def loadContent(self, nodeId: str, outputFile: BinaryIO):
     opUrl = 'nodes/' + quote(nodeId) + '/content'
     self.__client.get('alfresco', opUrl, responseHandler=lambda response: outputFile.write(response.content))
 def read(cls: Type["MdvHeader"], input_file: BinaryIO) -> "MdvHeader":
     data = input_file.read(cls.FORMAT.size)
     return cls.parse(data)
Example #46
def write_uint32(file: BinaryIO, value):
    """write uint32 value in 4 bytes."""
    b = pack('<L', value)
    file.write(b)
Example #47
 def from_reader(cls, r: typing.BinaryIO):
     o = Locals()
     o.n = bin_reader.read_count(r, 32)
     o.valtype = ord(r.read(1))
     return o
Example #48
def raw_hexdump_stream(stream: typing.BinaryIO) -> typing.Iterable[str]:
	line = stream.read(16)
	while line:
		yield " ".join(f"{byte:02x}" for byte in line)
		line = stream.read(16)
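A usage sketch over a small in-memory buffer, printing 16 bytes per line:

import io

for line in raw_hexdump_stream(io.BytesIO(bytes(range(32)))):
    print(line)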
Example #49
def find_offset(archive: BinaryIO) -> int:
    return obfuscation_offset(archive.readline().split()[-1])
Example #50
 def parse(cls, f: BinaryIO) -> Any:
     read_bytes = f.read(16)
     assert len(read_bytes) == 16
     n = int.from_bytes(read_bytes, "big", signed=False)
     assert n <= (2**128) - 1 and n >= 0
     return cls(n)
Example #51
def str_read(format, file: BinaryIO):
    """Read a structure from the file."""
    return unpack(format, file.read(calcsize(format)))
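For example, unpacking two little-endian ints from an in-memory stream:

import io
import struct

a, b = str_read('<ii', io.BytesIO(struct.pack('<ii', 1, 2)))
print(a, b)  # 1 2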
Example #52
 def parse(cls, f: BinaryIO) -> Any:
     read_bytes = f.read(65)
     assert len(read_bytes) == 65
     n = int.from_bytes(read_bytes, "big", signed=True)
     assert n <= (2**512) - 1 and n >= -(2**512)
     return cls(n)
Example #53
 def _read(self, fp: BinaryIO, buffer: BytesIO, start_pos: int) -> None:
     self._start_pos = start_pos
     fp.seek(self._start_pos)
     self._decode_header(fp, buffer)
Example #54
 def find_offset_and_key(self, archive: BinaryIO) -> Tuple[int, Optional[int]]:
     line = archive.readline()
     parts = line.split()
     offset = int(parts[1], 16)
     key = int(parts[2], 16)
     return offset, key
Example #55
    def _load(self, f: BinaryIO):
        """Read data from the MDL file."""
        assert f.tell() == 0, "Doesn't begin at start?"
        if f.read(4) != b'IDST':
            raise ValueError('Not a model!')
        (
            self.version,
            name,
            file_len,
            # 4 bytes are unknown...
        ) = str_read('i 4x 64s i', f)

        if not 44 <= self.version <= 49:
            raise ValueError('Unknown MDL version {}!'.format(self.version))

        self.name = name.rstrip(b'\0').decode('ascii')
        self.eye_pos = str_readvec(f)
        self.illum_pos = str_readvec(f)
        # Approx dimensions
        self.hull_min = str_readvec(f)
        self.hull_max = str_readvec(f)
        
        self.view_min = str_readvec(f)
        self.view_max = str_readvec(f)

        # Break up the reading a bit to limit the stack size.
        (
            flags,

            bone_count,
            bone_off,

            bone_controller_count, bone_controller_off,

            hitbox_count, hitbox_off,
            anim_count, anim_off,
            sequence_count, sequence_off,
        ) = str_read('11I', f)

        self.flags = Flags(flags)

        (
            activitylistversion, eventsindexed,

            texture_count, texture_offset,
            cdmat_count, cdmat_offset,
            
            skinref_count, skinref_ind, skinfamily_count,
            
            bodypart_count, bodypart_offset,
            attachment_count, attachment_offset,
        ) = str_read('13i', f)

        (
            localnode_count,
            localnode_index,
            localnode_name_index,
         
            # mstudioflexdesc_t
            flexdesc_count,
            flexdesc_index,
         
            # mstudioflexcontroller_t
            flexcontroller_count,
            flexcontroller_index,
         
            # mstudioflexrule_t
            flexrules_count,
            flexrules_index,
         
            # IK probably refers to inverse kinematics
            # mstudioikchain_t
            ikchain_count,
            ikchain_index,
         
            # Information about any "mouth" on the model for speech animation
            # More than one sounds pretty creepy.
            # mstudiomouth_t
            mouths_count, 
            mouths_index,
         
            # mstudioposeparamdesc_t
            localposeparam_count,
            localposeparam_index,
        ) = str_read('15I', f)

        # VDC:
        # For anyone trying to follow along, as of this writing,
        # the next "surfaceprop_index" value is at position 0x0134 (308)
        # from the start of the file.
        assert f.tell() == 308, 'Offset wrong? {} != 308 {}'.format(f.tell(), f)

        (
            # Surface property value (single null-terminated string)
            surfaceprop_index,
         
            # Unusual: In this one index comes first, then count.
            # Key-value data is a series of strings. If you can't find
            # what you're interested in, check the associated PHY file as well.
            keyvalue_index,
            keyvalue_count,	
         
            # More inverse-kinematics
            # mstudioiklock_t
            iklock_count,
            iklock_index,
        ) = str_read('5I', f)

        (
            self.mass,  # Mass of object (float)
            self.contents,  # ??

            # Other models can be referenced for re-used sequences and
            # animations
            # (See also: The $includemodel QC option.)
            # mstudiomodelgroup_t
            includemodel_count,
            includemodel_index,

            # In-engine, this is a pointer to the combined version of this +
            # included models. In the file it's useless.
            virtualModel,

            # mstudioanimblock_t
            animblocks_name_index,
            animblocks_count,
            animblocks_index,

            animblockModel,  # Placeholder for mutable-void*

            # Points to a series of bytes?
            bonetablename_index,

            vertex_base,  # Placeholder for void*
            offset_base,  # Placeholder for void*
        ) = str_read('f 11I', f)

        (
            # Used with $constantdirectionallight from the QC
            # Model should have flag #13 set if enabled
            directionaldotproduct,  # byte

            # Preferred rather than clamped
            rootLod,  # byte

            # 0 means any allowed, N means Lod 0 -> (N-1)
            self.numAllowedRootLods,  # byte

            #unknown byte;
            #unknown int;

            # mstudioflexcontrollerui_t
            flexcontrollerui_count,
            flexcontrollerui_index,
        ) = str_read('3b 5x 2I', f)

        # Build CDMaterials data
        f.seek(cdmat_offset)
        self.cdmaterials = read_offset_array(f, cdmat_count)
        
        for ind, cdmat in enumerate(self.cdmaterials):
            cdmat = cdmat.replace('\\', '/')
            if cdmat[-1:] != '/':
                cdmat += '/'
            self.cdmaterials[ind] = cdmat

        # All models fallback to checking the texture at a root folder.
        if '/' not in self.cdmaterials:
            self.cdmaterials.append('/')
        
        # Build texture data
        f.seek(texture_offset)
        self.textures = [None] * texture_count  # type: List[Tuple[str, int, int]]
        tex_temp = [None] * texture_count  # type: List[Tuple[int, Tuple[int, int, int]]]
        for tex_ind in range(texture_count):
            tex_temp[tex_ind] = (
                f.tell(),
                # Texture data:
                # int: offset to the string, from start of struct.
                # int: flags - appears to solely indicate 'teeth' materials...
                # int: used, whatever that means.
                # 4 unused bytes.
                # 2 4-byte pointers in studiomdl to the material class, for
                #      server and client - shouldn't be in the file...
                # 40 bytes of unused space (for expansion...)
                str_read('iii 4x 8x 40x', f)
            )
        for tex_ind, (offset, data) in enumerate(tex_temp):
            name_offset, flags, used = data
            self.textures[tex_ind] = (
                read_nullstr(f, offset + name_offset),
                flags,
                used,
            )

        f.seek(surfaceprop_index)
        self.surfaceprop = read_nullstr(f)

        if keyvalue_count:
            self.keyvalues = read_nullstr(f, keyvalue_index)
        else:
            self.keyvalues = ''

        f.seek(includemodel_index)
        self.included_models = [None] * includemodel_count  # type: List[IncludedMDL]
        for i in range(includemodel_count):
            pos = f.tell()
            # This is two offsets from the start of the structures.
            lbl_pos, filename_pos = str_read('II', f)
            self.included_models[i] = IncludedMDL(
                read_nullstr(f, pos + lbl_pos) if lbl_pos else '',
                read_nullstr(f, pos + filename_pos) if filename_pos else '',
            )
            # Then return to after that struct - 4 bytes * 2.
            f.seek(pos + 4 * 2)

        self.sequences = self._read_sequences(f, sequence_off, sequence_count)
Example #56
    def from_reader(cls, r: typing.BinaryIO) -> 'Module':
        if list(r.read(4)) != [0x00, 0x61, 0x73, 0x6d]:
            raise Exception('Invalid magic number!')
        if list(r.read(4)) != [0x01, 0x00, 0x00, 0x00]:
            raise Exception('Invalid version!')
        mod = Module()
        while True:
            section_id_byte = r.read(1)
            if not section_id_byte:
                break
            section_id = ord(section_id_byte)
            n = bin_reader.read_count(r, 32)
            data = r.read(n)
            if len(data) != n:
                raise Exception('Invalid section size!')
            if section_id == bin_format.custom_section:
                custom_section = CustomSection.from_reader(io.BytesIO(data))
                logger.infoln(
                    f'{bin_format.section[section_id][0]:>9} {custom_section.name}'
                )
            elif section_id == bin_format.type_section:
                type_section = TypeSection.from_reader(io.BytesIO(data))
                for i, e in enumerate(type_section.vec):
                    logger.infoln(
                        f'{bin_format.section[section_id][0]:>9}[{i}] {e}')
                mod.types = type_section.vec
            elif section_id == bin_format.import_section:
                import_section = ImportSection.from_reader(io.BytesIO(data))
                for i, e in enumerate(import_section.vec):
                    logger.infoln(
                        f'{bin_format.section[section_id][0]:>9}[{i}] ..{e}')
                mod.imports = import_section.vec
            elif section_id == bin_format.function_section:
                function_section = FunctionSection.from_reader(
                    io.BytesIO(data))
                num_imported_funcs = sum(1 for _ in filter(
                    lambda ins: ins.kind == bin_format.extern_func,
                    mod.imports))
                for i, e in enumerate(function_section.vec):
                    logger.infoln(
                        f'{bin_format.section[section_id][0]:>9}[{i}] func={num_imported_funcs + i} sig={e}'
                    )
            elif section_id == bin_format.table_section:
                table_section = TableSection.from_reader(io.BytesIO(data))
                for i, e in enumerate(table_section.vec):
                    logger.infoln(
                        f'{bin_format.section[section_id][0]:>9}[{i}] {e}')
                mod.tables = table_section.vec
            elif section_id == bin_format.memory_section:
                memory_section = MemorySection.from_reader(io.BytesIO(data))
                for i, e in enumerate(memory_section.vec):
                    logger.infoln(
                        f'{bin_format.section[section_id][0]:>9}[{i}] {e}')
                mod.mems = memory_section.vec
            elif section_id == bin_format.global_section:
                global_section = GlobalSection.from_reader(io.BytesIO(data))
                for i, e in enumerate(global_section.vec):
                    logger.infoln(
                        f'{bin_format.section[section_id][0]:>9}[{i}] {e}')
                mod.globals = global_section.vec
            elif section_id == bin_format.export_section:
                export_section = ExportSection.from_reader(io.BytesIO(data))
                for i, e in enumerate(export_section.vec):
                    logger.infoln(
                        f'{bin_format.section[section_id][0]:>9}[{i}] {e}')
                mod.exports = export_section.vec
            elif section_id == bin_format.start_section:
                start_section = StartSection.from_reader(io.BytesIO(data))
                logger.infoln(
                    f'{bin_format.section[section_id][0]:>12} {start_section.start_function}'
                )
                mod.start = start_section.start_function.funcidx
            elif section_id == bin_format.element_section:
                element_section = ElementSection.from_reader(io.BytesIO(data))
                for i, e in enumerate(element_section.vec):
                    logger.infoln(
                        f'{bin_format.section[section_id][0]:>9}[{i}] {e}')
                mod.elem = element_section.vec
            elif section_id == bin_format.code_section:
                code_section = CodeSection.from_reader(io.BytesIO(data))

                def printex(instrs: typing.List[Instruction], prefix=0):
                    for e in instrs:
                        a = f'           | {" " * prefix}{bin_format.opcodes[e.code][0]}'
                        if e.code in [
                                bin_format.block, bin_format.loop,
                                bin_format.if_
                        ]:
                            logger.infoln(
                                f'{a} {bin_format.blocktype[e.immediate_arguments][0]}'
                            )
                            prefix += 2
                        elif e.code == bin_format.end:
                            prefix -= 2
                            a = f'           | {" " * prefix}{bin_format.opcodes[e.code][0]}'
                            logger.infoln(f'{a}')
                        elif e.immediate_arguments is None:
                            logger.infoln(f'{a}')
                        elif isinstance(e.immediate_arguments, list):
                            logger.infoln(
                                f'{a} {" ".join([str(e) for e in e.immediate_arguments])}'
                            )
                        else:
                            logger.infoln(f'{a} {e.immediate_arguments}')

                num_imported_funcs = sum(1 for _ in filter(
                    lambda ins: ins.kind == bin_format.extern_func,
                    mod.imports))
                for i, e in enumerate(code_section.vec):
                    logger.infoln(
                        f'{bin_format.section[section_id][0]:>9}[{i}] func={num_imported_funcs + i} {e}'
                    )
                    # printex(e.expr.data)
                    func = Function()
                    func.typeidx = function_section.vec[i]
                    func.locals = e.locals
                    func.expr = e.expr
                    mod.funcs.append(func)
            elif section_id == bin_format.data_section:
                data_section = DataSection.from_reader(io.BytesIO(data))
                for i, e in enumerate(data_section.vec):
                    logger.infoln(
                        f'{bin_format.section[section_id][0]:>9}[{i}] {e}')
                mod.data = data_section.vec
            else:
                raise Exception('Invalid section id!')
        logger.infoln('')
        return mod
Example #57
def read_bytes_from_file(fptr: BinaryIO) -> bytes:
    data_len = read_varint_from_file(fptr)
    data = fptr.read(data_len)
    if len(data) < data_len:
        raise ValueError('File end before read completed.')
    return data
Example #58
 def set_private_key_from_file(self, file: BinaryIO):
     "Load private key from file"
     self.set_private_key(file.read())
Example #59
 def stuff(a: BinaryIO) -> bytes:
     return a.readline()
Example #60
def stream_file(file_to_stream: BinaryIO, chunk_size=1024):
    while True:
        data_read = file_to_stream.read(chunk_size)
        if not data_read:
            break
        yield data_read
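A typical consumer iterates the generator, e.g. to feed a chunked HTTP response; handle_chunk below is a hypothetical callback:

with open("payload.bin", "rb") as fh:
    for chunk in stream_file(fh, chunk_size=4096):
        handle_chunk(chunk)  # hypothetical consumer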