Example #1
File: misc.py  Project: esc777690/LGK-Hub
 def getFragment(self, frag):
     stream = frag.getSubIStream()
     ministream = guessParser(stream)
     if not ministream:
         warning("Unable to create the OLE2 mini stream parser!")
         return frag
     return ministream
Example #2
    def which_type(self, path):
        """
        Analyzes the image provided and attempts to determine whether it is a poster, banner, or fanart.

        :param path: full path to the image
        :return: POSTER, BANNER, or FANART if it concluded one of those types, or None if the image matched none of them (or didn't exist)
        """

        if not os.path.isfile(path):
            sickrage.app.log.warning("Couldn't check the type of " + str(path) + " because it doesn't exist")
            return None

        with open(path, 'rb') as fh:
            img_metadata = extractMetadata(guessParser(StringInputStream(fh.read())))
            if not img_metadata:
                sickrage.app.log.debug(
                    "Unable to get metadata from " + str(path) + ", not using your existing image")
                return None

            img_ratio = float(img_metadata.get('width', 0)) / float(img_metadata.get('height', 0))

            # most posters are around 0.68 width/height ratio (eg. 680/1000)
            if 0.55 < img_ratio < 0.8:
                return self.POSTER

            # most banners are around 5.4 width/height ratio (eg. 758/140)
            elif 5 < img_ratio < 6:
                return self.BANNER

            # most fanart are around 1.77777 width/height ratio (eg. 1280/720 and 1920/1080)
            elif 1.7 < img_ratio < 1.8:
                return self.FANART
            else:
                sickrage.app.log.warning("Image has size ratio of " + str(img_ratio) + ", unknown type")
Example #3
def get_meta(filestream):
    metadata = {}

    try:
        filestream.seek(0)
        stream = InputIOStream(filestream, None, tags=[])
        parser = guessParser(stream)
        if not parser:
            return metadata

        tags = extractMetadata(parser).exportPlaintext(human=False,
                                                       line_prefix='')
        for text in tags:
            try:
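                # sanity check: confirm the tag text is JSON-serializable (the result is discarded)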
                json.dumps(text)
                key, value = text.split(':', maxsplit=1)
                key, value = key.strip(), value.strip()
                if key and value:
                    metadata.update({key: value})
            except Exception as ex:
                logger.exception(ex)
    except Exception as ex:
        logger.exception(ex)
        return metadata
    return metadata
Example #4
def getOLE2Parser(ole2, path):
    name = path + "[0]"
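    # look the fragment up directly in the OLE2 parser; fall back to the parsed root storage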
    if name in ole2:
        fragment = ole2[name]
    else:
        fragment = getRootParser(ole2)[name]
    return guessParser(fragment.getSubIStream())
Example #5
def openParser(parser_id, filename, offset, size):
    tags = []
    if parser_id:
        tags += [("id", parser_id), None]
    try:
        stream = FileInputStream(filename, offset=offset, size=size, tags=tags)
    except InputStreamError as err:
        return None, "Unable to open file: %s" % err
    parser = guessParser(stream)
    if not parser:
        return None, "Unable to parse file: %s" % filename
    return parser, None
Example #6
    def extract(self):
        self.nb_extract += 1
        self.prefix = ""

        data = self.data.tostring()
        stream = InputIOStream(StringIO(data), filename=self.filename)

        # Create parser
        start = time()
        try:
            parser = guessParser(stream)
        except InputStreamError as err:
            parser = None
        if not parser:
            self.info("Unable to create parser: stop")
            return None

        # Extract metadata
        try:
            metadata = extractMetadata(parser, 0.5)
            failure = bool(self.fuzzer.log_error)
        except Exception as err:
            self.info("SERIOUS ERROR: %s" % err)
            self.prefix = "metadata"
            failure = True
        duration = time() - start

        # Timeout?
        if MAX_DURATION < duration:
            self.info("Process is too long: %.1f seconds" % duration)
            failure = True
            self.prefix = "timeout"
        if not failure and (metadata is None or not metadata):
            self.info("Unable to extract metadata")
            return None
#        for line in metadata.exportPlaintext():
#            print(">>> %s" % line)
        return failure
Example #7
    def main(self):
        if len(argv) != 2:
            print("usage: %s document.swf" % argv[0], file=stderr)
            exit(1)

        filename = argv[1]
        parser = createParser(filename)

        if parser["signature"].value == "CWS":
            deflate_swf = parser["compressed_data"].getSubIStream()
            parser = guessParser(deflate_swf)

        if "jpg_table/data" in parser:
            # JPEG pictures with common header
            jpeg_header = parser["jpg_table/data"].value[:-2]
            for field in parser.array("def_bits"):
                jpeg_content = field["image"].value[2:]
                if self.verbose:
                    print("Extract JPEG from %s" % field.path)
                self.storeJPEG(jpeg_header + jpeg_content)

        # JPEG in format 2/3
        for field in parser.array("def_bits_jpeg2"):
            self.extractFormat2(field)
        for field in parser.array("def_bits_jpeg3"):
            self.extractFormat2(field)

        # Extract sound
        # self.extractSound(parser)
        self.extractSound2(parser)

        # Does it extract anything?
        if self.jpg_index == 1:
            print("No JPEG picture found.")
        if self.snd_index == 1:
            print("No sound found.")
Example #8
 def run():
     msg = _resize = retry = 0
     events = ("window resize", )
     profile_display = args.profile_display
     while True:
         for e in events:
             try:
                 if e == "window resize":
                     size = ui.get_cols_rows()
                     resize = log.height
                 else:
                     e = top.keypress(size, e)
                     if e is None:
                         pass
                     elif e in ('f1', '?'):
                         try:
                             body.select(body.tabs.index(help))
                         except ValueError:
                             body.append(help)
                             resize = log.height
                     elif e in ('esc', 'ctrl w'):
                         body.close()
                         if body.original_widget is None:
                             return
                         resize = log.height
                     elif e == '+':
                         if log.height:
                             resize = log.height - 1
                     elif e == '-':
                         resize = log.height + 1
                     elif e == 'q':
                         return
             # except AssertionError:
             #     hachoir_log.error(getBacktrace())
             except NewTab_Stream as e:
                 stream = e.field.getSubIStream()
                 logger.objects[stream] = e = "%u/%s" % (
                     body.active, e.field.absolute_address)
                 parser = guessParser(stream)
                 if not parser:
                     hachoir_log.error("No parser found for %s" %
                                       stream.source)
                 else:
                     logger.objects[parser] = e
                     body.append((e,
                                  TreeBox(charset, Node(parser, None),
                                          preload_fields, None, options)))
                     resize = log.height
             except NeedInput as e:
                 input.do(*e.args)
             if profile_display:
                 events = events[1:]
                 break
         while True:
             if msgs[0]:
                 for level, prefix, text in msgs[0]:
                     log_count[level] += 1
                     txt = Text("[%u]%s %s" % (msg, prefix, text))
                     msg += 1
                     msgs[1].append(txt)
                     _resize += txt.rows(size[:1])
                 if log.height < _resize and (resize is None
                                              or resize < _resize):
                     resize = _resize
                 try:
                     log.set_focus(len(msgs[1]) - 1)
                 except IndexError:
                     pass
                 sep.set_info(*tuple(log_count))
                 msgs[0] = []
             if resize is not None:
                 body.height = size[1] - sep.rows(size[:1]) - resize
                 if body.height <= 0:
                     resize += body.height - 1
                     body.height = 1
                 log.height = resize
                 resize = None
             canvas = top.render(size, focus=True)
             if not msgs[0]:
                 _resize = retry = 0
                 break
             assert not retry
             retry += 1
         ui.draw_screen(size, canvas)
         msgs[2] = len(msgs[1])
         if profile_display and events:
             continue
         while True:
             events = ui.get_input()
             if events:
                 break
Example #9
import collections
import subprocess
from hachoir.stream import FileInputStream
from hachoir.parser import guessParser, archive

# 1. process the zip file and extract the data (a Deflate stream) for each file
parser = guessParser(FileInputStream("challenge.zip"))
# ignore stuff like zip central directory
files = [entry for entry in parser if type(entry) == archive.zip.FileEntry]
# compressed_data is the last field in the file entry, so index -1
streams = [list(file)[-1].value for file in files]

# 2. use infgen to decode the dynamic huffman tree in each stream
info = []
for stream in streams:
    # https://github.com/madler/infgen
    p = subprocess.run("./infgen", input=stream, capture_output=True)
    info.append(p.stdout.decode())

# 3. copy the code given in the Deflate RFC to rebuild the huffman tree
trees = []
for file in info:
    # ignore all the other infgen output
    lines = [line for line in file.split("\n") if line.startswith("litlen")]

    # litlen 10 7
    # litlen 32 3
    # litlen 33 12
    # ...
    # -> [(10, 7), (32, 3), (33, 12), ...]
    litlens = [tuple(map(int, line.split()[1:])) for line in lines]
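
The snippet ends before the step-3 comment is fulfilled; a minimal sketch of the canonical Huffman code assignment from RFC 1951 section 3.2.2, which that comment refers to, could look like the helper below (build_canonical_codes and its bit-string output are illustrative choices, not part of the original example).

def build_canonical_codes(litlens):
    """Map each (symbol, code length) pair to its canonical Huffman code as a bit string."""
    lengths = dict(litlens)
    max_len = max(lengths.values())

    # count how many codes exist for each code length
    bl_count = [0] * (max_len + 1)
    for length in lengths.values():
        bl_count[length] += 1

    # compute the smallest code value for each code length
    next_code = [0] * (max_len + 1)
    code = 0
    for bits in range(1, max_len + 1):
        code = (code + bl_count[bits - 1]) << 1
        next_code[bits] = code

    # assign codes to symbols in ascending symbol order
    codes = {}
    for symbol in sorted(lengths):
        length = lengths[symbol]
        codes[symbol] = format(next_code[length], "0%db" % length)
        next_code[length] += 1
    return codes

Inside the loop above, each tree would then be collected with trees.append(build_canonical_codes(litlens)).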
Example #10
def getRootParser(ole2):
    return guessParser(ole2["root[0]"].getSubIStream())