Example no. 1
def main():
    level = logging.DEBUG

    progressbar.streams.wrap_stderr()  # needed for logging compatibility
    logging.basicConfig(
        format=
        "{asctime}:{msecs:03.0f} [{levelname:^9}] {filename}:{lineno:d} : {message}",  # noqa
        datefmt="%H:%M:%S",
        style="{",
        level=level)

    logger = logging.getLogger()
    logger.addHandler(LoggingBreakPoint())

    statusBar = progressbar.ProgressBar(
        prefix="{variables.unit} >> {variables.status} :: [",
        variables={
            "unit": "--",
            "status": "--"
        },
        widgets=[progressbar.widgets.AnimatedMarker(), "]"],
        redirect_stdout=True)

    args = getArguments()

    with open(args.dyld_path, "rb") as f:
        dyldCtx = DyldContext(f)

        runForAllImages(f, dyldCtx, statusBar, logger, stopIndex=20)
        # runForAllImages(f, dyldCtx, statusBar, logger, startIndex=1020)
        # runForAllImages(f, dyldCtx, statusBar, logger)
    pass
Example no. 2
def _openSubCaches(
        mainCachePath: str,
        numSubCaches: int) -> Tuple[List[DyldContext], List[BinaryIO]]:
    """Create DyldContext objects for each sub cache.

    Assumes that each sub cache has the same base name as the
    main cache and that the suffixes are preserved.

    Also opens the symbols cache and adds it to the end of
    the list.

    Returns:
        A list of sub cache contexts and their file objects,
        which must be closed by the caller.
    """
    subCaches = []
    subCachesFiles = []

    subCacheSuffixes = list(range(1, numSubCaches + 1))
    subCacheSuffixes.append("symbols")
    for cacheSuffix in subCacheSuffixes:
        subCachePath = f"{mainCachePath}.{cacheSuffix}"
        cacheFileObject = open(subCachePath, mode="rb")
        cacheFileCtx = DyldContext(cacheFileObject)

        subCaches.append(cacheFileCtx)
        subCachesFiles.append(cacheFileObject)
        pass

    return subCaches, subCachesFiles
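A minimal usage sketch for _openSubCaches follows; the literal cache path and the placeholder work step are assumptions, while the DyldContext calls (hasSubCaches, header.numSubCaches, addSubCaches) and the close-in-finally pattern mirror Example no. 6.

from typing import BinaryIO, List

mainCachePath = "dyld_shared_cache_arm64e"  # assumed example path

with open(mainCachePath, "rb") as f:
    mainCtx = DyldContext(f)

    subCacheFiles: List[BinaryIO] = []
    try:
        if mainCtx.hasSubCaches():
            # sub caches are expected next to the main cache as
            # "<mainCachePath>.1" ... "<mainCachePath>.symbols"
            subCacheCtxs, subCacheFiles = _openSubCaches(
                mainCachePath, mainCtx.header.numSubCaches)
            mainCtx.addSubCaches(subCacheCtxs)

        # ... work with the joined cache here ...
    finally:
        # the file objects returned by _openSubCaches must be closed
        for file in subCacheFiles:
            file.close()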
Example no. 3
def _imageRunner(dyldPath: str, imageIndex: int) -> str:
    level = logging.DEBUG
    loggingStream = io.StringIO()

    # setup logging
    logger = logging.getLogger(f"Worker: {imageIndex}")
    handler = logging.StreamHandler(loggingStream)
    formatter = logging.Formatter(
        fmt=
        "{asctime}:{msecs:03.0f} [{levelname:^9}] {filename}:{lineno:d} : {message}",  # noqa
        datefmt="%H:%M:%S",
        style="{",
    )

    handler.setFormatter(formatter)
    logger.addHandler(handler)
    logger.setLevel(level)

    # process the image
    with open(dyldPath, "rb") as f:
        dyldFile = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
        dyldCtx = DyldContext(dyldFile)
        imageOffset = dyldCtx.convertAddr(dyldCtx.images[imageIndex].address)

        machoFile = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_COPY)
        machoCtx = MachOContext(machoFile, imageOffset)

        extractionCtx = ExtractionContext(dyldCtx, machoCtx,
                                          _DummyProgressBar(), logger)

        try:
            linkedit_optimizer.optimizeLinkedit(extractionCtx)
            stub_fixer.fixStubs(extractionCtx)
            objc_fixer.fixObjC(extractionCtx)
        except Exception as e:
            logger.exception(e)
        pass

    # cleanup
    handler.close()
    return loggingStream.getvalue()
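A minimal driver sketch for _imageRunner follows, mirroring the worker-pool pattern of Example no. 7; the pool size, cache path, image index range, and the omission of the _workerInitializer hook are assumptions made for brevity.

import multiprocessing as mp

dyldPath = "dyld_shared_cache_arm64e"  # assumed example path

with mp.Pool(4) as pool:
    # queue one job per image; each worker returns its captured log text
    jobs = [
        pool.apply_async(_imageRunner, (dyldPath, imageIndex))
        for imageIndex in range(4)
    ]
    for job in jobs:
        print(job.get())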
Example no. 4
def runForAllImages(dyldFile: BufferedReader,
                    dyldCtx: DyldContext,
                    statusBar: progressbar.ProgressBar,
                    logger: logging.Logger,
                    startIndex: int = 0,
                    stopIndex: int = -1) -> None:
    total = dyldCtx.header.imagesCount

    for index, imageData in enumerate(dyldCtx.images[startIndex:], startIndex):
        if index == stopIndex:
            break

        # TODO: Implement sub caches
        imageOffset = dyldCtx.convertAddr(imageData.address)
        imagePath = dyldCtx.readString(imageData.pathFileOffset)[0:-1]
        imagePath = imagePath.decode("utf-8")
        imageName = imagePath.split("/")[-1]

        # Make a writable copy of the dyld file
        machoFile = mmap.mmap(dyldFile.fileno(), 0, access=mmap.ACCESS_COPY)
        machoCtx = MachOContext(machoFile, imageOffset)

        extractionCtx = ExtractionContext(dyldCtx, machoCtx, statusBar, logger)

        # Test space start

        slide_info.processSlideInfo(extractionCtx)
        linkedit_optimizer.optimizeLinkedit(extractionCtx)
        stub_fixer.fixStubs(extractionCtx)
        objc_fixer.fixObjC(extractionCtx)
        macho_offset.optimizeOffsets(extractionCtx)

        # Test space end

        logger.info(f"processed: ({index + 1}/{total}): {imageName}")
        pass

    statusBar.update(unit="Extractor", status="Done")
    pass
Example no. 5
def _extractImage(dyldFile: BufferedReader, dyldCtx: DyldContext,
                  image: dyld_cache_image_info, outputPath: str) -> None:
    """Extract an image and save it.

    The order of converters is essentially the reverse of Apple's
    SharedCacheBuilder.
    """

    logger = logging.getLogger()

    # get a writable copy of the MachOContext
    machoFile = mmap.mmap(dyldFile.fileno(), 0, access=mmap.ACCESS_COPY)
    machoCtx = MachOContext(machoFile, dyldCtx.convertAddr(image.address))

    statusBar = progressbar.ProgressBar(
        prefix="{variables.unit} >> {variables.status} :: [",
        variables={
            "unit": "--",
            "status": "--"
        },
        widgets=[progressbar.widgets.AnimatedMarker(), "]"],
        redirect_stdout=True)

    extractionCtx = ExtractionContext(dyldCtx, machoCtx, statusBar, logger)

    slide_info.processSlideInfo(extractionCtx)
    linkedit_optimizer.optimizeLinkedit(extractionCtx)
    stub_fixer.fixStubs(extractionCtx)
    objc_fixer.fixObjC(extractionCtx)

    macho_offset.optimizeOffsets(extractionCtx)

    # Write the MachO file
    with open(outputPath, "wb") as outFile:
        statusBar.update(unit="Extractor", status="Writing file")

        newMachoCtx = extractionCtx.machoCtx

        # get the size of the file
        linkEditSeg = newMachoCtx.segments[b"__LINKEDIT"].seg
        fileSize = linkEditSeg.fileoff + linkEditSeg.filesize

        newMachoCtx.file.seek(0)
        outFile.write(newMachoCtx.file.read(fileSize))

    statusBar.update(unit="Extractor", status="Done")
Example no. 6
def _imageRunner(dyldPath: str, imageIndex: int) -> str:
    level = logging.DEBUG
    loggingStream = io.StringIO()

    # setup logging
    logger = logging.getLogger(f"Worker: {imageIndex}")
    handler = logging.StreamHandler(loggingStream)
    formatter = logging.Formatter(
        fmt=
        "{asctime}:{msecs:03.0f} [{levelname:^9}] {filename}:{lineno:d} : {message}",  # noqa
        datefmt="%H:%M:%S",
        style="{",
    )

    handler.setFormatter(formatter)
    logger.addHandler(handler)
    logger.setLevel(level)

    # process the image
    with open(dyldPath, "rb") as f:
        dyldCtx = DyldContext(f)

        subCacheFiles: List[BinaryIO] = []
        try:
            # add sub caches if there are any
            if dyldCtx.hasSubCaches():
                subCacheFileCtxs, subCacheFiles = _openSubCaches(
                    dyldPath, dyldCtx.header.numSubCaches)
                dyldCtx.addSubCaches(subCacheFileCtxs)
                pass

            machoOffset, context = dyldCtx.convertAddr(
                dyldCtx.images[imageIndex].address)
            machoCtx = MachOContext(context.fileObject, machoOffset, True)

            # Add sub caches if necessary
            if dyldCtx.hasSubCaches():
                mappings = dyldCtx.mappings
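                # find the mapping whose file context contains the target image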
                mainFileMap = next((mapping[0] for mapping in mappings
                                    if mapping[1] == context))
                machoCtx.addSubfiles(mainFileMap,
                                     ((m, ctx.makeCopy(copyMode=True))
                                      for m, ctx in mappings))
                pass

            extractionCtx = ExtractionContext(dyldCtx, machoCtx,
                                              _DummyProgressBar(), logger)

            # TODO: implement a way to select convertors
            slide_info.processSlideInfo(extractionCtx)
            linkedit_optimizer.optimizeLinkedit(extractionCtx)
            stub_fixer.fixStubs(extractionCtx)
            objc_fixer.fixObjC(extractionCtx)
            macho_offset.optimizeOffsets(extractionCtx)

        except Exception as e:
            logger.exception(e)
            pass

        finally:
            for file in subCacheFiles:
                file.close()
                pass
            pass
        pass

    # cleanup
    handler.close()
    return loggingStream.getvalue()
Example no. 7
    parser.add_argument("dyld_path",
                        type=pathlib.Path,
                        help="A path to the target DYLD cache.")
    parser.add_argument(
        "-j",
        "--jobs",
        type=int,
        default=mp.cpu_count(),
        help="Number of jobs to run simultaneously."  # noqa
    )
    args = parser.parse_args(namespace=_DyldExtractorArgs)

    # create a list of images
    images: List[str] = []
    with open(args.dyld_path, "rb") as f:
        dyldCtx = DyldContext(f)

        for index, image in enumerate(dyldCtx.images):
            imagePath = dyldCtx.readString(image.pathFileOffset)[0:-1]
            imagePath = imagePath.decode("utf-8")
            imageName = imagePath.split("/")[-1]

            images.append(imageName)
        pass

    summary = ""
    with mp.Pool(args.jobs, initializer=_workerInitializer) as pool:
        # create jobs for each image
        jobs: List[Tuple[str, mp.pool.AsyncResult]] = []
        for index, imageName in enumerate(images):
            jobs.append((imageName,
Example no. 8
def main():
    args = getArguments()

    # Configure Logging
    level = logging.WARNING  # default option

    if args.verbosity == 0:
        # Set the log level so high that it doesn't do anything
        level = 100
    elif args.verbosity == 2:
        level = logging.INFO
    elif args.verbosity == 3:
        level = logging.DEBUG

    progressbar.streams.wrap_stderr()  # needed for logging compatibility

    logging.basicConfig(
        format=
        "{asctime}:{msecs:3.0f} [{levelname:^9}] {filename}:{lineno:d} : {message}",  # noqa
        datefmt="%H:%M:%S",
        style="{",
        level=level)

    with open(args.dyld_path, "rb") as f:
        with mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as dyldFile:
            dyldCtx = DyldContext(dyldFile)

            # enumerate images, create a map of paths and images
            imageMap = {}
            for imageData in dyldCtx.images:
                path = dyldCtx.readString(imageData.pathFileOffset)
                path = path[0:-1]  # remove null terminator
                path = path.decode("utf-8")

                imageMap[path] = imageData

            # list images option
            if args.list_frameworks:
                imagePaths = imageMap.keys()

                # filter if needed
                if args.filter:
                    filterTerm = args.filter.strip().lower()
                    imagePaths = _filterImages(imagePaths, filterTerm)

                print("Listing Images\n--------------")
                for path in imagePaths:
                    print(path)

                return

            # extract image option
            if args.extract:
                extractionTarget = args.extract.strip()
                targetPaths = _filterImages(imageMap.keys(), extractionTarget)
                if len(targetPaths) == 0:
                    print(f"Unable to find image \"{extractionTarget}\"")
                    return

                outputPath = args.output
                if outputPath is None:
                    outputPath = pathlib.Path("binaries/" + extractionTarget)
                    os.makedirs(outputPath.parent, exist_ok=True)

                print(f"Extracting {targetPaths[0]}")
                _extractImage(f, dyldCtx, imageMap[targetPaths[0]], outputPath)
                return