Example No. 1
import collections
from typing import List

import tabulate

# sanitize(), render_heading(), Verbosity, AddressType, and DecodedString are
# helpers/types defined elsewhere in the FLOSS package.
def render_decoded_strings(decoded_strings: List[DecodedString], ostream,
                           verbose: Verbosity, disable_headers: bool):
    """
    Render results of string decoding phase.
    """
    if verbose == Verbosity.DEFAULT:
        for ds in decoded_strings:
            ostream.writeln(sanitize(ds.string))
    else:
        # group decoded strings by the function that decoded them
        strings_by_functions = collections.defaultdict(list)
        for ds in decoded_strings:
            strings_by_functions[ds.decoding_routine].append(ds)

        for fva, data in strings_by_functions.items():
            render_heading(f" FUNCTION at 0x{fva:x}", len(data), ostream,
                           disable_headers)
            rows = []
            for ds in data:
                if ds.address_type in (AddressType.HEAP, AddressType.STACK):
                    # heap and stack strings have no stable offset;
                    # show the location type instead
                    offset_string = f"({ds.address_type})"
                else:
                    offset_string = hex(ds.address or 0)
                rows.append(
                    (offset_string, hex(ds.decoded_at), sanitize(ds.string)))

            if rows:
                ostream.write(
                    tabulate.tabulate(
                        rows,
                        headers=("Offset", "Called At",
                                 "String") if not disable_headers else ()))
                ostream.writeln("\n")
Example No. 2
import json
import os

# sanitize_string_for_script() and LocationType come from the FLOSS package.
def create_x64dbg_database_content(sample_file_path, imagebase, decoded_strings):
    """
    Create x64dbg database/json file contents for file annotations.
    :param sample_file_path: input file path
    :param imagebase: input file's image base, used to compute each RVA
    :param decoded_strings: list of decoded strings ([DecodedString])
    :return: JSON needed to annotate a binary in x64dbg
    """
    export = {"comments": []}
    module = os.path.basename(sample_file_path)
    processed = {}
    for ds in decoded_strings:
        if ds.s == "":
            continue
        sanitized_string = sanitize_string_for_script(ds.s)
        # global strings are annotated at the string's own address; stack and
        # heap strings at the address of the decoding call
        if ds.characteristics["location_type"] == LocationType.GLOBAL:
            rva = hex(ds.va - imagebase)
        else:
            rva = hex(ds.decoded_at_va - imagebase)
        # merge strings that annotate the same address into one comment
        if rva in processed:
            processed[rva] += "\t" + sanitized_string
        else:
            processed[rva] = "FLOSS: " + sanitized_string

    for rva, text in processed.items():
        comment = {"text": text, "manual": False, "module": module, "address": rva}
        export["comments"].append(comment)

    return json.dumps(export, indent=1)
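For reference, the exported annotations take roughly this shape (addresses are RVAs rendered as hex strings; the concrete values here are made up):

example_export = {
    "comments": [
        {
            "text": "FLOSS: kernel32.dll\tVirtualAlloc",  # strings merged at one RVA
            "manual": False,
            "module": "sample.exe",
            "address": "0x1a40",
        }
    ]
}

Saved next to the sample, a file with this content should be loadable through x64dbg's database import, attaching the comments at the listed RVAs.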
Example No. 3
from typing import List, Union

import tabulate

# sanitize(), Verbosity, StackString, and TightString come from the FLOSS
# package; verbose is compared against the Verbosity enum below.
def render_stackstrings(strings: Union[List[StackString], List[TightString]],
                        ostream, verbose: Verbosity, disable_headers: bool):
    if verbose == Verbosity.DEFAULT:
        for s in strings:
            ostream.writeln(sanitize(s.string))
    else:
        if strings:
            ostream.write(
                tabulate.tabulate(
                    [(hex(s.function), hex(s.program_counter),
                      hex(s.frame_offset), sanitize(s.string))
                     for s in strings],
                    headers=("Function", "Function Offset", "Frame Offset",
                             "String") if not disable_headers else (),
                ))
            ostream.write("\n")
Example No. 4
import tabulate

def print_stack_strings(extracted_strings, quiet=False, expert=False):
    """
    Print extracted stackstrings.
    :param extracted_strings: list of stack strings ([StackString])
    :param quiet: print strings only, suppresses headers
    :param expert: expert mode
    """
    count = len(extracted_strings)

    if not quiet:
        print("\nFLOSS extracted %d stackstrings" % (count))

    if not expert:
        for ss in extracted_strings:
            print("%s" % (ss.s))
    elif count > 0:
        print(tabulate.tabulate(
            [(hex(s.fva), hex(s.frame_offset), s.s) for s in extracted_strings],
            headers=["Function", "Frame Offset", "String"]))
Example No. 5
import tabulate

# sanitize_string_for_printing() and LocationType come from the FLOSS package.
def print_decoded_strings(decoded_strings, quiet=False, expert=False):
    """
    Print decoded strings.
    :param decoded_strings: list of decoded strings ([DecodedString])
    :param quiet: print strings only, suppresses headers
    :param expert: expert mode
    """
    if quiet or not expert:
        for ds in decoded_strings:
            print(sanitize_string_for_printing(ds.s))
    else:
        rows = []
        for ds in decoded_strings:
            s = sanitize_string_for_printing(ds.s)
            if ds.characteristics["location_type"] == LocationType.STACK:
                offset_string = "[STACK]"
            elif ds.characteristics["location_type"] == LocationType.HEAP:
                offset_string = "[HEAP]"
            else:
                offset_string = hex(ds.va or 0)
            rows.append((offset_string, hex(ds.decoded_at_va), s))

        if rows:
            print(tabulate.tabulate(rows, headers=["Offset", "Called At", "String"]))
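A self-contained sketch exercising print_decoded_strings; LocationType and sanitize_string_for_printing here are stand-ins for the FLOSS helpers, and the DS fields simply mirror the attributes read above:

import enum
from collections import namedtuple

class LocationType(enum.Enum):  # stand-in for the FLOSS enum
    STACK = "stack"
    HEAP = "heap"
    GLOBAL = "global"

def sanitize_string_for_printing(s):  # identity stand-in for the FLOSS helper
    return s

DS = namedtuple("DS", ["s", "va", "decoded_at_va", "characteristics"])

print_decoded_strings(
    [DS("kernel32.dll", 0x41A40, 0x401200, {"location_type": LocationType.GLOBAL})],
    expert=True,
)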
Example No. 6
import tabulate

def print_identification_results(sample_file_path, decoder_results):
    """
    Print results of string decoding routine identification phase.
    :param sample_file_path: input file
    :param decoder_results: identification_manager
    """
    # TODO pass functions instead of identification_manager
    candidates = decoder_results.get_top_candidate_functions(10)
    if len(candidates) == 0:
        print("No candidate functions found.")
    else:
        print("Most likely decoding functions in: " + sample_file_path)
        print(tabulate.tabulate(
            [(hex(fva), "%.5f" % (score,)) for fva, score in candidates],
            headers=["address", "score"]))