Example #1
from collections import defaultdict
from typing import Dict, Set


def get_all_emails_of_a_user(username: str,
                             repo_name: str) -> Dict[str, Set[str]]:
    emails_to_name = defaultdict(set)
    seen_commits = set()
    page_counter = 1
    commit_counter = 1

    while True:
        continue_loop = True
        url = '{}/repos/{}/{}/commits?per_page=100&page={}'.format(
            API_URL, username, repo_name, page_counter)
        result_dict = __api_call(url=url)

        if 'message' in result_dict:
            if result_dict['message'] == 'Git Repository is empty.':
                info('Git repository is empty', verbosity_level=5)
                # 'continue' would refetch the same page forever; stop instead
                return emails_to_name

            if 'API rate limit exceeded for ' in result_dict['message']:
                warning(
                    'API rate limit exceeded - not all commits were fetched')
                return emails_to_name

            if result_dict['message'] == 'Not Found':
                warning('There is no repository with the name "{}"'.format(
                    repo_name))
                return emails_to_name

        for commit_dict in result_dict:
            sha = commit_dict['sha']
            if sha in seen_commits:
                continue_loop = False
                break

            seen_commits.add(sha)
            info('scan commit {}'.format(commit_counter), verbosity_level=5)
            commit_counter += 1

            if commit_dict['author'] is None:
                continue
            user = commit_dict['author']['login']
            if user.lower() == username.lower():
                commit = commit_dict['commit']
                author_name = commit['author']['name']
                author_email = commit['author']['email']
                committer_name = commit['committer']['name']
                committer_email = commit['committer']['email']
                emails_to_name[author_email].add(author_name)
                emails_to_name[committer_email].add(committer_name)
        if continue_loop and len(result_dict) == 100:
            page_counter += 1
        else:
            break
    return emails_to_name
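
The loop above pages through the GitHub commits API in batches of 100 until a short page (or an already-seen commit) signals the end. A minimal sketch of the same pagination pattern using the requests library directly (an assumed dependency; the original routes calls through the project's __api_call helper and its rate-limit handling):

import requests


def iter_commits(username, repo_name, api_url='https://api.github.com'):
    # Sketch only: no authentication, no rate-limit handling.
    page = 1
    while True:
        url = '{}/repos/{}/{}/commits'.format(api_url, username, repo_name)
        result = requests.get(url, params={'per_page': 100, 'page': page}).json()
        if not isinstance(result, list):  # error payloads are dicts with a 'message' key
            return
        yield from result
        if len(result) < 100:  # a short page means this was the last one
            return
        page += 1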
Example #2
import re
from email.message import Message


def show_urls(parsed_eml: Message):
    print_headline_banner(headline='URLs in HTML part')
    all_links = set()
    html_str = __get_decoded_payload(parsed_eml=parsed_eml, content_type='text/html')
    if html_str is None:
        warning('Email contains no HTML')
    else:
        for pattern in [r'href="(.+?)"', r"href='(.+?)'"]:
            for match in re.finditer(pattern, html_str):
                all_links.add(match.group(1))
        if len(all_links) == 0:
            info(message='No URLs found in the HTML')
        for x in all_links:
            print(' - ' + colorize_string(text=x, color=Color.MAGENTA))
    print()
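
The two patterns cover double- and single-quoted href attributes. A self-contained check against a made-up HTML snippet:

import re

html = '<a href="https://example.com/a">x</a> <a href=\'https://example.com/b\'>y</a>'
links = set()
for pattern in [r'href="(.+?)"', r"href='(.+?)'"]:
    for match in re.finditer(pattern, html):
        links.add(match.group(1))
print(sorted(links))  # ['https://example.com/a', 'https://example.com/b']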
Example #3
import re
from email.message import Message


def check_tracking(parsed_eml: Message):
    print_headline_banner(headline='Reloaded Content (a.k.a. Tracking Pixels)')
    sources = set()
    html_str = __get_decoded_payload(parsed_eml=parsed_eml, content_type='text/html')
    if html_str is None:
        warning('Email contains no HTML')
    else:
        for pattern in [r'src="(.+?)"', r"src='(.+?)'", r'background="(.+?)"', r"background='(.+?)'"]:
            for match in re.finditer(pattern, html_str):
                if not match.group(1).startswith('cid:'):
                    sources.add(match.group(1))
        if len(sources) == 0:
            info(message='No content found which will be reloaded from external resources')
        for x in sources:
            print(' - ' + colorize_string(text=x, color=Color.MAGENTA))
    print()
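
The cid: check is the important detail: src="cid:..." points at an inline MIME attachment (a Content-ID), so only the remaining sources are fetched from the network when the mail is rendered. A made-up sample:

import re

html = ('<img src="cid:logo@mail">'
        '<img src="https://tracker.example/p.gif" width="1" height="1">')
sources = set()
for pattern in [r'src="(.+?)"', r"src='(.+?)'"]:
    for match in re.finditer(pattern, html):
        if not match.group(1).startswith('cid:'):  # cid: sources are inline attachments and are skipped
            sources.add(match.group(1))
print(sources)  # {'https://tracker.example/p.gif'}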
Example #4
import zipfile
from typing import List, Optional


def crack_zip_archive(password_list: List[str],
                      zip_archive: zipfile.ZipFile) -> Optional[str]:
    """Returns the password of the zip file, or None if it was not in the password list."""
    if not __archive_is_encrypted(zip_archive=zip_archive):
        warning('Zip file is not encrypted')
    info('Try {} passwords'.format(len(password_list)))
    for x in password_list:
        try:
            zip_archive.extractall(pwd=x.encode('utf-8'))
            info('Password found: "{}"'.format(x))
            info('Files extracted')
            return x
        except KeyboardInterrupt:
            warning('Keyboard Interruption. Exiting.')
            return None
        except Exception:
            # a wrong password raises RuntimeError (or a decompression error); try the next one
            pass
    info('Password not found')
    return None
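
A possible way to call it, assuming an encrypted archive on disk and a small word list (both placeholders for this sketch):

import zipfile

with zipfile.ZipFile('protected.zip') as archive:  # placeholder path
    candidates = ['123456', 'password', 'letmein']  # placeholder word list
    if crack_zip_archive(password_list=candidates, zip_archive=archive) is None:
        print('none of the candidates matched')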
Example #5
        def scan_directory(path_to_dir):
            for x in os.scandir(path_to_dir):
                file_name = x.path.replace(base_path, '')
                try:
                    file_info = x.stat()
                except OSError:
                    warning(message='File {} could not be analyzed'.format(
                        file_name))
                    continue

                alternate_data_streams = alternate_data_streams_dict.get(
                    x.path, list())
                generated_output = __generate_output_single_file(
                    arguments=parsed_arguments,
                    path_to_file=x.path,
                    file_name=file_name,
                    file_info=file_info,
                    alternate_data_streams=alternate_data_streams)
                output.extend(generated_output)

                if parsed_arguments.recursive and x.is_dir():
                    scan_directory(path_to_dir=x.path)
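
Stripped of the project-specific output generation, the underlying pattern is a recursive os.scandir walk with a guarded stat(); a self-contained sketch:

import os


def walk(path_to_dir):
    for entry in os.scandir(path_to_dir):
        try:
            file_info = entry.stat()
        except OSError:
            continue  # unreadable entries are skipped, as in the original
        print(entry.path, file_info.st_size)
        if entry.is_dir(follow_symlinks=False):
            walk(entry.path)


walk('.')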
Example #6
from typing import List


def get_all_repositories_of_a_user(username: str) -> List[Repository]:
    repositories_seen = set()
    repositories = list()
    page_counter = 1
    while True:
        continue_loop = True

        url = '{}/users/{}/repos?per_page=100&page={}'.format(
            API_URL, username, page_counter)
        result_dict = __api_call(url=url)

        if 'message' in result_dict:
            if 'API rate limit exceeded for ' in result_dict['message']:
                warning(
                    'API rate limit exceeded - not all repos were fetched')
                break
            if result_dict['message'] == 'Not Found':
                warning(
                    'There is no user with the username "{}"'.format(username))
                break

        for repository in result_dict:
            repo_name = repository['name']
            if repo_name in repositories_seen:
                continue_loop = False
                break
            else:
                repositories.append(
                    Repository(name=repo_name, is_fork=repository['fork']))
                repositories_seen.add(repo_name)

        if continue_loop and len(result_dict) == 100:
            page_counter += 1
        else:
            break
    return repositories
Example #7
def main():
    argument_parser = argparse.ArgumentParser(
        usage='emlAnalyzer [OPTION]... -i FILE',
        description=
        'A CLI script to analyze an e-mail in the eml format: view the header, extract attachments, etc.'
    )
    argument_parser.add_argument('-i',
                                 '--input',
                                 help="path to the eml-file (is required)",
                                 type=str)
    argument_parser.add_argument('--header',
                                 action='store_true',
                                 default=False,
                                 help="Shows the headers")
    argument_parser.add_argument(
        '-x',
        '--tracking',
        action='store_true',
        default=False,
        help=
        "Shows content which is reloaded from external resources in the HTML part"
    )
    argument_parser.add_argument('-a',
                                 '--attachments',
                                 action='store_true',
                                 default=False,
                                 help="Lists attachments")
    argument_parser.add_argument('--text',
                                 action='store_true',
                                 default=False,
                                 help="Shows plaintext")
    argument_parser.add_argument('--html',
                                 action='store_true',
                                 default=False,
                                 help="Shows HTML")
    argument_parser.add_argument('-s',
                                 '--structure',
                                 action='store_true',
                                 default=False,
                                 help="Shows structure of the E-Mail")
    argument_parser.add_argument(
        '-u',
        '--url',
        action='store_true',
        default=False,
        help="Shows embedded links and urls in the html part")
    argument_parser.add_argument('-ea',
                                 '--extract',
                                 type=int,
                                 default=None,
                                 help="Extracts the x-th attachment")
    argument_parser.add_argument('--extract-all',
                                 action='store_true',
                                 default=False,
                                 help="Extracts all attachments")
    argument_parser.add_argument(
        '-o',
        '--output',
        type=str,
        default=None,
        help=
        "Path for the extracted attachment (default is filename in working directory)"
    )
    arguments = argument_parser.parse_args()

    if arguments.input is None or len(arguments.input) == 0:
        warning('No input specified')
        argument_parser.print_help()
        exit()

    # get the absolute path to the input file
    path_to_input = os.path.abspath(arguments.input)

    # read the eml file
    try:
        with open(path_to_input, mode='r') as input_file:
            eml_content = input_file.read()
    except Exception as e:
        error('Error: {}'.format(e))
        error('File could not be loaded')
        info('Exiting')
        exit()

    # parse the eml file
    try:
        parsed_eml = message_from_string(eml_content)
    except Exception as e:
        error('Error: {}'.format(e))
        error('File could not be parsed. Are you sure it is an eml file?')
        info('Exiting')
        exit()

    # use default functionality if no options are specified
    is_default_functionality = not (arguments.header or arguments.tracking
                                    or arguments.attachments or arguments.text
                                    or arguments.html or arguments.structure
                                    or arguments.url
                                    or arguments.extract is not None)

    if is_default_functionality:
        arguments.structure = True
        arguments.url = True
        arguments.tracking = True
        arguments.attachments = True

    if arguments.header:
        show_header(parsed_eml=parsed_eml)
    if arguments.structure:
        show_structure(parsed_eml=parsed_eml)
    if arguments.url:
        show_urls(parsed_eml=parsed_eml)
    if arguments.tracking:
        check_tracking(parsed_eml=parsed_eml)
    if arguments.attachments:
        show_attachments(parsed_eml=parsed_eml)
    if arguments.text:
        show_text(parsed_eml=parsed_eml)
    if arguments.html:
        show_html(parsed_eml=parsed_eml)

    if arguments.extract is not None:
        extract_attachment(parsed_eml=parsed_eml,
                           attachment_number=arguments.extract,
                           output_path=arguments.output)
    if arguments.extract_all:
        extract_all_attachments(parsed_eml=parsed_eml, path=arguments.output)
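
The whole pipeline hinges on email.message_from_string from the standard library; a minimal sketch with a made-up message, showing the kind of object the show_* functions receive:

from email import message_from_string

raw = ('From: alice@example.com\n'
       'Subject: hello\n'
       'Content-Type: text/plain\n'
       '\n'
       'Just a test body.\n')
parsed_eml = message_from_string(raw)
print(parsed_eml['Subject'])        # hello
for part in parsed_eml.walk():      # iterates the MIME tree (a single part here)
    print(part.get_content_type())  # text/plain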
Example #8
def main():
    parser = argparse.ArgumentParser(usage='run.py -i INPUT -o OUTPUT')
    parser.add_argument(
        '-i',
        '--input',
        type=str,
        help='Path to the device or file which should be scanned')
    parser.add_argument('-o',
                        '--output',
                        type=str,
                        help='Path to the output directory')
    parser.add_argument(
        '-c',
        '--no-corruption-checks',
        dest='no_corruption_checks',
        help='No corruption checks will be made, which speeds up the scan',
        action='store_true',
        default=False)
    parser.add_argument(
        '-f',
        dest='flush_if_maximum_file_size_is_reached',
        help=
        'Flush files when the maximum file size is reached, even if the file is not completely carved',
        action='store_true',
        default=False)
    parser.add_argument(
        '-p',
        '--plugin',
        type=str,
        nargs='+',
        dest='plugin_list',
        help=
        'List of plugins which will be used [keys, cert, pictures, binary, pdf]',
        default=['keys', 'cert', 'pictures', 'binary', 'pdf'])

    arguments = parser.parse_args()

    if arguments.input is None:
        parser.print_help()
        exit()
    arguments.input = os.path.abspath(arguments.input)

    if arguments.output is None:
        parser.print_help()
        exit()
    arguments.output = os.path.abspath(arguments.output)

    file_carver = FileCarver(
        path_to_input_device=arguments.input,
        output_directory=arguments.output,
        make_corruption_checks=not arguments.no_corruption_checks,
        flush_if_maximum_file_size_is_reached=arguments.
        flush_if_maximum_file_size_is_reached)

    # Private Keys
    if 'keys' in arguments.plugin_list:
        file_carver.register_plugin(EncryptedPrivateKey)
        file_carver.register_plugin(PrivateKey)
        file_carver.register_plugin(PrivateDSAKey)
        file_carver.register_plugin(PrivateECKey)
        file_carver.register_plugin(PrivateRsaKey)

    # Certificates and Certificate Requests
    if 'cert' in arguments.plugin_list:
        file_carver.register_plugin(CertificateRequest)
        file_carver.register_plugin(Certificate)
        file_carver.register_plugin(TrustedCertificate)

    # Pictures
    if 'pictures' in arguments.plugin_list:
        file_carver.register_plugin(JPG)
        file_carver.register_plugin(PNG)

    # Binaries
    if 'binary' in arguments.plugin_list:
        file_carver.register_plugin(PeFile)

    # PDF
    if 'pdf' in arguments.plugin_list:
        file_carver.register_plugin(PDF)

    file_carver.start_scanning()

    try:
        time_started = time.time()
        info('Starting file carving process...')
        while file_carver.is_running:
            scanned_sectors = file_carver.scanned_sectors
            if scanned_sectors > 0:
                number_of_sectors = file_carver.number_of_sectors
                progress_in_percent = 100 * (scanned_sectors /
                                             number_of_sectors)
                # predict time it still takes
                duration_up_to_now = time.time() - time_started
                prediction_in_sec = (duration_up_to_now / scanned_sectors) * (
                    number_of_sectors - scanned_sectors)
                d_hours, d_minutes, d_seconds = __convert_seconds(
                    seconds_total=duration_up_to_now)
                p_hours, p_minutes, p_seconds = __convert_seconds(
                    seconds_total=prediction_in_sec)
                info('{:2.2f}%    duration: {}:{}:{}  -  remaining: {}:{}:{}'.
                     format(progress_in_percent, d_hours, d_minutes, d_seconds,
                            p_hours, p_minutes, p_seconds))
            time.sleep(1)

        d_hours, d_minutes, d_seconds = __convert_seconds(time.time() -
                                                          time_started)
        info('File carving process is complete. Needed: {}:{}:{}'.format(
            d_hours, d_minutes, d_seconds))

    except KeyboardInterrupt:
        warning('Keyboard Interrupt! Exiting.')
        exit()
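
The progress loop estimates the remaining time as (elapsed / scanned_sectors) * remaining_sectors and formats both durations via the private __convert_seconds helper. A plausible stand-in for that helper (an assumption; the real implementation may differ):

def convert_seconds(seconds_total):
    # Hypothetical stand-in for the project's __convert_seconds helper,
    # assuming it splits a duration into zero-padded h/m/s strings.
    seconds_total = int(seconds_total)
    hours, remainder = divmod(seconds_total, 3600)
    minutes, seconds = divmod(remainder, 60)
    return ('{:02d}'.format(hours), '{:02d}'.format(minutes),
            '{:02d}'.format(seconds))


print(convert_seconds(3725))  # ('01', '02', '05')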
Example #9
def main():
    flags_to_parse, path = __parse_cli_arguments()

    parser = argparse.ArgumentParser(
        usage='lad [OPTION]... [FILE]...',
        description=
        'Lists information about the FILEs (the current directory by default) including Alternate Data Streams.',
        add_help=False)
    parser.add_argument('-h',
                        '--human-readable',
                        dest="human_readable",
                        help="print sizes like 1K 234M 2G etc.",
                        action='store_true',
                        default=False)
    parser.add_argument('--help',
                        dest='help',
                        help="prints the help text",
                        action='store_true',
                        default=False)
    parser.add_argument('-R',
                        '--recursive',
                        dest="recursive",
                        help="list subdirectories recursively",
                        action='store_true',
                        default=False)
    parser.add_argument('--full-time',
                        dest="full_time",
                        help="Shows the complete timestamp",
                        action='store_true',
                        default=False)
    parser.add_argument('-n',
                        '--numeric-uid-gid',
                        dest="numeric_uid_gid",
                        help="list numeric user and group IDs",
                        action='store_true',
                        default=False)
    parser.add_argument(
        '-F',
        dest="filter_files_with_ads",
        help="Show only files which include Alternate Data Streams",
        action='store_true',
        default=False)
    parser.add_argument(
        '--no-warning',
        dest="no_warning",
        help="Suppress warnings (e.g. if the filesystem is not NTFS)",
        action='store_true',
        default=False)

    parsed_arguments = parser.parse_args(flags_to_parse)

    if parsed_arguments.help:
        parser.print_help()
        exit()

    base_path = os.path.abspath(path)
    if not os.path.exists(base_path):
        error('Path "{}" does not exist'.format(path))
        exit()

    output = list()

    if os.path.isfile(path):  # the path points to a file
        # check the filesystem and collect the alternate data streams
        if path_is_an_ntfs_filesystem(path=base_path):
            alternate_data_streams = get_alternate_data_streams_of_file(
                path_to_file=base_path)
        else:
            alternate_data_streams = list()
            if not parsed_arguments.no_warning:
                warning(WARNING_TEXT_WRONG_FILE_SYSTEM_PATH)

        output.extend(
            __generate_output_single_file(
                arguments=parsed_arguments,
                path_to_file=base_path,
                file_name=path,
                file_info=os.stat(base_path),
                alternate_data_streams=alternate_data_streams))

    elif not parsed_arguments.recursive:  # the path points to a directory (recursive flag is not set)
        if path_is_an_ntfs_filesystem(path=base_path):
            search_alternate_data_streams = True
        else:
            search_alternate_data_streams = False
            if not parsed_arguments.no_warning:
                warning(WARNING_TEXT_WRONG_FILE_SYSTEM_PATH)

        for x in os.scandir(base_path):
            file_name = x.path.replace(base_path, '')
            file_info = x.stat()
            if search_alternate_data_streams and x.is_file():
                alternate_data_streams = get_alternate_data_streams_of_file(
                    path_to_file=x.path)
            else:
                alternate_data_streams = list()

            generated_output = __generate_output_single_file(
                arguments=parsed_arguments,
                path_to_file=x.path,
                file_name=file_name,
                file_info=file_info,
                alternate_data_streams=alternate_data_streams)
            output.extend(generated_output)

    else:  # the path points to a directory (recursive flag is set)
        # to speed up the scan, apply getfattr recursively to the directory and parse the complete output

        if not parsed_arguments.no_warning and not path_is_an_ntfs_filesystem(
                base_path):
            warning(WARNING_TEXT_WRONG_FILE_SYSTEM_BASE_PATH)

        alternate_data_streams_dict = get_alternate_data_streams_recursively(
            path_to_directory=base_path)

        def scan_directory(path_to_dir):
            for x in os.scandir(path_to_dir):
                file_name = x.path.replace(base_path, '')
                try:
                    file_info = x.stat()
                except OSError:
                    warning(message='File {} could not be analyzed'.format(
                        file_name))
                    continue

                alternate_data_streams = alternate_data_streams_dict.get(
                    x.path, list())
                generated_output = __generate_output_single_file(
                    arguments=parsed_arguments,
                    path_to_file=x.path,
                    file_name=file_name,
                    file_info=file_info,
                    alternate_data_streams=alternate_data_streams)
                output.extend(generated_output)

                if parsed_arguments.recursive and x.is_dir():
                    scan_directory(path_to_dir=x.path)

        scan_directory(path_to_dir=base_path)

    # Find maximum width of each column to print the table nicely
    max_width_for_each_column = defaultdict(int)
    for line in output:
        for i, cell in enumerate(line):
            max_width_for_each_column[i] = max(max_width_for_each_column[i],
                                               len(cell))

    for line in output:
        for i, cell in enumerate(line):
            if i == len(line) - 1:
                print(cell.ljust(max_width_for_each_column[i]), end='\n')
            elif i == 3:  # the file size has to be aligned to the right
                print(cell.rjust(max_width_for_each_column[i]), end=' ')
            else:
                print(cell.ljust(max_width_for_each_column[i]), end=' ')
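
The table printing at the end is a generic two-pass trick: measure the widest cell per column, then pad every cell to that width. A compact sketch with made-up rows:

from collections import defaultdict

rows = [['-rw-r--r--', 'alice', '1234', 'notes.txt'],
        ['drwxr-xr-x', 'bob', '96', 'src']]
widths = defaultdict(int)
for row in rows:
    for i, cell in enumerate(row):
        widths[i] = max(widths[i], len(cell))
for row in rows:
    # right-align the size column (index 2 in this toy table), left-align the rest
    print(' '.join(cell.rjust(widths[i]) if i == 2 else cell.ljust(widths[i])
                   for i, cell in enumerate(row)))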
Example #10
def main():
    parser = argparse.ArgumentParser(
        usage='showExposedGitHubEmails [OPTION]... -u USERNAME',
        description=
        'Scans the public repositories of a GitHub user for exposed email addresses and the names linked to them.'
    )
    parser.add_argument(
        '-u',
        '--user',
        dest="user",
        help="Username of the user which public repositories should be scanned",
        type=str)
    parser.add_argument('-r',
                        '--repository',
                        dest='repository',
                        help="check only one specific repository",
                        type=str)
    parser.add_argument(
        '-t',
        '--token',
        dest='token',
        help="Paste a GitHub token her to increase the API quota",
        type=str)
    parser.add_argument('-v',
                        '--verbose',
                        dest="verbose",
                        help="verbose mode",
                        action='store_true',
                        default=False)
    parser.add_argument('-d',
                        '--delay',
                        dest="delay",
                        help="The delay between to requests in seconds",
                        type=int,
                        default=None)
    parser.add_argument(
        '--api-url',
        dest="api_url",
        help='Specify the URL to the GitHub API (default is "{}")'.format(
            API_URL),
        type=str,
        default=None)
    parser.add_argument('--no-forks',
                        dest="no_forks",
                        help='Ignore forked repositories',
                        action='store_true',
                        default=False)

    parsed_arguments = parser.parse_args()

    if parsed_arguments.user is None:
        warning('No username specified')
        parser.print_help()
        exit()

    if parsed_arguments.token is not None:
        update_header(
            {'Authorization': 'token {}'.format(parsed_arguments.token)})

    if parsed_arguments.delay is not None:
        set_delay(delay=parsed_arguments.delay)

    if parsed_arguments.api_url is not None:
        set_api_url(api_url=parsed_arguments.api_url)

    if parsed_arguments.verbose:
        set_verbosity_level(level=5)

    if parsed_arguments.repository is not None:
        repos_to_scan = [parsed_arguments.repository]
    else:
        info('Scan for public repositories of user {}'.format(
            parsed_arguments.user))
        repos_to_scan_sorted = sorted(
            get_all_repositories_of_a_user(username=parsed_arguments.user),
            key=lambda x: x.is_fork)
        repos_to_scan = [
            x.name for x in repos_to_scan_sorted
            if not (parsed_arguments.no_forks and x.is_fork)
        ]
        info('Found {} public repositories'.format(len(repos_to_scan)))

    emails_to_name = defaultdict(set)
    try:
        for repo in repos_to_scan:
            info('Scan repository {}'.format(repo))
            emails_to_name_new = get_all_emails_of_a_user(
                username=parsed_arguments.user, repo_name=repo)
            for email, names in emails_to_name_new.items():
                emails_to_name[email].update(names)
    except KeyboardInterrupt:
        warning('Keyboard interrupt. Stopped scanning.')

    if len(emails_to_name) > 0:
        max_width_email = max([len(x) for x in emails_to_name.keys()])
        info('Exposed emails and names:')
        for email, names in emails_to_name.items():
            names_string = '; '.join(
                colorize_string(n, Color.BLUE) for n in names)
            print(
                '\t',
                colorize_string(email.ljust(max_width_email), Color.RED) +
                ' - ' + names_string)
    else:
        info('No emails found')
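
Sorting the repositories by is_fork works because False sorts before True, so original repositories are scanned before forks; a small sketch with a stand-in for the project's Repository class:

from collections import namedtuple

Repository = namedtuple('Repository', ['name', 'is_fork'])  # stand-in type

repos = [Repository('fork-of-x', True), Repository('own-project', False)]
print([r.name for r in sorted(repos, key=lambda r: r.is_fork)])
# ['own-project', 'fork-of-x'] - originals first, since False sorts before True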