Example #1
def send_commands_to_celery(populated_command_tuple, output_base_dir,
                            simulation):

    celery_path = sys.path[0]
    (cmd_name, populated_command, vhost, outfile, workspace, task_id,
     scanned_service_port, scanned_service_name,
     scanned_service_protocol) = populated_command_tuple
    host_dir = output_base_dir + vhost
    host_data_dir = host_dir + "/celerystalkOutput/"

    utils.create_task(cmd_name, populated_command, vhost, outfile, workspace,
                      task_id)
    result = chain(
        # insert a row into the database to mark the task as submitted. a subtask does not get tracked
        # in celery the same way a task does, for instance, you can't find it in flower
        # tasks.cel_create_task.subtask(args=(cmd_name, populated_command, ip, outfile + ".txt", workspace, task_id)),

        # run the command. run_task takes care of marking the task as started and then completed.
        # The si tells run_cmd to ignore the data returned from a previous task
        tasks.run_cmd.si(cmd_name, populated_command, celery_path,
                         task_id).set(task_id=task_id), )()  # .apply_async()

    #task_id_list.append(result.task_id)
    host_audit_log = host_dir + "/" + "{0}_executed_commands.txt".format(vhost)
    f = open(host_audit_log, 'a')
    f.write(populated_command + "\n\n")
    f.close()
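A note on the Celery idiom above: .si() builds an immutable signature (it ignores the previous task's return value), and .set(task_id=...) pins a pre-generated id onto the task so the caller can record it in the database before a worker ever starts it. A minimal standalone sketch of that pattern (the toy task and broker URL are assumptions, not celerystalk's):

from celery import Celery, chain
from celery.utils import uuid

app = Celery('sketch', broker='redis://localhost:6379/0')  # assumed broker

@app.task
def run_cmd(cmd_name, populated_command):
    # stand-in body; the real task shells out and marks start/finish
    return cmd_name

task_id = uuid()                    # generate the id up front
result = chain(
    run_cmd.si('demo', 'echo hello').set(task_id=task_id),
)()                                 # calling the chain submits it
assert result.task_id == task_id    # the pre-generated id is what Celery uses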
Example #2
def nmap_scan_subdomain_host(vhost, workspace, simulation, output_base_dir, config_file=None):
    celery_path = sys.path[0]
    config_nmap_options = config_parser.extract_bb_nmap_options(config_file=config_file)
    config = ConfigParser(allow_no_value=True)
    config.read(['config.ini'])

    vhost_explicitly_out_of_scope = lib.db.is_vhost_explicitly_out_of_scope(vhost, workspace)
    output_host_dir = os.path.normpath(os.path.join(output_base_dir, vhost))
    try:
        os.stat(output_host_dir)
    except OSError:
        os.makedirs(output_host_dir)

    output_file = os.path.normpath(os.path.join(output_host_dir, vhost + "_nmap_tcp_scan.txt"))
    if not vhost_explicitly_out_of_scope:
        #print(config_nmap_options)
        cmd_name = "nmap_tcp_scan"
        try:
            if not simulation:
                populated_command = "nmap " + vhost + config_nmap_options + " -oA " + output_file
            else:
                populated_command = "#nmap " + vhost + config_nmap_options + " -oA " + output_file
        except TypeError:
            print("[!] Error: In the config file, there needs to be one, and only one, enabled tcp_scan command in the nmap_commands section.")
            print("[!]        This determines what ports to scan.")
            exit()
        task_id = uuid()
        utils.create_task(cmd_name, populated_command, vhost, output_file, workspace, task_id)
        result = chain(
            tasks.cel_nmap_scan.si(cmd_name, populated_command, vhost, config_nmap_options, celery_path, task_id,workspace).set(task_id=task_id),
        )()
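The TypeError handler above guards the string concatenation: if the config yields no usable tcp_scan options (assuming extract_bb_nmap_options returns None in that case), gluing None into the command string is what raises. A tiny illustration:

config_nmap_options = None   # what a misconfigured nmap_commands section might yield
try:
    populated_command = "nmap " + "10.0.0.1" + config_nmap_options
except TypeError:
    print("[!] Error: exactly one enabled tcp_scan command is required.")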
Example #3
def nmap_scan_subdomain_host(host, workspace, simulation, output_base_dir):
    celery_path = sys.path[0]
    config_nmap_options = config_parser.extract_bb_nmap_options()
    config = ConfigParser(allow_no_value=True)
    config.read(['config.ini'])

    #print(config_nmap_options)
    cmd_name = "nmap_bug_bounty_mode"
    populated_command = "nmap " + host + config_nmap_options
    task_id = uuid()
    utils.create_task(cmd_name, populated_command, host, output_base_dir,
                      workspace, task_id)
    result = chain(
        #tasks.cel_create_task.subtask(args=(cmd_name, populated_command, host, output_base_dir, workspace, task_id)),
        tasks.cel_nmap_scan.si(cmd_name, populated_command, host,
                               config_nmap_options, celery_path, task_id,
                               workspace).set(task_id=task_id),
        tasks.cel_scan_process_nmap_data.s(workspace),
        tasks.cel_process_db_services.si(output_base_dir,
                                         simulation,
                                         workspace,
                                         host=host),
        tasks.post_process_domains_bb.si(host, cmd_name, populated_command,
                                         output_base_dir, workspace,
                                         simulation, celery_path),
    )()
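This example mixes .si() and .s() in one chain: a .s() signature is mutable, so the previous task's return value is prepended to its arguments, which is how cel_scan_process_nmap_data receives the scan data. A toy sketch of the difference (tasks and broker are illustrative, not the real ones):

from celery import Celery, chain

app = Celery('sketch', broker='redis://localhost:6379/0')  # assumed broker

@app.task
def scan(target):
    return "<nmap data for {0}>".format(target)

@app.task
def parse(nmap_data, workspace):
    # nmap_data arrives automatically because parse.s() is a mutable signature
    print("parsing {0} into workspace {1}".format(nmap_data, workspace))

chain(scan.s("10.0.0.1"), parse.s("default"))()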
Example #4
def parse_config_and_send_commands_to_celery(scanned_service_name, scanned_service_port, scan_output_base_file_name, config, simulation, output_base_dir, host_dir, workspace, task_id_list, ip, scanned_service_protocol, config_file=None):
    """

    :param scanned_service_name:
    :param scanned_service_port:
    :param scan_output_base_file_name:
    :param json_config:
    :param summary_file:
    :param simulation:
    :param output_base_dir:
    :param host_dir:
    :param workspace:
    :param task_id_list:
    :param ip:
    :return:
    """
    celery_path = sys.path[0]

    for (key, val) in config.items("nmap-service-names"):
        services = val.split(",")
        for service in services:
            if service == scanned_service_name:
                mapped_service_name = key
                #print(config.items(mapped_service_name))
                for (cmd_name, cmd) in config.items(mapped_service_name):
                    outfile = scan_output_base_file_name + cmd_name
                    populated_command = cmd.replace("[TARGET]", ip).replace("[PORT]", str(scanned_service_port)).replace("[OUTPUT]", outfile).replace("/[PATH]", "")
                    populated_command = replace_user_config_options(config_file, populated_command)

                    if simulation:
                        #debug - sends jobs to celery, but with a # in front of every one.
                        populated_command = "#" + populated_command

                    # Grab a UUID from celery.utils so that I can assign it to my task at init, which is amazing because
                    # that allows me to pass it to all of the tasks in the chain.

                    task_id = uuid()
                    utils.create_task(cmd_name, populated_command, ip, outfile + ".txt", workspace, task_id)
                    result = chain(
                        # insert a row into the database to mark the task as submitted. a subtask does not get tracked
                        # in celery the same way a task does, for instance, you can't find it in flower
                        #tasks.cel_create_task.subtask(args=(cmd_name, populated_command, ip, outfile + ".txt", workspace, task_id)),

                        # run the command. run_task takes care of marking the task as started and then completed.
                        # The si tells run_cmd to ignore the data returned from a previous task
                        tasks.run_cmd.si(cmd_name, populated_command,celery_path,task_id).set(task_id=task_id),

                        # right now, every executed command gets sent to a generic post_process task that can do
                        # additional stuff based on the command that just ran.
                        tasks.post_process.si(cmd_name, populated_command, output_base_dir, workspace, ip, host_dir, simulation,
                                        scanned_service_port, scanned_service_name, scanned_service_protocol,celery_path),
                    )()  # .apply_async()

                    task_id_list.append(result.task_id)
                    host_audit_log = host_dir + "/" + "{0}_executed_commands.txt".format(ip)
                    f = open(host_audit_log, 'a')
                    f.write(populated_command + "\n\n")
                    f.close()
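The command templates in config.ini use plain-text tokens ([TARGET], [PORT], [OUTPUT], [PATH]) expanded with chained str.replace calls, as above. Standalone, with illustrative values:

cmd = "nikto -h [TARGET]:[PORT] -output [OUTPUT].txt"
populated = (cmd.replace("[TARGET]", "10.0.0.1")
                .replace("[PORT]", str(80))
                .replace("[OUTPUT]", "/pentest/10.0.0.1_80_nikto"))
print(populated)
# nikto -h 10.0.0.1:80 -output /pentest/10.0.0.1_80_nikto.txt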
Example #5
def aquatone_all_paths(workspace, simulation=None, config_file=None):
    #print("in aquatone all_paths")
    urls_to_screenshot = []
    #TODO: Instead of just grabbing all paths here, maybe add some logic to see if only new paths should be scanned or something. At a minimum, as they are grabbed, we need to update the "screenshot taken" column and record the aquatone directory or something like that.
    paths = lib.db.get_all_paths(workspace)
    celery_path = lib.db.get_current_install_path()[0][0]
    outdir = lib.db.get_output_dir_for_workspace(workspace)[0][0]
    outdir = os.path.join(outdir, 'celerystalkReports/aquatone/')

    try:
        os.stat(outdir)
    except OSError:
        os.makedirs(outdir)

    if config_file is None:
        config_file = "config.ini"

    config, supported_services = config_parser.read_config_ini(config_file)

    if len(paths) > 0:
        screenshot_name = "db"
        for (cmd_name, cmd) in config.items("screenshots"):
            #print(cmd_name, cmd)
            try:
                if cmd_name == "aquatone":
                    populated_command = celery_path + "/celerystalk db paths_only limit | " + cmd.replace(
                        "[OUTPUT]", outdir)
                    #print(populated_command)
            except Exception as e:
                print(e)
                print(
                    "[!] Error: In the config file, there needs to be one (and only one) enabled aquatone command."
                )
                exit()

        task_id = uuid()
        utils.create_task(cmd_name, populated_command, workspace,
                          outdir + "aquatone_report.html", workspace, task_id)
        result = chain(
            tasks.run_cmd.si(cmd_name, populated_command, celery_path,
                             task_id).set(task_id=task_id), )()
        print(
            "[+]\t\tTo keep an eye on things, run one of these commands: \n[+]"
        )
        print("[+]\t\t./celerystalk query [watch]")
        print("[+]\t\t./celerystalk query brief [watch]")
        print("[+]\t\t./celerystalk query summary [watch]")
        print("[+]\t\tor\n[+]\t\ttail -f " + outdir + "aquatone_stdout.txt")
        print("[+]")
        print("[+] To peak behind the curtain, view log/celeryWorker.log")
        print(
            "[+] For a csv compatible record of every command execued, view log/cmdExecutionAudit.log\n"
        )
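The os.stat/os.makedirs pairs scattered through these examples are create-if-missing checks. A slightly more robust variant of the same idiom (a sketch; the originals are fine for single-process use) swallows only the already-exists error:

import errno
import os

def ensure_dir(path):
    try:
        os.makedirs(path)
    except OSError as e:
        if e.errno != errno.EEXIST:  # re-raise anything except "already exists"
            raise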
Example #6
def aquatone_host(urls_to_screenshot,
                  vhost,
                  workspace,
                  simulation,
                  scan_output_base_file_dir,
                  celery_path,
                  config_file=None):
    print("in aquatone host")
    celery_path = lib.db.get_current_install_path()[0][0]
    config, supported_services = config_parser.read_config_ini(config_file)
    for (cmd_name, cmd) in config.items("screenshots"):
        #print(cmd_name, cmd)
        try:
            if cmd_name == "aquatone":
                outfile = scan_output_base_file_dir + "_" + cmd_name
                filename = "/tmp/" + workspace + "_paths_" + vhost + ".txt"
                populated_command = cmd.replace("[FILE]", filename).replace(
                    "[OUTPUT]", outfile)
                populated_command = replace_user_config_options(
                    config_file, populated_command)

                paths = lib.db.get_all_paths_for_host_path_only(
                    vhost, workspace)
                print(str(paths))

                with open(filename, 'w') as paths_tmp_file:
                    #paths_tmp_file.write(str(paths))
                    for line in paths:
                        #print(str(line))
                        paths_tmp_file.write(str(line[0]) + "\n")

                populated_command = cmd.replace("[FILE]", filename).replace(
                    "[OUTPUT]", outfile)
                populated_command = replace_user_config_options(
                    config_file, populated_command)

                #print(populated_command)
        except Exception as e:
            print(e)
            print(
                "[!] Error: In the config file, there needs to be one (and only one) enabled aquatone command."
            )
            exit()

        task_id = uuid()
        utils.create_task(cmd_name, populated_command, vhost,
                          outfile + "/aquatone_report.html", workspace,
                          task_id)
        result = chain(
            tasks.run_cmd.si(cmd_name, populated_command, celery_path,
                             task_id).set(task_id=task_id), )()
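The handoff to aquatone above is file-based: every path for the vhost is written one per line to a temp file whose name embeds the workspace and vhost, and the command template's [FILE] token points at it. A toy version of that write (db rows here are assumed 1-tuples, matching the line[0] indexing above):

paths = [("/login",), ("/admin",)]  # illustrative db rows
filename = "/tmp/{0}_paths_{1}.txt".format("demo", "example.com")
with open(filename, 'w') as paths_tmp_file:
    for line in paths:
        paths_tmp_file.write(str(line[0]) + "\n")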
Example #7
def create_dns_recon_tasks(domains, simulation, workspace, output_base_dir, out_of_scope_hosts=None, config_file=None):
    workspace_mode = lib.db.get_workspace_mode(workspace)[0][0]
    task_id_list = []
    total_tasks_num = 0
    config, supported_services = config_parser.read_config_ini(config_file)
    celery_path = sys.path[0]
    for domain in domains.split(","):
        for section in config.sections():
            if section == "domain-recon":
                for (cmd_name, cmd) in config.items(section):
                    outfile = output_base_dir + domain + "_" + cmd_name
                    populated_command = cmd.replace("[DOMAIN]", domain).replace("[OUTPUT]", outfile)
                    populated_command = replace_user_config_options(config_file, populated_command)

                    if simulation:
                        populated_command = "#" + populated_command
                    #print(populated_command)

                    # Grab a UUID from celery.utils so that I can assign it to my task at init, which is amazing because
                    # that allows me to pass it to all of the tasks in the chain.
                    task_id = uuid()
                    utils.create_task(cmd_name, populated_command, domain, outfile + ".txt", workspace, task_id)
                    process_domain_tuple = (cmd_name, populated_command, output_base_dir, workspace, domain, simulation, celery_path, workspace_mode)
                    result = chain(
                        # insert a row into the database to mark the task as submitted. a subtask does not get tracked
                        # in celery the same way a task does, for instance, you can't find it in flower
                        #tasks.cel_create_task.subtask(args=(cmd_name, populated_command, domain, "", workspace, task_id)),

                        # run the command. run_task takes care of marking the task as started and then completed.
                        # The si tells run_cmd to ignore the data returned from a previous task
                        tasks.run_cmd.si(cmd_name, populated_command,celery_path,task_id,process_domain_tuple=process_domain_tuple).set(task_id=task_id),

                        # right now, every executed command gets sent to a generic post_process task that can do
                        # additional stuff based on the command that just ran.
                        #tasks.post_process_domains.s(cmd_name, populated_command, output_base_dir, workspace, domain, simulation,celery_path,workspace_mode),
                    )()  # .apply_async()
                    task_id_list.append(result.task_id)

    total_tasks_num = total_tasks_num + len(task_id_list)
    print("\n\n[+] Summary:\tSubmitted {0} tasks to the [{1}] workspace.".format(total_tasks_num, workspace))
    print("[+]\t\tThere might be additional tasks added to the queue during post processing\n[+]")
    print("[+]\t\tTo keep an eye on things, run one of these commands: \n[+]")
    print("[+]\t\tcelerystalk query [watch]")
    print("[+]\t\tcelerystalk query brief [watch]")
    print("[+]\t\tcelerystalk query summary [watch]\n")
Example #8
def find_subdomains(domains, simulation, workspace, output_base_dir, scan_mode, out_of_scope_hosts=None):
    config, supported_services = config_parser.read_config_ini()
    celery_path = sys.path[0]
    for domain in domains.split(","):
        for section in config.sections():
            if section == "domain-recon":
                for (cmd_name, cmd) in config.items(section):
                    populated_command = cmd.replace("[DOMAIN]", domain)
                    print(populated_command)

                    # Grab a UUID from celery.utils so that I can assign it to my task at init, which is amazing because
                    # that allows me to pass it to all of the tasks in the chain.
                    task_id = uuid()
                    utils.create_task(cmd_name, populated_command, domain, "", workspace, task_id)
                    process_domain_tuple = (cmd_name, populated_command, output_base_dir, workspace, domain, simulation, celery_path, scan_mode)
                    if scan_mode == "VAPT":
                        result = chain(
                            # insert a row into the database to mark the task as submitted. a subtask does not get tracked
                            # in celery the same way a task does, for instance, you can't find it in flower
                            #tasks.cel_create_task.subtask(args=(cmd_name, populated_command, domain, "", workspace, task_id)),

                            # run the command. run_task takes care of marking the task as started and then completed.
                            # The si tells run_cmd to ignore the data returned from a previous task
                            tasks.run_cmd.si(cmd_name, populated_command,celery_path,task_id,process_domain_tuple=process_domain_tuple).set(task_id=task_id),

                            # right now, every executed command gets sent to a generic post_process task that can do
                            # additional stuff based on the command that just ran.
                            #tasks.post_process_domains.s(cmd_name, populated_command, output_base_dir, workspace, domain, simulation,celery_path,scan_mode),
                        )()  # .apply_async()
                    else:
                        result = chain(
                            #tasks.cel_create_task.subtask(args=(cmd_name, populated_command, domain, "", workspace, task_id)),
                            tasks.run_cmd.si(cmd_name, populated_command, celery_path, task_id).set(task_id=task_id),
                            tasks.post_process_domains_bb.s(cmd_name, populated_command, output_base_dir, workspace,
                                                         domain, simulation, celery_path,out_of_scope_hosts),
                        )()
Example #9
def process_db_services(output_base_dir,
                        simulation,
                        workspace,
                        target=None,
                        host=None,
                        config_file=None):
    celery_path = sys.path[0]
    config, supported_services = config_parser.read_config_ini(config_file)
    task_id_list = []
    total_tasks_num = 0
    if host:
        target = db.get_vhost_ip(host)
    try:
        os.stat(output_base_dir)
    except OSError:
        print("[+] Output directory does not exist. Creating " +
              output_base_dir)
        os.makedirs(output_base_dir)
    #unique_hosts = db.get_unique_hosts(workspace)
    unique_unscanned_vhosts = db.get_inscope_unsubmitted_vhosts(workspace)
    for row in unique_unscanned_vhosts:

        vhost = row[0]
        #print("in proccess_db_services - vhost:" + vhost)
        vhost_ip = db.get_vhost_ip(vhost, workspace)[0]
        #print(target)
        #print(vhost_ip)
        #print(str(vhost_ip))

        if (IPAddress(vhost_ip[0]) == target) or (target is None):
            host_dir = output_base_dir + vhost
            host_data_dir = host_dir + "/celerystalkOutput/"
            # Creates something like /pentest/10.0.0.1, /pentest/10.0.0.2, etc.
            utils.create_dir_structure(vhost, host_dir)
            #Next two lines create the file that will contain each command that was executed. This is not the audit log,
            #but a log of commands that can easily be copy/pasted if you need to run them again.
            summary_file_name = host_data_dir + "ScanSummary.log"
            summary_file = open(summary_file_name, 'a')

            #This is just a work-around until I have a real solution. Really, UDP scans should be done
            #for every host in the scanned host list: launch a quick UDP scan (top 100 ports).
            scan_output_base_host_filename = host_data_dir + vhost

            for (cmd_name, cmd) in config.items("nmap-commands"):
                if cmd_name == "udp_scan":
                    #print(cmd_name,cmd)
                    outfile = scan_output_base_host_filename + "_" + cmd_name
                    populated_command = cmd.replace("[TARGET]", vhost).replace(
                        "[OUTPUT]", outfile)
                    populated_command = replace_user_config_options(
                        config_file, populated_command)

                    #print(cmd)

                    #cmd_name = "udp-top100"
                    #populated_command = 'nmap -sV -sC -Pn -sU --top-ports 100 -oN {0}_nmap_UDP_service_scan.txt -oX {0}_nmap_UDP_service_scan.xml {1}'.format(
                    #    scan_output_base_host_filename, vhost)
                    if simulation:
                        populated_command = "#" + populated_command
                    #outfile = scan_output_base_host_filename + "_nmap_UDP_service_scan.txt"
                    task_id = uuid()
                    utils.create_task(cmd_name, populated_command, vhost,
                                      outfile + ".txt", workspace, task_id)
                    result = chain(
                        # insert a row into the database to mark the task as submitted. a subtask does not get tracked
                        # in celery the same way a task does, for instance, you can't find it in flower
                        #tasks.cel_create_task.subtask(args=(cmd_name, populated_command, vhost, outfile + ".txt", workspace, task_id)),

                        # run the command. run_task takes care of marking the task as started and then completed.
                        # The si tells run_cmd to ignore the data returned from a previous task
                        tasks.run_cmd.si(cmd_name, populated_command,
                                         celery_path,
                                         task_id).set(task_id=task_id
                                                      ), )()  # .apply_async()
                    task_id_list.append(result.task_id)

            if not simulation:
                db.update_vhosts_submitted(vhost, vhost, workspace, 1)

            #print "IP Address: {0}".format(vhost)
            db_services = db.get_all_services_for_ip(vhost_ip[0], workspace)

            for db_service in db_services:
                (ip, scanned_service_port, scanned_service_protocol,
                 scanned_service_name, product, version, extra_info,
                 workspace) = db_service

                scan_output_base_file_name = host_data_dir + vhost + "_" + str(
                    scanned_service_port
                ) + "_" + scanned_service_protocol + "_"

                #If the service name is not in the supported service list, give the user notice so they can add the service
                # and add some commands to the service. This is a major GAP right now. If the service is not in the config,
                # the script completely ignores it, which is not good!
                if scanned_service_name not in supported_services:
                    print(
                        "[!] Nmap reports {0}:{1} is running: [{2}]. There are no commands to run against {2} in config.ini."
                        .format(vhost, scanned_service_port,
                                scanned_service_name))
                    summary_file.write(
                        "[!] Nmap reports {0}:{1} is running: [{2}]. There are no commands to run against {2} in config.ini\n"
                        .format(vhost, scanned_service_port,
                                scanned_service_name))
                    #updated_port_scan = utils.nmap_follow_up_scan(vhost, scanned_service_port)
                    #scanned_service_name = updated_port_scan.hosts[0]._services[0].service
                    cmd_name = "nmap_service_scan"
                    populated_command = 'nmap -sV -sC -Pn -p {0} -oN {1}_nmap_service_scan.txt {2}'.format(
                        scanned_service_port, scan_output_base_file_name,
                        vhost)
                    populated_command = replace_user_config_options(
                        config_file, populated_command)

                    if simulation:
                        populated_command = "#" + populated_command

                    outfile = scan_output_base_file_name + "_nmap_service_scan.txt"

                    task_id = uuid()
                    utils.create_task(cmd_name, populated_command, vhost,
                                      outfile, workspace, task_id)
                    result = chain(
                        # insert a row into the database to mark the task as submitted. a subtask does not get tracked
                        # in celery the same way a task does, for instance, you can't find it in flower
                        #tasks.cel_create_task.subtask(args=(cmd_name, populated_command, vhost, outfile , workspace, task_id)),

                        # run the command. run_task takes care of marking the task as started and then completed.
                        # The si tells run_cmd to ignore the data returned from a previous task
                        tasks.run_cmd.si(cmd_name, populated_command,
                                         celery_path,
                                         task_id).set(task_id=task_id
                                                      ), )()  # .apply_async()

                    task_id_list.append(result.task_id)
                else:
                    parse_config_and_send_commands_to_celery(
                        scanned_service_name, scanned_service_port,
                        scan_output_base_file_name, config, simulation,
                        output_base_dir, host_dir, workspace, task_id_list,
                        vhost, scanned_service_protocol)
                #task_id_list = task_id_list + new_tasks_list
            summary_file.close()

            print("[+] Submitted {0} tasks to the queue.".format(
                len(task_id_list)))
            total_tasks_num = total_tasks_num + len(task_id_list)
            task_id_list = []
    print(
        "\n\n[+] Summary:\tSubmitted {0} tasks to the [{1}] workspace.".format(
            total_tasks_num, workspace))
    print(
        "[+]\t\tThere might be additional tasks added to the queue during post processing\n[+]"
    )
    print("[+]\t\tTo keep an eye on things, run one of these commands: \n[+]")
    print("[+]\t\tcelerystalk query [watch]")
    print("[+]\t\tcelerystalk query brief [watch]")
    print("[+]\t\tcelerystalk query summary [watch]\n")
Example #10
def process_url(url, workspace, output_dir, arguments, config_file=None):
    celery_path = sys.path[0]
    config, supported_services = config_parser.read_config_ini(config_file)
    task_id_list = []
    urls_to_screenshot = []
    simulation = arguments["--simulation"]

    scheme = None
    try:
        parsed_url = urlparse.urlparse(url)
        scheme = parsed_url[0]
        if not scheme:
            print(
                "\n[!] URL parameter (-u) requires that you specify the scheme (http:// or https://)\n"
            )
            exit()
        if ":" in parsed_url[1]:
            vhost, port = parsed_url[1].split(':')
        else:
            vhost = parsed_url[1]
            if scheme == "http":
                port = 80
            elif scheme == "https":
                port = 443
        path = parsed_url[2].replace("//", "/")
    except Exception:
        if not scheme:
            exit()

    try:
        ip = socket.gethostbyname(vhost)
    except socket.error:
        print("Error getting IP")
        ip = False

    db_ip_tuple = lib.db.get_vhost_ip(vhost, workspace)
    if db_ip_tuple:
        db_ip = db_ip_tuple[0][0]
        if db_ip != ip:
            lib.db.update_vhost_ip(ip, vhost, workspace)

    proto = "tcp"
    vhost_explicitly_out_of_scope = lib.db.is_vhost_explicitly_out_of_scope(
        vhost, workspace)
    if not vhost_explicitly_out_of_scope:  # and if the vhost is not explicitly out of scope
        if not ip:
            exit()
        elif ip == vhost:
            scan_output_base_file_dir = output_dir + "/" + ip + "/celerystalkOutput/" + ip + "_" + str(
                port) + "_" + proto + "_"
        else:
            scan_output_base_file_dir = output_dir + "/" + ip + "/celerystalkOutput/" + vhost + "_" + str(
                port) + "_" + proto + "_"

        host_dir = output_dir + "/" + ip
        host_data_dir = host_dir + "/celerystalkOutput/"
        # Creates something like /pentest/10.0.0.1, /pentest/10.0.0.2, etc.
        utils.create_dir_structure(ip, host_dir)
        # Next two lines create the file that will contain each command that was executed. This is not the audit log,
        # but a log of commands that can easily be copy/pasted if you need to run them again.
        summary_file_name = host_data_dir + "ScanSummary.log"
        summary_file = open(summary_file_name, 'a')

        is_vhost_in_db = lib.db.is_vhost_in_db(vhost, workspace)
        if is_vhost_in_db:
            lib.db.update_vhosts_in_scope(ip, vhost, workspace, 1)
        else:
            db_vhost = (ip, vhost, 1, 0, 1, workspace
                        )  # add it to the vhosts db and mark as in scope
            lib.db.create_vhost(db_vhost)

        # only mark it as submitted if this is not a simulation run.
        if not simulation:
            lib.db.update_vhosts_submitted(ip, vhost, workspace, 1)

        # Insert port/service combo into services table if it doesnt exist
        db_service = db.get_service(ip, port, proto, workspace)
        if not db_service:
            db_string = (ip, port, proto, scheme, '', '', '', workspace)
            db.create_service(db_string)

        #mark this host as in scope now
        if not simulation:
            db.update_vhosts_submitted(vhost, vhost, workspace, 1)
    # I might want to keep this, but I think it is redundant if we have gobuster and photon screenshots...
    # Insert url into paths table and take screenshot
    # db_path = db.get_path(path, workspace)
    # if not db_path:
    #     url_screenshot_filename = scan_output_base_file_dir + url.replace("http", "").replace("https", "") \
    #         .replace("/", "_") \
    #         .replace("\\", "") \
    #         .replace(":", "_") + ".png"
    #     url_screenshot_filename = url_screenshot_filename.replace("__", "")
    #     db_path = (ip, port, url, 0, url_screenshot_filename, workspace)
    #     db.insert_new_path(db_path)
    #     # print("Found Url: " + str(url))
    #     urls_to_screenshot.append((url, url_screenshot_filename))
    #     if not simulation:
    #         task_id = uuid()
    #         command_name = "Screenshots"
    #         populated_command = "firefox-esr URL mode screenshot | {0} | {1}".format(vhost,scan_output_base_file_dir)
    #         utils.create_task(command_name, populated_command, vhost, scan_output_base_file_dir, workspace, task_id)
    #         result = tasks.cel_take_screenshot.delay(urls_to_screenshot,task_id,vhost,scan_output_base_file_dir, workspace,command_name,populated_command)
    #     # print(result)

        # TODO: This def might introduce a bug - same code as parse config submit jobs to celery. need to just call that function here
        for section in config.sections():
            if (section == "http") or (section == "https"):
                if section == scheme:
                    for (cmd_name, cmd) in config.items(section):
                        path_for_filename = path.replace("/", "_")
                        outfile = scan_output_base_file_dir + path_for_filename + "_" + cmd_name
                        populated_command = cmd.replace(
                            "[TARGET]",
                            vhost).replace("[PORT]", str(port)).replace(
                                "[OUTPUT]", outfile).replace("/[PATH]", path)
                        populated_command = replace_user_config_options(
                            config_file, populated_command)

                        if simulation:
                            # debug - sends jobs to celery, but with a # in front of every one.
                            populated_command = "#" + populated_command

                        # Grab a UUID from celery.utils so that I can assign it to my task at init, which is amazing because
                        # that allows me to pass it to all of the tasks in the chain.

                        task_id = uuid()
                        utils.create_task(cmd_name, populated_command, vhost,
                                          outfile + ".txt", workspace, task_id)
                        result = chain(
                            # insert a row into the database to mark the task as submitted. a subtask does not get tracked
                            # in celery the same way a task does, for instance, you can't find it in flower
                            # tasks.cel_create_task.subtask(args=(cmd_name,populated_command, vhost, outfile + ".txt", workspace, task_id)),

                            # run the command. run_task takes care of marking the task as started and then completed.
                            # The si tells run_cmd to ignore the data returned from a previous task
                            tasks.run_cmd.si(
                                cmd_name, populated_command, celery_path,
                                task_id).set(task_id=task_id
                                             ), )()  # .apply_async()

                        task_id_list.append(result.task_id)
                        host_audit_log = host_dir + "/" + "{0}_executed_commands.txt".format(
                            ip)
                        f = open(host_audit_log, 'a')
                        f.write(populated_command + "\n\n")
                        f.close()
        print("[+] Submitted {0} tasks to queue for {1}.".format(
            len(task_id_list), url))
    else:
        print(
            "[!] {0} is explicitly marked as out of scope. Skipping...".format(
                vhost))
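The scheme/host/port derivation above, extracted into a standalone helper (Python 2's urlparse module, matching this codebase; on Python 3 it lives in urllib.parse):

import urlparse

def host_and_port(url):
    parsed = urlparse.urlparse(url)
    scheme, netloc = parsed[0], parsed[1]
    if ":" in netloc:
        vhost, port = netloc.split(":")
        return vhost, int(port)
    return netloc, 443 if scheme == "https" else 80

print(host_and_port("https://example.com/admin"))  # ('example.com', 443)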
Example #11
def post_process(*args):
    command_name, populated_command, output_base_dir, workspace, vhost, host_dir, simulation, scanned_service_port, scanned_service, scanned_service_protocol, celery_path = args
    screenshot_name = ""
    urls_to_screenshot = []
    urls_to_screenshot_with_filenames = []
    if "gobuster" in populated_command:
        screenshot_name = "gobuster"

        scan_output_base_file_dir = os.path.join(
            output_base_dir, "celerystalkReports", "screens", vhost + "_" +
            str(scanned_service_port) + "_" + scanned_service_protocol)

        try:
            os.stat(scan_output_base_file_dir)
        except OSError:
            os.makedirs(scan_output_base_file_dir)

        post_gobuster_filename = populated_command.split(">")[1].split(
            "&")[0].strip()

        print("Post gobuster filename" + post_gobuster_filename + "\n")
        populated_command_list = populated_command.split(" ")

        for index, arg in enumerate(populated_command_list):
            if "-u" == arg:
                if "http" in populated_command_list[index + 1]:
                    scanned_url = populated_command_list[index + 1]
                    #print("Scanned_url: " + scanned_url)

        try:
            with open(post_gobuster_filename, 'r') as gobuster_file:
                lines = gobuster_file.read().splitlines()
                print(lines)
                if len(lines) > 300:
                    #TODO: definitely don't submit 300 directories to scan, but need a way to tell the user
                    exit()

            for url in lines:
                url = url.split("?")[0].replace("//", "/")
                if url.startswith("http"):
                    url_screenshot_filename = scan_output_base_file_dir + "/" + url.replace("http", "").replace("https", "") \
                        .replace("/", "_") \
                        .replace("\\", "") \
                        .replace(":", "_") + ".png"
                    url_screenshot_filename = url_screenshot_filename.replace(
                        "__", "")
                    db_path = (vhost, scanned_service_port, url, 0,
                               url_screenshot_filename, workspace)
                    db.insert_new_path(db_path)
                    print("Found Url: " + str(url))
                    urls_to_screenshot_with_filenames.append(
                        (url, url_screenshot_filename))
                    urls_to_screenshot.append((url, url_screenshot_filename))

                    #result = lib.utils.take_screenshot(url,url_screenshot_filename)
        except Exception:
            if not simulation:
                print("[!] Could not open {0}".format(post_gobuster_filename))

    if "photon" in populated_command:
        screenshot_name = "photon"

        scan_output_base_file_dir = os.path.join(
            output_base_dir, "celerystalkReports", "screens", vhost + "_" +
            str(scanned_service_port) + "_" + scanned_service_protocol)

        try:
            os.stat(scan_output_base_file_dir)
        except OSError:
            os.makedirs(scan_output_base_file_dir)

        #post_photon_filename = populated_command.split(">")[1].lstrip()
        post_photon_filename = lib.db.get_output_file_for_command(
            workspace, populated_command)[0][0]
        #print(post_photon_filename)

        print("Post photon filename" + post_photon_filename + "\n")
        populated_command_list = populated_command.split(" ")

        for index, arg in enumerate(populated_command_list):
            if "-u" == arg:
                if "http" in populated_command_list[index + 1]:
                    scanned_url = populated_command_list[index + 1]
                    #print("Scanned_url: " + scanned_url)

        try:
            with open(post_photon_filename, 'r') as photon_file:
                photon_file_json = simplejson.load(photon_file)

                good_sections = ["internal", "robots", "fuzzable"]
                for section in good_sections:
                    for url in photon_file_json[section]:
                        if url.startswith("http"):
                            url_screenshot_filename = scan_output_base_file_dir + "/" + url.replace("http", "").replace("https", "") \
                                .replace("/", "_") \
                                .replace("\\", "") \
                                .replace(":", "_") + ".png"
                            url_screenshot_filename = url_screenshot_filename.replace(
                                "__", "")
                            db_path = (vhost, scanned_service_port, url, 0,
                                       url_screenshot_filename, workspace)
                            db.insert_new_path(db_path)
                            print("Found Url: " + str(url))
                            urls_to_screenshot_with_filenames.append(
                                (str(url), url_screenshot_filename))
                            urls_to_screenshot.append(
                                (str(url), url_screenshot_filename))

        except Exception:
            if not simulation:
                print("[!] Could not open {0}".format(post_photon_filename))

    if not simulation:
        if len(urls_to_screenshot) > 0:
            task_id = uuid()
            populated_command = "firefox-esr {0}-screenshots | {1} | {2}".format(
                screenshot_name, vhost, scan_output_base_file_dir)
            command_name = "Screenshots"
            utils.create_task(command_name, populated_command, vhost,
                              scan_output_base_file_dir, workspace, task_id)
            cel_take_screenshot.delay(urls_to_screenshot_with_filenames,
                                      task_id, vhost,
                                      scan_output_base_file_dir, workspace,
                                      command_name, populated_command)
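The URL-to-PNG-name sanitization used above, shown as a small helper (same replace chain as the original, illustrative input):

def screenshot_filename(base_dir, url):
    name = (url.replace("http", "").replace("https", "")
               .replace("/", "_")
               .replace("\\", "")
               .replace(":", "_") + ".png")
    return (base_dir + "/" + name).replace("__", "")

print(screenshot_filename("/screens", "http://10.0.0.1/admin"))
# /screens/_10.0.0.1_admin.png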
Example #12
                            urls_to_screenshot_with_filenames.append((str(url), url_screenshot_filename))
                            urls_to_screenshot.append((str(url), url_screenshot_filename))


        except Exception as e:
            if not simulation:
                print("[!] Could not open {0}".format(post_photon_filename))



    if not simulation:
        if len(urls_to_screenshot) > 0:
            task_id = uuid()
            populated_command = "firefox-esr {0}-screenshots | {1} | {2}".format(screenshot_name, vhost, scan_output_base_file_dir)
            command_name = "Screenshots"
            utils.create_task(command_name, populated_command, vhost, scan_output_base_file_dir, workspace, task_id)
            cel_take_screenshot.delay(urls_to_screenshot_with_filenames,task_id,vhost,scan_output_base_file_dir, workspace,command_name,populated_command)

            #lib.scan.aquatone_host(urls_to_screenshot, vhost, workspace, simulation, scan_output_base_file_dir, celery_path)


# @after_task_publish.connect(sender='app.task.run_cmd')
# def task_sent_handler(sender=None, headers=None, body=None, **kwargs):
#     db.create_task(task)


@app.task()
def cel_create_task(*args,**kwargs):
    command_name, populated_command, ip, output_dir, workspace, task_id = args
    db_task = (task_id, 1, command_name, populated_command, ip, output_dir, 'SUBMITTED', workspace)
    db.create_task(db_task)
Example #13
def post_process_domains_bb(vhosts, command_name, populated_command, output_base_dir, workspace, simulation,
                            celery_path, out_of_scope_hosts):
    config, supported_services = config_parser.read_config_ini()
    vhosts = vhosts.splitlines()
    # from https://stackoverflow.com/questions/14693701/how-can-i-remove-the-ansi-escape-sequences-from-a-string-in-python
    ansi_escape = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')
    for vhost in vhosts:
        # print("raw:\t" + vhost)
        vhost = ansi_escape.sub('', vhost)
        # print("escaped:\t" + vhost)
        if re.match(r'\w', vhost):
            try:
                ip = socket.gethostbyname(vhost)
                if vhost not in out_of_scope_hosts:
                    print("Found subdomain (in scope):\t" + vhost)
                    db_vhost = (ip, vhost, 1, 0, workspace)
                    db.create_vhost(db_vhost)
                else:
                    print("Found subdomain (out of scope):\t" + vhost)
                    db_vhost = (ip, vhost, 0, 0, workspace)
                    db.create_vhost(db_vhost)
            except Exception:
                print("There was an issue running the nmap scan against {0}.".format(vhost))
                ip = ""
                db_vhost = (ip, vhost, 0, 0, workspace)  # not in scope if no IP
                print(db_vhost)
                db.create_vhost(db_vhost)

    # pull all in scope vhosts that have not been submitted
    inscope_vhosts = db.get_inscope_unsubmitted_vhosts(workspace)
    for scannable_vhost in inscope_vhosts:
        scannable_vhost = scannable_vhost[0]
        ip = db.get_vhost_ip(scannable_vhost, workspace)
        ip = ip[0][0]
        print("I'm going to scan: " + scannable_vhost + ":" + ip)



        db_scanned_services = db.get_all_services_for_ip(ip, workspace)
        for (id, ip, scanned_service_port, scanned_service_protocol,
             scanned_service_name, workspace) in db_scanned_services:
            # run chain on each one and then update db as submitted
            scan_output_base_file_name = output_base_dir + "/" + ip + "/celerystalkOutput/" + scannable_vhost + "_" + str(
                scanned_service_port) + "_" + scanned_service_protocol + "_"
            host_dir = output_base_dir + "/" + ip

            # TODO: This def might introduce a bug - same code as parse config submit jobs to celery. need to just call that function here
            for section in config.sections():
                if (section == "http") or (section == "https"):
                    if section == scanned_service_name:
                        for (cmd_name, cmd) in config.items(section):
                            outfile = scan_output_base_file_name + cmd_name
                            populated_command = cmd.replace("[TARGET]", scannable_vhost)\
                                                    .replace("[PORT]",str(scanned_service_port))\
                                                    .replace("[OUTPUT]", outfile) \
                                                    .replace("[PATH]", "")
                            if simulation:
                                # debug - sends jobs to celery, but with a # in front of every one.
                                populated_command = "#" + populated_command

                            # Grab a UUID from celery.utils so that I can assign it to my task at init, which is amazing because
                            # that allows me to pass it to all of the tasks in the chain.

                            task_id = uuid()
                            utils.create_task(cmd_name,populated_command, scannable_vhost, outfile + ".txt",workspace, task_id)
                            result = chain(
                                #cel_create_task.subtask(args=(cmd_name,populated_command, scannable_vhost, outfile + ".txt",workspace, task_id)),
                                run_cmd.si(cmd_name, populated_command, celery_path, task_id).set(task_id=task_id),
                                post_process.si(cmd_name, populated_command, output_base_dir, workspace, scannable_vhost,
                                                host_dir,
                                                simulation,
                                                scanned_service_port, scanned_service_name,
                                                scanned_service_protocol, celery_path),
                            )()

                            host_audit_log = host_dir + "/" + "{0}_executed_commands.txt".format(ip)
                            f = open(host_audit_log, 'a')
                            f.write(populated_command + "\n\n")
                            f.close()

        db.update_vhosts_submitted(ip, scannable_vhost, workspace, 1)
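The ansi_escape regex above matters because subdomain tools often colorize their output; stripping the escape sequences keeps the hostnames clean before the socket.gethostbyname lookup. Standalone:

import re

ansi_escape = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')
colored = "\x1b[32msub.example.com\x1b[0m"   # green-wrapped tool output
print(ansi_escape.sub('', colored))          # sub.example.com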
Example #14
def post_process_domains(vhosts, command_name, populated_command, output_base_dir, workspace, domain, simulation, celery_path, scan_mode):
    config,supported_services = config_parser.read_config_ini()
    vhosts = vhosts.splitlines()
    # from https://stackoverflow.com/questions/14693701/how-can-i-remove-the-ansi-escape-sequences-from-a-string-in-python
    ansi_escape = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')
    for vhost in vhosts:
        #print("raw:\t" + vhost)
        vhost = ansi_escape.sub('', vhost)
        #print("escaped:\t" + vhost)
        if re.match(r'\w', vhost):
            in_scope,ip = utils.domain_scope_checker(vhost,workspace)
            if in_scope == 1:
                print("Found subdomain (in scope):\t" + vhost)
                db_vhost = (ip,vhost,1, 0,workspace)
                db.create_vhost(db_vhost)
            else:
                print("Found subdomain (out of scope):\t" + vhost)
                db_vhost = (ip, vhost, 0, 0, workspace)
                db.create_vhost(db_vhost)

        # elif scan_mode == "BB":
        #
        #     cmd_name, cmd = config['nmap-bug-bounty_mode']
        #
        #     utils.
        #
        #     db_vhost = ("", vhost, 1, 0, workspace)
        #     db.create_vhost(db_vhost)


    #pull all in scope vhosts that have not been submitted
    inscope_vhosts = db.get_inscope_unsubmitted_vhosts(workspace)
    for scannable_vhost in inscope_vhosts:
        scannable_vhost = scannable_vhost[0]
        ip = db.get_vhost_ip(scannable_vhost,workspace)
        ip = ip[0][0]
        db_scanned_services = db.get_all_services_for_ip(ip, workspace)
        for (id,ip,scanned_service_port,scanned_service_protocol,scanned_service_name,workspace) in db_scanned_services:
        #run chain on each one and then update db as submitted
            scan_output_base_file_name = output_base_dir + "/" + ip + "/celerystalkOutput/" + scannable_vhost + "_" +  str(scanned_service_port) + "_" + scanned_service_protocol + "_"
            host_dir = output_base_dir + "/" + ip

            #TODO: This def might introduce a bug - same code as parse config submit jobs to celery. need to just call that function here
            for section in config.sections():
                if (section == "http") or (section == "https"):
                    if section == scanned_service_name:
                        for (cmd_name, cmd) in config.items(section):
                            outfile = scan_output_base_file_name + cmd_name
                            populated_command = cmd.replace("[TARGET]", scannable_vhost).replace("[PORT]",
                                str(scanned_service_port)).replace("[OUTPUT]", outfile).replace("[PATH]", "")
                            if simulation:
                                # debug - sends jobs to celery, but with a # in front of every one.
                                populated_command = "#" + populated_command

                            # Grab a UUID from celery.utils so that I can assign it to my task at init, which is amazing because
                            # that allows me to pass it to all of the tasks in the chain.

                            task_id = uuid()
                            utils.create_task(cmd_name,populated_command, scannable_vhost, outfile + ".txt", workspace, task_id)
                            result = chain(
                                # insert a row into the database to mark the task as submitted. a subtask does not get tracked
                                # in celery the same way a task does, for instance, you can't find it in flower
                                #cel_create_task.subtask(args=(cmd_name,populated_command, scannable_vhost, outfile + ".txt", workspace, task_id)),

                                # run the command. run_task takes care of marking the task as started and then completed.
                                # The si tells run_cmd to ignore the data returned from a previous task
                                run_cmd.si(cmd_name, populated_command, celery_path, task_id).set(task_id=task_id),

                                # right now, every executed command gets sent to a generic post_process task that can do
                                # additional stuff based on the command that just ran.
                                post_process.si(cmd_name, populated_command, output_base_dir, workspace, scannable_vhost, host_dir,
                                                      simulation,
                                                      scanned_service_port, scanned_service_name,
                                                      scanned_service_protocol,celery_path),
                            )()  # .apply_async()

                            #task_id_list.append(result.task_id)
                            host_audit_log = host_dir + "/" + "{0}_executed_commands.txt".format(ip)
                            f = open(host_audit_log, 'a')
                            f.write(populated_command + "\n\n")
                            f.close()

        db.update_vhosts_submitted(ip,scannable_vhost,workspace,1)
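utils.domain_scope_checker above returns an (in_scope, ip) pair. A toy version with assumed semantics (resolve the vhost, then test its IP against the workspace's in-scope networks; netaddr is already used elsewhere in these examples):

import socket
from netaddr import IPAddress, IPNetwork

def domain_scope_checker(vhost, in_scope_networks):
    try:
        ip = socket.gethostbyname(vhost)
    except socket.error:
        return 0, ""                 # unresolvable: treat as out of scope
    for net in in_scope_networks:
        if IPAddress(ip) in IPNetwork(net):
            return 1, ip
    return 0, ip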
Example #15
def process_url(url, output_base_dir, workspace, simulation):
    celery_path = sys.path[0]
    config,supported_services = config_parser.read_config_ini()
    task_id_list = []
    urls_to_screenshot = []

    scheme = None
    try:
        parsed_url = urlparse.urlparse(url)
        scheme = parsed_url[0]
        if not scheme:
            print("\n[!] URL parameter (-u) requires that you specify the scheme (http:// or https://)\n")
            exit()
        if ":" in parsed_url[1]:
            target,port = parsed_url[1].split(':')
        else:
            target = parsed_url[1]
            if scheme == "http":
                port = 80
            elif scheme == "https":
                port = 443
        path = parsed_url[2]
    except Exception:
        if not scheme:
            exit()
    try:
        ip = socket.gethostbyname(target)
    except socket.error:
        print("Error getting IP")
        ip = False
    if not ip:
        exit()
    proto = "tcp"

    if ip == target:
        scan_output_base_file_dir = output_base_dir + "/" + ip + "/celerystalkOutput/" + ip + "_" + str(port) + "_" + proto + "_"
    else:
        scan_output_base_file_dir = output_base_dir + "/" + ip + "/celerystalkOutput/" + target + "_" + str(port) + "_" + proto + "_"

    host_dir = output_base_dir + "/" + ip
    host_data_dir = host_dir + "/celerystalkOutput/"
    # Creates something like /pentest/10.0.0.1, /pentest/10.0.0.2, etc.
    utils.create_dir_structure(ip, host_dir)
    # Next two lines create the file that will contain each command that was executed. This is not the audit log,
    # but a log of commands that can easily be copy/pasted if you need to run them again.
    summary_file_name = host_data_dir + "ScanSummary.log"
    summary_file = open(summary_file_name, 'a')

    db_vhost = (ip, target, 1, 1, workspace)  # in this mode all vhosts are in scope
    #print(db_vhost)
    db.create_vhost(db_vhost)

    #Insert port/service combo into services table
    db_service = db.get_service(ip, port, proto, workspace)
    if not db_service:
        db_string = (ip, port, proto, scheme, workspace)
        db.create_service(db_string)

    # Insert url into paths table and take screenshot
    db_path = db.get_path(path,workspace)
    if not db_path:
        url_screenshot_filename = scan_output_base_file_dir + url.replace("http", "").replace("https", "") \
            .replace("/", "_") \
            .replace("\\", "") \
            .replace(":", "_") + ".png"
        url_screenshot_filename = url_screenshot_filename.replace("__", "")
        db_path = (ip, port, url, 0, url_screenshot_filename, workspace)
        db.insert_new_path(db_path)
        #print("Found Url: " + str(url))
        urls_to_screenshot.append((url, url_screenshot_filename))
        result = utils.take_screenshot(urls_to_screenshot)
        #print(result)


    #TODO: This def might introduce a bug - same code as parse config submit jobs to celery. need to just call that function here
    for section in config.sections():
        if (section == "http") or (section == "https"):
            if section == scheme:
                for (cmd_name, cmd) in config.items(section):
                    outfile = scan_output_base_file_dir + cmd_name
                    populated_command = cmd.replace("[TARGET]", target).replace("[PORT]",
                                            str(port)).replace("[OUTPUT]", outfile).replace("[PATH]", path)
                    if simulation:
                        # debug - sends jobs to celery, but with a # in front of every one.
                        populated_command = "#" + populated_command

                    # Grab a UUID from celery.utils so that I can assign it to my task at init, which is amazing because
                    # that allows me to pass it to all of the tasks in the chain.

                    task_id = uuid()
                    utils.create_task(cmd_name,populated_command, target, outfile + ".txt", workspace, task_id)
                    result = chain(
                        # insert a row into the database to mark the task as submitted. a subtask does not get tracked
                        # in celery the same way a task does, for instance, you can't find it in flower
                        #tasks.cel_create_task.subtask(args=(cmd_name,populated_command, target, outfile + ".txt", workspace, task_id)),

                        # run the command. run_task takes care of marking the task as started and then completed.
                        # The si tells run_cmd to ignore the data returned from a previous task
                        tasks.run_cmd.si(cmd_name, populated_command, celery_path,task_id).set(task_id=task_id),

                        # right now, every executed command gets sent to a generic post_process task that can do
                        # additional stuff based on the command that just ran.
                        tasks.post_process.si(cmd_name, populated_command, output_base_dir, workspace, target, host_dir,
                                              simulation,port, scheme,proto,celery_path),
                    )()  # .apply_async()

                    task_id_list.append(result.task_id)
                    host_audit_log = host_dir + "/" + "{0}_executed_commands.txt".format(ip)
                    f = open(host_audit_log, 'a')
                    f.write(populated_command + "\n\n")
                    f.close()
    print("[+] Submitted {0} tasks to queue.\n".format(len(task_id_list)))