Code Example #1
File: scan.py Project: triplekill/celerystalk
def process_db_vhosts(workspace, simulation, target_list=None,dont_scan_ips=None,config_file=None):
    workspace_mode = lib.db.get_workspace_mode(workspace)[0][0]
    all_commands = []
    output_base_dir = lib.db.get_output_dir_for_workspace(workspace)[0][0]
    try:
        os.stat(output_base_dir)
    except OSError:
        print("[+] Output directory does not exist. Creating " + output_base_dir)
        os.makedirs(output_base_dir)
    unique_unscanned_vhosts = db.get_inscope_unsubmitted_vhosts(workspace)
    # Convert the list of single-element tuples into a flat list of vhosts
    unique_unscanned_vhosts_list = [i[0] for i in unique_unscanned_vhosts]

    if target_list:
        for vhost in target_list:
            #TODO: try this: if vhost in unique_unscanned_vhosts_list:
            for unscanned_vhost in unique_unscanned_vhosts_list:
                if str(vhost) == str(unscanned_vhost):
                    vhost_explicitly_out_of_scope = lib.db.is_vhost_explicitly_out_of_scope(str(vhost), workspace)
                    if not vhost_explicitly_out_of_scope:
                        command_list = []
                        try:
                            IPAddress(vhost)
                            if not dont_scan_ips:
                                command_list = populate_comamnds(vhost, workspace, simulation, output_base_dir,config_file=config_file)
                        except:
                            command_list = populate_commands_vhost_http_https_only(vhost, workspace, simulation,output_base_dir,config_file=config_file)
                        if len(command_list) > 0:
                            print("Submitted [{1}] tasks for {0}".format(unscanned_vhost, len(command_list)))
                        all_commands = all_commands + command_list
    else:
        for vhost in unique_unscanned_vhosts_list:
            command_list = []
            #print(vhost)
            vhost_explicitly_out_of_scope = lib.db.is_vhost_explicitly_out_of_scope(vhost, workspace)
            if not vhost_explicitly_out_of_scope:
                try:
                    IPAddress(vhost)
                    if not dont_scan_ips:
                        command_list = populate_comamnds(vhost, workspace, simulation, output_base_dir,config_file=config_file)
                        if len(command_list) > 0:
                            print("Submitted [{1}] tasks for {0}".format(vhost, len(command_list)))
                except:
                    command_list = populate_commands_vhost_http_https_only(vhost, workspace, simulation, output_base_dir,config_file=config_file)
                    if len(command_list) > 0:
                        print("Submitted [{1}] tasks for {0}".format(vhost, len(command_list)))
                all_commands = all_commands + command_list

    shuffle(all_commands)
    for populated_command_tuple in all_commands:
        #print populated_command_tuple
        send_commands_to_celery(populated_command_tuple,output_base_dir,simulation)

    total_tasks_num = len(all_commands)
    if total_tasks_num > 0:
        print("\n\n[+] Summary:\tSubmitted {0} tasks to the [{1}] workspace.".format(total_tasks_num,workspace))
        print("[+]\t\tThere might be additional tasks added to the queue during post processing\n[+]")
        print("[+]\t\tTo keep an eye on things, run one of these commands: \n[+]")
        print("[+]\t\tcelerystalk query [watch]")
        print("[+]\t\tcelerystalk query brief [watch]")
        print("[+]\t\tcelerystalk query summary [watch]\n")
Code Example #2
def process_db_services(output_base_dir,
                        simulation,
                        workspace,
                        target=None,
                        host=None,
                        config_file=None):
    celery_path = sys.path[0]
    config, supported_services = config_parser.read_config_ini(config_file)
    task_id_list = []
    total_tasks_num = 0
    if host:
        target = db.get_vhost_ip(host, workspace)
    try:
        os.stat(output_base_dir)
    except OSError:
        print("[+] Output directory does not exist. Creating " +
              output_base_dir)
        os.makedirs(output_base_dir)
    #unique_hosts = db.get_unique_hosts(workspace)
    unique_unscanned_vhosts = db.get_inscope_unsubmitted_vhosts(workspace)
    for row in unique_unscanned_vhosts:

        vhost = row[0]
        #print("in proccess_db_services - vhost:" + vhost)
        vhost_ip = db.get_vhost_ip(vhost, workspace)[0]
        #print(target)
        #print(vhost_ip)
        #print(str(vhost_ip))

        if (IPAddress(vhost_ip[0]) == target) or (target is None):
            host_dir = output_base_dir + vhost
            host_data_dir = host_dir + "/celerystalkOutput/"
            # Creates something like /pentest/10.0.0.1, /pentest/10.0.0.2, etc.
            utils.create_dir_structure(vhost, host_dir)
            #Next two lines create the file that will contain each command that was executed. This is not the audit log,
            #but a log of commands that can easily be copy/pasted if you need to run them again.
            summary_file_name = host_data_dir + "ScanSummary.log"
            summary_file = open(summary_file_name, 'a')

            # This is just a workaround until there is a real solution for UDP scanning:
            # for every host in the scanned host list, launch a quick UDP scan (top 100 ports).
            scan_output_base_host_filename = host_data_dir + vhost

            for (cmd_name, cmd) in config.items("nmap-commands"):
                if cmd_name == "udp_scan":
                    #print(cmd_name,cmd)
                    outfile = scan_output_base_host_filename + "_" + cmd_name
                    populated_command = cmd.replace("[TARGET]", vhost).replace(
                        "[OUTPUT]", outfile)
                    populated_command = replace_user_config_options(
                        config_file, populated_command)

                    #print(cmd)

                    #cmd_name = "udp-top100"
                    #populated_command = 'nmap -sV -sC -Pn -sU --top-ports 100 -oN {0}_nmap_UDP_service_scan.txt -oX {0}_nmap_UDP_service_scan.xml {1}'.format(
                    #    scan_output_base_host_filename, vhost)
                    if simulation:
                        populated_command = "#" + populated_command
                    #outfile = scan_output_base_host_filename + "_nmap_UDP_service_scan.txt"
                    task_id = uuid()
                    utils.create_task(cmd_name, populated_command, vhost,
                                      outfile + ".txt", workspace, task_id)
                    result = chain(
                        # insert a row into the database to mark the task as submitted. a subtask does not get tracked
                        # in celery the same way a task does, for instance, you can't find it in flower
                        #tasks.cel_create_task.subtask(args=(cmd_name, populated_command, vhost, outfile + ".txt", workspace, task_id)),

                        # run the command. run_task takes care of marking the task as started and then completed.
                        # The si tells run_cmd to ignore the data returned from a previous task
                        tasks.run_cmd.si(cmd_name, populated_command,
                                         celery_path,
                                         task_id).set(task_id=task_id
                                                      ), )()  # .apply_async()
                    task_id_list.append(result.task_id)

            if not simulation:
                db.update_vhosts_submitted(vhost, vhost, workspace, 1)

            #print "IP Address: {0}".format(vhost)
            db_services = db.get_all_services_for_ip(vhost_ip[0], workspace)

            for db_service in db_services:
                (ip, scanned_service_port, scanned_service_protocol,
                 scanned_service_name, product, version, extra_info,
                 workspace) = db_service

                scan_output_base_file_name = host_data_dir + vhost + "_" + str(
                    scanned_service_port
                ) + "_" + scanned_service_protocol + "_"

                # If the service name is not in the supported services list, give the user notice so they can add
                # the service and some commands for it to the config. This is a major gap right now: if the service
                # is not in the config, the script completely ignores it, which is not good!
                if scanned_service_name not in supported_services:
                    print(
                        "[!] Nmap reports {0}:{1} is running: [{2}]. There are no commands to run against {2} in config.ini."
                        .format(vhost, scanned_service_port,
                                scanned_service_name))
                    summary_file.write(
                        "[!] Nmap reports {0}:{1} is running: [{2}]. There are no commands to run against {2} in config.ini\n"
                        .format(vhost, scanned_service_port,
                                scanned_service_name))
                    #updated_port_scan = utils.nmap_follow_up_scan(vhost, scanned_service_port)
                    #scanned_service_name = updated_port_scan.hosts[0]._services[0].service
                    cmd_name = "nmap_service_scan"
                    populated_command = 'nmap -sV -sC -Pn -p {0} -oN {1}_nmap_service_scan.txt {2}'.format(
                        scanned_service_port, scan_output_base_file_name,
                        vhost)
                    populated_command = replace_user_config_options(
                        config_file, populated_command)

                    if simulation:
                        populated_command = "#" + populated_command

                    outfile = scan_output_base_file_name + "_nmap_service_scan.txt"

                    task_id = uuid()
                    utils.create_task(cmd_name, populated_command, vhost,
                                      outfile, workspace, task_id)
                    result = chain(
                        # insert a row into the database to mark the task as submitted. a subtask does not get tracked
                        # in celery the same way a task does, for instance, you can't find it in flower
                        #tasks.cel_create_task.subtask(args=(cmd_name, populated_command, vhost, outfile , workspace, task_id)),

                        # run the command. run_task takes care of marking the task as started and then completed.
                        # The si tells run_cmd to ignore the data returned from a previous task
                        tasks.run_cmd.si(cmd_name, populated_command,
                                         celery_path,
                                         task_id).set(task_id=task_id
                                                      ), )()  # .apply_async()

                    task_id_list.append(result.task_id)
                else:
                    parse_config_and_send_commands_to_celery(
                        scanned_service_name, scanned_service_port,
                        scan_output_base_file_name, config, simulation,
                        output_base_dir, host_dir, workspace, task_id_list,
                        vhost, scanned_service_protocol)
                #task_id_list = task_id_list + new_tasks_list
            summary_file.close()

            print("[+] Submitted {0} tasks to the queue.".format(
                len(task_id_list)))
            total_tasks_num = total_tasks_num + len(task_id_list)
            task_id_list = []
    print(
        "\n\n[+] Summary:\tSubmitted {0} tasks to the [{1}] workspace.".format(
            total_tasks_num, workspace))
    print(
        "[+]\t\tThere might be additional tasks added to the queue during post processing\n[+]"
    )
    print("[+]\t\tTo keep an eye on things, run one of these commands: \n[+]")
    print("[+]\t\tcelerystalk query [watch]")
    print("[+]\t\tcelerystalk query brief [watch]")
    print("[+]\t\tcelerystalk query summary [watch]\n")
Code Example #3
File: tasks.py Project: minkione/celerystalk
def post_process_domains_bb(vhosts, command_name, populated_command, output_base_dir, workspace, simulation,
                         celery_path,out_of_scope_hosts):
    config, supported_services = config_parser.read_config_ini()
    vhosts = vhosts.splitlines()
    # from https://stackoverflow.com/questions/14693701/how-can-i-remove-the-ansi-escape-sequences-from-a-string-in-python
    ansi_escape = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')
    for vhost in vhosts:
        # print("raw:\t" + vhost)
        vhost = ansi_escape.sub('', vhost)
        # print("escaped:\t" + vhost)
        if re.match(r'\w', vhost):
            try:
                ip = socket.gethostbyname(vhost)
                if vhost not in out_of_scope_hosts:
                    print("Found subdomain (in scope):\t" + vhost)
                    db_vhost = (ip, vhost, 1, 0, workspace)
                    db.create_vhost(db_vhost)
                else:
                    print("Found subdomain (out of scope):\t" + vhost)
                    db_vhost = (ip, vhost, 0, 0, workspace)
                    db.create_vhost(db_vhost)
            except:
                print("1There was an issue running the nmap scan against {0}.").format(vhost)
                ip = ""
                db_vhost = (ip, vhost, 0, 0, workspace)  # not in scope if no IP
                print(db_vhost)
                db.create_vhost(db_vhost)

    # pull all in scope vhosts that have not been submitted
    inscope_vhosts = db.get_inscope_unsubmitted_vhosts(workspace)
    for scannable_vhost in inscope_vhosts:
        scannable_vhost = scannable_vhost[0]
        ip = db.get_vhost_ip(scannable_vhost, workspace)
        ip = ip[0][0]
        print("I'm going to scan: " + scannable_vhost + ":" + ip)



        db_scanned_services = db.get_all_services_for_ip(ip, workspace)
        for (
        id, ip, scanned_service_port, scanned_service_protocol, scanned_service_name, workspace) in db_scanned_services:
            # run chain on each one and then update db as submitted
            scan_output_base_file_name = output_base_dir + "/" + ip + "/celerystalkOutput/" + scannable_vhost + "_" + str(
                scanned_service_port) + "_" + scanned_service_protocol + "_"
            host_dir = output_base_dir + "/" + ip

            # TODO: This def might introduce a bug - same code as parse config submit jobs to celery. need to just call that function here
            for section in config.sections():
                if (section == "http") or (section == "https"):
                    if section == scanned_service_name:
                        for (cmd_name, cmd) in config.items(section):
                            outfile = scan_output_base_file_name + cmd_name
                            populated_command = cmd.replace("[TARGET]", scannable_vhost)\
                                                    .replace("[PORT]",str(scanned_service_port))\
                                                    .replace("[OUTPUT]", outfile) \
                                                    .replace("[PATH]", "")
                            if simulation:
                                # debug - sends jobs to celery, but with a # in front of every one.
                                populated_command = "#" + populated_command

                            # Grab a UUID from celery.utils so it can be assigned to the task when it is created,
                            # which allows the same id to be passed to every task in the chain.

                            task_id = uuid()
                            result = chain(
                                cel_create_task.subtask(args=(cmd_name,populated_command, scannable_vhost, outfile + ".txt",workspace, task_id)),
                                run_cmd.si(cmd_name, populated_command, celery_path, task_id).set(task_id=task_id),
                                post_process.si(cmd_name, populated_command, output_base_dir, workspace, scannable_vhost,
                                                host_dir,
                                                simulation,
                                                scanned_service_port, scanned_service_name,
                                                scanned_service_protocol, celery_path),
                            )()

                            host_audit_log = host_dir + "/" + "{0}_executed_commands.txt".format(ip)
                            f = open(host_audit_log, 'a')
                            f.write(populated_command + "\n\n")
                            f.close()

        db.update_vhosts_submitted(ip, scannable_vhost, workspace, 1)
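post_process_domains_bb expects the raw (possibly ANSI-colored) stdout of a subdomain-discovery command as one newline-delimited string. A sketch with placeholder values; the tool name, paths, and hosts are illustrative only.

# The ansi_escape regex above strips terminal color codes, e.g.:
#   ansi_escape.sub('', '\x1b[32madmin.example.com\x1b[0m')  ->  'admin.example.com'

# Hypothetical call: in-scope subdomains are inserted into the DB and scanned;
# out-of-scope ones are recorded but never submitted.
raw_output = "admin.example.com\nstaging.example.com\n"
post_process_domains_bb(raw_output,
                        "sublist3r",                 # command_name (illustrative)
                        "sublist3r -d example.com",  # populated_command (illustrative)
                        "/pentest",
                        "acme",
                        False,                       # simulation
                        "/opt/celerystalk",          # celery_path (illustrative)
                        ["internal.example.com"])    # out_of_scope_hosts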
Code Example #4
File: tasks.py Project: minkione/celerystalk
def post_process_domains(vhosts,command_name,populated_command,output_base_dir,workspace,domain,simulation,celery_path,scan_mode):
    config,supported_services = config_parser.read_config_ini()
    vhosts = vhosts.splitlines()
    # from https://stackoverflow.com/questions/14693701/how-can-i-remove-the-ansi-escape-sequences-from-a-string-in-python
    ansi_escape = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')
    for vhost in vhosts:
        #print("raw:\t" + vhost)
        vhost = ansi_escape.sub('', vhost)
        #print("escaped:\t" + vhost)
        if re.match(r'\w', vhost):
            in_scope,ip = utils.domain_scope_checker(vhost,workspace)
            if in_scope == 1:
                print("Found subdomain (in scope):\t" + vhost)
                db_vhost = (ip,vhost,1, 0,workspace)
                db.create_vhost(db_vhost)
            else:
                print("Found subdomain (out of scope):\t" + vhost)
                db_vhost = (ip, vhost, 0, 0, workspace)
                db.create_vhost(db_vhost)

        # elif scan_mode == "BB":
        #
        #     cmd_name, cmd = config['nmap-bug-bounty_mode']
        #
        #     utils.
        #
        #     db_vhost = ("", vhost, 1, 0, workspace)
        #     db.create_vhost(db_vhost)


    #pull all in scope vhosts that have not been submitted
    inscope_vhosts = db.get_inscope_unsubmitted_vhosts(workspace)
    for scannable_vhost in inscope_vhosts:
        scannable_vhost = scannable_vhost[0]
        ip = db.get_vhost_ip(scannable_vhost,workspace)
        ip = ip[0][0]
        db_scanned_services = db.get_all_services_for_ip(ip, workspace)
        for (id,ip,scanned_service_port,scanned_service_protocol,scanned_service_name,workspace) in db_scanned_services:
        #run chain on each one and then update db as submitted
            scan_output_base_file_name = output_base_dir + "/" + ip + "/celerystalkOutput/" + scannable_vhost + "_" +  str(scanned_service_port) + "_" + scanned_service_protocol + "_"
            host_dir = output_base_dir + "/" + ip

            #TODO: This def might introduce a bug - same code as parse config submit jobs to celery. need to just call that function here
            for section in config.sections():
                if (section == "http") or (section == "https"):
                    if section == scanned_service_name:
                        for (cmd_name, cmd) in config.items(section):
                            outfile = scan_output_base_file_name + cmd_name
                            populated_command = cmd.replace("[TARGET]", scannable_vhost).replace("[PORT]",
                                str(scanned_service_port)).replace("[OUTPUT]", outfile).replace("[PATH]", "")
                            if simulation:
                                # debug - sends jobs to celery, but with a # in front of every one.
                                populated_command = "#" + populated_command

                            # Grab a UUID from celery.utils so it can be assigned to the task when it is created,
                            # which allows the same id to be passed to every task in the chain.

                            task_id = uuid()
                            result = chain(
                                # insert a row into the database to mark the task as submitted. a subtask does not get tracked
                                # in celery the same way a task does, for instance, you can't find it in flower
                                cel_create_task.subtask(args=(cmd_name,populated_command, scannable_vhost, outfile + ".txt", workspace, task_id)),

                                # run the command. run_task takes care of marking the task as started and then completed.
                                # The si tells run_cmd to ignore the data returned from a previous task
                                run_cmd.si(cmd_name, populated_command, celery_path, task_id).set(task_id=task_id),

                                # right now, every executed command gets sent to a generic post_process task that can do
                                # additional work based on the command that just ran.
                                post_process.si(cmd_name, populated_command, output_base_dir, workspace, scannable_vhost, host_dir,
                                                      simulation,
                                                      scanned_service_port, scanned_service_name,
                                                      scanned_service_protocol,celery_path),
                            )()  # .apply_async()

                            #task_id_list.append(result.task_id)
                            host_audit_log = host_dir + "/" + "{0}_executed_commands.txt".format(ip)
                            f = open(host_audit_log, 'a')
                            f.write(populated_command + "\n\n")
                            f.close()

        db.update_vhosts_submitted(ip,scannable_vhost,workspace,1)
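The non-bug-bounty variant takes the target domain and a scan mode instead of an explicit out-of-scope list, and delegates scope decisions to utils.domain_scope_checker. A sketch with placeholder values; the domain, paths, tool name, and scan mode are illustrative only.

# Hypothetical call: each discovered subdomain is checked against the workspace scope
# before being written to the DB and, if in scope, scanned.
raw_output = "admin.example.com\nstaging.example.com\n"
post_process_domains(raw_output,
                     "amass",                      # command_name (illustrative)
                     "amass enum -d example.com",  # populated_command (illustrative)
                     "/pentest",
                     "acme",
                     "example.com",                # domain
                     False,                        # simulation
                     "/opt/celerystalk",           # celery_path (illustrative)
                     "VAPT")                       # scan_mode (illustrative)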