Example #1
    def new_dataflow(self):
        # load the directory files
        pa = ParseArgs(self.target, '', 'csv', '', 'php', '', a_sid=None)
        target_mode = pa.target_mode

        target_directory = pa.target_directory(target_mode)
        logger.info('[CLI] Target : {d}'.format(d=target_directory))

        # static analysis: collect file info
        files, file_count, time_consume = Directory(target_directory).collect_files()

        # pretreat the AST object
        ast_object.init_pre(target_directory, files)
        ast_object.pre_ast_all(['php'])

        for file in files:
            filename_list = []

            if file[0] in ext_dict['php']:
                filename_list = file[1]['list']

            for filename in filename_list:
                all_nodes = ast_object.get_nodes(filename)
                self.dataflows = []

                base_locate = filename.replace('/', '#').replace('\\', '#').replace('.', '_')
                logger.info("[PhpUnSerChain] New Base locate {}".format(base_locate))

                self.base_dataflow_generate(all_nodes, base_locate)

                # offset new &N addresses by the rows already stored in the table
                base_address_index = self.dataflow_db.objects.all().count()

                for dataflow in self.dataflows:
                    if dataflow:

                        source_node = str(dataflow[2])
                        sink_node = str(dataflow[4])

                        if re.search(r'&[0-9]+', source_node, re.I):
                            address_list = re.findall(r'&[0-9]+', source_node, re.I)
                            for address in address_list:
                                source_node = source_node.replace(address, '&{}'.format(int(address[1:]) + base_address_index))
                            # source_node = '&{}'.format(int(source_node[1:])+base_address_index)

                        if re.search(r'&[0-9]+', sink_node, re.I):
                            address_list = re.findall(r'&[0-9]+', sink_node, re.I)
                            for address in address_list:
                                sink_node = sink_node.replace(address, '&{}'.format(int(address[1:]) + base_address_index))

                        # if str(sink_node).startswith('&'):
                        #     sink_node = '&{}'.format(int(sink_node[1:])+base_address_index)

                        df = self.dataflow_db(node_locate=dataflow[0], node_sort=dataflow[1],
                                              source_node=source_node, node_type=dataflow[3], sink_node=sink_node)
                        df.save()
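
The &N tokens embedded in source_node and sink_node reference earlier rows of the dataflow table, so each new file's flows must be shifted by the number of rows already stored. A minimal standalone sketch of that rebasing step (rebase_addresses is our name, not part of KunLun-M; re.sub rewrites each match atomically, which also sidesteps the prefix collision that sequential str.replace calls can hit when &1 occurs inside &10):

import re

def rebase_addresses(node_repr, base_index):
    # rewrite every &N reference so it points past the rows already stored
    return re.sub(r'&([0-9]+)',
                  lambda m: '&{}'.format(int(m.group(1)) + base_index),
                  node_repr)

print(rebase_addresses('Variable(&3) -> &7', 100))  # Variable(&103) -> &107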
Example #2
    def new_dataflow(self):
        # load the directory files
        pa = ParseArgs(self.target, '', 'csv', '', 'php', '', a_sid=None)
        target_mode = pa.target_mode

        target_directory = pa.target_directory(target_mode)
        logger.info('[CLI] Target : {d}'.format(d=target_directory))

        # static analysis: collect file info
        files, file_count, time_consume = Directory(
            target_directory).collect_files()

        # pretreat the AST object
        ast_object.init_pre(target_directory, files)
        ast_object.pre_ast_all(['php'])

        for file in files:
            filename_list = []

            if file[0] in ext_dict['php']:
                filename_list = file[1]['list']

            for filename in filename_list:
                all_nodes = ast_object.get_nodes(filename)
                self.dataflows = []

                base_locate = filename.replace('/', '#').replace('\\', '#').replace('.', '_')
                logger.info(
                    "[PhpUnSerChain] New Base locate {}".format(base_locate))

                self.base_dataflow_generate(all_nodes, base_locate)

                for dataflow in self.dataflows:
                    if dataflow:
                        df = self.dataflow_db(node_locate=dataflow[0],
                                              node_sort=dataflow[1],
                                              source_node=dataflow[2],
                                              node_type=dataflow[3],
                                              sink_node=dataflow[4])
                        df.save()
Example #3
def start(target, formatter, output, special_rules, a_sid=None, language=None, tamper_name=None, black_path=None, is_unconfirm=False, is_unprecom=False):
    """
    Start CLI
    :param black_path: 
    :param tamper_name:
    :param language: 
    :param target: File, FOLDER, GIT
    :param formatter:
    :param output:
    :param special_rules:
    :param a_sid: all scan id
    :return:
    """
    global ast_object
    # generate single scan id
    s_sid = get_sid(target)
    r = Running(a_sid)
    data = (s_sid, target)
    r.init_list(data=target)
    r.list(data)

    report = '?sid={a_sid}'.format(a_sid=a_sid)
    d = r.status()
    d['report'] = report
    r.status(d)

    task_id = a_sid

    # load kunlunmignore
    load_kunlunmignore()

    # parse target mode and output mode
    pa = ParseArgs(target, formatter, output, special_rules, language, black_path, a_sid=None)
    target_mode = pa.target_mode
    output_mode = pa.output_mode
    black_path_list = pa.black_path_list

    # target directory
    try:
        target_directory = pa.target_directory(target_mode)
        logger.info('[CLI] Target : {d}'.format(d=target_directory))

        # static analysis: collect file info
        files, file_count, time_consume = Directory(target_directory, black_path_list).collect_files()

        # vendor check
        project_id = get_and_check_scantask_project_id(task_id)
        Vendors(project_id, target_directory, files)

        # detect the main language and framework
        if not language:
            dt = Detection(target_directory, files)
            main_language = dt.language
            main_framework = dt.framework
        else:
            main_language = pa.language
            # no framework detection when the language is forced; fall back to it
            main_framework = pa.language

        logger.info('[CLI] [STATISTIC] Language: {l} Framework: {f}'.format(l=",".join(main_language), f=main_framework))
        logger.info('[CLI] [STATISTIC] Files: {fc}, Extensions:{ec}, Consume: {tc}'.format(fc=file_count,
                                                                                           ec=len(files),
                                                                                           tc=time_consume))

        if pa.special_rules is not None:
            logger.info('[CLI] [SPECIAL-RULE] only scan used by {r}'.format(r=','.join(pa.special_rules)))

        # pretreat the AST object
        ast_object.init_pre(target_directory, files)
        ast_object.pre_ast_all(main_language, is_unprecom=is_unprecom)

        # scan
        scan(target_directory=target_directory, a_sid=a_sid, s_sid=s_sid, special_rules=pa.special_rules,
             language=main_language, framework=main_framework, file_count=file_count, extension_count=len(files),
             files=files, tamper_name=tamper_name, is_unconfirm=is_unconfirm)
    except KeyboardInterrupt:
        logger.error("[!] KeyboardInterrupt, exit...")
        exit()
    except Exception:
        result = {
            'code': 1002,
            'msg': 'Exception'
        }
        Running(s_sid).data(result)
        raise

    # write the report output to a file
    write_to_file(target=target, sid=s_sid, output_format=formatter, filename=output)
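
For reference, a hedged invocation sketch matching the signature above; the target path and a_sid value are illustrative placeholders, and 'php' mirrors the language argument the other examples pass to ParseArgs:

# illustrative call: scan a local PHP project and emit a CSV report
start('/tmp/php_project', 'csv', '', '', a_sid='20230101000000', language='php')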
Example #4
import os

# for Django: the settings module must be set before django.setup()
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Kunlun_M.settings')

import django

django.setup()

from Kunlun_M.settings import PROJECT_DIRECTORY
from core.core_engine.php.parser import anlysis_params
from core.core_engine.php.parser import scan_parser
from core.pretreatment import ast_object

files = [('.php', {'list': ["v_parser.php", "v.php"]})]
ast_object.init_pre(PROJECT_DIRECTORY + '/tests/vulnerabilities/', files)
ast_object.pre_ast_all(['php'])


target_projects = PROJECT_DIRECTORY + '/tests/vulnerabilities/v_parser.php'
target_projects2 = PROJECT_DIRECTORY + '/tests/vulnerabilities/v.php'

with open(target_projects, 'r') as fi:
    code_contents = fi.read()
with open(target_projects2, 'r') as fi2:
    code_contents2 = fi2.read()

sensitive_func = ['system']
lineno = 7

param = '$callback'
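
Continuing the fixture above: once pre_ast_all has primed the cache, the parsed nodes and raw source can be fetched back per filename. get_nodes and get_content are the same accessors the other examples on this page use:

# fetch the pre-parsed AST and source for one of the fixture files
all_nodes = ast_object.get_nodes('v_parser.php')
content = ast_object.get_content('v_parser.php')
print(len(all_nodes), content.count('\n'))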
Example #5
    def load_files(self):
        target = self.target

        targetlist = re.split(r"[\\/]", target)
        if target.endswith("/") or target.endswith("\\"):
            filename = targetlist[-2]
        else:
            filename = targetlist[-1]

        logger.info('[EntranceFinder] Target {} scan started.'.format(filename))
        logger.info('[EntranceFinder] Scan limit node number set to {}'.format(
            self.limit))

        if self.blackwords:
            self.black_list_split()
            logger.info('[EntranceFinder] Scan blacklist set to {}'.format(
                self.black_list))

        # load the directory files
        pa = ParseArgs(self.target, '', 'csv', '', 'php', '', a_sid=None)
        target_mode = pa.target_mode

        target_directory = pa.target_directory(target_mode)
        logger.info('[CLI] Target : {d}'.format(d=target_directory))

        # static analysis: collect file info
        files, file_count, time_consume = Directory(
            target_directory).collect_files()

        # pretreat the AST object
        ast_object.init_pre(target_directory, files)
        ast_object.pre_ast_all(['php'])

        filecontent_dict = {}

        for file in files:

            if file[0] in ext_dict['php']:
                filename_list = file[1]['list']

                for filename in filename_list:
                    all_nodes = ast_object.get_nodes(filename)
                    now_content = ast_object.get_content(filename)

                    # check the blacklist
                    is_black = False
                    for bword in self.black_list:
                        if bword in now_content:
                            logger.debug(
                                '[EntranceFinder] found {} in File {}'.format(
                                    bword, filename))
                            is_black = True

                    if is_black:
                        continue

                    nodes_count, black_nodes_count = self.count_line(all_nodes)

                    if nodes_count in self.filedata_dict:
                        check_ratio = self.get_check_ratio(
                            now_content, filecontent_dict[nodes_count])

                        self.filedata_dict[nodes_count].append(
                            (filename, nodes_count, black_nodes_count,
                             check_ratio))

                    else:
                        self.filedata_dict[nodes_count] = [
                            (filename, nodes_count, black_nodes_count, 1)
                        ]
                        filecontent_dict[nodes_count] = now_content
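
The bucketing above keys files by AST node count so content comparison only happens inside a bucket, which keeps duplicate detection cheap. A compact standalone sketch of the same idea; get_check_ratio is not shown in this example, so difflib.SequenceMatcher stands in as an assumed similarity measure:

import difflib

def bucket_by_node_count(parsed_files):
    """parsed_files: iterable of (filename, node_count, content) tuples."""
    buckets = {}        # node_count -> list of (filename, ratio)
    first_content = {}  # node_count -> content of the bucket's first file
    for filename, node_count, content in parsed_files:
        if node_count in buckets:
            # assumed stand-in for KunLun-M's get_check_ratio
            ratio = difflib.SequenceMatcher(
                None, content, first_content[node_count]).ratio()
            buckets[node_count].append((filename, ratio))
        else:
            # first file seen at this node count anchors the bucket
            buckets[node_count] = [(filename, 1)]
            first_content[node_count] = content
    return buckets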