Example #1
 def task_target(*arguments):
     result = None
     if self.tasks_type == MultiTask.MULTI_PROCESSING:
         curr_task = multiprocessing.process.current_process()
         Log.info(self.tag + 'started (PID=' + str(curr_task.pid) + ')')
     else:
         curr_task = threading.current_thread()
         Log.info(self.tag + 'started')
     if target is not None:
         result = target(*arguments)
     if result is not None:
         Log.success("Result: " + str(result))
         # Write the result to the file
         Log.info('Writing result in ' + str(self.resfile))
         storage.overwrite_file(str(result), self.resfile)   # TODO: dump result as object with "pickle"
         # Terminate all the other threads/processes
         if self.tasks_type == MultiTask.MULTI_PROCESSING:
             Log.info('Killing other processes')
             running_pids = MultiTask.get_pids_from_file(self.pidfile)
             for pid in running_pids:
                 pid = int(pid)
                 if pid == curr_task.pid:
                     continue
                 try:
                     os.kill(pid, signal.SIGKILL)
                     Log.info('Process ' + str(pid) + ' killed!')
                 except Exception as e:
                     Log.error(str(e))
             Log.info(self.tag + 'end')
         else:
             Log.info('Ignoring other threads')
             # Kill itself
             pid = multiprocessing.process.current_process().pid
             Log.info(self.tag + 'end')
             os.kill(pid, signal.SIGKILL)
Example #2
        def deep_inject_form(href, depth=0):
            # Check the domain
            if href in parsed_forms or \
                    urlparse(href).netloc != base_url or \
                    (max_depth is not None and depth > max_depth):
                return ''

            # Visit the current href
            parsed_relevant, request_cookies = HtmlParser.relevant_parse(href)

            # Find forms in page
            parsed_forms[href] = HtmlParser.find_forms(parsed_relevant, href)

            # Execute Sqlmap task
            task = SqlmapClient.try_inject_form(href, parsed_forms,
                                                request_cookies)
            tasks[task.id] = task
            Log.success('SQL injection of "' + href + '" started!')

            # Find adjacent links
            links = HtmlParser.find_links(parsed_relevant)

            # Visit adjacent links
            for link in links:
                # print('link: '+link)
                child_request_cookies = deep_inject_form(link, depth + 1)
                if len(child_request_cookies) > len(request_cookies):
                    request_cookies = child_request_cookies

            # Return the cookies so the recursive call above has a value to compare
            return request_cookies
Example #3
 def browser_target():
     """
     The function that launches the browser
     """
     set_owner_process(user)
     Log.info('Launching browser with User: ' + '******')
     Log.success('Web browser opened')
Example #4
 def kill(self, sig: int):
     """
     Send a signal to the process which is running this job
     :param sig: The signal as integer (e.g. 9 for SIGKILL)
     """
     Log.info("Sending signal " + str(sig) + " to job #" + str(self.id) + ' (' + str(self.pid) + ')')
     self.status = sig
     os.kill(self.pid, sig)
     self.save()
     Log.success("Signal " + str(sig) + " sent to job #" + str(self.id) + ' (' + str(self.pid) + ')')
Example #5
    def deep_inject_form(url, max_depth, listen: bool = False) -> dict:
        """
        Search a form in the page returned by url.
        If it doesn't find a form, or the injection can't be done, it visits the website in search of other forms
        :param listen: True if this method should listen and print the SQL tasks, otherwise False
        :param url: str The url to visit
        :param max_depth: int The max depth during the visit
        :return A dictionary of SQL injection tasks
        """

        base_url = urlparse(url).netloc
        parsed_forms = dict()
        out_file = APP_STORAGE_OUT + '/' + now() + '_DEEP_FORMS_' + base_url + '.json'

        def _deep_inject_form(href, depth=1):
            # Check the domain
            if href in parsed_forms or \
                    urlparse(href).netloc != base_url or \
                    (max_depth is not None and depth > max_depth):
                return ''

            # Visit the current href
            parsed_relevant, request_cookies = HtmlParser.relevant_parse(href)

            # Find forms in page
            parsed_forms[href] = HtmlParser.find_forms(parsed_relevant, href)

            # Find adjacent links
            links = HtmlParser.find_links(parsed_relevant)

            if len(parsed_forms) % 10 == 0:
                Log.info('Writing result in ' + out_file + '...')
                JsonSerializer.set_dictionary(parsed_forms, out_file)

            # Visit adjacent links
            for link in links:
                # print('link: '+link)
                child_request_cookies = _deep_inject_form(link, depth + 1)
                if len(child_request_cookies) > len(request_cookies):
                    request_cookies = child_request_cookies

            return request_cookies

        cookies = _deep_inject_form(url)
        Log.info('Writing result in ' + out_file + '...')
        JsonSerializer.set_dictionary(parsed_forms, out_file)
        Log.success('Result written in ' + out_file)
        Log.success('Website crawled! Found ' + str(len(parsed_forms)) +
                    ' pages')
        tasks = SqlmapClient.try_inject_forms(parsed_forms, cookies)
        if listen:
            SqlInjection.__listen_tasks(tasks)
        return tasks
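A hedged usage sketch for the method above: the signature (url, max_depth, listen) comes from the code itself, while the placeholder target URL and the assumption that the method is exposed on the SqlInjection class (suggested by the SqlInjection.__listen_tasks call) are mine.

    # Hedged usage sketch: crawl the target two levels deep and inspect the
    # returned sqlmap tasks. The URL is a placeholder; exposing the method on
    # SqlInjection is an assumption based on the __listen_tasks call above.
    tasks = SqlInjection.deep_inject_form('http://target.example.com', max_depth=2, listen=False)
    for task_id, task in tasks.items():
        print('sqlmap task ' + str(task_id) + ': ' + str(task))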
Example #6
 def inject_form(url=None, html=None):
     """
     Search a form in the page returned by url (or inside the html).
     :param url: str The url to visit (or None)
     :param html: str the html code to analyze (or None)
     :return A list of parsed forms like [ form_1, form_2 ]
     """
     parsed_forms = dict()
     parsed_forms[url], cookies = HtmlParser.form_parse(url, html)
     Log.success('Html parsed! Found ' + str(len(parsed_forms[url])) +
                 ' forms')
     SqlmapClient.try_inject(parsed_forms, cookies)
Example #7
 def __init__(self, host: str = DEFAULT_HOST, port: int = DEFAULT_PORT):
     """
     :param host: The host
     :param port: The port
     """
     self.host = host
     self.port = port
     self.base_url = 'http://' + self.host + ':' + str(port)
     # Start the sqlmap-api server in a parallel thread
     Log.info("Starting sqlmap-api server in a parallel thread")
     MultiTask.multithread(sqlmap_server, (self.host, self.port), True, 1)
     while not check_socket(self.host, self.port):
         # Wait for the sqlmap-api server
         sleep(0.1)
     Log.success("Sqlmap-api server started!")
Example #8
    def __inject_forms(url, max_depth) -> dict:
        """
        Search a form in the page returned by url.
        If it doesn't find a form, or the injection can't be done, it visits the website in search of other forms
        :param url: str The url to visit
        :param max_depth: int The max depth during the visit
        :return A dictionary of SQL injection tasks
        """

        base_url = urlparse(url).netloc
        parsed_forms = dict()
        tasks = dict()

        def deep_inject_form(href, depth=0):
            # Check the domain
            if href in parsed_forms or \
                    urlparse(href).netloc != base_url or \
                    (max_depth is not None and depth > max_depth):
                return ''

            # Visit the current href
            parsed_relevant, request_cookies = HtmlParser.relevant_parse(href)

            # Find forms in page
            parsed_forms[href] = HtmlParser.find_forms(parsed_relevant, href)

            # Execute Sqlmap task
            task = SqlmapClient.try_inject_form(href, parsed_forms,
                                                request_cookies)
            tasks[task.id] = task
            Log.success('SQL injection of "' + href + '" started!')

            # Find adjacent links
            links = HtmlParser.find_links(parsed_relevant)

            # Visit adjacent links
            for link in links:
                # print('link: '+link)
                child_request_cookies = deep_inject_form(link, depth + 1)
                if len(child_request_cookies) > len(request_cookies):
                    request_cookies = child_request_cookies

            # Return the cookies so the recursive call above has a value to compare
            return request_cookies

        Log.success('SQL injection started!')
        deep_inject_form(url)
        Log.success('Website crawled! Found ' + str(len(parsed_forms)) +
                    ' forms')
        return tasks
Example #9
 def kill(self, sig: int):
     """
     Send a signal to the process which is running this job
     :param sig: The signal as integer (e.g. 9 for SIGKILL)
     """
     if self.status == sig:
         return
     Log.info("Sending signal " + str(sig) + " to job #" + str(self.id) +
              ' (' + str(self.pid) + ')')
     self.status = sig
     try:
         os.kill(self.pid, sig)
     except ProcessLookupError:
         # Process does not exist
         pass
     self.save()
     Log.success("Signal " + str(sig) + " sent to job #" + str(self.id) +
                 ' (' + str(self.pid) + ')')
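The ProcessLookupError handling above tolerates a process that has already exited. A self-contained illustration of the same pattern with a throwaway child process (standard library only; the sleep command is just a stand-in for a job's process):

    import os
    import signal
    import subprocess

    # Spawn a throwaway child so there is a real PID to signal.
    proc = subprocess.Popen(['sleep', '60'])

    try:
        os.kill(proc.pid, signal.SIGTERM)   # same call used by Job.kill()
    except ProcessLookupError:
        pass                                # process already gone: nothing to do

    proc.wait()                             # reap the child to avoid a zombie
    print('Signal sent to PID ' + str(proc.pid))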
Example #10
 def inject_form(url: str = None,
                 html: str = None,
                 listen: bool = False) -> dict:
     """
     Search a form in the page returned by url (or inside the html).
     :param listen: True if this method should listen and print the SQL tasks, otherwise False
     :param url: str The url to visit (or None)
     :param html: str the html code to analyze (or None)
     :return A dictionary of SQL injection tasks
     """
     parsed_forms = dict()
     parsed_forms[url], cookies = HtmlParser.form_parse(url, html)
     Log.success('Html parsed! Found ' + str(len(parsed_forms[url])) +
                 ' forms')
     tasks = SqlmapClient.try_inject_forms(parsed_forms, cookies)
     if listen:
         SqlInjection.__listen_tasks(tasks)
     return tasks
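A hedged usage sketch of the method above, assuming it is reachable as SqlInjection.inject_form (the __listen_tasks call suggests it lives on that class); the login URL is a placeholder:

    # Hedged usage sketch: parse a single page, start the injection tasks and
    # listen for their results. Placeholder URL; the SqlInjection class name is
    # an assumption based on the code above.
    tasks = SqlInjection.inject_form(url='http://target.example.com/login.php', listen=True)
    print(str(len(tasks)) + ' sqlmap tasks started')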
Example #11
    def start(self, target, args, asynchronous, cpu):
        self.tasks = []

        def task_target(*arguments):
            result = None
            if self.tasks_type == MultiTask.MULTI_PROCESSING:
                curr_task = multiprocessing.process.current_process()
                Log.info(self.tag + 'started (PID=' + str(curr_task.pid) + ')')
            else:
                curr_task = threading.current_thread()
                Log.info(self.tag + 'started')
            if target is not None:
                result = target(*arguments)
            if result is not None:
                Log.success("Result: " + str(result))
                # Write the result to the file
                Log.info('Writing result in ' + str(self.resfile))
                storage.overwrite_file(str(result), self.resfile)   # TODO: dump result as object with "pickle"
                # Terminate all the other threads/processes
                if self.tasks_type == MultiTask.MULTI_PROCESSING:
                    Log.info('Killing other processes')
                    running_pids = MultiTask.get_pids_from_file(self.pidfile)
                    for pid in running_pids:
                        pid = int(pid)
                        if pid == curr_task.pid:
                            continue
                        try:
                            os.kill(pid, signal.SIGKILL)
                            Log.info('Process ' + str(pid) + ' killed!')
                        except Exception as e:
                            Log.error(str(e))
                    Log.info(self.tag + 'end')
                else:
                    Log.info('Ignoring other threads')
                    # Kill itself
                    pid = multiprocessing.process.current_process().pid
                    Log.info(self.tag + 'end')
                    os.kill(pid, signal.SIGKILL)

        for i in range(0, cpu):
            task_args = ()
            for arg in args:
                Log.info('Argument type: ' + str(type(arg)))
                if is_listable(arg):
                    # Split the elements into 1/cpu parts
                    p_list_len = (len(arg) / cpu) + (len(arg) % cpu)
                    if type(arg) == dict:
                        iterator = iter(arg.items())
                        task_args += (
                            dict(itertools.islice(iterator, int((i * p_list_len)), int((i + 1) * p_list_len))),
                        )
                    else:
                        task_args += (arg[int((i * p_list_len)):int(((i + 1) * p_list_len))],)
                else:
                    task_args += (arg,)
            task = self.Multitask(target=task_target, args=task_args)
            self.tasks.append(task)

        if self.tasks_type == MultiTask.MULTI_PROCESSING:
            pids = []
            signal.signal(signal.SIGCHLD, signal.SIG_IGN)   # Ignore child exit status
            for task in self.tasks:
                task.start()
                # noinspection PyUnresolvedReferences
                pids.append(task.pid)
            storage.overwrite_file(str(pids).strip('[]'), self.pidfile)
        else:
            for task in self.tasks:
                task.start()

        if not asynchronous:
            # Wait for all the tasks to finish
            for task in self.tasks:
                task.join()
                Log.info('Task ' + str(task.name) + ' joined')
            Log.info('Reading result in ' + str(self.resfile))
            # Read the result from the file
            res = storage.read_file(self.resfile)   # TODO: load result as object with "pickle"
            # Delete the pid file, if any
            storage.delete(self.pidfile)
            # Delete the result file
            storage.delete(self.resfile)
            Log.success('MultiTask -> result: ' + str(res))
            return res

        return None
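The start method above shares every listable argument among the workers by slicing it with p_list_len. A stand-alone illustration of that arithmetic (plain Python, no project code required):

    # Reproduce the slicing used to build task_args: 10 items over 3 workers.
    items = list(range(10))
    cpu = 3
    p_list_len = (len(items) / cpu) + (len(items) % cpu)   # 10/3 + 10%3 = ~4.33
    chunks = [items[int(i * p_list_len):int((i + 1) * p_list_len)] for i in range(cpu)]
    print(chunks)   # [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]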
Example #12
    def crawl(url: str,
              parsing_type: str,
              callback,
              depth: int = 0,
              cookies: str = None):
        """
        :param url: The url to crawl/parse
        :param parsing_type: HtmlParse.TYPE_ALL | HtmlParse.TYPE_RELEVANT | HtmlParse.TYPE_FORM | HtmlParse.TYPE_META
        :param callback: The callback method to call foreach visited page
        :param depth: The max crawling depth (0 to execute a normal page parsing, < 0 for no limit)
        :param cookies: The cookies to use on parsing
        """
        if not is_url(url):
            raise ValueError('url must be a valid url')
        if parsing_type not in HtmlParser.types():
            raise ValueError('parsing_type must be one of ' +
                             str(HtmlParser.types()))
        if not callable(callback):
            raise ValueError('callback is not callable')
        if type(depth) is not int:
            raise ValueError('depth must be an integer')
        if cookies is not None and type(cookies) is not str:
            raise ValueError('cookies must be a string')

        base_url = urlparse(url).netloc
        base_urls = (base_url, )
        if base_url[0:4] != 'www.':
            base_urls += ('www.' + str(base_url), )
        parsed_urls = set()
        parsed_hashes = set()

        def _crawl(href: str, curr_depth: int = 0):
            if href in parsed_urls or \
                    urlparse(href).netloc not in base_urls or \
                    (0 <= depth and (depth < curr_depth)):
                return

            # Visit the current href
            if parsing_type == HtmlParser.TYPE_ALL:
                parsed, _ = HtmlParser.all_parse(href, cookies=cookies)
            else:
                parsed, _ = HtmlParser.relevant_parse(href, cookies=cookies)

            parsed_hash = hash(JsonSerializer.dump_json(parsed))
            if parsed_hash in parsed_hashes:
                return

            parsed_hashes.add(parsed_hash)
            parsed_urls.add(href)

            if parsing_type == HtmlParser.TYPE_FORM:
                # Find forms in page
                parsed_page = HtmlParser.find_forms(parsed, href)
            elif parsing_type == HtmlParser.TYPE_META:
                # Find metadata in page
                parsed_page = HtmlParser.find_meta(parsed)
            else:
                parsed_page = parsed

            if parsed_page.get('tag') is not None:
                parsed_page = {0: parsed_page}

            parsed_page['url'] = href
            callback(parsed_page)

            # Find adjacent links
            links = HtmlParser.find_links(parsed)
            for link in links:
                _crawl(link, curr_depth + 1)

        _crawl(url)
        Log.success(url + ' crawling done!')
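A hedged usage sketch for crawl, using only the parameters and constants that appear in the code above (HtmlParser.TYPE_FORM, the callback, depth); the target URL is a placeholder and calling it as a static method of HtmlParser is an assumption:

    # Hedged usage sketch: collect every form found while crawling two levels deep.
    collected = []

    def on_page(parsed_page: dict):
        # Called once per visited page with that page's parsed content
        collected.append(parsed_page)

    HtmlParser.crawl('http://target.example.com', HtmlParser.TYPE_FORM, on_page, depth=2)
    print(str(len(collected)) + ' pages parsed')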