Example #1
File: spider.py  Project: jorik041/taser
    def crawl(self, src_url):
        '''
        Execute web request and send to parser
        '''
        src_url = self.linkModifier(src_url)
        next_depth = (self.__cur_depth + 1)
        resp = web_request(src_url,
                           timeout=self.conn_timeout,
                           headers=self.headers,
                           proxies=self.proxies)

        if get_statuscode(resp) != 0:
            self.pageParser(resp, next_depth)
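Every example on this page branches on get_statuscode(resp) != 0, which suggests web_request swallows request errors and get_statuscode returns 0 when no response came back, so callers test for a non-zero status instead of wrapping each call in try/except. A minimal sketch of that convention; the taser import path is an assumption and may differ between versions:

# Minimal sketch of the web_request/get_statuscode convention.
# NOTE: the import path is an assumption; adjust to match your taser version.
from taser.proto.http import web_request, get_statuscode

resp = web_request('https://example.com', timeout=5)
if get_statuscode(resp) != 0:   # 0 means the request itself failed
    print(resp.status_code, len(resp.text))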
Example #2
def o365_validateUser(user, timeout, headers=None, proxy=[], verbose=False):
    url = 'https://outlook.office365.com/autodiscover/autodiscover.json/v1.0/{user}?Protocol=Autodiscoverv1'

    # Copy instead of mutating the caller's dict: a mutable {} default
    # would persist the Accept header across calls
    headers = dict(headers or {})
    headers['Accept'] = 'application/json'

    r = web_request(url.format(user=user),
                    redirects=False,
                    timeout=timeout,
                    headers=headers,
                    proxies=proxy)
    if get_statuscode(r) == 200:
        logger.success([user])
        ledger.info(user)
    elif verbose:
        logger.fail([user])
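A hypothetical driver for the validator above; users.txt and the 5-second timeout are placeholders, and logger/ledger are assumed to be configured elsewhere in the project:

# Hypothetical driver: run the validator across a username list.
# users.txt and timeout=5 are placeholders, not from the source file.
with open('users.txt') as fh:
    for user in (line.strip() for line in fh):
        if user:
            o365_validateUser(user, timeout=5, verbose=True)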
Example #3
def tryHTTP(target, port, timeout, ssl=False, verbose=False, proxies=[]):
    schema = "https://" if ssl else "http://"
    t = schema + target + ":" + str(port)
    resp = web_request(t, timeout=timeout, debug=verbose, proxies=proxies)
    code = get_statuscode(resp)
    if code != 0:
        title = get_pagetitle(resp)
        server = extract_header('Server', resp)
        # Format the log line once and send it to both loggers
        msg = "{:40} code: {:<3} | Size: {:<6}\tServer:{:<15}\tTitle:{}".format(
            t, code, len(resp.text), server, title)
        cliLogger.write(msg)
        fileLogger.info(msg)
        return True
    return False
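A sketch of how tryHTTP might be driven against a single host; the hostname and port list are illustrative:

# Illustrative sweep: probe one host on common web ports,
# switching to HTTPS where the port implies TLS.
for port in (80, 443, 8080, 8443):
    tryHTTP('example.com', port, timeout=3, ssl=port in (443, 8443))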
Example #4
import base64

def prompt_NTLM(url, timeout, headers={}, proxies=[], debug=False):
    challenge = {}
    h = headers.copy()
    h['Authorization'] = 'NTLM TlRMTVNTUAABAAAAB4IIAAAAAAAAAAAAAAAAAAAAAAA='
    request = web_request(url, headers=h, timeout=timeout, proxies=proxies, debug=debug)

    if get_statuscode(request) not in [401, 302]:
        return challenge

    # get auth header
    auth_header = request.headers.get('WWW-Authenticate')
    if not auth_header or 'NTLM' not in auth_header:
        return challenge

    # get challenge message from header
    challenge_message = base64.b64decode(auth_header.split(' ')[1].replace(',', ''))

    # parse challenge
    challenge = parse_challenge(challenge_message)
    return challenge
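The hardcoded Authorization value is a base64-encoded NTLM type 1 (negotiate) message; the server's type 2 reply carries the challenge that parse_challenge unpacks. A sketch of calling it, where the URL is illustrative and the field names depend on whatever parse_challenge returns:

# Sketch: send the NTLM negotiate message and dump whatever fields
# parse_challenge recovered. The URL is illustrative.
challenge = prompt_NTLM('https://mail.example.com/ews', timeout=5)
if challenge:
    for field, value in challenge.items():
        print('{}: {}'.format(field, value))
else:
    print('No NTLM challenge returned')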
Example #5
def output_handler(resp, args):
    cliLogger.info([
        resp.url,
        highlight('Title', fg='gray'),
        '\t    ({})\t {}'.format(get_statuscode(resp), get_pagetitle(resp))
    ])
    if args.header:
        # Show only whitelisted headers that were actually present
        for x in WHITELIST:
            h = extract_header(x, resp)
            if h != "N/A":
                cliLogger.info([resp.url, x + ":", h])
    if args.verbose:
        # Verbose: dump every response header not on the blacklist
        for k, v in resp.headers.items():
            if k not in BLACKLIST:
                cliLogger.info([resp.url, k + ":", v])
    else:
        # Default: show only headers whose names start with a whitelist entry
        for k, v in resp.headers.items():
            for w in WHITELIST:
                if k.lower().startswith(w):
                    cliLogger.info([resp.url, k + ":", v])
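output_handler only reads the .header and .verbose attributes from args, so it can be exercised without a full CLI. A sketch that fakes the namespace, assuming WHITELIST and BLACKLIST are defined as in the surrounding file:

# Sketch: exercise output_handler without argparse by faking the
# namespace. WHITELIST/BLACKLIST must exist as in the original file.
from types import SimpleNamespace

args = SimpleNamespace(header=False, verbose=True)
resp = web_request('https://example.com', timeout=5)
if get_statuscode(resp) != 0:
    output_handler(resp, args)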
Example #6
    def search(self):
        timer = self.start_timer()
        self.total_links = 0        # Total Links found by search engine
        self.page_links = 0         # Total links found by search engine w/ our domain in URL
        found_links = 0             # Local count to detect when no new links are found

        while timer.running:
            if self.total_links > 0 and found_links == self.page_links:
                timer.stop()
                return self.links

            found_links = self.page_links
            search_url = self.generateURL()
            resp = web_request(search_url, timeout=self.conn_timeout, headers=self.headers, proxies=self.proxies)

            if get_statuscode(resp) != 0:
                self.user_output(resp)
                self.pageParser(resp)
        timer.stop()
        return self.links
Example #7
File: websearch.py  Project: jorik041/taser
    def search(self, search_engine, search_query):
        search_timeout = TaserTimeout(self.timeout)
        if self.timeout > 0:
            search_timeout.start()

        self.total_links = 0  # Total Links found by search engine
        self.page_links = 0  # Total links found by search engine w/ our domain in URL
        found_links = 0  # Local count to detect when no new links are found

        while search_timeout.running:
            if self.total_links > 0 and found_links == self.page_links:
                search_timeout.stop()
                return self.links
            found_links = self.page_links

            search_url = self.linkModifier(search_engine, search_query)
            resp = web_request(search_url,
                               timeout=self.conn_timeout,
                               headers=self.headers,
                               proxies=self.proxies)
            if get_statuscode(resp) != 0:
                self.pageParser(resp, search_engine, search_query)
        search_timeout.stop()
        return self.links
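A hypothetical caller for the search method above; the class name WebSearch and its constructor arguments are assumptions, not taken from the source file:

# Hypothetical usage; WebSearch and its constructor signature are
# assumptions about the surrounding websearch.py.
searcher = WebSearch(timeout=30)
for link in searcher.search('google', 'site:example.com'):
    print(link)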
Example #8
def minion(url):
    # Worker: expects args to be a module-level argparse namespace
    resp = web_request(url, proxies=args.proxy, debug=args.verbose)
    if get_statuscode(resp) != 0:
        output_handler(resp, args)
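minion is shaped like a thread-pool worker (one URL in, output as a side effect). A sketch of fanning it out concurrently, with an illustrative URL list:

# Sketch: drive minion() across a URL list with a thread pool.
# The URL list is illustrative; args must already be parsed globally.
from concurrent.futures import ThreadPoolExecutor

urls = ['https://example.com', 'https://example.org']
with ThreadPoolExecutor(max_workers=10) as pool:
    pool.map(minion, urls)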