Example #1
def perform_port_scan(url,
                      ports=None,
                      scanner=NmapHook,
                      verbose=False,
                      opts=None,
                      **kwargs):
    """
    main function that will initialize the port scanning
    """
    url = url.strip()
    logger.info(
        set_color(
            "attempting to find IP address for hostname '{}'...".format(url)))
    found_ip_address = socket.gethostbyname(url)
    logger.info(
        set_color("found IP address for given URL -> '{}'...".format(
            found_ip_address)))
    if verbose:
        logger.debug(set_color("checking for nmap on your system...",
                               level=10))
    nmap_exists = find_nmap(verbose=verbose)
    if nmap_exists:
        if verbose:
            logger.debug(
                set_color(
                    "nmap has been found under '{}'...".format(nmap_exists),
                    level=10))
        logger.info(
            set_color("starting port scan on IP address '{}'...".format(
                found_ip_address)))
        try:
            data = scanner(found_ip_address, ports=ports, opts=opts)
            json_data = data._get_all_info()
            data.show_open_ports(json_data)
            file_path = data.send_to_file(json_data)
            logger.info(
                set_color(
                    "port scan completed, all data saved to JSON file under '{}'..."
                    .format(file_path)))
        except KeyError:
            logger.fatal(
                set_color("no port information found for '{}({})'...".format(
                    url, found_ip_address),
                          level=50))
        except Exception as e:
            logger.exception(
                set_color(
                    "ran into exception '{}', cannot continue quitting...".
                    format(e),
                    level=50))
            request_issue_creation()
            pass
    else:
        logger.fatal(
            set_color(
                "nmap was not found on your system, please install it...",
                level=50))
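A minimal usage sketch for perform_port_scan above; the host, port string, and nmap options are placeholders, and the exact formats accepted by the default NmapHook scanner are assumptions rather than documented behavior.

# hypothetical call -- values below are illustrative only
perform_port_scan(
    "example.com",               # hostname; resolved to an IP inside the function
    ports="21,22,80,443",        # assumed comma-separated format, passed straight to the scanner hook
    verbose=True,
    opts=["-sV"])                # assumed to be extra nmap options forwarded via 'opts'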
Example #2
def main(url, show=False, verbose=False):
    logger.info(set_color("loading extensions..."))
    extensions = __load_extensions()
    if verbose:
        logger.debug(
            set_color("loaded a total of {} extensions...".format(
                len(extensions)),
                      level=10))
    logger.info(set_color("attempting to bruteforce admin panel..."))
    check_for_admin_page(url, extensions, show_possibles=show, verbose=verbose)
Example #3
def main_xss(start_url, verbose=False, proxy=None, agent=None):
    find_xss_script(start_url)
    logger.info(set_color(
        "loading payloads..."
    ))
    payloads = __load_payloads()
    if verbose:
        logger.debug(set_color(
            "a total of {} payloads loaded...".format(len(payloads)), level=10
        ))
    logger.info(set_color(
        "payloads will be written to a temporary file and read from there..."
    ))
    filename = create_urls(start_url, payloads)
    if verbose:
        logger.debug(set_color(
            "loaded URL's have been saved to '{}'...".format(filename), level=10
        ))
    logger.info(set_color(
        "testing for XSS vulnerabilities on host '{}'...".format(start_url)
    ))
    if proxy is not None:
        logger.info(set_color(
            "using proxy '{}'...".format(proxy)
        ))
    success = set()
    with open(filename) as urls:
        for url in urls.readlines():
            url = url.strip()
            result = scan_xss(url, proxy=proxy, agent=agent)
            payload = find_xss_script(url)
            logger.info(set_color(
                "trying payload '{}'...".format(payload)
            ))
            if result:
                success.add(url)
                if verbose:
                    logger.debug(set_color(
                        "payload '{}' appears to be usable...".format(payload), level=10
                    ))
            elif result is "sqli":
                logger.error(set_color(
                    "loaded URL '{}' threw a DBMS error and appears to be SQLi vulnerable, test for SQL injection".format(
                        url
                    ), level=30
                ))
            else:
                if verbose:
                    logger.debug(set_color(
                        "host '{}' does not appear to be vulnerable to XSS attacks with payload '{}'...".format(
                            start_url, payload
                        ), level=10
                    ))
    create_tree(start_url, list(success))
Example #4
def __config_search_engine(verbose=False):
    """
    configure the search engine if one different from Google is given
    """
    non_default_msg = "specified to use non-default search engine..."
    if opt.useDDG:
        if verbose:
            logger.debug(set_color(
                "using DuckDuckGo as the search engine...", level=10
            ))
        logger.info(set_color(
            non_default_msg
        ))
        se = AUTHORIZED_SEARCH_ENGINES["duckduckgo"]
    elif opt.useAOL:
        if verbose:
            logger.debug(set_color(
                "using AOL as the search engine...", level=10
            ))
        logger.info(set_color(
            non_default_msg
        ))
        se = AUTHORIZED_SEARCH_ENGINES["aol"]
    else:
        if verbose:
            logger.debug(set_color(
                "using default search engine (Google)...", level=10
            ))
        if opt.fileToEnumerate is None:
            logger.info(set_color(
                "using default search engine..."
            ))
        se = AUTHORIZED_SEARCH_ENGINES["google"]
    return se
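AUTHORIZED_SEARCH_ENGINES itself is defined elsewhere in the project; judging only from the keys used above ('duckduckgo', 'aol', 'google'), a plausible sketch of its shape is shown below. The URLs are illustrative assumptions, not the project's actual values.

# illustrative stand-in for the real mapping defined elsewhere in the project
AUTHORIZED_SEARCH_ENGINES = {
    "google": "https://www.google.com",
    "duckduckgo": "https://duckduckgo.com",
    "aol": "https://search.aol.com"
}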
Example #5
def __config_headers():
    """
    configure the request headers; this will configure user agents and proxies
    """
    if opt.proxyConfig is not None:
        proxy = opt.proxyConfig
    elif opt.proxyFileRand is not None:
        if opt.runInVerbose:
            logger.debug(set_color(
                "loading random proxy from '{}'...".format(opt.proxyFileRand), level=10
            ))
        with open(opt.proxyFileRand) as proxies:
            possible = proxies.readlines()
            proxy = random.choice(possible).strip()
    else:
        proxy = None
    if opt.usePersonalAgent is not None:
        agent = opt.usePersonalAgent
    elif opt.useRandomAgent:
        agent = grab_random_agent(verbose=opt.runInVerbose)
    else:
        agent = None
    return proxy, agent
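A sketch of how the returned (proxy, agent) pair might feed an HTTP request; the requests usage and the 'scheme://host:port' proxy format are assumptions for illustration, not taken from the original code.

import requests

proxy, agent = __config_headers()
headers = {"user-agent": agent} if agent is not None else {}
proxies = {"http": proxy, "https": proxy} if proxy is not None else None
response = requests.get("http://example.com", headers=headers, proxies=proxies)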
Example #6
    def __find_running_opts():
        """
        display the running options if verbose is used
        """
        opts_being_used = []
        for o, v in opt.__dict__.items():
            if v is not None:
                opts_being_used.append((o, v))
        return dict(opts_being_used)


    if opt.runInVerbose:
        being_run = __find_running_opts()
        logger.debug(set_color(
            "running with options '{}'...".format(being_run), level=10
        ))

    logger.info(set_color(
        "log file being saved to '{}'...".format(get_latest_log_file(CURRENT_LOG_FILE_PATH))
    ))

    if opt.showRequestInfo:
        logger.debug(set_color(
            "showing all HTTP requests because --show-requests flag was used...", level=10
        ))
        http_client.HTTPConnection.debuglevel = 1

    def __choose_attack(choice, attacks):
        while True:
            if int(choice) in range(len(attacks)):
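The opt object above is a parsed command-line option namespace. A self-contained sketch of the same filtering idea behind __find_running_opts, using argparse.Namespace as a stand-in for the real option object:

from argparse import Namespace

# stand-in for the parsed options object referenced above
opt = Namespace(runInVerbose=True, proxyConfig=None, useRandomAgent=True)

# keep only the options that were actually set, mirroring __find_running_opts
being_run = {o: v for o, v in opt.__dict__.items() if v is not None}
print(being_run)  # {'runInVerbose': True, 'useRandomAgent': True}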
Example #7
def get_urls(query,
             url,
             verbose=False,
             warning=True,
             user_agent=None,
             proxy=None,
             **kwargs):
    """
      Bypass Google captchas and Google API by using selenium-webdriver to gather
      the Google URL. This will open a robot controlled browser window and attempt
      to get a URL from Google that will be used for scraping afterwards.

      Only downside to this method is that your IP and user agent will be visible
      until the application pulls the URL.
    """
    if verbose:
        logger.debug(
            set_color("setting up the virtual display to hide the browser...",
                      level=10))
    ff_display = Display(visible=0, size=(800, 600))
    ff_display.start()
    logger.info(
        set_color(
            "firefox browser display will be hidden while it performs the query..."
        ))
    if warning:
        logger.warning(
            set_color(
                "your web browser will be automated in order for Zeus to successfully "
                "bypass captchas and API calls. this is done in order to grab the URL "
                "from the search and parse the results. please give selenium time to "
                "finish it's task...",
                level=30))
    if verbose:
        logger.debug(
            set_color("running selenium-webdriver and launching browser...",
                      level=10))

    if verbose:
        logger.debug(
            set_color(
                "adjusting selenium-webdriver user-agent to '{}'...".format(
                    user_agent),
                level=10))
    if proxy is not None:
        proxy_type = proxy.keys()
        proxy_to_use = Proxy({
            "proxyType": ProxyType.MANUAL,
            "httpProxy": proxy[proxy_type[0]],
            "ftpProxy": proxy[proxy_type[0]],
            "sslProxy": proxy[proxy_type[0]],
            "noProxy": ""
        })
        if verbose:
            logger.debug(
                set_color("setting selenium proxy to '{}'...".format(
                    ''.join(proxy_type) + "://" + ''.join(proxy.values())),
                          level=10))
    else:
        proxy_to_use = None

    profile = webdriver.FirefoxProfile()
    profile.set_preference("general.useragent.override", user_agent)
    browser = webdriver.Firefox(profile, proxy=proxy_to_use)
    logger.info(set_color("browser will open shortly..."))
    browser.get(url)
    if verbose:
        logger.debug(
            set_color(
                "searching search engine for the 'q' element (search button)...",
                level=10))
    search = browser.find_element_by_name('q')
    logger.info(
        set_color("searching '{}' using query '{}'...".format(url, query)))
    search.send_keys(query)
    search.send_keys(Keys.RETURN)  # hit return after you enter search text
    time.sleep(3)
    if verbose:
        logger.debug(set_color("obtaining URL from selenium..."))
    retval = browser.current_url
    ban_url_schema = ["http://ipv6.google.com", "http://ipv4.google.com"]
    if any(u in retval for u in ban_url_schema):  # if you got IP banned
        logger.warning(
            set_color(
                "it appears that Google is attempting to block your IP address, attempting bypass...",
                level=30))
        try:
            retval = bypass_ip_block(retval)
        except IndexError:
            browser.close()  # stop all the random rogue processes
            ff_display.stop()
            logger.warning(
                set_color(
                    "for now the IP ban bypass will only work for queries that have Google's search syntax "
                    "in them. (IE inurl:, incontext:, incontent:)",
                    level=30))
            raise NotImplementedError(
                "bypass for query '{}' is not implemented yet, try again with a different dork, "
                "or change your IP address...".format(query))
    if verbose:
        logger.debug(
            set_color("found current URL from selenium browser '{}'...".format(
                retval),
                      level=10))
    logger.info(set_color("closing the browser and continuing process.."))
    browser.close()
    ff_display.stop()
    return retval
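A hedged usage sketch of get_urls; the {scheme: 'host:port'} proxy layout is inferred from how proxy.keys() and proxy.values() are used above, and the query, user agent, and proxy address are placeholders. Running it requires Firefox, selenium, and a virtual display (pyvirtualdisplay/Xvfb).

# illustrative call only
result_url = get_urls(
    "inurl:index.php?id=",                 # dork-style search query (placeholder)
    "https://www.google.com",              # search engine start page
    user_agent="Mozilla/5.0 (X11; Linux x86_64)",
    proxy={"socks5": "127.0.0.1:9050"},    # assumed {scheme: "host:port"} layout
    verbose=True)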
Example #8
def parse_search_results(query,
                         url,
                         verbose=False,
                         dirname="{}/log/url-log",
                         filename="url-log-{}.log",
                         **kwargs):
    """
      Parse a webpage from Google for URLs with a GET (query) parameter
    """
    exclude = "google" or "webcache" or "youtube"

    create_dir(dirname.format(os.getcwd()))
    full_file_path = "{}/{}".format(
        dirname.format(os.getcwd()),
        filename.format(len(os.listdir(dirname.format(os.getcwd()))) + 1))

    def __get_headers():
        proxy_string, user_agent = None, None
        try:
            proxy_string = kwargs.get("proxy")
        except:
            pass

        try:
            user_agent = kwargs.get("agent")
        except:
            pass

        return proxy_string, user_agent

    if verbose:
        logger.debug(
            set_color("checking for user-agent and proxy configuration...",
                      level=10))
    proxy_string, user_agent = __get_headers()

    if proxy_string is not None:
        proxy_string = proxy_string_to_dict(proxy_string)
    if user_agent is None:
        user_agent = DEFAULT_USER_AGENT

    user_agent_info = "adjusting user-agent header to {}..."
    if user_agent != DEFAULT_USER_AGENT:
        user_agent_info = user_agent_info.format(user_agent.strip())
    else:
        user_agent_info = user_agent_info.format(
            "default user agent '{}'".format(DEFAULT_USER_AGENT))

    proxy_string_info = "setting proxy to {}..."
    if proxy_string is not None:
        proxy_string_info = proxy_string_info.format(
            ''.join(proxy_string.keys()) + "://" +
            ''.join(proxy_string.values()))
    else:
        proxy_string_info = "no proxy configuration detected..."

    headers = {"Connection": "close", "user-agent": user_agent}
    logger.info(set_color("attempting to gather query URL..."))
    try:
        query_url = get_urls(query,
                             url,
                             verbose=verbose,
                             user_agent=user_agent,
                             proxy=proxy_string)
    except Exception as e:
        if "WebDriverException" in str(e):
            logger.exception(
                set_color(
                    "it seems that you exited the browser, please allow the browser "
                    "to complete it's run so that Zeus can bypass captchas and API "
                    "calls",
                    level=50))
        else:
            logger.exception(
                set_color(
                    "{} failed to gather the URL from search engine, caught exception '{}' "
                    "exception has been logged to current log file...".format(
                        os.path.basename(__file__),
                        str(e).strip()),
                    level=50))
        shutdown()
    logger.info(
        set_color(
            "URL successfully gathered, searching for GET parameters..."))
    logger.info(set_color(proxy_string_info))
    req = requests.get(query_url, proxies=proxy_string)
    logger.info(set_color(user_agent_info))
    req.headers.update(headers)
    found_urls = URL_REGEX.findall(req.text)
    retval = set()
    for urls in list(found_urls):
        for url in list(urls):
            url = urllib.unquote(url)
            if URL_QUERY_REGEX.match(url) and not any(e in url for e in exclude):
                if isinstance(url, unicode):
                    url = str(url).encode("utf-8")
                if verbose:
                    logger.debug(
                        set_color("found '{}'...".format(url), level=10))
                retval.add(url.split("&")[0])
    logger.info(
        set_color("found a total of {} URL's with a GET parameter...".format(
            len(retval))))
    if len(retval) != 0:
        logger.info(
            set_color(
                "saving found URL's under '{}'...".format(full_file_path)))
        with open(full_file_path, "a+") as log:
            for url in list(retval):
                log.write(url + "\n")
    else:
        logger.critical(
            set_color(
                "did not find any usable URL's with the given query '{}' "
                "using search engine '{}'...".format(query, url),
                level=50))
        shutdown()
    return list(retval) if len(retval) != 0 else None
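URL_REGEX and URL_QUERY_REGEX are defined elsewhere in the project; a minimal, self-contained sketch of the filtering idea they support (keep only URLs that carry a GET parameter, then trim everything after the first parameter) is shown below. The pattern is an illustrative stand-in, not the project's actual expression.

import re

# toy stand-in for the project's URL_QUERY_REGEX
url_query_regex = re.compile(r"https?://\S+\?\S+=\S*")

candidates = [
    "http://example.com/page.php?id=1&ref=2",
    "http://example.com/static/logo.png"
]
with_params = [u.split("&")[0] for u in candidates if url_query_regex.match(u)]
print(with_params)  # ['http://example.com/page.php?id=1']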
Example #9
def check_for_admin_page(url,
                         exts,
                         protocol="http://",
                         show_possibles=False,
                         verbose=False):
    possible_connections, connections = set(), set()
    stripped_url = replace_http(url.strip())
    for ext in exts:
        ext = ext.strip()
        true_url = "{}{}{}".format(protocol, stripped_url, ext)
        if verbose:
            logger.debug(set_color("trying '{}'...".format(true_url),
                                   level=10))
        try:
            urlopen(true_url, timeout=5)
            logger.info(
                set_color(
                    "connected successfully to '{}'...".format(true_url)))
            connections.add(true_url)
        except HTTPError as e:
            data = str(e).split(" ")
            if verbose:
                if "Access Denied" in str(e):
                    logger.warning(
                        set_color(
                            "got access denied, possible control panel found without external access on '{}'..."
                            .format(true_url),
                            level=30))
                    possible_connections.add(true_url)
                else:
                    logger.error(
                        set_color(
                            "failed to connect got error code {}...".format(
                                data[2]),
                            level=40))
        except Exception as e:
            if verbose:
                if "<urlopen error timed out>" or "timeout: timed out" in str(
                        e):
                    logger.warning(
                        set_color(
                            "connection timed out after five seconds "
                            "assuming won't connect and skipping...",
                            level=30))
                else:
                    logger.exception(
                        set_color(
                            "failed to connect with unexpected error '{}'...".
                            format(str(e)),
                            level=50))
                    fix_log_file()
                    request_issue_creation()
    possible_connections, connections = list(possible_connections), list(
        connections)
    data_msg = "found {} possible connections(s) and {} successful connection(s)..."
    logger.info(
        set_color(data_msg.format(len(possible_connections),
                                  len(connections))))
    if len(connections) != 0:
        logger.info(set_color("creating connection tree..."))
        create_tree(url, connections)
    else:
        logger.fatal(
            set_color(
                "did not find any successful connections to {}'s "
                "admin page",
                level=50))
    if show_possibles:
        if len(possible_connections) != 0:
            logger.info(set_color("creating possible connection tree..."))
            create_tree(url, possible_connections)
        else:
            logger.fatal(
                set_color(
                    "did not find any possible connections to {}'s "
                    "admin page",
                    level=50))
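A usage sketch for check_for_admin_page; in example #2 the extension list comes from __load_extensions(), so the hard-coded paths here are only placeholders for that wordlist.

# illustrative call only; the real extension list is loaded from a wordlist file
admin_paths = ["/admin", "/admin.php", "/login", "/wp-admin"]
check_for_admin_page("example.com", admin_paths, show_possibles=True, verbose=True)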
Example #10
def main_xss(start_url, verbose=False, proxy=None, agent=None, tamper=None):
    if tamper:
        logger.info(set_color(
            "tampering payloads with '{}'...".format(tamper)
        ))
    find_xss_script(start_url)
    logger.info(set_color(
        "loading payloads..."
    ))
    payloads = __load_payloads()
    if verbose:
        logger.debug(set_color(
            "a total of {} payloads loaded...".format(len(payloads)), level=10
        ))
    logger.info(set_color(
        "payloads will be written to a temporary file and read from there..."
    ))
    filename = create_urls(start_url, payloads, tamper=tamper)
    logger.info(set_color(
        "loaded URLs have been saved to '{}'...".format(filename)
    ))
    logger.info(set_color(
        "testing for XSS vulnerabilities on host '{}'...".format(start_url)
    ))
    if proxy is not None:
        logger.info(set_color(
            "using proxy '{}'...".format(proxy)
        ))
    success = set()
    with open(filename) as urls:
        for i, url in enumerate(urls.readlines(), start=1):
            url = url.strip()
            result = scan_xss(url, proxy=proxy, agent=agent)
            payload = find_xss_script(url)
            if verbose:
                logger.info(set_color(
                    "trying payload '{}'...".format(payload)
                ))
            if result[0] != "sqli" and result[0] is True:
                success.add(url)
                if verbose:
                    logger.debug(set_color(
                        "payload '{}' appears to be usable...".format(payload), level=10
                    ))
            elif result[0] is "sqli":
                if i <= 1:
                    logger.error(set_color(
                        "loaded URL '{}' threw a DBMS error and appears to be injectable, test for SQL injection, "
                        "backend DBMS appears to be '{}'...".format(
                            url, result[1]
                        ), level=40
                    ))
                else:
                    if verbose:
                        logger.error(set_color(
                            "SQL error discovered...", level=40
                        ))
            else:
                if verbose:
                    logger.debug(set_color(
                        "host '{}' does not appear to be vulnerable to XSS attacks with payload '{}'...".format(
                            start_url, payload
                        ), level=10
                    ))
    if len(success) != 0:
        logger.info(set_color(
            "possible XSS scripts to be used:"
        ))
        create_tree(start_url, list(success))
    else:
        logger.error(set_color(
            "host '{}' does not appear to be vulnerable to XSS attacks...".format(start_url)
        ))
    save = prompt(
        "would you like to keep the URL's saved for further testing", opts="yN"
    )
    if save.lower().startswith("n"):
        os.remove(filename)
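A usage sketch of this newer main_xss; the target URL, proxy string, and tamper name are placeholders, since the examples never show the values create_urls and scan_xss actually accept.

# illustrative call only; argument formats are assumptions
main_xss(
    "http://example.com/search.php?q=test",
    verbose=True,
    proxy="socks5://127.0.0.1:9050",              # assumed format, passed straight to scan_xss
    agent="Mozilla/5.0 (X11; Linux x86_64)",
    tamper="urlencode")                           # assumed to name a tamper script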
Example #11
def search_multiple_pages(query,
                          link_amount,
                          proxy=None,
                          agent=None,
                          verbose=False):
    def __config_proxy(proxy_string):
        proxy_type_schema = {
            "http": httplib2.socks.PROXY_TYPE_HTTP,
            "socks4": httplib2.socks.PROXY_TYPE_SOCKS4,
            "socks5": httplib2.socks.PROXY_TYPE_SOCKS5
        }
        proxy_type = get_proxy_type(proxy_string)[0]
        proxy_dict = proxy_string_to_dict(proxy_string)
        proxy_config = httplib2.ProxyInfo(
            proxy_type=proxy_type_schema[proxy_type],
            proxy_host="".join(proxy_dict.keys()),
            proxy_port="".join(proxy_dict.values()))
        return proxy_config

    if proxy is not None:
        if verbose:
            logger.debug(
                set_color("configuring to use proxy '{}'...".format(proxy),
                          level=10))
        __config_proxy(proxy)

    if agent is not None:
        if verbose:
            logger.debug(
                set_color("settings user-agent to '{}'...".format(agent),
                          level=10))

    logger.warning(
        set_color(
            "multiple pages will be searched using Google's API client, searches may be blocked after a certain "
            "amount of time...",
            level=30))
    results, limit, found = set(), link_amount, 0
    index = google_api.search(query, user_agent=agent, safe="on")
    try:
        while limit > 0:
            results.add(next(index))
            limit -= 1
            found += 1
    except Exception as e:
        if "Error 503" in str(e):
            logger.fatal(
                set_color(
                    "Google is blocking the current IP address, dumping already found URL's...",
                    level=50))
            results = results
            pass

    retval = set()
    for url in results:
        if URL_REGEX.match(url) and URL_QUERY_REGEX.match(url):
            if verbose:
                logger.debug(set_color("found '{}'...".format(url), level=10))
            retval.add(url)

    if len(retval) != 0:
        logger.info(
            set_color(
                "a total of {} links found out of requested {}...".format(
                    len(retval), link_amount)))
        write_to_log_file(list(retval), URL_LOG_PATH, "url-log-{}.log")
    else:
        logger.error(
            set_color("unable to extract URL's from results...", level=40))
Example #12
def parse_search_results(query, url_to_search, verbose=False, **kwargs):
    """
      Parse a webpage from Google for URLs with a GET (query) parameter
    """
    exclude = ("www.google.com", "map.google.com", "mail.google.com",
               "drive.google.com", "news.google.com", "accounts.google.com")
    splitter = "&amp;"
    retval = set()
    query_url = None

    def __get_headers():
        proxy_string, user_agent = None, None
        try:
            proxy_string = kwargs.get("proxy")
        except:
            pass

        try:
            user_agent = kwargs.get("agent")
        except:
            pass

        return proxy_string, user_agent

    if verbose:
        logger.debug(
            set_color("checking for user-agent and proxy configuration...",
                      level=10))
    proxy_string, user_agent = __get_headers()

    if proxy_string is not None:
        proxy_string = proxy_string_to_dict(proxy_string)
    if user_agent is None:
        user_agent = DEFAULT_USER_AGENT

    user_agent_info = "adjusting user-agent header to {}..."
    if user_agent != DEFAULT_USER_AGENT:
        user_agent_info = user_agent_info.format(user_agent.strip())
    else:
        user_agent_info = user_agent_info.format(
            "default user agent '{}'".format(DEFAULT_USER_AGENT))

    proxy_string_info = "setting proxy to {}..."
    if proxy_string is not None:
        proxy_string_info = proxy_string_info.format(
            ''.join(proxy_string.keys()) + "://" +
            ''.join(proxy_string.values()))
    else:
        proxy_string_info = "no proxy configuration detected..."

    headers = {"Connection": "close", "user-agent": user_agent}
    logger.info(set_color("attempting to gather query URL..."))
    try:
        query_url = get_urls(query,
                             url_to_search,
                             verbose=verbose,
                             user_agent=user_agent,
                             proxy=proxy_string)
    except Exception as e:
        if "WebDriverException" in str(e):
            logger.exception(
                set_color(
                    "it seems that you exited the browser, please allow the browser "
                    "to complete it's run so that Zeus can bypass captchas and API "
                    "calls",
                    level=50))
        elif "'/usr/lib/firefoxdriver/webdriver.xpi'" in str(e):
            logger.fatal(
                set_color(
                    "firefox was not found in the default location on your system, "
                    "check your installation and make sure it is in /usr/lib, if you "
                    "find it there, restart your system and try again...",
                    level=50))
        else:
            logger.exception(
                set_color(
                    "{} failed to gather the URL from search engine, caught exception '{}' "
                    "exception has been logged to current log file...".format(
                        os.path.basename(__file__),
                        str(e).strip()),
                    level=50))
            request_issue_creation()
        shutdown()
    logger.info(
        set_color(
            "URL successfully gathered, searching for GET parameters..."))

    logger.info(set_color(proxy_string_info))
    req = requests.get(query_url, proxies=proxy_string)
    logger.info(set_color(user_agent_info))
    req.headers.update(headers)
    found_urls = URL_REGEX.findall(req.text)
    for urls in list(found_urls):
        for url in list(urls):
            url = unquote(url)
            if URL_QUERY_REGEX.match(url) and not any(l in url
                                                      for l in exclude):
                if isinstance(url, unicode):
                    url = str(url).encode("utf-8")
                if "webcache" in url:
                    logger.info(
                        set_color(
                            "received webcache URL, extracting URL from webcache..."
                        ))
                    url = extract_webcache_url(url)
                if verbose:
                    try:
                        logger.debug(
                            set_color("found '{}'...".format(
                                url.split(splitter)[0]),
                                      level=10))
                    except TypeError:
                        logger.debug(
                            set_color("found '{}'...".format(
                                str(url).split(splitter)[0]),
                                      level=10))
                    except AttributeError:
                        logger.debug(
                            set_color("found '{}...".format(str(url)),
                                      level=10))
                retval.add(url.split("&amp;")[0])
    logger.info(
        set_color("found a total of {} URL's with a GET parameter...".format(
            len(retval))))
    if len(retval) != 0:
        write_to_log_file(retval, URL_LOG_PATH, "url-log-{}.log")
    else:
        logger.critical(
            set_color(
                "did not find any usable URL's with the given query '{}' "
                "using search engine '{}'...".format(query, url_to_search),
                level=50))
        shutdown()
    return list(retval) if len(retval) != 0 else None
Example #13
def get_urls(query, url, verbose=False, warning=True, user_agent=None, proxy=None, **kwargs):
    """
      Bypass Google captchas and Google API by using selenium-webdriver to gather
      the Google URL. This will open a robot controlled browser window and attempt
      to get a URL from Google that will be used for scraping afterwards.

      Only downside to this method is that your IP and user agent will be visible
      until the application pulls the URL.
    """
    if verbose:
        logger.debug(set_color(
            "setting up the virtual display to hide the browser...", level=10
        ))
    ff_display = Display(visible=0, size=(800, 600))
    ff_display.start()
    logger.info(set_color(
        "firefox browser display will be hidden while it performs the query..."
    ))
    if warning:
        logger.warning(set_color(
            "your web browser will be automated in order for Zeus to successfully "
            "bypass captchas and API calls. this is done in order to grab the URL "
            "from the search and parse the results. please give selenium time to "
            "finish it's task...", level=30
        ))
    if verbose:
        logger.debug(set_color(
            "running selenium-webdriver and launching browser...", level=10
        ))

    if verbose:
        logger.debug(set_color(
            "adjusting selenium-webdriver user-agent to '{}'...".format(user_agent), level=10
        ))
    if proxy is not None:
        proxy_type = proxy.keys()
        proxy_to_use = Proxy({
            "proxyType": ProxyType.MANUAL,
            "httpProxy": proxy[proxy_type[0]],
            "ftpProxy": proxy[proxy_type[0]],
            "sslProxy": proxy[proxy_type[0]],
            "noProxy": ""
        })
        if verbose:
            logger.debug(set_color(
                "setting selenium proxy to '{}'...".format(
                    ''.join(proxy_type) + "://" + ''.join(proxy.values())
                ), level=10
            ))
    else:
        proxy_to_use = None

    profile = webdriver.FirefoxProfile()
    profile.set_preference("general.useragent.override", user_agent)
    browser = webdriver.Firefox(profile, proxy=proxy_to_use)
    logger.info(set_color("browser will open shortly..."))
    browser.get(url)
    if verbose:
        logger.debug(set_color(
            "searching search engine for the 'q' element (search button)...", level=10
        ))
    search = browser.find_element_by_name('q')
    logger.info(set_color(
        "searching '{}' using query '{}'...".format(url, query)
    ))
    search.send_keys(query)
    search.send_keys(Keys.RETURN)  # hit return after you enter search text
    time.sleep(3)
    if verbose:
        logger.debug(set_color(
            "obtaining URL from selenium..."
        ))
    retval = browser.current_url
    if verbose:
        logger.debug(set_color(
            "found current URL from selenium browser '{}'...".format(retval), level=10
        ))
    logger.info(set_color(
        "closing the browser and continuing process.."
    ))
    browser.close()
    ff_display.stop()
    return retval