Example #1
def drupal_get_version(target):
    # Try, in order: /CHANGELOG.txt, the <meta name="generator"> tag,
    # and finally the X-Generator HTTP response header.
    try:
        soup = str(core.get_page(target + "CHANGELOG.txt"))
        regex = re.findall(r'Drupal (.*?),', soup)
        if regex:
            return regex[0], "/CHANGELOG.txt"
        soup = core.get_page(target)
        regex = re.findall(
            r'content="Drupal (.*?) \(http(s|):\/\/(www\.|)drupal\.org\)"',
            str(soup))
        if regex:
            return regex[0][0], "META Generator Tag"
        r = requests.get(target)
        if r.status_code == 200 and r.headers.get("X-Generator"):
            return r.headers["X-Generator"], "X-Generator HTTP Header"
        return "X.X.X", ""
    except Exception:
        return "X.X.X", ""
Example #2
def joomla_get_version(target):
    # Try, in order: the <meta name="generator"> tag, a list of manifest
    # XML files that expose a <version> element, and finally README.txt.
    try:
        soup = core.get_page(target)
        regex = re.findall(
            r'content=(?:\"|\')Joomla! (.*?) - Open Source Content Management(?:\"|\')',
            str(soup))
        if regex:
            return regex[0], "META Generator Tag"
        xml_files = [
            'administrator/manifests/files/joomla.xml',
            'language/en-GB/en-GB.xml',
            'administrator/components/com_content/content.xml',
            'administrator/components/com_plugins/plugins.xml',
            'administrator/components/com_media/media.xml',
            'mambots/content/moscode.xml'
        ]
        for xml_file in xml_files:
            soup = core.get_page(target + xml_file)
            regex = re.findall(r'<version>(.*?)</version>', str(soup))
            if regex:
                return regex[0], "/" + xml_file
        soup = str(core.get_page(target + "README.txt"))
        regex = re.findall(r'package to version (.*?)\n', soup)
        if regex:
            return regex[0], "/README.txt"
        return "X.X.X", ""
    except Exception as e:
        print(e)
        return "X.X.X", ""
Example #3
def get_wp_plugins(target):
    # Collect plugin slugs (and versions, when a ?ver= query string is
    # present) from asset URLs under /wp-content/plugins/, plus any
    # "Powered by ... -" credit strings in the page source.
    plugins = dict()
    lplugins = []
    try:
        soup = core.get_page(target)
        res = re.findall(re.compile('/wp-content/plugins/(.+?)/(.+?)"'),
                         str(soup))
        for x in sorted(set(res)):
            if len(x[1].split("?ver=")) != 1:
                plugins[x[0]] = x[1].split("?ver=")[1]
            else:
                plugins[x[0]] = ""
        res1 = re.findall(re.compile('Powered by (.+?) -'), str(soup))
        for x in sorted(set(res1)):
            plugins[x] = ""
        for name, version in plugins.items():
            if version:
                lplugins.append([name, version])
            else:
                lplugins.append([name])
        return lplugins
    except Exception as e:
        print("get_wp_plugins", e)
        return []
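The returned list mixes one-element entries (plugin name only) and two-element entries (name plus version taken from the ?ver= query string). A hypothetical call, purely for illustration:

for entry in get_wp_plugins("http://example.com/"):
    # Hypothetical usage; example.com stands in for a real target URL.
    if len(entry) == 2:
        print("plugin %s, version %s" % (entry[0], entry[1]))
    else:
        print("plugin %s, version unknown" % entry[0])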
    """
Example #4
def wp_drupal_theme_id(target):
    # Extract the active theme name from any /themes/<name>/ asset path
    # (works for both WordPress and Drupal); fall back to fuzzing.
    try:
        soup = str(core.get_page(target))
        wp_theme = re.findall(re.compile('/themes/(.+?)/'), soup)
        if wp_theme:
            return wp_theme[0]
        return fuzz_wp_component(target, "themes")
    except Exception as e:
        print("wp_drupal_theme_id", e)
        return ""
Example #5
def wp_get_version(target):
    # Try, in order: the <meta name="generator"> tag, the feed
    # <generator> element, and finally wp-links-opml.php.
    try:
        soup = core.get_page(target)
        regex = re.findall(r'<meta content="WordPress (.*?)"', str(soup))
        if regex:
            return regex[0], "META Generator Tag"
        regex = re.findall(
            r'<generator>https://wordpress.org/\?v=(.*?)</generator>',
            str(soup))
        if regex:
            return regex[0], "Meta tag"
        soup = core.get_page(target + "wp-links-opml.php")
        regex = re.findall(r'generator="wordpress/(.*?)"', str(soup))
        if regex:
            return regex[0], "/wp-links-opml.php"
        return "X.X.X", ""
    except Exception as e:
        print("wp_get_version", e)
        return "X.X.X", ""
Example #6
def cmscan(target):
    # The CMS can be identified via the generator meta tag, the page
    # source, robots.txt and HTTP headers; here a crude keyword count
    # over the page source picks WordPress, Joomla or Drupal, then the
    # matching module fingerprints version, theme, plugins and vulns.
    if not target.endswith("/"):
        target = target + "/"
    if not re.match(r"^http", target):
        target = "http://" + target
    wp = jml = drpl = 0
    soup = str(core.get_page(target)).lower()
    heads = core.get_http_headers_and_ip(target)
    for l in soup.split(" "):
        if "wp-" in l:
            wp += 1
        elif "joomla" in l:
            jml += 1
        elif "drupal" in l:
            drpl += 1
    if wp > jml and wp > drpl:
        theme = wps.wp_drupal_theme_id(target)
        plugins = wps.get_wp_plugins(target)
        version, found = wps.wp_get_version(target)
        vulns = wps.wp_get_vulns(version, "wordpresses") + wps.wp_get_vulns(theme, "themes") + wps.wp_get_vulns(plugins, "plugins")
        ads = wps.wp_additional_infos(target, theme, plugins)
        return ["WordPress", version, theme, plugins, heads, vulns, ads, found]
    elif drpl > jml and drpl > wp:
        theme = drs.wp_drupal_theme_id(target)
        plugins = drs.get_drupal_plugins(target)
        version, found = drs.drupal_get_version(target)
        vulns = drs.get_dru_core_vulns(version) + drs.get_dru_mod_vulns(target, plugins)
        ads = drs.dru_additional_infos(target, theme, plugins)
        return ["Drupal", version, theme, plugins, heads, vulns, ads, found]
    elif jml > wp and jml > drpl:
        version, found = jms.joomla_get_version(target)
        theme = jms.joomla_theme_id(target)
        plugins = jms.get_joomla_plugins(target)
        vulns = jms.get_joo_core_vulns(version) + jms.get_joo_com_vulns(plugins)
        ads = jms.joo_additional_infos(target, theme, plugins)
        return ["Joomla", version, theme, plugins, heads, vulns, ads, found]
    else:
        return 0
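cmscan() returns 0 when no CMS is recognised, otherwise an 8-element list. A hedged usage sketch, assuming cmscan and its wps/drs/jms helper modules are importable:

result = cmscan("example.com")  # hypothetical target
if result == 0:
    print("CMS could not be identified")
else:
    cms, version, theme, plugins, heads, vulns, ads, found = result
    print("%s %s (version found via %s)" % (cms, version, found))
    print("theme:", theme)
    print("plugins:", plugins)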
Example #7
def get_drupal_plugins(target):
    # Collect Drupal module names from .../modules/<name>/... paths found
    # inside <script> tags; split("\\")[0] strips escaped-JS remainders.
    soup = core.get_page(target)
    lst = []
    try:
        for link in soup.find_all('script'):
            for x in str(link).split("\n"):
                if "modules" in x:
                    parts = x.split("/")
                    for i, y in enumerate(parts):
                        if "modules" in y and i + 1 < len(parts):
                            lst.append(parts[i + 1].split("\\")[0])
        return sorted(set(lst))
    except Exception as e:
        print(e)
    return "-"
Example #8
def crawler_():
    form = crawl_form()
    if form.validate_on_submit():
        # Pull the site's link map from the hackertarget page-links API
        # and keep only URLs belonging to the submitted domain.
        urls = []
        resp = requests.get("http://api.hackertarget.com/pagelinks/?q=" + form.site.data).text
        for x in resp.split("\n"):
            if "/" in x and form.site.data in x:
                urls.append(x)
        target = form.site.data
        if not target.endswith("/"):
            target = target + "/"
        if not re.match(r"^http", target):
            target = "http://" + target
        # Same keyword-count CMS guess as cmscan(), then list that CMS's
        # default files alongside the crawled URLs.
        wp = jml = drpl = 0
        soup = str(core.get_page(target)).lower()
        for l in soup.split(" "):
            if "wp-" in l:
                wp += 1
            elif "joomla" in l:
                jml += 1
            elif "drupal" in l:
                drpl += 1
        if wp > jml and wp > drpl:
            iurls = wps.check_wp_default_files(target)
            return render_template('crawling.html', form=form, urls=sorted(set(urls)), iurls=iurls)
        elif drpl > jml and drpl > wp:
            iurls = drs.check_dru_default_files(target)
            return render_template('crawling.html', form=form, urls=sorted(set(urls)), iurls=iurls)
        elif jml > wp and jml > drpl:
            iurls = jms.check_joo_default_files(target)
            return render_template('crawling.html', form=form, urls=sorted(set(urls)), iurls=iurls)
        else:
            return render_template('crawling.html', form=form, urls=sorted(set(urls)))
    return render_template('crawler.html', form=form)
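crawl_form is not defined in this snippet; a minimal sketch of what it might look like, assuming the project uses Flask-WTF/WTForms with a single site field:

from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import DataRequired

class crawl_form(FlaskForm):
    # Hypothetical form definition matching the form.site.data and
    # form.validate_on_submit() calls used in crawler_() above.
    site = StringField("Site", validators=[DataRequired()])
    submit = SubmitField("Crawl")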