Example #1
0
def crackClients(clients, usersMac, SSID, passphraseQ):
    """Pair up EAPOL handshake messages per client and attack the one
    belonging to *usersMac*.

    Args:
        clients: dict mapping a client id to its list of message dicts.
            A message dict carries a "message" number; message 1 holds
            "AP", "client" and "Anonce", message 2 holds "Snonce",
            "mic" and "data".
        usersMac: AP MAC address whose handshake should be cracked.
        SSID: network name, forwarded to crack().
        passphraseQ: passphrase source, forwarded to crack().

    Returns:
        Whatever crack() returns on success, otherwise False.
    """
    clientHandshakes = []
    for messages in clients.values():
        handshake = []
        for message in messages:
            if message["message"] == 1:
                # Message 1 (re)starts a candidate handshake pair.
                handshake = [message]
            elif len(handshake) == 1:
                # A follow-up right after message 1 completes the pair;
                # only the first complete pair per client is kept.
                handshake.append(message)
                clientHandshakes.append(handshake)
                break
            else:
                # Out-of-sequence message: discard partial state.
                handshake = []
    for clientHandshake in clientHandshakes:
        if clientHandshake[0]["AP"] == usersMac:
            cracked = crack(
                SSID,
                clientHandshake[0]["client"],
                clientHandshake[0]["AP"],
                clientHandshake[0]["Anonce"],
                clientHandshake[1]["Snonce"],
                clientHandshake[1]["mic"],
                clientHandshake[1]["data"],
                passphraseQ,
            )
            # crack() signals failure with the False singleton; test
            # identity so a falsy-but-valid result is still returned.
            if cracked is not False:
                return cracked
    return False
def main():
    """Read the cipher text, build an encoded sample, crack the
    substitution key map and print it with an initial plaintext guess.
    """
    text = readFile("ciphers.txt")[0]
    encoded = encoder(text)
    # Only the cipher is needed here; encoder() also returns "key" and
    # "plain", which this driver never uses.
    cipher = encoded["cipher"]

    keyMap = crack(cipher)
    # print(getUnfound(keyMap))
    for cipherChar in keyMap:
        # keyMap maps each cipher character to its guessed plaintext value.
        print(cipherChar + ": " + str(keyMap[cipherChar]))

    print(getInitGuess(keyMap))
Example #3
0
def main():
    """Distribute a WPA handshake dictionary attack over a Spark cluster.

    Reads -f/--file (handshake capture), -s/--SSID (AP name) and
    -d/--dictionary (dictionary path, e.g. HDFS/S3) from the command
    line, splits the dictionary into partitions, and runs hashcat on
    each partition via a worker-side helper. A found password is
    published through a Spark accumulator watched by a monitor thread.
    """
    # Get arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-f", "--file", required=True, help="handshake file")
    ap.add_argument("-s", "--SSID", required=True, help="AP name")
    ap.add_argument("-d",
                    "--dictionary",
                    required=True,
                    help="Dictionary path")
    args = vars(ap.parse_args())

    # File which contains EAPOL packages
    handShakeFile = args['file']

    # SSID name
    ssid = args['SSID']

    # Complete path of the dictionary (HDFS, S3 ...)
    dictPath = args['dictionary']

    # Load Spark
    spark = SparkSession \
        .builder \
        .appName("NotASmurfCracker") \
        .getOrCreate()

    # coalesce(30) caps the partition count so each worker gets a
    # reasonably sized dictionary slice.
    rdd = spark.sparkContext.textFile(dictPath).coalesce(30)
    # String accumulator (custom PasswordAccumulator) used by workers to
    # report a cracked password back to the driver.
    accu = spark.sparkContext.accumulator("", PasswordAccumulator())

    numberPartitions = rdd.getNumPartitions()
    print("\n\n\n * * * * * *  Dictionary Number of Partitions: {0}".format(
        str(numberPartitions)))
    print("Got SSID %s handshake %s" % (ssid, handShakeFile))

    # Start the thread to check password accumulator
    _thread.start_new_thread(checkPassword,
                             ("PasswordCheck", accu, spark.sparkContext))

    # Function executed by workers
    def crack(file, ssid, partition):
        # Import inside the closure so the module is resolved on the
        # worker, not serialized from the driver.
        import cracker.crack
        psswd = cracker.crack.runHashCat(file, ssid, partition)
        if psswd != False:
            accu.add(psswd)
        return

    try:
        rdd.foreachPartition(
            lambda partition: crack(handShakeFile, ssid, partition))
    except Exception:
        # NOTE(review): any driver-side exception is interpreted as the
        # job being torn down after a password was found (presumably the
        # monitor thread cancels it) — confirm; this also swallows
        # genuine failures.
        print("Driver Exception: Password Found?")
Example #4
0
def crackClients(clients, usersMac, SSID, passphraseQ):
    """Collect one completed (message-1, message-2) handshake pair per
    client, then attempt to crack the pair whose AP equals usersMac.

    Returns the crack() result on success, otherwise False.
    """
    pairs = []
    for messages in clients.values():
        pending = []
        for msg in messages:
            if msg['message'] == 1:
                # A message 1 always (re)starts a candidate pair.
                pending = [msg]
            elif len(pending) == 1:
                # Completed the pair; keep it and move to the next client.
                pending.append(msg)
                pairs.append(pending)
                break
            else:
                # Out-of-sequence traffic invalidates the partial pair.
                pending = []
    for first, second in pairs:
        if first['AP'] != usersMac:
            continue
        result = crack(SSID, first['client'], first['AP'],
                       first['Anonce'], second['Snonce'],
                       second['mic'], second['data'], passphraseQ)
        if result != False:
            return result
    return False
Example #5
0
import cracker
from PIL import Image

# Python 2 demo script: load a CAPTCHA image and OCR it with the local
# cracker module against the symbol templates stored in ./codelib.
# NOTE(review): the image path is hard-coded to a Windows location.
img = Image.open("C:/codeimg/C4.gif")
symbols = cracker.crack(img, "./codelib")
print symbols
Example #6
0
    def crawl(self,name,type='SQR$=|',mode=0):
        """Crawl CNKI search results for *name* and save every result page.

        Parameters:
            name: the query string passed to the url builders.
            type: raw CNKI query field/type parameter — assumed to be a
                  site-specific token, TODO confirm meaning.
            mode: 0 -> fuzzy-match query url, 1 -> exact-match query url.

        Side effects: saves each content page via self.outputPage, logs
        via self.writeLog, may clear self.cookies on a validation-code
        page, and prints progress when self.ExtraOutInfo is set.
        Returns None (early return when the IP is blocked).
        """
        logging.shutdown()
        # Build the initial query URL (fuzzy vs exact match).
        if mode==0:
            url=self.getUrlForFuzzy(name,type)
        if mode==1:
            url=self.getUrlForExact(name,type)

        # Up to 3 attempts; the response body is a "pagename" token used
        # to fetch the actual result-list page below.
        # NOTE(review): if all 3 attempts fail, urlInfo is unbound and
        # the read() below raises NameError; the bare except also hides
        # the real failure cause.
        for i in range(3):
            try:
                urlInfo=urllib2.urlopen(url)
                break
            except:
                if self.ExtraOutInfo:
                    print 'Error when parsing the request url with',name
        aspSucc=urlInfo.read()
        aspUrl='http://epub.cnki.net/kns/brief/brief.aspx?pagename='+aspSucc

        # Fetch the result-list page itself (same 3-retry pattern).
        for i in range(3):
            try:
                aspInfo=urllib2.urlopen(aspUrl)
                break
            except:
                if self.ExtraOutInfo:
                    print 'Error when parsing the asp url',name
        aspContent=aspInfo.read()
        """
        In order to get the content of cnki, we must at first post a url containing
        several parameters like name and type and some stuff, and then we can get 
        an asp url of the detailed contents, stored in aspContent
        """

        # Total page count comes from the 'countPageMark' span; absent
        # span means a single page of results.
        soup=BeautifulSoup(aspContent)
        hrefs=soup.find_all('span','countPageMark')
        if len(hrefs)==0:
            pageCounts=1
        else:
            pageCounts=int(hrefs[0].string[2:])
#        print hrefCounts

        crawledCount,pageNum=0,1
        currentSoup,currentPage=soup,aspContent
        numPerPage=20
        # The queryid embedded in the page is required to build the
        # pagination URLs below.
        queryID=int(re.findall(r'queryid=\d*',currentPage)[0][8:])

        while pageNum<=pageCounts:

            # No 'fz14' result links means we were served a validation
            # (captcha) page: drop cookies and redo the original query to
            # obtain a fresh session and a fresh queryid.
            if len(currentSoup.find_all('a','fz14'))==0:
                if self.ExtraOutInfo:
                    print 'validation code detected'
                self.cookies.clear()
       #         print("===========validate code page==========")

                for i in range(3):
                    try:
                        urlInfo=urllib2.urlopen(url)
                        break
                    except:
                        if self.ExtraOutInfo:
                            print 'Error when parsing the request url with',name
                aspSucc=urlInfo.read()
                aspUrl='http://epub.cnki.net/kns/brief/brief.aspx?pagename='+aspSucc
                for i in range(3):
                    try:
                        aspInfo=urllib2.urlopen(aspUrl)
                        break
                    except:
                        if self.ExtraOutInfo:
                            print 'Error when parsing the asp url',name
                aspContent=aspInfo.read()
#                print aspContent
                queryID=int(re.findall(r'queryid=\d*',aspContent)[0][8:])
#                pageNum=pageNum-1

            # Build the pagination URL for the current result page.
            newPage='http://epub.cnki.net/kns/brief/brief.aspx?'
            newPage=newPage+'curpage='+str(pageNum)+'&RecordsPerPage='+str(numPerPage)+'&QueryID='+str(queryID)+'&ID=&turnpage=1&tpagemode=L&dbPrefix=SCOD&Fields=&DisplayMode=listmode&PageName=ASP.brief_default_result_aspx'
            for i in range(3):
                try:
                    currentPage=urllib2.urlopen(newPage)
                    break
                except:
                    if self.ExtraOutInfo:
                        print 'error when turn page from '+str(pageNum-1) + 'to' + str(pageNum)
            # NOTE(review): currentPage is the urlopen response object
            # here (not .read() as elsewhere); BeautifulSoup accepts a
            # file-like object, but the re.findall-on-currentPage paths
            # assume a string — confirm this mismatch is intentional.
            currentSoup=BeautifulSoup(currentPage)
            if len(currentSoup.find_all('a','fz14'))==0:
                # Still no results: on page 1 advance anyway, otherwise
                # retry the same page (captcha branch above will fire).
                if pageNum==1:
                    pageNum=pageNum+1
                continue
            # Visit each result link and save the detailed content page.
            for page in currentSoup.find_all('a','fz14'):
                contentUrl='http://dbpub.cnki.net/grid2008/dbpub'+page.get('href')[11:]
                contentPage=urllib2.urlopen(contentUrl).read()
                soup=BeautifulSoup(contentPage)
             #   print soup.contents
                # An "your IP" notice in the first paragraph means CNKI
                # blocked this IP; abort the whole crawl.
                if len(re.findall(u'您的IP',soup.find('p').text))!=0:
                    self.writeLog("IP Denied")
                    return
                # Content-page captcha loop: OCR the image with the
                # cracker module and resubmit until it disappears.
                while len(soup.findAll("input", {"id": "validateCode"}))!=0:
                    imgsrc=soup.find('img')['src'].encode('utf-8')
                    imageUrl='http://dbpub.cnki.net/grid2008/dbpub/'+imgsrc
                    imageFile= cStringIO.StringIO(urllib.urlopen(imageUrl).read())
                    img = Image.open(imageFile)
                    symbols=''.join(cracker.crack(img, './codelib'))
                    submitUrl='http://dbpub.cnki.net/grid2008/dbpub/'+soup.find('form')['action'].encode('utf-8')
                    submitRequest= urllib2.Request(submitUrl, data=urllib.urlencode({'validateCode':symbols,'submit':u'提交'}))
                    submitResponse = urllib2.urlopen(submitRequest)
                    contentPage = submitResponse.read()
                    soup=BeautifulSoup(contentPage)
                    if len(soup.findAll("input", {"id": "validateCode"}))!=0:
                        self.writeLog("Validate Fail")
                    else:
                        self.writeLog("Validate Success")

                self.outputPage(contentPage)
                crawledCount=crawledCount+1
                # Throttle requests slightly between content pages.
                time.sleep(0.1)
            if self.ExtraOutInfo:
                print 'parsing ' + name + ' with '+ str(pageNum) + ' pages finished, ' + str(pageCounts-pageNum) + ' pages left'
            pageNum=pageNum+1
        if self.ExtraOutInfo:
            print name + ' parse completed'