def Morphology(self, frame, kernel):
    """Detect a ball in *frame* via luma thresholding + morphology + CNN.

    Converts the frame to YUV, thresholds the Y (luma) channel to a white
    mask, cleans it morphologically and classifies each contour's bounding
    box with the Caffe-style net held on *self*.

    NOTE(review): the *kernel* parameter is never used — the method uses
    self.kernel_perto / self.kernel_perto2 instead; confirm intent.

    Returns:
        (frame, cx, cy, radius) for the first contour classified as 'Ball',
        or (frame, 0, 0, 0) when none is found.
    """
    YUV_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2YUV)
    # Bright pixels (luma 200-255) are candidate "white" ball regions.
    white_mask = cv2.inRange(YUV_frame[:, :, 0], 200, 255)
    # cv2.imshow('mask',white_mask)
    mask = cv2.morphologyEx(white_mask, cv2.MORPH_OPEN, self.kernel_perto)
    mask = cv2.morphologyEx(mask, cv2.MORPH_DILATE, self.kernel_perto2, 1)
    ret, th1 = cv2.threshold(mask, 25, 255, cv2.THRESH_BINARY)
    contours, hierarchy = cv2.findContours(th1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        # Pass the cropped images to the classifier -----------------------
        type_label, results = classify(cv2.cvtColor(frame[y:y + h, x:x + w], cv2.COLOR_BGR2RGB), self.net, self.transformer, mean_file=self.mean_file, labels=self.labels, batch_size=None)
        # -----------------------------------------------------------------
        # print results, type_label
        # cv2.imshow('janela',images[0])
        if type_label == 'Ball':
            # Center of the bounding box; (w + h) / 4 approximates the radius.
            return frame, x + w / 2, y + h / 2, (w + h) / 4
    # =================================================================
    return frame, 0, 0, 0
def get_move(view):
    """Decide the next move and whether to eat the plant on this square.

    Observes the plant (if any) several times with the classifier and eats
    it when the majority of observations say it is edible.

    Args:
        view: game interface view exposing GetPlantInfo() / GetImage().
    Returns:
        (move, eat): move is a random int in [0, 4]; eat is a bool.
    """
    global svc
    eat = False
    # 0. Land in square, check if there is a plant
    hasPlant = view.GetPlantInfo() == game_interface.STATUS_UNKNOWN_PLANT
    if hasPlant:
        # 1. Decide how many times to observe
        numobs = 5
        # 2. Classify the plant that many times and vote
        ispoisonous = 0
        build_svm()
        for i in xrange(numobs):
            ispoisonous += classify(view.GetImage())
        # ispoisonous = svc.predict(view.GetImage)[0]
        # BUG FIX: in Python 2 `ispoisonous / numobs` is integer division, so
        # the ratio could only exceed 0.5 on a unanimous vote; force float
        # division so a simple majority suffices.
        eat = (float(ispoisonous) / numobs) > 0.5
    # 3. Decide where to go (random direction; randint is inclusive of 4)
    move = random.randint(0, 4)
    # 4. Execute move
    return (move, eat)
def sat_finder(name):
    """Classify every read in ``<name>.sat`` into a repeat subfamily.

    Loads the subfamily/k-mer models, classifies each read, tallies the
    per-subfamily counts (slot 14 = unclassified) and shows two plots
    relating prediction confidence to how repetitive each read is.

    Args:
        name: basename of the ``.sat`` reads file.
    Returns:
        numpy array of 15 counts (14 subfamilies + unclassified).
    """
    print("calculating distribution for " + name)
    # load and process model files
    subfamilies = load_subfamilies("subfamilies.txt")
    mers = load_mers("24mers.txt")
    probs = get_probs(mers, subfamilies)
    reads = load_reads(name + ".sat")
    # BUG FIX: dict.keys() returns a view in Python 3, which does not
    # support `+=`; materialize it as a list before extending below.
    lookup = list(subfamilies.keys())
    # classify each read
    num = np.zeros(15)
    prob_list = [[] for _ in range(14)]   # per-subfamily confidences
    all_list = []                          # confidence of every read
    len_list = []                          # repetitive fraction of every read
    len_sub = []                           # ... of classified reads only
    len_uncat = []                         # ... of unclassified reads only
    for read in reads:
        pred = classify(read.seq, probs)
        # pred[0] == 1/14 exactly means a uniform posterior -> unclassified.
        if pred[0] != 0.071428571428571425:
            best = np.argmax(pred)
            num[best] += 1
            prob_list[best].append(pred[best])
            all_list.append(pred[best])
            # fraction of the read that is repetitive (non-'N' bases)
            length = float(len(read.seq) - read.seq.count('N')) / len(read.seq)
            len_sub.append(length)
            len_list.append(length)
        else:
            num[14] += 1
            all_list.append(pred[0])
            length = float(len(read.seq) - read.seq.count('N')) / len(read.seq)
            len_uncat.append(length)
            len_list.append(length)
    lookup += ["Unclassified"]
    # confidence vs. repetitive fraction, one dot per read
    plt.scatter(len_list, all_list, s=[.1] * len(all_list), c=np.random.rand(len(all_list)))
    plt.xlabel('Fraction of read that is repetitive')
    plt.ylabel('Confidence in subfamily prediction')
    plt.show()
    # stacked histogram: unclassified vs. classified reads
    plt.hist([len_uncat, len_sub], 10, (0, 1), stacked=True)
    plt.xlabel('Fraction of read that is repetitive')
    plt.ylabel('Number of reads')
    plt.show()
    return num
def getUsersToTweet():
    """Return the search results whose text classifies as negative.

    Also prints the screen name of each matching result's author.
    """
    negative_results = []
    for result in searchForNegativeTweets(api, "\"depressed\""):
        if classify(result.text) < 0:
            negative_results.append(result)
            print((result.user).screen_name)
    return negative_results
def classify_reconcile(name):
    """Build a single-candidate reconciliation response for *name*.

    The candidate's display name is the classifier's output; score and
    match are fixed, and the type marks it as a demographics/race entity.
    """
    candidate = {
        "id": "My id",
        "name": classify(name),
        "score": 100,
        "match": True,
        "type": [{"id": "/type/demographics", "name": "Race"}],
    }
    return [candidate]
def Morphology(self, frame, white_mask, kernel, kernel2, k):
    """Clean *white_mask* morphologically, crop it by distance band *k*,
    and classify each remaining contour's crop with the CNN to find a ball.

    Args:
        frame: original BGR frame (used for classification crops).
        white_mask: binary mask of bright pixels.
        kernel, kernel2: structuring elements for open / dilate.
        k: distance band (1=near, 2=medium, 3=far, 4=very far) selecting
           which image region to blank out.
    Returns:
        (frame, cx, cy, radius, mask) for the first 'Ball' contour, else
        (frame, 0, 0, 0, mask).
    """
    start3 = time.time()
    contador = 0  # contour counter (diagnostic only)
    # cv2.imshow('mask',white_mask)
    mask = cv2.morphologyEx(white_mask, cv2.MORPH_OPEN, kernel)
    mask = cv2.morphologyEx(mask, cv2.MORPH_DILATE, kernel2, 1)
    # Near morphology (k == 1): crop out the top part
    if k == 1:
        mask[0:200, :] = 0
    # Medium morphology (k == 2): crop out the bottom part
    if k == 2:
        mask[650:, :] = 0
    # Far morphology (k == 3): crop out the bottom part
    if k == 3:
        mask[450:, :] = 0
    # Very far morphology (k == 4): crop out the bottom part
    if k == 4:
        mask[350:, :] = 0
    ret, th1 = cv2.threshold(mask, 25, 255, cv2.THRESH_BINARY)
    # OpenCV 3 returns (image, contours, hierarchy); OpenCV 4 returns
    # (contours, hierarchy) — handle both.
    try:
        _, contours, _ = cv2.findContours(th1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    except:
        contours, _ = cv2.findContours(th1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        contador = contador + 1
        x, y, w, h = cv2.boundingRect(cnt)
        # Pass the cropped images to the classifier -----------------------
        type_label, results = classify(cv2.cvtColor(
            frame[y:y + h, x:x + w], cv2.COLOR_BGR2RGB),
            self.net,
            self.transformer,
            mean_file=self.mean_file,
            labels=self.labels,
            batch_size=None)
        # -----------------------------------------------------------------
        # print results, type_label
        # cv2.imshow('janela',images[0])
        if type_label == 'Ball':
            # bounding-box center; (w + h) / 4 approximates the radius
            return frame, x + w / 2, y + h / 2, (w + h) / 4, mask
    # =================================================================
    # print "CONTOURS = ", time.time() - start
    return frame, 0, 0, 0, mask
def output_proc(results):
    """Process OCR results: classify the document type and print the card.

    classify(results) maps the OCR output to a document type:
    1 = ID card, 2 = driving license, 3 = student card.

    IMPROVEMENT: classify() was called up to three times and each branch
    built a throw-away placeholder card that was immediately overwritten;
    classify once and construct only the parsed card.
    """
    card_type = classify(results)
    if card_type == 1:
        card = output_proc_idCard(results)
        card.print_idCard()
    elif card_type == 2:
        card = output_proc_drivingLicense(results)
        card.print_DrivingLicense()
    elif card_type == 3:
        card = output_proc_studentCard(results)
        card.print_StudentCard()
def handwritingClassTest():
    """Classify every test digit with k-NN (k=5), print per-sample results,
    report the total error count/rate, and save all predictions.

    Python 2 code (print statements). Uses loadTrainData/loadTestData/
    loadTestResult/classify/saveResult defined elsewhere in the project.
    """
    trainData,trainLabel=loadTrainData()
    testData=loadTestData()
    testLabel=loadTestResult()
    m,n=shape(testData)
    errorCount=0
    resultList=[]
    for i in range(m):
        # k-NN vote with 5 neighbours
        classifierResult = classify(testData[i], trainData, trainLabel, 5)
        resultList.append(classifierResult)
        # NOTE(review): prints the constant total m each iteration — probably meant i
        print "time %d" % (m)
        print "the classifier came back with: %d, the real answer is: %d" % (classifierResult, testLabel[0,i])
        if (classifierResult != testLabel[0,i]):
            errorCount += 1.0
            print "Wrong"
    print "\nthe total number of errors is: %d" % errorCount
    print "\nthe total error rate is: %f" % (errorCount/float(m))
    saveResult(resultList)
def post(self):
    """Handle a sentiment request: either learn a user-supplied label or
    classify the text and return the sentiment as JSON.

    BUG FIXES:
    - `output` was unbound (UnboundLocalError -> 500) when `text` was empty
      or when an unknown sentiment value hit the error branch; guard early
      and return after self.error().
    - `user_sentiment == False` replaced with `is False` (only the
      get_argument default can be the False singleton).
    """
    text = self.get_argument('text')
    user = self.request.remote_ip
    user_sentiment = self.get_argument('sentiment', False)
    remaining = self.limit(user)
    sentiment_key = {0: 'negative', 2: 'neutral', 4: 'positive'}
    if text == '':
        # Previously fell through to self.write(output) with output unbound.
        self.error(400, 'method not implemented')
        return
    if user_sentiment in ('positive', 'negative', 'neutral'):
        # user supplied a label -> train on it
        self.learn(user, text, user_sentiment)
        output = json.dumps({'ratelimit-remaining': self.limit(user)})
    elif user_sentiment is False:
        # no label supplied -> classify
        score = classify(text)
        sentiment = {'sentiment': sentiment_key[score]}
        # NOTE(review): zadd argument order follows legacy redis-py
        # (name, score, member) — confirm against the installed client.
        db.zadd('requests:' + user, time.time(), time.time())
        self.limit(user)
        output = json.dumps(sentiment)
    else:
        # unknown sentiment value
        self.error(400, 'method not implemented')
        return
    self.write(output)
def writeToFile(searchResults):
    """Append cleaned, positive-classified tweets to positiveTweets.txt.

    Words containing mentions, links, hashtags or retweet markers are
    dropped before writing.

    BUG FIXES:
    - The original tested `text not in fileToRead` against an *open file
      object*: that compares whole lines and exhausts the iterator after
      the first membership test, so dedup silently stopped working.
      Read the file's contents once and do a substring check instead.
    - The same file was held open for read and append simultaneously;
      use `with` so both handles are closed deterministically.
    """
    with open("positiveTweets.txt", "r") as infile:
        existing = infile.read()
    with open("positiveTweets.txt", "a") as outfile:
        for searchResult in searchResults:
            if searchResult.text not in existing:
                if (classify(searchResult.text) > 0.5):
                    for word in (searchResult.text).split(" "):
                        # skip mentions, links, hashtags and RT markers
                        if ("@" in word or "http" in word or "www" in word
                                or "#" in word or "RT" in word):
                            continue
                        outfile.write(word + " ")
                    outfile.write("\n")
def captchaSolve(solve_for):
    """Solve a 3x3 image captcha by classifying each tile with the CNN.

    Screenshots the captcha area, slices it into 9 tiles, classifies each
    tile, and clicks (with a small random offset) every tile whose label
    matches *solve_for* with confidence > 55; finally clicks "next".

    IMPROVEMENTS: no longer shadows the builtins `hash` and `file`; the
    loop-invariant tile size is computed once.
    """
    # tile size is invariant — compute once instead of per tile
    crop_s = int((s - m * 2) / 3)
    for y in range(3):
        for x in range(3):
            crop_x = int(x * m + x * crop_s)
            crop_y = int(y * m + y * crop_s)
            im = pyautogui.screenshot(region=(captcha_pos[0], captcha_pos[1], s, s))
            # slice out this captcha tile
            tile = im.crop([crop_x, crop_y, crop_x + crop_s, crop_y + crop_s])
            tile_id = rd.getrandbits(128)
            tile_path = './runtime/%032x.jpg' % tile_id
            tile.save(tile_path)
            classified = classify(tile_path)  # classify the tile with the neural net
            if not keepImages:
                os.remove(tile_path)
            if solve_for == classified[0] and classified[1] > 55:
                offset = int(random() * 80 - 40)  # random click offset
                block_x = captcha_block[0] + crop_x + offset
                block_y = captcha_block[1] + crop_y + offset
                mouse.click(block_x, block_y)  # click the tile
                time.sleep(random() * (2 / 3))
    # NOTE(review): assumed to run once after all tiles — confirm placement
    mouse.click(next_button[0], next_button[1])  # click to finish
if __name__ == "__main__":
    # CLI entry point (Python 2):
    #   1 argument  -> train a decision tree on the file and print it
    #   2 arguments -> train on the first file, classify the second, print results
    #print "ok", len(sys.argv)
    if len(sys.argv) == 2:
        ftrain = sys.argv[1]
        if(not fExists(ftrain)):
            sys.exit(0)
        attr, inst, clas = readData(ftrain)
        #print [ins[cls] for ins in inst]
        dtree = createDtree(attr, inst, clas)
        printTree(dtree, "")
        #print attr, inst, clas
        #print dtree
    elif len(sys.argv) == 3:
        ftrain = sys.argv[1]
        ftest = sys.argv[2]
        if((not fExists(ftrain)) or (not fExists(ftest))):
            sys.exit(0)
        attr, inst, clas = readData(ftrain)
        classif = readClas(ftest, attr)
        dtree = createDtree(attr, inst, clas)
        printTree(dtree, "")
        # classify the test instances against the trained tree
        testRes = classify(classif, dtree)
        for i in range(len(testRes)):
            print "Test case %d: %s" % (i + 1, testRes[i])
    else:
        print "Just a little helper:\n\t- Arguments needed. If you want to both train and test, give me both files, in that order.\n\t- Otherwise, just give me the training file."
        sys.exit(0)
image = cutBorder(image)
# -------------------------------
# =====>> STEP 2: Identify and Classify Objects
# -------------------------------
# Contours - identify objects
objs_yes, objs_not = identifyObjects(image)
# BUG FIX: the original used `not len(objs_yes) is 0`, which compares
# identity with an int literal (works only by CPython small-int caching);
# a plain truthiness test is the correct, equivalent check.
if objs_yes:
    # Calculate attributes for the deep-learning classifier
    attributes(image, objs_yes)
    # classify the objects
    classify(objs_yes)
    objs_yes = sortObjs(objs_yes)
# -------------------------------
# =====>> STEP 3: Movement
# -------------------------------
for obj in objs_yes:
    box_num = app.whichBox(obj.name)
    # Draw the contours and the center of mass
    img_draw = drawCnts(image, objs_yes, objs_not, thickness=3, mark=obj)  # ,attributes=True)
    show(img_draw, 'out')
def handle_message(message):
    """Route an incoming Telegram message to the matching reply.

    Text messages get text replies; voice messages are transcribed and get
    voice replies (via get_voice). Most intent classes are answered with a
    random phrase from a canned list, so they are table-driven here instead
    of the original ~30-branch if/elif chain.

    BUG FIX (voice path): when speech recognition failed, the original sent
    the error message but then still referenced the unbound `command`
    (NameError, silently swallowed); now it returns explicitly.
    """
    # intent -> list of canned answers; one is picked at random
    simple_answers = {
        'greetings': answer_greetings,
        'greetings_mood': answer_greetings_mood,
        'mood': answer_mood,
        'thanks': answer_thanks,
        'action': answer_action,
        'status_bad': answer_status_bad,
        'status_good': answer_status_good,
        'how_old': answer_how_old,
        'who_are_you': answer_who_are_you,
        'other_bots': answer_other_bots,
        'your_master': answer_your_master,
        'creator': answer_creator,
        'bye': answer_bye,
    }
    if message.text:
        command = message.text
        try:  # NOTE: original swallowed all handler errors; behaviour kept
            if command == '/start':
                bot.send_message(message.chat.id, start_text)
            elif command == '/help':
                bot.send_message(message.chat.id, help_text)
            else:
                predicted_class = classify(command)
                if predicted_class == 'weather':
                    output, speech = get_weather(command)
                    bot.send_message(message.chat.id, output)
                elif predicted_class == 'cinema':
                    movie_start(message, message.text)
                elif predicted_class == 'philosophy':
                    bot.send_message(message.chat.id, answer_philosophy[0])
                elif predicted_class == 'help':
                    bot.send_message(message.chat.id, help_text)
                elif predicted_class == 'joke':
                    answer = random.choice(answer_jokes)
                    bot.send_message(message.chat.id, answer)
                    # the second joke has an accompanying audio punchline
                    if answer == answer_jokes[1]:
                        bot.send_voice(message.chat.id, open('joke.mp3', 'rb'))
                elif predicted_class in simple_answers:
                    bot.send_message(message.chat.id,
                                     random.choice(simple_answers[predicted_class]))
                else:
                    bot.send_message(message.chat.id,
                                     'Извините, я вас не понимаю, но я учусь')
        except:
            pass
    else:
        # voice message: download the audio and transcribe it
        file_info = bot.get_file(message.voice.file_id)
        file = requests.get('https://api.telegram.org/file/bot{0}/{1}'.format(
            token, file_info.file_path))
        try:
            command = speech_to_text(bytes=file.content)
        except:
            bot.send_message(
                message.chat.id,
                'Распознование голоса не удалось, попробуйте снова')
            return  # BUG FIX: `command` is unbound past this point
        try:
            predicted_class = classify(command)
            if predicted_class == 'weather':
                output, speech = get_weather(command)
                voice = get_voice(speech)
                bot.send_message(message.chat.id, output)
                bot.send_voice(message.chat.id, voice)
            elif predicted_class == 'cinema':
                movie_start(message, command)
            elif predicted_class == 'philosophy':
                bot.send_voice(message.chat.id, get_voice(answer_philosophy[0]))
            elif predicted_class == 'help':
                bot.send_message(message.chat.id, help_text)
            elif predicted_class == 'joke':
                answer = random.choice(answer_jokes)
                bot.send_voice(message.chat.id, get_voice(answer))
                if answer == answer_jokes[1]:
                    bot.send_voice(message.chat.id, open('joke.mp3', 'rb'))
            elif predicted_class in simple_answers:
                answer = random.choice(simple_answers[predicted_class])
                bot.send_voice(message.chat.id, get_voice(answer))
            else:
                answer = 'Извините, я вас не понимаю, но я учусь'
                bot.send_voice(message.chat.id, get_voice(answer))
        except:
            pass
from node import *
from factor import *
from prepare import *
from netzob.all import *
from classify import *

if __name__ == "__main__":
    # Read every message from the TFTP capture and collect its raw bytes
    # as the initial cluster fed to the classifier. (Python 2 script.)
    messages = PCAPImporter.readFile("tftp.pcap").values()
    start_cluster = []
    for m_temp in messages:
        r_temp = Raw(m_temp.data)
        b_temp = r_temp.value.toBytes()
        start_cluster.append(b_temp)
    spart = classify(start_cluster)
    spart.do_classify()
    # Print each resulting cluster, blank-line separated
    for r in spart.result:
        for j in r:
            print repr(j)
        print ""
        print ""
#import needed packages from __future__ import division import numpy as np import random as rd import pandas as pd import numpy.random as rd from classify import * from find_resonance import * folder = '/Users/talikhain/Desktop/ClassificationTest/' N = 10 #number of clones #a pandas dataframe with the barycentric aei elements of the best fit (use this to classify into some of the categories) names_bary = pd.read_pickle('/Users/talikhain/Desktop/ClassificationTest/tno_bary.pkl') num = len(names_bary) for ind in range(num): tno = names_bary['tno'].values[ind] a = names_bary['ab'].values[ind] e = names_bary['eb'].values[ind] i = names_bary['incb'].values[ind] node = names_bary['lanb'].values[ind] peri = names_bary['aopb'].values[ind] M = names_bary['Mb'].values[ind] best_bary = [a, e, i, node, peri, M] print('tno #', ind, tno) classify(folder, tno, N, best_bary) find_resonance(folder, tno, N)
def Morphology(self, frame, white_mask, kernel, kernel2, k):
    """Variant of the ball-finding morphology pipeline that also saves every
    classified crop to disk.

    Near/very-far bands (k in {1, 4}) use a dilate-erode-dilate sequence
    with an extra 35x35 kernel; other bands dilate then open. The mask is
    then cropped by distance band, thresholded, and each contour's crop is
    classified; the first 'Ball' hit is returned.

    Returns:
        (frame, cx, cy, radius, mask) on a ball hit, else (frame, 0, 0, 0, mask).
    """
    start3 = time.time()
    contador = 0  # contour counter (diagnostic only)
    # variable holding the full mask image
    frametemp = white_mask
    if k == 1 or k == 4:
        # cv2.imshow('mask',white_mask)
        kernel_teste = np.ones((35, 35), np.uint8)
        mask = cv2.morphologyEx(white_mask, cv2.MORPH_DILATE, kernel_teste, 1)
        mask = cv2.morphologyEx(mask, cv2.MORPH_ERODE, kernel)
        mask = cv2.morphologyEx(mask, cv2.MORPH_DILATE, kernel2, 1)
    else:
        mask = cv2.morphologyEx(white_mask, cv2.MORPH_DILATE, kernel2, 1)
        mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
    # Near morphology (k == 1): crop out the top part
    if k == 1:
        mask[0:300, :] = 0
    # Medium morphology (k == 2): crop out the top part
    if k == 2:
        mask[0:180, :] = 0
        # mask[720:,:]=0
    # Far morphology (k == 3): crop out the bottom part
    if k == 3:
        mask[350:, :] = 0
    # Very far morphology (k == 4): crop out the bottom part
    if k == 4:
        mask[300:, :] = 0
    ret, th1 = cv2.threshold(mask, 25, 255, cv2.THRESH_BINARY)
    # OpenCV 3 returns (image, contours, hierarchy); OpenCV 4 returns
    # (contours, hierarchy) — handle both.
    try:
        _, contours, _ = cv2.findContours(th1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    except:
        contours, _ = cv2.findContours(th1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        contador = contador + 1
        x, y, w, h = cv2.boundingRect(cnt)
        # Pass the cropped images to the classifier -----------------------
        type_label, results = classify(cv2.cvtColor(
            frame[y:y + h, x:x + w], cv2.COLOR_BGR2RGB),
            self.net,
            self.transformer,
            mean_file=self.mean_file,
            labels=self.labels,
            batch_size=None)
        # -----------------------------------------------------------------
        # print results, type_label
        # cv2.imshow('janela',images[0])
        # Save every classified crop for later dataset building
        cv2.imwrite(
            "/home/fei/Documents/frames_extracted_by_DNN/" +
            str(rd.random()) + "image.png", frame[y:y + h, x:x + w])
        if type_label == 'Ball':
            return frame, x + w / 2, y + h / 2, (w + h) / 4, mask
    # =================================================================
    # print "CONTOURS = ", time.time() - start3
    return frame, 0, 0, 0, mask
from scapy.all import *
from netzob.all import *
from classify import *

# Read the TFTP capture and strip the first 32 bytes (link/IP/UDP headers)
# of each packet to get the application payload. (Python 2 script.)
tftp = rdpcap("tftp.pcap")
start_str = []
for t in tftp:
    ss = str(t)
    ss1 = ss[32:]
    start_str.append(ss1)
print("E ", len(start_str))
spart = classify(start_str)
spart.do_classify()
results = spart.link
# Write 1-based cluster member indices to thefile.txt, one cluster per
# blank-line-separated group.
file_object = open('thefile.txt', 'w+')
for r in results:
    for t in r:
        file_object.write(repr(t + 1))
        file_object.write(' ')
        print t
    file_object.write('\r\n')
    file_object.write('\r\n')
    file_object.flush()
    print ""
    print ""
file_object.close()
# Find the most recent "<run>-<nTrain>tr-<nMerge>mr" checkpoint among
# currentFiles, restore it, and evaluate the guesser on the dev set.
for fi in currentFiles:
    mtch = re.search("%s-(\d+)tr-(\d+)mr" % run, fi)
    if mtch:
        nTrain, nMerge = mtch.groups()
        nTrain, nMerge = int(nTrain), int(nMerge)
        # lexicographic [nMerge, nTrain] comparison: merges take precedence
        # NOTE(review): assumes `latest` was initialised earlier (e.g. [0, 0])
        if [nMerge, nTrain] > latest:
            latest = [nMerge, nTrain]
nMerge, nTrain = latest
print("Latest target file found was", nTrain, nMerge)
data = restore(inflect, run, nTrain, nMerge)
nPolicies = len(data.policyMembers())
nReferences = len(data.referenceMembers())
print(nPolicies, "policies", nReferences, "references")
guesser = buildGuesser(data, inflect)
guesser.load_weights("data/%s/guesser.h5" % run)
results = classify(dev, data, inflect, guesser, filterGuess=True)
# accuracy: exact-match of predicted form against the gold form
acc = 0
total = 0
for inst, res in zip(dev, results):
    (lemma, form, fts) = inst
    if form == res:
        acc += 1
    total += 1
print(acc, "/", total, acc / total)
def _test_row_from_result(payload, classifyResult):
    """Extract [value list, attack type] from one classified payload, or
    None when no values could be extracted."""
    lannum = readLanguage(classifyResult)
    dictList = excmodule(lannum, classifyResult, payload)
    valList = []
    if dictList:
        valList = [d['value'] for d in dictList if d['value']]
    if valList:
        return [valList, classifyResult['attackType']]
    return None


def testDataMake():
    """Build the test feature matrix from captured packets and save it.

    Classifies each payload (directly via newClassify when `test` is 1,
    host-based classify otherwise), extracts attribute values, runs the
    special-character featurizer and saves the result as a timestamped
    .npy file under numpy/test/.

    IMPROVEMENT: the three near-identical per-payload loops are collapsed
    into one helper (_test_row_from_result); the debug branch differs only
    in the failure-reporting block, now guarded per payload.
    """
    debug = 1  # when 1, report the packet whenever classification fails
    test = 1   # 1: classify language/attack type directly; 0: classify by host
    count = 0  # number of classification failures seen (debug reporting)
    payloads = pcapFileRead()
    nPayloads = []
    if test:
        for payload in payloads:
            classifyResult = newClassify(payload)
            if debug == 1 and not checkResult(classifyResult, "unknown"):
                count += 1
                debugInfo(payload, classifyResult, count)
            row = _test_row_from_result(payload, classifyResult)
            if row:
                nPayloads.append(row)  # [value list, attack type]
    else:
        # host-based classification path
        for payload in payloads:
            row = _test_row_from_result(payload, classify(payload))
            if row:
                nPayloads.append(row)  # [value list, attack type]
    # Featurize via the special-character processor and save as .npy
    timestr = time.strftime("%Y%m%d-%H%M%S")
    afterSpecial = [runSpecial(p[0], p[1]) for p in nPayloads]
    npArray = np.array(afterSpecial)
    np.save('numpy/test/' + 'test_' + timestr, npArray)  # npy save
def _train_row_from_result(payload, classifyResult, target):
    """Extract [value list, target] from one classified payload; returns
    None when no values were extracted or the attack type != target."""
    lannum = readLanguage(classifyResult)
    dictList = excmodule(lannum, classifyResult, payload)
    valList = []
    if dictList:
        valList = [d['value'] for d in dictList if d['value']]
    if valList and classifyResult['attackType'] == target:
        return [valList, target]
    return None


def trainDataMake():
    """Build per-file training feature matrices from the pcap folder.

    For each .pcap under pcapFolder/train, infers the target attack type
    (XSS / SQLI) from the file name, classifies every payload, keeps only
    rows matching the target, featurizes them with the special-character
    processor and saves a numbered .npy under numpy/train/.

    IMPROVEMENT: the three duplicated per-payload loops are collapsed into
    one helper (_train_row_from_result).
    """
    debug = 1  # when 1, report the packet whenever classification fails
    test = 1   # 1: classify language/attack type directly; 0: classify by host
    count = 0  # number of classification failures seen (debug reporting)
    fileCount = 0
    fileLoc = "pcapFolder/train"
    pcapFiles = [f for f in os.listdir(fileLoc) if ".pcap" in f]
    for pFile in pcapFiles:
        payloads = pcapSingleRead(fileLoc, pFile)
        # which attack this training file represents, inferred from its name
        target = ""
        if "xss" in pFile or "XSS" in pFile:
            target = "XSS"
        elif "sql" in pFile or "SQL" in pFile:
            target = "SQLI"
        if target == "":
            print("None target pcap file")
            print(fileLoc + "/" + pFile)
            continue
        fileCount = fileCount + 1
        nPayloads = []
        if test:
            for payload in payloads:
                classifyResult = newClassify(payload)
                if debug == 1 and not checkResult(classifyResult, "unknown"):
                    count += 1
                    debugInfo(payload, classifyResult, count)
                row = _train_row_from_result(payload, classifyResult, target)
                if row:
                    nPayloads.append(row)  # [value list, attack type]
        else:
            # host-based classification path
            for payload in payloads:
                row = _train_row_from_result(payload, classify(payload), target)
                if row:
                    nPayloads.append(row)  # [value list, attack type]
        # Featurize via the special-character processor and save as .npy
        afterSpecial = [runSpecial(p[0], p[1]) for p in nPayloads]
        npArray = np.array(afterSpecial)
        np.save('numpy/train/' + target + '_' + str(fileCount) + '_train',
                npArray)  # npy save
def race_lookup(name):
    """Return the classifier's race prediction for *name*."""
    prediction = classify(name)
    return prediction
def build_haplexD(trainingHap, trainingExp, testHap, k, n_clusters):
    """Build a haplotype/expression tree and classify the test haplotypes.

    BUG FIX: the original call was
        classify(nodes, k, metric="chi2", testHap, n_clusters)
    which is a SyntaxError (positional arguments after a keyword argument).
    The positional arguments are kept in order with the keyword moved last.
    NOTE(review): assumes classify's positional order is
    (nodes, k, testHap, n_clusters) — confirm against its definition.
    """
    t = Tree(trainingHap, trainingExp)
    nodes = t.leaves()
    predicted = classify(nodes, k, testHap, n_clusters, metric="chi2")
    return predicted
def _append_line(path, text):
    """Append one line of text to the file at *path*."""
    with open(path, "a") as filep:
        filep.write(text + "\n")


def cross_validation(train_filename, n_folds, outfilename):
    """Run stratified k-fold cross-validation and log per-fold metrics.

    Reads a CSV of features + trailing groundtruth label, splits it with
    StratifiedKFold, classifies each fold and appends weighted precision/
    recall/F1 and accuracy to the ../results/gender/ metric files.

    BUG FIX: the metric-file writes prefixed with `condition` originally
    appeared *before* the fold loop and referenced `dataset`/`predictions`
    before they existed (guaranteed NameError). They are now written inside
    the loop, once per fold, replacing the duplicate unprefixed writes.
    NOTE(review): `outfilename` is accepted for interface compatibility but
    unused, as in the original.
    """
    utils.print_success("Cross validation")
    filename = utils.abs_path_file(train_filename)
    # condition name = training file's basename without extension
    condition = train_filename.split(".")[0].split(os.sep)[-1]
    features = []
    groundtruths = []
    with open(filename, "r") as filep:
        for line in filep:
            line = line[:-1].split(",")
            features.append([float(x) for x in line[0:-1]])
            groundtruths.append(line[-1])
    features = np.array(features)
    groundtruths = np.array(groundtruths)
    skf = StratifiedKFold(n_splits=n_folds)
    i = 0
    cur_fold = 0
    for train, test in skf.split(features, groundtruths):
        cur_fold += 1
        utils.print_success("Iteration " + str(i) + "\tFold " + str(cur_fold))
        dataset = {
            "train_features": features[train],
            "train_groundtruths": groundtruths[train],
            "test_features": features[test],
            "test_groundtruths": groundtruths[test],
        }
        predictions = classify(data=dataset)
        # compute each metric once per fold
        precision = precision_score(
            dataset["test_groundtruths"], predictions, average='weighted')
        recall = recall_score(
            dataset["test_groundtruths"], predictions, average='weighted')
        f1 = f1_score(
            dataset["test_groundtruths"], predictions, average='weighted')
        accuracy = accuracy_score(dataset["test_groundtruths"], predictions)
        print("\tPrecision weighted\t" + str(precision))
        print("\tRecall weighted\t" + str(recall))
        print("\tF1 weighted\t" + str(f1))
        print("\tAccuracy\t" + str(accuracy))
        _append_line("../results/gender/precision.txt",
                     condition + ";" + str(precision))
        _append_line("../results/gender/recall.txt",
                     condition + ";" + str(recall))
        _append_line("../results/gender/f1.txt",
                     condition + ";" + str(f1))
        _append_line("../results/gender/accuracy.txt",
                     condition + ";" + str(accuracy))
from classify import * from translate import * answer = ["Привет!", "Здравствуй", "Приветствую!", "Здравствуйте"] answer_greetings_mood = ["Привет. Пойдет. Как у тебя?", "Здравствуй. Хорошо. Как у тебя?", "Приветствую. Нормально. Как у тебя?", "Здарова. Неплохо. Как у тебя?", "Здравствуйте. Все отлично. Как у вас?"] answer_mood = ["Замечательно, спасибо!!", "Хорошо. Как у тебя дела?", "Все нормально. Как у вас?", "Все отлично. Как у тебя?", "Пойдет. А у тебя?"] answer_philosophy = ['42'] answer_action = ['Разговариваю с тобой', 'Существую', 'Тихо жду здесь пока у меня что-то спросят'] answer_status_good = ['Рада слышать', 'Круто', 'Отлично!', 'Я очень рада :)'] command = '1' while True: command = input('Введите команду(-1 чтобы закончить разговор): ') if command == '-1': break predicted_class = classify(command) if(predicted_class == 'weather'): print(get_weather(command)) elif(predicted_class == 'greetings'): print(answer[random.randint(0,(len(answer)-1))]) elif(predicted_class == 'greetings_mood'): print(answer_greetings_mood[random.randint(0,(len(answer_greetings_mood)-1))]) elif(predicted_class == 'mood'): print (answer_mood[random.randint(0,(len(answer_mood)-1))]) elif(predicted_class == 'philosophy'): print(answer_philosophy[0]) elif(predicted_class == 'action'): print (answer_action[random.randint(0,(len(answer_action)-1))]) elif(predicted_class == 'status_good'): print (answer_status_good[random.randint(0,(len(answer_status_good)-1))]) elif(predicted_class == 'translate'):
# Evaluate the classifier over the labeled corpus, accumulating match
# count, score sum and zero-score count.
sum_score = 0
num_match = 0
num_docs = 0
num_zero = 0
start = time()
for doc in labeled_data:
    expected_cat = doc["Category"].lower()  # some of the labels are inconsistent in case
    new_message = doc["message"].lower()
    category = classify(my_idf, new_message, sim_func=my_sim_func,
                        num_similar=num_similar,
                        min_similarity=0.01 * min_similarity,
                        stemmed_database=stemmed_database,
                        segment=segment)
    # category presumably is (predicted_label, score) — TODO confirm
    # against classify()'s return value
    if category[0] == expected_cat:
        num_match += 1
    sum_score += category[1]
    if category[1] == 0:
        num_zero += 1
    num_docs += 1
end = time()
# Main information
match_percent = 100 * num_match / num_docs
fakes_path=fake_path, rebalance=rebalance_dataset, rebalance_size=rebalance_to, number_of_fakes=number_of_fakes, iterations=1000, norm=True, pca=True, retain_info=trait_variation, dim_red=None, smote_fct=SMOTE_fct, k_neighbors=k, adasyn=ADASYN_, stratkfold=strat_k_fold, repeated=multiples) results = classify(**args).svm_bottleneck() print("%.2f" % results[1][0] ) print("& %.2f" % results[1][1]) print("& %.2f" % results[1][2]) print("& %.2f" % results[1][3]) print("& %.2f" % results[1][4]) c, x_shape = get_pca_coef(multiples=multiples, path=path, weights=cnn_weights, pooling=pooling_type, smote_fct=SMOTE_fct, rotate=rotate, fakes=add_fakes,
def _send_random_text(chat_id, pool):
    """Send one uniformly random phrase from *pool* as a text message."""
    bot.send_message(chat_id, random.choice(pool))


def _send_random_voice(chat_id, pool):
    """Synthesize one uniformly random phrase from *pool*; send as voice."""
    bot.send_voice(chat_id, get_voice(random.choice(pool)))


def _handle_text(message):
    """Dispatch one text message to the matching intent reply (text replies).

    Exceptions are handled by the caller (best-effort bot).
    """
    chat_id = message.chat.id
    command = message.text
    if command == '/start':
        bot.send_message(chat_id, start_text)
        return
    if command == '/help':
        bot.send_message(chat_id, help_text)
        return
    predicted_class = classify(command)
    # Intents answered with one random phrase from a module-level pool.
    pools = {
        'greetings': answer_greetings,
        'greetings_mood': answer_greetings_mood,
        'mood': answer_mood,
        'thanks': answer_thanks,
        'action': answer_action,
        'status_bad': answer_status_bad,
        'status_good': answer_status_good,
        'how_old': answer_how_old,
        'who_are_you': answer_who_are_you,
        'other_bots': answer_other_bots,
        'your_master': answer_your_master,
        'creator': answer_creator,
        'bye': answer_bye,
    }
    if predicted_class in pools:
        _send_random_text(chat_id, pools[predicted_class])
    elif predicted_class == 'fines':
        bot.send_message(chat_id, "Введите ИИН")
        iin_check[chat_id] = True  # next message is treated as an IIN
    elif predicted_class == 'greetings_kaz':
        bot.send_message(chat_id, "салем")
    elif predicted_class == 'contacts':
        bot.send_message(chat_id, "Вы можете связаться с акиматом через следующие социальные сети: \n Facebook: https://www.facebook.com/astanaakimat/ \n Instagram: https://www.instagram.com/astana_akimat/ \n Twitter: https://twitter.com/astana_akimat")
    elif predicted_class == 'zhaloba':
        user_skip[chat_id] = True  # next message is the complaint body
        bot.send_message(chat_id, zhaloba_response)
    elif predicted_class == 'philosophy':
        bot.send_message(chat_id, answer_philosophy[0])
    elif predicted_class == 'help':
        bot.send_message(chat_id, help_text)
    elif predicted_class == 'news':
        bot.send_message(chat_id, returnNews())
    elif predicted_class == 'joke':
        answer = random.choice(answer_jokes)
        bot.send_message(chat_id, answer)
        if answer == answer_jokes[1]:
            # This particular joke has an audio punchline.
            bot.send_voice(chat_id, open('joke.mp3', 'rb'))
    else:
        bot.send_message(chat_id, 'Извините, я вас не понимаю, но я учусь')


def _handle_voice(message, command):
    """Dispatch one transcribed voice command (replies mostly by voice).

    Exceptions are handled by the caller (best-effort bot).
    """
    chat_id = message.chat.id
    predicted_class = classify(command)
    print(predicted_class)
    # Intents answered with a synthesized random phrase from a pool.
    # answer_philosophy has a single entry, so choice() is deterministic,
    # matching the original answer_philosophy[0].
    pools = {
        'greetings': answer_greetings,
        'greetings_mood': answer_greetings_mood,
        'mood': answer_mood,
        'philosophy': answer_philosophy,
        'action': answer_action,
        'status_good': answer_status_good,
        'status_bad': answer_status_bad,
        'how_old': answer_how_old,
        'who_are_you': answer_who_are_you,
        'thanks': answer_thanks,
        'other_bots': answer_other_bots,
        'your_master': answer_your_master,
        'creator': answer_creator,
        'bye': answer_bye,
    }
    if predicted_class in pools:
        _send_random_voice(chat_id, pools[predicted_class])
    elif predicted_class == 'weather':
        output, speech = get_weather(command)
        # Synthesize first: if get_voice fails, nothing is sent (as before).
        voice = get_voice(speech)
        bot.send_message(chat_id, output)
        bot.send_voice(chat_id, voice)
    elif predicted_class == 'greetings_kaz':
        bot.send_voice(chat_id, get_voice_kaz("salem"))
    elif predicted_class == 'contacts':
        bot.send_voice(chat_id, get_voice("Вы можете связаться с акиматом через следующие социальные сети"))
        bot.send_message(chat_id, "Facebook: https://www.facebook.com/astanaakimat/ \n Instagram: https://www.instagram.com/astana_akimat/ \n Twitter: https://twitter.com/astana_akimat")
    elif predicted_class == 'zhaloba':
        bot.send_voice(chat_id, get_voice(zhaloba_response))
        user_skip[chat_id] = True
    elif predicted_class == 'fines':
        bot.send_message(chat_id, "Введите ИИН")
        iin_check[chat_id] = True
    elif predicted_class == 'news':
        bot.send_voice(chat_id, get_voice(voiceNews()))
    elif predicted_class == 'joke':
        answer = random.choice(answer_jokes)
        bot.send_voice(chat_id, get_voice(answer))
        if answer == answer_jokes[1]:
            # This particular joke has an audio punchline.
            bot.send_voice(chat_id, open('joke.mp3', 'rb'))
    elif predicted_class == 'help':
        bot.send_message(chat_id, help_text)
    else:
        bot.send_voice(chat_id, get_voice('Извините, я вас не понимаю, но я учусь'))


def handle_message(message):
    """Top-level Telegram handler.

    Routes complaint follow-ups (user_skip) and IIN lookups (iin_check)
    first, then dispatches text messages to _handle_text and voice messages
    (downloaded and transcribed) to _handle_voice.

    NOTE(review): reconstructed from whitespace-mangled source; the original
    indentation was lost -- confirm branch nesting against the full file.
    """
    chat_id = message.chat.id
    if check_if_skip(message):
        # Previous message set user_skip: this message is the complaint
        # body.  Acknowledge in the user's modality and clear the flag.
        if message.text:
            bot.send_message(chat_id, zhaloba_ok)
        else:
            bot.send_voice(chat_id, get_voice(zhaloba_ok))
        user_skip[chat_id] = False
        return
    if check_if_iin(message):
        # IIN lookup stub: simulate a search, then always report not found.
        time.sleep(3)
        bot.send_message(chat_id, "Данные по ИИН: {} не найдены".format(message.text))
        iin_check[chat_id] = False
        return
    if message.text:
        try:
            _handle_text(message)
        except Exception:
            # Best-effort bot: the original swallowed every error here.
            # Narrowed from a bare 'except:' so KeyboardInterrupt and
            # SystemExit are no longer suppressed.
            pass
    else:
        # Voice message: download the audio payload and transcribe it.
        file_info = bot.get_file(message.voice.file_id)
        file = requests.get('https://api.telegram.org/file/bot{0}/{1}'.format(
            token, file_info.file_path))
        try:
            command = speech_to_text(bytes=file.content)
            print(command)
        except Exception:
            # "Распознование" (sic) preserved from the original user-facing
            # string.
            bot.send_message(chat_id, 'Распознование голоса не удалось, попробуйте снова')
        try:
            # If transcription failed above, 'command' is unbound and the
            # resulting NameError is swallowed here -- preserved from the
            # original control flow.
            _handle_voice(message, command)
        except Exception:
            pass
""" This finds the most probably satellite subfamily for each read in a fasta file """ from file_loading import * from utils import * from classify import * import numpy as np #load and process files subfamilies = load_subfamilies("subfamilies.txt") mers = load_mers("24mers.txt") probs = get_probs(mers, subfamilies) reads = load_reads("reads.fasta") lookup = subfamilies.keys() #classify each read for read in reads: pred = classify(read.seq, probs) subfamily = lookup[np.argmax(pred)] print(read.description + " " + subfamily)
# Sweep over 2..15 output classes for the retraining experiments.
class_counts = range(2, 15 + 1)

# Optional preprocessing stages, gated by module-level flags.
if CONVERT:
    convert(CONVERT_IMAGE_PATH)
if RESIZE:
    resize(RESIZE_IMAGE_PATH)

# Download the pretrained model (Google Inception V3 for our analysis)
if DOWNLOAD_MODEL:
    download_pretrained_model(MODEL)

if TRAIN:
    # Create graph from the pretrained model
    graph, bottleneck_tensor, image_data_tensor, resized_image_tensor = (
        create_graph())
    # NOTE(review): loop nesting reconstructed from whitespace-mangled
    # source -- confirm against the full file.
    for class_count in class_counts:
        for images_count in images_counts:
            # Append each run's metrics immediately so a crash mid-sweep
            # loses at most one result row.  'with' replaces the manual
            # open()/close() pair and closes the handle even if run_model
            # or savetxt raises.
            with open(RESULT_FILE, 'ab') as handle:
                np.savetxt(handle, [
                    run_model(images_count, class_count, graph,
                              bottleneck_tensor, image_data_tensor,
                              resized_image_tensor)
                ])

if CLASSIFY:
    classify(CLASSIFY_IMAGE_PATH)
if PLOT:
    plot(RESULT_FILE)