def test_categorize_existing_dir(self):
    """Re-running categorize on an existing destination must merge newly
    generated source files into the already-created category folders."""
    src_dir = "testDir"
    dest_dir = "testDirDest"
    # First pass builds the destination layout.
    categorize(src_dir, dest_dir)
    # Drop a fresh batch of mixed-type files into the source directory.
    generate_files(src_dir, "newAudioFile", extension="mp3", numFiles=2)
    generate_files(src_dir, "newNotes", numFiles=2)
    generate_files(src_dir, "newPic", extension="png", numFiles=2)
    generate_files(src_dir, "newClips", extension="avi", numFiles=2)
    # Second pass should file the new items into the existing tree.
    categorize(src_dir, dest_dir)
    images_dir = os.path.join(dest_dir, "media", "images")
    self.assertEqual(len(os.listdir(images_dir)), 4)
def test_categorize_duplicates(self):
    """Running categorize twice must leave duplicate source files in place:
    the source directory listing is identical before and after."""
    src_dir = "testDir"
    categorize(src_dir, "testDirDest")
    generate_files(src_dir, "audioFile", extension="mp3", numFiles=2)
    generate_files(src_dir, "notes", numFiles=2)
    generate_files(src_dir, "pic", extension="png", numFiles=2)
    generate_files(src_dir, "clips", extension="avi", numFiles=2)
    files_before = os.listdir("testDir")
    categorize(src_dir, "testDirDest")
    files_after = os.listdir("testDir")
    # Duplicates should not overwrite anything in the source.
    self.assertEqual(files_before, files_after)
def calc_carbon():
    """Annotate the categorized food DataFrame with carbon metrics.

    Reads per-food emission factors from sheet 5 of the module-level
    ``excel_file`` and, for every row produced by ``categorize()``, computes
    three scaled values from the row's 'Units' and 'Carbon Category'.

    Returns:
        The categorized DataFrame with 'Carbon Emissions', 'Land Use' and
        'Carbon Oppurtunity Costs' columns filled in.  (The misspelled
        column name matches the spreadsheet and is part of the data
        contract — do not "fix" it.)
    """
    emission_factors = pd.read_excel(excel_file, sheet_name=5)
    food_input = categorize()
    # Map food name -> (emissions, land use, carbon opportunity cost).
    emission_numbers = {
        emission_factors['Food'][el]: (
            emission_factors['Emissions'][el],
            emission_factors['Land Use'][el],
            emission_factors['Carbon Oppurtunity Costs'][el],
        )
        for el in range(emission_factors.shape[0])
    }
    food_input['Carbon Emissions'] = ''
    food_input['Land Use'] = ''
    food_input['Carbon Oppurtunity Costs'] = ''
    for i in range(food_input.shape[0]):
        units = food_input.at[i, 'Units']
        # One dict lookup per row instead of three (original repeated
        # emission_numbers.get(...) for each metric).
        emissions, land_use, opportunity = emission_numbers.get(
            food_input.at[i, 'Carbon Category'])
        # Divisors convert the spreadsheet's per-tonne / per-hectare-ish
        # factors to per-unit values — assumed from the original code;
        # TODO confirm units against the source spreadsheet.
        food_input.at[i, 'Carbon Emissions'] = units * (emissions / 1000)
        food_input.at[i, 'Land Use'] = units * (land_use / 10000)
        food_input.at[i, 'Carbon Oppurtunity Costs'] = units * (opportunity / 1000)
    return food_input
def categorize_info(ips_addr: str = DEFAULT_IPS_ADDRESS,
                    fields: list = DEFAULT_FIELDS,
                    special_key: str = DEFAULT_SPECIAL_KEY,
                    info_save_addr: str = DEFAULT_INFORMATION_SAVE_ADDRESS):
    """Look up information for every IP listed in ``ips_addr`` and categorize it.

    Reads one IP per line, queries ``get_information`` for each, collects the
    successful responses, dumps them as JSON to ``info_save_addr``, then
    delegates grouping to ``categorize``.

    Returns:
        Whatever ``categorize`` returns on success, or ``{"error": exc}``
        on any failure (the original never-raise contract is preserved).
    """
    try:
        info_list = []
        # `with` closes the file automatically; the original's explicit
        # f.close() calls inside the with-blocks were redundant.
        with open(file=ips_addr, mode='r') as f:
            ips_list = f.readlines()
        for ip in ips_list:
            fixed_ip = ip.strip()
            info = get_information(ip=fixed_ip, fields=fields)
            if info["status"] == "success":
                print(f"{fixed_ip} :: SUCCESS *")
                info_list.append(info)
            else:
                print(f"{fixed_ip} :: FAILED ! => {info['message']}")
        with open(file=info_save_addr, mode="w") as f:
            json.dump(fp=f, obj=info_list, indent=4)
        print(f"** Information saved in: {info_save_addr} **")
        return categorize(info_list=info_list, special_key=special_key)
    except Exception as error:
        # Broad on purpose: callers expect an error dict, never an exception.
        return {"error": error}
currentDirectory = os.getcwd()
data_temp = pd.read_csv(currentDirectory + "/Data_for_UCI_named.csv")
data = data_temp.iloc[:, :].values

classic = 0  # 1 - classic version of building decision tree, 0 - my version of building tree
no_trees = 1
size = 5
treshold = 0.3
print(len(data))

#######
# Single forest run with the parameters above.
training_data = make_training_data(data, no_trees, size)
test = data[(size * no_trees):]
forest = build_forest(training_data, treshold, no_trees, classic)
q = categorize(test, forest)
print(q)

######
# Parameter-sweep setup.
notrees_test = [1, 2, 5, 10, 15, 25, 50, 100, 200, 400]
treshold_test = [-0.5, -0.3, -0.1, 0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
size_test = [1, 2, 3, 5, 10, 20, 50]
repetition = 5
# BUG FIX: the original referenced an undefined name `test_no` here and
# below — the tree-count list is called `notrees_test`.
efficiency = np.empty((len(notrees_test), repetition))
training_data = make_training_data(data, notrees_test[-1], size)
test = data[(size * notrees_test[-1]):]
# Bail out when disassembly produced nothing to work with.
if instructions == 0:
    print("Unable to disassemble executable")
    exit(1)

# Populate get_gadgets.allGadgets from the disassembled code section.
get_gadgets.GetAllGadgets(instructions, code.data(), EntryAddress,
                          get_gadgets.SpecialInstructions, gadgetLength)

# Optionally dump every discovered gadget for the user.
if args.gadgets == True:
    print_pretty.print_pretty(get_gadgets.allGadgets)

# Keep gadgets of length 2 and bucket them by category for the chain builders.
TwoInstGadgets = categorize.getLNGadgets(get_gadgets.allGadgets, 2)
general.ALLGADGETS = categorize.categorize(TwoInstGadgets)

# execve() ROP Shellcode
if args.exploitType == "execve":
    execveChain.execveROPChain(general.ALLGADGETS, vulnExecutable)

# mprotect() ROP Shellcode + execve() traditional Shellcode
elif args.exploitType == "mprotect":
    mprotectChain.mprotectROPChain(general.ALLGADGETS, vulnExecutable)

# BindShell using ROP Shellcode
elif args.exploitType == "bindshell":
    bindshellChain.bindshellROPChain(general.ALLGADGETS, vulnExecutable)

# If we don't have the exploit
# NOTE(review): the body of this else-branch lies outside this chunk.
else:
# print("0x%x:\t%s\t%s" %(i.address, i.mnemonic,i.op_str))
#print("Looking for c3s")
# Collect every gadget found in the disassembled code section.
get_gadgets.GetAllGadgets(instructions, code.data(), EntryAddress,
                          get_gadgets.SpecialInstructions, gadgetLength)
print("Gadgets that were found:")
print_pretty.print_pretty(get_gadgets.allGadgets)
print(len(get_gadgets.SpecialInstructions))

# For now, get all gadgets with just 1 Instruction in it(excluding ret).
Temp = categorize.getLNGadgets(get_gadgets.allGadgets, 2)

# De-duplicate while preserving first-seen order.  Membership is tested
# with `not in` (equality-based) rather than a set, since the gadget
# objects may not be hashable.
TwoInstGadgets = list()
for gadget in Temp:
    if gadget not in TwoInstGadgets:
        TwoInstGadgets.append(gadget)
# print_pretty.print_pretty(TwoInstGadgets)

UniqueGadgetsList = categorize.categorize(TwoInstGadgets)
for gadget_bucket in UniqueGadgetsList:
    print_pretty.print_pretty(gadget_bucket)

# # A tuple is returned by chain.execveROPChain()
# # It also creates a file named execvePythonPayload which has output like ROPgadget
# payload = chain.execveROPChain(general.ALLGADGETS, vulnExecutable)
def test_categorize(self):
    """categorize() must create the media/images tree in the destination."""
    categorize("testDir", "testDirDest")
    images_dir = os.path.join("testDirDest", "media", "images")
    self.assertTrue(os.path.exists(images_dir))
def test_categorize_nested(self):
    """A six-level nested directory chain (dir5/dir4/.../dir0) must exist
    under the test path after categorizing in place."""
    categorize(self.testPath, self.testPath, config_dict=self.config)
    nested = self.testPath
    for depth in range(5, -1, -1):
        nested = os.path.join(nested, "dir" + str(depth))
    self.assertTrue(os.path.exists(nested))
    self.assertTrue(os.path.isdir(nested))
def fatalFlaw(game, moves, threeGroups=False):
    """Replay a human's move sequence, score each move against a search
    baseline, and report the single costliest deviation (the "fatal flaw").

    For every state in the human rollout the search (`getActions`) produces a
    best score/action; the move whose score gap versus the human's choice is
    largest (and, when `threeGroups` is set, falls into a recognized mistake
    group) is selected.  Also pickles the comparison data to 'store.pckl'.

    Returns a 9-tuple:
        (BrRollout, rS, hR, hS, BrActions, human actions from the flaw on,
         maxIndex, maxGroup, maxType)
    or nine Nones when the human never deviated from the recommended action.
    """
    human_rollout, rewards, humanscores = getHumanRollout(game, moves, getscore=True)
    # #printActions(game, getActions(game, qvalues))
    print()
    print()
    print("Your Actions:")
    printActions(game, [s[1] for s in human_rollout[:-1]])
    finalScore = getToComeScore(rewards, 0)
    print("Your score: ", finalScore)
    print()
    # Trackers for the worst deviating move found so far.
    maxAction = None
    maxDiff = 0
    maxIndex = 0
    index = 0
    Bhscore = 0        # best reachable score after the human's actual move
    Brscore = 0        # best reachable score with the recommended move
    BrRollout = None   # states of the recommended rollout from the flaw
    BrActions = None   # actions of the recommended rollout from the flaw
    group = None
    assert humanscores[-1] == finalScore
    for state, human_action in human_rollout:
        sofar = getSoFarScore(rewards, index)
        # The terminal rollout entry carries no action — stop there.
        if human_action == None:
            break
        #pdb.set_trace()
        #rhscore, hractions = getActions(state.getSuccessor(human_action)[0], 0, 50000)
        s = m2.State()
        s.gs = state
        # Search from this state, keeping every rollout for later inspection.
        rscore, ractions, NA, rstates, rollouts, scores = getActions(
            state, 1, NUM_ITERS, allRolloutsAdd=True)
        # First rollout that starts with the human's move; assumed to exist
        # whenever the loop reaches here — TODO confirm getActions guarantees it.
        humanOptimalRollout = [
            roll for roll in rollouts if roll[0] == human_action
        ][0]
        humanNode = [x for x, y in NA if y == human_action][0]
        # The human's move is worth at least what they actually scored.
        rhscore = max(humanNode.reward, finalScore)
        if finalScore <= rscore:
            print(index, "| BestAction", ractions[0], "~best score: ",
                  formatScore(rscore), "| humanAction", human_action,
                  "~best score: ", formatScore(rhscore))
        else:
            # Search underestimated: treat the human's own line as the baseline.
            rscore = finalScore
            ractions = [s[1] for s in human_rollout[index:]]
            print("uh oh, human's score is better than our best estimate!!!")
            print(index, "| BestAction", ractions[0], "~best score: ",
                  formatScore(rscore), "| humanAction", human_action,
                  "~best score: ", formatScore(rhscore))
        diff = rscore - rhscore
        if human_action != ractions[0]:
            # Classify the mistake; `group`/`mType` are only (re)bound here,
            # which is safe because the update below also requires a deviation.
            mType = categorize(state, human_action, ractions, humanOptimalRollout)
            print(mType)
            group = groupMistake(mType)
            print(group)
        # Record this move as the new fatal flaw when the gap is the largest
        # seen and (in threeGroups mode) the mistake has a recognized group.
        if (diff > maxDiff and human_action != ractions[0]
                and not threeGroups) or (diff > maxDiff
                                         and human_action != ractions[0]
                                         and group):
            maxDiff = diff
            maxAction = ractions[0]
            maxIndex = index
            Bhscore = rhscore
            Brscore = rscore
            BrRollout = rstates
            BrActions = ractions
            maxGroup = group
            maxType = mType
        index += 1
    # No deviation ever recorded: nothing to report.
    if not BrActions:
        return None, None, None, None, None, None, None, None, None
    BrActions = BrActions + [None]
    state, human_action = human_rollout[maxIndex]
    print("Fatal Flaw state")
    state.printBoard()
    print()
    print("Your actions: ")
    printActions(game, [s[1] for s in human_rollout])
    print("There was a fatal flaw at action index", maxIndex)
    print()
    print("You took: ", human_action, "~best score of", Bhscore)
    print("Your actions starting at fatal flaw:")
    printActions(state, [s[1] for s in human_rollout[maxIndex:-1]])
    print()
    print("You should have taken: ", maxAction, "with a ~best score of ", Brscore)
    print(
        "Optimal actions starting at fatal flaw, starting with corrected action:"
    )
    printActions(state, BrActions)
    print("You could have gotten", Brscore)
    print("Starting Fatal Flaw state")
    state.printBoard()
    k2Agent = MaxAgent(depth=2)
    valuefn = lambda s: k2Agent.value(s)[0]
    # Human vs recommended trajectories from the flaw onward.
    hR = [x[0] for x in human_rollout][maxIndex:]
    hS = [x.cash for x in hR]
    rS = [x.cash for x in BrRollout]
    # Persist the comparison for offline analysis/visualization.
    f = open('store.pckl', 'wb')
    pickle.dump(
        (BrRollout, rS, hR, hS, BrActions, [x[1] for x in human_rollout
                                            ][maxIndex:]), f)
    f.close()
    #print_max_diff_vals(BrRollout, hR, valuefn, rS, hS)
    #explain(BrRollout, rS)
    return BrRollout, rS, hR, hS, BrActions, [
        x[1] for x in human_rollout
    ][maxIndex:], maxIndex, maxGroup, maxType
bucket_name=bucket_name, local_dir_path='/tmp', separator='|', logger=logger) logger.info('Starting pile_up logs...') logs = pile_up(logs_folder_path) logger.info('Ended pile_up logs') logger.info('Starting sharpen logs...') logs = sharpen(logs) logger.info('Ended sharpen logs') logger.info('Starting categorize logs...') raw_data = categorize(logs) logger.info('Ended categorize logs') logger.info('Starting parse logs...') data = parse_logs(raw_data) logger.info('Ended parse logs') logger.info('Starting build dataframes...') dfs = build_dataframes(data) logger.info('Ended build dataframes') logger.info('Starting prettify logs...') pretty_logs_df = prettify(dfs) logger.info('Ended prettigy logs') logger.info('Starting delete_tables...')
def data():
    """Aggregate search results for query `q` from several backends
    (Alchemy categorization, Bing web/related search, AI, Knowledge Graph)
    and return them as a JSON string.

    Reads `q` and `id` from the request query string.  When `id` is a
    positive integer, the query and its category are appended to a
    per-user CSV and recommendations are added to the response.
    NOTE(review): `id` shadows the builtin of the same name.
    """
    query = request.args.get('q')
    id = request.args.get('id')
    resp ={}
    #alchemyapi
    resp['alchemy_result'] = categorize.categorize(query)
    if(int(id)>0):
        if(resp['alchemy_result']):
            # Log (query, category) to the user's CSV history file.
            mylist = [query,resp['alchemy_result']]
            with open(directory + id +'.csv','a') as myfile:
                wr = csv.writer(myfile,lineterminator='\n')
                wr.writerow(mylist)
        resp['recommend'] = recDict(id)
    start = time.time()
    # Replace the query with Bing's spelling correction when one exists.
    correction = Bing.spellingsuggestions(query)
    if correction:
        resp['correction'] = correction
        query = correction
    else:
        resp['correction'] = None
    '''
    p1 = Process(target = bing(query))
    p1.start()
    p2 = Process(target = wolf.wolfaramalpha(query))
    p2.start()
    #p1.join()
    #p2.join()
    '''
    #wolframAlpha
    #resp['wolf_result'] = wolf.wolfaramalpha(query)
    #bing
    #bing(query)
    # Keyword heuristics deciding which specialized Bing vertical to query.
    imagekeys = ["image" , "img" , "photo" , "wallpaper"]
    videokeys = ["video" , "youtube" , "dailymotion"]
    newskeys = ["news" , "updates"]
    tokens = query.split()
    image_result = video_result = news_result = None
    if any(x in query for x in imagekeys):
        image_result = Bing.image(query,20)
    elif any(x in query for x in videokeys):
        video_result = Bing.video(query,10)
    elif any(x in query for x in newskeys):
        news_result = Bing.news(query,15)
    resp['web_result'] = Bing.web(query)
    resp['rs_result'] = Bing.relatedsearch(query)
    # NOTE(review): the vertical results above are computed but not
    # included in the response — presumably intentional; confirm.
    #resp['image_result'] = image_result
    #resp['video_result'] = video_result
    #resp['news_result'] = news_result
    #ai
    resp['ai_result'] = ai.ai(query)
    #DistMatrix
    #resp['distMatrix'] = distance(query)
    #knowledgegraph
    resp['kg_result'] = knowledgegraph.knowledgegraph(query)
    #Places
    #resp['places_result'] = Places.Places(query)
    #print (resp)
    json_object = json.dumps(resp)
    return json_object