def ensemble():
    """Train an ensemble of face-recognition algorithms on `train_path`,
    run recognition on `test_path`, and print a results report.

    Reads the module-level `train_path` / `test_path` globals.
    """
    global train_path, test_path, results_path

    # Pre-processing helper: images are resized to 100x100 using cubic
    # interpolation before being fed to the algorithms.
    helper = Auxiliary(size_x=100, size_y=100, interpolation=cv2.INTER_CUBIC)

    # The algorithms participating in the ensemble, in voting order.
    members = [Eigenfaces(), LBPH(), Fisherfaces(), SIFT(), SURF()]

    # Weighted voting with every member carrying the same weight (10),
    # so in practice this behaves like a majority vote.
    vote = Voting(Voting().weighted, [10, 10, 10, 10, 10])

    combined = Ensemble(members, helper, vote)
    combined.train(train_path)
    combined.recognize_faces(test_path)

    # Summarize and print the recognition run.
    Report(combined).print_results()
def face_recognition():
    """Train a single face-recognition algorithm on `train_path`, run
    recognition on `test_path`, and print a results report.

    Reads the module-level `train_path` / `test_path` globals.
    """
    global train_path, test_path, results_path

    # Pre-processing helper: images are resized to 100x100 using cubic
    # interpolation before recognition.
    helper = Auxiliary(size_x=100, size_y=100, interpolation=cv2.INTER_CUBIC)

    # Selected recognition algorithm.
    # Alternatives: Fisherfaces(), LBPH(), SIFT(), SURF()
    algorithm = Eigenfaces()

    recognizer = FaceRecognition(algorithm, helper)
    recognizer.train(train_path)
    recognizer.recognize_faces(test_path)

    # Summarize and print the recognition run.
    Report(recognizer).print_results()
def __init__(self, default, argv):
    """Initialize the base class, wire up every sub-component, and
    optionally pre-load previously saved report data.

    Args:
        default: default configuration passed through to every component.
        argv: command-line arguments passed through to every component.
    """
    Standard.__init__(self, default, argv)

    # Every sub-system is constructed with the same config/arguments.
    for attr, component in (
        ('report', Report),
        ('scraping', Scraping),
        ('market', Market),
        ('graph', Graph),
        ('alert', Alert),
    ):
        setattr(self, attr, component(default, argv))

    # Optionally seed market data from a previously saved report file.
    if self.conf['load_file']:
        self.market.data = self.report.get()
    if self.conf['print_file']:
        self.report.display_file_list()
class App(Standard):
    """Application facade tying together scraping, reporting, market data,
    graphing and alerting, with three long-running timed loops
    (collector / display / alerting) that each stop on Ctrl-C."""

    def __init__(self, default, argv):
        """Initialize the base class, build every sub-component, and
        optionally pre-load previously saved report data."""
        Standard.__init__(self, default, argv)
        self.report = Report(default, argv)
        self.scraping = Scraping(default, argv)
        self.market = Market(default, argv)
        self.graph = Graph(default, argv)
        self.alert = Alert(default, argv)
        # Optionally seed market data from a previously saved report file.
        if self.conf['load_file']:
            self.market.data = self.report.get()
        if self.conf['print_file']:
            self.report.display_file_list()

    def collector(self):
        """Endless loop: scrape, persist the scrape, map it to market data,
        then wait out the remainder of the configured interval."""
        self.debug("app","collector")
        try:
            while True:
                # Deadline for this iteration; self.timer() waits until it.
                self.conf['time_done_collector'] = time.time() + self.conf['loop_timer_collector']
                scraped = self.scraping.get(self.scraping.get_html(), self.conf['row_limit'])
                self.report.save(scraped)
                # Mapping call kept for its effects; the return value is
                # currently unused here.
                mapped = self.market.data_mapping(scraped)
                self.timer(self.conf['time_done_collector'])
        except KeyboardInterrupt:
            print('Manual break by user')

    def display(self):
        """Endless loop: render each view enabled in the configuration,
        then wait out the remainder of the configured interval."""
        self.debug("app","display")
        if self.conf['print_graph']:
            self.graph.init()
        try:
            while True:
                self.conf['time_done_display'] = time.time() + self.conf['loop_timer_display']
                if self.conf['print_scraping']:
                    self.scraping.display()
                if self.conf['print_report']:
                    self.report.display()
                if self.conf['print_market']:
                    self.market.display()
                if self.conf['print_graph']:
                    self.graph.trace(self.market.data)
                self.timer(self.conf['time_done_display'])
        except KeyboardInterrupt:
            print('Manually stopped')

    def alerting(self):
        """Endless loop: recompute alerts from market data and display
        them as configured, then wait out the configured interval."""
        self.debug("app","alerting")
        if self.conf['print_alert_graph']:
            self.alert.init()
        try:
            while True:
                self.conf['time_done_alert'] = time.time() + self.conf['loop_timer_alert']
                self.alert.calc(self.market.data)
                if self.conf['print_alert']:
                    self.alert.display_calc()
                if self.conf['print_alert_graph']:
                    self.alert.trace(self.alert.market_data)
                self.timer(self.conf['time_done_alert'])
        except KeyboardInterrupt:
            print('Manually stopped')
optimizer = optim.SGD(list(tagger.parameters()), lr=args.lr, momentum=args.momentum) elif args.opt_method == 'adam': optimizer = optim.Adam(list(tagger.parameters()), lr=args.lr, betas=(0.9, 0.999)) else: raise ValueError('Unknown optimizer, must be one of "sgd"/"adam".') scheduler = LambdaLR(optimizer, lr_lambda=lambda epoch: 1 / (1 + args.lr_decay * epoch)) # Prepare report and temporary variables for "save best" strategy report = Report(args.report_fn, args, score_names=('train loss', 'f1-train', 'f1-dev', 'f1-test', 'acc. train', 'acc. dev', 'acc. test')) iterations_num = floor(datasets_bank.train_data_num / args.batch_size) best_f1_dev = -1 best_epoch = -1 best_f1_test = -1 best_test_connl_str = 'N\A' patience_counter = 0 print('\nStart training...\n') for epoch in range(1, args.epoch_num + 1): ######## time_start = time.time() loss_sum = 0 if epoch > 0: tagger.train() if args.lr_decay > 0: scheduler.step()
# Start the engines. p1 = Process(target=zeekengine, args=(alerts, )) p2 = Process(target=snortengine, args=(alerts, )) p1.start() p2.start() # Wait to their end. p1.join() p2.join() # Some formating and alerts.json writing. with open(os.path.join(capture_directory, "assets/alerts.json"), "w") as f: report = {"high": [], "moderate": [], "low": []} for alert in (alerts["zeek"] + alerts["suricata"]): if alert["level"] == "High": report["high"].append(alert) if alert["level"] == "Moderate": report["moderate"].append(alert) if alert["level"] == "Low": report["low"].append(alert) f.write(json.dumps(report, indent=4, separators=(',', ': '))) # Generate the report report = Report(capture_directory) report.generate_report() else: print("The directory doesn't exist.") else: print("Please specify a capture directory in argument.")