def run_selected_test(file_name, inst_number, sil, snd, th):
    """Segment one audio file with the given parameters and plot its duration stats."""
    # Drop images produced by any earlier run.
    os.system("rm -r Images/*")
    segmentation = Segmentation(inst_number, sil, snd, th)
    segmentation.segment_audio(file_name)
    segmentation.get_durations()
    # Attach duration statistics to the segmentation instance, then plot.
    segmentation.std_dev = Results().std_dev(segmentation.durations_list)
    segmentation.average = Results().average(segmentation.durations_list)
    Results().plot_durations(segmentation.durations_list, segmentation.inst_number)
def run(file_name, sil_list, snd_list, th_list):
    """Run the automatic parameter sweep over every parameter combination."""
    start_time = time.time()  # kept for parity with the original (unused)
    sweep = Automatic(file_name, sil_list, snd_list, th_list)
    sweep.get_parameters()
    sweep.all_param()
    sweep.create_instances()
    # Summary plots across every created instance.
    Results().plot_av_durations(sweep.inst_list, sweep.Summary_DF)
    Results().plot_stds(sweep.inst_list, sweep.Summary_DF)
    print("Programa finalizado")
def query(self, credentials, query):
    """Execute *query* against the PostgreSQL database described by
    *credentials* and return a populated Results object.

    Non-SELECT statements are committed and report rows affected; SELECT
    statements populate either table names (for the information_schema
    catalog query) or a full result table.  The connection is always
    closed, even on error.
    """
    db = None
    try:
        db = psycopg2.connect(database=credentials.dbname,
                              user=credentials.username,
                              password=credentials.password,
                              host=credentials.host,
                              port=credentials.port)
        cursor = db.cursor()
        cursor.execute(query)
        command = self.parsecommand(query)
        result = Results()
        if command != "select":
            # Every non-SELECT (insert/update/delete, and also create/drop)
            # lands here, which makes the create/drop branch below
            # unreachable.  Control flow kept as-is; flagged for review.
            self.commitchanges(db)
            result.rowsaffected(cursor)
        elif command == "create" or command == "drop":
            # NOTE(review): dead code -- "create"/"drop" never equal "select",
            # so this branch cannot be reached.
            result.populatenames(cursor)
        elif "select table_name, table_type from information_schema.tables" in query:
            # Catalog listing: only the names are relevant.
            result.populatenames(cursor)
        else:
            result.populatetable(cursor)
        return result
    except Exception:
        # Fix: bare `raise` re-raises with the original traceback intact;
        # `raise ex` would rewrite the traceback origin to this line.
        raise
    finally:
        if db:
            db.close()
def query(self, credentials, query):
    """Execute *query* against the MySQL database described by *credentials*
    and return a populated Results object.

    Non-SELECT statements are committed and report rows affected; SELECT
    statements populate either table names (for the information_schema
    catalog query) or a full result table.
    """
    conn = None
    try:
        conn = MySQLdb.connect(host=credentials.host,
                               port=credentials.port,
                               user=credentials.username,
                               passwd=credentials.password,
                               db=credentials.dbname,
                               cursorclass=cursors.SSCursor)
        cursor = conn.cursor()
        cursor.execute(query)
        command = self.parsecommand(query)
        result = Results()
        if command != "select":
            # Every non-SELECT (insert/update/delete, and also create/drop)
            # lands here, which makes the create/drop branch below
            # unreachable.  Control flow kept as-is; flagged for review.
            self.commitchanges(conn)
            result.rowsaffected(cursor)
        elif command == "create" or command == "drop":
            # NOTE(review): dead code -- "create"/"drop" never equal "select".
            result.populatenames(cursor)
        elif "select table_name from information_schema.tables" in query:
            result.populatenames(cursor)
        else:
            result.populatetable(cursor)
        cursor.close()
        return result
    finally:
        # Fix: the original never closed the connection (leak, especially on
        # error).  Assumes `result` is fully materialized before returning --
        # confirm Results does not hold the live SSCursor.
        if conn:
            conn.close()
def main():
    """Poll Primuss for new grades forever, emailing whenever they change."""
    wait_time = get_wait_time()
    primuss_username, primuss_password, my_email_address, my_email_password = init()
    results = Results(data_folder)
    while True:
        # Scrape the grades page and fold the outcome into `results`.
        results.refresh_grades(
            get_grades(primuss_username, primuss_password,
                       my_email_address, my_email_password))
        # Only act if the fetch succeeded.
        if results.last_fetch_failed == False:
            if results.changes_since_last_update:
                body = results.as_string()
                subject = get_email_subject(results.changed_results,
                                            results.subject_abbreviations)
                send_mail(subject, body, my_email_address, my_email_password)
                print("Email sent: \"" + subject + "\"")
            else:
                print("No changes were found.")
        else:
            print(
                "No results were collected. Look at your email inbox for more infos."
            )
        print("Waiting " + str(wait_time) + " seconds until next check.")
        time.sleep(wait_time)
def manage_test(self, query, REQUEST=None):
    "Executes the SQL in parameter 'query' and returns results"
    connection = self()  # get our connection
    res = connection.query(query)
    if type(res) is type(''):
        # Plain-string results are wrapped in a file-like RDB.File object.
        buf = StringIO()
        buf.write(res)
        buf.seek(0)
        result = RDB.File(buf)
    else:
        result = Results(res)
    if REQUEST is None:
        # No web request: hand back the unadulterated result object.
        return result
    if result._searchable_result_columns():
        body = custom_default_report(self.id, result)
    else:
        body = 'This statement returned no results.'
    report = HTML(
        '<html><body bgcolor="#ffffff" link="#000099" vlink="#555555">\n'
        '<dtml-var name="manage_tabs">\n<hr>\n%s\n\n'
        '<hr><h4>SQL Used:</strong><br>\n<pre>\n%s\n</pre>\n<hr>\n'
        '</body></html>' % (body, query))
    # Legacy Python 2 apply(): render the DTML with the result bound under
    # this object's id.
    report = apply(report, (self, REQUEST), {self.id: result})
    return report
def parse(self):
    """Parse the 6-column results file into (run_id, Results).

    Raises ResultsParseError on malformed lines, mixed run IDs, or
    duplicate (query_id, doc_id) pairs.
    """
    run_id_seen = None
    seen_pairs = set()
    parsed = Results()
    with open(self.filename) as handle:
        for raw_line in handle:
            fields = raw_line.strip().split()
            if len(fields) != 6:
                raise ResultsParseError(
                    'lines in results file should have exactly 6 columns')
            query_id, _, doc_id, rank, score, run_id = fields
            rank = int(rank)
            score = float(score)
            # All lines must carry the same run ID.
            if run_id_seen is None:
                run_id_seen = run_id
            elif run_id_seen != run_id:
                raise ResultsParseError(
                    'Mismatching runIDs in results file')
            pair = query_id + doc_id
            if pair in seen_pairs:
                raise ResultsParseError(
                    'Duplicate query_id, doc_id in results file')
            seen_pairs.add(pair)
            parsed.add_result(query_id, Result(doc_id, score, rank))
    return run_id_seen, parsed
def test_003_add_to_repository(self):
    """Results added to a repository are stored per experiment and frozen."""
    repo_dir = '/tmp/lablog-result-add-to-repo-test-' + uuid.uuid4().hex
    os.mkdir(repo_dir)
    try:
        repo = ExperimentRepository(repo_dir, readOnly=False)
        eID = repo.addExperiment('a')
        eID2 = repo.addExperiment('b')
        results = Results()
        results.append(eID, "1")
        results.append(eID, "2")
        results.append(eID2, "3")
        # Adding to the repo freezes the Results instance.
        results.addToRepo(repo)
        self.assertRaises(RuntimeError, results.append, 'c', "4")
        # The repository should now contain exactly the appended values.
        self.assertEqual(set(repo.getResults(eID, 2)), set(['1', '2']))
        self.assertEqual(set(repo.getResults(eID2, 1)), set(['3']))
        # A second addToRepo must fail.
        self.assertRaises(Exception, results.addToRepo, repo)
    finally:
        shutil.rmtree(repo_dir)
def __init__(self):
    """Initialize all simulation state with neutral defaults."""
    self.clock = 0.0               # simulation clock
    self.number_of_runs = 0        # how many times to run the simulation
    self.simulation_time = 0.0     # simulated time per run
    self.max = 0.0                 # value used as "infinity" (set to 4 * simulation time)
    self.x1_probability = 0.0      # probability of X1
    self.x2_probability = 0.0      # probability of X2
    self.x3_probability = 0.0      # probability of X3
    self.distribution_list = {}    # holds D1, D2, D3, D4, D5 and D6
    self.event_list = {}           # events scheduled for processing
    self.message_list = []         # every message created during a run
    self.processor_list = []       # every processor used in the simulation
    self.LMC1_list = []            # ordered messages bound for computer 1
    self.LMC2_list = []            # ordered messages bound for computer 2
    self.LMC3_list = []            # ordered messages bound for computer 3
    self.results = Results()       # per-run results container
    self.interface = Interface()   # console-interface helper
    self.computer_1 = None         # Computer 1 instance of the simulation
    self.computer_2 = None         # Computer 2 instance of the simulation
    self.computer_3 = None         # Computer 3 instance of the simulation
def simulate_line(self, rise_time: float, N: int = 50, plot: bool = True) -> \
        Tuple[List[float], List[float], List[float]]:
    """Simulate the line's response to a step input.

    Returns the (time, vin, vout) sample lists produced by the simulation.
    """
    # Lumped circuit parameters for the whole line length.
    capacitance = self.c * self.L
    resistance = self.r * self.L
    tao = 5 * resistance * capacitance  # kept for parity; unused below
    # Simulation window: ten rise times, split into 1e5 steps.
    end_time = rise_time * 10
    time_step = end_time / 1e5
    # Build the source, circuit, simulation and results objects.
    source = ExpSource(2.5, rise_time)
    circuit = Circuit(resistance, capacitance, source, self.uid, N)
    simulation = Simulation(time_step, end_time, circuit, self.uid)
    results = Results(self.uid)
    # Run, post-process, then remove the temporary files.
    simulation.run(results.filename)
    results.process()
    circuit.clean()
    simulation.clean()
    results.clean()
    return results.time, results.vin, results.vout
def test_002_dump_and_read_works(self):
    """A dumped Results bundle can be re-read, and both copies are read-only."""
    results = Results()
    results.append('a', 1)
    results.append('a', 2)
    results.append('b', 3)
    dump_dir = '/tmp/lablog-result-dump-test-' + uuid.uuid4().hex
    os.mkdir(dump_dir)
    try:
        bundleID = results.bundleID
        results.dump(dump_dir)
        # Dumping freezes the original instance.
        self.assertRaises(RuntimeError, results.append, 'c', 4)
        results = Results.fromFile(
            os.path.join(dump_dir, bundleID + ".resBundle"))
        self.assertEqual(results.resDict, {'a': [1, 2], 'b': [3]})
        self.assertEqual(bundleID, results.bundleID)
        # The re-read instance is read-only as well.
        self.assertRaises(RuntimeError, results.append, 'c', 4)
    finally:
        shutil.rmtree(dump_dir)
def evaluate(self, *args, **kwarg): results = Results() if self.verbose: print 'process start' for tag, step in self.items(): if self.verbose: print 'step :', tag #if not callable(step): continue if hasattr(step, 'evaluate'): result = step.evaluate(*args, **kwarg) else: result = step(*args, **kwarg) results[tag] = result #: for each step if self.verbose: print 'process end' return results
def main():
    """Train and test a bone-age model as described by a JSON config file."""
    # Read the configuration file passed as the first CLI argument.
    with open(sys.argv[1]) as configs:
        config_file = json.load(configs)
    # Load all the paths.
    path_to_images = config_file["path_to_images"]
    train_dataset_csv = config_file["path_to_train_csv"]
    test_dataset_csv = config_file["path_to_test_csv"]
    path_to_weights = config_file["path_to_weights"]
    path_to_results = config_file["path_to_results"]
    # NOTE(review): the weights file lands under the *images* folder --
    # confirm this is intentional rather than the results folder.
    weights_file = path_to_images + "weights.pt"
    # Results folder: holds train/test results, the config copy and weights.
    results_directory = path_to_results + get_time() + "/"
    os.mkdir(results_directory)
    copy2(sys.argv[1], results_directory)
    # Image preprocessing: resize then tensorize.
    transform = transforms.Compose([
        transforms.Resize(
            (config_file["image_size"], config_file["image_size"])),
        transforms.ToTensor()
    ])
    # Datasets and loaders.
    train_dataset = BoneDataset(train_dataset_csv, path_to_images, transform,
                                config_file["region"])
    test_dataset = BoneDataset(test_dataset_csv, path_to_images, transform,
                               config_file["region"])
    train_loader = torch.utils.data.DataLoader(
        dataset=train_dataset,
        batch_size=config_file["train_batch_size"],
        shuffle=True)
    test_loader = torch.utils.data.DataLoader(
        dataset=test_dataset,
        batch_size=config_file["test_batch_size"],
        shuffle=True)
    # Device, model, optimizer, criterion and results sink.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = get_model(config_file["model"], path_to_weights).to(device)
    optimizer = get_optimizer(model, config_file["optimizer"],
                              config_file["optimizer_hyperparameters"])
    criterion = get_criterion(config_file["criterion"])
    results = Results(results_directory)
    # Epoch loop: train, evaluate, checkpoint after every epoch.
    for epoch in range(1, config_file["epochs"] + 1):
        train(model, device, train_loader, optimizer, criterion, epoch,
              config_file["log_interval"], config_file["decay_lr_interval"],
              results)
        test(model, device, test_loader, criterion, results)
        torch.save(model.state_dict(), weights_file)
    results.write_results()
def evaluate(self, *args, **kwarg):
    """Evaluate every registered analysis and collect the outputs by tag."""
    collected = Results()
    for tag, analysis in self.items():
        # Prefer an analysis' own evaluate() hook; otherwise call it directly.
        runner = analysis.evaluate if hasattr(analysis, 'evaluate') else analysis
        collected[tag] = runner(*args, **kwarg)
    return collected
def test_001_adding_results_succeeds(self):
    """append() accumulates values per key in resDict."""
    results = Results()
    # Each append extends the list stored under its key.
    for key, value, expected in (
            ('a', 1, {'a': [1]}),
            ('a', 2, {'a': [1, 2]}),
            ('b', 3, {'a': [1, 2], 'b': [3]})):
        results.append(key, value)
        self.assertEqual(results.resDict, expected)
def __init__(self, k, classifiers, data, features, label, clfNames):
    """Hold the k-fold cross-validation configuration and result slots."""
    self.k = k
    self.classifiers = classifiers
    self.data = data
    self.features = features
    self.labelCol = label
    self.folders = []
    # One MSE slot and one Results container per classifier.
    self.mse = [0] * len(self.classifiers)
    self.results = [Results() for _ in self.classifiers]
    self.clfNames = clfNames
def store_current_simulation_outputs(self):
    """Snapshot the current simulation outputs into the results collection.

    :return: nothing
    """
    # Bundle every module's current state into one Results record and
    # append it to the collection.
    snapshot = Results(initializer=self.initialization_object,
                       combustion_module=self.combustion_module,
                       mass_module=self.mass_simulator_module,
                       trajectory_module=self.trajectory_module)
    self.results_collection.add_element(snapshot)
def createResults(self, env):
    """Build the Results aggregator from worker config and environment."""
    # Stats cadence and output directory come from the common worker config.
    common = self.config_dict["required-worker-config"]["common-config"]
    return Results(
        env.all_hosts,
        common["publish-dir"],
        common["stats-interval-second"],
        self.config_dict["collection-config"],
        env.copy_inst,
    )
class Simulation:
    """Driving-simulation harness (reads like a Java port).

    NOTE(review): several class attributes are built and then immediately
    overwritten with None (env, results), and `Simulation()` below looks
    like a ported constructor that assigns locals instead of attributes --
    confirm intent before relying on this class.
    """
    model = Model()
    driver = Driver()
    scanario = Scenario()
    env = Env()
    env = None
    # Find way to do vector<samples> for line below with correct import.
    # Fix: np.array() with no argument raises TypeError at class-creation
    # time; an empty array preserves the "no samples yet" intent.
    samples = np.array([])
    results = Results()
    results = None

    def Simulation(self, model):
        # Java-style constructor.  NOTE(review): driver/scenario/env are
        # locals, not attributes (kept as-is -- see class docstring).
        self.model = model
        driver = Driver(model, "Driver", 25, float(1.0), float(1.0))
        scenario = Scenario()
        env = Env(driver, scenario)
        # samples.add(recordSample(env))

    # Find way for synchronized to work
    def synchronized_update(self):
        self.env.update()
        # samples.add (recordSample (env))

    def getResults(self):
        # analyze() is defined elsewhere in the original file.
        self.results = analyze()
        return self.results

    def numSamples(self):
        return self.samples.size()

    def recordSample(self, env):
        """Capture the current state of the simulated and autonomous cars."""
        s = Sample()
        s.time = env.time
        s.simcarPos = env.simcar.p.myclone()
        s.simcarHeading = env.simcar.h.myclone()
        s.simcarFracIndex = env.simcar.fracIndex
        s.simcarSpeed = env.simcar.speed
        s.simcarRoadIndex = env.simcar.roadIndex
        # Fix: the original guarded with `== None` and then dereferenced the
        # point with .myclone(), which is a guaranteed AttributeError; clone
        # the road points only when they exist.
        if env.simcar.nearPoint is not None:
            s.nearPoint = env.simcar.nearPoint.myclone()
            s.farPoint = env.simcar.farPoint.myclone()
            s.carPoint = env.simcar.carPoint.myclone()
        s.steerAngle = env.simcar.steerAngle
        s.accelerator = env.simcar.accelerator
        s.brake = env.simcar.brake
        s.autocarPos = env.autocar.p.myclone()
        s.autocarHeading = env.autocar.h.myclone()
        s.autocarFracIndex = env.autocar.fracIndex
        s.autocarSpeed = env.autocar.speed
        s.autocarBraking = env.autocar.braking
        # NOTE(review): the sample is built but never returned or stored
        # here -- confirm against the rest of the file.
def _saveTestResults(self, iter, dataset, save=True):
    """Collect measure values from the scratch log; optionally persist them."""
    log = Log(self._name, self._scratchLogFile)
    collected = OrderedDict()
    for measure in self.params().measures():
        value = log.getAssignment(str(measure))
        if save:
            # Persist under (iteration, dataset, task, measure) coordinates.
            Results(self._path).update(iter, dataset, self.params().task(),
                                       measure.type(), measure.position(),
                                       value)
        collected[str(measure)] = value
    return collected
def score_output():
    """Show the results screen until the player quits or presses Escape."""
    result = Results()
    main_menu.disable()
    main_menu.reset(1)
    running = True
    while running:
        if main_menu.is_disabled():
            result.draw(surface)
        for event in pygame.event.get():
            # Escape or window close both leave the results screen.
            if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
                running = False
            elif event.type == pygame.QUIT:
                running = False
    main_menu.enable()
def wait_for_the_simulation(self):
    """Poll the worker thread; once it finishes, compute |B| and show results."""
    if not self.thread.is_alive():
        self.thread.join()
        if self.finish:
            self.parent.window.hide()
            # Field magnitude from the radial and axial components.
            self.norm = numpy.sqrt(self.Brho_grid**2 + self.Bz_grid**2)
            # Presumably the Results constructor opens the results view --
            # the instance itself is not kept.
            results = Results(self.parent, self)
    else:
        # Still running: poll again in 10 ms.
        GLib.timeout_add(10, self.wait_for_the_simulation)
def playGame(self):
    """Main blackjack loop: outer loop restarts games, inner loop plays rounds."""
    while self.newGame != False:
        self.newGame = False
        print(
            "Welcome to Bender's Totally Legit and Not Rigged at All Blackjack Table."
        )
        print(
            "You're not a cop, are you? You have to tell me if you're a cop..."
        )
        self.getPlayers()
        print("Welcome", self.player.name)
        self.player.startingCash()
        print(self.player.name, "has $", self.player.cash, "available")
        deck = Deck()
        dealer = Dealer()
        while self.replayGame != False:
            # Reshuffle when the shoe runs low.
            # NOTE(review): `house` is only bound inside this condition; if
            # the first deck starts with more than 10 cards it is used
            # unassigned below -- confirm Deck's initial state.
            if len(deck.currentDeck) <= 10:
                house = deck.newDeck()
            current_round = Round(self)
            results = Results(self)
            score = Scoreboard(self)
            wager = score.placeBet(self.player.cash)
            if self.newGame == True:
                break
            current_round.startingHands(self.player, dealer, deck, house)
            current_round.takeAction(self.player, dealer, deck, house)
            # The dealer only plays out if the player has not busted.
            if self.player.score <= 21 and self.player.score > 0:
                current_round.checkDealerHand(self.player, dealer, deck, house)
            results.determineWinner(self.player, dealer)
            self.player.cash = score.updateCash(self.player.cash, wager)
            print(self.player.name, "has $", self.player.cash, "available")
            replay = KeepPlaying()
            replay.replayGame(self.player, dealer)
            self.replayGame = replay.playAgain
        if self.newGame == False:
            print(
                "I don't need you. I'll build my own casino. With Blackjack... and hookers... Awww, forget it."
            )
        elif self.newGame == True:
            print("Oops, you're broke! ¯\_(ツ)_/¯")
            print(
                "Come back when you have some money to lose. (\/)(;,,;)(\/)"
            )
def parse(self):
    """Parse and validate the 6-column results file into (run_id, Results).

    Raises ResultsParser.ResultsParseError on malformed lines, unknown
    docnos, mixed run IDs, or duplicate (query_id, doc_id) pairs.
    """
    run_id_seen = None
    seen_pairs = set()
    parsed = Results()
    with open(self.filename) as handle:
        for raw_line in handle:
            fields = raw_line.strip().split()
            if len(fields) != 6:
                raise ResultsParser.ResultsParseError(
                    'lines in %s do not have exactly 6 columns' %
                    self.filename)
            query_id, _, doc_id, rank, score, run_id = fields
            try:
                rank = int(rank)
                score = float(score)
            except ValueError:
                raise ResultsParser.ResultsParseError(
                    'Error parsing rank or score in %s' % self.filename)
            # Every document must belong to the known docno universe.
            if doc_id not in self.docno_list:
                raise ResultsParser.ResultsParseError(
                    'Docno %s does not exist for query %s in %s' %
                    (doc_id, query_id, self.filename))
            # All lines must carry the same run ID.
            if run_id_seen is None:
                run_id_seen = run_id
            elif run_id_seen != run_id:
                raise ResultsParser.ResultsParseError(
                    'Mismatching runIDs in %s' % self.filename)
            pair = query_id + doc_id
            if pair in seen_pairs:
                raise ResultsParser.ResultsParseError(
                    'Duplicate query_id, doc_id in %s' % self.filename)
            seen_pairs.add(pair)
            parsed.add_result(query_id, Result(doc_id, score, rank))
    return run_id_seen, parsed
def evaluate(self, state):
    """The default evaluate function.

    Assumptions:
    None

    Source:
    N/A

    Inputs:
    None

    Outputs:
    results <Results class> (empty)

    Properties Used:
    N/A
    """
    # Base-class stub: hand back an empty Results container; `state` is
    # accepted for interface compatibility but unused here.
    return Results()
def __init__(self, number_of_actions, input_dimension, load, batch_size=25,
             episodes=10, max_steps=100, epsilon=0, gamma=0.0, alpha=0.0,
             epsilon_decay=1.0, episodes_decay=30, epochs=1):
    """Store the training hyper-parameters and build the agent and analyzer."""
    # Training-schedule hyper-parameters.
    self.episodes = episodes
    self.max_steps = max_steps
    self.epochs = epochs
    # Exploration / learning hyper-parameters.
    self.epsilon = epsilon
    self.gamma = gamma
    self.alpha = alpha
    self.epsilon_decay = epsilon_decay
    self.episodes_decay = episodes_decay
    # Agent with (optionally) pre-loaded weights, plus a results analyzer.
    self.agent = Agent(number_of_actions, input_dimension, batch_size,
                       self.alpha, load, 'model_weights.h5')
    self.analyzer = Results()
def create_instances(self):
    """Create one Segmentation instance per parameter combination and fold
    its duration statistics into the summary dataframes."""
    st_dev = []
    av_dur = []
    for i, combo in enumerate(self.all_combinations):
        print("Cargando Prueba " + str(i) + "...\n")
        self.inst_list.append(Segmentation(i, combo[0], combo[1], combo[2]))
        segment = self.inst_list[i]
        segment.segment_audio(self.file_name)
        segment.get_durations()
        durations = segment.durations_list
        segment.std_dev = Results().std_dev(durations)
        segment.average = Results().average(durations)
        Results().plot_durations(durations, segment.inst_number)
        segment.delete_audio()
        # Dataframe with every duration of every test.
        self.Durations_DF = Results().get_durations_df(segment,
                                                       self.Durations_DF)
        # Dataframe with the average duration and std-dev of each test.
        st_dev.append(segment.std_dev)
        av_dur.append(segment.average)
        self.Summary_DF = Results().get_summary_df(segment, st_dev, av_dur,
                                                   i, self.Summary_DF)
    Results().create_CSV(self.Summary_DF, self.Durations_DF)
def main():
    """Audit a poker site's RNG from hand-history files.

    Counts card/hand frequencies, compares them against expected
    probabilities with confidence intervals and chi-square p-values, and
    runs a Kolmogorov-Smirnov uniformity test on binned p-values.
    """
    # Argparse
    argparser = argparse.ArgumentParser(
        description=
        "This script takes a user's poker hand history and calculates proportions of card draws and hands compared to the expected values, their confidence intervals, and chi-square p-values to determine if the site's RNG is behaving as expected."
    )
    argparser.add_argument('path',
                           type=str,
                           help='Path to hand history directory')
    argparser.add_argument(
        '--site',
        choices=['Bovada'],
        default='Bovada',
        type=str,
        help='Which site\'s hand history is being parsed. Default=Bovada')
    argparser.add_argument('--summaryonly',
                           action='store_true',
                           help='Show summary only, no tables.')
    argparser.add_argument(
        '--stdev',
        choices=[1, 2, 3],
        default=2,
        type=int,
        help=
        'Stdev for confidence limit, so 1 for 68%%, 2 for 95%%, and 3 for 99.7%%. Default=2'
    )
    argparser.add_argument(
        '--bins',
        default=10,
        type=int,
        help=
        'Number of bins for p-value uniformity test (Kolmogorov-Smirnov test on Chi-square p-values). Default=10'
    )
    argparser.add_argument('--showallbinnedtables',
                           action='store_true',
                           help='Show tables for all bins.')
    argparser.add_argument('--onlyme',
                           action='store_true',
                           help='Only count my hands')
    argparser.add_argument(
        '--holecards',
        action='store_true',
        help='Show results for frequency of hole cards without suits')
    argparser.add_argument(
        '--holecardswithsuits',
        action='store_true',
        help='Show results for frequency of hole cards with suits (Long output)'
    )
    argparser.add_argument(
        '--allcombinations',
        action='store_true',
        help=
        'Show results for frequency of all combinations between hole and board cards.'
    )
    args = argparser.parse_args()
    # Determine correct parser
    if args.site == 'Bovada':
        Parser = Parse.Bovada
        hand_probabilites = Parse.HAND_PROBABILITIES
    # Frequency accumulators over the whole history.
    card_frequency = {x: 0 for x in CARDS}
    hand_frequency = {x: 0 for x in hand_probabilites.keys()}
    all_hole_cards = []
    all_board_cards = []
    if args.allcombinations:
        hand_allcombinations_frequency = {
            x: 0
            for x in hand_probabilites.keys()
        }
    if args.holecardswithsuits:
        hole_card_frequency = {' '.join(x): 0 for x in combinations(CARDS, 2)}
    if args.holecards:
        hole_card_nosuits_frequency = {
            ' '.join(x): 0
            for x in combinations([c[0] for c in CARDS], 2)
        }  # Remove suit from cards
    # Treys evaluator
    evaluator = Evaluator()
    for file in os.listdir(args.path):
        # Only open .txt files
        if not file.lower().endswith('.txt'):
            continue
        # Open file with parser
        # NOTE(review): backslash path join is Windows-specific.
        b = Parser('{}\{}'.format(args.path, file))
        while True:
            # Get hole cards
            hole_cards = b.get_hole_cards(only_me=args.onlyme)
            if not hole_cards:
                break  # EOF
            # Count card frequency of hole cards
            for c_1, c_2 in hole_cards:
                # Individual card frequency
                card_frequency[c_1] += 1
                card_frequency[c_2] += 1
                # Frequency of hole cards together
                if args.holecardswithsuits:
                    count_hole_cards_frequency((c_1, c_2),
                                               hole_card_frequency)
                if args.holecards:
                    count_hole_cards_frequency([x[0] for x in (c_1, c_2)],
                                               hole_card_nosuits_frequency)
            # Get board cards
            board = b.get_board_cards()
            if not board:
                continue
            # Count frequency of individual board cards
            for c in board:
                card_frequency[c] += 1
            # Count hand frequencies
            count_hand_frequencies(
                evaluator,
                hole_cards,
                board,
                hand_frequency,
                allcombinations=args.allcombinations,
                allcombinations_hand_frequency=None
                if not args.allcombinations else hand_allcombinations_frequency)
            # Track all hole_card with board results
            all_hole_cards.append(hole_cards)
            all_board_cards.append(board)
    assert len(all_hole_cards) == len(
        all_board_cards)  # Sanity check on lengths
    results = Results(summary_only=args.summaryonly)
    summary = []  # List of strs of result summaries
    test_results = []  # List of bool of pass/fail test results
    # P-value uniformity test of chisquare pvalues based with args.bins bins
    all_length = len(all_hole_cards)
    interval = len(all_hole_cards) // args.bins
    binned_frequencies = [{x: 0 for x in hand_probabilites.keys()}
                          for _ in range(args.bins)]
    if args.allcombinations:
        binned_all_frequencies = [{x: 0 for x in hand_probabilites.keys()}
                                  for _ in range(args.bins)]
    chisquare_pvalues = []
    if args.allcombinations:
        chisquare_allcombinations_pvalues = []
    # Re-count hand frequencies per bin; the last bin absorbs the remainder.
    for x, i in enumerate(range(0, all_length, interval)):
        end_i = i + interval
        last = all_length - end_i < interval
        if last:
            end_i = all_length
        # Count binned hand frequencies
        for bin_hole_cards, bin_board_cards in zip(all_hole_cards[i:end_i],
                                                   all_board_cards[i:end_i]):
            count_hand_frequencies(
                evaluator,
                bin_hole_cards,
                bin_board_cards,
                binned_frequencies[x],
                allcombinations=args.allcombinations,
                allcombinations_hand_frequency=None
                if not args.allcombinations else binned_all_frequencies[x])
        results.calculate_and_print_results(
            'BIN #{} Distribution of Hands'.format(x),
            'Hand',
            hand_probabilites,
            binned_frequencies[x],
            pvalues=chisquare_pvalues,
            std_dev=args.stdev,
            no_output=not args.showallbinnedtables,
        )
        if args.allcombinations:
            results.calculate_and_print_results(
                'BIN #{} Distribution of All Hand Combinations'.format(x),
                'Hand',
                hand_probabilites,
                binned_all_frequencies[x],
                pvalues=chisquare_allcombinations_pvalues,
                std_dev=args.stdev,
                no_output=not args.showallbinnedtables,
            )
        if last:
            break
    # Print all results
    results.calculate_and_print_results(
        'Distribution of All Hands',
        'Hand',
        hand_probabilites,
        hand_frequency,
        summary,
        test_results,
        std_dev=args.stdev,
    )
    results.print_kstest_table(
        chisquare_pvalues,
        'Chi-square p-values of binned Distribution of Hands',
        summary,
        test_results,
        column_size=40)
    if args.allcombinations:
        results.calculate_and_print_results(
            'Distribution of Hands, All Combinations',
            'Hand',
            hand_probabilites,
            hand_allcombinations_frequency,
            summary,
            test_results,
            std_dev=args.stdev,
        )
        results.print_kstest_table(
            chisquare_allcombinations_pvalues,
            'Chi-square p-values of binned Distribution of Hands, All Combinations',
            summary,
            test_results,
            column_size=40)
    # Uniform expectation for single-card draws.
    results.calculate_and_print_results('Distribution of Cards',
                                        'Card',
                                        {x: 1 / len(CARDS)
                                         for x in CARDS},
                                        card_frequency,
                                        summary,
                                        test_results,
                                        std_dev=args.stdev,
                                        is_normal=False)
    if args.holecardswithsuits:
        hole_card_combinations = [' '.join(x) for x in combinations(CARDS, 2)]
        hole_card_expected_frequency = {
            x: hole_card_combinations.count(x) / len(hole_card_combinations)
            for x in hole_card_combinations
        }
        results.calculate_and_print_results(
            'Distribution of Hole Cards with suits',
            'Hole Cards',
            hole_card_expected_frequency,
            hole_card_frequency,
            summary,
            test_results,
            std_dev=args.stdev,
            is_normal=False,
        )
    if args.holecards:
        hole_card_nosuit_combinations = [
            ' '.join((x[0], y[0])) for x, y in combinations(CARDS, 2)
        ]
        hole_card_nosuits_expected_frequency = {
            x: hole_card_nosuit_combinations.count(x) /
            len(hole_card_nosuit_combinations)
            for x in hole_card_nosuit_combinations
        }
        results.calculate_and_print_results(
            'Distribution of Hole Cards without suits',
            'Hole Cards',
            hole_card_nosuits_expected_frequency,
            hole_card_nosuits_frequency,
            summary,
            test_results,
            std_dev=args.stdev,
            is_normal=False,
        )
    results.print_summary(summary, test_results)
def evaluate(self, state):
    """Default evaluation hook: return an empty Results container."""
    # `state` is accepted for interface compatibility but not used here.
    return Results()
def evaluate(self, *args, **kwarg):
    """Analysis-specific evaluation hook; subclasses must override.

    Raises:
        NotImplementedError: always, on this base class.
    """
    # Fix: the original had `return Results()` after the raise, which was
    # unreachable dead code; it has been removed.
    raise NotImplementedError