def __init__(self, rows, cols, iterable=None):
    super().__init__(gs.BOARD_POS)
    self.rows = rows
    self.cols = cols
    self.m = [[0 for c in range(cols)] for r in range(rows)]
    self.tiles = [[None for c in range(cols)] for r in range(rows)]
    self.tiles_to_destroy = []
    self.tiles_to_spawn = []
    self.should_wait_for_move_finished = False
    self.tile_factory = TileFactory(self)
    self.scorer = Scorer((10, 5))
    board_image = pygame.image.load("data/images/board.png")
    board_image_size = (gs.BOARD_WIDTH + gs.BOARD_BORDER,
                        gs.BOARD_HEIGHT + gs.BOARD_BORDER)
    self.board_image = pygame.transform.scale(board_image, board_image_size)
    if iterable is not None:
        for n, (i, j) in enumerate(
                itertools.product(range(self.rows), range(self.cols))):
            val = iterable[n]
            if val:
                self.m[i][j] = val
                self.tiles[i][j] = self.tile_factory.create(val, i, j)
def eval_openset(self):
    self.sequential_extract(self.valid_test, f"{self.project_dir}/tmp/test.h5")
    if self.valid_enroll:
        self.sequential_extract(self.valid_enroll, f"{self.project_dir}/tmp/enroll.h5")
        enroll_embedding = f"{self.project_dir}/tmp/enroll.h5"
    else:
        enroll_embedding = f"{self.project_dir}/tmp/test.h5"
    if self.valid_target:
        self.sequential_extract(self.valid_target, f"{self.project_dir}/tmp/target.h5")
        data_target = h52dict(f"{self.project_dir}/tmp/target.h5")
        transform_lst = [PCA(whiten=True)]
        for transform in transform_lst:
            transform.fit_transform(data_target["X"])
    else:
        transform_lst = None
    if self.score_paras is None:
        self.score_paras = {}
    scorer = Scorer(
        comp_minDCF=False,
        enroll=enroll_embedding,
        test=f"{self.project_dir}/tmp/test.h5",
        ndx_file=self.valid_trial_list,
        transforms=transform_lst,
        **self.score_paras,
    )
    eer = scorer.batch_cosine_score()
    with open(f"{self.logger_dir}/validation.log", "a") as f:
        f.write(f"{self.epoch} EER is {eer}\n")
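# Note: Scorer.batch_cosine_score's internals are not shown in this snippet.
# As a rough, assumption-laden sketch of how an equal error rate (EER) can be
# derived from cosine scores plus target/non-target trial labels (names and
# logic below are illustrative, not the library's API):
import numpy as np

def eer_from_scores(scores, labels):
    # scores: one similarity score per trial; labels: 1 = target, 0 = non-target
    scores = np.asarray(scores)
    labels = np.asarray(labels)
    order = np.argsort(-scores)                        # descending by score
    labels = labels[order]
    fnr = 1.0 - np.cumsum(labels) / labels.sum()       # miss rate per threshold
    fpr = np.cumsum(1 - labels) / (1 - labels).sum()   # false-alarm rate
    i = np.argmin(np.abs(fnr - fpr))                   # where the rates cross
    return (fnr[i] + fpr[i]) / 2.0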
def __init__(self, argument_string):
    """
    Initialises metric-specific parameters.
    """
    Scorer.__init__(self, argument_string)
    if 'negative_value' not in self._arguments:
        self._arguments['negative_value'] = 0.0
def __init__(self, model, source_file, target_file, source_file2, target_file2,
             num_sentences=1000, beam_size=3):
    self.model = model
    self.source_file = source_file
    self.target_file = target_file
    self.source_file2 = source_file2
    self.target_file2 = target_file2
    self.num_sentences = num_sentences
    self.beam_size = beam_size
    self.translationList = []
    self.pairs = []
    self.scoresList = []
    self.scorer = Scorer()
    self.metric_to_cter = {}
    self.all_cter_scores = []
    self.metric_to_bad = {}
def __init__(self, playerCount=2, firstToAct=1, nextToAct=1, actingOrderPointer=0,
             roundNumber=1, roundActionNumber=1, deck=None, deckPointer=0, variant='ofc'):
    """
    Initialise Game object

    Each game has a current round number, Player objects and a board object
    for each round

    :param playerCount: int number of players
    :param firstToAct: int playerNumber who acts first this round
    :param deck: 104 char string containing card names, format <rank><suit> * 52
    :return: None
    """
    assert isinstance(playerCount, int)
    assert 2 <= playerCount <= 4
    assert isinstance(firstToAct, int)
    assert 1 <= firstToAct <= 4
    self.playerCount = playerCount
    self.firstToAct = firstToAct
    self.nextToAct = nextToAct
    self.actingOrder = self.generateActingOrder(firstToAct=firstToAct)
    self.actingOrderPointer = actingOrderPointer
    self.roundActionNumber = roundActionNumber
    self.roundNumber = roundNumber
    self.variant = variant
    self.board = Board(playerCount=playerCount, deck=deck, deckPointer=deckPointer)
    self.players = self.createPlayers()
    self.playerIds = self.createPlayerIds()
    self.scoring = Scorer(players=self.players, board=self.board)
def score(self, y, y_hat, score="rmse"):
    """
    Calculates score metrics for the learning algorithm.

    Parameters
    ----------
    y : array_like
        A vector of shape (m, ) of true values.
    y_hat : array_like
        A vector of shape (m, ) of predicted values.
    score : string
        The type of metric to be used in evaluation.
        - rmse : root mean squared error
        - mse : mean squared error

    Returns
    -------
    score_metric : float
    """
    scorer = Scorer()
    if score == "rmse":
        score_metric = scorer.rmse_(y, y_hat)
    elif score == "mse":
        score_metric = scorer.mse_(y, y_hat)
    else:
        # without this, score_metric would be unbound for unknown types
        raise ValueError(f"Unknown score type: {score}")
    return score_metric
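# The rmse_/mse_ helpers are not shown above; a minimal sketch of what they
# presumably compute (the NumPy bodies below are assumptions):
import numpy as np

def mse_(y, y_hat):
    # mean squared error: the average squared residual
    return float(np.mean((np.asarray(y) - np.asarray(y_hat)) ** 2))

def rmse_(y, y_hat):
    # root mean squared error: sqrt of the MSE, in the same units as y
    return float(np.sqrt(mse_(y, y_hat)))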
class Game:
    def __init__(self):
        self._current_frame = 0
        self._first_throw_in_frame = True
        self._scorer = Scorer()

    def score(self):
        return self.score_for_frame(self._current_frame)

    def score_for_frame(self, frame):
        return self._scorer.score_for_frame(frame)

    def add(self, pins):
        self._scorer.add_throw(pins)
        self.adjust_current_frame(pins)

    def adjust_current_frame(self, pins):
        if self.last_ball_in_frame(pins):
            self.advance_frame()
        else:
            self._first_throw_in_frame = False

    def last_ball_in_frame(self, pins):
        return self.strike(pins) or not self._first_throw_in_frame

    def strike(self, pins):
        return self._first_throw_in_frame and pins == 10

    def advance_frame(self):
        # reset the flag so the next frame starts on its first ball; without
        # this, every ball after a two-ball frame would advance the frame again
        self._first_throw_in_frame = True
        self._current_frame = min(10, self._current_frame + 1)
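# Usage sketch for the bowling Game above, assuming the Scorer (not shown)
# implements standard ten-pin scoring in add_throw/score_for_frame:
game = Game()
game.add(10)   # strike: the frame ends on the first ball
game.add(6)
game.add(4)    # spare: 6 + 4 across two balls
game.add(5)
# With conventional scoring the strike frame is worth 10 + 6 + 4 and the spare
# frame 10 + 5; the exact value returned depends on the Scorer implementation.
print(game.score())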
def _eval_pas(self, arguments_set, dataset: PASDataset, corpus: str, suffix: str = ''
              ) -> Dict[str, ScoreResult]:
    prediction_output_dir = self.save_dir / f'{corpus}_out{suffix}'
    prediction_writer = PredictionKNPWriter(dataset,
                                            self.logger,
                                            use_knp_overt=(not self.predict_overt))
    documents_pred = prediction_writer.write(arguments_set,
                                             prediction_output_dir,
                                             add_pas_tag=False)
    log = {}
    for pas_target in self.pas_targets:
        scorer = Scorer(documents_pred, dataset.gold_documents,
                        target_cases=dataset.target_cases,
                        target_exophors=dataset.target_exophors,
                        coreference=dataset.coreference,
                        bridging=dataset.bridging,
                        pas_target=pas_target)
        result = scorer.run()
        target = corpus + (f'_{pas_target}' if pas_target else '') + suffix
        scorer.write_html(self.save_dir / f'{target}.html')
        result.export_txt(self.save_dir / f'{target}.txt')
        result.export_csv(self.save_dir / f'{target}.csv')
        log[pas_target] = result
    return log
def test_getter(self, size):
    y_true = [1] * size
    y_pred = [1] * size
    y_true.append(0)
    y_pred.append(0)
    scorer = Scorer()
    with pytest.raises(ValueError):
        print(scorer['ACC(Accuracy)'])
    scorer.evaluate(y_true, y_pred)
    assert scorer.num_classes == 2
    np.testing.assert_allclose(
        scorer['FP(False positive/type 1 error/false alarm)'],
        np.zeros(shape=(len(set(y_true)), )))
    np.testing.assert_allclose(
        scorer.score['FN(False negative/miss/type 2 error)'],
        np.zeros(shape=(len(set(y_true)), )))
    np.testing.assert_allclose(scorer.score['ACC(Accuracy)'],
                               np.ones(shape=(len(set(y_true)), )))
    with pytest.raises(KeyError):
        print(scorer['dummy'])
def __init__(self, model, source_file, target_file, test_source_file, test_target_file,
             raw_source_file, raw_target_file, num_sentences=400, batch_translate=True):
    self.model = model
    self.source_file = source_file
    self.target_file = target_file
    self.loader = LanguagePairLoader("de", "en", source_file, target_file)
    self.test_loader = LanguagePairLoader("de", "en", test_source_file, test_target_file)
    self.extractor = DomainSpecificExtractor(source_file=raw_source_file,
                                             train_source_file=hp.source_file,
                                             train_vocab_file="train_vocab.pkl")
    self.target_extractor = DomainSpecificExtractor(source_file=raw_target_file,
                                                    train_source_file=hp.source_file,
                                                    train_vocab_file="train_vocab_en.pkl")
    self.scorer = Scorer()
    self.scores = {}
    self.num_sentences = num_sentences
    self.batch_translate = batch_translate
    self.evaluate_every = 10
    self.metric_bleu_scores = {}
    self.metric_gleu_scores = {}
    self.metric_precisions = {}
    self.metric_recalls = {}
    # Plot each metric
    plt.style.use('seaborn-darkgrid')
    self.palette = sns.color_palette()
def main(argv):
    # Check number of command line arguments
    if len(argv) != 3:
        print("Usage error:")
        print("python simple_classifier.py <train_features> <train_classes> <test_features>")
        sys.exit()
    else:
        trainfeatfile = argv[0]
        trainclassfile = argv[1]
        testfeatfile = argv[2]

    y = []       # y is the classes, 1st column
    X = []       # X is the features, 2nd column onwards
    X_test = []  # X_test is features to test on

    # Open training CSV file and save data in X
    with open(trainfeatfile, 'r') as traincsv:
        trainreader = csv.reader(traincsv)
        for row in trainreader:
            X.append(row)

    # Open training classes file and save in y
    with open(trainclassfile, 'r') as trainclass:
        for row in trainclass:
            y.append(int(row))

    predictions = [0] * len(y)

    # Open testing CSV file and save data in X_test
    with open(testfeatfile, 'r') as testcsv:
        testreader = csv.reader(testcsv)
        for row in testreader:
            X_test.append(row)

    # Do N-fold X-val (modern scikit-learn API: n_splits plus split(X, y))
    N = 10
    skf = StratifiedKFold(n_splits=N)
    for train, test in skf.split(X, y):
        # Get training and test subsets
        X_sub = [X[a] for a in train]
        y_sub = [y[a] for a in train]
        X_test_sub = [X[a] for a in test]

        # Train a decision tree classifier for each split.
        clf = tree.DecisionTreeClassifier(min_samples_leaf=50)
        clf.fit(X_sub, y_sub)

        # Predict on test subset and save results
        predictions_sub = clf.predict(X_test_sub)
        for i in range(len(test)):
            predictions[test[i]] = predictions_sub[i]

    # Score results
    scorer = Scorer(0)

    # Compute classification performance
    scorer.printAccuracy(predictions, y, "Training set performance")
    return
def __init__(self, argument_string):
    """
    Initialises metric-specific parameters.
    """
    Scorer.__init__(self, argument_string)
    # use n-gram order of 4 by default
    if 'n' not in self._arguments:
        self._arguments['n'] = 4
def __init__(self, argument_string):
    """
    Initialises metric-specific parameters.
    """
    Scorer.__init__(self, argument_string='')
    self._reference = None
    # use n-gram order of 4 by default
    self.additional_flags = argument_string
def testCalculateMaximumsGreater(self):
    scorer = Scorer()
    scorer.words = self.scorer.get_words_copy()
    scorer.calculate_maximums(n=1000)
    self.assertEqual(3, len(scorer.max_words))
    self.assertEqual(1000, scorer.n)
    self.assertEqual([("word", 10), ("another", 5), ("yet", 2)], scorer.max_words)
def test_wrong_nclass(self, size):
    y_true = [1] * size
    y_pred = [1] * size
    scorer = Scorer()
    with pytest.raises(ValueError):
        scorer.evaluate(y_true, y_pred)
def __init__(self, title, ns_list):
    self.tws = title_split(title)
    self.ns_list = ns_list
    self.scorer = Scorer('xh')
    self.score_li = []
    self.confidence_li = []
    self.sim_var = 0.0
    self.recorder = Recorder()
    self.block = []
def main(file_name):
    # yaml.safe_load avoids the deprecated/unsafe bare yaml.load, which
    # requires an explicit Loader in modern PyYAML
    with open(file_name) as f:
        parsed_match = ScoreParser(yaml.safe_load).parse(f.read())
    scorer = Scorer()
    scores = scorer.produce_scores(parsed_match)
    return {
        "version": "1.0.0",
        "match_number": parsed_match["match_number"],
        "scores": scores,
    }
def test_wrong_size(self, size):
    y_true = np.random.choice([0., 1.], p=[.5, .5], size=(size, ))
    y_pred = np.random.choice([0., 1.], p=[.5, .5], size=(size + 1, ))
    scorer = Scorer()
    with pytest.raises(ValueError):
        scorer.evaluate(y_true, y_pred)
def test_setter(self, size):
    y_true = np.random.choice([0., 1.], p=[.5, .5], size=(size, ))
    y_pred = np.random.choice([0., 1.], p=[.5, .5], size=(size, ))
    scorer = Scorer()
    scorer.evaluate(y_true, y_pred)
    with pytest.warns(UserWarning):
        scorer['Nico'] = 'Nico'
def test_numpy(self, size):
    y_true = np.random.choice([0., 1.], p=[.5, .5], size=(size, ))
    y_pred = np.random.choice([0., 1.], p=[.5, .5], size=(size, ))
    scorer = Scorer()
    result = scorer.evaluate(y_true, y_pred)
    assert isinstance(result, type(scorer))
    assert repr(scorer) == '<Scorer (classes: 2)>'
def check_by_input_file(input_name):
    input_file = os.path.join("test/data/scorer", input_name)
    with open(input_file) as f:
        test_data = yaml.safe_load(f.read())
    scorer = Scorer(test_data['input'])
    scores = scorer.calculate_scores()
    expected_scores = test_data['scores']
    assert scores == expected_scores, "Incorrect scores for '{0}'.".format(input_name)
def _eval_pas(self, arguments_set, dataset: PASDataset, corpus: str, suffix: str = ''):
    prediction_output_dir = self.save_dir / f'{corpus}_out{suffix}'
    prediction_writer = PredictionKNPWriter(dataset,
                                            self.logger,
                                            use_knp_overt=(not self.predict_overt))
    documents_pred = prediction_writer.write(arguments_set, prediction_output_dir)
    documents_gold = dataset.joined_documents if corpus == 'kc' else dataset.documents

    result = {}
    for pas_target in self.pas_targets:
        scorer = Scorer(documents_pred, documents_gold,
                        target_cases=dataset.target_cases,
                        target_exophors=dataset.target_exophors,
                        coreference=dataset.coreference,
                        bridging=dataset.bridging,
                        pas_target=pas_target)
        stem = corpus
        if pas_target:
            stem += f'_{pas_target}'
        stem += suffix
        if self.target != 'test':
            scorer.write_html(self.save_dir / f'{stem}.html')
        scorer.export_txt(self.save_dir / f'{stem}.txt')
        scorer.export_csv(self.save_dir / f'{stem}.csv')

        metrics = self._eval_metrics(scorer.result_dict())
        for met, value in zip(self.metrics, metrics):
            met_name = met.__name__
            if 'case_analysis' in met_name or 'zero_anaphora' in met_name:
                if pas_target:
                    met_name = f'{pas_target}_{met_name}'
            result[met_name] = value
    return result
def _play_turn(self, turn_num, current_score=0):
    num_remaining_dice = 6
    turn_actions = []
    new_actions = []
    scorer = Scorer([])
    while self._should_roll(turn_num, num_remaining_dice, current_score):
        die_rolls = self._roll(num_remaining_dice)
        turn_actions.append(('rolled', die_rolls))
        scorer = Scorer(die_rolls)
        if scorer.is_blown():
            return 0, turn_actions + ['blew it']
        actions = self.strategy.actions(die_rolls)
        turn_actions += actions
        score = scorer.apply_actions(actions)
        current_score = current_score + score
        turn_actions.append(('adding', score, current_score))
        num_remaining_dice = scorer.num_remaining_dice()
        # scoring every die means the player rolls all six again
        num_remaining_dice = num_remaining_dice if num_remaining_dice != 0 else 6
    dice = scorer._make_remaining_dice()
    num_remaining, raw_score = Scorer(dice).raw_score()
    current_score += raw_score
    turn_actions.append(('auto-adding', raw_score, current_score))
    game_over = self.stop_score and current_score + self.total_score >= self.stop_score
    if num_remaining == 0 and not game_over:
        turn_actions.append('rolled over')
        current_score, new_actions = self._play_turn(turn_num, current_score)
    return (current_score, turn_actions + new_actions)
def _valid_epoch(self, data_loader, corpus):
    """
    Validate after training an epoch

    :return: A log that contains information about validation

    Note:
        The validation metrics in log must have the key 'val_metrics'.
    """
    self.model.eval()
    total_loss = 0
    arguments_set: List[List[List[int]]] = []
    contingency_set: List[int] = []
    with torch.no_grad():
        for step, batch in enumerate(data_loader):
            batch = {label: t.to(self.device, non_blocking=True) for label, t in batch.items()}
            loss, *output = self.model(**batch)
            if len(loss.size()) > 0:
                loss = loss.mean()
            pas_scores = output[0]  # (b, seq, case, seq)
            if corpus != 'commonsense':
                arguments_set += torch.argmax(pas_scores, dim=3).tolist()  # (b, seq, case)
            total_loss += loss.item() * pas_scores.size(0)
            if step % self.log_step == 0:
                self.logger.info('Validation [{}/{} ({:.0f}%)] Time: {}'.format(
                    step * data_loader.batch_size,
                    len(data_loader.dataset),
                    100.0 * step / len(data_loader),
                    datetime.datetime.now().strftime('%H:%M:%S')))
    log = {'loss': total_loss / len(data_loader.dataset)}
    self.writer.add_scalar(f'loss/{corpus}', log['loss'])

    if corpus != 'commonsense':
        dataset = data_loader.dataset
        prediction_writer = PredictionKNPWriter(dataset, self.logger)
        documents_pred = prediction_writer.write(arguments_set, None, add_pas_tag=False)
        targets2label = {tuple(): '', ('pred',): 'pred', ('noun',): 'noun', ('pred', 'noun'): 'all'}
        scorer = Scorer(documents_pred, dataset.gold_documents,
                        target_cases=dataset.target_cases,
                        target_exophors=dataset.target_exophors,
                        coreference=dataset.coreference,
                        bridging=dataset.bridging,
                        pas_target=targets2label[tuple(dataset.pas_targets)])
        result = scorer.run()
        log['result'] = result
    else:
        log['f1'] = self._eval_commonsense(contingency_set)
    return log
def grade(pull_request):
    if pull_request.travis_build() is None:
        print("Travis build not found - branch has conflicts")
        return -1
    print(pull_request.travis_build().url())
    pull_request.check_test_modifications()
    scorer = Scorer(pull_request)
    score, comment = scorer.compute()
    print("score:{s} comment:{c}".format(s=score, c=comment))
    return score, comment
def __init__(self, argument_string):
    """
    Initialises metric-specific parameters.
    """
    Scorer.__init__(self, argument_string)
    # use character n-gram order of 6 by default
    if 'n' not in self._arguments:
        self._arguments['n'] = 6
    # use beta = 1 by default (recommendation by Maja Popovic for generative modelling)
    if 'beta' not in self._arguments:
        self._arguments['beta'] = 1
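# For reference, these 'n' and 'beta' arguments feed the chrF score
# (Popovic, 2015), an F-score over averaged character n-gram precision and
# recall. A minimal sketch of the final combination step (the helper name is
# mine, not part of the class above):
def chrf_from_pr(precision, recall, beta=1.0):
    # beta > 1 weights recall more heavily than precision
    if precision + recall == 0.0:
        return 0.0
    b2 = beta ** 2
    return (1 + b2) * precision * recall / (b2 * precision + recall)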
def __init__(self, model, source_file, target_file, num_sentences=1000):
    self.model = model
    self.source_file = source_file
    self.target_file = target_file
    self.scorer = Scorer()
    self.num_sentences = num_sentences
    self.metric_to_gleu = {}
    self.all_gleu_scores = []
    self.metric_to_bad = {}
    self.bad_count = {}
    # self.threshold = 0.2826 - 0.167362
    self.threshold = 0.6
def score(phrases, docs):
    scores = []
    scorer = Scorer(docs)
    for id, phrase in phrases.items():
        p_score = scorer.calculate_score(phrase)
        phrase.score = p_score
        scores.append(p_score)
    scores.sort()
    # integer division: indexing with a float raises TypeError in Python 3
    return scores[len(scores) // 2]
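# Because the list is sorted first, the indexing above returns the (upper)
# median score. A quick check with made-up values:
scores = [0.9, 0.1, 0.7, 0.4]
scores.sort()                    # [0.1, 0.4, 0.7, 0.9]
print(scores[len(scores) // 2])  # 0.7: the upper of the two middle values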
def main(argv):
    # Check number of command line arguments
    if len(argv) != 3:
        print("Usage error:")
        print("python simple_classifier.py <train_features> <train_classes> <test_features>")
        sys.exit()
    else:
        trainfeatfile = argv[0]
        trainclassfile = argv[1]
        testfeatfile = argv[2]

    y = []       # y is the classes, 1st column
    X = []       # X is the features, 2nd column onwards
    X_test = []  # X_test is features to test on

    # Open training CSV file and save data in X
    with open(trainfeatfile, 'r') as traincsv:
        trainreader = csv.reader(traincsv)
        for row in trainreader:
            X.append(row)

    # Open training classes file and save in y
    with open(trainclassfile, 'r') as trainclass:
        for row in trainclass:
            y.append(int(row))

    # Open testing CSV file and save data in X_test
    with open(testfeatfile, 'r') as testcsv:
        testreader = csv.reader(testcsv)
        for row in testreader:
            X_test.append(row)

    # Train a decision tree classifier. Though the default settings
    # aren't very good!
    clf = tree.DecisionTreeClassifier(min_samples_leaf=50)
    clf.fit(X, y)
    predictions = clf.predict(X_test)
    train_predictions = clf.predict(X)

    # Print out the predictions, 1 per line
    for predict in predictions:
        print(predict)

    # Score results
    scorer = Scorer(0)

    # Compute classification performance
    scorer.printAccuracy(train_predictions, y, "Training set performance")
    return
def calculate_maximums(self, n: int = 10) -> None:
    """
    Calculates the maximum values for each file in the list.

    :param int n: the number of maximum values to find
    :return: None
    :rtype: None
    """
    self.combine_words()
    for file in self.files:
        file.calculate_maximums(n=n)
    Scorer.calculate_maximums(self, n=n)
def __init__(self, argument_string):
    Scorer.__init__(self, argument_string)
    # Lock for the METEOR process, which can only handle one request at a time:
    self.lock = threading.Lock()
    # Get necessary arguments for starting METEOR from the argument string
    # parsed in Scorer.__init__()
    self._meteor_language = self._arguments["meteor_language"]
    self._meteor_path = self._arguments["meteor_path"] + "/"
    # Start a METEOR process:
    command = ("java -Xmx2G -jar " + self._meteor_path + "meteor-*.jar - - -l "
               + self._meteor_language + " -stdio")
    self.meteor_process = subprocess.Popen(command,
                                           stdin=subprocess.PIPE,
                                           stdout=subprocess.PIPE,
                                           stderr=subprocess.PIPE,
                                           shell=True)
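# For context, METEOR's -stdio mode is line-oriented: the caller writes a
# "SCORE ||| reference ||| hypothesis" line, reads back a statistics line, and
# exchanges it for a number via "EVAL". A sketch of one round-trip through the
# pipes opened above (this helper is not part of the original class):
def score_segment(self, hypothesis, reference):
    with self.lock:  # METEOR handles one request at a time
        query = "SCORE ||| {} ||| {}\n".format(reference, hypothesis)
        self.meteor_process.stdin.write(query.encode("utf-8"))
        self.meteor_process.stdin.flush()
        stats = self.meteor_process.stdout.readline().decode("utf-8").strip()
        self.meteor_process.stdin.write("EVAL ||| {}\n".format(stats).encode("utf-8"))
        self.meteor_process.stdin.flush()
        return float(self.meteor_process.stdout.readline().strip())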
def __init__(self, argument_string):
    Scorer.__init__(self, argument_string)
    # Lock for the BEER process, which can only handle one request at a time:
    self.lock = threading.Lock()
    # Get necessary arguments for starting BEER from the argument string
    # parsed in Scorer.__init__()
    self._beer_language = self._arguments["beer_language"]
    self._beer_path = self._arguments["beer_path"] + "/"
    # Start a BEER process:
    command = self._beer_path + "beer -l " + self._beer_language + " --workingMode interactive "
    self.beer_process = subprocess.Popen(command,
                                         stdin=subprocess.PIPE,
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE,
                                         shell=True)
def test_invalid_data():
    something = object()
    input_ = {
        "TLA1": something,
        "TLA2": something,
    }
    error = Exception('bacon')

    def checker(tla, data):
        assert data is something, "Wrong data passed to validator"
        raise error

    with mock.patch('scorer.validate_team', create=True) as mock_validate:
        mock_validate.side_effect = checker
        threw = False
        try:
            scorer = Scorer(input_)
            actual = scorer.calculate_scores()
        except Exception as e:
            threw = True
            assert e is error
        assert threw, "Should have experienced an error from the validator"
def minimax(self, board, current_player, depth=0):
    prime_move = 0
    highest_score = -1
    if Scorer.is_game_over(board):
        return (self.score_move(board, current_player, depth), None)
    depth += 1
    for spot in board.available_spots():
        board.place_move(current_player, spot)
        spot_score = -(self.minimax(board, self.switch_players(current_player), depth)[0])
        # undo the trial move
        board.spots[spot - 1] = spot
        if spot_score > highest_score:
            prime_move = spot
            highest_score = spot_score
    return (highest_score, prime_move)
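# The sign flip on the recursive call is the negamax trick: a position's value
# for the player to move is the negation of its value for the opponent. A toy
# illustration on a two-ply tree of leaf scores, independent of the Board and
# Scorer classes above:
def negamax(node):
    # node is either a leaf score (from the mover's perspective)
    # or a list of child nodes
    if isinstance(node, (int, float)):
        return node
    return max(-negamax(child) for child in node)

print(negamax([[-1, 0], [1, 1]]))  # 1: pick the branch worst for the opponent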
class Recommender:
    phrasesRoot = os.getcwd() + "/phrases/"
    corpusRoot = os.path.join(os.getcwd(), "corpus")

    def __init__(self, filename="scored.txt"):
        self.scorer = Scorer()

    # Calculates the semantic orientation of all products
    # based on their reviews and outputs a list of them
    # in descending order
    def recommend(self, filename="recommendations.txt"):
        scores = {}
        nscores = {}  # how many reviews for that product
        for d in os.listdir(Recommender.corpusRoot):
            if os.path.isdir(os.path.join(Recommender.corpusRoot, d)):
                for f in os.listdir(os.path.join(Recommender.corpusRoot, d)):
                    m = re.search(r"(\d+)t(\d+).txt", f)
                    if m:
                        key = "{0}-{1}".format(d, m.groups()[0])
                        so = self.scorer.file_semantic_orientation(
                            os.path.join(Recommender.corpusRoot, d, f))
                        if key in scores:
                            scores[key] = scores[key] + so
                            nscores[key] = nscores[key] + 1
                        else:
                            scores[key] = so
                            nscores[key] = 1
        for key in scores:
            # average multiple reviews for the same product
            scores[key] = scores[key] / nscores[key]
        # items() replaces the Python 2-only iteritems()
        scores = sorted(scores.items(), key=operator.itemgetter(1), reverse=True)
        if filename:
            with open(filename, "w") as f:
                for score in scores:
                    f.write('{0} {1}\n'.format(score[0], score[1]))
        return scores
class Game(object):
    def __init__(self, playerCount=2, firstToAct=1, nextToAct=1, actingOrderPointer=0,
                 roundNumber=1, roundActionNumber=1, deck=None, deckPointer=0, variant='ofc'):
        """
        Initialise Game object

        Each game has a current round number, Player objects and a board object
        for each round

        :param playerCount: int number of players
        :param firstToAct: int playerNumber who acts first this round
        :param deck: 104 char string containing card names, format <rank><suit> * 52
        :return: None
        """
        assert isinstance(playerCount, int)
        assert 2 <= playerCount <= 4
        assert isinstance(firstToAct, int)
        assert 1 <= firstToAct <= 4
        self.playerCount = playerCount
        self.firstToAct = firstToAct
        self.nextToAct = nextToAct
        self.actingOrder = self.generateActingOrder(firstToAct=firstToAct)
        self.actingOrderPointer = actingOrderPointer
        self.roundActionNumber = roundActionNumber
        self.roundNumber = roundNumber
        self.variant = variant
        self.board = Board(playerCount=playerCount, deck=deck, deckPointer=deckPointer)
        self.players = self.createPlayers()
        self.playerIds = self.createPlayerIds()
        self.scoring = Scorer(players=self.players, board=self.board)

    def createPlayers(self):
        """
        Used to initialise the player objects based on given player requirements

        :return: List of player objects in ascending numerical order
        """
        players = []
        for i in range(1, self.playerCount + 1):
            players.append(Player(playerNumber=i))
        return players

    def createPlayerIds(self):
        """
        Generate uuid4 for each player

        This will be used as well as individual game ids for frontend

        :return: List of player ids
        """
        playerIds = []
        for i in range(0, self.playerCount):
            playerIds.append(str(uuid.uuid4()))
        return playerIds

    def resetBoard(self):
        """
        Clears board and generates new deck of cards

        :return: None
        """
        self.board = Board(playerCount=self.playerCount)

    def newRound(self):
        """
        Start a new round

        :return: None
        """
        self.scoreBoard()
        self.resetBoard()
        self.roundNumber += 1
        self.incrementNextToAct()
        self.actingOrder = self.generateActingOrder(self.nextToAct)

    def scoreBoard(self):
        """
        Scores the board

        :return: None
        """
        self.scoring.scoreAll()

    def interpretScores(self):
        """
        Calls and interprets results of scorer

        :return: string scores interpretation
        """
        self.scoreBoard()
        returnStr = ""
        for message in self.scoring.scoresMessages:
            returnStr += message + "\n"
        returnStr += "\n"
        for player in self.players:
            returnStr += ("Player %i's total score after this round = %i\n"
                          % (player.playerNumber, player.score))
        return returnStr

    def generateActingOrder(self, firstToAct=1):
        """
        Generates actingOrder for clockwise rotation of player action

        :param firstToAct: int first player number to act
        :return: List actingOrder [first playerNumber, second playerNumber ..]
        """
        assert isinstance(firstToAct, int)
        assert 1 <= firstToAct <= self.playerCount
        actingOrder = []
        for i in range(firstToAct, self.playerCount + 1):
            actingOrder.append(i)
        for i in range(1, firstToAct):
            actingOrder.append(i)
        return actingOrder

    def incrementNextToAct(self):
        """
        Increments nextToAct var and if necessary, the roundActionNumber

        If last player has acted, go back to first player for next round of placements

        :return: None
        """
        if self.nextToAct == self.actingOrder[self.playerCount - 1]:
            self.actingOrderPointer = 0
            self.nextToAct = self.actingOrder[0]
            self.roundActionNumber += 1
        else:
            self.actingOrderPointer += 1
            self.nextToAct = self.actingOrder[self.actingOrderPointer]

    def getLastActor(self):
        """
        Returns the player number for the agent who acted last

        :return: int player number
        """
        if self.actingOrderPointer > 0:
            return self.actingOrder[self.actingOrderPointer - 1]
        else:
            return self.actingOrder[self.playerCount - 1]

    def handleNextAction(self):
        """
        Determines which method to call next and for which player

        If the last action has happened will pass request to scoring handler
        and return that response instead

        :return: [int playerNumber, int roundActionNumber, [Card card]]
        """
        playerNumber = self.nextToAct
        cardsDealt = []
        if self.roundActionNumber == 1:
            cardsDealt = self.dealFirstHand(playerNumber)
        elif self.roundActionNumber <= 9:
            cardsDealt = self.dealSubsequentRounds(playerNumber)
        else:
            tools.write_error("handleNextAction(): All action for this round has finished!")
            raise ValueError("All action for this round has finished!")
        return [playerNumber, self.roundActionNumber, cardsDealt]

    def dealFirstHand(self, playerNumber):
        """
        Deal 5 cards to the given player

        :param playerNumber: int playerNumber
        :return: [5 card objects]
        """
        assert self.roundActionNumber == 1
        assert isinstance(playerNumber, int)
        assert 1 <= playerNumber <= self.playerCount
        if len(self.players[playerNumber - 1].cards) > 0:
            raise ValueError("Player already has cards dealt!")
        cards = self.board.deck.deal_n(5)
        self.players[playerNumber - 1].cards = cards
        self.incrementNextToAct()
        return cards

    def dealSubsequentRounds(self, playerNumber):
        """
        Deal one card to the given player

        :param playerNumber: int playerNumber
        :return: [1 card object]
        """
        assert self.roundActionNumber > 1
        assert isinstance(playerNumber, int)
        assert 1 <= playerNumber <= self.playerCount
        card = self.board.deck.deal_one()
        self.players[playerNumber - 1].cards.append(card)
        self.incrementNextToAct()
        return [card]
STOP_WORDS = ['d01', 'd02', 'd03', 'd04', 'd05', 'd06', 'd07', 'd08', 'a', 'also',
              'an', 'and', 'are', 'as', 'at', 'be', 'by', 'do', 'for', 'have', 'is',
              'in', 'it', 'of', 'or', 'see', 'so', 'that', 'the', 'this', 'to', 'we']

crawler = Crawler([urljoin(SEED_URL, page) for page in SEED_PAGES])

page_rank = PageRank(crawler.webgraph_in, crawler.webgraph_out)
page_rank.build_graph()

index = Indexer(crawler.contents, STOP_WORDS)
index.build_index()

scorer = Scorer(index)

print("> SIMPLE SEARCH ENGINE (by Tammo, Tim & Flo)")
while True:
    scores = scorer.calculate_scores(input("\n> query: "))
    if not scores:
        print("your search term does not occur on any page")
        continue
    ranked_scores = [(url, score, page_rank.get_rank(url), score * page_rank.get_rank(url))
                     for url, score in scores.items()]
    print("\n url | score | rank | rank * score\n" + "-" * 54)
    for url, score, rank, ranked_score in sorted(ranked_scores,
                                                 key=lambda element: element[3],
                                                 reverse=True):
        print(" ..{} | {:.4f} | {:.4f} | {:.4f}".format(url[-15:], round(score, 6),
                                                        round(rank, 6),
                                                        round(ranked_score, 6)))
print("\n# Indexer TEST [doc length]") d08_len = index.documents_length['http://mysql12.f4.htw-berlin.de/crawl/d08.html'] print(" d08 length: " + ("OK" if round(d08_len, 6) == 2.727447 else "WRONG")) d06_len = index.documents_length['http://mysql12.f4.htw-berlin.de/crawl/d06.html'] print(" d06 length: " + ("OK" if round(d06_len, 6) == 1.974093 else "WRONG")) d04_len = index.documents_length['http://mysql12.f4.htw-berlin.de/crawl/d04.html'] print(" d04 length: " + ("OK" if round(d04_len, 6) == 4.312757 else "WRONG")) print("\n# Scorer TEST") scorer = Scorer(index) tokens_scores = scorer.calculate_scores('tokens') tokens_scores_check = all( ((round(tokens_scores['http://mysql12.f4.htw-berlin.de/crawl/d08.html'], 6) == 0.119897), (round(tokens_scores['http://mysql12.f4.htw-berlin.de/crawl/d02.html'], 6) == 0.093106), (round(tokens_scores['http://mysql12.f4.htw-berlin.de/crawl/d04.html'], 6) == 0.061577), (round(tokens_scores['http://mysql12.f4.htw-berlin.de/crawl/d01.html'], 6) == 0.051784), (round(tokens_scores['http://mysql12.f4.htw-berlin.de/crawl/d03.html'], 6) == 0.045677))) print(" 'tokens' score: " + ("OK" if tokens_scores_check else "WRONG")) index_scores = scorer.calculate_scores('index') index_scores_check = all( ((round(index_scores['http://mysql12.f4.htw-berlin.de/crawl/d08.html'], 6) == 0.250207), (round(index_scores['http://mysql12.f4.htw-berlin.de/crawl/d05.html'], 6) == 0.233073), (round(index_scores['http://mysql12.f4.htw-berlin.de/crawl/d04.html'], 6) == 0.098769)))
def play(self):
    # cards may be unlimited - 4 suits in a deck
    start = True
    while start:
        player = Player()
        computer = Player()
        player.addCards(self.deck.getCard())
        player.addCards(self.deck.getCard())
        if self.__isAllFaceCards(player.getCards()):
            player.setPlayStatus(False)
        scorer = Scorer()
        while player.getPlayStatus():
            scorer.addPointsToPlayer(self.__addUpCards(player.getCards()))
            print("Player cards: %s total: %d" % (player.getCards(), scorer.getTotalPlayer()))
            if scorer.isBusted('p'):
                print("Player busted!")
                player.setBurstedStatus(True)
                player.setPlayStatus(False)
            elif scorer.isBlackjack():
                print("Blackjack!")
                player.setPlayStatus(False)
            else:
                hit_or_stand = input("hit or stand? h / s : ")
                if hit_or_stand == "h":
                    player.addCards(self.deck.getCard())
                else:
                    player.setPlayStatus(False)
        while computer.getPlayStatus():
            # almost the same for the computer
            computer.addCards(self.deck.getCard())
            computer.addCards(self.deck.getCard())
            # hit until the computer has more than 18
            while True:
                scorer.addPointsToComputer(self.__addUpCards(computer.getCards()))
                if scorer.getTotalComputer() <= 18:
                    computer.addCards(self.deck.getCard())
                else:
                    break
            print("computer cards: %s total: %d" % (computer.getCards(), scorer.getTotalComputer()))
            # who is the winner
            if scorer.getTotalComputer() > 21:
                print("computer busted!")
                if not player.getBurstedStatus():
                    print("player wins!")
            elif scorer.getTotalComputer() > scorer.getTotalPlayer():
                print("computer wins!")
            elif scorer.getTotalComputer() == scorer.getTotalPlayer():
                print("Draw!")
            elif scorer.getTotalPlayer() > scorer.getTotalComputer():
                if not player.getBurstedStatus():
                    print("player wins!")
                elif not computer.getBurstedStatus():
                    print("computer busted")
            computer.setPlayStatus(False)
        carryon = input("would you like to continue? y or n : ")
        if carryon != "y":
            start = False
def is_over(self):
    if Scorer.is_game_won(self.board):
        self.presenter.winner_message(self.current_player.mark)
    elif Scorer.is_game_stalemate(self.board):
        self.presenter.stalemate_message()
    return Scorer.is_game_over(self.board)
def score_move(self, board, current_player, depth):
    if Scorer.is_game_won(board):
        # the player to move has just been handed a lost position, so the
        # value is negative; deeper (slower) losses are penalised less
        return 1.0 / -depth
    else:
        return 0
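# A worked check of the depth weighting, read together with the negation in
# minimax above: a win found at depth 1 scores 1.0 / -1 = -1.0 for the side now
# to move, which the caller negates to +1.0 for the side that just won; a win
# at depth 3 becomes -1/3, i.e. roughly +0.33 after negation, so the search
# prefers faster wins and slower losses.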
def __init__(self):
    Scorer.__init__(self)
def __init__(self, filename="scored.txt"): self.scorer = Scorer()