Example #1
	def _play_turn(self, turn_num, current_score=0):
		num_remaining_dice = 6
		turn_actions = []
		new_actions = []
		scorer = Scorer([])
		while self._should_roll(turn_num, num_remaining_dice, current_score):
			die_rolls = self._roll(num_remaining_dice)
			turn_actions.append(('rolled', die_rolls))
			scorer = Scorer(die_rolls)
			if scorer.is_blown():
				return 0, turn_actions + ['blew it']
			actions = self.strategy.actions(die_rolls)
			turn_actions += actions
			score = scorer.apply_actions(actions)
			current_score += score
			turn_actions.append(('adding', score, current_score))
			# a roll that scored all remaining dice grants a fresh set of six
			num_remaining_dice = scorer.num_remaining_dice() or 6
		dice = scorer._make_remaining_dice()
		num_remaining, raw_score = Scorer(dice).raw_score()
		current_score += raw_score
		turn_actions.append(('auto-adding', raw_score, current_score))
		game_over = self.stop_score and current_score + self.total_score >= self.stop_score
		if num_remaining == 0 and not game_over:
			turn_actions.append('rolled over')
			current_score, new_actions = self._play_turn(turn_num, current_score)
		return (current_score, turn_actions + new_actions)
Example #2
    def __init__(self, playerCount=2, firstToAct=1, nextToAct=1, actingOrderPointer=0,
                 roundNumber=1, roundActionNumber=1, deck=None, deckPointer=0, variant='ofc'):
        """
        Initialise Game object
        Each game has a current round number, Player objects and a board object for each round
        :param playerCount: int number of players
        :param firstToAct: int playerNumber who acts first this round
        :param deck: 104 char string containing card names format <rank><suit>*52
        :return: None
        """
        assert isinstance(playerCount, int)
        assert 2 <= playerCount <= 4
        assert isinstance(firstToAct, int)
        assert 1 <= firstToAct <= 4

        self.playerCount = playerCount
        self.firstToAct = firstToAct
        self.nextToAct = nextToAct
        self.actingOrder = self.generateActingOrder(firstToAct=firstToAct)
        self.actingOrderPointer = actingOrderPointer
        self.roundActionNumber = roundActionNumber
        self.roundNumber = roundNumber
        self.variant = variant

        self.board = Board(playerCount=playerCount,
                           deck=deck,
                           deckPointer=deckPointer)
        self.players = self.createPlayers()
        self.playerIds = self.createPlayerIds()

        self.scoring = Scorer(players=self.players, board=self.board)
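The acting order above comes from generateActingOrder, whose body is outside this snippet. A minimal sketch of what it plausibly returns, assuming a simple clockwise rotation starting at firstToAct (both the rotation rule and the body are assumptions, not the project's code):

    def generateActingOrder(self, firstToAct=1):
        # Hypothetical: rotate player numbers 1..playerCount so that firstToAct
        # leads, e.g. playerCount=4, firstToAct=3 -> [3, 4, 1, 2].
        return [(firstToAct - 1 + i) % self.playerCount + 1
                for i in range(self.playerCount)]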
Example #3
    def _eval_pas(self,
                  arguments_set,
                  dataset: PASDataset,
                  corpus: str,
                  suffix: str = '') -> Dict[str, ScoreResult]:
        prediction_output_dir = self.save_dir / f'{corpus}_out{suffix}'
        prediction_writer = PredictionKNPWriter(
            dataset, self.logger, use_knp_overt=(not self.predict_overt))
        documents_pred = prediction_writer.write(arguments_set,
                                                 prediction_output_dir,
                                                 add_pas_tag=False)

        log = {}
        for pas_target in self.pas_targets:
            scorer = Scorer(documents_pred,
                            dataset.gold_documents,
                            target_cases=dataset.target_cases,
                            target_exophors=dataset.target_exophors,
                            coreference=dataset.coreference,
                            bridging=dataset.bridging,
                            pas_target=pas_target)
            result = scorer.run()
            target = corpus + (f'_{pas_target}' if pas_target else '') + suffix

            scorer.write_html(self.save_dir / f'{target}.html')
            result.export_txt(self.save_dir / f'{target}.txt')
            result.export_csv(self.save_dir / f'{target}.csv')

            log[pas_target] = result

        return log
Example #4
    def _eval_pas(self, arguments_set, dataset: PASDataset, corpus: str, suffix: str = ''):
        prediction_output_dir = self.save_dir / f'{corpus}_out{suffix}'
        prediction_writer = PredictionKNPWriter(dataset,
                                                self.logger,
                                                use_knp_overt=(not self.predict_overt))
        documents_pred = prediction_writer.write(arguments_set, prediction_output_dir)
        documents_gold = dataset.joined_documents if corpus == 'kc' else dataset.documents

        result = {}
        for pas_target in self.pas_targets:
            scorer = Scorer(documents_pred, documents_gold,
                            target_cases=dataset.target_cases,
                            target_exophors=dataset.target_exophors,
                            coreference=dataset.coreference,
                            bridging=dataset.bridging,
                            pas_target=pas_target)

            stem = corpus
            if pas_target:
                stem += f'_{pas_target}'
            stem += suffix
            if self.target != 'test':
                scorer.write_html(self.save_dir / f'{stem}.html')
            scorer.export_txt(self.save_dir / f'{stem}.txt')
            scorer.export_csv(self.save_dir / f'{stem}.csv')

            metrics = self._eval_metrics(scorer.result_dict())
            for met, value in zip(self.metrics, metrics):
                met_name = met.__name__
                if pas_target and ('case_analysis' in met_name or 'zero_anaphora' in met_name):
                    met_name = f'{pas_target}_{met_name}'
                result[met_name] = value

        return result
Example #5
def results(results_id, res=None):
    if not res:
        res = load_results(results_id)
        if not res:
            abort(404)
    scorer = Scorer(res)
    scores = {
        'total_score': scorer.total_score(),
        'cross_domain_existence_score': scorer.cross_domain_existence_score(),
        'sri_score': scorer.sri_score(),
        'mixed_content_score': scorer.mixed_content_score(),
        'leaking_server_software_info_score': scorer.leaking_server_software_info_score(),
        'third_party_libs_score': scorer.third_party_libs_score(),
        'cache_control_score': scorer.cache_control_score(),
        'referrer_policy_score': scorer.referrer_policy_score(),
        'csrf_score': scorer.csrf_score(),
        'csp_score': scorer.csp_score(),
        'cors_score': scorer.cors_score(),
        'cors_policy_score': scorer.cors_policy_score(),
        'cookie_security_score': scorer.cookie_security_score(),
        'expect_ct_score': scorer.expect_ct_score(),
        'x_download_options_score': scorer.x_download_options_score(),
        'x_frame_options_score': scorer.x_frame_options_score(),
        'x_xss_protection_score': scorer.x_xss_protection_score(),
        'x_content_type_options_score': scorer.x_content_type_options_score(),
        'hpkp_score': scorer.hpkp_score(),
        'hsts_score': scorer.hsts_score(),
        'tls_score': scorer.tls_score(),
        'http_redirection_score': scorer.http_redirection_score()
    }
    return render_template('results.html', results=res, scores=scores)
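The view above is plainly a Flask handler (abort, render_template), but its route registration is outside the snippet. A minimal sketch of the wiring, where the app object and the URL pattern are assumptions:

from flask import Flask

app = Flask(__name__)

# Hypothetical registration; the real URL pattern is not shown in the snippet.
app.add_url_rule('/results/<results_id>', view_func=results)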
Example #6
    def eval_openset(self):
        self.sequential_extract(self.valid_test,
                                f"{self.project_dir}/tmp/test.h5")
        if self.valid_enroll:
            self.sequential_extract(self.valid_enroll,
                                    f"{self.project_dir}/tmp/enroll.h5")
            enroll_embedding = f"{self.project_dir}/tmp/enroll.h5"
        else:
            enroll_embedding = f"{self.project_dir}/tmp/test.h5"

        if self.valid_target:
            self.sequential_extract(self.valid_target,
                                    f"{self.project_dir}/tmp/target.h5")
            data_target = h52dict(f"{self.project_dir}/tmp/target.h5")
            transform_lst = [PCA(whiten=True)]
            for transform in transform_lst:
                # fitting on the target-domain embeddings is the point here;
                # the transformed output is discarded
                transform.fit_transform(data_target["X"])
        else:
            transform_lst = None

        if self.score_paras is None:
            self.score_paras = {}
        scorer = Scorer(
            comp_minDCF=False,
            enroll=enroll_embedding,
            test=f"{self.project_dir}/tmp/test.h5",
            ndx_file=self.valid_trial_list,
            transforms=transform_lst,
            **self.score_paras,
        )
        eer = scorer.batch_cosine_score()

        with open(f"{self.logger_dir}/validation.log", "a") as f:
            f.write(f"{self.epoch} EER is {eer}\n")
Example #7
    def __init__(self, rows, cols, iterable=None):
        super().__init__(gs.BOARD_POS)
        self.rows = rows
        self.cols = cols
        self.m = [[0] * cols for _ in range(rows)]
        self.tiles = [[None] * cols for _ in range(rows)]
        self.tiles_to_destroy = []
        self.tiles_to_spawn = []
        self.should_wait_for_move_finished = False
        self.tile_factory = TileFactory(self)
        self.scorer = Scorer((10, 5))

        board_image = pygame.image.load("data/images/board.png")
        board_image_size = (gs.BOARD_WIDTH + gs.BOARD_BORDER,
                            gs.BOARD_HEIGHT + gs.BOARD_BORDER)
        self.board_image = pygame.transform.scale(board_image,
                                                  board_image_size)

        if iterable is not None:
            for n, (i, j) in enumerate(
                    itertools.product(range(self.rows), range(self.cols))):
                val = iterable[n]
                if val:
                    self.m[i][j] = val
                    self.tiles[i][j] = self.tile_factory.create(val, i, j)
Example #8
 @classmethod
 def setUpClass(cls):
     """Sets up the TestScorer class by creating a simple Scorer object."""
     cls.scorer = Scorer()
     for word, total in [("word", 10), ("another", 5), ("yet", 2)]:
         for _ in range(total):
             cls.scorer.add_word(word)
     cls.scorer.calculate_maximums(2)
Example #9
    def test_getter(self, size):

        y_true = [1] * size
        y_pred = [1] * size
        y_true.append(0)
        y_pred.append(0)

        scorer = Scorer()

        with pytest.raises(ValueError):
            print(scorer['ACC(Accuracy)'])

        scorer.evaluate(y_true, y_pred)

        assert scorer.num_classes == 2

        np.testing.assert_allclose(
            scorer['FP(False positive/type 1 error/false alarm)'],
            np.zeros(shape=(len(set(y_true)), )))
        np.testing.assert_allclose(
            scorer.score['FN(False negative/miss/type 2 error)'],
            np.zeros(shape=(len(set(y_true)), )))
        np.testing.assert_allclose(scorer.score['ACC(Accuracy)'],
                                   np.ones(shape=(len(set(y_true)), )))

        with pytest.raises(KeyError):
            print(scorer['dummy'])
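Why perfect predictions give all-zero FP/FN vectors and an all-ones accuracy is easy to verify by hand. A per-class check in plain numpy (my own illustration, not the Scorer internals):

import numpy as np

y_true = np.array([1, 1, 1, 0])
y_pred = y_true.copy()  # perfect predictions

for cls in np.unique(y_true):
    fp = np.sum((y_pred == cls) & (y_true != cls))  # predicted cls, was not cls
    fn = np.sum((y_pred != cls) & (y_true == cls))  # was cls, predicted otherwise
    print(cls, fp, fn)  # 0 and 0 for every class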
Example #10
    def score(self, y, y_hat, score="rmse"):
        """
        Calculates a score metric for the learning algorithm.

        Parameters
        ----------
        y : array_like
            A vector of shape (m, ) of true values at each data point.

        y_hat : array_like
            A vector of shape (m, ) of predicted values.

        score : string
            The type of metric to be used in evaluation.
            - rmse : root mean squared error
            - mse : mean squared error

        Returns
        -------
        score_metric : float
        """
        scorer = Scorer()
        if score == "rmse":
            score_metric = scorer.rmse_(y, y_hat)
        elif score == "mse":
            score_metric = scorer.mse_(y, y_hat)
        else:
            raise ValueError("unknown score metric: {!r}".format(score))

        return score_metric
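Both metrics are standard. A numpy sketch of what rmse_ and mse_ presumably compute, inferred from the names alone (the actual method bodies are not in the snippet):

import numpy as np

def mse_(y, y_hat):
    # mean squared error: average of the squared residuals
    return float(np.mean((np.asarray(y) - np.asarray(y_hat)) ** 2))

def rmse_(y, y_hat):
    # root mean squared error: square root of the MSE
    return float(np.sqrt(mse_(y, y_hat)))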
Example #11
    def __init__(self, model, source_file, target_file, test_source_file,
                 test_target_file, raw_source_file, raw_target_file,
                 num_sentences=400, batch_translate=True):
        self.model = model
        self.source_file = source_file
        self.target_file = target_file
        self.loader = LanguagePairLoader("de", "en", source_file, target_file)
        self.test_loader = LanguagePairLoader("de", "en", test_source_file, test_target_file)
        self.extractor = DomainSpecificExtractor(
            source_file=raw_source_file,
            train_source_file=hp.source_file,
            train_vocab_file="train_vocab.pkl")
        self.target_extractor = DomainSpecificExtractor(
            source_file=raw_target_file,
            train_source_file=hp.source_file,
            train_vocab_file="train_vocab_en.pkl")
        self.scorer = Scorer()
        self.scores = {}
        self.num_sentences = num_sentences
        self.batch_translate = batch_translate
        self.evaluate_every = 10

        self.metric_bleu_scores = {}
        self.metric_gleu_scores = {}
        self.metric_precisions = {}
        self.metric_recalls = {}

        # Plot each metric
        plt.style.use('seaborn-darkgrid')
        self.palette = sns.color_palette()
Example #12
def make_scorer(args):

    bidirectional = args.bidirectional
    enc_hidden_size = hidden_size // 2 if bidirectional else hidden_size
    if args.useObjLabelOrVis == 'none':
        feature_size, action_embedding_size = 2048 + 128, 2048 + 128
    elif args.useObjLabelOrVis == 'vis':
        feature_size, action_embedding_size = 2048 + 128 + args.objVisFeatDim, 2048 + 128 + args.objVisFeatDim
    elif args.useObjLabelOrVis == 'label':
        feature_size, action_embedding_size = 2048 + 128 + args.objLanFeatDim, 2048 + 128 + args.objLanFeatDim
    elif args.useObjLabelOrVis == 'both':
        feature_size = 2048 + 128 + args.objVisFeatDim + args.objLanFeatDim
        action_embedding_size = 2048 + args.objVisFeatDim + args.objLanFeatDim + 128
    else:
        raise ValueError("unknown useObjLabelOrVis: {!r}".format(args.useObjLabelOrVis))

    traj_encoder = try_cuda(
        SpeakerEncoderLSTM(action_embedding_size,
                           feature_size,
                           enc_hidden_size,
                           dropout_ratio,
                           bidirectional=args.bidirectional))
    scorer_module = try_cuda(DotScorer(enc_hidden_size, enc_hidden_size))
    scorer = Scorer(scorer_module, traj_encoder)
    if args.load_scorer != '':
        scorer.load(args.load_scorer)
        print(colorize('load scorer traj ' + args.load_scorer))
    elif args.load_traj_encoder != '':
        scorer.load_traj_encoder(args.load_traj_encoder)
        print(colorize('load traj encoder ' + args.load_traj_encoder))
    return scorer
Example #13
    def __init__(self,
                 model,
                 source_file,
                 target_file,
                 source_file2,
                 target_file2,
                 num_sentences=1000,
                 beam_size=3):
        self.model = model
        self.source_file = source_file
        self.target_file = target_file
        self.source_file2 = source_file2
        self.target_file2 = target_file2
        self.num_sentences = num_sentences
        self.beam_size = beam_size

        self.translationList = []
        self.pairs = []
        self.scoresList = []

        self.scorer = Scorer()

        self.metric_to_cter = {}
        self.all_cter_scores = []

        self.metric_to_bad = {}
Example #14
def main(argv):

    # Check number of command line arguments
    if len(argv) != 3:
        print("Usage:")
        print("python simple_classifier.py <train_features> <train_classes> <test_features>")
        sys.exit(1)
    else:
        trainfeatfile = argv[0]
        trainclassfile = argv[1]
        testfeatfile = argv[2]

    y = []       # y is the classes, 1st column
    X = []       # X is the features, 2nd column onwards
    X_test = []  # X_test is features to test on

    # Open training CSV file and save data in X
    with open(trainfeatfile,'r') as traincsv:
        trainreader = csv.reader(traincsv)
        for row in trainreader:
            X.append(row)

    # Open training classes file and save in y
    with open(trainclassfile,'r') as trainclass:
        for row in trainclass:
            y.append(int(row))
    predictions = [0] * len(y)

    # Open testing CSV file and save data in X_test
    with open(testfeatfile,'r') as testcsv:
        testreader = csv.reader(testcsv)
        for row in testreader:
            X_test.append(row)

    # Do N-fold X-val
    N = 10
    skf = StratifiedKFold(n_splits=N)  # modern scikit-learn cross-validator API

    for train, test in skf.split(X, y):
        # Get training and test subsets
        X_sub = [X[a] for a in train]
        y_sub = [y[a] for a in train]
        X_test_sub = [X[a] for a in test]
        #Train a decision tree classifier for each split.
        clf = tree.DecisionTreeClassifier(min_samples_leaf=50)
        clf.fit(X_sub,y_sub)
        #Predict on test subset and save results
        predictions_sub = clf.predict(X_test_sub)
        for i in range(len(test)):
            predictions[test[i]] = predictions_sub[i]


    # Score results
    scorer = Scorer(0)

    # Compute classification performance
    scorer.printAccuracy(predictions, y, "Training set performance")

    return
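The manual fold loop can be collapsed into a single call. A sketch using scikit-learn's cross_val_predict, which produces the same out-of-fold predictions (assuming the X and y arrays built above):

from sklearn import tree
from sklearn.model_selection import StratifiedKFold, cross_val_predict

clf = tree.DecisionTreeClassifier(min_samples_leaf=50)
# One out-of-fold prediction per sample, over 10 stratified folds.
predictions = cross_val_predict(clf, X, y, cv=StratifiedKFold(n_splits=10))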
Example #15
def main():

    pred_with_tweets = '../data/trial.csv'  # predicted labels + tweet text
    gold = '../data/trial.labels'  # file contains gold labels
    mycorpus = Corpus(pred_with_tweets, gold)
    myscores = Scorer(mycorpus)
    myresult = Result()
    myresult.show(myscores)
Example #16
 def testCalculateMaximumsGreater(self):
     scorer = Scorer()
     scorer.words = self.scorer.get_words_copy()
     scorer.calculate_maximums(n=1000)
     self.assertEqual(3, len(scorer.max_words))
     self.assertEqual(1000, scorer.n)
     self.assertEqual([("word", 10), ("another", 5), ("yet", 2)],
                      scorer.max_words)
Example #17
 def __init__(self, title, ns_list):
     self.tws = title_split(title)
     self.ns_list = ns_list
     self.scorer = Scorer('xh')
     self.score_li = []
     self.confidence_li = []
     self.sim_var = 0.0
     self.recorder = Recorder()
     self.block = []
Example #18
 def testAddWord(self):
     """Tests that adding words to the dictionary counter works as expected."""
     fs = Scorer()
     fs.add_word("word")
     expected_dict = {"word": 1}
     self.assertEqual(expected_dict, fs.words)
     fs.add_word("word")
     expected_dict = {"word": 2}
     self.assertEqual(expected_dict, fs.words)
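add_word evidently maintains a plain word-to-count mapping. An equivalent using collections.Counter (an illustration, not the class under test):

from collections import Counter

words = Counter()
words['word'] += 1
words['word'] += 1
assert words == {'word': 2}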
Example #19
 def __init__(self, ph: Posting_handler):
     """
     Args:
         ph: a posting handler
     """
     self.scorer = Scorer(ph)
     self.total_docs = ph.num_doc
     self.ph = ph
     self.pattern = re.compile(r"^[a-zA-Z][a-zA-Z']+$")
Example #20
    def test_wrong_size(self, size):

        y_true = np.random.choice([0., 1.], p=[.5, .5], size=(size, ))
        y_pred = np.random.choice([0., 1.], p=[.5, .5], size=(size + 1, ))

        scorer = Scorer()

        with pytest.raises(ValueError):
            scorer.evaluate(y_true, y_pred)
Example #21
    def test_wrong_nclass(self, size):

        y_true = [1] * size
        y_pred = [1] * size

        scorer = Scorer()

        with pytest.raises(ValueError):
            scorer.evaluate(y_true, y_pred)
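Taken together, the two tests above pin down two validation paths in evaluate: mismatched lengths and a single-class label set. A minimal sketch of checks consistent with both (my own illustration, not the library's code):

import numpy as np

def _validate(y_true, y_pred):
    y_true, y_pred = np.asarray(y_true), np.asarray(y_pred)
    if len(y_true) != len(y_pred):
        raise ValueError('y_true and y_pred have different lengths')
    if len(np.unique(y_true)) < 2:
        raise ValueError('evaluation needs at least two classes')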
Example #22
def get_nlp_data(path):

    data = pd.read_pickle(path)

    scorer = Scorer()
    data['clean_def'] = data['definition'].apply(clean_def)
    data['nlp_doc'] = data['clean_def'].apply(lambda x: add_nlp_doc(x, scorer))
    data['leaderboard'] = np.empty((len(data), 0)).tolist()

    return data
Example #23
    def test_numpy(self, size):

        y_true = np.random.choice([0., 1.], p=[.5, .5], size=(size, ))
        y_pred = np.random.choice([0., 1.], p=[.5, .5], size=(size, ))

        scorer = Scorer()
        _ = scorer.evaluate(y_true, y_pred)

        assert isinstance(_, type(scorer))
        assert repr(scorer) == '<Scorer (classes: 2)>'
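The asserted repr implies it is rendered from the detected class count (num_classes also appears in Example #9). A hypothetical sketch of such a method:

    def __repr__(self):
        # Assumes the evaluated class count is stored as self.num_classes.
        return f'<Scorer (classes: {self.num_classes})>'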
Example #24
    def test_keys(self):

        y_true = ['a', 'b', 'a', 'a', 'b', 'c', 'c', 'a', 'a', 'b', 'c', 'a']
        y_pred = ['b', 'b', 'a', 'c', 'b', 'a', 'c', 'b', 'a', 'b', 'a', 'a']

        scorer = Scorer()
        assert len(scorer.keys()) == 0

        scorer.evaluate(y_true, y_pred)
        assert len(scorer.keys()) == 116
Example #25
 def test_get_nlp_data(self):
     data = pd.read_pickle(path)
     self.assertEqual(type(data), pandas.core.frame.DataFrame)
     scorer = Scorer()
     data['clean_def'] = data['definition'].apply(clean_def)
     data['nlp_doc'] = data['clean_def'].apply(
                         lambda x: add_nlp_doc(x, scorer))
     data['leaderboard'] = np.empty((len(data), 0)).tolist()
     self.assertEqual(type(data['leaderboard'].iloc[0]), list)
     self.assertTrue(all(col in data for col in ['leaderboard', 'clean_def', 'nlp_doc']))
Example #26
    def test_setter(self, size):

        y_true = np.random.choice([0., 1.], p=[.5, .5], size=(size, ))
        y_pred = np.random.choice([0., 1.], p=[.5, .5], size=(size, ))

        scorer = Scorer()
        scorer.evaluate(y_true, y_pred)

        with pytest.warns(UserWarning):
            scorer['Nico'] = 'Nico'
Example #27
def check_by_input_file(input_name):
    input_file = os.path.join("test/data/scorer", input_name)

    with open(input_file) as f:
        test_data = yaml.safe_load(f)
    scorer = Scorer(test_data['input'])
    scores = scorer.calculate_scores()

    expected_scores = test_data['scores']

    assert scores == expected_scores, "Incorrect scores for '{0}'.".format(input_name)
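The loader implies each fixture carries two top-level keys, input and scores. A hypothetical minimal fixture parsed the same way (the fields inside each block are assumptions):

import yaml

fixture = """
input:
  answers: [1, 2, 3]
scores:
  total: 0
"""
test_data = yaml.safe_load(fixture)
assert set(test_data) == {'input', 'scores'}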
Example #28
    def _valid_epoch(self, data_loader, corpus):
        """
        Validate after training an epoch
        :return: A log that contains information about validation
        Note:
            The validation metrics in log must have the key 'val_metrics'.
        """
        self.model.eval()
        total_loss = 0
        arguments_set: List[List[List[int]]] = []
        contingency_set: List[int] = []
        with torch.no_grad():
            for step, batch in enumerate(data_loader):
                batch = {label: t.to(self.device, non_blocking=True) for label, t in batch.items()}

                loss, *output = self.model(**batch)

                if len(loss.size()) > 0:
                    loss = loss.mean()
                pas_scores = output[0]  # (b, seq, case, seq)

                if corpus != 'commonsense':
                    arguments_set += torch.argmax(pas_scores, dim=3).tolist()  # (b, seq, case)

                total_loss += loss.item() * pas_scores.size(0)

                if step % self.log_step == 0:
                    self.logger.info('Validation [{}/{} ({:.0f}%)] Time: {}'.format(
                        step * data_loader.batch_size,
                        len(data_loader.dataset),
                        100.0 * step / len(data_loader),
                        datetime.datetime.now().strftime('%H:%M:%S')))

        log = {'loss': total_loss / len(data_loader.dataset)}
        self.writer.add_scalar(f'loss/{corpus}', log['loss'])

        if corpus != 'commonsense':
            dataset = data_loader.dataset
            prediction_writer = PredictionKNPWriter(dataset, self.logger)
            documents_pred = prediction_writer.write(arguments_set, None, add_pas_tag=False)
            targets2label = {(): '', ('pred',): 'pred', ('noun',): 'noun', ('pred', 'noun'): 'all'}

            scorer = Scorer(documents_pred, dataset.gold_documents,
                            target_cases=dataset.target_cases,
                            target_exophors=dataset.target_exophors,
                            coreference=dataset.coreference,
                            bridging=dataset.bridging,
                            pas_target=targets2label[tuple(dataset.pas_targets)])
            result = scorer.run()
            log['result'] = result
        else:
            log['f1'] = self._eval_commonsense(contingency_set)

        return log
Example #29
def grade(pull_request):
    if pull_request.travis_build() is None:
        print("no Travis build - branch has conflicts")
        return -1
    print(pull_request.travis_build().url())
    pull_request.check_test_modifications()

    scorer = Scorer(pull_request)
    score, comment = scorer.compute()
    print("score:{s} comment:{c}".format(s=score, c=comment))
    return score, comment
Example #30
    @staticmethod  # no self in the original signature, so this reads as a static method
    def score(phrases, docs):
        scores = []
        scorer = Scorer(docs)

        for id, phrase in phrases.items():
            p_score = scorer.calculate_score(phrase)
            phrase.score = p_score
            scores.append(p_score)

        scores.sort()

        return scores[len(scores) // 2]  # index must be an int in Python 3
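Indexing the sorted list at len(scores) // 2 returns the upper-middle element, not the conventional median for even-length input. The standard library makes the difference visible:

import statistics

scores = [1, 2, 3, 4]
print(scores[len(scores) // 2])   # 3   (upper-middle element)
print(statistics.median(scores))  # 2.5 (mean of the two middle values)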