コード例 #1
0
def test_get_sf_evaluation_cloud_mate_in_x():
    """A cloud-cached forced-mate position returns the 9999 sentinel."""

    # lichess already holds a cloud evaluation for this endgame FEN,
    # so the stockfish location and depth arguments are never consulted
    mate_fen = '8/8/6k1/8/6KP/6P1/8/8 b - - 0 1'

    result = transforms.get_sf_evaluation(mate_fen, '', 1)

    # mate-in-x for white is reported as the sentinel value 9999
    assert result == 9999
コード例 #2
0
def test_get_sf_evaluation_cloud():
    """A cloud-cached opening position returns its centipawn score."""

    # lichess already holds a cloud evaluation for this FEN, so the
    # stockfish location and depth arguments are never consulted
    cloud_fen = 'r1bqkb1r/pp1ppppp/2n2n2/2p5/8/1P3NP1/PBPPPP1P/RN1QKB1R b KQkq - 0 1'

    result = transforms.get_sf_evaluation(cloud_fen, '', 1)

    assert result == -0.29
コード例 #3
0
def test_get_sf_evaluation_double_checkmate():
    """The same mating position scores +/-9999 depending on side to move."""

    cfg = ConfigParser()
    cfg.read('luigi.cfg')
    engine_location = cfg['stockfish_cfg']['location']

    # (fen, expected sentinel) — identical board, opposite side to move
    cases = [
        ('6k1/4pppp/6r1/3b4/4r3/8/1Q5P/1R5K w - - 0 0', 9999),
        ('6k1/4pppp/6r1/3b4/4r3/8/1Q5P/1R5K b - - 0 0', -9999),
    ]

    for fen, expected in cases:
        assert transforms.get_sf_evaluation(fen, engine_location, 20) == expected
コード例 #4
0
def test_get_sf_evaluation_in_checkmate():
    """A position that is already checkmate evaluates to None."""

    checkmated_fen = '4Rb1k/7Q/8/1p4N1/p7/8/1P4PK/8 b - - 4 0'

    cfg = ConfigParser()
    cfg.read('luigi.cfg')
    engine_location = cfg['stockfish_cfg']['location']

    result = transforms.get_sf_evaluation(checkmated_fen, engine_location, 20)

    assert result is None
コード例 #5
0
def test_get_sf_evaluation_checkmate_white():
    """A forced mate for white is reported as the 9999 sentinel."""

    mating_fen = '5rk1/4Q1b1/8/pp6/8/7N/1P2R1PK/8 w - - 1 0'

    cfg = ConfigParser()
    cfg.read('luigi.cfg')
    engine_location = cfg['stockfish_cfg']['location']

    result = transforms.get_sf_evaluation(mating_fen, engine_location, 20)

    assert result == 9999
コード例 #6
0
def test_get_sf_evaluation_checkmate_black():
    """A forced mate for black is reported as the -9999 sentinel."""

    mating_fen = '8/5q1k/7p/4Q2r/P3P3/4R1P1/7p/3R1r1K w - - 3 0'

    cfg = ConfigParser()
    cfg.read('luigi.cfg')
    engine_location = cfg['stockfish_cfg']['location']

    result = transforms.get_sf_evaluation(mating_fen, engine_location, 20)

    assert result == -9999
コード例 #7
0
def test_get_sf_evaluation_deep():
    """Deep (depth 20) local evaluation of a middlegame position.

    Stockfish versions 10 through 13 all score this position -0.89; the
    original four-branch if/elif chain repeated the identical assert, so
    it is collapsed into a single equivalent guard. Other engine versions
    remain intentionally unasserted, matching the original behavior.
    """

    fen = 'r1bq1rk1/1pp3b1/3p2np/nP2P1p1/4Pp2/PN3NP1/1B3PBP/R2Q1RK1 b - - 2 0'

    cfg = ConfigParser()
    cfg.read('luigi.cfg')
    stockfish_loc = cfg['stockfish_cfg']['location']

    depth = 20

    rating = transforms.get_sf_evaluation(fen, stockfish_loc, depth)

    # versions are detected by substring of the configured binary path
    if any(version in stockfish_loc for version in ('10', '11', '12', '13')):
        assert rating == -0.89
コード例 #8
0
    def run(self):
        """Explode per-game evaluation lists into one row per position,
        fill missing evaluations from the remote DB and (optionally) a
        local stockfish, and write the combined frame to the output pickle.
        """
        from pandas import read_pickle, to_numeric, concat, DataFrame

        self.output().makedirs()

        with self.input().open('r') as f:
            df = read_pickle(f, compression=None)

        if df.empty:
            # nothing to evaluate: persist the empty frame and stop.
            # (A dead local `def complete(self)` here was removed — defining
            # a function inside run() had no effect on the task.)
            with self.output().temporary_path() as temp_output_path:
                df.to_pickle(temp_output_path, compression=None)

            return

        stockfish_params = stockfish_cfg()

        df = df[['evaluations', 'eval_depths', 'positions']]

        # explode the two different list-likes separately, then concat:
        # rows with no evaluations at all are split off for local analysis
        no_evals = df[~df['evaluations'].astype(bool)]
        df = df[df['evaluations'].astype(bool)]

        no_evals = DataFrame(no_evals['positions'].explode())
        no_evals['positions'] = get_clean_fens(no_evals['positions'])

        evals = df['evaluations'].explode().reset_index(drop=True)
        depths = df['eval_depths'].explode().reset_index(drop=True)
        positions = df['positions'].explode().reset_index(drop=True)
        positions = get_clean_fens(positions)

        # fetch any evaluations the remote DB already has for these FENs
        sql = """SELECT fen, evaluation, eval_depth
                 FROM position_evals
                 WHERE fen IN %(positions)s;
                 """
        db_evaluations = run_remote_sql_query(sql,
                                              positions=tuple(positions.tolist() + no_evals['positions'].tolist()),  # noqa
                                              )
        positions_evaluated = db_evaluations['fen'].drop_duplicates()

        df = concat([positions, evals, depths], axis=1)

        if self.local_stockfish:

            local_evals = []

            counter = 0
            position_count = len(no_evals['positions'])

            for position in no_evals['positions'].tolist():
                if position in positions_evaluated.values:
                    # already in the DB; row dropped by dropna below
                    evaluation = None
                else:
                    # BUG FIX: previously `evaluation` was only assigned when
                    # the engine returned a score, so a None result (e.g. a
                    # checkmated position) appended the stale value from the
                    # previous iteration — and raised NameError if it happened
                    # first. Assign unconditionally; None rows are dropped by
                    # dropna below.
                    evaluation = get_sf_evaluation(position + ' 0',
                                                   stockfish_params.location,
                                                   stockfish_params.depth)
                local_evals.append(evaluation)

                # progress bar stuff
                counter += 1

                current_progress = counter / position_count
                self.set_status_message(f'Analyzed :: '
                                        f'{counter} / {position_count}')
                self.set_progress_percentage(round(current_progress * 100, 2))

            self.set_status_message(f'Analyzed all {position_count} positions')
            self.set_progress_percentage(100)

            no_evals['evaluations'] = local_evals
            no_evals['eval_depths'] = stockfish_params.depth
            no_evals.dropna(inplace=True)

            df = concat([df, no_evals], axis=0, ignore_index=True)

        # drop positions the DB already covers; DB rows are appended below
        df = df[~df['positions'].isin(positions_evaluated)]

        df.rename(columns={'evaluations': 'evaluation',
                           'eval_depths': 'eval_depth',
                           'positions': 'fen'},
                  inplace=True)
        # coerce malformed evaluations to NaN so dropna removes them
        df['evaluation'] = to_numeric(df['evaluation'],
                                      errors='coerce')

        df.dropna(inplace=True)
        df = concat([df, db_evaluations], axis=0, ignore_index=True)

        with self.output().temporary_path() as temp_output_path:
            df.to_pickle(temp_output_path, compression=None)