Code example #1
File: leela_lite.py  Project: so-much-meta/leela_lite
def load_leela_network():
    global net, nn
    if network_id is not None:
        # A network ID was supplied: evaluate through the net_client backend
        net = load_network(backend='net_client', network_id=network_id)
    else:
        # Otherwise load the local weights file on the pytorch CUDA backend
        net = load_network(backend='pytorch_cuda', filename=weights)
    # Wrap the net for search, caching at most min(5000, nodes) evaluations
    nn = search.NeuralNet(net=net, lru_size=min(5000, nodes))
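
As a usage note (not part of the original file): the helper above reads the module-level names network_id, weights and nodes and leaves its results in the globals net and nn. A minimal sketch of driving it, with placeholder values and a UCT_search call shaped like the one in code example #8:

# Hypothetical module-level configuration read by load_leela_network()
network_id = None            # set to an lczero network ID to use the net_client backend
weights = 'weights.txt.gz'   # placeholder path to a local weights file
nodes = 800                  # search budget; also caps the evaluation LRU cache

load_leela_network()
board = LeelaBoard()
best, root = search.UCT_search(board, nodes, net=net, C=3.4)  # call shape as in code example #8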
Code example #2
File: engine.py  Project: oscardssmith/leela_lite
def load_leela_network():
    global net, nn
    if network_id is not None:
        net = load_network(backend='net_client', network_id=network_id, policy_softmax_temp=2.2)
    else:
        net = load_network(backend='pytorch_cuda', filename=weights, policy_softmax_temp=2.2)
    nn = search.NeuralNet(net=net, lru_size=max(5000, nodes))
Code example #3
def load(self):
    if dummy:
        # Dummy mode: no model is loaded and a DummyBatchProcessor stands in
        # for the real batch processor
        self.model = None
        self.batch_processor = DummyBatchProcessor(self.context,
                                                   self.model,
                                                   self.network_id,
                                                   self.batch_queue,
                                                   self.response_queue)
    else:
        # Take the class-wide lock so only one CUDA model load runs at a time
        with BatchProcessor._cuda_lock:
            net = load_network(backend='pytorch_cuda',
                               filename=self.weights_file,
                               half=self.half)
        self.model = net.model
        self.batch_processor = BatchProcessor(self.context, self.model,
                                              self.network_id,
                                              self.batch_queue,
                                              self.response_queue)
Code example #4
File: leela_lite.py  Project: kmcrage/leela_lite
                    "--black",
                    help="the engine to use for black",
                    choices=search.engines.keys(),
                    default=default_engine)
parser.add_argument("-n",
                    "--nodes",
                    help="the engine to use for black",
                    type=int,
                    default=800)
parser.add_argument("-v", "--verbosity", action="count", default=0)
args = parser.parse_args()

backend = 'pytorch_cuda' if os.path.exists(
    '/opt/bin/nvidia-smi') else 'pytorch_cpu'
net = load_network(backend=backend,
                   filename=args.weights,
                   policy_softmax_temp=2.2)
nn = search.NeuralNet(net=net)
board = LeelaBoard()

players = [{
    'engine': args.white,
    'root': None,
    'resets': 0
}, {
    'engine': args.black,
    'root': None,
    'resets': 0
}]

turn = 0
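
The excerpt stops right before the game loop. Purely as an illustration of how the players list and turn counter could be consumed (the real loop in kmcrage/leela_lite is not shown here, and the per-player 'engine' selection is ignored), with a UCT_search call shaped like the one in code example #8 and push_uci as in code example #6:

# Sketch only: alternate the two players until the game ends
# (assumes LeelaBoard exposes is_game_over() and that UCT_search
# returns the best move as a UCI string plus the search root).
while not board.is_game_over():
    player = players[turn]
    # The NeuralNet wrapper built above is passed here; code example #8 passes the raw net instead
    best, player['root'] = search.UCT_search(board, args.nodes, net=nn, C=3.4)
    board.push_uci(best)
    turn = 1 - turn   # 0 = white, 1 = black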
Code example #5
import chess.pgn
import time
import json
import collections

import lcztools

# NOTE: Policy values seem to be a tiny bit off from lc0...
#       The reason for this seems to be usage of src/mcts/node.cc::Edge::SetP in lc0
#       This function applies some rounding to policy values
#
# Changing tolerance from 0.00006 to 0.0005
TOLERANCE = 0.0005

engine = LC0Engine()
board = lcztools.LeelaBoard()
# engine.evaluate(board())
net = lcztools.load_network()


def fix_policy_float(policy):
    '''Numpy to normal python float, for json dumps'''
    return collections.OrderedDict((k, float(v)) for k, v in policy.items())


g_max_policy_error = 0
g_max_value_error = 0
g_mse_policy = 0
g_mse_value = 0
g_se_policy_sum = 0
g_se_value_sum = 0
g_policy_samples = 0
g_value_samples = 0
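
The globals above are only initialised in this excerpt. As an illustration of how they might be accumulated (this helper is hypothetical, not the original test's code), assuming the net and the engine each yield a per-move policy dict plus a scalar value, as in code example #7's eval_equal:

def update_stats(net_policy, net_value, eng_policy, eng_value):
    '''Hypothetical helper: fold one position's errors into the global stats'''
    global g_max_policy_error, g_max_value_error, g_mse_policy, g_mse_value
    global g_se_policy_sum, g_se_value_sum, g_policy_samples, g_value_samples
    for uci, p in net_policy.items():
        err = abs(p - eng_policy[uci])
        g_max_policy_error = max(g_max_policy_error, err)
        g_se_policy_sum += err ** 2
        g_policy_samples += 1
    if eng_value is not None:
        verr = abs(net_value - eng_value)
        g_max_value_error = max(g_max_value_error, verr)
        g_se_value_sum += verr ** 2
        g_value_samples += 1
    g_mse_policy = g_se_policy_sum / max(1, g_policy_samples)
    g_mse_value = g_se_value_sum / max(1, g_value_samples)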
Code example #6
File: test.py  Project: shellster/lczero_tools
import os
import sys
import json
import numpy as np

# For now, if using the tensorflow backend, tfprocess is imported to build the network,
# so training/tf has to be in the Python path
sys.path.append(os.path.expanduser('~/git/leela-chess/training/tf'))

weights_file = os.path.expanduser('~/git/leela-chess/release/weights.txt.gz')

import lcztools

def json_default(obj):
    # Serialize numpy floats
    if isinstance(obj, np.floating):
        return float(obj)
    raise TypeError

print("Test pytorch")
lcz_net = lcztools.load_network('pytorch', weights_file)
lcz_board = lcztools.LeelaBoard()
print(lcz_board)
policy, value = lcz_net.evaluate(lcz_board)
print('Policy: {}'.format(json.dumps(policy, default=json_default, indent=3)))
print('Value: {}'.format(value))

lcz_board.push_uci('e2e4')
print(lcz_board)
policy, value = lcz_net.evaluate(lcz_board)
print('Policy: {}'.format(json.dumps(policy, default=json_default, indent=3)))
print('Value: {}'.format(value))


print("Test tensorflow")
lcz_net = lcztools.load_network('tensorflow', weights_file)
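
The excerpt ends right after the tensorflow network is loaded; presumably the same evaluation as above is then repeated against it. A minimal sketch of that repetition (not taken from the original file):

lcz_board = lcztools.LeelaBoard()
policy, value = lcz_net.evaluate(lcz_board)   # same evaluate() API as the pytorch backend above
print('Policy: {}'.format(json.dumps(policy, default=json_default, indent=3)))
print('Value: {}'.format(value))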
Code example #7
import os
import sys
sys.path.append(os.path.expanduser('~/git/lczero_tools/src/'))
import lcztools
from lcztools.testing.leela_engine import LCZEngine
import numpy as np
import chess.pgn
import time

engine_path = os.path.expanduser('~/git/leela-chess/release/lczero')
weights_file = os.path.expanduser('~/git/leela-chess/release/weights.txt.gz')
engine = LCZEngine(engine_path, weights_file)
board = lcztools.LeelaBoard()
engine.evaluate(board.lcz_to_uci_engine_board())
net = lcztools.load_network('pytorch', weights_file)


def eval_equal(neteval, engineeval, tolerance=.00006):
    '''Return True if net and engine agree on every policy entry and on the value within tolerance'''
    npol, nv = neteval
    epol, ev = engineeval
    for uci in npol:
        if abs(npol[uci] - epol[uci]) > tolerance:
            return False
    if (ev is not None) and (abs(nv - ev) > tolerance):
        return False
    return True


net_eval_time = 0
engine_eval_time = 0
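
The two timers above are only initialised in this excerpt. As an illustration of how they and eval_equal might be used together (the original test's loop is not shown), timing one evaluation of the same position through both backends:

# Sketch only: assumes LCZEngine.evaluate() returns (policy_dict, value),
# matching what eval_equal expects from net.evaluate().
start = time.time()
net_eval = net.evaluate(board)
net_eval_time += time.time() - start

start = time.time()
engine_eval = engine.evaluate(board.lcz_to_uci_engine_board())
engine_eval_time += time.time() - start

assert eval_equal(net_eval, engine_eval)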
Code example #8
from lcztools import load_network, LeelaBoard
import search
import chess
import chess.pgn
import sys
import datetime

weights = sys.argv[1]

board = LeelaBoard()

net = load_network(backend='pytorch_cuda',
                   filename=weights,
                   policy_softmax_temp=2.2)
nn = search.NeuralNet(net=net)
NODES = 10000


def do_nn():
    best, node = search.UCT_search(board, NODES, net=net, C=3.4)


start = datetime.datetime.now()
do_nn()
fini = datetime.datetime.now()

diff = fini - start
usecs = diff.microseconds + (diff.seconds * 1000000.0)  # elapsed time in microseconds
nps = round(NODES * 1000000.0 / usecs)

print("{} nps".format(nps))