Example #1
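Post-training static quantization of a trained NNUE model: fuse each layer/activation pair, calibrate on a small subset, convert to int8, compare MSE against the float baseline, and write the result out in .nnue format.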
def main():
    # Load the trained network and put it in inference mode.
    nnue = M.NNUE.load_from_checkpoint('last.ckpt')
    nnue.eval()

    # Fuse each linear layer with its activation so the pairs are
    # quantized as single modules.
    fuse_layers = [
        ['input', 'input_act'],
        ['l1', 'l1_act'],
        ['l2', 'l2_act'],
    ]
    torch.quantization.fuse_modules(nnue, fuse_layers, inplace=True)

    train = nnue_bin_dataset.NNUEBinData('d8_100000.bin')
    train_small = torch.utils.data.Subset(train, range(0, len(train) // 1000))
    train_loader = DataLoader(train_small)
    val_loader = DataLoader(nnue_bin_dataset.NNUEBinData('d10_10000.bin'))
    trainer = pl.Trainer()

    # Post-training static quantization: attach a qconfig, insert
    # observers, run a small calibration pass, then convert to int8.
    nnue.qconfig = torch.quantization.get_default_qconfig('fbgemm')
    nnue_prep = torch.quantization.prepare(nnue)
    trainer.test(nnue_prep, train_loader)
    nnue_int8 = torch.quantization.convert(nnue_prep)
    #trainer.test(nnue_int8, train_loader)

    # Compare evaluation error before and after quantization.
    print('Baseline MSE:', compute_mse(nnue, train))
    print('Quantized MSE:', compute_mse(nnue_int8, train))

    # Serialize the quantized network to a .nnue file.
    writer = NNUEQuantizedWriter(nnue_int8)
    with open('quantized.nnue', 'wb') as f:
        f.write(writer.buf)
Example #2
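A minimal factory that builds unshuffled train/validation DataLoaders over NNUEBinData files.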
def data_loader_py(train_filename, val_filename, num_workers, batch_size):
    train = DataLoader(nnue_bin_dataset.NNUEBinData(train_filename),
                       batch_size=batch_size,
                       shuffle=False,
                       num_workers=num_workers)
    val = DataLoader(nnue_bin_dataset.NNUEBinData(val_filename),
                     batch_size=batch_size,
                     shuffle=False)
    return train, val
Example #3
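A loader-factory variant that threads a feature set through to NNUEBinData and shuffles the training stream across four workers. (The main_device parameter is unused in this snippet.)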
def data_loader_py(train_filename, val_filename, feature_set, batch_size,
                   main_device):
    train = DataLoader(nnue_bin_dataset.NNUEBinData(train_filename,
                                                    feature_set),
                       batch_size=batch_size,
                       shuffle=True,
                       num_workers=4)
    val = DataLoader(nnue_bin_dataset.NNUEBinData(val_filename, feature_set),
                     batch_size=32)
    return train, val
Example #4
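A small command-line evaluator: load a model from a pickled .pt file or a Lightning .ckpt checkpoint and report its MSE on a .bin dataset.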
def main():
    parser = argparse.ArgumentParser(
        description="Runs evaluation for a model.")
    parser.add_argument("model", help="Source file (can be .ckpt, .pt)")
    parser.add_argument("--dataset",
                        default="data.bin",
                        help="Dataset to evaluate on (.bin)")
    args = parser.parse_args()

    # .pt files hold a pickled model; .ckpt files are Lightning checkpoints.
    if args.model.endswith(".pt"):
        nnue = torch.load(args.model, map_location=torch.device('cpu'))
    else:
        nnue = M.NNUE.load_from_checkpoint(args.model)
    data = nnue_bin_dataset.NNUEBinData(args.dataset)

    #trainer = pl.Trainer()
    #trainer.test(nnue, DataLoader(data, batch_size=128))
    print('MSE:', compute_mse(nnue, data))
Example #5
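A parity check between the C++ sparse-batch reader (nnue_dataset) and the pure-Python NNUEBinData reader, evaluating the same model on one sample from each.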
def test(model):
    import nnue_dataset
    dataset = 'd8_100000.bin'

    # Pull one batch from the C++ sparse-batch reader and evaluate.
    stream_cpp = nnue_dataset.SparseBatchDataset(halfkp.NAME, dataset, 1)
    stream_cpp_iter = iter(stream_cpp)
    tensors_cpp = next(stream_cpp_iter)[:4]
    print('cpp:', tensors_cpp[3])
    print(model(*tensors_cpp))

    # Pull the same position from the pure-Python reader and reshape the
    # flat sample tensors into a batch of one.
    stream_py = nnue_bin_dataset.NNUEBinData(dataset)
    stream_py_iter = iter(stream_py)
    tensors_py = next(stream_py_iter)
    print('python:', torch.nonzero(tensors_py[3]).squeeze())
    tensors_py = [v.reshape((1, -1)) for v in tensors_py[:4]]

    # Remap the input-layer weights with the project's coalesce_weights
    # helper before evaluating on the Python-side tensors.
    weights = coalesce_weights(model.input.weight.data)
    model.input.weight = torch.nn.Parameter(weights)
    print(model(*tensors_py))
Example #6
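A self-contained training loop around NNUEBinData: device transfer, checkpoint resume, periodic loss plotting and saving, and a step-decay learning-rate schedule.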
def main():
  config = C.Config('config.yaml')

  # Move every tensor in a sample tuple to the configured device.
  sample_to_device = lambda x: tuple(map(lambda t: t.to(config.device, non_blocking=True), x))

  M = model.NNUE().to(config.device)

  # Resume from an existing checkpoint if one is present.
  if path.exists(config.model_save_path):
    print('Loading model ... ')
    M.load_state_dict(torch.load(config.model_save_path))

  data = nnue_bin_dataset.NNUEBinData(config)
  data_loader = torch.utils.data.DataLoader(
      data,
      batch_size=config.batch_size,
      num_workers=config.num_workers,
      pin_memory=True,
      worker_init_fn=nnue_bin_dataset.worker_init_fn)

  opt = optim.Adadelta(M.parameters(), lr=config.learning_rate)
  scheduler = optim.lr_scheduler.StepLR(opt, 1, gamma=0.5)

  loss_history = []  # smoothed loss per test interval, for plotting
  queue = []         # recent per-step losses, bounded by max_queue_size

  for epoch in range(1, config.epochs + 1):
    for i, sample in enumerate(data_loader):
      # Periodically refresh the loss plot.
      if (i % config.test_rate) == 0 and i != 0:
        loss_history.append(sum(queue) / len(queue))
        plt.clf()
        plt.plot(loss_history)
        plt.savefig('{}/loss_graph.png'.format(config.visual_directory), bbox_inches='tight')

      # Periodically save both the binary export and the state dict.
      if (i % config.save_rate) == 0 and i != 0:
        print('Saving model ...')
        M.to_binary_file(config.bin_model_save_path)
        torch.save(M.state_dict(), config.model_save_path)

      train_step(M, sample_to_device(sample), opt, queue,
                 max_queue_size=config.max_queue_size,
                 lambda_=config.lambda_,
                 report=(0 == i % config.report_rate))

    # Halve the learning rate after each epoch.
    scheduler.step()
Example #7
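A one-line factory wrapping the NNUEBinData constructor.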
def make_data_reader(data_path, feature_set):
    return nnue_bin_dataset.NNUEBinData(data_path, feature_set)
Example #8
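Throughput timing for a batch provider, followed by an element-wise diff between the C++ and Python streams. The snippet begins mid-function, so the test_stream signature below is inferred from the call at the bottom.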
def test_stream(stream_type, batch_size=None):  # signature inferred from the call below
    if batch_size:
        stream = stream_type(halfkp.NAME, 'd8_100000.bin', batch_size)
    else:
        stream = stream_type(halfkp.NAME, 'd8_100000.bin')

    # Time how long it takes to pull ~4096 samples in total.
    start_time = time.time()
    for i in range(4096 // (batch_size if batch_size else 1)):
        tensors = next(stream)
    end_time = time.time()
    print('{:6.3f} seconds'.format(end_time - start_time))

    del stream


test_stream(nnue_dataset.SparseBatchProvider, 256)

stream_py = nnue_bin_dataset.NNUEBinData('d8_100000.bin')
stream_cpp = nnue_dataset.SparseBatchDataset(halfkp.NAME, 'd8_100000.bin', 256)

stream_py_iter = iter(stream_py)
stream_cpp_iter = iter(stream_cpp)

# Element-wise cross-check: each C++ batch of 256 should match the next
# 256 samples from the Python reader.
diff = 0.0
for i in range(10):
    # Gather a batch
    tensors_cpp = next(stream_cpp_iter)
    for j in range(256):
        tensors_py = next(stream_py_iter)
        diff += sum((a - b[j]).norm() for a, b in zip(tensors_py, tensors_cpp))
print('Diff: {}'.format(diff))
Example #9
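A standalone script that iterates NNUEBinData sequentially and prints each raw sample.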
import torch
import nnue_bin_dataset
import config as C

config = C.Config('config.yaml')

d = nnue_bin_dataset.NNUEBinData(config)
#d = torch.utils.data.DataLoader(d, num_workers=config.num_workers, worker_init_fn=nnue_bin_dataset.worker_init_fn)

# Iterate the dataset sequentially, without a DataLoader, and print each sample.
for i in d.seq_data_iter():
    print(i)