Example #1
def make_data_loaders(train_filename, val_filename, feature_set, num_workers,
                      batch_size, filtered, random_fen_skipping, main_device):
    # Epoch and validation sizes are arbitrary
    epoch_size = 100000000
    val_size = 1000000
    features_name = feature_set.name
    train_infinite = nnue_dataset.SparseBatchDataset(
        features_name,
        train_filename,
        batch_size,
        num_workers=num_workers,
        filtered=filtered,
        random_fen_skipping=random_fen_skipping,
        device=main_device)
    val_infinite = nnue_dataset.SparseBatchDataset(
        features_name,
        val_filename,
        batch_size,
        filtered=filtered,
        random_fen_skipping=random_fen_skipping,
        device=main_device)
    # num_workers has to be 0 for sparse, and 1 for dense
    # it currently cannot work in parallel mode but it shouldn't need to
    train = DataLoader(nnue_dataset.FixedNumBatchesDataset(
        train_infinite, (epoch_size + batch_size - 1) // batch_size),
                       batch_size=None,
                       batch_sampler=None)
    val = DataLoader(nnue_dataset.FixedNumBatchesDataset(
        val_infinite, (val_size + batch_size - 1) // batch_size),
                     batch_size=None,
                     batch_sampler=None)
    return train, val
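
A minimal usage sketch for the function above (the file names, the 'HalfKP' name, and all hyperparameter values are placeholder assumptions; make_data_loaders only reads feature_set.name, so a simple stand-in object is enough here):

from types import SimpleNamespace

# Placeholder values throughout; only .name of the feature set is used above.
feature_set = SimpleNamespace(name='HalfKP')
train, val = make_data_loaders('train.binpack', 'val.binpack', feature_set,
                               num_workers=1, batch_size=8192, filtered=True,
                               random_fen_skipping=3, main_device='cuda:0')
for batch in train:
    # batch_size=None in the DataLoader disables collation, so each item here
    # is already a complete sparse batch rather than a single position
    break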
Example #2
def data_loader_cc(train_filename, val_filename, num_workers, batch_size,
                   filtered, random_fen_skipping, main_device):
    # Epoch and validation sizes are arbitrary
    epoch_size = 100000000
    val_size = 2000000
    train_infinite = nnue_dataset.SparseBatchDataset(
        train_filename,
        batch_size,
        num_workers=num_workers,
        filtered=filtered,
        random_fen_skipping=random_fen_skipping,
        device=main_device)
    val_infinite = nnue_dataset.SparseBatchDataset(
        val_filename,
        batch_size,
        filtered=filtered,
        random_fen_skipping=random_fen_skipping,
        device=main_device)
    train = DataLoader(nnue_dataset.FixedNumBatchesDataset(
        train_infinite, (epoch_size + batch_size - 1) // batch_size),
                       batch_size=None,
                       batch_sampler=None)
    val = DataLoader(nnue_dataset.FixedNumBatchesDataset(
        val_infinite, (val_size + batch_size - 1) // batch_size),
                     batch_size=None,
                     batch_sampler=None)
    return train, val
Example #3
def data_loader_cc(train_filename, val_filename, data_name, num_workers, batch_size, filtered):
  # Epoch and validation sizes are arbitrary
  epoch_size = int(3e7)
  val_size = epoch_size // 20
  train_infinite = nnue_dataset.SparseBatchDataset(data_name, train_filename, batch_size, num_workers=num_workers, filtered=filtered)
  val_infinite = nnue_dataset.SparseBatchDataset(data_name, val_filename, batch_size, filtered=filtered)
  # num_workers has to be 0 for sparse, and 1 for dense
  # it currently cannot work in parallel mode but it shouldn't need to
  train = DataLoader(nnue_dataset.FixedNumBatchesDataset(train_infinite, (epoch_size + batch_size - 1) // batch_size), batch_size=None, batch_sampler=None)
  val = DataLoader(nnue_dataset.FixedNumBatchesDataset(val_infinite, (val_size + batch_size - 1) // batch_size), batch_size=None, batch_sampler=None)
  return train, val
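
Every example sizes its FixedNumBatchesDataset with (n + batch_size - 1) // batch_size, which is integer ceiling division: the number of batches needed to cover n positions when the last batch may be partial. A standalone check of the idiom:

def ceil_div(n, d):
    # equivalent to math.ceil(n / d) for positive integers, without float rounding
    return (n + d - 1) // d

assert ceil_div(int(3e7), 8192) == 3663  # 3662 full batches plus one partial batch
assert ceil_div(8192, 8192) == 1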
Example #4
def create_data_loaders(train_filename, val_filename, epoch_size, val_size,
                        batch_size, use_factorizer, main_device):
    train_dataset = nnue_dataset.SparseBatchDataset(
        train_filename,
        epoch_size,
        batch_size,
        use_factorizer, (epoch_size + batch_size - 1) // batch_size,
        device=main_device)
    val_dataset = nnue_dataset.SparseBatchDataset(
        val_filename,
        val_size,
        batch_size,
        use_factorizer, (val_size + batch_size - 1) // batch_size,
        device=main_device)

    train = DataLoader(train_dataset, batch_size=None, batch_sampler=None)
    val = DataLoader(val_dataset, batch_size=None, batch_sampler=None)

    return train, val
Example #5
def gather_statistics_from_data(filename, count, bucket_size):
    '''
    Takes a .bin or .binpack file and produces perf% statistics
    The result is a dictionary of the form { eval : (perf%, count) }
    '''
    batch_size = 8192
    cyclic = True
    smart_fen_skipping = True
    # we pass whatever feature set because we have to pass something
    # it doesn't actually matter, all we care about are the scores and outcomes
    # this is just the easiest way to do it
    dataset = nnue_dataset.SparseBatchDataset('HalfKP', filename, batch_size, cyclic, smart_fen_skipping)
    batches = iter(dataset)
    num_batches = (count + batch_size - 1) // batch_size
    data = gather_statistics_from_batches((next(batches) for i in range(num_batches)), bucket_size)
    return data
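
A hypothetical call to the helper above; the file name and parameter values are placeholders, and the docstring only guarantees a dictionary of the form { eval : (perf%, count) }:

stats = gather_statistics_from_data('training_data.binpack', count=1000000, bucket_size=16)
for ev in sorted(stats):
    perf, n = stats[ev]
    print('eval {:>6}: perf {:.3f} over {} positions'.format(ev, perf, n))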
Example #6
def test(model):
    import nnue_dataset
    dataset = 'd8_100000.bin'
    stream_cpp = nnue_dataset.SparseBatchDataset(halfkp.NAME, dataset, 1)
    stream_cpp_iter = iter(stream_cpp)
    tensors_cpp = next(stream_cpp_iter)[:4]
    print('cpp:', tensors_cpp[3])
    print(model(*tensors_cpp))

    stream_py = nnue_bin_dataset.NNUEBinData(dataset)
    stream_py_iter = iter(stream_py)
    tensors_py = next(stream_py_iter)
    print('python:', torch.nonzero(tensors_py[3]).squeeze())
    tensors_py = [v.reshape((1, -1)) for v in tensors_py[:4]]

    weights = coalesce_weights(model.input.weight.data)
    model.input.weight = torch.nn.Parameter(weights)
    print(model(*tensors_py))
Example #7
def main():
    parser = argparse.ArgumentParser(
        description="Runs evaluation for a model.")
    parser.add_argument("model",
                        help="Source file (can be .ckpt, .pt or .nnue)")
    parser.add_argument("--dataset",
                        default="d8_128000_21865.binpack",
                        help="Dataset to evaluate on (.bin)")
    args = parser.parse_args()

    if args.model.endswith(".pt"):
        nnue = torch.load(args.model, map_location=torch.device('cpu'))
    else:
        nnue = M.NNUE.load_from_checkpoint(args.model)

    val_infinite = nnue_dataset.SparseBatchDataset(halfkp.NAME, args.dataset,
                                                   8000)
    data = nnue_dataset.FixedNumBatchesDataset(val_infinite, 16)

    trainer = pl.Trainer(progress_bar_refresh_rate=0)
    for i in range(21):
        nnue.lambda_ = i / 20.0
        loss = trainer.test(nnue, data, verbose=False)
        print(nnue.lambda_, ",", loss[0]['test_loss'])
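
The sweep above only prints each (lambda_, test_loss) pair; a purely illustrative variation that also keeps the results and reports the interpolation constant with the lowest loss, reusing the nnue, trainer and data objects from the example:

results = []
for i in range(21):
    nnue.lambda_ = i / 20.0
    test_loss = trainer.test(nnue, data, verbose=False)[0]['test_loss']
    results.append((nnue.lambda_, test_loss))
best_lambda, best_loss = min(results, key=lambda r: r[1])
print('best lambda_: {} (test_loss {})'.format(best_lambda, best_loss))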
Example #8
import time

import halfkp
import nnue_dataset
import nnue_bin_dataset


def test_stream(stream_type, batch_size=None):
    # Signature assumed from the call below; times pulling ~4096 positions
    # from the given stream type, one batch (or one position) at a time.
    if batch_size:
        stream = stream_type(halfkp.NAME, 'd8_100000.bin', batch_size)
    else:
        stream = stream_type(halfkp.NAME, 'd8_100000.bin')

    start_time = time.time()
    for i in range(4096 // (batch_size if batch_size else 1)):
        tensors = next(stream)
    end_time = time.time()
    print('{:6.3f} seconds'.format(end_time - start_time))

    del stream


test_stream(nnue_dataset.SparseBatchProvider, 256)

stream_py = nnue_bin_dataset.NNUEBinData('d8_100000.bin')
stream_cpp = nnue_dataset.SparseBatchDataset(halfkp.NAME, 'd8_100000.bin', 256)

stream_py_iter = iter(stream_py)
stream_cpp_iter = iter(stream_cpp)

diff = 0.0
for i in range(10):
    # Gather a batch
    tensors_cpp = next(stream_cpp_iter)
    for j in range(256):
        tensors_py = next(stream_py_iter)
        diff += sum((a - b[j]).norm() for a, b in zip(tensors_py, tensors_cpp))
print('Diff: {}'.format(diff))