Code Example #1
File: train.py  Project: vondele/nnue-pytorch
import nnue_dataset
from torch.utils.data import DataLoader

def make_data_loaders(train_filename, val_filename, feature_set, num_workers,
                      batch_size, filtered, random_fen_skipping, main_device):
    # Epoch and validation sizes are arbitrary
    epoch_size = 100000000
    val_size = 1000000
    features_name = feature_set.name
    train_infinite = nnue_dataset.SparseBatchDataset(
        features_name,
        train_filename,
        batch_size,
        num_workers=num_workers,
        filtered=filtered,
        random_fen_skipping=random_fen_skipping,
        device=main_device)
    val_infinite = nnue_dataset.SparseBatchDataset(
        features_name,
        val_filename,
        batch_size,
        filtered=filtered,
        random_fen_skipping=random_fen_skipping,
        device=main_device)
    # num_workers has to be 0 for sparse, and 1 for dense
    # it currently cannot work in parallel mode but it shouldn't need to
    train = DataLoader(nnue_dataset.FixedNumBatchesDataset(
        train_infinite, (epoch_size + batch_size - 1) // batch_size),
                       batch_size=None,
                       batch_sampler=None)
    val = DataLoader(nnue_dataset.FixedNumBatchesDataset(
        val_infinite, (val_size + batch_size - 1) // batch_size),
                     batch_size=None,
                     batch_sampler=None)
    return train, val
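A minimal sketch of how make_data_loaders might be wired into a training run. The feature-set lookup (features.get_feature_set_from_name), the M.NNUE constructor signature, and all of the concrete values below are assumptions for illustration, not taken from the example above.

import features
import model as M
import pytorch_lightning as pl

# Hypothetical call site: build the two loaders, then hand them to Lightning.
feature_set = features.get_feature_set_from_name('HalfKP')  # assumed helper
train, val = make_data_loaders(
    'train.binpack', 'val.binpack', feature_set,
    num_workers=1, batch_size=8192, filtered=True,
    random_fen_skipping=3, main_device='cuda:0')
nnue = M.NNUE(feature_set=feature_set)  # assumed constructor signature
trainer = pl.Trainer(max_epochs=10)
trainer.fit(nnue, train, val)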
Code Example #2
import nnue_dataset
from torch.utils.data import DataLoader

def data_loader_cc(train_filename, val_filename, num_workers, batch_size,
                   filtered, random_fen_skipping, main_device):
    # Epoch and validation sizes are arbitrary
    epoch_size = 100000000
    val_size = 2000000
    train_infinite = nnue_dataset.SparseBatchDataset(
        train_filename,
        batch_size,
        num_workers=num_workers,
        filtered=filtered,
        random_fen_skipping=random_fen_skipping,
        device=main_device)
    val_infinite = nnue_dataset.SparseBatchDataset(
        val_filename,
        batch_size,
        filtered=filtered,
        random_fen_skipping=random_fen_skipping,
        device=main_device)
    train = DataLoader(nnue_dataset.FixedNumBatchesDataset(
        train_infinite, (epoch_size + batch_size - 1) // batch_size),
                       batch_size=None,
                       batch_sampler=None)
    val = DataLoader(nnue_dataset.FixedNumBatchesDataset(
        val_infinite, (val_size + batch_size - 1) // batch_size),
                     batch_size=None,
                     batch_sampler=None)
    return train, val
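Note that this variant calls SparseBatchDataset without a feature-set name, presumably matching an older revision of the dataset API; the surrounding examples pass the name as the first argument. All variants wrap the infinite stream in FixedNumBatchesDataset so that each epoch has a definite length. Below is a conceptual sketch of what such a wrapper has to provide, inferred from how it is used here rather than copied from the project:

import torch

class FixedNumBatchesSketch(torch.utils.data.Dataset):
    # Caps an endless batch stream at a fixed count per epoch.
    def __init__(self, dataset, num_batches):
        super().__init__()
        self.iter = iter(dataset)    # the infinite SparseBatchDataset
        self.num_batches = num_batches

    def __len__(self):
        # The DataLoader treats this as the epoch length.
        return self.num_batches

    def __getitem__(self, idx):
        # Each item is already a full batch, hence batch_size=None above.
        return next(self.iter)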
Code Example #3
File: train.py  Project: Sanger2000/nnue-pytorch
import nnue_dataset
from torch.utils.data import DataLoader

def data_loader_cc(train_filename, val_filename, data_name, num_workers, batch_size, filtered):
  # Epoch and validation sizes are arbitrary
  epoch_size = int(3e7)
  val_size = epoch_size // 20
  train_infinite = nnue_dataset.SparseBatchDataset(
      data_name, train_filename, batch_size,
      num_workers=num_workers, filtered=filtered)
  val_infinite = nnue_dataset.SparseBatchDataset(
      data_name, val_filename, batch_size, filtered=filtered)
  # num_workers has to be 0 for sparse, and 1 for dense
  # it currently cannot work in parallel mode but it shouldn't need to
  train = DataLoader(
      nnue_dataset.FixedNumBatchesDataset(
          train_infinite, (epoch_size + batch_size - 1) // batch_size),
      batch_size=None, batch_sampler=None)
  val = DataLoader(
      nnue_dataset.FixedNumBatchesDataset(
          val_infinite, (val_size + batch_size - 1) // batch_size),
      batch_size=None, batch_sampler=None)
  return train, val
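All three loaders size the epoch with the same ceiling-division idiom: (n + b - 1) // b rounds up, so a short final batch still counts toward the epoch. A quick check with the epoch size from this example and an assumed batch size of 8192:

epoch_size, batch_size = 30_000_000, 8192  # batch size assumed for illustration
num_batches = (epoch_size + batch_size - 1) // batch_size
assert num_batches == 3663  # 3662 full batches plus one partial batch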
Code Example #4
File: metrics.py  Project: scchess/nnue-pytorch
import argparse

import torch
import pytorch_lightning as pl

import halfkp
import model as M
import nnue_dataset
from torch.utils.data import DataLoader

def main():
    parser = argparse.ArgumentParser(
        description="Runs evaluation for a model.")
    parser.add_argument("model",
                        help="Source file (can be .ckpt, .pt or .nnue)")
    parser.add_argument("--dataset",
                        default="d8_128000_21865.binpack",
                        help="Dataset to evaluate on (.bin)")
    args = parser.parse_args()

    if args.model.endswith(".pt"):
        nnue = torch.load(args.model, map_location=torch.device('cpu'))
    else:
        nnue = M.NNUE.load_from_checkpoint(args.model)

    val_infinite = nnue_dataset.SparseBatchDataset(halfkp.NAME, args.dataset,
                                                   8000)
    # Wrap in a DataLoader, as in the training examples above, so that
    # trainer.test receives a dataloader rather than a bare dataset.
    data = DataLoader(nnue_dataset.FixedNumBatchesDataset(val_infinite, 16),
                      batch_size=None, batch_sampler=None)

    trainer = pl.Trainer(progress_bar_refresh_rate=0)
    for i in range(21):
        nnue.lambda_ = i / 20.0
        loss = trainer.test(nnue, data, verbose=False)
        print(nnue.lambda_, ",", loss[0]['test_loss'])
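The loop sweeps nnue.lambda_ from 0.0 to 1.0 in steps of 0.05 and prints the test loss at each setting. In nnue-pytorch, lambda_ blends the two supervision targets: the engine's search score and the game's final result. A rough sketch of such a blend, with an assumed sigmoid scaling constant (the project's actual loss differs in detail):

import torch

def blended_target(score_cp, game_result, lambda_, scale=410.0):
    # Map a centipawn score to a win-probability-like value in [0, 1];
    # 'scale' is an assumed constant, not taken from the project.
    p_eval = torch.sigmoid(torch.as_tensor(score_cp, dtype=torch.float32) / scale)
    # game_result: 0.0 = loss, 0.5 = draw, 1.0 = win for the side to move.
    return lambda_ * p_eval + (1.0 - lambda_) * game_result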