Example #1
    def __init__(self, lambda_=1.0):
        super(NNUE, self).__init__()
        BASE = 128
        # Factorization functions: each maps a half-ka feature index to a
        # coarser factor index (piece_position presumably drops the king bucket).
        funcs = [
            piece_position,
        ]

        if withFactorizer:  # assumed module-level configuration flag
            # with factorization
            self.white_affine = FeatureTransformer(funcs, BASE)
            self.black_affine = FeatureTransformer(funcs, BASE)
        else:
            # without factorization
            self.white_affine = nn.Linear(halfka.half_ka_numel(), BASE)
            self.black_affine = nn.Linear(halfka.half_ka_numel(), BASE)

        # dropout seems necessary when using the factorizer to avoid over-fitting
        self.d0 = nn.Dropout(p=0.05)
        self.fc0 = nn.Linear(2 * BASE, 32)
        self.d1 = nn.Dropout(p=0.1)
        self.fc1 = nn.Linear(32, 32)
        self.d2 = nn.Dropout(p=0.1)
        self.fc2 = nn.Linear(64, 32)   # 64 = 32 + 32: fc0 and fc1 outputs concatenated
        self.d3 = nn.Dropout(p=0.1)
        self.fc3 = nn.Linear(96, 1)    # 96 = 3 * 32: fc0, fc1 and fc2 outputs concatenated
        self.lambda_ = lambda_
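
The 64- and 96-wide inputs of fc2 and fc3 imply dense skip connections between the hidden layers. The forward method is not part of this example, so the following is only a sketch of how those layers are presumably wired, assuming sparse half-ka batches for the two perspectives and ReLU activations:

    import torch
    import torch.nn.functional as F

    def forward(self, white_features, black_features):
        # Perspective accumulators, concatenated to a (N, 2 * BASE) vector.
        base = torch.cat([self.white_affine(white_features),
                          self.black_affine(black_features)], dim=1)
        x0 = F.relu(self.fc0(self.d0(F.relu(base))))   # (N, 32)
        x1 = F.relu(self.fc1(self.d1(x0)))             # (N, 32)
        # fc2 consumes 64 inputs: x0 and x1 concatenated.
        x2 = F.relu(self.fc2(self.d2(torch.cat([x0, x1], dim=1))))
        # fc3 consumes 96 inputs: x0, x1 and x2 concatenated.
        return self.fc3(self.d3(torch.cat([x0, x1, x2], dim=1)))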
Example #2
    def __init__(self, lambda_=1.0):
        super(NNUE, self).__init__()
        BASE = 128
        # Same head as Example #1, but always un-factorized and without dropout.
        self.white_affine = nn.Linear(halfka.half_ka_numel(), BASE)
        self.black_affine = nn.Linear(halfka.half_ka_numel(), BASE)
        self.fc0 = nn.Linear(2 * BASE, 32)
        self.fc1 = nn.Linear(32, 32)
        self.fc2 = nn.Linear(64, 32)   # fc0 and fc1 outputs concatenated
        self.fc3 = nn.Linear(96, 1)    # fc0, fc1 and fc2 outputs concatenated
        self.lambda_ = lambda_
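
A quick way to sanity-check the layer shapes. The `halfka` module is not shown in these examples, so a hypothetical stand-in with an illustrative feature count is used here, assuming NNUE is defined in the same module:

    import types
    import torch.nn as nn

    # Hypothetical stub standing in for the real halfka module; 768 is an
    # illustrative size, not the true half-ka feature count.
    halfka = types.SimpleNamespace(half_ka_numel=lambda: 768)

    net = NNUE()
    print(net.white_affine.weight.shape)  # torch.Size([128, 768])
    print(net.fc3.weight.shape)           # torch.Size([1, 96])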
Example #3
    def virtual(self):
        with torch.no_grad():
            # One entry per real feature: row i of the conversion matrix has a
            # single 1 in column self.f[i], the factor that feature i maps to.
            identity = torch.arange(halfka.half_ka_numel(), dtype=torch.long)
            conversion = torch.sparse_coo_tensor(
                torch.stack([identity, self.f], dim=0),
                torch.ones(halfka.half_ka_numel()),
                size=torch.Size([halfka.half_ka_numel(),
                                 self.inter_dim])).to(self.weights.device)
            # (numel, inter_dim) @ (inter_dim, output_dim) -> (numel, output_dim),
            # transposed to (output_dim, numel): the shape of nn.Linear.weight.
            return (conversion.matmul(self.weights)).t()
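
virtual() returns a dense (output_dim, half_ka_numel()) matrix, the same shape as nn.Linear.weight, which suggests its purpose: spreading each learned factor weight back over every real feature in that factor so the factorizer can be folded away at export time. The export path is not shown in these examples, so this folding helper is an assumption:

    import torch

    def fold_factored_weights(transformer):
        # transformer: a FeatureTransformer (Example #5). After folding, the
        # plain affine layer alone reproduces affine(x) plus every factored
        # contribution, since block(x) == x @ block.virtual().t().
        with torch.no_grad():
            for block in transformer.factored_blocks:
                transformer.affine.weight += block.virtual()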
Example #4
    def factored(self, x):
        # x is a sparse (N, half_ka_numel()) batch of active features.
        N, D = x.size()
        assert D == halfka.half_ka_numel()

        x = x.coalesce()
        batch, active = x.indices()
        # Remap every active real-feature index to its factor index; duplicate
        # (batch, factor) pairs are summed when the tensor is densified.
        factored = torch.gather(self.f.to(x.device), dim=0, index=active)
        return torch.sparse_coo_tensor(
            torch.stack([batch, factored], dim=0),
            x.values(),
            size=torch.Size([N, self.inter_dim])).to(x.device).to_dense()
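
A tiny demonstration of the index remapping that factored() performs, with a hypothetical mapping of six real features onto three factors (all values illustrative):

    import torch

    f = torch.tensor([0, 0, 1, 1, 2, 2], dtype=torch.long)  # feature -> factor
    active = torch.tensor([1, 4, 2])   # active real-feature columns in a batch
    print(torch.gather(f, dim=0, index=active))  # tensor([0, 2, 1])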
Example #5
    def __init__(self, funcs, base_dim):
        super(FeatureTransformer, self).__init__()
        # One FactoredBlock per factorization function, alongside the real
        # (fully specified) affine feature transform.
        self.factored_blocks = nn.ModuleList(
            [FactoredBlock(f, base_dim) for f in funcs])
        self.affine = nn.Linear(halfka.half_ka_numel(), base_dim)
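
FeatureTransformer's forward method is not among these examples; given the pieces above, it presumably adds each block's factored contribution to the base affine transform. A sketch under that assumption:

    def forward(self, x):
        # Base transform plus the sum of every factored contribution; x is
        # expected in whatever form self.affine accepts (the sparse handling
        # is not shown in these examples).
        out = self.affine(x)
        for block in self.factored_blocks:
            out = out + block(x)
        return out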
Example #6
    def __init__(self, func, output_dim):
        super(FactoredBlock, self).__init__()
        # f[i] = factor index of real feature i; inter_dim is the number of
        # distinct factors.
        self.f = torch.tensor([func(i) for i in range(halfka.half_ka_numel())],
                              dtype=torch.long)
        self.inter_dim = int(self.f.max().item()) + 1
        # Zero-initialized so the factored contribution starts as a no-op.
        self.weights = nn.Parameter(torch.zeros(self.inter_dim, output_dim))
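
FactoredBlock's own forward method likewise isn't shown; from the shapes of factored(x), (N, inter_dim), and self.weights, (inter_dim, output_dim), it is presumably just:

    def forward(self, x):
        # Project the batch onto the factor space (Example #4), then apply the
        # learned per-factor weights: (N, inter_dim) @ (inter_dim, output_dim).
        return self.factored(x).matmul(self.weights)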