Example #1
    def _reset_params_kaiming(self):
        # Fresh weight matrix and zeroed bias, wrapped via utils.set_tensor.
        self.weights = utils.set_tensor(
            torch.empty((self.in_size, self.out_size)))
        self.bias = utils.set_tensor(torch.zeros((self.out_size)))
        # Choose a Kaiming scheme based on the layer's activation function.
        if isinstance(self.act_fn, utils.Linear):
            # Same call that PyTorch's nn.Linear uses by default.
            nn.init.kaiming_uniform_(self.weights, a=math.sqrt(5))
        elif isinstance(self.act_fn, (utils.Tanh, utils.ReLU)):
            nn.init.kaiming_normal_(self.weights)

        # Bias is drawn uniformly from [-1/sqrt(fan_in), 1/sqrt(fan_in)].
        fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weights)
        bound = 1 / math.sqrt(fan_in)
        nn.init.uniform_(self.bias, -bound, bound)
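For reference, a minimal standalone sketch of the same scheme in plain PyTorch, outside the class; the layer sizes below are placeholders, not values from the project. The kaiming_uniform_ call with a=math.sqrt(5) and the fan_in-based bias bound mirror PyTorch's default nn.Linear initialisation.

import math
import torch
import torch.nn as nn

in_size, out_size = 784, 128          # placeholder sizes, for illustration only
weights = torch.empty((in_size, out_size))
bias = torch.zeros((out_size))

nn.init.kaiming_uniform_(weights, a=math.sqrt(5))   # "Linear" branch above

fan_in, _ = nn.init._calculate_fan_in_and_fan_out(weights)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(bias, -bound, bound)               # bias bound from fan-in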
Example #2
File: models.py Project: wx-b/pypc
def reset_mus(self, batch_size, init_std):
    # One activity tensor (mu) per layer, shape (batch_size, in_size),
    # drawn from N(0, init_std) and wrapped via utils.set_tensor.
    for l in range(self.n_layers):
        self.mus[l] = utils.set_tensor(
            torch.empty(batch_size, self.layers[l].in_size).normal_(mean=0, std=init_std)
        )
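In plain PyTorch terms, this builds one activity tensor per layer, each drawn from a zero-mean normal with standard deviation init_std. A minimal sketch of the equivalent construction, using hypothetical layer widths and an assumed init_std, without the project's utils wrapper:

import torch

batch_size, init_std = 64, 0.01       # assumed values, for illustration only
layer_in_sizes = [784, 300, 100, 10]  # hypothetical layer widths
mus = [
    torch.empty(batch_size, n).normal_(mean=0.0, std=init_std)
    for n in layer_in_sizes
]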
Example #3
def _reset_params(self):
    # Small random Gaussian weights (std 0.05) and a zero bias,
    # both wrapped via utils.set_tensor.
    weights = torch.empty((self.in_size, self.out_size)).normal_(mean=0.0, std=0.05)
    bias = torch.zeros((self.out_size))
    self.weights = utils.set_tensor(weights)
    self.bias = utils.set_tensor(bias)
Example #4
File: datasets.py Project: wx-b/pypc
def _preprocess_batch(batch):
    # Run both elements of an (input, target) batch through utils.set_tensor
    # and return them as a tuple.
    batch[0] = utils.set_tensor(batch[0])
    batch[1] = utils.set_tensor(batch[1])
    return (batch[0], batch[1])
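A minimal usage sketch, assuming _preprocess_batch and the project's utils module are importable from datasets.py and that a standard torch.utils.data.DataLoader yields (input, target) pairs; the dataset below is synthetic and only for illustration.

import torch
from torch.utils.data import DataLoader, TensorDataset

xs = torch.randn(100, 784)                 # synthetic inputs
ys = torch.randint(0, 10, (100,))          # synthetic targets
loader = DataLoader(TensorDataset(xs, ys), batch_size=32)

for batch in loader:
    # Copy to a list so the batch is a mutable sequence, then let
    # _preprocess_batch wrap both tensors via utils.set_tensor.
    x, y = _preprocess_batch(list(batch))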