def forward(self, z):
    # Presumably this form works only because z is one-dimensional here;
    # otherwise z w would have to be an outer product.
    activation = F.linear(z, self.weight, self.bias)
    psi = (1 - self.tanh(activation)**2) * self.weight  # h'(z w^T + b) w
    det_grad = 1 + torch.mm(psi, self.scale.t())  # 1 + h'(z w^T + b) w u^T
    return safe_log(det_grad.abs())
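# The comments above spell out the planar-flow determinant
# det(I + u psi(z)^T) = 1 + u^T psi(z) with psi(z) = h'(z w^T + b) w,
# as in Rezende & Mohamed's normalizing-flows construction. A minimal
# self-contained sketch with hypothetical parameter shapes (weight and
# scale of shape (1, D), scalar bias); torch.log stands in for safe_log:
import torch
import torch.nn.functional as F

D = 2
weight = torch.randn(1, D)  # w
scale = torch.randn(1, D)   # u
bias = torch.zeros(1)       # b
z = torch.randn(8, D)       # a batch of 8 latent samples

activation = F.linear(z, weight, bias)          # z w^T + b, shape (8, 1)
psi = (1 - torch.tanh(activation)**2) * weight  # h'(z w^T + b) w, shape (8, D)
det_grad = 1 + torch.mm(psi, scale.t())         # 1 + psi u^T, shape (8, 1)
log_det = torch.log(det_grad.abs())             # log|det dF/dz| per sample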
def __calc_jump_S(self):
    """ Calculates jump_S, an estimate of the covariance of the
        log-transformed parameters. """
    sample_l_values = []
    for t in self._sample:
        log_values = [safe_log(x) for x in t.get_values()]
        sample_l_values.append(log_values)
    self._jump_S = calc_covariance(sample_l_values)
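# calc_covariance is not defined in this listing. A plausible stand-in,
# assuming it returns the sample covariance matrix of a list of rows
# (one observation per row), equivalent to np.cov with rowvar=False:
import numpy as np

def calc_covariance(rows):
    data = np.asarray(rows, dtype=float)
    return np.cov(data, rowvar=False)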
def log_prob(self, x):
    mu = self.mu.expand(x.shape)
    dist_sq = (dist_p(mu, x, c=self.c)**2).T
    # reshape rather than view: the transpose above may be non-contiguous
    dist_sq = dist_sq.reshape(x.shape[0], len(dist_sq), 1)
    Z_r = self.Z_R(sigma=self.sigma, dim=self.dim, c=self.c)
    out = -dist_sq / (2 * self.sigma**2) - safe_log(Z_r)
    return out
def log_prob_marginal(self, x):
    mu = self.mu.expand(x.shape)
    dist_sq = dist_p(mu, x, c=self.c)**2
    dist_sq = dist_sq.unsqueeze(-1)
    Z_r = self.Z_R(sigma=self.sigma, dim=self.dim, c=self.c)
    out = -dist_sq / (2 * self.sigma**2) - safe_log(Z_r)
    return out
def log_inner_sum(sigma, k, d, c):
    if not isinstance(d, torch.Tensor):
        d = torch.tensor(d).float()
    # log C(d-1, k), written with log-gamma functions
    log_binom = torch.lgamma(d) - torch.lgamma(d - k) - torch.lgamma(k + 1)
    exp_term = (d - 1 - 2 * k)**2 * c * sigma**2 / 2
    # Renamed from c in the original, which shadowed the curvature argument.
    log_erf = safe_log(
        1 + torch.erf(((d - 1 - 2 * k) * np.sqrt(c) * sigma) / np.sqrt(2)))
    return log_binom + exp_term + log_erf
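# The first term above is the log-binomial coefficient log C(d-1, k)
# expressed through lgamma; a quick check against scipy (values chosen
# here purely for illustration):
import numpy as np
import torch
from scipy.special import comb

d, k = 5.0, 2.0
lgamma_term = (torch.lgamma(torch.tensor(d))
               - torch.lgamma(torch.tensor(d - k))
               - torch.lgamma(torch.tensor(k + 1)))
assert np.isclose(lgamma_term.exp().item(), comb(4, 2))  # C(4, 2) = 6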
def __normal_log_pdf(self, x):
    """ Returns the log-pdf value of x as if this instance were Normal. """
    mu = self.__mu
    n = len(mu)
    inv_S = self.__get_S_inverse()
    det_S = self.__get_S_determinant()
    x_minus_mu = (x - mu).reshape(n, 1)
    x_minus_mu_t = x_minus_mu.transpose()
    term1 = 1 / np.sqrt(np.power(2 * np.pi, n) * det_S)
    log_term2 = float(-.5 * np.dot(np.dot(x_minus_mu_t, inv_S), x_minus_mu))
    return safe_log(term1) + log_term2
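# A quick sanity check of the formula above against scipy's reference
# implementation (mu, S, x here are illustrative stand-ins for the
# instance's private state):
import numpy as np
from scipy.stats import multivariate_normal

mu = np.array([0.0, 1.0])
S = np.array([[2.0, 0.3], [0.3, 1.0]])
x = np.array([0.5, 0.5])

n = len(mu)
d = (x - mu).reshape(n, 1)
log_pdf = (-0.5 * np.log((2 * np.pi)**n * np.linalg.det(S))
           - 0.5 * float(d.T @ np.linalg.inv(S) @ d))
assert np.isclose(log_pdf, multivariate_normal.logpdf(x, mean=mu, cov=S))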
def generate_accuracy_plot(df, dataset, loss, model):
    # Generate a 3D plot where color indicates accuracy
    accuracy = [float(i) for i in df['acc'].tolist()]
    l1 = [safe_log(float(i)) for i in df['l1'].tolist()]
    l2 = [safe_log(float(i)) for i in df['l2'].tolist()]
    lr = [safe_log(float(i)) for i in df['lr'].tolist()]
    fig = plt.figure(figsize=(10, 8))
    ax = fig.add_subplot(111, projection='3d')
    ax.set_xlim(-5.1, 0.1)
    ax.set_ylim(-5.1, 0.1)
    p = ax.scatter(l1, l2, lr, c=accuracy, marker='o', cmap='brg',
                   vmin=0, vmax=1)
    ax.xaxis.set_major_formatter(mticker.FuncFormatter(log_tick_formatter_xy))
    ax.yaxis.set_major_formatter(mticker.FuncFormatter(log_tick_formatter_xy))
    ax.zaxis.set_major_formatter(mticker.FuncFormatter(log_tick_formatter_z))
    ax.set_xticks(
        [safe_log(i) for i in [0.00001, 0.0001, 0.001, 0.01, 0.1, 1]])
    ax.set_yticks(
        [safe_log(i) for i in [0.00001, 0.0001, 0.001, 0.01, 0.1, 1]])
    ax.set_zticks([safe_log(i) for i in [0.00001, 0.0001, 0.001, 0.01]])
    ax.set_xlabel('L1 regularization')
    ax.xaxis.labelpad = 20
    ax.set_ylabel('L2 regularization')
    ax.yaxis.labelpad = 20
    ax.set_zlabel('Learning rate')
    ax.zaxis.labelpad = 20
    plt.title('Hyperparameter search with the {} model \n using {} loss on {}'
              .format(model, loss, dataset),
              fontsize=16, fontstyle='italic', fontweight='bold', y=1.08)
    # fig.colorbar(p)
    plt.show()
def log_pdf(self, x):
    return safe_log(self.pdf(x))
def log_pdf(self, x):
    """ Returns the log of the probability density function of this
        random variable at point x. """
    # TODO: simplify calculations
    return safe_log(self.pdf(x))
def forward(self, zk, log_jacobians):
    sum_of_log_jacobians = sum(log_jacobians)
    return (-sum_of_log_jacobians - safe_log(self.density(zk))).mean()
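# This is the flow training objective: the batch mean of
# -sum_k log|det J_k| - log p(z_K). A self-contained sketch with a
# hypothetical stand-in density (an unnormalized standard Gaussian)
# and identity flows contributing zero log-det:
import torch

class FlowLoss(torch.nn.Module):
    def density(self, zk):
        return torch.exp(-0.5 * (zk**2).sum(dim=1, keepdim=True))

    def forward(self, zk, log_jacobians):
        sum_of_log_jacobians = sum(log_jacobians)
        return (-sum_of_log_jacobians - torch.log(self.density(zk))).mean()

zk = torch.randn(8, 2, requires_grad=True)
log_jacobians = [torch.zeros(8, 1), torch.zeros(8, 1)]
loss = FlowLoss()(zk, log_jacobians)
loss.backward()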
def forward(self, z):
    activation = F.linear(z, self.weight, self.bias)
    psi = (1 - self.tanh(activation)**2) * self.weight
    det_grad = 1 + torch.mm(psi, self.scale.t())
    return safe_log(det_grad.abs())
def test_safe_log(self):
    """ Tests safe_log. """
    e = np.exp(1)
    self.assertEqual(safe_log(e), np.log(e))
    self.assertEqual(safe_log(1e-400), float("-inf"))
    self.assertEqual(safe_log(0), float("-inf"))
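# safe_log itself is not shown in this listing. One implementation
# consistent with the assertions above (note that the literal 1e-400
# underflows to 0.0 in Python, so both edge cases hit the same branch):
import numpy as np

def safe_log(x):
    if x == 0:
        return float("-inf")
    return np.log(x)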
def log_pdf(self, j):
    """ Returns log p_j (j). """
    # TODO: simplify calculations
    return safe_log(self.pdf(j))