def forward(ctx, x):
    """Numerically guarded artanh.

    Clamps x into the open interval (-1, 1), saves the clamped tensor for
    the backward pass, evaluates 0.5 * (log(1 + x) - log(1 - x)) in
    float64 for precision, then casts back to the input dtype.
    """
    x = x.clamp(-1 + 1e-15, 1 - 1e-15)
    ctx.save_for_backward(x)
    orig_dtype = x.dtype
    z = x.double()
    out = 0.5 * (torch.log(1 + z) - torch.log(1 - z))
    return out.to(orig_dtype)
def forward(ctx, x):
    """Artanh with doubly guarded log operands.

    After clamping x into (-1, 1) and saving it for backward, both
    (1 + x) and (1 - x) are additionally floored at 1e-15 before the log,
    so neither log can see a non-positive argument. Computed in float64,
    result cast back to the input dtype.
    """
    x = x.clamp(-1 + 1e-15, 1 - 1e-15)
    ctx.save_for_backward(x)
    z = x.double()
    pos = torch.log((1 + z).clamp_min(1e-15))
    neg = torch.log((1 - z).clamp_min(1e-15))
    return (0.5 * (pos - neg)).to(x.dtype)
def forward(ctx, x):
    """Artanh computed in float64 with a 1e-5 clamp margin.

    Fix: the original converted to double, then did `z = x.double()` a
    second time — a redundant no-op conversion — and chained in-place ops
    on temporaries, obscuring the formula. Behavior is unchanged: the
    clamped float64 tensor is saved for backward and the result is cast
    back to the caller's dtype.
    """
    orig_dtype = x.dtype
    # Clamp AFTER the float64 conversion, as the original did, so the
    # saved tensor for backward is the clamped double-precision value.
    x = x.double().clamp(-1 + 1e-5, 1 - 1e-5)
    ctx.save_for_backward(x)
    res = 0.5 * (torch.log(1 + x) - torch.log(1 - x))
    return res.to(orig_dtype)
def forward(self, x):
    """Forward pass: stem convolutions with log-amplitude compression,
    five sequential stages, multi-scale skip aggregation, and a
    dropout + linear + log-softmax classifier head.
    """
    h = F.leaky_relu(self.conv1(x))
    # Log-compress the stem activations: log(1 + |h|).
    h = torch.log(1 + h.abs())
    h = F.leaky_relu(self.bn2(self.conv2(h)))
    h = F.avg_pool1d(h, kernel_size=7, padding=0, stride=3)

    s1 = self.layer1(h)
    s2 = self.layer2(s1)
    s3 = self.layer3(s2)
    s4 = self.layer4(s3)
    s5 = self.layer5(s4)

    # Pool intermediate stages down so their lengths line up with s5.
    skip3 = F.avg_pool1d(s3, 4, stride=4, padding=1)
    skip4 = F.avg_pool1d(s4, 3, stride=2, padding=1)
    merged = torch.cat([s5, skip4, skip3], 1)

    # Trim both ends so pooling padding does not introduce artifacts.
    merged = merged[:, :, 1:-1]
    merged = F.avg_pool1d(merged, 26)

    flat = merged.view(merged.size(0), -1)
    logits = self.linear(F.dropout(flat, training=self.training, p=0.25))
    return self.logsoftmax(logits)
def _make_ordinal(logits): """ Ordinal Distribution Network """ sigmoid = torch.nn.Sigmoid() logits = torch.squeeze(logits, 0) s_i = sigmoid(logits) one_minus_s = tensor(1) - s_i _part_1 = torch.log_(s_i) _part_2 = torch.log_(one_minus_s) ordinal_logits = [ torch.sum(_part_1[:i + 1]).item() + torch.sum(_part_2[i + 1:]).item() for i in range(len(s_i)) ] # ordinal_logits = [torch.sum(torch.log_(s_i[:i+1])).item() + torch.sum(torch.log_(one_minus_s[i+1:])).item() for i in range(len(s_i))] ordinal_logits = tensor(ordinal_logits) original_logits = self.softmax(ordinal_logits) ordinal_logits = torch.unsqueeze(ordinal_logits, dim=0) dist = torch.distributions.Categorical(logits=ordinal_logits) return dist
def mish(input, inplace = False):
    '''
    Applies the mish function element-wise:

    .. math::

        mish(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + e^{x}))

    With ``inplace=True`` the result is written into ``input``'s own
    storage (the caller's tensor is overwritten and returned).

    See additional documentation for :mod:`echoAIAI.Activation.Torch.mish`.
    '''
    if not inplace:
        return input * torch.tanh(F.softplus(input))
    # Keep a copy of x, then build tanh(log(1 + exp(x))) in place.
    original = input.clone()
    input.exp_().add_(1).log_().tanh_()
    input.mul_(original)
    return input
# Discriminator update for one batch: compute the chosen divergence,
# backprop, step, then log/plot the running distances.
optimizer_D.zero_grad()
if setting.distance == "wd_gp":
    # Wasserstein distance with gradient penalty (WGAN-GP style loss).
    real_decision = D(real_data)
    fake_decision = D(fake_data)
    gradient_penalty = compute_gradient_penalty(
        D, real_data.data, fake_data.data)
    d_loss = -torch.mean(real_decision) + torch.mean(
        fake_decision) + setting.lambda_gp * gradient_penalty
elif setting.distance == "jsd":
    # Jensen-Shannon divergence estimate.
    real_decision = D(real_data)
    fake_decision = D(fake_data)
    # NOTE(review): "+ 1 - 1" cancels to a no-op — it looks like a
    # leftover epsilon guard; log(0) is still reachable here. Confirm
    # whether "+ 1e-...," was intended.
    d_loss = -np.log(2) - 0.5 * torch.mean(
        torch.log_(real_decision + 1 - 1)) - 0.5 * torch.mean(
        torch.log_(1 - fake_decision + 1 - 1))
# NOTE(review): d_loss is unbound if setting.distance matches neither
# branch — this will raise NameError below in that case.
d_loss.backward()
optimizer_D.step()
print(
    "[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [Teta = %f]"
    % (epoch, setting.n_epochs, i, sample_num, d_loss.item(), theta))
wsd.append(d_loss.item())
# NOTE(review): this appends the negated loss *tensor*, not a float,
# which keeps the autograd graph alive; (-d_loss).item() is probably
# intended — confirm before plotting.
D_LOSSES.append(-d_loss)
plt.plot(np.arange(-1., 1.1, 0.1), D_LOSSES, 'ro')
plt.plot(np.arange(-1., 1.1, 0.1), D_LOSSES, 'c--')
plt.title("Distributions Distances")
plt.xlabel(r'$\Phi$')
if setting.distance == "wd_gp":
    plt.ylabel("Wasserstein Distance (WD)")
def forward(self, x):
    """Replace x with its natural logarithm in place and return it.

    Note: this mutates the caller's tensor (in-place op).
    """
    return x.log_()
def forward(ctx: Any, x: torch.Tensor) -> torch.Tensor:
    """Artanh forward pass: clamp x to (-1 + 4*eps, 1 - 4*eps) using the
    module-level `clamp`/`eps` helpers, save for backward, and return
    0.5 * (log(1 + x) - log(1 - x)).
    """
    bound = 1. - 4 * eps
    x = clamp(x, min=-bound, max=bound)
    ctx.save_for_backward(x)
    return 0.5 * (torch.log(1 + x) - torch.log(1 - x))
def forward(ctx, x):
    """Artanh at the input's working precision.

    Clamps x into (-1 + 1e-5, 1 - 1e-5), saves the clamped tensor for
    the backward pass, and returns 0.5 * (log(1 + x) - log(1 - x)).
    """
    clamped = x.clamp(-1 + 1e-5, 1 - 1e-5)
    ctx.save_for_backward(clamped)
    numer = torch.log(1 + clamped)
    denom = torch.log(1 - clamped)
    return 0.5 * (numer - denom)
# Matrix-multiplication demo (a and b are defined earlier in the file).
print(a.matmul(b))
print(a.matmul(b).shape)
# Power operations — note pow_ mutates a in place.
a = torch.tensor([1, 2])
print(torch.pow(a, 3))
print(a.pow(3))
print(a**3)
print(a.pow_(3))
# exp — NOTE: each trailing-underscore call (exp_) mutates a in place,
# so the later prints operate on the already-exponentiated values.
a = torch.tensor([1, 2], dtype=torch.float32)
print(a.type())
print(torch.exp(a))
print(torch.exp_(a))
print(a.exp())
print(a.exp_())
# Logarithm — the in-place variants compound in the same way.
a = torch.tensor([10, 2], dtype=torch.float32)
print(torch.log(a))
print(torch.log_(a))
print(a.log())
print(a.log_())
# sqrt — in-place variants mutate a as above.
a = torch.tensor([10, 2], dtype=torch.float32)
print(torch.sqrt(a))
print(torch.sqrt_(a))
print(a.sqrt())
print(a.sqrt_())
def forward(self, rgb, spad):
    """Fuse log-domain SPAD hint features into the RGB feature map.

    The hint tensor is broadcast across the spatial dimensions of the
    image features, log-transformed, scaled, and added in place; the
    (mutated) image feature map is returned.
    """
    feats = self.feature_extractor(rgb)
    hints = self.hints_extractor(rgb, spad)
    # Expand the hint features to the image feature map's spatial size.
    hints = hints.expand(-1, -1, feats.size(2), feats.size(3))
    # 1e-5 keeps the log finite when hints contain zeros.
    feats.add_(self.spad_weight * torch.log(hints + 1e-5))
    return feats
def forward(ctx, z):
    """Artanh forward: clamp z into (-1 + 1e-5, 1 - 1e-5), save it for
    the backward pass, and return 0.5 * (log(1 + z) - log(1 - z)).
    """
    z = z.clamp(-1 + 1e-5, 1 - 1e-5)
    ctx.save_for_backward(z)
    half_diff = (torch.log(1 + z) - torch.log(1 - z)) * 0.5
    return half_diff