Example #1
def forward(self, input, target):
    # Per-pixel binary cross-entropy; reduction must be the string 'none',
    # not the literal None.
    bce = F.binary_cross_entropy(input, target, reduction='none')
    # pt is the model's probability for the true class at each pixel.
    pt = torch.exp(-bce)  # torch.exp, not F.exp (torch.nn.functional has no exp)
    # Focal factor (1 - pt)^gamma down-weights easy, well-classified pixels.
    focal_factor = torch.pow(1.0 - pt, self.gamma)
    # Weight positive pixels by self.w to counter class imbalance.
    W = torch.ones_like(target)
    W[target == 1] = self.w
    pixelwise_loss = W * focal_factor * bce
    return torch.mean(pixelwise_loss)
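
For context, a minimal self-contained sketch of how such a loss might be wrapped and called; the FocalLoss class name and the default values gamma=2.0 and w=5.0 are assumptions for illustration, not taken from the original project.

import torch
import torch.nn as nn
import torch.nn.functional as F

class FocalLoss(nn.Module):
    # Hypothetical wrapper around the forward() above.
    def __init__(self, gamma=2.0, w=5.0):
        super().__init__()
        self.gamma = gamma  # focusing exponent
        self.w = w          # weight for positive (target == 1) pixels

    def forward(self, input, target):
        bce = F.binary_cross_entropy(input, target, reduction='none')
        pt = torch.exp(-bce)
        focal_factor = torch.pow(1.0 - pt, self.gamma)
        W = torch.ones_like(target)
        W[target == 1] = self.w
        return torch.mean(W * focal_factor * bce)

# Usage on random data: probabilities in (0, 1) against a binary mask.
pred = torch.sigmoid(torch.randn(4, 1, 32, 32))
target = (torch.rand(4, 1, 32, 32) > 0.5).float()
print(FocalLoss()(pred, target).item())
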
Example #2
def fstarT(self, v):
    # Fenchel conjugate f* composed with the output activation T, i.e.
    # fstarT(v) = f*(T(v)), for each supported f-divergence.
    if self.divergence_name == "kl":
        return torch.exp(v - 1.0)
    elif self.divergence_name == "klrev":
        return -1.0 - v
    # According to Table 4 of the f-GAN paper, we use Pearson Chi-Squared.
    # After Pearson Chi-Squared, the next best are KL and then Jensen-Shannon.
    elif self.divergence_name == "pearson":
        return 0.25 * v * v + v
    elif self.divergence_name == "neyman":
        return 2.0 - 2.0 * torch.exp(0.5 * v)  # torch.exp, not F.exp
    elif self.divergence_name == "hellinger":
        return torch.exp(-v) - 1.0  # torch.exp, not F.exp
    elif self.divergence_name == "jensen":
        return F.softplus(v) - math.log(2.0)
    elif self.divergence_name == "gan":
        return F.softplus(v)
    else:
        raise ValueError(f"Unknown f-divergence: {self.divergence_name!r}")
Example #3
def T(self, v):
    # Output activation mapping the raw critic output v into the domain
    # of f* for each supported f-divergence.
    if self.divergence_name == "kl":
        return v
    elif self.divergence_name == "klrev":
        return -torch.exp(v)  # torch.exp, not F.exp
    # According to Table 4 of the f-GAN paper, we use Pearson Chi-Squared.
    # After Pearson Chi-Squared, the next best are KL and then Jensen-Shannon.
    elif self.divergence_name == "pearson":
        return v
    elif self.divergence_name == "neyman":
        return 1.0 - torch.exp(v)  # torch.exp, not F.exp
    elif self.divergence_name == "hellinger":
        return 1.0 - torch.exp(v)  # torch.exp, not F.exp
    elif self.divergence_name == "jensen":
        return math.log(2.0) - F.softplus(-v)
    elif self.divergence_name == "gan":
        return -F.softplus(-v)
    else:
        raise ValueError(f"Unknown f-divergence: {self.divergence_name!r}")
Example #4
File: rbm.py Project: bubnicbf/phys_710
def pvh(self, v, h):
    '''
    Unnormalized probability of a joint configuration of binary
    visible/hidden nodes: exp(h^T W v + h_bias . h + v_bias . v).

    Args:
        v (1darray): visible node configuration.
        h (1darray): hidden node configuration.
    '''
    # torch.exp, not F.exp; use mv/dot rather than mm, since v and h are 1-D.
    return torch.exp(
        h.dot(self.W.mv(v)) + self.h_bias.dot(h) + self.v_bias.dot(v))
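
A standalone sketch of the same computation with hypothetical shapes (4 visible, 3 hidden units); the minimal RBM class here is a stand-in for the project's own, not its actual definition.

import torch

class RBM:
    # Hypothetical minimal RBM holding the parameters pvh() expects.
    def __init__(self, n_visible, n_hidden):
        self.W = torch.randn(n_hidden, n_visible) * 0.1  # hidden x visible weights
        self.v_bias = torch.zeros(n_visible)
        self.h_bias = torch.zeros(n_hidden)

    def pvh(self, v, h):
        # Unnormalized joint probability exp(h^T W v + h_bias . h + v_bias . v).
        return torch.exp(
            h.dot(self.W.mv(v)) + self.h_bias.dot(h) + self.v_bias.dot(v))

rbm = RBM(n_visible=4, n_hidden=3)
v = torch.tensor([1.0, 0.0, 1.0, 1.0])  # binary visible configuration
h = torch.tensor([0.0, 1.0, 1.0])       # binary hidden configuration
print(rbm.pvh(v, h))
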
Example #5
File: vae.py Project: AlexGrig/VAEs
def forward(self, x):
    mu = self.mu(x)

    # Map the linear layer's output to a strictly positive standard deviation.
    if self.std_to_positive_transform == 'softplus':
        sigma = F.softplus(self.sigma_lin(x))
    elif self.std_to_positive_transform == 'exp':
        sigma = torch.exp(self.sigma_lin(x))  # torch.exp, not F.exp
    else:
        raise ValueError("Unknown positive transformation")

    return self.reparametrize(mu, sigma, self.num_samples), mu, sigma
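
The reparametrize call itself is not shown in the snippet. A common implementation is the standard reparameterization trick, sketched here under the assumption that it draws num_samples standard-normal samples and shifts/scales them; the signature is inferred, not taken from the project.

import torch

def reparametrize(mu, sigma, num_samples):
    # z = mu + sigma * eps with eps ~ N(0, I); sampling stays differentiable
    # w.r.t. mu and sigma. Output shape: (num_samples, batch, latent_dim).
    eps = torch.randn(num_samples, *mu.shape)
    return mu.unsqueeze(0) + sigma.unsqueeze(0) * eps

mu = torch.zeros(2, 3)    # placeholder batch of latent means
sigma = torch.ones(2, 3)  # placeholder batch of latent stds
print(reparametrize(mu, sigma, num_samples=5).shape)  # torch.Size([5, 2, 3])
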
Example #6
def forward(self, x):
    # Gating factor; note (exp(-x) ** b) ** (-a) == exp(a * b * x).
    aria2 = 1 + ((torch.exp(-x) ** self.b) ** (-self.a))  # torch.exp, not F.exp
    return x * aria2
Example #7
def forward(self, x):
    # Generalized logistic (Richards) gate: A + (k - A) / (C + Q * e^(-B*x))^(1/v).
    # Note the precedence: self.Q * torch.exp(-x) ** self.B == Q * exp(-B * x).
    aria = self.A + (self.k - self.A) / ((self.C + self.Q * torch.exp(-x) ** self.B) ** (1 / self.v))
    return x * aria
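
A small sketch evaluating such a gate with assumed parameter values; the module's actual defaults are not shown in the snippet, so these are placeholders.

import torch

# Assumed parameter values for illustration only.
A, k, C, Q, B, v = 0.0, 1.0, 1.0, 1.0, 1.0, 1.0

x = torch.linspace(-3.0, 3.0, 7)
aria = A + (k - A) / ((C + Q * torch.exp(-x) ** B) ** (1 / v))
print(x * aria)  # with these values the gate reduces to sigmoid(x), giving SiLU/Swish
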
Example #8
def inverse_log_norm(x, min_clip, mean, std, F=torch):
    # F is a swappable array backend (e.g. torch or numpy); both provide exp(),
    # so F.exp is valid here. Undo standardization, then the log transform,
    # then the clipping offset.
    x = F.exp(x * std + mean) + min_clip - 1e-5
    return x
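
The forward transform this inverts is presumably log(x - min_clip + 1e-5) followed by standardization; a round-trip check under that assumption, using inverse_log_norm as defined above (the log_norm helper is hypothetical):

import torch

def log_norm(x, min_clip, mean, std, F=torch):
    # Assumed forward transform matching inverse_log_norm above.
    return (F.log(x - min_clip + 1e-5) - mean) / std

x = torch.tensor([0.5, 1.0, 2.0])
min_clip, mean, std = 0.0, 0.0, 1.0
y = log_norm(x, min_clip, mean, std)
print(inverse_log_norm(y, min_clip, mean, std))  # recovers [0.5, 1.0, 2.0]
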