def __init__(self, height=64, width=64, with_r=False, with_boundary=False):
    super(AddCoordsTh, self).__init__()
    self.with_r = with_r
    self.with_boundary = with_boundary
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    with torch.no_grad():
        # Coordinate grids normalized to [-1, 1] along each spatial axis.
        x_coords = torch.arange(height).unsqueeze(1).expand(height, width).float()
        y_coords = torch.arange(width).unsqueeze(0).expand(height, width).float()
        x_coords = (x_coords / (height - 1)) * 2 - 1
        y_coords = (y_coords / (width - 1)) * 2 - 1
        coords = torch.stack([x_coords, y_coords], dim=0)  # (2, height, width)

        if self.with_r:
            # Optional radial channel: distance from the center, scaled to [0, 1].
            rr = torch.sqrt(torch.pow(x_coords, 2) + torch.pow(y_coords, 2))  # (height, width)
            rr = (rr / torch.max(rr)).unsqueeze(0)
            coords = torch.cat([coords, rr], dim=0)

        self.coords = coords.unsqueeze(0).to(device)  # (1, 2 or 3, height, width)
        self.x_coords = x_coords.to(device)
        self.y_coords = y_coords.to(device)
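# --- Illustrative sketch (not part of the original class) --------------------
# The class's own forward is not shown in this excerpt. In CoordConv-style
# modules such as this one, the precomputed coordinate channels are typically
# broadcast over the batch and concatenated to the input feature map along the
# channel dimension, so a following convolution can condition on absolute
# position. `_demo_concat_coords` below is a hypothetical helper written only
# to make that usage concrete.
import torch

def _demo_concat_coords(x, coords):
    """Concatenate a (1, 2 or 3, H, W) coords buffer onto a (B, C, H, W) map."""
    coords = coords.to(x.device).expand(x.size(0), -1, -1, -1)
    return torch.cat([x, coords], dim=1)  # (B, C + 2 or 3, H, W)

# Example (assumed shapes): with coords of shape (1, 2, 64, 64) and
# feat = torch.randn(4, 16, 64, 64), the result has shape (4, 18, 64, 64).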
def forward(self, x, y):
    # Normalize both inputs with the per-channel shift and scale stored on the module.
    x = (x - self.mu) / self.sigma
    y = (y - self.mu) / self.sigma
    x_fmaps = self.alexnet(x)
    y_fmaps = self.alexnet(y)
    lpips_value = 0
    for x_fmap, y_fmap, conv1x1 in zip(x_fmaps, y_fmaps, self.lpips_weights):
        # Unit-normalize each feature map, then weight the squared difference
        # with the learned 1x1 convolution and average over all positions.
        x_fmap = normalize(x_fmap)
        y_fmap = normalize(y_fmap)
        diff = torch.pow(x_fmap - y_fmap, 2)
        lpips_value += torch.mean(conv1x1(diff))
    return lpips_value
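# --- Illustrative sketch (assumption) -----------------------------------------
# `normalize` is not defined in this excerpt. LPIPS-style metrics conventionally
# unit-normalize each feature map along the channel dimension before comparing
# them; a minimal version of such a helper could look like the following
# (`_demo_channelwise_normalize` is a hypothetical name, not the repo's own):
import torch

def _demo_channelwise_normalize(fmap, eps=1e-10):
    """Scale each spatial position so its channel vector has unit L2 norm."""
    norm = torch.sqrt(torch.sum(fmap ** 2, dim=1, keepdim=True))
    return fmap / (norm + eps)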
def pow(self, k):
    return torch.pow(self, k)
def resize(x, p=2):
    """Resize heatmaps."""
    # Element-wise power; for values in [0, 1] this suppresses weak activations.
    return torch.pow(x, p)
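# --- Usage note (illustrative) -------------------------------------------------
# Despite its name, `resize` does not change spatial dimensions: it raises the
# heatmap values element-wise to the power p. Assuming values in [0, 1], weak
# activations shrink toward 0 while strong peaks stay close to 1.
import torch

heat = torch.tensor([[0.1, 0.5, 0.9, 1.0]])
print(resize(heat))        # tensor([[0.0100, 0.2500, 0.8100, 1.0000]]) for p=2
print(resize(heat).shape)  # torch.Size([1, 4]) -- same shape as the input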