def test_residual(x, gt, bc):
    y = utils.fd_step(x, bc, None)
    residual = y - x
    e = gt - x

    # Ae = -r
    A = utils.loss_kernel.view(1, 1, 3, 3)
    Ae = F.conv2d(e.unsqueeze(1), A).squeeze(1)
    # z should be all zeros
    z = Ae + residual[:, 1:-1, 1:-1]
    print(z)

    # Solve Ae = -r iteratively
    e = residual
    f = -residual
    for i in range(400):
        e = utils.fd_step(e, torch.zeros(1, 4), f)
        Ae = F.conv2d(e.unsqueeze(1), A).squeeze(1)
        # z should be all zeros
        z = Ae + residual[:, 1:-1, 1:-1]
        #print(z)

    # e = Se
    e = utils.fd_step(e, torch.zeros(1, 4), None)
    final = y + e
    print(torch.abs(gt - final))
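# Why A e = -r in test_residual above: with the splitting used throughout this file
# (see the multigrid_step docstring further down), one smoothing step is
# u^{k+1} = S u^{k} + b - f and the residual is r = u^{k+1} - u^{k} = A u^{k} + b - f.
# The ground truth gt satisfies A gt + b - f = 0, so
# A e = A gt - A u^{k} = (f - b) - (r - b + f) = -r.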
def multigrid_step(self, x, bc, f, step):
    '''
    One layer of multigrid. Recursive function.
    Find solution x to Ax + b = 0.
    '''
    batch_size, image_size, _ = x.size()

    # Pre-smoothing
    for i in range(self.pre_smoothing):
        x = utils.fd_step(x, bc, f)

    if step > 1:
        # Downsample
        if f is not None:
            f_sub = 4 * utils.subsample(f)
        else:
            f_sub = None

        if self.is_bc_mask:
            # Subsample geometry
            bc_sub = utils.subsample(
                bc.view(batch_size * 2, image_size, image_size))
            bc_sub = bc_sub.view(batch_size, 2, *bc_sub.size()[-2:])
        else:
            bc_sub = bc

        x_sub = utils.restriction(x, bc_sub)

        # Refine x_sub recursively
        x_sub = self.multigrid_step(x_sub, bc_sub, f_sub, step - 1)

        # Upsample
        x = utils.interpolation(x_sub, bc)

    # Post-smoothing
    for i in range(self.post_smoothing):
        x = utils.fd_step(x, bc, f)

    return x
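# Illustrative usage sketch (an assumption, not part of the original code): drives the
# recursive V-cycle above on a zero initial guess with random Dirichlet boundary values.
# It reuses only utils calls that already appear in this file; `solver` is a hypothetical
# stand-in for whatever object carries pre_smoothing, post_smoothing, is_bc_mask and
# this multigrid_step method.
def _example_multigrid_usage(solver, image_size=65, cycles=20, depth=4):
    bc = torch.Tensor(np.random.rand(1, 4) * 80)
    x = torch.zeros(1, image_size + 2, image_size + 2)
    x = utils.set_boundary(x, bc)
    for _ in range(cycles):
        x = solver.multigrid_step(x, bc, None, depth)
    # Residual check in the style of test_heat below: with f = None, A x should be ~0.
    A = utils.loss_kernel.view(1, 1, 3, 3)
    print(torch.abs(F.conv2d(x.unsqueeze(1), A).squeeze(1)).max().item())
    return x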
def get_solution(x, bc, f):
    '''
    Iterate until the error is below a threshold.
    '''
    frames = [x]
    error_threshold = 0.00001
    max_iters = 8000

    error = utils.fd_error(x, bc, f)
    largest_error = error.max().item()
    print('largest error {}'.format(largest_error))

    if largest_error >= error_threshold:
        # Iterate with Jacobi until ground truth
        for i in range(max_iters):
            x = utils.fd_step(x, bc, f)
            error = utils.fd_error(x, bc, f)
            if (i + 1) % 100 == 0:
                # Largest error in the batch
                largest_error = error.max().item()
                print('Iter {}: largest error {}'.format(i + 1, largest_error))
                if largest_error < error_threshold:
                    break

    # Add ground truth to frames
    y = x.cpu().numpy()
    frames.append(y)

    # batch_size x (n_frames + 1) x image_size x image_size
    frames = np.stack(frames, axis=1)
    return frames
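# Illustrative usage sketch (an assumption, not original code): get_solution expects
# the kind of initial state x, boundary values bc and right-hand side f that test_heat
# below constructs, and returns a numpy array shaped
# batch_size x (n_frames + 1) x image_size x image_size, whose last frame is the
# converged ("ground truth") solution:
#
#   frames = get_solution(x, bc, f)   # x, bc, f built as in test_heat below
#   gt = frames[:, -1]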
def test_heat():
    image_size = 65
    scale = np.random.uniform(350, 450) / (image_size ** 2)
    f = -gaussian(image_size) * scale
    f = torch.Tensor(f).unsqueeze(0)
    f = utils.pad_boundary(f, torch.zeros(1, 4))

    bc = torch.Tensor(np.random.rand(1, 4) * 80)
    x = torch.zeros(1, image_size + 2, image_size + 2)
    x = utils.set_boundary(x, bc)
    x = utils.initialize(x, bc, 'avg')

    y = x.clone()
    for i in range(2000):
        y = utils.fd_step(y, bc, None)

    z = x.clone()
    for i in range(4000):
        z = utils.fd_step(z, bc, f)

    # Au = 0
    A = utils.loss_kernel.view(1, 1, 3, 3)
    r = F.conv2d(y.unsqueeze(1), A).squeeze(1)
    error = torch.abs(r).max().item()
    print(error)

    # Au = f
    r = F.conv2d(z.unsqueeze(1), A).squeeze(1) - f[:, 1:-1, 1:-1]
    error = torch.abs(r).max().item()
    print(error)

    y = (y / 100).numpy().squeeze(0)
    z = (z / 100).numpy().squeeze(0)
    plt.imshow(y)
    plt.colorbar()
    plt.show()
    plt.imshow(z)
    plt.colorbar()
    plt.show()
def multigrid_step(self, x, bc, f, step):
    '''
    One layer of multigrid. Recursive function.
    Find solution x to Ax + b = f.

    Algorithm:
      - Update rule: u^{k+1} = S u^{k} + b - f
      - Residual: r^{k} = u^{k+1} - u^{k} = A u^{k} + b - f
      - Solve A e^{k} = -r^{k} recursively.
      - u' = u^{k} + e^{k}
    '''
    if step == 0:
        return None

    # Pre-smoothing
    x = utils.set_boundary(x, bc)
    for i in range(self.pre_smoothing):
        x = utils.fd_step(x, bc, f)

    # Calculate the residual
    y = utils.fd_step(x, bc, f)
    r = y - x

    # Solve e: A e = -r
    # Restriction: downsample by 2
    zeros_bc = torch.zeros(1, 4)
    r_sub = utils.restriction(r, zeros_bc)

    # Recurse on the coarse grid
    ek_sub = self.multigrid_step(r_sub, zeros_bc, -r_sub, step - 1)

    # Upsample
    if ek_sub is not None:
        ek = utils.interpolation(ek_sub, zeros_bc)
        # Add the correction to x
        x = x + ek

    # Post-smoothing
    x = utils.set_boundary(x, bc)
    for i in range(self.post_smoothing):
        x = utils.fd_step(x, bc, f)

    return x
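# ---------------------------------------------------------------------------
# Illustrative, self-contained sketch of the residual-correction scheme in the
# docstring above (an assumption for exposition only: it does not touch `utils`
# or the solver class, and relies only on torch / F, which this file already
# uses). Convention here: solve A u = f where
#     (A u)_ij = u_ij - 0.25 * (sum of the four neighbours),
# with a zero Dirichlet boundary, smoothed by damped Jacobi (weight = 1 would be
# a plain Jacobi sweep). Restriction is simple injection and the coarse problem
# is only solved approximately, so this is a two-grid cycle, not the full
# recursion above.
# ---------------------------------------------------------------------------
_A_KERNEL = torch.Tensor([[0., -0.25, 0.],
                          [-0.25, 1., -0.25],
                          [0., -0.25, 0.]]).view(1, 1, 3, 3)
_NEIGHBOUR_KERNEL = torch.Tensor([[0., 0.25, 0.],
                                  [0.25, 0., 0.25],
                                  [0., 0.25, 0.]]).view(1, 1, 3, 3)


def _apply_A(u):
    # A u on the interior, zero-padded back to the full grid size.
    return F.pad(F.conv2d(u.unsqueeze(1), _A_KERNEL).squeeze(1), (1, 1, 1, 1))


def _smooth(u, f, iters, weight=0.8):
    # Damped Jacobi for A u = f; the boundary stays at zero because both the
    # padded neighbour average and f are zero there.
    for _ in range(iters):
        avg = F.pad(F.conv2d(u.unsqueeze(1), _NEIGHBOUR_KERNEL).squeeze(1),
                    (1, 1, 1, 1))
        u = (1. - weight) * u + weight * (avg + f)
    return u


def _two_grid_step(u, f, pre=4, post=4, coarse_iters=200):
    # One two-grid cycle on a (2^k + 1)-point grid: pre-smooth, form r = f - A u,
    # approximately solve A e = r on the coarse grid, prolong e, correct u,
    # post-smooth. The factor 4 is the h^2 -> (2h)^2 rescaling of the right-hand
    # side when the grid spacing doubles (same role as 4 * utils.subsample(f)
    # elsewhere in this file).
    u = _smooth(u, f, pre)
    r = f - _apply_A(u)
    r_coarse = 4. * r[:, ::2, ::2]
    e_coarse = _smooth(torch.zeros_like(r_coarse), r_coarse, coarse_iters)
    e = F.interpolate(e_coarse.unsqueeze(1), size=(u.size(-2), u.size(-1)),
                      mode='bilinear', align_corners=True).squeeze(1)
    return _smooth(u + e, f, post)


def _two_grid_demo(n=65, cycles=10):
    # The residual should drop by several orders of magnitude over a handful of
    # cycles, far faster than the same total number of fine-grid sweeps alone.
    f = torch.zeros(1, n, n)
    f[:, 1:-1, 1:-1] = torch.rand(1, n - 2, n - 2) * 1e-3
    u = torch.zeros(1, n, n)
    print('initial residual', (f - _apply_A(u)).abs().max().item())
    for _ in range(cycles):
        u = _two_grid_step(u, f)
    print('final residual  ', (f - _apply_A(u)).abs().max().item())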
def test_upsampling_poisson(x, gt, bc, f):
    print('Upsampling multigrid')
    f_sub = utils.subsample(f)
    x_sub = utils.restriction(x, bc)
    for i in range(1000):
        x_sub = utils.fd_step(x_sub, bc, f_sub)

    # Upsample
    x = utils.interpolation(x_sub, bc)

    A = utils.loss_kernel.view(1, 1, 3, 3)
    r = F.conv2d(x.unsqueeze(1), A).squeeze(1)
    r = utils.pad_boundary(r, torch.zeros(1, 4)) - f
    r = r.cpu().numpy()
    # Largest residual magnitude after interpolating back to the fine grid
    print(np.abs(r).max())
def test_subsampling_poisson(x, gt, bc, f):
    print('Subsampling multigrid')
    for i in range(2000):
        x = utils.fd_step(x, bc, f)

    A = utils.loss_kernel.view(1, 1, 3, 3)
    r = F.conv2d(x.unsqueeze(1), A).squeeze(1)
    r = utils.pad_boundary(r, torch.zeros(1, 4)) - f
    print(np.abs(r.cpu().numpy()).max())

    # Subsample, one level at a time
    x_sub = x
    f_sub = f
    for i in range(3):
        f_sub = 4 * utils.subsample(f_sub)
        x_sub = utils.restriction(x_sub, bc)
        r_sub = F.conv2d(x_sub.unsqueeze(1), A).squeeze(1)
        r_sub = utils.pad_boundary(r_sub, torch.zeros(1, 4)) - f_sub
        print(x_sub.size())
        print(np.abs(r_sub.cpu().numpy()).max())
def forward(self, x, bc, f):
    '''
    x: size (batch_size x image_size x image_size)
    return: same size
    '''
    return utils.fd_step(x, bc, f)
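# Illustrative usage note (an assumption, not part of the original code): forward
# applies a single smoothing step, so chaining calls reproduces the plain Jacobi
# iteration used in get_solution above, e.g.
#
#   for _ in range(n_steps):
#       x = model(x, bc, f)   # `model` is a hypothetical instance of this module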