Code Example #1
    def backward(self, obj):
        # Undo the forward rotation: rotate the volume by -self.theta degrees.
        theta = -1 * self.theta
        if theta == 0:
            return obj
        else:
            # Remember where the input lives so it can be restored at the end
            # (checking obj.is_cuda again after .cuda() would always be True).
            flag_input_on_cpu = not obj.is_cuda
            if flag_input_on_cpu:
                obj = obj.cuda()
            theta *= np.pi / 180.0
            # Shear coefficients for the three-pass (shear-based) rotation.
            alpha = np.tan(theta / 2.0)
            beta = np.sin(-1.0 * theta)

            # Fourier-domain phase ramps implementing the shears.
            shear_phase_1 = op.exp(
                op.multiply_complex(op._j, self.coord_phase_1 * alpha))
            shear_phase_2 = op.exp(
                op.multiply_complex(op._j, self.coord_phase_2 * beta))

            # Scratch buffer holding one rotated tile of the volume.
            self.dim[self.axis] = self.slice_per_tile
            self.obj_rotate = op.r2c(
                torch.zeros([self.dim[0], self.dim[1], self.dim[2]],
                            dtype=self.dtype,
                            device=self.device))

            # Rotate the volume tile by tile along `self.axis` to bound memory use.
            for idx_start in range(0, obj.shape[self.axis],
                                   self.slice_per_tile):
                idx_end = min(obj.shape[self.axis],
                              idx_start + self.slice_per_tile)
                idx_slice = slice(idx_start, idx_end)
                self.dim[self.axis] = int(idx_end - idx_start)
                if self.axis == 0:
                    self.range_crop_y = slice(0, self.dim[self.axis])
                    obj[idx_slice, :, :] = self._rotate_3d(
                        obj[idx_slice, :, :], alpha, beta, shear_phase_1,
                        shear_phase_2)
                elif self.axis == 1:
                    self.range_crop_x = slice(0, self.dim[self.axis])
                    obj[:, idx_slice, :] = self._rotate_3d(
                        obj[:, idx_slice, :], alpha, beta, shear_phase_1,
                        shear_phase_2)
                elif self.axis == 2:
                    self.range_crop_z = slice(0, self.dim[self.axis])
                    obj[:, :, idx_slice] = self._rotate_3d(
                        obj[:, :, idx_slice], alpha, beta, shear_phase_1,
                        shear_phase_2)
                self.obj_rotate[:] = 0.0  # clear the scratch buffer for the next tile
            self.dim[self.axis] = obj.shape[self.axis]  # restore the full size
            self.obj_rotate = None  # release the scratch buffer
            if flag_input_on_cpu:
                obj = obj.cpu()
            return obj
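
The alpha and beta above appear to follow the classic three-shear decomposition of a 2-D rotation, with each shear applied as a Fourier-domain phase ramp inside _rotate_3d. A minimal numpy check of that identity (an illustration added here, not part of the original code):

import numpy as np

theta = np.deg2rad(30.0)
alpha, beta = np.tan(theta / 2.0), np.sin(-theta)
shear_x = np.array([[1.0, alpha], [0.0, 1.0]])  # shear along x
shear_y = np.array([[1.0, 0.0], [beta, 1.0]])   # shear along y
# Three shears compose into a rotation by -theta.
rotation = shear_x @ shear_y @ shear_x
assert np.allclose(rotation,
                   [[np.cos(theta), np.sin(theta)],
                    [-np.sin(theta), np.cos(theta)]])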
Code Example #2
def generate_angular_spectrum_kernel(shape, pixel_size, wavelength,
                                     numerical_aperture=None,
                                     flag_band_limited=True,
                                     dtype=torch.float32,
                                     device=torch.device('cuda')):
    """
    Generates the angular spectrum propagation kernel WITHOUT the distance term.
    The angular spectrum propagator has the form
        p = exp(distance * kernel)
    with
        kernel = 1j * 2 * pi * sqrt((1 / wavelength)**2 - kx**2 - ky**2)
    (refractive index taken as 1, matching the code below); this function
    generates the kernel only.
    """
    assert len(shape) == 2, "pupil should be two dimensional!"
    # Spatial-frequency (Fourier) grids for the given sampling.
    ky_lin, kx_lin = util.generate_grid_2d(shape,
                                           pixel_size,
                                           flag_fourier=True,
                                           dtype=dtype,
                                           device=device)
    if flag_band_limited:
        # Band-limit the kernel to the system pupil.
        assert numerical_aperture is not None, "need to provide the numerical aperture of the system!"
        pupil_crop = op.r2c(
            generate_hard_pupil(shape, pixel_size, numerical_aperture,
                                wavelength))
    else:
        pupil_crop = 1.0
    # Complex square root so that evanescent frequencies are handled correctly.
    prop_kernel = 2.0 * np.pi * pupil_crop * op.exponentiate(
        op.r2c((1. / wavelength)**2 - kx_lin**2 - ky_lin**2), 0.5)
    return op.multiply_complex(op._j, prop_kernel)
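
Per the docstring, the kernel is combined with a propagation distance before use. A minimal usage sketch, assuming the op helpers behave as in the other snippets in this section (the parameter values and the dummy field are made up):

shape = (256, 256)
kernel = generate_angular_spectrum_kernel(shape,
                                          pixel_size=0.1,
                                          wavelength=0.532,
                                          numerical_aperture=1.0)
distance = 5.0                            # propagation distance (made up)
field = op.r2c(torch.ones(shape).cuda())  # dummy input field
propagator = op.exp(distance * kernel)    # p = exp(distance * kernel)
field_f = torch.fft(field, signal_ndim=2)
field_prop = torch.ifft(op.multiply_complex(field_f, propagator),
                        signal_ndim=2)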
Code Example #3
    def backward(ctx, grad_output):
        kernel, field = ctx.saved_tensors
        defocus_list = ctx.defocus_list
        # Allocate gradient storage with the same shape as defocus_list.
        grad_defocus_list = defocus_list.clone()
        grad_output = torch.fft(grad_output.permute(2, 0, 1, 3), signal_ndim=2)
        for defocus_idx in range(len(defocus_list)):
            # Adjoint of the forward pass: the conjugation condition is
            # flipped relative to Code Example #6.
            kernel_temp = op.exp(abs(defocus_list[defocus_idx]) * kernel)
            kernel_temp = kernel_temp if defocus_list[
                defocus_idx] < 0. else op.conj(kernel_temp)
            grad_output[defocus_idx, ...] = op.multiply_complex(
                grad_output[defocus_idx, ...], kernel_temp)
            # adaptive_step_size = op.r2c(op.abs(field) / (1e-8 + (torch.max(op.abs(field)) * op.abs(field)**2)))
            # grad_defocus_list[defocus_idx] = op.multiply_complex(op.multiply_complex(op.multiply_complex(grad_output[defocus_idx,...],adaptive_step_size), op.conj(field)), op.conj(kernel)).sum((0,1))[0]
            # Gradient with respect to this defocus distance (real component).
            grad_defocus_list[defocus_idx] = op.multiply_complex(
                op.multiply_complex(grad_output[defocus_idx, ...],
                                    op.conj(field)), op.conj(kernel)).sum(
                                        (0, 1))[0]
        grad_output = torch.ifft(grad_output,
                                 signal_ndim=2).permute(1, 2, 0, 3)
        # Sum over the defocus dimension for the gradient w.r.t. the input field.
        return grad_output.sum(2), None, grad_defocus_list
Code Example #4
	def compute_prox(self, x):
		# Optionally update the proximal parameter according to a schedule.
		if self.parameter_list is not None:
			self.set_parameter(self.parameter_list[self.itr_count])
		x_device = x.device
		x = x.to(device=self.device)
		if self.pure_real:
			# Keep only the (regularized) real part.
			x[..., 0] = self._compute_prox_real(op.real(x), self.realProjector)
			x[..., 1] = 0.0
		elif self.pure_imag:
			# Keep only the (regularized) imaginary part.
			x[..., 0] = 0.0
			x[..., 1] = self._compute_prox_real(op.imag(x), self.imagProjector)
		elif self.pure_amplitude:
			# Keep only the (regularized) amplitude.
			x[..., 0] = self._compute_prox_real(op.abs(x), self.realProjector)
			x[..., 1] = 0.0
		elif self.pure_phase:
			# Keep only the (regularized) phase: exp(j * prox(angle(x))).
			x = op.exp(op.multiply_complex(op._j, op.r2c(self._compute_prox_real(op.angle(x), self.realProjector))))
		else:
			# Regularize the real and imaginary parts independently.
			x[..., 0] = self._compute_prox_real(op.real(x), self.realProjector)
			self.set_parameter(self.parameter / 1.0, self.maxitr)
			x[..., 1] = self._compute_prox_real(op.imag(x), self.imagProjector)
			self.set_parameter(self.parameter * 1.0, self.maxitr)
		self.itr_count += 1
		return x.to(x_device)
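
When _compute_prox_real reduces to the identity (no regularization), the four constraint branches amount to simple projections. A numpy sketch for intuition (not the class's actual API):

import numpy as np

x = np.array([1.0 + 2.0j])
pure_real      = x.real + 0j               # keep the real part
pure_imag      = 1j * x.imag               # keep the imaginary part
pure_amplitude = np.abs(x) + 0j            # keep the amplitude
pure_phase     = np.exp(1j * np.angle(x))  # keep the phase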
Code Example #5
    def _shift_stack_inplace(self, stack, shift_list):
        # Shift every image in the stack by (y_shift, x_shift) via the Fourier
        # shift theorem: a spatial shift is a phase ramp in frequency space.
        for img_idx in range(stack.shape[2]):
            y_shift = shift_list[0, img_idx]
            x_shift = shift_list[1, img_idx]
            kernel = op.exp(
                op.multiply_complex(
                    op._j,
                    op.r2c(2 * np.pi *
                           (self.kx_lin * x_shift + self.ky_lin * y_shift))))
            # Apply the phase ramp and keep the real component of the result.
            stack[..., img_idx] = op.convolve_kernel(
                op.r2c(stack[..., img_idx]), kernel, n_dim=2)[..., 0]
        return stack
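
The kernel above is the Fourier shift theorem: multiplying a spectrum by exp(j * 2 * pi * (kx * dx + ky * dy)) translates the image. A small numpy demonstration, assuming kx_lin / ky_lin are frequency grids analogous to np.fft.fftfreq (with this sign convention the content moves toward negative indices):

import numpy as np

img = np.zeros((8, 8))
img[2, 3] = 1.0
ky, kx = np.meshgrid(np.fft.fftfreq(8), np.fft.fftfreq(8), indexing='ij')
phase = np.exp(1j * 2 * np.pi * (kx * 1 + ky * 2))
shifted = np.fft.ifft2(np.fft.fft2(img) * phase).real
assert np.isclose(shifted[0, 2], 1.0)  # impulse moved from (2, 3) to (0, 2)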
Code Example #6
    def forward(ctx, field, kernel, defocus_list=[0.0]):
        field = torch.fft(field, signal_ndim=2)
        ctx.save_for_backward(kernel, field)
        # ctx.save_for_backward(field)
        ctx.defocus_list = defocus_list

        # Replicate the field spectrum once per defocus plane.
        field = field.unsqueeze(2).repeat(1, 1, len(defocus_list),
                                          1).permute(2, 0, 1, 3)
        for defocus_idx in range(len(defocus_list)):
            # Propagation kernel for this plane; conjugated for negative defocus.
            kernel_temp = op.exp(abs(defocus_list[defocus_idx]) * kernel)
            kernel_temp = kernel_temp if defocus_list[
                defocus_idx] > 0. else op.conj(kernel_temp)
            field[defocus_idx, ...] = op.multiply_complex(
                field[defocus_idx, ...], kernel_temp)
        field = torch.ifft(field, signal_ndim=2).permute(1, 2, 0, 3)
        return field
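
This forward appears to pair with the backward in Code Example #3 as one custom autograd function. Assuming they are the two static methods of the same torch.autograd.Function subclass (the class name Defocus is hypothetical), usage would look like:

import torch

class Defocus(torch.autograd.Function):
    @staticmethod
    def forward(ctx, field, kernel, defocus_list):
        ...  # body as in Code Example #6

    @staticmethod
    def backward(ctx, grad_output):
        ...  # body as in Code Example #3

# defocus_list should be a tensor, since backward calls defocus_list.clone():
# field_stack = Defocus.apply(field, kernel, torch.tensor([-1.0, 0.0, 1.0]))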
Code Example #7
	def compute_prox(self, x):
		# Project onto the pure-phase set: unit amplitude, original phase.
		x = op.exp(op.multiply_complex(op._j, op.r2c(op.angle(x))))
		return x
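
In plain numpy terms the projection above is (an illustration; op.angle / op.exp are assumed to mirror their numpy counterparts on the real/imaginary representation):

import numpy as np

x = np.array([3.0 + 4.0j, -2.0j])
x_proj = np.exp(1j * np.angle(x))  # unit amplitude, phase preserved
assert np.allclose(np.abs(x_proj), 1.0)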