def update_particle_field(self, poses=None, add=True):
    """Add (or subtract, if `add` is False) the particles at `poses` to the
    particle field, updating only the tile surrounding each particle."""
    if poses is None:
        poses = self.pos
    wid = self.viewrad
    for p in poses:
        # 1. get tile
        l = np.clip(p - 2 * wid, 0, self.image.shape)
        r = np.clip(p + 2 * wid, 0, self.image.shape)
        t = Tile(l, r, mins=0, maxs=self.image.shape)
        # 2. update:
        c = t.coords(form='broadcast')
        if add:
            self.particle_field[t.slicer] += self._particle_func(c, p, wid)
        else:
            self.particle_field[t.slicer] -= self._particle_func(c, p, wid)
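# For context, a minimal self-contained sketch of the same clipped-window
# update pattern using plain numpy. `gaussian_blob` is a hypothetical
# stand-in for self._particle_func; the window arithmetic mirrors the Tile
# construction above.
import numpy as np

def gaussian_blob(coords, pos, wid):
    # Isotropic Gaussian evaluated on broadcastable coordinate arrays.
    dist2 = sum((c - p) ** 2 for c, p in zip(coords, pos))
    return np.exp(-dist2 / (2 * wid ** 2))

field = np.zeros((32, 64, 64))
pos, wid = np.array([16.0, 30.0, 30.0]), 3.0

# Clip the +/- 2*wid window around the particle to the image bounds...
l = np.clip(np.floor(pos - 2 * wid), 0, field.shape).astype(int)
r = np.clip(np.ceil(pos + 2 * wid), 0, field.shape).astype(int)
slicer = tuple(slice(a, b) for a, b in zip(l, r))

# ...and evaluate the particle function only on that sub-block, like
# Tile.coords(form='broadcast') does.
coords = np.meshgrid(*(np.arange(a, b) for a, b in zip(l, r)),
                     indexing='ij', sparse=True)
field[slicer] += gaussian_blob(coords, pos, wid)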
def get_update_tile(self, params, values):
    """Get the amount of support required for a particular update."""
    doglobal, particles = self._update_type(params)
    if doglobal:
        return self.shape.copy()

    # 1) store the current parameters of interest
    values0 = self.get_values(params)
    # 2) calculate the current tileset
    tiles0 = [self._tile(n) for n in particles]

    # 3) update to newer parameters and calculate tileset
    self.set_values(params, values)
    tiles1 = [self._tile(n) for n in particles]

    # 4) revert parameters & return union of all tiles
    self.set_values(params, values0)
    return Tile.boundingtile(tiles0 + tiles1)
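# The union of the old and new tilesets matters because an update must cover
# everywhere the model changed: both where each particle used to be and where
# it is now. A minimal sketch of what Tile.boundingtile computes, assuming
# (as elsewhere in peri.util) that tiles expose `l`/`r` corner arrays:
def bounding_tile_sketch(tiles):
    # Smallest tile containing every input tile.
    l = np.min([t.l for t in tiles], axis=0)
    r = np.max([t.r for t in tiles], axis=0)
    return Tile(l, r)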
def _tile(self, n):
    """Get the tile surrounding particle `n`"""
    zsc = np.array([1.0 / self.zscale, 1, 1])
    pos, rad = self.pos[n], self.rad[n]
    pos = self._trans(pos)
    return Tile(pos - zsc * rad, pos + zsc * rad).pad(self.support_pad)
def _tile(self, n):
    """Get the update tile surrounding particle `n`"""
    pos = self._trans(self.pos[n])
    return Tile(pos, pos).pad(self.support_pad)
def get_padding_size(self, tile, z=None):
    return Tile(self.support)
def get_padding_size(self, tile):
    # support radius at which a Gaussian of width sigma decays to self.error,
    # i.e. sigma * sqrt(-2 * log(error)), computed per axis (z, y, x)
    self.px = np.sqrt(-2 * np.log(self.error) * self.values[2]**2)
    self.py = np.sqrt(-2 * np.log(self.error) * self.values[1]**2)
    self.pz = np.sqrt(-2 * np.log(self.error) * self.values[0]**2)
    return Tile(np.ceil([self.pz, self.py, self.px]))
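# The expressions above solve exp(-p**2 / (2 * sigma**2)) = error for p,
# giving p = sigma * sqrt(-2 * ln(error)): the distance at which a Gaussian
# of width sigma has decayed to the error tolerance. A quick numeric sanity
# check with assumed values:
import numpy as np

sigma, error = 2.0, 1e-5
p = sigma * np.sqrt(-2 * np.log(error))

# At distance p, a Gaussian of width sigma has decayed exactly to `error`.
assert np.isclose(np.exp(-p**2 / (2 * sigma**2)), error)
print(np.ceil(p))  # 10.0 -> ten pixels of padding along this axis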
def get_padding_size(self, tile):
    return Tile(np.ones(3))
def _rvecs(self, shape, centered=True):
    tile = Tile(shape)
    return tile.kvectors(norm=1.0 / tile.shape, shift=centered)
def identify_misfeatured_regions(st, filter_size=5, sigma_cutoff=8.):
    """
    Identifies regions of missing/misfeatured particles based on the
    residuals' local deviation from uniform Gaussian noise.

    Parameters
    ----------
    st : :class:`peri.states.State`
        The state in which to identify mis-featured regions.
    filter_size : Int, best if odd.
        The size of the filter for calculating the local standard
        deviation; should approximately be the size of a poorly featured
        region in each dimension. Default is 5.
    sigma_cutoff : Float or `otsu`, optional
        The max allowed deviation of the residuals from what is expected,
        in units of the residuals' standard deviation. Lower means more
        sensitive, higher means less sensitive. Default is 8.0, i.e. one
        pixel out of every 7*10^11 is mis-identified randomly. In practice
        the noise is not Gaussian, so there are still some regions
        mis-identified as improperly featured. Set to `otsu` to calculate
        this number based on an automatic Otsu threshold.

    Returns
    -------
    tiles : List of :class:`peri.util.Tile`
        Each tile is the smallest bounding tile that contains an
        improperly featured region. The list is sorted by the tile's
        volume.

    Notes
    -----
    Algorithm is:

    1. Create a field of the local standard deviation, as measured over
       a hypercube of size filter_size.
    2. Find the maximum reasonable value of the field. [The field should
       be a random variable with mean of r.std() and standard deviation
       of ~r.std() / sqrt(N), where r is the residuals and N is the
       number of pixels in the hypercube.]
    3. Label & identify the misfeatured regions as portions where the
       local error is too large.
    4. Parse the misfeatured regions into tiles.
    5. Return the sorted tiles.

    The Otsu option to calculate the sigma cutoff works well for images
    that actually contain missing particles, returning a number similar
    to one calculated with a sigma cutoff. However, if the image is
    well-featured with Gaussian residuals, then the Otsu threshold splits
    the Gaussian down the middle instead of at the tails, which is very
    bad. So use with caution.
    """
    # 1. Field of local std
    r = st.residuals
    weights = np.ones([filter_size] * len(r.shape), dtype='float')
    weights /= weights.sum()
    f = np.sqrt(nd.filters.convolve(r * r, weights, mode='reflect'))

    # 2. Maximal reasonable value of the field.
    if sigma_cutoff == 'otsu':
        max_ok = initializers.otsu_threshold(f)
    else:
        # max_ok = f.mean() * (1 + sigma_cutoff / np.sqrt(weights.size))
        max_ok = f.mean() + sigma_cutoff * f.std()

    # 3. Label & identify the bad regions
    bad = f > max_ok
    labels, n = nd.measurements.label(bad)
    inds = []
    for i in range(1, n + 1):
        inds.append(np.nonzero(labels == i))

    # 4. Parse into tiles
    tiles = [Tile(np.min(ind, axis=1), np.max(ind, axis=1) + 1)
             for ind in inds]

    # 5. Sort by volume and return
    volumes = [t.volume for t in tiles]
    return [tiles[i] for i in np.argsort(volumes)[::-1]]
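# Hypothetical usage sketch, assuming a fitted peri state `st` is available
# (e.g. loaded via peri.states) along with the imports the function relies
# on (numpy as np, scipy.ndimage as nd, and peri.initializers):
tiles = identify_misfeatured_regions(st, filter_size=5, sigma_cutoff=8.)
for t in tiles[:3]:  # tiles are sorted largest-volume first
    print(t, t.volume)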
# im = io.imread('/Volumes/PhD/DavidData/Emily/2014_5_23-T3Y_63xoil_1.lsm', as_gray=True)
# im_array = np.array(im)
# im.shape
# im_array_small = im_array[5:, 312:, 0:200]
# im_array_small.shape
# f = tp.locate(im_array, diameter=(3, 11, 11))
# f[f['z'] == f['z']]
# f_small = tp.locate(im_array_small, diameter=(3, 11, 11))
# f_small = f_small[f_small['z'] == f_small['z']]
# np.save('part_loc_T3Y_1.npy', np.array(f[f.columns[0:3]]))

import numpy as np
import matplotlib.pyplot as plt
import trackpy as tp
from peri.util import RawImage, Tile

imFile = '/Volumes/PhD/DavidData/Emily/2014_5_23-T3Y_63xoil_1.lsm'
raw_im = RawImage(imFile)
im_arr = raw_im.get_image()
im_arr.shape  # `shape` is an attribute, not a method

f = tp.locate(im_arr, diameter=(9, 9, 9))

tile = Tile(200)
small_im = RawImage(imFile, tile=tile)
small_im_arr = small_im.get_image()
f_small = tp.locate(small_im_arr, diameter=(9, 9, 9))
f_small = f_small[f_small['z'] == f_small['z']]  # drop rows where z is NaN

particle_positions = np.array(f_small[f_small.columns[0:3]])
np.save('part_loc_T3Y_1.npy', particle_positions)

# plt.figure()
# tp.annotate3d(f[f['z'] == f['z']], im_array)
plt.imshow(im_arr[2, :, :], cmap="gray")
scatter = plt.scatter(x=f["x"], y=f["y"], c=f["z"], marker="x")
plt.legend(*scatter.legend_elements(), title="z")
plt.show()