def apply(self, frames): registrators = opflowreg.RegistrationInterfaces if self.reg_type == 'template': tstart = len(frames)/2 tstop = min(len(frames),tstart+50) template = np.max(frames[tstart:tstop],axis=0) def register_stack(stack, registrator): return opflowreg.register_stack_to_template(stack,template,registrator, njobs=self.n_cpu) elif self.reg_type == 'recursive': def register_stack(stack, registrator): return opflowreg.register_stack_recursive(stack,registrator)[1] else: raise NameError("Unknown registration type") # TODO: below is just crazy. has to be made neat later reg_dispatcher = {'affine':registrators.affine, 'homograhy':registrators.homography, 'shifts':registrators.shifts, 'Greenberg-Kerr':registrators.greenberg_kerr, 'softmesh':registrators.softmesh} operations = self.reg_pipeline.split('->') newframes = frames warp_history = [] for movement_model in operations: warps = register_stack(newframes, reg_dispatcher[movement_model]) warp_history.append(warps) newframes = opflowreg.apply_warps(warps, newframes, njobs=self.n_cpu) final_warps = [lib.flcompose(*warpchain) for warpchain in zip(*warp_history)] if self.save_recipe_to: opflowreg.save_recipe(final_warps, self.save_recipe_to) print 'saved motions stab recipe to %s'%self.save_recipe_to return newframes
def frames(self,):
    """Return an iterator over processed frames.

    Every frame from the underlying image source is passed through the
    composition of the hook functions stored in ``self.fns`` (the list
    is empty by default).  Typical hooks are ``imfun.lib.DFoSD``,
    ``imfun.lib.DFoF`` or functions from ``scipy.ndimage``.
    """
    hook = lib.flcompose(identity, *self.fns)
    return itt.imap(hook, self.mlfimg.flux_frame_iter())
def register_stack_recursive(frames, regfn):
    """Align a stack of frames by recursive pairwise registration.

    The stack is split in halves, each half is aligned recursively, and
    the left half's mean frame is registered onto the right half's with
    `regfn`.

    Returns a tuple ``(mean_frame, warps)`` where ``mean_frame`` is the
    mean frame of the aligned stack and ``warps`` is a list of
    functions, one per input frame, each mapping an image into the
    coordinate frame of ``mean_frame``.
    """
    #import sys
    #sys.setrecursionlimit(len(frames))
    n = len(frames)
    if n < 2:
        # Base case: a single frame is aligned to itself via identity warp.
        return frames[0], [lambda f: f]
    half = n//2
    mean_left, warps_left = register_stack_recursive(frames[:half], regfn)
    mean_right, warps_right = register_stack_recursive(frames[half:], regfn)
    warp = regfn(mean_left, mean_right)
    # Average of the right mean and the left mean warped onto it.
    combined_mean = 0.5*(parametric_warp(mean_left, warp) + mean_right)
    # Chain the new warp after every left-half warp; right-half warps
    # already land in the reference frame.
    chained_left = [lib.flcompose(w, warp) for w in warps_left]
    return combined_mean, chained_left + warps_right
def apply_reg(frames):
    """Build and return the per-frame stabilization warps for `frames`.

    Closure over the script-level `args` namespace: uses ``args.type``
    ('template' or 'recursive'), ``args.model`` (list of motion-model
    names, or (name, params-dict) pairs), ``args.ncpu`` and
    ``args.verbose``.  Unlike the `apply` method, this returns only the
    composed warps (a list, one entry per frame) and discards the
    intermediate warped frames.

    Raises NameError for an unknown ``args.type``; KeyError for an
    unknown motion model name.
    """
    if args.type == 'template':
        if args.verbose > 1:
            print 'stabilization type is template'
        # Template = max-projection over up to 50 frames taken from the
        # middle of the stack.
        tstart = len(frames)/2
        tstop = min(len(frames),tstart+50)
        template = np.max(frames[tstart:tstop],axis=0)
        def register_stack(stack, registrator, **fnargs):
            return opflowreg.register_stack_to_template(stack,template,registrator,njobs=args.ncpu,**fnargs)
    elif args.type == 'recursive':
        def register_stack(stack, registrator,**fnargs):
            # [1] -> keep only the warp functions, drop the mean frame
            return opflowreg.register_stack_recursive(stack,registrator,**fnargs)[1]
    else:
        raise NameError("Unknown registration type")
    # TODO: below is just crazy. has to be made neat later
    reg_dispatcher = {'affine':registrators.affine,
                      'homography':registrators.homography,
                      'shifts':registrators.shifts,
                      'Greenberg-Kerr':registrators.greenberg_kerr,
                      'softmesh':registrators.softmesh}
    operations = args.model
    newframes = frames
    warp_history = []
    for movement_model in operations:
        # Each entry may be a bare model name (string) or a sequence:
        # (name, params-dict), or a 1-tuple (name,) meaning no params.
        if not isinstance(movement_model, basestring):
            if len(movement_model)>1:
                model, model_params = movement_model
            else:
                model, model_params = movement_model[0],{}
        else:
            model = movement_model
            model_params = {}
        if args.verbose > 1:
            print 'correcting for {} with params: {}'.format(model, model_params)
        warps = register_stack(newframes, reg_dispatcher[model], **model_params)
        warp_history.append(warps)
        # Warp the frames so the next stage refines the residual motion.
        newframes = opflowreg.apply_warps(warps, newframes, njobs=args.ncpu)
    # Compose, per frame, the warps accumulated over all stages.
    final_warps = [lib.flcompose(*warpchain) for warpchain in zip(*warp_history)]
    del newframes  # free the intermediate stack; only warps are returned
    return final_warps
def pipeline(self):
    """Return a single callable that applies every hook in ``self.fns``
    (composed after ``identity``) to a frame."""
    stages = [identity] + list(self.fns)
    return lib.flcompose(*stages)
def find_objects(arr, k=3, level=5, noise_std=None,
                 coefs=None, supp=None,
                 dec_fn=atrous.decompose,
                 retraw=False,  # return raw, only used for testing
                 start_scale=0, weights=None,
                 deblendp=True,
                 min_px_size=200,
                 min_nscales=2,
                 rec_variant=2,
                 modulus=False):
    """Use MVM to find objects in the input array.

    Parameters:
      - `arr`: (`numpy array`) -- 1D, 2D or 3D ``numpy`` array. Input data.
      - `k` : (`number` or iterable of numbers) -- threshold to regard a
        wavelet coefficient as significant, in :math:`\\times \\sigma`
        (noise standard deviations); if iterable, one threshold per level
        and `level` is overridden by its length
      - `level`: (`int`) -- level of wavelet transform
      - `noise_std`: (`number` or `None`) -- if known, provide noise :math:`\\sigma`
      - `coefs`: if already calculated, provide wavelet coefficients
      - `supp`: if already calculated, provide support of significant
        wavelet coefficients
      - `dec_fn`: decomposition function used to compute `coefs`
      - `start_scale`: (`int`) -- start reconstruction at this scale
        (decomposition level)
      - `weights`: (`list` of numbers) -- weight coefficients at different
        levels before reconstruction
      - `deblendp`: if True, deblend overlapping structures (destructive)
      - `min_px_size`: an `MVMNode` should contain at least this number of pixels
      - `min_nscales`: an object should have at least this many scales/levels
      - `rec_variant`: 1 -> simple reconstruction, 2 -> iterative reconstruction
      - `modulus`: if False, only search for light sources
      - `retraw`: only used for debugging; 1 -> return raw objects,
        2 -> return their supports

    Returns: a `list` of recovered objects as *embeddings* around non-zero
    voxels; see `embedding` function for details
    """
    # An iterable k means "one threshold per level" and fixes the level count.
    if np.iterable(k):
        level = len(k)
    if coefs is None:
        coefs = dec_fn(arr, level)
    if noise_std is None:
        # Robust noise estimate (MAD) from the finest-scale coefficients.
        noise_std = atrous.estimate_sigma_mad(coefs[0], True)
    ## if arr.ndim > 2:
    ##     noise_std = atrous.estimate_sigma_mad(coefs[0], True)
    ## else:
    ##     noise_std = atrous.estimate_sigma(arr, coefs)
    ## calculate support taking only positive coefficients (light sources)
    # Per-level noise scaling factors depend on the decomposition used.
    sigmaej = atrous.sigmaej
    if dec_fn == mmt.decompose_mwt:
        sigmaej = mmt.sigmaej_mwts2
    if supp is None:
        supp = multiscale.threshold_w(coefs, np.array(k, _dtype_)*noise_std, modulus=modulus,
                                      sigmaej=sigmaej)
    if weights is None:
        weights = np.ones(level)
    # Group significant coefficients into per-level structures, then link
    # them across levels into a connectivity graph of candidate objects.
    structures = get_structures(coefs, supp)
    g = connectivity_graph(structures)
    if deblendp:
        gdeblended = deblend_all(g, coefs, min_nscales)  # destructive
    else:
        gdeblended = [r for r in g if nscales(r) >= min_nscales]
    #check = lambda x: len(tree_locations2(x)) > min_px_size
    def check(x):
        # Keep only objects covering more than min_px_size pixels.
        return len(tree_locations2(x)) > min_px_size
    # Largest (by total mass) objects first.
    objects = sorted([x for x in gdeblended if check(x)],
                     key=lambda u: tree_mass(u), reverse=True)
    if retraw == 1:
        return objects
    if retraw == 2:
        return [supp_from_obj(o, start_scale) for o in objects]
    # note: even if we decompose with mmt.decompose_mwt
    # we use atrous.decompose for object reconstruction because
    # we don't expect too many outliers and this way it's faster
    # Two reconstruction pipelines: object -> support -> reconstructed
    # image -> embedding; variant 2 reconstructs iteratively.
    pipelines = [
        lib.flcompose(lambda x1, x2: supp_from_obj(x1, x2, weights=weights),
                      lambda x: multiscale.simple_rec(coefs, x),
                      embedding),
        lib.flcompose(
            lambda x1, x2: supp_from_obj(x1, x2, weights=weights),
            lambda x: multiscale.simple_rec_iterative(
                coefs, x, positive_only=(not modulus)),
            embedding)
    ]
    recovered = (pipelines[rec_variant - 1](obj, start_scale)
                 for obj in objects)
    # Final size filter on the reconstructed embeddings (Py2: returns a list).
    return filter(lambda x: np.sum(x[0] > 0) > min_px_size, recovered)