def get_min_flux(self, debug=False):
    """ Obtains the low end of the interval for sampling the S/N, based on
    an initial estimation of the radial profile of the mean frame.
    """
    # Radial profile of the mean (collapsed) frame of the cube
    sampling_sep = 1
    radius_int = 1
    if GARRAY.ndim == 3:
        global_frame = np.mean(GARRAY, axis=0)
    elif GARRAY.ndim == 4:
        global_frame = np.mean(GARRAY.reshape(-1, GARRAY.shape[2],
                                              GARRAY.shape[3]), axis=0)

    me = frame_average_radprofile(global_frame, sep=sampling_sep,
                                  init_rad=radius_int, plot=False)
    radprof = np.array(me.radprof)
    radprof = radprof[np.array(self.distances) + 1]
    radprof[radprof < 0] = 0.01   # floor to avoid negative starting fluxes
    self.radprof = radprof

    print("Estimating the lower flux interval for sampling the S/N vs flux "
          "function")
    flux_min = pool_map(self.n_proc, _get_min_flux, iterable(self.n_dist),
                        self.distances, radprof, self.fwhm, self.plsc,
                        iterable(self.min_snr), self.wavelengths,
                        self.spectrum, self.algo, self.scaling,
                        self.svd_mode, self.random_seed, debug)
    self.min_fluxes = flux_min
    timing(self.starttime)
def _sample_flux_snr(distances, fwhm, plsc, n_injections, flux_min, flux_max,
                     nproc=10, random_seed=42, wavelengths=None, spectrum=None,
                     mode='median', scaling='temp-standard',
                     svd_mode='randsvd'):
    """ Samples the S/N of fake companions injected with fluxes drawn
    uniformly between ``flux_min`` and ``flux_max`` at each distance. Sensible
    flux intervals depend on a combination of factors: the number of frames,
    the range of field rotation, the frame-to-frame correlation and the
    intensity of the stellar glare.
    """
    if GARRAY.ndim == 3:
        frsize = int(GARRAY.shape[1])
    elif GARRAY.ndim == 4:
        frsize = int(GARRAY.shape[2])
    ninj = n_injections
    random_state = np.random.RandomState(random_seed)
    flux_dist_theta_all = list()
    snrs_list = list()
    fluxes_list = list()
    n_ks = 3

    # Building the list of (flux, dist, theta) injection parameters
    for i, d in enumerate(distances):
        yy, xx = get_annulus_segments((frsize, frsize), d, 1, 1)[0]
        num_patches = yy.shape[0]
        fluxes_dist = random_state.uniform(flux_min[i], flux_max[i], size=ninj)
        inds_inj = random_state.randint(0, num_patches, size=ninj)
        for j in range(ninj):
            injx = xx[inds_inj[j]]
            injy = yy[inds_inj[j]]
            injx -= frame_center(GARRAY[0])[1]
            injy -= frame_center(GARRAY[0])[0]
            dist = np.sqrt(injx ** 2 + injy ** 2)
            theta = np.mod(np.arctan2(injy, injx) / np.pi * 180, 360)
            flux_dist_theta_all.append((fluxes_dist[j], dist, theta))

    # Multiprocessing pool over all the injections (ninj per distance)
    res = pool_map(nproc, _get_adi_snrs, GARRPSF, GARRPA, fwhm, plsc,
                   iterable(flux_dist_theta_all), wavelengths, spectrum,
                   mode, n_ks, scaling, svd_mode)

    # Regrouping the fluxes and S/Ns per distance
    for i in range(len(distances)):
        flux_dist = []
        snr_dist = []
        for j in range(ninj):
            flux_dist.append(res[j + (ninj * i)][0])
            snr_dist.append(res[j + (ninj * i)][1])
        fluxes_list.append(flux_dist)
        snrs_list.append(snr_dist)

    return fluxes_list, snrs_list
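# Illustrative sketch (not part of the original module): how _sample_flux_snr
# could be called once the module-level GARRAY, GARRPSF and GARRPA globals
# have been populated by the surrounding machinery. All values below are
# hypothetical placeholders.
#
#     distances = [4, 8, 12]              # radial distances [px]
#     flux_min = [1, 1, 1]                # assumed lower flux bound per distance
#     flux_max = [100, 80, 60]            # assumed upper flux bound per distance
#     fluxes, snrs = _sample_flux_snr(distances, fwhm=4, plsc=0.0272,
#                                     n_injections=10, flux_min=flux_min,
#                                     flux_max=flux_max, nproc=4)
#     # fluxes[i][j] / snrs[i][j]: flux and measured S/N of the j-th injection
#     # at the i-th distance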
def get_max_flux(self, debug=False):
    """ Obtaining the high end of the interval for sampling the S/N.
    """
    if self.min_fluxes is None:
        self.get_min_flux()

    print("Estimating the upper flux interval for sampling the S/N vs flux "
          "function")
    flux_max = pool_map(self.n_proc, _get_max_flux, iterable(self.n_dist),
                        self.distances, self.min_fluxes, self.fwhm, self.plsc,
                        iterable(self.max_snr), self.wavelengths,
                        self.spectrum, self.algo, self.scaling, self.svd_mode,
                        self.random_seed, debug)
    self.max_fluxes = flux_max
    timing(self.starttime)
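# Illustrative sketch (not part of the original module): expected call order
# of get_min_flux / get_max_flux on a hypothetical estimator instance holding
# the attributes they use (distances, fwhm, plsc, min_snr, max_snr, ...).
# get_max_flux() falls back to get_min_flux() when the lower bounds have not
# been computed yet.
#
#     estimator.get_min_flux()    # fills estimator.min_fluxes (one per distance)
#     estimator.get_max_flux()    # fills estimator.max_fluxes (one per distance)
#     # min_fluxes[i] and max_fluxes[i] bracket the flux range to be sampled
#     # at estimator.distances[i]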
def make_mlar_samples_ann_signal(input_array, angle_list, psf, n_samples,
                                 cevr_thresh, n_ks, force_klen, inrad, outrad,
                                 patch_size, flux_low, flux_high, plsc=0.01,
                                 normalize='slice', nproc=1, nproc2=1,
                                 interp='bilinear', lr_mode='eigen',
                                 mode='mlar', kss_window=None, tss_window=None,
                                 random_seed=42, verbose=False):
    """ Creates the positive (ONEs) samples: ``n_samples`` synthetic companions
    are injected in the annulus between ``inrad`` and ``outrad`` of the 3d
    ndarray ``input_array``, with fluxes drawn uniformly between ``flux_low``
    and ``flux_high``, and a patch of residuals is grabbed around each
    injection.
    """
    dist_flux_p1 = flux_low
    dist_flux_p2 = flux_high
    collapse_func = np.mean
    scaling = None  # 'temp-mean'
    random_state = np.random.RandomState(random_seed)

    # Creating the ONEs samples by injecting companions
    if verbose:
        print("Creating the ONEs samples")

    frsize = int(input_array.shape[1])
    cube = input_array
    # if frsize > outrad + outrad + patch_size + 2:
    #     frsize = int(outrad + outrad + patch_size + 2)
    #     cube = cube_crop_frames(input_array, frsize, force=True,
    #                             verbose=False)

    width = outrad - inrad
    yy, xx = get_annulus_segments((frsize, frsize), inrad, width, 1)[0]
    num_patches = yy.shape[0]

    k_list = get_cumexpvar(cube, 'annular', inrad, outrad, patch_size,
                           k_list=None, cevr_thresh=cevr_thresh, n_ks=n_ks,
                           match_len=force_klen, verbose=False)

    n_req_inject = n_samples
    if mode == 'mlar':
        # 4D: n_samples, n_k_list, patch_size, patch_size
        X_ones_array = np.empty((n_req_inject, len(k_list), patch_size,
                                 patch_size))
    elif mode == 'tmlar':
        nfr = cube.shape[0]
        X_ones_array = np.empty((n_req_inject, nfr, patch_size, patch_size))
    elif mode == 'tmlar4d':
        nfr = cube.shape[0]
        X_ones_array = np.empty((n_req_inject, nfr, len(k_list), patch_size,
                                 patch_size))

    if verbose:
        print("{} injections".format(n_req_inject))

    if not dist_flux_p2 > dist_flux_p1:
        raise ValueError('`flux_high` must be larger than `flux_low`')
    fluxes = random_state.uniform(dist_flux_p1, dist_flux_p2,
                                  size=n_req_inject)
    fluxes = np.sort(fluxes)
    inds_inj = random_state.randint(0, num_patches, size=n_req_inject)

    # Polar coordinates (dist, theta) of each injection position
    dists = []
    thetas = []
    for m in range(n_req_inject):
        injx = xx[inds_inj[m]]
        injy = yy[inds_inj[m]]
        injx -= frame_center(cube[0])[1]
        injy -= frame_center(cube[0])[0]
        dist = np.sqrt(injx ** 2 + injy ** 2)
        theta = np.mod(np.arctan2(injy, injx) / np.pi * 180, 360)
        dists.append(dist)
        thetas.append(theta)

    if not nproc:
        nproc = int(cpu_count() / 4)

    if nproc == 1:
        for m in range(n_req_inject):
            cufc, cox, coy = create_synt_cube(cube, psf, angle_list, plsc,
                                              theta=thetas[m], flux=fluxes[m],
                                              dist=dists[m], verbose=False)
            cox = int(np.round(cox))
            coy = int(np.round(coy))
            cube_residuals = svd_decomp(cufc, angle_list, patch_size, inrad,
                                        outrad, scaling, k_list, collapse_func,
                                        neg_ang=False, lr_mode=lr_mode,
                                        nproc=nproc2, interp=interp, mode=mode)
            # One patch of residuals per injection
            X_ones_array[m] = cube_crop_frames(np.asarray(cube_residuals),
                                               patch_size, xy=(cox, coy),
                                               verbose=False, force=True)
    elif nproc > 1:
        if lr_mode in ['cupy', 'randcupy', 'eigencupy']:
            raise RuntimeError('CUPY does not play well with multiprocessing')
        if get_start_method() == 'fork' and lr_mode in ['pytorch',
                                                        'eigenpytorch',
                                                        'randpytorch']:
            raise RuntimeError("Cannot use pytorch and multiprocessing "
                               "outside main (i.e. from a jupyter cell). See: "
                               "http://pytorch.org/docs/0.3.1/notes/"
                               "multiprocessing.html.")

        flux_dist_theta = zip(fluxes, dists, thetas)
        res = pool_map(nproc, _inject_FC, cube, psf, angle_list, plsc, inrad,
                       outrad, iterable(flux_dist_theta), k_list, scaling,
                       collapse_func, patch_size, lr_mode, interp, mode)
        for m in range(n_req_inject):
            X_ones_array[m] = res[m]

    # Moving (rolling) subsampling along the k and/or temporal axes
    move_subsampling = 'median'
    if mode == 'mlar' and kss_window is not None:
        X_ones_array = cube_move_subsample(X_ones_array, kss_window, axis=1,
                                           mode=move_subsampling)
    elif mode == 'tmlar' and tss_window is not None:
        X_ones_array = cube_move_subsample(X_ones_array, tss_window, axis=1,
                                           mode=move_subsampling)
    elif mode == 'tmlar4d':
        if tss_window is not None:
            X_ones_array = cube_move_subsample(X_ones_array, tss_window,
                                               axis=1, mode=move_subsampling)
        if kss_window is not None:
            X_ones_array = cube_move_subsample(X_ones_array, kss_window,
                                               axis=2, mode=move_subsampling)

    if normalize is not None:
        if mode == 'tmlar4d':
            for i in range(X_ones_array.shape[0]):
                X_ones_array[i] = normalize_01(X_ones_array[i], normalize)
        else:
            X_ones_array = normalize_01(X_ones_array, normalize)

    return X_ones_array.astype('float32')
def predict_pairwise(model, cube, angle_list, fwhm, patch_size_px, delta_rot,
                     radius_int=None, high_pass='******', kernel_size=5,
                     normalization='slice', imlib='opencv',
                     interpolation='bilinear', nproc=1, verbose=True,
                     chunks_per_proc=2):
    """ Generates a probability map by predicting with ``model`` on patches
    grabbed in an annulus of ``cube``.

    Parameters
    ----------
    model : Keras model
    cube : 3d ndarray
    angle_list : 1d ndarray
    fwhm : float
    patch_size_px : int, optional
    delta_rot : float, optional
    verbose : bool or int, optional
        0 / False: no output.
        1 / True: full output (timing + progress bar).
        2: progress bar only.
    [...]

    Returns
    -------
    probmap : 2d ndarray

    Notes
    -----
    - support for 4D cubes?
    """
    starttime = time_ini(verbose=verbose)
    if radius_int is None:
        radius_int = fwhm

    n_frames, sizey, sizex = cube.shape
    width = int(sizey / 2 - patch_size_px / 2 - radius_int)
    ind = get_annulus_segments(cube[0], inner_radius=radius_int, width=width,
                               nsegm=1)
    probmap = np.zeros((sizey, sizex))

    if high_pass is not None:
        cube = cube_filter_highpass(cube, high_pass, kernel_size=kernel_size,
                                    verbose=False)

    indices = list(range(ind[0][0].shape[0]))
    if nproc is None:
        nproc = cpu_count() // 2    # Hyper-threading doubles the # of cores

    # Grabbing the patches in parallel, in chunks of annulus positions
    nchunks = nproc * chunks_per_proc
    print("Grabbing patches with {} processes".format(nproc))
    res_ = list(Progressbar(pool_imap(nproc, _parallel_make_patches_chunk,
                                      iterable(make_chunks(indices, nchunks)),
                                      ind, cube, angle_list, fwhm,
                                      patch_size_px, delta_rot, normalization,
                                      imlib, interpolation),
                            total=nchunks, leave=False, verbose=False))

    xx = []
    yy = []
    pats = []
    for r in res_:
        x, y, pp = r
        for i in range(len(x)):
            xx.append(x[i])
            yy.append(y[i])
            pats.append(pp[i])

    if verbose == 1:
        timing(starttime)

    print("Prediction on patches:")
    probas = predict_from_patches(model, pats, len(xx))
    for i in range(len(xx)):
        probmap[xx[i], yy[i]] = probas[i]

    if verbose == 1:
        timing(starttime)
    return probmap
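# Illustrative end-to-end sketch (not part of the original module): building
# positive MLAR samples with make_mlar_samples_ann_signal and, once a model
# has been trained on such samples, producing a probability map with
# predict_pairwise. `trained_model` is a hypothetical Keras model; the cube,
# PSF and parameter values below are placeholder assumptions.
def _example_mlar_workflow(trained_model):
    cube = np.random.normal(size=(30, 101, 101))    # fake ADI cube
    pa = np.linspace(0, 40, 30)                     # fake parallactic angles
    yy0, xx0 = np.indices((11, 11)) - 5
    psf = np.exp(-(xx0 ** 2 + yy0 ** 2) / (2 * 2.0 ** 2))   # fake Gaussian PSF

    # Positive (ONEs) samples from injections in a 10-14 px annulus;
    # flux_low/flux_high would normally come from the S/N-based flux
    # estimation implemented above
    X_ones = make_mlar_samples_ann_signal(cube, pa, psf, n_samples=20,
                                          cevr_thresh=0.99, n_ks=10,
                                          force_klen=True, inrad=10,
                                          outrad=14, patch_size=21,
                                          flux_low=10, flux_high=200,
                                          mode='mlar', verbose=False)

    # Detection probability map with the trained model (high_pass=None skips
    # the high-pass filtering step)
    probmap = predict_pairwise(trained_model, cube, pa, fwhm=4,
                               patch_size_px=21, delta_rot=0.5,
                               high_pass=None, nproc=2)
    return X_ones, probmap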