def set_common_mask(self, n_sigma=3.):
    """
    Compute and cache a polarization mask common to all bootstrap images.

    The mask is derived with ``pol_mask`` from the common-size Stokes images
    at the highest frequency and stored on the instance together with the
    ``n_sigma`` level that produced it.

    :param n_sigma: (optional)
        Number of rms levels used for masking. (default: ``3.``)
    """
    print("Finding rough mask for creating bootstrap images of RM, alpha,"
          " ...")
    highest_freq = self.freqs[-1]
    stokes_images = {}
    for stokes in self.stokes:
        stokes_images[stokes] = self.cc_cs_image_dict[highest_freq][stokes]
    self._cs_mask = pol_mask(stokes_images,
                             rms_cs_dict=None,
                             uv_fits_path=self.uvfits_dict[highest_freq],
                             n_sigma=n_sigma,
                             path_to_script=self.path_to_script)
    self._cs_mask_n_sigma = n_sigma
def process_mf(uvdata_dict, beam, data_dir, path_to_script, clean_after=True,
               rms_cs_dict=None, mapsize_clean=(512, 0.1)):
    """
    CLEAN multifrequency uv-data and derive SPIX and ROTM maps.

    :param uvdata_dict:
        Dictionary with bands as keys and uv-data FITS file names (relative
        to ``data_dir``) as values.
    :param beam:
        Beam used to restore the CLEAN maps of all bands.
    :param data_dir:
        Directory with uv-data files; CLEAN maps are written there as
        ``cc_<band>_<stokes>.fits``.
    :param path_to_script:
        Path to difmap CLEAN script.
    :param clean_after: (optional)
        Remove the created CLEAN FITS maps at the end? (default: ``True``)
    :param rms_cs_dict: (optional)
        Pre-computed per-band, per-Stokes rms values. If ``None`` then
        estimate them here with ``rms_image_shifted``. (default: ``None``)
    :param mapsize_clean: (optional)
        Tuple of image size [pix] and pixel size [mas] used in CLEAN.
        (default: ``(512, 0.1)``)
    :return:
        Dictionary with keys ``"ROTM"`` and ``"SPIX"`` (each a dictionary
        with ``"value"``, ``"sigma"`` and ``"chisq"`` arrays) and ``"RMS"``
        (the per-band, per-Stokes rms dictionary).

    :note:
        NOTE(review): relies on module-level ``bands`` and ``freqs`` being
        defined and consistent with ``uvdata_dict`` — confirm or pass them
        explicitly.
    """
    images_dict = dict()

    print(" === CLEANing each band and Stokes ===")
    clean_original_data(uvdata_dict, data_dir, beam,
                        mapsize_clean=mapsize_clean)
    for band in bands:
        images_dict[band] = dict()
        for stokes in ("I", "Q", "U"):
            ccimage = create_clean_image_from_fits_file(
                os.path.join(data_dir, "cc_{}_{}.fits".format(band, stokes)))
            images_dict[band].update({stokes: ccimage})

    # Calculate RMS for each band and Stokes (only when not supplied).
    print(" === Calculate RMS for each band and Stokes ===")
    if rms_cs_dict is None:
        rms_cs_dict = dict()
        for band in bands:
            rms_cs_dict[band] = {
                stokes: rms_image_shifted(
                    os.path.join(data_dir, uvdata_dict[band]),
                    stokes=stokes, image=images_dict[band][stokes],
                    path_to_script=path_to_script)
                for stokes in ("I", "Q", "U")
            }
    for band in bands:
        for stokes in ("I", "Q", "U"):
            print("rms = {}".format(rms_cs_dict[band][stokes]))

    # Find mask for "I" for each band and combine them into single mask
    print("Calculating masks for I at each band and combining them")
    spix_mask_image = spix_mask(
        {band: images_dict[band]["I"] for band in bands},
        {band: rms_cs_dict[band]["I"] for band in bands},
        n_sigma=3, path_to_script=path_to_script)

    # Find mask for "PPOL" for each band and combine them into single mask
    print("Calculating masks for PPOL at each band and combining them")
    ppol_mask_image = dict()
    for band in bands:
        ppol_mask_image[band] = pol_mask(
            {stokes: images_dict[band][stokes]
             for stokes in ("I", "Q", "U")},
            {stokes: rms_cs_dict[band][stokes]
             for stokes in ("I", "Q", "U")},
            n_sigma=2, path_to_script=path_to_script)
    # Combine the per-band masks into a single boolean mask.
    ppol_mask_image = np.logical_or.reduce(
        [ppol_mask_image[band] for band in bands])

    spix_image, sigma_spix_image, chisq_spix_image = \
        spix_map(freqs, [images_dict[band]["I"].image for band in bands],
                 mask=spix_mask_image)

    print("Calculating PANG and its error for each band")
    pang_images = dict()
    sigma_pang_images = dict()
    for band in bands:
        pang_images[band] = pang_map(images_dict[band]["Q"].image,
                                     images_dict[band]["U"].image,
                                     mask=ppol_mask_image)
        # sigma_PANG = sqrt(Q^2*sigma_U^2 + U^2*sigma_Q^2) / (2*(Q^2 + U^2)).
        # NOTE(review): the 1.8 appears to be an empirical scaling of the map
        # rms to a per-pixel sigma — confirm its origin.
        sigma_pang_images[band] = np.hypot(
            images_dict[band]["Q"].image * 1.8 * rms_cs_dict[band]["U"],
            images_dict[band]["U"].image * 1.8 * rms_cs_dict[band]["Q"])
        sigma_pang_images[band] = sigma_pang_images[band] / (
            2. * (images_dict[band]["Q"].image**2. +
                  images_dict[band]["U"].image**2.))

    print("Calculating ROTM image")
    rotm_image, sigma_rotm_image, chisq_rotm_image = rotm_map(
        freqs, [pang_images[band] for band in bands],
        [sigma_pang_images[band] for band in bands],
        mask=ppol_mask_image)

    if clean_after:
        print("Removing maps")
        for band in bands:
            for stokes in ("I", "Q", "U"):
                os.unlink(
                    os.path.join(data_dir,
                                 "cc_{}_{}.fits".format(band, stokes)))

    result = {
        "ROTM": {
            "value": rotm_image,
            "sigma": sigma_rotm_image,
            "chisq": chisq_rotm_image
        },
        "SPIX": {
            "value": spix_image,
            "sigma": sigma_spix_image,
            "chisq": chisq_spix_image
        },
        "RMS": rms_cs_dict
    }
    return result
# NOTE(review): this is a fragment of a larger function whose ``def`` line is
# not visible in this chunk — free names ``x``, ``y``, ``i_image``,
# ``i_image_circ``, ``data_dir``, ``source``, ``epoch``, ``n_rms_max`` and
# ``n_sigma_pol`` are presumably locals/arguments of that function; confirm
# against the full file.
# NOTE(review): this fragment uses Python 2 ``print`` statements while the
# rest of the file uses Python 3 ``print(...)`` — confirm the target
# interpreter; as written this cannot run under Python 3.
x -= i_image.pixref[1]
distances = np.sqrt(x**2. + y**2.)
# Largest pixel distance at which the image still exceeds the rms threshold.
max_dist = int(sorted(distances)[-1])
print "Max. distance to n_rms = {} is {} pix.".format(
    n_rms_max, max_dist)

# Creating PPOL image with circular beam
print "Creating polarization maps"
q_image = create_image_from_fits_file(
    os.path.join(data_dir, map_fname(source, epoch, 'Q', 'circ')))
u_image = create_image_from_fits_file(
    os.path.join(data_dir, map_fname(source, epoch, 'U', 'circ')))
stokes_image_dict = {'I': i_image_circ, 'Q': q_image, 'U': u_image}
print "Masking polarization flux map at" \
    " n_sigma = {}".format(n_sigma_pol)
mask = pol_mask(stokes_image_dict, n_sigma=n_sigma_pol)
p_image_circ = pol_map(q_image.image, u_image.image)
# Zero out polarized flux everywhere the mask flags low significance.
p_image_circ[mask] = 0.

# Calculate ridge line for I and PPOL
print "Calcualting ridge-line for I"
i_coords = np.atleast_2d(
    jet_ridge_line(i_image_circ.image, max_dist))
print "Calcualting ridge-line for P"
p_coords = np.atleast_2d(jet_ridge_line(p_image_circ, max_dist))
deviances = list()
# Overlay both ridge lines on the Stokes I image (P ridge in red).
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.matshow(i_image_circ.image)
ax.scatter(i_coords[:, 1], i_coords[:, 0])
ax.scatter(p_coords[:, 1], p_coords[:, 0], color='r')
def analyze_source(uv_fits_paths, n_boot, imsizes=None, common_imsize=None,
                   common_beam=None, find_shifts=False, outdir=None,
                   path_to_script=None, clear_difmap_logs=True,
                   rotm_slices=None):
    """
    Function that uses multifrequency self-calibration data for in-depth
    analysis.

    :param uv_fits_paths:
        Iterable of paths to self-calibrated uv-data FITS-files.
    :param n_boot:
        Number of bootstrap replications to use in analysis.
        NOTE(review): currently unused because the bootstrap call in step 6
        is commented out — replications are picked up from disk instead.
    :param imsizes: (optional)
        Iterable of image parameters (imsize, pixsize) that should be used
        for CLEANing of uv-data if no CLEAN-images are supplied. Should be
        sorted in increasing frequency order. If ``None`` then specify
        parameters by CLEAN images. (default: ``None``)
    :param common_imsize: (optional)
        Image parameters that will be used in making common size images for
        multifrequency analysis. If ``None`` then use physical image size of
        lowest frequency and pixel size of highest frequency.
        (default: ``None``)
    :param common_beam: (optional)
        Beam parameters to use when CLEANing all frequencies to a common
        resolution. If ``None`` then use the beam of the lowest frequency.
        (default: ``None``)
    :param find_shifts: (optional)
        Find shifts between original CLEAN-images? (default: ``False``)
    :param outdir: (optional)
        Output directory. This directory will be used for saving picture,
        data, etc. If ``None`` then use CWD. (default: ``None``)
    :param path_to_script: (optional)
        Path to difmap CLEAN script. If ``None`` then use CWD.
        (default: ``None``)
    :param clear_difmap_logs: (optional)
        Remove difmap log-files at the end? (default: ``True``)
    :param rotm_slices: (optional)
        Iterable of slices to analyze on the ROTM map. If ``None`` then skip
        the slice analysis. (default: ``None``)

    :notes:
        Workflow:
        1) CLEAN all N bands at native resolution and obtain native I, Q, U
           models.
        2) Choose the common beam out of the N possible ones.
        3) (Optionally) choose uv-tapering.
        4) CLEAN uv-data of all bands with the common beam (optionally
           applying uv-tapering).
        5) Estimate the core shift.
        6) Create B sets of N multifrequency simulated data using the native
           models.
        7) (Optionally) CLEAN the B sets of N multifrequency simulated data
           at native resolution to obtain I error maps for each of the N
           frequencies.
        8) CLEAN the B sets of N multifrequency simulated data of all bands
           with the common beam (optionally applying uv-tapering).
        9) Estimate the error of the core-shift determination.
        10) Estimate RM and its error.
        11) Estimate alpha and its error.
    """
    # Fail early
    if imsizes is None:
        raise Exception("Provide imsizes argument!")
    if common_imsize is not None:
        print("Using common image size {}".format(common_imsize))
    else:
        raise Exception("Provide common_imsize argument!")

    # Setting up the output directory
    if outdir is None:
        outdir = os.getcwd()
    print("Using output directory {}".format(outdir))
    os.chdir(outdir)

    # Assume input self-calibrated uv-data FITS files have different
    # frequencies
    n_freq = len(uv_fits_paths)
    print("Using {} frequencies".format(n_freq))

    # Assuming full multifrequency analysis
    stokes = ('I', 'Q', 'U')

    # Container for original self-calibrated uv-data
    uv_data_dict = dict()
    # Container for original self-calibrated uv-data FITS-file paths
    uv_fits_dict = dict()
    for uv_fits_path in uv_fits_paths:
        uvdata = UVData(uv_fits_path)
        # Mark frequencies by total band center [Hz] for consistency with
        # image.
        uv_data_dict.update({uvdata.band_center: uvdata})
        uv_fits_dict.update({uvdata.band_center: uv_fits_path})

    # Lowest frequency goes first
    freqs = sorted(uv_fits_dict.keys())
    print("Frequencies are: {}".format(freqs))
    # Assert we have original map parameters for all frequencies
    assert len(imsizes) == n_freq

    # Container for original CLEAN-images of self-calibrated uv-data
    cc_image_dict = dict()
    # Container for paths to FITS-files with original CLEAN-images of
    # self-calibrated uv-data
    cc_fits_dict = dict()
    # Container for original CLEAN-image's beam parameters
    cc_beam_dict = dict()
    for freq in freqs:
        cc_image_dict.update({freq: dict()})
        cc_fits_dict.update({freq: dict()})
        cc_beam_dict.update({freq: dict()})

    # 1.
    # Clean original uv-data with specified map parameters
    print("1. Clean original uv-data with specified map parameters...")
    imsizes_dict = dict()
    for i, freq in enumerate(freqs):
        imsizes_dict.update({freq: imsizes[i]})
    for freq in freqs:
        uv_fits_path = uv_fits_dict[freq]
        uv_dir, uv_fname = os.path.split(uv_fits_path)
        for stoke in stokes:
            outfname = '{}_{}_cc.fits'.format(freq, stoke)
            outpath = os.path.join(outdir, outfname)
            clean_difmap(uv_fname, outfname, stoke, imsizes_dict[freq],
                         path=uv_dir, path_to_script=path_to_script,
                         outpath=outdir)
            cc_fits_dict[freq].update({stoke: os.path.join(outdir,
                                                           outfname)})
            image = create_clean_image_from_fits_file(outpath)
            cc_image_dict[freq].update({stoke: image})
            # Remember the native Stokes I beam for common-beam selection.
            if stoke == 'I':
                cc_beam_dict.update({freq: image.beam})

    # Containers for images and paths to FITS files with common size images
    cc_cs_image_dict = dict()
    cc_cs_fits_dict = dict()

    # 2.
    # Choose common beam size
    print("2. Choosing common beam size...")
    if common_beam is None:
        common_beam = cc_beam_dict[freqs[0]]
    print("Using common beam [mas, mas, deg] : {}".format(common_beam))

    # 3.
    # Optionally uv-tapering uv-data
    print("3. Optionally uv-tapering uv-data...")
    print("skipping...")

    # 4.
    # Clean original uv-data with common map parameters
    print("4. Clean original uv-data with common map parameters...")
    for freq in freqs:
        cc_cs_image_dict.update({freq: dict()})
        cc_cs_fits_dict.update({freq: dict()})
        uv_fits_path = uv_fits_dict[freq]
        uv_dir, uv_fname = os.path.split(uv_fits_path)
        for stoke in stokes:
            outfname = 'cs_{}_{}_cc.fits'.format(freq, stoke)
            outpath = os.path.join(outdir, outfname)
            # CLEANing is currently disabled — maps from an earlier run are
            # picked up from disk below.
            # clean_difmap(uv_fname_cc, outfname, stoke, common_imsize,
            #              path=uv_dir, path_to_script=path_to_script,
            #              outpath=outdir, show_difmap_output=False)
            cc_cs_fits_dict[freq].update(
                {stoke: os.path.join(outdir, outfname)})
            image = create_image_from_fits_file(outpath)
            cc_cs_image_dict[freq].update({stoke: image})

    # 5.
    # Optionally find shifts between original CLEAN-images
    print("5. Optionally find shifts between original CLEAN-images...")
    if find_shifts:
        print("Determining images shift...")
        shift_dict = dict()
        freq_1 = freqs[0]
        image_1 = cc_image_dict[freq_1]['I']
        for freq_2 in freqs[1:]:
            image_2 = cc_image_dict[freq_2]['I']
            # Coarse grid of possible shifts
            shift = find_shift(image_1, image_2, 100, 5, max_mask_r=200,
                               mask_step=5)
            # More accurate grid of possible shifts
            print("Using fine grid for accurate estimate")
            coarse_grid = range(0, 100, 5)
            idx = coarse_grid.index(shift)
            if idx > 0:
                min_shift = coarse_grid[idx - 1]
            else:
                min_shift = 0
            # Guard against the coarse shift landing on the last grid point;
            # the original unguarded ``coarse_grid[idx + 1]`` raised
            # IndexError there.
            if idx + 1 < len(coarse_grid):
                max_shift = coarse_grid[idx + 1]
            else:
                max_shift = 100
            shift = find_shift(image_1, image_2, max_shift, 1,
                               min_shift=min_shift, max_mask_r=200,
                               mask_step=5)
            shift_dict.update({str((freq_1, freq_2,)): shift})
        # Dumping shifts to json file in target directory
        with open(os.path.join(outdir, "shifts_original.json"), 'w') as fp:
            json.dump(shift_dict, fp)
    else:
        print("skipping...")

    # 6.
    # Bootstrap self-calibrated uv-data with CLEAN-models
    print("6. Bootstrap self-calibrated uv-data with CLEAN-models...")
    uv_boot_fits_dict = dict()
    for freq, uv_fits_path in uv_fits_dict.items():
        # Bootstrapping is currently disabled — existing replications are
        # collected from disk instead.
        # cc_fits_paths = [cc_fits_dict[freq][stoke] for stoke in stokes]
        # bootstrap_uv_fits(uv_fits_path, cc_fits_paths, n_boot,
        #                   outpath=outdir,
        #                   outname=('boot_{}'.format(freq), '_uv.fits'))
        files = glob.glob(os.path.join(outdir,
                                       'boot_{}*.fits'.format(freq)))
        uv_boot_fits_dict.update({freq: sorted(files)})

    # 7.
    # Optionally clean bootstrap replications with original restoring beams
    # and map sizes to get error estimates for original resolution maps of
    # I, PPOL, FPOL, ...
    print("7. Optionally clean bootstrap replications with original restoring"
          " beams and map sizes...")
    print("skipping...")

    # 8.
    # Optionally clean bootstrap replications with common restoring beams
    # and map sizes
    print("8. Optionally clean bootstrap replications with common restoring"
          " beams and map sizes...")
    cc_boot_fits_dict = dict()
    for freq in freqs:
        cc_boot_fits_dict.update({freq: dict()})
        # Renamed from ``uv_fits_paths`` — the original shadowed the function
        # argument of the same name.
        boot_fits_paths = uv_boot_fits_dict[freq]
        for stoke in stokes:
            for i, uv_fits_path in enumerate(boot_fits_paths):
                uv_dir, uv_fname = os.path.split(uv_fits_path)
                outfname = 'boot_{}_{}_cc_{}.fits'.format(
                    freq, stoke, str(i + 1).zfill(3))
                # CLEANing is currently disabled — files are globbed below.
                # clean_difmap(uv_fname_cc, outfname, stoke, common_imsize,
                #              path=uv_dir, path_to_script=path_to_script,
                #              outpath=outdir, show_difmap_output=False)
            files = sorted(
                glob.glob(
                    os.path.join(outdir,
                                 'boot_{}_{}_cc_*.fits'.format(freq,
                                                               stoke))))
            cc_boot_fits_dict[freq].update({stoke: files})

    # 9. Optionally estimate RM map and its error
    print("9. Optionally estimate RM map and its error...")
    original_cs_images = Images()
    for freq in freqs:
        for stoke in stokes:
            original_cs_images.add_images(cc_cs_image_dict[freq][stoke])

    # Find rough mask for creating bootstrap images of RM, alpha, ...
    print("Finding rough mask for creating bootstrap images of RM, alpha,"
          " ...")
    cs_mask = pol_mask(
        {stoke: cc_cs_image_dict[freqs[-1]][stoke] for stoke in stokes},
        n_sigma=3.)

    rotm_image, _ = original_cs_images.create_rotm_image(mask=cs_mask)

    boot_images = Images()
    # NOTE: removed a dead ``sorted(glob.glob(...))`` over the undefined
    # name ``data_dir`` here — it raised NameError and its result was never
    # used; bootstrap images are added from ``cc_boot_fits_dict`` below.
    for freq in freqs:
        for stoke in stokes:
            boot_images.add_from_fits(cc_boot_fits_dict[freq][stoke])
    boot_rotm_images = boot_images.create_rotm_images(mask=cs_mask)
    # NOTE(review): the error image is computed but not used further here —
    # confirm whether it should be saved or returned.
    s_rotm_image = boot_rotm_images.create_error_image(cred_mass=0.95)

    if rotm_slices is not None:
        fnames = ['rotm_slice_spread_{}.png'.format(i + 1)
                  for i in range(len(rotm_slices))]
        for rotm_slice, fname in zip(rotm_slices, fnames):
            analyze_rotm_slice(rotm_slice, rotm_image, boot_rotm_images,
                               outdir=outdir, outfname=fname)

    # (A large commented-out simultaneous-confidence-band computation was
    # removed here; recover it from version control if needed.)

    if clear_difmap_logs:
        print("Removing difmap log-files...")
        difmap_logs = glob.glob(os.path.join(outdir, "difmap.log*"))
        for difmap_log in difmap_logs:
            os.unlink(difmap_log)