ppol_mask = np.loadtxt(os.path.join(base_path, "ppol_mask.txt"))

# Create bootstrap ROTM images with calculated mask
rotm_images_list = list()
for i in range(1, n_boot + 1):
    images = Images()
    for band in bands:
        for stoke in stokes:
            map_path = im_fits_path(source, band, epoch, stoke,
                                    base_path=base_path)
            fname = os.path.join(map_path, "cc_{}.fits".format(i))
            images.add_from_fits(fnames=[fname])
    rotm_image, s_rotm_image = images.create_rotm_image(mask=ppol_mask)
    rotm_images_list.append(rotm_image)

# Stack ROTM images
rotm_images_boot = Images()
rotm_images_boot.add_images(rotm_images_list)
fig = plt.figure()
for image in rotm_images_boot.images:
    plt.plot(np.arange(500, 550, 1),
             image.slice((550, 500), (550, 550)), '.k')

# Plot I, ROTM image
i_path = im_fits_path(source, bands[-1], epoch, 'i', base_path=base_path)
i_image = create_clean_image_from_fits_file(
    os.path.join(i_path, 'cc_orig.fits'))

# Create original ROTM image
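# A minimal sketch of this step, assuming the original (non-bootstrap)
# CLEAN models are stored as "cc_orig.fits" per band and Stokes parameter;
# the names ``orig_images``, ``rotm_image_orig`` and ``s_rotm_image_orig``
# are illustrative placeholders, not part of the original script.
orig_images = Images()
for band in bands:
    for stoke in stokes:
        map_path = im_fits_path(source, band, epoch, stoke,
                                base_path=base_path)
        orig_images.add_from_fits(
            fnames=[os.path.join(map_path, "cc_orig.fits")])
rotm_image_orig, s_rotm_image_orig = orig_images.create_rotm_image(
    mask=ppol_mask)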


def analyze_source(uv_fits_paths, n_boot, imsizes=None, common_imsize=None,
                   common_beam=None, find_shifts=False, outdir=None,
                   path_to_script=None, clear_difmap_logs=True,
                   rotm_slices=None):
    """
    Function that uses multifrequency self-calibration data for in-depth
    analysis.

    :param uv_fits_paths:
        Iterable of paths to self-calibrated uv-data FITS-files.
    :param n_boot:
        Number of bootstrap replications to use in analysis.
    :param imsizes: (optional)
        Iterable of image parameters (imsize, pixsize) that should be used
        for CLEANing of uv-data if no CLEAN-images are supplied. Should be
        sorted in increasing frequency order. If ``None`` then specify
        parameters by CLEAN images. (default: ``None``)
    :param common_imsize: (optional)
        Image parameters that will be used in making common size images for
        multifrequency analysis. If ``None`` then use physical image size of
        lowest frequency and pixel size of highest frequency.
        (default: ``None``)
    :param common_beam: (optional)
        Beam parameters (bmaj [mas], bmin [mas], bpa [deg]) to use when
        CLEANing to a common resolution. If ``None`` then use the beam of
        the lowest frequency CLEAN image. (default: ``None``)
    :param find_shifts: (optional)
        Boolean. Find shifts between the original CLEAN images of different
        frequencies? (default: ``False``)
    :param outdir: (optional)
        Output directory. This directory will be used for saving pictures,
        data, etc. If ``None`` then use CWD. (default: ``None``)
    :param path_to_script: (optional)
        Path to difmap CLEAN script. If ``None`` then use CWD.
        (default: ``None``)
    :param clear_difmap_logs: (optional)
        Boolean. Remove difmap log-files from the output directory when
        finished? (default: ``True``)
    :param rotm_slices: (optional)
        Iterable of slices (pairs of pixel coordinates) along which to
        analyze the ROTM image and its bootstrap spread. If ``None`` then
        skip the slice analysis. (default: ``None``)

    :notes:
        Workflow:

        1) CLEAN all N bands at their native resolution and obtain the
           native I, Q, U models.
        2) Choose a common beam from the N available ones.
        3) (Optionally) choose a uv-taper.
        4) CLEAN the uv-data of all bands with the common beam (optionally
           applying the uv-taper).
        5) Estimate the core shift.
        6) Create B sets of N multifrequency simulated data sets using the
           native models.
        7) (Optionally) CLEAN the B sets of N multifrequency simulated data
           at the native resolution to obtain I error maps for each of the
           N frequencies.
        8) CLEAN the B sets of N multifrequency simulated data of all bands
           with the common beam (optionally applying the uv-taper).
        9) Estimate the error of the core shift determination.
        10) Estimate RM and its error.
        11) Estimate alpha and its error.
    """
    # Fail early
    if imsizes is None:
        raise Exception("Provide imsizes argument!")
    if common_imsize is not None:
        print("Using common image size {}".format(common_imsize))
    else:
        raise Exception("Provide common_imsize argument!")

    # Setting up the output directory
    if outdir is None:
        outdir = os.getcwd()
    print("Using output directory {}".format(outdir))
    os.chdir(outdir)

    # Assume input self-calibrated uv-data FITS files have different
    # frequencies
    n_freq = len(uv_fits_paths)
    print("Using {} frequencies".format(n_freq))

    # Assuming full multifrequency analysis
    stokes = ('I', 'Q', 'U')

    # Container for original self-calibrated uv-data
    uv_data_dict = dict()
    # Container for original self-calibrated uv-data FITS-file paths
    uv_fits_dict = dict()
    for uv_fits_path in uv_fits_paths:
        uvdata = UVData(uv_fits_path)
        # Mark frequencies by total band center [Hz] for consistency with
        # image.
        uv_data_dict.update({uvdata.band_center: uvdata})
        uv_fits_dict.update({uvdata.band_center: uv_fits_path})

    # Lowest frequency goes first
    freqs = sorted(uv_fits_dict.keys())
    print("Frequencies are: {}".format(freqs))
    # Assert we have original map parameters for all frequencies
    assert len(imsizes) == n_freq

    # Container for original CLEAN-images of self-calibrated uv-data
    cc_image_dict = dict()
    # Container for paths to FITS-files with original CLEAN-images of
    # self-calibrated uv-data
    cc_fits_dict = dict()
    # Container for original CLEAN-image's beam parameters
    cc_beam_dict = dict()
    for freq in freqs:
        cc_image_dict.update({freq: dict()})
        cc_fits_dict.update({freq: dict()})
        cc_beam_dict.update({freq: dict()})

    # 1.
    # Clean original uv-data with specified map parameters
    print("1. Clean original uv-data with specified map parameters...")
    imsizes_dict = dict()
    for i, freq in enumerate(freqs):
        imsizes_dict.update({freq: imsizes[i]})

    for freq in freqs:
        uv_fits_path = uv_fits_dict[freq]
        uv_dir, uv_fname = os.path.split(uv_fits_path)
        for stoke in stokes:
            outfname = '{}_{}_cc.fits'.format(freq, stoke)
            outpath = os.path.join(outdir, outfname)
            clean_difmap(uv_fname, outfname, stoke, imsizes_dict[freq],
                         path=uv_dir, path_to_script=path_to_script,
                         outpath=outdir)
            cc_fits_dict[freq].update({stoke: os.path.join(outdir,
                                                           outfname)})
            image = create_clean_image_from_fits_file(outpath)
            cc_image_dict[freq].update({stoke: image})
            if stoke == 'I':
                cc_beam_dict.update({freq: image.beam})

    # Containers for images and paths to FITS files with common size images
    cc_cs_image_dict = dict()
    cc_cs_fits_dict = dict()

    # 2.
    # Choose common beam size
    print("2. Choosing common beam size...")
    if common_beam is None:
        common_beam = cc_beam_dict[freqs[0]]
    print("Using common beam [mas, mas, deg] : {}".format(common_beam))

    # 3.
    # Optionally uv-tapering uv-data
    print("3. Optionally uv-tapering uv-data...")
    print("skipping...")

    # 4.
    # Clean original uv-data with common map parameters
    print("4. Clean original uv-data with common map parameters...")
    for freq in freqs:
        cc_cs_image_dict.update({freq: dict()})
        cc_cs_fits_dict.update({freq: dict()})

        uv_fits_path = uv_fits_dict[freq]
        uv_dir, uv_fname = os.path.split(uv_fits_path)
        for stoke in stokes:
            outfname = 'cs_{}_{}_cc.fits'.format(freq, stoke)
            outpath = os.path.join(outdir, outfname)
            # clean_difmap(uv_fname_cc, outfname, stoke, common_imsize,
            #              path=uv_dir, path_to_script=path_to_script,
            #              outpath=outdir, show_difmap_output=False)
            cc_cs_fits_dict[freq].update({stoke: os.path.join(outdir,
                                                              outfname)})
            image = create_image_from_fits_file(outpath)
            cc_cs_image_dict[freq].update({stoke: image})

    # 5.
    # Optionally find shifts between original CLEAN-images
    print("5. Optionally find shifts between original CLEAN-images...")
    if find_shifts:
        print("Determining images shift...")
        shift_dict = dict()
        freq_1 = freqs[0]
        image_1 = cc_image_dict[freq_1]['I']

        for freq_2 in freqs[1:]:
            image_2 = cc_image_dict[freq_2]['I']
            # Coarse grid of possible shifts
            shift = find_shift(image_1, image_2, 100, 5, max_mask_r=200,
                               mask_step=5)
            # More accurate grid of possible shifts
            print("Using fine grid for accurate estimate")
            coarse_grid = range(0, 100, 5)
            idx = coarse_grid.index(shift)
            if idx > 0:
                min_shift = coarse_grid[idx - 1]
            else:
                min_shift = 0
            shift = find_shift(image_1, image_2, coarse_grid[idx + 1], 1,
                               min_shift=min_shift, max_mask_r=200,
                               mask_step=5)
            shift_dict.update({str((freq_1, freq_2,)): shift})

        # Dumping shifts to json file in target directory
        with open(os.path.join(outdir, "shifts_original.json"), 'w') as fp:
            json.dump(shift_dict, fp)
    else:
        print("skipping...")

    # 6.
    # Bootstrap self-calibrated uv-data with CLEAN-models
    print("6. Bootstrap self-calibrated uv-data with CLEAN-models...")
    uv_boot_fits_dict = dict()
    for freq, uv_fits_path in uv_fits_dict.items():
        # cc_fits_paths = [cc_fits_dict[freq][stoke] for stoke in stokes]
        # bootstrap_uv_fits(uv_fits_path, cc_fits_paths, n_boot,
        #                   outpath=outdir,
        #                   outname=('boot_{}'.format(freq), '_uv.fits'))
        files = glob.glob(os.path.join(outdir, 'boot_{}*.fits'.format(freq)))
        uv_boot_fits_dict.update({freq: sorted(files)})

    # 7.
    # Optionally clean bootstrap replications with original restoring beams
    # and map sizes to get error estimates for original resolution maps of
    # I, PPOL, FPOL, ...
    print(
        "7. Optionally clean bootstrap replications with original restoring"
        " beams and map sizes...")
    print("skipping...")

    # 8.
    # Optionally clean bootstrap replications with common restoring beams
    # and map sizes
    print(
        "8. Optionally clean bootstrap replications with common restoring"
        " beams and map sizes...")
    cc_boot_fits_dict = dict()
    for freq in freqs:
        cc_boot_fits_dict.update({freq: dict()})
        uv_fits_paths = uv_boot_fits_dict[freq]
        for stoke in stokes:
            for i, uv_fits_path in enumerate(uv_fits_paths):
                uv_dir, uv_fname = os.path.split(uv_fits_path)
                outfname = 'boot_{}_{}_cc_{}.fits'.format(
                    freq, stoke, str(i + 1).zfill(3))
                # clean_difmap(uv_fname_cc, outfname, stoke, common_imsize,
                #              path=uv_dir, path_to_script=path_to_script,
                #              outpath=outdir, show_difmap_output=False)
            files = sorted(
                glob.glob(
                    os.path.join(outdir,
                                 'boot_{}_{}_cc_*.fits'.format(freq, stoke))))
            cc_boot_fits_dict[freq].update({stoke: files})

    # 9. Optionally estimate RM map and its error
    print("9. Optionally estimate RM map and its error...")
    original_cs_images = Images()
    for freq in freqs:
        for stoke in stokes:
            original_cs_images.add_images(cc_cs_image_dict[freq][stoke])

    # Find rough mask for creating bootstrap images of RM, alpha, ...
    print("Finding rough mask for creating bootstrap images of RM, alpha, ...")
    cs_mask = pol_mask({stoke: cc_cs_image_dict[freqs[-1]][stoke]
                        for stoke in stokes}, n_sigma=3.)

    rotm_image, _ = original_cs_images.create_rotm_image(mask=cs_mask)

    boot_images = Images()
    fnames = sorted(glob.glob(os.path.join(outdir, "boot_*_*_cc_*.fits")))
    for freq in freqs:
        for stoke in stokes:
            boot_images.add_from_fits(cc_boot_fits_dict[freq][stoke])
    boot_rotm_images = boot_images.create_rotm_images(mask=cs_mask)
    s_rotm_image = boot_rotm_images.create_error_image(cred_mass=0.95)

    if rotm_slices is not None:
        fnames = ['rotm_slice_spread_{}.png'.format(i + 1) for i in
                  range(len(rotm_slices))]
        for rotm_slice, fname in zip(rotm_slices, fnames):
            analyze_rotm_slice(rotm_slice, rotm_image, boot_rotm_images,
                               outdir=outdir, outfname=fname)

    # # Calculate simultaneous confidence bands
    # # Bootstrap slices
    # slices = list()
    # for image in rotm_images_sym.images:
    #     slice_ = image.slice((216, 276), (296, 276))
    #     slices.append(slice_[~np.isnan(slice_)])
    #
    # # Find means
    # obs_slice = rotm_image_sym.slice((216, 276), (296, 276))
    # x = np.arange(216, 296, 1)
    # x = x[~np.isnan(obs_slice)]
    # obs_slice = obs_slice[~np.isnan(obs_slice)]
    # # Find sigmas
    # slices_ = [arr.reshape((1, len(obs_slice))) for arr in slices]
    # sigmas = hdi_of_arrays(slices_).squeeze()
    # means = np.mean(np.vstack(slices), axis=0)
    # diff = obs_slice - means
    # # Move bootstrap curves to original simulated centers
    # slices_ = [slice_ + diff for slice_ in slices]
    # # Find low and upper confidence band
    # low, up = create_sim_conf_band(slices_, obs_slice, sigmas,
    #                                alpha=conf_band_alpha)
    #
    # # Plot confidence bands and model values
    # fig = plt.figure()
    # ax = fig.add_subplot(1, 1, 1)
    # ax.plot(x, low[::-1], 'g')
    # ax.plot(x, up[::-1], 'g')
    # [ax.plot(x, slice_[::-1], 'r', lw=0.15) for slice_ in slices_]
    # ax.plot(x, obs_slice[::-1], '.k')
    # # Plot ROTM model
    # ax.plot(np.arange(216, 296, 1),
    #         rotm_grad_value * (np.arange(216, 296, 1) - 256.)[::-1] +
    #         rotm_value_0)
    # fig.savefig(os.path.join(data_dir, 'rotm_slice_spread.png'),
    #             bbox_inches='tight', dpi=200)
    # plt.close()

    if clear_difmap_logs:
print("Removing difmap log-files...") difmap_logs = glob.glob(os.path.join(outdir, "difmap.log*")) for difmpa_log in difmap_logs: os.unlink(difmpa_log)