def derive_ideal_box(av_image, cores_dict, box_width, box_height,
                     av_image_error=None, core_rel_pos=0.1, angle_res=1.0):

    """
    Parameters
    ----------
    angle_res : float
        Resolution with which to rotate each new box, in degrees. A resolution
        of 1.0 degree gives 360 different box orientations.
    """

    import mygeometry as myg
    import numpy as np

    angle_grid = np.arange(0, 360, angle_res)
    box_dict = {}

    for core in cores_dict:
        print("Calculating optimal angle for core {:s}".format(core))

        # axes are reversed
        core_pos = cores_dict[core]["center_pixel"][::-1]

        box_vertices = create_box(core_pos, box_width, box_height, core_rel_pos=core_rel_pos)

        gradient_sums = np.zeros((len(angle_grid)))

        for i, angle in enumerate(angle_grid):
            box_vertices_rotated = rotate_box(box_vertices, core_pos, angle)

            mask = myg.get_polygon_mask(av_image, box_vertices_rotated)

            av_image_masked = np.copy(av_image)

            # extract radial profile weighted by SNR
            radii, profile = get_radial_profile(
                av_image, binsize=3, center=core_pos[::-1], weights=av_image_error, mask=mask
            )

            if angle == 90:
                av_image_masked = np.copy(av_image)
                mask = myg.get_polygon_mask(av_image_masked, box_vertices)
                av_image_masked[mask == 0] = np.NaN

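            # NaN != NaN, so these comparisons keep only the finite samples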
            indices = np.where((radii == radii) & (profile == profile))
            profile, radii = profile[indices], radii[indices]

            # steeper gradients will have smaller sums
            gradient_sum = np.sum(np.gradient(profile, radii))
            gradient_sums[i] = gradient_sum

        # find steepest profile and recreate the box mask
        angle_ideal = angle_grid[gradient_sums == np.min(gradient_sums)][0]

        box_vertices_rotated = rotate_box(box_vertices, core_pos, angle_ideal)

        box_dict[core] = {}
        box_dict[core]["box_vertices_rotated"] = box_vertices_rotated

    return box_dict
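
A minimal usage sketch for derive_ideal_box, assuming numpy and the script-level helpers create_box, rotate_box, and get_radial_profile are available alongside mygeometry; the core name, pixel center, and box dimensions below are hypothetical:

import numpy as np

av_image = np.random.random((200, 200))               # stand-in extinction map
av_error = 0.1 * np.ones(av_image.shape)              # stand-in error map
cores = {'L1495': {'center_pixel': (120.0, 85.0)}}    # hypothetical core

boxes = derive_ideal_box(av_image, cores, box_width=20, box_height=40,
                         av_image_error=av_error, angle_res=5.0)
vertices = boxes['L1495']['box_vertices_rotated']
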
Example #2
def perform_background_subtraction(av_filename,
                                   background_mask=None,
                                   background_dim=1,
                                   background_filename=None,
                                   background_init=None,
                                   background_region_filename=None):

    # Import external modules
    # -----------------------
    from myio import check_file
    from mycoords import convert_limit_coordinates, get_pix_coords, \
                         hrs2degs, load_ds9_region
    from myimage_analysis import fit_background
    from astropy.io import fits
    import mygeometry as myg

    av_data, av_header = fits.getdata(av_filename, header=True)

    if background_init is not None:
        av_data = av_data - background_init

    file_exists = check_file(background_filename, clobber=False)

    if not file_exists:
        props = {}

        print('writing new background')

        # Load background regions from ds9
        props = load_ds9_region(props,
                                filename=background_region_filename,
                                header=av_header,
                                key='background_regions')

        # Derive relevant region
        background_mask = np.ones(av_data.shape)
        for background_region in props['background_regions']:
            background_vertices = \
              props['background_regions']\
                   [background_region]['poly_verts']['pixel']

            # block off region
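            # (~np.logical_not(x) is a double negation; it simply casts the
            #  0/1 polygon mask to a boolean index array)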
            background_mask_temp = ~np.logical_not(
                myg.get_polygon_mask(av_data, background_vertices))

            background_mask[background_mask_temp] = 0
        background_mask = ~np.logical_not(background_mask)

        # Fit the background
        background = fit_background(av_data,
                                    background_mask,
                                    background_dim=background_dim)

        fits.writeto(background_filename, background, av_header)
    else:
        background = fits.getdata(background_filename)

    return background
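
A hedged usage sketch for perform_background_subtraction; the Av filename matches the Perseus map used elsewhere in these examples, while the background output file and ds9 region file names are hypothetical:

# If the background FITS file does not exist yet, the background is fit
# inside the ds9 regions (a single scalar for background_dim=1) and written
# out; otherwise the existing file is read back in.
background = perform_background_subtraction(
    'perseus_av_planck_5arcmin.fits',
    background_dim=1,
    background_filename='perseus_av_background_planck.fits',
    background_region_filename='perseus_background.reg')
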
Example #3
def get_contour_mask(image, contour_vertices):
    ''' Gets a mask for an image with contours marked at the contour vertices.

    '''

    import mygeometry as myg

    mask = myg.get_polygon_mask(image, contour_vertices)

    return mask
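
A minimal sketch of the wrapper above; the image and triangle vertices are synthetic, and mygeometry.get_polygon_mask is assumed to return 1 inside the polygon and 0 outside, as the other examples imply:

import numpy as np

image = np.zeros((100, 100))
# Hypothetical triangle vertices in pixel coordinates.
verts = [(10.0, 10.0), (10.0, 60.0), (60.0, 10.0)]

mask = get_contour_mask(image, verts)
image_masked = np.copy(image)
image_masked[mask == 0] = np.nan   # keep only pixels inside the contour
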
Example #4
def perform_background_subtraction(av_filename, background_mask=None,
        background_dim=1, background_filename=None, background_init=None,
        background_region_filename=None):

    # Import external modules
    # -----------------------
    from myio import check_file
    from mycoords import convert_limit_coordinates, get_pix_coords, \
                         hrs2degs, load_ds9_region
    from myimage_analysis import fit_background
    from astropy.io import fits
    import mygeometry as myg

    av_data, av_header = fits.getdata(av_filename,
                                      header=True)

    if background_init is not None:
        av_data = av_data - background_init

    file_exists = check_file(background_filename, clobber=False)

    if not file_exists:
        props = {}

        print('writing new background')

        # Load background regions from ds9
        props = load_ds9_region(props,
                                filename=background_region_filename,
                                header=av_header,
                                key='background_regions')

        # Derive relevant region
        background_mask = np.ones(av_data.shape)
        for background_region in props['background_regions']:
            background_vertices = \
              props['background_regions']\
                   [background_region]['poly_verts']['pixel']

            # block off region
            background_mask_temp = ~np.logical_not(myg.get_polygon_mask(av_data,
                                                background_vertices))

            background_mask[background_mask_temp] = 0
        background_mask = ~np.logical_not(background_mask)

        # Fit the background
        background = fit_background(av_data, background_mask,
                background_dim=background_dim)

        fits.writeto(background_filename, background, av_header)
    else:
        background = fits.getdata(background_filename)

    return background
def get_contour_mask(image, contour_vertices):

    ''' Gets a mask for an image with contours marked at the contour vertices.

    '''

    import mygeometry as myg

    mask = myg.get_polygon_mask(image, contour_vertices)

    return mask
Example #6
def calc_region_mask(filename, data, header, region_name='',):

    ''' Masks all pixels which are not within the region.

    '''

    import mygeometry as myg

    regions = load_ds9_region(filename, header, region_name=region_name)

    region_vertices = regions[region_name]['poly_verts']['pixel']

    # block off region
    region_mask = np.logical_not(myg.get_polygon_mask(data,
                                                      region_vertices))

    return region_mask
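
A hedged usage sketch for calc_region_mask; the region file and region name are hypothetical, av_data and av_header stand for an Av map and header loaded as in the other examples, and load_ds9_region is assumed to be the script-level helper with the (filename, header, region_name) signature used above:

import numpy as np

region_mask = calc_region_mask('perseus_region.reg', av_data, av_header,
                               region_name='perseus')

# region_mask is True outside the region, so it can be passed directly as a
# numpy masked-array mask.
av_masked = np.ma.array(av_data, mask=region_mask)
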
Example #7
def calc_region_mask(
    filename,
    data,
    header,
    region_name='',
):
    ''' Masks all pixels which are not within the region.

    '''

    import mygeometry as myg

    regions = load_ds9_region(filename, header, region_name=region_name)

    region_vertices = regions[region_name]['poly_verts']['pixel']

    # block off region
    region_mask = np.logical_not(myg.get_polygon_mask(data, region_vertices))

    return region_mask
def main(av_data_type='planck'):

    # Import external modules
    # -----------------------
    import numpy as np
    from os import system,path
    import mygeometry as myg
    from mycoords import make_velocity_axis, convert_limit_coordinates
    import json
    from myimage_analysis import calculate_nhi, calculate_noise_cube
    #from astropy.io import fits
    import pyfits as fits
    import matplotlib.pyplot as plt

    # Set parameters
    # --------------
    # Check if likelihood file already written, rewrite?
    clobber = 0

    # Confidence of parameter errors
    conf = 0.68
    # Confidence of contour levels
    contour_confs = (0.68, 0.95)

    # Name of HI noise cube
    noise_cube_filename = 'perseus_hi_galfa_cube_regrid_planckres_noise'

    # Threshold for converging DGR
    threshold_delta_dgr = 0.00005

    # Number of white noise standard deviations with which to fit the
    # residuals in iterative masking
    resid_width_scale = 3.0

    # Name of property files results are written to
    global_property_file = 'perseus_global_properties.txt'

    # Likelihood axis resolutions
    vel_widths = np.arange(1, 30, 2*0.16667)
    dgrs = np.arange(0.01, 0.2, 1e-3)
    #vel_widths = np.arange(1, 50, 8*0.16667)
    #dgrs = np.arange(0.01, 0.2, 1e-2)

    # Velocity range over which to integrate HI for deriving the mask
    vel_range = (-20, 20)

    # Use binned image?
    use_binned_image = False

    # define directory locations
    # --------------------------
    output_dir = '/d/bip3/ezbc/perseus/data/python_output/nhi_av/'
    figure_dir = \
        '/d/bip3/ezbc/perseus/figures/'
    av_dir = '/d/bip3/ezbc/perseus/data/av/'
    hi_dir = '/d/bip3/ezbc/perseus/data/hi/'
    co_dir = '/d/bip3/ezbc/perseus/data/co/'
    core_dir = '/d/bip3/ezbc/perseus/data/python_output/core_properties/'
    property_dir = '/d/bip3/ezbc/perseus/data/python_output/'
    region_dir = '/d/bip3/ezbc/perseus/data/python_output/ds9_regions/'
    likelihood_dir = '/d/bip3/ezbc/perseus/data/python_output/nhi_av/'

    # Load data
    # ---------
    if use_binned_image:
        bin_string = '_bin'
    else:
        bin_string = ''

    # Adjust filenames
    noise_cube_filename += bin_string
    likelihood_filename = 'perseus_likelihood_{0:s}'.format(av_data_type) + \
                          bin_string
    results_filename = 'perseus_likelihood_{0:s}'.format(av_data_type) + \
                       bin_string

    av_data, av_header = fits.getdata(av_dir + \
                            'perseus_av_planck_5arcmin' + bin_string + '.fits',
                                      header=True)

    av_data_error, av_error_header = fits.getdata(av_dir + \
                'perseus_av_error_planck_5arcmin' + bin_string + '.fits',
            header=True)

    if use_binned_image:
        #av_data_error = (100 * 0.025**2) * np.ones(av_data_error.shape)
        av_data_error *= 5

    hi_data, hi_header = fits.getdata(hi_dir + \
                'perseus_hi_galfa_cube_regrid_planckres' + bin_string + '.fits',
            header=True)

    # Load global properties
    with open(property_dir + global_property_file, 'r') as f:
        global_props = json.load(f)

    # Prepare data products
    # ---------------------
    # Change WCS coords to pixel coords of images
    global_props = convert_limit_coordinates(global_props, header=av_header)

    # make the velocity axes
    hi_vel_axis = make_velocity_axis(hi_header)

    # Load the HI noise cube if it exists, else make it
    if not path.isfile(hi_dir + noise_cube_filename + '.fits'):
        noise_cube = calculate_noise_cube(cube=hi_data,
                velocity_axis=hi_vel_axis,
                velocity_noise_range=[90,110], header=hi_header, Tsys=30.,
                filename=hi_dir + noise_cube_filename + '.fits')
    else:
        noise_cube, noise_header = fits.getdata(hi_dir +
                noise_cube_filename + '.fits',
            header=True)

    # Derive relevant region
    pix = global_props['region_limit']['pixel']
    region_vertices = ((pix[1], pix[0]),
                       (pix[1], pix[2]),
                       (pix[3], pix[2]),
                       (pix[3], pix[0])
                       )

    # block off region
    region_mask = np.logical_not(myg.get_polygon_mask(av_data, region_vertices))

    print('\nRegion size = ' + \
          '{0:.0f} pix'.format(region_mask[region_mask == 1].size))

    # Derive mask by excluding correlated residuals
    # ---------------------------------------------
    nhi_image = calculate_nhi(cube=hi_data,
                              velocity_axis=hi_vel_axis,
                              velocity_range=vel_range,
                              return_nhi_error=False,
                              )

    av_model, mask, dgr = iterate_residual_masking(
                             nhi_image=nhi_image,
                             av_data=av_data,
                             av_data_error=av_data_error,
                             vel_range=vel_range,
                             threshold_delta_dgr=threshold_delta_dgr,
                             resid_width_scale=resid_width_scale,
                             init_mask=region_mask,
                             verbose=1,
                             plot_progress=0,
                             )

    # Combine region mask with new mask
    #mask += np.logical_not(region_mask)
    mask += region_mask

    if 1:
        import matplotlib.pyplot as plt
        plt.imshow(np.ma.array(av_data, mask=mask), origin='lower')
        plt.show()

    # Derive center velocity from hi
    # ------------------------------
    hi_spectrum = np.sum(hi_data[:, ~mask], axis=(1))
    vel_center = np.array((np.average(hi_vel_axis,
                           weights=hi_spectrum**2),))[0]
    print('\nVelocity center from HI = ' +\
            '{0:.2f} km/s'.format(vel_center))

    # Perform likelihood calculation of masked images
    # -----------------------------------------------
    # Define filename for plotting results
    results_filename = figure_dir + 'likelihood/'+ results_filename

    results = calc_likelihoods(
                     hi_cube=hi_data[:, ~mask],
                     hi_vel_axis=hi_vel_axis,
                     av_image=av_data[~mask],
                     av_image_error=av_data_error[~mask],
                     vel_center=vel_center,
                     vel_widths=vel_widths,
                     dgrs=dgrs,
                     results_filename='',
                     return_likelihoods=True,
                     likelihood_filename=None,
                     clobber=False,
                     conf=conf,
                     )

    # Unpack output of likelihood calculation
    (vel_range_confint, width_confint, dgr_confint, likelihoods,
            width_likelihood, dgr_likelihood, width_max, dgr_max,
            vel_range_max) = results

    print('\nHI velocity integration range:')
    print('%.1f to %.1f km/s' % (vel_range_confint[0],
                                 vel_range_confint[1]))
    print('\nDGR:')
    print('%.1f x 10^-20 cm^2 mag' % (dgr_confint[0]))

    # Calculate chi^2 for best fit models
    # ----------------------------------
    nhi_image_temp, nhi_image_error = \
            calculate_nhi(cube=hi_data,
                velocity_axis=hi_vel_axis,
                velocity_range=vel_range_max,
                noise_cube=noise_cube,
                return_nhi_error=True)
    av_image_model = nhi_image_temp * dgr_max
    # avoid NaNs
    indices = ((av_image_model == av_image_model) & \
               (av_data == av_data))
    # add nan locations to the mask
    mask[~indices] = 1

    # count number of pixels used in analysis
    npix = mask[~mask].size

    # finally calculate chi^2
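    # (chi^2 is summed over the unmasked pixels and divided by their number)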
    chisq = np.sum((av_data[~mask] - av_image_model[~mask])**2 / \
            av_data_error[~mask]**2) / av_data[~mask].size

    print('\nTotal number of pixels in analysis, after masking = ' + \
            '{0:.0f}'.format(npix))

    print('\nReduced chi^2 = {0:.1f}'.format(chisq))

    # Write results to global properties
    global_props['dust2gas_ratio'] = {}
    global_props['dust2gas_ratio_error'] = {}
    global_props['hi_velocity_width'] = {}
    global_props['hi_velocity_width_error'] = {}
    global_props['dust2gas_ratio_max'] = {}
    global_props['hi_velocity_center_max'] = {}
    global_props['hi_velocity_width_max'] = {}
    global_props['hi_velocity_range_max'] =  {}
    global_props['av_threshold'] = {}
    global_props['co_threshold'] = {}
    global_props['hi_velocity_width']['value'] = width_confint[0]
    global_props['hi_velocity_width']['unit'] = 'km/s'
    global_props['hi_velocity_width_error']['value'] = width_confint[1:]
    global_props['hi_velocity_width_error']['unit'] = 'km/s'
    global_props['hi_velocity_range'] = vel_range_confint[0:2]
    global_props['hi_velocity_range_error'] = vel_range_confint[2:]
    global_props['dust2gas_ratio']['value'] = dgr_confint[0]
    global_props['dust2gas_ratio_error']['value'] = dgr_confint[1:]
    global_props['dust2gas_ratio_max']['value'] = dgr_max
    global_props['hi_velocity_center_max']['value'] = vel_center
    global_props['hi_velocity_width_max']['value'] = width_max
    global_props['hi_velocity_range_max']['value'] = vel_range_max
    global_props['hi_velocity_range_conf'] = conf
    global_props['width_likelihood'] = width_likelihood.tolist()
    global_props['dgr_likelihood'] = dgr_likelihood.tolist()
    global_props['vel_centers'] = [vel_center,]
    global_props['vel_widths'] = vel_widths.tolist()
    global_props['dgrs'] = dgrs.tolist()
    global_props['likelihoods'] = likelihoods.tolist()
    global_props['av_threshold']['value'] = None
    global_props['av_threshold']['unit'] = 'mag'
    global_props['co_threshold']['value'] = None
    global_props['co_threshold']['unit'] = 'K km/s'
    global_props['chisq'] = chisq
    global_props['npix'] = npix
    global_props['mask'] = mask.tolist()
    global_props['use_binned_image'] = use_binned_image

    with open(property_dir + global_property_file, 'w') as f:
        json.dump(global_props, f)

    # Plot likelihood space
    print('\nWriting likelihood image to\n' + results_filename + '_wd.png')
    plot_likelihoods_hist(global_props,
                          plot_axes=('widths', 'dgrs'),
                          show=0,
                          returnimage=False,
                          filename=results_filename + '_wd.png',
                          contour_confs=contour_confs)

    if 0:
        plt.clf(); plt.close()
        nhi_image_copy = np.copy(nhi_image)
        nhi_image_copy[mask] = np.nan
        av_image_copy = np.copy(av_data)
        resid_image = av_image_copy - nhi_image_copy * dgr
        plt.imshow(resid_image, origin='lower')
        plt.title(r'$A_V$ Data - Model')
        plt.colorbar()
        plt.show()
def main(dgr=None,
         vel_range=None,
         vel_range_type='single',
         region=None,
         av_data_type='planck',
         use_binned_images=False,
         background_dim=1):
    ''' Executes script.

    Parameters
    ----------
    dgr : float
        If None, pulls best-fit value from properties.
    vel_range : tuple
        If None, pulls best-fit value from properties.
    '''

    # import external modules
    import pyfits as fits
    import numpy as np
    from mycoords import make_velocity_axis
    import mygeometry as myg
    from myimage_analysis import calculate_nhi, calculate_noise_cube, \
        calculate_sd, calculate_nh2, calculate_nh2_error, fit_background
    import json

    # Script parameters
    # -----------------
    if use_binned_images:
        bin_string = '_bin'
    else:
        bin_string = ''

    # Name of noise cube
    noise_cube_filename = \
            'california_hi_galfa_cube_regrid_planckres_noise' + bin_string + \
            '.fits'

    # Name of property files results are written to
    prop_file = 'california_global_properties_' + av_data_type + '_scaled'

    # Name of property file the background results are written to
    background_file = 'california_background_' + av_data_type

    # Regions, regions to edit the global properties with
    region_limit = None

    # define directory locations
    # --------------------------
    output_dir = '/d/bip3/ezbc/california/data/python_output/nhi_av/'
    figure_dir = '/d/bip3/ezbc/california/figures/'
    av_dir = '/d/bip3/ezbc/california/data/av/'
    hi_dir = '/d/bip3/ezbc/california/data/hi/'
    co_dir = '/d/bip3/ezbc/california/data/co/'
    core_dir = '/d/bip3/ezbc/california/data/python_output/core_properties/'
    property_dir = '/d/bip3/ezbc/california/data/python_output/'
    region_dir = '/d/bip3/ezbc/california/data/python_output/ds9_regions/'

    # load Planck Av and GALFA HI images, on same grid
    # Load data
    # ---------
    # Adjust filenames
    #noise_cube_filename += bin_string
    likelihood_filename = 'california_likelihood_{0:s}_bin'.format(
        av_data_type)
    results_filename = 'california_likelihood_{0:s}_bin'.format(av_data_type)
    # load Planck Av and GALFA HI images, on same grid
    if av_data_type == 'k09':
        print('\nLoading K+09 2MASS data...')
        av_data, av_header = fits.getdata(av_dir + \
                                  'california_av_k09_regrid_planckres.fits',
                                  header=True)
        av_data_error = 0.1 * np.ones(av_data.shape)
    else:
        print('\nLoading Planck data...')
        av_data, av_header = fits.getdata(av_dir + \
                                          'california_av_planck_tau353_5arcmin.fits',
                                          header=True)

        av_error_data, av_error_data_header = fits.getdata(av_dir + \
                                    'california_av_error_planck_tau353_5arcmin.fits',
                                    header=True)

    #av_data -= 0.9 # background

    # Load global properties of cloud
    # global properties written from script
    # 'av/california_analysis_global_properties.txt'
    if region is not None:
        likelihood_filename += '_region{0:.0f}'.format(region)
        results_filename += '_region{0:.0f}'.format(region)

    print('\nReading global parameter file\n' + prop_file + '.txt')
    if 1:
        with open(property_dir + prop_file + '.txt', 'r') as f:
            props = json.load(f)

    # Load background regions from ds9
    props = load_ds9_region(props,
                            filename=region_dir + 'california_background.reg',
                            header=av_header,
                            key='background_regions')

    # Convert plot limits
    props['plot_limit'] = {}
    props['plot_limit']['wcs'] = (((4, 50, 0), (33, 0, 0)), ((4, 10, 0),
                                                             (39, 0, 0)))

    props = convert_limit_coordinates(props,
                                      coords=('plot_limit', ),
                                      header=av_header)

    # Derive relevant region
    background_mask = np.ones(av_data.shape)
    for background_region in props['background_regions']:
        background_vertices = \
          props['background_regions'][background_region]['poly_verts']['pixel']

        # block off region
        background_mask_temp = ~np.logical_not(
            myg.get_polygon_mask(av_data, background_vertices))

        background_mask[background_mask_temp] = 0

    background_mask = ~np.logical_not(background_mask)

    if 0:
        import matplotlib.pyplot as plt
        av_plot_data = np.copy(av_data)
        av_plot_data[background_mask] = np.nan
        plt.imshow(av_plot_data, origin='lower')
        #plt.xlim(props['plot_limit_bin']['pixel'][0:3:2])
        #plt.ylim(props['plot_limit_bin']['pixel'][1:4:2])
        plt.show()

    background = fit_background(av_data,
                                background_mask,
                                background_dim=background_dim)

    if background_dim == 1:
        print('\nBackground A_V = {0:.1f} mag'.format(background))
        props['background_1D'] = float(background)

    if background_dim == 2:
        print('\nBackground A_V is 2D')
        props['background_2D'] = background.tolist()

    print('\nWriting background parameter file\n' + background_file + '.txt')
    with open(property_dir + background_file + '.txt', 'w') as f:
        json.dump(props, f)

    # Plot
    figure_types = [
        'png',
    ]
    for figure_type in figure_types:
        filename = figure_dir + 'maps/california_av_background_maps_' + \
                   '{0:d}D.'.format(background_dim) + figure_type

        print('\nSaving maps to \n' + filename)

        plot_av_images(av_image=av_data,
                       av_image_backsub=av_data - background,
                       av_background=background,
                       header=av_header,
                       regions=props['background_regions'],
                       av_vlimits=(-1, 16),
                       av_back_vlimits=(0, 3),
                       limits=props['plot_limit']['pixel'],
                       filename=filename,
                       show=False)
def main(dgr=None, vel_range=(-5, 15), vel_range_type='single', region=None,
        av_data_type='planck'):
    ''' Executes script.

    Parameters
    ----------
    dgr : float
        If None, pulls best-fit value from properties.
    vel_range : tuple
        If None, pulls best-fit value from properties.
    '''

    # import external modules
    import pyfits as fits
    import numpy as np
    from mycoords import make_velocity_axis
    import mygeometry as myg
    from myimage_analysis import calculate_nhi, calculate_noise_cube, \
        calculate_sd, calculate_nh2, calculate_nh2_error
    import json
    from os import system,path

    # Script parameters
    # -----------------
    # Name of noise cube
    noise_cube_filename = 'multicloud_hi_galfa_cube_regrid_planckres_noise.fits'

    # Use Planck dust Av map or Kainulainen 2009 optical extinction Av map?
    # options are 'planck' or 'lee12'
    #av_data_type = 'lee12'
    #av_data_type = 'planck'

    # Global parameter file
    prop_file = 'multicloud_global_properties'

    # Which cores to include in analysis?
    cores_to_keep = [# taur
                     'L1495',
                     'L1495A',
                     'B213',
                     'L1498',
                     'B215',
                     'B18',
                     'B217',
                     'B220-1',
                     'B220-2',
                     'L1521',
                     'L1524',
                     'L1527-1',
                     'L1527-2',
                     # Calif
                     'L1536',
                     'L1483-1',
                     'L1483-2',
                     'L1482-1',
                     'L1482-2',
                     'L1478-1',
                     'L1478-2',
                     'L1456',
                     'NGC1579',
                     #'L1545',
                     #'L1517',
                     #'L1512',
                     #'L1523',
                     #'L1512',
                     # Pers
                     'B5',
                     'IC348',
                     'B1E',
                     'B1',
                     'NGC1333',
                     'B4',
                     'B3',
                     'L1455',
                     'L1448',
                     ]

    # Regions, regions to edit the global properties with
    if region == 1:
        region_limit = {'wcs' : (((5, 10, 0), (19, 0, 0)),
                                 ((4, 30, 0), (27, 0, 0))),
                          'pixel' : ()
                         }
    elif region == 2:
        region_limit = {'wcs' : (((4, 30, 0), (19, 0, 0)),
                                 ((3, 50, 0), (29, 0, 0))),
                          'pixel' : ()
                        }
    elif region == 3:
        region_limit = {'wcs' : (((4, 30, 0), (29, 0, 0)),
                                 ((3, 50, 0), (33, 0, 0))),
                          'pixel' : ()
                        }
    else:
        region_limit = None

    # define directory locations
    # --------------------------
    output_dir = '/d/bip3/ezbc/multicloud/data/python_output/nhi_av/'
    figure_dir = '/d/bip3/ezbc/multicloud/figures/'
    av_dir = '/d/bip3/ezbc/multicloud/data/av/'
    hi_dir = '/d/bip3/ezbc/multicloud/data/hi/'
    co_dir = '/d/bip3/ezbc/multicloud/data/co/'
    core_dir = '/d/bip3/ezbc/multicloud/data/python_output/core_properties/'
    property_dir = '/d/bip3/ezbc/multicloud/data/python_output/'
    region_dir = '/d/bip3/ezbc/multicloud/data/python_output/regions/'

    # load Planck Av and GALFA HI images, on same grid
    if av_data_type == 'lee12_2mass':
        print('\nLoading Lee+12 data...')
        av_image, av_header = load_fits(av_dir + \
                    'multicloud_av_lee12_2mass_regrid_planckres.fits',
                return_header=True)
        av_image_error = 0.1 * np.ones(av_image.shape)
    elif av_data_type == 'lee12_iris':
        print('\nLoading Lee+12 data...')
        av_image, av_header = load_fits(av_dir + \
                    'multicloud_av_lee12_iris_regrid_planckres.fits',
                return_header=True)
        av_image_error = 0.1 * np.ones(av_image.shape)
    elif av_data_type == 'planck_tau353':
        print('\nLoading Planck data...')
        av_image, av_header = load_fits(av_dir + \
                    'multicloud_av_planck_tau353_5arcmin.fits',
                return_header=True)
        av_image_error, av_error_header = load_fits(av_dir + \
                'multicloud_av_error_planck_tau353_5arcmin.fits',
                return_header=True)
    else:
        print('\nLoading Planck data...')
        av_image, av_header = load_fits(av_dir + \
                    'multicloud_av_planck_5arcmin.fits',
                return_header=True)

        av_image_error, av_error_header = load_fits(av_dir + \
                    'multicloud_av_error_planck_5arcmin.fits',
                return_header=True)

    hi_cube, hi_header = load_fits(hi_dir + \
                'multicloud_hi_galfa_cube_regrid_planckres.fits',
            return_header=True)

    co_data, co_header = load_fits(co_dir + \
                'multicloud_co_cfa_cube_regrid_planckres.fits',
            return_header=True)

    # Prepare data products
    # ---------------------
    # Load global properties of cloud
    # global properties written from script
    # 'av/multicloud_analysis_global_properties.txt'
    if region is not None:
        likelihood_filename += '_region{0:.0f}'.format(region)
        results_filename += '_region{0:.0f}'.format(region)

    print('\nLoading global property file {0:s}.txt'.format(prop_file))
    with open(property_dir + prop_file + '.txt', 'r') as f:
        props = json.load(f)

    # Define velocity range
    props['hi_velocity_range'] = vel_range

    # make velocity axis for hi cube
    velocity_axis = make_velocity_axis(hi_header)
    # make velocity axis for co cube
    co_velocity_axis = make_velocity_axis(co_header)

    # Load the HI noise cube if it exists, else make it
    if not path.isfile(hi_dir + noise_cube_filename):
        hi_noise_cube = calculate_noise_cube(cube=hi_cube,
                velocity_axis=velocity_axis,
                velocity_noise_range=[90,110], header=hi_header, Tsys=30.,
                filename=hi_dir + noise_cube_filename)
    else:
        hi_noise_cube, noise_header = fits.getdata(hi_dir + noise_cube_filename,
            header=True)

    # create nhi image
    nhi_image = calculate_nhi(cube=hi_cube,
            velocity_axis=velocity_axis,
            velocity_range=vel_range,
            header=hi_header,
            noise_cube=hi_noise_cube)

    props['plot_limit']['wcs'] = (((5, 20, 0), (19, 0 ,0)),
                                  ((2, 30, 0), (37, 0, 0))
                                  )


    # Change WCS coords to pixel coords of images
    props = convert_limit_coordinates(props,
                                      header=av_header,
                                      coords=('region_limit',
                                              'co_noise_limits',
                                              'plot_limit',
                                              'region_name_pos'))

    # Load cloud division regions from ds9
    props = load_ds9_region(props,
                            filename=region_dir + 'multicloud_divisions.reg',
                            header=av_header)

    # Derive relevant region
    pix = props['region_limit']['pixel']
    region_vertices = ((pix[1], pix[0]),
                       (pix[1], pix[2]),
                       (pix[3], pix[2]),
                       (pix[3], pix[0])
                       )

    # block off region
    region_mask = myg.get_polygon_mask(av_image, region_vertices)

    print('\nRegion size = ' + \
          '{0:.0f} pix'.format(region_mask[region_mask == 1].size))

    if vel_range_type == 'single':
        print('\nHI velocity integration range:')
        print('%.1f to %.1f km/s' % (vel_range[0],
                                     vel_range[1]))
    elif vel_range_type == 'multiple':
        print('\nHI velocity integration ranges:')
        for i in xrange(0, vel_range.shape[0]):
            print('%.1f to %.1f km/s' % (vel_range[i, 0],
                                         vel_range[i, 1]))


    cloud_dict = {'taurus' : {},
                  'perseus' : {},
                  'california' : {},
                  }

    # load Planck Av and GALFA HI images, on same grid
    for cloud in cloud_dict:

        print('\nLoading core properties for {0:s}'.format(cloud))

        file_dir = '/d/bip3/ezbc/{0:s}/data/av/'.format(cloud)

        # define core properties
        with open('/d/bip3/ezbc/{0:s}/data/python_output/'.format(cloud) + \
                  'core_properties/{0:s}_core_properties.txt'.format(cloud),
                  'r') as f:
             cores = json.load(f)

        # Load core regions from DS9 files
        if cloud == 'aldobaran':
            region_cloud = 'california'
        else:
            region_cloud = cloud
        core_filename = '/d/bip3/ezbc/' + region_cloud + '/data/python_output' + \
                        '/ds9_regions/{0:s}_av_poly_cores'.format(region_cloud)

        cores = load_ds9_core_region(cores,
                                     filename_base=core_filename,
                                     header=av_header)

        cores = convert_core_coordinates(cores, av_header)

        # Remove cores
        cores_to_remove = []
        for core in cores:
            if core not in cores_to_keep:
                cores_to_remove.append(core)
        for core_to_remove in cores_to_remove:
            del cores[core_to_remove]

        cloud_dict[cloud]['cores'] = cores

    # Plot
    figure_types = ['png', 'pdf']
    for figure_type in figure_types:
        filename = 'av_cores_map' + \
                   '.{0:s}'.format(figure_type)

        print('\nSaving Av cores map to \n' + filename)

        plot_cores_map(header=av_header,
                       av_image=av_image,
                       limits=props['plot_limit']['pixel'],
                       regions=props['regions'],
                       cloud_dict=cloud_dict,
                       cores_to_keep=cores_to_keep,
                       props=props,
                       hi_vlimits=(0,20),
                       av_vlimits=(0,16),
                       #av_vlimits=(0.1,30),
                       savedir=figure_dir + 'maps/',
                       filename=filename,
                       show=False)
def main():

    import grid
    import numpy as np
    from myimage_analysis import calculate_nhi
    from mycoords import make_velocity_axis
    import pyfits as pf
    import mygeometry as myg
    import json

    # parameters used in script
    # -------------------------
    # Regions
    # Options are 'ds9' or 'av_gradient'
    box_method = 'av_gradient'

    # define directory locations
    # --------------------------
    output_dir = '/d/bip3/ezbc/perseus/data/python_output/nhi_av/'
    figure_dir = '/d/bip3/ezbc/perseus/figures/dgr/'
    av_dir = '/d/bip3/ezbc/perseus/data/av/'
    hi_dir = '/d/bip3/ezbc/perseus/data/hi/'
    co_dir = '/d/bip3/ezbc/perseus/data/cfa/'
    core_dir = '/d/bip3/ezbc/perseus/data/python_output/core_properties/'
    region_dir = '/d/bip3/ezbc/perseus/data/python_output/ds9_regions/'
    property_dir = '/d/bip3/ezbc/perseus/data/python_output/'

    av_data_planck, planck_header = pf.getdata(av_dir + \
                'perseus_av_planck_5arcmin.fits',
            header=True)
    av_data_error_planck, planck_header = pf.getdata(av_dir + \
                'perseus_av_error_planck_5arcmin.fits',
            header=True)

    # load GALFA HI
    hi_data, hi_header = pf.getdata(hi_dir + \
            'perseus_hi_galfa_cube_regrid_planckres.fits',
            header=True)
    velocity_axis = make_velocity_axis(hi_header)

    noise_cube, noise_header = pf.getdata(hi_dir + \
            'perseus_hi_galfa_cube_regrid_planckres_noise.fits', header=True)

    # define core properties
    with open(core_dir + 'perseus_core_properties.txt', 'r') as f:
        cores = json.load(f)

    cores = convert_core_coordinates(cores, planck_header)

    cores = load_ds9_region(cores,
            filename_base = region_dir + 'perseus_av_boxes_',
            header = planck_header)

    # Initialize lists
    av_images = []
    av_error_images = []
    nhi_images = []
    nhi_error_images = []

    for core in cores:
        print('\nCalculating for core %s' % core)
        if box_method == 'ds9':
            # Grab the mask from the DS9 regions
            xy = cores[core]['box_center_pix']
            box_width = cores[core]['box_width']
            box_height = cores[core]['box_height']
            box_angle = cores[core]['box_angle']
            mask = myg.get_rectangular_mask(av_data_planck,
                    xy[0], xy[1],
                    width = box_width,
                    height = box_height,
                    angle = box_angle)
        elif box_method == 'av_gradient':
            mask = myg.get_polygon_mask(av_data_planck,
                    cores[core]['box_vertices_rotated'])
        else:
            raise ValueError('Method for boxes is either ds9 or av_gradient')

        indices = mask == 1

        # Get only the relevant pixels to decrease computation time
        hi_data_sub = np.copy(hi_data[:, indices])
        noise_cube_sub = np.copy(noise_cube[:, indices])
        av_data_planck_sub = np.copy(av_data_planck[indices])
        av_data_error_planck_sub = np.copy(av_data_error_planck[indices])

        # Derive N(HI) image
        nhi_image, nhi_image_error = calculate_nhi(cube=hi_data_sub,
                velocity_axis=velocity_axis,
                noise_cube=noise_cube_sub,
                velocity_range=cores[core]['hi_velocity_range'])

        nhi_images.append(nhi_image)
        nhi_error_images.append(nhi_image_error)
        av_images.append(av_data_planck_sub)
        av_error_images.append(av_data_error_planck_sub)

    plot_av_vs_nhi_grid(nhi_images,
                        av_images,
                        av_error_images=av_error_images,
                        nhi_error_images=nhi_error_images,
                        #limits=[0,14, 0,10],
                        scale=['linear', 'log'],
                        savedir=figure_dir,
                        plot_type='scatter',
                        filename='perseus_av_vs_nhi_panels.png',
                        color_scale='linear')

    # Derive N(HI) image
    nhi_image, nhi_image_error = calculate_nhi(cube=hi_data,
            velocity_axis=velocity_axis,
            noise_cube=noise_cube,
            velocity_range=cores[core]['hi_velocity_range'])

    # Plot correlation, similar to Figure 3 of Paradis et al. (2012)
    plot_av_vs_nhi(nhi_image,
            av_data_planck,
            savedir=figure_dir,
            scale=['log', 'linear'],
            filename='perseus_av_vs_nhi_global.png',
            color_scale='linear')
def derive_ideal_wedge(av_image, core_sample, wedge_angle=40, wedge_radius=10,
        av_image_error=None, core_rel_pos=0.1, angle_res=1.0, width=3):

    """
    Parameters
    ----------
    angle_res : float
        Resolution with which to rotate each new wedge, in degrees. A
        resolution of 1.0 degree gives 360 different wedge orientations.
    """

    import mygeometry as myg
    import myimage_analysis as myim
    import numpy as np

    angle_grid = np.arange(0, 360, angle_res)
    region_dict = {}

    for cloud_name in core_sample:
    #for cloud_name in ('perseus',):
        cloud_df = core_sample[cloud_name]
        gradient_sums_list = []
        for core_name in cloud_df['Name']:
        #for core_name in ('G158.26-21.81',):

            core = cloud_df[cloud_df['Name'] == core_name]

            print('Calculating optimal angle for core {:s}'.format(core_name))

            # Get center position in pixels
            core_pos = [core['xpix'].values[0], core['ypix'].values[0]][::-1]

            wedge_vertices = myg.create_wedge(core_pos,
                                              wedge_radius,
                                              wedge_angle,
                                              center_rel_pos=core_rel_pos,
                                              width=width,
                                              )

            gradient_sums = np.zeros((len(angle_grid)))

            for i, angle in enumerate(angle_grid):
                wedge_vertices_rotated = myg.rotate_wedge(wedge_vertices,
                                                          core_pos,
                                                          angle)

                try:
                    mask = \
                        myg.get_polygon_mask(av_image,
                                             wedge_vertices_rotated)
                    av_image_masked = np.copy(av_image)

                    # extract radial profile weighted by SNR
                    radii, profile = \
                        myim.get_radial_profile(av_image,
                                                binsize=1,
                                                center=core_pos, #[::-1],
                                                #weights=av_image_error,
                                                mask=mask
                                                )

                    if angle == 90:
                        av_image_masked = np.copy(av_image)
                        mask = myg.get_polygon_mask(av_image_masked,
                                                    wedge_vertices)
                        av_image_masked[mask==0] = np.NaN

                    indices = np.where((radii == radii) & \
                                       (profile == profile))
                    profile, radii = profile[indices], radii[indices]

                    # steeper gradients will have smaller sums
                    gradient_sum = np.sum(np.gradient(profile, radii))
                    gradient_sums[i] = gradient_sum
                except IndexError:
                    gradient_sums[i] = 0.

                gradient_sums_list.append(gradient_sums)

                #print wedge_vertices_rotated

            # find steepest profile and recreate the box mask
            angle_ideal = angle_grid[gradient_sums == np.min(gradient_sums)][0]

            wedge_vertices_rotated = myg.rotate_wedge(wedge_vertices,
                                                      core_pos,
                                                      angle_ideal)

            region_dict[core_name] = {}
            region_dict[core_name]['xpix'] = wedge_vertices_rotated[:,1]
            region_dict[core_name]['ypix'] = wedge_vertices_rotated[:,0]

    return region_dict
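
A minimal usage sketch for derive_ideal_wedge, assuming pandas plus the mygeometry (create_wedge, rotate_wedge, get_polygon_mask) and myimage_analysis (get_radial_profile) helpers are available; the cloud name, core name, and pixel positions below are hypothetical:

import numpy as np
import pandas as pd

av_image = np.random.random((200, 200))    # stand-in extinction map
core_sample = {'perseus': pd.DataFrame({'Name': ['B1'],
                                        'xpix': [110.0],
                                        'ypix': [95.0]})}

regions = derive_ideal_wedge(av_image, core_sample, wedge_angle=40,
                             wedge_radius=20, angle_res=10.0, width=5)
# regions['B1']['xpix'] and regions['B1']['ypix'] hold the vertices of the
# wedge rotated to the steepest-gradient orientation.
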
def main():

    import grid
    import numpy as np
    from os import system,path
    import mygeometry as myg
    from mycoords import make_velocity_axis
    import json

    # parameters used in script
    # -------------------------
    # wedge should be a few tens of pc.
    # D = 300 pc
    # res = 5'
    # d/pix = 0.43 pc/pix
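    # (quick check of the 0.43 pc/pix figure: 300 pc * (5/60) deg *
    #  (pi/180) rad/deg ~ 0.44 pc per 5' pixel)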
    wedge_angle = 40.0 # degrees
    wedge_radius = 10.0 / 0.43 # pixels,
    core_rel_pos = 0.15 # fraction of radius core is within wedge

    # Which cores to include in analysis?
    cores_to_keep = ('L1495', 'L1495A', 'B213', 'L1498', 'B215')

    # Name of property files
    global_property_file = 'taurus_global_properties.txt'

    # define directory locations
    output_dir = '/d/bip3/ezbc/taurus/data/python_output/nhi_av/'
    figure_dir = '/d/bip3/ezbc/taurus/figures/maps/'
    av_dir = '/d/bip3/ezbc/taurus/data/av/'
    hi_dir = '/d/bip3/ezbc/taurus/data/hi/'
    co_dir = '/d/bip3/ezbc/taurus/data/co/'
    core_dir = '/d/bip3/ezbc/taurus/data/python_output/core_properties/'
    region_dir = '/d/bip3/ezbc/taurus/data/python_output/ds9_regions/'
    property_dir = '/d/bip3/ezbc/taurus/data/python_output/'
    multicloud_region_dir = \
            '/d/bip3/ezbc/multicloud/data/python_output/'

    # load Planck Av and GALFA HI images, on same grid
    av_data, av_header = load_fits(av_dir + \
                'taurus_av_planck_5arcmin.fits',
            return_header=True)

    av_error_data, av_error_header = load_fits(av_dir + \
                'taurus_av_error_planck_5arcmin.fits',
            return_header=True)

    # av_data[dec, ra], axes are switched

    # define core properties
    with open(core_dir + 'taurus_core_properties.txt', 'r') as f:
        cores = json.load(f)

    cores = convert_core_coordinates(cores, av_header)

    cores = load_ds9_core_region(cores,
                            filename_base = region_dir + 'taurus_av_poly_cores',
                            header = av_header)

    # Write updated core properties
    with open(core_dir + 'taurus_core_properties.txt', 'w') as f:
        json.dump(cores, f)

    # Open file with WCS region limits
    with open(property_dir + global_property_file, 'r') as f:
        global_props = json.load(f)

    global_props = convert_limit_coordinates(global_props, header=av_header)

    # Open cloud boundaries
    global_props = load_ds9_cloud_region(global_props,
                            filename=multicloud_region_dir + \
                                     'multicloud_divisions.reg',
                            header=av_header)

    region_vertices = global_props['regions']['taurus1']['poly_verts']['pixel']

    # block off region
    region_mask = np.logical_not(myg.get_polygon_mask(av_data,
                                                      region_vertices))


    # Remove cores
    cores_to_remove = []
    for core in cores:
        if core not in cores_to_keep:
            cores_to_remove.append(core)
    for core_to_remove in cores_to_remove:
        del cores[core_to_remove]

    # Plot
    figure_types = ['pdf', 'png']
    for figure_type in figure_types:
        plot_av_image(av_image=av_data,
                      header=av_header,
                      boxes=True,
                      cores=cores,
                      #region_boundary=region_vertices,
                      savedir=figure_dir,
                      limits=global_props['region_limit']['pixel'],
                      filename='taurus_av_cores_map.' + figure_type,
                      show=0)
Example #14
def derive_ideal_wedge(av_image,
                       core_sample,
                       wedge_angle=40,
                       wedge_radius=10,
                       av_image_error=None,
                       core_rel_pos=0.1,
                       angle_res=1.0,
                       width=3):

    """
    Parameters
    ----------
    angle_res : float
        Resolution with which to rotate each new wedge, in degrees. A
        resolution of 1.0 degree gives 360 different wedge orientations.
    """

    import mygeometry as myg
    import myimage_analysis as myim
    import numpy as np

    angle_grid = np.arange(0, 360, angle_res)
    region_dict = {}

    for cloud_name in core_sample:
        #for cloud_name in ('perseus',):
        cloud_df = core_sample[cloud_name]
        gradient_sums_list = []
        for core_name in cloud_df['Name']:
            #for core_name in ('G158.26-21.81',):

            core = cloud_df[cloud_df['Name'] == core_name]

            print('Calculating optimal angle for core {:s}'.format(core_name))

            # Get center position in pixels
            core_pos = [core['xpix'].values[0], core['ypix'].values[0]][::-1]

            wedge_vertices = myg.create_wedge(
                core_pos,
                wedge_radius,
                wedge_angle,
                center_rel_pos=core_rel_pos,
                width=width,
            )

            gradient_sums = np.zeros((len(angle_grid)))

            for i, angle in enumerate(angle_grid):
                wedge_vertices_rotated = myg.rotate_wedge(
                    wedge_vertices, core_pos, angle)

                try:
                    mask = \
                        myg.get_polygon_mask(av_image,
                                             wedge_vertices_rotated)
                    av_image_masked = np.copy(av_image)

                    # extract radial profile weighted by SNR
                    radii, profile = \
                        myim.get_radial_profile(av_image,
                                                binsize=1,
                                                center=core_pos, #[::-1],
                                                #weights=av_image_error,
                                                mask=mask
                                                )

                    if angle == 90:
                        av_image_masked = np.copy(av_image)
                        mask = myg.get_polygon_mask(av_image_masked,
                                                    wedge_vertices)
                        av_image_masked[mask == 0] = np.NaN

                    indices = np.where((radii == radii) & \
                                       (profile == profile))
                    profile, radii = profile[indices], radii[indices]

                    # steeper gradients will have smaller sums
                    gradient_sum = np.sum(np.gradient(profile, radii))
                    gradient_sums[i] = gradient_sum
                except IndexError:
                    gradient_sums[i] = 0.

                gradient_sums_list.append(gradient_sums)

                #print wedge_vertices_rotated

            # find steepest profile and recreate the box mask
            angle_ideal = angle_grid[gradient_sums == np.min(gradient_sums)][0]

            wedge_vertices_rotated = myg.rotate_wedge(wedge_vertices, core_pos,
                                                      angle_ideal)

            region_dict[core_name] = {}
            region_dict[core_name]['xpix'] = wedge_vertices_rotated[:, 1]
            region_dict[core_name]['ypix'] = wedge_vertices_rotated[:, 0]

    return region_dict
Example #15
def main(dgr=None,
         vel_range=(-5, 15),
         vel_range_type='single',
         region=None,
         av_data_type='planck'):
    ''' Executes script.

    Parameters
    ----------
    dgr : float
        If None, pulls best-fit value from properties.
    vel_range : tuple
        If None, pulls best-fit value from properties.
    '''

    # import external modules
    import pyfits as fits
    import numpy as np
    from mycoords import make_velocity_axis
    import mygeometry as myg
    from myimage_analysis import calculate_nhi, calculate_noise_cube, \
        calculate_sd, calculate_nh2, calculate_nh2_error
    import json
    from os import system, path

    # Script parameters
    # -----------------
    # Name of noise cube
    noise_cube_filename = 'multicloud_hi_galfa_cube_regrid_planckres_noise.fits'

    # Use Planck dust Av map or Kainulainen 2009 optical extinction Av map?
    # options are 'planck' or 'lee12'
    #av_data_type = 'lee12'
    #av_data_type = 'planck'

    # Global parameter file
    prop_file = 'multicloud_global_properties'

    # Which cores to include in analysis?
    cores_to_keep = [  # taur
        'L1495',
        'L1495A',
        'B213',
        'L1498',
        'B215',
        'B18',
        'B217',
        'B220-1',
        'B220-2',
        'L1521',
        'L1524',
        'L1527-1',
        'L1527-2',
        # Calif
        'L1536',
        'L1483-1',
        'L1483-2',
        'L1482-1',
        'L1482-2',
        'L1478-1',
        'L1478-2',
        'L1456',
        'NGC1579',
        #'L1545',
        #'L1517',
        #'L1512',
        #'L1523',
        #'L1512',
        # Pers
        'B5',
        'IC348',
        'B1E',
        'B1',
        'NGC1333',
        'B4',
        'B3',
        'L1455',
        'L1448',
    ]

    # Regions, regions to edit the global properties with
    if region == 1:
        region_limit = {
            'wcs': (((5, 10, 0), (19, 0, 0)), ((4, 30, 0), (27, 0, 0))),
            'pixel': ()
        }
    elif region == 2:
        region_limit = {
            'wcs': (((4, 30, 0), (19, 0, 0)), ((3, 50, 0), (29, 0, 0))),
            'pixel': ()
        }
    elif region == 3:
        region_limit = {
            'wcs': (((4, 30, 0), (29, 0, 0)), ((3, 50, 0), (33, 0, 0))),
            'pixel': ()
        }
    else:
        region_limit = None

    # define directory locations
    # --------------------------
    output_dir = '/d/bip3/ezbc/multicloud/data/python_output/nhi_av/'
    figure_dir = '/d/bip3/ezbc/multicloud/figures/'
    av_dir = '/d/bip3/ezbc/multicloud/data/av/'
    hi_dir = '/d/bip3/ezbc/multicloud/data/hi/'
    co_dir = '/d/bip3/ezbc/multicloud/data/co/'
    core_dir = '/d/bip3/ezbc/multicloud/data/python_output/core_properties/'
    property_dir = '/d/bip3/ezbc/multicloud/data/python_output/'
    region_dir = '/d/bip3/ezbc/multicloud/data/python_output/'

    # load Planck Av and GALFA HI images, on same grid
    if av_data_type == 'lee12_2mass':
        print('\nLoading Lee+12 data...')
        av_image, av_header = load_fits(av_dir + \
                    'multicloud_av_lee12_2mass_regrid_planckres.fits',
                return_header=True)
        av_image_error = 0.1 * np.ones(av_image.shape)
    elif av_data_type == 'lee12_iris':
        print('\nLoading Lee+12 data...')
        av_image, av_header = load_fits(av_dir + \
                    'multicloud_av_lee12_iris_regrid_planckres.fits',
                return_header=True)
        av_image_error = 0.1 * np.ones(av_image.shape)
    elif av_data_type == 'planck_rad':
        print('\nLoading Planck data...')
        av_image, av_header = load_fits(av_dir + \
                    'multicloud_av_planck_radiance_5arcmin.fits',
                return_header=True)
        av_image_error, av_error_header = load_fits(av_dir + \
                    'multicloud_av_error_planck_radiance_5arcmin.fits',
                return_header=True)
    else:
        print('\nLoading Planck data...')
        av_image, av_header = load_fits(av_dir + \
                    'multicloud_av_planck_5arcmin.fits',
                return_header=True)

        av_image_error, av_error_header = load_fits(av_dir + \
                    'multicloud_av_error_planck_5arcmin.fits',
                return_header=True)

    hi_cube, hi_header = load_fits(hi_dir + \
                'multicloud_hi_galfa_cube_regrid_planckres.fits',
            return_header=True)

    co_data, co_header = load_fits(co_dir + \
                'multicloud_co_cfa_cube_regrid_planckres.fits',
            return_header=True)

    # Prepare data products
    # ---------------------
    # Load global properties of cloud
    # global properties written from script
    # 'av/multicloud_analysis_global_properties.txt'
    if region is not None:
        likelihood_filename += '_region{0:.0f}'.format(region)
        results_filename += '_region{0:.0f}'.format(region)

    print('\nLoading global property file {0:s}.txt'.format(prop_file))
    with open(property_dir + prop_file + '.txt', 'r') as f:
        props = json.load(f)

    # Define velocity range
    props['hi_velocity_range'] = vel_range

    # make velocity axis for hi cube
    velocity_axis = make_velocity_axis(hi_header)
    # make velocity axis for co cube
    co_velocity_axis = make_velocity_axis(co_header)

    # Load the HI noise cube if it exists, else make it
    if not path.isfile(hi_dir + noise_cube_filename):
        hi_noise_cube = calculate_noise_cube(cube=hi_cube,
                                             velocity_axis=velocity_axis,
                                             velocity_noise_range=[90, 110],
                                             header=hi_header,
                                             Tsys=30.,
                                             filename=hi_dir +
                                             noise_cube_filename)
    else:
        hi_noise_cube, noise_header = fits.getdata(hi_dir +
                                                   noise_cube_filename,
                                                   header=True)

    # create nhi image
    nhi_image = calculate_nhi(cube=hi_cube,
                              velocity_axis=velocity_axis,
                              velocity_range=vel_range,
                              header=hi_header,
                              noise_cube=hi_noise_cube)
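    # For reference, the optically thin conversion presumably applied inside
    # calculate_nhi() is N(HI) = 1.823e18 * integral(T_B dv) [cm^-2], with
    # T_B in K and dv in km/s. A minimal, never-executed sketch using the
    # arrays defined above (an illustration, not the library implementation):
    if 0:
        dv = np.abs(velocity_axis[1] - velocity_axis[0])  # km/s per channel
        in_range = ((velocity_axis >= vel_range[0]) &
                    (velocity_axis <= vel_range[1]))
        nhi_sketch = 1.823e18 * np.nansum(hi_cube[in_range, :, :], axis=0) * dv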

    # Change WCS coords to pixel coords of images
    props = convert_limit_coordinates(props,
                                      header=av_header,
                                      coords=('region_limit',
                                              'co_noise_limits', 'plot_limit',
                                              'region_name_pos'))

    # Load cloud division regions from ds9
    props = load_ds9_region(props,
                            filename=region_dir + 'multicloud_divisions.reg',
                            header=av_header)

    # Derive relevant region
    pix = props['region_limit']['pixel']
    region_vertices = ((pix[1], pix[0]), (pix[1], pix[2]), (pix[3], pix[2]),
                       (pix[3], pix[0]))

    # block offregion
    region_mask = myg.get_polygon_mask(av_image, region_vertices)

    print('\nRegion size = ' + \
          '{0:.0f} pix'.format(region_mask[region_mask == 1].size))

    if vel_range_type == 'single':
        print('\nHI velocity integration range:')
        print('%.1f to %.1f km/s' % (vel_range[0], vel_range[1]))
    elif vel_range_type == 'multiple':
        print('\nHI velocity integration ranges:')
        for i in range(0, vel_range.shape[0]):
            print('%.1f to %.1f km/s' % (vel_range[i, 0], vel_range[i, 1]))

    cloud_dict = {
        'taurus': {},
        'perseus': {},
        'california': {},
    }

    # load Planck Av and GALFA HI images, on same grid
    for cloud in cloud_dict:

        print('\nLoading core properties for {0:s}'.format(cloud))

        file_dir = '/d/bip3/ezbc/{0:s}/data/av/'.format(cloud)

        # define core properties
        with open('/d/bip3/ezbc/{0:s}/data/python_output/'.format(cloud) + \
                  'core_properties/{0:s}_core_properties.txt'.format(cloud),
                  'r') as f:
            cores = json.load(f)

        # Load core regions from DS9 files
        if cloud == 'aldobaran':
            region_cloud = 'california'
        else:
            region_cloud = cloud
        core_filename = region_dir.replace('multicloud',region_cloud) + \
                        '/ds9_regions/{0:s}_av_poly_cores'.format(region_cloud)

        cores = load_ds9_core_region(cores,
                                     filename_base=core_filename,
                                     header=av_header)

        cores = convert_core_coordinates(cores, av_header)

        # Remove cores
        cores_to_remove = []
        for core in cores:
            if core not in cores_to_keep:
                cores_to_remove.append(core)
        for core_to_remove in cores_to_remove:
            del cores[core_to_remove]

        cloud_dict[cloud]['cores'] = cores

    # Plot
    figure_types = ['png', 'pdf']
    for figure_type in figure_types:
        filename = 'multicloud_av_cores_map' + \
                   '.{0:s}'.format(figure_type)

        print('\nSaving Av cores map to \n' + filename)

        plot_cores_map(
            header=av_header,
            av_image=av_image,
            limits=props['plot_limit']['pixel'],
            regions=props['regions'],
            cloud_dict=cloud_dict,
            cores_to_keep=cores_to_keep,
            props=props,
            hi_vlimits=(0, 20),
            av_vlimits=(0, 16),
            #av_vlimits=(0.1,30),
            savedir=figure_dir + 'maps/',
            filename=filename,
            show=False)
def main():

    import grid
    import numpy as np
    from os import system,path
    import mygeometry as myg
    from mycoords import make_velocity_axis
    import json
    from myimage_analysis import calculate_nhi, calculate_noise_cube, \
        calculate_sd, calculate_nh2, calculate_nh2_error

    # parameters used in script
    # -------------------------
    # HI velocity integration range
    # Determine HI integration velocity by CO or by likelihood correlation with Av?
    hi_av_likelihoodelation = True

    center_vary = False
    width_vary = True
    dgr_vary = True

    # Check if likelihood file already written, rewrite?
    clobber = 0

    # Confidence of parameter errors
    conf = 0.68
    # Confidence of contour levels
    contour_confs = (0.68, 0.95)

    # Coarse (large) grid or fine (small) grid?
    grid_res = 'course'
    grid_res = 'fine'

    # Results and fits filenames
    likelihood_filename = 'california_nhi_av_likelihoods'
    results_filename = 'california_likelihood'

    # Define ranges of parameters
    if center_vary and width_vary and dgr_vary:
        likelihood_filename += '_width_dgr_center'
        results_filename += '_width_dgr_center'

        velocity_centers = np.arange(-15, 30, 1)
        velocity_widths = np.arange(1, 80, 1)
        dgrs = np.arange(1e-2, 1, 2e-2)
    elif not center_vary and width_vary and dgr_vary:

        if grid_res == 'course':
            likelihood_filename += '_dgr_width_lowres'
            results_filename += '_dgr_width_lowres'
            velocity_centers = np.arange(5, 6, 1)
            velocity_widths = np.arange(1, 80, 1)
            dgrs = np.arange(1e-2, 1, 2e-2)
        elif grid_res == 'fine':
            likelihood_filename += '_dgr_width_highres'
            results_filename += '_dgr_width_highres'
            velocity_centers = np.arange(5, 6, 1)
            velocity_widths = np.arange(1, 40, 0.16667)
            dgrs = np.arange(0.05, 0.5, 1e-3)
    elif center_vary and width_vary and not dgr_vary:
        likelihood_filename += '_width_center'
        results_filename += '_width_center'

        velocity_centers = np.arange(-15, 30, 1)
        velocity_widths = np.arange(1, 80, 1)
        dgrs = np.arange(1.1e-1, 1.2e-1, 0.1e-1)
    elif not center_vary and width_vary and not dgr_vary:
        likelihood_filename += '_width'
        results_filename += '_width'

        velocity_centers = np.arange(5, 6, 1)
        velocity_widths = np.arange(1, 80, 1)
        dgrs = np.arange(1.1e-1, 1.2e-1, 0.1e-1)
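
    # ------------------------------------------------------------------
    # Sketch (an assumption, not the original calc_likelihood_hi_av): for
    # each velocity width the HI cube is presumably integrated into an
    # N(HI) image, and the likelihood of the model Av = DGR * N(HI) is a
    # Gaussian in the Av residuals. A minimal, never-called illustration:
    # ------------------------------------------------------------------
    def _sketch_likelihood_grid(nhi_per_width, av, av_error, dgr_grid):
        # nhi_per_width : (n_widths, n_pix) N(HI) image per velocity width
        # av, av_error  : (n_pix,) observed Av and its uncertainty
        # dgr_grid      : (n_dgrs,) dust-to-gas ratios to test
        import numpy as np
        logL = np.empty((nhi_per_width.shape[0], len(dgr_grid)))
        for i, nhi in enumerate(nhi_per_width):
            for j, dgr in enumerate(dgr_grid):
                model = dgr * nhi
                logL[i, j] = -0.5 * np.sum((av - model) ** 2 / av_error ** 2)
        return logL - logL.max()  # peak-normalize for numerical stability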

    # Which likelihood fits should be performed?
    core_likelihoodelation = 0
    global_likelihoodelation = 1

    # Name of property files results are written to
    global_property_file = 'california_global_properties.txt'
    core_property_file = 'california_core_properties.txt'

    # Threshold of Av below which we expect only atomic gas, in mag
    av_threshold = 1

    # Name of noise cube
    noise_cube_filename = 'california_hi_galfa_cube_regrid_planckres_noise.fits'

    # define directory locations
    # --------------------------
    output_dir = '/d/bip3/ezbc/california/data/python_output/nhi_av/'
    figure_dir = '/d/bip3/ezbc/california/figures/hi_velocity_range/'
    av_dir = '/d/bip3/ezbc/california/data/av/'
    hi_dir = '/d/bip3/ezbc/california/data/hi/'
    co_dir = '/d/bip3/ezbc/california/data/co/'
    core_dir = '/d/bip3/ezbc/california/data/python_output/core_properties/'
    property_dir = '/d/bip3/ezbc/california/data/python_output/'
    region_dir = '/d/bip3/ezbc/california/data/python_output/ds9_regions/'
    likelihood_dir = '/d/bip3/ezbc/california/data/python_output/nhi_av/'

    # load Planck Av and GALFA HI images, on same grid
    av_data_planck, av_header = load_fits(av_dir + \
                'california_av_planck_5arcmin.fits',
            return_header=True)

    av_error_data_planck, av_error_header = load_fits(av_dir + \
                'california_av_error_planck_5arcmin.fits',
            return_header=True)

    hi_data, h = load_fits(hi_dir + \
                'california_hi_galfa_cube_regrid_planckres.fits',
            return_header=True)

    # make the velocity axis
    velocity_axis = make_velocity_axis(h)

    # Plot NHI vs. Av for a given velocity range
    if not path.isfile(hi_dir + noise_cube_filename):
        noise_cube = calculate_noise_cube(cube=hi_data,
                velocity_axis=velocity_axis,
                velocity_noise_range=[90,110], header=h, Tsys=30.,
                filename=hi_dir + noise_cube_filename)
    else:
        noise_cube, noise_header = load_fits(hi_dir + noise_cube_filename,
            return_header=True)

    # define core properties
    with open(core_dir + core_property_file, 'r') as f:
        cores = json.load(f)
    with open(property_dir + global_property_file, 'r') as f:
        global_props = json.load(f)

    dgr = global_props['dust2gas_ratio']['value']
    dgr = 1.2e-1  # override the stored best-fit DGR with a fixed value

    cores = convert_core_coordinates(cores, h)

    cores = load_ds9_region(cores,
            filename_base = region_dir + 'california_av_boxes_',
            header = h)

    if core_likelihoodelation:
        for core in cores:
            print('\nCalculating for core %s' % core)

            # Grab the mask
            mask = myg.get_polygon_mask(av_data_planck,
                    cores[core]['box_vertices_rotated'])

            indices = ((mask == 0) &\
                       (av_data_planck < av_threshold))

            hi_data_sub = np.copy(hi_data[:, indices])
            noise_cube_sub = np.copy(noise_cube[:, indices])
            av_data_sub = np.copy(av_data_planck[indices])
            av_error_data_sub = np.copy(av_error_data_planck[indices])

            # Define filename for plotting results
            results_filename = figure_dir + 'california_logL_%s.png' % core

            # Compute the likelihood of the Av and N(HI) correlation for
            # this core over the velocity ranges
            vel_range_confint, dgr_confint, likelihoods, center_likelihood,\
                width_likelihood, dgr_likelihood = \
                    calc_likelihood_hi_av(hi_cube=hi_data_sub,
                                    hi_velocity_axis=velocity_axis,
                                    hi_noise_cube=noise_cube_sub,
                                    av_image=av_data_sub,
                                    av_image_error=av_error_data_sub,
                                    dgrs=dgrs,
                                    velocity_centers=velocity_centers,
                                    velocity_widths=velocity_widths,
                                    return_likelihoods=True,
                                    plot_results=True,
                                    results_filename=results_filename,
                                    likelihood_filename=likelihood_dir + \
                                            likelihood_filename + \
                                            '{0:s}.fits'.format(core),
                                    clobber=clobber,
                                    conf=conf)

            print('HI velocity integration range:')
            print('%.1f to %.1f km/s' % (vel_range_confint[0],
                                         vel_range_confint[1]))
            print('DGR:')
            print('%.2f x 10^-20 cm^2 mag' % (dgr_confint[0]))

            cores[core]['hi_velocity_range'] = vel_range_confint[0:2]
            cores[core]['hi_velocity_range_error'] = vel_range_confint[2:]
            cores[core]['center_likelihood'] = center_likelihood.tolist()
            cores[core]['width_likelihood'] = width_likelihood.tolist()
            cores[core]['vel_centers'] = velocity_centers.tolist()
            cores[core]['vel_widths'] = velocity_widths.tolist()

        with open(core_dir + core_property_file, 'w') as f:
            json.dump(cores, f)

    if global_likelihoodelation:
        print('\nCalculating likelihoods globally')


        mask = np.zeros(av_data_planck.shape)
        for core in cores:
            # Grab the mask
            mask += myg.get_polygon_mask(av_data_planck,
                    cores[core]['box_vertices_rotated'])

        indices = ((mask == 0) &\
                   (av_data_planck < av_threshold))
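        # (i.e., keep only pixels outside every core box and below the Av
        #  threshold)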


        #indices = ((av_data_planck < av_threshold))

        hi_data_sub = np.copy(hi_data[:, indices])
        noise_cube_sub = np.copy(noise_cube[:, indices])
        av_data_sub = np.copy(av_data_planck[indices])
        av_error_data_sub = np.copy(av_error_data_planck[indices])

        # Define filename for plotting results
        results_filename = figure_dir + results_filename

        # Compute the likelihood of the Av and N(HI) correlation over the
        # velocity ranges
        vel_range_confint, dgr_confint, likelihoods, center_likelihood,\
            width_likelihood, dgr_likelihood = \
                calc_likelihood_hi_av(hi_cube=hi_data_sub,
                                hi_velocity_axis=velocity_axis,
                                hi_noise_cube=noise_cube_sub,
                                av_image=av_data_sub,
                                av_image_error=av_error_data_sub,
                                dgrs=dgrs,
                                velocity_centers=velocity_centers,
                                velocity_widths=velocity_widths,
                                return_likelihoods=True,
                                plot_results=True,
                                results_filename=results_filename,
                                likelihood_filename=likelihood_dir + \
                                        likelihood_filename + \
                                        '_global.fits',
                                clobber=clobber,
                                conf=conf,
                                contour_confs=contour_confs)

        print('HI velocity integration range:')
        print('%.1f to %.1f km/s' % (vel_range_confint[0],
                                     vel_range_confint[1]))
        print('DGR:')
        print('%.2f x 10^-20 cm^2 mag' % (dgr_confint[0]))

        global_props['dust2gas_ratio'] = {}
        global_props['dust2gas_ratio_error'] = {}

        global_props['hi_velocity_range'] = vel_range_confint[0:2]
        global_props['hi_velocity_range_error'] = vel_range_confint[2:]
        global_props['dust2gas_ratio']['value'] = dgr_confint[0]
        global_props['dust2gas_ratio_error']['value'] = dgr_confint[1:]
        global_props['hi_velocity_range_conf'] = conf
        global_props['center_likelihood'] = center_likelihood.tolist()
        global_props['width_likelihood'] = width_likelihood.tolist()
        global_props['dgr_likelihood'] = dgr_likelihood.tolist()
        global_props['vel_centers'] = velocity_centers.tolist()
        global_props['vel_widths'] = velocity_widths.tolist()
        global_props['dgrs'] = dgrs.tolist()
        global_props['likelihoods'] = likelihoods.tolist()

        with open(property_dir + global_property_file, 'w') as f:
            json.dump(global_props, f)
def main():

    import grid
    import numpy as np
    from os import system, path
    import mygeometry as myg
    from mycoords import make_velocity_axis
    import json
    from myimage_analysis import calculate_nhi, calculate_noise_cube, \
        calculate_sd, calculate_nh2, calculate_nh2_error
    from multiprocessing import Pool

    global _hi_cube
    global _hi_velocity_axis
    global _hi_noise_cube
    global _av_image
    global _av_image_error

    # parameters used in script
    # -------------------------
    # HI velocity integration range
    # Determine HI integration velocity by CO or by likelihood correlation with Av?
    hi_av_likelihoodelation = True

    center_vary = False
    width_vary = True
    dgr_vary = True

    # Check if likelihood file already written, rewrite?
    clobber = 1

    # Include only pixels within core regions for analysis?
    core_mask = 0

    # Confidence of parameter errors
    conf = 0.68
    # Confidence of contour levels
    contour_confs = (0.68, 0.95)

    # Results and fits filenames
    likelihood_filename = 'perseus_nhi_av_likelihoods_mcmc_co_av'
    results_filename = 'perseus_likelihood_mcmc_co_av'
    global _progress_filename
    _progress_filename = 'perseus_mcmc_samples.dat'

    # Define ranges of parameters
    global _av_thres_range
    _av_thres_range = (1.0, 1.1)
    _av_thres_range = (0.1, 2.0)
    global _vel_width_range
    _vel_width_range = (0.0, 80.0)
    global _dgr_range
    _dgr_range = (0.01, 0.4)
    global _velocity_center
    _velocity_center = 5.0  # km/s

    # MCMC parameters
    global _ndim
    _ndim = 3
    global _nwalkers
    _nwalkers = 100
    global _niter
    _niter = 1000
    global _init_guesses
    _init_guesses = np.array((10, 0.10, 1.0))
    global _init_spread
    _init_spread = np.array((0.1, 0.01, 0.01))
    global _mc_threads
    _mc_threads = 10
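
    # Sketch of how these MCMC settings would presumably be fed to an
    # emcee-style ensemble sampler (a hypothetical illustration only; the
    # actual sampling is wrapped inside the calc_likelihood() call further
    # below). The three parameters are presumably (HI velocity width, DGR,
    # Av threshold), matching _vel_width_range, _dgr_range and
    # _av_thres_range above; _mc_threads would select the number of threads,
    # omitted here since that interface differs between emcee versions.
    def _sketch_run_mcmc(ln_posterior):
        # ln_posterior is a stand-in name for the log-posterior callable
        import emcee
        import numpy as np
        # scatter the walkers around the initial guess
        pos0 = [_init_guesses + _init_spread * np.random.randn(_ndim)
                for _ in range(_nwalkers)]
        sampler = emcee.EnsembleSampler(_nwalkers, _ndim, ln_posterior)
        sampler.run_mcmc(pos0, _niter)
        return sampler.flatchain  # (nwalkers * niter, ndim) posterior samples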

    # Name of property files results are written to
    global_property_file = 'perseus_global_properties.txt'
    core_property_file = 'perseus_core_properties.txt'

    # Name of noise cube
    noise_cube_filename = 'perseus_hi_galfa_cube_regrid_planckres_noise.fits'

    # Define limits for plotting the map
    prop_dict = {}
    prop_dict['limit_wcs'] = (((3, 58, 0), (27, 6, 0)), ((3, 20, 0), (35, 0,
                                                                      0)))
    prop_dict['limit_wcs'] = (((3, 58, 0), (26, 6, 0)), ((3, 0, 0), (35, 0,
                                                                     0)))

    # define directory locations
    # --------------------------
    output_dir = '/d/bip3/ezbc/perseus/data/python_output/nhi_av/'
    figure_dir = '/d/bip3/ezbc/perseus/figures/hi_velocity_range/'
    av_dir = '/d/bip3/ezbc/perseus/data/av/'
    hi_dir = '/d/bip3/ezbc/perseus/data/hi/'
    co_dir = '/d/bip3/ezbc/perseus/data/co/'
    core_dir = '/d/bip3/ezbc/perseus/data/python_output/core_properties/'
    property_dir = '/d/bip3/ezbc/perseus/data/python_output/'
    region_dir = '/d/bip3/ezbc/perseus/data/python_output/ds9_regions/'
    likelihood_dir = '/d/bip3/ezbc/perseus/data/python_output/nhi_av/'
    global _likelihood_dir
    _likelihood_dir = likelihood_dir

    # load Planck Av and GALFA HI images, on same grid
    av_data_planck, av_header = load_fits(av_dir + \
                'perseus_av_planck_5arcmin.fits',
            return_header=True)
    prop_dict['av_header'] = av_header

    av_error_data_planck, av_error_header = load_fits(av_dir + \
                'perseus_av_error_planck_5arcmin.fits',
            return_header=True)

    hi_data, h = load_fits(hi_dir + \
                'perseus_hi_galfa_cube_regrid_planckres.fits',
            return_header=True)

    co_data, co_header = load_fits(co_dir + \
                'perseus_co_cfa_cube_regrid_planckres.fits',
            return_header=True)

    # make the velocity axis
    velocity_axis = make_velocity_axis(h)

    # Plot NHI vs. Av for a given velocity range
    if not path.isfile(hi_dir + noise_cube_filename):
        noise_cube = calculate_noise_cube(cube=hi_data,
                                          velocity_axis=velocity_axis,
                                          velocity_noise_range=[90, 110],
                                          header=h,
                                          Tsys=30.,
                                          filename=hi_dir +
                                          noise_cube_filename)
    else:
        noise_cube, noise_header = load_fits(hi_dir + noise_cube_filename,
                                             return_header=True)

    # define core properties
    with open(core_dir + core_property_file, 'r') as f:
        cores = json.load(f)
    with open(property_dir + global_property_file, 'r') as f:
        global_props = json.load(f)

    cores = convert_core_coordinates(cores, h)

    cores = load_ds9_region(cores,
                            filename_base=region_dir + 'perseus_av_boxes_',
                            header=h)

    print('\nCalculating likelihoods globally')

    mask = np.zeros(av_data_planck.shape)
    for core in cores:
        # Grab the mask
        mask += myg.get_polygon_mask(av_data_planck,
                                     cores[core]['wedge_vertices_rotated'])

    co_mom0 = np.sum(co_data, axis=0)

    # Mask images
    core_mask = 0
    if core_mask:
        indices = ((mask == 1) & \
                   (co_mom0 < np.std(co_mom0[~np.isnan(co_mom0)])*2.0))
        mask_type = '_core_mask'
    else:
        indices = (co_mom0 < np.std(co_mom0[~np.isnan(co_mom0)]) * 2.0)
        mask_type = ''

    hi_data_sub = np.copy(hi_data[:, indices])
    noise_cube_sub = np.copy(noise_cube[:, indices])
    av_data_sub = np.copy(av_data_planck[indices])
    av_error_data_sub = np.copy(av_error_data_planck[indices])

    # Set global variables
    _hi_cube = hi_data_sub
    _hi_velocity_axis = velocity_axis
    _hi_noise_cube = noise_cube_sub
    _av_image = av_data_sub
    _av_image_error = av_error_data_sub

    # Define filename for plotting results
    results_filename = figure_dir + results_filename

    # Compute the likelihood of the Av and N(HI) correlation over the
    # parameter ranges
    vel_range_confint, dgr_confint, likelihoods, center_likelihood,\
        width_likelihood, dgr_likelihood = \
            calc_likelihood(return_likelihoods=True,
                            plot_results=True,
                            results_filename=results_filename + mask_type,
                            likelihood_filename=likelihood_dir + \
                                    likelihood_filename + \
                                    mask_type + '.npy',
                            clobber=clobber,
                            conf=conf,
                            contour_confs=contour_confs)
def main():

    import numpy as np
    from os import system, path
    import mygeometry as myg
    from mycoords import make_velocity_axis
    import json
    from myimage_analysis import calculate_nhi, calculate_noise_cube

    global hi_cube
    global hi_velocity_axis
    global hi_noise_cube
    global av_image
    global av_image_error

    # parameters used in script
    # -------------------------
    # HI velocity integration range
    # Determine HI integration velocity by CO or by likelihood correlation with Av?
    hi_av_likelihoodelation = True

    center_vary = False
    width_vary = True
    dgr_vary = True

    # Check if likelihood file already written, rewrite?
    clobber = 1

    # Confidence of parameter errors
    conf = 0.68
    # Confidence of contour levels
    contour_confs = (0.68, 0.95)

    # Coarse (large) grid or fine (small) grid?
    grid_res = "fine"
    grid_res = "course"

    # Use multithreading?
    multithread = False

    # Use Av+CO mask or only CO?
    av_and_co_mask = True

    # Derive CO mask? If co_thres = None, co_thres will be 2 * std(co)
    co_thres = 6.00  # K km/s

    # Threshold of Av below which we expect only atomic gas, in mag
    av_thres = 1.4

    # Results and fits filenames
    if av_and_co_mask:
        likelihood_filename = "taurus_nhi_av_likelihoods_co_" + "av{0:.1f}mag".format(av_thres)
        results_filename = "taurus_likelihood_co_" + "av{0:.1f}mag".format(av_thres)
    else:
        likelihood_filename = "taurus_nhi_av_likelihoods_co_only"
        results_filename = "taurus_likelihood_co_only"

    # Name of property files results are written to
    global_property_file = "taurus_global_properties.txt"
    core_property_file = "taurus_core_properties.txt"

    # Name of noise cube
    noise_cube_filename = "taurus_hi_galfa_cube_regrid_planckres_noise.fits"

    # Define ranges of parameters
    if center_vary and width_vary and dgr_vary:
        likelihood_filename += "_width_dgr_center"
        results_filename += "_width_dgr_center"

        velocity_centers = np.arange(-15, 30, 1)
        velocity_widths = np.arange(1, 80, 1)
        dgrs = np.arange(1e-2, 1, 2e-2)
    elif not center_vary and width_vary and dgr_vary:
        if grid_res == "course":
            likelihood_filename += "_dgr_width_lowres"
            results_filename += "_dgr_width_lowres"
            velocity_centers = np.arange(-5, 10, 10 * 0.16667)
            velocity_widths = np.arange(1, 30, 10 * 0.16667)
            dgrs = np.arange(0.05, 0.7, 2e-2)
        elif grid_res == "fine":
            likelihood_filename += "_dgr_width_highres"
            results_filename += "_dgr_width_highres"
            velocity_centers = np.arange(5, 6, 1)
            velocity_widths = np.arange(1, 100, 0.16667)
            dgrs = np.arange(0.15, 0.4, 1e-3)
            velocity_widths = np.arange(1, 15, 0.16667)
            dgrs = np.arange(0.1, 0.9, 3e-3)
            # velocity_widths = np.arange(1, 40, 1)
            # dgrs = np.arange(0.15, 0.4, 1e-1)
    elif center_vary and width_vary and not dgr_vary:
        likelihood_filename += "_width_center"
        results_filename += "_width_center"

        velocity_centers = np.arange(-15, 30, 1)
        velocity_widths = np.arange(1, 80, 1)
        dgrs = np.arange(1.1e-1, 1.2e-1, 0.1e-1)
    elif not center_vary and width_vary and not dgr_vary:
        likelihood_filename += "_width"
        results_filename += "_width"

        velocity_centers = np.arange(5, 6, 1)
        velocity_widths = np.arange(1, 80, 1)
        dgrs = np.arange(1.1e-1, 1.2e-1, 0.1e-1)

    # define directory locations
    # --------------------------
    output_dir = "/d/bip3/ezbc/taurus/data/python_output/nhi_av/"
    figure_dir = "/d/bip3/ezbc/taurus/figures/hi_velocity_range/"
    av_dir = "/d/bip3/ezbc/taurus/data/av/"
    hi_dir = "/d/bip3/ezbc/taurus/data/hi/"
    co_dir = "/d/bip3/ezbc/taurus/data/co/"
    core_dir = "/d/bip3/ezbc/taurus/data/python_output/core_properties/"
    property_dir = "/d/bip3/ezbc/taurus/data/python_output/"
    region_dir = "/d/bip3/ezbc/taurus/data/python_output/ds9_regions/"
    likelihood_dir = "/d/bip3/ezbc/taurus/data/python_output/nhi_av/"

    # load Planck Av and GALFA HI images, on same grid
    av_data, av_header = load_fits(av_dir + "taurus_av_planck_5arcmin.fits", return_header=True)

    av_data_error, av_error_header = load_fits(av_dir + "taurus_av_error_planck_5arcmin.fits", return_header=True)

    hi_data, h = load_fits(hi_dir + "taurus_hi_galfa_cube_regrid_planckres.fits", return_header=True)

    co_data, co_header = load_fits(co_dir + "taurus_co_cfa_cube_regrid_planckres.fits", return_header=True)

    # make the velocity axis
    velocity_axis = make_velocity_axis(h)
    co_velocity_axis = make_velocity_axis(co_header)

    # Plot NHI vs. Av for a given velocity range
    if not path.isfile(hi_dir + noise_cube_filename):
        noise_cube = calculate_noise_cube(
            cube=hi_data,
            velocity_axis=velocity_axis,
            velocity_noise_range=[90, 110],
            header=h,
            Tsys=30.0,
            filename=hi_dir + noise_cube_filename,
        )
    else:
        noise_cube, noise_header = load_fits(hi_dir + noise_cube_filename, return_header=True)

    # define core properties
    with open(core_dir + core_property_file, "r") as f:
        cores = json.load(f)
    with open(property_dir + global_property_file, "r") as f:
        global_props = json.load(f)

    # Change WCS coords to pixel coords of images
    cores = convert_core_coordinates(cores, h)
    cores = load_ds9_region(cores, filename_base=region_dir + "taurus_av_boxes_", header=h)
    global_props = convert_limit_coordinates(global_props, header=av_header)

    print("\nCalculating likelihoods globally")

    co_data_nonans = np.copy(co_data)
    co_data_nonans[np.isnan(co_data_nonans)] = 0.0

    # Set velocity center as CO peak
    if not center_vary:
        co_spectrum = np.sum(co_data_nonans, axis=(1, 2))
        co_avg_vel = np.average(co_velocity_axis, weights=co_spectrum)
        co_peak_vel = co_velocity_axis[co_spectrum == np.max(co_spectrum)]
        # velocity_centers = np.arange(co_peak_vel, co_peak_vel + 1, 1)
        velocity_centers = np.arange(co_avg_vel, co_avg_vel + 1, 1)
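        # (intensity-weighted mean velocity of the CO spectrum; the CO-peak
        #  alternative is left commented out above)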

        print("\nVelocity center from CO = {0:.2f} km/s".format(velocity_centers[0]))

    # Create mask where CO is present
    core_mask = np.zeros(av_data.shape)
    # for core in cores:
    #    # Grab the mask
    #    core_mask += myg.get_polygon_mask(av_data,
    #            cores[core]['box_vertices_rotated'])

    # Calc moment 0 map of CO
    co_mom0 = np.sum(co_data_nonans, axis=0)

    # calc noise without any emission if CO threshold not already set
    if co_thres is None:
        co_noise = calc_co_noise(co_mom0, global_props)
        co_thres = 2.0 * co_noise
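    # Hypothetical sketch of a calc_co_noise()-style estimate (assumed
    # behavior, not the original implementation): measure the scatter of the
    # moment-0 map away from emission; elsewhere in these scripts the default
    # CO cut is simply twice this standard deviation.
    if 0:
        co_noise_sketch = np.std(co_mom0[~np.isnan(co_mom0)])
        co_thres_sketch = 2.0 * co_noise_sketch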

    # Derive relevant region
    pix = global_props["region_limit"]["pixel"]
    region_vertices = ((pix[1], pix[0]), (pix[1], pix[2]), (pix[3], pix[2]), (pix[3], pix[0]))

    # block offregion
    region_mask = myg.get_polygon_mask(av_data, region_vertices)

    print("\nRegion size = " + "{0:.0f} pix".format(region_mask[region_mask == 1].size))

    # Get indices which trace only atomic gas, i.e., no CO emission
    if av_and_co_mask:
        indices = ((co_mom0 < co_thres) & (av_data < av_thres)) & (region_mask == 1)
    elif not av_and_co_mask:
        indices = (co_mom0 < co_thres) & (region_mask == 1)
        av_thres = None

    # Write mask of pixels not used
    mask = ~indices
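    # (True marks pixels excluded from the analysis below)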

    # Mask global data with CO indices
    hi_data_sub = np.copy(hi_data[:, indices])
    noise_cube_sub = np.copy(noise_cube[:, indices])
    av_data_sub = np.copy(av_data[indices])
    av_error_data_sub = np.copy(av_data_error[indices])

    # import matplotlib.pyplot as plt
    # av_plot_data = np.copy(av_data)
    # av_plot_data[~indices] = np.nan
    # plt.imshow(av_plot_data, origin='lower')
    # plt.contour(co_mom0, levels=(6, 12, 24), origin='lower')
    # plt.show()
    # plt.clf()
    # plt.close()

    # Plot the masked image
    av_data_masked = np.copy(av_data)
    av_data_masked[~indices] = np.nan
    figure_types = ["png"]
    for figure_type in figure_types:
        plot_av_image(
            av_image=av_data_masked,
            header=av_header,
            savedir=figure_dir + "../maps/",
            limits=global_props["region_limit"]["pixel"],
            filename="taurus_dgr_co_masked_map." + figure_type,
            show=0,
        )

    # Set global variables
    hi_cube = hi_data_sub
    hi_velocity_axis = velocity_axis
    hi_noise_cube = noise_cube_sub
    av_image = av_data_sub
    av_image_error = av_error_data_sub

    # Define filename for plotting results
    results_filename = figure_dir + results_filename

    # Compute the likelihood of the Av and N(HI) correlation over the
    # velocity-width and DGR grids
    vel_range_confint, dgr_confint, likelihoods, center_likelihood, \
        width_likelihood, dgr_likelihood, center_max, width_max, dgr_max = \
            calc_likelihood_hi_av(
        dgrs=dgrs,
        velocity_centers=velocity_centers,
        velocity_widths=velocity_widths,
        return_likelihoods=True,
        plot_results=True,
        results_filename=results_filename,
        likelihood_filename=likelihood_dir + likelihood_filename + "_global.fits",
        clobber=clobber,
        conf=conf,
        contour_confs=contour_confs,
        multithread=multithread,
    )
    vel_range_max = (center_max - width_max / 2.0, center_max + width_max / 2.0)

    print("\nHI velocity integration range:")
    print("%.1f to %.1f km/s" % (vel_range_confint[0], vel_range_confint[1]))
    print("\nDGR:")
    print("%.1f x 10^-20 cm^2 mag" % (dgr_confint[0]))

    # Calulate chi^2 for best fit models
    # ----------------------------------
    nhi_image_temp, nhi_image_error = calculate_nhi(
        cube=hi_data, velocity_axis=hi_velocity_axis, velocity_range=vel_range_max, noise_cube=noise_cube
    )
    av_image_model = nhi_image_temp * dgr_max
    # avoid NaNs
    indices = (av_image_model == av_image_model) & (av_data == av_data)
    # add nan locations to the mask
    mask[~indices] = 1

    # count number of pixels used in analysis
    npix = mask[~mask].size

    # finally calculate chi^2
    chisq = np.sum((av_data[~mask] - av_image_model[~mask]) ** 2 / av_data_error[~mask] ** 2) / av_data[~mask].size
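    # (the chi^2 above is normalized by the number of unmasked pixels; the
    #  number of fitted parameters is not subtracted from the denominator)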

    print("\nTotal number of pixels in analysis, after masking = " +
          "{0:.0f}".format(npix) +
          "\nGiven a CO threshold of {0:.2f} K km/s".format(co_thres) +
          "\nand an Av threshold of {0:.2f} mag".format(av_thres))

    print("\nReduced chi^2 = {0:.1f}".format(chisq))

    # Write results to global properties
    global_props["dust2gas_ratio"] = {}
    global_props["dust2gas_ratio_error"] = {}
    global_props["hi_velocity_width"] = {}
    global_props["dust2gas_ratio_max"] = {}
    global_props["hi_velocity_center_max"] = {}
    global_props["hi_velocity_width_max"] = {}
    global_props["hi_velocity_range_max"] = {}
    global_props["av_threshold"] = {}
    global_props["co_threshold"] = {}
    global_props["hi_velocity_width"]["value"] = vel_range_confint[1] - vel_range_confint[0]
    global_props["hi_velocity_width"]["unit"] = "km/s"
    global_props["hi_velocity_range"] = vel_range_confint[0:2]
    global_props["hi_velocity_range_error"] = vel_range_confint[2:]
    global_props["dust2gas_ratio"]["value"] = dgr_confint[0]
    global_props["dust2gas_ratio_error"]["value"] = dgr_confint[1:]
    global_props["dust2gas_ratio_max"]["value"] = dgr_max
    global_props["hi_velocity_center_max"]["value"] = center_max
    global_props["hi_velocity_width_max"]["value"] = width_max
    global_props["hi_velocity_range_max"]["value"] = vel_range_max
    global_props["hi_velocity_range_conf"] = conf
    global_props["center_likelihood"] = center_likelihood.tolist()
    global_props["width_likelihood"] = width_likelihood.tolist()
    global_props["dgr_likelihood"] = dgr_likelihood.tolist()
    global_props["vel_centers"] = velocity_centers.tolist()
    global_props["vel_widths"] = velocity_widths.tolist()
    global_props["dgrs"] = dgrs.tolist()
    global_props["likelihoods"] = likelihoods.tolist()
    global_props["av_threshold"]["value"] = av_thres
    global_props["av_threshold"]["unit"] = "mag"
    global_props["co_threshold"]["value"] = co_thres
    global_props["co_threshold"]["unit"] = "K km/s"
    global_props["chisq"] = chisq
    global_props["npix"] = npix
    global_props["mask"] = mask.tolist()

    with open(property_dir + global_property_file, "w") as f:
        json.dump(global_props, f)
def main(dgr=None, vel_range=None, vel_range_type='single', region=None,
        av_data_type='planck'):
    ''' Executes script.

    Parameters
    ----------
    dgr : float
        If None, pulls best-fit value from properties.
    vel_range : tuple
        If None, pulls best-fit value from properties.
    '''

    # import external modules
    #import pyfits as fits
    from astropy.io import fits
    import numpy as np
    from mycoords import make_velocity_axis
    import mygeometry as myg
    from myimage_analysis import calculate_nhi, calculate_noise_cube, \
        calculate_sd, calculate_nh2, calculate_nh2_error
    import json
    from os import system,path

    # Script parameters
    # -----------------
    # Name of noise cube
    noise_cube_filename = 'multicloud_hi_galfa_cube_regrid_planckres_noise.fits'

    # Use Planck dust Av map or Kainulainen 2009 optical extinction Av map?
    # options are 'planck' or 'lee12'
    #av_data_type = 'lee12'
    #av_data_type = 'planck'

    # Global parameter file
    prop_file = 'multicloud_global_properties'

    # Region limits used to edit the global properties; each corner is given
    # as ((RA h, m, s), (Dec d, m, s))
    if region == 1:
        region_limit = {'wcs' : (((5, 10, 0), (19, 0, 0)),
                                 ((4, 30, 0), (27, 0, 0))),
                          'pixel' : ()
                         }
    elif region == 2:
        region_limit = {'wcs' : (((4, 30, 0), (19, 0, 0)),
                                 ((3, 50, 0), (29, 0, 0))),
                          'pixel' : ()
                        }
    elif region == 3:
        region_limit = {'wcs' : (((4, 30, 0), (29, 0, 0)),
                                 ((3, 50, 0), (33, 0, 0))),
                          'pixel' : ()
                        }
    else:
        region_limit = None

    # define directory locations
    # --------------------------
    output_dir = '/d/bip3/ezbc/multicloud/data/python_output/nhi_av/'
    figure_dir = '/d/bip3/ezbc/multicloud/figures/'
    av_dir = '/d/bip3/ezbc/multicloud/data/av/'
    hi_dir = '/d/bip3/ezbc/multicloud/data/hi/'
    co_dir = '/d/bip3/ezbc/multicloud/data/co/'
    core_dir = '/d/bip3/ezbc/multicloud/data/python_output/core_properties/'
    property_dir = '/d/bip3/ezbc/multicloud/data/python_output/'
    region_dir = '/d/bip3/ezbc/multicloud/data/python_output/regions/'

    # load Planck Av and GALFA HI images, on same grid
    if av_data_type == 'lee12_2mass':
        print('\nLoading Lee+12 data...')
        av_image, av_header = load_fits(av_dir + \
                    'multicloud_av_lee12_2mass_regrid_planckres.fits',
                return_header=True)
        av_image_error = 0.1 * np.ones(av_image.shape)
    elif av_data_type == 'lee12_iris':
        print('\nLoading Lee+12 data...')
        av_image, av_header = load_fits(av_dir + \
                    'multicloud_av_lee12_iris_regrid_planckres.fits',
                return_header=True)
        av_image_error = 0.1 * np.ones(av_image.shape)
    elif av_data_type == 'planck_rad':
        print('\nLoading Planck data...')
        av_image, av_header = load_fits(av_dir + \
                    'multicloud_av_planck_tau353_5arcmin.fits',
                return_header=True)
        av_image_error, av_error_header = load_fits(av_dir + \
                    'multicloud_av_error_planck_tau353_5arcmin.fits',
                return_header=True)
    else:
        print('\nLoading Planck data...')
        av_image, av_header = load_fits(av_dir + \
                    'multicloud_av_planck_tau353_5arcmin.fits',
                return_header=True)

        av_image_error, av_error_header = load_fits(av_dir + \
                    'multicloud_av_error_planck_tau353_5arcmin.fits',
                return_header=True)

    hi_cube, hi_header = load_fits(hi_dir + \
                'multicloud_hi_galfa_cube_regrid_planckres.fits',
            return_header=True)

    co_data, co_header = load_fits(co_dir + \
                'multicloud_co_cfa_cube_regrid_planckres.fits',
            return_header=True)

    # Prepare data products
    # ---------------------
    # Load global properties of cloud
    # global properties written from script
    # 'av/multicloud_analysis_global_properties.txt'
    if region is not None:
        likelihood_filename += '_region{0:.0f}'.format(region)
        results_filename += '_region{0:.0f}'.format(region)
    with open(property_dir + prop_file + '.txt', 'r') as f:
        props = json.load(f)

    if vel_range is not None:
        props['hi_velocity_range'] = vel_range
    else:
        vel_range = props['hi_velocity_range']

    # make velocity axis for hi cube
    velocity_axis = make_velocity_axis(hi_header)
    # make velocity axis for co cube
    co_velocity_axis = make_velocity_axis(co_header)

    # Load the HI noise cube if it exists, else make it
    if not path.isfile(hi_dir + noise_cube_filename):
        hi_noise_cube = calculate_noise_cube(cube=hi_cube,
                velocity_axis=velocity_axis,
                velocity_noise_range=[90,110], header=hi_header, Tsys=30.,
                filename=hi_dir + noise_cube_filename)
    else:
        hi_noise_cube, noise_header = fits.getdata(hi_dir + noise_cube_filename,
            header=True)

    # create nhi image
    nhi_image = calculate_nhi(cube=hi_cube,
            velocity_axis=velocity_axis,
            velocity_range=vel_range,
            header=hi_header,
            noise_cube=hi_noise_cube)

    props['plot_limit']['wcs'] = (((5, 20, 0), (19, 0 ,0)),
                                  ((2, 30, 0), (37, 0, 0))
                                  )

    props['region_name_pos'] = {
             #'taurus 1' : {'wcs' : ((3, 50,  0),
             #                       (21.5, 0, 0)),
             #             },
             #'taurus 2' : {'wcs' : ((5, 10,  0),
             #                       (21.5, 0, 0)),
             #             },
             'taurus' : {'wcs' : ((4, 40,  0),
                                  (21, 0, 0)),
                          },
             'perseus' : {'wcs' : ((3, 30,  0),
                                   (26, 0, 0)),
                          },
             #'perseus 1' : {'wcs' : ((3, 0,  0),
             #                      (34, 0, 0)),
             #             },
             #'perseus 2' : {'wcs' : ((3, 10,  0),
             #                      (22.5, 0, 0)),
             #             },
             'california' : {'wcs' : ((4, 28,  0),
                                      (34, 0, 0)),
                             },
             }

    # Change WCS coords to pixel coords of images
    props = convert_limit_coordinates(props,
                                      header=av_header,
                                      coords=('region_limit',
                                              'co_noise_limits',
                                              'plot_limit',
                                              'region_name_pos'))

    props['plot_limit']['wcs'] = [15*(5+20./60), 15*(2+30./60.), 17, 38.5]
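    # (RA limits converted from hours to degrees at 15 deg per hour:
    #  5h20m -> 80.0 deg and 2h30m -> 37.5 deg; the last two values are
    #  presumably the Dec limits in degrees)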


    # Load cloud division regions from ds9
    props = load_ds9_region(props,
                            filename=region_dir + 'multicloud_divisions.reg',
                            header=av_header)

    # Derive relevant region
    pix = props['region_limit']['pixel']
    region_vertices = ((pix[1], pix[0]),
                       (pix[1], pix[2]),
                       (pix[3], pix[2]),
                       (pix[3], pix[0])
                       )

    # block offregion
    region_mask = myg.get_polygon_mask(av_image, region_vertices)

    print('\nRegion size = ' + \
          '{0:.0f} pix'.format(region_mask[region_mask == 1].size))

    if vel_range_type == 'single':
        print('\nHI velocity integration range:')
        print('%.1f to %.1f km/s' % (vel_range[0],
                                     vel_range[1]))
    elif vel_range_type == 'multiple':
        print('\nHI velocity integration ranges:')
        for i in range(0, vel_range.shape[0]):
            print('%.1f to %.1f km/s' % (vel_range[i, 0],
                                         vel_range[i, 1]))

    # Plot
    figure_types = ['png', 'pdf']
    for figure_type in figure_types:
        if region is None:
            if vel_range_type == 'single':
                filename = 'multicloud_av_nhi_map' + \
                    '.%s' % figure_type
                    #av_data_type + \
                    #'dgr{0:.3f}_'.format(dgr) + \
                    #'{0:.1f}to{1:.1f}kms'.format(vel_range[0], vel_range[1]) + \
                    #'_' + \
            elif vel_range_type == 'multiple':
                filename = 'multiple_vel_range/multicloud_av_model_map' + \
                           'dgr{0:.3f}'.format(dgr)
                for i in range(0, vel_range.shape[0]):
                    filename += '_{0:.1f}to{1:.1f}kms'.format(vel_range[i, 0],
                                                              vel_range[i, 1])
                filename += '.%s' % figure_type
        else:
            filename = 'multicloud_av_model_map_region{0:.0f}'.format(region) + \
                       '.{0:s}'.format(figure_type)

        filename = 'av_map'
        filename = figure_dir + 'maps/' + filename + '.' + figure_type
        print('\nSaving Av model image to \n' + filename)

        plot_av_image(av_image=av_image,
                       header=av_header,
                       limits=[15*(5+20./60), 15*(2+30./60.), 17, 38.5],
                       limits_type='wcs',
                       regions=props['regions'],
                       props=props,
                       av_vlimits=(0,15.5),
                       filename=filename,
                       show=False)

        if 0:
            filename = 'av_nhi_map'
            filename = figure_dir + 'maps/' + filename + '.' + figure_type
            print('\nSaving NHI + Av maps to \n' + filename)
            plot_nhi_image(nhi_image=nhi_image,
                           header=av_header,
                           av_image=av_image,
                           limits=props['plot_limit']['wcs'],
                           limits_type='wcs',
                           regions=props['regions'],
                           props=props,
                           hi_vlimits=(0,20),
                           av_vlimits=(0,15.5),
                           #av_vlimits=(0.1,30),
                           filename=filename,
                           show=False)
def main():

    import grid
    import numpy as np
    from os import system, path
    import mygeometry as myg
    from mycoords import make_velocity_axis
    import json

    # parameters used in script
    # -------------------------
    # wedge should be a few tens of pc.
    # D = 300 pc
    # res = 5'
    # d/pix = 0.43 pc/pix
    wedge_angle = 40.0  # degrees
    wedge_radius = 10.0 / 0.43  # pixels,
    core_rel_pos = 0.15  # fraction of radius core is within wedge
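
    # Sketch of the pc-per-pixel figure quoted above (small-angle
    # approximation with the assumed D = 300 pc and 5 arcmin resolution):
    if 0:
        pc_per_pix = 300.0 * np.radians(5.0 / 60.0)  # ~0.44 pc/pix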

    # Which cores to include in analysis?
    cores_to_keep = ('L1495', 'L1495A', 'B213', 'L1498', 'B215')

    # Name of property files
    global_property_file = 'taurus_global_properties.txt'

    # define directory locations
    output_dir = '/d/bip3/ezbc/taurus/data/python_output/nhi_av/'
    figure_dir = '/d/bip3/ezbc/taurus/figures/maps/'
    av_dir = '/d/bip3/ezbc/taurus/data/av/'
    hi_dir = '/d/bip3/ezbc/taurus/data/hi/'
    co_dir = '/d/bip3/ezbc/taurus/data/co/'
    core_dir = '/d/bip3/ezbc/taurus/data/python_output/core_properties/'
    region_dir = '/d/bip3/ezbc/taurus/data/python_output/ds9_regions/'
    property_dir = '/d/bip3/ezbc/taurus/data/python_output/'
    multicloud_region_dir = \
            '/d/bip3/ezbc/multicloud/data/python_output/'

    # load Planck Av and GALFA HI images, on same grid
    av_data, av_header = load_fits(av_dir + \
                'taurus_av_planck_5arcmin.fits',
            return_header=True)

    av_error_data, av_error_header = load_fits(av_dir + \
                'taurus_av_error_planck_5arcmin.fits',
            return_header=True)

    # av_data[dec, ra], axes are switched

    # define core properties
    with open(core_dir + 'taurus_core_properties.txt', 'r') as f:
        cores = json.load(f)

    cores = convert_core_coordinates(cores, av_header)

    cores = load_ds9_core_region(cores,
                                 filename_base=region_dir +
                                 'taurus_av_poly_cores',
                                 header=av_header)

    # Open core properties
    with open(core_dir + 'taurus_core_properties.txt', 'w') as f:
        json.dump(cores, f)

    # Open file with WCS region limits
    with open(property_dir + global_property_file, 'r') as f:
        global_props = json.load(f)

    global_props = convert_limit_coordinates(global_props, header=av_header)

    # Open cloud boundaries
    global_props = load_ds9_cloud_region(global_props,
                            filename=multicloud_region_dir + \
                                     'multicloud_divisions.reg',
                            header=av_header)

    region_vertices = global_props['regions']['taurus1']['poly_verts']['pixel']

    # block off region
    region_mask = np.logical_not(myg.get_polygon_mask(av_data,
                                                      region_vertices))

    # Remove cores
    cores_to_remove = []
    for core in cores:
        if core not in cores_to_keep:
            cores_to_remove.append(core)
    for core_to_remove in cores_to_remove:
        del cores[core_to_remove]

    # Plot
    figure_types = ['pdf', 'png']
    for figure_type in figure_types:
        plot_av_image(
            av_image=av_data,
            header=av_header,
            boxes=True,
            cores=cores,
            #region_boundary=region_vertices,
            savedir=figure_dir,
            limits=global_props['region_limit']['pixel'],
            filename='taurus_av_cores_map.' + figure_type,
            show=0)
Example #21
def main():

    import grid
    import numpy as np
    from os import system,path
    import mygeometry as myg
    from mycoords import make_velocity_axis
    import json

    # parameters used in script
    box_width = 4 # in pixels
    box_height = 10 # in pixels
    #box_width = 7 # in pixels
    #box_height = 22 # in pixels
    box_width = 8 # in pixels
    box_height = 40 # in pixels
    angle_res = 10.0 # degrees

    # define directory locations
    output_dir = '/d/bip3/ezbc/taurus/data/python_output/nhi_av/'
    figure_dir = '/d/bip3/ezbc/taurus/figures/maps/'
    av_dir = '/d/bip3/ezbc/taurus/data/av/'
    hi_dir = '/d/bip3/ezbc/taurus/data/hi/'
    co_dir = '/d/bip3/ezbc/taurus/data/co/'
    core_dir = '/d/bip3/ezbc/taurus/data/python_output/core_properties/'
    region_dir = '/d/bip3/ezbc/taurus/data/python_output/ds9_regions/'

    # load Planck Av and GALFA HI images, on same grid
    av_data, av_header = load_fits(av_dir + \
                'taurus_av_planck_5arcmin.fits',
            return_header=True)

    av_error_data, av_error_header = load_fits(av_dir + \
                'taurus_av_error_planck_5arcmin.fits',
            return_header=True)

    # av_data[dec, ra], axes are switched

    # define core properties
    with open(core_dir + 'taurus_core_properties.txt', 'r') as f:
        cores = json.load(f)

    cores = convert_core_coordinates(cores, av_header)

    cores = load_ds9_region(cores,
            filename_base = region_dir + 'taurus_av_boxes_',
            header = av_header)

    av_image_list = []
    av_image_error_list = []
    core_name_list = []

    box_dict = derive_ideal_box(av_data, cores, box_width, box_height,
            core_rel_pos=0.1, angle_res=angle_res, av_image_error=av_error_data)

    for core in cores:
        cores[core]['box_vertices_rotated'] = \
            box_dict[core]['box_vertices_rotated'].tolist()
        try:
            cores[core]['center_pixel'] = cores[core]['center_pixel'].tolist()
        except AttributeError:
            cores[core]['center_pixel'] = cores[core]['center_pixel']

    with open(core_dir + 'taurus_core_properties.txt', 'w') as f:
        json.dump(cores, f)

    for core in cores:
        mask = myg.get_polygon_mask(av_data,
                cores[core]['box_vertices_rotated'])

        av_data_mask = np.copy(av_data)
        av_data_mask[mask == 0] = np.NaN

    # Plot
    figure_types = ['pdf', 'png']
    for figure_type in figure_types:
        plot_av_image(av_image=av_data,
                      header=av_header,
                      boxes=True,
                      cores=cores,
                      limits=[50,37,200,160],
                      title=r'taurus: A$_V$ map with core boxed-regions.',
                      savedir=figure_dir,
                      filename='taurus_av_cores_map.%s' % \
                      figure_type,
                      show=0)

        # For a poster
        plot_av_image(av_image=av_data,
                      header=av_header,
                      boxes=True,
                      cores=cores,
                      limits=[50,37,200,160],
                      #title=r'taurus: A$_V$ map with core boxed-regions.',
                      savedir=figure_dir,
                      filename='taurus_av_cores_map_poster.%s' % \
                      figure_type,
                      show=0)
def main():

    import grid
    import numpy as np
    import numpy
    from os import system, path
    import mygeometry as myg
    from mycoords import make_velocity_axis
    import json
    from myimage_analysis import calculate_nhi, calculate_noise_cube, \
        calculate_sd, calculate_nh2, calculate_nh2_error

    # parameters used in script
    # -------------------------
    # HI velocity integration range
    # Determine HI integration velocity by CO or correlation with Av?
    hi_av_correlation = True
    velocity_centers = np.arange(-15, 30, 4)
    velocity_widths = np.arange(1, 80, 4)

    # Which likelihood fits should be performed?
    core_correlation = 0
    global_correlation = 1

    # Name of property files results are written to
    global_property_file = 'california_global_properties.txt'
    core_property_file = 'california_core_properties.txt'

    # Threshold of Av below which we expect only atomic gas, in mag
    av_threshold = 100

    # Check if likelihood file already written, rewrite?
    likelihood_filename = 'california_nhi_av_likelihoods'
    clobber = 0
    hi_vel_range_conf = 0.50

    # Name of noise cube
    noise_cube_filename = 'california_hi_galfa_cube_regrid_planckres_noise.fits'

    # define directory locations
    # --------------------------
    output_dir = '/d/bip3/ezbc/california/data/python_output/nhi_av/'
    figure_dir = '/d/bip3/ezbc/california/figures/hi_velocity_range/'
    av_dir = '/d/bip3/ezbc/california/data/av/'
    hi_dir = '/d/bip3/ezbc/california/data/hi/'
    co_dir = '/d/bip3/ezbc/california/data/co/'
    core_dir = '/d/bip3/ezbc/california/data/python_output/core_properties/'
    property_dir = '/d/bip3/ezbc/california/data/python_output/'
    region_dir = '/d/bip3/ezbc/california/data/python_output/ds9_regions/'
    likelihood_dir = '/d/bip3/ezbc/california/data/python_output/nhi_av/'

    # load Planck Av and GALFA HI images, on same grid
    av_data_planck, av_header = load_fits(av_dir + \
                'california_av_planck_5arcmin.fits',
            return_header=True)

    av_error_data_planck, av_error_header = load_fits(av_dir + \
                'california_av_error_planck_5arcmin.fits',
            return_header=True)

    hi_data, h = load_fits(hi_dir + \
                'california_hi_galfa_cube_regrid_planckres.fits',
            return_header=True)

    # make the velocity axis
    velocity_axis = make_velocity_axis(h)

    # Plot NHI vs. Av for a given velocity range
    if not path.isfile(hi_dir + noise_cube_filename):
        noise_cube = calculate_noise_cube(cube=hi_data,
                                          velocity_axis=velocity_axis,
                                          velocity_noise_range=[90, 110],
                                          header=h,
                                          Tsys=30.,
                                          filename=hi_dir +
                                          noise_cube_filename)
    else:
        noise_cube, noise_header = load_fits(hi_dir + noise_cube_filename,
                                             return_header=True)

    # define core properties
    with open(core_dir + core_property_file, 'r') as f:
        cores = json.load(f)
    with open(property_dir + global_property_file, 'r') as f:
        global_props = json.load(f)

    dgr = global_props['dust2gas_ratio']['value']
    dgr = 1.22e-1  # override the stored best-fit DGR with a fixed value

    cores = convert_core_coordinates(cores, h)

    cores = load_ds9_region(cores,
                            filename_base=region_dir + 'california_av_boxes_',
                            header=h)

    if core_correlation:
        for core in cores:
            print('\nCalculating for core %s' % core)

            # Grab the mask
            mask = myg.get_polygon_mask(av_data_planck,
                                        cores[core]['box_vertices_rotated'])

            indices = ((mask == 0) &\
                       (av_data_planck < av_threshold))

            hi_data_sub = np.copy(hi_data[:, indices])
            noise_cube_sub = np.copy(noise_cube[:, indices])
            av_data_sub = np.copy(av_data_planck[indices])
            av_error_data_sub = np.copy(av_error_data_planck[indices])

            # Define filename for plotting results
            results_filename = figure_dir + 'california_logL_%s.png' % core

            # Correlate each core region Av and N(HI) for velocity ranges
            vel_range_confint, correlations, center_corr, width_corr = \
                    correlate_hi_av(hi_cube=hi_data_sub,
                                    hi_velocity_axis=velocity_axis,
                                    hi_noise_cube=noise_cube_sub,
                                    av_image=av_data_sub,
                                    av_image_error=av_error_data_sub,
                                    dgr=dgr,
                                    velocity_centers=velocity_centers,
                                    velocity_widths=velocity_widths,
                                    return_correlations=True,
                                    plot_results=True,
                                    results_filename=results_filename,
                                    likelihood_filename=likelihood_dir + \
                                            likelihood_filename + \
                                            '{0:s}.fits'.format(core),
                                    clobber=clobber,
                                    hi_vel_range_conf=hi_vel_range_conf)

            print('HI velocity integration range:')
            print('%.1f to %.1f km/s' %
                  (vel_range_confint[0], vel_range_confint[1]))

            cores[core]['hi_velocity_range'] = vel_range_confint[0:2]
            cores[core]['hi_velocity_range_error'] = vel_range_confint[2:]
            cores[core]['center_corr'] = center_corr.tolist()
            cores[core]['width_corr'] = width_corr.tolist()
            cores[core]['vel_centers'] = velocity_centers.tolist()
            cores[core]['vel_widths'] = velocity_widths.tolist()

        with open(core_dir + core_property_file, 'w') as f:
            json.dump(cores, f)

    if global_correlation:
        print('\nCalculating correlations globally')

        indices = ((av_data_planck < av_threshold))

        hi_data_sub = np.copy(hi_data[:, indices])
        noise_cube_sub = np.copy(noise_cube[:, indices])
        av_data_sub = np.copy(av_data_planck[indices])
        av_error_data_sub = np.copy(av_error_data_planck[indices])

        # Define filename for plotting results
        results_filename = figure_dir + 'california_logL_global.png'

        # Correlate the global Av and N(HI) over the velocity ranges
        vel_range_confint, correlations, center_corr, width_corr = \
                correlate_hi_av(hi_cube=hi_data_sub,
                                hi_velocity_axis=velocity_axis,
                                hi_noise_cube=noise_cube_sub,
                                av_image=av_data_sub,
                                av_image_error=av_error_data_sub,
                                dgr=dgr,
                                velocity_centers=velocity_centers,
                                velocity_widths=velocity_widths,
                                return_correlations=True,
                                plot_results=True,
                                results_filename=results_filename,
                                likelihood_filename=likelihood_dir + \
                                        likelihood_filename + '_global.fits',
                                clobber=clobber,
                                hi_vel_range_conf=hi_vel_range_conf)
        '''
        fit_hi_vel_range(guesses=(0, 30),
                         av_image=av_data_sub,
                         av_image_error=av_error_data_sub,
                         hi_cube=hi_data_sub,
                         hi_velocity_axis=velocity_axis,
                         hi_noise_cube=noise_cube_sub,
                         dgr=dgr)
        '''

        print('HI velocity integration range:')
        print('%.1f to %.1f km/s' %
              (vel_range_confint[0], vel_range_confint[1]))

        global_props['hi_velocity_range'] = vel_range_confint[0:2]
        global_props['hi_velocity_range_error'] = vel_range_confint[2:]
        global_props['hi_velocity_range_conf'] = hi_vel_range_conf
        global_props['center_corr'] = center_corr.tolist()
        global_props['width_corr'] = width_corr.tolist()
        global_props['vel_centers'] = velocity_centers.tolist()
        global_props['vel_widths'] = velocity_widths.tolist()

        with open(property_dir + global_property_file, 'w') as f:
            json.dump(global_props, f)
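# A minimal sketch (hypothetical helper, not used by the scripts above) of how
# the properties written to the JSON file can be read back; it assumes only
# the keys this script writes, 'hi_velocity_range' and
# 'hi_velocity_range_error'.
def read_hi_velocity_range(property_filename):

    import json

    # load the global property file written above
    with open(property_filename, 'r') as f:
        props = json.load(f)

    vel_range = props['hi_velocity_range']              # [v_low, v_high] km/s
    vel_range_error = props['hi_velocity_range_error']  # confidence errors

    return vel_range, vel_range_error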
def main():

    import grid
    import numpy as np
    from os import system,path
    import myclumpfinder as clump_finder
    import mygeometry as myg
    import json

    # define directory locations
    output_dir = '/d/bip3/ezbc/california/data/python_output/nhi_av/'
    figure_dir = '/d/bip3/ezbc/california/figures/cores/'
    av_dir = '/d/bip3/ezbc/california/data/av/'
    hi_dir = '/d/bip3/ezbc/california/data/galfa/'
    region_dir = '/d/bip3/ezbc/california/data/python_output/ds9_regions/'
    core_dir = '/d/bip3/ezbc/california/data/python_output/core_properties/'

    # load the Planck Av image
    av_image, h = load_fits(av_dir + 'california_av_planck_5arcmin.fits',
            return_header=True)

    # define core properties
    with open(core_dir + 'california_core_properties.txt', 'r') as f:
        cores = json.load(f)

    cores = convert_core_coordinates(cores, h)

    cores = load_ds9_region(cores,
            filename_base = region_dir + 'california_av_boxes_',
            header = h)

    if True:
        limits = [0, 20, -1, 25] # x-linear limits

        # Initialize fit params
        A_p = []
        pho_c = []
        R_flat = []
        p = []

        # Initialize data lists
        radii_pc_list = []
        profile_list = []
        profile_std_list = []
        profile_fit_params_list = []
        core_names_list = []

        for core in cores:
            print('Calculating for core %s' % core)

            # Grab the mask from the DS9 regions
            xy = cores[core]['box_center_pix']
            box_width = cores[core]['box_width']
            box_height = cores[core]['box_height']
            box_angle = cores[core]['box_angle']
            mask = myg.get_rectangular_mask(av_image,
                    xy[0], xy[1],
                    width = box_width,
                    height = box_height,
                    angle = box_angle)
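            # (the polygon mask below supersedes this rectangular mask)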

            mask = myg.get_polygon_mask(av_image,
                    cores[core]['box_vertices_rotated'])

            # Get indices where there is no mask, and extract those pixels
            indices = np.where(mask == 1)

            av_image_sub = np.copy(av_image)
            #av_image_sub[mask == 0] = np.NaN
            av_image_sub = np.ma.array(av_image, mask=(mask == 0))

            # to check the positions of the boxes, uncomment the following
            #import matplotlib.pyplot as plt
            #plt.clf()
            #plt.imshow(np.ma.array(av_image_sub, mask=temp_mask))
            #plt.savefig('/usr/users/ezbc/Desktop/map%s.png' % core)
            #plt.clf()

            pix = cores[core]['center_pixel']

            # extract radial profile weighted by SNR
            radii, profile = get_radial_profile(av_image, binsize=3,
                    center=pix,
                    weights=av_image / 0.3,
                    mask=mask
                    )

            # extract std
            radii, profile_std = get_radial_profile(av_image_sub, binsize=3,
                    center=pix,
                    stddev=True,
                    weights=av_image_sub / 0.3,
                    #mask=mask
                    )

            # convert radii from pixels to parsecs
            radii_arcsec = radii * h['CDELT2'] * 60 * 60. # radii in arcseconds
            radii_pc = radii_arcsec * 300 / 206265. # radii in parsecs, assuming d = 300 pc

            # extract radii from within the limits
            indices = np.where((radii_pc < limits[1]) & \
                               (profile == profile) & \
                               (profile_std == profile_std))
            radii_pc = radii_pc[indices]
            profile = profile[indices]
            profile_std = profile_std[indices]

            # fit profile with power function
            def function(radius, A_p, pho_c, R_flat, p):
                return A_p * pho_c * R_flat / \
                        (1 + (radius / R_flat)**2)**(p/2. - 0.5)
                #return A_p * radius**p

            profile_fit_params = fit_profile(radii_pc, profile, function,
                    sigma=profile / profile_std)[0]

            # plot the radial profile
            figure_types = ['.pdf', '.png']
            for figure_type in figure_types:
                plot_profile(radii_pc, profile,
                        profile_errors = profile_std,
                        limits = limits,
                        profile_fit_params = profile_fit_params,
                        profile_fit_function = function,
                        savedir=figure_dir + 'individual_cores/',
                        filename = 'california_profile_av_' + core + figure_type,
                        title=r'Radial A$_V$ Profile of California Core ' + core,
                        show = False)

            A_p.append(profile_fit_params[0])
            pho_c.append(profile_fit_params[1])
            R_flat.append(profile_fit_params[2])
            p.append(profile_fit_params[3])

            radii_pc_list.append(radii_pc)
            profile_list.append(profile)
            profile_std_list.append(profile_std)
            profile_fit_params_list.append(profile_fit_params)
            core_names_list.append(core)

        for figure_type in figure_types:
            plot_profile_grid(radii_pc_list, profile_list,
                    profile_errors_list = profile_std_list,
                    limits = limits,
                    profile_fit_params_list = profile_fit_params_list,
                    profile_fit_function = function,
                    savedir=figure_dir + 'panel_cores/',
                    filename = 'california_profile_av_cores_planck' + figure_type,
                    title=r'Radial A$_V$ Profiles of California Cores',
                    core_names=core_names_list,
                    show = False)


        print_fit_params(cores, A_p, pho_c, R_flat, p,
                filename=output_dir + 'core_profile_fit_data.txt')

        print_fit_params(cores, A_p, pho_c, R_flat, p)
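# A self-contained sketch of the attenuated power-law profile fit performed
# above, using synthetic data and scipy.optimize.curve_fit in place of the
# project's fit_profile helper. Parameter names mirror the source (A_p, pho_c,
# R_flat, p); the "true" values and noise level are illustrative only.
def example_profile_fit():

    import numpy as np
    from scipy.optimize import curve_fit

    def profile_function(radius, A_p, pho_c, R_flat, p):
        # flat inner core of radius R_flat, power-law falloff with index p
        return A_p * pho_c * R_flat / \
                (1 + (radius / R_flat)**2)**(p / 2. - 0.5)

    radii_pc = np.linspace(0.1, 20, 50)        # radii in parsecs
    true_params = (1.0, 5.0, 2.0, 3.0)         # A_p, pho_c, R_flat, p
    profile = profile_function(radii_pc, *true_params)
    profile += np.random.normal(0, 0.05, radii_pc.size)

    # fit the noisy profile, starting from rough initial guesses
    fit_params, fit_cov = curve_fit(profile_function, radii_pc, profile,
                                    p0=(1., 1., 1., 2.))

    return fit_params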
def main(dgr=None,
         vel_range=None,
         vel_range_type='single',
         region=None,
         av_data_type='planck'):
    ''' Executes script.

    Parameters
    ----------
    dgr : float
        If None, pulls best-fit value from properties.
    vel_range : tuple
        If None, pulls best-fit value from properties.
    '''

    # import external modules
    import pyfits as pf
    import numpy as np
    from mycoords import make_velocity_axis
    import mygeometry as myg
    from myimage_analysis import calculate_nhi, calculate_noise_cube, \
        calculate_sd, calculate_nh2, calculate_nh2_error
    import json

    # Script parameters
    # -----------------
    # Name of noise cube
    noise_cube_filename = 'perseus_hi_galfa_cube_regrid_planckres_noise'

    # Use Planck dust Av map or Kainulainen 2009 optical extinction Av map?
    # options are 'planck' or 'lee12'
    #av_data_type = 'lee12'
    #av_data_type = 'planck'

    # Region limits used to edit the global properties
    if region == 1:
        region_limit = {
            'wcs': (((5, 10, 0), (19, 0, 0)), ((4, 30, 0), (27, 0, 0))),
            'pixel': ()
        }
    elif region == 2:
        region_limit = {
            'wcs': (((4, 30, 0), (19, 0, 0)), ((3, 50, 0), (29, 0, 0))),
            'pixel': ()
        }
    elif region == 3:
        region_limit = {
            'wcs': (((4, 30, 0), (29, 0, 0)), ((3, 50, 0), (33, 0, 0))),
            'pixel': ()
        }
    else:
        region_limit = None

    # define directory locations
    # --------------------------
    output_dir = '/d/bip3/ezbc/perseus/data/python_output/nhi_av/'
    figure_dir = '/d/bip3/ezbc/perseus/figures/'
    av_dir = '/d/bip3/ezbc/perseus/data/av/'
    hi_dir = '/d/bip3/ezbc/perseus/data/hi/'
    co_dir = '/d/bip3/ezbc/perseus/data/co/'
    core_dir = '/d/bip3/ezbc/perseus/data/python_output/core_properties/'
    property_dir = '/d/bip3/ezbc/perseus/data/python_output/'
    region_dir = '/d/bip3/ezbc/perseus/data/python_output/ds9_regions/'

    # Load Data
    # ---------
    # Load global properties of cloud
    # global properties written from script
    # 'av/perseus_analysis_global_properties.txt'
    prop_file = 'perseus_global_properties'  # _' + av_data_type
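    # NOTE: likelihood_filename and results_filename are not defined earlier
    # in this listing; they must be set before the region suffix is appended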
    if region is not None:
        likelihood_filename += '_region{0:.0f}'.format(region)
        results_filename += '_region{0:.0f}'.format(region)
    with open(property_dir + prop_file + '.txt', 'r') as f:
        props = json.load(f)

    if props['use_binned_image']:
        bin_string = '_bin'
    else:
        bin_string = ''

    # load Planck Av and GALFA HI images, on same grid
    if av_data_type == 'lee12_2mass':
        print('\nLoading Lee+12 data...')
        av_image, av_header = load_fits(av_dir + \
                    'perseus_av_lee12_2mass_regrid_planckres' + bin_string + \
                    '.fits',
                return_header=True)
        av_image_error = 0.1 * np.ones(av_image.shape)
    elif av_data_type == 'lee12_iris':
        print('\nLoading Lee+12 data...')
        av_image, av_header = load_fits(av_dir + \
                    'perseus_av_lee12_iris_regrid_planckres' + bin_string + \
                    '.fits',
                return_header=True)
        av_image_error = 0.1 * np.ones(av_image.shape)
    elif av_data_type == 'planck_rad':
        print('\nLoading Planck data...')
        av_image, av_header = load_fits(av_dir + \
                    'perseus_av_planck_radiance_5arcmin' + bin_string + \
                    '.fits',
                return_header=True)
        av_image_error, av_error_header = load_fits(av_dir + \
                    'perseus_av_error_planck_radiance_5arcmin' + bin_string + \
                    '.fits',
                return_header=True)
    else:
        print('\nLoading Planck data...')
        av_image, av_header = load_fits(av_dir + \
                    'perseus_av_planck_5arcmin' + bin_string + '.fits',
                return_header=True)

        av_image_error, av_error_header = load_fits(av_dir + \
                    'perseus_av_error_planck_5arcmin' + bin_string + '.fits',
                return_header=True)

    hi_cube, hi_header = load_fits(hi_dir + \
                'perseus_hi_galfa_cube_regrid_planckres' + bin_string + \
                '.fits',
            return_header=True)

    hi_noise_cube, noise_header = load_fits(hi_dir + noise_cube_filename +
                                            bin_string + '.fits',
                                            return_header=True)

    co_data, co_header = load_fits(co_dir + \
                'perseus_co_cfa_cube_regrid_planckres' + bin_string + '.fits',
            return_header=True)

    if vel_range is not None:
        props['hi_velocity_range'] = vel_range
    else:
        vel_range = props['hi_velocity_range']
    if dgr is not None:
        props['dust2gas_ratio']['value'] = dgr
    else:
        dgr = props['dust2gas_ratio']['value']

    # define core properties
    with open(core_dir + 'perseus_core_properties.txt', 'r') as f:
        cores = json.load(f)

    # make velocity axis for hi cube
    velocity_axis = make_velocity_axis(hi_header)
    # make velocity axis for co cube
    co_velocity_axis = make_velocity_axis(co_header)

    # Write core coordinates in pixels
    cores = convert_core_coordinates(cores, hi_header)

    cores = load_ds9_region(cores,
                            filename_base=region_dir + 'perseus_av_boxes_',
                            header=hi_header)

    # create nhi image
    nhi_image = calculate_nhi(cube=hi_cube,
                              velocity_axis=velocity_axis,
                              velocity_range=vel_range,
                              header=hi_header,
                              noise_cube=hi_noise_cube)

    # create model av map
    av_model = nhi_image * dgr

    # Mask the images based on the Av threshold
    co_data_nonans = np.copy(co_data)
    co_data_nonans[np.isnan(co_data_nonans)] = 0.0
    co_mom0 = np.sum(co_data_nonans, axis=0)
    mask = ((av_image > props['av_threshold']['value']) & \
            (co_mom0 > props['co_threshold']['value']))

    # Derive relevant region
    pix = props['region_limit']['pixel']
    region_vertices = ((pix[1], pix[0]), (pix[1], pix[2]), (pix[3], pix[2]),
                       (pix[3], pix[0]))

    # block off region
    region_mask = myg.get_polygon_mask(av_image, region_vertices)

    print('\nRegion size = ' + \
          '{0:.0f} pix'.format(region_mask[region_mask == 1].size))

    if vel_range_type == 'single':
        print('\nHI velocity integration range:')
        print('%.1f to %.1f km/s' % (vel_range[0], vel_range[1]))
    elif vel_range_type == 'multiple':
        print('\nHI velocity integration ranges:')
        for i in xrange(0, vel_range.shape[0]):
            print('%.1f to %.1f km/s' % (vel_range[i, 0], vel_range[i, 1]))

    print('\nDGR:')
    print('%.2f x 10^-20 cm^2 mag' % (dgr))

    # Get mask and mask images
    mask = np.asarray(props['mask'])

    av_image_masked = np.copy(av_image)
    #av_image_masked[(mask == 1) & (region_mask == 1)] = np.nan
    av_image_masked[mask == 1] = np.nan

    av_error_masked = np.copy(av_image_error)
    #av_image_masked[(mask == 1) & (region_mask == 1)] = np.nan
    av_error_masked[mask == 1] = np.nan

    av_model_masked = np.copy(av_model)
    #av_model_masked[(mask == 1) & (region_mask == 1)] = np.nan
    av_model_masked[mask == 1] = np.nan

    indices = ((np.isnan(av_model_masked)) & \
               (np.isnan(av_image_masked)) & \
               (np.isnan(av_image_error)))

    print('\nTotal number of pixels after masking = ' + str(props['npix']))

    # Create HI spectrum
    hi_cube[hi_cube != hi_cube] = 0
    hi_cube[:, mask == 1] = 0
    hi_spectrum = np.mean(hi_cube, axis=(1, 2))

    # Derive CO spectrum
    co_data[:, region_mask == 1] = 0
    co_data[np.isnan(co_data)] = 0
    co_spectrum = np.mean(co_data, axis=(1, 2))

    # Plot
    figure_types = [
        'png',
    ]  # 'pdf']
    for figure_type in figure_types:
        if region is None:
            if vel_range_type == 'single':
                filename = 'single_vel_range/perseus_av_model_map_' + \
                    av_data_type + '.%s' % figure_type
            elif vel_range_type == 'multiple':
                filename = 'multiple_vel_range/perseus_av_model_map_' + \
                           'dgr{0:.3f}'.format(dgr)
                for i in xrange(0, vel_range.shape[0]):
                    filename += '_{0:.1f}to{1:.1f}kms'.format(
                        vel_range[i, 0], vel_range[i, 1])
                filename += '.%s' % figure_type
        else:
            filename = 'perseus_av_model_map_region{0:.0f}'.format(region) + \
                       '.{0:s}'.format(figure_type)

        print('\nSaving Av model image to \n' + filename)

        plot_av_model(
            av_image=av_image_masked,
            av_model=av_model_masked,
            header=av_header,
            results=props,
            hi_velocity_axis=velocity_axis,
            vel_range=vel_range,
            hi_spectrum=hi_spectrum,
            #hi_limits=[-15, 25, -1, 10],
            hi_limits=[-15, 25, None, None],
            co_spectrum=co_spectrum,
            co_velocity_axis=co_velocity_axis,
            limits=props['plot_limit']['pixel'],
            savedir=figure_dir + 'maps/av_models/',
            filename=filename,
            show=False)

        plot_avmod_vs_av(
            (av_model_masked, ),
            (av_image_masked, ),
            av_errors=(av_error_masked, ),
            #limits=[10**-1, 10**1.9, 10**0, 10**1.7],
            limits=[0, 1.5, 0, 1.5],
            savedir=figure_dir + 'av/',
            gridsize=(10, 10),
            #scale=('log', 'log'),
            #scale=('linear', 'linear'),
            filename='perseus_avmod_vs_av.%s' % figure_type,
            show=False,
            std=0.22,
        )

        plot_power_spectrum(av_image_masked - av_model_masked,
            filename_prefix='perseus_av_resid_power_spectrum_' + \
                            '{0:s}'.format(av_data_type),
            filename_suffix='.{0:s}'.format(figure_type),
            savedir=figure_dir + 'power_spectra/',
            show=False)
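# A small synthetic-data sketch of the model comparison performed above: the
# Av model is N(HI) times the DGR, and residuals are inspected outside the
# mask. All numbers here are made up for illustration.
def example_av_model_residual():

    import numpy as np

    nhi_image = np.random.uniform(1., 10., (50, 50))    # 10^20 cm^-2
    dgr = 1.1e-1                                        # mag per 10^20 cm^-2
    av_image = nhi_image * dgr + np.random.normal(0, 0.1, (50, 50))
    mask = av_image > 1.0                               # mask the bright pixels

    av_model = nhi_image * dgr
    residual = av_image - av_model

    return np.median(residual[~mask])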
Example #25
def main(av_data_type='planck'):

    # Import external modules
    # -----------------------
    import numpy as np
    from os import system,path
    import mygeometry as myg
    from mycoords import make_velocity_axis
    import json
    from myimage_analysis import calculate_nhi, calculate_noise_cube
    #from astropy.io import fits
    import pyfits as fits
    import matplotlib.pyplot as plt

    # Set parameters
    # --------------
    # Check if likelihood file already written, rewrite?
    clobber = 0

    # Confidence of parameter errors
    conf = 0.68
    # Confidence of contour levels
    contour_confs = (0.68, 0.95)

    likelihood_filename = 'taurus_likelihood_{0:s}'.format(av_data_type)
    results_filename = 'taurus_likelihood_{0:s}'.format(av_data_type)

    # Name of HI noise cube
    noise_cube_filename = 'taurus_hi_galfa_cube_regrid_planckres_noise'

    # Threshold for converging DGR
    threshold_delta_dgr = 0.0005

    # Number of white noise standard deviations with which to fit the
    # residuals in iterative masking
    resid_width_scale = 2.0

    # Name of property files results are written to
    global_property_file = 'taurus_global_properties.txt'

    # Likelihood axis resolutions
    vel_widths = np.arange(1, 30, 2*0.16667)
    dgrs = np.arange(0.01, 0.2, 1e-3)

    # Velocity range over which to integrate HI for deriving the mask
    vel_range = (-10, 10)

    # Use binned image?
    use_binned_image = False

    # define directory locations
    # --------------------------
    output_dir = '/d/bip3/ezbc/taurus/data/python_output/nhi_av/'
    figure_dir = \
        '/d/bip3/ezbc/taurus/figures/'
    av_dir = '/d/bip3/ezbc/taurus/data/av/'
    hi_dir = '/d/bip3/ezbc/taurus/data/hi/'
    co_dir = '/d/bip3/ezbc/taurus/data/co/'
    core_dir = '/d/bip3/ezbc/taurus/data/python_output/core_properties/'
    property_dir = '/d/bip3/ezbc/taurus/data/python_output/'
    region_dir = '/d/bip3/ezbc/taurus/data/python_output/ds9_regions/'
    likelihood_dir = '/d/bip3/ezbc/taurus/data/python_output/nhi_av/'

    # Load data
    # ---------
    if use_binned_image:
        bin_string = '_bin'
    else:
        bin_string = ''
    noise_cube_filename += bin_string

    av_data, av_header = fits.getdata(av_dir + \
                            'taurus_av_planck_5arcmin' + bin_string + '.fits',
                                      header=True)

    av_data_error, av_error_header = fits.getdata(av_dir + \
                'taurus_av_error_planck_5arcmin' + bin_string + '.fits',
            header=True)
    #av_data_error = (100 * 0.025**2) * np.ones(av_data_error.shape)
    #av_data_error *= 10.0

    hi_data, hi_header = fits.getdata(hi_dir + \
                'taurus_hi_galfa_cube_regrid_planckres' + bin_string + '.fits',
            header=True)

    # Load global properties
    with open(property_dir + global_property_file, 'r') as f:
        global_props = json.load(f)

    # Prepare data products
    # ---------------------
    # Change WCS coords to pixel coords of images
    global_props = convert_limit_coordinates(global_props, header=av_header)

    # make the velocity axes
    hi_vel_axis = make_velocity_axis(hi_header)

    # Load the HI noise cube if it exists, else make it
    if not path.isfile(hi_dir + noise_cube_filename + '.fits'):
        noise_cube = calculate_noise_cube(cube=hi_data,
                velocity_axis=hi_vel_axis,
                velocity_noise_range=[90,110], header=hi_header, Tsys=30.,
                filename=hi_dir + noise_cube_filename + '.fits')
    else:
        noise_cube, noise_header = fits.getdata(hi_dir + noise_cube_filename + \
                '.fits', header=True)

    # Derive relevant region
    pix = global_props['region_limit']['pixel']
    region_vertices = ((pix[1], pix[0]),
                       (pix[1], pix[2]),
                       (pix[3], pix[2]),
                       (pix[3], pix[0])
                       )

    # block off region
    region_mask = myg.get_polygon_mask(av_data, region_vertices)

    print('\nRegion size = ' + \
          '{0:.0f} pix'.format(region_mask[region_mask == 1].size))

    # Derive mask by excluding correlated residuals
    # ---------------------------------------------
    nhi_image = calculate_nhi(cube=hi_data,
                              velocity_axis=hi_vel_axis,
                              velocity_range=vel_range,
                              return_nhi_error=False,
                              )

    av_model, mask, dgr = iterate_residual_masking(
                             nhi_image=nhi_image,
                             av_data=av_data,
                             av_data_error=av_data_error,
                             vel_range=vel_range,
                             threshold_delta_dgr=threshold_delta_dgr,
                             resid_width_scale=resid_width_scale,
                             plot_progress=False
                             )

    # Combine region mask with new mask
    mask += np.logical_not(region_mask)

    # Derive center velocity from hi
    # ------------------------------
    hi_spectrum = np.sum(hi_data[:, ~mask], axis=(1))
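    # the velocity center is the spectrum-weighted mean velocity; the weights
    # are the spectrum squared to emphasize channels near the line peak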
    vel_center = np.array((np.average(hi_vel_axis,
                           weights=hi_spectrum**2),))[0]
    print('\nVelocity center from HI = ' +\
            '{0:.2f} km/s'.format(vel_center))

    # Perform likelihood calculation of masked images
    # -----------------------------------------------
    # Define filename for plotting results
    results_filename = figure_dir + results_filename

    results = calc_likelihoods(
                     hi_cube=hi_data[:, ~mask],
                     hi_vel_axis=hi_vel_axis,
                     av_image=av_data[~mask],
                     av_image_error=av_data_error[~mask],
                     vel_center=vel_center,
                     vel_widths=vel_widths,
                     dgrs=dgrs,
                     results_filename='',
                     return_likelihoods=True,
                     likelihood_filename=None,
                     clobber=False,
                     conf=conf,
                     )

    # Unpack output of likelihood calculation
    (vel_range_confint, width_confint, dgr_confint, likelihoods,
            width_likelihood, dgr_likelihood, width_max, dgr_max,
            vel_range_max) = results

    print('\nHI velocity integration range:')
    print('%.1f to %.1f km/s' % (vel_range_confint[0],
                                 vel_range_confint[1]))
    print('\nDGR:')
    print('%.1f x 10^-20 cm^2 mag' % (dgr_confint[0]))

    # Calculate chi^2 for the best-fit models
    # ----------------------------------
    nhi_image_temp, nhi_image_error = \
            calculate_nhi(cube=hi_data,
                velocity_axis=hi_vel_axis,
                velocity_range=vel_range_max,
                noise_cube=noise_cube,
                return_nhi_error=True)
    av_image_model = nhi_image_temp * dgr_max
    # avoid NaNs
    indices = ((av_image_model == av_image_model) & \
               (av_data == av_data))
    # add nan locations to the mask
    mask[~indices] = 1

    # count number of pixels used in analysis
    npix = mask[~mask].size

    # finally calculate chi^2
    chisq = np.sum((av_data[~mask] - av_image_model[~mask])**2 / \
            av_data_error[~mask]**2) / av_data[~mask].size

    print('\nTotal number of pixels in analysis, after masking = ' + \
            '{0:.0f}'.format(npix))

    print('\nReduced chi^2 = {0:.1f}'.format(chisq))

    # Write results to global properties
    global_props['dust2gas_ratio'] = {}
    global_props['dust2gas_ratio_error'] = {}
    global_props['hi_velocity_width'] = {}
    global_props['hi_velocity_width_error'] = {}
    global_props['dust2gas_ratio_max'] = {}
    global_props['hi_velocity_center_max'] = {}
    global_props['hi_velocity_width_max'] = {}
    global_props['hi_velocity_range_max'] =  {}
    global_props['av_threshold'] = {}
    global_props['co_threshold'] = {}
    global_props['hi_velocity_width']['value'] = width_confint[0]
    global_props['hi_velocity_width']['unit'] = 'km/s'
    global_props['hi_velocity_width_error']['value'] = width_confint[1:]
    global_props['hi_velocity_width_error']['unit'] = 'km/s'
    global_props['hi_velocity_range'] = vel_range_confint[0:2]
    global_props['hi_velocity_range_error'] = vel_range_confint[2:]
    global_props['dust2gas_ratio']['value'] = dgr_confint[0]
    global_props['dust2gas_ratio_error']['value'] = dgr_confint[1:]
    global_props['dust2gas_ratio_max']['value'] = dgr_max
    global_props['hi_velocity_center_max']['value'] = vel_center
    global_props['hi_velocity_width_max']['value'] = width_max
    global_props['hi_velocity_range_max']['value'] = vel_range_max
    global_props['hi_velocity_range_conf'] = conf
    global_props['width_likelihood'] = width_likelihood.tolist()
    global_props['dgr_likelihood'] = dgr_likelihood.tolist()
    global_props['vel_centers'] = [vel_center,]
    global_props['vel_widths'] = vel_widths.tolist()
    global_props['dgrs'] = dgrs.tolist()
    global_props['likelihoods'] = likelihoods.tolist()
    global_props['av_threshold']['value'] = None
    global_props['av_threshold']['unit'] = 'mag'
    global_props['co_threshold']['value'] = None
    global_props['co_threshold']['unit'] = 'K km/s'
    global_props['chisq'] = chisq
    global_props['npix'] = npix
    global_props['mask'] = mask.tolist()

    with open(property_dir + global_property_file, 'w') as f:
        json.dump(global_props, f)

    # Plot likelihood space
    plot_likelihoods_hist(global_props,
                          plot_axes=('widths', 'dgrs'),
                          show=0,
                          returnimage=False,
                          filename=results_filename + '_wd.png',
                          contour_confs=contour_confs)

    plt.clf(); plt.close()
    nhi_image_copy = np.copy(nhi_image)
    nhi_image_copy[mask] = np.nan
    av_image_copy = np.copy(av_data)
    resid_image = av_image_copy - nhi_image_copy * dgr
    plt.imshow(resid_image, origin='lower')
    plt.title(r'$A_V$ Data - Model')
    plt.colorbar()
    plt.show()
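# The reduced chi-squared statistic computed above, factored into a small
# helper and demonstrated on synthetic arrays; as in the script, it divides by
# the number of unmasked pixels rather than the formal degrees of freedom.
def example_reduced_chisq():

    import numpy as np

    def reduced_chisq(data, model, error):
        # chi^2 per pixel between data and model, weighted by the data errors
        return np.sum((data - model)**2 / error**2) / data.size

    data = np.random.normal(1.0, 0.1, 1000)
    model = np.ones(data.size)
    error = 0.1 * np.ones(data.size)

    return reduced_chisq(data, model, error)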
def run_mc_simulations(core_dict, wcs_header, temp_data, temp_error_data,
        beta_data, beta_error_data, N_mc=10):

    from myscience import calc_radiation_field

    cloud_props = {}
    for core_name in core_dict:
        # load cloud regions
        core_dict = add_cloud_region(core_dict)
        vertices_wcs = core_dict[core_name]['cloud_region_vertices'].T

        # Format vertices to be 2 x N array
        #vertices_wcs = np.array((vertices_wcs[0], vertices_wcs[1]))

        # Make an FK5 (RA/Dec) sky coordinates object from the vertices
        coords_fk5 = SkyCoord(vertices_wcs[0] * u.deg,
                              vertices_wcs[1] * u.deg,
                              frame='fk5',
                              )

        # convert to pixel
        coords_pixel = np.array(coords_fk5.to_pixel(wcs_header))

        # reorder vertices to (y, x) pixel convention
        vertices_pix = np.array((coords_pixel[1], coords_pixel[0])).T

        core_dict[core_name]['cloud_region_vertices_pix'] = vertices_pix

        # Mask pixels outside of the region
        region_mask = np.logical_not(myg.get_polygon_mask(temp_data,
                                                          vertices_pix))
        core_dict[core_name]['cloud_region_mask'] = region_mask

        # Grab the temperatures
        core_dict[core_name]['dust_temps'] = temp_data[~region_mask]
        core_dict[core_name]['dust_temp_errors'] = \
            temp_error_data[~region_mask]

        # adjust vertices to get errors on mean T_dust
        cloud = core_dict[core_name]['cloud']
        temp_mc = np.empty(N_mc)
        temp_error_mc = np.empty(N_mc)
        beta_mc = np.empty(N_mc)
        beta_error_mc = np.empty(N_mc)
        rad_mc = np.empty(N_mc)
        rad_error_mc = np.empty(N_mc)
        if cloud not in cloud_props:
            for j in xrange(N_mc):
                if j != 0:
                    new_vertices_wcs = vertices_wcs + \
                                       np.random.normal(scale=1.0,
                                                        size=vertices_wcs.shape)
                else:
                    new_vertices_wcs = vertices_wcs

                # Make an FK5 (RA/Dec) sky coordinates object from the vertices
                coords_fk5 = SkyCoord(new_vertices_wcs[0] * u.deg,
                                      new_vertices_wcs[1] * u.deg,
                                      frame='fk5',
                                      )

                # convert to pixel
                coords_pixel = np.array(coords_fk5.to_pixel(wcs_header))

                # reorder vertices to (y, x) pixel convention
                vertices_pix = np.array((coords_pixel[1],
                                         coords_pixel[0])).T

                # Mask pixels outside of the region
                region_mask = \
                        np.logical_not(myg.get_polygon_mask(temp_data,
                                                            vertices_pix))

                # Get the region's temperature
                if j == 0:
                    temps = temp_data[~region_mask]
                    betas = beta_data[~region_mask]
                    rads = calc_radiation_field(temps,
                                             beta=betas,
                                             )

                # simulate new observation of temperature and beta
                temp_sim = temp_data + np.random.normal(0,
                                                        scale=temp_error_data,)
                beta_sim = beta_data + np.random.normal(0,
                                                        scale=beta_error_data,)

                # Calculate the radiation field
                # -----------------------------
                rad_field = \
                    calc_radiation_field(temp_sim,
                                         beta=beta_sim,
                                         )

                # Grab the median values of temp, beta, and rad field
                temp_mc[j] = np.median(temp_sim[~region_mask])
                beta_mc[j] = np.median(beta_sim[~region_mask])
                rad_mc[j] = np.median(rad_field[~region_mask])

            # Calculate average temp
            #core_dict[core_name]['dust_temp_median'] = \
            #    np.nanmean(temp_data[~region_mask])
            #core_dict[core_name]['dust_temp_median_error'] = \
            #    np.sqrt(np.nansum(temp_error_data[~region_mask]**2)) / \
            #        temp_error_data[~region_mask].size
            dust_temp_median, mc_error = mystats.calc_cdf_error(temp_mc)
            dust_temp_median_error = np.mean(mc_error)
            dust_beta_median, mc_error = mystats.calc_cdf_error(beta_mc)
            dust_beta_median_error = np.mean(mc_error)
            rad_field_draine_median, mc_error = mystats.calc_cdf_error(rad_mc)
            rad_field_draine_median_error = np.mean(mc_error)

            # calculate habing field from draine:
            rad_field_habing_median = rad_field_draine_median * 1.71
            rad_field_habing_median_error = rad_field_draine_median_error * 1.71
            rad_field_mathis_median = rad_field_draine_median * 1.48
            rad_field_mathis_median_error = rad_field_draine_median_error * 1.48


            # write results to cloud
            cloud_props[cloud] = \
                    {
                     'dust_temp_median': dust_temp_median,
                     'dust_temp_median_error': dust_temp_median_error,
                     'dust_temps': temps,
                     'dust_beta_median': dust_beta_median,
                     'dust_beta_median_error': dust_beta_median_error,
                     'dust_betas': betas,
                     'rad_field_draine_median': rad_field_draine_median,
                     'rad_field_draine_median_error': \
                        rad_field_draine_median_error,
                     'rad_field_habing_median': rad_field_habing_median,
                     'rad_field_habing_median_error': \
                        rad_field_habing_median_error,
                     'rad_field_mathis_median': rad_field_mathis_median,
                     'rad_field_mathis_median_error': \
                        rad_field_mathis_median_error,
                     'rad_field_map': rads,
                     }

        else:
            core_dict[core_name]['dust_temp_median'] = \
                cloud_props[cloud]['dust_temp_median']
            core_dict[core_name]['dust_temp_median_error'] = \
                cloud_props[cloud]['dust_temp_median_error']

        # copy cloud params to core dict
        for param_name in cloud_props[cloud]:
            core_dict[core_name][param_name] = \
                    np.copy(cloud_props[cloud][param_name])

    return cloud_props, core_dict
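# A hedged sketch of the Monte Carlo error estimate used above: perturb the
# map by its errors, take the median of each realization, and summarize the
# spread of medians. mystats.calc_cdf_error is approximated here with
# percentiles and may differ in detail from the project's implementation.
def example_mc_median_error(values, errors, n_mc=100, conf=0.68):

    import numpy as np

    medians = np.empty(n_mc)
    for j in range(n_mc):
        # simulate a new observation and record its median
        sim = values + np.random.normal(0, scale=errors)
        medians[j] = np.median(sim)

    low, mid, high = np.percentile(medians,
                                   [50. * (1 - conf), 50., 50. + 50. * conf])

    return mid, (mid - low, high - mid)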
def calc_data(cloud_list):
    ''' Executes script.
    '''
    import numpy as np
    from os import system, path
    import mygeometry as myg
    from mycoords import make_velocity_axis
    import json

    data_dict = {}

    for i, cloud in enumerate(cloud_list):

        if cloud == 'taurus1' or cloud == 'taurus2':
            cloud_name = 'taurus'
        else:
            cloud_name = cloud

        # define directory locations
        output_dir = '/d/bip3/ezbc/%s/data/python_output/nhi_av/' % cloud_name
        figure_dir = '/d/bip3/ezbc/multicloud/figures/spectra/'
        av_dir = '/d/bip3/ezbc/%s/data/av/' % cloud_name
        hi_dir = '/d/bip3/ezbc/%s/data/hi/' % cloud_name
        co_dir = '/d/bip3/ezbc/%s/data/co/' % cloud_name
        core_dir = output_dir + 'core_arrays/'
        region_dir = '/d/bip3/ezbc/%s/data/' % cloud_name + \
                     'python_output/ds9_regions/'

        # global property filename
        global_property_filename = '{0}_global_properties'.format(cloud)
        property_dir = '/d/bip3/ezbc/{0}/data/python_output/'.format(
            cloud_name)

        av_data_type = 'planck'

        cloud_dict = {}

        # Load HI maps from Taurus, California, and Perseus
        hi_data, hi_header = fits.getdata(
                '/d/bip3/ezbc/{0}/data/hi/{0}_hi_galfa_'.format(cloud_name) + \
                        'cube_regrid_planckres_bin.fits',
                header=True)

        # Load CO maps from Taurus, California, and Perseus
        co_data, co_header = fits.getdata(
                    co_dir + '{0}_co_cfa_'.format(cloud_name) + \
                        'cube_regrid_planckres_bin.fits',
                header=True)
        av_data, av_header = \
                fits.getdata(av_dir + \
                             '{0}_av_planck_tau353_5arcmin_bin.fits'.format(cloud_name),
                             header=True)

        # Mask out region
        with open(property_dir + \
                  global_property_filename + '_' + av_data_type + \
                  '_scaled.txt', 'r') as f:
            global_props = json.load(f)

        # Load cloud division regions from ds9
        global_props = load_ds9_region(global_props,
                        filename='/d/bip3/ezbc/multicloud/data/' + \
                        'python_output/multicloud_divisions.reg',
                        #'python_output/multicloud_divisions_2.reg',
                                header=av_header)

        # Derive relevant region
        region_vertices = \
            np.array(global_props['regions'][cloud]['poly_verts']['pixel'])
        region_mask = np.logical_not(
            myg.get_polygon_mask(av_data, region_vertices))
        if 0:
            import matplotlib.pyplot as plt
            plt.imshow(np.ma.array(av_data, mask=region_mask), origin='lower')
            plt.colorbar()
            plt.show()
        hi_data[:, region_mask] = np.nan
        co_data[:, region_mask] = np.nan

        # sum along spatial axes
        cloud_dict['hi_spectrum'] = calc_global_spectrum(hi_cube=hi_data,
                                                         statistic='median')
        cloud_dict['hi_std'] = calc_global_spectrum(hi_cube=hi_data,
                                                    statistic='std')
        cloud_dict['co_spectrum'] = calc_global_spectrum(hi_cube=co_data,
                                                         statistic='median')
        vel_center = global_props['hi_velocity_center']['value']
        vel_width = global_props['hi_velocity_width']['value']
        #cloud_dict['hi_vel_range'] = (vel_center + vel_width / 2.0,
        #                              vel_center - vel_width / 2.0)
        #cloud_dict['hi_vel_range'] = global_props['hi_velocity_range_conf']
        cloud_dict['hi_vel_range'] = global_props['hi_velocity_range']
        print(global_props['hi_velocity_range'])

        # Calculate velocity
        cloud_dict['hi_velocity_axis'] = make_velocity_axis(hi_header)
        cloud_dict['co_velocity_axis'] = make_velocity_axis(co_header)

        data_dict[cloud] = cloud_dict

    return data_dict
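# calc_global_spectrum is not shown in this listing; below is a plausible
# minimal version, assuming it collapses the two spatial axes of a (v, y, x)
# cube with the requested statistic while ignoring the NaNs set above.
def example_calc_global_spectrum(hi_cube=None, statistic='median'):

    import numpy as np

    if statistic == 'median':
        return np.nanmedian(hi_cube, axis=(1, 2))
    elif statistic == 'std':
        return np.nanstd(hi_cube, axis=(1, 2))
    else:
        raise ValueError('statistic must be "median" or "std"')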
def run_mc_simulations_cores(core_dict, wcs_header, temp_data, temp_error_data,
        beta_data, beta_error_data, N_mc=10):

    from myscience import calc_radiation_field

    # NOTE: this function stores core-region values in core_dict, while a
    # separate core_dict holding cloud-averaged parameters is used by another
    # function, so the two are kept distinct rather than copied here.
    #core_dict = core_dict.copy()

    for core_name in core_dict:
        # load cloud regions
        vertices_wcs = core_dict[core_name]['region_vertices']

        # Format vertices to be 2 x N array
        #vertices_wcs = np.array((vertices_wcs[0], vertices_wcs[1]))

        # Make an FK5 (RA/Dec) sky coordinates object from the vertices
        coords_fk5 = SkyCoord(vertices_wcs[0] * u.deg,
                              vertices_wcs[1] * u.deg,
                              frame='fk5',
                              )

        # convert to pixel
        coords_pixel = np.array(coords_fk5.to_pixel(wcs_header))

        # reorder vertices to (y, x) pixel convention
        vertices_pix = np.array((coords_pixel[1], coords_pixel[0])).T

        core_dict[core_name]['region_vertices_pix'] = vertices_pix

        # Mask pixels outside of the region
        region_mask = np.logical_not(myg.get_polygon_mask(temp_data,
                                                          vertices_pix))
        core_dict[core_name]['cloud_region_mask'] = region_mask

        # Grab the temperatures
        if 0:
            core_dict[core_name]['dust_temps'] = temp_data[~region_mask]
            core_dict[core_name]['dust_temp_errors'] = \
                temp_error_data[~region_mask]

        # adjust vertices to get errors on mean T_dust
        cloud = core_dict[core_name]['cloud']
        temp_mc = np.empty(N_mc)
        temp_error_mc = np.empty(N_mc)
        beta_mc = np.empty(N_mc)
        beta_error_mc = np.empty(N_mc)
        rad_mc = np.empty(N_mc)
        rad_error_mc = np.empty(N_mc)

        for j in xrange(N_mc):
            if j != 0:
                new_vertices_wcs = vertices_wcs + \
                                   np.random.normal(scale=1.0 / 60.0 * 5,
                                                    size=vertices_wcs.shape)
            else:
                new_vertices_wcs = vertices_wcs

            # Make an FK5 (RA/Dec) sky coordinates object from the vertices
            coords_fk5 = SkyCoord(new_vertices_wcs[0] * u.deg,
                                  new_vertices_wcs[1] * u.deg,
                                  frame='fk5',
                                  )

            # convert to pixel
            coords_pixel = np.array(coords_fk5.to_pixel(wcs_header))

            # reorder vertices to (y, x) pixel convention
            vertices_pix = np.array((coords_pixel[1],
                                     coords_pixel[0])).T

            # Mask pixels outside of the region
            region_mask = \
                    np.logical_not(myg.get_polygon_mask(temp_data,
                                                        vertices_pix))

            if 0:
                import matplotlib.pyplot as plt
                plt.imshow(region_mask, origin='lower')
                plt.title(core_name)
                plt.show()

            # Get the region's temperature
            if j == 0:
                temps = temp_data[~region_mask]
                betas = beta_data[~region_mask]
                rads = calc_radiation_field(temps,
                                         beta=betas,
                                         )

            # grab relevant pixels of core region
            temp_sim = temp_data[~region_mask]
            temp_error_sim = temp_error_data[~region_mask]
            beta_sim = beta_data[~region_mask]
            beta_error_sim = beta_error_data[~region_mask]

            # simulate new observation of temperature and beta
            temp_sim += np.random.normal(0, scale=temp_error_sim,)
            beta_sim += np.random.normal(0, scale=beta_error_sim,)

            # Calculate the radiation field
            # -----------------------------
            rad_field = \
                calc_radiation_field(temp_sim,
                                     beta=beta_sim,
                                     )

            # Grab the median values of temp, beta, and rad field
            temp_mc[j] = np.median(temp_sim)
            beta_mc[j] = np.median(beta_sim)
            rad_mc[j] = np.median(rad_field)

        # Calculate average temp
        #core_dict[core_name]['dust_temp_median'] = \
        #    np.nanmean(temp_data[~region_mask])
        #core_dict[core_name]['dust_temp_median_error'] = \
        #    np.sqrt(np.nansum(temp_error_data[~region_mask]**2)) / \
        #        temp_error_data[~region_mask].size
        dust_temp_median, mc_error = mystats.calc_cdf_error(temp_mc)
        dust_temp_median_error = np.mean(mc_error)
        dust_beta_median, mc_error = mystats.calc_cdf_error(beta_mc)
        dust_beta_median_error = np.mean(mc_error)
        rad_field_draine_median, mc_error = mystats.calc_cdf_error(rad_mc)
        #rad_field_draine_median_error = np.mean(mc_error)
        rad_field_draine_median_error = np.std(rads)

        # calculate habing field from draine:
        rad_field_habing_median = rad_field_draine_median * 1.71
        rad_field_habing_median_error = rad_field_draine_median_error * 1.71
        rad_field_mathis_median = rad_field_draine_median * 1.48
        rad_field_mathis_median_error = rad_field_draine_median_error * 1.48

        # write results to cloud
        core_dict[core_name]['region_values'] = \
                {
                 'dust_temp_median': dust_temp_median,
                 'dust_temp_median_error': dust_temp_median_error,
                 'dust_temps': temps,
                 'dust_beta_median': dust_beta_median,
                 'dust_beta_median_error': dust_beta_median_error,
                 'dust_betas': betas,
                 'rad_field_draine_median': rad_field_draine_median,
                 'rad_field_draine_median_error': \
                    rad_field_draine_median_error,
                 'rad_field_habing_median': rad_field_habing_median,
                 'rad_field_habing_median_error': \
                    rad_field_habing_median_error,
                 'rad_field_mathis_median': rad_field_mathis_median,
                 'rad_field_mathis_median_error': \
                    rad_field_mathis_median_error,
                 'rad_field_map': rads,
                 }

    return core_dict
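# The radiation-field unit conversions applied above, collected into a single
# helper; the factors 1.71 (Draine to Habing) and 1.48 (Draine to Mathis) are
# taken directly from the code above.
def example_convert_rad_field(u_draine, u_draine_error):

    return {
        'draine': (u_draine, u_draine_error),
        'habing': (u_draine * 1.71, u_draine_error * 1.71),
        'mathis': (u_draine * 1.48, u_draine_error * 1.48),
    }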
def calc_data(ra=None, dec=None):

    ''' Executes script.
    '''
    import numpy as np
    from os import system,path
    import mygeometry as myg
    from mycoords import make_velocity_axis
    import json

    cloud_list = ('california',)
    data_dict = {}

    for i, cloud in enumerate(cloud_list):

        if cloud == 'taurus1' or cloud == 'taurus2':
            cloud_name = 'taurus'
        else:
            cloud_name = cloud

        # define directory locations
        output_dir = '/d/bip3/ezbc/%s/data/python_output/nhi_av/' % cloud_name
        figure_dir = '/d/bip3/ezbc/multicloud/figures/spectra/'
        av_dir = '/d/bip3/ezbc/%s/data/av/' % cloud_name
        hi_dir = '/d/bip3/ezbc/%s/data/hi/' % cloud_name
        co_dir = '/d/bip3/ezbc/%s/data/co/' % cloud_name
        core_dir = output_dir + 'core_arrays/'
        region_dir = '/d/bip3/ezbc/%s/data/' % cloud_name + \
                     'python_output/ds9_regions/'

        # global property filename
        global_property_filename = '{0}_global_properties'.format(cloud)
        property_dir = '/d/bip3/ezbc/{0}/data/python_output/'.format(cloud_name)

        av_data_type = 'planck'

        cloud_dict = {}

        # Load HI maps from Taurus, California, and Perseus
        hi_data, hi_header = fits.getdata(
                '/d/bip3/ezbc/{0}/data/hi/{0}_hi_galfa_'.format(cloud_name) + \
                        'cube_regrid_planckres.fits',
                header=True)

        # Load CO maps from Taurus, California, and Perseus
        co_data, co_header = fits.getdata(
                    co_dir + '{0}_co_cfa_'.format(cloud_name) + \
                        'cube_regrid_planckres.fits',
                header=True)
        av_data, av_header = \
                fits.getdata(av_dir + \
                             '{0}_av_planck_5arcmin.fits'.format(cloud_name),
                             header=True)

        # Mask out region
        with open(property_dir + \
                  global_property_filename + '_' + av_data_type + \
                  '_scaled.txt', 'r') as f:
            global_props = json.load(f)

        # Load cloud division regions from ds9
        global_props = load_ds9_region(global_props,
                        filename='/d/bip3/ezbc/multicloud/data/' + \
                        'python_output/multicloud_divisions.reg',
                                header=av_header)

        # Derive relevant region
        region_vertices = \
            np.array(global_props['regions'][cloud]['poly_verts']['pixel'])
        region_mask = np.logical_not(myg.get_polygon_mask(av_data,
                                                          region_vertices))
        if 0:
            import matplotlib.pyplot as plt
            plt.imshow(np.ma.array(av_data,
                                   mask=region_mask), origin='lower')
            plt.colorbar()
            plt.show()
        #hi_data[:, region_mask] = np.nan
        co_data[:, region_mask] = np.nan

        pix = get_pix_coords(ra=ra, dec=dec, header=av_header)
        print(pix, av_data.shape, hi_data.shape)

        if 0:
            import matplotlib.pyplot as plt
            plt.plot(hi_data[:, pix[1], pix[0]])
            plt.show()

        # sum along spatial axes
        cloud_dict['hi_spectrum'] = hi_data[:, pix[1], pix[0]]
        cloud_dict['hi_std'] = hi_data[:, pix[1], pix[0]]
        cloud_dict['co_spectrum'] = calc_global_spectrum(
                                        hi_cube=co_data,
                                        statistic='median'
                                        )

        vel_center = global_props['hi_velocity_center']['value']
        vel_width = global_props['hi_velocity_width']['value']
        #cloud_dict['hi_vel_range'] = (vel_center + vel_width / 2.0,
        #                              vel_center - vel_width / 2.0)
        #cloud_dict['hi_vel_range'] = global_props['hi_velocity_range_conf']
        cloud_dict['hi_vel_range'] = global_props['hi_velocity_range']
        #print global_props['hi_velocity_range']

        # Calculate velocity
        cloud_dict['hi_velocity_axis'] = make_velocity_axis(
                                        hi_header)
        cloud_dict['co_velocity_axis'] = make_velocity_axis(
                                        co_header)

        data_dict[cloud] = cloud_dict

    return data_dict
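# A hedged sketch of pulling a single HI spectrum at an (RA, Dec) position, as
# done above, using astropy's WCS directly instead of the project's
# get_pix_coords helper; coordinates are assumed to be in degrees.
def example_extract_spectrum(cube, header, ra_deg, dec_deg):

    from astropy.wcs import WCS

    # convert sky coordinates to (x, y) pixel indices with the celestial WCS
    wcs = WCS(header).celestial
    x, y = wcs.wcs_world2pix(ra_deg, dec_deg, 0)

    # index the (velocity, y, x) cube at the nearest pixel
    return cube[:, int(round(y)), int(round(x))]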
def main(dgr=None, vel_range=(-5, 15), vel_range_type="single", region=None, av_data_type="planck"):
    """ Executes script.

    Parameters
    ----------
    dgr : float
        If None, pulls best-fit value from properties.
    vel_range : tuple
        If None, pulls best-fit value from properties.
    """

    # import external modules
    import pyfits as fits
    import numpy as np
    from mycoords import make_velocity_axis
    import mygeometry as myg
    from myimage_analysis import calculate_nhi, calculate_noise_cube, calculate_sd, calculate_nh2, calculate_nh2_error
    import json
    from os import system, path

    # Script parameters
    # -----------------
    # Name of noise cube
    noise_cube_filename = "multicloud_hi_galfa_cube_regrid_planckres_noise.fits"

    # Use Planck dust Av map or Kainulainen 2009 optical extinction Av map?
    # options are 'planck' or 'lee12'
    # av_data_type = 'lee12'
    # av_data_type = 'planck'

    # Global parameter file
    prop_file = "multicloud_global_properties"

    # Which cores to include in analysis?
    cores_to_keep = [  # Taurus
        "L1495",
        "L1495A",
        "B213",
        "L1498",
        "B215",
        "B18",
        "B217",
        "B220-1",
        "B220-2",
        "L1521",
        "L1524",
        "L1527-1",
        "L1527-2",
        # California
        "L1536",
        "L1483-1",
        "L1483-2",
        "L1482-1",
        "L1482-2",
        "L1478-1",
        "L1478-2",
        "L1456",
        "NGC1579",
        #'L1545',
        #'L1517',
        #'L1512',
        #'L1523',
        #'L1512',
        # Perseus
        "B5",
        "IC348",
        "B1E",
        "B1",
        "NGC1333",
        "B4",
        "B3",
        "L1455",
        "L1448",
    ]

    # Region limits used to edit the global properties
    if region == 1:
        region_limit = {"wcs": (((5, 10, 0), (19, 0, 0)), ((4, 30, 0), (27, 0, 0))), "pixel": ()}
    elif region == 2:
        region_limit = {"wcs": (((4, 30, 0), (19, 0, 0)), ((3, 50, 0), (29, 0, 0))), "pixel": ()}
    elif region == 3:
        region_limit = {"wcs": (((4, 30, 0), (29, 0, 0)), ((3, 50, 0), (33, 0, 0))), "pixel": ()}
    else:
        region_limit = None

    # define directory locations
    # --------------------------
    output_dir = "/d/bip3/ezbc/multicloud/data/python_output/nhi_av/"
    figure_dir = "/d/bip3/ezbc/multicloud/figures/"
    av_dir = "/d/bip3/ezbc/multicloud/data/av/"
    hi_dir = "/d/bip3/ezbc/multicloud/data/hi/"
    co_dir = "/d/bip3/ezbc/multicloud/data/co/"
    core_dir = "/d/bip3/ezbc/multicloud/data/python_output/core_properties/"
    property_dir = "/d/bip3/ezbc/multicloud/data/python_output/"
    region_dir = "/d/bip3/ezbc/multicloud/data/python_output/"

    # load Planck Av and GALFA HI images, on same grid
    if av_data_type == "lee12_2mass":
        print("\nLoading Lee+12 data...")
        av_image, av_header = load_fits(av_dir + "multicloud_av_lee12_2mass_regrid_planckres.fits", return_header=True)
        av_image_error = 0.1 * np.ones(av_image.shape)
    elif av_data_type == "lee12_iris":
        print("\nLoading Lee+12 data...")
        av_image, av_header = load_fits(av_dir + "multicloud_av_lee12_iris_regrid_planckres.fits", return_header=True)
        av_image_error = 0.1 * np.ones(av_image.shape)
    elif av_data_type == "planck_rad":
        print("\nLoading Planck data...")
        av_image, av_header = load_fits(av_dir + "multicloud_av_planck_radiance_5arcmin.fits", return_header=True)
        av_image_error, av_error_header = load_fits(
            av_dir + "multicloud_av_error_planck_radiance_5arcmin.fits", return_header=True
        )
    else:
        print("\nLoading Planck data...")
        av_image, av_header = load_fits(av_dir + "multicloud_av_planck_5arcmin.fits", return_header=True)

        av_image_error, av_error_header = load_fits(
            av_dir + "multicloud_av_error_planck_5arcmin.fits", return_header=True
        )

    hi_cube, hi_header = load_fits(hi_dir + "multicloud_hi_galfa_cube_regrid_planckres.fits", return_header=True)

    co_data, co_header = load_fits(co_dir + "multicloud_co_cfa_cube_regrid_planckres.fits", return_header=True)

    # Prepare data products
    # ---------------------
    # Load global properties of cloud
    # global properties written from script
    # 'av/multicloud_analysis_global_properties.txt'
    if region is not None:
        likelihood_filename += "_region{0:.0f}".format(region)
        results_filename += "_region{0:.0f}".format(region)

    print("\nLoading global property file {0:s}.txt".format(prop_file))
    with open(property_dir + prop_file + ".txt", "r") as f:
        props = json.load(f)

    # Define velocity range
    props["hi_velocity_range"] = vel_range

    # make velocity axis for hi cube
    velocity_axis = make_velocity_axis(hi_header)
    # make velocity axis for co cube
    co_velocity_axis = make_velocity_axis(co_header)
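
    # make_velocity_axis (from mycoords) is assumed to build the axis from the
    # spectral WCS keywords. A minimal sketch of that conversion, assuming a
    # linear third FITS axis stored in m/s (illustrative only; not used below):
    def _velocity_axis_sketch(header):
        channels = np.arange(header["NAXIS3"]) + 1.0
        vel_ms = header["CRVAL3"] + (channels - header["CRPIX3"]) * header["CDELT3"]
        return vel_ms / 1000.0  # m/s -> km/s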

    # Load the HI noise cube if it exists, else make it
    if not path.isfile(hi_dir + noise_cube_filename):
        hi_noise_cube = calculate_noise_cube(
            cube=hi_cube,
            velocity_axis=velocity_axis,
            velocity_noise_range=[90, 110],
            header=hi_header,
            Tsys=30.0,
            filename=hi_dir + noise_cube_filename,
        )
    else:
        hi_noise_cube, noise_header = fits.getdata(hi_dir + noise_cube_filename, header=True)

    # create nhi image
    nhi_image = calculate_nhi(
        cube=hi_cube, velocity_axis=velocity_axis, velocity_range=vel_range, header=hi_header, noise_cube=hi_noise_cube
    )
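
    # For reference: under the optically thin assumption,
    #   N(HI) [cm^-2] = 1.823e18 * integral T_B dv   (T_B in K, dv in km/s).
    # The rough, mask-free moment-0 version below is purely illustrative and is
    # not used further; calculate_nhi presumably also folds in the noise cube.
    if np.ndim(vel_range) == 1:
        dv = np.abs(velocity_axis[1] - velocity_axis[0])
        in_range = (velocity_axis >= vel_range[0]) & (velocity_axis <= vel_range[1])
        nhi_rough = 1.823e18 * dv * np.sum(hi_cube[in_range, :, :], axis=0)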

    # Change WCS coords to pixel coords of images
    props = convert_limit_coordinates(
        props, header=av_header, coords=("region_limit", "co_noise_limits", "plot_limit", "region_name_pos")
    )

    # Load cloud division regions from ds9
    props = load_ds9_region(props, filename=region_dir + "multicloud_divisions.reg", header=av_header)

    # Derive relevant region
    pix = props["region_limit"]["pixel"]
    region_vertices = ((pix[1], pix[0]), (pix[1], pix[2]), (pix[3], pix[2]), (pix[3], pix[0]))

    # block off region
    region_mask = myg.get_polygon_mask(av_image, region_vertices)

    print("\nRegion size = " + "{0:.0f} pix".format(region_mask[region_mask == 1].size))

    if vel_range_type == "single":
        print("\nHI velocity integration range:")
        print("%.1f to %.1f km/s" % (vel_range[0], vel_range[1]))
    elif vel_range_type == "multiple":
        print("\nHI velocity integration ranges:")
        for i in xrange(0, vel_range.shape[0]):
            print("%.1f to %.1f km/s" % (vel_range[i, 0], vel_range[i, 1]))

    cloud_dict = {"taurus": {}, "perseus": {}, "california": {}}

    # load Planck Av and GALFA HI images, on same grid
    for cloud in cloud_dict:

        print("\nLoading core properties for {0:s}".format(cloud))

        file_dir = "/d/bip3/ezbc/{0:s}/data/av/".format(cloud)

        # define core properties
        with open(
            "/d/bip3/ezbc/{0:s}/data/python_output/".format(cloud)
            + "core_properties/{0:s}_core_properties.txt".format(cloud),
            "r",
        ) as f:
            cores = json.load(f)

        # Load core regions from DS9 files
        if cloud == "aldobaran":
            region_cloud = "california"
        else:
            region_cloud = cloud
        core_filename = region_dir.replace("multicloud", region_cloud) + "/ds9_regions/{0:s}_av_poly_cores".format(
            region_cloud
        )

        cores = load_ds9_core_region(cores, filename_base=core_filename, header=av_header)

        cores = convert_core_coordinates(cores, av_header)

        # Remove cores
        cores_to_remove = []
        for core in cores:
            if core not in cores_to_keep:
                cores_to_remove.append(core)
        for core_to_remove in cores_to_remove:
            del cores[core_to_remove]
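        # (equivalently: cores = {name: info for name, info in cores.items()
        #                         if name in cores_to_keep})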

        cloud_dict[cloud]["cores"] = cores

    # Plot
    figure_types = ["png", "pdf"]
    for figure_type in figure_types:
        filename = "multicloud_av_cores_map" + ".{0:s}".format(figure_type)

        print("\nSaving Av cores map to \n" + filename)

        plot_cores_map(
            header=av_header,
            av_image=av_image,
            limits=props["plot_limit"]["pixel"],
            regions=props["regions"],
            cloud_dict=cloud_dict,
            cores_to_keep=cores_to_keep,
            props=props,
            hi_vlimits=(0, 20),
            av_vlimits=(0, 16),
            # av_vlimits=(0.1,30),
            savedir=figure_dir + "maps/",
            filename=filename,
            show=False,
        )
Example #31
def derive_box_sizes(co_image, cores_dict, box_width, box_height,
                     co_image_error=None, isoline=0.6, core_rel_pos=0.1,
                     angle_res=1.0):
    """
    Parameters
    ----------
    co_image : array-like
        CO image
    cores_dict : dict
        Dictionary including core information.
    box_width, box_height : int
        Box dimensions in pixels.
    co_image_error : array-like, optional
        Error on CO.
    isoline : float, optional
        Fraction of peak CO core emission to derive the contour. 60% value from
        Meng et al. (2013, ApJ, 209, 36)
    core_rel_pos : float, optional
        Fractional position of the core within the box.
    angle_res : float, optional
        Resolution with which to rotate each new box, in degrees.

    """

    import mygeometry as myg

    angle_grid = np.arange(0, 360, angle_res)
    box_dict = {}

    for core in cores_dict:
        print('Calculating 12CO size of core {:s}'.format(core))

        # axes are reversed
        core_pos = cores_dict[core]['center_pixel'][::-1]

        box_vertices = create_box(core_pos,
                                  box_width,
                                  box_height,
                                  core_rel_pos=core_rel_pos)

        gradient_sums = np.zeros((len(angle_grid)))

        for i, angle in enumerate(angle_grid):
            box_vertices_rotated = rotate_box(box_vertices, core_pos, angle)

            mask = myg.get_polygon_mask(co_image, box_vertices_rotated)

            co_image_masked = np.copy(co_image)

            # extract radial profile weighted by SNR
            radii, profile = get_radial_profile(co_image,
                                                binsize=3,
                                                center=core_pos[::-1],
                                                weights=co_image_error,
                                                mask=mask)

            if angle == 90:
                co_image_masked = np.copy(co_image)
                mask = myg.get_polygon_mask(co_image_masked, box_vertices)
                co_image_masked[mask == 0] = np.NaN

            indices = np.where((radii == radii) & \
                               (profile == profile))
            profile, radii = profile[indices], radii[indices]

            # steeper gradients will have smaller sums
            gradient_sum = np.sum(np.gradient(profile, radii))
            gradient_sums[i] = gradient_sum

        # find steepest profile and recreate the box mask
        angle_ideal = angle_grid[gradient_sums == np.min(gradient_sums)][0]

        box_vertices_rotated = rotate_box(box_vertices, core_pos, angle_ideal)

        box_dict[core] = {}
        box_dict[core]['box_vertices_rotated'] = box_vertices_rotated

    return box_dict
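
# The docstring above describes an isoline (fractional-peak CO contour) size,
# while the body reuses the box-rotation machinery from derive_ideal_box. A
# minimal sketch of the isoline idea, assuming the core "size" is taken as the
# equivalent radius of the region above `isoline` times the peak CO value
# (illustrative only; not necessarily the method used elsewhere in this code):
def _isoline_radius(co_image, mask, isoline=0.6, pix_scale_pc=0.43):
    """Equivalent radius (pc) of the area above isoline * peak within mask."""
    import numpy as np
    values = co_image[(mask == 1) & np.isfinite(co_image)]
    if values.size == 0:
        return np.nan
    threshold = isoline * values.max()
    npix = np.sum(values >= threshold)
    return np.sqrt(npix * pix_scale_pc**2 / np.pi)
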
def main():

    import grid
    import numpy as np
    from os import system, path
    import mygeometry as myg
    from mycoords import make_velocity_axis
    import json

    # parameters used in script
    # box_width = 3 # in pixels
    # box_height = 10 # in pixels
    box_width = 6  # in pixels
    box_height = 30  # in pixels
    # box_width = 12 # in pixels
    # box_height = 60 # in pixels

    # define directory locations
    output_dir = "/d/bip3/ezbc/perseus/data/python_output/nhi_av/"
    figure_dir = "/d/bip3/ezbc/perseus/figures/maps/"
    av_dir = "/d/bip3/ezbc/perseus/data/av/"
    hi_dir = "/d/bip3/ezbc/perseus/data/hi/"
    co_dir = "/d/bip3/ezbc/perseus/data/co/"
    core_dir = "/d/bip3/ezbc/perseus/data/python_output/core_properties/"
    region_dir = "/d/bip3/ezbc/perseus/data/python_output/ds9_regions/"

    # load Planck Av and GALFA HI images, on same grid
    av_data, av_header = load_fits(av_dir + "perseus_av_planck_5arcmin.fits", return_header=True)

    av_error_data, av_error_header = load_fits(av_dir + "perseus_av_error_planck_5arcmin.fits", return_header=True)

    # av_data[dec, ra], axes are switched

    # define core properties
    with open(core_dir + "perseus_core_properties.txt", "r") as f:
        cores = json.load(f)

    cores = convert_core_coordinates(cores, av_header)

    cores = load_ds9_region(cores, filename_base=region_dir + "perseus_av_boxes_", header=av_header)

    av_image_list = []
    av_image_error_list = []
    core_name_list = []

    box_dict = derive_ideal_box(
        av_data, cores, box_width, box_height, core_rel_pos=0.1, angle_res=10.0, av_image_error=av_error_data
    )

    for core in cores:
        cores[core]["box_vertices_rotated"] = box_dict[core]["box_vertices_rotated"].tolist()
        try:
            cores[core]["center_pixel"] = cores[core]["center_pixel"].tolist()
        except AttributeError:
            cores[core]["center_pixel"] = cores[core]["center_pixel"]

    with open(core_dir + "perseus_core_properties.txt", "w") as f:
        json.dump(cores, f)

    for core in cores:
        mask = myg.get_polygon_mask(av_data, cores[core]["box_vertices_rotated"])

        av_data_mask = np.copy(av_data)
        av_data_mask[mask == 0] = np.NaN

    # Define limits for plotting the map
    prop_dict = {}
    prop_dict["limit_wcs"] = (((3, 58, 0), (27, 6, 0)), ((3, 20, 0), (35, 0, 0)))
    prop_dict["limit_wcs"] = (((3, 58, 0), (26, 6, 0)), ((3, 0, 0), (35, 0, 0)))
    prop_dict["av_header"] = av_header

    prop_dict = convert_limit_coordinates(prop_dict)

    # Plot
    figure_types = ["pdf", "png"]
    for figure_type in figure_types:
        plot_av_image(
            av_image=av_data,
            header=av_header,
            boxes=True,
            cores=cores,  # limits=[50,37,200,160],
            # title=r'perseus: A$_V$ map with core boxed-regions.',
            savedir=figure_dir,
            limits=prop_dict["limit_pixels"],
            filename="perseus_av_cores_map.%s" % figure_type,
            show=0,
        )
def main():

    import grid
    import numpy as np
    import numpy
    from os import system,path
    import mygeometry as myg
    from mycoords import make_velocity_axis
    import json
    from myimage_analysis import calculate_nhi, calculate_noise_cube, \
        calculate_sd, calculate_nh2, calculate_nh2_error
    from multiprocessing import Pool

    global _hi_cube
    global _hi_velocity_axis
    global _hi_noise_cube
    global _av_image
    global _av_image_error

    # parameters used in script
    # -------------------------
    # HI velocity integration range
    # Determine HI integration velocity by CO or correlation with Av?
    hi_av_likelihoodelation = True

    center_vary = False
    width_vary = True
    dgr_vary = True

    # Check if likelihood file already written, rewrite?
    clobber = 1

    # Include only pixels within core regions for analysis?
    core_mask = 0

    # Confidence of parameter errors
    conf = 0.68
    # Confidence of contour levels
    contour_confs = (0.68, 0.95)

    # Results and fits filenames
    likelihood_filename = 'perseus_nhi_av_likelihoods_mcmc_co_av'
    results_filename = 'perseus_likelihood_mcmc_co_av'
    global _progress_filename
    _progress_filename = 'perseus_mcmc_samples.dat'

    # Define ranges of parameters
    global _av_thres_range
    _av_thres_range = (1.0, 1.1)
    _av_thres_range = (0.1, 2.0)
    global _vel_width_range
    _vel_width_range = (0.0, 80.0)
    global _dgr_range
    _dgr_range = (0.01, 0.4)
    global _velocity_center
    _velocity_center = 5.0 # km/s

    # MCMC parameters
    global _ndim
    _ndim = 3
    global _nwalkers
    _nwalkers = 100
    global _niter
    _niter = 1000
    global _init_guesses
    _init_guesses = np.array((10, 0.10, 1.0))
    global _init_spread
    _init_spread = np.array((0.1, 0.01, 0.01))
    global _mc_threads
    _mc_threads = 10
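
    # A minimal sketch of how these settings would typically drive an emcee
    # ensemble sampler under the classic emcee (v2) API. It is illustrative
    # only: the actual sampling happens inside calc_likelihood (not shown), and
    # `lnprob` here stands for a hypothetical log-probability function.
    def _run_emcee_sketch(lnprob):
        import emcee
        p0 = _init_guesses + _init_spread * np.random.randn(_nwalkers, _ndim)
        sampler = emcee.EnsembleSampler(_nwalkers, _ndim, lnprob,
                                        threads=_mc_threads)
        sampler.run_mcmc(p0, _niter)
        return sampler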

    # Name of property files results are written to
    global_property_file = 'perseus_global_properties.txt'
    core_property_file = 'perseus_core_properties.txt'

    # Name of noise cube
    noise_cube_filename = 'perseus_hi_galfa_cube_regrid_planckres_noise.fits'

    # Define limits for plotting the map
    prop_dict = {}
    prop_dict['limit_wcs'] = (((3, 58, 0), (27, 6, 0)),
                              ((3, 20, 0), (35, 0, 0)))
    prop_dict['limit_wcs'] = (((3, 58, 0), (26, 6, 0)),
                              ((3, 0, 0), (35, 0, 0)))

    # define directory locations
    # --------------------------
    output_dir = '/d/bip3/ezbc/perseus/data/python_output/nhi_av/'
    figure_dir = '/d/bip3/ezbc/perseus/figures/hi_velocity_range/'
    av_dir = '/d/bip3/ezbc/perseus/data/av/'
    hi_dir = '/d/bip3/ezbc/perseus/data/hi/'
    co_dir = '/d/bip3/ezbc/perseus/data/co/'
    core_dir = '/d/bip3/ezbc/perseus/data/python_output/core_properties/'
    property_dir = '/d/bip3/ezbc/perseus/data/python_output/'
    region_dir = '/d/bip3/ezbc/perseus/data/python_output/ds9_regions/'
    likelihood_dir = '/d/bip3/ezbc/perseus/data/python_output/nhi_av/'
    global _likelihood_dir
    _likelihood_dir = likelihood_dir

    # load Planck Av and GALFA HI images, on same grid
    av_data_planck, av_header = load_fits(av_dir + \
                'perseus_av_planck_5arcmin.fits',
            return_header=True)
    prop_dict['av_header'] = av_header

    av_error_data_planck, av_error_header = load_fits(av_dir + \
                'perseus_av_error_planck_5arcmin.fits',
            return_header=True)

    hi_data, h = load_fits(hi_dir + \
                'perseus_hi_galfa_cube_regrid_planckres.fits',
            return_header=True)

    co_data, co_header = load_fits(co_dir + \
                'perseus_co_cfa_cube_regrid_planckres.fits',
            return_header=True)

    # make the velocity axis
    velocity_axis = make_velocity_axis(h)

    # Plot NHI vs. Av for a given velocity range
    if not path.isfile(hi_dir + noise_cube_filename):
        noise_cube = calculate_noise_cube(cube=hi_data,
                velocity_axis=velocity_axis,
                velocity_noise_range=[90,110], header=h, Tsys=30.,
                filename=hi_dir + noise_cube_filename)
    else:
        noise_cube, noise_header = load_fits(hi_dir + noise_cube_filename,
            return_header=True)

    # define core properties
    with open(core_dir + core_property_file, 'r') as f:
        cores = json.load(f)
    with open(property_dir + global_property_file, 'r') as f:
        global_props = json.load(f)

    cores = convert_core_coordinates(cores, h)

    cores = load_ds9_region(cores,
            filename_base = region_dir + 'perseus_av_boxes_',
            header = h)

    print('\nCalculating likelihoods globally')

    mask = np.zeros(av_data_planck.shape)
    for core in cores:
        # Grab the mask
        mask += myg.get_polygon_mask(av_data_planck,
                cores[core]['wedge_vertices_rotated'])

    co_mom0 = np.sum(co_data, axis=0)

    # Mask images
    core_mask = 0
    if core_mask:
        indices = ((mask == 1) & \
                   (co_mom0 < np.std(co_mom0[~np.isnan(co_mom0)])*2.0))
        mask_type = '_core_mask'
    else:
        indices = (co_mom0 < np.std(co_mom0[~np.isnan(co_mom0)])*2.0)
        mask_type = ''

    hi_data_sub = np.copy(hi_data[:, indices])
    noise_cube_sub = np.copy(noise_cube[:, indices])
    av_data_sub = np.copy(av_data_planck[indices])
    av_error_data_sub = np.copy(av_error_data_planck[indices])
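
    # Note: `indices` is a 2-D boolean mask, so hi_data[:, indices] collapses
    # the two spatial axes into one, giving arrays of shape
    # (n_channels, n_selected_pixels) and (n_selected_pixels,) respectively.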

    # Set global variables
    _hi_cube = hi_data_sub
    _hi_velocity_axis = velocity_axis
    _hi_noise_cube = noise_cube_sub
    _av_image = av_data_sub
    _av_image_error = av_error_data_sub

    # Define filename for plotting results
    results_filename = figure_dir + results_filename

    # correlate each core region Av and N(HI) for velocity ranges
    vel_range_confint, dgr_confint, likelihoods, center_likelihood,\
        width_likelihood, dgr_likelihood = \
            calc_likelihood(return_likelihoods=True,
                            plot_results=True,
                            results_filename=results_filename + mask_type,
                            likelihood_filename=likelihood_dir + \
                                    likelihood_filename + \
                                    mask_type + '.npy',
                            clobber=clobber,
                            conf=conf,
                            contour_confs=contour_confs)

Example #34
def derive_ideal_wedge(av_image, cores_dict, wedge_angle, wedge_radius,
        av_image_error=None, core_rel_pos=0.1, angle_res=1.0):

    import mygeometry as myg

    """
    Parameters
    ----------
    angle_res : float
        Resolution with which to rotate each new box in degrees. 1.0 degree
        gives 360 different box orientations.


    """

    angle_grid = np.arange(0, 360, angle_res)
    wedge_dict = {}

    for core in cores_dict:
        print('Calculating optimal angle for core {:s}'.format(core))

        # axes are reversed
        core_pos = cores_dict[core]['center_pixel'][::-1]

        wedge_vertices = create_wedge(core_pos, wedge_radius, wedge_angle,
                core_rel_pos=core_rel_pos)

        gradient_sums = np.zeros((len(angle_grid)))

        for i, angle in enumerate(angle_grid):
            wedge_vertices_rotated = rotate_wedge(wedge_vertices, core_pos, angle)

            mask = myg.get_polygon_mask(av_image, wedge_vertices_rotated)

            av_image_masked = np.copy(av_image)

            # extract radial profile weighted by SNR
            radii, profile = get_radial_profile(av_image, binsize=3,
                    center=core_pos[::-1],
                    weights=av_image_error,
                    mask=mask
                    )

            if angle == 90:
                av_image_masked = np.copy(av_image)
                mask = myg.get_polygon_mask(av_image_masked, wedge_vertices)
                av_image_masked[mask==0]=np.NaN

            indices = np.where((radii == radii) & \
                               (profile == profile))
            profile, radii = profile[indices], radii[indices]

            # steeper gradients will have smaller sums
            gradient_sum = np.sum(np.gradient(profile, radii))
            gradient_sums[i] = gradient_sum

        # find steepest profile and recreate the box mask
        angle_ideal = angle_grid[gradient_sums == np.min(gradient_sums)][0]

        wedge_vertices_rotated = rotate_wedge(wedge_vertices, core_pos, angle_ideal)

        wedge_dict[core] = {}
        wedge_dict[core]['wedge_vertices_rotated'] = wedge_vertices_rotated

    return wedge_dict
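
# The selection criterion above sums np.gradient(profile, radii); because the
# gradient is evaluated against the (possibly unevenly spaced) radii, the sum
# approximates the net slope of the radial profile, so the most negative sum
# picks the orientation along which Av falls off most steeply. A toy check of
# that behaviour (doctest-style, for illustration only):
#
#   >>> import numpy as np
#   >>> radii = np.array([0.0, 1.0, 2.0, 3.0])
#   >>> steep, shallow = np.exp(-radii), np.exp(-0.1 * radii)
#   >>> np.sum(np.gradient(steep, radii)) < np.sum(np.gradient(shallow, radii))
#   True
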
def main():

    import grid
    import numpy as np
    from os import system, path
    import mygeometry as myg
    from mycoords import make_velocity_axis
    import json

    # parameters used in script
    # -------------------------
    # wedge should be a few tens of pc.
    # D = 300 pc
    # res = 5'
    # d/pix = 0.43 pc/pix
    wedge_angle = 40.0  # degrees
    wedge_radius = 20.0 / 0.43  # pixels,
    core_rel_pos = 0.15  # fraction of radius core is within wedge
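
    # Quick check of the pixel scale quoted above: at the assumed distance of
    # D = 300 pc, a 5 arcmin pixel subtends D * theta = 300 * (5 / 60) * (pi / 180)
    # ~ 0.44 pc, so wedge_radius = 20 pc / 0.43 pc/pix ~ 47 pixels.
    pix_scale_pc = 300.0 * np.radians(5.0 / 60.0)  # not used below; ~0.44 pc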

    # define directory locations
    output_dir = "/d/bip3/ezbc/california/data/python_output/nhi_av/"
    figure_dir = "/d/bip3/ezbc/california/figures/maps/"
    av_dir = "/d/bip3/ezbc/california/data/av/"
    hi_dir = "/d/bip3/ezbc/california/data/hi/"
    co_dir = "/d/bip3/ezbc/california/data/co/"
    core_dir = "/d/bip3/ezbc/california/data/python_output/core_properties/"
    region_dir = "/d/bip3/ezbc/california/data/python_output/ds9_regions/"

    # load Planck Av and GALFA HI images, on same grid
    av_data, av_header = load_fits(av_dir + "california_av_planck_5arcmin.fits", return_header=True)

    av_error_data, av_error_header = load_fits(av_dir + "california_av_error_planck_5arcmin.fits", return_header=True)

    # av_data[dec, ra], axes are switched

    # define core properties
    with open(core_dir + "california_core_properties.txt", "r") as f:
        cores = json.load(f)

    cores = convert_core_coordinates(cores, av_header)

    cores = load_ds9_region(cores, filename_base=region_dir + "california_av_boxes_", header=av_header)

    av_image_list = []
    av_image_error_list = []
    core_name_list = []

    wedge_dict = derive_ideal_wedge(
        av_data,
        cores,
        wedge_angle,
        wedge_radius,
        core_rel_pos=core_rel_pos,
        angle_res=5.0,
        av_image_error=av_error_data,
    )

    for core in cores:
        cores[core]["wedge_vertices_rotated"] = wedge_dict[core]["wedge_vertices_rotated"].tolist()
        try:
            cores[core]["center_pixel"] = cores[core]["center_pixel"].tolist()
        except AttributeError:
            cores[core]["center_pixel"] = cores[core]["center_pixel"]

    with open(core_dir + "california_core_properties.txt", "w") as f:
        json.dump(cores, f)

    for core in cores:
        mask = myg.get_polygon_mask(av_data, cores[core]["wedge_vertices_rotated"])

        av_data_mask = np.copy(av_data)
        av_data_mask[mask == 0] = np.NaN

    # Define limits for plotting the map
    prop_dict = {}
    prop_dict["limit_wcs"] = (((4, 50, 0), (32, 0, 0)), ((3, 40, 0), (44, 0, 0)))
    prop_dict["av_header"] = av_header

    prop_dict = convert_limit_coordinates(prop_dict)

    # Plot
    figure_types = ["pdf", "png"]
    for figure_type in figure_types:
        plot_av_image(
            av_image=av_data,
            header=av_header,
            boxes=True,
            cores=cores,  # limits=[50,37,200,160],
            # title=r'california: A$_V$ map with core boxed-regions.',
            savedir=figure_dir,
            limits=prop_dict["limit_pixels"],
            filename="california_av_cores_map.%s" % figure_type,
            show=0,
        )
Example #36
def main(dgr=None,
         vel_range=None,
         vel_range_type='single',
         region=None,
         av_data_type='planck'):
    ''' Executes script.

    Parameters
    ----------
    dgr : float
        If None, pulls best-fit value from properties.
    vel_range : tuple
        If None, pulls best-fit value from properties.
    '''

    # import external modules
    #import pyfits as fits
    from astropy.io import fits
    import numpy as np
    from mycoords import make_velocity_axis
    import mygeometry as myg
    from myimage_analysis import calculate_nhi, calculate_noise_cube, \
        calculate_sd, calculate_nh2, calculate_nh2_error
    import json
    from os import system, path

    # Script parameters
    # -----------------
    # Name of noise cube
    noise_cube_filename = 'multicloud_hi_galfa_cube_regrid_planckres_noise.fits'

    # Use Planck dust Av map or Kainulainen 2009 optical extinction Av map?
    # options are 'planck' or 'lee12'
    #av_data_type = 'lee12'
    #av_data_type = 'planck'

    # Global parameter file
    prop_file = 'multicloud_global_properties'

    # Regions to edit the global properties with
    if region == 1:
        region_limit = {
            'wcs': (((5, 10, 0), (19, 0, 0)), ((4, 30, 0), (27, 0, 0))),
            'pixel': ()
        }
    elif region == 2:
        region_limit = {
            'wcs': (((4, 30, 0), (19, 0, 0)), ((3, 50, 0), (29, 0, 0))),
            'pixel': ()
        }
    elif region == 3:
        region_limit = {
            'wcs': (((4, 30, 0), (29, 0, 0)), ((3, 50, 0), (33, 0, 0))),
            'pixel': ()
        }
    else:
        region_limit = None

    # define directory locations
    # --------------------------
    output_dir = '/d/bip3/ezbc/multicloud/data/python_output/nhi_av/'
    figure_dir = '/d/bip3/ezbc/multicloud/figures/'
    av_dir = '/d/bip3/ezbc/multicloud/data/av/'
    hi_dir = '/d/bip3/ezbc/multicloud/data/hi/'
    co_dir = '/d/bip3/ezbc/multicloud/data/co/'
    core_dir = '/d/bip3/ezbc/multicloud/data/python_output/core_properties/'
    property_dir = '/d/bip3/ezbc/multicloud/data/python_output/'
    region_dir = '/d/bip3/ezbc/multicloud/data/python_output/regions/'

    # load Planck Av and GALFA HI images, on same grid
    if av_data_type == 'lee12_2mass':
        print('\nLoading Lee+12 data...')
        av_image, av_header = load_fits(av_dir + \
                    'multicloud_av_lee12_2mass_regrid_planckres.fits',
                return_header=True)
        av_image_error = 0.1 * np.ones(av_image.shape)
    elif av_data_type == 'lee12_iris':
        print('\nLoading Lee+12 data...')
        av_image, av_header = load_fits(av_dir + \
                    'multicloud_av_lee12_iris_regrid_planckres.fits',
                return_header=True)
        av_image_error = 0.1 * np.ones(av_image.shape)
    elif av_data_type == 'planck_rad':
        print('\nLoading Planck data...')
        av_image, av_header = load_fits(av_dir + \
                    'multicloud_av_planck_tau353_5arcmin.fits',
                return_header=True)
        av_image_error, av_error_header = load_fits(av_dir + \
                    'multicloud_av_error_planck_tau353_5arcmin.fits',
                return_header=True)
    else:
        print('\nLoading Planck data...')
        av_image, av_header = load_fits(av_dir + \
                    'multicloud_av_planck_tau353_5arcmin.fits',
                return_header=True)

        av_image_error, av_error_header = load_fits(av_dir + \
                    'multicloud_av_error_planck_tau353_5arcmin.fits',
                return_header=True)

    hi_cube, hi_header = load_fits(hi_dir + \
                'multicloud_hi_galfa_cube_regrid_planckres.fits',
            return_header=True)

    co_data, co_header = load_fits(co_dir + \
                'multicloud_co_cfa_cube_regrid_planckres.fits',
            return_header=True)

    # Prepare data products
    # ---------------------
    # Load global properties of cloud
    # global properties written from script
    # 'av/multicloud_analysis_global_properties.txt'
    if region is not None:
        likelihood_filename += '_region{0:.0f}'.format(region)
        results_filename += '_region{0:.0f}'.format(region)
    with open(property_dir + prop_file + '.txt', 'r') as f:
        props = json.load(f)

    if vel_range is not None:
        props['hi_velocity_range'] = vel_range
    else:
        vel_range = props['hi_velocity_range']

    # make velocity axis for hi cube
    velocity_axis = make_velocity_axis(hi_header)
    # make velocity axis for co cube
    co_velocity_axis = make_velocity_axis(co_header)

    # Load the HI noise cube if it exists, else make it
    if not path.isfile(hi_dir + noise_cube_filename):
        hi_noise_cube = calculate_noise_cube(cube=hi_cube,
                                             velocity_axis=velocity_axis,
                                             velocity_noise_range=[90, 110],
                                             header=hi_header,
                                             Tsys=30.,
                                             filename=hi_dir +
                                             noise_cube_filename)
    else:
        hi_noise_cube, noise_header = fits.getdata(hi_dir +
                                                   noise_cube_filename,
                                                   header=True)

    # create nhi image
    nhi_image = calculate_nhi(cube=hi_cube,
                              velocity_axis=velocity_axis,
                              velocity_range=vel_range,
                              header=hi_header,
                              noise_cube=hi_noise_cube)

    props['plot_limit']['wcs'] = (((5, 20, 0), (19, 0, 0)), ((2, 30, 0),
                                                             (37, 0, 0)))

    props['region_name_pos'] = {
        #'taurus 1' : {'wcs' : ((3, 50,  0),
        #                       (21.5, 0, 0)),
        #             },
        #'taurus 2' : {'wcs' : ((5, 10,  0),
        #                       (21.5, 0, 0)),
        #             },
        'taurus': {
            'wcs': ((4, 40, 0), (21, 0, 0)),
        },
        'perseus': {
            'wcs': ((3, 30, 0), (26, 0, 0)),
        },
        #'perseus 1' : {'wcs' : ((3, 0,  0),
        #                      (34, 0, 0)),
        #             },
        #'perseus 2' : {'wcs' : ((3, 10,  0),
        #                      (22.5, 0, 0)),
        #             },
        'california': {
            'wcs': ((4, 28, 0), (34, 0, 0)),
        },
    }

    # Change WCS coords to pixel coords of images
    props = convert_limit_coordinates(props,
                                      header=av_header,
                                      coords=('region_limit',
                                              'co_noise_limits', 'plot_limit',
                                              'region_name_pos'))

    props['plot_limit']['wcs'] = [
        15 * (5 + 20. / 60), 15 * (2 + 30. / 60.), 17, 38.5
    ]

    # Load cloud division regions from ds9
    props = load_ds9_region(props,
                            filename=region_dir + 'multicloud_divisions.reg',
                            header=av_header)

    # Derive relevant region
    pix = props['region_limit']['pixel']
    region_vertices = ((pix[1], pix[0]), (pix[1], pix[2]), (pix[3], pix[2]),
                       (pix[3], pix[0]))

    # block off region
    region_mask = myg.get_polygon_mask(av_image, region_vertices)

    print('\nRegion size = ' + \
          '{0:.0f} pix'.format(region_mask[region_mask == 1].size))

    if vel_range_type == 'single':
        print('\nHI velocity integration range:')
        print('%.1f to %.1f km/s' % (vel_range[0], vel_range[1]))
    elif vel_range_type == 'multiple':
        print('\nHI velocity integration ranges:')
        for i in xrange(0, vel_range.shape[0]):
            print('%.1f to %.1f km/s' % (vel_range[i, 0], vel_range[i, 1]))

    # Plot
    figure_types = ['png', 'pdf']
    for figure_type in figure_types:
        if region is None:
            if vel_range_type == 'single':
                filename = 'multicloud_av_nhi_map' + \
                    '.%s' % figure_type
                #av_data_type + \
                #'dgr{0:.3f}_'.format(dgr) + \
                #'{0:.1f}to{1:.1f}kms'.format(vel_range[0], vel_range[1]) + \
                #'_' + \
            elif vel_range_type == 'multiple':
                filename = 'multiple_vel_range/multicloud_av_model_map' + \
                           'dgr{0:.3f}'.format(dgr)
                for i in xrange(0, vel_range.shape[0]):
                    filename += '_{0:.1f}to{1:.1f}kms'.format(
                        vel_range[i, 0], vel_range[i, 1])
                filename += '.%s' % figure_type
        else:
            filename = 'multicloud_av_model_map_region{0:.0f}'.format(region) + \
                       '.{0:s}'.format(figure_type)

        filename = 'av_map'
        filename = figure_dir + 'maps/' + filename + '.' + figure_type
        print('\nSaving Av model image to \n' + filename)

        plot_av_image(
            av_image=av_image,
            header=av_header,
            limits=[15 * (5 + 20. / 60), 15 * (2 + 30. / 60.), 17, 38.5],
            limits_type='wcs',
            regions=props['regions'],
            props=props,
            av_vlimits=(0, 15.5),
            filename=filename,
            show=False)

        if 0:
            filename = 'av_nhi_map'
            filename = figure_dir + 'maps/' + filename + '.' + figure_type
            print('\nSaving NHI + Av maps to \n' + filename)
            plot_nhi_image(
                nhi_image=nhi_image,
                header=av_header,
                av_image=av_image,
                limits=props['plot_limit']['wcs'],
                limits_type='wcs',
                regions=props['regions'],
                props=props,
                hi_vlimits=(0, 20),
                av_vlimits=(0, 15.5),
                #av_vlimits=(0.1,30),
                filename=filename,
                show=False)
Example #37
def main():

    import grid
    import numpy as np
    import numpy
    from os import system, path
    import mygeometry as myg
    from mycoords import make_velocity_axis
    import json
    from myimage_analysis import calculate_nhi, calculate_noise_cube, \
        calculate_sd, calculate_nh2, calculate_nh2_error

    # parameters used in script
    # -------------------------
    # HI velocity integration range
    # Determine HI integration velocity by CO or correlation with Av?
    hi_av_likelihoodelation = True

    center_vary = False
    width_vary = True
    dgr_vary = True

    # Check if likelihood file already written, rewrite?
    clobber = 0

    # Confidence of parameter errors
    conf = 0.68
    # Confidence of contour levels
    contour_confs = (0.68, 0.95)

    # Coarse, large grid or fine, small grid?
    grid_res = 'course'
    grid_res = 'fine'

    # Results and fits filenames
    likelihood_filename = 'california_nhi_av_likelihoods'
    results_filename = 'california_likelihood'

    # Define ranges of parameters
    if center_vary and width_vary and dgr_vary:
        likelihood_filename += '_width_dgr_center'
        results_filename += '_width_dgr_center'

        velocity_centers = np.arange(-15, 30, 1)
        velocity_widths = np.arange(1, 80, 1)
        dgrs = np.arange(1e-2, 1, 2e-2)
    elif not center_vary and width_vary and dgr_vary:

        if grid_res == 'course':
            likelihood_filename += '_dgr_width_lowres'
            results_filename += '_dgr_width_lowres'
            velocity_centers = np.arange(5, 6, 1)
            velocity_widths = np.arange(1, 80, 1)
            dgrs = np.arange(1e-2, 1, 2e-2)
        elif grid_res == 'fine':
            likelihood_filename += '_dgr_width_highres'
            results_filename += '_dgr_width_highres'
            velocity_centers = np.arange(5, 6, 1)
            velocity_widths = np.arange(1, 40, 0.16667)
            dgrs = np.arange(0.05, 0.5, 1e-3)
    elif center_vary and width_vary and not dgr_vary:
        likelihood_filename += '_width_center'
        results_filename += '_width_center'

        velocity_centers = np.arange(-15, 30, 1)
        velocity_widths = np.arange(1, 80, 1)
        dgrs = np.arange(1.1e-1, 1.2e-1, 0.1e-1)
    elif not center_vary and width_vary and not dgr_vary:
        likelihood_filename += '_width'
        results_filename += '_width'

        velocity_centers = np.arange(5, 6, 1)
        velocity_widths = np.arange(1, 80, 1)
        dgrs = np.arange(1.1e-1, 1.2e-1, 0.1e-1)

    # Which likelihood fits should be performed?
    core_likelihoodelation = 0
    global_likelihoodelation = 1

    # Name of property files results are written to
    global_property_file = 'california_global_properties.txt'
    core_property_file = 'california_core_properties.txt'

    # Threshold of Av below which we expect only atomic gas, in mag
    av_threshold = 1

    # Name of noise cube
    noise_cube_filename = 'california_hi_galfa_cube_regrid_planckres_noise.fits'

    # define directory locations
    # --------------------------
    output_dir = '/d/bip3/ezbc/california/data/python_output/nhi_av/'
    figure_dir = '/d/bip3/ezbc/california/figures/hi_velocity_range/'
    av_dir = '/d/bip3/ezbc/california/data/av/'
    hi_dir = '/d/bip3/ezbc/california/data/hi/'
    co_dir = '/d/bip3/ezbc/california/data/co/'
    core_dir = '/d/bip3/ezbc/california/data/python_output/core_properties/'
    property_dir = '/d/bip3/ezbc/california/data/python_output/'
    region_dir = '/d/bip3/ezbc/california/data/python_output/ds9_regions/'
    likelihood_dir = '/d/bip3/ezbc/california/data/python_output/nhi_av/'

    # load Planck Av and GALFA HI images, on same grid
    av_data_planck, av_header = load_fits(av_dir + \
                'california_av_planck_5arcmin.fits',
            return_header=True)

    av_error_data_planck, av_error_header = load_fits(av_dir + \
                'california_av_error_planck_5arcmin.fits',
            return_header=True)

    hi_data, h = load_fits(hi_dir + \
                'california_hi_galfa_cube_regrid_planckres.fits',
            return_header=True)

    # make the velocity axis
    velocity_axis = make_velocity_axis(h)

    # Plot NHI vs. Av for a given velocity range
    if not path.isfile(hi_dir + noise_cube_filename):
        noise_cube = calculate_noise_cube(cube=hi_data,
                                          velocity_axis=velocity_axis,
                                          velocity_noise_range=[90, 110],
                                          header=h,
                                          Tsys=30.,
                                          filename=hi_dir +
                                          noise_cube_filename)
    else:
        noise_cube, noise_header = load_fits(hi_dir + noise_cube_filename,
                                             return_header=True)

    # define core properties
    with open(core_dir + core_property_file, 'r') as f:
        cores = json.load(f)
    with open(property_dir + global_property_file, 'r') as f:
        global_props = json.load(f)

    dgr = global_props['dust2gas_ratio']['value']
    dgr = 1.2e-1

    cores = convert_core_coordinates(cores, h)

    cores = load_ds9_region(cores,
                            filename_base=region_dir + 'california_av_boxes_',
                            header=h)

    if core_likelihoodelation:
        for core in cores:
            print('\nCalculating for core %s' % core)

            # Grab the mask
            mask = myg.get_polygon_mask(av_data_planck,
                                        cores[core]['box_vertices_rotated'])

            indices = ((mask == 0) &\
                       (av_data_planck < av_threshold))

            hi_data_sub = np.copy(hi_data[:, indices])
            noise_cube_sub = np.copy(noise_cube[:, indices])
            av_data_sub = np.copy(av_data_planck[indices])
            av_error_data_sub = np.copy(av_error_data_planck[indices])

            # Define filename for plotting results
            results_filename = figure_dir + 'california_logL_%s.png' % core

            # correlate each core region Av and N(HI) for velocity ranges
            vel_range_confint, dgr_confint, likelihoods, center_likelihood,\
                width_likelihood, dgr_likelihood = \
                    calc_likelihood_hi_av(hi_cube=hi_data_sub,
                                    hi_velocity_axis=velocity_axis,
                                    hi_noise_cube=noise_cube_sub,
                                    av_image=av_data_sub,
                                    av_image_error=av_error_data_sub,
                                    dgrs=dgrs,
                                    velocity_centers=velocity_centers,
                                    velocity_widths=velocity_widths,
                                    return_likelihoods=True,
                                    plot_results=True,
                                    results_filename=results_filename,
                                    likelihood_filename=likelihood_dir + \
                                            likelihood_filename + \
                                            '{0:s}.fits'.format(core),
                                    clobber=clobber,
                                    conf=conf)

            print('HI velocity integration range:')
            print('%.1f to %.1f km/s' %
                  (vel_range_confint[0], vel_range_confint[1]))
            print('DGR:')
            print('%.2f to %.2f' %
                  (dgr_confint[0], dgr_confint[1]))

            cores[core]['hi_velocity_range'] = vel_range_confint[0:2]
            cores[core]['hi_velocity_range_error'] = vel_range_confint[2:]
            cores[core]['center_likelihood'] = center_likelihood.tolist()
            cores[core]['width_likelihood'] = width_likelihood.tolist()
            cores[core]['vel_centers'] = velocity_centers.tolist()
            cores[core]['vel_widths'] = velocity_widths.tolist()

        with open(core_dir + core_property_file, 'w') as f:
            json.dump(cores, f)

    if global_likelihoodelation:
        print('\nCalculating likelihoods globally')

        mask = np.zeros(av_data_planck.shape)
        for core in cores:
            # Grab the mask
            mask += myg.get_polygon_mask(av_data_planck,
                                         cores[core]['box_vertices_rotated'])

        indices = ((mask == 0) &\
                   (av_data_planck < av_threshold))
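        # i.e. keep pixels outside every core box and below the Av threshold,
        # where the gas is expected to be predominantly atomic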

        #indices = ((av_data_planck < av_threshold))

        hi_data_sub = np.copy(hi_data[:, indices])
        noise_cube_sub = np.copy(noise_cube[:, indices])
        av_data_sub = np.copy(av_data_planck[indices])
        av_error_data_sub = np.copy(av_error_data_planck[indices])

        # Define filename for plotting results
        results_filename = figure_dir + results_filename

        # correlate each core region Av and N(HI) for velocity ranges
        vel_range_confint, dgr_confint, likelihoods, center_likelihood,\
            width_likelihood, dgr_likelihood = \
                calc_likelihood_hi_av(hi_cube=hi_data_sub,
                                hi_velocity_axis=velocity_axis,
                                hi_noise_cube=noise_cube_sub,
                                av_image=av_data_sub,
                                av_image_error=av_error_data_sub,
                                dgrs=dgrs,
                                velocity_centers=velocity_centers,
                                velocity_widths=velocity_widths,
                                return_likelihoods=True,
                                plot_results=True,
                                results_filename=results_filename,
                                likelihood_filename=likelihood_dir + \
                                        likelihood_filename + \
                                        '_global.fits',
                                clobber=clobber,
                                conf=conf,
                                contour_confs=contour_confs)

        print('HI velocity integration range:')
        print('%.1f to %.1f km/s' %
              (vel_range_confint[0], vel_range_confint[1]))
        print('DGR:')
        print('%.2f to %.2f' % (dgr_confint[0], dgr_confint[1]))

        global_props['dust2gas_ratio'] = {}
        global_props['dust2gas_ratio_error'] = {}

        global_props['hi_velocity_range'] = vel_range_confint[0:2]
        global_props['hi_velocity_range_error'] = vel_range_confint[2:]
        global_props['dust2gas_ratio']['value'] = dgr_confint[0]
        global_props['dust2gas_ratio_error']['value'] = dgr_confint[1:]
        global_props['hi_velocity_range_conf'] = conf
        global_props['center_likelihood'] = center_likelihood.tolist()
        global_props['width_likelihood'] = width_likelihood.tolist()
        global_props['dgr_likelihood'] = dgr_likelihood.tolist()
        global_props['vel_centers'] = velocity_centers.tolist()
        global_props['vel_widths'] = velocity_widths.tolist()
        global_props['dgrs'] = dgrs.tolist()
        global_props['likelihoods'] = likelihoods.tolist()

        with open(property_dir + global_property_file, 'w') as f:
            json.dump(global_props, f)
def main(dgr=None, vel_range=(-5, 15), vel_range_type='single', region=None,
        av_data_type='planck'):
    ''' Executes script.

    Parameters
    ----------
    dgr : float
        If None, pulls best-fit value from properties.
    vel_range : tuple
        If None, pulls best-fit value from properties.
    '''

    # import external modules
    import pyfits as fits
    import numpy as np
    from mycoords import make_velocity_axis
    import mygeometry as myg
    from myimage_analysis import calculate_nhi, calculate_noise_cube, \
        calculate_sd, calculate_nh2, calculate_nh2_error
    import json
    from os import system,path

    # Script parameters
    # -----------------
    # Name of noise cube
    noise_cube_filename = 'multicloud_hi_galfa_cube_regrid_planckres_noise.fits'

    # Use Planck dust Av map or Kainulainen 2009 optical extinction Av map?
    # options are 'planck' or 'lee12'
    #av_data_type = 'lee12'
    #av_data_type = 'planck'

    # Global parameter file
    prop_file = 'multicloud_global_properties'

    # Which cores to include in analysis?
    cores_to_keep = [# taur
                     'L1495',
                     'L1495A',
                     'B213',
                     'L1498',
                     'B215',
                     'B18',
                     'B217',
                     'B220-1',
                     'B220-2',
                     'L1521',
                     'L1524',
                     'L1527-1',
                     'L1527-2',
                     # Calif
                     'L1536',
                     'L1483-1',
                     'L1483-2',
                     'L1482-1',
                     'L1482-2',
                     'L1478-1',
                     'L1478-2',
                     'L1456',
                     'NGC1579',
                     #'L1545',
                     #'L1517',
                     #'L1512',
                     #'L1523',
                     #'L1512',
                     # Pers
                     'B5',
                     'IC348',
                     'B1E',
                     'B1',
                     'NGC1333',
                     'B4',
                     'B3',
                     'L1455',
                     'L1448',
                     ]

    # Regions to edit the global properties with
    if region == 1:
        region_limit = {'wcs' : (((5, 10, 0), (19, 0, 0)),
                                 ((4, 30, 0), (27, 0, 0))),
                          'pixel' : ()
                         }
    elif region == 2:
        region_limit = {'wcs' : (((4, 30, 0), (19, 0, 0)),
                                 ((3, 50, 0), (29, 0, 0))),
                          'pixel' : ()
                        }
    elif region == 3:
        region_limit = {'wcs' : (((4, 30, 0), (29, 0, 0)),
                                 ((3, 50, 0), (33, 0, 0))),
                          'pixel' : ()
                        }
    else:
        region_limit = None

    # define directory locations
    # --------------------------
    output_dir = '/d/bip3/ezbc/multicloud/data/python_output/nhi_av/'
    figure_dir = '/d/bip3/ezbc/multicloud/figures/'
    av_dir = '/d/bip3/ezbc/multicloud/data/av/'
    dtemp_dir = '/d/bip3/ezbc/multicloud/data/dust_temp/'
    hi_dir = '/d/bip3/ezbc/multicloud/data/hi/'
    co_dir = '/d/bip3/ezbc/multicloud/data/co/'
    core_dir = '/d/bip3/ezbc/multicloud/data/python_output/core_properties/'
    property_dir = '/d/bip3/ezbc/multicloud/data/python_output/'
    region_dir = '/d/bip3/ezbc/multicloud/data/python_output/regions/'

    dust_temp, dust_temp_header = load_fits(dtemp_dir + \
                'multicloud_dust_temp_5arcmin.fits',
            return_header=True)
    dust_temp_error, av_error_header = load_fits(dtemp_dir + \
            'multicloud_dust_temp_error_5arcmin.fits',
            return_header=True)

    # Prepare data products
    # ---------------------
    # Load global properties of cloud
    # global properties written from script
    # 'av/multicloud_analysis_global_properties.txt'
    if region is not None:
        likelihood_filename += '_region{0:.0f}'.format(region)
        results_filename += '_region{0:.0f}'.format(region)

    print('\nLoading global property file {0:s}.txt'.format(prop_file))
    with open(property_dir + prop_file + '.txt', 'r') as f:
        props = json.load(f)

    # Change WCS coords to pixel coords of images
    props = convert_limit_coordinates(props,
                                      header=dust_temp_header,
                                      coords=('region_limit',
                                              'plot_limit',
                                              'region_name_pos'))

    # Load cloud division regions from ds9
    #region_filename = region_dir + \
    #        'multicloud_divisions_coldcore_selection.reg'
    region_filename = region_dir + 'multicloud_divisions.reg'
    props = load_ds9_region(props,
                            filename=region_filename,
                            header=dust_temp_header)

    # Write region name pos
    props['region_name_pos'] = {
             #'taurus 1' : {'wcs' : ((3, 50,  0),
             #                       (21.5, 0, 0)),
             #             },
             #'taurus 2' : {'wcs' : ((5, 10,  0),
             #                       (21.5, 0, 0)),
             #             },
             'taurus' : {'wcs' : ((4, 40,  0),
                                  (21, 0, 0)),
                          },
             'perseus' : {'wcs' : ((3, 30,  0),
                                   (26, 0, 0)),
                          },
             #'perseus 1' : {'wcs' : ((3, 0,  0),
             #                      (34, 0, 0)),
             #             },
             #'perseus 2' : {'wcs' : ((3, 10,  0),
             #                      (22.5, 0, 0)),
             #             },
             'california' : {'wcs' : ((4, 28,  0),
                                      (34, 0, 0)),
                             },
             }


    # Derive relevant region
    pix = props['region_limit']['pixel']
    region_vertices = ((pix[1], pix[0]),
                       (pix[1], pix[2]),
                       (pix[3], pix[2]),
                       (pix[3], pix[0])
                       )

    # block off region
    region_mask = myg.get_polygon_mask(dust_temp, region_vertices)

    print('\nRegion size = ' + \
          '{0:.0f} pix'.format(region_mask[region_mask == 1].size))

    cloud_dict = {'taurus' : {},
                  'perseus' : {},
                  'california' : {},
                  }

    # Plot
    figure_types = ['png', 'pdf']
    for figure_type in figure_types:
        filename = 'multicloud_dust_temp_map' + \
                   '.{0:s}'.format(figure_type)

        print('\nSaving dust temperature map to \n' + filename)

        plot_temp_map(header=dust_temp_header,
                       dust_temp=dust_temp,
                       limits=props['plot_limit']['pixel'],
                       regions=props['regions'],
                       cloud_dict=cloud_dict,
                       cores_to_keep=cores_to_keep,
                       props=props,
                       dtemp_vlimits=(15,18),
                       #dtemp_vlimits=(0.1,30),
                       savedir=figure_dir + 'maps/',
                       filename=filename,
                       show=False)
Example #39
def main(dgr=None,
         vel_range=(-5, 15),
         vel_range_type='single',
         region=None,
         av_data_type='planck'):
    ''' Executes script.

    Parameters
    ----------
    dgr : float
        If None, pulls best-fit value from properties.
    vel_range : tuple
        If None, pulls best-fit value from properties.
    '''

    # import external modules
    import pyfits as fits
    import numpy as np
    from mycoords import make_velocity_axis
    import mygeometry as myg
    from myimage_analysis import calculate_nhi, calculate_noise_cube, \
        calculate_sd, calculate_nh2, calculate_nh2_error
    import json
    from os import system, path

    # Script parameters
    # -----------------
    # Name of noise cube
    noise_cube_filename = 'multicloud_hi_galfa_cube_regrid_planckres_noise.fits'

    # Use Planck dust Av map or Kainulainen 2009 optical extinction Av map?
    # options are 'planck' or 'lee12'
    #av_data_type = 'lee12'
    #av_data_type = 'planck'

    # Global parameter file
    prop_file = 'multicloud_global_properties'

    # Which cores to include in analysis?
    cores_to_keep = [  # taur
        'L1495',
        'L1495A',
        'B213',
        'L1498',
        'B215',
        'B18',
        'B217',
        'B220-1',
        'B220-2',
        'L1521',
        'L1524',
        'L1527-1',
        'L1527-2',
        # Calif
        'L1536',
        'L1483-1',
        'L1483-2',
        'L1482-1',
        'L1482-2',
        'L1478-1',
        'L1478-2',
        'L1456',
        'NGC1579',
        #'L1545',
        #'L1517',
        #'L1512',
        #'L1523',
        #'L1512',
        # Pers
        'B5',
        'IC348',
        'B1E',
        'B1',
        'NGC1333',
        'B4',
        'B3',
        'L1455',
        'L1448',
    ]

    # Regions to edit the global properties with
    if region == 1:
        region_limit = {
            'wcs': (((5, 10, 0), (19, 0, 0)), ((4, 30, 0), (27, 0, 0))),
            'pixel': ()
        }
    elif region == 2:
        region_limit = {
            'wcs': (((4, 30, 0), (19, 0, 0)), ((3, 50, 0), (29, 0, 0))),
            'pixel': ()
        }
    elif region == 3:
        region_limit = {
            'wcs': (((4, 30, 0), (29, 0, 0)), ((3, 50, 0), (33, 0, 0))),
            'pixel': ()
        }
    else:
        region_limit = None

    # define directory locations
    # --------------------------
    output_dir = '/d/bip3/ezbc/multicloud/data/python_output/nhi_av/'
    figure_dir = '/d/bip3/ezbc/multicloud/figures/'
    av_dir = '/d/bip3/ezbc/multicloud/data/av/'
    beta_dir = '/d/bip3/ezbc/multicloud/data/dust_temp/'
    hi_dir = '/d/bip3/ezbc/multicloud/data/hi/'
    co_dir = '/d/bip3/ezbc/multicloud/data/co/'
    core_dir = '/d/bip3/ezbc/multicloud/data/python_output/core_properties/'
    property_dir = '/d/bip3/ezbc/multicloud/data/python_output/'
    region_dir = '/d/bip3/ezbc/multicloud/data/python_output/regions/'

    beta, beta_header = load_fits(beta_dir + \
                'multicloud_dust_beta_5arcmin.fits',
            return_header=True)

    temp, temp_header = load_fits(beta_dir + \
                'multicloud_dust_temp_5arcmin.fits',
            return_header=True)
    av, av_header = load_fits(av_dir + \
                'multicloud_av_planck_5arcmin.fits',
            return_header=True)
    beta_error, av_error_header = load_fits(beta_dir + \
            'multicloud_dust_beta_error_5arcmin.fits',
            return_header=True)

    # Prepare data products
    # ---------------------
    # Load global properties of cloud
    # global properties written from script
    # 'av/multicloud_analysis_global_properties.txt'
    if region is not None:
        likelihood_filename += '_region{0:.0f}'.format(region)
        results_filename += '_region{0:.0f}'.format(region)

    print('\nLoading global property file {0:s}.txt'.format(prop_file))
    with open(property_dir + prop_file + '.txt', 'r') as f:
        props = json.load(f)

    # Change WCS coords to pixel coords of images
    props = convert_limit_coordinates(props,
                                      header=beta_header,
                                      coords=('region_limit', 'plot_limit',
                                              'region_name_pos'))

    # Load cloud division regions from ds9
    #region_filename = region_dir + \
    #        'multicloud_divisions_coldcore_selection.reg'
    region_filename = region_dir + 'multicloud_divisions.reg'
    props = load_ds9_region(props,
                            filename=region_filename,
                            header=beta_header)

    # Write region name pos
    props['region_name_pos'] = {
        #'taurus 1' : {'wcs' : ((3, 50,  0),
        #                       (21.5, 0, 0)),
        #             },
        #'taurus 2' : {'wcs' : ((5, 10,  0),
        #                       (21.5, 0, 0)),
        #             },
        'taurus': {
            'wcs': ((4, 40, 0), (21, 0, 0)),
        },
        'perseus': {
            'wcs': ((3, 30, 0), (26, 0, 0)),
        },
        #'perseus 1' : {'wcs' : ((3, 0,  0),
        #                      (34, 0, 0)),
        #             },
        #'perseus 2' : {'wcs' : ((3, 10,  0),
        #                      (22.5, 0, 0)),
        #             },
        'california': {
            'wcs': ((4, 28, 0), (34, 0, 0)),
        },
    }

    # Derive relevant region
    pix = props['region_limit']['pixel']
    region_vertices = ((pix[1], pix[0]), (pix[1], pix[2]), (pix[3], pix[2]),
                       (pix[3], pix[0]))

    # block off region
    region_mask = myg.get_polygon_mask(beta, region_vertices)

    print('\nRegion size = ' + \
          '{0:.0f} pix'.format(region_mask[region_mask == 1].size))

    cloud_dict = {
        'taurus': {},
        'perseus': {},
        'california': {},
    }

    # Plot
    figure_types = ['png', 'pdf']
    for figure_type in figure_types:
        filename = 'multicloud_beta_map' + \
                   '.{0:s}'.format(figure_type)

        print('\nSaving beta field map to \n' + filename)

        beta[beta < 0] = np.nan

        plot_beta_map(
            header=beta_header,
            #beta=np.log10(beta),
            beta=beta,
            limits=props['plot_limit']['pixel'],
            regions=props['regions'],
            cloud_dict=cloud_dict,
            cores_to_keep=cores_to_keep,
            props=props,
            #vlimits=(0.5,10),
            contour_image=av,
            contours=(3, ),
            vlimits=(1.6, 1.85),
            vscale='linear',
            #vlimits=(0.1,30),
            savedir=figure_dir + 'maps/',
            filename=filename,
            show=False)

        filename = 'multicloud_temp_map' + \
                   '.{0:s}'.format(figure_type)

        print('\nSaving temp field map to \n' + filename)

        temp[temp < 0] = np.nan

        plot_temp_map(
            header=beta_header,
            #beta=np.log10(beta),
            temp=temp,
            limits=props['plot_limit']['pixel'],
            regions=props['regions'],
            cloud_dict=cloud_dict,
            cores_to_keep=cores_to_keep,
            props=props,
            #vlimits=(0.5,10),
            contour_image=av,
            contours=(3, ),
            vlimits=(13.6, 21),
            vscale='linear',
            #vlimits=(0.1,30),
            savedir=figure_dir + 'maps/',
            filename=filename,
            show=False)
def main():

    import grid
    import numpy as np
    from os import system,path
    import mygeometry as myg
    from mycoords import make_velocity_axis
    import json
    from myimage_analysis import calculate_nhi, calculate_noise_cube, \
        calculate_sd, calculate_nh2, calculate_nh2_error

    # parameters used in script
    # -------------------------
    # HI velocity integration range
    # Determine HI integration velocity by CO or correlation with Av?
    hi_av_correlation = True
    velocity_centers = np.arange(-15, 30, 4)
    velocity_widths = np.arange(1, 80, 4)

    # Which likelihood fits should be performed?
    core_correlation = 0
    global_correlation = 1

    # Name of property files results are written to
    global_property_file = 'california_global_properties.txt'
    core_property_file = 'california_core_properties.txt'

    # Threshold of Av below which we expect only atomic gas, in mag
    av_threshold = 100

    # Check if likelihood file already written; rewrite?
    likelihood_filename = 'california_nhi_av_likelihoods'
    clobber = 0
    hi_vel_range_conf = 0.50

    # Name of noise cube
    noise_cube_filename = 'california_hi_galfa_cube_regrid_planckres_noise.fits'

    # define directory locations
    # --------------------------
    output_dir = '/d/bip3/ezbc/california/data/python_output/nhi_av/'
    figure_dir = '/d/bip3/ezbc/california/figures/hi_velocity_range/'
    av_dir = '/d/bip3/ezbc/california/data/av/'
    hi_dir = '/d/bip3/ezbc/california/data/hi/'
    co_dir = '/d/bip3/ezbc/california/data/co/'
    core_dir = '/d/bip3/ezbc/california/data/python_output/core_properties/'
    property_dir = '/d/bip3/ezbc/california/data/python_output/'
    region_dir = '/d/bip3/ezbc/california/data/python_output/ds9_regions/'
    likelihood_dir = '/d/bip3/ezbc/california/data/python_output/nhi_av/'

    # load Planck Av and GALFA HI images, on same grid
    av_data_planck, av_header = load_fits(av_dir + \
                'california_av_planck_5arcmin.fits',
            return_header=True)

    av_error_data_planck, av_error_header = load_fits(av_dir + \
                'california_av_error_planck_5arcmin.fits',
            return_header=True)

    hi_data, h = load_fits(hi_dir + \
                'california_hi_galfa_cube_regrid_planckres.fits',
            return_header=True)

    # make the velocity axis
    velocity_axis = make_velocity_axis(h)

    # Plot NHI vs. Av for a given velocity range
    if not path.isfile(hi_dir + noise_cube_filename):
        noise_cube = calculate_noise_cube(cube=hi_data,
                velocity_axis=velocity_axis,
                velocity_noise_range=[90,110], header=h, Tsys=30.,
                filename=hi_dir + noise_cube_filename)
    else:
        noise_cube, noise_header = load_fits(hi_dir + noise_cube_filename,
            return_header=True)
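
    # Hedged sketch: `calculate_noise_cube` comes from the author's
    # `myimage_analysis` module (not shown here). Conceptually, a per-pixel
    # noise estimate can be taken from line-free channels, e.g. the standard
    # deviation over the 90-110 km/s range used above:
    def _noise_map_sketch(cube, velocity_axis, noise_range=(90., 110.)):
        line_free = (velocity_axis >= noise_range[0]) & \
                    (velocity_axis <= noise_range[1])
        return np.std(cube[line_free], axis=0)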

    # define core properties
    with open(core_dir + core_property_file, 'r') as f:
        cores = json.load(f)
    with open(property_dir + global_property_file, 'r') as f:
        global_props = json.load(f)

    dgr = global_props['dust2gas_ratio']['value']
    # override the fitted value with a fixed DGR
    dgr = 1.22e-1

    cores = convert_core_coordinates(cores, h)

    cores = load_ds9_region(cores,
            filename_base = region_dir + 'california_av_boxes_',
            header = h)

    if core_correlation:
        for core in cores:
            print('\nCalculating for core %s' % core)

            # Grab the mask
            mask = myg.get_polygon_mask(av_data_planck,
                    cores[core]['box_vertices_rotated'])

            indices = ((mask == 0) &\
                       (av_data_planck < av_threshold))

            hi_data_sub = np.copy(hi_data[:, indices])
            noise_cube_sub = np.copy(noise_cube[:, indices])
            av_data_sub = np.copy(av_data_planck[indices])
            av_error_data_sub = np.copy(av_error_data_planck[indices])

            # Define filename for plotting results
            results_filename = figure_dir + 'california_logL_%s.png' % core

            # Correlate each core region Av and N(HI) for velocity ranges
            vel_range_confint, correlations, center_corr, width_corr = \
                    correlate_hi_av(hi_cube=hi_data_sub,
                                    hi_velocity_axis=velocity_axis,
                                    hi_noise_cube=noise_cube_sub,
                                    av_image=av_data_sub,
                                    av_image_error=av_error_data_sub,
                                    dgr=dgr,
                                    velocity_centers=velocity_centers,
                                    velocity_widths=velocity_widths,
                                    return_correlations=True,
                                    plot_results=True,
                                    results_filename=results_filename,
                                    likelihood_filename=likelihood_dir + \
                                            likelihood_filename + \
                                            '{0:s}.fits'.format(core),
                                    clobber=clobber,
                                    hi_vel_range_conf=hi_vel_range_conf)

            print('HI velocity integration range:')
            print('%.1f to %.1f km/s' % (vel_range_confint[0],
                                         vel_range_confint[1]))

            cores[core]['hi_velocity_range'] = vel_range_confint[0:2]
            cores[core]['hi_velocity_range_error'] = vel_range_confint[2:]
            cores[core]['center_corr'] = center_corr.tolist()
            cores[core]['width_corr'] = width_corr.tolist()
            cores[core]['vel_centers'] = velocity_centers.tolist()
            cores[core]['vel_widths'] = velocity_widths.tolist()

        with open(core_dir + core_property_file, 'w') as f:
            json.dump(cores, f)

    if global_correlation:
        print('\nCalculating correlations globally')

        indices = ((av_data_planck < av_threshold))

        hi_data_sub = np.copy(hi_data[:, indices])
        noise_cube_sub = np.copy(noise_cube[:, indices])
        av_data_sub = np.copy(av_data_planck[indices])
        av_error_data_sub = np.copy(av_error_data_planck[indices])

        # Define filename for plotting results
        results_filename = figure_dir + 'california_logL_global.png'

        # Correlate Av and N(HI) globally over the velocity ranges
        vel_range_confint, correlations, center_corr, width_corr = \
                correlate_hi_av(hi_cube=hi_data_sub,
                                hi_velocity_axis=velocity_axis,
                                hi_noise_cube=noise_cube_sub,
                                av_image=av_data_sub,
                                av_image_error=av_error_data_sub,
                                dgr=dgr,
                                velocity_centers=velocity_centers,
                                velocity_widths=velocity_widths,
                                return_correlations=True,
                                plot_results=True,
                                results_filename=results_filename,
                                likelihood_filename=likelihood_dir + \
                                        likelihood_filename + '_global.fits',
                                clobber=clobber,
                                hi_vel_range_conf=hi_vel_range_conf)

        '''
        fit_hi_vel_range(guesses=(0, 30),
                         av_image=av_data_sub,
                         av_image_error=av_error_data_sub,
                         hi_cube=hi_data_sub,
                         hi_velocity_axis=velocity_axis,
                         hi_noise_cube=noise_cube_sub,
                         dgr=dgr)
        '''

        print('HI velocity integration range:')
        print('%.1f to %.1f km/s' % (vel_range_confint[0],
                                     vel_range_confint[1]))

        global_props['hi_velocity_range'] = vel_range_confint[0:2]
        global_props['hi_velocity_range_error'] = vel_range_confint[2:]
        global_props['hi_velocity_range_conf'] = hi_vel_range_conf
        global_props['center_corr'] = center_corr.tolist()
        global_props['width_corr'] = width_corr.tolist()
        global_props['vel_centers'] = velocity_centers.tolist()
        global_props['vel_widths'] = velocity_widths.tolist()

        with open(property_dir + global_property_file, 'w') as f:
            json.dump(global_props, f)
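
# Hedged sketch (conceptual stand-in, not the author's `correlate_hi_av`):
# the correlation above asks, for each trial velocity range, how well
# DGR * N(HI) reproduces the observed Av. One common formulation is a
# chi-squared log-likelihood over a grid of (center, width) pairs, with
# N(HI) [cm^-2] = 1.823e18 * integral(T_B dv) [K km/s]. The DGR units below
# (mag per unit of the derived N(HI)) are an assumption.
def nhi_av_likelihood_sketch(hi_cube, velocity_axis, av, av_error, dgr,
                             centers, widths):
    import numpy as np
    dv = np.abs(velocity_axis[1] - velocity_axis[0])
    logL = np.zeros((len(centers), len(widths)))
    for i, center in enumerate(centers):
        for j, width in enumerate(widths):
            in_range = np.abs(velocity_axis - center) <= width / 2.0
            nhi = 1.823e18 * np.sum(hi_cube[in_range], axis=0) * dv
            av_model = dgr * nhi
            chi2 = np.nansum(((av - av_model) / av_error)**2)
            logL[i, j] = -0.5 * chi2
    return logL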
def main(dgr=None, vel_range=None, vel_range_type='single', region=None,
        av_data_type='planck', use_binned_images=False, background_dim=1):
    ''' Executes script.

    Parameters
    ----------
    dgr : float
        If None, pulls best-fit value from properties.
    vel_range : tuple
        If None, pulls best-fit value from properties.
    '''

    # import external modules
    import pyfits as fits
    import numpy as np
    from mycoords import make_velocity_axis
    import mygeometry as myg
    from myimage_analysis import calculate_nhi, calculate_noise_cube, \
        calculate_sd, calculate_nh2, calculate_nh2_error
    import json

    # Script parameters
    # -----------------
    if use_binned_images:
        bin_string = '_bin'
    else:
        bin_string = ''

    # Name of noise cube
    noise_cube_filename = \
            'california_hi_galfa_cube_regrid_planckres_noise' + bin_string + \
            '.fits'

    # Name of property files results are written to
    prop_file = 'california_global_properties_' + av_data_type + '_scaled'

    # Name of file the background results are written to
    background_file = 'california_background_' + av_data_type

    # Regions, regions to edit the global properties with
    region_limit = None

    # define directory locations
    # --------------------------
    output_dir = '/d/bip3/ezbc/california/data/python_output/nhi_av/'
    figure_dir = '/d/bip3/ezbc/california/figures/'
    av_dir = '/d/bip3/ezbc/california/data/av/'
    hi_dir = '/d/bip3/ezbc/california/data/hi/'
    co_dir = '/d/bip3/ezbc/california/data/co/'
    core_dir = '/d/bip3/ezbc/california/data/python_output/core_properties/'
    property_dir = '/d/bip3/ezbc/california/data/python_output/'
    region_dir = '/d/bip3/ezbc/california/data/python_output/ds9_regions/'

    # Load data
    # ---------
    # Adjust filenames
    #noise_cube_filename += bin_string
    likelihood_filename = 'california_likelihood_{0:s}_bin'.format(av_data_type)
    results_filename = 'california_likelihood_{0:s}_bin'.format(av_data_type)
    # load Planck Av and GALFA HI images, on same grid
    if av_data_type == 'k09':
        print('\nLoading K+09 2MASS data...')
        av_data, av_header = fits.getdata(av_dir + \
                                  'california_av_k09_regrid_planckres.fits',
                                  header=True)
        av_data_error = 0.1 * np.ones(av_data.shape)
    else:
        print('\nLoading Planck data...')
        av_data, av_header = fits.getdata(av_dir + \
                                          'california_av_planck_tau353_5arcmin.fits',
                                          header=True)

        av_error_data, av_error_data_header = fits.getdata(av_dir + \
                                    'california_av_error_planck_tau353_5arcmin.fits',
                                    header=True)

        #av_data -= 0.9 # background

    # Load global properties of cloud
    # global properties written from script
    # 'av/california_analysis_global_properties.txt'
    if region is not None:
        likelihood_filename += '_region{0:.0f}'.format(region)
        results_filename += '_region{0:.0f}'.format(region)

    print('\nReading global parameter file\n' + prop_file + '.txt')
    with open(property_dir + prop_file + '.txt', 'r') as f:
        props = json.load(f)

    # Load background regions from ds9
    props = load_ds9_region(props,
                            filename=region_dir + 'california_background.reg',
                            header=av_header,
                            key='background_regions')

    # Convert plot limits
    props['plot_limit'] = {}
    props['plot_limit']['wcs'] = (((4, 50, 0), (33, 0, 0)),
                                  ((4, 10, 0), (39, 0, 0)))

    props = convert_limit_coordinates(props, coords=('plot_limit',),
            header=av_header)
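
    # Hedged sketch (illustration only): the 'wcs' limits above are
    # ((hours, minutes, seconds), (degrees, arcminutes, arcseconds)) pairs.
    # `convert_limit_coordinates` is the author's routine; converting one such
    # pair to decimal degrees needs only the sexagesimal relations:
    def _hms_dms_to_deg(ra_hms, dec_dms):
        h, m, s = ra_hms
        ra_deg = 15.0 * (h + m / 60.0 + s / 3600.0)  # 1 hour of RA = 15 deg
        d, arcmin, arcsec = dec_dms
        sign = -1.0 if d < 0 else 1.0
        dec_deg = sign * (abs(d) + arcmin / 60.0 + arcsec / 3600.0)
        return ra_deg, dec_deg
    # e.g. _hms_dms_to_deg((4, 50, 0), (33, 0, 0)) -> (72.5, 33.0)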

    # Derive relevant region
    background_mask = np.ones(av_data.shape)
    for background_region in props['background_regions']:
        background_vertices = \
          props['background_regions'][background_region]['poly_verts']['pixel']

        # block off region
        background_mask_temp = myg.get_polygon_mask(av_data,
                                            background_vertices).astype(bool)

        background_mask[background_mask_temp] = 0

    background_mask = background_mask.astype(bool)

    if 0:
        import matplotlib.pyplot as plt
        av_plot_data = np.copy(av_data)
        av_plot_data[background_mask] = np.nan
        plt.imshow(av_plot_data, origin='lower')
        #plt.xlim(props['plot_limit_bin']['pixel'][0:3:2])
        #plt.ylim(props['plot_limit_bin']['pixel'][1:4:2])
        plt.show()

    background = fit_background(av_data, background_mask,
            background_dim=background_dim)

    if background_dim == 1:
        print('\nBackground A_V = {0:.1f} mag'.format(background))
        props['background_1D'] = float(background)

    if background_dim == 2:
        print('\nBackground A_V is 2D')
        props['background_2D'] = background.tolist()
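
    # Hedged sketch: `fit_background` is part of the author's
    # `myimage_analysis` module and is not shown here. Assuming mask == True
    # marks pixels excluded from the estimate (the convention used for the
    # plotting block above), a minimal 1D background is a robust average of
    # the remaining pixels:
    def _background_1d_sketch(image, mask):
        return float(np.nanmedian(image[~mask]))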

    print('\nWriting background parameter file\n' + background_file + '.txt')
    with open(property_dir + background_file + '.txt', 'w') as f:
        json.dump(props, f)

    # Plot
    figure_types = ['png',]
    for figure_type in figure_types:
        filename = figure_dir + 'maps/california_av_background_maps_' + \
                   '{0:d}D.'.format(background_dim) + figure_type

        print('\nSaving maps to \n' + filename)

        plot_av_images(av_image=av_data,
                       av_image_backsub=av_data - background,
                       av_background=background,
                       header=av_header,
                       regions=props['background_regions'],
                       av_vlimits=(-1,16),
                       av_back_vlimits=(0,3),
                       limits=props['plot_limit']['pixel'],
                       filename=filename,
                       show=False)
def derive_box_sizes(co_image, cores_dict, box_width, box_height,
                     co_image_error=None, isoline=0.6, core_rel_pos=0.1,
                     angle_res=1.0):

    """
    Parameters
    ----------
    co_image : array-like
        CO image
    cores_dict : dict
        Dictionary including core information.
    co_image_error : array-like, optional
        Error on CO.
    box_width, box_height : float
        Box dimensions passed to `create_box`.
    core_rel_pos, angle_res : float, optional
        Relative core position within the box, and the angular resolution in
        degrees at which box orientations are sampled.
    isoline : float, optional
        Fraction of peak CO core emission to derive the contour. 60% value from
        Meng et al. (2013, ApJ, 209, 36); not used below, see the sketch after
        this function.

    """

    import mygeometry as myg

    angle_grid = np.arange(0, 360, angle_res)
    box_dict = {}

    for core in cores_dict:
        print('Calculating 12CO size of core {:s}'.format(core))

        # axes are reversed
        core_pos = cores_dict[core]['center_pixel'][::-1]

        box_vertices = create_box(core_pos, box_width, box_height,
                core_rel_pos=core_rel_pos)

        gradient_sums = np.zeros((len(angle_grid)))

        for i, angle in enumerate(angle_grid):
            box_vertices_rotated = rotate_box(box_vertices, core_pos, angle)

            mask = myg.get_polygon_mask(co_image, box_vertices_rotated)

            co_image_masked = np.copy(co_image)

            # extract radial profile weighted by SNR
            radii, profile = get_radial_profile(co_image, binsize=3,
                    center=core_pos[::-1],
                    weights=co_image_error,
                    mask=mask
                    )

            if angle == 90:
                co_image_masked = np.copy(co_image)
                mask = myg.get_polygon_mask(co_image_masked, box_vertices)
                co_image_masked[mask == 0] = np.NaN

            indices = np.where((radii == radii) & \
                               (profile == profile))
            profile, radii = profile[indices], radii[indices]

            # steeper gradients will have smaller sums
            gradient_sum = np.sum(np.gradient(profile, radii))
            gradient_sums[i] = gradient_sum

        # find steepest profile and recreate the box mask
        angle_ideal = angle_grid[gradient_sums == np.min(gradient_sums)][0]

        box_vertices_rotated = rotate_box(box_vertices, core_pos, angle_ideal)

        box_dict[core] = {}
        box_dict[core]['box_vertices_rotated'] = box_vertices_rotated

    return box_dict
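
# Hedged sketch: the `isoline` parameter documented above (60% of the peak CO
# emission, following the cited Meng et al. value) is not used by the body,
# which reuses the rotated-box approach. A minimal contour mask at that
# isoline for a single core's CO image might look like:
def isoline_mask_sketch(co_image, isoline=0.6):
    import numpy as np
    peak = np.nanmax(co_image)
    return co_image >= isoline * peak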
Example #43
def main():

    import grid
    import numpy as np
    from os import system,path
    import mygeometry as myg
    from mycoords import make_velocity_axis
    import json

    # parameters used in script
    # -------------------------
    # wedge should be a few tens of pc.
    # D = 300 pc
    # res = 5'
    # d/pix = 0.43 pc/pix
    wedge_angle = 40.0 # degrees
    wedge_radius = 10.0 / 0.43 # pixels (10 pc at 0.43 pc per pixel)
    core_rel_pos = 0.15 # fraction of the wedge radius at which the core sits
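
    # Worked check of the adopted scale (assuming D = 300 pc and 5' pixels):
    # d/pix = 300 pc * (5 / 60) deg * (pi / 180) rad/deg ~ 0.44 pc per pixel,
    # close to the 0.43 pc/pix adopted for wedge_radius above.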

    # Name of property files
    global_property_file = 'taurus_global_properties.txt'

    # define directory locations
    output_dir = '/d/bip3/ezbc/taurus/data/python_output/nhi_av/'
    figure_dir = '/d/bip3/ezbc/taurus/figures/maps/'
    av_dir = '/d/bip3/ezbc/taurus/data/av/'
    hi_dir = '/d/bip3/ezbc/taurus/data/hi/'
    co_dir = '/d/bip3/ezbc/taurus/data/co/'
    core_dir = '/d/bip3/ezbc/taurus/data/python_output/core_properties/'
    region_dir = '/d/bip3/ezbc/taurus/data/python_output/ds9_regions/'
    property_dir = '/d/bip3/ezbc/taurus/data/python_output/'

    # load Planck Av and GALFA HI images, on same grid
    av_data, av_header = load_fits(av_dir + \
                'taurus_av_planck_5arcmin.fits',
            return_header=True)

    av_error_data, av_error_header = load_fits(av_dir + \
                'taurus_av_error_planck_5arcmin.fits',
            return_header=True)

    # av_data[dec, ra], axes are switched

    # define core properties
    with open(core_dir + 'taurus_core_properties.txt', 'r') as f:
        cores = json.load(f)

    cores = convert_core_coordinates(cores, av_header)

    cores = load_ds9_region(cores,
            filename_base = region_dir + 'taurus_av_boxes_',
            header = av_header)

    av_image_list = []
    av_image_error_list = []
    core_name_list = []

    wedge_dict = derive_ideal_wedge(av_data, cores, wedge_angle, wedge_radius,
            core_rel_pos=core_rel_pos, angle_res=5.,
            av_image_error=av_error_data)

    for core in cores:
        cores[core]['wedge_vertices_rotated'] = \
            wedge_dict[core]['wedge_vertices_rotated'].tolist()
        try:
            cores[core]['center_pixel'] = cores[core]['center_pixel'].tolist()
        except AttributeError:
            cores[core]['center_pixel'] = cores[core]['center_pixel']

    # Open core properties
    with open(core_dir + 'taurus_core_properties.txt', 'w') as f:
        json.dump(cores, f)

    # Derive mask from wedges
    for core in cores:
        mask = myg.get_polygon_mask(av_data,
                cores[core]['wedge_vertices_rotated'])

        av_data_mask = np.copy(av_data)
        av_data_mask[mask == 0] = np.NaN

    # Open file with WCS region limits
    with open(property_dir + global_property_file, 'r') as f:
        global_props = json.load(f)

    global_props = convert_limit_coordinates(global_props, header=av_header)

    # Plot
    figure_types = ['pdf', 'png']
    for figure_type in figure_types:
        plot_av_image(av_image=av_data, header=av_header,
                boxes=True, cores=cores, #limits=[50,37,200,160],
                #title=r'taurus: A$_V$ map with core boxed-regions.',
                savedir=figure_dir,
                limits=global_props['region_limit']['pixel'],
                filename='taurus_av_cores_map.%s' % \
                        figure_type,
                show=0)
def main():

    import grid
    import numpy as np
    from os import system, path
    import myclumpfinder as clump_finder
    import mygeometry as myg
    import json

    # define directory locations
    output_dir = '/d/bip3/ezbc/california/data/python_output/nhi_av/'
    figure_dir = '/d/bip3/ezbc/california/figures/cores/'
    av_dir = '/d/bip3/ezbc/california/data/av/'
    hi_dir = '/d/bip3/ezbc/california/data/galfa/'
    region_dir = '/d/bip3/ezbc/california/data/python_output/ds9_regions/'
    core_dir = '/d/bip3/ezbc/california/data/python_output/core_properties/'

    # load Planck Av image, on same grid as the GALFA HI data
    av_image, h = load_fits(av_dir + 'california_av_planck_5arcmin.fits',
                            return_header=True)

    # define core properties
    with open(core_dir + 'california_core_properties.txt', 'r') as f:
        cores = json.load(f)

    cores = convert_core_coordinates(cores, h)

    cores = load_ds9_region(cores,
                            filename_base=region_dir + 'california_av_boxes_',
                            header=h)

    if True:
        limits = [0, 20, -1, 25]  # plot limits [xmin, xmax, ymin, ymax], linear axes

        # Initialize fit params
        A_p = []
        pho_c = []
        R_flat = []
        p = []

        # Initialize data lists
        radii_pc_list = []
        profile_list = []
        profile_std_list = []
        profile_fit_params_list = []
        core_names_list = []

        for core in cores:
            print('Calculating for core %s' % core)

            # Grab the mask from the DS9 regions
            xy = cores[core]['box_center_pix']
            box_width = cores[core]['box_width']
            box_height = cores[core]['box_height']
            box_angle = cores[core]['box_angle']
            mask = myg.get_rectangular_mask(av_image,
                                            xy[0],
                                            xy[1],
                                            width=box_width,
                                            height=box_height,
                                            angle=box_angle)

            mask = myg.get_polygon_mask(av_image,
                                        cores[core]['box_vertices_rotated'])

            # Get indices where there is no mask, and extract those pixels
            indices = np.where(mask == 1)

            av_image_sub = np.copy(av_image)
            #av_image_sub[mask == 0] = np.NaN
            av_image_sub = np.ma.array(av_image, mask=(mask == 0))

            # to check the positions of the boxes, uncomment the following
            #import matplotlib.pyplot as plt
            #plt.clf()
            #plt.imshow(np.ma.array(av_image_sub, mask=temp_mask))
            #plt.savefig('/usr/users/ezbc/Desktop/map%s.png' % core)
            #plt.clf()

            pix = cores[core]['center_pixel']

            # extract radial profile weighted by SNR
            radii, profile = get_radial_profile(av_image,
                                                binsize=3,
                                                center=pix,
                                                weights=av_image / 0.3,
                                                mask=mask)

            # extract std
            radii, profile_std = get_radial_profile(
                av_image_sub,
                binsize=3,
                center=pix,
                stddev=True,
                weights=av_image_sub / 0.3,
                #mask=mask
            )

            # convert radii from pixels to parsecs
            radii_arcsec = radii * h['CDELT2'] * 60 * 60.  # radii in arcseconds
            radii_pc = radii_arcsec * 300 / 206265.  # radii in parsecs
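            # (206265 is the number of arcseconds per radian; 300 is the
            # cloud distance in pc assumed by this conversion)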

            # extract radii from within the limits
            indices = np.where((radii_pc < limits[1]) & \
                               (profile == profile) & \
                               (profile_std == profile_std))
            radii_pc = radii_pc[indices]
            profile = profile[indices]
            profile_std = profile_std[indices]

            # fit profile with power function
            def function(radius, A_p, pho_c, R_flat, p):
                return A_p * pho_c * R_flat / \
                        (1 + (radius / R_flat)**2)**(p/2. - 0.5)
                #return A_p * radius**p
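
            # (Hedged note: this appears to be a Plummer-like column-density
            # profile, flat inside R_flat and falling roughly as r**-(p - 1)
            # at large radii, with A_p absorbing the projection constant.)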

            profile_fit_params = fit_profile(radii_pc,
                                             profile,
                                             function,
                                             sigma=profile / profile_std)[0]

            # plot the radial profile
            figure_types = ['.pdf', '.png']
            for figure_type in figure_types:
                plot_profile(
                    radii_pc,
                    profile,
                    profile_errors=profile_std,
                    limits=limits,
                    profile_fit_params=profile_fit_params,
                    profile_fit_function=function,
                    savedir=figure_dir + 'individual_cores/',
                    filename='california_profile_av_' + core + figure_type,
                    title=r'Radial A$_V$ Profile of California Core ' + core,
                    show=False)

            A_p.append(profile_fit_params[0])
            pho_c.append(profile_fit_params[1])
            R_flat.append(profile_fit_params[2])
            p.append(profile_fit_params[3])

            radii_pc_list.append(radii_pc)
            profile_list.append(profile)
            profile_std_list.append(profile_std)
            profile_fit_params_list.append(profile_fit_params)
            core_names_list.append(core)

        for figure_type in figure_types:
            plot_profile_grid(
                radii_pc_list,
                profile_list,
                profile_errors_list=profile_std_list,
                limits=limits,
                profile_fit_params_list=profile_fit_params_list,
                profile_fit_function=function,
                savedir=figure_dir + 'panel_cores/',
                filename='california_profile_av_cores_planck' + figure_type,
                title=r'Radial A$_V$ Profiles of California Cores',
                core_names=core_names_list,
                show=False)

        print_fit_params(cores,
                         A_p,
                         pho_c,
                         R_flat,
                         p,
                         filename=output_dir + 'core_profile_fit_data.txt')

        print_fit_params(cores, A_p, pho_c, R_flat, p)
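
# Hedged sketch: `get_radial_profile` is not shown in this excerpt. A minimal
# stand-in, assuming it returns weighted azimuthal averages in radial bins of
# `binsize` pixels around `center` (taken here as (x, y)), could be:
def radial_profile_sketch(image, center, binsize=3, weights=None, mask=None):
    import numpy as np
    ny, nx = image.shape
    yy, xx = np.indices((ny, nx))
    r = np.hypot(yy - center[1], xx - center[0])
    if weights is None:
        weights = np.ones_like(image)
    valid = np.isfinite(image) & np.isfinite(weights)
    if mask is not None:
        valid &= (mask == 1)
    r_bin = (r / binsize).astype(int)
    nbins = int(r_bin.max()) + 1
    sums = np.bincount(r_bin[valid], weights=(image * weights)[valid],
                       minlength=nbins)
    norms = np.bincount(r_bin[valid], weights=weights[valid], minlength=nbins)
    radii = (np.arange(nbins) + 0.5) * binsize
    with np.errstate(invalid='ignore', divide='ignore'):
        profile = sums / norms
    return radii, profile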