Example #1
def clean(runpath='./',itmin=0,itmax=None,distance=8.0):
    """
    Name: clean
    Author: Kari A. Frank
    Date: November 1, 2015
    Purpose: Create a dataframe, and an associated saved file, that
             include iteration and emission measure columns and contain
             only the iterations after convergence

    Usage: 
      import xmcinter.diagnostics as xd
      xd.clean(runpath='./',itmin=0,itmax=None,distance=8.0)

    Input:

     runpath: string of path to the deconvolution files, or a dataframe
              resulting from a previous call to xw.merge_output()

     itmin: minimum iteration to keep

     itmax: maximum iteration to keep (default=None, which keeps
            everything through the final iteration)

     distance: distance to the object in kpc (default=8.0), used to 
               calculate the emission measure


    Output:

     Returns the dataframe of deconvolution parameters, filtered by
      iteration, and with the emission measure and iteration columns
      included, plus a column with the blob sizes in arcsec (if blob
      shape = gaussian)


    Usage Notes:
     - typically this is run after xplt.chi2, to determine the minimum 
       iteration 
     - assumes that relevant column names begin with 'blob'. If a column
       is not found, adding the corresponding derived column is skipped.

    Example:
    """

    # -- import modules --
    # (merge_output and xw are assumed to be imported at module level in
    #  the original source, per the docstring's xw.merge_output reference)
    import numpy as np
    import astro_utilities as astro

    # -- read deconvolution files --
    if isinstance(runpath,str):
        df = merge_output(runpath,save=False)
    else:
        df = runpath

    # -- add blob size in arcsec column --
    if 'blob_lnsigma' in df.columns:
        df['blob_sigma'] = np.exp(df['blob_lnsigma'])

    # -- add tau column, if used lvpshock --
    if 'blob_logtau' in df.columns:
        df['blob_tau'] = 10.0**(df['blob_logtau'])

    # -- add emission measure column --
    if 'blob_norm' in df.columns:
        df['blob_em'] = astro.norm_to_em(df['blob_norm'],
                                         astro.convert_distance(distance,
                                                                'kpc',
                                                                'cm'))
    
    # -- add hydrogen number densities of blobs in cm^-3, hydrogen mass --
    if 'blob_sigma' in df.columns:
        df['blob_volume'] = xw.gaussian_volume(
            astro.convert_arcsec(df['blob_sigma'],distance,'kpc','cm'))
        df['blob_numberdensity'] = astro.em_to_density(
            df['blob_em'],df['blob_volume'],density_type='number')

        df['blob_mass'] = astro.em_to_mass(df['blob_em'],df['blob_volume'],
                                           tounit='sol')

    # -- remove iterations before convergence --
    if itmax is None:
        itmax = np.max(df['iteration'])
    df = xw.filterblobs(df,'iteration',minvals=itmin,maxvals=itmax)

    # -- save as file --
    outfile = ('deconvolution_merged_iter'
               +str(int(itmin))+'-'+str(int(itmax))+'.txt')
    df.to_csv(outfile,sep='\t')

    # -- make traceplots --
#    tracefigs = xplt.traceplots(df)

    return df
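
The Usage Notes above describe the intended workflow: inspect the chi2 trace first, then clean. A minimal sketch of that workflow, assuming the module layout implied by the docstring (xmcinter.diagnostics as xd); the itmin=500 cutoff is purely illustrative:

import xmcinter.diagnostics as xd

# choose itmin by first inspecting the chi2 trace (e.g. via xplt.chi2,
# per the Usage Notes above), then keep only the converged iterations
df = xd.clean(runpath='./', itmin=500, itmax=None, distance=8.0)

# the derived columns are present when the run included the matching
# 'blob_' columns (e.g. blob_norm for blob_em)
print(df[['iteration', 'blob_em']].head())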
Example #2
def iteration_image(data,params,weights,nbins_x,nbins_y,binsize,xmin,ymin,
                    iteration_type,shape,blobx,bloby,blobsize,use_ctypes,
                    fast=True,n_int_steps=1000):
    """Combine the blobs from a single iteration into one image per parameter."""
    import numpy as np
    import pandas as pd
    from wrangle import weighted_median,gaussian_volume

    #--initialize stack of 2D images, one for each parameter--
    iterimages = np.zeros((nbins_x,nbins_y,len(params)))

    #----Calculate blob volumes in correct units (usually arcsec^3)----
    if shape == 'gauss':
        volumes = gaussian_volume(data[blobsize]) 
    elif shape == 'sphere':
        volumes = (4.0/3.0)*np.pi*data[blobsize]**3.0
    else: # points
        volumes = (0.1*binsize)**3.0 # set to much smaller than pixel

    #--loop over image--
    for x in range(nbins_x):
        #get x integral over this pixel's column
        lowerx = int(xmin + x*binsize)
        upperx = int(xmin + x*binsize + binsize)
        if shape == 'gauss' or shape == 'points':
            if fast is False:
                # only use fast=False if scipy.integrate is not available
                x_blob_integrals = gaussian_integral(lowerx,upperx,
                                                     n_int_steps,
                                                     data[blobx],
                                                     data[blobsize])
            else:
                x_blob_integrals = data.apply(
                    lambda d: gaussian_integral_quad(lowerx,upperx,
                                                     d[blobx],d[blobsize],
                                                     use_ctypes=use_ctypes),
                    axis=1)
        elif shape == 'sphere':
            print("ERROR: spherical_integral() not yet implemented")
            x_blob_integrals = spherical_integral(lowerx,upperx,
                                                  n_int_steps,
                                                  data[blobx],
                                                  data[blobsize])
        for y in range(nbins_y):
            #get y integral over this pixel's row
            lowery = int(ymin + y*binsize)
            uppery = int(ymin + y*binsize + binsize)
            if shape == 'gauss' or shape == 'points':
                if fast is False:
                    y_blob_integrals = gaussian_integral(lowery,uppery,
                                                         n_int_steps,
                                                         data[bloby],
                                                         data[blobsize])
                else:
                    y_blob_integrals = data.apply(
                        lambda d: gaussian_integral_quad(lowery,uppery,
                                                         d[bloby],d[blobsize],
                                                         use_ctypes=use_ctypes),
                        axis=1)
            elif shape == 'sphere':
                y_blob_integrals = spherical_integral(lowery,uppery,
                                                      n_int_steps,
                                                      data[bloby],
                                                      data[blobsize])

            #calculate the fraction of each blob's volume in this pixel

            if shape != 'points':
                # !! for now this assumes gaussian volume !!
                # x and y integrals times the full dz integral give the
                # blob volume in this pixel; divide by the total volume
                fractions = (x_blob_integrals*y_blob_integrals*
                             (2.0*np.pi*data[blobsize]**2.0)**0.5 / volumes)
            else:
                # for now, points is implemented by setting the volumes
                #   to be much smaller than a pixel size
                fractions = (x_blob_integrals*y_blob_integrals*
                             (2.0*np.pi*data[blobsize]**2.0)**0.5 / volumes)
#                    print("points is not yet implemented")
                    # if assuming points, then fraction=1 or 0
#                    fractions = point_integral(lowerx,upperx,lowery,uppery,
#                                               data['x'],data['y'])

            #-combine blobs in this pixel (loop over parameters)-
            for p in range(len(params)):
                if weights[p] is None: # default is equal weights
                    w = pd.Series(np.ones_like(data[params[p]]),
                                  index=data[params[p]].index)
#                elif weights[p] == 'densityspecial':
                    # for plotting density from EM - assumes the column passed
                    # was sqrt(EM*Volume/1.21), so weights=1/Vblobpix
#                    w = 1.0/(fractions*data['blob_volume'])
                else:
                    w = data[weights[p]]
                if iteration_type[p] == 'median':
                    iterimages[x,y,p] = weighted_median(data[params[p]],
                                                        weights=w*fractions)
                elif iteration_type[p] == 'average':
                    iterimages[x,y,p] = np.average(data[params[p]],
                                                   weights=w*fractions)
                elif iteration_type[p] == 'total':
                    iterimages[x,y,p] = np.sum(data[params[p]]*w*fractions)
                elif iteration_type[p] == 'max':
                    iterimages[x,y,p] = np.max(data[params[p]]*w*fractions)
                else:
                    print("ERROR: unrecognized iteration_type")

    return iterimages
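
For reference, the fractions computed above are the product of the x and y pixel integrals with the full z integral of a 3D Gaussian, divided by the blob's total volume (2*pi*sigma**2)**1.5. A self-contained sketch of that math using scipy.special.erf; the helper names here (gauss_1d_integral, pixel_fraction) are illustrative, not the actual gaussian_integral_quad/gaussian_volume implementations:

import numpy as np
from scipy.special import erf

def gauss_1d_integral(a, b, mu, sigma):
    # integral of exp(-(t-mu)**2/(2*sigma**2)) from a to b
    s = sigma*np.sqrt(2.0)
    return sigma*np.sqrt(np.pi/2.0)*(erf((b-mu)/s) - erf((a-mu)/s))

def pixel_fraction(lowerx, upperx, lowery, uppery, x0, y0, sigma):
    # fraction of a 3D gaussian blob's volume falling in one pixel
    # column (integrated over the full z extent, as in iteration_image)
    ix = gauss_1d_integral(lowerx, upperx, x0, sigma)
    iy = gauss_1d_integral(lowery, uppery, y0, sigma)
    iz = np.sqrt(2.0*np.pi*sigma**2)  # full z integral
    return ix*iy*iz/(2.0*np.pi*sigma**2)**1.5

# a blob well inside the pixel bounds contributes ~all of its volume
print(pixel_fraction(-50, 50, -50, 50, 0.0, 0.0, 5.0))  # ~1.0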