Example #1
def compute_Q_analytical(component_table):
    """Compute Q factors analytically.
    """
    q_table = Table()
    q_table.add_column(Column(data=component_table['Name'], name='Q_AB'))
    
    Q_all_list = ['All others']
      
    for j in range(len(component_table)):
        # Get parameters A
        row_A = component_table[j]
        x_A, y_A, sigma_A, N_A = row_A['GLON'], row_A['GLAT'], row_A['Sigma'], row_A['Norm']
         
        # Compute Q_factor all others
        components_all = [[row['GLON'], row['GLAT'], row['Sigma'], row['Norm']] for row in component_table if row.index != j]
        Q_All = compute_Q_from_components([[x_A, y_A, sigma_A, N_A]], components_all)
        Q_all_list.append(Q_All)
        
        # Compute Q factors pairwise
        Q_AB_list = np.zeros(len(component_table))
        for i, row_B in enumerate(component_table):
            # Get parameters B
            x_B, y_B, sigma_B, N_B = row_B['GLON'], row_B['GLAT'], row_B['Sigma'], row_B['Norm']
            
            # Compute Q_factor
            Q_AB = Q_factor_analytical(sigma_A, sigma_B, x_A, y_A, x_B, y_B)
            Q_AB_list[i] = Q_AB
        q_table.add_column(Column(data=Q_AB_list, name=row_A['Name']))
    q_table.add_row(Q_all_list)    
    return q_table
Example #2
def read_model_components(cfg_file):
    """Read model components from ``model_components/*.fits`` and return
    a list of 2D component images with containment masks.
    """
    cfg = configobj.ConfigObj(cfg_file)
    column_names = ('Name', 'Type', 'GLON', 'GLAT', 'Sigma', 'Norm')
    column_types = ('S25', 'S25', np.float32, np.float32, np.float32, np.float32)
    component_table = Table(names=column_names, dtype=column_types)
    
    # Build data table
    for component in cfg.keys():
        type_ = cfg[component]['Type']
        glon = cfg[component]['GLON']
        glat = cfg[component]['GLAT']
        sigma = cfg[component]['Sigma']
        norm = cfg[component]['Norm']
        component_table.add_row([component, type_, glon, glat, sigma, norm])
    if os.path.exists('model_components/'):
        read_fits_files(component_table)
    else:
        logging.error('No model components found. Please re-run morph_fit.')
    if os.path.exists('fit.reg'):
        read_region_file(component_table)
    else:
        logging.info('Computing containment radii')
        compute_containment_radius(component_table)
    return component_table
Example #3
def _create_bibcode_table(data, splitter):
    ref_list = [splitter + ref for ref in data.split(splitter)][2:]
    max_len = max([len(r) for r in ref_list])
    table = Table(names=['References'], dtype=['S%i' % max_len])
    for ref in ref_list:
        table.add_row([ref.decode('utf-8')])
    return table
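For a single string column like this, astropy can also build the table in one step from a dict of columns, inferring the dtype from the longest entry; a minimal sketch with hypothetical bibcodes:

from astropy.table import Table

refs = ['2001ApJ...548L.139X', '2003AJ....125.1649Y']  # hypothetical bibcodes
table = Table({'References': refs})  # dtype inferred from the longest string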
Example #4
def findzrange_line(linelist, l0, l1, inside=True, threshold=0.2, survey='sdss'):
    """
    Find the right red shift range such that lines are inside/outside of
    throughput > treshold*max range of band. 

    Note that if choose outside (inside=False), in the file z0>z1 for later 
    convenience. 


    Parameters
    -----
    linelist: string
        like OIII or HaNIISII

    l0, l1: float 
        line wavelength range in angstrom, doesn't need to be in order

    inside: bool
        if True, then return the z range that contains line in the band, 
        otherwise the range that have non of the lines in the band. 
    """

    # setup file paths
    localpath = filtertools.getlocalpath()
    filefb = localpath+survey+'/'+'filterboundary_'+str(threshold)+'.txt'
    if inside: 
        fileout = localpath+survey+'/'+'zrange_wline_'+linelist+'_'+'%.1f'%threshold+'.txt'
    else:
        fileout = localpath+survey+'/'+'zrange_nline_'+linelist+'_'+'%.1f'%threshold+'.txt'

    # setup params
    bands = filtertools.surveybands[survey]

    # make table
    lmin, lmax = np.sort(np.array([l0,l1]))

    tabout = Table([[],[],[],], names=('band','z0','z1'), dtype=('S1', 'f8', 'f8'))

    # read the filter boundary table once
    fb = Table.read(filefb, format='ascii')

    for band in bands:

        # get filter boundary for this band
        w1,w2 = fb[fb['band']==band]['w1','w2'][0]

        # calculate corresponding redshift
        if inside:
            z0 = w1/lmin-1.
            z1 = w2/lmax-1.
        else: 
            z0 = w2/lmin-1.
            z1 = w1/lmax-1.

        tabout.add_row([band,z0,z1])

    # output
    tabout.write(fileout, format='ascii.fixed_width', delimiter='', overwrite=True)
    return tabout
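The bounds above follow from requiring the observed wavelengths (1+z)*l to sit inside [w1, w2]: for the inside case, w1 <= (1+z)*lmin and (1+z)*lmax <= w2, i.e. z0 = w1/lmin - 1 and z1 = w2/lmax - 1, exactly as computed in the loop. A quick check with hypothetical numbers:

lmin, lmax = 4959.0, 5007.0  # hypothetical: [OIII] doublet, rest-frame angstrom
w1, w2 = 5400.0, 6900.0      # hypothetical band edges at threshold*max throughput
z0, z1 = w1 / lmin - 1., w2 / lmax - 1.
# any z in [z0, z1] keeps both lines inside [w1, w2]
print(round(z0, 3), round(z1, 3))  # 0.089 0.378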


    
Example #5
def filter_by_alpha(data, key, value):
    print("Filtering by %s == %s" % (key, str(value)))
    new_data = Table(data[0:0])
    for d in data:
        if d[key] == value:
            new_data.add_row(d)
    return new_data
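The same filter can be written without the explicit add_row loop, since astropy tables support boolean-mask indexing; an equivalent sketch (function name hypothetical):

def filter_by_alpha_masked(data, key, value):
    # boolean-mask indexing returns a new Table with only the matching rows
    return data[data[key] == value]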
Example #6
def get_linelists(filelist_file, suffix='PAH.dat', wave_lab=pah_wave_lab, skipr=1):
    """generate a table combining linelists for different objects"""

    # first set up the empty table
    collist = ['ID', 'Filename'] 
    for wavelength in wave_lab: # make column names
        collist += [wavelength, wavelength+'_unc']
    dt_list = ['a20'] * 2 + ['f4'] * (len(collist) - 2)  # list of data types: two strings plus a bunch of floats
    linetab = Table(names=collist, dtype = dt_list) # generate a new table

    filelist = np.loadtxt(filelist_file, dtype=str)

    # now put stuff in the table
    for f in filelist: # loop through each file
        vals, uncerts = np.loadtxt(f, unpack=True,usecols=[0,1],skiprows=skipr)
        obj_dict={}
        obj_dict['Filename'] = f 
        obj_dict['ID'] = f[:f.find(suffix)]
        for i,line_lab in enumerate(wave_lab): # put the columns from the file into one row of a table
            if np.isnan(uncerts[i]) or np.abs(vals[i]) < upperlim_tol:  # non-detection or upper limit
                obj_dict[line_lab] = 0.0           
                obj_dict[line_lab+'_unc'] = np.nan    
            else:
                obj_dict[line_lab] = vals[i]
                obj_dict[line_lab+'_unc'] = uncerts[i]
        linetab.add_row(obj_dict)
    return(linetab)
Example #7
def radius_testing(cenX1, cenY1, fnames1, cenX2, cenY2, fnames2, r_src_low, r_src_up, r_src_inc, r_in_low, r_in_up, r_in_inc, r_out_low, r_out_up, r_out_inc, Red = False, Red2 = False):
    
    r_source = np.arange(r_src_low,r_src_up,r_src_inc)
    r_inner = np.arange(r_in_low,r_in_up,r_in_inc)
    r_outer = np.arange(r_out_low,r_out_up,r_out_inc)
    rad_test = Table(names=('norm_stdev', 'r_source', 'r_in','r_out', 'rIn - r', 'rOut - rIn'))
    for R in r_source:
        for R_in in r_inner:
            for R_out in r_outer:
                if (R<R_in) and (R<R_out) and (R_in<R_out):
                    data1 = time_series(cenX1, cenY1, fnames1, r = R, r_in = R_in, r_out = R_out, red = Red, red2 = Red2)
                    data2 = time_series(cenX2, cenY2, fnames2, r = R, r_in = R_in, r_out = R_out, red = Red, red2 = Red2)
                    detrended1 = linear_bestfit(data1['time'], data1['res_flux'], 0.00002, 1)
                    detrended2 = linear_bestfit(data2['time'], data2['res_flux'], 0.00002, 1)
                    av = (detrended1 + detrended2)/2.
                    stdev = np.std(av)/np.median(av)
                    rad_test.add_row([stdev, R, R_in, R_out, R_in-R, R_out-R_in])
    
    # Finding the best combination
    a = rad_test['norm_stdev']
    best_idx = np.argmin(a)
    min_std_dev = a[best_idx]
    best_r = rad_test['r_source'][best_idx]
    best_r_in = rad_test['r_in'][best_idx]
    best_r_out = rad_test['r_out'][best_idx]
    print("The minimum standard deviation is %f" % min_std_dev)
    print("It occurs for the radius r = %f" % best_r)
    print("It occurs for the inner radius r_in = %f" % best_r_in)
    print("It occurs for the outer radius r_out = %f" % best_r_out)
    return rad_test
Example #8
    def get_table(self, **kwargs):

        D = self.lineLum(**kwargs)

        # remap names to match RADEX
        name_mapping = {'upper':'upperlevel',
                        'lower':'lowerlevel',
                        'freq':'frequency',}


        names = D[0].keys()
        T = Table(names=[name_mapping[n]
                         if n in name_mapping
                         else n
                         for n in names],
                  dtype=[type(D[0][k]) for k in names])
    
        for row in D:
            T.add_row([row[k] for k in names])

        T.add_column(Column(name='upperlevelpop',
                            data=self.upperlevelpop,
                            dtype='float'))
        T.add_column(Column(name='lowerlevelpop',
                            data=self.lowerlevelpop,
                            dtype='float'))

        return T
Example #9
def _json_to_table(results):
    """Parse select JSON results into Astropy table."""
    from astropy.table import Table

    colnames = ('created_at', 'type', 'public', 'repo_name', 'description')
    coltypes = ('S11', 'S50', 'S6', 'S50', 'S255')
    tab = Table(names=colnames, dtype=coltypes)

    for r in results:
        r_type = r['type']
        payload = r['payload']

        # TODO: What payload info is useful to display?
        if r_type == 'PushEvent':
            r_descrip = payload['commits'][0]['message']
        elif r_type == 'PullRequestEvent':
            r_descrip = f"{payload['action']} {payload['number']}"
        elif r_type == 'IssuesEvent':
            r_descrip = f"{payload['action']} {payload['issue']['number']}"
        # TODO: Implement CreateEvent, IssueCommentEvent,
        #       PullRequestReviewCommentEvent, ReleaseEvent
        else:
            r_descrip = f'unknown for {r_type}'

        tab.add_row((r['created_at'], r_type, r['public'],
                     r['repo']['name'], r_descrip))

    return tab
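Note that fixed-width 'S' dtypes like those above silently truncate values longer than the declared width when a row is added, so the 255-character budget for description matters; a minimal demonstration:

from astropy.table import Table

tab = Table(names=('description',), dtype=('S10',))
tab.add_row(('a' * 20,))           # longer than the declared 10-byte width
print(len(tab[0]['description']))  # 10 -> the value was truncated to fit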
Example #10
    def abs_kin(self, lbl):
        """  Create a Table of the Kinematic info

        Parameters
        ----------
        lbl : string
          Label for the Kinematics dict
        """
        from astropy.table import Table

        keys = self.cgm_abs[0].abs_sys.kin[lbl].keys
        t = Table(names=keys,
                  dtype=self.cgm_abs[0].abs_sys.kin[lbl].key_dtype)

        for cgm_abs in self.cgm_abs:
            try:
                kdict = cgm_abs.abs_sys.kin[lbl]
            except KeyError:
                # No dict.  Filling in zeros
                row =  [0 for key in keys]
                t.add_row( row )   
                continue
            # Filling
            row = [kdict[key] for key in keys]
            t.add_row( row )   
        return t
Example #11
def query_mongo(db, collection, query):
    if collection == 'weather':
        names=('date', 'temp', 'clouds', 'wind', 'gust', 'rain', 'safe')
        dtype=(dt, float, float, float, float, int, bool)
    elif collection == 'V20status':
        names=('date', 'focuser_temperature', 'primary_temperature',
               'secondary_temperature', 'truss_temperature',
               'focuser_position', 'fan_speed',
               'alt', 'az', 'RA', 'DEC', 
              )
        dtype=(dt, float, float, float, float, int, int,
               float, float, float, float)
    elif collection == 'images':
        names=('date', 'telescope', 'moon_separation', 'perr_arcmin',
               'airmass', 'FWHM_pix', 'ellipticity')
        dtype=(dt, str, float, float, float, float, float)

    result = Table(names=names, dtype=dtype)
    for entry in db[collection].find(query):
        insert = {}
        for name in names:
            if name in entry.keys():
                insert[name] = entry[name]
        result.add_row(insert)
    return result
Example #12
def lineProfile(i,spec, lambd,ListLines,ListGal):
    linePr = Table(names=('lambda', 'inf', 'sup'), dtype=('i8', 'i8', 'i8'))
    for c in range(0, len(ListLines)):
        lambdLine=ListLines['LAMBDA VAC ANG'][c]          # lines taken from the Astroquery database
        v = np.where((lambd >= lambdLine-1) & (lambd <= lambdLine+1))
        ind = 1
        f = FWHM(lambd[v[0][0]-ind:v[0][0]+ind], ListGal[i]['flux'][v[0][0]-ind:v[0][0]+ind])
        while (len(f)==0):
            ind = ind+1
            f = FWHM(lambd[v[0][0]-ind:v[0][0]+ind], ListGal[i]['flux'][v[0][0]-ind:v[0][0]+ind])
        l_inf = v[0][0]-ind
        l_sup = v[0][0]+ind
        # Once the interval is found, look for the index of the
        # maximum intensity value
        indMaxInt = np.where(ListGal[i]['flux'][l_inf:l_sup]==np.max(ListGal[i]['flux'][l_inf:l_sup]))
        indMax=indMaxInt[0][0]+l_inf
        iM = indMax
        l_inf = 1
        l_sup = 1
        a=ListGal[i]['flux'][indMax]
        while (ListGal[i]['flux'][indMax-l_inf] <= a) | ((ListGal[i]['flux'][indMax-l_inf]-spec[indMax-l_inf])>10):
            a = ListGal[i]['flux'][indMax-l_inf]
            l_inf = l_inf+1

        a=ListGal[i]['flux'][indMax]
        while (ListGal[i]['flux'][indMax+l_sup] <= a) | ((ListGal[i]['flux'][indMax+l_sup]-spec[indMax+l_sup])>10):
            a = ListGal[i]['flux'][indMax+l_sup]
            l_sup = l_sup+1

        l_inf = indMax-l_inf+1
        l_sup = indMax+l_sup
        linePr.add_row((iM, l_inf, l_sup))        
    return linePr
Example #13
 def table(self, filter_homogenous=False):
     """Return a text table for this datagroup. See :meth:`output`.
     
     .. note:: This method currently returns a list of strings. In the future, it will return an :class:`astropy.table.Table` object containing only the desired header keywords.
     
     """
     from astropy.table import Table, Column
             
     if filter_homogenous:
         groups = [ group for group in self if not isinstance(group, ListFITSDataGroup) ]
         list_groups = []
     else:
         groups = [ group for group in self if not isinstance(group, ListFITSDataGroup) ]
         list_groups = [ group for group in self if isinstance(group, ListFITSDataGroup) ]
         
     groups.sort(key=lambda g : g.name)
     
     result = Table([ group.keylist for group in groups ], names=list(map(str, self.keywords)))
     
     name_column = Column(name=str("Name"), data=[ group.name for group in groups ])
     result.add_column(name_column, index=0)
     
     number_column = Column(name=str("N"), data=[ len(group) for group in groups ])
     result.add_column(number_column)
     
     for lgroup in list_groups:
         result.add_row({"Name":lgroup.name, "N":len(lgroup)})
     
     return result
Example #14
    def setup(self, SE_only, no_conv):
        """Return lists of all images and filters used in this Par.
           We will use the unrotated images for use with a single psf
           Image filenames:
             ParXXX/DATA/UVIS/IRtoUVIS/FYYY_UVIS_sci.fits
             ParXXX/DATA/UVIS/IRtoUVIS/FYYY_UVIS_rms.fits
        """
        images = glob(os.path.join(self.imdir, 'F*_UVIS_sci.fits'))

        # dictionary of zero points
        zps = self.get_zp(images[0])

        # build table
        t = Table(data=None, 
                  names=['filt','image','convim','rms','wht','exptime','zp'],
                  dtype=['S10', 'S60', 'S60', 'S60', 'S60', float, float])
        for image in images:
            filt = fits.getheader(image)['FILTER']

            # weight map
            wht = image.split('_sci.fits')[0] + '_wht.fits'

            # clean image for convolution
            tmp = os.path.splitext(image)[0] + '_cln.fits'
            image_cln = os.path.join(self.outdir, os.path.basename(tmp))
            if SE_only is False:
                print('Cleaning %s' % os.path.basename(image))
                if no_conv:
                    clean_image(image, image_cln, cln_by_wht=False, whtim=wht)
                else:
                    clean_image(image, image_cln, cln_by_wht=True, whtim=wht)
            
            # names of convolved images
            if filt == self.reddest_filt:
                convim = image_cln
            else:
                check = re.search(r'\d+', self.reddest_filt)
                rf = check.group(0)
                convim = os.path.join(self.outdir,'%s_convto%s.fits'%(filt,rf))

            # replace zeros with 1.e9 in rms analysis maps
            rms0 = image.split('_sci.fits')[0] + '_rms.fits'
            tmp = os.path.splitext(rms0)[0] + '_analysis.fits'
            rms_analysis = os.path.join(self.outdir, os.path.basename(tmp))
            self.fix_rms_map(rms0, rms_analysis, value=1.e10,rmstype='analysis')

            # for detection image, create detection RMS map as well
            if filt == self.detect_filt:
                tmp2 = os.path.splitext(rms0)[0] + '_detection.fits'
                rms_detect = os.path.join(self.outdir, os.path.basename(tmp2))
                self.fix_rms_map(rms0, rms_detect, value=0.01, 
                                 rmstype='detection', whtim=wht)
            
            exptime = fits.getheader(image)['EXPTIME']
            zp = zps[filt]

            t.add_row([filt, image_cln, convim, rms_analysis, wht, exptime, zp])
        # set detection RMS map
        self.detect_rms = rms_detect
        return t
Example #15
 def frames(self):
     """Returns metadata about the 32 frames in the exposure.
     
     pseudocode:
     
     for filename in filenames:
         for frame in frames:
             obtain frame_corners, seeing, limmag,
             background level, qcgrade, indr
             add to table
     """
     tbl = Table(names=('frame', 'offset', 'ccd', 'band', 'filename', 'ra1', 'dec1', 'ra2', 'dec2', 'ra3', 'dec3', 'ra4', 'dec4'),
                 dtype=('O', 'O', int, 'O', 'O', float, float, float, float, float, float, float, float))
     for ccd in np.arange(1, 33):
         name = '{0}-{1}-{2}'.format(self.offset_name, ccd, self.band)
         row = [name, self.offset_name, ccd, self.band, self.filename]
         xmax, ymax = self.hdulist[ccd].header['NAXIS1'], self.hdulist[ccd].header['NAXIS2']
         corners = [[0, 0], [xmax, 0], [xmax, ymax], [0, ymax]]
         wcs = WCS(self.hdulist[ccd].header)
         mycorners = wcs.wcs_pix2world(corners, 1)
         for value in mycorners.reshape(8):
             row.append(value)
         tbl.add_row(row)
     return tbl
Example #16
def produce_metadata(inputs):
    '''
    Collate image metadata.
    This routine takes a list of FITS files, collates the metadata, and
    outputs it to an astropy table for quick reference.
    Creates an astropy table; all entries are saved as strings.
    '''
    print('Initiating metadata table')
    metadata = Table(names=('filename', 'MJD-OBS', 'FILTER', 'EXPTIME', 'RA0', 'DEC0'), dtype=('S100', 'f8', 'S100', 'S100', 'S100', 'S100'))
    for FITS_file_name in open(inputs['FILE_LIST']):
        fnm = FITS_file_name.strip()  # strip the text-file line breaks
        if len(fnm) == 0:  # ignore blank lines
            continue
        if fnm.startswith('#'):  # ignore commented lines
            continue
        if not os.path.isfile(fnm):
            print('WARNING, THE FOLLOWING FILE DOES NOT EXIST:', fnm)
            continue
        hdulist = fits.open(fnm)
        row = []
        row.append(fnm)
        row.append(float(hdulist[0].header['MJD-OBS']))
        row.append(hdulist[0].header['FILTER'])
        row.append(hdulist[0].header['EXPTIME'])
        row.append(hdulist[0].header['RA'])
        row.append(hdulist[0].header['DEC'])
        metadata.add_row(row)
        hdulist.close()
    metadata.sort(['MJD-OBS'])
    out_file = '../data/'+'metadata_'+inputs['TAG']+'.tbl'
    metadata.write(out_file, format = 'aastex')
    return metadata
Example #17
def photometry():
    filters=['I','B','V','R'] #filter names
    for f in filters:
        All_data=Table(names=('Count','Error','Time'))
        file=open('photometry_'+str(sn_name)+str(f)+'.txt','w')
        super_lotis_path='/Users/zeynepyaseminkalender/Documents/MyFiles_Pyhton/SuperLOTIS_final'
        date_search_path=os.path.join(super_lotis_path, '13*')
        search_str=os.path.join(date_search_path,str(sn_name)+str(f)+'.fits')
        for name in glob(search_str):
            date=extract_date_from_fullpath(name)
            final_date=convert_time(date)
            with fits.open(name) as analysis:
                z = .086
                r = 1 * u.kpc / cosmo.kpc_comoving_per_arcmin(z)
                cordinate = SkyCoord('01:48:08.66 +37:33:29.22', unit = (u.hourangle, u.deg))
                aperture = SkyCircularAperture(cordinate, r)
                exp_time= analysis[0].header['EXPTIME'] # error calculation
                
                data_error = np.sqrt(analysis[0].data*exp_time) / exp_time
                tbl=Table(aperture_photometry(analysis[0],aperture,error=data_error),names=('Count','Count_Error','x_center','y_center','center_input'))
                tbl.keep_columns(['Count','Count_Error'])
                count=tbl[0]['Count']
                error=tbl[0]['Count_Error']
                All_data.add_row((count,error,final_date))
    
        print(All_data, file=file)
        file.close()
    plot(filters)
Example #18
def build_targets(field, path="./"):
    """Top-level program to build target info 

    Parameters:
    -----------
    field: tuple
      (Name, ra, dec)
    """
    # MMT
    mmt_masks, mmt_obs, mmt_targs = mmt_targets(field)

    # DEIMOS
    deimos_sex, deimos_masks, deimos_obs, deimos_targs = deimos_targets(field)

    # COLLATE
    all_masks = deimos_masks + mmt_masks
    all_obs = deimos_obs + mmt_obs
    all_sex = vstack([deimos_sex, mmt_targs], join_type="inner")  # Use vstack when needed

    # Generate Target table
    targ_file = xcasbahu.get_filename(field, "TARGETS")
    cut_sex = all_sex[["TARG_RA", "TARG_DEC", "EPOCH", "TARG_ID", "TARG_MAG", "TARG_IMG", "INSTR", "MASK_NAME"]]
    # cut_sex.write(targ_file,overwrite=True)
    cut_sex.write(targ_file, format="ascii.fixed_width", delimiter="|")
    print("Wrote file {:s}".format(targ_file))

    # Generate MULTI_OBJ file
    multi_file = xcasbahu.get_filename(field, "MULTI_OBJ")
    tab_names = (
        "INSTR",
        "MASK_NAME",
        "MASK_RA",
        "MASK_DEC",
        "MASK_EPOCH",
        "MASK_PA",
        "DATE_OBS",
        "DISPERSER",
        "TEXP",
        "CONDITIONS",
    )
    dtypes = ("S12", "S25", "S12", "S13", "f4", "f4", "S11", "S10", "f4", "S25")
    multi_tab = Table(names=tab_names, dtype=dtypes, masked=True)
    for kk, mask in enumerate(all_masks):
        # Loop on observations
        if all_obs[kk] is not None:
            for obs in all_obs[kk]:
                maskobs = copy.deepcopy(mask)
                maskobs["DATE_OBS"] = obs["DATE"]
                maskobs["TEXP"] = obs["TEXP"]
                maskobs["DISPERSER"] = obs["DISPERSER"]
                maskobs["CONDITIONS"] = obs["CONDITIONS"]
                # Add row
                multi_tab.add_row(maskobs)
        else:
            multi_tab.add_row(mask)

    multi_tab.write(multi_file, format="ascii.fixed_width", delimiter="|")
    print("Wrote file {:s}".format(multi_file))
Example #19
 def table(self):
     bibcode_match = bibcode_regex.search(self.script)
     splitter = bibcode_match.group(2)
     ref_list = [splitter + ref for ref in self.data.split(splitter)][1:]
     max_len = max([len(r) for r in ref_list])
     table = Table(names=['References'], dtype=['S%i' % max_len])
     for ref in ref_list:
         table.add_row([ref])
     return table
Example #20
 def test_add_masked_row_to_masked_table_mapping4(self):
     # When adding values to a masked table, if the mask is specified as a
     # dict, then keys in values should match keys in mask
     t = Table(masked=True)
     t.add_column(MaskedColumn(name='a', data=[1], mask=[0]))
     t.add_column(MaskedColumn(name='b', data=[4], mask=[1]))
     with pytest.raises(ValueError) as exc:
         t.add_row({'b': 5}, mask={'a': True})
     assert exc.value.args[0] == 'keys in mask should match keys in vals'
Example #21
def filter_by_keyvalues(data, key, values):
    print("Filtering by %s == %s" % (key, str(values)))
    new_data = Table(data[0:0])
    for d in data:
        for v in values:
            if d[key] == v:
                new_data.add_row(d)
                break
    return new_data
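For large tables the same multi-value filter can be vectorized with np.isin instead of the nested add_row loop; a minimal sketch (function name hypothetical):

import numpy as np

def filter_by_keyvalues_masked(data, key, values):
    # keep every row whose `key` column matches any of `values`
    return data[np.isin(data[key], values)]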
Example #22
def write_index_file(info_table):
    """
    Write a summary index file in tabular format from ``info_table`` dict, e.g.

        updated_file        baseline_file       date              dy_acis_i dz_acis_i dy_acis_s dz_acis_s
        ------------------- ------------------- ----------------- --------- --------- --------- ---------
        CHARACTERIS_12OCT15 CHARACTERIS_12MAR15 2015:285:01:21:25     -5.00     10.00     15.00    -20.00
    """
    index_file = os.path.join(opt.data_root, 'characteristics', 'index')
    if os.path.exists(index_file):
        index = Table.read(index_file, format='ascii.fixed_width_two_line', guess=False)
        matching = index['updated_file'] == info_table['updated_file']
        index = index[~matching]
        if np.any(matching):
            logger.info('WARNING: replacing existing entry for updated_file={}'
                        .format(info_table['updated_file']))
        shutil.copy(index_file, index_file + '.bak')
    else:
        index = Table(names=['updated_file', 'baseline_file', 'date',
                             'dy_acis_i', 'dz_acis_i', 'dy_acis_s', 'dz_acis_s'],
                      dtype=['S19', 'S19', 'S17', 'f', 'f', 'f', 'f'])
    index.add_row(info_table)
    for colname in ('dy_acis_i', 'dz_acis_i', 'dy_acis_s', 'dz_acis_s'):
        index[colname].format = '.2f'

    logger.info('Writing index file {}'.format(index_file))
    index.write(index_file, format='ascii.fixed_width_two_line')

    # Write an HTML index file as a table
    def self_link(vals):
        """Turn vals into a list of self-referenced links"""
        return ['<a href="{0}">{0}</a>'.format(val) for val in vals]

    # Turn table entries into HTML links to same
    diffs = self_link(x + '_diff.html' for x in index['updated_file'])
    jsons = self_link(x + '.json' for x in index['updated_file'])
    updated_files = self_link(index['updated_file'])
    baseline_files = self_link(index['baseline_file'])
    index.remove_columns(['updated_file', 'baseline_file'])
    index.add_columns([Column(updated_files, name='updated_file'),
                       Column(baseline_files, name='baseline_file'),
                       Column(jsons, name='JSON_info'),
                       Column(diffs, name='diff')],
                      [0, 0, 0, 0])

    index_file = index_file + '.html'
    logger.info('Writing index.html file {}'.format(index_file))
    if os.path.exists(index_file):
        shutil.copy(index_file, index_file + '.bak')
    index.write(index_file, format='ascii.html')

    # Hack: undo the HTML escaping that table write does.
    # TODO: just do the whole thing with jinja template.
    lines = (re.sub(r'&gt;', '>', line) for line in open(index_file, 'r'))
    lines = [re.sub(r'&lt;', '<', line) for line in lines]
    with open(index_file, 'w') as fh:
        fh.writelines(lines)
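The index.add_row(info_table) call above works because add_row accepts a mapping whose keys match the column names, in any order; a stripped-down sketch:

from astropy.table import Table

index = Table(names=['updated_file', 'baseline_file'], dtype=['S19', 'S19'])
index.add_row({'baseline_file': 'CHARACTERIS_12MAR15',
               'updated_file': 'CHARACTERIS_12OCT15'})  # key order does not matter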
Example #23
def dendropix(fileprefix='SgrB2_b3_12M.HC3N'):
    cube = SpectralCube.read(dpath('{0}.image.pbcor.contsub.fits'.format(fileprefix))).minimal_subcube()
    noise = cube.spectral_slab(-200*u.km/u.s, -100*u.km/u.s).std(axis=0)
    keep_mask = cube.max(axis=0) > noise

    tblfile = tpath('{0}.dendrotable.ecsv'.format(fileprefix))
    if os.path.exists(tblfile):
        table = Table.read(tblfile, format='ascii.ecsv')
    else:
        table = Table([Column(name='xpix'),
                       Column(name='ypix'),
                       Column(name='zpix'),
                       Column(name='lon'),
                       Column(name='lat'),
                       Column(name='velo'),
                       Column(name='peakval'),])

    xpyp_done = set(zip(table['xpix'], table['ypix']))
    all_keepers = list(zip(*np.where(keep_mask)))
    xpyp = [x for x in all_keepers if x not in xpyp_done]
    print(len(xpyp), len(all_keepers), len(xpyp_done))
    if len(xpyp_done) > 0:
        assert len(xpyp) < len(all_keepers)


    for ii,(ypix,xpix) in enumerate(ProgressBar(xpyp)):

        data = cube[:,ypix,xpix].value

        error = noise[ypix,xpix].value
        # alternative:
        #error = stats.sigma_clipped_stats(data)[2]

        D = astrodendro.Dendrogram.compute(data, min_value=0,
                                           min_delta=2*error, min_npix=7,
                                           is_independent=astrodendro.pruning.min_peak(5*error))
        if not D.leaves:
            table.add_row([xpix,ypix,]+[np.nan]*5)
            del D
            continue

        #peaks = [S.get_peak()[0][0] for S in D]
        #peak_vals = [S.get_peak()[1] for S in D]
        #peaks = [cube.spectral_axis[S.get_peak()[0][0]].to(u.km/u.s).value for S in D]

        for S in D:
            (peak_pix,),peak_val = S.get_peak()
            velo,lat,lon = cube.world[peak_pix, ypix, xpix]
            table.add_row([xpix,ypix,peak_pix,lon,lat,velo,peak_val])

        if ii % 100 == 0:
            table.write(tblfile, format='ascii.ecsv')

        del D
        del S
        del data
Example #24
 def test_add_masked_row_to_masked_table_mismatch(self):
     t = Table(masked=True)
     t.add_column(MaskedColumn(name='a', data=[1], mask=[0]))
     t.add_column(MaskedColumn(name='b', data=[4], mask=[1]))
     with pytest.raises(TypeError) as exc:
         t.add_row([2, 5], mask={'a': 1, 'b': 0})
     assert exc.value.args[0] == "Mismatch between type of vals and mask"
     with pytest.raises(TypeError) as exc:
         t.add_row({'b': 5, 'a': 2}, mask=[1, 0])
     assert exc.value.args[0] == "Mismatch between type of vals and mask"
Example #25
def var_test_ica(flux_arr_orig, exposure_list, wavelengths, low_n=3, hi_n=100, n_step=1, show_plots=False,
                    show_summary_plot=False, save_summary_plot=True, test_ind=7, real_time_progress=False,
                    idstr=None):
    start_ind = np.min(np.nonzero(flux_arr_orig[test_ind]))
    end_ind = np.max(np.nonzero(flux_arr_orig[test_ind]))

    perf_table = Table(names=["n", "avg_diff2", "max_diff_scaled"], dtype=["i4", "f4", "f4"])
    if hi_n > flux_arr_orig.shape[0]-1:
        hi_n = flux_arr_orig.shape[0]-1

    for n in range(low_n, hi_n, n_step):
        ica = FastICA(n_components = n, whiten=True, max_iter=750, random_state=1234975)
        test_arr = flux_arr_orig[test_ind].copy()

        flux_arr = np.vstack([flux_arr_orig[:test_ind], flux_arr_orig[test_ind+1:]])
        ica_flux_arr = flux_arr.copy()  #keep back one for testing
        ica.fit(ica_flux_arr)

        ica_trans = ica.transform(test_arr.copy(), copy=True)
        ica_rev = ica.inverse_transform(ica_trans.copy(), copy=True)

        avg_diff2 = np.ma.sum(np.ma.power(test_arr-ica_rev[0],2)) / (end_ind-start_ind)
        max_diff_scaled = np.ma.max(np.ma.abs(test_arr-ica_rev[0])) / (end_ind-start_ind)
        perf_table.add_row([n, avg_diff2, max_diff_scaled])

        if real_time_progress:
            print "n: {:4d}, avg (diff^2): {:0.5f}, scaled (max diff): {:0.5f}".format(n, avg_diff2, max_diff_scaled)

        if show_plots:
            plt.plot(wavelengths, test_arr)
            plt.plot(wavelengths, ica_rev[0])
            plt.plot(wavelengths, test_arr-ica_rev[0])

            plt.legend(['orig', 'ica', 'orig-ica'])
            plt.xlim((wavelengths[start_ind], wavelengths[end_ind]))

            plt.title("n={}, avg (diff^2)={}".format(n, avg_diff2))
            plt.tight_layout()
            plt.show()
            plt.close()

    if show_summary_plot or save_summary_plot:
        plt.plot(perf_table['n'], perf_table['avg_diff2'])
        plt.plot(perf_table['n'], perf_table['max_diff_scaled'])
        plt.title("performance")
        plt.tight_layout()
        if show_summary_plot:
            plt.show()
        if save_summary_plot:
            if idstr is None:
                idstr = random.randint(1000000, 9999999)
            plt.savefig("ica_performance_{}.png".format(idstr))
        plt.close()

    return perf_table
Example #26
 def test_add_masked_row_to_masked_table_mapping1(self):
     t = Table(masked=True)
     t.add_column(MaskedColumn(name='a', data=[1], mask=[0]))
     t.add_column(MaskedColumn(name='b', data=[4], mask=[1]))
     t.add_row({'b': 5, 'a': 2}, mask={'a': 1, 'b': 0})
     t.add_row({'a': 3, 'b': 6}, mask={'b': 1, 'a': 0})
     assert t.masked
     assert np.all(np.array(t['a']) == np.array([1, 2, 3]))
     assert np.all(t['a'].mask == np.array([0, 1, 0], bool))
     assert np.all(np.array(t['b']) == np.array([4, 5, 6]))
     assert np.all(t['b'].mask == np.array([1, 0, 1], bool))
Example #27
 def test_add_masked_row_to_masked_table_iterable(self):
     t = Table(masked=True)
     t.add_column(MaskedColumn(name='a', data=[1], mask=[0]))
     t.add_column(MaskedColumn(name='b', data=[4], mask=[1]))
     t.add_row([2, 5], mask=[1, 0])
     t.add_row([3, 6], mask=[0, 1])
     assert t.masked
     assert np.all(np.array(t['a']) == np.array([1, 2, 3]))
     assert np.all(t['a'].mask == np.array([0, 1, 0], bool))
     assert np.all(np.array(t['b']) == np.array([4, 5, 6]))
     assert np.all(t['b'].mask == np.array([1, 0, 1], bool))
Example #28
def filter_by_keyvalue(data, key, values):
    print("Filtering by %s == %s" % (key, str(value)))
    new_data = Table(data[0:0])
    for d in data:
        keep_it = False
        for v in values:
            keep_it  = keep_it and data[key] == v
            if not keep_it:
                break
        if keep_it:
            new_data.add_row(d)
    return new_data
Example #29
def main():

    # get command-line arguments:
    parser=argparse.ArgumentParser()
    parser.add_argument('filename', metavar='SIMTEL_FILE',
                        help='Input simtelarray file')
    parser.add_argument('-o','--output', metavar='FILENAME',
                        help=('output filename (e.g. times.fits), which '
                              'can be any format supported by astropy.table'),
                        default='times.fits.gz')
    args = parser.parse_args()

    # setup output table
    events = Table(names=['EVENT_ID', 'T_REL', 'TRIGGERED_TELS'],
                dtype=[np.int64, np.float64, np.uint8])

    events['TRIGGERED_TELS'].shape = (0, MAX_TELS)
    events['T_REL'].unit = u.s
    events['T_REL'].description = 'Time relative to first event'
    events.meta['INPUT'] = args.filename
    
    trigpattern = np.zeros(MAX_TELS)
    starttime = None
        
    try:
        pyhessio.file_open(args.filename)

        for run_id, event_id in pyhessio.move_to_next_event():

            ts, tns = pyhessio.get_central_event_gps_time()
            gpstime = Time(ts*u.s, tns*u.ns, format='gps', scale='utc')
            if starttime is None:
                starttime = gpstime

            reltime = (gpstime - starttime).sec
                                
            # build the trigger pattern as a fixed-length array
            # (better for storage in FITS format)
            trigtels = pyhessio.get_telescope_with_data_list()
            trigpattern[:] = 0        # zero the trigger pattern
            trigpattern[trigtels] = 1  # set the triggered telescopes to 1

            events.add_row((event_id, reltime, trigpattern))

        events.write(args.output)
        print("Table written to '{}'".format(args.output))
        print(events)

    except Exception as err:
        print("ERROR: {}, stopping".format(err))
        
    finally:
        pyhessio.close_file()
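The shape assignment on the empty TRIGGERED_TELS column is what turns it into a fixed-width vector column, so each add_row call can take a whole trigger-pattern array; a stripped-down sketch of the same trick (MAX_TELS value hypothetical):

import numpy as np
from astropy.table import Table

MAX_TELS = 4  # hypothetical array width
t = Table(names=['EVENT_ID', 'TRIGGERED_TELS'], dtype=[np.int64, np.uint8])
t['TRIGGERED_TELS'].shape = (0, MAX_TELS)  # widen the empty column to (n, MAX_TELS)
t.add_row((42, np.array([0, 1, 1, 0], dtype=np.uint8)))
print(t['TRIGGERED_TELS'].shape)  # (1, 4)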
Example #30
def focplane(tagversion={}):
    '''Write location of CCD detectors to CALDB file.

    Parameters
    ----------
    tagversion : dict
        Keywords for `arcus.utils.TagVersion`. See Examples.

    Example
    -------
    Write focplane.fits to the CALDB:

    >>> focplane(tagversion={'creator': 'Guenther', 'origin': 'MIT'})
    '''
    tagversion = TagVersion(**tagversion)
    det = DetCamera(defaultconf)

    tab = Table(names=['CCDID', 'XPIX', 'YPIX', 'XPXL', 'YPXL', 'XWIDTH', 'YWIDTH',
                       'FOC0', 'FOCN', 'FOCX', 'READDIR'],
                dtype=[int, int, int, float, float, float, float,
                       '3f4', '3f4', '3f4', 'a2']
                )
    for c in ['XPIX', 'YPIX']:
        tab[c].unit = u.pixel
    for c in ['XPXL', 'YPXL', 'XWIDTH', 'YWIDTH', 'FOC0', 'FOCX']:
        tab[c].unit = u.mm

    for e in det.elements:
        row = {'CCDID': e.id_num,
               'XPIX': e.npix[0],
               'YPIX': e.npix[1],
               'XPXL': e.pixsize,
               'YPXL': e.pixsize,
               'XWIDTH': np.linalg.norm(h2e(e.geometry['v_y'])) * 2,
               'YWIDTH': np.linalg.norm(h2e(e.geometry['v_z'])) * 2,
               'FOC0': h2e(e.geometry['center'] - e.geometry['v_y'] - e.geometry['v_z']),
               'FOCN': h2e(e.geometry['e_x']),
               'FOCX': h2e(e.geometry['e_y']),
               'READDIR': '+y',
        }
        tab.add_row(row)
    tab.meta['CALTYPE'] = 'FOCPLANE'
    tab.meta['VERSION'] = versionstring()
    tab.meta['INSTRUME'] = 'CCD'
    tab.meta['FILTER'] = 'none'
    tab.meta['EXTNAME'] = 'CALTYPE'
    tab = tagversion(tab)
    tab.sort('CCDID')
    tab.write(pjoin(conf.caldb_inputdata, 'fits', 'focplane.fits'), overwrite=True)
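Since each row above is assembled as a dict anyway, an alternative (a sketch, not the module's actual code) is to collect the dicts and build the table in one go via the rows argument:

from astropy.table import Table

rows = [{'CCDID': 1, 'XPIX': 2048, 'YPIX': 1024},
        {'CCDID': 0, 'XPIX': 2048, 'YPIX': 1024}]  # hypothetical CCD entries
tab = Table(rows=rows, names=['CCDID', 'XPIX', 'YPIX'])
tab.sort('CCDID')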
Example #31
def make_source_dens_map(catfile,
                         pix_size=10.,
                         mag_name='F475W_VEGA',
                         mag_cut=[24.5, 27]):
    """
    Computes the source density map and store it in a pyfits HDU
    Also writes a text file storing the source density for each source

    INPUTS:
    -------
    catfile: filename of observed catalog
    pix_size: float
         size of pixels in which the source density is computed
    mag_name: string
         name of magnitude column in table
    mag_cut: 2-element list
         magnitude range on which the source density is computed

    OUTPUT:
    -------
    FITS files written to disk.
    """

    cat = Table.read(catfile)
    # force catalog column names to be upper case
    for name in cat.colnames:
        cat.rename_column(name, name.upper())
    # force filter magnitude name to be upper case to match column names
    mag_name = mag_name.upper()

    # get the columns with fluxes
    rate_cols = [s for s in cat.colnames if s[-4:] == 'RATE']
    n_filters = len(rate_cols)

    # create the indices where any of the rates are zero and non-zero
    #   zero = missing data, etc. -> bad for fitting
    #   non-zero = good data, etc. -> great for fitting
    initialize_zero = False
    band_zero_indxs = {}
    print('band, good, zero')
    for cur_rate in rate_cols:
        cur_good_indxs, = np.where(cat[cur_rate] != 0.0)
        cur_indxs, = np.where(cat[cur_rate] == 0.0)
        print(cur_rate, len(cur_good_indxs), len(cur_indxs))
        if not initialize_zero:
            initialize_zero = True
            zero_indxs = cur_indxs
            nonzero_indxs = cur_good_indxs
        else:
            zero_indxs = np.union1d(zero_indxs, cur_indxs)
            nonzero_indxs = np.intersect1d(nonzero_indxs, cur_good_indxs)

        # save the zero indices for each band
        band_zero_indxs[cur_rate] = zero_indxs

    print('all bands', len(nonzero_indxs), len(zero_indxs))

    # Setting map frame
    min_ra = cat['RA'].min()
    max_ra = cat['RA'].max()
    min_dec = cat['DEC'].min()
    max_dec = cat['DEC'].max()

    # Compute number of pixels along each axis; pix_size is in arcsec
    dec_delt = pix_size / 3600.
    n_y = np.fix(np.round((max_dec - min_dec) / dec_delt))
    ra_delt = dec_delt
    n_x = np.fix(
        np.round(
            math.cos(0.5 * (max_dec + min_dec) * math.pi / 180.) *
            (max_ra - min_ra) / ra_delt))
    #ra_delt *= -1. #Not sure why the ra delta would want to be negative...

    n_x = int(np.max([n_x, 1]))
    n_y = int(np.max([n_y, 1]))

    print('# of x & y pixels = ', n_x, n_y)

    ra_limits = min_ra + ra_delt * np.arange(0, n_x + 1, dtype=float)
    dec_limits = min_dec + dec_delt * np.arange(0, n_y + 1, dtype=float)

    cdelt = [ra_delt, dec_delt]
    crpix = np.asarray([n_x, n_y], dtype=float) / 2.
    crval = np.asarray([(min_ra + max_ra), (min_dec + max_dec)]) / 2.

    w = wcs.WCS(naxis=2)
    w.wcs.crpix = crpix
    w.wcs.cdelt = cdelt
    w.wcs.crval = crval
    w.wcs.ctype = ["RA---TAN", "DEC--TAN"]

    N_stars = len(cat)

    world = np.zeros((N_stars, 2), float)
    world[:, 0] = cat['RA']
    world[:, 1] = cat['DEC']
    print('working on converting ra, dec to pix x,y')
    pixcrd = w.wcs_world2pix(world, 1)
    pix_x = pixcrd[:, 0]
    pix_y = pixcrd[:, 1]

    npts_map = np.zeros([n_x, n_y], dtype=float)
    npts_zero_map = np.zeros([n_x, n_y], dtype=float)
    npts_band_zero_map = np.zeros([n_x, n_y, n_filters], dtype=float)
    source_dens = np.zeros(N_stars, dtype=float)

    for i in range(n_x):
        print('x = %s out of %s' % (str(i + 1), str(n_x)))
        for j in range(n_y):
            indxs, = np.where((pix_x > i) & (pix_x <= i + 1)
                              & (pix_y > j) & (pix_y <= j + 1))
            n_indxs = len(indxs)
            indxs_for_SD, = np.where((cat[mag_name][indxs] >= mag_cut[0])
                                     & (cat[mag_name][indxs] <= mag_cut[1]))
            n_indxs = len(indxs_for_SD)
            if n_indxs > 0:
                npts_map[i, j] = n_indxs / (pix_size**2)

                # now make a map of the sources with zero fluxes in
                #   at least one band
                zindxs, = np.where((pix_x[zero_indxs] > i)
                                   & (pix_x[zero_indxs] <= i + 1)
                                   & (pix_y[zero_indxs] > j)
                                   & (pix_y[zero_indxs] <= j + 1))
                if len(zindxs) > 0:
                    npts_zero_map[i, j] = len(zindxs)

                # do the same for each band
                for k, cur_rate in enumerate(rate_cols):
                    tindxs = band_zero_indxs[cur_rate]
                    zindxs, = np.where((pix_x[tindxs] > i)
                                       & (pix_x[tindxs] <= i + 1)
                                       & (pix_y[tindxs] > j)
                                       & (pix_y[tindxs] <= j + 1))
                    if len(zindxs) > 0:
                        npts_band_zero_map[i, j, k] = len(zindxs)

            # save the source density as an entry for each source
            source_dens[indxs] = npts_map[i, j]

    # Save to FITS file
    header = w.to_header()
    hdu = fits.PrimaryHDU(npts_map.T, header=header)
    hdu.writeto(catfile.replace('.fits', '_source_den_image.fits'),
                overwrite=True)

    # Save to FITS file (zero flux sources)
    header = w.to_header()
    hdu = fits.PrimaryHDU(npts_zero_map.T, header=header)
    hdu.writeto(catfile.replace('.fits', '_npts_zero_fluxes_image.fits'),
                overwrite=True)

    for k, cur_rate in enumerate(rate_cols):
        # Save to FITS file (zero flux sources)
        header = w.to_header()
        hdu = fits.PrimaryHDU(npts_band_zero_map[:, :, k].T, header=header)
        hdu.writeto(catfile.replace(
            '.fits', '_npts_zero_fluxes_' + cur_rate + '_image.fits'),
                    overwrite=True)

    # Save the source density for individual stars in a new catalog file
    cat['SourceDensity'] = source_dens
    cat.write(catfile.replace('.fits', '_with_sourceden_inc_zerofluxes.fits'),
              overwrite=True)

    # Save the source density for individual stars in a new catalog file
    #   only those that have non-zero fluxes in all bands
    cat[nonzero_indxs].write(catfile.replace('.fits', '_with_sourceden.fits'),
                             overwrite=True)

    bin_details = Table(names=[
        'i_ra', 'i_dec', 'value', 'min_ra', 'max_ra', 'min_dec', 'max_dec'
    ])
    bin_details.meta['ra_grid'] = ra_limits
    bin_details.meta['dec_grid'] = dec_limits

    for i in range(n_x):

        for j in range(n_y):

            bin_details.add_row([
                i, j, npts_map[i, j], ra_limits[i], ra_limits[i + 1],
                dec_limits[j], dec_limits[j + 1]
            ])

    dm = DensityMap(bin_details)
    dm.write(catfile.replace('.fits', '_sourcedens_map.hd5'))
Example #32
    @property
    def id(self):
        return self._id

    def next(self):
        """
        :return: a set of values that can be used for an planted object builder.
        """
        #        x          y        mag   pix rate      angle  ''/h rate     id
        # 912.48     991.06      22.01      57.32     -45.23      10.60      0
        self._n -= 1
        if self._n < 0:
            raise StopIteration()
        return {'x': self.x(), 'y': self.y(), 'mag': self.mag(), 'sky_rate': self.rate(), 'angle': self.angle(),
                'id': self.id}


planted = Table(names=('x', 'y', 'mag', 'sky_rate', 'angle', 'id'))

for kbo in KBOGenerator(10,
                        rate=Range(0.5, 15.0),
                        angle=Range(-30, 30),
                        mag=Range(21, 25),
                        x=Range(1, 2048),
                        y=Range(1, 4096)):
    planted.add_row(kbo)

fd = open('Object.planted', 'w')
fd.write("# ")
planted.write(fd, format='ascii.fixed_width', delimiter=None)
fd.close()
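Note that Table(names=...) without an explicit dtype creates float64 columns, so even the id values end up as floats in Object.planted; a quick check:

from astropy.table import Table

planted = Table(names=('x', 'y', 'mag', 'sky_rate', 'angle', 'id'))
print(planted.dtype)  # all six columns default to float64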

Example #33
def main():

    observation_num = None

    usage = "Usage: %prog [options] <obsids>\n"
    parser = OptionParser(usage=usage)
    parser.add_option(
        '--start',
        dest='starttime',
        default=None,
        type='str',
        help='Starting time for processing (GPSseconds or ISO UT date)')
    parser.add_option(
        '--stop',
        dest='stoptime',
        default=None,
        type='str',
        help='Final time for processing (GPSseconds or ISO UT date)')
    parser.add_option('--proj',
                      dest='project',
                      default=None,
                      help='Project ID')
    parser.add_option('--destination',
                      dest='destination',
                      default=None,
                      help='Final destination to (remote) copy the MS file')
    parser.add_option('--summaryname',
                      dest='summaryname',
                      default=None,
                      help='Name for output summary table')
    parser.add_option(
        '--downloads',
        dest='downloads',
        default=4,
        type='int',
        help='Number of simultaneous NGAS downloads [default=%default]')
    parser.add_option('--nomissing',
                      dest='allowmissing',
                      default=True,
                      action='store_false',
                      help='Do not allow missing GPU box files')
    parser.add_option('--timeres',
                      dest='timeres',
                      default=4,
                      type='float',
                      help='Output time resolution (s) [default=%default]')
    parser.add_option(
        '--freqres',
        dest='freqres',
        default=40,
        type='int',
        help='Output frequency resolution (kHz) [default=%default]')
    parser.add_option(
        '--caltimeres',
        dest='caltimeres',
        default=0,
        type='float',
        help='Output time resolution for calibrators (s) [default=<timeres>]')
    parser.add_option(
        '--calfreqres',
        dest='calfreqres',
        default=0,
        type='int',
        help='Output frequency resolution (kHz) [default=<freqres>]')
    parser.add_option('--ra',
                      dest='ra',
                      default=None,
                      help='Output RA phase center (deg) [default=metafits]')
    parser.add_option('--dec',
                      dest='dec',
                      default=None,
                      help='Output Dec phase center (deg) [default=metafits]')
    parser.add_option('--cal',
                      dest='cal',
                      default=False,
                      action='store_true',
                      help='Always include a calibrator observation?')
    parser.add_option('--cpus',
                      dest='cottercpus',
                      default=4,
                      type='int',
                      help='Number of CPUs for cotter [default %default]')
    parser.add_option('--clobber',
                      dest='clobber',
                      default=False,
                      action='store_true',
                      help='Clobber existing MS file? [default=False]')
    parser.add_option(
        '--cleanall',
        dest='cleanall',
        default=False,
        action='store_true',
        help=
        'Delete downloaded FITS files and original MS file when done? [default=False]'
    )
    parser.add_option(
        '--cleanfits',
        dest='cleanfits',
        default=False,
        action='store_true',
        help='Delete downloaded FITS files when done? [default=False]')
    parser.add_option(
        '--cleanms',
        dest='cleanms',
        default=False,
        action='store_true',
        help='Delete original MS file when done? [default=False]')
    parser.add_option('--copycommand',
                      dest='copycommand',
                      default='rsync -aruvP',
                      help='Command for (remote) copying [default=%default]')
    parser.add_option(
        '--summaryformat',
        dest='summaryformat',
        default='ascii.commented_header',
        help=
        'Format for output summary table (from astropy.table) [default=%default]'
    )
    parser.add_option('--email',
                      dest='notifyemail',
                      default=None,
                      help='Notification email [default=no email]')
    parser.add_option(
        '--ignorespace',
        dest='ignorespace',
        default=False,
        action='store_true',
        help='Ignore free-space for file download, processing, and copying?')
    parser.add_option(
        "-v",
        "--verbose",
        dest="loudness",
        default=0,
        action="count",
        help="Each -v option produces more informational/debugging output")
    parser.add_option(
        "-q",
        "--quiet",
        dest="quietness",
        default=0,
        action="count",
        help="Each -q option produces less error/warning/informational output")

    (options, args) = parser.parse_args()
    if options.cleanall:
        options.cleanfits = True
        options.cleanms = True

    loglevels = {
        0: [logging.DEBUG, 'DEBUG'],
        1: [logging.INFO, 'INFO'],
        2: [logging.WARNING, 'WARNING'],
        3: [logging.ERROR, 'ERROR'],
        4: [logging.CRITICAL, 'CRITICAL']
    }
    logdefault = 2  # WARNING
    level = max(min(logdefault - options.loudness + options.quietness, 4), 0)
    logging.getLogger('').handlers[1].setLevel(loglevels[level][0])
    #logger.info('Log level set: messages that are %s or higher will be shown.' % loglevels[level][1])

    eh = extra_utils.ExitHandler(os.path.split(sys.argv[0])[-1],
                                 email=options.notifyemail)
    logger.info('**************************************************')
    logger.info('%s starting at %s UT on host %s with user %s' %
                (sys.argv[0], datetime.datetime.now(), socket.gethostname(),
                 os.environ['USER']))
    logger.info('**************************************************')

    if options.ra is not None and options.dec is not None:
        phasecenter = SkyCoord(options.ra,
                               options.dec,
                               frame='icrs',
                               unit='deg')
    else:
        phasecenter = None

    if len(args) == 0:
        if options.starttime is None:
            logger.error('Must supply starttime')
            eh.exit(1)
        if options.stoptime is None:
            logger.error('Must supply stoptime')
            eh.exit(1)

    logger.debug('Using mwapy version %s' % mwapy.__version__)
    result = subprocess.Popen(['cotter', '-version'],
                              stdout=subprocess.PIPE).communicate()[0].decode().strip()
    cotterversion = result.split('\n')[0].split('version')[1].strip()
    AOFlaggerversion = result.split('\n')[1].split('AOFlagger')[1].strip()
    logger.debug('Using cotter version %s' % cotterversion)
    logger.debug('Using AOFlagger version %s' % AOFlaggerversion)
    results = []
    havecalibrator = {}

    if not (options.starttime is None and options.stoptime is None):
        GPSstart = parse_time(options.starttime)
        GPSstop = parse_time(options.stoptime)
        if options.project is not None:
            logger.info(
                'Will preprocess observations from %s to %s with project=%s' %
                (GPSstart, GPSstop, options.project))
        else:
            logger.info('Will preprocess observations from %s to %s' %
                        (GPSstart, GPSstop))

        if options.project is None:
            results = metadata.fetch_observations(mintime=GPSstart - 1,
                                                  maxtime=GPSstop + 1)
        else:
            results = metadata.fetch_observations(mintime=GPSstart - 1,
                                                  maxtime=GPSstop + 1,
                                                  projectid=tokenize(
                                                      options.project))
    if len(args) > 0:
        # add in some additional obsids
        for arg in args:
            results += metadata.fetch_observations(mintime=int(arg) - 1,
                                                   maxtime=int(arg) + 1)

    if results is None or len(results) == 0:
        logger.error('No observations found')
        eh.exit(1)
    logger.info(metadata.MWA_Observation_Summary.string_header())
    # check if there is a calibrator present
    observations = []
    for item in results:
        o = metadata.MWA_Observation_Summary(item)
        logger.info(str(o))
        observations.append(metadata.MWA_Observation(item[0]))
        if observations[-1].center_channel in havecalibrator:
            havecalibrator[observations[-1].center_channel] = havecalibrator[
                observations[-1].
                center_channel] or observations[-1].calibration
        else:
            havecalibrator[
                observations[-1].center_channel] = observations[-1].calibration

    if not all(havecalibrator.values()):
        for k in havecalibrator.keys():
            if not havecalibrator[k]:
                logger.warning(
                    'No calibrator observation found for center channel %d' %
                    k)

        if options.cal:
            cals = []
            for i in range(len(results)):
                channel = observations[i].center_channel
                if not havecalibrator[channel]:
                    cal = find_calibrator.find_calibrator(
                        observations[i].observation_number,
                        matchproject=False,
                        priority='time',
                        all=False)
                    if cal is None:
                        logger.error(
                            'Unable to find an appropriate calibrator scan')
                        eh.exit(1)
                    logger.info(
                        'Found calibrator observation %d for center channel %d'
                        % (cal[0], channel))
                    cals += metadata.fetch_observations(mintime=cal[0] - 1,
                                                        maxtime=cal[0] + 1)
                    logger.info(str(metadata.MWA_Observation_Summary(
                        cals[-1])))
                    havecalibrator[channel] = True
            results += cals
    data = None

    logger.info('Processing observations...\n\n')
    basedir = os.path.abspath(os.curdir)
    for obs in results:
        timeres, freqres = None, None
        processstart = datetime.datetime.now()
        observation = Observation(obs[0],
                                  basedir=basedir,
                                  copycommand=options.copycommand,
                                  clobber=options.clobber,
                                  cleanms=options.cleanms,
                                  cleanfits=options.cleanfits,
                                  checkfree=not options.ignorespace)
        downloadedfiles = observation.download(numdownload=options.downloads)
        if downloadedfiles is None:
            break

        if observation.makemetafits() is None:
            break

        if observation.observation.calibration:
            if options.caltimeres == 0:
                timeres = options.timeres
            else:
                timeres = options.caltimeres
            if options.calfreqres == 0:
                freqres = options.freqres
            else:
                freqres = options.calfreqres
        else:
            timeres, freqres = options.timeres, options.freqres
        msfilesize = observation.cotter(cottercpus=options.cottercpus,
                                        timeres=timeres,
                                        freqres=freqres,
                                        phasecenter=phasecenter,
                                        allowmissing=options.allowmissing)
        if msfilesize is None:
            break

        if options.destination is not None and observation.copy(
                options.destination) is None:
            break

        # build from a list of tuples: a dict literal only keeps insertion
        # order on Python 3.7+, which would defeat the OrderedDict
        row = collections.OrderedDict([
            ('date', processstart.strftime('%Y-%m-%dT%H:%M:%S')),
            ('obsid', obs[0]),
            ('user', os.environ['USER']),
            ('host', socket.gethostname()),
            ('cotter', cotterversion.replace(' ', '_')),
            ('mwapy', mwapy.__version__),
            ('rawfiles', len(observation.downloadedfiles)),
            ('msfilesize', msfilesize),
        ])
        if options.destination is not None:
            row['msfile'] = os.path.join(options.destination,
                                         observation.msfile)
        else:
            row['msfile'] = os.path.join(observation.outputdir,
                                         observation.msfile)
        if data is None:
            data = Table([row])
        else:
            data.add_row(row)
    os.chdir(basedir)
    if data is not None and options.summaryname is not None:
        data = data[('date', 'obsid', 'user', 'host', 'cotter', 'mwapy',
                     'rawfiles', 'msfilesize', 'msfile')]
        try:
            data.write(options.summaryname, format=options.summaryformat)
            logger.info('Summary table written to %s' % options.summaryname)
        except Exception as e:
            logger.error(
                'Unable to write summary table %s with format %s:\n%s' %
                (options.summaryname, options.summaryformat, e))
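A minimal sketch of the summary-table pattern used above, with invented observation IDs: the first row, passed as a one-element list of dicts, defines the column names and dtypes, and every later row is appended with add_row().

import collections
from astropy.table import Table

data = None
for obsid in (1096489016, 1096489616):  # invented IDs
    row = collections.OrderedDict([('obsid', obsid), ('host', 'node01')])
    if data is None:
        data = Table([row])  # first row defines the schema
    else:
        data.add_row(row)
# data.write('summary.txt', format='ascii') would then persist it, as above.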
Example #34
0
def differential_energy_point_source(
        events,
        energy_bin_edges,
        alpha,
        signal_list=("g"),
        mode="MC",
        sensitivity_source_flux=spectra.crab_source_rate,
        n_draws=1000):
    """
    Calculates the energy-differential sensitivity to a point-source

    Parameters
    ----------
    alpha : float
        area-ratio of the on- over the off-region
    energy_bin_edges : numpy array
        array of the bin edges for the sensitivity calculation
    signal_list : iterable of strings, optional (default: ("g",))
        list of keys to consider as signal channels
    mode : string ["MC", "Data"] (default: "MC")
        interpret the signal/not-signal channels in all the dictionaries as
        gamma/background ("MC") or as on-region/off-region ("Data")
        - if "MC":
            the signal channel is taken as the part coming from the source and
            the background channels multiplied by `alpha` are used as the background
            part in the on-region; the background channels themselves are taken as
            coming from the off-regions
        - if "Data":
            the signal channel is taken as the counts reconstructed in the on-region;
            the counts from the background channels multiplied by `alpha` are taken as
            the background estimate for the on-region
    sensitivity_source_flux : callable, optional (default: `crab_source_rate`)
        function of the flux the sensitivity is calculated with
    n_draws : int, optional (default: 1000)
        number of random draws to calculate uncertainties on the sensitivity

    Returns
    -------
    sensitivities : astropy.table.Table
        the sensitivity for every energy bin of `energy_bin_edges`

    """

    # sensitivities go in here
    sensitivities = Table(names=("Energy", "Sensitivity", "Sensitivity_low",
                                 "Sensitivity_up"))
    try:
        sensitivities["Energy"].unit = energy_bin_edges.unit
    except AttributeError:
        sensitivities["Energy"].unit = irf.energy_unit
    sensitivities["Sensitivity"].unit = irf.flux_unit
    sensitivities["Sensitivity_up"].unit = irf.flux_unit
    sensitivities["Sensitivity_low"].unit = irf.flux_unit

    try:
        # trying if every channel has a `weight` column
        for ev in events.values():
            ev["weight"]

        # in case we do have event weights, we sum them within the energy bin

        def sum_events(ev, mask):
            # np.sum over an empty selection is 0, so no guard is needed
            return np.sum(ev["weight"][mask])

    except KeyError:
        # otherwise we simply check the length of the masked energy array;
        # with unit weights, summing is the same as counting the selected events
        def sum_events(ev, mask):
            return len(ev[mask])

    # loop over all energy bins
    # the bins are spaced logarithmically: use the geometric mean as the bin-centre,
    # so when plotted logarithmically, they appear at the middle between
    # the bin-edges
    for elow, ehigh, emid in zip(
            energy_bin_edges[:-1], energy_bin_edges[1:],
            np.sqrt(energy_bin_edges[:-1] * energy_bin_edges[1:])):

        S_events = np.zeros(2)  # [on-signal, off-background]
        N_events = np.zeros(2)  # [on-signal, off-background]

        # count the (weights of the) events in the on and off regions for this
        # energy bin
        for ch, ev in events.items():
            # single out the events in this energy bin
            e_mask = (ev[irf.energy_names["reco"]] > elow) & \
                     (ev[irf.energy_names["reco"]] < ehigh)

            # we need the actual number of events to estimate the statistical error
            N_events[0 if ch in signal_list else 1] += len(ev[e_mask])
            S_events[0 if ch in signal_list else 1] += sum_events(ev, e_mask)

        # If we have no counts in the on-region, there is no sensitivity.
        # If on data the background estimate from the off-region is larger than the
        # counts in the on-region, `sigma_lima` will break! Skip those
        # cases, too.
        if N_events[0] <= 0:
            sensitivities.add_row([emid, np.nan, np.nan, np.nan])
            continue

        if mode.lower() == "data":
            # the background estimate for the on-region is `alpha` times the
            # background in the off-region
            # if running on data, the signal estimate for the on-region is the counts
            # in the on-region minus the background estimate for the on-region
            S_events[0] -= S_events[1] * alpha
            N_events[0] -= N_events[1] * alpha

        MC_scale = S_events / N_events

        scales = []
        # to get the proper Poisson fluctuation in MC, draw the events with
        # `N_events` as lambda and then scale the result to the weighted number
        # of expected events
        if n_draws > 0:
            trials = np.random.poisson(N_events, size=(n_draws, 2))
            trials = trials * MC_scale
        else:
            # if `n_draws` is zero or smaller, don't do any draws and just take the
            # numbers that we have
            trials = [S_events]

        for trial_events in trials:
            # find the scaling factor for the gamma events that gives a 5 sigma
            # discovery in this energy bin
            scale = minimize(diff_to_x_sigma, [1e-3],
                             args=(trial_events, alpha),
                             method='L-BFGS-B',
                             bounds=[(1e-4, None)],
                             options={'disp': False}).x[0]

            scales.append(scale)

        # get the scaling factors for the median and the central 68 %
        # (1 sigma) containment region, i.e. the 16th and 84th percentiles
        scale = np.percentile(scales, (50, 16, 84))

        # get the flux at the bin centre
        flux = sensitivity_source_flux(emid).to(irf.flux_unit)

        # and scale it up by the determined factors
        sensitivity = flux * scale

        # store results in table
        sensitivities.add_row([emid, *sensitivity])

    return sensitivities
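A self-contained sketch of two ideas from the function above, with invented numbers: logarithmically spaced bins use the geometric mean as bin centre, and Table columns carry units.

import numpy as np
import astropy.units as u
from astropy.table import Table

edges = np.logspace(-2, 2, 5) * u.TeV
tab = Table(names=("Energy", "Sensitivity"))
tab["Energy"].unit = edges.unit
for elow, ehigh in zip(edges[:-1], edges[1:]):
    emid = np.sqrt(elow * ehigh)  # geometric mean = midpoint in log space
    tab.add_row([emid.value, np.nan])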
Example #35
0
def _make_catalog(structures, fields, metadata, statistic, verbose=False):
    """
    Make a catalog from a list of structures
    """

    result = None

    try:
        shape_tuple = structures.data.shape
    except AttributeError:
        shape_tuple = None

    if verbose:
        print("Computing catalog for {0} structures".format(len(structures)))
        progress_bar = AnimatedProgressBar(end=max(len(structures), 1),
                                           width=40,
                                           fill='=',
                                           blank=' ')

    for struct in structures:

        values = struct.values(subtree=True)
        indices = np.copy(struct.indices(subtree=True))

        if shape_tuple is not None:
            for index_array, shape in zip(indices, shape_tuple):
                # catch simple cases where a structure wraps around the image boundary
                i2 = np.where(index_array < shape / 2, index_array + shape,
                              index_array)
                # more compact with wrapping -> use the wrapped indices
                if i2.ptp() < index_array.ptp():
                    index_array[:] = i2

        stat = ScalarStatistic(values, indices)
        stat = statistic(stat, metadata)
        row = {lbl: getattr(stat, lbl) for lbl in fields}
        row.update(_idx=struct.idx)

        # first row
        if result is None:
            sorted_row_keys = sorted(row.keys())
            try:
                result = Table(names=sorted_row_keys,
                               dtype=[
                                   int if x == '_idx' else float
                                   for x in sorted_row_keys
                               ])
            except TypeError:  # dtype was called dtypes in older versions of Astropy
                result = Table(names=sorted_row_keys,
                               dtypes=[
                                   int if x == '_idx' else float
                                   for x in sorted_row_keys
                               ])
            for k, v in row.items():
                try:  # Astropy API change
                    result[k].unit = _unit(v)
                except AttributeError:
                    result[k].units = _unit(v)

        # astropy.table.Table should in future support setting row items from
        # quantities, but for now we need to strip off the quantities
        new_row = {}
        for x in row:
            if row[x] is not None:  # in Astropy 0.3+ we no longer need to exclude None items
                if isinstance(row[x], Quantity):
                    new_row[x] = row[x].value
                else:
                    new_row[x] = row[x]
        result.add_row(new_row)

        # Print stats
        if verbose:
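            # AnimatedProgressBar overloads "+" to advance the bar by one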
            progress_bar + 1
            progress_bar.show_progress()

    result.sort('_idx')

    if verbose:
        progress_bar.progress = 100  # Done
        progress_bar.show_progress()
        print("")  # newline

    return result
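A small sketch of the Quantity-stripping workaround above, with invented column names: older astropy Tables could not take Quantity values in add_row, so the plain .value is extracted first.

import astropy.units as u
from astropy.table import Table

row = {'_idx': 3, 'flux': 1.2 * u.Jy}
result = Table(names=['_idx', 'flux'], dtype=[int, float])
new_row = {k: (v.value if hasattr(v, 'unit') else v) for k, v in row.items()}
result.add_row(new_row)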
Example #36
0
def plant(expnums,
          ccd,
          rmin,
          rmax,
          ang,
          width,
          number=10,
          version='s',
          dry_run=False):
    """Plant artificial sources into the list of images provided.

    :param expnums: list of MegaPrime exposure numbers to add artificial KBOs to
    :param ccd: which ccd to work on.
    :param rmin: The minimum rate of motion to add sources at (''/hour)
    :param rmax: The maximum rate of motion to add sources at (''/hour)
    :param ang: The mean angle of motion to add sources
    :param width: The +/- range of angles of motion
    :param version: Add sources to the 'o', 'p' or 's' images
    :param dry_run: don't push results to VOSpace.
    """

    # Construct a list of artificial KBOs with positions in the image
    # and rates of motion within the bounds given by the caller.
    filename = storage.get_image(expnums[0], ccd=ccd, version=version)
    header = fits.open(filename)[0].header
    bounds = util.get_pixel_bounds_from_datasec_keyword(
        header.get('DATASEC', '[33:2080,1:4612]'))

    # generate a set of artifical KBOs to add to the image.
    kbos = Table(names=('x', 'y', 'mag', 'sky_rate', 'angle', 'id'))
    for kbo in KBOGenerator(n=number,
                            x=Range(bounds[0][0], bounds[0][1]),
                            y=Range(bounds[1][0], bounds[1][1]),
                            rate=Range(rmin, rmax),
                            angle=Range(ang - width, ang + width),
                            mag=Range(21.0, 25.0)):
        kbos.add_row(kbo)

    with open('Object.planted', 'w') as fd:
        fd.write("# ")
        kbos.write(fd, format='ascii.fixed_width', delimiter=None)
    for expnum in expnums:
        filename = storage.get_image(expnum, ccd, version)
        psf = storage.get_file(expnum, ccd, version, ext='psf.fits')
        plant_kbos(filename, psf, kbos, get_shifts(expnum, ccd, version), "fk")

    if dry_run:
        return

    uri = storage.get_uri('Object',
                          ext='planted',
                          version='',
                          subdir=str(expnums[0]) + "/ccd%s" %
                          (str(ccd).zfill(2)))

    storage.copy('Object.planted', uri)
    for expnum in expnums:
        uri = storage.get_uri(expnum,
                              ccd=ccd,
                              version=version,
                              ext='fits',
                              prefix='fk')
        filename = os.path.basename(uri)
        storage.copy(filename, uri)

    return
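The bookkeeping pattern above in a self-contained form, with invented values: collect synthetic sources in a Table, then dump them as fixed-width ascii behind a "# " comment lead.

from astropy.table import Table

kbos = Table(names=('x', 'y', 'mag', 'sky_rate', 'angle', 'id'))
kbos.add_row((512.0, 1024.0, 23.5, 1.2, 21.0, 1))
with open('Object.planted', 'w') as fd:
    fd.write("# ")
    kbos.write(fd, format='ascii.fixed_width', delimiter=None)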
Example #37
0
class StdCoeffs:

    coeffs = None  # astropy.table.Table of std coeffs
    fileName = None  # coefficient file name
    observer = None  # observer name
    date = None  # date of the current image
    camera = None  # camera used to create the current image
    telescope = None  # telescope used to create the current image
    std_area = None  # standard area or other field name

    fields = [
        {
            'name': 'OBSERVER',
            'format': '%-10s',
            'dtype': 'U5'
        },
        {
            'name': 'DATE',
            'format': '%-14s',
            'dtype': 'U10'
        },
        {
            'name': 'TV',
            'format': '%11.4f',
            'dtype': 'f4'
        },
        {
            'name': 'ERR_TV',
            'format': '%11.4f',
            'dtype': 'f4'
        },
        {
            'name': 'TVR',
            'format': '%11.4f',
            'dtype': 'f4'
        },
        {
            'name': 'ERR_TVR',
            'format': '%11.4f',
            'dtype': 'f4'
        },
        {
            'name': 'TBV',
            'format': '%11.4f',
            'dtype': 'f4'
        },
        {
            'name': 'ERR_TBV',
            'format': '%11.4f',
            'dtype': 'f4'
        },
        {
            'name': 'CAMERA',
            'format': '%-24s',
            'dtype': 'U24'
        },
        {
            'name': 'TELESCOPE',
            'format': '%-24s',
            'dtype': 'U24'
        },
        {
            'name': 'FIELD',
            'format': '%-24s',
            'dtype': 'U24'
        },
    ]

    def __init__(self, coeffFileName, observer, date, camera, telescope,
                 std_area):
        self.fileName = coeffFileName
        self.observer = observer
        self.date = date
        self.camera = camera
        self.telescope = telescope
        self.std_area = std_area

    def open(self):
        if not exists(self.fileName):
            self.coeffs = Table()
            for field in self.fields:
                self.coeffs.add_column(
                    Column(name=field['name'],
                           dtype=field['dtype'],
                           format=field['format']))

        else:
            self.coeffs = Table.read(self.fileName, format='ascii')
            for field in self.fields:
                self.coeffs[field['name']].format = field['format']


    def addCoeffs(self, Tv, Tvr, Tbv, errTv=None, errTvr=None, errTbv=None):
        lastCoeffs = self.getCoeffs()
        if lastCoeffs is None:
            self.coeffs.add_row(
                (self.observer, self.date, Tv, errTv, Tvr, errTvr, Tbv, errTbv,
                 self.camera, self.telescope, self.std_area))
        else:
            lastCoeffs['TV'] = Tv
            lastCoeffs['ERR_TV'] = errTv
            lastCoeffs['TVR'] = Tvr
            lastCoeffs['ERR_TVR'] = errTvr
            lastCoeffs['TBV'] = Tbv
            lastCoeffs['ERR_TBV'] = errTbv

    def getCoeffs(self):
        flt = (self.coeffs['OBSERVER']
               == self.observer) & (self.coeffs['DATE'] == self.date) & (
                   self.coeffs['CAMERA'] == self.camera) & (
                       self.coeffs['TELESCOPE'] == self.telescope)
        rows = self.coeffs[flt]
        if len(rows) == 0:
            return None
        else:
            return rows[0]

    def exists(self):
        return self.getCoeffs() is not None

    def getBestCoeffs(self):
        flt = (self.coeffs['OBSERVER'] == self.observer) & (
            self.coeffs['CAMERA'] == self.camera) & (self.coeffs['TELESCOPE']
                                                     == self.telescope)
        rows = self.coeffs[flt]
        if len(rows) == 0:
            return None
        elif len(rows) == 1:
            return rows[0]
        else:
            jdnow = pm.jd(self.date)
            delta = abs(jdnow - pm.jd(rows[0]['DATE']))
            ix = 0
            for j in range(len(rows) - 1):
                d = abs(jdnow - pm.jd(rows[j + 1]['DATE']))
                if d < delta:
                    delta = d
                    ix = j + 1
            return rows[ix]

    def getAvgCoeffs(self):
        flt = (self.coeffs['OBSERVER'] == self.observer) & (
            self.coeffs['CAMERA'] == self.camera) & (self.coeffs['TELESCOPE']
                                                     == self.telescope)
        rows = self.coeffs[flt]
        if len(rows) == 0:
            return None
        avgc = deepcopy(rows[0])
        avgc['TV'] = np.average(rows['TV'])
        avgc['TVR'] = np.average(rows['TVR'])
        avgc['TBV'] = np.average(rows['TBV'])

        # TODO: calculate average errors

        return avgc

    def save(self):
        self.coeffs.write(self.fileName,
                          format='ascii.fixed_width_two_line',
                          delimiter=' ')
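A minimal sketch of the schema-building pattern in StdCoeffs.open(), using two of the field descriptors from above: an empty Table gets one typed, formatted Column per descriptor, after which rows can be added.

from astropy.table import Table, Column

fields = [{'name': 'OBSERVER', 'format': '%-10s', 'dtype': 'U5'},
          {'name': 'TV', 'format': '%11.4f', 'dtype': 'f4'}]
coeffs = Table()
for field in fields:
    coeffs.add_column(Column(name=field['name'],
                             dtype=field['dtype'],
                             format=field['format']))
coeffs.add_row(('obs1', 0.0123))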
Example #38
0
                with open('astrometry.txt', 'w') as astrometry:
                    astrometry.write('%f %f %f %f' %
                                     (s_nvss['RA'], s_nvss['DEC'],
                                      s_tgss['RA'], s_tgss['DEC']))

                ra = np.average(
                    [s_nvss['RA'], s_tgss['RA']],
                    weights=[1 / s_nvss['E_RA']**2, 1 / s_tgss['E_RA']**2])
                dec = np.average(
                    [s_nvss['DEC'], s_tgss['DEC']],
                    weights=[1 / s_nvss['E_DEC']**2, 1 / s_tgss['E_DEC']**2])
                spidx, e_spidx = twopoint_spidx_bootstrap([147.,1400.], \
                        [s_tgss['Total_flux'],s_nvss['Total_flux']], [s_tgss['E_Total_flux'], s_nvss['E_Total_flux']], niter=1000)

                t.add_row((ra, dec, \
                        s_nvss['Total_flux'], s_nvss['E_Total_flux'], s_nvss['Peak_flux'], s_nvss['E_Peak_flux'], s_nvss['Isl_rms'], \
                        s_tgss['Total_flux'], s_tgss['E_Total_flux'], s_tgss['Peak_flux'], s_tgss['E_Peak_flux'], s_tgss['Isl_rms'], \
                        spidx, e_spidx, s2n, code, num_match, num_unmatch_NVSS, num_unmatch_TGSS, blob, os.path.basename(image_mask)))

            # if in an island with one or more unmatched NVSS sources -> add each as lower limit (multiple entries)
            if num_unmatch_NVSS > 0 and len(idx_blob_tgss[0]) == 0:
                code = 'L'
                for i in range(num_unmatch_NVSS):
                    s_nvss = nvss.t[idx_blob_unmatched_nvss[i]]
                    s2n = np.sum(s_nvss['Peak_flux'] / s_nvss['Isl_rms'])
                    if s2n < 5:
                        #    print 'skip L: s2n==%f' % s2n
                        continue
                    x, y, _, _ = nvss.w.all_world2pix(s_nvss['RA'],
                                                      s_nvss['DEC'],
                                                      0,
                                                      0,
Example #39
0
    def show_properties(self):
        """Prints a summary of the non-callable attributes of the Periodogram object.

        Prints in order of type (ints, strings, lists, arrays and others).
        Prints in alphabetical order.
        """
        attrs = {}
        for attr in dir(self):
            if not attr.startswith('_'):
                res = getattr(self, attr)
                if callable(res):
                    continue

                if isinstance(res, astropy.units.quantity.Quantity):
                    unit = res.unit
                    res = res.value
                    attrs[attr] = {'res': res}
                    attrs[attr]['unit'] = unit.to_string()
                else:
                    attrs[attr] = {'res': res}
                    attrs[attr]['unit'] = ''

                if attr == 'hdu':
                    # keep the 'unit' entry set above; overwriting the whole
                    # dict would make the add_row() below fail
                    attrs[attr]['type'] = 'list'
                    attrs[attr]['print'] = ', '.join(
                        '{}'.format(r.header['EXTNAME']) for r in res)
                    continue

                if isinstance(res, int):
                    attrs[attr]['print'] = '{}'.format(res)
                    attrs[attr]['type'] = 'int'
                elif isinstance(res, float):
                    attrs[attr]['print'] = '{}'.format(np.round(res, 4))
                    attrs[attr]['type'] = 'float'
                elif isinstance(res, np.ndarray):
                    attrs[attr]['print'] = 'array {}'.format(res.shape)
                    attrs[attr]['type'] = 'array'
                elif isinstance(res, list):
                    attrs[attr]['print'] = 'list length {}'.format(len(res))
                    attrs[attr]['type'] = 'list'
                elif isinstance(res, str):
                    if res == '':
                        attrs[attr]['print'] = '{}'.format('None')
                    else:
                        attrs[attr]['print'] = '{}'.format(res)
                    attrs[attr]['type'] = 'str'
                elif attr == 'wcs':
                    attrs[attr]['print'] = 'astropy.wcs.wcs.WCS'
                    attrs[attr]['type'] = 'other'
                else:
                    attrs[attr]['print'] = '{}'.format(type(res))
                    attrs[attr]['type'] = 'other'

        output = Table(names=['Attribute', 'Description', 'Units'],
                       dtype=[object, object, object])
        types = ['int', 'str', 'float', 'list', 'array', 'other']
        for typ in types:
            for attr, dic in attrs.items():
                if dic['type'] == typ:
                    output.add_row([attr, dic['print'], dic['unit']])
        print('lightkurve.Periodogram properties:')
        output.pprint(max_lines=-1, max_width=-1)
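A tiny sketch of the printing trick above, with invented rows: object-dtype columns accept arbitrary strings, and pprint(max_lines=-1, max_width=-1) prints the whole table without truncation.

from astropy.table import Table

output = Table(names=['Attribute', 'Description', 'Units'],
               dtype=[object, object, object])
output.add_row(['frequency', 'array (500,)', '1 / d'])
output.add_row(['nyquist', '283.2071', ''])
output.pprint(max_lines=-1, max_width=-1)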
Example #40
0
    def build_scandata_table(self, **kwargs):
        """Build a FITS table with likelihood scan data

        Keywords
        --------
        norm_type : str or None
            Type of normalization to use.  Valid options are:

            * norm : Self-normalized
            * sigmav : Reference values of sigmav and J
            * tau : Reference values of tau and D

        Returns
        -------

        table : `astropy.table.Table`
            The table has these columns

        astro_value : float
            The astrophysical J-factor (or D-factor) for this target
        ref_astro : float
            The reference J-factor (or D-factor) used to build `DMSpecTable`
        ref_inter : float
            The reference <sigmav> (or tau) used to build `DMSpecTable`

        norm_scan : array
            The test values of <sigmav> (or tau)
        dloglike_scan : array
            The corresponding values of the negative log-likelihood

        """
        norm_type = kwargs.get('norm_type', None)

        if self.decay:
            astro_str = 'ref_D'
            inter_str = 'ref_tau'
            if norm_type is None:
                norm_type = 'tau'
        else:
            astro_str = 'ref_J'
            inter_str = 'ref_sigmav'
            if norm_type is None:
                norm_type = 'sigmav'

        shape = self._norm_vals.shape
        #dtype = 'f%i'%self._norm_vals.size

        col_normv = Column(name="norm_scan", dtype=float,
                           shape=shape)
        col_dll = Column(name="dloglike_scan", dtype=float,
                         shape=shape)
        col_offset = Column(name="dloglike_offset", dtype=float,
                            shape=shape[0])

        col_astro_val = Column(name="astro_value", dtype=float)
        col_ref_astro = Column(name=astro_str, dtype=float)
        col_ref_inter = Column(name=inter_str, dtype=float)

        collist = [col_normv, col_dll, col_offset,
                   col_astro_val, col_ref_astro,
                   col_ref_inter]

        if norm_type in ['sigmav', 'tau']:
            norm_vals = self._norm_vals / self.ref_inter
        elif norm_type in ['norm']:
            norm_vals = self._norm_vals
        else:
            raise ValueError('Unrecognized normalization type: %s' % norm_type)

        valdict = {"norm_scan": norm_vals,
                   "dloglike_scan": -1 * self._nll_vals,
                   "dloglike_offset": -1 * self._nll_offsets,
                   "astro_value": self.astro_value,
                   astro_str: self.ref_astro,
                   inter_str: self.ref_inter}

        if self._astro_prior is not None:
            col_prior_type = Column(name="prior_type", dtype="S16")
            col_prior_mean = Column(name="prior_mean", dtype=float)
            col_prior_sigma = Column(name="prior_sigma", dtype=float)
            col_prior_applied = Column(name="prior_applied", dtype=bool)
            collist += [col_prior_type, col_prior_mean,
                        col_prior_sigma, col_prior_applied]
            valdict["prior_type"] = self.prior_type
            valdict["prior_mean"] = self.prior_mean
            valdict["prior_sigma"] = self.prior_sigma
            valdict["prior_applied"] = self.prior_applied

        tab = Table(data=collist)
        tab.add_row(valdict)
        return tab
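A minimal sketch of the one-row scan table built above, with invented names and values: vector columns are declared with a shape, and a single add_row() call fills the row from a dict.

import numpy as np
from astropy.table import Table, Column

col_scan = Column(name="norm_scan", dtype=float, shape=(5,))
col_val = Column(name="astro_value", dtype=float)
tab = Table(data=[col_scan, col_val])
tab.add_row({"norm_scan": np.linspace(0.1, 10.0, 5), "astro_value": 3.2e18})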
Example #41
0
def remove_duplicates(file_cat='spidx-cat.txt'):
    """
    Remove duplicates from overlapping regions.
    For each source check if the closest file-center is the one from which is extracted.
    If not it means the same source is closer in another file, delete it.
    """
    from astropy.table import Table
    from astropy.coordinates import match_coordinates_sky
    from astropy.coordinates import SkyCoord
    import astropy.units as u

    # get all file centers
    print "Collecting centers..."
    centers = Table([[], [], []],
                    names=('Mask', 'RA', 'DEC'),
                    dtype=['S100', float, float])
    centers['RA'].unit = 'deg'
    centers['DEC'].unit = 'deg'
    for i, mask_file in enumerate(glob.glob('masks/mask*.fits')):
        with pyfits.open(mask_file) as fits:
            head = fits[0].header
            ra = head['CRVAL1']
            dec = head['CRVAL2']
            centers.add_row([os.path.basename(mask_file), ra, dec])

    sources = Table.read('spidx-cat.fits', format='fits')
    print "Matching catalogues..."
    idx, _, _ = match_coordinates_sky(SkyCoord(sources['RA']*u.deg, sources['DEC']*u.deg),\
                                      SkyCoord(centers['RA'], centers['DEC']))
    print "Matching catalogues 2..."
    idx2, _, _ = match_coordinates_sky(SkyCoord(sources['RA']*u.deg, sources['DEC']*u.deg),\
                                      SkyCoord(centers['RA'], centers['DEC']), nthneighbor=2)

    print "Using second closest for wrong matches..."
    # this is a speed up not to fetch data each source but once each mask
    masks = {}
    for maskname in glob.glob('masks/*fits'):
        mask = pyfits.open(maskname)[0]
        masks[maskname] = [wcs.WCS(mask.header), mask.data.shape]

    # check if the closest mask is actually covering the source, otherwise get the second closest
    for i, source in enumerate(sources):
        proposed_mask = 'masks/' + centers[int(idx[i])]['Mask']
        w, shape = masks[proposed_mask]
        x, y, _, _ = w.all_world2pix(source['RA'],
                                     source['DEC'],
                                     0,
                                     0,
                                     0,
                                     ra_dec_order=True)
        # if this source might be on the border (or outside) the mask
        if x < 10 or x > (shape[2] - 10) or y < 10 or y > (shape[3] - 10):
            print "Alternative mask found:", idx[i], "(" + centers[int(
                idx[i])]['Mask'] + ") -> ", idx2[i], "(" + centers[int(
                    idx2[i])]['Mask'] + ") - xy:", x, y
            idx[i] = idx2[i]

    print "Removing duplicates..."
    idx_duplicates = []
    for i, source in enumerate(sources):
        # check if closest
        if source['Mask'] != centers[int(idx[i])]['Mask']:
            idx_duplicates.append(i)


#            print "Removing source ", i
    print "Removing a total of", len(idx_duplicates), "sources over", len(
        sources)
    sources.remove_rows(idx_duplicates)

    print "Add unique Isl_id..."
    # add unique island idx based on combination of Mask name and blob idx
    last_isl_id = 0
    for mask in set(sources['Mask']):
        # first cycle add 0 to blob_id, next cycles add highest isl_id from previous cycle (blob_ids are >0)
        incr = np.max(sources['Isl_id'][np.where(sources['Mask'] == mask)])
        sources['Isl_id'][np.where(sources['Mask'] == mask)] += last_isl_id
        last_isl_id += incr

    sources.remove_columns('Mask')
    sources['Source_id'] = range(len(sources))  # set id after removal
    sources.write('spidx-cat-nodup.fits', format='fits', overwrite=True)
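A compact, self-contained sketch of the matching idea above, with invented coordinates: match every source to its nearest field centre, and query the second-nearest neighbour as the fallback used for border cases.

import astropy.units as u
from astropy.coordinates import SkyCoord, match_coordinates_sky

src = SkyCoord([10.1, 45.0] * u.deg, [-5.0, 2.1] * u.deg)
cen = SkyCoord([10.0, 44.8, 180.0] * u.deg, [-5.1, 2.0, 0.0] * u.deg)
idx, _, _ = match_coordinates_sky(src, cen)
idx2, _, _ = match_coordinates_sky(src, cen, nthneighbor=2)
print(idx, idx2)  # nearest and second-nearest centre for each source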
Example #42
0
    def build_limits_table(self, limit_dict):
        """Build a FITS table with limits data

        Parameters
        ----------

        limit_dict : dict
            Dictionary from limit names to values


        Returns
        -------

        table : `astropy.table.Table`
            The table has these columns

        astro_value : float
            The astrophysical J-factor for this target
        ref_astro : float
            The reference J-factor used to build `DMSpecTable`
        ref_inter : float
            The reference <sigmav> used to build `DMSpecTable`
        <LIMIT> : array
            The upper limits

        If a prior was applied these additional columns will be present

        prior_type : str
            Key specifying what kind of prior was applied
        prior_mean : float
            Central value for the prior
        prior_sigma : float
            Width of the prior
        prior_applied : bool
            Flag to indicate that the prior was applied

        """
        if self.decay:
            astro_str = "ref_D"
            inter_str = "ref_tau"
        else:
            astro_str = "ref_J"
            inter_str = "ref_sigmav"

        col_astro_val = Column(name="astro_value", dtype=float)
        col_ref_astro = Column(name=astro_str, dtype=float)
        col_ref_inter = Column(name=inter_str, dtype=float)
        collist = [col_astro_val, col_ref_astro, col_ref_inter]
        valdict = {"astro_value": self.astro_value,
                   astro_str: self.ref_astro,
                   inter_str: self.ref_inter}

        for k, v in list(limit_dict.items()):
            collist.append(Column(name=k, dtype=float, shape=v.shape))
            valdict[k] = v

        if self._astro_prior is not None:
            col_prior_type = Column(name="prior_type", dtype="S16")
            col_prior_mean = Column(name="prior_mean", dtype=float)
            col_prior_sigma = Column(name="prior_sigma", dtype=float)
            col_prior_applied = Column(name="prior_applied", dtype=bool)
            collist += [col_prior_type, col_prior_mean,
                        col_prior_sigma, col_prior_applied]
            valdict["prior_type"] = self.prior_type
            valdict["prior_mean"] = self.prior_mean
            valdict["prior_sigma"] = self.prior_sigma
            valdict["prior_applied"] = self.prior_applied

        tab = Table(data=collist)
        tab.add_row(valdict)
        return tab
Example #43
0
def update_caldb_leapsec(NewLeapSecondDate,
                         NewLeapSecond,
                         updater="MFC",
                         outdir=".",
                         clobber=True):
    """
    Given the date of a new leapsecond (for example 2017-01-01T00:00:00)
    creates a new leapsecond file for transfer to the caldb

    NewLeapSecDate = date of new leap second in ISOT format YYYY-MM-DDTHH:MM:SS
    NewLeapSecond = amount of new leap second (usually 1.0)

    writes a new FITS file to the current working directory by default
    """
    from astropy.io import fits as pyfits
    from astropy.time import Time
    from astropy.table import Table
    import ftputil
    import time
    #
    # this block retrieves the latest leapsecond file from /FTP/caldb/data/gen/bcf
    # based on the leapsecond file naming convention, "leapsec_ddmmyy.fits"
    #
    LSdir = "FTP/caldb/data/gen/bcf/"
    host = ftputil.FTPHost('heasarc.gsfc.nasa.gov', "anonymous",
                           "*****@*****.**")
    genbcf = host.listdir(LSdir)  # get directory listing
    host.close()

    LeapsecFileList = [f for f in genbcf if 'leapsec' in f]
    LeapsecFileYear = [
        y.split("_")[1].split(".fits")[0][4:6] for y in LeapsecFileList
    ]
    LeapsecFileYear = [('19' + y if int(y) > 50 else '20' + y)
                       for y in LeapsecFileYear]
    LeapsecFileMonth = [
        m.split("_")[1].split(".fits")[0][2:4] for m in LeapsecFileList
    ]
    LeapsecFileDay = [
        d.split("_")[1].split(".fits")[0][0:2] for d in LeapsecFileList
    ]

    maxjd = 0.0

    for i in range(len(LeapsecFileList)):
        tiso = LeapsecFileYear[i] + "-" + LeapsecFileMonth[
            i] + "-" + LeapsecFileDay[i]
        fjd = Time(tiso).jd
        if fjd > maxjd:
            maxjd = fjd
            LatestLSF = LeapsecFileList[i]
    print("Latest Leapsecond File = {0}".format(LatestLSF))

    hdu = pyfits.open("http://heasarc.gsfc.nasa.gov/" + LSdir + "/" +
                      LatestLSF)  # open the file

    orig_header = hdu[1].header
    mjdref = orig_header['MJDREF']

    UpdateDate = time.strftime('%Y-%m-%d %H:%M:%S')
    outfile = 'leapsec_' + NewLeapSecondDate[8:10] + NewLeapSecondDate[
        5:7] + NewLeapSecondDate[2:4] + '.fits'

    #
    # create new row for new leapsecond information
    #
    newdate = NewLeapSecondDate.split('T')[0]
    newtime = NewLeapSecondDate.split('T')[1]
    newmjd = Time(NewLeapSecondDate, format='isot').mjd
    newsecs = (newmjd - mjdref) * 86400
    newLS = NewLeapSecond
    NewLeapSecondRow = [newdate, newtime, newmjd, newsecs, newLS]
    #
    # append new leapsecond to table and write to output file
    #
    tbdata = hdu[1].data
    t = Table(tbdata)  # convert hdu data to a python Table to add new row
    t.add_row(NewLeapSecondRow)  # add row of data
    hdunew = pyfits.table_to_hdu(
        t)  # convert table back to hdu (with minimal header)

    hdunew.columns.change_unit(
        'SECONDS', 's')  # table_to_hdu doesn't seem to preserve the Unit
    hdunew.columns.change_unit(
        'LEAPSECS', 's')  # table_to_hdu doesn't seem to preserve the Unit

    hdunew.header = orig_header  # use header from original file
    hdunew.header['COMMENT'] = (UpdateDate + ": " + updater + " ADDED " +
                                NewLeapSecondDate + " LEAP SECOND")
    hdunew.header['HISTORY'] = ("File modified by user " + updater + " on " +
                                UpdateDate)
    pyfits.writeto(outdir + "/" + outfile,
                   hdunew.data,
                   hdunew.header,
                   clobber=clobber,
                   checksum=True)
    return outfile
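A self-contained sketch of the row-append round trip above, with an invented two-column layout rather than the real leapsec file: FITS table -> astropy Table -> add_row -> back to a BinTableHDU.

from astropy.io import fits
from astropy.table import Table

orig = fits.BinTableHDU.from_columns(
    [fits.Column(name='MJD', format='D', array=[57753.0]),
     fits.Column(name='LEAPSECS', format='D', unit='s', array=[1.0])])
t = Table(orig.data)
t.add_row([57754.0, 1.0])
new = fits.table_to_hdu(t)
new.columns.change_unit('LEAPSECS', 's')  # table_to_hdu may drop the unit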
Example #44
0
@author: Dan Murphy
Web Scraping Practice
"""
import requests
from bs4 import BeautifulSoup
from astropy.table import Table, Column
import numpy as np


url = "http://yann.lecun.com/exdb/mnist/"
response = requests.get(url)
print(response) # Check for 200 response
bs = BeautifulSoup(response.text, "html.parser")
aTags = bs.findAll('a') # Find all a tags


# loop through a tags and keep count of them
for i, link in enumerate(aTags, start=1):
    print("Here is link " + str(i) + ": " + str(link) + "\n")
# There are a total of 76 <a> tags on this page!

# Create a table to portray links
t = Table(names = ('Links', 'Description'))
print(t)

t.add_row(vals=(1, 2))
print(t)
# Messing around with table values. More tomorrow...
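A hedged follow-on sketch: Table(names=...) defaults to float columns, so holding link text needs explicit string dtypes.

from astropy.table import Table

t = Table(names=('Links', 'Description'), dtype=('U120', 'U60'))
t.add_row(('http://yann.lecun.com/exdb/mnist/', 'MNIST database index'))
print(t)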
Example #45
0
    def save(self, obs_fit, filename, outform):
        """
        Save the model parameters to a user defined file format.

        Parameters
        ----------
        obs_fit : PAHFITBase model
            Model giving all the components and parameters.
        filename : string
            String used to name the output file.
            Currently using the input data file name.
        outform : string
            Sets the output file format (ascii, fits, csv, etc.).
        """
        # setup the tables for the different components
        bb_table = Table(
            names=(
                "Name",
                "Form",
                "temp",
                "temp_min",
                "temp_max",
                "temp_fixed",
                "amp",
                "amp_min",
                "amp_max",
                "amp_fixed",
            ),
            dtype=(
                "U25",
                "U25",
                "float64",
                "float64",
                "float64",
                "bool",
                "float64",
                "float64",
                "float64",
                "bool",
            ),
        )
        line_table = Table(
            names=(
                "Name",
                "Form",
                "x_0",
                "x_0_min",
                "x_0_max",
                "x_0_fixed",
                "amp",
                "amp_min",
                "amp_max",
                "amp_fixed",
                "fwhm",
                "fwhm_min",
                "fwhm_max",
                "fwhm_fixed",
            ),
            dtype=(
                "U25",
                "U25",
                "float64",
                "float64",
                "float64",
                "bool",
                "float64",
                "float64",
                "float64",
                "bool",
                "float64",
                "float64",
                "float64",
                "bool",
            ),
        )
        att_table = Table(
            names=("Name", "Form", "amp", "amp_min", "amp_max", "amp_fixed"),
            dtype=("U25", "U25", "float64", "float64", "float64", "bool"),
        )

        for component in obs_fit:
            comp_type = component.__class__.__name__

            if comp_type == "BlackBody1D":
                bb_table.add_row(
                    [
                        component.name,
                        comp_type,
                        component.temperature.value,
                        component.temperature.bounds[0],
                        component.temperature.bounds[1],
                        component.temperature.fixed,
                        component.amplitude.value,
                        component.amplitude.bounds[0],
                        component.amplitude.bounds[1],
                        component.amplitude.fixed,
                    ]
                )
            elif comp_type == "Drude1D":
                line_table.add_row(
                    [
                        component.name,
                        comp_type,
                        component.x_0.value,
                        component.x_0.bounds[0],
                        component.x_0.bounds[1],
                        component.x_0.fixed,
                        component.amplitude.value,
                        component.amplitude.bounds[0],
                        component.amplitude.bounds[1],
                        component.amplitude.fixed,
                        component.fwhm.value,
                        component.fwhm.bounds[0],
                        component.fwhm.bounds[1],
                        component.fwhm.fixed,
                    ]
                )
            elif comp_type == "Gaussian1D":
                line_table.add_row(
                    [
                        component.name,
                        comp_type,
                        component.mean.value,
                        component.mean.bounds[0],
                        component.mean.bounds[1],
                        component.mean.fixed,
                        component.amplitude.value,
                        component.amplitude.bounds[0],
                        component.amplitude.bounds[1],
                        component.amplitude.fixed,
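                        # 2.355 = 2 * sqrt(2 ln 2): converts stddev to FWHM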
                        2.355 * component.stddev.value,
                        2.355 * component.stddev.bounds[0],
                        2.355 * component.stddev.bounds[1],
                        component.stddev.fixed,
                    ]
                )
            elif comp_type == "S07_attenuation":
                att_table.add_row(
                    [
                        component.name,
                        comp_type,
                        component.tau_sil.value,
                        component.tau_sil.bounds[0],
                        component.tau_sil.bounds[1],
                        component.tau_sil.fixed,
                    ]
                )

        # stack the tables (handles missing columns between tables)
        out_table = vstack([bb_table, line_table, att_table])

        # Writing output table
        out_table.write(
            "{}_output.{}".format(filename, outform), format=outform, overwrite=True
        )
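A brief sketch of the final stacking step, with invented rows: vstack pads columns that exist in only one input table, producing a masked result.

from astropy.table import Table, vstack

bb = Table({'Name': ['BB1'], 'temp': [300.0]})
line = Table({'Name': ['Line1'], 'x_0': [11.3]})
out = vstack([bb, line])  # 'temp' and 'x_0' are masked where missing
out.write('demo_output.csv', format='csv', overwrite=True)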
Example #46
0
def get_table(fnorurl=None, dropmw=True, sanitizenames=False):
    """
    Gets the McConnachie 12 table from the file.

    Parameters
    ----------
    fnorurl : str, optional
        Local file or URL.  If URL, will be cached.
    dropmw : bool, optional
        If true, the entry for the MW will be skipped.
    sanitizenames : bool, optional
        If True, names like "gal (I)" will be changed to "gal I"

    Returns
    -------
    mctab : astropy.table.Table
        The Mcconnachie 12 "database" as an astropy table
    """
    import os
    import ssl
    ssl._create_default_https_context = ssl._create_unverified_context

    from astropy.coordinates import Angle, SkyCoord, Distance
    from astropy.table import Table, Column
    from astropy.utils import data

    if fnorurl is None:
        if os.path.isfile('NearbyGalaxies.dat'):
            fnorurl = 'NearbyGalaxies.dat'
        else:
            fnorurl = 'https://www.astrosci.ca/users/alan/Nearby_Dwarfs_Database_files/NearbyGalaxies.dat'

    datstr = data.get_file_contents(fnorurl, cache=True)
    datlines = datstr.split('\n')

    #pull from the header.
    for hdri, hdrstr in enumerate(datlines):
        if hdrstr.startswith('GalaxyName'):
            break
    else:
        raise ValueError('No field name line found')
    #hdrstr now is the line with the field names

    # type is 'pm' for float w/ err+/- or 'coord'
    fieldinfo = [('name', 'S19', None), ('center', 'coord', None),
                 ('EBmV', float, u.mag), ('distmod', 'pm', u.mag),
                 ('vh', 'pm', u.km / u.s), ('Vmag', 'pm', u.mag),
                 ('PA', 'pm', u.degree), ('e', 'pm', u.dimensionless_unscaled),
                 ('muV0', 'pm', u.mag * u.arcsec**-2), ('rh', 'pm', u.arcmin),
                 ('sigma_s', 'pm', u.km / u.s), ('vrot_s', 'pm', u.km / u.s),
                 ('MHI', float, u.Unit('1e6 solMass')),
                 ('sigma_g', 'pm', u.km / u.s), ('vrot_g', 'pm', u.km / u.s),
                 ('[Fe/H]', 'pm', u.dimensionless_unscaled), ('F', int, None),
                 ('References', 'S40', None)]

    fieldnames = []
    fielddtypes = []
    fieldunits = []
    for nm, tp, un in fieldinfo:
        if tp == 'coord':
            fieldnames.append(nm)
            fielddtypes.append(object)
            fieldunits.append(None)
        elif tp == 'pm':
            fieldnames.append(nm)
            fielddtypes.append(float)
            fieldunits.append(un)
            fieldnames.append(nm + '+')
            fielddtypes.append(float)
            fieldunits.append(un)
            fieldnames.append(nm + '-')
            fielddtypes.append(float)
            fieldunits.append(un)
        else:
            fieldnames.append(nm)
            fielddtypes.append(tp)
            fieldunits.append(un)

    t = Table(names=fieldnames, dtype=fielddtypes)
    for nm, un in zip(fieldnames, fieldunits):
        t[nm].unit = un

    for l in datlines[(hdri + 2 + int(dropmw)):]:
        if l.strip() == '':
            continue

        vals = l[19:].split()
        ra = Angle(tuple([float(v) for v in vals[0:3]]), unit=u.hour)
        dec = Angle(tuple([float(v) for v in vals[3:6]]), unit=u.degree)
        vals = vals[6:]
        vals.insert(0, SkyCoord(ra, dec))
        vals.insert(0, l[:19].strip())
        if '(' not in vals[-1]:
            vals.append('')
        t.add_row(vals)

    #now add derived columns
    t.add_column(
        Column(name='Vabs', data=t['Vmag'] - t['distmod'],
               unit=u.mag))  # Vmag apparently already includes dereddening?
    t.add_column(
        Column(name='logLV', data=(t['Vabs'] - 4.83) / -2.5, unit=u.solLum))
    t.add_column(
        Column(name='distance', data=10**(t['distmod'] / 5. - 2), unit=u.kpc))
    t.add_column(
        Column(name='distance+',
               data=t['distmod+'] * t['distance'] * np.log(10) / 5,
               unit=u.kpc))
    t.add_column(
        Column(name='distance-',
               data=t['distmod-'] * t['distance'] * np.log(10) / 5,
               unit=u.kpc))
    t.add_column(
        Column(name='rh_phys',
               data=t['distance'] * np.radians(t['rh'] / 60.),
               unit=u.kpc))
    t.add_column(
        Column(name='radeg',
               data=[((ti.ra.degree + 180) % 360 - 180) for ti in t['center']],
               unit=u.degree))
    t.add_column(
        Column(name='decdeg',
               data=[ti.dec.degree for ti in t['center']],
               unit=u.degree))

    #also populate "distance" in 'center' and cartesian coords
    for i in range(len(t)):
        c = t['center'][i]
        d = t['distance'][i]
        t['center'][i] = SkyCoord(c.ra, c.dec, distance=np.array(d) * u.kpc)

    x, y, z = [], [], []
    for c, d in zip(t['center'], t['distance']):
        #c.distance = Distance(d, unit=u.kpc)
        x.append(c.cartesian.x.value)
        y.append(c.cartesian.y.value)
        z.append(c.cartesian.z.value)
    t.add_column(Column(name='x', data=x, unit=u.kpc))
    t.add_column(Column(name='y', data=y, unit=u.kpc))
    t.add_column(Column(name='z', data=z, unit=u.kpc))

    #andromeda dSph numbers
    andnum = []
    for nm in t['name'].astype(str):
        if nm.startswith('Andromeda '):
            andnum.append(roman_to_int(nm.replace('Andromeda ', '')))
        elif nm == 'Andromeda':
            andnum.append(0)
        elif nm in ('M32', 'NGC 205', 'NGC 185', 'NGC 147'):
            andnum.append(-1)
        elif nm in ('IC 10', 'LGS 3'):
            andnum.append(-2)
        elif nm in ('IC 1613', 'Pegasus dIrr'):
            andnum.append(-3)
        else:
            andnum.append(-99)
    t.add_column(Column(name='and_number', data=andnum, unit=None))

    if sanitizenames:
        t['name'] = [nm.replace('(I)', 'I') for nm in t['name']]

    return t
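A worked check of the derived distance columns above: a distance modulus mu gives d = 10**(mu/5 + 1) pc, i.e. 10**(mu/5 - 2) kpc, and the error columns follow from differentiating d(mu): d_err = mu_err * d * ln(10)/5.

import numpy as np

mu, mu_err = 19.0, 0.1
d_kpc = 10 ** (mu / 5.0 - 2.0)
d_err = mu_err * d_kpc * np.log(10) / 5.0
print(round(d_kpc, 1), round(d_err, 2))  # 63.1 2.91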
Example #47
0
def make_summary(filelist,
                 extension=0,
                 fname_option='relative',
                 output=None,
                 format='ascii.csv',
                 keywords=[],
                 dtypes=[],
                 example_header=None,
                 sort_by='file',
                 verbose=True):
    """ Extracts summary from the headers of FITS files.
    Parameters
    ----------
    filelist: list of str (path-like)
        The list of file paths relative to the current working directory.

    extension: int or str
        The extension to be summarized.

    fname_option: str {'absolute', 'relative', 'name'}
        Whether to save full absolute/relative path or only the filename.

    output: str or path-like
        The directory and file name of the output summary file.

    format: str
        The astropy.table.Table output format.

    keywords: list
        The list of the keywords to extract (keywords should be in ``str``).

    dtypes: list
        The list of dtypes of keywords if you want to specify. If ``[]``,
        ``['U80'] * len(keywords)`` will be used. Otherwise, it should have
        the same length with ``keywords``.

    example_header: str or path-like
        The path (including the filename) where an example header extracted
        from the first FITS file will be saved as text.

    sort_by: str
        The column name to sort the results. It can be any element of
        ``keywords`` or ``'file'``, which sorts the table by the file name.
    """

    if len(filelist) == 0:
        print("No FITS file found.")
        return

    def _get_fname(path):
        if fname_option == 'relative':
            return str(path)
        elif fname_option == 'absolute':
            return str(path.absolute())
        else:
            return path.name

    options = ['absolute', 'relative', 'name']
    if fname_option not in options:
        raise KeyError(f"fname_option must be one of {options}.")

    skip_keys = ['COMMENT', 'HISTORY']

    if verbose and (keywords != []) and (keywords != '*'):
        print("Extracting keys: ", keywords)

    # message templates; defined unconditionally since str_valerror is
    # needed even when verbose=False
    str_example_hdr = "Extract example header from {:s}\n\tand save as {:s}"
    str_keywords = "All {:d} keywords will be loaded."
    str_keyerror_fill = "Key {:s} not found for {:s}, filling with '--'."
    str_valerror = "Please use 'U80' as the dtype for the key {:s}."
    str_filesave = 'Saving the summary file to "{:s}"'

    # Save example header
    if example_header is not None:
        example_fits = filelist[0]
        if verbose:
            print(str_example_hdr.format(str(example_fits), example_header))
        ex_hdu = fits.open(example_fits)
        ex_hdr = ex_hdu[extension].header
        ex_hdr.totextfile(example_header, overwrite=True)

    # load ALL keywords for special cases
    if (keywords == []) or (keywords == '*'):
        example_fits = filelist[0]
        ex_hdu = fits.open(example_fits)
        ex_hdu.verify('fix')
        ex_hdr = ex_hdu[extension].header
        N_hdr = len(ex_hdr.cards)
        keywords = []

        for i in range(N_hdr):
            key_i = ex_hdr.cards[i][0]
            if (key_i in skip_keys):
                continue
            elif (key_i in keywords):
                str_duplicate = "Key {:s} is duplicated! Only first one will be saved."
                print(str_duplicate.format(key_i))
                continue
            keywords.append(key_i)

        if verbose:
            print(str_keywords.format(len(keywords)))


    # Initialize
    if len(dtypes) == 0:
        dtypes = ['U80'] * len(keywords)
        # FITS header MUST be within 80 characters! (FITS standard)

    summarytab = Table(names=keywords, dtype=dtypes)
    fnames = []

    # Run through all the fits files
    for fitsfile in filelist:
        fnames.append(_get_fname(fitsfile))
        hdu = fits.open(fitsfile)
        hdu.verify('fix')
        hdr = hdu[extension].header
        row = []
        for key in keywords:
            try:
                row.append(hdr[key])
            except KeyError:
                if verbose:
                    print(str_keyerror_fill.format(key, str(fitsfile)))
                try:
                    row.append('--')
                except ValueError:
                    raise ValueError(str_valerror.format('U80'))
        summarytab.add_row(row)
        hdu.close()

    # Attach the file name, then sort.
    fnames = Column(data=fnames, name='file')
    summarytab.add_column(fnames, index=0)
    summarytab.sort(sort_by)

    tmppath = Path('tmp.csv')
    summarytab.write(tmppath, format=format)
    summarytab = Table.read(tmppath, format=format)

    if output is None or output == '':
        tmppath.unlink()

    else:
        output = Path(output)
        if verbose:
            print(str_filesave.format(str(output)))
        tmppath.rename(output)

    return summarytab
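A minimal, self-contained sketch of the header-harvesting loop above: pull a fixed keyword list from a header and collect the values as a Table row, filling missing keys with '--'. The header is created in memory so nothing is read from disk.

from astropy.io import fits
from astropy.table import Table

hdu = fits.PrimaryHDU()
hdu.header['OBJECT'] = 'M42'
hdu.header['EXPTIME'] = 30.0

keywords = ['OBJECT', 'EXPTIME', 'FILTER']
summarytab = Table(names=keywords, dtype=['U80'] * len(keywords))
row = [str(hdu.header[k]) if k in hdu.header else '--' for k in keywords]
summarytab.add_row(row)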
Example #48
0
def analyze_data(input_file_list, log_level=logutil.logging.DEBUG, type=""):
    """
    Determine if images within the dataset can be aligned

    Parameters
    ==========
    input_file_list : list
        List containing FLT and/or FLC filenames for all input images which comprise an associated
        dataset where 'associated dataset' may be a single image, multiple images, an HST
        association, or a number of HST associations

    log_level : int, optional
        The desired level of verboseness in the log statements displayed on the screen and written to the .log file.
        Default value is 20, or 'info'.

    type : string
        String indicating whether this file is for MVM or some other processing.
        If type == "MVM", then Grism/Prism data is ignored.  If type == "SVM", Grism/Prism
        data is retained for minimal processing (WCS keyword updates) unless there is
        some other issue (i.e., exposure time of zero).

    Returns
    =======
    output_table : object
        Astropy Table object containing data pertaining to the associated dataset, including
        the do_process bool.  It is intended this table is updated by subsequent functions for
        bookkeeping purposes.

    Notes
    =====
    The keyword/value pairs below define the "cannot process categories".
    OBSTYPE : is not IMAGING
    MTFLAG : T
    SCAN_TYP : C or D (or !N)
    FILTER : G*, PR*,  where G=Grism and PR=Prism
    FILTER1 : G*, PR*, where G=Grism and PR=Prism
    FILTER2 : G*, PR*, where G=Grism and PR=Prism
    TARGNAME : DARK, TUNGSTEN, BIAS, FLAT, EARTH-CALIB, DEUTERIUM
    EXPTIME : 0
    CHINJECT : is not NONE
    DRIZCORR : OMIT, SKIPPED

    The keyword/value pairs below define the category for which the data can be processed,
    but the results may be compromised
    FGSLOCK : FINE/GYRO, FINE/GY, COARSE, GYROS

    FITS Keywords only for WFC3 data: SCAN_TYP, FILTER, and CHINJECT (UVIS)
    FITS Keywords only for ACS data: FILTER1 and FILTER2

    Please be aware of the FITS keyword value NONE vs the Python None.
    """
    # Set logging level to user-specified level
    log.setLevel(log_level)

    acs_filt_name_list = [FILKEY1, FILKEY2]

    # Interpret input filenames and adjust size of column accordingly
    max_name_length = max([len(f) for f in input_file_list])
    name_data_type = 'S{}'.format(max_name_length + 2)  # add a couple of spaces to ensure separation of cols

    # Initialize the column entries which will be populated in successive
    # processing steps
    fit_method = ""  # Fit algorithm used for alignment
    catalog = ""  # Astrometric catalog used for alignment
    catalog_sources = 0  # No. of astrometric catalog sources found based on coordinate overlap with image
    found_sources = 0  # No. of sources detected in images
    match_sources = 0  # No. of sources cross matched between astrometric catalog and detected in image
    offset_x = None
    offset_y = None
    rot = None
    scale = None
    rms_x = -1.0
    rms_y = -1.0
    rms_ra = -1.0
    rms_dec = -1.0
    completed = False  # If true, there was no exception and the processing completed all logic
    date_obs = None  # Human readable date
    mjdutc = -1.0  # MJD UTC start of exposure
    fgslock = None
    process_msg = ""
    status = 9999
    compromised = 0
    headerlet_file = ""
    fit_qual = -1

    fit_rms = -1.0
    total_rms = -1.0
    dataset_key = -1.0

    names_array = ('imageName', 'instrument', 'detector', 'filter', 'aperture',
                   'obstype', 'subarray', 'dateObs', 'mjdutc', 'doProcess',
                   'processMsg', 'fit_method', 'catalog', 'foundSources',
                   'catalogSources', 'matchSources', 'offset_x', 'offset_y',
                   'rotation', 'scale', 'rms_x', 'rms_y', 'rms_ra', 'rms_dec',
                   'completed', 'fit_rms', 'total_rms', 'datasetKey', 'status',
                   'fit_qual', 'headerletFile', 'compromised')
    data_type = (name_data_type, 'S20', 'S20', 'S20', 'S20', 'S20', 'b', 'S20',
                 'f8', 'b', 'S30', 'S20', 'S20', 'i4', 'i4', 'i4', 'f8', 'f8',
                 'f8', 'f8', 'f8', 'f8', 'f8', 'f8', 'b', 'f8', 'f8', 'i8',
                 'i4', 'i4', 'S60', 'i4')

    # Create an astropy table
    output_table = Table(names=names_array, dtype=data_type)

    # Loop over the list of images to determine viability for alignment processing
    #
    # Capture the data characteristics before any evaluation so the information is
    # available for the output table regardless of which keyword is used to
    # to determine the data is not viable for alignment.

    for input_file in input_file_list:
        header_hdu = 0
        header_data = getheader(input_file, header_hdu)

        # Keywords to use potentially for downstream analysis
        instrume = (header_data['INSTRUME']).upper()
        detector = (header_data['DETECTOR']).upper()
        subarray = header_data['SUBARRAY']
        date_obs = header_data['DATE-OBS']
        mjdutc = header_data['EXPSTART']

        # Obtain keyword values for analysis of viability
        drizcorr = (header_data[DRIZKEY]).upper()
        obstype = (header_data[OBSKEY]).upper()
        mtflag = (header_data[MTKEY]).upper()
        scan_typ = ''
        if instrume == 'WFC3':
            scan_typ = (header_data[SCNKEY]).upper()

        sfilter = ''
        if instrume == 'WFC3':
            sfilter = (header_data[FILKEY]).upper()
        # Concatenate the two ACS filter names together with an underscore
        # If the filter name is blank, skip it
        if instrume == 'ACS':
            for filtname in acs_filt_name_list:

                # The filter keyword value could be zero or more blank spaces
                # Strip off any leading or trailing blanks
                if header_data[filtname].upper().strip():

                    # If the current filter variable already has some content,
                    # need to append an underscore before adding more text
                    if sfilter:
                        sfilter += '_'
                    sfilter += header_data[filtname].upper().strip()

        # The aperture is only read for informational purposes as it is no
        # longer used for filtering input data.
        aperture = (header_data[APKEY]).upper()
        targname = (header_data[TARKEY]).upper()
        exptime = header_data[EXPKEY]
        fgslock = (header_data[FGSKEY]).upper()

        chinject = 'NONE'
        if instrume == 'WFC3' and detector == 'UVIS':
            chinject = (header_data[CHINKEY]).upper()

        # Determine if the image has one of these conditions.  The routine
        # will exit processing upon the first satisfied condition.

        # Compute if the exposure time is very close to zero as it will be
        # needed when deciding whether or not to use the particular Grism/Prism data
        is_zero = math.isclose(exptime, 0.0, abs_tol=1e-5)

        no_proc_key = None
        no_proc_value = None
        do_process = True
        # Imaging vs spectroscopic or coronagraphic
        if obstype != 'IMAGING':
            no_proc_key = OBSKEY
            no_proc_value = obstype

        # drizzling has been turned off
        elif drizcorr in ['OMIT', 'SKIPPED']:
            no_proc_key = DRIZKEY
            no_proc_value = drizcorr

        # Moving target
        elif mtflag == 'T':
            no_proc_key = MTKEY
            no_proc_value = mtflag

        # Boustrophedon scan without or with dwell (WFC3 only)
        elif any([scan_typ == 'C', scan_typ == 'D']):
            no_proc_key = SCNKEY
            no_proc_value = scan_typ

        # Calibration target
        elif any(x in targname for x in
                 ['DARK', 'TUNG', 'BIAS', 'FLAT', 'DEUT', 'EARTH-CAL']):
            no_proc_key = TARKEY
            no_proc_value = targname

        # Exposure time of effectively zero
        elif math.isclose(exptime, 0.0, abs_tol=1e-5):
            no_proc_key = EXPKEY
            no_proc_value = exptime

        # Commanded FGS lock
        elif any(x in fgslock for x in ['GY', 'COARSE']):
            no_proc_key = FGSKEY
            no_proc_value = fgslock

        # Charge injection mode
        elif chinject != 'NONE':
            no_proc_key = CHINKEY
            no_proc_value = chinject

        # Filter name which starts with "BLOCK" for internal calibration of SBC
        # The sfilter variable may be the concatenation of two filters (F160_CLEAR)
        #
        # Grism/Prism images are also IMAGING=SPECTROSCOPIC, suppress the "no processing"
        # indicators to allow the Grism/Prism images to be minimally processed for
        # keyword updates.  This was done as a retrofit to allow Grism/Prism images
        # to have the same WCS solution as the direct images in the visit (same detector).
        # The exception to this will be if the Grism/Prism data has a zero exposure time as
        # the WCS will be only "OPUS" under this condition, and this will disrupt processing
        # for all the good data.
        split_sfilter = sfilter.upper().split('_')
        for item in split_sfilter:
            # This is the only circumstance when Grism/Prism data WILL be processed.
            if item.startswith(('G', 'PR')) and not is_zero and type.upper() == "SVM":
                no_proc_key = None
                no_proc_value = None
                log.info("The Grism/Prism data, {}, will be processed.".format(
                    input_file))
            # Grism/Prism WILL NOT be processed primarily if MVM processing or with an exposure time of zero.
            elif item.startswith(('G', 'PR')):
                if type.upper() == "MVM":
                    no_proc_value += ", Grism/Prism data and MVM processing"
                    log.warning(
                        "The Grism/Prism data {} with MVM processing will be ignored."
                        .format(input_file))
                elif is_zero:
                    no_proc_value += ", Grism/Prism data and EXPTIME = 0.0"
                    log.warning(
                        "The Grism/Prism data {} with zero exposure time will be ignored."
                        .format(input_file))

            if item.startswith(('BLOCK')):
                no_proc_key = FILKEY
                no_proc_value = sfilter

        # If no_proc_key is set to a keyword, then this image has been found to not be viable for
        # alignment purposes.
        if no_proc_key is not None:
            if no_proc_key != FGSKEY:
                do_process = False
                msg_type = Messages.NOPROC.value
            else:
                msg_type = Messages.WARN.value

            process_msg = no_proc_key + '=' + str(no_proc_value)

            # Issue message to log file for this data indicating no processing to be done or
            # processing should be allowed, but there may be some issue with the result (e.g.,
            # GYROS mode so some drift)
            generate_msg(input_file, msg_type, no_proc_key, no_proc_value)

        # Populate a row of the table
        output_table.add_row([
            input_file, instrume, detector, sfilter, aperture, obstype,
            subarray, date_obs, mjdutc, do_process, process_msg, fit_method,
            catalog, found_sources, catalog_sources, match_sources, offset_x,
            offset_y, rot, scale, rms_x, rms_y, rms_ra, rms_dec, completed,
            fit_rms, total_rms, dataset_key, status, fit_qual, headerlet_file,
            compromised
        ])
        process_msg = ""

    return output_table
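
A hypothetical usage sketch for the triage above (the filename is illustrative; 'doProcess' is the boolean column defined in names_array):

results = analyze_data(['ib4606clq_flc.fits'], type="SVM")
viable = results[results['doProcess'] == True]
print(len(viable), "of", len(results), "exposures are viable for alignment")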
def calc_ang_mom_and_fluxes(halo, foggie_dir, output_dir, run, **kwargs):
    outs = kwargs.get("outs", "all")
    trackname = kwargs.get("trackname", "halo_track")

    ### set up the table of all the stuff we want
    data = Table(names=('redshift', 'radius', 'nref_mode', \
                        'net_mass_flux', 'net_metal_flux', \
                        'mass_flux_in', 'mass_flux_out', \
                        'metal_flux_in', 'metal_flux_out', \
                        'net_cold_mass_flux', 'cold_mass_flux_in', 'cold_mass_flux_out', \
                        'net_cool_mass_flux', 'cool_mass_flux_in', 'cool_mass_flux_out', \
                        'net_warm_mass_flux', 'warm_mass_flux_in', 'warm_mass_flux_out', \
                        'net_hot_mass_flux', 'hot_mass_flux_in', 'hot_mass_flux_out', \
                        'annular_ang_mom_gas_x', 'annular_ang_mom_gas_y','annular_ang_mom_gas_z', \
                        'annular_spec_ang_mom_gas_x', 'annular_spec_ang_mom_gas_y','annular_spec_ang_mom_gas_z',\
                        'annular_ang_mom_dm_x', 'annular_ang_mom_dm_y','annular_ang_mom_dm_z', \
                        'annular_spec_ang_mom_dm_x', 'annular_spec_ang_mom_dm_y', 'annular_spec_ang_mom_dm_z', \
                        'outside_ang_mom_gas_x', 'outside_ang_mom_gas_y', 'outside_ang_mom_gas_z',  \
                        'outside_spec_ang_mom_gas_x', 'outside_spec_ang_mom_gas_y', 'outside_spec_ang_mom_gas_z', \
                        'outside_ang_mom_dm_x', 'outside_ang_mom_dm_y','outside_ang_mom_dm_z',\
                        'outside_spec_ang_mom_dm_x', 'outside_spec_ang_mom_dm_y', 'outside_spec_ang_mom_dm_z', \
                        'inside_ang_mom_stars_x', 'inside_ang_mom_stars_y', 'inside_ang_mom_stars_z', \
                        'inside_spec_ang_mom_stars_x', 'inside_spec_ang_mom_stars_y', 'inside_spec_ang_mom_stars_z'),
                  dtype=('f8', 'f8', 'i8',
                         'f8', 'f8', 'f8', 'f8', 'f8', 'f8',
                         'f8', 'f8', 'f8', 'f8', 'f8', 'f8',
                         'f8', 'f8', 'f8', 'f8', 'f8', 'f8',
                         'f8', 'f8', 'f8', 'f8', 'f8', 'f8',
                         'f8', 'f8', 'f8', 'f8', 'f8', 'f8',
                         'f8', 'f8', 'f8', 'f8', 'f8', 'f8',
                         'f8', 'f8', 'f8', 'f8', 'f8', 'f8',
                         'f8', 'f8', 'f8', 'f8', 'f8', 'f8'
                        ))

    print(foggie_dir)
    track_name = foggie_dir + 'halo_00' + str(
        halo) + '/' + run + '/' + trackname
    if args.system == "pleiades":
        track_name = foggie_dir + "halo_008508/nref11f_refine200kpc_z4to2/halo_track"

    print("opening track: " + track_name)
    track = Table.read(track_name, format='ascii')
    track.sort('col1')

    ## default is do allll the snaps in the directory
    ## want to add flag for if just one
    run_dir = foggie_dir + 'halo_00' + str(halo) + '/' + run
    if halo == "8508":
        prefix = output_dir + 'plots_halo_008508/' + run + '/'
    else:
        prefix = output_dir + 'other_halo_plots/' + str(halo) + '/' + run + '/'
    if not os.path.exists(prefix):
        os.makedirs(prefix)

    if outs == "all":
        print("looking for outputs in ", run_dir)
        outs = glob.glob(os.path.join(run_dir, '?D????/?D????'))
    else:
        print("outs = ", outs)
        new_outs = [glob.glob(os.path.join(run_dir, snap)) for snap in outs]
        print("new_outs = ", new_outs)
        new_new_outs = [snap[0] for snap in new_outs]
        outs = new_new_outs

    for snap in outs:
        # load the snapshot
        print('opening snapshot ' + snap)
        ds = yt.load(snap)

        # add all the things
        ds.add_particle_filter('stars')
        ds.add_particle_filter('dm')
        ds.add_field(('gas_density_in'),
                     function=_gas_density_in,
                     units="Msun/kpc**3",
                     force_override=True)
        ds.add_field(('gas_density_out'),
                     function=_gas_density_out,
                     units="Msun/kpc**3",
                     force_override=True)
        ds.add_field(('metal_density_in'),
                     function=_metal_density_in,
                     units="Msun/kpc**3",
                     force_override=True)
        ds.add_field(('metal_density_out'),
                     function=_metal_density_out,
                     units="Msun/kpc**3",
                     force_override=True)
        ds.add_field(('hot_gas_density'),
                     function=_hot_gas_density,
                     units="Msun/kpc**3",
                     force_override=True)
        ds.add_field(('hot_gas_density_in'),
                     function=_hot_gas_density_in,
                     units="Msun/kpc**3",
                     force_override=True)
        ds.add_field(('hot_gas_density_out'),
                     function=_hot_gas_density_out,
                     units="Msun/kpc**3",
                     force_override=True)
        ds.add_field(('warm_gas_density'),
                     function=_warm_gas_density,
                     units="Msun/kpc**3",
                     force_override=True)
        ds.add_field(('warm_gas_density_in'),
                     function=_warm_gas_density_in,
                     units="Msun/kpc**3",
                     force_override=True)
        ds.add_field(('warm_gas_density_out'),
                     function=_warm_gas_density_out,
                     units="Msun/kpc**3",
                     force_override=True)
        ds.add_field(('cool_gas_density'),
                     function=_cool_gas_density,
                     units="Msun/kpc**3",
                     force_override=True)
        ds.add_field(('cool_gas_density_in'),
                     function=_cool_gas_density_in,
                     units="Msun/kpc**3",
                     force_override=True)
        ds.add_field(('cool_gas_density_out'),
                     function=_cool_gas_density_out,
                     units="Msun/kpc**3",
                     force_override=True)
        ds.add_field(('cold_gas_density'),
                     function=_cold_gas_density,
                     units="Msun/kpc**3",
                     force_override=True)
        ds.add_field(('cold_gas_density_in'),
                     function=_cold_gas_density_in,
                     units="Msun/kpc**3",
                     force_override=True)
        ds.add_field(('cold_gas_density_out'),
                     function=_cold_gas_density_out,
                     units="Msun/kpc**3",
                     force_override=True)

        # create all the regions
        zsnap = ds.get_parameter('CosmologyCurrentRedshift')
        proper_box_size = get_proper_box_size(ds)

        refine_box, refine_box_center, refine_width_code = get_refine_box(
            ds, zsnap, track)
        refine_width = refine_width_code * proper_box_size

        # center is trying to be the center of the halo
        halo_center, halo_velocity = get_halo_center(ds, refine_box_center)

        ### OK, now want to set up some spheres of some sizes and get the stuff
        radii = refine_width_code * 0.5 * np.arange(0.9, 0.1,
                                                    -0.1)  # 0.5 because radius
        small_sphere = ds.sphere(halo_center,
                                 0.05 * refine_width_code)  # R=10ckpc/h
        big_sphere = ds.sphere(halo_center, 0.45 * refine_width_code)

        for radius in radii:
            this_sphere = ds.sphere(halo_center, radius)
            if radius != np.max(radii):
                # region (big_sphere) must be larger than surface, defined here by "radius"
                surface = ds.surface(big_sphere, 'radius',
                                     (radius, 'code_length'))

                nref_mode = stats.mode(surface[('index', 'grid_level')])
                gas_flux = surface.calculate_flux("velocity_x", "velocity_y",
                                                  "velocity_z", "density")
                metal_flux = surface.calculate_flux("velocity_x", "velocity_y",
                                                    "velocity_z",
                                                    "metal_density")
                ## also want to filter based on radial velocity to get fluxes in and mass flux out
                gas_flux_in = surface.calculate_flux("velocity_x",
                                                     "velocity_y",
                                                     "velocity_z",
                                                     "gas_density_in")
                metal_flux_in = surface.calculate_flux("velocity_x",
                                                       "velocity_y",
                                                       "velocity_z",
                                                       "metal_density_in")
                gas_flux_out = surface.calculate_flux("velocity_x",
                                                      "velocity_y",
                                                      "velocity_z",
                                                      "gas_density_out")
                metal_flux_out = surface.calculate_flux(
                    "velocity_x", "velocity_y", "velocity_z",
                    "metal_density_out")

                ## aaand want to filter based on temperature
                hot_gas_flux = surface.calculate_flux("velocity_x",
                                                      "velocity_y",
                                                      "velocity_z",
                                                      "hot_gas_density")
                hot_gas_flux_in = surface.calculate_flux(
                    "velocity_x", "velocity_y", "velocity_z",
                    "hot_gas_density_in")
                hot_gas_flux_out = surface.calculate_flux(
                    "velocity_x", "velocity_y", "velocity_z",
                    "hot_gas_density_out")
                warm_gas_flux = surface.calculate_flux("velocity_x",
                                                       "velocity_y",
                                                       "velocity_z",
                                                       "warm_gas_density")
                warm_gas_flux_in = surface.calculate_flux(
                    "velocity_x", "velocity_y", "velocity_z",
                    "warm_gas_density_in")
                warm_gas_flux_out = surface.calculate_flux(
                    "velocity_x", "velocity_y", "velocity_z",
                    "warm_gas_density_out")
                cool_gas_flux = surface.calculate_flux("velocity_x",
                                                       "velocity_y",
                                                       "velocity_z",
                                                       "cool_gas_density")
                cool_gas_flux_in = surface.calculate_flux(
                    "velocity_x", "velocity_y", "velocity_z",
                    "cool_gas_density_in")
                cool_gas_flux_out = surface.calculate_flux(
                    "velocity_x", "velocity_y", "velocity_z",
                    "cool_gas_density_out")
                cold_gas_flux = surface.calculate_flux("velocity_x",
                                                       "velocity_y",
                                                       "velocity_z",
                                                       "cold_gas_density")
                cold_gas_flux_in = surface.calculate_flux(
                    "velocity_x", "velocity_y", "velocity_z",
                    "cold_gas_density_in")
                cold_gas_flux_out = surface.calculate_flux(
                    "velocity_x", "velocity_y", "velocity_z",
                    "cold_gas_density_out")

                # annuli
                big_annulus = big_sphere - this_sphere
                # note that radii is in decreasing order!
                this_annulus = last_sphere - this_sphere
                inside_ang_mom_stars_x = this_sphere[
                    'stars', 'particle_angular_momentum_x'].sum()
                inside_ang_mom_stars_y = this_sphere[
                    'stars', 'particle_angular_momentum_y'].sum()
                inside_ang_mom_stars_z = this_sphere[
                    'stars', 'particle_angular_momentum_z'].sum()
                inside_spec_ang_mom_stars_x = this_sphere[
                    'stars', 'particle_specific_angular_momentum_x'].mean()
                inside_spec_ang_mom_stars_y = this_sphere[
                    'stars', 'particle_specific_angular_momentum_y'].mean()
                inside_spec_ang_mom_stars_z = this_sphere[
                    'stars', 'particle_specific_angular_momentum_z'].mean()

                ## ok want angular momenta
                annular_ang_mom_gas_x = this_annulus[(
                    'gas', 'angular_momentum_x')].sum()
                annular_ang_mom_gas_y = this_annulus[(
                    'gas', 'angular_momentum_y')].sum()
                annular_ang_mom_gas_z = this_annulus[(
                    'gas', 'angular_momentum_z')].sum()
                annular_spec_ang_mom_gas_x = this_annulus[(
                    'gas', 'specific_angular_momentum_x')].mean()
                annular_spec_ang_mom_gas_y = this_annulus[(
                    'gas', 'specific_angular_momentum_y')].mean()
                annular_spec_ang_mom_gas_z = this_annulus[(
                    'gas', 'specific_angular_momentum_z')].mean()

                annular_ang_mom_dm_x = this_annulus[(
                    'dm', 'particle_angular_momentum_x')].sum()
                annular_ang_mom_dm_y = this_annulus[(
                    'dm', 'particle_angular_momentum_y')].sum()
                annular_ang_mom_dm_z = this_annulus[(
                    'dm', 'particle_angular_momentum_z')].sum()
                annular_spec_ang_mom_dm_x = this_annulus[(
                    'dm', 'particle_specific_angular_momentum_x')].mean()
                annular_spec_ang_mom_dm_y = this_annulus[(
                    'dm', 'particle_specific_angular_momentum_y')].mean()
                annular_spec_ang_mom_dm_z = this_annulus[(
                    'dm', 'particle_specific_angular_momentum_z')].mean()

                outside_ang_mom_gas_x = big_annulus[(
                    'gas', 'angular_momentum_x')].sum()
                outside_ang_mom_gas_y = big_annulus[(
                    'gas', 'angular_momentum_y')].sum()
                outside_ang_mom_gas_z = big_annulus[(
                    'gas', 'angular_momentum_z')].sum()
                outside_spec_ang_mom_gas_x = big_annulus[(
                    'gas', 'specific_angular_momentum_x')].mean()
                outside_spec_ang_mom_gas_y = big_annulus[(
                    'gas', 'specific_angular_momentum_y')].mean()
                outside_spec_ang_mom_gas_z = big_annulus[(
                    'gas', 'specific_angular_momentum_z')].mean()

                outside_ang_mom_dm_x = big_annulus[(
                    'dm', 'particle_angular_momentum_x')].sum()
                outside_ang_mom_dm_y = big_annulus[(
                    'dm', 'particle_angular_momentum_y')].sum()
                outside_ang_mom_dm_z = big_annulus[(
                    'dm', 'particle_angular_momentum_z')].sum()
                outside_spec_ang_mom_dm_x = big_annulus[(
                    'dm', 'particle_specific_angular_momentum_x')].mean()
                outside_spec_ang_mom_dm_y = big_annulus[(
                    'dm', 'particle_specific_angular_momentum_y')].mean()
                outside_spec_ang_mom_dm_z = big_annulus[(
                    'dm', 'particle_specific_angular_momentum_z')].mean()

                # let's add everything to the giant table!
                data.add_row([zsnap, radius, int(nref_mode[0][0]), gas_flux, metal_flux, \
                                gas_flux_in, gas_flux_out, metal_flux_in, metal_flux_out, \
                                cold_gas_flux, cold_gas_flux_in, cold_gas_flux_out, \
                                cool_gas_flux, cool_gas_flux_in, cool_gas_flux_out, \
                                warm_gas_flux, warm_gas_flux_in, warm_gas_flux_out, \
                                hot_gas_flux, hot_gas_flux_in, hot_gas_flux_out,
                                annular_ang_mom_gas_x, annular_ang_mom_gas_y,annular_ang_mom_gas_z, \
                                annular_spec_ang_mom_gas_x, annular_spec_ang_mom_gas_y,annular_spec_ang_mom_gas_z,\
                                annular_ang_mom_dm_x, annular_ang_mom_dm_y,annular_ang_mom_dm_z, \
                                annular_spec_ang_mom_dm_x, annular_spec_ang_mom_dm_y, annular_spec_ang_mom_dm_z, \
                                outside_ang_mom_gas_x, outside_ang_mom_gas_y, outside_ang_mom_gas_z,  \
                                outside_spec_ang_mom_gas_x, outside_spec_ang_mom_gas_y, outside_spec_ang_mom_gas_z, \
                                outside_ang_mom_dm_x, outside_ang_mom_dm_y,outside_ang_mom_dm_z,\
                                outside_spec_ang_mom_dm_x, outside_spec_ang_mom_dm_y, outside_spec_ang_mom_dm_z, \
                                inside_ang_mom_stars_x, inside_ang_mom_stars_y, inside_ang_mom_stars_z, \
                                inside_spec_ang_mom_stars_x, inside_spec_ang_mom_stars_y, inside_spec_ang_mom_stars_z])

                # this apparently makes fluxes work in a loop?
                surface._vertices = None
            last_sphere = this_sphere

    # perhaps we should save the table?
    tablename = run_dir + '/' + run + '_angular_momenta_and_fluxes.dat'
    ascii.write(data, tablename, format='fixed_width')

    return "whooooo angular momentum wheeeeeeee"
Beispiel #50
0
#Here be where I grab the input from the terminal
# Please note my justified outrage that in this way, we are in a 1-based indexing system
# And a 1 based indexing system is ALWAYS STUPID. WHO ARE YOU HOW DID YOU GET INTO MY HOUSE?

## HERE BE THE BEGINNING OF THE LOOP
#def prospect(galaxy_name):

run_params['outfile'] = 'Results/' + galaxy_name + ptype
if not os.path.exists('Results/' + galaxy_name + ptype):
    os.makedirs('Results/' + galaxy_name + ptype)

with PdfPages(run_params['outfile'] + 'total.pdf') as pdf:
    for i in range(startindex, endindex):
        #if galaxy_name in obs['objid'][i]:
        # If the previous line is uncommented, all of this jumps in a tab space.
        table.add_row()
        table['objid'][i] = galaxy_name
        ws.write(i + 1, 0, obs['objid'][i])
        print(obs['objid'][i])
        print()
        #try:

        obsap = [
            obs['all_maggies'][j][i] for j in range(0, len(obs['all_maggies']))
        ]
        errap = [
            obs['all_maggies_unc'][j][i]
            for j in range(0, len(obs['all_maggies_unc']))
        ]
        obs['maggies'] = tuple(obsap)
        obs['maggies_unc'] = errap
Beispiel #51
0
class Map3DComparer(object):
    """
    | Class for comparrison of vector fields.
    | There are two classification of test:
    | * **Mono**: returns a value for a given vector field. Can be normalized to the benchmark field.
    | * **Binary**: requires comparrison between two vector fields.
    | By default:
    | * Benchmark field is the first/original vector field. This is used as the baseline for comparrison. This can be changed using the ``benchmark=n`` kwarg.
    | * Normalise will be set to false.
    | Individual tests can be run and return results for imediate viewing (using astropy.table).
    | Likewise, compare_all can be used to run the whole series of tests.
    | Note: all vector fields must be of the same shape.

    """
    def __init__(self, map3D, *args, **kwargs):
        # Use all the user parameters
        self.maps_list = map3D + expand_list(args)
        self.benchmark = kwargs.get(
            'benchmark', 0)  # Defaults to the first vector field in the list
        self.normalise = kwargs.get('normalise', False)

        # The table to store the test results
        self.results = Table(names=('extrapolator routine',
                                    'extrapolation duration',
                                    'fig of merit 1'),
                             meta={'name': '3D field comparison table'},
                             dtype=('S24', 'f8', 'f8'))
        self.results['extrapolation duration'].unit = u.s

        # An empty table for the results:
        #N = len(self.maps_list)
        #t1, t2, t3, t4, t5, t6, t7 = [None] * N, [None] * N, [None] * N, [None] * N, [None] * N, [None] * N, [None] * N
        #self.results = Table([t1, t2, t3, t4, t5, t6, t7], names=('l-infinity norm', 'test 2', 'test 3', 'test 4', 'test 5', 'test 6', 'test 7'), meta={'name': 'Results Table'})
        #self.results_normalised = Table([t1, t2, t3, t4, t5, t6, t7], names=('l-infinity norm', 'test 2', 'test 3', 'test 4', 'test 5', 'test 6', 'test 7'), meta={'name': 'Results Table'})

        # Ensure that the input maps are all the same type and shape.
        for m in self.maps_list:  #self.maps:
            # Check that this is a Map3D object.
            if not isinstance(m, Map3D):
                raise ValueError(
                    'Map3DComparer expects pre-constructed map3D objects.')

            # Compare the shape of this Map3D to the first in the Map3D list.
            if not m.data.shape == self.maps_list[0].data.shape:
                raise ValueError(
                    'Map3DComparer expects map3D objects with identical dimensions.'
                )

    def _normalise(self):
        """
        Return the normalised table.
        """
        # Get the benchmark extrapolation result.
        row_benchmark = self.results[self.benchmark]

        # Create a copy of the table
        tbl_output = deepcopy(self.results)

        for row in tbl_output:
            for i, (val, val_benchmark) in enumerate(zip(row, row_benchmark)):
                # If the value is a float then normalise it by the benchmark.
                if isinstance(val, (np.float64, np.float32, np.float16)):
                    row[i] = val / val_benchmark

        return tbl_output

    def L_infin_norm(self, map_field, **kwargs):
        r"""
        l-infinity norm of the vector field.
        For a vector field :math:`\mathbf{x}` this is:

        .. math::

           \|\mathbf{x}\|_\infty = \lim_{p \to \infty} \left( \sum_i |x_i|^p \right)^{1/p} = \max_i |x_i|

        (the absolute value of the largest component)

        From: https://rorasa.wordpress.com/2012/05/13/l0-norm-l1-norm-l2-norm-l-infinity-norm/
        """

        # The maximum absolute component over the whole volume, computed in
        # one vectorized call rather than a triple loop over (ni, nj, nk).
        return np.max(np.abs(map_field))

    def compare_all(self, **kwargs):
        """
        Compare all of the given vector fields and return the results as an
        astropy.table.
        """
        #num_tests = 1
        #num_maps = len(self.maps)
        #arr_data = np.zeros([num_tests, num_maps])

        # For each given 3D field, run all the tests and add a row to the table.
        for map3D in self.maps_list:
            # Get the data
            arr_data = map3D.data

            # Store the results from each test for this field.
            lis_results = [
                map3D.meta.get('extrapolator_routine', 'Unknown Routine'),
                map3D.meta.get('extrapolator_duration', 0.0)
            ]

            # Run through all the tests and append results to the list.
            lis_results.append(self.L_infin_norm(arr_data))

            # Now add the results to the table.
            self.results.add_row(lis_results)

        if self.normalise:
            return self._normalise()
        else:
            return self.results
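
A quick standalone check of the vectorized norm used above (pure numpy, no Map3D objects needed):

import numpy as np

field = np.random.standard_normal((8, 8, 8, 3))  # toy (ni, nj, nk, D) vector field
linf = np.max(np.abs(field))                     # max absolute component, per the docstring
print(linf)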
Beispiel #52
0
    samples['Xlan_2'] = opts.Xlan2

    t = Table(np.array([[
        opts.mej1, opts.vej1, opts.Xlan1, opts.mej1, opts.vej1, opts.Xlan1,
        opts.mej2, opts.vej2, opts.Xlan2
    ]]),
              names=[
                  'mej', 'vej', 'Xlan', 'mej_1', 'vej_1', 'Xlan_1', 'mej_2',
                  'vej_2', 'Xlan_2'
              ])
    #for key, val in samples.iteritems():
    #    t.add_column(Column(data=[val],name=key))

    t.add_row(
        np.array([
            opts.mej2, opts.vej2, opts.Xlan2, opts.mej1, opts.vej1, opts.Xlan1,
            opts.mej2, opts.vej2, opts.Xlan2
        ]))

    samples = t

    samples_all = {}
    for model in models:
        samples_all[model] = samples

for model in models:
    if opts.nsamples > 0:
        samples_all[model] = samples_all[model].downsample(
            Nsamples=opts.nsamples)

    #add default values from above to table
Beispiel #53
0
def table(self):
    max_len = max([len(i) for i in self.data.splitlines()])
    table = Table(names=['ID'], dtype=['S%i' % max_len])
    for id_ in self.data.splitlines():
        table.add_row([id_.strip()])
    return table
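
Assumed usage, where self.data is a newline-separated ID listing; a standalone rendering of the same construction:

from astropy.table import Table

data = "id_0001\nid_0002\nid_0003"   # illustrative IDs
max_len = max(len(i) for i in data.splitlines())
ids = Table(names=['ID'], dtype=['S%i' % max_len])
for line in data.splitlines():
    ids.add_row([line.strip()])
print(ids)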
Beispiel #54
0
##############################################################################
# You iterate through the extrapolations on each dataset, adding the runtime to
# the table.
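
##############################################################################
# The creation of the results table ``t`` is not shown in this snippet; a
# plausible setup, with the column names inferred from the add_row calls below:
from astropy.table import Table
t = Table(names=('extrapolator routine', 'time (min)', 'time (ave)', 'time (std)'),
          dtype=('S24', 'f8', 'f8', 'f8'))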
for extrapolation in lis_datasets:
    # Setup the extrapolator and table
    aPotExt = PotentialExtrapolator(extrapolation[3], zshape=extrapolation[1], zrange=extrapolation[2])

    # List to store the trial
    lis_times = []

    # Run the extrapolation without numba for each dataset (map and ranges).
    for i in range(0, int_trials):
        aMap3D = aPotExt.extrapolate(enable_numba=False)
        lis_times.append(aMap3D.meta['extrapolator_duration'])
    t.add_row([extrapolation[0], np.round(np.min(lis_times), 2), np.round(np.average(lis_times), 2), np.round(np.std(lis_times), 2)])

    # List to store the trial
    lis_times = []

    # Run the extrapolation with numba for each dataset (map and ranges).
    for i in range(0, int_trials):
        aMap3D = aPotExt.extrapolate(enable_numba=True)
        lis_times.append(aMap3D.meta['extrapolator_duration'])
    t.add_row(['(numba)'+extrapolation[0], np.round(np.min(lis_times), 2), np.round(np.average(lis_times), 2), np.round(np.std(lis_times), 2)])

##############################################################################
# You can now see the results in the table.
print(t)

Beispiel #55
0
def single_target_phot(fnames, targetCrd, src_r, bkg_rIn, bkg_rOut):
    """
    For a set of images.
    """

    data = Table(names=('File#', 'Coord Conversion Issue', 'Centroiding Issue',
                        'Bad Center Guess', 'Not In FOV', 'Ap Out Of Bound',
                        'Xc', 'Yc', 'Fx', 'Fy', 'Time[MJD]', 'Raw_Flux',
                        'Bkg_Flux', 'Res_Flux'),
                 dtype=('S25', 'S5', 'S5', 'S5', 'S5', 'S5', 'f8', 'f8', 'f8',
                        'f8', 'f8', 'f8', 'f8', 'f8'))

    for i, fn in tqdm(enumerate(fnames)):

        #Issues list
        #Initializing values to False
        (crd_conversion, centroiding, bad_cen_guess, not_in_fov,
         ap_out_of_bound) = ('N', 'N', 'N', 'N', 'N')

        #setting default value to NaN
        (raw_flux, bkg_flux, res_flux, cenX, cenY, fx,
         fy) = (np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan)

        #Extracting header and image data regardless of filetype
        hdu = fits.open(fn)
        header = hdu[0].header
        image = hdu[0].data
        hdu.close()

        #Extracting header and image data of bcd files for subarray data
        if header['READMODE'] == 'SUB':
            bcd_fn = fn.replace('sub2d', 'bcd')
            bcd_hdu = fits.open(bcd_fn)
            bcd_header = bcd_hdu[0].header
            bcd_image = bcd_hdu[0].data
            image = np.median(
                bcd_image[14:],
                axis=0)  #taking a median of the last 50 bcd frames
            bcd_hdu.close()

        Time = header['MJD_OBS']

        try:
            w = WCS(header)
            pix = targetCrd.to_pixel(w)
        except (ValueError, NoConvergence):
            crd_conversion = 'Y'
            data.add_row([
                i + 1, crd_conversion, centroiding, bad_cen_guess, not_in_fov,
                ap_out_of_bound, cenX, cenY, fx, fy, Time, raw_flux, bkg_flux,
                res_flux
            ])
            continue

        if (pix[0] > 0) & (pix[0] < image.shape[0]) & (pix[1] > 0) & (
                pix[1] < image.shape[1]):

            try:
                cenX, cenY, fx, fy = gen_center_g2d(image, pix[0], pix[1], 7,
                                                    5, 4, 4, 0)
            except TypeError:
                centroiding = 'Y'
                data.add_row([
                    i + 1, crd_conversion, centroiding, bad_cen_guess,
                    not_in_fov, ap_out_of_bound, cenX, cenY, fx, fy, Time,
                    raw_flux, bkg_flux, res_flux
                ])
                continue

            if (ap_overflow(cenX, cenY, bkg_rIn,
                            image) == True) | (ap_overflow(
                                cenX, cenY, bkg_rOut, image) == True):
                ap_out_of_bound = 'Y'
                data.add_row([
                    i + 1, crd_conversion, centroiding, bad_cen_guess,
                    not_in_fov, ap_out_of_bound, cenX, cenY, fx, fy, Time,
                    raw_flux, bkg_flux, res_flux
                ])
                continue

            if (np.abs(cenX - pix[0]) <= 2) & (np.abs(cenY - pix[1]) <= 2):

                # Extracting raw flux
                raw_flux, src_ap = photometry(image, [cenX], [cenY], rad=src_r)

                # Extrating a mean background flux
                bkg, bkg_ap = photometry(image, [cenX], [cenY],
                                         shape='CircAnn',
                                         r_in=bkg_rIn,
                                         r_out=bkg_rOut)
                bkg_mean = bkg / bkg_ap.area()
                bkg_flux = bkg_mean * src_ap.area()

                # Subtracting background
                res_flux = raw_flux - bkg_flux

            else:
                bad_cen_guess = 'Y'

        else:
            not_in_fov = 'Y'
            ap_out_of_bound = 'Y'

        data.add_row([
            i + 1, crd_conversion, centroiding, bad_cen_guess, not_in_fov,
            ap_out_of_bound, cenX, cenY, fx, fy, Time, raw_flux, bkg_flux,
            res_flux
        ])

    return data, header
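
A hypothetical call, assuming fnames is a list of FITS file paths and the radii are in pixels (coordinates and radii are illustrative):

from astropy.coordinates import SkyCoord
import astropy.units as u

target = SkyCoord(269.4521 * u.deg, 4.6933 * u.deg)
phot, last_header = single_target_phot(fnames, target, src_r=2.25, bkg_rIn=9, bkg_rOut=15)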
def listWrite(outfile, list):
   
  NoGals = len(list)
  myTable = Table()
  
  empty = []
  myTable.add_column(Column(data=empty,name='pgc', dtype=np.dtype(int)))
  myTable.add_column(Column(data=empty,name='inc', format='%0.1f'))
  myTable.add_column(Column(data=empty,name='flag', dtype=np.dtype(int)))
  myTable.add_column(Column(data=empty,name='sort', dtype=np.dtype(int)))
  myTable.add_column(Column(data=empty,name='reason', dtype=np.dtype(int)))
  myTable.add_column(Column(data=empty,name='dPA', dtype=np.dtype(int)))
  myTable.add_column(Column(data=empty,name='zoom', format='%0.5f'))
  myTable.add_column(Column(data=empty,name='user', dtype='S16'))
  
  ################### BEGIN - Modifying inclinations using standard inclinations
  inc_piv_top = -10
  inc_piv_bot = 100
  inc_piv = -10
  
  p = 0
  while p<NoGals:
     if list[p].flag==-1:
        inc_piv_bot=list[p].inc
        break
     p+=1 
  if p==NoGals:
     inc_piv_top=100
  
  for i in range(NoGals):
      galaxy = list[i]
      if galaxy.flag==0 and (galaxy.inc<inc_piv_top or galaxy.inc>inc_piv_bot or galaxy.inc<inc_piv):
            
            p = i-1
            inc1 = -1
            while p>=0:
                if list[p].inc>0 and list[p].flag<0:
                   inc1 = list[p].inc
                   break
                p-=1
            
            p = i+1
            inc2 = -1
            while p<NoGals:
                if list[p].inc>0 and list[p].flag<0:
                   inc2 = list[p].inc
                   break
                p+=1    
            
            if inc1!=-1 and inc2!=-1:
                galaxy.inc = 0.5*(inc1+inc2)
            
            if p==NoGals: 
                galaxy.inc = inc1  
            
            inc_piv = galaxy.inc
            
      if galaxy.flag==-1:  
          if galaxy.inc>=inc_piv_top: 
             inc_piv_top = galaxy.inc
          p = i+1
          while p<NoGals:
              if list[p].flag==-1:
                  if list[p].inc<=inc_piv_bot: 
                     inc_piv_bot=list[p].inc
                     break
              p+=1 
          if p==NoGals:
              inc_piv_top=100
  ################### END - Modifying inclinations
  
  
          

  flagged_lst = []
  unflagged_lst = []
  for i in range(NoGals):
      galaxy = list[i]
      if galaxy.flag<=0:
         unflagged_lst.append(galaxy)
      else:
         flagged_lst.append(galaxy)

  
  incls = []
  for i in range(len(unflagged_lst)):
      galaxy = unflagged_lst[i]
      incls.append(galaxy.inc)
  incls=np.asarray(incls)
  indices = np.argsort(incls, kind='mergesort')

  unflagged_lst_sort = []
  for i in range(len(indices)):
    unflagged_lst_sort.append(unflagged_lst[indices[i]])
  unflagged_lst = unflagged_lst_sort
  
  for i in range(len(unflagged_lst)):
      galaxy = unflagged_lst[i]
      myTable.add_row([galaxy.pgc, galaxy.inc, galaxy.flag, galaxy.sort, galaxy.reason, galaxy.dPA, galaxy.zoom, galaxy.user])  
   
         
  for i in range(len(flagged_lst)):
      galaxy = flagged_lst[i]
      myTable.add_row([galaxy.pgc, galaxy.inc, galaxy.flag, galaxy.sort, galaxy.reason, galaxy.dPA, galaxy.zoom, galaxy.user])
  
  myTable.write(outfile, format='ascii.fixed_width',delimiter=',', bookend=False, overwrite=True)
  
  unflag_index = len(unflagged_lst)
  
  return unflagged_lst+flagged_lst, unflag_index
Beispiel #57
0
    ok = iv > 0.

    w = subs.vac2air(10.**table['loglam'])[ok]
    f = table['flux'][ok]
    wd = table['wdisp'][ok]

    if spec[spec.find('-') + 1:spec.find('.')] in cv_names:
        cls = 1
    else:
        cls = 0
    he2 = flux_var(4686)
    h_b = flux_var(4862)
    he1 = flux_var(5876)
    h_a = flux_var(6561)
    ar3 = flux_var(7135)
    max_flux = max([(v, i) for i, v in enumerate(f)])

    t.add_row([
        spec[spec.find('-') + 1:spec.find('.')], h_a[1] - h_a[3], h_a[0],
        h_a[1], h_a[1] - h_a[4], h_a[2], h_b[1] - h_b[3], h_b[0], h_b[1],
        h_b[1] - h_b[4], h_b[2], he1[1] - he1[3], he1[0], he1[1],
        he1[1] - he1[4], he1[2], he2[1] - he2[3], he2[0], he2[1],
        he2[1] - he2[4], he2[2], ar3[1] - ar3[3], ar3[0], ar3[1],
        ar3[1] - ar3[4], ar3[2], w[max_flux[1]], cls
    ])
    '''for e in (h_a, h_b, s_2, he1, he2):
        plot_spectra(w[e[4] - 50:e[4] + 50],
                     f[e[4] - 50:e[4] + 50])'''
t.write("table.fits", "w")
print("%.3fs" % (time.time() - start))
Beispiel #58
0
def SGroupwrite(outfile, SuperList, GroupList):
  
   NoSGroups = len(SuperList)
   
   myTable = Table()
   
   empty = []
   myTable.add_column(Column(data=empty,name='pgc', dtype=np.dtype(int)))
   myTable.add_column(Column(data=empty,name='flag', dtype=np.dtype(int)))
   myTable.add_column(Column(data=empty,name='ra', format='%0.4f'))
   myTable.add_column(Column(data=empty,name='dec', format='%0.4f', length=10))
   myTable.add_column(Column(data=empty,name='gl', format='%0.4f'))
   myTable.add_column(Column(data=empty,name='gb', format='%0.4f', length=10))    
   myTable.add_column(Column(data=empty,name='sgl', format='%0.4f'))
   myTable.add_column(Column(data=empty,name='sgb', format='%0.4f', length=10))

   myTable.add_column(Column(data=empty,name='Ks', format='%0.2f'))
   myTable.add_column(Column(data=empty,name='logK', format='%0.4f'))
   myTable.add_column(Column(data=empty,name='Vls', format='%0.0f'))
   myTable.add_column(Column(data=empty,name='dist', format='%0.2f'))
   myTable.add_column(Column(data=empty,name='mDist', format='%0.2f'))
   myTable.add_column(Column(data=empty,name='mDistErr', format='%0.2f'))

   myTable.add_column(Column(data=empty,name='sigmaP_dyn', format='%0.1f'))
   myTable.add_column(Column(data=empty,name='sigmaP_lum', format='%0.1f'))
  
   myTable.add_column(Column(data=empty,name='Mv_lum', format='%1.2e'))
   myTable.add_column(Column(data=empty,name='R2t_lum', format='%0.3f'))
   myTable.add_column(Column(data=empty,name='r1t_lum', format='%0.3f'))  
   myTable.add_column(Column(data=empty,name='tX_lum', format='%1.2e'))  
   
   myTable.add_column(Column(data=empty,name='No_Galaxies',dtype=np.dtype(int)))
   myTable.add_column(Column(data=empty,name='nest', dtype=np.dtype(int)))
   
   for i in range(0, NoSGroups):  # for all groups
       table_row(myTable, SuperList[i][0])
       for Group in SuperList[i][1]:
           table_row(myTable, Group)
   for Group in GroupList:
       if Group.flag <= 2:
           table_row(myTable, Group)
   
   
   pgc = 999999999; 
   ra = 999.9999; dec=-99.99;
   gl = ra; gb = dec
   sgl = ra; sgb=dec
   Ty = -100000.00; B_mag=Ty
   Ks= 99.99
   logK = 99.9999
   Vls = 9999
   dcf2 = 99.99
   ed = 9.99
   Mv_dyn = 9.99E99; Mv_lum = Mv_dyn
   tX_dyn = Mv_lum; tX_lum=Mv_lum
   nest = 9999999   
   
   flag = 0
   mDist = 0
   mDistErr = 0
   dist = 0
   sigmaP_dyn = 0
   sigmaP_lum = 0 
   R2t_lum = 0
   r1t_lum = 0
   subGalaxies = 0
   
   myTable.add_row([pgc, flag, ra, dec, gl, gb, sgl, sgb, Ks, logK, Vls, dist,
                    mDist, mDistErr, sigmaP_dyn, sigmaP_lum,
                    Mv_lum, R2t_lum, r1t_lum, tX_lum, subGalaxies, nest])
   
   
   myTable.write(outfile, format='ascii.fixed_width',delimiter='|', bookend=False)
   
   ### removing the last line (it sits there to adjust the column widths)
   command =  ["csh", "remove_lastline.csh", outfile]
   subprocess.call(command)  
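
A pure-Python alternative to the csh helper, under the assumption that the goal is simply to drop the final padding row written above:

with open(outfile) as f:
    lines = f.readlines()
with open(outfile, 'w') as f:
    f.writelines(lines[:-1])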
Beispiel #59
0
class Ring(object):
    '''
    Class for an exoring system. Can be initialised with radii and tau, or empty.
    Each single ring (called a segment) consists of a radius and an optical depth tau.

    Segments are stored in strictly increasing radius r.

    Starting at r=0, the first segment goes out to r with constant optical depth tau.
    The next segment starts at the previous inner radius out to the next tau, and so on.

    Optical depth is defined as:

    tau = -ln(I_ring/I_0) = -ln T

    ...which can be rearranged to:

    T = exp(-tau)

    T is the transmission (from 1 to 0)

    tau is from 0 to +inf, I_0 is incident flux behind the ring, I_ring is the 
    flux emerging from the ring towards the observer.

    '''
    def __init__(self, new_radii=None, new_tau=None):
        self.segments = Table(names=('radius', 'tau'), dtype=('f4', 'f4'))
        # PROBLEM: I want to initialise the Table with new_segments if they're specified
        # but I don't want to duplicate the Table creation. I need a command to add the
        # two columns specified in
        if new_radii is not None:
            self.segments = Table([new_radii, new_tau],
                                  names=('radius', 'tau'),
                                  dtype=('f4', 'f4'))
            self.segments.sort(['radius'])

    def getRadius(self):
        return self.segments['radius']

    def getTau(self):
        return self.segments['tau']

    def addRing(self, segment):
        '''add a ring segment at outer radius r with transmission tau'''
        self.segments.add_row(segment)
        self.segments.sort(['radius'])

    def delRing(self, r):
        '''delete the ring segment closest to the input radius r'''
        dist = np.abs(self.segments['radius'] - r)
        self.segments.remove_row(np.argmin(dist))

    def showRing(self, outer_radius=20, outer_tau=0.0):
        '''
        returns (r,t) array suitable for plotting with plt.step()
        
        Example:
            p = Ring(5-np.arange(5),np.arange(5)*0.1)
            (r,t) = p.showRing()
            plt.step(r,t, where='pre', zorder=-10)
        '''
        r = np.append(np.insert(self.getRadius(), 0, 0), outer_radius)
        t = np.append(np.insert(self.getTau(), 0, self.getTau()[0]), outer_tau)

        return (r, t)
        # draw the ring edges
        #ax.step(r, t, where='pre', zorder=-10)

    @staticmethod
    def _tau_to_T(tau):
        '''transmission from optical depth: T = exp(-tau)'''
        return np.exp(-tau)

    @staticmethod
    def _T_to_tau(T):
        '''optical depth from transmission: tau = -ln T'''
        return -np.log(T)
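
A quick numeric check of the tau/transmission relations used by Ring:

import numpy as np

tau = 0.5
T = np.exp(-tau)                    # transmission through the ring
assert np.isclose(-np.log(T), tau)  # inverts back to the optical depth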
Beispiel #60
0
class halo_props:
    '''
    Systematically analyse the halo X-ray properties based 
    on other modules.

    Attributes
    -----------
    datatype : str
        A copy of the input type of simulation data.
    catalogue_original : pynbody.halo.HaloCatalogue
        The input halo catalogue.
    length : int
        Length of the input catalogue.
    host_id_of_top_level
        How catalogue record "hostHalo" for those halos 
        without a host halo. Default is 0.
    errorlist : list
        Record when the host halo ID of a certain subhalo is not 
        recorded in the catalogue (weird but will happen in
        ahf sometimes).
    rho_crit
        Critical density of the current snapshot in Msol kpc**-3.
    ovdens
        Virial overdensity factor :math:`\Delta_{vir}` of the current snapshot.
    dict : astropy.table.Table
        A copy of the halo.properties dictionary but in a table form
        to make future reference more convenient.
    haloid
        List of halo_id given by property dictionary.
    IDlist
        Table of halo_id and corresponding #ID given in the property 
        dictionary.
    hostid
        List of the halo_id of the host halo of each halo (originally 
        recorded in the property dictionary in the form of #ID).
    new_catalogue : dict
        The new catalogue which includes all the subhalo particles 
        in its host halo. The keys of the dictionary are the indexes of 
        halos in `catalogue_original`.
    prop
        Table of quantities corresponding to input field.
    host_list
        List of host halos.
    tophost
        halo_ids of the top-level host halo for each halo.
    children : list of sets
        Each set corresponds to the one-level down children of each halo.
    galaxy_list
        List of all galaxies (as long as n_star > 0).
    lumi_galaxy_list
        List of all luminous galaxies (self_m_star > galaxy_low_limit).
    galaxies : list of sets
        Each set corresponds to the embedded galaxies of each halo. All
        the subhalos will not be considered and will have an empty set. 
        And for host halos it will include all the galaxies within it, 
        including the galaxies actually embedded in the subhalo (i.e., 
        the children of subhalo).
    lumi_galaxies
        Each set corresponds to the embedded luminous galaxies of each
        halo. Same as `galaxies`, only care about host halo and include 
        all the luminous galaxies within.
    n_lgal
        Number of total luminous galaxies embedded in each halo. Again, 
        only care about host halos and the galaxies within subhalos 
        (i.e., subhalos themselves) will also be taken into account.
    group_list
        halo_id of the halo identified as group in the catalogue.
    '''
    def __init__(self, halocatalogue, datatype, field=default_field, host_id_of_top_level=0, verbose=True):
        '''
        Initialization routine.

        Input
        -----
        halocatalogue : pynbody.halo.HaloCatalogue
            Only has been tested for pynbody.halo.AHFCatalogue
        field
            Quantities to calculate. When changing specific_mass_field, 
            luminosity_field and temp_field, source codes must be modified.
        datatype : str
            What kind of simulation data you are dealing with. 
            Accepted datatype for now: 'gizmo_ahf' and 'tipsy_ahf'.
        host_id_of_top_level
            How catalogue record "hostHalo" for those halos 
            without a host halo. Default is 0.
        '''
        self.datatype=datatype
        self.catalogue_original = halocatalogue
        self.length = len(self.catalogue_original)
        init_zeros = np.zeros(self.length)
        self.host_id_of_top_level = host_id_of_top_level
        self.errorlist = [{}, {}, {}]
        self.verbose = verbose
        self.rho_crit = pnb.analysis.cosmology.rho_crit(f=self.catalogue_original[1], unit='Msol kpc**-3')
        self.ovdens = cosmology.Delta_vir(self.catalogue_original[1])

        self.dict = []
        for j in range(self.length):
            i = j + 1
            if self.verbose:
                print('Loading properties... {:7} / {}'.format(i, self.length), end='\r')
            prop = self.catalogue_original[i].properties
            hid = prop['halo_id']
            if i != hid:
                raise Exception('Attention! halo_id doesn\'t equal i !!!')
            self.dict.append(prop)

        self.dict = Table(self.dict)
        self.haloid = self.dict['halo_id']
        IDs = self.dict['#ID']
        self.ID_list = Table([IDs, self.haloid], names=['#ID', 'halo_id'])
        self.ID_list.add_row([host_id_of_top_level, host_id_of_top_level])
        
        self.ID_list.add_index('#ID')

        host_in_IDlist = np.isin(self.dict['hostHalo'], self.ID_list['#ID'])
        # Some hostHalo id will not be listed in #ID list, this is probably due to AHF algorithm
        in_idx, = np.where(host_in_IDlist)
        _not_in_ = np.invert(host_in_IDlist)
        not_in_idx, = np.where(_not_in_)
        self.hostid = np.zeros(self.length, dtype=int)
        # The loc method enables using #ID as the table index.
        self.hostid[in_idx] = self.ID_list.loc[self.dict['hostHalo'][in_idx]]['halo_id']
        self.hostid = np.ma.array(self.hostid, dtype=int, mask=_not_in_)
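        # Example of the lookup (a sketch; `ahf_id` stands for a hypothetical AHF #ID):
        #   self.ID_list.loc[ahf_id]['halo_id']  ->  corresponding 1-based halo_id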
        if len(not_in_idx) > 0:
            for error in not_in_idx:
                self.errorlist[0][self.haloid[error]] = self.dict['hostHalo'][error]

        # prop initialization
        self.prop = {}
        for field_type in default_units:
            init_prop_table = [init_zeros for _ in range(len(field[field_type]))]
            self.prop[field_type] = Table(init_prop_table, names=field[field_type])
            # astropy.table.Table is only used for generating a structured array more conveniently
            self.prop[field_type] = pnb.array.SimArray(self.prop[field_type], units=default_units[field_type])
        
        self._have_children = False
        self._have_galaxy = False
        self._have_group = False
        self._have_radii = False
        self._have_temp = False
        self._have_new_catalogue = False
        self._have_center = False

    def init_relationship(self, galaxy_low_limit, include_sub=False, N_galaxy=3):
        '''
        Get basic information regarding groups, hosts, children, etc.

        Parameters
        ------------
        galaxy_low_limit : pynbody.array.SimArray
            Required by get_galaxy(). Limit above which galaxies will 
            be identified as luminous galaxies.
        include_sub
            Whether or not to include all the subhalo particles when 
            generating the new catalogue. See get_new_catalogue() for 
            details.
        N_galaxy : int
            Required by get_group_list(). Number of luminous galaxies 
            above which host halos are considered as groups.
        '''
        self.get_children()
        self.get_new_catalogue(include_=include_sub)
        self.get_galaxy(g_low_limit=galaxy_low_limit)
        self.get_group_list(N_galaxy)
        self.get_center()
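
    # Typical workflow (a sketch, not taken from this module; `HaloAnalysis`,
    # the snapshot path and the mass limit are hypothetical stand-ins):
    #   sim = pnb.load('snapshot')                       # load simulation
    #   halos = sim.halos()                              # e.g. pynbody.halo.AHFCatalogue
    #   cat = HaloAnalysis(halos, datatype='gizmo_ahf')
    #   cat.init_relationship(pnb.array.SimArray(2e8, 'Msol'), N_galaxy=3)
    #   cat.calcu_radii_masses()                         # requires init_relationship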
    
    def calcu_radii_masses(self, halo_id_list=[], rdict=None, precision=1e-2, rmax=None):
        '''
        Calculate radii (Rvir, R200, etc) and corresponding masses.

        Parameters
        -----------
        halo_id_list
            List of halo_ids to calculate radii and masses. 
            If set to empty list, then will use self.group_list.
        rdict : dict
            names and values for overdensity factors. Default is: 
            {'vir': self.ovdens, '200': 200, '500': 500, '2500': 2500}
        precision : float
            Precision for calculating radii. See get_index() in the 
            calculate_R.py documentation for details.
        rmax
            Maximum radius for the shrinking-sphere method. See 
            get_index() in the calculate_R.py documentation for details.
        '''
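        # Working definition assumed here: each radius R_delta satisfies the
        # spherical-overdensity condition
        #     M(<R_delta) = (4/3) * pi * delta * rho_crit * R_delta**3
        # for the overdensity factors delta given in rdict; cR.get_radius()
        # is expected to solve this to the requested precision.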
        halo_id_list = np.array(halo_id_list, dtype=int).reshape(-1)
        if len(halo_id_list) == 0:
            if not self._have_group:
                raise Exception('Must get_group_list (or init_relationship) first!')
            halo_id_list = self.group_list
        if not self._have_center:
            raise Exception('Must get_center first!')
        
        if rdict is None:
            rdict = {'vir': self.ovdens, '200': 200, '500': 500, '2500': 2500}
        t1 = 0; t2 = 0
        list_length = halo_id_list.max()
        for j in halo_id_list:
            i = j - 1
            if self.verbose:
                print('Calculating radii and masses... {:7} / {}, time: {:.5f}s'.format(j, list_length, t2 - t1), end='\r')
            prop = self.dict[i]
            t1 = time.time()
            MassRadii = cR.get_radius(self.new_catalogue[j], \
                    overdensities=list(rdict.values()), rho_crit=self.rho_crit, \
                        prop=prop, precision=precision, cen=self.center[i], rmax=rmax)
            for key in rdict:
                self.prop['R'][key][i] = MassRadii[1][rdict[key]]
                self.prop['M'][key][i] = MassRadii[0][rdict[key]]
            t2 = time.time()
        self._have_radii = True
    
    def calcu_specific_masses(self, halo_id_list=[], \
                calcu_field=radii_to_cal_sepcific_mass):
        '''
        Calculate some specific masses, such as baryon, IGrM, etc.

        Parameters
        -----------
        halo_id_list
            List of halo_ids to calculate masses. 
            If set to empty list, then will use self.group_list.
        calcu_field
            Radii to calculate specific masses within.
        '''
        halo_id_list = np.array(halo_id_list, dtype=int).reshape(-1)
        if len(halo_id_list) == 0:
            if not self._have_group:
                raise Exception('Must get_group_list (or init_relationship) first!')
            halo_id_list = self.group_list
        if not self._have_radii:
            raise Exception('Must calcu_radii_masses first!')
        
        list_length = halo_id_list.max()
        for j in halo_id_list:
            i = j - 1
            if self.verbose:
                print('Calculating specific masses... {:7} / {}'.format(j, list_length), end='\r')
            prop = self.dict[i]
            center = self.center[i]
            halo = self.new_catalogue[j]
            tx = pnb.transformation.inverse_translate(halo, center)
            with tx:
                for r in calcu_field:
                    # Apply filters
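                    # Gas-phase cuts (as used throughout this class):
                    #   cold diffuse gas:       temp < 5e5 K and nh < 0.13 cm**-3
                    #   ISM:                    nh > 0.13 cm**-3
                    #   hot diffuse gas (IGrM): temp > 5e5 K and nh < 0.13 cm**-3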
                    subsim = halo[pnb.filt.Sphere(self.prop['R'][i:i+1][r].in_units('kpc'))]
                    cold_diffuse_gas = subsim.gas[pnb.filt.LowPass('temp', '5e5 K') & \
                            pnb.filt.LowPass('nh', '0.13 cm**-3')]
                    ISM = subsim.gas[pnb.filt.HighPass('nh', '0.13 cm**-3')]
                    hot_diffuse_gas_ = subsim.gas[pnb.filt.HighPass('temp', '5e5 K') & \
                            pnb.filt.LowPass('nh', '0.13 cm**-3')]
                    
                    # Calculate masses
                    self.prop['M']['star' + r][i] = subsim.star['mass'].sum()
                    self.prop['M']['gas' + r][i] = subsim.gas['mass'].sum()
                    self.prop['M']['bar' + r][i] = self.prop['M']['star' + r][i] \
                                + self.prop['M']['gas' + r][i]
                    self.prop['M']['ism' + r][i] = ISM['mass'].sum()
                    self.prop['M']['cold' + r][i] = cold_diffuse_gas['mass'].sum() \
                                + self.prop['M']['ism' + r][i]
                    self.prop['M']['igrm' + r][i] = hot_diffuse_gas_['mass'].sum()

    def calcu_temp_lumi(self, cal_file, halo_id_list=[], \
                    core_corr_factor=0.15, calcu_field='500'):
        '''
        Calculate all the temperatures and luminosities listed in
        temp_field and luminosity_field. 

        Parameters
        -----------
        cal_file
            Calibration file used for calculating Tspec.
        halo_id_list
            List of halo_ids to calculate temperatures 
            and luminosities. If set to empty list, then will use 
            self.group_list.
        core_corr_factor
            Inner radius for calculating core-corrected 
            temperatures. Gas particles within 
            (core_corr_factor*R, R) will be used for calculation.
        calcu_field
            Radius to calculate temperatures and luminosities 
            within. Must be in radius_field. Default: R_500.
        '''
        halo_id_list = np.array(halo_id_list, dtype=int).reshape(-1)
        if len(halo_id_list) == 0:
            if not self._have_group:
                raise Exception('Must get_group_list (or init_relationship) first!')
            halo_id_list = self.group_list
        if not self._have_radii:
            raise Exception('Must calcu_radii_masses first!')
        
        list_length = halo_id_list.max()
        for j in halo_id_list:
            i = j - 1
            if self.verbose:
                print('Calculating temperatures and luminosities... {:7} / {}'\
                                .format(j, list_length), end='\r')
            center = self.center[i]
            halo = self.new_catalogue[j]
            R = self.prop['R'][i:i+1][calcu_field].in_units('kpc')
            tx = pnb.transformation.inverse_translate(halo, center)
            with tx:
                subsim = halo[pnb.filt.Sphere(R)]
                hot_diffuse_gas_ = subsim.gas[pnb.filt.HighPass('temp', '5e5 K') & \
                            pnb.filt.LowPass('nh', '0.13 cm**-3')]
                # cal_tweight can return the sum of weight_type at the same time.
                self.prop['T']['x'][i], self.prop['L']['x'][i] = \
                        cal_tweight(hot_diffuse_gas_, weight_type='Lx')
                self.prop['T']['x_cont'][i], self.prop['L']['x_cont'][i] = \
                        cal_tweight(hot_diffuse_gas_, weight_type='Lx_cont')
                self.prop['T']['mass'][i], _= cal_tweight(hot_diffuse_gas_, weight_type='mass')
                self.prop['T']['spec'][i] = pnb.array.SimArray(cal_tspec(hot_diffuse_gas_, \
                                cal_f=cal_file, datatype=self.datatype), units='keV')
                self.prop['L']['xb'][i] = hot_diffuse_gas_['Lxb'].sum()
                self.prop['L']['xb_cont'][i] = hot_diffuse_gas_['Lxb_cont'].sum()

                # Core-corrected temperatures:
                # Filter:
                corr_hot_ = hot_diffuse_gas_[~pnb.filt.Sphere(core_corr_factor*R)]
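                # i.e. only gas in the shell (core_corr_factor*R, R) is kept,
                # analogous to the core-excised apertures used observationally.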

                self.prop['T']['spec_corr'][i] = pnb.array.SimArray(cal_tspec(corr_hot_, \
                                cal_f=cal_file, datatype=self.datatype), units='keV')
                self.prop['T']['x_corr'][i], _ = cal_tweight(corr_hot_, weight_type='Lx')
                self.prop['T']['x_corr_cont'][i], _ = \
                                        cal_tweight(corr_hot_, weight_type='Lx_cont')
                self.prop['T']['mass_corr'][i], _ = cal_tweight(corr_hot_, weight_type='mass')

        self._have_temp = True

    def calcu_entropy(self, cal_file, n_par=9, halo_id_list=[], \
                calcu_field=entropy_field, thickness=1):
        '''
        Calculate all entropy within a thin spherical shell 
        centered at halo.

        Parameters
        -----------
        cal_file
            Calibration file used for calculating Tspec.
        n_par : int
            Number of particles the shell must contain, 
            below which entropy will not be calculated.
        halo_id_list
            List of halo_ids to calculate entropies. 
            If set to empty list, then will use self.group_list.
        calcu_field
            Radii of the thin shell to calculate entropies.
        thickness : float
            Thickness of the spherical shell. Default in kpc.
        '''
        thickness = pnb.array.SimArray(thickness, 'kpc')
        halo_id_list = np.array(halo_id_list, dtype=int).reshape(-1)
        if len(halo_id_list) == 0:
            if not self._have_group:
                raise Exception('Must get_group_list (or init_relationship) first!')
            halo_id_list = self.group_list
        if not self._have_radii:
            raise Exception('Must calcu_radii_masses first!')

        list_length = halo_id_list.max()
        for j in halo_id_list:
            i = j - 1
            if self.verbose:
                print('            Calculating entropies... {:7} / {}'\
                                .format(j, list_length), end='\r')
            center = self.center[i]
            halo = self.new_catalogue[j]
            tx = pnb.transformation.inverse_translate(halo, center)
            with tx:
                for r in calcu_field:
                    R = self.prop['R'][i:i+1][r].in_units('kpc')
                    subgas = halo.gas[pnb.filt.Annulus(R, thickness + R)]
                    hot_diffuse_gas_ = subgas[pnb.filt.HighPass('temp', '5e5 K') \
                            & pnb.filt.LowPass('nh', '0.13 cm**-3')]
                    if len(hot_diffuse_gas_) < n_par:
                        self.prop['S'][r][i] = np.nan
                        self.prop['T']['spec' + r][i] = np.nan
                    else:
                        tempTspec = pnb.array.SimArray(cal_tspec(hot_diffuse_gas_, \
                                cal_f=cal_file, datatype=self.datatype), units='keV')
                        avg_ne = (hot_diffuse_gas_['ne'] * hot_diffuse_gas_['volume']).sum() \
                                / hot_diffuse_gas_['volume'].sum()
                        self.prop['T']['spec' + r][i] = tempTspec
                        self.prop['S'][r][i] = tempTspec/(avg_ne.in_units('cm**-3'))**(2, 3)
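                        # Entropy S = T_spec / n_e^(2/3); the tuple exponent
                        # (2, 3) is pynbody's notation for the exact rational
                        # power 2/3, keeping the resulting units consistent.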

    def savedata(self, filename, field=default_field, halo_id_list=[], units=default_units):
        '''
        Save the data in hdf5 format. Will save halo_id_list 
        (key: 'halo_id') and the quantities listed in field.

        Parameters
        -----------
        filename
            Filename of the hdf5 file.
        field
            Type of information to save.
        halo_id_list
            List of halo_ids to save. If set to empty list, 
            then will use self.group_list.
        units
            Convert the data into the specified units before saving.
        '''
        halo_id_list = np.array(halo_id_list, dtype=int).reshape(-1)
        if len(halo_id_list) == 0:
            halo_id_list = self.group_list
        with h5py.File(filename, "w") as f:
            dataset = f.create_dataset("halo_id", data = halo_id_list)
            dataset.attrs['Description'] = 'halo_ids of halos saved in this file.'
            try:
                dataset2 = f.create_dataset("N_lgal", data = self.n_lgal[halo_id_list - 1])
                dataset2.attrs['Description'] = 'Number of luminous galaxies'
            except AttributeError:
                print('N_lgal will not be saved in this dataset.')

            for attr in field:
                grp = f.create_group(attr)
                infos = field[attr]
                for info in infos:
                    data_to_save = self.prop[attr][info][halo_id_list - 1]
                    data_to_save.convert_units(units[attr])
                    dset = grp.create_dataset(info, data=data_to_save)
                    dset.attrs['units'] = str(data_to_save.units)
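
    # Reading a saved file back (a sketch; the filename and the 'M/500' key
    # are illustrative -- actual keys depend on the `field` argument):
    #   with h5py.File('groups.hdf5', 'r') as f:
    #       halo_ids = f['halo_id'][:]
    #       m500 = f['M/500'][:]
    #       units = f['M/500'].attrs['units']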

    def get_children(self):
        '''
        Generate the list of children (subhalos) for each halo. 
        A subhalo can itself have children; the list does not 
        contain "grandchildren" (i.e., the children of children).
        '''
        self.host_list = []
        self.tophost = np.zeros(self.length, dtype=int)
        self.children = [set() for _ in range(self.length)]
        for i in range(self.length):
            j = self.haloid[i]  # halo_id = index + 1
            if self.verbose:
                print('Generating children list... Halo: {:7} / {}'.format(j, self.length), end='\r')
            prop = self.dict[i]
            hostID = prop['hostHalo']
            if j in self.errorlist[0]:
                self.errorlist[1][j] = hostID
                continue
            try:
                if hostID == self.host_id_of_top_level:
                    self.host_list.append(j)
                    self.tophost[i] = j
                else:
                    if hostID < 0:
                        print('Make sure you\'ve used the correct host ID of the top-level halos!')
                    host_haloid = self.ID_list.loc[hostID]['halo_id']
                    self.children[host_haloid - 1].add(j)
                    temphost = j
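                    # Walk up the host chain until the top level is reached;
                    # temphost2 keeps the last halo_id visited on the way up.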
                    while temphost != self.host_id_of_top_level:
                        temphost2 = temphost
                        temphost = self.hostid[temphost - 1]
                    self.tophost[i] = temphost2
            except IndexError:
                self.errorlist[1][j] = hostID
        self._have_children = True
    
    def get_new_catalogue(self, include_):
        '''
        Generate a new catalogue based on catalogue_original in which 
        each host halo also contains all of its subhalos' particles.
        
        Parameters
        -------------
        include_ : bool
            If True, then will include all the subhalo particles. 
            Otherwise will just be a copy of catalogue_original.
        '''
        if not self._have_children:
            raise Exception('Must get_children first!')
        if include_:
            self.new_catalogue = {}
            for i in range(self.length):
                j = self.haloid[i]
                if self.verbose:
                    print('Generating new catalogue... Halo: {:7} / {}'.format(j, self.length), end='\r')
                if len(self.children[i]) == 0:
                    self.new_catalogue[j] = self.catalogue_original[j]
                else:
                    union_list = [j] + list(self.children[i])
                    self.new_catalogue[j] = get_union(self.catalogue_original, union_list)
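                    # get_union (a helper defined elsewhere in this module) is
                    # assumed to return one sub-snapshot containing the union
                    # of the listed halos' particles.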
        else:
            self.new_catalogue = self.catalogue_original
        self._have_new_catalogue = True

    def get_galaxy(self, g_low_limit, halo_indices=None):
        '''
        Generate the list of galaxies for each host halo. Sub-subhalos 
        are also included in the host halo's galaxy list. No list is 
        generated for subhalos, even if there are galaxies within them.

        Parameters
        -------------
        g_low_limit : pynbody.array.SimArray
            Limit above which galaxies will be identified as luminous 
            galaxies.
        halo_indices
            Optional iterable of 0-based catalogue indices to process. 
            If None, all halos are processed.
        '''
        if not self._have_children:
            raise Exception('Must get_children first!')
        if not self._have_new_catalogue:
            raise Exception('Must get_new_catalogue first!')
        if halo_indices is None:
            iterator = range(self.length)
        else:
            iterator = halo_indices

        length_this = len(iterator)

        self.galaxy_list = [] # List of all galaxies (as long as n_star > 0).
        self.lumi_galaxy_list = [] # List of all luminous galaxies (self_m_star > galaxy_low_limit).
        # These containers are indexed by halo_id - 1, so they must span the
        # full catalogue even when halo_indices is a subset.
        self.galaxies = [set() for _ in range(self.length)]
        self.lumi_galaxies = [set() for _ in range(self.length)]
        self.n_lgal = np.zeros(self.length, dtype=int) # Number of total luminous galaxies embedded in each host halo.
        # Galaxies within subhalos (i.e., the subhalos themselves) are also taken into account.
        

        for n, i in enumerate(iterator):
            j = self.haloid[i]
            if self.verbose:
                print('Calculating total stellar masses... Halo: {:7} / {}'.format(n, length_this), end='\r')
            self.prop['M']['total_star'][i] = self.new_catalogue[j].star['mass'].sum()
            #sf_gas = self.new_catalogue[j].gas[pnb.filt.LowPass('temp', '3e4 K')]
            # sf_gas = self.new_catalogue[j].gas[pnb.filt.HighPass('nh', '0.13 cm**-3')]
            #self.prop['M']['total_sfgas'][i] = sf_gas['mass'].sum()
            # sf_gas (star-forming gas) enters the definition of resolved galaxies in Liang's Figure 2,
            # but it seems Liang did not actually plot Figure 2 using resolved galaxies.
        low_limit = g_low_limit.in_units(self.prop['M']['total_star'].units)
        for n, i in enumerate(iterator):
            j = self.haloid[i]
            if self.verbose:
                print('            Identifying galaxies... Halo: {:7} / {}'.format(n, length_this), end='\r')
            children_list = np.array(list(self.children[i]))
            if len(children_list) == 0:
                self_Mstar = self.prop['M']['total_star'][i]
                #self_Msfgas = self.prop['M']['total_sfgas'][i]
            else:
                children_union = get_union(self.new_catalogue, list(children_list))
                children_union_within_ = self.new_catalogue[j].intersect(children_union)
                #sf_gas_union = children_union.gas[pnb.filt.LowPass('temp', '3e4 K')]
                # sf_gas_union = children_union.gas[pnb.filt.HighPass('nh', '0.13 cm**-3')]
                self_Mstar = self.prop['M']['total_star'][i] - children_union_within_.star['mass'].sum()
                #self_Msfgas = self.prop['M']['total_sfgas'][i] - sf_gas_union['mass'].sum()
            self.prop['M']['self_star'][i] = self_Mstar
            # self.prop['M']['self_sfgas'][i] = self_Msfgas
            try:
                if self_Mstar > 0:  # originally: self_Mstar + self_Msfgas > low_limit
                    self.galaxy_list.append(j)
                    temp_tophost = self.tophost[i]
                    self.galaxies[temp_tophost-1].add(j)
        
                    if self_Mstar > low_limit:
                        self.lumi_galaxy_list.append(j)
                        self.n_lgal[temp_tophost-1] += 1
                        self.lumi_galaxies[temp_tophost-1].add(j)
            except KeyError:
                self.errorlist[2][j] = self.dict['hostHalo'][i]
        self._have_galaxy = True

    def get_group_list(self, N_galaxy):
        '''
        Identify host halos containing at least N_galaxy luminous 
        galaxies as groups; their halo_ids are stored in self.group_list.

        Parameters
        -----------
        N_galaxy : int
            Number of luminous galaxies above which host halos 
            are considered as groups.
        '''
        if not self._have_galaxy:
            raise Exception('Must get_galaxy first!')
        self.group_list, = np.where(self.n_lgal >= N_galaxy)
        self.group_list += 1
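        # np.where yields 0-based indices; +1 converts them to 1-based halo_ids.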
        self._have_group = True
    
    def calcu_tx_lx(self, halo_id_list=[], \
                    core_corr_factor=0.15, calcu_field='500'):
        '''
        Calculate the X-ray luminosities and emission-weighted 
        temperatures listed in temp_field and luminosity_field. 

        Parameters
        -----------
        halo_id_list
            List of halo_ids to calculate temperatures on. 
            If set to empty list, then will use self.group_list.
        core_corr_factor
            Inner radius for calculating core-corrected 
            temperatures. Gas particles within 
            (core_corr_factor*R, R) will be used for calculation.
        calcu_field
            Radius to calculate temperatures and luminosities 
            within. Must be in radius_field. Default: R_500.
        '''
        halo_id_list = np.array(halo_id_list, dtype=int).reshape(-1)
        if len(halo_id_list) == 0:
            if not self._have_group:
                raise Exception('Must get_group_list (or init_relationship) first!')
            halo_id_list = self.group_list
        if not self._have_radii:
            raise Exception('Must calcu_radii_masses first!')
        if not self._have_new_catalogue:
            raise Exception('Must get_new_catalogue first!')
        
        list_length = halo_id_list.max()
        for j in halo_id_list:
            i = j - 1
            if self.verbose:
                print('Calculating temperatures and luminosities... {:7} / {}'\
                                .format(j, list_length), end='\r')
            center = self.center[i]
            halo = self.new_catalogue[j]
            R = self.prop['R'][i:i+1][calcu_field].in_units('kpc')
            tx = pnb.transformation.inverse_translate(halo, center)
            with tx:
                subsim = halo[pnb.filt.Sphere(R)]
                hot_diffuse_gas_ = subsim.gas[pnb.filt.HighPass('temp', '5e5 K') & \
                            pnb.filt.LowPass('nh', '0.13 cm**-3')]
                # cal_tweight can return the sum of weight_type at the same time.
                self.prop['T']['x'][i], self.prop['L']['x'][i] = \
                        cal_tweight(hot_diffuse_gas_, weight_type='Lx')
                self.prop['T']['x_cont'][i], self.prop['L']['x_cont'][i] = \
                        cal_tweight(hot_diffuse_gas_, weight_type='Lx_cont')

                # Core-corrected temperatures:
                # Filter:
                corr_hot_ = hot_diffuse_gas_[~pnb.filt.Sphere(core_corr_factor*R)]

                self.prop['T']['x_corr'][i], _ = cal_tweight(corr_hot_, weight_type='Lx')
                self.prop['T']['x_corr_cont'][i], _ = \
                                        cal_tweight(corr_hot_, weight_type='Lx_cont')
    
    def calcu_tspec(self, cal_file, halo_id_list=[], \
                    core_corr_factor=0.15, calcu_field='500'):
        '''
        Calculate spectroscopic temperatures based on Douglas's 
        pytspec module.

        Parameters
        -----------
        cal_file
            Calibration file used for calculating Tspec.
        halo_id_list
            List of halo_ids to calculate temperatures and 
            luminosities. If set to empty list, then will use 
            self.group_list.
        core_corr_factor
            Inner radius for calculating core-corrected temperatures. 
            Gas particles within (core_corr_factor*R, R) will be used 
            for calculation.
        calcu_field
            Radius to calculate temperatures and luminosities within. 
            Must be in radius_field. Default: R_500.
        '''
        halo_id_list = np.array(halo_id_list, dtype=int).reshape(-1)
        if len(halo_id_list) == 0:
            if not self._have_group:
                raise Exception('Must get_group_list (or init_relationship) first!')
            halo_id_list = self.group_list
        if not self._have_radii:
            raise Exception('Must calcu_radii_masses first!')
        if not self._have_new_catalogue:
            raise Exception('Must get_new_catalogue first!')
        
        list_length = halo_id_list.max()
        for j in halo_id_list:
            i = j - 1
            if self.verbose:
                print('Calculating spectroscopic temperatures... {:7} / {}'\
                                .format(j, list_length), end='\r')
            center = self.center[i]
            halo = self.new_catalogue[j]
            R = self.prop['R'][i:i+1][calcu_field].in_units('kpc')
            tx = pnb.transformation.inverse_translate(halo, center)
            with tx:
                subsim = halo[pnb.filt.Sphere(R)]
                hot_diffuse_gas_ = subsim.gas[pnb.filt.HighPass('temp', '5e5 K') & \
                            pnb.filt.LowPass('nh', '0.13 cm**-3')]

                self.prop['T']['spec'][i] = pnb.array.SimArray(cal_tspec(hot_diffuse_gas_, \
                                cal_f=cal_file, datatype=self.datatype), units='keV')
                # Core-corrected temperatures:
                # Filter:
                corr_hot_ = hot_diffuse_gas_[~pnb.filt.Sphere(core_corr_factor*R)]
                self.prop['T']['spec_corr'][i] = pnb.array.SimArray(cal_tspec(corr_hot_, \
                                cal_f=cal_file, datatype=self.datatype), units='keV')

    def get_center(self):
        '''
        Calculate the centers of the halos. If an AHF catalogue is 
        provided, the centers recorded by AHF are loaded automatically. 
        Otherwise the center coordinates are computed via the 
        gravitational potential or the center of mass.

        Notes
        ------
        Due to a bug in pynbody, calculating the center of mass gives 
        an incorrect result for halos crossing the periodic boundary 
        of the simulation box. Make sure pynbody has fixed it before 
        relying on this mode.
        '''
        if self.datatype.endswith('_ahf'):
            axes = ['Xc', 'Yc', 'Zc']
            tempcen = {}
            for axis in axes:
                tempcen[axis] = np.asarray(self.dict[axis], dtype=float).reshape(-1, 1)
            self.center = np.concatenate((tempcen['Xc'], tempcen['Yc'], tempcen['Zc']), axis=1)
            self.center = pnb.array.SimArray(self.center, units='kpc') * self.dict['a'][0] / self.dict['h'][0]
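            # AHF positions (Xc, Yc, Zc) are stored in comoving kpc/h;
            # multiplying by a/h converts them to physical kpc.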
            if self.datatype == 'tipsy_ahf':
                self.center -= self.dict['boxsize'][0].in_units('kpc')/2
        else:
            self.center = pnb.array.SimArray(np.zeros((self.length, 3)), units='kpc')
            if 'phi' in self.new_catalogue[1].loadable_keys():
                center_mode = 'pot'
            else:
                center_mode = 'com'
            for i in range(self.length):
                j = self.haloid[i]
                if self.verbose:
                    print('Calculating center... {:7} / {}'.format(j, self.length), end='\r')
                self.center[i] = pnb.analysis.halo.center(self.new_catalogue[j], mode=center_mode, retcen=True, vel=False)
        self._have_center = True