def download_gaia_hip() -> None:
    """Download HIP data from the Gaia archive."""
    hip_file = GAIA_DIR/'gaiadr2_hip-result.csv'
    if not proceed_checkfile(hip_file):
        return

    conesearch_file = GAIA_DIR/'hip2conesearch.zip'
    if proceed_checkfile(conesearch_file):
        download_file(conesearch_file, CONESEARCH_URL)

    # the gaiadr2.hipparcos2_best_neighbour table misses a large number of HIP stars that are
    # actually present, so use the mapping from Kervella et al. (2019) "Binarity of Hipparcos
    # stars from Gaia pm anomaly" instead.

    with open_cds_tarfile(VIZIER_DIR/'hipgpma.tar.gz') as tf:
        hip_map = unique(tf.read_gzip('hipgpma.dat', ['HIP', 'GDR2']))

    with ZipFile(conesearch_file, 'r') as csz:
        with csz.open('Hipparcos2GaiaDR2coneSearch.csv', 'r') as f:
            cone_map = io_ascii.read(
                f, format='csv', names=['HIP', 'GDR2', 'dist'],
                include_names=['HIP', 'GDR2'],
            )

    cone_map = unique(cone_map)

    hip_map = join(hip_map, cone_map, join_type='outer', keys='HIP', table_names=['pm', 'cone'])
    hip_map['GDR2'] = hip_map['GDR2_pm'].filled(hip_map['GDR2_cone'])
    hip_map.remove_columns(['GDR2_pm', 'GDR2_cone'])
    hip_map.rename_column('HIP', 'original_ext_source_id')
    hip_map.rename_column('GDR2', 'source_id')

    download_gaia_data('hip_id', hip_map, hip_file)
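
# Usage sketch (not part of the original example; HIP/GDR2 values are made up):
# a minimal, self-contained illustration of the outer-join + filled() pattern above,
# where the preferred ("pm") mapping wins and the cone-search mapping only fills gaps.
from astropy.table import Table, join

pm_map = Table({'HIP': [1, 2, 3], 'GDR2': [101, 102, 103]})
cone_map = Table({'HIP': [2, 3, 4], 'GDR2': [902, 903, 904]})

combined = join(pm_map, cone_map, join_type='outer', keys='HIP',
                table_names=['pm', 'cone'])
# GDR2_pm is masked for HIP stars absent from pm_map; fill those from the cone search.
combined['GDR2'] = combined['GDR2_pm'].filled(combined['GDR2_cone'])
combined.remove_columns(['GDR2_pm', 'GDR2_cone'])
print(combined)  # HIP 1-3 keep 101-103, HIP 4 falls back to 904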
Example #2
def load_tyc_hd() -> Table:
    """Load the Tycho-HD cross index."""
    print('Loading TYC-HD cross index')
    with tarfile.open(os.path.join('vizier', 'tyc2hd.tar.gz'), 'r:gz') as tf:
        with tf.extractfile('./ReadMe') as readme:
            col_names = ['TYC1', 'TYC2', 'TYC3', 'HD']
            reader = io_ascii.get_reader(io_ascii.Cds,
                                         readme=readme,
                                         include_names=col_names)
            reader.data.table_name = 'tyc2_hd.dat'
            with tf.extractfile('./tyc2_hd.dat.gz') as gzf, gzip.open(gzf, 'rb') as f:
                data = reader.read(f)

    parse_tyc_cols(data)

    err_del = np.array(TYC_HD_ERRATA['delete'] + [a[1] for a in TYC_HD_ERRATA['add']])
    data = data[np.logical_not(np.isin(data['HD'], err_del))]

    err_add = Table(np.array(TYC_HD_ERRATA['add']),
                    names=['TYC', 'HD'],
                    dtype=[np.int64, np.int64])

    data = vstack([data, err_add], join_type='exact')

    data = unique(data.group_by('HD'), keys='TYC')
    data = unique(data.group_by('TYC'), keys='HD')

    return data
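
# Usage sketch (not part of the original example; TYC/HD pairs are hypothetical):
# how the two group_by()/unique() passes above enforce a one-to-one mapping,
# keeping the lowest HD per TYC and then the lowest TYC per HD.
from astropy.table import Table, unique

xidx = Table({'TYC': [10, 10, 20, 30], 'HD': [111, 222, 111, 333]})
xidx = unique(xidx.group_by('HD'), keys='TYC')   # one HD per TYC: (10, 111), (20, 111), (30, 333)
xidx = unique(xidx.group_by('TYC'), keys='HD')   # one TYC per HD: (10, 111), (30, 333)
print(xidx)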
Example #3
 def _append(self, frame, unique=True):
     vstack_t = at.vstack([self._t, frame._t])
     vstack_mods_t = at.vstack([self._mods_t, frame._mods_t])
     if unique:
         self._t = at.unique(vstack_t, keys=['z0', 'z'])
         self._mods_t = at.unique(vstack_mods_t, keys=['z0'])
     #print(self._mods_t['z0', 'id'])
     return 0
Example #4
def configmap(infilelist, confitemlist, debug='False'):
    """general purpose mapper of observing configurations

    Parameters
    ----------
    infilelist: list
        List of filenames 
    confitemlist: list
        List of header keywords which define a configuration

    Returns
    -------
    obs_i: numpy 1d array
        observation number (unique object, config) for each file
    config_i: numpy 1d array
        config number (unique config) for each file
    obstab: astropy Table
        object name and config number for each observation     
    configtab: astropy Table
        config items for each config
     
    """
    # create the observation log
    obsdict = obslog(infilelist)
    images = len(infilelist)

    # make table of unique configurations
    confdatlisti = []
    for i in range(images):
        confdatlist = []
        for item in confitemlist:
            confdatlist.append(obsdict[item][i])
        confdatlisti.append(confdatlist)

    dtypelist = map(type, confdatlist)
    configtab = Table(np.array(confdatlisti),
                      names=confitemlist,
                      dtype=dtypelist)
    config_i = np.array([np.where(configtab[i]==unique(configtab))   \
                        for i in range(images)]).flatten()
    configtab = unique(configtab)

    # make table of unique observations
    obsdatlisti = []
    for i in range(images):
        object = obsdict['OBJECT'][i].replace(' ', '')
        obsdatlisti.append([object, config_i[i]])

    obstab = Table(np.array(obsdatlisti),
                   names=['object', 'config'],
                   dtype=[str, int])
    obs_i = np.array([np.where(obstab[i]==unique(obstab))   \
                        for i in range(images)]).flatten()
    obstab = unique(obstab)

    return obs_i, config_i, obstab, configtab
Example #5
    def find_best_comp(self, result_file_path=None, best_comparison_star=None):

        result_file = Table.read(result_file_path,
                                 format='ascii.commented_header')

        # read comparison star list
        # and check manually assigned comp star
        if best_comparison_star is None:
            result_unique_by_cat = table.unique(result_file, keys='nomad1')
        else:
            result_unique_by_cat = table.unique(
                result_file[(result_file['nomad1'] == best_comparison_star)],
                keys='nomad1')

        std_list = []
        t_c_list = []
        # calculates diff_mag for all target objects and comp. stars
        for star in result_unique_by_cat['nomad1']:
            frame_results = result_file[(result_file['nomad1'] == star)]

            # diff phot.
            frame_results[
                't-c'] = frame_results['magt_i'] - frame_results['magc_i']
            # error propagation
            frame_results['t-c-err'] = np.sqrt(
                np.power(frame_results['magt_i_err'], 2) +
                np.power(frame_results['magc_i_err'], 2))

            # extracting useful columns
            t_c_list.append(frame_results['ast_num', 'nomad1', 'jd', 't-c',
                                          't-c-err'])

            # calculating all t-c stars STD then adding list
            std_list.append(np.std(frame_results['t-c']))
        # calculating all STD's mean and its index number in the list
        mean_idx = (np.abs(np.asanyarray(std_list) -
                           np.mean(std_list))).argmin()

        # choosing STD with min, mean and max stars
        diff_stats = {
            'min': [std_list.index(min(std_list)),
                    min(std_list)],
            'mean': [mean_idx, np.mean(std_list)],
            'max': [std_list.index(max(std_list)),
                    max(std_list)]
        }
        # getting these diff mags and their other columns
        results = {
            'with_min_comp': t_c_list[diff_stats['min'][0]],
            'with_mean_comp': t_c_list[diff_stats['mean'][0]],
            'with_max_comp': t_c_list[diff_stats['max'][0]]
        }

        return results
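
# Worked example (not part of the original source; magnitudes are hypothetical):
# the differential magnitude t-c and its error propagated in quadrature, as computed above.
import numpy as np

magt, magt_err = 14.20, 0.03   # instrumental target magnitude and error
magc, magc_err = 13.85, 0.02   # instrumental comparison-star magnitude and error

t_c = magt - magc                             # differential magnitude: 0.35
t_c_err = np.sqrt(magt_err**2 + magc_err**2)  # quadrature sum: ~0.036
print(round(t_c, 2), round(t_c_err, 3))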
Example #6
def download_galaxies_without_cuts(galaxies, nsa, joint_loc, png_dir,
                                   fits_dir):
    """[summary]
    
    Args:
        galaxies ([type]): [description]
        nsa ([type]): [description]
        joint_loc ([type]): save location for new joint catalog
    """

    # let's redownload all, without filtering - make temp joint catalog
    galaxies_with_nsa_maybe_duplicate, _ = matching_utils.match_galaxies_to_catalog_table(
        galaxies=galaxies,
        catalog=nsa,
        galaxy_suffix='',
        catalog_suffix='_nsa')
    # if duplicate match to NSA catalog, pick the first
    galaxies_with_nsa = table.unique(galaxies_with_nsa_maybe_duplicate,
                                     keys='iauname',
                                     keep='first')
    logging.warning(
        'Dropped {} galaxies that matched to the same NSA entry'.format(
            len(galaxies_with_nsa_maybe_duplicate) - len(galaxies_with_nsa)))
    assert len(table.unique(galaxies_with_nsa,
                            keys='iauname')) == len(galaxies_with_nsa)

    logging.info('In NSA: {}'.format(len(galaxies_with_nsa)))
    in_decals_bricks = get_joint_nsa_decals_catalog.create_joint_catalog(
        nsa=galaxies_with_nsa, bricks=bricks,
        data_release='5')  # dont apply selection cuts
    assert len(table.unique(in_decals_bricks,
                            keys='iauname')) == len(in_decals_bricks)

    for dir in [png_dir, fits_dir]:
        if not os.path.isdir(dir):
            os.mkdir(dir)

    joint_catalog = download_images_threaded.download_images_multithreaded(
        in_decals_bricks,
        '5',
        fits_dir,
        png_dir,
        overwrite_fits=False,
        overwrite_png=False)
    logging.info('Downloaded {} galaxies without cuts'.format(len(joint_catalog)))
    image_download_stats(joint_catalog)
    joint_catalog['iauname'] = list(
        map(lambda x: str(x),
            joint_catalog['iauname']))  # avoid dtype problems
    joint_catalog.write(
        joint_loc, overwrite=False)  # not allowed to overwrite, for safety
    logging.info('Written new joint catalog to {} for uploader'.format(
        joint_loc))
Example #7
def configmap(infilelist,confitemlist,debug='False'):
    """general purpose mapper of observing configurations

    Parameters
    ----------
    infilelist: list
        List of filenames 
    confitemlist: list
        List of header keywords which define a configuration

    Returns
    -------
    obs_i: numpy 1d array
        observation number (unique object, config) for each file
    config_i: numpy 1d array
        config number (unique config) for each file
    obstab: astropy Table
        object name and config number for each observation     
    configtab: astropy Table
        config items for each config
     
    """
  # create the observation log
    obsdict = obslog(infilelist)
    images = len(infilelist)

  # make table of unique configurations
    confdatlisti = []
    for i in range(images):
        confdatlist = []
        for item in confitemlist:
            confdatlist.append(obsdict[item][i])
        confdatlisti.append(confdatlist)

    dtypelist = map(type,confdatlist)           
    configtab = Table(np.array(confdatlisti),names=confitemlist,dtype=dtypelist) 
    config_i = np.array([np.where(configtab[i]==unique(configtab))   \
                        for i in range(images)]).flatten()
    configtab = unique(configtab)
                        
  # make table of unique observations
    obsdatlisti = []
    for i in range(images):
        object = obsdict['OBJECT'][i].replace(' ','')
        obsdatlisti.append([object, config_i[i]])

    obstab = Table(np.array(obsdatlisti),names=['object','config'],dtype=[str,int])
    obs_i = np.array([np.where(obstab[i]==unique(obstab))   \
                        for i in range(images)]).flatten()
    obstab = unique(obstab)
                        
    return obs_i,config_i,obstab,configtab
Example #8
def load_sao() -> Table:
    """Loads the SAO catalog."""
    print("Loading SAO")

    with (VIZIER_DIR/'sao.readme').open('r') as readme:
        reader = WorkaroundCDSReader('sao.dat', ['SAO', 'HD'], [np.int64, np.int64], readme)

    with gzip.open(VIZIER_DIR/'sao.dat.gz', 'rt', encoding='ascii') as f:
        data = reader.read(f)

    data = unique(data.group_by('SAO'), keys=['HD'])
    data = unique(data.group_by('HD'), keys=['SAO'])
    return data
Example #9
    def __add__(self, other):
        if not isinstance(other, type(self)):
            raise TypeError(
                f'Products must be of the same type, not {type(self)} and {type(other)}'
            )

        other.control[
            'index'] = other.control['index'] + self.control['index'].max() + 1
        other.data['control_index'] = other.data[
            'control_index'] + self.control['index'].max() + 1

        control = vstack((self.control, other.control))
        control = unique(control, ['scet_coarse', 'scet_fine', 'seq_count'])

        orig_indices = control['index']
        new_index = range(len(control))
        control['index'] = new_index

        data = vstack((self.data, other.data))
        data = data[np.nonzero(
            orig_indices[:, None] == data['control_index'].data)[1]]
        data['control_index'] = range(len(data))

        if np.abs([((len(data['data'][i]) / 2) - (control['data_len'][i] + 7))
                   for i in range(len(data))]).sum() > 0:
            logger.error('Expected and actual data length do not match')

        return type(self)(control, data)
Example #10
def rand_loop_through__sep(f, length):
    sep_list = []
    while len(sep_list) < length:

        righta = np.random.uniform(-180, 180)
        declin = np.random.uniform(-90, 90)

        pos = coords.SkyCoord(ra=righta * u.degree, dec=declin * u.degree)
        galatic_pos = pos.galactic
        xid = SDSS.query_region(galatic_pos,
                                spectro=False,
                                radius=2.5 * u.arcmin,
                                photoobj_fields=['ra', 'dec'])

        if xid is None:
            continue

        xid = unique(xid, keys=['ra', 'dec'])
        righta_list = xid["ra"]
        declin_list = xid["dec"]

        sep = pos.separation(
            coords.SkyCoord(ra=righta_list * u.degree,
                            dec=declin_list * u.degree))
        sep_list = np.concatenate((sep_list, sep))
        print(len(sep_list))

    return sep_list
Example #11
File: vi.py  Project: desi-bgs/bgs-cmxsv
def get_vi():
    cols = ['TARGETID', 'best_z', 'best_quality', 'best_spectype']

    aa = atable.Table.read(
        '/global/cfs/cdirs/desi/sv/vi/TruthTables/Andes/BGS/Truth_table_Andes_reinspection_BGS_66003_20200315_v1.csv'
    )

    for x in ['z', 'quality', 'spectype']:
        aa.rename_column('best {}'.format(x), 'best_{}'.format(x))

    aa = aa[cols]

    bb = atable.Table.read(
        '/global/cfs/cdirs/desi/sv/vi/TruthTables/Blanc/BGS/desi-vi_BGS_tile80613_nightdeep_merged_all_210202.csv'
    )[cols]

    cc = atable.Table.read(
        '/global/cfs/cdirs/desi/sv/vi/TruthTables/Cascades/BGS/desi-vi_SV_cascades_combination_BGS_all_210521.csv'
    )[cols]

    vi = atable.vstack([aa, bb, cc])

    vi = vi[vi['best_quality'] >= 2.5]

    vi.sort('best_quality')

    # 4 targets with repeated VI.
    vi = atable.unique(vi, keys='TARGETID', keep='last')

    vi.rename_column('best_z', 'VI_Z')
    vi.rename_column('best_quality', 'VI_Q')
    vi.rename_column('best_spectype', 'VI_SPECTYPE')

    return vi
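
# Usage sketch (not part of the original example; TARGETIDs and qualities are fabricated):
# after sorting on 'best_quality', unique(..., keep='last') retains the highest-quality
# visual inspection per repeated TARGETID, as in get_vi() above.
from astropy import table as atable

vi = atable.Table({'TARGETID': [11, 22, 11, 33],
                   'best_quality': [2.5, 3.0, 4.0, 3.5]})
vi.sort('best_quality')
vi = atable.unique(vi, keys='TARGETID', keep='last')
print(vi)  # TARGETID 11 keeps best_quality 4.0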
Example #12
    def _load_curve_data(self):
        """
        Use the configuration data for a given curve to configure the astropy.io.ascii fixed-width reader and load the data.

        Returns
        -------
        data: dict-like
            Data columns read from extinction data file.
        """
        curve_config = self.laws[self.law]
        filename = os.path.join(default_refdata_directory, "extinction",
                                "curves", curve_config['filename'])
        data = ascii.read(filename,
                          Reader=ascii.FixedWidth,
                          data_start=curve_config['data_start'],
                          names=curve_config['names'],
                          col_starts=curve_config['col_starts'],
                          col_ends=curve_config['col_ends'],
                          guess=False)
        # make sure the data is sorted in order of increasing wavelength. 'data' is an astropy.table.Table object so has
        # built-in method to do this.
        data.sort(['wave'])

        # some of the files have duplicate rows so use astropy.table.unique() to weed them out, too
        uniq_data = unique(data, keys='wave')
        return uniq_data
Example #13
def load_catalog(catalogs,sigs):

    filenames = catalogs.split(",")
    sigs = np.array(sigs.split(","),dtype=float)
    cnt = 0
    for ii, (filename, sig) in enumerate(zip(filenames,sigs)):    
        data_tmp = Table.read(filename, format='fits')
        idx = np.where(data_tmp["sig"] > sig)[0]
        data_tmp = data_tmp[idx]
        if cnt == 0:
            data = copy.copy(data_tmp)
        else:
            data = vstack([data,data_tmp])
        cnt = cnt + 1

    sig = data["sig"]
    idx = (1+np.arange(len(sig)))/(len(sig)+1)
    #sigsort = np.ceil(3.0*idx[np.argsort(sig)])
    sigsort = 3.0*idx[np.argsort(sig)]
    data["sigsort"] = sigsort
    data.sort("sigsort")
    data.reverse()

    ras, decs, sigs = data["ra"], data["dec"], data["sigsort"]

    objnames = []
    for ra, dec in zip(ras, decs):
        objname = "%.3f_%.3f" % (ra, dec)
        objnames.append(objname)
    data["objname"] = objnames
    data = unique(data, keys=['objname'], keep='first')

    data.sort("ra")
    
    return data
Example #14
def drop_lines(dataset, drop_fractions):
    #inserting all supernovae into a single table
    for supernova in dataset.data.keys():
        new_sna=dataset.data[supernova].copy()
        new_col=Table.Column(name='supernova', data=np.repeat(supernova, len(dataset.data[supernova])))
        new_sna.add_column(new_col, index=0)
        if supernova==list(dataset.data.keys())[0]:
            data_table=new_sna
        else:
            data_table=vstack([data_table, new_sna], metadata_conflicts='silent')
    #in each band, determine the number of lines to drop, and drop them        
    for band in unique(data_table, keys='filter')['filter']:
        band_indices=np.where(data_table['filter']==band)
        num_drops=int(np.floor(len(data_table[band_indices])*drop_fractions[band]))
        #print 'Dropping '+str(num_drops)+' lines of '+str(len(data_table))+' ...'
        if num_drops > 0:
            lines_to_drop=np.random.choice(band_indices[0], num_drops, replace=False)
        else:
            lines_to_drop=[]
        data_table.remove_rows(lines_to_drop)
    #make copy of the original dataset, place rows from data_table in there
    new_dataset=copy.deepcopy(dataset)
    types=dataset.get_types()
    new_types=new_dataset.get_types()
    for supernova in dataset.data.keys():
        new_dataset.data[supernova]=data_table[data_table['supernova']==supernova]
        new_dataset.data[supernova].remove_column('supernova')
  
    return new_dataset
Example #15
File: hipparcos.py  Project: burggraaff/gfc
def read():
    table_south = read_data_as_table("hip/hip_south.vot")
    table_mag = ascii.read("hip/hip_mag.txt", format="fixed_width")
    t = table.vstack((table_mag, table_south))
    t = table.unique(t, "hip")
    read_data_cols(t)
    return t
Example #16
def doGaiaCat(inputFile, outputFile):
    table = Table.read(inputFile)
    table2 = table.filled(0)

    table3 = unique(table2)

    magJ, magH, magK = 99.000, 99.000, 99.000
    JD = 15.0 * 365.25 + 2451545

    catFile = open(outputFile, "w")
    
    for row in table3:
        catFile.write(" ".ljust(64))
        catFile.write(("%.3f" % row[12]).rjust(6) )
        catFile.write(" ".ljust(7))
        catFile.write(" " + ("%.3f" % magJ).rjust(6))
        catFile.write(" " + ("%.3f" % magH).rjust(6))
        catFile.write(" " + ("%.3f" % magK).rjust(6))
        catFile.write(" ".rjust(35))
        catFile.write(" " + ("%.3f" % (row[5]/1000.0)).rjust(7))
        catFile.write(" " + ("%.3f" % (row[7]/1000.0)).rjust(7))
        catFile.write(" " + ("%.3f" % (row[6]/1000.0)).rjust(7))
        catFile.write(" " + ("%.3f" % (row[8]/1000.0)).rjust(7))
        catFile.write(" ".rjust(71))
        catFile.write(" " + ("%.9f" % (row[0]/15.0)).rjust(13))
        catFile.write(" " + ("%.9f" % row[2]).rjust(13))
        catFile.write(" ".ljust(24))
        catFile.write(("%.8f" % JD).rjust(16))
        catFile.write(" ".ljust(119))
        catFile.write("  " + ("%.3f" % (row[1]/1000.0)).rjust(6))
        catFile.write("  " + ("%.3f" % (row[3]/1000.0)).rjust(6))

        catFile.write("\n")
    
    catFile.close()
Example #17
    def join(self, cat, name=None):
        """
        Returns a new ``Catalogue`` joining the current catalogue with 'cat'. Both
        catalogues must be consistent: same coordinates, positional errors and
        magnitudes, if they are included.
        If the original catalogues have areas defined through MOCs, the final area is
        the union of their MOCs, otherwise the area of the current catalogue is used.
        If the original catalogues have common sources, repeated entries will be
        removed from the final catalogue.
        """
        if name is None:
            name = self.name

        join_cat_data = vstack([self.save(), cat.save()])
        join_cat_data = unique(join_cat_data)

        try:
            area = self.moc.union(cat.moc)
        except:
            area = self.area

        mag_cols = None
        if self.mags is not None:
            mag_cols = self.mags.colnames

        join_cat = Catalogue(join_cat_data,
                             poserr_cols=self.poserr.components.colnames,
                             poserr_type=self.poserr.errtype,
                             area=area,
                             name=self.name,
                             mag_cols=mag_cols)

        return join_cat
Example #18
def load_sao() -> Table:
    """Load the SAO-TYC2 cross match."""
    print('Loading SAO-TYC2 cross match')
    xmatch_files = [
        'sao_tyc2_xmatch.csv',
        'sao_tyc2_suppl1_xmatch.csv',
        'sao_tyc2_suppl2_xmatch.csv',
    ]
    data = vstack(
        [
            io_ascii.read(
                XMATCH_DIR/f,
                include_names=['SAO', 'TYC1', 'TYC2', 'TYC3', 'angDist', 'delFlag'],
                format='csv',
                converters={'delFlag': [io_ascii.convert_numpy(str)]},
            ) for f in xmatch_files
        ],
        join_type='exact',
    )

    data = data[data['delFlag'].mask]
    data.remove_column('delFlag')

    parse_tyc_cols(data)

    data = unique(data.group_by(['TYC', 'angDist']), keys=['TYC'])
    data.remove_column('angDist')

    data.add_index('TYC')
    return data
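
# Usage sketch (not part of the original example; identifiers and separations are invented):
# pre-grouping on ['TYC', 'angDist'] sorts each TYC group by separation, so unique()
# keeps the nearest SAO match per TYC star, mirroring the deduplication above.
from astropy.table import Table, unique

xm = Table({'SAO': [1, 2, 3, 4],
            'TYC': ['1-1-1', '1-1-1', '2-2-2', '2-2-2'],
            'angDist': [0.8, 0.2, 0.5, 1.1]})
xm = unique(xm.group_by(['TYC', 'angDist']), keys=['TYC'])
xm.remove_column('angDist')
print(xm)  # keeps SAO 2 for TYC 1-1-1 and SAO 3 for TYC 2-2-2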
Example #19
    def search_chandra_database(self):
        u = "https://cxcfps.cfa.harvard.edu/cgi-bin/cda/footprint/get_vo_table.pl?pos={0},{1}&size={2}"
        url = u.format(self.ra, self.dec, self.radius)

        url += "&inst=ACIS-I,ACIS-S"
        url += "&grating=NONE"

        r = requests.get(url)
        if r.status_code != 200:
            return
        f = io.BytesIO(r.text.encode())
        votable = parse(f)
        tbdata = votable.get_first_table().to_table()
        if len(tbdata) > 0:
            table = unique(tbdata, keys='ObsId')

            self.total_exp = np.sum(table['Exposure'])
            self.obsids = table['ObsId']
            self.targnames = table['target_name']
            self.n_obsid = len(table)
            self.targ_ras = table['RA']
            self.targ_decs = table['Dec']
            self.exp_times = table['Exposure']
            self.obs_dates = table['obs_date']
            self.jpegs = table['preview_uri']
            self.images = table['full_uri']
Example #20
    def align(self,tabref, taba):

        tabajoin = join(tabref,taba,keys=['time'],join_type='left')

        cols = tabajoin.columns

        csel = []
        
        for c in cols:
            if '_2' in c and 'index' not in c and 'band' not in c:
                csel.append(c)

        csel.append('time')
        csel.append('band_1')


        tabanew = Table(tabajoin[csel])
        tabanew['flux_2'].fill_value=0.0
        tabanew = tabanew.filled()
        
        for vv in csel:
            if '_2' in vv or '_1' in vv:
                tabanew[vv].name= '_'.join(vv.split('_')[:-1])

        return unique(tabanew, keys=['time','band'])
Example #21
    def _load_curve_data(self):
        """
        Use the configuration data for a given curve to configure the astropy.io.ascii fixed-width reader and load the data.

        Returns
        -------
        data: dict-like
            Data columns read from extinction data file.
        """
        curve_config = self.laws[self.law]
        filename = os.path.join(default_refdata_directory, "extinction", "curves", curve_config['filename'])
        data = ascii.read(
            filename,
            Reader=ascii.FixedWidth,
            data_start=curve_config['data_start'],
            names=curve_config['names'],
            col_starts=curve_config['col_starts'],
            col_ends=curve_config['col_ends'],
            guess=False
        )
        # make sure the data is sorted in order of increasing wavelength. 'data' is an astropy.table.Table object so has
        # built-in method to do this.
        data.sort(['wave'])

        # some of the files have duplicate rows so use astropy.table.unique() to weed them out, too
        uniq_data = unique(data, keys='wave')
        return uniq_data
Example #22
def what_Tims_stars_are_missing_in_my_dataset():
    usco = Table.read('usco_res/usco_run_subset.fit')
    ucl = Table.read('ucl_res/ucl_run_subset.fit')
    lcc = Table.read('lcc_res/lcc_run_subset.fit')

    tim = vstack([usco, ucl, lcc])
    tim = unique(tim, keys='source_id')

    #tim = join(usco, ucl, keys='source_id')
    #tim = join(tim, lcc, keys='source_id')

    print(len(tim), len(usco), len(ucl), len(lcc))

    d = Table.read('data_table_cartesian_with_bg_ols.fits')
    print(len(d))

    # source_id
    sd = set(d['source_id'])
    st = set(tim['source_id'])

    print('Stars in Tims set and not in mine', len(st.difference(sd)))
    for x in st.difference(sd):
        print(x)

    ct = set(tim.colnames)
    cd = set(d.colnames)
    print('Colnames in mine and not in Tims set', cd.difference(ct))
    print('Colnames in Tims and not in my set', ct.difference(cd))
Example #23
def create_MPA_catalog():
    galCl_table = read_fits_table(fnCl)
    galLi_table = read_fits_table(fnLi)
    SFR_table = read_fits_table(fnSFR)
    sSFR_table = read_fits_table(fnsSFR)
    S_M_table = create_mass_catalog()
    SDSS_table = Table([
        galCl_table['objID'], galCl_table['I_CLASS'],
        galLi_table['OIII_5007_FLUX'], galLi_table['NII_6584_FLUX'],
        galLi_table['SII_6717_FLUX'], galLi_table['OI_6300_FLUX'],
        galLi_table['H_ALPHA_FLUX'], galLi_table['H_BETA_FLUX'],
        SFR_table['AVG'], sSFR_table['AVG'], SFR_table['FLAG'],
        sSFR_table['FLAG']
    ],
                       names=('objID', 'class', 'OIII', 'NII', 'SII', 'OI',
                              'H_a', 'H_b', 'SFR avg', 'sSFR avg', 'SFR flag',
                              'sSFR flag'),
                       dtype=(np.int64, np.int32, np.float32, np.float32,
                              np.float32, np.float32, np.float32, np.float32,
                              np.float32, np.float32, np.int16, np.int16))
    SDSS_table = unique(SDSS_table, keys='objID')
    shared = ['objID']
    t = join(S_M_table, SDSS_table, keys=shared)
    t.rename_column('_RA', 'RA')
    t.rename_column('_DE', 'DE')
    return t
Example #24
def why_galaxies_not_included(galaxies, nsa, bricks):
    logging.info('Original galaxies: {}'.format(len(galaxies)))
    in_nsa_maybe_duplicate, not_in_nsa = matching_utils.match_galaxies_to_catalog_table(
        galaxies=galaxies,
        catalog=nsa,
        galaxy_suffix='',
        catalog_suffix='_nsa')
    not_in_nsa_save_loc = 'galaxies_not_in_nsa.csv'
    not_in_nsa.to_pandas().to_csv(not_in_nsa_save_loc)
    logging.info('{} galaxies not in NSA listed in {}'.format(
        len(not_in_nsa), not_in_nsa_save_loc))
    # Are they duplicates?
    in_nsa = table.unique(in_nsa_maybe_duplicate, keep='first', keys='sdss_id')
    logging.info(
        'Duplicate NSA cross-matches, selecting {} first matches only'.format(
            len(in_nsa_maybe_duplicate) - len(in_nsa)))
    # Are they in the NSA?
    logging.info('In NSA 1_0_0: {}'.format(len(in_nsa)))
    # Do they pass the selection cuts?
    good_petrotheta = selection_cuts.apply_selection_cuts(in_nsa)
    logging.info('Good petrotheta: {}'.format(len(good_petrotheta)))
    # Are they in decals?
    joint_catalog = get_joint_nsa_decals_catalog.create_joint_catalog(
        in_nsa, bricks, '5')  # dont apply selection cuts
    logging.info('In DECALS bricks: {}'.format(len(joint_catalog)))
    # Are they successfully downloaded?
    fits_dir = download_decals_settings.fits_dir
    png_dir = download_decals_settings.png_dir
    set_download_directory(joint_catalog, fits_dir, png_dir)
    joint_catalog = download_images_threaded.check_images_are_downloaded(
        joint_catalog, n_processes=1)
    image_download_stats(joint_catalog)
    return joint_catalog
Example #25
def query_tmass(source_names):
    Simbad.reset_votable_fields()
    Simbad.add_votable_fields('typed_id')  # keep search term in result table
    Simbad.add_votable_fields('flux(J)')
    Simbad.add_votable_fields('flux_error(J)')
    Simbad.add_votable_fields('flux_bibcode(J)')
    Simbad.add_votable_fields('flux(H)')
    Simbad.add_votable_fields('flux_error(H)')
    Simbad.add_votable_fields('flux_bibcode(H)')
    Simbad.add_votable_fields('flux(K)')
    Simbad.add_votable_fields('flux_error(K)')
    Simbad.add_votable_fields('flux_bibcode(K)')

    print('2MASS query started')
    result_table = Simbad.query_objects(source_names)
    print('2MASS query complete')

    # find indexes which contain 2MASS results
    ind = result_table['FLUX_BIBCODE_J'] == '2003yCat.2246....0C'

    tmass_phot = result_table['TYPED_ID', 'FLUX_J', 'FLUX_ERROR_J', 'FLUX_H',
                              'FLUX_ERROR_H', 'FLUX_K', 'FLUX_ERROR_K'][ind]
    tmass_phot_unique = unique(tmass_phot, keys='TYPED_ID', keep='first')
    print(len(tmass_phot), len(tmass_phot_unique))
    return tmass_phot_unique
Example #26
File: cattools.py  Project: tskisner/LSS
def mkfullran(tile, goodloc, pdict, randir):
    ranf = randir + 'fba-0' + str(tile) + '.fits'
    f1 = fitsio.read(ranf)
    f2 = fitsio.read(ranf, ext=2)
    f3 = fitsio.read(ranf, ext=3)

    goodranw = np.isin(f3['LOCATION'], goodloc)
    goodranid = np.unique(f3[goodranw]['TARGETID'])

    t2 = Table.read(ranf, hdu=2)
    tj = Table()
    tj['TARGETID'] = f3[goodranw]['TARGETID']
    tj['LOCATION'] = f3[goodranw]['LOCATION']
    tj['FIBER'] = f3[goodranw]['FIBER']
    tj = unique(tj, keys=['TARGETID'])
    t2.remove_columns(['PRIORITY', 'OBSCONDITIONS', 'SUBPRIORITY'])
    rant = join(tj, t2, keys=['TARGETID'], join_type='left')
    #now match back to randoms with all columns

    tall = Table.read(randir + 'tilenofa-' + str(tile) + '.fits')
    tall.remove_columns([
        'NUMOBS_MORE', 'PRIORITY', 'OBSCONDITIONS', 'SUBPRIORITY',
        'NUMOBS_INIT'
    ])

    ranall = join(rant, tall, keys=['TARGETID'], join_type='left')
    print('number of randoms:')
    print(len(ranall))

    ranall['PRIORITY'] = np.vectorize(pdict.__getitem__)(ranall['LOCATION'])
    return ranall
Example #27
    def __add__(self, other):
        """
        Combine two products by stacking their data row-wise and removing duplicated rows, using time as
        the primary key.

        Parameters
        ----------
        other : A subclass of stix_parser.products.quicklook.Product

        Returns
        -------
        A subclass of stix_parser.products.quicklook.Product
            The combined data product
        """
        if not isinstance(other, type(self)):
            raise TypeError(
                f'Products must be of the same type, not {type(self)} and {type(other)}'
            )

        # TODO reindex and update data control_index
        other.control[
            'index'] = other.control['index'] + self.control['index'].max() + 1
        control = vstack((self.control, other.control))
        # control = unique(control, keys=['scet_coarse', 'scet_fine'])
        # control = control.group_by(['scet_coarse', 'scet_fine'])

        other.data['control_index'] = other.data[
            'control_index'] + self.control['index'].max() + 1
        data = vstack((self.data, other.data))
        data = unique(data, keys='time')
        # data = data.group_by('time')
        unique_control_inds = np.unique(data['control_index'])
        control = control[np.isin(control['index'], unique_control_inds)]

        return type(self)(control, data)
Example #28
def match_stars(skycat, srccat, in_wcs, max_sep=1.5 * u.deg):
    """
    skycat : `~astropy.table.Table`
        Skycam catalog as produced by load_skycam_catalog() with Alt and Az columns
        added for the appropriate time.

    srccat : `~astropy.table.Table`
        Source catalog as produced by make_catalog().

    in_wcs : `~astropy.wcs.WCS`
        WCS used to create Alt/Az for skycat.

    max_sep : `~astropy.units.Quantity` (default: 1.5 degrees)
        Separation criterion for valid matching.
    """
    pred_az, pred_alt = in_wcs.all_pix2world(srccat['xcentroid'],
                                             srccat['ycentroid'], 0)
    pred_coord = SkyCoord(ra=pred_az * u.deg, dec=pred_alt * u.deg)
    act_coord = SkyCoord(ra=skycat['Az'], dec=skycat['Alt'])
    idx, d2d, d3d = pred_coord.match_to_catalog_sky(act_coord, nthneighbor=1)
    sep_constraint = d2d < max_sep
    matches = srccat[sep_constraint]
    cat_matches = skycat[idx[sep_constraint]]
    matched_cat = hstack([cat_matches, matches])
    matched_cat.sort('obs_mag')

    # If we get multiple matches, keep the brightest one. May be wrong, but at least consistent
    matched_cat = unique(matched_cat, keys='Star Name', keep='first')
    return matched_cat
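
# Usage sketch (not part of the original example; star names and magnitudes are made up):
# sorting on 'obs_mag' (smallest magnitude = brightest) before unique(..., keep='first')
# keeps only the brightest detection per catalog star, matching the comment above.
from astropy.table import Table, unique

matched = Table({'Star Name': ['Vega', 'Vega', 'Deneb'],
                 'obs_mag': [0.1, 2.3, 1.3]})
matched.sort('obs_mag')
matched = unique(matched, keys='Star Name', keep='first')
print(matched)  # Vega keeps obs_mag 0.1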
Example #29
 def field_stars_init(self,dwarfs_or_giants):
     from astropy.table import unique
     # data cleaning steps (Temperature, RV scatter, surface gravity)
     self.data = unique(self.data,keys='APOGEE_ID')
     self.data = self.data[self.data['TEFF']>3600]   
     self.data = self.data[self.data['VSCATTER']<1]
     self.data = self.data[self.data['LOGG']<3]
     # remove stars with 'bad' quality flags
     self.data = self.data[np.logical_and.reduce((self.data['STARFLAG'] & 2**17 == 0, self.data['STARFLAG'] & 2**2 == 0,\
                                                  self.data['STARFLAG'] & 2**3 == 0, self.data['ASPCAPFLAG'] & 2**19 == 0,\
                                                  self.data['ASPCAPFLAG'] & 2**20 == 0,self.data['ASPCAPFLAG'] & 2**23 == 0))]
     # select stars with good signal to noise ratio
     self.data = self.data[self.data['SNR']>50]
     self.data=self.data[np.logical_and(self.data['TEFF']>-9000,self.data['M_H']>-9000)]
     # initialize the SNR and Teff bins
     self.data['SNR_bin']=np.nan
     self.data['TEFF_MH_bin']=np.nan
     
     # select dwarves/red giants based on the property 'CLASS' in the file
     if dwarfs_or_giants == 'dwarfs':
         cond=np.logical_or.reduce((['Fd' in i for i in self.data['ASPCAP_CLASS']],\
                                    ['GKd' in i for i in self.data['ASPCAP_CLASS']],\
                                    ['Md' in i for i in self.data['ASPCAP_CLASS']]))
     elif dwarfs_or_giants == 'giants':
         cond=np.logical_or(['Mg' in i for i in self.data['ASPCAP_CLASS']],\
                            ['GKg' in i for i in self.data['ASPCAP_CLASS']])
     else:
         print("Please enter 'dwarfs' or 'giants'")
     self.data = self.data[cond]
Example #30
def update_local_data(year: int, timeout: float = None):
    """Download data from SuomiNet for a given year and update the master table

    Args:
        year: The year to update data for
        timeout: Optional seconds to wait while connecting to SuomiNet

    Returns:
        A boolean representing whether any data was downloaded
    """

    # Determine what years to download
    if year > datetime.now().year:
        raise ValueError(
            'Cannot download data for years greater than the current year.')

    # Get any local data that has already been downloaded
    local_data = _get_local_data()

    # Download new data from SuomiNet
    new_data = _download_data_for_year(year, timeout)
    stacked_tables = vstack([local_data, new_data])
    if stacked_tables:
        updated_data = unique(stacked_tables, keys=['date'], keep='last')

        # Update local files
        updated_data.write(settings._pwv_measured_path, overwrite=True)
        return True

    else:
        return False
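
# Usage sketch (not part of the original example; dates, values and column names are dummies):
# stacking the cached table with the fresh download and keeping the last duplicate per 'date'
# lets newly downloaded rows supersede previously stored ones, as in update_local_data() above.
from astropy.table import Table, vstack, unique

local_data = Table({'date': [1, 2], 'pwv': [5.0, 6.0]})
new_data = Table({'date': [2, 3], 'pwv': [6.5, 7.0]})

updated = unique(vstack([local_data, new_data]), keys=['date'], keep='last')
print(updated)  # date 2 takes the new value 6.5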
Example #31
    def _set_identifiers(self):
        """make a useful table of identifiers of lines covered by this model"""
        n = deepcopy(self._table['numerator'])
        n.name = 'ID'
        d = deepcopy(self._table['denominator'])
        d.name = 'ID'

        t1 = Table([self._table['title'], n], copy=True)
        # discard the summed fluxes as user would input them individually
        for id in ['OI_145+CII_158', 'OI_63+CII_158']:
            a = np.where(t1['ID'] == id)[0]
            for z in a:
                t1.remove_row(z)
        # now remove denominator from title (everything from / onwards)
        for i in range(len(t1['title'])):
            t1['title'][i] = t1['title'][i][0:t1['title'][i].index('/')]

        t2 = Table([self._table['title'], d], copy=True)
        # remove numermator from title (everything before and including /)
        for i in range(len(t2['title'])):
            t2['title'][i] = t2['title'][i][t2['title'][i].index('/') + 1:]
        t = vstack([t1, t2])
        t = unique(t, keys=['ID'], keep='first', silent=True)
        t['title'].unit = None
        t['ID'].unit = None
        t.rename_column('title', 'canonical name')
        self._identifiers = t
Example #32
def parse_spectra(data: Table) -> Table:
    """Parse the spectral types into the celestia.Sci format."""
    print('Parsing spectral types')
    data['SpType'] = data['SpType'].filled('')
    sptypes = unique(data['SpType', ])
    sptypes['CelSpec'] = parse_spectrum_vec(sptypes['SpType'])
    return join(data, sptypes)
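
# Usage sketch (not part of the original example): the dedup-compute-join pattern above runs
# an expensive per-value computation only once per distinct spectral type. `classify` below is
# a toy stand-in, not the real parse_spectrum_vec.
from astropy.table import Table, join, unique

def classify(sptype):
    # toy stand-in for the vectorised spectral-type parser
    return sptype[0]

data = Table({'HIP': [1, 2, 3, 4], 'SpType': ['G2V', 'K0III', 'G2V', 'K0III']})
sptypes = unique(data['SpType', ])                       # one row per distinct SpType
sptypes['CelSpec'] = [classify(s) for s in sptypes['SpType']]
data = join(data, sptypes)                               # broadcast back onto the full table
print(data)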
Example #33
def get_planets_table():
    """
    Get the joined planets table from the NASA Exoplanet Archive and
    the Exoplanet Orbit Database.

    Returns
    -------
    table : `~astropy.table.Table`
        Table of exoplanet properties
    """
    if not os.path.exists(planet_table_path):
        raise ValueError("You must run salter.cache.cache_planets_table first "
                         "before you can run get_joined_table")
    table = ascii.read(planet_table_path, format='csv')

    # Toss out multis
    first_kois_only = np.array(
        [koi.endswith('01') for koi in table['kepoi_name']])
    table = table[first_kois_only]
    table.add_index('kepid')

    # Ensure only unique results
    unique_table = unique(table, keys='kepid')
    unique_table.add_index('kepid')

    return unique_table
Example #34
    def find_best_comp(self, result_file_path=None,
                       best_comparison_star=None):

        result_file = Table.read(result_file_path,
                                 format='ascii.commented_header')

        # read comparison star list
        # and check manually assigned comp star
        if best_comparison_star is None:
            result_unique_by_cat = table.unique(result_file, keys='nomad1')
        else:
            result_unique_by_cat = table.unique(
                result_file[(result_file['nomad1'] == best_comparison_star)],
                keys='nomad1')

        std_list = []
        t_c_list = []
        # calculates diff_mag for all target objects and comp. stars
        for star in result_unique_by_cat['nomad1']:
            frame_results = result_file[(result_file['nomad1'] == star)]

            # diff phot.
            frame_results['t-c'] = frame_results['magt_i'] - frame_results['magc_i']
            # error propagation
            frame_results['t-c-err'] = np.sqrt(
                np.power(frame_results['magt_i_err'], 2) + np.power(frame_results['magc_i_err'], 2))

            # extracting useful columns
            t_c_list.append(frame_results['ast_num', 'nomad1', 'jd', 't-c', 't-c-err'])

            # calculating all t-c stars STD then adding list
            std_list.append(np.std(frame_results['t-c']))
        # calculating all STD's mean and its index number in the list
        mean_idx = (np.abs(np.asanyarray(std_list) - np.mean(std_list))).argmin()

        # choosing STD with min, mean and max stars
        diff_stats = {'min': [std_list.index(min(std_list)), min(std_list)],
                      'mean': [mean_idx, np.mean(std_list)],
                      'max': [std_list.index(max(std_list)), max(std_list)]
                      }
        # getting these diff mags and their other columns
        results = {'with_min_comp': t_c_list[diff_stats['min'][0]],
                   'with_mean_comp': t_c_list[diff_stats['mean'][0]],
                   'with_max_comp': t_c_list[diff_stats['max'][0]]
                   }

        return results
Example #35
def test_group_mixins():
    """
    Test grouping a table with mixin columns
    """
    # Setup mixins
    idx = np.arange(4)
    x = np.array([3., 1., 2., 1.])
    q = x * u.m
    lon = coordinates.Longitude(x * u.deg)
    lat = coordinates.Latitude(x * u.deg)
    # For Time do J2000.0 + few * 0.1 ns (this requires > 64 bit precision)
    tm = time.Time(2000, format='jyear') + time.TimeDelta(x * 1e-10, format='sec')
    sc = coordinates.SkyCoord(ra=lon, dec=lat)
    aw = table_helpers.ArrayWrapper(x)
    nd = np.array([(3, 'c'), (1, 'a'), (2, 'b'), (1, 'a')],
                  dtype='<i4,|S1').view(NdarrayMixin)

    qt = QTable([idx, x, q, lon, lat, tm, sc, aw, nd],
                names=['idx', 'x', 'q', 'lon', 'lat', 'tm', 'sc', 'aw', 'nd'])

    # Test group_by with each supported mixin type
    mixin_keys = ['x', 'q', 'lon', 'lat', 'tm', 'sc', 'aw', 'nd']
    for key in mixin_keys:
        qtg = qt.group_by(key)

        # Test that it got the sort order correct
        assert np.all(qtg['idx'] == [1, 3, 2, 0])

        # Test that the groups are right
        # Note: skip testing SkyCoord column because that doesn't have equality
        for name in ['x', 'q', 'lon', 'lat', 'tm', 'aw', 'nd']:
            assert np.all(qt[name][[1, 3]] == qtg.groups[0][name])
            assert np.all(qt[name][[2]] == qtg.groups[1][name])
            assert np.all(qt[name][[0]] == qtg.groups[2][name])

    # Test that unique also works with mixins since most of the work is
    # done with group_by().  This is using *every* mixin as key.
    uqt = unique(qt, keys=mixin_keys)
    assert len(uqt) == 3
    assert np.all(uqt['idx'] == [1, 2, 0])
    assert np.all(uqt['x'] == [1., 2., 3.])

    # Column group_by() with mixins
    idxg = qt['idx'].group_by(qt[mixin_keys])
    assert np.all(idxg == [1, 3, 2, 0])
Example #36
def merge_cat(rootname,rastr='ra',decstr='dec'):
    g=glob.glob(rootname+'/*.vo')
    tlist=[]
    for f in g:
        try:
            t=Table.read(f)
        except:
            print('Error reading table', f)
            raise
        t2=Table()
        t2['ra']=t[rastr]
        t2['dec']=t[decstr]
        tlist.append(t2)

    t=vstack(tlist)
    t2=unique(t,keys=['ra','dec'])
    t2.write(rootname+'.fits',overwrite=True)
    return t2
Example #37
i=0
j=0

for filename in os.listdir(os.getcwd()):
    start = filename.find('tile_') + 5
    end = filename.find('.fits', start)
    tile_id =filename[start:end].lstrip("0")

    tile_data1 = fits.open(filename)[1].data
    tile_data2 = fits.open(filename)[2].data

    add_rows_all1 = Table((tile_data1['TARGETID'],), names=('TARGETID',))
    add_rows_all2 = Table((tile_data2['POTENTIALTARGETID'],),names=('TARGETID',))
    del tile_data1, tile_data2

    add_rows1 = unique(add_rows_all1,keys='TARGETID')
    add_rows1['OBS'] = np.ones(len(add_rows1))
    add_rows2 = unique(add_rows_all2,keys='TARGETID')
    add_rows2['OBS'] = np.zeros(len(add_rows2))

    add_rows1['TILEID']= np.ones(len(add_rows1))*int(tile_id)
    add_rows2['TILEID']= np.ones(len(add_rows2))*int(tile_id)

    tot_targs = (vstack([add_rows1,add_rows2]))
    del add_rows1, add_rows2

    unique_targs = unique(tot_targs, keys = 'TARGETID')
    
    if i==0:
        tot_unique_targsL100 = unique_targs
    else:
Example #38
    def create_tables(self, uv_type, directory, radius, print_progress=False):
        
        '''
        Perform photometry on a directory of .fits files and create a list
        of two tables. The first table contains the redshift, exposure time,
        luminosity, and surface brightness for various supernova, along with
        the associated error values. The second table is a log outlining any
        files that do not contain a supernova or are missing checkfiles. If
        print_progress is set to True, the path of each fits file will be
        printed before photometry is performed on it, along with the number of
        remaining files.
        
        Args:
            uv_type        (str)  : Specifies which type of uv to create a table
                                        for. Use either "NUV" or "FUV".
            directory      (str)  : A directory containing .fits files
            radius         (float): Radius of desired photometry aperture in kpc
            print_progress (bool) : Whether or not to print the file path of each
                                        fits file

        Returns:
            results (list): [Data table (Table), Log table (Table)]
        '''
        
        #Make sure we have redshift and coordinates of each supernova
        if self.cord_dict.keys() != self.red_dict.keys():
            raise ValueError('''Keys in coordinate and redshift dictionaries
                                (self.cord_dict, self.red_dict) do not match''')
        
        label = uv_type + " " + str(radius) + "kpc "

        #Define the tables that will be returned by the function
        log = Table(names = ["File Path", "Issue"], dtype = [object, object])
        out = Table(names = ["sn",
                             "Redshift",
                             "Redshift Error",
                             label + "Exposure Time",
                             "Flux",
                             "Flux Error",
                             label + "Luminosity",
                             label + "Luminosity Error",
                             label + "Surface Brightness",
                             label + "Surface Brightness Error"],

                    dtype = ("S70", "float64", "float64", "float64", "float64",
                             "float64", "float64", "float64", "float64", "float64"))

        out["Redshift"].unit = u.dimensionless_unscaled
        out["Redshift Error"].unit = u.dimensionless_unscaled
        out[label + "Exposure Time"].unit = u.s
        out["Flux"].unit =  u.erg / u.s / u.Angstrom / u.kpc / u.kpc / u.cm / u.cm / np.pi
        out["Flux Error"].unit = u.erg / u.s / u.Angstrom / u.kpc / u.kpc / u.cm / u.cm / np.pi
        out[label + "Luminosity"].unit = u.erg / u.s / u.Angstrom / u.kpc / u.kpc
        out[label + "Luminosity Error"].unit = u.erg / u.s / u.Angstrom / u.kpc / u.kpc
        out[label + "Surface Brightness"].unit = u.erg / u.s / u.Angstrom / u.arcsec / u.arcsec
        out[label + "Surface Brightness Error"].unit = u.erg / u.s / u.Angstrom / u.arcsec / u.arcsec

        #Set parameters that are specific to NUV or FUV observations
        if "N" in uv_type.upper():
            file_key = "nd-int" #A string distinguing galex file types
            flux_conv = 2.06 * 1e-16 #A conversion factor from counts per second to flux

        elif "F" in uv_type.upper():
            file_key = "fd-int"
            flux_conv = 1.40 * 1e-15

        #Create a list of files to perform photometry on
        file_list = []
        for path, subdirs, files in os.walk(directory):
            for name in files:
                if file_key in name and len(name.split(".")) < 3:
                    file_list.append(os.path.join(path, name))
                    
        count = len(file_list)
        #Perform photometry on each .fits file
        for fits_file in file_list:
            if print_progress == True:
                print(count, ":", fits_file, flush = True)
                count -= 1

            p = self.photometry(fits_file, radius)
            for elt in p:
                if elt[0] == "error":
                    log.add_row([elt[1], elt[2]])
                    
                    if print_progress == True:
                        print("error", elt[2], "\n", flush = True)

                else:
                    #We calculate the values to be entered in the table
                    redshift = float(self.red_dict[elt[0]])
                    peculiar_redshift = np.sqrt((1 + (300 / 299792.458)) / (1 - (300 / 299792.458))) - 1
                    redshift_err = np.sqrt((redshift / 1000)**2 + (peculiar_redshift)**2)

                    arcmin = cosmo.kpc_comoving_per_arcmin(redshift).value**2 #kpc^2 per arcmin^2
                    arcmin_err = self.conv_error(redshift, redshift_err)

                    photom = elt[2] #The photometry value
                    photom_err = elt[3]

                    flux = flux_conv * photom #convert cps to flux using the conversion factor
                    flux_err = self.flux_error(photom, photom_err, flux_conv)

                    ldist = cosmo.luminosity_distance(redshift).cgs.value #Luminosity Distance (cm)
                    ldist_err = self.lum_dist_error(redshift, redshift_err)

                    lum = flux * 4 * np.pi * (ldist**2) #luminosity = flux*4*pi*r^2
                    lum_err = self.luminosity_error(flux, flux_err, ldist, ldist_err)

                    sbrightness = lum * arcmin / 3600
                    sbrightness_err = self.surf_brightness_error(lum, lum_err, arcmin, arcmin_err)

                    out.add_row([elt[0], redshift, redshift_err, elt[1], flux, flux_err,
                                 lum, lum_err, sbrightness, sbrightness_err])

        out.sort(label + "Surface Brightness Error")
        out_unique = unique(out, keys = "sn")
        out_unique.sort("sn")

        return([out_unique, log])
Example #39
File: FALmcmc.py  Project: pacargile/FAL
    def __init__(self,**kwargs):

        minWLin     = kwargs.get("minWLin",605.2)
        maxWLin     = kwargs.get("maxWLin",605.8)
        minlinWL    = kwargs.get("minlinWL",None)
        maxlinWL    = kwargs.get("maxlinWL",None)
        IDin        = kwargs.get("IDin",1)
        starttime   = kwargs.get("starttime",None)
        walltime    = kwargs.get("walltime",None)
        initlines   = kwargs.get("initlines",None)
        injectlines = kwargs.get("injectlines",None)
        outputfile  = kwargs.get("outputfile",None)
        outputdir  = kwargs.get("outputdir",None)
        arcscale    = kwargs.get("arcscale",None)
        previousball = kwargs.get("previousball",None)

        # change the following lines to be inputs when you init the class
        self.minWL = minWLin
        self.maxWL = maxWLin
        if minlinWL == None:
            self.minLINWL = self.minWL
            self.maxLINWL = self.maxWL
        else:                
            self.minLINWL = minlinWL
            self.maxLINWL = maxlinWL

        self.ID = IDin
        self.numstars = 2
        self.previousball = previousball
        self.IDlist = [int(10000000*x)+self.ID for x in range(1,self.numstars+1,1)]

        # Define synthesis wavelength range
        self.wavebuffer = 0.15
        self.waverange = [self.minWL-self.wavebuffer,self.maxWL+self.wavebuffer]

        # setting cut for line selection
        self.condst = [{'LP':'RESID','OP':np.less,'LV':0.99}]

        # set output file name
        self.outputfile = outputfile
        self.outputdir = outputdir

        if arcscale == None:
            self.arcscale = 1.0
        else:
            self.arcscale = arcscale

        # define a general start time so that the code can stop short of the wall time and save everything
        if starttime == None:
            self.starttime = time.time()
        else:
            self.starttime = starttime # starttime in seconds

        if walltime == None:
            # an impossibly large walltime
            self.walltime = self.starttime + 300000
        else:
            self.walltime = walltime # walltime in seconds
        print('Pro: {0} --> Total possible run time = {1} seconds'.format(self.ID,self.walltime-self.starttime))
        print('Pro: {0} --> Full Wavelength Range = {1:9.5f}-{2:9.5f} ({3:9.5f}) nm'.format(
            self.ID,self.minWL-self.wavebuffer,self.maxWL+self.wavebuffer,self.maxWL-self.minWL+(2.0*self.wavebuffer)))


        # set up some dictionaries for passing objects
        self.fmdict = {}
        origsyndict = {}

        # run synthe to get all needed lines
        for ID_i,star_i in zip(self.IDlist,['Sun','Arcturus']):
            print('Pro: {0} --> Running original synthe on full master line list for {1}'.format(self.ID,star_i))
            # initialize the class
            fm_i = FALmod.FALmod(ID=ID_i,waverange=self.waverange,starpars=star_i)
            # run SYNTHE using the master line list to grab all important lines
            spec_i,ll_i = fm_i.runsynthe(timeit=False,linelist='readmaster')
            self.fmdict[ID_i] = fm_i
            origsyndict[ID_i] = [Table(spec_i),ll_i]

        # Assemble working line list (union of ll_i from last for-loop)
        print('Pro: {0} --> Assemble working line list'.format(self.ID))
        # stack the tables
        fmll = vstack([origsyndict[ID_i][1] for ID_i in self.IDlist])
        # fmll['FILTERBOOL'] = np.zeros(len(fmll['WL']),dtype=int)
        # sort tables on all columns; include RESID so that the stronger line is listed first and will be set as a fit parameter
        tabpars = ['WL','GFLOG', 'CODE', 'E', 'XJ', 'LABEL', 'EP', 'XJP', 'LABELP', 'GR', 'GS', 'GW', 'WAVENO', 'REF', 'NBLO', 'NBUP', 'ISO1', 'X1', 'ISO2', 'X2', 'OTHER']
        fmll.sort(tabpars+['RESID'])
        fmll = unique(fmll,tabpars)

        # check to see if working in H-band where we don't include a background model
        if ((self.minWL > 1300.0) & (self.maxWL < 2300.0)):
            print("Pro: {0} --> Not using a background model for weak lines".format(self.ID))
        else:
            # remove lines with RESID > 0.9999 as these are included in the static background model
            fmll = fmll[fmll['RESID'] < 0.9999]
            print("Pro: {0} --> Using a background model for weak lines".format(self.ID))

        # # inject fake lines
        # fmll = self.injectfake(fmll.copy())

        if initlines != None:
            # inject previous parameters
            print('Pro: {0} --> Injecting Previous Line Parameters'.format(self.ID))
            if type(initlines) == type(''):
                fmll = self.injectprev(fmll.copy(),presetll=initlines)
            elif type(initlines) == type([]):
                for initlines_i in initlines:
                    fmll = self.injectprev(fmll.copy(),presetll=initlines_i)
            else:
                print("Pro: {0} --> WARNING!!! Did not understand previous line parameter file".format(self.ID))


        # set it into self
        self.fmll = fmll

        # remove all predicted lines with depths < 0.99
        plinesind = np.array((self.fmll['LABEL'] == '          ') & (self.fmll['LABELP'] == '          ') & (self.fmll['RESID'] < 0.99),dtype=bool)
        self.fmll = self.fmll[~plinesind]

        # change all DWL back to zero, hack for previous solar-only fit
        self.fmll['DWL'] = np.zeros_like(self.fmll['DWL'])

        # now run each stellar spectrum again and archive the results
        print('Pro: {0} --> Archiving the results into working directories'.format(self.ID))
        for ID_i,star_i in zip(self.IDlist,['Sun','Arcturus']):
            if star_i == 'Arcturus':
                print('Pro: {0} --> Changing C12/C13 abundance fraction for Arcturus'.format(self.ID))
                C12C13 = 7.4
                fmll_i = self.fmll.copy()
                for ll_ii in fmll_i:
                    if ll_ii['ISO1'] == 12:
                        ll_ii['GFLOG'] = ll_ii['GFLOG']+0.005-np.log10(1.0+(1.0/C12C13))
                    elif ll_ii['ISO1'] == 13:
                        ll_ii['GFLOG'] = ll_ii['GFLOG']+1.955-np.log10(1.0+C12C13)
                    else:
                        pass
            else:
                fmll_i = self.fmll.copy()

            _spec,_ll = self.fmdict[ID_i].runsynthe(timeit=False,linelist=fmll_i,archive=True)
            # reset orgll to fmll because we don't want to use the synthe parsed ll
            self.fmdict[ID_i].orgll = fmll_i

        # run function to select which lines are modeled
        (self.parr,self.psig,self.pflag,self.Tarr) = FALlinesel.linesel(self.fmll,self.condst,self.minLINWL,self.maxLINWL)

        # calculate number of lines with free parameters and their wavelengths
        fitlineind = []
        fitlinewl  = []
        for ii,tt in enumerate(self.Tarr):
            if not all(tt == -1):
                fitlineind.append(ii)
                fitlinewl.append(float(self.fmll['WL'][ii]))


        print("Pro: {0} --> Total number of lines considered in WL segment = {1}".format(self.ID,len(self.fmll)))
        print("Pro: {0} --> Number of lines that are free in WL segment = {1}".format(self.ID,len(fitlinewl)))
        # print("Pro: {0} --> Index in line list of the modeled lines...".format(self.ID),fitlineind)
        # print("Pro: {0} --> WL of modeled lines...".format(self.ID),fitlinewl)

        # number of dimensions
        self.ndim = len(self.parr)
        print("Pro: {0} --> Number of Free Line Parameters...".format(self.ID),self.ndim)
        print("Pro: {0} --> Fitting Transmission Spectrum (scaling and velocity)".format(self.ID))
        self.ndim = self.ndim + 2
        print("Pro: {0} --> Fitting Arcturus Spectrum scaling and velocity".format(self.ID))
        self.ndim = self.ndim + 2

        # get observed data, transmission spectrum, and background model
        (self.solobswave,self.solobsflux,self.arcobswave,self.arcobsflux,self.transflux,self.bg_sol_flux,self.bg_arc_flux) = self.getspecdata()

        # initialize output files
        self.initoutput()

        # # now that the output files have been written...
        # # for arcturus, remove troublesome pixels in observed spectrum (flux < 0.001 & flux > 0.99)
        # prunearc = (self.arcobsflux >= 0.001) & (self.arcobsflux <= 0.99)
        # self.arcobsflux = self.arcobsflux[np.array(prunearc)]
        # self.arcobswave = self.arcobswave[np.array(prunearc)]

        self.arcobsflux = self.arcobsflux*self.arcscale

        print("Pro: {0} --> Number of Pixels in Obs Sol ...".format(self.ID),len(self.solobswave))
        print("Pro: {0} --> Number of Pixels in Obs Arc ...".format(self.ID),len(self.arcobswave))
        print("Pro: {0} --> Using an initial scaling of {1} for Obs Arc ...".format(self.ID,self.arcscale))

        print("Pro: {0} --> Finished Setup".format(self.ID))
예제 #40
0
    def lc_plot_general(self,
                        result_file_path=None,
                        xcol='jd',
                        ycol='magt_i',
                        errcol='magt_i_err',
                        mark_color="blue",
                        bar_color="red"):

        """
        Plot light curve of photometry result.
        @param result_file_path: Result file path
        @type result_file_path: path
        @param xcol: X-axis data for plotting
        @type xcol: array
        @param ycol: Y-axis data for plotting
        @type ycol: array
        @param errcol: Error bar data for plotting
        @type errcol: array
        @param mark_color: Marker color
        @type mark_color: str
        @param bar_color: Bar marker color
        @type bar_color: str
        @return: str
        """

        print("Plotting asteroid's LC...")

        fn = os.path.basename(result_file_path).split('.')[0]

        result_file = Table.read(result_file_path,
                                 format='ascii.commented_header')

        result_unique_by_keys = table.unique(result_file, keys='jd')

        rcParams['figure.figsize'] = [10., 8.]
        figlc = plt.figure(1)
        gs = gridspec.GridSpec(2, 1, height_ratios=[6, 2])

        # Two subplots, the axes array is 1-d
        axlc1 = figlc.add_subplot(gs[0])
        axlc2 = figlc.add_subplot(gs[1])
        axlc1.set_title(fn)

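        # 3-sigma clip the magnitude column; note that the `iters` keyword
        # was renamed to `maxiters` in astropy 3.1, so newer astropy needs `maxiters=10`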
        filtered_data = sigma_clip(result_unique_by_keys[ycol], sigma=3,
                                   iters=10, stdfunc=mad_std)

        axlc1.errorbar(
            result_unique_by_keys[xcol][np.logical_not(filtered_data.mask)],
            result_unique_by_keys[ycol][np.logical_not(filtered_data.mask)],
            yerr=result_unique_by_keys[errcol][np.logical_not(
                filtered_data.mask)],
            fmt='o',
            ecolor=bar_color,
            color=mark_color,
            capsize=5,
            elinewidth=2)

        axlc1.invert_yaxis()
        axlc2.set_xlabel("JD", fontsize=12)
        axlc1.set_ylabel("Magnitude (R - INST)", fontsize=12)
        axlc2.set_ylabel("STD", fontsize=12)

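        # straight-line (degree-1) fit of the photometric errors against JD for the lower STD panel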
        fit = np.polyfit(
            result_unique_by_keys[xcol][np.logical_not(filtered_data.mask)],
            result_unique_by_keys[errcol][np.logical_not(filtered_data.mask)],
            1)
        fit_fn = np.poly1d(fit)
        axlc2.plot(
            result_unique_by_keys[xcol][np.logical_not(filtered_data.mask)],
            result_unique_by_keys[errcol][np.logical_not(filtered_data.mask)],
            'yo',
            result_unique_by_keys[xcol][np.logical_not(filtered_data.mask)],
            fit_fn(result_unique_by_keys[xcol][np.logical_not(
                filtered_data.mask)]),
            '--k')

        axlc1.grid(True)
        axlc2.grid(True)
        axlc1.legend(loc=2, numpoints=1)

        figlc.savefig("{0}/{1}_jd_vs_magi_lc.pdf".format(os.getcwd(), fn))
    def time_unique_float(self):
        unique(self.table, keys='float')

    def time_unique_int(self):
        unique(self.table, keys='int')

    def time_unique_str(self):
        unique(self.table, keys='str')
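
    # The timing methods above assume a `self.table` attribute built beforehand;
    # a minimal setup() sketch (the column names, sizes, and the use of numpy and
    # astropy.table.Table here are assumptions, not from the original source):
    def setup(self):
        values = np.arange(10000) % 100
        self.table = Table({'float': values.astype(float),
                            'int': values,
                            'str': values.astype(str)})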
예제 #44
0
    def lc_plot_std_mag(self, result_file_path=None,
                        xcol='magc_i',
                        ycol='star_Rmag',
                        errcol='magc_i_err',
                        mark_color="blue",
                        bar_color="red"):
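        """
        Plot the asteroid's standardized light curve: for each frame, fit the
        instrumental comparison magnitudes against their catalogue R magnitudes
        and evaluate the fit at the asteroid's instrumental magnitude. Parameters
        follow the same conventions as lc_plot_general (column names and colors).
        """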

        print("Plotting asteroid's LC...")

        # Fixing random state for reproducibility
        np.random.seed(19680801)

        fn = os.path.basename(result_file_path).split('.')[0]

        # Two subplots, the axes array is 1-d
        # Plotting settings
        rcParams['figure.figsize'] = [10., 8.]

        lc = plt.figure(1)
        lc_ast_std = plt.figure()
        gs = gridspec.GridSpec(2, 1, height_ratios=[6, 2])

        # magi vs catalogue
        lc1 = lc.add_subplot(gs[0])
        lc1.set_title(fn)
        lc1.grid(True)
        lc1.set_ylabel("Magnitude (R - NOMAD1)", fontsize=12)
        lc1.invert_yaxis()

        # magi vs STD
        lc2 = lc.add_subplot(gs[1])
        lc2.set_title(fn)
        lc2.grid(True)
        lc2.set_xlabel("Magnitude (Inst)", fontsize=12)
        lc2.set_ylabel("$STD$", fontsize=12)

        # magt vs estimated mag
        lc3 = lc_ast_std.add_subplot(gs[0])
        lc3.set_title(fn)
        lc3.legend(loc=2, numpoints=1)
        lc3.grid(True)
        lc3.invert_yaxis()
        lc3.set_xlabel("$JD$", fontsize=12)
        lc3.set_ylabel("Magnitude (R - Estimated from NOMAD1)",
                       fontsize=12)
        # Plotting settings

        result_file = Table.read(result_file_path,
                                 format='ascii.commented_header')

        # result_unique_by_keys = table.unique(result_file, keys='nomad1')
        result_unique_by_jd = table.unique(result_file, keys='jd')

        magt_std_list = []
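        # For each frame (unique JD): fit a linear transformation from the
        # instrumental comparison magnitudes to the catalogue R magnitudes, then
        # use it to put the asteroid's instrumental magnitude on the catalogue scale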
        for jd in result_unique_by_jd['jd']:
            frame_results = result_file[(result_file['jd'] == jd)]

            # reject outliers with 3-sigma clipping
            filtered_frame_results = sigma_clip(frame_results['magt_i'],
                                                sigma=3,
                                                iters=10, stdfunc=mad_std)

            # use only data not rejected by the clipping (apply the inverted mask)
            filtered_f_umask = np.logical_not(filtered_frame_results.mask)

            # magci vs catalogue with error bar
            lc1.errorbar(
                frame_results[xcol][filtered_f_umask],
                frame_results[ycol][filtered_f_umask],
                yerr=frame_results[errcol][filtered_f_umask],
                fmt='o',
                ecolor=bar_color,
                color=mark_color,
                capsize=5,
                elinewidth=2)

            # magci vs catalogue fit calculation
            fit = np.polyfit(
                frame_results[xcol][filtered_f_umask],
                frame_results[ycol][filtered_f_umask],
                1)

            fit_fn = np.poly1d(fit)

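            # evaluate the fit at the asteroid's instrumental magnitudes; the first
            # row of each frame's results is assumed to be the asteroid itself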
            magt_to_std = fit_fn(frame_results['magt_i'][filtered_f_umask])
            magt_std_list.append([jd, magt_to_std[0], frame_results['magt_i_err'][0]])

            # magci vs catalogue fit plot
            lc1.plot(
                frame_results[xcol][filtered_f_umask],
                fit_fn(frame_results[xcol][filtered_f_umask]),
                '--k')

            # magi vs catalogue error fit calc.
            fit = np.polyfit(
                frame_results[xcol][filtered_f_umask],
                frame_results[errcol][filtered_f_umask],
                1)
            fit_fn = np.poly1d(fit)

            # magi vs STD fit plot
            lc2.plot(
                frame_results[xcol][filtered_f_umask],
                frame_results[errcol][filtered_f_umask],
                'yo',
                frame_results[xcol][filtered_f_umask],
                fit_fn(frame_results[xcol][filtered_f_umask]),
                '--k')

        # jd vs magt_std
        jd_vs_magt = np.asanyarray(magt_std_list)
        filtered_jd_vs_magt = sigma_clip(jd_vs_magt[:, 1],
                                         sigma=3,
                                         iters=10, stdfunc=mad_std)
        # use only data not rejected by the clipping (apply the inverted mask)
        filtered_f_umask = np.logical_not(filtered_jd_vs_magt.mask)

        # jd vs magt plotting with error bars
        lc3.errorbar(
            jd_vs_magt[:, 0][filtered_f_umask],
            jd_vs_magt[:, 1][filtered_f_umask],
            yerr=jd_vs_magt[:, 2][filtered_f_umask],
            fmt='o',
            ecolor=bar_color,
            color=mark_color,
            capsize=5,
            elinewidth=2,
            label='{0} - R (Estimated)'.format(fn))

        lc3.legend(loc=2, numpoints=1)
        lc_ast_std.savefig("{0}/{1}_jd_vs_mag_std_lc.pdf".format(os.getcwd(), fn))