Example #1
    def query_table(self, query={}, selection={}):
        """
        Get a formatted table of all query results. When multiple results are present for a single value, the best one
        is picked unless the user specifies a selection. This functionality will be revisited in the future.
        The output is a QTable, which allows Quantities to be embedded in the results.

        Parameters
        ----------
        query : dict
            Query to use in MongoDB query language. Default is an empty dictionary for all results.
        selection : dict
            Dictionary mapping a field name to the reference whose value should be used (otherwise the entry with best=1 is picked)

        Returns
        -------
        df : astropy.table.QTable
            Astropy QTable of results
        """

        results = self.query_db(query=query)

        # For each entry in the results, select the best field value or the one the user specified
        tab_data = []
        for entry in results:
            out_row = {}
            for key, val in entry.items():
                if not isinstance(val, (list, np.ndarray)):
                    out_row[key] = val
                else:
                    if len(val) > 1:
                        if key in selection:
                            # If selection listed this key, match on its reference
                            ind = np.array([x['reference'] for x in val]) == selection[key]
                        else:
                            ind = np.array([x.get('best', 0) for x in val]) == 1
                        # Boolean-mask indexing needs an array, not a list
                        chosen = np.asarray(val)[ind][0]
                    else:
                        chosen = val[0]
                    unit = chosen.get('unit')
                    if chosen.get('distribution') is not None:
                        temp_dic = get_values_from_distribution(chosen.get('distribution'))
                        temp_val = temp_dic['value']
                    else:
                        temp_val = chosen['value']
                    out_row[key] = self._store_quantity(temp_val, unit)

            tab_data.append(out_row)

        # Convert to QTable
        temp = pd.DataFrame(tab_data)  # use pandas as intermediary format
        df = QTable.from_pandas(temp)

        return df
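A hypothetical call of the method above (the instance, field name, and reference string are made up; they depend on the actual database contents):

db = Database()  # hypothetical instance of the class defining query_table
tab = db.query_table(query={'source': 'TWA 27'},
                     selection={'parallax': 'GaiaDR3'})
print(tab)  # QTable; unit-bearing fields come back as Quantities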
Example #2
def joinTables1(path, dest):
    'Same as joinTables, to use if the previous one does not work'
    tables = []
    for filename in os.listdir(path):
        df = Table.read(os.path.join(path, filename)).to_pandas()
        tables.append(df)
    match = pd.concat(tables, axis=0, ignore_index=True)
    del tables
    del df
    match = QTable.from_pandas(match)
    match.write(dest)
Example #3
def read_mc_dl2_to_QTable(filename):
    """
    Read MC DL2 files from lstchain and convert into pyirf internal format
    - astropy.table.QTable

    Parameters
    ----------
    filename: path to the lstchain DL2 MC file

    Returns
    -------
    `astropy.table.QTable`, `pyirf.simulations.SimulatedEventsInfo`
    """

    # mapping
    name_mapping = {
        "mc_energy": "true_energy",
        "mc_alt": "true_alt",
        "mc_az": "true_az",
        "mc_alt_tel": "pointing_alt",
        "mc_az_tel": "pointing_az",
        "gammaness": "gh_score",
    }

    unit_mapping = {
        "true_energy": u.TeV,
        "reco_energy": u.TeV,
        "pointing_alt": u.rad,
        "pointing_az": u.rad,
        "true_alt": u.rad,
        "true_az": u.rad,
        "reco_alt": u.rad,
        "reco_az": u.rad,
    }

    simu_info = read_simu_info_merged_hdf5(filename)
    pyirf_simu_info = SimulatedEventsInfo(
        n_showers=simu_info.num_showers * simu_info.shower_reuse,
        energy_min=simu_info.energy_range_min,
        energy_max=simu_info.energy_range_max,
        max_impact=simu_info.max_scatter_range,
        spectral_index=simu_info.spectral_index,
        viewcone=simu_info.max_viewcone_radius,
    )

    events = pd.read_hdf(
        filename, key=dl2_params_lstcam_key).rename(columns=name_mapping)
    events = QTable.from_pandas(events)

    for k, v in unit_mapping.items():
        events[k] *= v

    return events, pyirf_simu_info
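The unit loop above is the core pattern of these examples: QTable.from_pandas yields unitless columns, and multiplying a column by an astropy unit promotes it to a Quantity column. A minimal self-contained sketch of that pattern (the column name and values are illustrative):

import astropy.units as u
import pandas as pd
from astropy.table import QTable

df = pd.DataFrame({'reco_energy': [0.5, 1.2, 3.4]})  # illustrative values
tab = QTable.from_pandas(df)    # plain, unitless column
tab['reco_energy'] *= u.TeV     # now a Quantity column in TeV
print(tab['reco_energy'].unit)  # TeV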
Example #4
    def table(self):
        _diam = {
            'Name': 'Diameter',
            'Value': self.diam,
            'Comment': 'Primary mirror diameter',
            'Formula': '',
            'Symbol': r'$$D_\mathrm{PM}$$'
        }
        _cobscuration = {
            'Name': 'Central obscuration',
            'Value': self.cobscuration,
            'Comment': 'pct of primary mirror diameter',
            'Formula': '',
            'Symbol': r'$$\mathrm{oc}$$'
        }
        _sobscuration = {
            'Name': 'Strut obscuration',
            'Value': self.sobscuration,
            'Comment': 'pct of primary mirror area',
            'Formula': '',
            'Symbol': r'$$\mathrm{os}$$'
        }
        _clearApFrac = {
            'Name': 'Clear aperture fraction',
            'Value': self.clearApFrac,
            'Comment': 'After obscuration by secondary mirror and struts',
            'Formula': r'$$(1-%s)\times (1-%s^2)$$' % (
                _sobscuration['Symbol'].replace('$', ''),
                _cobscuration['Symbol'].replace('$', '')),
            'Symbol': r'$$\mathrm{obsc}$$'
        }
        _Aeff = {
            'Name': 'Effective aperture',
            'Value': self.Aeff,
            'Comment': 'After obscuration by secondary mirror and struts',
            'Formula': r'$$%s \times %s$$' % (
                _diam['Symbol'].replace('$', ''),
                _clearApFrac['Symbol'].replace('$', '')),
            'Symbol': r'$$A_\mathrm{eff}$$'
        }
        tab = QTable.from_pandas(
            pd.DataFrame([_diam, _cobscuration, _sobscuration, _clearApFrac, _Aeff]))
        tab.add_index('Name')
        return tab['Name', 'Value', 'Symbol', 'Formula', 'Comment']
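Because the table is indexed on 'Name', single rows can be fetched with .loc; hypothetical usage, assuming tel is an instance of the class defining table():

tab = tel.table()
print(tab.loc['Effective aperture']['Value'])  # one parameter row, looked up by name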
Example #5
def read_snana_fits_to_sncosmo_table(fname):
    """Load SNANA formatted data and cast it to sncosmo table
    Args:
        fname (str): path + name to PHOT.FITS file
    Returns:
        (astropy.table.QTable) table with photometry in sncosmo format
    """

    # load photometry
    dat = Table.read(fname, format="fits")
    df_phot = dat.to_pandas()
    # failsafe
    if df_phot.MJD.values[-1] == -777.0:
        df_phot = df_phot.drop(df_phot.index[-1])
    if df_phot.MJD.values[0] == -777.0:
        df_phot = df_phot.drop(df_phot.index[0])

    # load header
    header = Table.read(fname.replace("PHOT", "HEAD"), format="fits")
    df_header = header.to_pandas()
    df_header["SNID"] = df_header["SNID"].astype(np.int32)

    # add SNID to phot for skimming
    arr_ID = np.zeros(len(df_phot), dtype=np.int32)
    # New light curves are identified by MJD == -777.0
    arr_idx = np.where(df_phot["MJD"].values == -777.0)[0]
    arr_idx = np.hstack((np.array([0]), arr_idx, np.array([len(df_phot)])))
    # Fill in arr_ID
    for counter in range(1, len(arr_idx)):
        start, end = arr_idx[counter - 1], arr_idx[counter]
        # index starts at zero
        arr_ID[start:end] = df_header.SNID.iloc[counter - 1]
    df_phot["SNID"] = arr_ID

    df_phot = df_phot[df_phot.MJD != -777.000]

    df_tmp = pd.DataFrame()
    df_tmp["SNID"] = df_phot["SNID"]
    df_tmp["time"] = df_phot["MJD"]
    df_tmp["band"] = "des" + df_phot["FLT"].str.decode("utf-8").str.strip(" ")
    df_tmp["zp"] = df_phot["ZEROPT"]
    # FLUXCAL is zp = 27.5
    df_tmp["flux"] = df_phot["FLUXCAL"]
    df_tmp["fluxerr"] = df_phot["FLUXCALERR"]
    df_tmp["zp"] = np.repeat(27.5, len(df_phot))
    df_tmp["zpsys"] = np.repeat("ab", len(df_phot))

    at_phot = QTable.from_pandas(df_tmp)

    return at_phot
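The SNID-assignment loop above is the subtle part: rows between successive MJD == -777.0 separators belong to successive header entries. A tiny worked example with hypothetical values:

import numpy as np

mjd = np.array([55000., 55001., -777., 55100., 55101.])  # two light curves
snids = np.array([11, 22], dtype=np.int32)               # hypothetical header SNIDs
arr_idx = np.hstack(([0], np.where(mjd == -777.)[0], [len(mjd)]))
arr_ID = np.zeros(len(mjd), dtype=np.int32)
for counter in range(1, len(arr_idx)):
    arr_ID[arr_idx[counter - 1]:arr_idx[counter]] = snids[counter - 1]
# arr_ID -> [11, 11, 22, 22, 22]; the separator row itself is assigned the
# next SNID but is removed afterwards by the MJD != -777 filter.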
Example #6
def read_data_dl2_to_QTable(filename, srcdep_pos=None):
    """
    Read data DL2 files from lstchain and return QTable format
    Parameters
    ----------
    filename: path to the lstchain DL2 file
    srcdep_pos: assumed source position for source-dependent analysis

    Returns
    -------
    `astropy.table.QTable`
    """

    # Mapping
    name_mapping = {
        "gammaness": "gh_score",
        "alt_tel": "pointing_alt",
        "az_tel": "pointing_az",
    }
    unit_mapping = {
        "reco_energy": u.TeV,
        "pointing_alt": u.rad,
        "pointing_az": u.rad,
        "reco_alt": u.rad,
        "reco_az": u.rad,
        "dragon_time": u.s,
    }

    # add alpha for source-dependent analysis
    srcdep_flag = dl2_params_src_dep_lstcam_key in get_dataset_keys(filename)
    
    if srcdep_flag:
        unit_mapping['alpha'] = u.deg

    data = pd.read_hdf(filename, key=dl2_params_lstcam_key)

    if srcdep_flag:
        data_srcdep = get_srcdep_params(filename, srcdep_pos)
        data = pd.concat([data, data_srcdep], axis=1)

    data = data.rename(columns=name_mapping)

    data = QTable.from_pandas(data)

    # Convert the columns to Quantity
    for k, v in unit_mapping.items():
        data[k] *= v

    return data
Example #7
def loadCGs(filename=None):
    '''
    Load coronagraph tables
    '''
    def p2f(x):
        return float(x.strip('%')) / 100

    if filename is None:
        fname = path + '/Inputs/ETC/CGs.csv'
    else:
        fname = filename
    c = pd.read_csv(fname, converters={'BW': p2f}, skip_blank_lines=True)
    cgs = QTable.from_pandas(c)
    cgs.add_index('CG')
    return cgs
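With the converter and the 'CG' index in place, rows can be looked up by coronagraph name; a hypothetical lookup (the 'HLC' row name is made up):

cgs = loadCGs()
row = cgs.loc['HLC']  # one coronagraph row, fetched by name via the index
print(row['BW'])      # p2f already turned e.g. '20%' into 0.2 at read time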
Example #8
def loadPlanets(filename=None):
    '''
    Load planet database
    '''
    if filename is None:
        fname = path + '/Inputs/ETC/Planets.csv'
    else:
        fname = filename
    pl = pd.read_csv(fname, skip_blank_lines=True)
    planets = QTable.from_pandas(pl)
    planets['V'].unit = u.mag
    planets['A'].unit = u.au
    planets['DIST'].unit = u.pc
    planets['Rp / R_J'].unit = u.R_jup
    planets.add_index(['NAME'])
    return planets
Example #9
def loadCoronagraph(name):
    '''
    Load single coronagraph description
    '''
    if not isinstance(name, str):
        log.error('Need a string for the coronagraph name')
        raise TypeError('Need a string for the coronagraph name')
    coron = pd.read_csv(path + '/Inputs/ETC/' + name + '.csv',
                        skip_blank_lines=True)
    coron = coron.dropna()
    corono = QTable.from_pandas(coron)
    corono.add_index('r(arcsec)')
    corono['r(arcsec)'].unit = u.arcsec
    corono['area(sq_arcsec)'].unit = u.arcsec**2
    return corono
Example #10
def loadScenarios(filename=None):
    '''
    Load various observing scenarios
    '''
    if filename is None:
        fname = path + '/Inputs/ETC/Scenarios.csv'
    else:
        fname = filename
    scens = pd.read_csv(fname, skip_blank_lines=True)
    scenarios = QTable.from_pandas(scens)

    # two indexes
    scenarios.add_index('Scenario')
    scenarios.add_index('Coronagraph')
    scenarios['Center lam'].unit = u.nm
    scenarios['t integ, hrs'].unit = u.h
    scenarios['Years at L2'].unit = u.year
    scenarios['Ref  Dmag'].unit = u.mag
    return scenarios
Example #11
def loadSpectra(filename=None):
    '''
    Load stellar spectra database
    '''

    if filename is None:
        fname = path + '/Inputs/ETC/Spectra.csv'
    else:
        fname = filename
    st = pd.read_csv(
        fname,
        skip_blank_lines=True,
    )
    stars = QTable.from_pandas(st)
    stars['Wavelength (m) '].unit = u.m
    stars.rename_column('Wavelength (m) ', 'Wavelength')
    stars['E_ph (J)'].unit = u.J
    stars.rename_column('E_ph (J)', 'E_ph')
    cols = ['a0v', 'a5v', 'f5v', 'g0v', 'g5v', 'k0v', 'k5v', 'm0v', 'm5v']
    for col in cols:
        stars[col].unit = u.W / u.m**2 / u.m
    return stars
Example #12
def read_data_dl2_to_QTable(filename):
    """
    Read data DL2 files from lstchain and return QTable format
    Parameters
    ----------
    filename: path to the lstchain DL2 file

    Returns
    -------
    `astropy.table.QTable`
    """

    # Mapping
    name_mapping = {
        "gammaness": "gh_score",
        "alt_tel": "pointing_alt",
        "az_tel": "pointing_az",
    }
    unit_mapping = {
        "reco_energy": u.TeV,
        "pointing_alt": u.rad,
        "pointing_az": u.rad,
        "reco_alt": u.rad,
        "reco_az": u.rad,
        "dragon_time": u.s,
    }

    data = pd.read_hdf(filename,
                       key=dl2_params_lstcam_key).rename(columns=name_mapping)

    data = QTable.from_pandas(data)

    # Convert the columns to Quantity
    for k, v in unit_mapping.items():
        data[k] *= v

    return data
Example #13
def main(outdir, data, source, cuts_file, n_offs, n_jobs):

    gh_cuts = QTable.read(cuts_file, hdu='GH_CUTS')
    theta_cuts = QTable.read(cuts_file, hdu='THETA_CUTS_OPT')
    src = SkyCoord.from_name(source)

    if n_jobs == -1:
        n_jobs = cpu_count()

    with Pool(n_jobs) as pool:
        results = np.array(
            pool.starmap(
                calculation.read_run_calculate_thetas, # applies gh cut
                [(run, columns, gh_cuts, src, n_offs) for run in data]
            ), dtype=object
        )

    dfs = results[:,0]
    ontimes = results[:,1]
    thetas = results[:,2]
    df5s = results[:,3] # only necessary for theta_off (not currently included)
    theta_offs = results[:,4] # not currently included

    observations = []
    index = []
    for df, ontime, theta, df5, theta_off in zip(dfs, ontimes, thetas, df5s, theta_offs):
        tstart = df.dragon_time.min()
        tstop = df.dragon_time.max()
        df['theta_on'] = theta.deg
        df.dropna(
            subset=['gamma_energy_prediction', 'gammaness', 'source_ra_prediction', 'source_dec_prediction'],
            inplace=True
        )
        # Apply theta cuts here?
        #theta_mask = evaluate_binned_cut(
        #    theta, df.gamma_energy_prediction.to_numpy() * u.TeV, theta_cuts, operator.le
        #)

        df_events = df[list(event_map.keys())].rename(columns=event_map)
        df_pointings = df[list(pointing_map.keys())].rename(columns=pointing_map)

        events = QTable.from_pandas(df_events, units=unit_map)
        events['RA'] = events['RA'].to(u.deg)
        events['DEC'] = events['DEC'].to(u.deg)

        pointings = QTable.from_pandas(df_pointings, units=unit_map)
        pointings['RA_PNT'] = pointings['RA_PNT'].to(u.deg)
        pointings['DEC_PNT'] = pointings['DEC_PNT'].to(u.deg)

        event_header = DEFAULT_HEADER.copy()
        event_header['HDUCLAS1'] = 'EVENTS'
        event_header['OBS_ID'] = df.obs_id.iloc[0]
        event_header['TSTART'] = tstart
        event_header['TSTOP'] = tstop
        event_header['ONTIME'] = ontime.to_value(u.s)
        event_header['LIVETIME'] = event_header['ONTIME'] # Fix this?
        event_header['DEADC'] = 1.0

        event_header['RA_PNT'] = np.mean(pointings['RA_PNT']).to_value(u.deg)
        event_header['DEC_PNT'] = np.mean(pointings['DEC_PNT']).to_value(u.deg)

        event_header['EQUINOX'] = 2000.0
        event_header['RADECSYS'] = 'ICRS'
        event_header['ORIGIN'] = 'CTA'
        event_header['TELESCOP'] = 'LST1'
        event_header['INSTRUME'] = 'LST1'

        gtis = QTable(
            [[tstart] * u.s, [tstop] * u.s],
            names=('START', 'STOP')
        )
        gti_header = DEFAULT_HEADER.copy()
        gti_header['MJDREFI'] = 40587 # reference time MJD 40587 = 1970-01-01 (Unix epoch)
        gti_header['MJDREFF'] = 0.0
        gti_header['TIMEUNIT'] = 's'
        gti_header['TIMESYS'] = 'UTC'  # ?
        gti_header['TIMEREF'] = 'TOPOCENTER' # ?

        pointing_header = gti_header.copy()
        pointing_header['OBSGEO-L'] = -17.89139
        pointing_header['OBSGEO-B'] = 28.76139
        pointing_header['OBSGEO-H'] = 2184.0

        hdus = [
            fits.PrimaryHDU(),
            fits.BinTableHDU(events, header=event_header, name="EVENTS"),
            fits.BinTableHDU(pointings, header=pointing_header, name="POINTING"),
            fits.BinTableHDU(gtis, header=gti_header, name="GTI")
        ]
        fits.HDUList(hdus).writeto(f'{outdir}/{df.obs_id.iloc[0]}.fits.gz', overwrite=True)

        observations.append((
            df.obs_id.iloc[0],
            (df_pointings.RA_PNT.mean(axis=0) * u.rad).to_value(u.deg),
            (df_pointings.DEC_PNT.mean(axis=0) * u.rad).to_value(u.deg),
            tstart,
            tstop,
            1.0
        ))
        index.append((
            df.obs_id.iloc[0],
            'events',
            'events',
            '.',
            f'{df.obs_id.iloc[0]}.fits.gz',
            'EVENTS'
        ))
        index.append((
            df.obs_id.iloc[0],
            'gti',
            'gti',
            '.',
            f'{df.obs_id.iloc[0]}.fits.gz',
            'GTI'
        ))
        index.append((
            df.obs_id.iloc[0],
            'aeff',
            'aeff_2d',
            '.',
            cuts_file.split('/')[-1],
            'EFFECTIVE_AREA'
        ))
        index.append((
            df.obs_id.iloc[0],
            'psf',
            'psf_table',
            '.',
            cuts_file.split('/')[-1],
            'PSF'
        ))
        index.append((
            df.obs_id.iloc[0],
            'edisp',
            'edisp_2d',
            '.',
            cuts_file.split('/')[-1],
            'ENERGY_DISPERSION'
        ))
        index.append((
            df.obs_id.iloc[0],
            'bkg',
            'bkg_2d',
            '.',
            cuts_file.split('/')[-1],
            'BACKGROUND'
        ))

    observations_table = QTable(
        rows=observations,
        names=['OBS_ID', 'RA_PNT', 'DEC_PNT', 'TSTART', 'TSTOP', 'DEADC'],
        units=['', 'deg', 'deg', 's', 's', '']
    )
    obs_header = DEFAULT_HEADER.copy()
    obs_header['HDUCLAS1'] = 'INDEX'
    obs_header['HDUCLAS2'] = 'OBS'
    obs_header['MJDREFI'] = 40587 # reference time MJD 40587 = 1970-01-01 (Unix epoch)
    obs_header['MJDREFF'] = 0.0
    obs_header['TIMEUNIT'] = 's'
    obs_header['TIMESYS'] = 'UTC'  # ?
    obs_header['TIMEREF'] = 'TOPOCENTER' # ?
    obs_header['OBSGEO-L'] = -17.89139
    obs_header['OBSGEO-B'] = 28.76139
    obs_header['OBSGEO-H'] = 2184.0

    hdus = [
        fits.PrimaryHDU(),
        fits.BinTableHDU(observations_table, header=obs_header, name="OBS_INDEX")
    ]
    fits.HDUList(hdus).writeto(f'{outdir}/obs-index.fits.gz', overwrite=True)

    index_table = QTable(
        rows=index,
        names=['OBS_ID', 'HDU_TYPE', 'HDU_CLASS', 'FILE_DIR', 'FILE_NAME', 'HDU_NAME'],
    )
    index_header = DEFAULT_HEADER.copy()
    index_header['HDUCLAS1'] = 'INDEX'
    index_header['HDUCLAS2'] = 'HDU'

    hdus = [
        fits.PrimaryHDU(),
        fits.BinTableHDU(index_table, header=index_header, name="HDU_INDEX"),
    ]
    fits.HDUList(hdus).writeto(f'{outdir}/hdu-index.fits.gz', overwrite=True)
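The obs-index and hdu-index tables written above follow the standard index-file layout, so the output directory should be readable as a data store, e.g. with gammapy (a sketch, assuming gammapy is installed and outdir holds the files written by main):

from gammapy.data import DataStore

data_store = DataStore.from_dir(outdir)  # picks up hdu-index.fits.gz and obs-index.fits.gz
print(data_store.obs_table)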
Example #14
    def searchCadidates(self, days=15):

        alerce = AlerceArchive()
        alerceGoodCandidates = alerce.getCandidates(days)

        lasairbroker = LasairArchive()
        lasairGoodCandidates = lasairbroker.getLastDetections(days)

        # alerceGoodCandidates = [r["result"][target] for target in r["result"]]

        alerceTable = QTable.from_pandas(alerceGoodCandidates)
        lasairTable = QTable.from_pandas(lasairGoodCandidates)

        print("candidates found lasair{0} alerce{1} ".format(
            len(lasairGoodCandidates), len(alerceGoodCandidates)))
        meanaler_val = {
            "ramean": np.nan_to_num(np.concatenate(
                (alerceGoodCandidates["meanra"].values,
                 lasairGoodCandidates["meanra"].values), axis=0)),
            "decmean": np.nan_to_num(np.concatenate(
                (alerceGoodCandidates["meandec"].values,
                 lasairGoodCandidates["meandec"].values), axis=0)),
            "maggmax": np.nan_to_num(np.concatenate(
                (alerceGoodCandidates["max_magap_g"].values,
                 lasairGoodCandidates["maggmax"].values), axis=0)),
            "maggmin": np.nan_to_num(np.concatenate(
                (alerceGoodCandidates["min_magap_g"].values,
                 lasairGoodCandidates["maggmin"].values), axis=0)),
            "magrmax": np.nan_to_num(np.concatenate(
                (alerceGoodCandidates["max_magap_r"].values,
                 lasairGoodCandidates["magrmax"].values), axis=0)),
            "magrmin": np.nan_to_num(np.concatenate(
                (alerceGoodCandidates["min_magap_r"].values,
                 lasairGoodCandidates["magrmin"].values), axis=0)),
            "id": np.concatenate(
                (alerceGoodCandidates["oid"].values,
                 lasairGoodCandidates["oid"].values), axis=0)
        }

        bigdata = pd.DataFrame(meanaler_val)
        bigdata_drop = bigdata.drop_duplicates(subset="id", keep=False)

        bigtable = bigdata
        if bigdata_drop.size > 0:
            bigtable = bigdata_drop

        table_candidates = QTable(QTable.from_pandas(bigtable), masked=False)

        return table_candidates, alerceGoodCandidates, lasairGoodCandidates
Example #15
def check_ltc_residuals(dataframe,
                        ccd,
                        rawy_range=(1, 200),
                        binned=None,
                        filter_select=None,
                        plot_it=True,
                        png_file=None,
                        title=''):
    """Validation for the LTC correction by means of checking the residuals
    
     Parameters
     ----------
        dataframe : DataFrame, mandatory
            the pandas DataFrame with the Cu K-alpha fit results (from Michael Smith's monitoring run), produced by `ff_monitoring_work2.ipynb`
        ccd : int, mandatory
            the EPIC-pn CCD number (from 1 to 12)
        rawy_range : list
            the RAWY range to select; can be (1, 200) or (x, x+19) for x in range(1, 201, 20)
        binned : float, optional
            bin the data, grouping by `binned` years; if None, no binning
        filter_select : str
            if not None, select on the filter wheel; can be one of 'CalClosed', 'CalMedium', 'CalThick', 'CalThin1', 'Closed',
            'Medium', 'Thick', 'Thin1', 'Thin2'. If None, all filters are selected.
        plot_it : bool
            if set, plot the results.
        png_file : str
            if set, the plot will be saved to this file.
        title : str
            text to append to the plot title, e.g. a version or other comment

     Output
     ------
        output : list
            [ccd, xmode, rawy0, rawy1, clipped mean, clipped st.dev., ntab,
            year_run, xin_bin, yin_bin, yin_bin_err]; the last four are None
            if no binning was requested

    Modification history
    --------------------

        Created 17 Mar 2021, Ivan Valchanov, XMM SOC

    """
    #
    ntot = np.count_nonzero((dataframe.ccd == ccd)
                            & (dataframe.rawy0 == rawy_range[0])
                            & (dataframe.rawy1 == rawy_range[1]))
    #
    xtab = select_data(dataframe,
                       ccd,
                       rawy_range=rawy_range,
                       filter_select=filter_select)
    ntab = len(xtab)
    xmode = xtab.xmode
    if (filter_select is not None):
        print(
            f"CCD {ccd}, {xmode} mode, filter {filter_select}: filtered {ntab} results out of {ntot}"
        )
    else:
        print(
            f"CCD {ccd}, {xmode} mode: filtered {ntab} results out of {ntot}")
    #
    #line = dataframe.line
    #
    xin = xtab.delta_time
    # line0 is the Cu K-alpha reference line energy, defined at module level
    residual = (xtab.energy - line0)  # in eV
    residual_err = (xtab.energy_err1 + xtab.energy_err2) / 2.0  # in eV
    qmean = np.mean(residual)
    qstd = np.std(residual)
    xstat = stats.sigma_clipped_stats(residual, sigma=3, maxiters=3)
    #
    if (binned is not None):
        # add those as columns in the dataframe
        qt = QTable.from_pandas(xtab)
        qt['residual'] = residual
        qt['residual_err'] = residual_err
        #
        year_bin = np.trunc(qt['delta_time'] / binned)
        year_run = np.unique(year_bin)
        #year_bin = np.trunc(qt['delta_time']/binned)
        dat_grouped = qt.group_by(year_bin)
        #
        dat_binned = dat_grouped.groups.aggregate(np.median)
        dat_binned_std = dat_grouped.groups.aggregate(mad)
        xin_bin = dat_binned['delta_time']
        yin_bin = dat_binned['residual']
        yin_bin_err = dat_binned_std['residual']
    #
    # prepare the output; the binned quantities are None if no binning was requested
    #
    output = [ccd, xmode, rawy_range[0], rawy_range[1], xstat[0], xstat[2], ntab]
    if (binned is not None):
        output += [year_run.data, xin_bin.data, yin_bin.data, yin_bin_err.data]
    else:
        output += [None, None, None, None]
    #
    # plotting
    #
    if (plot_it):
        fig, ax = plt.subplots(figsize=(10, 6))
        ax.errorbar(xin,
                    residual,
                    yerr=residual_err,
                    fmt='o',
                    label=f'CCDNR {ccd}',
                    zorder=0)
        if (binned is not None):
            ax.step(year_run,
                    yin_bin,
                    where='pre',
                    zorder=2,
                    color='cyan',
                    label='Per bin median')
            #ax.step(xin_bin,yin_bin,where='mid',zorder=2,color='cyan',label='Per bin median')
            ax.errorbar(xin_bin,
                        yin_bin,
                        yerr=yin_bin_err,
                        fmt='o',
                        color='cyan',
                        zorder=1)
        ax.axhline(0.0, linestyle='dashed', linewidth=3, color='red', zorder=1)
        ax.axhline(20.0,
                   linestyle='dotted',
                   linewidth=2,
                   color='red',
                   zorder=1)
        ax.axhline(-20.0,
                   linestyle='dotted',
                   linewidth=2,
                   color='red',
                   zorder=1)
        ax.text(0.1,
                0.9,
                fr'mean={qmean:.1f} eV, st.dev.={qstd:.1f} eV',
                fontsize=14,
                transform=ax.transAxes)
        ax.text(
            0.1,
            0.8,
            fr'mean={xstat[0]:.1f} eV, st.dev.={xstat[2]:.1f} eV (3-$\sigma$ clipped)',
            fontsize=14,
            transform=ax.transAxes)
        ax.set_xlim((0.0, 22.0))
        ax.set_ylim((-100.0, 100.0))
        ax.grid(True)
        ax.legend(loc=3)
        ax.set_title(
            f"Cu-Ka data for EPIC-PN CCD{ccd:02}, mode={xmode}, RAWY in [{rawy_range[0]},{rawy_range[1]}], {title}"
        )
        ax.set_ylabel(r"E$_{corr}$ - E$_{lab}$ (eV)")
        ax.set_xlabel("Time since 2000-01-01 (years)")
        if (png_file is not None):
            plt.savefig(png_file, dpi=100)
            plt.show()
            plt.close()
    return output
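The year binning above leans on astropy's grouped-table aggregation, where group_by also accepts an external key array; a minimal sketch of that pattern with illustrative values:

import numpy as np
from astropy.table import QTable

qt = QTable({'delta_time': [0.2, 0.7, 1.1, 1.6], 'residual': [3., 5., -2., -4.]})
year_bin = np.trunc(qt['delta_time'] / 1.0)  # 1-year bins: [0, 0, 1, 1]
grouped = qt.group_by(year_bin)
print(grouped.groups.aggregate(np.median))   # per-bin medians of each column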
Example #16
def checkLastDetections(**kwargs):
    # try:
    allrecords = 0
    collection = current_collection

    days_ago = 15
    if "collection" in kwargs.keys() and kwargs["collection"] != "":
        collection = kwargs["collection"]
    if "days_ago" in kwargs.keys() and kwargs["days_ago"] != "":
        days_ago = kwargs["days_ago"]
    if "IDpipeLine" in kwargs.keys() and kwargs["IDpipeLine"] != "":
        updatePipeline(kwargs["IDpipeLine"], "checkLastDetections", STATE_RUNNING)
    logger.info("checkLastDetections:: getting the last ZTF detections from brokers...")
    lasairarchive = LasairArchive()
    # connect to the database
    db = MongodbManager()
    config = Config()
    dbconfig = config.getDatabase("mongodb")
    db.setDatabase(dbconfig["dbname"])
    db.setCollection(collection)

    # Get the last candidates and update previous detections and light curves

    bestCandidates = BestCandidates()
    table_candidates, alerceDF, lasairDF = bestCandidates.searchCadidates(days_ago)

    # check if the new candidates are already in the DB

    # get all ZTF ids as an array, to check which already exist in the database and filter on them
    listcandidates = table_candidates["id"]
    filter = {"oid": {"$in": listcandidates.data.tolist()}}
    projection = {"oid": 1, "lastmjd": 1, "last_update": 1}

    current_data = db.getData(filter=filter, projection=projection)

    for remove_data in current_data:
        oid=remove_data["oid"]
        print("get info for ",oid)
        table_candidates.remove_rows(table_candidates["id"] == oid)

    #get desi photoz
    dataarchive = SussexArchive()
    desi_targetsvo, desi_targetstable = dataarchive.getDesiPhotoZfromTable(table_candidates)

    alerceTable = QTable.from_pandas(alerceDF)
    lasairTable = QTable.from_pandas(lasairDF)

    alerceTable.rename_column("oid", "id")
    lasairTable.rename_column("oid", "id")
    alerceTable["id"] = alerceTable["id"].astype(str)
    lasairTable["id"] = lasairTable["id"].astype(str)
    desi_targetstable["id"] =desi_targetstable["id"].astype(str)

    desi_targetstable["desidec"].mask = False
    desi_targetstable["desira"].mask = False


    #calc separation desi source
    ra_ref = desi_targetstable["ramean"].tolist()
    dec_ref = desi_targetstable["decmean"].tolist()
    cref = SkyCoord(ra_ref, dec_ref, frame='icrs', unit='deg')
    ra_desi = desi_targetstable["desira"].tolist()
    dec_desi = desi_targetstable["desidec"].tolist()
    c1 = SkyCoord(ra_desi, dec_desi, frame='icrs', unit='deg')
    desi_distance = cref.separation(c1).arcsec
    desi_targetstable["separation"] = desi_distance


    #merge all table in one json to save in mongo

    desi_targetstable = Table(desi_targetstable, masked=False)
    alerceTable = Table(alerceTable, masked=False)
    lasairTable = Table(lasairTable, masked=False)
    alerceTable["broker"] = "alerce"
    lasairTable["broker"] = "lasair"

    update_alerce_table = join(alerceTable, lasairTable, join_type='outer', keys='id')
    merge_table = join(update_alerce_table, desi_targetstable, join_type='outer', keys='id')


    merge_table["desiid"] = merge_table["desiid"].astype(str)
    merge_table["field"] = merge_table["field"].astype(str)

    lastItems = merge_table.to_pandas()
    newItems = lastItems.fillna('', axis=1)
    dic_result = newItems.to_dict('records')

    newCandidates = 0
    logger.info("checkLastDetections:: Ingested {0} candidates".format(len(dic_result)))
    allrecords = len(dic_result)
    for index, row in enumerate(dic_result):
        id = row["id"]
        print("saving candidate", id)
        row["comments"]={}
        row["snh_score"] = 0.0
        if row["broker_1"] != "":
            #alerce
            #row["pclassearly"]=row["pclassearly_1"]
            if row["broker_2"]!="":
                row["broker"]=row["broker_1"]+"/"+row["broker_2"]
            else:
                row["broker"] = row["broker_1"]
            row["meanra"]=row["meanra_1"]
            row["meandec"]=row["meandec_1"]
            row["lastmjd"]=row["lastmjd_1"]

        else:
            #lasair
            #row["pclassearly"] = row["pclassearly_2"]
            row["broker"] = row["broker_2"]
            row["meanra"] = row["meanra_2"]
            row["meandec"] = row["meandec_2"]
            row["lastmjd"] = row["lastmjd_2"]

        try:

            #remove duplicate fields
            #del row["pclassearly_1"]
            #del row["pclassearly_2"]
            del row["broker_1"]
            del row["broker_2"]
            del row["meanra_1"]
            del row["meandec_1"]

            del row["meanra_2"]
            del row["meandec_2"]

            del row["lastmjd_2"]
            del row["lastmjd_1"]

        except KeyError as er:
            print("key error",er,id)

        # check if already exist this candidate, if exist update light curve and run check list to alerts
        currentdata = db.getData(filter={"id": id}, projection={"nobs": 1, "last_update": 1, "id": 1})
        now = datetime.now().timestamp()
        rowupdated = {}
        if len(currentdata) > 0:
            currentdata = currentdata[0]
            days_from_update = ((now - float(currentdata["last_update"])) / 3600) / 24
            if days_from_update < 0.6:
                print("last detections is the same, not getting enough to services update classify",id)
                logger.info("checkLastDetections:: {0} last detections is the same, not getting enough to services update classify".format(id))
                continue

        classification = getClassification(id)
        #peak = lasairarchive.getPeakLightCurve(classification["light_curve"]["candidates"])
        rowupdated["ra"] = row["meanra"]
        rowupdated["dec"] = row["meandec"]
        rowupdated["lasair_clas"]=classification["lasair_clas"]
        rowupdated["alerce_clas"]=classification["alerce_clas"]
        rowupdated["alerce_early_class"] = classification["alerce_early_class"]
        rowupdated["alerce_late_class"] = classification["alerce_late_class"]
        rowupdated["crossmatch"]={"lasair":classification["light_curve"]["crossmatches"],"check":False}

        rowupdated["lightcurve"] = classification["light_curve"]["candidates"]
        rowupdated["report"] = row
        rowupdated["broker"] = row["broker"]
        rowupdated["nobs"] = row["nobs"]
        rowupdated["lastmjd"] = row["lastmjd"]
        rowupdated["sigmara"] = row["sigmara"]
        rowupdated["sigmadec"] = row["sigmadec"]
        rowupdated["last_magpsf_g"] = row["last_magpsf_g"]
        rowupdated["last_magpsf_r"] = row["last_magpsf_r"]
        rowupdated["first_magpsf_g"] = row["first_magpsf_g"]
        rowupdated["first_magpsf_r"] = row["first_magpsf_r"]
        rowupdated["sigma_magpsf_g"] = row["sigma_magpsf_g"]
        rowupdated["sigma_magpsf_r"] = row["sigma_magpsf_r"]
        rowupdated["max_magpsf_g"] = row["max_magpsf_g"]
        rowupdated["max_magpsf_r"] = row["max_magpsf_r"]
        rowupdated["id"] = row["id"]



        #check if already exist this candidate, if exist update light curve and run check list to alerts
        currentdata=db.getData(filter={"id":id},projection={"nobs":1,"last_update":1,"id":1})
        now = datetime.now().timestamp()



        if len(currentdata) > 0:
            #update current data
            try:
                if currentdata[0]["nobs"] < rowupdated["nobs"]:
                    peak = lasairarchive.getPeakLightCurve(classification["light_curve"]["candidates"])
                    rowupdated["lightpeak"] = peak

                    update_query = {
                        "last_update": now,
                        "lightcurve": rowupdated["lightcurve"],
                        "lightpeak": peak,
                        "lasair_clas": rowupdated["lasair_clas"],
                        "alerce_clas": rowupdated["alerce_clas"],
                        "nobs": rowupdated["nobs"],
                        "state": "updated",
                    }
                    update_id = db.update(filter={"id": id}, query={"$set": update_query})
                    print("updated source", id, update_id.raw_result)
                else:
                    print("last detections is the same, not getting enough to services update classify",id)
            except Exception as err:
                print("Error updated",id,currentdata[0]["nobs"],rowupdated["nobs"])
                logger.error("checkLastDetections:: {0} Error updated..".format(str(id)))

        else:
            peak = lasairarchive.getPeakLightCurve(classification["light_curve"]["candidates"])
            rowupdated["lightpeak"] = peak

            # insert the new candidate
            print("save new candidate")
            rowupdated["state"] = "new"
            rowupdated["last_update"] = now
            db.saveData(rowupdated)
            logger.info("checkLastDetections:: {0} saved candidate with {1} observations".format(id, rowupdated["nobs"]))
            newCandidates += 1

    logger.info("checkLastDetections:: {0} candidates stored..".format(str(len(dic_result))))
    logger.info("checkLastDetections:: alerce table detections {0}".format(str(len(alerceTable))))
    logger.info("checkLastDetections:: lasair table detections {0}".format(str(len(lasairTable))))
    logger.info("checkLastDetections:: desi detections {0}".format(str(len(desi_targetstable))))
    logger.info("checkLastDetections:: new Candidates {0}".format(str(newCandidates)))


    db.saveData(data={"date":now,"newcandidates":newCandidates,"allrecords":allrecords,"alerce_records":len(alerceTable),"lasair_records":len(lasairTable),"desi_matchs":len(desi_targetstable),"process":"lastdetections"},collection="tasks")

    if "IDpipeLine" in kwargs.keys() and kwargs["IDpipeLine"]!="":
        updatePipeline(kwargs["IDpipeLine"],"checkLastDetections",STATE_COMPLETED)
Example #17
def read_mc_dl2_to_QTable(filename):
    """
    Read MC DL2 files from lstchain and convert into pyirf internal format
    - astropy.table.QTable

    Parameters
    ----------
    filename: path to the lstchain DL2 MC file

    Returns
    -------
    `astropy.table.QTable`, `pyirf.simulations.SimulatedEventsInfo`
    """
    
    # mapping
    name_mapping = {
        "mc_energy": "true_energy",
        "mc_alt": "true_alt",
        "mc_az": "true_az",
        "mc_alt_tel": "pointing_alt",
        "mc_az_tel": "pointing_az",
        "gammaness": "gh_score",
    }

    unit_mapping = {
        "true_energy": u.TeV,
        "reco_energy": u.TeV,
        "pointing_alt": u.rad,
        "pointing_az": u.rad,
        "true_alt": u.rad,
        "true_az": u.rad,
        "reco_alt": u.rad,
        "reco_az": u.rad,
    }

    # add alpha for source-dependent analysis
    srcdep_flag = dl2_params_src_dep_lstcam_key in get_dataset_keys(filename)
    
    if srcdep_flag:
        unit_mapping['alpha'] = u.deg

    simu_info = read_simu_info_merged_hdf5(filename)
    pyirf_simu_info = SimulatedEventsInfo(
        n_showers=simu_info.num_showers * simu_info.shower_reuse,
        energy_min=simu_info.energy_range_min,
        energy_max=simu_info.energy_range_max,
        max_impact=simu_info.max_scatter_range,
        spectral_index=simu_info.spectral_index,
        viewcone=simu_info.max_viewcone_radius,
    )

    events = pd.read_hdf(filename, key=dl2_params_lstcam_key)

    if srcdep_flag:
        events_srcdep = get_srcdep_params(filename, 'on')
        events = pd.concat([events, events_srcdep], axis=1)

    events = events.rename(columns=name_mapping)

    events = QTable.from_pandas(events)

    for k, v in unit_mapping.items():
        events[k] *= v

    return events, pyirf_simu_info
Example #18
def query_function(params):
    """Run the query built by query_string(params) against DES DR and return the result as a QTable."""
    con = ea.connect('desdr')
    df = con.query_to_pandas(query_string(params))
    return QTable.from_pandas(df)