Example #1
File: test_basic.py  Project: MQQ/astropy
 def test_yday_format(self):
     """Year:Day_of_year format"""
     # Heterogeneous input formats with in_subfmt='*' (default)
     times = ["2000-12-01", "2001-12-01 01:01:01.123"]
     t = Time(times, format="iso", scale="tai")
     t.out_subfmt = "date_hm"
     assert np.all(t.yday == np.array(["2000:336:00:00", "2001:335:01:01"]))
     t.out_subfmt = "*"
     assert np.all(t.yday == np.array(["2000:336:00:00:00.000", "2001:335:01:01:01.123"]))
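
A minimal sketch of the same out_subfmt switching on a scalar Time (values chosen to match the test above):

from astropy.time import Time

t = Time("2001-12-01 01:01:01.123", format="iso", scale="tai")
t.out_subfmt = "date_hm"  # truncate output to YYYY-MM-DD HH:MM
print(t.iso)              # 2001-12-01 01:01
t.out_subfmt = "*"        # restore full-precision output
print(t.iso)              # 2001-12-01 01:01:01.123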
Example #2
def compile_datasets(target, old=0, returnColors=True):

    lcvs = glob.glob(target + 'lcvs/optical/*.lcv')
    #    all_datasets = np.zeros(1, dtype='S30')
    #    all_jds = np.zeros(1, dtype=float)
    num_images = 0
    for lcv in lcvs:

        U, B, V, R, I = lightcurves.read_optical_lcv(lcv, old=old)
        if lcv == lcvs[0]:
            all_datasets = np.array(U[3], dtype='S35')
            all_jds = np.array(U[2], dtype=float)
            num_images += len(U[0]) + len(B[0]) + len(V[0]) + len(R[0]) + len(
                I[0])
        else:
            all_datasets = np.append(all_datasets, U[3])
            all_jds = np.append(all_jds, U[2])
            num_images += len(U[0]) + len(B[0]) + len(V[0]) + len(R[0]) + len(
                I[0])
        all_datasets = np.append(all_datasets, B[3])
        all_datasets = np.append(all_datasets, V[3])
        all_datasets = np.append(all_datasets, R[3])
        all_datasets = np.append(all_datasets, I[3])
        all_jds = np.append(all_jds, B[2])
        all_jds = np.append(all_jds, V[2])
        all_jds = np.append(all_jds, R[2])
        all_jds = np.append(all_jds, I[2])
    all_jds = all_jds + 2400000.5
    datasets_prefix = np.zeros(len(all_datasets), dtype='S30')
    print('Total number of images: {}'.format(num_images))
    for ind, string in enumerate(all_datasets):
        # dtype 'S' arrays hold bytes in Python 3, so split on a bytes separator
        datasets_prefix[ind] = string.split(b':')[0]
    unique, counts = np.unique(datasets_prefix, return_counts=True)

    dataset_names = unique[np.argsort(counts)[::-1]]
    dataset_counts = counts[np.argsort(counts)[::-1]]
    colors = np.zeros(len(dataset_names), dtype='S25')
    print('\n\nDatasets:\n')
    for ind, dataset in enumerate(dataset_names):
        jds = all_jds[datasets_prefix == dataset]
        t_min = Time(jds.min(), format='jd')
        t_max = Time(jds.max(), format='jd')
        t_min.out_subfmt = 'date'
        t_max.out_subfmt = 'date'
        colors[ind] = plotting_utilities.get_color(ind)
    print('%3i %10s %6i %s %s %s' % (ind + 1, dataset, dataset_counts[ind],
                                     t_min.iso, t_max.iso,
                                     plotting_utilities.get_color(ind)))

    if returnColors:
        return dataset_names, colors
    return dataset_names
Example #3
def write_fits(img, metadata, fitsobj):
    imgtime = Time(metadata[0][0] + config.inttime*0.5, scale='utc', format='unix', location=(LOFAR_CS002_LONG, LOFAR_CS002_LAT))

    imgtime.format = 'isot'
    imgtime.out_subfmt = 'date_hms'
    filename = '%s_S%0.1f_I%ix%i_W%i_A%0.1f.fits' % (imgtime.datetime.strftime("%Y%m%d%H%M%SUTC"), np.mean(subbands), len(subbands), config.inttime, config.window, config.alpha)
    filename = os.path.join(config.output, filename)

    if os.path.exists(filename):
        logger.info("'%s' exists - skipping", filename)
        return

    # CRVAL1 should hold RA in degrees. sidereal_time returns the local
    # sidereal time in hourangle units, so multiply by 15 to get degrees.
    fitsobj.header['CRVAL1'] = imgtime.sidereal_time(kind='apparent').value * 15
    fitsobj.header['DATE-OBS'] = str(imgtime)
    imgtime_end = imgtime + TimeDelta(config.inttime, format='sec')
    fitsobj.header['END_UTC'] = str(imgtime_end)
    t = Time.now()
    t.format = 'isot'
    fitsobj.header['DATE'] = str(t)
    fitsobj.data[0, 0, :, :] = img
    data = img[np.logical_not(np.isnan(img))]
    quality = rms.rms(rms.clip(data))
    high = data.max()
    low = data.min()
    fitsobj.header['DATAMAX'] = high
    fitsobj.header['DATAMIN'] = low
    fitsobj.header['HISTORY'][0] = 'AARTFAAC 6 stations superterp'
    fitsobj.header['HISTORY'][1] = 'RMS {}'.format(quality)
    fitsobj.header['HISTORY'][2] = 'DYNAMIC RANGE {}:{}'.format(int(round(high)), int(round(quality)))
    fitsobj.writeto(filename)
    logger.info("%s %0.3f %i:%i", filename, quality, int(round(high)), int(round(quality)))
Example #4
from re import finditer
from struct import unpack_from

import numpy as np
from astropy.time import Time


def get_datetime(filename):
    # Reading binary data
    with open(filename, "rb") as f:
        bin_data = f.read()

    # Searching datetime start byte (bytes pattern, since bin_data is bytes)
    date_key = rb'\xe0.\x00\x00\x0b'
    matches = []
    for match in finditer(date_key, bin_data):
        matches.append(match.span())
    date_offset = matches[-1][0] - 8

    # Unpacking datetime
    time_values = {}
    hours, days = unpack_from("<II", bin_data, date_offset)
    secs = days * 45.0 * 60.0 + hours / (4294967295.0 / 45.0) * 60.0
    time_values["UNIX"] = secs - 2938117104000.0

    # Convert to ISO and MJD time formats
    t = Time(time_values["UNIX"], format="unix")
    t.format = "iso"
    t.out_subfmt = "date_hm"
    time_values["ISO"] = t.value
    t.format = "mjd"
    time_values["MJD"] = str(int(np.floor(t.value)))

    return time_values
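
A minimal sketch of the final conversion step in isolation, with a hypothetical Unix timestamp:

import numpy as np
from astropy.time import Time

t = Time(1600000000.0, format="unix")  # hypothetical Unix time
t.format = "iso"
t.out_subfmt = "date_hm"
print(t.value)                 # 2020-09-13 12:26
t.format = "mjd"
print(int(np.floor(t.value)))  # 59105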
Example #5
    def extract_1708_08971(self):
        """Extract SLSN entries for arxiv paper 1708.08971 (page 11)"""
        path = "source_lists/tabula-1708.08971 (dragged).csv"
        arxiv = "1708.08971 (p.11)"

        with open(path, newline='') as csvfile:
            reader = csv.reader(csvfile, delimiter=',', quotechar='|')
            for i, row in enumerate(reader):
                if i < 1:
                    continue  # skip the header row
                new = SLSN()
                new.name = row[0]
                new.redshift = float(row[5])
                new.ref = row[8]
                new.arxiv = arxiv

                date_type = row[6][-1]
                mjd = Time(float(row[7]), format="mjd")
                mjd.out_subfmt = "date"

                ra = row[1]
                dec = row[2]
                new.add_coordinates(ra, dec)

                if date_type == "p":
                    new.peak_date = mjd
                elif date_type == "d":
                    new.disc_date = mjd
                else:
                    raise Exception("Unknown date type!")

                self.entries.append(new)
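
A minimal sketch of the mjd/out_subfmt="date" pattern used above, with a hypothetical value:

from astropy.time import Time

mjd = Time(55000.0, format="mjd")
mjd.out_subfmt = "date"
print(mjd.iso)  # 2009-06-18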
Example #6
from astropy.coordinates import EarthLocation
from astropy.time import Time
import astropy.units as u
import numpy as np


def _get_time():
    t = Time([[1], [2]], format='cxcsec',
             location=EarthLocation(1000, 2000, 3000, unit=u.km))
    t.format = 'iso'
    t.precision = 5
    t.delta_ut1_utc = np.array([[3.0], [4.0]])
    t.delta_tdb_tt = np.array([[5.0], [6.0]])
    t.out_subfmt = 'date_hm'

    return t
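
A hedged usage note: the two cxcsec values are one second apart, but with out_subfmt='date_hm' both render only to the minute, so print(_get_time().iso) should give roughly

[['1998-01-01 00:00']
 ['1998-01-01 00:00']]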
Example #7
def save_obs_uvfits(obs, fname, force_singlepol=None, polrep_out='circ'):
    """Save observation data to uvfits.
       To save Stokes I as a single polarization (e.g., only RR) set force_singlepol='R' or 'L'
    """

    # output times must be in utc
    obs = obs.switch_timetype(timetype_out='UTC')

    if polrep_out == 'circ':
        obs = obs.switch_polrep('circ')
    elif polrep_out == 'stokes':
        obs = obs.switch_polrep('stokes')
    else:
        raise Exception(
            "'polrep_out' in 'save_obs_uvfits' must be 'circ' or 'stokes'!")

    hdulist_new = fits.HDUList()
    hdulist_new.append(fits.GroupsHDU())

    #####################
    # AIPS Data TABLE
    #####################

    # Data header (based on the BU format)
    MJD_0 = 2400000.5
    header = hdulist_new['PRIMARY'].header
    header['OBSRA'] = obs.ra * 180. / 12.
    header['OBSDEC'] = obs.dec
    header['OBJECT'] = obs.source
    header['MJD'] = float(obs.mjd)
    header['DATE-OBS'] = Time(obs.mjd + MJD_0,
                              format='jd',
                              scale='utc',
                              out_subfmt='date').iso
    header['BSCALE'] = 1.0
    header['BZERO'] = 0.0
    header['BUNIT'] = 'JY'
    header['VELREF'] = 3  # TODO ??
    header['EQUINOX'] = 'J2000'
    header['ALTRPIX'] = 1.e0
    header['ALTRVAL'] = 0.e0
    header['TELESCOP'] = 'VLBA'  # TODO Can we change this field?
    header['INSTRUME'] = 'VLBA'
    header['OBSERVER'] = 'EHT'

    header['CTYPE2'] = 'COMPLEX'
    header['CRVAL2'] = 1.e0
    header['CDELT2'] = 1.e0
    header['CRPIX2'] = 1.e0
    header['CROTA2'] = 0.e0
    header['CTYPE3'] = 'STOKES'
    if polrep_out == 'circ':
        header['CRVAL3'] = -1.e0
        header['CDELT3'] = -1.e0
    elif polrep_out == 'stokes':
        header['CRVAL3'] = 1.e0
        header['CDELT3'] = 1.e0
    header['CRPIX3'] = 1.e0
    header['CROTA3'] = 0.e0
    header['CTYPE4'] = 'FREQ'
    header['CRVAL4'] = obs.rf
    header['CDELT4'] = obs.bw
    header['CRPIX4'] = 1.e0
    header['CROTA4'] = 0.e0
    header['CTYPE6'] = 'RA'
    header['CRVAL6'] = header['OBSRA']
    header['CDELT6'] = 1.e0
    header['CRPIX6'] = 1.e0
    header['CROTA6'] = 0.e0
    header['CTYPE7'] = 'DEC'
    header['CRVAL7'] = header['OBSDEC']
    header['CDELT7'] = 1.e0
    header['CRPIX7'] = 1.e0
    header['CROTA7'] = 0.e0
    header['PTYPE1'] = 'UU---SIN'
    header['PSCAL1'] = 1.0 / obs.rf
    header['PZERO1'] = 0.e0
    header['PTYPE2'] = 'VV---SIN'
    header['PSCAL2'] = 1.0 / obs.rf
    header['PZERO2'] = 0.e0
    header['PTYPE3'] = 'WW---SIN'
    header['PSCAL3'] = 1.0 / obs.rf
    header['PZERO3'] = 0.e0
    header['PTYPE4'] = 'BASELINE'
    header['PSCAL4'] = 1.e0
    header['PZERO4'] = 0.e0
    header['PTYPE5'] = 'DATE'
    header['PSCAL5'] = 1.e0
    header['PZERO5'] = 0.e0
    header['PTYPE6'] = 'DATE'
    header['PSCAL6'] = 1.e0
    header['PZERO6'] = 0.0
    header['PTYPE7'] = 'INTTIM'
    header['PSCAL7'] = 1.e0
    header['PZERO7'] = 0.e0
    header['PTYPE8'] = 'TAU1'
    header['PSCAL8'] = 1.e0
    header['PZERO8'] = 0.e0
    header['PTYPE9'] = 'TAU2'
    header['PSCAL9'] = 1.e0
    header['PZERO9'] = 0.e0
    header['history'] = "AIPS SORT ORDER='TB'"

    # Get data

    if polrep_out == 'circ':
        obsdata = obs.unpack([
            'time', 'tint', 'u', 'v', 'rrvis', 'llvis', 'rlvis', 'lrvis',
            'rrsigma', 'llsigma', 'rlsigma', 'lrsigma', 't1', 't2', 'tau1',
            'tau2'
        ])
    elif polrep_out == 'stokes':
        obsdata = obs.unpack([
            'time', 'tint', 'u', 'v', 'vis', 'qvis', 'uvis', 'vvis', 'sigma',
            'qsigma', 'usigma', 'vsigma', 't1', 't2', 'tau1', 'tau2'
        ])

    ndat = len(obsdata['time'])

    # times and tints
    jds = (2400000.5 + obs.mjd) * np.ones(len(obsdata))
    fractimes = (obsdata['time'] / 24.0)
    tints = obsdata['tint']

    # Baselines
    t1 = [obs.tkey[scope] + 1 for scope in obsdata['t1']]
    t2 = [obs.tkey[scope] + 1 for scope in obsdata['t2']]
    bl = 256 * np.array(t1) + np.array(t2)

    # opacities
    tau1 = obsdata['tau1']
    tau2 = obsdata['tau2']

    # uv are in lightseconds
    u = obsdata['u']
    v = obsdata['v']

    # rr, ll, lr, rl, weights

    if polrep_out == 'circ':
        rr = obsdata['rrvis']
        ll = obsdata['llvis']
        rl = obsdata['rlvis']
        lr = obsdata['lrvis']
        weightrr = 1.0 / (obsdata['rrsigma']**2)
        weightll = 1.0 / (obsdata['llsigma']**2)
        weightrl = 1.0 / (obsdata['rlsigma']**2)
        weightlr = 1.0 / (obsdata['lrsigma']**2)

        # If necessary, enforce single polarization
        if force_singlepol == 'L':
            if obs.polrep == 'stokes':
                raise Exception(
                    "force_singlepol only works with obs.polrep=='stokes'!")
            print(
                "force_singlepol='L': treating Stokes 'I' as LL and ignoring Q,U,V!!"
            )
            ll = obsdata['vis']
            rr = rr * 0.0
            rl = rl * 0.0
            lr = lr * 0.0
            weightrr = weightrr * 0.0
            weightrl = weightrl * 0.0
            weightlr = weightlr * 0.0
        elif force_singlepol == 'R':
            if obs.polrep == 'stokes':
                raise Exception(
                    "force_singlepol only works with obs.polrep=='stokes'!")
            print(
                "force_singlepol='R': treating Stokes 'I' as RR and ignoring Q,U,V!!"
            )
            rr = obsdata['vis']
            ll = rr * 0.0
            rl = rl * 0.0
            lr = lr * 0.0
            weightll = weightll * 0.0
            weightrl = weightrl * 0.0
            weightlr = weightlr * 0.0

        dat1 = rr
        dat2 = ll
        dat3 = rl
        dat4 = lr
        weight1 = weightrr
        weight2 = weightll
        weight3 = weightrl
        weight4 = weightlr

    elif polrep_out == 'stokes':
        dat1 = obsdata['vis']
        dat2 = obsdata['qvis']
        dat3 = obsdata['uvis']
        dat4 = obsdata['vvis']
        weight1 = 1.0 / (obsdata['sigma']**2)
        weight2 = 1.0 / (obsdata['qsigma']**2)
        weight3 = 1.0 / (obsdata['usigma']**2)
        weight4 = 1.0 / (obsdata['vsigma']**2)

    # Replace nans by zeros (including zero weights)
    dat1 = np.nan_to_num(dat1)
    dat2 = np.nan_to_num(dat2)
    dat3 = np.nan_to_num(dat3)
    dat4 = np.nan_to_num(dat4)
    weight1 = np.nan_to_num(weight1)
    weight2 = np.nan_to_num(weight2)
    weight3 = np.nan_to_num(weight3)
    weight4 = np.nan_to_num(weight4)

    # Data array
    outdat = np.zeros((ndat, 1, 1, 1, 1, 4, 3))
    outdat[:, 0, 0, 0, 0, 0, 0] = np.real(dat1)
    outdat[:, 0, 0, 0, 0, 0, 1] = np.imag(dat1)
    outdat[:, 0, 0, 0, 0, 0, 2] = weight1
    outdat[:, 0, 0, 0, 0, 1, 0] = np.real(dat2)
    outdat[:, 0, 0, 0, 0, 1, 1] = np.imag(dat2)
    outdat[:, 0, 0, 0, 0, 1, 2] = weight2
    outdat[:, 0, 0, 0, 0, 2, 0] = np.real(dat3)
    outdat[:, 0, 0, 0, 0, 2, 1] = np.imag(dat3)
    outdat[:, 0, 0, 0, 0, 2, 2] = weight3
    outdat[:, 0, 0, 0, 0, 3, 0] = np.real(dat4)
    outdat[:, 0, 0, 0, 0, 3, 1] = np.imag(dat4)
    outdat[:, 0, 0, 0, 0, 3, 2] = weight4

    # Save data
    pars = [
        'UU---SIN', 'VV---SIN', 'WW---SIN', 'BASELINE', 'DATE', 'DATE',
        'INTTIM', 'TAU1', 'TAU2'
    ]
    x = fits.GroupData(
        outdat,
        parnames=pars,
        pardata=[u, v,
                 np.zeros(ndat), bl, jds, fractimes, tints, tau1, tau2],
        bitpix=-32)

    hdulist_new['PRIMARY'].data = x
    hdulist_new['PRIMARY'].header = header  # TODO necessary ??

    #####################
    # AIPS AN TABLE
    #####################

    # Load the array data
    tarr = obs.tarr
    tnames = tarr['site']
    tnums = np.arange(1, len(tarr) + 1)
    xyz = np.array([[tarr[i]['x'], tarr[i]['y'], tarr[i]['z']]
                    for i in np.arange(len(tarr))])
    sefd = tarr['sefdr']

    nsta = len(tnames)
    col1 = fits.Column(name='ANNAME', format='8A', array=tnames)
    col2 = fits.Column(name='STABXYZ', format='3D', unit='METERS', array=xyz)
    col3 = fits.Column(name='NOSTA', format='1J', array=tnums)
    colfin = fits.Column(name='SEFD', format='1D', array=sefd)

    # TODO these antenna fields+header are questionable - look into them
    col4 = fits.Column(name='MNTSTA', format='1J', array=np.zeros(nsta))
    col5 = fits.Column(name='STAXOF',
                       format='1E',
                       unit='METERS',
                       array=np.zeros(nsta))
    col6 = fits.Column(name='POLTYA',
                       format='1A',
                       array=np.array(['R' for i in range(nsta)], dtype='|S1'))
    col7 = fits.Column(name='POLAA',
                       format='1E',
                       unit='DEGREES',
                       array=np.zeros(nsta))
    col8 = fits.Column(name='POLCALA', format='3E', array=np.zeros((nsta, 3)))
    col9 = fits.Column(name='POLTYB',
                       format='1A',
                       array=np.array(['L' for i in range(nsta)], dtype='|S1'))
    col10 = fits.Column(name='POLAB',
                        format='1E',
                        unit='DEGREES',
                        array=(90. * np.ones(nsta)))
    col11 = fits.Column(name='POLCALB', format='3E', array=np.zeros((nsta, 3)))
    col25 = fits.Column(name='ORBPARM', format='1E', array=np.zeros(0))

    # Antenna Header params
    # TODO do we need to change more of these??
    collist = [
        col1, col2, col25, col3, col4, col5, col6, col7, col8, col9, col10,
        col11, colfin
    ]
    tbhdu = fits.BinTableHDU.from_columns(fits.ColDefs(collist),
                                          name='AIPS AN')
    hdulist_new.append(tbhdu)

    head = hdulist_new['AIPS AN'].header

    head['EXTVER'] = 1
    head['ARRAYX'] = 0.e0
    head['ARRAYY'] = 0.e0
    head['ARRAYZ'] = 0.e0

    # TODO change the reference date
    rdate_tt_new = Time(obs.mjd + MJD_0,
                        format='jd',
                        scale='utc',
                        out_subfmt='date')
    rdate_out = rdate_tt_new.iso
    rdate_tt_new.out_subfmt = 'float'  # TODO -- needed to fix subformat issue in astropy 4.0
    rdate_jd_out = rdate_tt_new.jd
    rdate_gstiao_out = rdate_tt_new.sidereal_time('apparent',
                                                  'greenwich').degree
    rdate_offset_out = (rdate_tt_new.ut1.datetime.second -
                        rdate_tt_new.utc.datetime.second)
    rdate_offset_out += 1.e-6 * (rdate_tt_new.ut1.datetime.microsecond -
                                 rdate_tt_new.utc.datetime.microsecond)

    head['RDATE'] = rdate_out
    head['GSTIA0'] = rdate_gstiao_out
    head['DEGPDY'] = 360.9856
    head['UT1UTC'] = rdate_offset_out  # difference between UT1 and UTC ?
    head['DATUTC'] = 0.e0
    head['TIMESYS'] = 'UTC'

    head['FREQ'] = obs.rf
    head['POLARX'] = 0.e0
    head['POLARY'] = 0.e0

    head['ARRNAM'] = 'VLBA'  # TODO must be recognized by aips/casa
    head['XYZHAND'] = 'RIGHT'
    head['FRAME'] = '????'
    head['NUMORB'] = 0
    head['NO_IF'] = 1  # TODO nchan
    head['NOPCAL'] = 0  # TODO add pol cal information
    head['POLTYPE'] = 'VLBI'
    head['FREQID'] = 1

    hdulist_new['AIPS AN'].header = head  # TODO necessary, or is it a pointer?

    #####################
    # AIPS FQ TABLE
    #####################
    # Convert types & columns

    nif = 1
    col1 = np.array(1, dtype=np.int32).reshape([nif])  # frqsel
    col2 = np.array(0.0, dtype=np.float64).reshape([nif])  # iffreq
    col3 = np.array([obs.bw], dtype=np.float32).reshape([nif])  # chwidth
    col4 = np.array([obs.bw], dtype=np.float32).reshape([nif])  # bw
    col5 = np.array([1], dtype=np.int32).reshape([nif])  # sideband

    col1 = fits.Column(name="FRQSEL", format="1J", array=col1)
    col2 = fits.Column(name="IF FREQ", format="%dD" % (nif), array=col2)
    col3 = fits.Column(name="CH WIDTH", format="%dE" % (nif), array=col3)
    col4 = fits.Column(name="TOTAL BANDWIDTH",
                       format="%dE" % (nif),
                       array=col4)
    col5 = fits.Column(name="SIDEBAND", format="%dJ" % (nif), array=col5)
    cols = fits.ColDefs([col1, col2, col3, col4, col5])

    # create table
    tbhdu = fits.BinTableHDU.from_columns(cols)

    # add header information
    tbhdu.header.append(("NO_IF", nif, "Number IFs"))
    tbhdu.header.append(("EXTNAME", "AIPS FQ"))
    tbhdu.header.append(("EXTVER", 1))
    hdulist_new.append(tbhdu)

    #####################
    # AIPS NX TABLE
    #####################

    scan_times = []
    scan_time_ints = []
    start_vis = []
    stop_vis = []

    # TODO: make sure jds and the scan info are both time-sorted!
    jj = 0

    ROUND_SCAN_INT = 5
    comp_fac = 3600 * 24 * 100  # compare to 100th of a second
    scan_arr = obs.scans
    print('Building NX table')
    if (scan_arr is None or len(scan_arr) == 0):
        print("No NX table in saved uvfits")
    else:
        try:
            scan_arr = scan_arr / 24.
            for scan in scan_arr:
                scan_start = round(scan[0], ROUND_SCAN_INT)
                scan_stop = round(scan[1], ROUND_SCAN_INT)
                scan_dur = (scan_stop - scan_start)

                if jj >= len(fractimes):
                    # print start_vis, stop_vis
                    break

                # print ("%.12f %.12f %.12f" % (fractimes[jj], scan_start, scan_stop))
                jd = round(fractimes[jj],
                           ROUND_SCAN_INT) * comp_fac  # TODO precision??

                if ((np.floor(jd) >= np.floor(scan_start * comp_fac))
                        and (np.ceil(jd) <= np.ceil(comp_fac * scan_stop))):
                    start_vis.append(jj)

                    # TODO AIPS MEMO 117 says scan_times should be the midpoint,
                    # but AIPS data looks like it's at the start?
                    scan_times.append(scan_start +
                                      0.5 * scan_dur)  # - rdate_jd_out)
                    scan_time_ints.append(scan_dur)
                    ceilcut = np.ceil(comp_fac * scan_stop)
                    while ((jj < len(fractimes) and np.floor(
                            round(fractimes[jj], ROUND_SCAN_INT) * comp_fac) <=
                            ceilcut)):
                        jj += 1
                    stop_vis.append(jj - 1)
                else:
                    continue

            if jj < len(fractimes):
                print(scan_arr[-1])
                print(round(scan_arr[-1][0], ROUND_SCAN_INT),
                      round(scan_arr[-1][1], ROUND_SCAN_INT))
                print(jj, len(jds), round(jds[jj], ROUND_SCAN_INT))
                print(
                    "WARNING!!!: in save_uvfits NX table, " +
                    "didn't get to all entries when computing scan start/stop!"
                )
                print(scan_times)
            time_nx = fits.Column(name="TIME",
                                  format="1D",
                                  unit='DAYS',
                                  array=np.array(scan_times))
            timeint_nx = fits.Column(name="TIME INTERVAL",
                                     format="1E",
                                     unit='DAYS',
                                     array=np.array(scan_time_ints))
            sourceid_nx = fits.Column(name="SOURCE ID",
                                      format="1J",
                                      unit='',
                                      array=np.ones(len(scan_times)))
            subarr_nx = fits.Column(name="SUBARRAY",
                                    format="1J",
                                    unit='',
                                    array=np.ones(len(scan_times)))
            freqid_nx = fits.Column(name="FREQ ID",
                                    format="1J",
                                    unit='',
                                    array=np.ones(len(scan_times)))
            startvis_nx = fits.Column(name="START VIS",
                                      format="1J",
                                      unit='',
                                      array=np.array(start_vis) + 1)
            endvis_nx = fits.Column(name="END VIS",
                                    format="1J",
                                    unit='',
                                    array=np.array(stop_vis) + 1)
            cols = fits.ColDefs([
                time_nx, timeint_nx, sourceid_nx, subarr_nx, freqid_nx,
                startvis_nx, endvis_nx
            ])

            tbhdu = fits.BinTableHDU.from_columns(cols)

            # header information
            tbhdu.header.append(("EXTNAME", "AIPS NX"))
            tbhdu.header.append(("EXTVER", 1))

            hdulist_new.append(tbhdu)
        except TypeError:
            print("No NX table in saved uvfits")

    # Write final HDUList to file
    hdulist_new.writeto(fname, overwrite=True)

    return
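
A side note on the header dates: DATE-OBS and RDATE above both come from the same pattern. A minimal sketch with a hypothetical MJD:

from astropy.time import Time

MJD_0 = 2400000.5
mjd = 58849  # hypothetical MJD (2020-01-01)
rdate = Time(mjd + MJD_0, format='jd', scale='utc', out_subfmt='date')
print(rdate.iso)  # 2020-01-01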
Example #8
from astropy.time import Time


def hjd2datetime(hjd):
    t = Time(hjd, format='jd')
    t.format = 'iso'
    t.out_subfmt = 'date_hms'
    return t.iso
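
A usage note: despite the name, no heliocentric correction is applied; the input is treated as a plain JD. For example:

print(hjd2datetime(2455000.5))  # 2009-06-18 00:00:00.000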
Example #9
def main():
    # templates are stored relative to the script dir
    # stored one level up, find the parent directory
    # and split the parent directory away
    script_dir = os.path.dirname(os.path.realpath(__file__))
    split_dir = os.path.split(script_dir)
    template_dir = os.path.join(split_dir[0], "templates")

    env = Environment(loader=FileSystemLoader(template_dir), trim_blocks=True)
    env.filters["islist"] = is_list

    if sys.version_info[0] < 3:
        # py2
        computer_hostname = os.uname()[1]
    else:
        # py3
        computer_hostname = os.uname().nodename

    parser = argparse.ArgumentParser(
        description=("Create auto-correlation spectra plot for heranow dashboard")
    )
    parser.add_argument(
        "--redishost",
        dest="redishost",
        type=str,
        default="redishost",
        help=('The host name for redis to connect to, defaults to "redishost"'),
    )
    parser.add_argument(
        "--port", dest="port", type=int, default=6379, help="Redis port to connect."
    )
    args = parser.parse_args()
    r = redis.Redis(args.redishost, port=args.port)

    keys = [
        k.decode()
        for k in r.keys()
        if k.startswith(b"auto") and not k.endswith(b"timestamp")
    ]

    ants = []
    for key in keys:
        match = re.search(r"auto:(?P<ant>\d+)(?P<pol>e|n)", key)
        if match is not None:
            ant, pol = int(match.group("ant")), match.group("pol")
            ants.append(ant)

    ants = np.unique(ants)
    corr_map = r.hgetall(b"corr:map")
    ant_to_snap = json.loads(corr_map[b"ant_to_snap"])
    node_map = {}
    nodes = []
    # the length of the autos sometimes changes depending on the mode of
    # the array, so detect it from the first ant-pol that has data
    for i in ants:
        for pol in ["e", "n"]:
            d = r.get("auto:{ant:d}{pol:s}".format(ant=i, pol=pol))
            if d is not None:
                auto = np.frombuffer(d, dtype=np.float32).copy()
                break
    auto_size = auto.size
    # Generate frequency axis
    # Sometimes the inputs have length 6144, other times 1536; set the
    # length to match whatever auto we actually got
    NCHANS = int(8192 // 4 * 3)
    NCHANS_F = 8192
    NCHAN_SUM = NCHANS // auto_size
    NCHANS = auto_size
    frange = np.linspace(0, 250e6, NCHANS_F + 1)[1536 : 1536 + (8192 // 4 * 3)]
    # average over channels
    frange = frange.reshape(NCHANS, NCHAN_SUM).sum(axis=1) / NCHAN_SUM
    frange_mhz = frange / 1e6

    got_time = False
    n_signals = 0

    try:
        t_plot_jd = np.frombuffer(r["auto:timestamp"], dtype=np.float64)[0]
        t_plot = Time(t_plot_jd, format="jd")
        t_plot.out_subfmt = "date_hm"
        got_time = True
    except Exception:
        pass
    # grab data from redis and format it according to plotly's javascript api
    autospectra = []

    table_ants = {}
    table_ants["title"] = "Antennas with no Node mapping"
    rows = []
    bad_ants = []
    for i in ants:
        for pol in ["e", "n"]:
            # get the timestamp from redis for the first ant-pol
            if not got_time:
                t_plot_jd = float(
                    r.hget(
                        "visdata://{i:d}/{j:d}/{i_pol:s}{j_pol:s}".format(
                            i=i, j=i, i_pol=pol, j_pol=pol
                        ),
                        "time",
                    )
                )
                if t_plot_jd is not None:
                    got_time = True
            linename = "ant{ant:d}{pol:s}".format(ant=i, pol=pol)

            try:
                hostname = ant_to_snap[str(i)][pol]["host"]
                match = re.search(r"heraNode(?P<node>\d+)Snap", hostname)
                if match is not None:
                    _node = int(match.group("node"))
                    nodes.append(_node)
                    node_map[linename] = _node
                else:
                    print("No Node mapping for antennna: " + linename)
                    bad_ants.append(linename)
                    node_map[linename] = -1
                    nodes.append(-1)
            except KeyError:
                print("No Node mapping for antenna: " + linename)
                bad_ants.append(linename)
                node_map[linename] = -1
                nodes.append(-1)

            d = r.get("auto:{ant:d}{pol:s}".format(ant=i, pol=pol))
            if d is not None:

                n_signals += 1
                auto = np.frombuffer(d, dtype=np.float32)[0:NCHANS].copy()

                eq_coeffs = r.hget(
                    bytes("eq:ant:{ant}:{pol}".format(ant=i, pol=pol).encode()),
                    "values",
                )
                if eq_coeffs is not None:
                    eq_coeffs = np.fromstring(
                        eq_coeffs.decode("utf-8").strip("[]"), sep=","
                    )
                    if eq_coeffs.size == 0:
                        eq_coeffs = np.ones_like(auto)
                else:
                    eq_coeffs = np.ones_like(auto)

                # divide out the equalization coefficients
                # eq_coeffs are stored as a length 1024 array but only a
                # single number is used. Taking the median to not deal with
                # a size mismatch
                eq_coeffs = np.median(eq_coeffs)
                auto /= eq_coeffs ** 2

                auto[auto < 10 ** -10.0] = 10 ** -10.0
                auto = 10 * np.log10(auto)
                _auto = {
                    "x": frange_mhz.tolist(),
                    "y": auto.tolist(),
                    "name": linename,
                    "node": node_map[linename],
                    "type": "scatter",
                    "hovertemplate": "%{x:.1f}\tMHz<br>%{y:.3f}\t[dB]",
                }
                autospectra.append(_auto)

    row = {}
    row["text"] = "\t".join(bad_ants)
    rows.append(row)
    table_ants["rows"] = rows

    nodes = np.unique(nodes)
    # if an antenna was not mapped, roll the -1 to the end
    # this makes making buttons easier so the unmapped show last
    if -1 in nodes:
        nodes = np.roll(nodes, -1)
    # create a mask to find all the matching nodes
    node_mask = [
        [s["node"] == node for s in autospectra] for node in nodes
    ]
    buttons = []
    _button = {
        "args": [
            {"visible": [True for s in autospectra]},
            {
                "title": "",
                # "annotations": {}
            },
        ],
        "label": "All\tAnts",
        "method": "restyle",
    }
    buttons.append(_button)
    for node_cnt, node in enumerate(nodes):
        if node != -1:
            label = "Node\t{}".format(node)
        else:
            label = "Unmapped\tAnts"

        _button = {
            "args": [
                {"visible": node_mask[node_cnt]},
                {
                    "title": "",
                    # "annotations": {}
                },
            ],
            "label": label,
            "method": "update",
        }
        buttons.append(_button)

    updatemenus = [
        {
            "buttons": buttons,
            "showactive": True,
            "active": 0,
            "type": "dropdown",
            "x": 0.535,
            "y": 1.03,
        }
    ]

    layout = {
        "xaxis": {"title": "Frequency [MHz]"},
        "yaxis": {"title": "Power [dB]"},
        "title": {
            "text": "Autocorrelations",
            "xref": "paper",
            "x": 0.5,
            "yref": "paper",
            "y": 1.5,
            "font": {"size": 24},
        },
        "autosize": True,
        "showlegend": True,
        "legend": {"x": 1, "y": 1},
        "margin": {"l": 40, "b": 30, "r": 40, "t": 46},
        "hovermode": "closest",
    }
    plotname = "plotly-autos"

    caption = {}
    caption["text"] = (
        "The Autocorrelations from the correlator (in dB) versus frequency "
        "with equalization coefficients divided out.\n "
        "<br><br>Some antennas may not have "
        "a known node mapping and are listed below the image.\n  "
        "<br><br>Plot can be downselected to display individual nodes "
        "or show the entire array.\n "
        "<br><br>Double click on an entry in the legend to select only that "
        "entry, double click again to restore all plots.\n  "
        "<br><br>Single click an entry in the legend to un-plot it, "
        "single click again to restore it to the plot."
    )
    caption["title"] = "Autocorrelations Help"

    html_template = env.get_template("refresh_with_table.html")
    js_template = env.get_template("plotly_base.js")

    if sys.version_info >= (3, 8):
        time_jd = t_plot.to_value('jd', subfmt='float')
        time_unix = t_plot.to_value('unix')
    else:
        time_jd = t_plot.jd
        time_unix = t_plot.unix

    rendered_html = html_template.render(
        plotname=plotname,
        data_type="Auto correlations",
        plotstyle="height: 100%",
        div_height="height: 73%",
        gen_date=Time.now().iso,
        data_date_iso=t_plot.iso,
        data_date_jd="{:.3f}".format(time_jd),
        data_date_unix_ms=time_unix * 1000,
        js_name="spectra",
        gen_time_unix_ms=Time.now().unix * 1000,
        scriptname=os.path.basename(__file__),
        hostname=computer_hostname,
        table=table_ants,
        caption=caption,
    )

    rendered_js = js_template.render(
        data=autospectra, layout=layout, updatemenus=updatemenus, plotname=plotname
    )

    print("Got {n_sig:d} signals".format(n_sig=n_signals))
    with open("spectra.html", "w") as h_file:
        h_file.write(rendered_html)
    with open("spectra.js", "w") as js_file:
        js_file.write(rendered_js)
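
The version gate above exists because older astropy/Python combinations lack Time.to_value's subfmt argument (it appeared around astropy 4.0, paired here with Python >= 3.8). A standalone sketch of the same branch:

import sys
from astropy.time import Time

t = Time.now()
if sys.version_info >= (3, 8):
    jd = t.to_value('jd', subfmt='float')  # explicit float subformat
else:
    jd = t.jd  # older astropy: plain float property
print(jd)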
Example #10
def main():
    # templates are stored relative to the script dir
    # stored one level up, find the parent directory
    # and split the parent directory away
    script_dir = os.path.dirname(os.path.realpath(__file__))
    split_dir = os.path.split(script_dir)
    template_dir = os.path.join(split_dir[0], "templates")

    env = Environment(loader=FileSystemLoader(template_dir), trim_blocks=True)
    if sys.version_info[0] < 3:
        # py2
        computer_hostname = os.uname()[1]
    else:
        # py3
        computer_hostname = os.uname().nodename

    # The standard M&C argument parser
    parser = mc.get_mc_argument_parser()
    # we'll have to add some extra options too
    parser.add_argument(
        "--redishost",
        dest="redishost",
        type=str,
        default="redishost",
        help=(
            'The host name for redis to connect to, defaults to "redishost"'),
    )
    parser.add_argument("--port",
                        dest="port",
                        type=int,
                        default=6379,
                        help="Redis port to connect.")
    args = parser.parse_args()

    try:
        db = mc.connect_to_mc_db(args)
    except RuntimeError as e:
        raise SystemExit(str(e))

    try:
        redis_db = redis.Redis(args.redishost, port=args.port)
        redis_db.keys()
    except Exception as err:
        raise SystemExit(str(err))

    with db.sessionmaker() as session:
        # without .item() this would be an array, which breaks database queries
        latest = Time(
            np.frombuffer(redis_db.get("auto:timestamp"),
                          dtype=np.float64).item(),
            format="jd",
        )
        latest.out_subfmt = "date_hm"

        now = Time.now()
        amps = {}
        keys = [
            k.decode() for k in redis_db.keys()
            if k.startswith(b"auto") and not k.endswith(b"timestamp")
        ]

        for key in keys:
            match = re.search(r"auto:(?P<ant>\d+)(?P<pol>e|n)", key)
            if match is not None:
                ant, pol = int(match.group("ant")), match.group("pol")
                d = redis_db.get(key)
                if d is not None:
                    # need to copy because frombuffer creates a read-only array
                    auto = np.frombuffer(d, dtype=np.float32).copy()

                    eq_coeff = redis_db.hget(
                        bytes("eq:ant:{ant}:{pol}".format(ant=ant,
                                                          pol=pol).encode()),
                        "values",
                    )
                    if eq_coeff is not None:
                        eq_coeffs = np.fromstring(
                            eq_coeff.decode("utf-8").strip("[]"), sep=",")
                        if eq_coeffs.size == 0:
                            eq_coeffs = np.ones_like(auto)
                    else:
                        eq_coeffs = np.ones_like(auto)

                    # divide out the equalization coefficients
                    # eq_coeffs are stored as a length 1024 array but only a
                    # single number is used. Taking the median to not deal with
                    # a size mismatch
                    eq_coeffs = np.median(eq_coeffs)
                    auto /= eq_coeffs**2
                    auto[auto < 10**-10.0] = 10**-10.0
                    auto = np.median(auto)
                    amps[(ant, pol)] = 10.0 * np.log10(auto)

        hsession = cm_sysutils.Handling(session)
        ants = np.unique([ant for (ant, pol) in amps.keys()])
        pols = np.unique([pol for (ant, pol) in amps.keys()])

        antpos = np.genfromtxt(
            os.path.join(mc.data_path, "HERA_350.txt"),
            usecols=(0, 1, 2, 3),
            dtype={
                "names": ("ANTNAME", "EAST", "NORTH", "UP"),
                "formats": ("<U5", "<f8", "<f8", "<f8"),
            },
            encoding=None,
        )
        antnames = antpos["ANTNAME"]
        inds = [int(j[2:]) for j in antnames]
        inds = np.argsort(inds)

        antnames = np.take(antnames, inds)

        antpos = np.array([antpos["EAST"], antpos["NORTH"], antpos["UP"]])
        array_center = np.mean(antpos, axis=1, keepdims=True)
        antpos -= array_center
        antpos = np.take(antpos, inds, axis=1)

        stations = hsession.get_connected_stations(at_date="now")

        for station in stations:
            if station.antenna_number not in ants:
                ants = np.append(ants, station.antenna_number)
        ants = np.unique(ants)

        stations = []
        for station_type in hsession.geo.parse_station_types_to_check(
                "default"):
            for stn in hsession.geo.station_types[station_type]["Stations"]:
                stations.append(stn)

        # stations is a list of HH??? names; we just want the ints
        stations = list(map(int, [j[2:] for j in stations]))
        built_but_not_on = np.setdiff1d(stations, ants)
        # Get node and PAM info
        node_ind = np.zeros_like(ants, dtype=int)
        pam_ind = np.zeros_like(ants, dtype=int)
        # default the snap name to "No Data"
        hostname = np.full_like(ants, "No\tData", dtype=object)
        snap_serial = np.full_like(ants, "No\tData", dtype=object)

        pam_power = {}
        adc_power = {}
        adc_rms = {}
        time_array = {}
        fem_imu_theta = {}
        fem_imu_phi = {}
        eq_coeffs = {}
        for ant in ants:
            for pol in pols:
                amps.setdefault((ant, pol), np.inf)
                pam_power.setdefault((ant, pol), np.inf)
                adc_power.setdefault((ant, pol), np.inf)
                adc_rms.setdefault((ant, pol), np.inf)
                eq_coeffs.setdefault((ant, pol), np.inf)
                fem_imu_theta.setdefault((ant, pol), np.inf)
                fem_imu_phi.setdefault((ant, pol), np.inf)
                time_array.setdefault((ant, pol), now - Time(0, format="gps"))

        for ant_cnt, ant in enumerate(ants):
            station_status = session.get_antenna_status(
                most_recent=True, antenna_number=int(ant))

            for status in station_status:
                antpol = (status.antenna_number, status.antenna_feed_pol)
                if status.pam_power is not None:
                    pam_power[antpol] = status.pam_power
                if status.adc_power is not None:
                    adc_power[antpol] = 10 * np.log10(status.adc_power)
                if status.adc_rms is not None:
                    adc_rms[antpol] = status.adc_rms
                if status.time is not None:
                    time_array[antpol] = now - Time(status.time, format="gps")
                if status.fem_imu_phi is not None:
                    fem_imu_phi[antpol] = status.fem_imu_phi
                if status.fem_imu_theta is not None:
                    fem_imu_theta[antpol] = status.fem_imu_theta
                if status.eq_coeffs is not None:
                    _coeffs = np.fromstring(status.eq_coeffs.strip("[]"),
                                            sep=",")
                    # just track the median coefficient for now
                    eq_coeffs[antpol] = np.median(_coeffs)

            # Try to get the snap info. Output is a dictionary with 'e' and 'n' keys
            mc_name = antnames[ant]
            snap_info = hsession.get_part_at_station_from_type(
                mc_name, "now", "snap")
            # get the first key in the dict to index easier
            _key = list(snap_info.keys())[0]
            pol_key = [key for key in snap_info[_key].keys() if "E" in key]
            if pol_key:
                # 'E' should be in one of the keys, extract the 0th entry
                pol_key = pol_key[0]
            else:
                # a hacky solution for a key that should work
                pol_key = "E<ground"
            if snap_info[_key][pol_key] is not None:
                snap_serial[ant_cnt] = snap_info[_key][pol_key]

            # Try to get the pam info. Output is a dictionary with 'e' and 'n' keys
            pam_info = hsession.get_part_at_station_from_type(
                mc_name, "now", "post-amp")
            # get the first key in the dict to index easier
            _key = list(pam_info.keys())[0]
            if pam_info[_key][pol_key] is not None:
                _pam_num = re.findall(r"PAM(\d+)", pam_info[_key][pol_key])[0]
                pam_ind[ant_cnt] = int(_pam_num)
            else:
                pam_ind[ant_cnt] = -1

            # Try to get the ADC info. Output is a dictionary with 'e' and 'n' keys
            node_info = hsession.get_part_at_station_from_type(
                mc_name, "now", "node")
            # get the first key in the dict to index easier
            _key = list(node_info.keys())[0]
            if node_info[_key][pol_key] is not None:
                _node_num = re.findall(r"N(\d+)", node_info[_key][pol_key])[0]
                node_ind[ant_cnt] = int(_node_num)

                _hostname = session.get_snap_hostname_from_serial(
                    snap_serial[ant_cnt])

                if _hostname is not None:
                    hostname[ant_cnt] = _hostname
                else:
                    snap_status = session.get_snap_status(
                        most_recent=True, nodeID=int(_node_num))
                    for _status in snap_status:
                        if _status.serial_number == snap_serial[ant_cnt]:
                            hostname[ant_cnt] = _status.hostname
            else:
                node_ind[ant_cnt] = -1

        pams, _pam_ind = np.unique(pam_ind, return_inverse=True)
        nodes, _node_ind = np.unique(node_ind, return_inverse=True)

        xs_offline = np.ma.masked_array(
            antpos[0, :],
            mask=[int(name[2:]) in ants for name in antnames],
        )
        ys_offline = np.ma.masked_array(antpos[1, :], mask=xs_offline.mask)
        name_offline = np.ma.masked_array(
            [aname + "<br>OFFLINE" for aname in antnames],
            mask=xs_offline.mask,
            dtype=object,
        )

        names = [
            "Auto  [dB]",
            "PAM [dB]",
            "ADC [dB]",
            "ADC RMS",
            "FEM IMU THETA",
            "FEM IMU PHI",
            "EQ COEF",
        ]
        powers = [
            amps,
            pam_power,
            adc_power,
            adc_rms,
            fem_imu_theta,
            fem_imu_phi,
            eq_coeffs,
        ]
        powers = [
            np.ma.masked_invalid([[p[ant, pol] for ant in ants]
                                  for pol in pols]) for p in powers
        ]
        write_csv("ant_stats.csv", antnames, ants, pols, names, powers,
                  built_but_not_on)

        time_array = np.array(
            [[time_array[ant, pol].to("hour").value for ant in ants]
             for pol in pols])
        xs = np.ma.masked_array(antpos[0, ants], mask=powers[0][0].mask)
        ys = np.ma.masked_array(
            [
                antpos[1, ants] + 3 * (pol_cnt - 0.5)
                for pol_cnt, pol in enumerate(pols)
            ],
            mask=powers[0].mask,
        )
        _text = np.array(
            [[
                antnames[ant] + pol + "<br>" + str(hostname[ant_cnt]) +
                "<br>" + "PAM\t#:\t" + str(pam_ind[ant_cnt])
                for ant_cnt, ant in enumerate(ants)
            ] for pol_cnt, pol in enumerate(pols)],
            dtype="object",
        )

        #  want to format No Data where data was not retrieved for each type of power
        for pol_cnt, pol in enumerate(pols):
            for ant_cnt, ant in enumerate(ants):
                for _name, _power in zip(names, powers):
                    if not _power.mask[pol_cnt, ant_cnt]:
                        _text[pol_cnt, ant_cnt] += (
                            "<br>" + _name +
                            ": {0:.2f}".format(_power[pol_cnt, ant_cnt]))
                    else:
                        _text[pol_cnt, ant_cnt] += "<br>" + _name + ": No Data"
                if time_array[pol_cnt, ant_cnt] > 2 * 24 * 365:
                    # if the value is older than 2 years it is bad;
                    # values are stored in hours.
                    # 2 years was chosen arbitrarily.
                    _text[pol_cnt, ant_cnt] += "<br>" + "Ant Status:  No Date"
                else:
                    _text[pol_cnt,
                          ant_cnt] += ("<br>" +
                                       "Ant Status: {0:.2f} hrs old".format(
                                           time_array[pol_cnt, ant_cnt]))
                # having spaces will cause odd wrapping issues, replace all
                # spaces by \t
                _text[pol_cnt, ant_cnt] = _text[pol_cnt,
                                                ant_cnt].replace(" ", "\t")

        masks = [[True] for p in powers]

        # Offline antennas
        data_hex = []
        offline_ants = {
            "x": xs_offline.compressed().tolist(),
            "y": ys_offline.compressed().tolist(),
            "text": name_offline,
            "mode": "markers",
            "visible": True,
            "marker": {
                "color":
                np.ma.masked_array(["black"] * len(name_offline),
                                   mask=xs_offline.mask),
                "size":
                14,
                "opacity":
                0.5,
                "symbol":
                "hexagon",
            },
            "hovertemplate": "%{text}<extra></extra>",
        }
        # now we want to fill in the connected ones
        offline_ants["marker"]["color"][built_but_not_on] = "red"
        offline_ants["text"].data[built_but_not_on] = [
            offline_ants["text"].data[ant].split("<br>")[0] +
            "<br>Constructed<br>Not\tOnline" for ant in built_but_not_on
        ]

        offline_ants["marker"]["color"] = (
            offline_ants["marker"]["color"].compressed().tolist())
        offline_ants["text"] = offline_ants["text"].compressed().tolist()
        data_hex.append(offline_ants)

        #  for each type of power, loop over pols and print out the data
        #  save up a mask array used for the buttons later
        #  also plot the bad ones!
        colorscale = "Viridis"

        # define some custom scale values for the ADC RMS page
        rms_scale_vals = [2, 20]
        relavitve_values = [0.4, 0.7]
        rms_color_scale = [
            ["0.0", "rgb(68,1,84)"],
            ["0.2", "rgb(62,74,137)"],
            ["0.3", "rgb(38,130,142)"],
            ["0.4", "rgb(53,183,121)"],
            ["0.5", "rgb(53,183,121)"],
            ["0.6", "rgb(53,183,121)"],
            ["0.7", "rgb(109,205,89)"],
            ["0.8", "rgb(180,222,44)"],
            ["1.0", "rgb(253,231,37)"],
        ]

        for pow_ind, power in enumerate(powers):
            if power.compressed().size > 0:
                vmax = np.max(power.compressed())
                vmin = np.min(power.compressed())
            else:
                vmax = 1
                vmin = 0

            colorscale = "Viridis"

            if pow_ind == 3:
                cbar_title = "RMS\tlinear"
                vmin = rms_scale_vals[0] * relavitve_values[0]
                vmax = rms_scale_vals[1] / relavitve_values[1]
                colorscale = rms_color_scale
            elif pow_ind == 4 or pow_ind == 5:
                cbar_title = "Degrees"
            elif pow_ind == len(powers) - 1:
                cbar_title = "Median\tCoeff"
            else:
                cbar_title = "dB"

            if pow_ind == 0:
                visible = True
            else:
                visible = False

            for pol_ind, pol in enumerate(pols):
                for mask_cnt, mask in enumerate(masks):
                    if mask_cnt == pow_ind:
                        mask.extend([True] * 2)
                    else:
                        mask.extend([False] * 2)

                _power = {
                    "x": xs.data[~power[pol_ind].mask].tolist(),
                    "y": ys[pol_ind].data[~power[pol_ind].mask].tolist(),
                    "text": _text[pol_ind][~power[pol_ind].mask].tolist(),
                    "mode": "markers",
                    "visible": visible,
                    "marker": {
                        "color":
                        power[pol_ind].data[~power[pol_ind].mask].tolist(),
                        "size": 14,
                        "cmin": vmin,
                        "cmax": vmax,
                        "colorscale": colorscale,
                        "colorbar": {
                            "thickness": 20,
                            "title": cbar_title
                        },
                    },
                    "hovertemplate": "%{text}<extra></extra>",
                }
                data_hex.append(_power)

                _power_offline = {
                    "x": xs.data[power[pol_ind].mask].tolist(),
                    "y": ys[pol_ind].data[power[pol_ind].mask].tolist(),
                    "text": _text[pol_ind][power[pol_ind].mask].tolist(),
                    "mode": "markers",
                    "visible": visible,
                    "marker": {
                        "color": "orange",
                        "size": 14,
                        "cmin": vmin,
                        "cmax": vmax,
                        "colorscale": colorscale,
                        "colorbar": {
                            "thickness": 20,
                            "title": cbar_title
                        },
                    },
                    "hovertemplate": "%{text}<extra></extra>",
                }
                data_hex.append(_power_offline)

        buttons = []
        for _name, mask in zip(names, masks):
            _button = {
                "args": [{
                    "visible": mask
                }, {
                    "title": "",
                    "annotations": {}
                }],
                "label": _name,
                "method": "restyle",
            }
            buttons.append(_button)

        updatemenus_hex = [{
            "buttons": buttons,
            "showactive": True,
            "type": "buttons"
        }]

        layout_hex = {
            "xaxis": {
                "title": "East-West Position [m]"
            },
            "yaxis": {
                "title": "North-South Position [m]",
                "scaleanchor": "x"
            },
            "title": {
                "text": "Per Antpol Stats vs Hex position",
                "font": {
                    "size": 24
                },
            },
            "hoverlabel": {
                "align": "left"
            },
            "margin": {
                "t": 40
            },
            "autosize": True,
            "showlegend": False,
            "hovermode": "closest",
        }
        caption = {}
        caption["title"] = "Stats vs Hex pos Help"
        caption["text"] = (
            "This plot shows various statistics and measurements "
            "per ant-pol versus its position in the array."
            "<br>Antennas which are build but not fully hooked up "
            "are shown in light red."
            "<br>Grey antennas are not yet constructed."
            "<br><br><h4>Available plotting options</h4>"
            "<ul>"
            "<li>Auto Corr - Median Auto Correlation (in db) "
            "from the correlator with equalization coefficients "
            "divided out</li>"
            "<li>Pam Power - Latest Pam Power (in db) recorded in M&C</li>"
            "<li>ADC Power - Latest ADC Power (in db) recorded in M&C</li>"
            "<li>ADC RMS - Latest linear ADC RMS recorded in M&C</li>"
            "<li>FEM IMU THETA - IMU-reported theta, in degrees</li>"
            "<li>FEM IMU PHI - IMU-reported phi, in degrees</li>"
            "<li>EQ Coeffs - Latest Median Equalization Coefficient recorded in M&C</li>"
            "</ul>"
            "Any antpol showing with an orange color means "
            "no data is avaible for the currenty plot selection."
            "<h4>Hover label Formatting</h4>"
            "<ul>"
            "<li>Antenna Name from M&C<br>(e.g. HH0n = Hera Hex Antenna 0 Polarization N)</li>"
            "<li>Snap hostname from M&C<br>(e.g. heraNode0Snap0)</li>"
            "<li>PAM Number</li>"
            "<li>Median Auto Correlation power in dB</li>"
            "<li>PAM power in dB</li>"
            "<li>ADC power in dB</li>"
            "<li>Linear ADC RMS</li>"
            "<li>FEM IMU reported theta in degrees</li>"
            "<li>FEM IMU reported phi in degrees</li>"
            "<li>Median Equalization Coefficient</li>"
            "<li>Time ago in hours the M&C Antenna Status was updated. "
            "This time stamp applies to all data for this antenna "
            "except the Auto Correlation.</li>"
            "</ul>"
            "In any hover label entry 'No Data' means "
            "information not currrently available in M&C.")

        # Render all the power vs position files
        plotname = "plotly-hex"
        html_template = env.get_template("plotly_base.html")
        js_template = env.get_template("plotly_base.js")

        if sys.version_info >= (3, 8):
            time_jd = latest.to_value('jd', subfmt='float')
            time_unix = latest.to_value('unix')
        else:
            time_jd = latest.jd
            time_unix = latest.unix

        rendered_hex_html = html_template.render(
            plotname=plotname,
            data_type="Auto correlations",
            plotstyle="height: 100%",
            gen_date=now.iso,
            data_date_iso=latest.iso,
            data_date_jd="{:.3f}".format(time_jd),
            data_date_unix_ms=time_unix * 1000,
            js_name="hex_amp",
            gen_time_unix_ms=now.unix * 1000,
            scriptname=os.path.basename(__file__),
            hostname=computer_hostname,
            caption=caption,
        )

        rendered_hex_js = js_template.render(
            data=data_hex,
            layout=layout_hex,
            updatemenus=updatemenus_hex,
            plotname=plotname,
        )

        with open("hex_amp.html", "w") as h_file:
            h_file.write(rendered_hex_html)

        with open("hex_amp.js", "w") as js_file:
            js_file.write(rendered_hex_js)

        # now prepare the data to be plotted vs node number
        data_node = []

        masks = [[] for p in powers]

        vmax = [
            np.max(power.compressed()) if power.compressed().size > 1 else 1
            for power in powers
        ]
        vmin = [
            np.min(power.compressed()) if power.compressed().size > 1 else 0
            for power in powers
        ]
        vmin[3] = rms_scale_vals[0] * relavitve_values[0]
        vmax[3] = rms_scale_vals[1] / relavitve_values[1]

        for node in nodes:
            node_index = np.where(node_ind == node)[0]
            hosts = hostname[node_index]

            host_index = np.argsort(hosts)

            ys = np.ma.masked_array(
                [
                    np.arange(node_index.size) + 0.3 * pol_cnt
                    for pol_cnt, pol in enumerate(pols)
                ],
                mask=powers[0][:, node_index].mask,
            )
            xs = np.zeros_like(ys)
            xs[:] = node
            powers_node = [p[:, node_index] for p in powers]
            __text = _text[:, node_index]

            for pow_ind, power in enumerate(powers_node):
                # Colorbar title and scale per statistic; the ADC RMS panel
                # (index 3) gets its own color scale.
                colorscale = "Viridis"
                if pow_ind == 3:
                    cbar_title = "RMS\tlinear"
                    colorscale = rms_color_scale
                elif pow_ind == 4 or pow_ind == 5:
                    cbar_title = "Degrees"
                elif pow_ind == len(powers) - 1:
                    cbar_title = "Median\tCoeff"
                else:
                    cbar_title = "dB"

                # Only the first statistic starts visible; buttons toggle the rest.
                visible = pow_ind == 0

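                # Each (statistic, pol) pair appends two traces below (data
                # plus orange "offline" markers), so every statistic's
                # visibility mask grows by two entries per pol.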
                for pol_ind, pol in enumerate(pols):
                    for mask_cnt, mask in enumerate(masks):
                        if mask_cnt == pow_ind:
                            mask.extend([True] * 2)
                        else:
                            mask.extend([False] * 2)

                    __power = power[pol_ind][host_index]
                    ___text = __text[pol_ind][host_index]

                    _power = {
                        "x": xs[pol_ind].data[~__power.mask].tolist(),
                        "y": ys[pol_ind].data[~__power.mask].tolist(),
                        "text": ___text[~__power.mask].tolist(),
                        "mode": "markers",
                        "visible": visible,
                        "marker": {
                            "color": __power.data[~__power.mask].tolist(),
                            "size": 14,
                            "cmin": vmin[pow_ind],
                            "cmax": vmax[pow_ind],
                            "colorscale": colorscale,
                            "colorbar": {
                                "thickness": 20,
                                "title": cbar_title
                            },
                        },
                        "hovertemplate": "%{text}<extra></extra>",
                    }

                    data_node.append(_power)

                    _power_offline = {
                        "x": xs[pol_ind].data[__power.mask].tolist(),
                        "y": ys[pol_ind].data[__power.mask].tolist(),
                        "text": ___text[__power.mask].tolist(),
                        "mode": "markers",
                        "visible": visible,
                        "marker": {
                            "color": "orange",
                            "size": 14,
                            "cmin": vmin[pow_ind],
                            "cmax": vmax[pow_ind],
                            "colorscale": colorscale,
                            "colorbar": {
                                "thickness": 20,
                                "title": cbar_title
                            },
                        },
                        "hovertemplate": "%{text}<extra></extra>",
                    }

                    data_node.append(_power_offline)
        buttons = []
        for _name, mask in zip(names, masks):
            _button = {
                "args": [{
                    "visible": mask
                }, {
                    "title": "",
                    "annotations": {}
                }],
                "label": _name,
                "method": "restyle",
            }
            buttons.append(_button)

        updatemenus_node = [{
            "buttons": buttons,
            "showactive": True,
            "type": "buttons"
        }]

        layout_node = {
            "xaxis": {
                "title": "Node Number",
                "dtick": 1,
                "tick0": 0,
                "showgrid": False,
                "zeroline": False,
            },
            "yaxis": {
                "showticklabels": False,
                "showgrid": False,
                "zeroline": False
            },
            "title": {
                "text": "Per Antpol Stats vs Node #",
                "font": {
                    "size": 24
                }
            },
            "hoverlabel": {
                "align": "left"
            },
            "margin": {
                "t": 40
            },
            "autosize": True,
            "showlegend": False,
            "hovermode": "closest",
        }

        caption_node = {}
        caption_node["title"] = "Stats vs Node Help"
        caption_node["text"] = (
            "This plot shows various statistics and measurements "
            "per ant-pol versus the node number to which it is connected."
            "<br><br><h4>Available plotting options</h4>"
            "<ul>"
            "<li>Auto Corr - Median Auto Correlation (in db) "
            "from the correlator with equalization coefficients "
            "divided out</li>"
            "<li>Pam Power - Latest Pam Power (in db) recorded in M&C</li>"
            "<li>ADC Power - Latest ADC Power (in db) recorded in M&C</li>"
            "<li>ADC RMS - Latest linear ADC RMS recorded in M&C</li>"
            "<li>EQ Coeffs - Latest Median Equalization Coefficient recorded in M&C</li>"
            "</ul>"
            "Any antpol showing with an orange color means "
            "no data is avaible for the currenty plot selection."
            "<h4>Hover label Formatting</h4>"
            "<ul>"
            "<li>Antenna Name from M&C<br>(e.g. HH0n = Hera Hex Antenna 0 Polarization N)</li>"
            "<li>Snap hostname from M&C<br>(e.g. heraNode0Snap0)</li>"
            "<li>PAM Number</li>"
            "<li>Median Auto Correlation power in dB</li>"
            "<li>PAM power in dB</li>"
            "<li>ADC power in dB</li>"
            "<li>Linear ADC RMS</li>"
            "<li>Median Equalization Coefficient</li>"
            "<li>Time ago in hours the M&C Antenna Status was updated. "
            "This time stamp applies to all data for this antenna "
            "except the Auto Correlation.</li>"
            "</ul>"
            "In any hover label entry 'No Data' means "
            "information not currrently available in M&C.")

        # Render all the power vs node files
        plotname = "plotly-node"
        html_template = env.get_template("plotly_base.html")
        js_template = env.get_template("plotly_base.js")

        if sys.version_info >= (3, 8):
            time_jd = latest.to_value('jd', subfmt='float')
            time_unix = latest.to_value('unix')
        else:
            time_jd = latest.jd
            time_unix = latest.unix

        rendered_node_html = html_template.render(
            plotname=plotname,
            data_type="Auto correlations",
            plotstyle="height: 100%",
            gen_date=now.iso,
            gen_time_unix_ms=now.unix * 1000,
            data_date_iso=latest.iso,
            data_date_jd="{:.3f}".format(time_jd),
            data_date_unix_ms=time_unix * 1000,
            js_name="node_amp",
            scriptname=os.path.basename(__file__),
            hostname=computer_hostname,
            caption=caption_node,
        )

        rendered_node_js = js_template.render(
            data=data_node,
            layout=layout_node,
            updatemenus=updatemenus_node,
            plotname=plotname,
        )

        with open("node_amp.html", "w") as h_file:
            h_file.write(rendered_node_html)

        with open("node_amp.js", "w") as js_file:
            js_file.write(rendered_node_js)
Example #11
File: config.py Project: tburnett/wtlike
def UTC(mjd):
    " convert MJD value to ISO date string"
    t = Time(mjd, format='mjd')
    t.format = 'iso'
    t.out_subfmt = 'date_hm'
    return t.value
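A minimal usage sketch (the MJD value is illustrative; with out_subfmt
'date_hm' the ISO string is trimmed to minutes):

print(UTC(59000.0))  # -> '2020-05-31 00:00'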
Example #12
    def extract_the_open_supernova_catalog(self):
        """Extracts SLSN from the Open supernovae catalog with the type SLSN.
        The catalog can be found at https://sne.space/ .
        """
        arxiv = "1605.01054 (https://sne.space/)"

        new_entries = []

        path = "source_lists/The Open Supernova Catalog.csv"

        # text mode with newline='' is the csv idiom on Python 3
        with open(path, newline='') as csvfile:
            reader = csv.reader(csvfile, delimiter=',', quotechar='"')
            for i, row in enumerate(reader):
                if i < 1:
                    continue  # skip the header row

                # Check for weird artifact entry in SLSN catalog
                elif row[0] != "PISN_Jerkstrand":
                    new = SLSN()
                    new.name = row[0]
                    new.alias = list(
                        set([
                            x.strip() for x in row[1].split(",")
                            if x != new.name
                        ]))

                    for j, attr in enumerate(["disc_date", "peak_date"]):
                        try:
                            date = "-".join(row[j + 2].split("/"))
                            date = Time(date, format="iso")
                            date.out_subfmt = "date"
                            date.format = "mjd"
                            setattr(new, attr, date)
                        except ValueError:
                            if row[j + 2] != "":
                                new.notes += "Problem in " + attr + ", only have "
                                new.notes += row[j + 2] + " (not ISO format), "

                    if row[4] != "":
                        new.abs_peak = float(row[4])

                    coords = [np.nan, np.nan]

                    for j, angle in enumerate(coords):
                        if row[5 + j] != "":
                            values = row[5 + j].split(",")
                            coords[j] = values[0]
                            if len(values) > 1:
                                new.notes += "Several values reported for " + \
                                             ["ra", "dec"][j] + " " + \
                                             str(values) + ", "

                    new.add_coordinates(coords[0], coords[1])

                    for j, attr in enumerate(["redshift", "ebv"]):
                        val = row[7 + (2 * j)]
                        if val != "":
                            all_vals = list(set(val.split(",")))
                            setattr(new, attr, float(all_vals[0]))
                            if len(all_vals) > 1:
                                new.notes += "Several values reported for " + \
                                             attr + " " + str(all_vals) + ", "

                    type_val = row[8]
                    if type_val != "":
                        all_vals = list(set(type_val.split(",")))
                        setattr(new, "type", all_vals[0])
                        if len(all_vals) > 1:
                            new.notes += "Several values reported for " + \
                                         "type " + str(all_vals) + ", "

                    ref_val = row[10]
                    if ref_val != "":
                        setattr(new, "ref", ref_val)

                    new.arxiv = arxiv
                    new_entries.append(new)

        self.entries.extend(new_entries)
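The date handling above parses an ISO string, restricts the output subformat
to the date alone, then switches the instance to MJD for storage. A minimal
standalone sketch of the same idea (the date is a made-up example; it reads
the .mjd attribute rather than reassigning .format):

from astropy.time import Time

date = Time("2016-05-04", format="iso", out_subfmt="date")
print(date.iso)  # '2016-05-04'
print(date.mjd)  # 57512.0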
Example #13
    def extract_1604_08207(self):
        """Extracts SLSN entries for arxiv paper 1604.08207 (page 3)"""
        path = "source_lists/tabula-1604.08207.csv"
        arxiv = "1604.08207 (p.3)"
        # text mode with newline='' is the csv idiom on Python 3
        with open(path, newline='') as csvfile:
            reader = csv.reader(csvfile, delimiter=',', quotechar='|')
            for i, row in enumerate(reader):
                if i < 1:
                    continue  # skip the header row
                else:
                    new = SLSN()
                    new.name = "PTF" + row[0]
                    ra = row[1]
                    dec = list(row[2])

                    if len(dec) > 12:
                        dec.pop(0)
                        dec.pop(0)
                        dec[0] = "-"

                    dec = "".join(dec)

                    new.add_coordinates(ra, dec)

                    if len(row[3]) > 2:
                        new.type = "SLSN-R"
                    else:
                        new.type = "SLSN-" + row[3]

                    new.redshift = float(row[4])
                    new.arxiv = arxiv
                    new.ref = arxiv

                    peak = list(row[5])[3:]
                    while not peak[0].isdigit():
                        peak.pop(0)
                    peak = -1. * float("".join(peak))
                    new.abs_peak = peak

                    date = list(row[10])
                    if len(date) > 10:
                        date.pop(0)
                    date = "".join(date)
                    date = Time(date, format="iso")
                    date.out_subfmt = "date"
                    date.format = "mjd"
                    new.peak_date = date

                    new.ebv = float(row[13])

                    notes = row[16]
                    if notes != "":
                        if notes[0] == "=":
                            split = notes.split(".")
                            new.alias.append(split[0][2:])
                            if len(split) > 1:
                                notes = split[1]
                            else:
                                notes = ""
                        else:
                            notes = [x for x in row[16]]
                            while not notes[-1].isalpha():
                                notes.pop(-1)
                            notes = "".join(notes)
                    new.notes = notes

                    self.entries.append(new)
Example #14
def main():
    # templates live one directory level above this script:
    # find the script's directory, split off its last component,
    # and join the parent with "templates"
    script_dir = os.path.dirname(os.path.realpath(__file__))
    split_dir = os.path.split(script_dir)
    template_dir = os.path.join(split_dir[0], "templates")

    env = Environment(loader=FileSystemLoader(template_dir), trim_blocks=True)

    if sys.version_info[0] < 3:
        # py2
        computer_hostname = os.uname()[1]
    else:
        # py3
        computer_hostname = os.uname().nodename

    parser = argparse.ArgumentParser(
        description=("Create snap hookup tables for heranow dashboard"))
    parser.add_argument(
        "--redishost",
        dest="redishost",
        type=str,
        default="redishost",
        help=(
            'The host name for redis to connect to, defaults to "redishost"'),
    )
    parser.add_argument("--port",
                        dest="port",
                        type=int,
                        default=6379,
                        help="Redis port to connect.")
    args = parser.parse_args()

    redis_db = redis.Redis(args.redishost, port=args.port)
    corr_map = redis_db.hgetall("corr:map")
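    # redis-py returns bytes for keys and values by default, hence the
    # b"..." lookups and decode() calls throughout this function.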

    update_time = Time(float(corr_map[b"update_time"]), format="unix")
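    # 'date_hm' renders update_time.iso to minutes, e.g. '2021-03-04 12:34'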
    update_time.out_subfmt = u"date_hm"
    all_tables = []

    # make a table of the antenna to snap mapping
    table_a_to_s = {}
    table_a_to_s["title"] = "Antenna -> SNAP mappings"
    table_a_to_s["tab_style"] = "float: left"
    rows_a = []
    ant_to_snap = json.loads(corr_map[b"ant_to_snap"])
    for ant in sorted(map(int, ant_to_snap)):
        ant = str(ant)
        pol = ant_to_snap[ant]
        for p in pol:
            vals = pol[p]
            row = {}
            host = vals["host"]
            chan = vals["channel"]
            if isinstance(host, bytes):
                host = host.decode("utf-8")
            if isinstance(chan, bytes):
                chan = chan.decode("utf-8")
            row["text"] = "{ant}:{pol} -> {host}:{chan}".format(ant=ant,
                                                                pol=p,
                                                                host=host,
                                                                chan=chan)
            rows_a.append(row)
    table_a_to_s["rows"] = rows_a
    table_a_to_s["div_style"] = 'style="max-height: 2500px;"'
    table_a_to_s["colsize"] = 6
    all_tables.append(table_a_to_s)

    # make a table of the snap to antenna mapping
    table_s_to_a = {}
    table_s_to_a["title"] = "SNAP -> Antenna mappings"
    table_s_to_a["style"] = "float: right"
    rows_s = []

    snap_to_ant = json.loads(corr_map[b"snap_to_ant"])
    for snap in sorted(snap_to_ant):
        ant = snap_to_ant[snap]
        for i in range(6):
            if ant[i] is None:
                ant[i] = "n/c"
            if isinstance(ant[i], bytes):
                ant[i] = ant[i].decode("utf-8")

        if isinstance(snap, bytes):
            snap = snap.decode("utf-8")
        row = {}
        row["text"] = "{snap} -> {ants}".format(snap=snap, ants=", ".join(ant))
        rows_s.append(row)

    table_s_to_a["rows"] = rows_s
    table_s_to_a["colsize"] = 6
    all_tables.append(table_s_to_a)

    # Make a table of the snap to antenna indices mapping
    table_ant_ind = {}
    table_ant_ind["title"] = "SNAP -> Antenna indices"

    snap_to_ant_i = redis_db.hgetall("corr:snap_ants")
    rows_ant_ind = []
    for snap in sorted(snap_to_ant_i):
        ant = snap_to_ant_i[snap]
        row = {}
        if isinstance(snap, bytes):
            snap = snap.decode("utf-8")
        if isinstance(ant, bytes):
            ant = ant.decode("utf-8")
        row["text"] = "{snap} -> {ant}".format(snap=snap, ant=str(ant))
        rows_ant_ind.append(row)

    table_ant_ind["rows"] = rows_ant_ind
    table_ant_ind["style"] = "float: right"
    table_ant_ind["colsize"] = 6
    all_tables.append(table_ant_ind)

    # Make a table of the XENG channel indices
    table_xeng = {}
    table_xeng["title"] = "XENG -> Channel indices"
    rows_xeng = []

    xeng_to_chan_i = redis_db.hgetall("corr:xeng_chans")
    for xeng in sorted(map(int, xeng_to_chan_i)):
        # redis keys are bytes; re-encode the sorted integer for the lookup
        chans = xeng_to_chan_i[str(xeng).encode()]
        row = {}
        if isinstance(chans, bytes):
            chans = chans.decode("utf-8")
        row["text"] = "{xeng} -> {chans}...".format(xeng=xeng,
                                                    chans=chans[0:5])
        rows_xeng.append(row)
    table_xeng["rows"] = rows_xeng
    table_xeng["style"] = "float: right"
    table_xeng["colsize"] = 6

    all_tables.append(table_xeng)

    html_template = env.get_template("tables_with_footer.html")

    if sys.version_info >= (3, 8):
        time_jd = update_time.to_value('jd', subfmt='float')
        time_unix = update_time.to_value('unix')
    else:
        time_jd = update_time.jd
        time_unix = update_time.unix

    rendered_html = html_template.render(
        tables=all_tables,
        data_type="Hookup information",
        data_date_iso=update_time.iso,
        data_date_jd="{:.3f}".format(time_jd),
        data_date_unix_ms=time_unix * 1000,
        gen_date=Time.now().iso,
        gen_time_unix_ms=Time.now().unix * 1000,
        scriptname=os.path.basename(__file__),
        hostname=computer_hostname,
    )

    with open("snaphookup.html", "w") as h_file:
        h_file.write(rendered_html)
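The version-gated JD/Unix conversion above is repeated verbatim in each
renderer; a hedged refactoring sketch (the helper name is invented, and it
assumes Python >= 3.8 environments also carry astropy >= 4.0, where
Time.to_value accepts a subfmt argument):

import sys

def time_to_jd_unix(t):
    """Return (jd, unix) floats from an astropy Time object."""
    if sys.version_info >= (3, 8):
        return t.to_value('jd', subfmt='float'), t.to_value('unix')
    # older stacks: fall back to the attribute accessors
    return t.jd, t.unix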