Example #1
def refine_cands(candsfile,
                 threshold=0,
                 scaledm=2.1,
                 scalepix=2,
                 scaleuv=1.0,
                 chans=[],
                 savepkl=True):
    """ Runs refine_cand on all positive SNR candidates above threshold. Any detected at higher SNR are highlighted. """

    # get snrs above threshold
    locs, props, d = pc.read_candidates(candsfile,
                                        snrmin=threshold,
                                        returnstate=True)

    if 'snr2' in d['features']:
        snrcol = d['features'].index('snr2')
    elif 'snr1' in d['features']:
        snrcol = d['features'].index('snr1')
    scancol = d['featureind'].index('scan')
    segmentcol = d['featureind'].index('segment')
    intcol = d['featureind'].index('int')
    dtindcol = d['featureind'].index('dtind')
    dmindcol = d['featureind'].index('dmind')
    snrs = props[:, snrcol]

    for (i, snr) in enumerate(snrs):
        if snr > 0:
            d, cands = refine_cand(candsfile,
                                   threshold=threshold,
                                   candnum=i,
                                   scaledm=scaledm,
                                   scalepix=scalepix,
                                   scaleuv=scaleuv,
                                   chans=chans)
            if cands:
                candlocs = np.array(list(cands.keys()))
                candprops = np.array(list(cands.values()))

                scan = locs[i, scancol]
                segment = locs[i, segmentcol]
                candint = locs[i, intcol]
                dmind = locs[i, dmindcol]
                dtind = locs[i, dtindcol]
                candfile = 'cands_{0}_sc{1}-seg{2}-i{3}-dm{4}-dt{5}.pkl'.format(
                    d['fileroot'], scan, segment, candint, dmind, dtind)

                if any([candsnr > snr for candsnr in candprops[:, snrcol]]):
                    logger.info(
                        'Cand {0} had SNR {1} and refinement found a higher SNR in new ones: {2}.'
                        .format(i, snr, candprops[:, snrcol]))
                    logger.info('Saving to {0}: {1}'.format(candfile, cands))

                    with open(candfile, 'wb') as pkl:
                        pickle.dump(d, pkl, protocol=2)
                        pickle.dump((candlocs, candprops), pkl, protocol=2)
                else:
                    logger.info(
                        'Cand {0} had SNR {1}, but refinement found no improvement: {2}'
                        .format(i, snr, candprops[:, snrcol]))
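
A hedged usage sketch for the function above; the candsfile name and threshold are placeholders, and refine_cands (with its pc/rt/np/logger dependencies) is assumed to be in scope as defined above.

import logging
logging.basicConfig(level=logging.INFO)  # refine_cands reports progress via a module logger

refine_cands('cands_merge.pkl',  # placeholder name for a merged cands pkl
             threshold=6.5,      # only re-examine candidates above this SNR
             scaledm=2.1,        # refinement scalings passed through to refine_cand
             scalepix=2,
             scaleuv=1.0)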
Example #2
def readcandsfile(candsfile, plotdir='/users/claw/public_html/plots', tag=None, copyplots=True):
    """ Read candidates from pickle file and format as list of dictionaries

    plotdir is the path to the png plot files; a candidate is kept in datalist only if its png exists there.
    Optionally copies png files into plotdir.
    """

    if tag:
        assert isinstance(tag, str)

    loc, prop, state = read_candidates(candsfile, returnstate=True)

    fileroot = state['fileroot']
    if plotdir:
        logging.info('Filtering data based on presence of png files in {0}'.format(plotdir))
    else:
        logging.info('Appending all data to datalist.')

    datalist = []
    for i in range(len(loc)):
        data = {}
        data['obs'] = fileroot

        for feat in state['featureind']:
            col = state['featureind'].index(feat)
            data[feat] = loc[i][col]

        for feat in state['features']:
            col = state['features'].index(feat)
            if isnan(prop[i][col]):
                data[feat] = nan_to_num(prop[i][col])
            else:
                data[feat] = prop[i][col]

        uniqueid = dataid(data)
        data['candidate_png'] = 'cands_{0}.png'.format(uniqueid)
        data['labeled'] = '0'  # has this cand been labeled by active learning loop?
        if tag:
            data['tag'] = tag  # care to add comma-delimited string to this cand?
        else:
            data['tag'] = ''

        # copy plot over and add path to datalist
        if plotdir:
            if copyplots and os.path.exists(data['candidate_png']) and not os.path.exists(os.path.join(plotdir, data['candidate_png'])):
                copy(data['candidate_png'], plotdir)
            if os.path.exists(os.path.join(plotdir, data['candidate_png'])):
                datalist.append(data)
        else:
            datalist.append(data)

    return datalist
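
A hedged example call for readcandsfile above; the pkl name, plot directory, and tag are placeholders, and the function and its dependencies (read_candidates, dataid, copy, os, logging) are assumed to be in scope.

datalist = readcandsfile('cands_merge.pkl',        # placeholder merged cands pkl
                         plotdir='/tmp/plots',     # keep only cands whose png exists here
                         tag='unlabeled,followup', # free-form comma-delimited tag string
                         copyplots=True)           # copy local pngs into plotdir first
print('kept {0} candidates'.format(len(datalist)))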
Example #3
def refine_cand(candsfile,
                candloc=[],
                candnum=-1,
                threshold=0,
                scaledm=2.1,
                scalepix=2,
                scaleuv=1.0,
                chans=[],
                returndata=False):
    """ Helper function to interact with merged cands file and refine analysis

    candsfile is the merged pkl file.
    candloc is a (scan, segment, candint, dmind, dtind, beamnum) tuple.
    If neither candnum nor candloc is given, returns None.
    """

    if candnum >= 0:
        candlocs, candprops, d0 = pc.read_candidates(candsfile,
                                                     snrmin=threshold,
                                                     returnstate=True)
        candloc = candlocs[candnum]
        candprop = candprops[candnum]

        logger.info('Refining cand {0} with features {1}'.format(
            candloc, candprop))
        values = rt.pipeline_refine(d0,
                                    candloc,
                                    scaledm=scaledm,
                                    scalepix=scalepix,
                                    scaleuv=scaleuv,
                                    chans=chans,
                                    returndata=returndata)
        return values

    elif candloc:
        logger.info('Refining cand {0}'.format(candloc))
        d0 = pickle.load(open(candsfile, 'rb'))
        values = rt.pipeline_refine(d0,
                                    candloc,
                                    scaledm=scaledm,
                                    scalepix=scalepix,
                                    scaleuv=scaleuv,
                                    chans=chans,
                                    returndata=returndata)
        return values
    else:
        return None
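
Two hedged example calls showing the modes of refine_cand above; file names and indices are placeholders, and the function with its pc/rt dependencies is assumed in scope.

# Mode 1: select candidate number 3 from the merged pkl (above an SNR floor).
values = refine_cand('cands_merge.pkl', candnum=3, threshold=6.5)

# Mode 2: give an explicit candloc as (scan, segment, candint, dmind, dtind, beamnum).
values = refine_cand('cands_sc7.pkl', candloc=[7, 0, 152, 12, 0, 0])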
Example #4
def readcandsfile(candsfile, plotdir='/users/claw/public_html/plots', tag=None):
    """ Read candidates from pickle file and format as list of dictionaries

    plotdir is the path to the png plot files; a candidate is kept in datalist only if its png exists there.
    """

    if tag:
        assert isinstance(tag, str)

    loc, prop, state = read_candidates(candsfile, returnstate=True)

    fileroot = state['fileroot']
    if plotdir:
        logging.info('Filtering data based on presence of png files in {0}'.format(plotdir))
    else:
        logging.info('Appending all data to datalist.')

    datalist = []
    for i in range(len(loc)):
        data = {}
        data['obs'] = fileroot

        for feat in state['featureind']:
            col = state['featureind'].index(feat)
            data[feat] = loc[i][col]

        for feat in state['features']:
            col = state['features'].index(feat)
            data[feat] = prop[i][col]

        uniqueid = dataid(data)
        data['candidate_png'] = 'cands_{0}.png'.format(uniqueid)
        data['labeled'] = '0'
        if tag:
            data['tag'] = tag
        else:
            data['tag'] = ''

        if plotdir:
            if os.path.exists(os.path.join(plotdir, data['candidate_png'])):
                datalist.append(data)
        else:
            datalist.append(data)

    return datalist
Example #5
def list_cands(candsfile, threshold=0.):
    """ Prints candidate info in time order above some threshold """

    loc, prop, d0 = pc.read_candidates(candsfile,
                                       snrmin=threshold,
                                       returnstate=True)

    if 'snr2' in d0['features']:
        snrcol = d0['features'].index('snr2')
    elif 'snr1' in d0['features']:
        snrcol = d0['features'].index('snr1')
    dmindcol = d0['featureind'].index('dmind')

    if len(loc):
        snrs = prop[:, snrcol]
        times = pc.int2mjd(d0, loc)
        times = times - times[0]

        logger.info('Getting candidates...')
        logger.info('candnum: loc, SNR, DM (pc/cm3), time (s; rel)')
        for i in range(len(loc)):
            logger.info("%d: %s, %.1f, %.1f, %.1f" %
                        (i, str(loc[i]), prop[i, snrcol], np.array(
                            d0['dmarr'])[loc[i, dmindcol]], times[i]))
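
A hedged example call for list_cands above; the pkl name and threshold are placeholders, and the function is assumed in scope with its pc/np/logger dependencies.

# Logs one line per candidate: "candnum: loc, SNR, DM (pc/cm3), time (s; rel)".
list_cands('cands_merge.pkl', threshold=6.0)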
Example #6
def plot_cand(candsfile,
              candloc=[],
              candnum=-1,
              threshold=0,
              savefile=True,
              returndata=False,
              outname='',
              **kwargs):
    """ Reproduce detection of a single candidate for plotting or inspection.

    candsfile can be a merged or single-scan cands pkl file; the difference is defined by the presence of 'scan' in d['featureind'].
    candloc reproduces the candidate at a given location (scan, segment, integration, dmind, dtind, beamnum).
    candnum selects one candidate to reproduce from the ordered list.
    threshold is the minimum abs(SNR) used to filter candidates selected with candnum.
    savefile/outname define whether/how to save a png of the candidate.
    If returndata, (im, data) is returned.
    kwargs are passed to rt.set_pipeline.
    """

    # get candidate info
    loc, prop = pc.read_candidates(candsfile)

    # define state dict and overload with user prefs
    d0 = pickle.load(open(candsfile, 'rb'))
    for key in kwargs:
        logger.info('Setting %s to %s' % (key, kwargs[key]))
        d0[key] = kwargs[key]
    d0['logfile'] = False  # no need to save log

    # feature columns
    if 'snr2' in d0['features']:
        snrcol = d0['features'].index('snr2')
    elif 'snr1' in d0['features']:
        snrcol = d0['features'].index('snr1')
    if 'l2' in d0['features']:
        lcol = d0['features'].index('l2')
    elif 'l1' in d0['features']:
        lcol = d0['features'].index('l1')
    if 'm2' in d0['features']:
        mcol = d0['features'].index('m2')
    elif 'm1' in d0['features']:
        mcol = d0['features'].index('m1')

    try:
        scancol = d0['featureind'].index('scan')  # if merged pkl
    except ValueError:
        scancol = -1  # if single-scan pkl
    segmentcol = d0['featureind'].index('segment')
    intcol = d0['featureind'].index('int')
    dtindcol = d0['featureind'].index('dtind')
    dmindcol = d0['featureind'].index('dmind')

    # sort and prep candidate list
    snrs = prop[:, snrcol]
    select = np.where(np.abs(snrs) > threshold)[0]
    loc = loc[select]
    prop = prop[select]
    times = pc.int2mjd(d0, loc)
    times = times - times[0]

    # default case will print cand info
    if (candnum < 0) and (not len(candloc)):
        logger.info('Getting candidates...')
        logger.info('candnum: loc, SNR, DM (pc/cm3), time (s; rel)')
        for i in range(len(loc)):
            logger.info("%d: %s, %.1f, %.1f, %.1f" %
                        (i, str(loc[i]), prop[i, snrcol], np.array(
                            d0['dmarr'])[loc[i, dmindcol]], times[i]))
    else:  # if candnum or candloc provided, try to reproduce
        if (candnum >= 0) and not len(candloc):
            logger.info(
                'Reproducing and visualizing candidate %d at %s with properties %s.'
                % (candnum, loc[candnum], prop[candnum]))
            dmarrorig = d0['dmarr']
            dtarrorig = d0['dtarr']
            if scancol >= 0:  # here we have a merge pkl
                scan = loc[candnum, scancol]
            else:  # a scan-based cands pkl
                scan = d0['scan']
            segment = loc[candnum, segmentcol]
            candint = loc[candnum, intcol]
            dmind = loc[candnum, dmindcol]
            dtind = loc[candnum, dtindcol]
            beamnum = 0
            candloc = (scan, segment, candint, dmind, dtind, beamnum)
        elif len(candloc) and (candnum < 0):
            assert len(
                candloc
            ) == 6, 'candloc should be length 6 ( scan, segment, candint, dmind, dtind, beamnum ).'
            logger.info('Reproducing and visualizing candidate %d at %s' %
                        (candnum, candloc))
            dmarrorig = d0['dmarr']
            dtarrorig = d0['dtarr']
            scan, segment, candint, dmind, dtind, beamnum = candloc
        else:
            raise Exception('Provide candnum or candloc, not both')

        # if working locally, set workdir appropriately. Can also be used in queue system with full path given.
        if not os.path.dirname(candsfile):
            d0['workdir'] = os.getcwd()
        else:
            d0['workdir'] = os.path.dirname(candsfile)
        filename = os.path.join(d0['workdir'],
                                os.path.basename(d0['filename']))

        # clean up d0 of superfluous keys
        params = pp.Params()  # will be used as input to rt.set_pipeline
        for key in list(d0.keys()):
            if not hasattr(params, key) and 'memory_limit' not in key:
                _ = d0.pop(key)

        d0['npix'] = 0
        d0['uvres'] = 0
        d0['nsegments'] = 0
        d0['logfile'] = False
        # get cand data
        d = rt.set_pipeline(filename, scan, **d0)
        im, data = rt.pipeline_reproduce(
            d, candloc, product='imdata')  # removed loc[candnum]

        # optionally plot
        if savefile:
            loclabel = scan, segment, candint, dmind, dtind, beamnum
            make_cand_plot(d, im, data, loclabel, outname=outname)

        # optionally return data
        if returndata:
            return (im, data)
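
Hedged example calls for plot_cand above; the pkl name and candidate indices are placeholders, and the function is assumed in scope with its pc/rt/pp/np dependencies.

# With no candnum or candloc, only the filtered candidate list is logged.
plot_cand('cands_merge.pkl', threshold=6.5)

# Reproduce candidate 0 and save the candidate png via make_cand_plot.
plot_cand('cands_merge.pkl', candnum=0, threshold=6.5, savefile=True)

# Reproduce by explicit location and return the image and data instead of plotting.
im, data = plot_cand('cands_merge.pkl',
                     candloc=[7, 0, 152, 12, 0, 0],  # scan, segment, candint, dmind, dtind, beamnum
                     savefile=False, returndata=True)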
Example #7
def readdata(mergepkl=None, d=None, cands=None, sizerange=(2,70)):
    """ Converts candidate data to dictionary for bokeh

    Can take merged pkl file or d/cands as read separately.
    cands is an optional (loc, prop) tuple of numpy arrays.
    """

    # get cands from pkl
    if mergepkl:
        logger.info('Reading {0}'.format(mergepkl))
        loc, prop, d = read_candidates(mergepkl, returnstate=True)
    elif d and cands:
        logger.info('Using provided d/cands')
        loc, prop = cands

    # define columns to extract
    if 'snr2' in d['features']:
        snrcol = d['features'].index('snr2')
    elif 'snr1' in d['features']:
        snrcol = d['features'].index('snr1')
    l1col = d['features'].index('l1')
    m1col = d['features'].index('m1')
    specstdcol = d['features'].index('specstd')
    imkurcol = d['features'].index('imkurtosis')
    dtindcol = d['featureind'].index('dtind')
    dmindcol = d['featureind'].index('dmind')
    intcol = d['featureind'].index('int')
    segmentcol = d['featureind'].index('segment')
    scancol = d['featureind'].index('scan')

    # define data to plot
    key = ['sc{0}-seg{1}-i{2}-dm{3}-dt{4}'.format(ll[scancol], ll[segmentcol], ll[intcol], ll[dmindcol], ll[dtindcol]) for ll in loc]
#    key = [tuple(ll) for ll in loc]
    scan = loc[:, scancol]
    seg = loc[:, segmentcol]
    candint = loc[:, 2]
    dmind = loc[:, 3]
    dtind = loc[:, 4]
    beamnum = loc[:, 5]

    logger.info('Setting columns...')
    snrs = prop[:, snrcol]
    abssnr = np.abs(prop[:, snrcol])
    dm = np.array(d['dmarr'])[loc[:, dmindcol]]
    l1 = prop[:, l1col]
    m1 = prop[:, m1col]
    time = np.array([24*3600*d['segmenttimesdict'][scan[i]][seg[i], 0] + d['inttime']*candint[i] for i in range(len(loc))])
#    time.append(24*3600*d['segmenttimesdict'][k[scancol]][k[segmentcol],0] + d['inttime']*k[intcol])
    specstd = prop[:, specstdcol]
    imkur = prop[:, imkurcol]

    logger.info('Calculating sizes, colors, normprob...')
    time = time - min(time)
    sizes = calcsize(snrs)
    colors = colorsat(l1, m1)
    zs = normprob(d, snrs)

    # if pandas is available use dataframe to allow datashader feature
#    data = DataFrame(data={'snrs': snrs, 'dm': dm, 'l1': l1, 'm1': m1, 'time': time, 'specstd': specstd,
#                           'imkur': imkur, 'scan': scan, 'seg': seg, 'candint': candint, 'dmind': dmind,
#                           'dtind': dtind, 'sizes': sizes, 'colors': colors, 'key': key, 'zs': zs, 'abssnr': abssnr})
#    logger.info('Returning a pandas dataframe')
    data = dict(snrs=snrs, dm=dm, l1=l1, m1=m1, time=time, specstd=specstd, scan=scan,
                imkur=imkur, sizes=sizes, colors=colors, key=key, zs=zs, abssnr=abssnr)
#                dtind=dtind, scan=scan, seg=seg, candint=candint, dmind=dmind,

    return data
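
A hedged sketch of feeding the dictionary returned by readdata above into a bokeh plot; the pkl name is a placeholder, readdata and its helpers (calcsize, colorsat, normprob) are assumed in scope, and the column names used ('time', 'dm', 'sizes', 'colors') come from the dict built above.

from bokeh.plotting import figure, show
from bokeh.models import ColumnDataSource

data = readdata(mergepkl='cands_merge.pkl')  # placeholder merged cands pkl
source = ColumnDataSource(data=data)
p = figure(title='Candidates', x_axis_label='time (s)', y_axis_label='DM (pc/cm3)')
p.circle('time', 'dm', size='sizes', color='colors', alpha=0.6, source=source)
show(p)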
Example #8
def readcandsfile(candsfile,
                  plotdir='/users/claw/public_html/plots',
                  tag=None,
                  copyplots=True):
    """ Read candidates from pickle file and format as list of dictionaries

    plotdir is the path to the png plot files; a candidate is kept in datalist only if its png exists there.
    Optionally copies png files into plotdir.
    """

    if tag:
        assert isinstance(tag, str)

    loc, prop, state = read_candidates(candsfile, returnstate=True)

    fileroot = state['fileroot']
    if plotdir:
        logging.info(
            'Filtering data based on presence of png files in {0}'.format(
                plotdir))
    else:
        logging.info('Appending all data to datalist.')

    datalist = []
    for i in range(len(loc)):
        data = {}
        data['obs'] = fileroot

        for feat in state['featureind']:
            col = state['featureind'].index(feat)
            data[feat] = loc[i][col]

        for feat in state['features']:
            col = state['features'].index(feat)
            if isnan(prop[i][col]):
                data[feat] = nan_to_num(prop[i][col])
            else:
                data[feat] = prop[i][col]

        uniqueid = dataid(data)
        data['candidate_png'] = 'cands_{0}.png'.format(uniqueid)
        data['labeled'] = '0'  # has this cand been labeled by active learning loop?
        if tag:
            data['tag'] = tag  # care to add comma-delimited string to this cand?
        else:
            data['tag'] = ''

        # copy plot over and add path to datalist
        if plotdir:
            if copyplots and os.path.exists(
                    data['candidate_png']) and not os.path.exists(
                        os.path.join(plotdir, data['candidate_png'])):
                copy(data['candidate_png'], plotdir)
            if os.path.exists(os.path.join(plotdir, data['candidate_png'])):
                datalist.append(data)
        else:
            datalist.append(data)

    return datalist
Example #9
def plot_cand(candsfile,
              candloc=[],
              candnum=-1,
              threshold=0,
              savefile=True,
              returndata=False,
              outname='',
              newplot=True,
              returnstate=False,
              **kwargs):
    """ Reproduce detection of a single candidate for plotting or inspection.

    candsfile can be a merged or single-scan cands pkl file; the difference is defined by the presence of 'scan' in d['featureind'].
    candloc reproduces the candidate at a given location (scan, segment, integration, dmind, dtind, beamnum).
    candnum selects one candidate to reproduce from the ordered list.
    threshold is the minimum abs(SNR) used to filter candidates selected with candnum.
    savefile/outname define whether/how to save a png of the candidate.
    If returndata, (im, data) is returned.
    kwargs are passed to rt.set_pipeline.
    If newplot, plot with the new (version 2) candidate plot, Bridget's version.
    """

    # get candidate info
    loc, prop, d0 = pc.read_candidates(candsfile, returnstate=True)

    # define state dict and overload with user prefs
    for key in kwargs:
        logger.info('Setting %s to %s' % (key, kwargs[key]))
        d0[key] = kwargs[key]
    d0['logfile'] = False  # no need to save log

    # feature columns
    if 'snr2' in d0['features']:
        snrcol = d0['features'].index('snr2')
    elif 'snr1' in d0['features']:
        snrcol = d0['features'].index('snr1')
    if 'l2' in d0['features']:
        lcol = d0['features'].index('l2')
    elif 'l1' in d0['features']:
        lcol = d0['features'].index('l1')
    if 'm2' in d0['features']:
        mcol = d0['features'].index('m2')
    elif 'm1' in d0['features']:
        mcol = d0['features'].index('m1')

    scancol = d0['featureind'].index('scan')
    segmentcol = d0['featureind'].index('segment')
    intcol = d0['featureind'].index('int')
    dtindcol = d0['featureind'].index('dtind')
    dmindcol = d0['featureind'].index('dmind')

    # sort and prep candidate list
    snrs = prop[:, snrcol]
    select = np.where(np.abs(snrs) > threshold)[0]
    loc = loc[select]
    prop = prop[select]

    if candnum >= 0 or len(candloc):
        if (candnum >= 0) and not len(candloc):
            logger.info(
                'Reproducing and visualizing candidate %d at %s with properties %s.'
                % (candnum, loc[candnum], prop[candnum]))
            dmarrorig = d0['dmarr']
            dtarrorig = d0['dtarr']
            scan = loc[candnum, scancol]
            segment = loc[candnum, segmentcol]
            candint = loc[candnum, intcol]
            dmind = loc[candnum, dmindcol]
            dtind = loc[candnum, dtindcol]
            beamnum = 0
            candloc = (scan, segment, candint, dmind, dtind, beamnum)
        elif len(candloc) and (candnum < 0):
            assert len(
                candloc
            ) == 6, 'candloc should be length 6 ( scan, segment, candint, dmind, dtind, beamnum ).'
            logger.info('Reproducing and visualizing candidate %d at %s' %
                        (candnum, candloc))
            dmarrorig = d0['dmarr']
            dtarrorig = d0['dtarr']
            scan, segment, candint, dmind, dtind, beamnum = candloc
        else:
            raise Exception('Provide candnum or candloc, not both')

        # if working locally, set workdir appropriately. Can also be used in queue system with full path given.
        if not os.path.dirname(candsfile):
            d0['workdir'] = os.getcwd()
        else:
            d0['workdir'] = os.path.dirname(candsfile)
        filename = os.path.join(d0['workdir'],
                                os.path.basename(d0['filename']))

        if 'segmenttimesdict' in d0:  # using merged pkl
            segmenttimes = d0['segmenttimesdict'][scan]
        else:
            segmenttimes = d0['segmenttimes']

        # clean up d0 of superfluous keys
        params = pp.Params()  # will be used as input to rt.set_pipeline
        for key in list(d0.keys()):
            if not hasattr(params, key):  # and 'memory_limit' not in key:
                _ = d0.pop(key)
        d0['npix'] = 0
        d0['uvres'] = 0
        d0['logfile'] = False
        d0['savenoise'] = False
        d0['savecands'] = False

        # this triggers redefinition of segment boundaries. memory optimization changed, so this is a problem.
        #        d0['nsegments'] = 0
        #        d0['scale_nsegments'] = 1.
        d0['segmenttimes'] = segmenttimes
        d0['nsegments'] = len(segmenttimes)

        # get cand data
        d = rt.set_pipeline(filename, scan, **d0)
        (vismem, immem) = rt.calc_memory_footprint(d)
        if 'memory_limit' in d:
            assert vismem + immem < d['memory_limit'], \
                'memory_limit defined, but nsegments must (for now) be set to initial values to properly reproduce candidate'

        im, data = rt.pipeline_reproduce(
            d, candloc, product='imdata')  # removed loc[candnum]

        # optionally plot
        if savefile:
            loclabel = scan, segment, candint, dmind, dtind, beamnum
            if newplot:
                make_cand_plot(d,
                               im,
                               data,
                               loclabel,
                               version=2,
                               snrs=snrs,
                               outname=outname)
            else:
                make_cand_plot(d,
                               im,
                               data,
                               loclabel,
                               version=1,
                               outname=outname)

        # optionally return data
        if returndata:
            return (im, data)
        elif returnstate:
            return d
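
A hedged call exercising the extra parameters of this plot_cand variant; the pkl name and candnum are placeholders and the function is assumed in scope. With returnstate=True (and returndata=False) the pipeline state dict d is returned after the plot is made.

d = plot_cand('cands_merge.pkl', candnum=2, threshold=6.5,
              newplot=True,      # use the version-2 candidate plot
              savefile=True,     # write the candidate png via make_cand_plot
              returnstate=True)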
Example #10
def readdata(mergepkl=None, d=None, cands=None, sizerange=(2, 70)):
    """ Converts candidate data to dictionary for bokeh

    Can take merged pkl file or d/cands as read separately.
    cands is an optional (loc, prop) tuple of numpy arrays.
    """

    # get cands from pkl
    if mergepkl:
        logger.info('Reading {0}'.format(mergepkl))
        loc, prop, d = read_candidates(mergepkl, returnstate=True)
    elif d and cands:
        logger.info('Using provided d/cands')
        loc, prop = cands

    # define columns to extract
    if 'snr2' in d['features']:
        snrcol = d['features'].index('snr2')
    elif 'snr1' in d['features']:
        snrcol = d['features'].index('snr1')
    l1col = d['features'].index('l1')
    m1col = d['features'].index('m1')
    specstdcol = d['features'].index('specstd')
    imkurcol = d['features'].index('imkurtosis')
    dtindcol = d['featureind'].index('dtind')
    dmindcol = d['featureind'].index('dmind')
    intcol = d['featureind'].index('int')
    segmentcol = d['featureind'].index('segment')
    scancol = d['featureind'].index('scan')

    # define data to plot
    key = [
        'sc{0}-seg{1}-i{2}-dm{3}-dt{4}'.format(ll[scancol], ll[segmentcol],
                                               ll[intcol], ll[dmindcol],
                                               ll[dtindcol]) for ll in loc
    ]
    #    key = [tuple(ll) for ll in loc]
    scan = loc[:, scancol]
    seg = loc[:, segmentcol]
    candint = loc[:, 2]
    dmind = loc[:, 3]
    dtind = loc[:, 4]
    beamnum = loc[:, 5]

    logger.info('Setting columns...')
    snrs = prop[:, snrcol]
    abssnr = np.abs(prop[:, snrcol])
    dm = np.array(d['dmarr'])[loc[:, dmindcol]]
    l1 = prop[:, l1col]
    m1 = prop[:, m1col]
    time = np.array([
        24 * 3600 * d['segmenttimesdict'][scan[i]][seg[i], 0] +
        d['inttime'] * candint[i] for i in range(len(loc))
    ])
    #    time.append(24*3600*d['segmenttimesdict'][k[scancol]][k[segmentcol],0] + d['inttime']*k[intcol])
    specstd = prop[:, specstdcol]
    imkur = prop[:, imkurcol]

    logger.info('Calculating sizes, colors, normprob...')
    time = time - min(time)
    sizes = calcsize(snrs)
    colors = colorsat(l1, m1)
    zs = normprob(d, snrs)

    # if pandas is available use dataframe to allow datashader feature
    #    data = DataFrame(data={'snrs': snrs, 'dm': dm, 'l1': l1, 'm1': m1, 'time': time, 'specstd': specstd,
    #                           'imkur': imkur, 'scan': scan, 'seg': seg, 'candint': candint, 'dmind': dmind,
    #                           'dtind': dtind, 'sizes': sizes, 'colors': colors, 'key': key, 'zs': zs, 'abssnr': abssnr})
    #    logger.info('Returning a pandas dataframe')
    data = dict(snrs=snrs,
                dm=dm,
                l1=l1,
                m1=m1,
                time=time,
                specstd=specstd,
                scan=scan,
                imkur=imkur,
                sizes=sizes,
                colors=colors,
                key=key,
                zs=zs,
                abssnr=abssnr)
    #                dtind=dtind, scan=scan, seg=seg, candint=candint, dmind=dmind,

    return data
Example #11
def plot_cand(candsfile, candloc=[], candnum=-1, threshold=0, savefile=True, returndata=False, outname='', **kwargs):
    """ Reproduce detection of a single candidate for plotting or inspection.

    candsfile can be a merged or single-scan cands pkl file; the difference is defined by the presence of 'scan' in d['featureind'].
    candloc reproduces the candidate at a given location (scan, segment, integration, dmind, dtind, beamnum).
    candnum selects one candidate to reproduce from the ordered list.
    threshold is the minimum abs(SNR) used to filter candidates selected with candnum.
    savefile/outname define whether/how to save a png of the candidate.
    If returndata, (im, data) is returned.
    kwargs are passed to rt.set_pipeline.
    """

    # get candidate info
    loc, prop = pc.read_candidates(candsfile)

    # define state dict and overload with user prefs
    d0 = pickle.load(open(candsfile, 'rb'))
    for key in kwargs:
        logger.info('Setting %s to %s' % (key, kwargs[key]))
        d0[key] = kwargs[key]
    d0['logfile'] = False  # no need to save log

    # feature columns
    if 'snr2' in d0['features']:
        snrcol = d0['features'].index('snr2')
    elif 'snr1' in d0['features']:
        snrcol = d0['features'].index('snr1')
    if 'l2' in d0['features']:
        lcol = d0['features'].index('l2')
    elif 'l1' in d0['features']:
        lcol = d0['features'].index('l1')
    if 'm2' in d0['features']:
        mcol = d0['features'].index('m2')
    elif 'm1' in d0['features']:
        mcol = d0['features'].index('m1')

    try:
        scancol = d0['featureind'].index('scan')  # if merged pkl
    except ValueError:
        scancol = -1   # if single-scan pkl
    segmentcol = d0['featureind'].index('segment')
    intcol = d0['featureind'].index('int')
    dtindcol = d0['featureind'].index('dtind')
    dmindcol = d0['featureind'].index('dmind')

    # sort and prep candidate list
    snrs = prop[:, snrcol]
    select = np.where(np.abs(snrs) > threshold)[0]
    loc = loc[select]
    prop = prop[select]
    times = pc.int2mjd(d0, loc)
    times = times - times[0]

    # default case will print cand info
    if (candnum < 0) and (not len(candloc)):
        logger.info('Getting candidates...')
        logger.info('candnum: loc, SNR, DM (pc/cm3), time (s; rel)')
        for i in range(len(loc)):
            logger.info("%d: %s, %.1f, %.1f, %.1f" % (i, str(loc[i]), prop[i, snrcol], np.array(d0['dmarr'])[loc[i,dmindcol]], times[i]))
    else:  # if candnum or candloc provided, try to reproduce
        if (candnum >= 0) and not len(candloc):
            logger.info('Reproducing and visualizing candidate %d at %s with properties %s.' % (candnum, loc[candnum], prop[candnum]))
            dmarrorig = d0['dmarr']
            dtarrorig = d0['dtarr']
            if scancol >= 0:  # here we have a merge pkl
                scan = loc[candnum, scancol]
            else:   # a scan-based cands pkl
                scan = d0['scan']
            segment = loc[candnum, segmentcol]
            candint = loc[candnum, intcol]
            dmind = loc[candnum, dmindcol]
            dtind = loc[candnum, dtindcol]
            beamnum = 0
            candloc = (scan, segment, candint, dmind, dtind, beamnum)
        elif len(candloc) and (candnum < 0):
            assert len(candloc) == 6, 'candloc should be length 6 ( scan, segment, candint, dmind, dtind, beamnum ).'
            logger.info('Reproducing and visualizing candidate %d at %s' % (candnum, candloc))
            dmarrorig = d0['dmarr']
            dtarrorig = d0['dtarr']
            scan, segment, candint, dmind, dtind, beamnum = candloc
        else:
            raise Exception('Provide candnum or candloc, not both')

        # if working locally, set workdir appropriately. Can also be used in queue system with full path given.
        if not os.path.dirname(candsfile):
            d0['workdir'] = os.getcwd()
        else:
            d0['workdir'] = os.path.dirname(candsfile)
        filename = os.path.join(d0['workdir'], os.path.basename(d0['filename']))

        # clean up d0 of superfluous keys
        params = pp.Params()  # will be used as input to rt.set_pipeline
        for key in list(d0.keys()):
            if not hasattr(params, key) and 'memory_limit' not in key:
                _ = d0.pop(key)

        d0['npix'] = 0
        d0['uvres'] = 0
        d0['nsegments'] = 0
        d0['logfile'] = False
        # get cand data
        d = rt.set_pipeline(filename, scan, **d0)
        im, data = rt.pipeline_reproduce(d, candloc, product='imdata') # removed loc[candnum]

        # optionally plot
        if savefile:
            loclabel = scan, segment, candint, dmind, dtind, beamnum
            make_cand_plot(d, im, data, loclabel, outname=outname)

        # optionally return data
        if returndata:
            return (im, data)