def doit(ensemblename, stackname):
    """Find ensembles with hulls.

    The ensemble names are displayed to the screen, in ascending
    order, one per line, along with the total number of stack-level
    hulls in the ensemble (they may overlap or not).

    Parameters
    ----------
    ensemblename : str
        The name of a file which contains columns ensemble and
        stack, and so gives the mapping from stack to ensemble.
    stackname : str
        The name of an ASCII file with two columns: the name of
        the mrgsrc3 file (no directory) and the number of valid
        hulls in that stack. Only stacks with at least one hull
        contribute to the output.

    """

    cr = pycrates.read_file(ensemblename + "[cols ensemble,stack]")
    stackmap = {}
    for ensname, stkname in zip(cr.ensemble.values, cr.stack.values):
        assert stkname not in stackmap, stkname
        stackmap[stkname] = ensname

    cr = None

    ensembles = defaultdict(int)
    cr = pycrates.read_file(stackname)
    for mfile, nhulls in zip(
            cr.get_column(0).values,
            cr.get_column(1).values.astype(int)):
        if nhulls < 1:
            continue

        idx = mfile.find('N')
        if idx == -1:
            raise IOError("Invalid mrgsrc3 file name " + "'{}'".format(mfile))

        stackname = mfile[:idx]
        try:
            ensemblename = stackmap[stackname]
        except KeyError:
            raise IOError("Unrecognized stack from " + "'{}'".format(mfile))

        ensembles[ensemblename] += nhulls

    if len(ensembles) == 0:
        raise IOError("No ensembles found!")

    print("# ensemble nstackhulls")
    for ensname in sorted(ensembles):
        print("{} {}".format(ensname, ensembles[ensname]))
Example #2
File: lutplot.py Project: kglotfelty/LUT
def __test():
    """
    Commands to test above commands
    
    """
    from pycrates import read_file
    #tab = read_file("/export/byobsid/15403/primary/pcadf479143901N001_asol1.fits")
    tab = read_file("/lenin2.real/Test/Merge/repro/pcadf474115095N001_asol1.fits")
    x = tab.get_column("dy").values
    y = tab.get_column("dz").values
    z = tab.get_column("time").values
    z = (z - min(z)) / 1000.0  # offset and convert to ksec
    from paramio import pget
    lut = LUTPlot(pget("imagej_lut", "16_equal"), cmap=chips_usercmap2)
    clear()
    print("plot")
    lut.plot(x, y, z)
    print("add_colorbar")
    lut.add_colorbar()

    print("set_curve")
    lut.set_curve("symbol.style=circle")

    print("set_curve")
    lut.set_curve({'symbol.size': 2})

    print("replace_cmap")
    lut.replace_cmap(pget("imagej_lut", "005-random"))

    print("shuffle")
    lut.shuffle()
Example #3
def read_srclist_crates(infile):
    """Read in the pre-release list

    Only trust a minimal selection from this list.
    """

    # Only read in the columns we care about, because the file is
    # quite large (it has many columns we do not need).
    #
    fname = "{}[cols NAME,RA,DEC,ERR_ELLIPSE_R0]".format(infile)
    cr = pycrates.read_file(fname)
    out = {}

    # copies are a desperate/ineffectual attempt to save some memory
    for name, ra, dec, r0 in zip(cr.NAME.values.copy(),
                                 cr.RA.values.copy(),
                                 cr.DEC.values.copy(),
                                 cr.ERR_ELLIPSE_R0.values.copy()):

        # SHOULD we strip the name?
        name = str(name)
        assert name not in out
        out[name] = {'name': name, 'ra': ra, 'dec': dec,
                     'err_ellipse_r0': r0}

    return out
Example #4
def findRbkg(rprof_file, rmax=None):
    rprof = pycrates.read_file(rprof_file)
    r = pycrates.copy_colvals(rprof, "RMID")

    cts = pycrates.copy_colvals(rprof, "CEL_BRI")
    bg_cts = pycrates.copy_colvals(rprof, "BG_CEL_BRI")

    if rmax is None:
        rmax = np.max(r)

    ## Calculate the SNR
    ratio = cts / bg_cts
    dratio = np.diff(ratio)
    ratio_threshold = np.min(ratio[:-1]) + (-np.mean(dratio))

    mask = (ratio[:-1] < ratio_threshold)
    dratio_threshold = np.median(dratio[mask])
    if (dratio_threshold + 1.) > 0:
        print('threshold:', dratio_threshold)
        idx, = np.where((dratio > dratio_threshold)
                        & (ratio[:-1] < ratio_threshold))
        x = r[idx + 1]
        try:
            rbkg = x[0]
        except IndexError:
            rbkg = r[-1]
    else:
        rbkg = r[-1]
    print('Background Radius:', rbkg)

    return float(rbkg)
Example #5
File: Analysis.py Project: estevesjh/Xpipe
def makePlotBeta(infile,betapars,name,rbkg=0,model='modBeta',outdir='./'):
    '''Given a radial profile file and the model parameters, plot the
    observed and model surface-brightness profiles.
    '''
    dirname = os.path.dirname(infile)
    rprof = pycrates.read_file(infile)
    
    r = pycrates.copy_colvals(rprof,"R")
    y = pycrates.copy_colvals(rprof,"SUR_BRI")
    dy = pycrates.copy_colvals(rprof,"SUR_BRI_ERR")
    
    x = 0.492*0.5*(r[:,0] + r[:,1])
    
    if model == 'Beta':
        # Beta model
        ym = betapars[2] * (1 + (x / (betapars[0] * 0.492))**2)**(0.5 - 3 * betapars[1]) + betapars[3]
        Label = r'$\beta$-model'
    elif model == 'modBeta':
        # Beta model modified following Maughan et al. 2008
        rc, rs, alpha, beta, epsilon, gamma, n0, bkg, chisqr = betapars
        ym = fit.S_bkg(x, rc * 0.492, rs * 0.492, alpha, beta, epsilon, gamma, n0, bkg)
        # ym = (np.max(y)/np.max(ym))*ym
        Label = r'adapted-$\beta$-model'

    doPlotModel(x,y,ym,y_obs_err=dy,name=name,rbkg=rbkg,label=Label,outdir=outdir)

    return x,y,dy,ym
Example #6
def read_stkfile(infile):
    """Read in the stack information.

    Parameters
    ----------
    infile : str
        A file readable by the DM with columns 'stack' and 'nobs',
        where 'nobs' gives the number of obis in the stack.

    Returns
    -------
    ans : dict
        The keys are the stack name and the value the number of
        obis in the stack.

    """

    cr = pycrates.read_file(infile + "[cols stack, nobs]")
    out = {}
    for stack, nobs in zip(cr.stack.values,
                           cr.nobs.values.astype(int)):

        # try and remove the numpy string
        stack = str(stack)

        assert stack not in out, stack
        out[stack] = nobs

    return out
Example #7
def read_ensemble(indir, ensemble):
    """Read master-hull file."""

    pat = os.path.join(indir, ensemble, utils.make_mhull_name(ensemble))
    match = utils.find_single_match(pat)

    infile = "{}[HULLMATCH][cols Master_Id,STACKID,COMPONENT]".format(match)
    cr = pycrates.read_file(infile)
    compzero = cr.get_key_value('COMPZERO')
    if compzero is None:
        raise IOError("No COMPZERO in {}".format(infile))

    store = defaultdict(list)
    for mid, stack, cpt in zip(cr.Master_Id.values, cr.STACKID.values,
                               cr.COMPONENT.values):
        store[mid].append((stack, cpt - compzero))

    out = {}
    for mid, cpts in store.items():
        nacis = len([s for s in cpts if s[0].startswith('acis')])
        nhrc = len([s for s in cpts if s[0].startswith('hrc')])
        assert nacis + nhrc == len(cpts)

        out[mid] = {
            'ensemble': ensemble,
            'components': cpts,
            'nacis': nacis,
            'nhrc': nhrc
        }

    return out
Example #8
def get_obsid_object(infile):
    """Return a ciao_contrib._tools.utils.ObsId object using the OBS_ID
    and (optional) CYCLE and OBI_NUM keywords in the header. Note that
    the cycle keyword will always be set even if this is not an
    interleaved observation for ACIS data.

    As of CIAO 4.7, blank CYCLE keywords, as found in aspect-solution
    files, is treated as cycle=None. Care should be used in
    using the return object in such a case (there's no indication
    of it from the return value).

    """

    cr = pycrates.read_file(infile)
    obsid = cr.get_key_value("OBS_ID")

    if obsid is None:
        raise IOError("OBS_ID keyword is missing from '{}'.".format(infile))

    # Do not need to validate the value here, but it results in a
    # better error message than if ObsId fails.
    #
    cycle = cr.get_key_value("CYCLE")
    if cycle is not None and cycle.strip() == '':
        cycle = None
        v3("Changing blank CYCLE keyword from {} to None.".format(infile))

    elif cycle not in [None, 'P', 'S']:
        raise IOError("Invalid CYCLE={} keyword in '{}'.".format(
            cycle, infile))

    obi = cr.get_key_value("OBI_NUM")
    return utils.ObsId(obsid, cycle=cycle, obi=obi)
Example #9
    def nbins(self, roll=600.0, xy=0.5):
        """Return the number of aspect bins to use (the max_bin parameter
        of asphist), given the set of aspect files.

        The roll and xy parameters correspond to the asphist.res_roll and
        asphist.res_xy parameter values (and are in arcsec).
        They used to be read from the asphist parameter file
        but have now been converted to default parameter values.

        Users should check that these defaults are still valid with
        new CIAO releases (in case this code doesn't get updated).
        """

        ralim = (None, None)
        declim = (None, None)
        rolllim = (None, None)

        def get_minmax(cr, colname, orange):
            "Work out min/max from column + input values"

            omin, omax = orange

            vals = pcr.get_colvals(cr, colname)
            vmin = vals.min()
            vmax = vals.max()

            if omin is None:
                nmin = vmin
            else:
                nmin = min(vmin, omin)

            if omax is None:
                nmax = vmax
            else:
                nmax = max(vmax, omax)

            return (nmin, nmax)

        # the min/max check could be done within __init__
        for aspfile in self._files:
            asol = pcr.read_file(aspfile)
            ralim = get_minmax(asol, "ra", ralim)
            declim = get_minmax(asol, "dec", declim)
            rolllim = get_minmax(asol, "roll", rolllim)

        n_ra = (ralim[1] - ralim[0]) * 3600.0 / xy
        n_dec = (declim[1] - declim[0]) * 3600.0 / xy
        n_roll = (rolllim[1] - rolllim[0]) * 3600.0 / roll

        nbins = int(np.ceil(n_ra * n_dec * n_roll))
        if nbins <= 10000:
            return 10000

        else:
            # would base 2 be better than base 10 here?
            # l = np.ceil(np.log10(nbins)).astype(np.int)
            # return 10 ** l
            l = int(np.ceil(np.log2(nbins)))
            return 2 ** l
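A worked example of the sizing logic above, with invented ranges (0.05 degrees in both RA and Dec, 300 arcsec in roll) and the default resolutions:

import numpy as np

# Invented ranges: 0.05 deg in RA and Dec, 300 arcsec in roll.
n_ra = 0.05 * 3600.0 / 0.5      # 360 bins at the default res_xy
n_dec = 0.05 * 3600.0 / 0.5     # 360 bins
n_roll = 300.0 / 600.0          # 0.5 bins at the default res_roll
nbins = np.ceil(n_ra * n_dec * n_roll)    # 64800 > 10000
print(2 ** int(np.ceil(np.log2(nbins))))  # rounded up to 2**16 = 65536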
Example #10
def SNR_obs(file, r_aper):
    rprof = pycrates.read_file(file)
    source_counts = pycrates.copy_colvals(rprof, "COUNTS")
    total_counts = source_counts + pycrates.copy_colvals(rprof, "BG_COUNTS")
    r = pycrates.copy_colvals(rprof, "R")
    rmid = 0.5 * (r[:, 0] + r[:, 1])
    ## The SNR in a given circle of aperture r_aper
    mask = rmid < r_aper
    SNR = np.sum(source_counts[mask]) / np.sqrt(np.sum(total_counts[mask]))
    return SNR
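The statistic above is the usual Poisson estimate SNR = S / sqrt(S + B), where S and B are the source and background counts inside the aperture. A quick numeric check with invented counts:

import numpy as np
S, B = 400.0, 100.0        # invented source/background counts
print(S / np.sqrt(S + B))  # ~17.9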
Example #11
def test():
    print(annulus(1,2,3,4))
    print(box(1,2,3,4))
    print(box(1,2,3,4,5))
    print(circle(1,2,3))
    print(ellipse(1,2,3,4,5))
    print(ellipse(1,2,3,4))
    print(field())
    print(pie(1,2,3,4,5,6))
    print(point(1,2))
    print(polygon( 1,2,3,4,5,6))
    print(polygon( [1,2,3],[4,5,6]))
    print(rectangle(1,2,3,4))
    print(rotbox(1,2,3,4,5))
    print(sector(1,2,3,4))

    print(circle(10,10,+123)+box(10,10,1,1))
    print(circle(10,10,100)*box(10,10,1,1))
    print(circle(10,10,100)-box(10,10,1,1))
    print(-circle(50,50,100))
    z = region("/data/lenin2/export/byobsid/repro/ds9.reg")
    print(z)
    z.write("goo.reg")

    cc = region("/lenin2.real/Projects/ImproveRegression/Test/ciaox_20160125/dmcontour/04/dmcontour_4.fits")
    print(len(cc))
    cc.write("cntr.reg")

    a = pie(0,0, 1, 2, -45, 56)
    d = pie(0,0, 1, 2, -45, 56)
    b = a.__copy__()
    bb = box(5,5,10,20)
    print(a)
    print(b)
    print(a == b)
    print(a == d)
    print(a == cc)

    p = a+z+bb
    q = p.tweak(dx=5).tweak(stretch=5).tweak(rotate=+45).tweak(pad=3)
    print(p)
    print(q)

    q = dss('/data/lenin2/Projects/PIMMS/Doc/9768/repro/todetect/goo.fits')
    print(len(q))
    print(q[0])
    print(q.index(q[0]))
    print(q[0] in q)


    from pycrates import read_file
    c = circle(4274.5,3954.5,5)
    img = read_file("img.fits")
    wcs = img.get_transform("eqpos")
    print(c)
Example #12
File: Analysis.py Project: estevesjh/Xpipe
def noise(infits, outimage, mask=None):
    """Add Poisson noise to the image data.

    Input:  FITS image
    Output: FITS image with Poisson random noise applied

    """
    ctimg = pycrates.read_file(infits)
    img = ctimg.get_image()
    pix = img.values
    noiseimg = poisson(pix)

    if mask is not None:
        bla = pycrates.read_file(mask)
        msk_values = bla.get_image().values
        msk = msk_values == 0
        noiseimg[msk] = msk_values[msk]

    img.values = noiseimg
    pycrates.write_file(ctimg, outimage, clobber=True)
Example #13
def get_obsid(infile):
    """Return the value of the OBS_ID keyword in the file. An IOError
    is raised if the keyword does not exist in the file.
    """

    cr = pycrates.read_file(infile)
    val = cr.get_key_value("OBS_ID")
    if val is None:
        raise IOError("OBS_ID keyword is missing from '{}'".format(infile))

    return val
Example #14
def apply_binning_to_image(binmap_file, image_file, root=None, clobber=False):
    """
    Applies the binning scheme from the binmap to an image of the same shape.

    Inputs:  binmap_file - fits file of map of bins (pixel values = bin numbers)
             image_file - fits file of image (must have the same shape as the binmap)
             root - root name of output map; defaults to image_file_root + "_binned"
             clobber - if True, overwrite any existing files

    Outputs: Binned version of the input image with each bin set to the mean value
             inside that bin, named "root.fits".

    """
    if root is None:
        root = os.path.splitext(image_file)[0] + '_binned'

    # Check if min bin is negative or starts or ends on the image boundary.
    # If so, assume it is not wanted (e.g., for wvt bin maps).
    #
    # Use a CrateDataset to read in the file to ensure any "extra"
    # blocks are retained. If this is not needed then
    # pycrates.read_file could be used here.
    #
    ds = pycrates.CrateDataset(binmap_file, mode='r')
    cr = ds.get_crate(0)
    assert isinstance(cr, pycrates.IMAGECrate)

    binimage = cr.get_image().values
    minbin = int(binimage.min())
    maxbin = int(binimage.max())
    if minbin < 0:
        minbin = 0
    inbin = numpy.where(binimage == minbin)
    if 0 in inbin[0] or numpy.size(binimage, 0) - 1 in inbin[0]:
        minbin += 1
    nbins = maxbin - minbin + 1

    icr = pycrates.read_file(image_file)
    assert isinstance(icr, pycrates.IMAGECrate)
    im = icr.get_image().values

    # Check that the binmap and image have the same shape
    if binimage.shape != im.shape:
        sys.exit('ERROR: Input binmap and image must have the same shape.')

    # make copy of the binmap
    binimage_out = binimage.astype(float)

    for i in range(nbins):
        inbin = numpy.where(binimage == i + minbin)
        binimage_out[inbin] = numpy.mean(im[inbin])

    cr.get_image().values = binimage_out
    ds.write(root + '.fits', clobber=clobber)
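The core of the per-bin loop above, shown on a toy array: every pixel in a bin is replaced by the mean of the image over that bin.

import numpy

binmap = numpy.array([[0, 0, 1],
                      [1, 1, 0]])       # toy bin numbers
image = numpy.array([[1., 2., 3.],
                     [4., 5., 6.]])
out = binmap.astype(float)
for b in numpy.unique(binmap):
    inbin = numpy.where(binmap == b)
    out[inbin] = numpy.mean(image[inbin])
print(out)   # bin 0 -> mean(1, 2, 6) = 3; bin 1 -> mean(3, 4, 5) = 4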
Example #15
def get_energy_limits(wgtfile):
    """Return (elo,ehi) from wgtfile, which is assumed to have
    been created by make_instmap_weights or save_instmap_weights().
    The return values are floats.

    We first look for ENERG_LO/HI keywords in the file and, if they
    are not present, calculate the values from the first column,
    assuming these values are the mid-points and that the bins
    have the same width.
    """

    elo = None
    ehi = None
    with open(wgtfile, "r") as fh:
        for line in fh:
            if not line.startswith('#'):
                break

            kv = find_keyval(line, 'ENERG_LO')
            if kv is None:
                kv = find_keyval(line, 'ENERG_HI')
                if kv is not None:
                    ehi = kv
            else:
                elo = kv

    if elo is not None:
        v3("Weight file {} - elo = {}".format(wgtfile, elo))
        try:
            elo = float(elo)
        except ValueError:
            v1("Unable to convert ENERG_LO = {} to a float".format(elo))
            elo = None

    if ehi is not None:
        v3("Weight file {} - ehi = {}".format(wgtfile, ehi))
        try:
            ehi = float(ehi)
        except ValueError:
            v1("Unable to convert ENERG_HI = {} to a float".format(ehi))
            ehi = None

    if elo is None or ehi is None:
        v3("Calculating energy limits from weight file: {}".format(wgtfile))
        cr = pycrates.read_file(wgtfile)
        x = cr.get_column(0).values
        # could get bin width from the whole array but use the
        # local bin sizes in case the bins are not equal sized,
        # to reduce the error
        elo = (3 * x[0] - x[1]) / 2.0
        ehi = (3 * x[-1] - x[-2]) / 2.0

    return (elo, ehi)
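The edge extrapolation above follows from treating x[0] as a bin mid-point: elo = x[0] - (x[1] - x[0]) / 2 = (3*x[0] - x[1]) / 2, and similarly for ehi. A quick check on an invented, evenly spaced grid:

import numpy as np
x = np.array([0.35, 0.45, 0.55])  # invented mid-points of 0.1 keV bins
print((3 * x[0] - x[1]) / 2.0)    # 0.3, the lower edge of the first bin
print((3 * x[-1] - x[-2]) / 2.0)  # 0.6, the upper edge of the last bin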
Example #16
def read_mhull(infile):
    """Can have 0, 1, or multiple hulls in a file."""

    cr = pycrates.read_file("{}[HULLLIST]".format(infile))
    if cr.get_nrows() == 0:
        print(" - skipping {}".format(infile))
        return []

    ensemble = cr.get_key_value('ENSEMBLE')

    mids = cr.Master_Id.values
    nvs = cr.NVERTEX.values
    eqpos = cr.EQPOS.values

    # turns out the nvertex value is not to be trusted
    #
    out = []
    for (mid, nv, eqp) in zip(mids, nvs, eqpos):

        ra = eqp[0]
        dec = eqp[1]

        xidx = np.isfinite(ra)
        yidx = np.isfinite(dec)
        assert (xidx == yidx).all(), infile

        if xidx.sum() == 0:
            print("No valid data for {} mid={}".format(infile, mid))
            continue
        elif xidx.sum() < 3:
            print("Missing vertices for {} mid={}".format(infile, mid))
            continue

        ra = ra[xidx]
        dec = dec[xidx]

        if (ra[-1] != ra[0]) or (dec[-1] != dec[0]):
            print("WARNING: not closed {} mid={}".format(infile, mid))

        # convert to a regular python array
        pos = []
        for x, y in np.vstack((ra, dec)).T:
            pos.append([x, y])

        # we don't have the final centroid here, so for now just use
        # the first vertex
        #
        center = [ra[0], dec[0]]

        label = "{} {}".format(ensemble, mid)
        out.append((pos, center, label))

    return out
Example #17
def extract_events(event_file, src_x, src_y, src_radius):
    """
    Get events from specified source circle

    :param event_file: Chandra event 1 or 2 file
    :param src_x: Sky X coordinate of source region to extract
    :param src_y: Sky Y coordinate of source region to extract
    :param src_radius: Source region/circle radius in pixels
    :returns: CRATE of events
    """
    regstring = "circle({},{},{})".format(src_x, src_y, src_radius)
    events = pycrates.read_file("{}[sky={}]".format(event_file, regstring))
    return events
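For reference, the filter built here is the standard DM sky-region syntax; a hypothetical call (file name and coordinates invented):

# extract_events("acisf01234N001_evt2.fits", 4096.5, 4096.5, 20)
# is equivalent to
# pycrates.read_file("acisf01234N001_evt2.fits[sky=circle(4096.5,4096.5,20)]")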
Example #18
File: _utils.py Project: kglotfelty/LUT
def _try_hard_to_locate(filename):
    """
    Try to locate the LUT file based just on the name (e.g. as
    returned by ds9).  Looks in CIAO-ish places and then in the
    home directory ~/.ds9
    """
    import os as os
    import glob as glob
    from pycrates import read_file, get_colvals

    tox = ["",
           "{}/data/".format(os.environ["ASCDS_INSTALL"]),
           "{}/data/".format(os.environ["ASCDS_CONTRIB"]),
           "{}/.ds9/".format(os.environ["HOME"])]

    tox.extend(glob.glob("{}/.ds9/LUT/*/".format(os.environ["HOME"])))

    tab = None
    for tt in tox:
        try:
            tab = read_file(tt + filename)
            break
        except IOError:
            pass

        try:
            tab = read_file(tt + filename + ".lut")
            break
        except IOError:
            pass

    if tab is None:
        raise IOError("Could not find lookup table '{}'.  Maybe try the full path?".format(filename))

    rr = get_colvals(tab, 0) * 1.0  # multiply by 1.0 to detach from the crate
    gg = get_colvals(tab, 1) * 1.0
    bb = get_colvals(tab, 2) * 1.0
    return (rr, gg, bb)
Example #20
def get_count_stats(out_file, region_file):

    region = np.flipud(copy_piximgvals(read_file(region_file)))
    output = np.loadtxt(out_file)

    imsize = output.shape[1]
    niter = int(output.shape[0] / imsize)

    count_sums = np.zeros(niter)

    for i in range(niter):
        count_sums[i] = (np.flipud(output[i * imsize:(i + 1) * imsize, :]) *
                         region).sum()

    return count_sums
Example #21
def read_xmdat3(xmdat3dir, stack):
    """Read xmdat3 PSF data, if available.

    The xmdat3 files are assumed to be stored as
       <xmdat3dir>/<stack>/<stack>N000_xmdat3.fits

    Should I send in the ra/dec limits so that we can filter
    on these (would miss those which are centered outside the
    range but overlap)?

    If the file is missing a message is logged.

    Parameters
    ----------
    xmdat3dir : str
        The directory containing the files.
    stack : str
        The stack name

    Returns
    -------
    rval : list of regions or None
        None if there is no xmdat3 file, otherwise a list
        (which can be empty) of regions, which contain
        'ra', 'dec', 'r0', 'r1', 'angle' (ra and dec are in
        decimal degrees, r0 and r1 are in arcseconds, and angle
        is in degrees).

    """

    infile = os.path.join(xmdat3dir, stack, '{}N000_xmdat3.fits'.format(stack))
    if not os.path.isfile(infile):
        utils.logonce("no xmdat3 file {}".format(infile))
        return None

    try:
        cr = pycrates.read_file(infile + "[cols ra,dec,psf_r0,psf_r1,psf_ang]")
    except IOError:
        utils.logonce("unable to read XMDAT3 file {}".format(infile))
        raise

    out = []
    for ra, dec, r0, r1, ang in zip(cr.ra.values, cr.dec.values,
                                    cr.psf_r0.values, cr.psf_r1.values,
                                    cr.psf_ang.values):
        out.append({'ra': ra, 'dec': dec, 'r0': r0, 'r1': r1, 'angle': ang})

    return out
Example #22
def find_ensemble_stacks(ensemblefile, ensemble):
    """Return the stacks associated with the ensmeble.

    Parameters
    ----------
    ensemblefile : str
        The file should contain columns ensemble and stack, and is
        used to find what stacks to look for.
    ensemble : str
        The ensemble name.

    Returns
    -------
    stacks : list of str
        The stack names.

    Raises
    ------
    IOError
        If the ensemble is not found in the file.
    """

    # I could use an 'ensemble=<...>' filter, but there are known
    # DM string-filtering bugs in CIAO 4.9 which mean that we
    # have to do the filtering manually.
    #
    infile = "{}[cols ensemble, stack]".format(ensemblefile)
    cr = pycrates.read_file(infile)

    # do not assume the input file is sorted by ensemble
    out = []
    for ens, stack in zip(cr.ensemble.values,
                          cr.stack.values.copy()):
        if ens != ensemble:
            continue

        out.append(stack)

    if not out:
        raise IOError("No stacks found for ensemble "
                      "{} in {}".format(ensemble, ensemblefile))

    return out
Example #23
def fsmooth(image, filename):
    """Smooth image by the image stored in filename.

    Any non-finte values in the input file are set to 0.

    NaN (and other non-finite) values in the input are set to 0 for
    the smooth, and then set to NaN on output.

    """

    cr = pyc.read_file(filename)
    if not isinstance(cr, pyc.IMAGECrate):
        raise ValueError("File '{0}' is not an image!".format(filename))

    kvals = cr.get_image().values.copy()
    if kvals.ndim != 2:
        raise ValueError("Kernel file is not 2D but 0D!".format(kvals.ndim))

    return ismooth(image, kvals)
Example #24
def get_column_unique(infile, colname):
    """Return a NumPy array listing the unique values
    in the given column.

    If the file is empty the routine returns None.
    """

    cr = pycrates.read_file("{}[cols {}]".format(infile, colname))
    try:
        if cr.get_nrows() == 0:
            return None

        vals = pycrates.get_colvals(cr, colname).copy()

    except AttributeError:
        raise IOError(
            "Unable to find column {0} in {1} - is it an image?".format(
                colname, infile))

    return np.unique(vals)
Example #25
def get_ensemble(infile):
    """What is the ensemble and revision of a mhull file?"""

    # For now rely on the header, and hope they have been updated
    #
    cr = pycrates.read_file(infile)
    ensemble = cr.get_key_value('ENSEMBLE')
    chsver = cr.get_key_value('CHSVER')
    if ensemble is None:
        raise IOError("Missing ENSEMBLE in {}".format(infile))
    if chsver is None:
        raise IOError("Missing CHSVER in {}".format(infile))

    # it should be an int, but just in case
    try:
        chsver = int(chsver)
    except ValueError:
        raise IOError("CHSVER is not an in in {}".format(infile))

    return (ensemble, chsver)
Example #26
    def load_pha(self, specfile, annulus):
        """
        Load a pha file and add to the datasets for stacked analysis.

        :param specfile: extracted source PHA/PI spectrum file
        :param annulus: annulus for spectrum file
        """
        dataid = len(self.datasets)
        print('Loading spectrum file %s as dataset id %d' % (specfile, dataid))
        SherpaUI.load_pha(dataid, specfile)

        try:
            obsid = int(pycrates.read_file(specfile).get_key_value('OBS_ID'))
        except (TypeError, ValueError):
            obsid = 0
        dataset = dict(file=specfile,
                       obsid=obsid,
                       id=dataid,
                       annulus=annulus
                       )
        self.datasets.append(dataset)
        self.obsids.add(obsid)
Example #27
def read_qa_hulls(qadir, revision, master_id):
    """Read in the QA hull (or hulls) for the given master hull.

    Parameters
    ----------
    qadir : str
        The directory containing the qa.<master_id>.v<revision>.fits
        files to read.
    revision : int
        The revision number to use.
    master_id : int
        The master id.

    Returns
    -------
    qahulls : list of dict
        Each entry contains the 'eqpos' keyword, which is a 2 by npts
        NumPy array with the celestial coordinates of the polygon
        (closed, only finite values). There can be one or more items.

    """

    infile = os.path.join(qadir,
                          'qa.{:03d}.v{:03d}.fits'.format(master_id, revision))
    cr = pycrates.read_file(infile)
    if cr.get_nrows() < 1:
        raise IOError("Expected at least 1 row: {}".format(infile))

    out = []
    for cpt, npts, eqpos in zip(cr.COMPONENT.values, cr.NVERTEX.values,
                                cr.EQPOS.values):
        eqpos = utils.validate_polygon(eqpos[:, :npts], report=True)
        out.append({
            'component': cpt,
            'master_id': master_id,
            'eqpos': eqpos.copy()
        })

    return out
Example #28
def find_stack_filenames(infile, props):
    """Find the filenames for the stack products.

    Parameters
    ----------
    infile : str
        The name of the file. It must contain a stack column; the value
        is used as the stack id.
    props : list_of_str
        The file types, with optional band specifiers.

    """

    filename = "{}[cols stack]".format(infile)
    cr = pycrates.read_file(filename)
    if cr.get_nrows() == 0:
        raise IOError("No rows read from {}".format(filename))

    # go overboard in ensuring we have a string version that is not
    # part of a crate
    stacks = [str(c) for c in cr.stack.values.copy()]
    cr = None

    # Note: there is no attempt to batch the queries
    #
    out = {}
    for stack in stacks:

        url = make_stack_url(stack, props)
        ans = make_query(url)
        if ans is None:
            continue

        out[stack] = ans

    # Should this get converted to ISO 8601 (or whatever) format?
    out['lastupdate'] = time.asctime()

    print(json.dumps(out))
Example #29
def _get_radec_from_file(pos):
    """
    Get ra/dec values from a table.  Will 
    try to use ra/dec columns if they exist and then
    will fail over to use first two columns.
    """
    from pycrates import read_file
    mytab = read_file(pos)

    cls = [x.lower() for x in mytab.get_colnames(vectors=False)]

    if "ra" in cls and "dec" in cls:
        ra = "ra"
        dec = "dec"
    else:
        # Use first two columns in file
        ra = 0
        dec = 1

    ra = mytab.get_column(ra).values.tolist()
    dec = mytab.get_column(dec).values.tolist()
    zz = list(zip(ra, dec))

    if isinstance(ra[0], str) and isinstance(dec[0], str):
        # if columns are string, then use above to parse

        _radec = [_get_radec_from_str(r + "," + d) for r, d in zz]
        ra = [r[0] for r in _radec]
        dec = [r[1] for r in _radec]
    elif isinstance(ra[0], str) or isinstance(dec[0], str):
        raise IOError("Both RA and Dec values must be strings if either is")

    if len(zz) != len(set(zz)):
        verb0(
            "WARNING:  The same source position was input more than once.  This may produce unexpected results\n"
        )

    return ra, dec
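A hypothetical input illustrating the fallback behaviour: a two-column ASCII table with no ra/dec column names still works, since columns 0 and 1 are used.

# pos.dat (hypothetical):
#     123.456  -12.345
#     123.500  -12.300
# ra, dec = _get_radec_from_file("pos.dat")   # falls back to columns 0 and 1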
Example #30
def doGRP(infile, Ncounts, x0, y0):
    """Make groups of Ncounts in each bin
    """
    rprof = pycrates.read_file(infile)
    r = pycrates.copy_colvals(rprof, "R")
    cnt = pycrates.copy_colvals(rprof, "COUNTS")
    dr = r[:, 1] - r[:, 0]

    nbins = len(cnt)
    outradii = []
    lastidx = 0

    inrad, outrad = np.empty(0), np.empty(0)
    for i in range(1, nbins + 1):
        totfgcts = cnt[lastidx:i].sum()
        # close a group once it has Ncounts, or at the (possibly
        # under-filled) final bin
        if totfgcts >= Ncounts or i == nbins:
            inrad = np.append(inrad, r[lastidx, 0])
            outrad = np.append(outrad, r[i - 1, 1])
            outradii.append(i)
            lastidx = i
    radii = [inrad, outrad]
    return len(outradii), radii
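The grouping logic on a toy counts array: bins are accumulated until at least Ncounts have been collected, and the (possibly under-filled) final group is closed at the last bin. A sketch with invented numbers:

import numpy as np
cnt = np.array([30, 50, 40, 10, 80])  # invented per-bin counts
Ncounts = 70
groups, last = [], 0
for i in range(1, len(cnt) + 1):
    if cnt[last:i].sum() >= Ncounts or i == len(cnt):
        groups.append((last, i))      # half-open bin-index range
        last = i
print(groups)   # [(0, 2), (2, 5)]: groups of 80 and 130 counts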
Example #31
def get_centroids(out_file, region_file):

    region = np.flipud(copy_piximgvals(read_file(region_file)))
    output = np.loadtxt(out_file)

    imsize = output.shape[1]
    niter = int(output.shape[0] / imsize)

    avg_image = np.zeros((imsize, imsize))

    xvals = np.zeros(niter)
    yvals = np.zeros(niter)

    count_sums = np.zeros(niter)

    dmcoords.punlearn()
    for i in range(niter):
        avg_image += output[i * imsize : (i + 1) * imsize, :]
        xvals[i], yvals[i] = centroid(
            np.flipud(output[i * imsize : (i + 1) * imsize, :]) * region, region_file
        )
        count_sums[i] = ((output[i * imsize : (i + 1) * imsize, :]) * region).sum()

    return xvals, yvals, count_sums
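The slicing above assumes out_file stores niter square images stacked vertically, so iteration i occupies rows i*imsize to (i+1)*imsize. A toy illustration:

import numpy as np
imsize, niter = 2, 3
output = np.arange(niter * imsize * imsize).reshape(niter * imsize, imsize)
for i in range(niter):
    frame = output[i * imsize:(i + 1) * imsize, :]  # the i-th 2x2 image
    print(i, frame.sum())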
Example #32
    # https://matplotlib.org/users/legend_guide.html#creating-artists-specifically-for-adding-to-the-legend-aka-proxy-artists
    #
    return mpatches.Patch(facecolor=facecolor, edgecolor=edgecolor,
                          alpha=alpha)


# Read in results from XSPEC analysis of M87

arcsec_per_pix = 0.492
arcsec_per_rad = (180.*3600.)/numpy.pi

# Read in the values from XSPEC analysis, where the radius is in
# ACIS pixels.
#
try:
    phys = read_file('m87_phys.dat')

    r0 = phys.r0.values
    r1 = phys.r1.values
    ne = phys.ne.values
    nelow = phys.nelow.values
    nehigh = phys.nehigh.values
    kt = phys.kt.values
    ktlow = phys.ktlow.values
    kthigh = phys.kthigh.values

except NameError:

    phys = Table.read('m87_phys.dat', format='ascii')

    r0 = phys['r0']
Example #33
def get_infile_type(infile):
    """Returns 'Table' or 'Image' depending on what Crates thinks
    the type of infile is."""
    cr = pycrates.read_file(infile)
    return pycrates.get_crate_type(cr)
Example #34
File: ds9_plot.py Project: kglotfelty/dax
   baseFile = infile.split ('[')
   baseFile = baseFile[0]

   if pkg_flag == pkgIsLoad :     #   CHIPS plot
      import time

      # connect to chips -- try a couple of times in case the server is
      # slow to start up
      for ii in range(3):
         try:
            connect(ds9XPA)
         except Exception:
            time.sleep(.1)

      add_window();

      # overwrite the get_filename function so that chips controls the file names
      crate = pycrates.read_file (infile)
      crate.get_filename = lambda: ''

      make_figure(crate, "histogram");
      set_plot_title(title);

   else :                         #   BLT plot
      import string 
      import subprocess
      make_blt_plot( infile, ds9XPA, title, baseFile) 

   os.remove (baseFile)
Example #35
if __name__ == "__main__":

    nargs = len(sys.argv)
    if nargs != 3:
        usage()

    if sys.argv[2] in options:
        proptype = sys.argv[2]
    else:
        usage()

    infile = sys.argv[1] + "[cols stack]"

    import pycrates
    cr = pycrates.read_file(infile)
    if cr.get_nrows() == 0:
        raise IOError("No stacks in {}".format(infile))

    print("# stack filename")
    stacks = cr.get_column('stack').values
    nstacks = len(stacks)

    # try to avoid the URL being too long; pick 24 as a guess
    # (character length of stack + extra coding is < 40 characters,
    # 24 * 40 = 960, so total url length is <~ 1024 characters).
    # It can probably be larger, but let's see how this goes.
    #
    # NumPy's array_split routine takes the number of chunks, rather
    # than the number of elements per chunk
    #
Example #36
from pycrates import read_file

from calc_dist import calc_distance

import sys

tol = float(sys.argv[1]) / 60.0

early = read_file("no_cal")
later = read_file(
    "o_by_date.fits[obs_mode=POINTING,obspar_ver=7:][cols ra_pnt,dec_pnt,obs_id]"
)

er = early.get_column("ra_pnt").values
ed = early.get_column("dec_pnt").values
eo = early.get_column("obs_id").values
ej = early.get_column("object").values

lr = later.get_column("ra_pnt").values
ld = later.get_column("dec_pnt").values
lo = later.get_column("obs_id").values

early = {o: [r, d, j] for o, r, d, j in zip(eo, er, ed, ej)}
later = {o: [r, d] for o, r, d in zip(lo, lr, ld)}

#tol = 1.0/60.0 # 1 arcmin

for ee in early:
    for ll in later:
        d = calc_distance(early[ee][0], early[ee][1], later[ll][0],
                          later[ll][1])
Example #37
def main(opt):

    # Use verbose option to control sherpa output
    logger = logging.getLogger("sherpa")
    logger.setLevel(LOGLEVELS[opt['verbose']])

    events = extract_events(opt['evtfile'],
                            opt['x'], opt['y'], opt['radius'])

    evt_ra_pnt = events.get_key('RA_PNT').value
    evt_dec_pnt = events.get_key('DEC_PNT').value
    evt_roll_pnt = events.get_key('ROLL_PNT').value

    asol = pycrates.read_file(opt['infile'])
    asol_times = asol.get_column('time').values

    # Sanity check the two input files
    asol_obsid = asol.get_key('OBS_ID').value
    evt_obsid = events.get_key('OBS_ID').value
    if asol_obsid != evt_obsid:
        v1("Error: aspect solution obsid {} != event file obsid {}".format(asol_obsid, evt_obsid))

    # Extract event RA, Dec, and times from event file
    # Do the WCS transformation directly instead of using the pycrates RA/Dec properties to
    # work around intermittent bug https://icxc.harvard.edu/pipe/ascds_help/2013a/0315.html
    wcs = events.get_transform("eqpos")
    evt_x = events.get_column("x").values
    evt_y = events.get_column("y").values
    rd = wcs.apply(np.column_stack([evt_x, evt_y]))
    evt_ra = rd[:, 0]
    evt_dec = rd[:, 1]
    evt_times = events.get_column('Time').values

    # Limit to only using events contained within the range of the aspect solution
    ok_times = (evt_times > asol_times[0]) & (evt_times < asol_times[-1])
    if not np.any(ok_times):
        raise ValueError("No events in region are contained within time range of aspect solution.")
    # Limit this *in place*
    evt_ra = evt_ra[ok_times]
    evt_dec = evt_dec[ok_times]
    evt_times = evt_times[ok_times]

    if len(evt_times) < opt['src_min_counts']:
        v1("Warning: only {} counts in src region ({} is the suggested minimum 'src_min_counts')".format(
                len(evt_times), opt['src_min_counts']))

    ax_data = {}
    ax_map = {'yag': 'dy',
              'zag': 'dz'}

    ax_data['yag'], ax_data['zag'] = get_event_yag_zag(evt_ra, evt_dec,
                                                       evt_ra_pnt, evt_dec_pnt, evt_roll_pnt)

    # Store comments to print in block after all of the sherpa fit output
    fit_comments = []
    plot_list = []

    for data_id, ax in enumerate(['yag', 'zag']):
        fit_data = ax_data[ax] - np.mean(ax_data[ax])
        mp, model = _fit_poly(fit_data, evt_times, opt['corr_poly_degree'], data_id=data_id)

        bin_centers, bin_mean, bin_std = time_bins(evt_times, fit_data)

        add_window(6, 4, "inches")
        add_curve((bin_centers - evt_times[0]) / 1000., bin_mean, [bin_std, +bin_std],
                  ["line.style", "none", "symbol.style", "none", "err.style", "cap"])
        add_curve(mp.x / 1000., mp.y, ["symbol.style", "none"])
        # set minimum limit on fit plot in arcsecs and set this explicitly as a symmetric limit
        fit_ymax = max(0.3, np.max(np.abs(bin_mean - bin_std)), np.max(np.abs(bin_mean + bin_std)))
        limits(Y_AXIS, -1 * fit_ymax, fit_ymax)
        set_plot_xlabel("Observation elapsed/delta time (ks)")
        set_plot_ylabel("Position offset from mean, {} (arcsec)".format(ax))
        set_plot_title("Fit of {} data (with time-binned event offsets)".format(ax))
        fit_plot = "{}_fit_{}.png".format(opt['corr_plot_root'], ax)
        if os.path.exists(fit_plot) and opt['clobber'] == 'yes':
            os.unlink(fit_plot)
        plot_list.append(fit_plot)
        print_window(fit_plot)

        add_window(6, 4, "inches")
        data_plot = "{}_data_{}.png".format(opt['corr_plot_root'], ax)
        ui.get_data_plot_prefs()['yerrorbars'] = False
        ui.plot_fit(data_id)
        if os.path.exists(data_plot) and opt['clobber'] == 'yes':
            os.unlink(data_plot)
        # set minimum limit on data plot in arcsecs and set this explicitly as a symmetric limit
        data_ymax = max(2.0, np.max(np.abs(fit_data)) + .2)
        limits(Y_AXIS, -1 * data_ymax, data_ymax)
        set_plot_xlabel("Observation elapsed/delta time (s)")
        set_plot_ylabel("Position offset from mean, {} (arcsec)".format(ax))
        set_plot_title("Raw data and fit in {}".format(ax))
        plot_list.append(data_plot)
        print_window(data_plot)

        asol_corr = np.interp(asol_times, mp.x + evt_times[0], mp.y)
        asol_col_to_fix = asol.get_column(ax_map[ax])
        fit_comments.append("Events show drift range of {:.2f} arcsec in {} axis".format(
                np.max(asol_corr) - np.min(asol_corr), ax))
        fit_comments.append("Max absolute correction of {:.2f} arcsec for {} axis".format(
                np.max(np.abs(asol_corr)), ax))

        # Convert the correction from arcsecs to mm (divide by 20) and add the correction
        # to the dy and dz columns in the file.
        asol_col_to_fix.values += (asol_corr / 20)

        # Add header keys saving the axis-specific parts of this correction
        write_key(asol, "ADC{}MN".format(ax.upper()), np.mean(ax_data[ax]),
                  "Aspect Drift Corr. Mean of uncorr {} data".format(ax))
        for deg in range(0, 1 + opt['corr_poly_degree']):
            write_key(asol, "ADC{}C{}".format(ax.upper(), deg),
                      getattr(model, 'c{}'.format(deg)).val,
                      "Aspect Drift Corr. {} model c{}".format(ax, deg))

    # Add header keywords about fit
    write_key(asol, "ADCTIME0", evt_times[0],
              "Aspect Drift Corr. reference time")
    write_key(asol, "ADCSRCX", opt['x'],
              "Aspect Drift Corr. input src x")
    write_key(asol, "ADCSRCY", opt['y'],
              "Aspect Drift Corr. input src y")
    write_key(asol, "ADCSRCR", opt['radius'],
              "Aspect Drift Corr. input src radius", units='pix')
    write_key(asol, "ADCORDR", opt['corr_poly_degree'],
              "Aspect Drift Corr. model poly degree")
    write_key(asol, "ADCVER", VERSION,
              "Aspect Drift Corr. tool version")

    v2("-" * 60)
    v2("Fit results")
    for c in fit_comments:
        v2("\t{}".format(c))
    v2("-" * 60)
    v2("Writing out corrected aspect solution file to {}".format(opt['outfile']))
    v2("\tTo review fit see correction plots in:")
    for p in plot_list:
        v2("\t\t{}".format(p))

    # Actually write out the new aspect solution file
    asol.write(opt['outfile'], clobber=opt['clobber'])

    # Add history
    add_tool_history(opt['outfile'], TOOLNAME, params=opt, toolversion=VERSION)
Example #38
def main(opt):

    # Use verbose option to control sherpa output
    logger = logging.getLogger("sherpa")
    logger.setLevel(LOGLEVELS[opt['verbose']])

    events = extract_events(opt['evtfile'], opt['x'], opt['y'], opt['radius'])

    evt_ra_pnt = events.get_key('RA_PNT').value
    evt_dec_pnt = events.get_key('DEC_PNT').value
    evt_roll_pnt = events.get_key('ROLL_PNT').value

    asol = pycrates.read_file(opt['infile'])
    asol_times = asol.get_column('time').values

    # Sanity check the two input files
    asol_obsid = asol.get_key('OBS_ID').value
    evt_obsid = events.get_key('OBS_ID').value
    if asol_obsid != evt_obsid:
        v1("Error: aspect solution obsid {} != event file obsid {}".format(
            asol_obsid, evt_obsid))

    # Extract event RA, Dec, and times from event file
    # Do the WCS transformation directly instead of using the pycrates RA/Dec properties to
    # work around intermittent bug https://icxc.harvard.edu/pipe/ascds_help/2013a/0315.html
    wcs = events.get_transform("eqpos")
    evt_x = events.get_column("x").values
    evt_y = events.get_column("y").values
    rd = wcs.apply(np.column_stack([evt_x, evt_y]))
    evt_ra = rd[:, 0]
    evt_dec = rd[:, 1]
    evt_times = events.get_column('Time').values

    # Limit to only using events contained within the range of the aspect solution
    ok_times = (evt_times > asol_times[0]) & (evt_times < asol_times[-1])
    if not np.any(ok_times):
        raise ValueError(
            "No events in region are contained within time range of aspect solution."
        )
    # Limit this *in place*
    evt_ra = evt_ra[ok_times]
    evt_dec = evt_dec[ok_times]
    evt_times = evt_times[ok_times]

    if len(evt_times) < opt['src_min_counts']:
        v1("Warning: only {} counts in src region ({} is the suggested minimum 'src_min_counts')"
           .format(len(evt_times), opt['src_min_counts']))

    ax_data = {}
    ax_map = {'yag': 'dy', 'zag': 'dz'}

    ax_data['yag'], ax_data['zag'] = get_event_yag_zag(evt_ra, evt_dec,
                                                       evt_ra_pnt, evt_dec_pnt,
                                                       evt_roll_pnt)

    # Store comments to print in block after all of the sherpa fit output
    fit_comments = []
    plot_list = []

    for data_id, ax in enumerate(['yag', 'zag']):
        fit_data = ax_data[ax] - np.mean(ax_data[ax])
        mp, model = _fit_poly(fit_data,
                              evt_times,
                              opt['corr_poly_degree'],
                              data_id=data_id)

        bin_centers, bin_mean, bin_std = time_bins(evt_times, fit_data)

        add_window(6, 4, "inches")
        add_curve(
            (bin_centers - evt_times[0]) / 1000., bin_mean,
            [bin_std, +bin_std],
            ["line.style", "none", "symbol.style", "none", "err.style", "cap"])
        add_curve(mp.x / 1000., mp.y, ["symbol.style", "none"])
        # set minimum limit on fit plot in arcsecs and set this explicitly as a symmetric limit
        fit_ymax = max(0.3, np.max(np.abs(bin_mean - bin_std)),
                       np.max(np.abs(bin_mean + bin_std)))
        #limits(Y_AXIS, -1 * fit_ymax, fit_ymax)
        set_plot_xlabel("Observation elapsed/delta time (ks)")
        set_plot_ylabel("Position offset from mean, {} (arcsec)".format(ax))
        set_plot_title(
            "Fit of {} data (with time-binned event offsets)".format(ax))
        fit_plot = "{}_fit_{}.png".format(opt['corr_plot_root'], ax)
        if os.path.exists(fit_plot) and opt['clobber']:
            os.unlink(fit_plot)
        plot_list.append(fit_plot)
        print_window(fit_plot)

        add_window(6, 4, "inches")
        data_plot = "{}_data_{}.png".format(opt['corr_plot_root'], ax)
        ui.get_data_plot_prefs()['yerrorbars'] = False
        ui.plot_fit(data_id)
        if os.path.exists(data_plot) and opt['clobber']:
            os.unlink(data_plot)
        # set minimum limit on data plot in arcsecs and set this explicitly as a symmetric limit
        data_ymax = max(2.0, np.max(np.abs(fit_data)) + .2)
        #limits(Y_AXIS, -1 * data_ymax, data_ymax)
        set_plot_xlabel("Observation elapsed/delta time (s)")
        set_plot_ylabel("Position offset from mean, {} (arcsec)".format(ax))
        set_plot_title("Raw data and fit in {}".format(ax))
        plot_list.append(data_plot)
        print_window(data_plot)

        asol_corr = np.interp(asol_times, mp.x + evt_times[0], mp.y)
        asol_col_to_fix = asol.get_column(ax_map[ax])
        fit_comments.append(
            "Events show drift range of {:.2f} arcsec in {} axis".format(
                np.max(asol_corr) - np.min(asol_corr), ax))
        fit_comments.append(
            "Max absolute correction of {:.2f} arcsec for {} axis".format(
                np.max(np.abs(asol_corr)), ax))

        # Convert the correction from arcsecs to mm (divide by 20) and add the correction
        # to the dy and dz columns in the file.
        asol_col_to_fix.values += (asol_corr / 20)

        # Add header keys saving the axis-specific parts of this correction
        write_key(asol, "ADC{}MN".format(ax.upper()), np.mean(ax_data[ax]),
                  "Aspect Drift Corr. Mean of uncorr {} data".format(ax))
        for deg in range(0, 1 + opt['corr_poly_degree']):
            write_key(asol, "ADC{}C{}".format(ax.upper(), deg),
                      getattr(model, 'c{}'.format(deg)).val,
                      "Aspect Drift Corr. {} model c{}".format(ax, deg))

    # Add header keywords about fit
    write_key(asol, "ADCTIME0", evt_times[0],
              "Aspect Drift Corr. reference time")
    write_key(asol, "ADCSRCX", opt['x'], "Aspect Drift Corr. input src x")
    write_key(asol, "ADCSRCY", opt['y'], "Aspect Drift Corr. input src y")
    write_key(asol,
              "ADCSRCR",
              opt['radius'],
              "Aspect Drift Corr. input src radius",
              units='pix')
    write_key(asol, "ADCORDR", opt['corr_poly_degree'],
              "Aspect Drift Corr. model poly degree")
    write_key(asol, "ADCVER", VERSION, "Aspect Drift Corr. tool version")

    v2("-" * 60)
    v2("Fit results")
    for c in fit_comments:
        v2("\t{}".format(c))
    v2("-" * 60)
    v2("Writing out corrected aspect solution file to {}".format(
        opt['outfile']))
    v2("\tTo review fit see correction plots in:")
    for p in plot_list:
        v2("\t\t{}".format(p))

    # Actually write out the new aspect solution file
    asol.write(opt['outfile'], clobber=opt['clobber'])
Example #39
def make_regions_from_binmap(binmap_file, output_dir,
                             reg_format='fits',
                             minx=None, miny=None, bin=None,
                             skip_dmimglasso=False,
                             clobber=False):
    """
    Make CIAO region files from an input binmap.

    Inputs:  binmap_file - fits file of map of bins (pixel values = bin numbers)
             output_dir - name of output directory where region
                          files will be written
             reg_format - format of output region files ('fits' or 'ascii');
                          default = 'fits'
             minx - minimum x sky coordinate of binmap; default = None
             miny - minimum y sky coordinate of binmap; default = None
             bin - pixel binning used for binmap; default = None
             skip_dmimglasso - if True, skip running dmimglasso; default = False
             clobber - if True, overwrite any existing files

    Outputs: CIAO region files for each bin, suitable for spectral
             extraction. Output files are named "output_dir/reg*.fits"
             (for fits format) or "output_dir/reg*.reg" (for ascii format).

    Returns: A list of the region files created (without the output_dir
             prepended).

    Uses "dmimglasso" from CIAO to build the polygonal regions.

    """

    cr = pycrates.read_file(binmap_file)
    file_sky_transform = find_sky_transform(cr)

    # Create the mapping from logical (image or pixel) coordinates
    # to the SKY system. If none of minx, miny, or bin are given
    # then the transform from the file is used, otherwise a new
    # transform is created using the user-supplied information.
    #
    # This is not quite the same as the original version. In part,
    # the original code was a little-more general, in that it
    # could read in "partial" data on the SKY coordinate
    # transformation - e.g. if the file only had CDELT1P and CDELT2P
    # keywords then the original version would pick this up. Using
    # the crates approach is more of an all-or-nothing: you either
    # have all the keywords or none of them.
    #
    if minx is None and miny is None and bin is None:
        sky_transform = file_sky_transform

    else:
        if file_sky_transform is None:
            sys.exit('ERROR: The binmap header does not have ' +
                     'pixel coordinate information. Please specify ' +
                     'the minx, miny, and binning for the binmap.')

        scales = file_sky_transform.get_parameter('SCALE').get_value()
        offsets = file_sky_transform.get_parameter('OFFSET').get_value()

        # There is a slight difference here in how the OFFSET values
        # are processed (these correspond to the CRVAL1/2P values).
        # The transform offsets are defined for the logical coordinate
        # (0, 0), whereas the file values are defined for whatever
        # the CRPIX1/2P values are. Normally these are (0.5,0.5),
        # and I believe the original code rounded the CRVALXP values
        # down, which effectively matches things up.
        #
        if minx is not None:
            offsets[0] = minx * 1.0
        if miny is not None:
            offsets[1] = miny * 1.0

        if bin is not None:
            scales = [bin * 1.0, bin * 1.0]

        sky_transform = pytransform.LINEAR2DTransform()
        sky_transform.get_parameter('ROTATION').set_value(0)
        sky_transform.get_parameter('SCALE').set_value(scales)
        sky_transform.get_parameter('OFFSET').set_value(offsets)

    # This logic could be moved into the if statement above to
    # avoid unneeded work, but it's clearer here.
    scales = sky_transform.get_parameter('SCALE').get_value()
    offsets = sky_transform.get_parameter('OFFSET').get_value()
    binx, biny = scales
    minx, miny = offsets
    file_sky_transform = None

    if not os.path.exists(output_dir):
        p = subprocess.call(['mkdir', output_dir])

    if clobber:
        p = subprocess.call(['rm', '-f', output_dir + '/bin_*.reg'])

    # Check if min bin is negative or starts or ends on the image
    # boundary. If so, assume it is not wanted (e.g., for wvt bin
    # maps).
    binimage = cr.get_image().values
    minbin = int(binimage.min())
    maxbin = int(binimage.max())
    if minbin < 0:
        minbin = 0

    inbin = numpy.where(binimage == minbin)
    if 0 in inbin[0] or numpy.size(binimage, 0) - 1 in inbin[0]:
        minbin += 1

    print('  Using minbin=' + str(minbin) +
          ', maxbin=' + str(maxbin) +
          ', minx=' + str(minx) +
          ', miny=' + str(miny) +
          ', binx=' + str(binx) +
          ', biny=' + str(biny))

    # For each bin, construct region using CIAO's "dmimglasso"
    #
    # The coordinates returned by numpy.where are 0 based, but
    # the FITS logical/image coordinate system is 1 based, so
    # a conversion is needed when passing to sky_transform.
    #
    if not skip_dmimglasso:
        region_comment = '# Region file format: CIAO version 1.0\n'
        if clobber:
            clb_txt = 'yes'
        else:
            clb_txt = 'no'

        for i in range(minbin, maxbin + 1):
            out_region_fits_file = output_dir + '/reg' + \
                str(i) + '.fits'
            inybin, inxbin = numpy.where(binimage == i)
            if len(inybin) == 0:
                continue

            # Convert the j,i values from where into the FITS
            # logical coordinate system lx,ly and then convert
            # to SKY values. It is important that lcoords is
            # a floating-point value, and not an integer one,
            # to ensure that no truncation of the result happens.
            #
            # An alternative would be to set coord=logical
            # when running dmimglasso, so that
            # inxbin+1, inybin+1 could be used (i.e. no need
            # for the coordinate conversion).
            #
            lcoords = numpy.vstack((inxbin + 1.0, inybin + 1.0)).T
            scoords = sky_transform.apply(lcoords)

            for xpos, ypos in scoords:
                # Is this restriction needed?
                xpos = min(xpos, minx + binx * binimage.shape[1])
                ypos = min(ypos, miny + biny * binimage.shape[0])
                cmd = ['dmimglasso', binmap_file,
                       out_region_fits_file,
                       str(xpos), str(ypos), '0.1', '0.1',
                       'value=delta', 'maxdepth=1000000',
                       'clobber=' + clb_txt]
                p = subprocess.call(cmd)
                if p == 0:
                    break

            #
            # Check for failure condition
            #
            # If dmimglasso fails, use ascii conversion work-around
            #
            if p != 0:
                cmd2 = ['dmmakereg', 'region('+output_dir+'/regions/xaf_'+str(i)+'.reg)', out_region_fits_file]
                q = subprocess.call(cmd2)

                print(i, xpos, ypos, out_region_fits_file, p)
                print(cmd)
                print(cmd2)
                print(q)

            if reg_format == 'ascii':
                if os.path.isfile(out_region_fits_file):
                    reg = pycrates.read_file(out_region_fits_file)
                    vertices = reg.get_column(0).values
                    xvertices = vertices[0, 0]
                    yvertices = vertices[0, 1]

                    out_region_file = open(output_dir + '/reg' +
                                           str(i) + '.reg', "w")
                    out_region_file.write(region_comment)

                    for j in range(len(xvertices)):
                        if j == 0:
                            region_text = 'polygon(%7.2f,%7.2f' % (xvertices[j], yvertices[j])
                        else:
                            region_text = region_text + ',%7.2f,%7.2f' % (xvertices[j], yvertices[j])
                    region_text = region_text + ')\n'
                    out_region_file.writelines(region_text)
                    out_region_file.close()
                    reg.close()
                    p = subprocess.call(['rm', '-f', out_region_fits_file])

    # Build region list
    bin_region_list = []
    for i in range(minbin, maxbin + 1):
        if reg_format == 'ascii':
            # Check that each region file exists before adding it to the list
            rname = 'reg' + str(i) + '.reg'
            if os.path.isfile(output_dir + '/' + rname):
                bin_region_list.append(rname)
        else:
            # Check that each region file exists before adding it to the list
            filename = "reg%d.fits" % i
            path = os.path.join(output_dir, filename)
            # It is not 100% clear what the equivalent of checking
            # `pyfits.open(path)[1].data is not None` is. I am going
            # to use a check for a non-zero number of rows, forcing
            # the second block (CXC Data model starts counting at
            # a block number of 1, not 0).
            if os.path.isfile(path) and \
                    pycrates.read_file(path + "[2]").get_nrows() > 0:
                bin_region_list.append(filename)
            else:
                print("Warning: not using %s" % filename)
    return bin_region_list
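For reference, a minimal sketch of the logical-to-SKY mapping assumed above, namely sky = OFFSET + SCALE * logical with the OFFSET defined at logical (0, 0); this is plain NumPy, not the pytransform API:

import numpy

scale = numpy.array([2.0, 2.0])           # invented pixel binning
offset = numpy.array([3000.0, 4000.0])    # invented sky position of logical (0, 0)
lcoords = numpy.array([[1.0, 1.0],
                       [10.0, 5.0]])      # FITS logical coordinates
print(offset + scale * lcoords)           # corresponding SKY coordinates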