Example #1
File: cas.py Project: esheldon/espy
    def combine_scinv(self):
        from glob import glob

        outfile=self.combined_file()
        stdout.write("Will write to file: %s\n" % outfile)

        dir=self.output_dir()
        pattern = self.scinv_file_pattern()
        pattern=path_join(dir, pattern)
        flist = glob(pattern)

        datalist = []
        idmin = 0
        for f in flist:
            print(f)

            tdata = esutil.io.read(f)
            data = numpy_util.add_fields(tdata, [('zid','i4')])
            data['zid'] = idmin + numpy.arange(data.size)
            idmin += data.size

            print(data['zid'].min(), data['zid'].max())
            datalist.append(data)

        hdr = esutil.sfile.read_header(flist[0])
        print('combining data')
        data = numpy_util.combine_arrlist(datalist)

        print('writing file: %s' % outfile)
        esutil.sfile.write(data, outfile, header=hdr)
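The pattern above (read each file, tag rows with a running id, concatenate) is easy to verify in isolation. A minimal sketch using plain numpy, with numpy.concatenate standing in for combine_arrlist (the 'zid'/'z' fields here are illustrative, not from the project):

import numpy as np

dt = [('zid', 'i4'), ('z', 'f4')]
a = np.zeros(3, dtype=dt)
b = np.zeros(2, dtype=dt)

# assign ids that keep running across the pieces, as combine_scinv does
a['zid'] = 0 + np.arange(a.size)
b['zid'] = a.size + np.arange(b.size)

# combine_arrlist concatenates a list of same-dtype structured arrays;
# np.concatenate is the plain-numpy equivalent for this sketch
data = np.concatenate([a, b])
print(data['zid'])   # -> [0 1 2 3 4]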
Example #2
    def _load_data(self):
        from esutil.numpy_util import combine_arrlist

        alldata = []
        for ccd in range(1, 62 + 1):
            datai = files.read_fits_output(run=self.run, ftype="psf", psfnum=self.psfnum, shnum=self.shnum, ccd=ccd)
            if datai is None:
                fname = files.get_output_path(run=self.run, ftype="psf", psfnum=self.psfnum, shnum=self.shnum, ccd=ccd)
                raise ValueError("error reading: %s")
            alldata.append(datai)

        data = combine_arrlist(alldata)
        flags_name = self.model + "_flags"
        w, = where(data[flags_name] == 0)
        if w.size == 0:
            raise ValueError("none with flags==0")
        data = data[w]
        self._data = data
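A note on the `w, = where(...)` idiom used above: numpy.where with a single boolean array returns a tuple of index arrays, one per dimension, so the trailing comma unpacks the 1-d case. A self-contained sketch:

import numpy as np

flags = np.array([0, 4, 0, 1])
w, = np.where(flags == 0)   # unpack the 1-tuple of index arrays
print(w)                    # -> [0 2]
print(flags[w])             # -> [0 0], only the rows passing the cut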
Example #3
    def go(self):
        for psfnum in files.PSFNUMS:
            for shnum in files.SHNUMS:
                
                exp_data=[]
                for ccd in files.CCDS:
                    pipe=self.load_single(psfnum,shnum,ccd)

                    ccd_data_i=self.get_matched_struct(pipe)

                    w,=where(ccd_data_i['flags'] == 0)
                    ccd_data_i=ccd_data_i[w]

                    self.set_use(ccd_data_i)

                    exp_data.append( ccd_data_i )

                exp_data=combine_arrlist(exp_data)

                self.write_data(exp_data, psfnum, shnum)
Example #4
def read_exposure_data(dir,exposurename):

    out_dtype=[('ccd','i1'),
               ('x','f4'),('y','f4'),
               ('psf_flags','i4'),
               ('e1','f4'),('e2','f4'),
               ('e1interp','f4'),('e2interp','f4')]

    datalist=[]
    for ccd in range(1, 1+62):
        psf_file = path_join(dir,'%s_%02d_psf.fits' % (exposurename,ccd))
        psf=esutil.io.read(psf_file)
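        # convert the shapelet coefficients at indices 3 and 4 to
        # ellipticity components; the sqrt(2) factor and the sign flip
        # on e2 follow the convention used in this pipeline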
        psfe1 = psf['shapelets'][:,3]*sqrt(2)
        psfe2 = -psf['shapelets'][:,4]*sqrt(2)

        shear_file = path_join(dir,'%s_%02d_shear.fits' % (exposurename,ccd))
        shear=esutil.io.read(shear_file)
        e1interp = shear['interp_psf_coeffs'][:,3]*sqrt(2)
        e2interp = -shear['interp_psf_coeffs'][:,4]*sqrt(2)

        mpsf,mshear=numpy_util.match(psf['id'], shear['id'])

        tdata = numpy.zeros(mpsf.size, dtype=out_dtype)

        tdata['ccd'] = ccd
        tdata['x'] = psf['x'][mpsf]
        tdata['y'] = psf['y'][mpsf]
        tdata['e1'] = psfe1[mpsf]
        tdata['e2'] = psfe2[mpsf]

        tdata['psf_flags'] = psf['psf_flags'][mpsf]

        tdata['e1interp'] = e1interp[mshear]
        tdata['e2interp'] = e2interp[mshear]

        datalist.append(tdata)


    data = numpy_util.combine_arrlist(datalist)
    return data
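numpy_util.match above appears to return index arrays (mpsf, mshear) such that psf['id'][mpsf] == shear['id'][mshear]; that is how the psf and shear catalogs are row-aligned before filling the output. A plain-numpy sketch of the same alignment, assuming unique ids:

import numpy as np

psf_id = np.array([10, 11, 12, 13])
shear_id = np.array([12, 10, 14])

# indices into each array where the ids agree
_, mpsf, mshear = np.intersect1d(psf_id, shear_id, return_indices=True)
print(psf_id[mpsf])      # -> [10 12]
print(shear_id[mshear])  # -> [10 12]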
Example #5
    def _load_data(self):
        from esutil.numpy_util import combine_arrlist
        ntot=len(self['shnums'])*len(self['psfnums'])*len(self['ccds'])

        itot=1
        if self['progress']:
            from progressbar import ProgressBar
            prog=ProgressBar(width=70, color='green')

        datalist=[]
        for shnum in self['shnums']:
            for psfnum in self['psfnums']:
                for ccd in self['ccds']:
                    if self['progress']:
                        prog.update(frac=float(itot)/ntot)
                        itot += 1

                    data0=self.read_one(shnum,psfnum,ccd)
                    if data0 is not None:
                        if self['setname'] is not None:
                            data=self.select(data0)
                        else:
                            data=data0

                        if data is not None:
                            datalist.append(data)

                    del data0

        if len(datalist) == 0:
            if self['verbose']:
                print('no data read or passed cuts')
            self._data=None
        else:
            self._data=combine_arrlist(datalist)
Example #6
def read(fileobj, **keywords): 
    """
    Name:
        io.read

    Usage:
        import esutil
        data = esutil.io.read(
            filename/fileobject,
            type=None,
            ext=0,
            rows=None, fields=None, columns=None,
            header=False, 
            combine=True, 
            view=None,
            lower=False, upper=False,
            noroot=True, seproot=False,
            verbose=False, 
            ensure_native=False)

    Purpose:
        Provide a single interface to read from a variety of file types.
        Supports reading from a list of files.


    Inputs:
        filename/fileobject:  
            File name or an open file object.  Can also be a sequence.  If a
            sequence is input, the return value will, by default, be a list of
            results.  If the return types are numpy arrays, one can send the
            combine=True (the default) keyword to combine them into a single
            array as long as the data types match.

    Keywords:
        type: 
            A string describing the file type, see below.  If this is not sent,
            then the file type is determined from the file extension.
        ext: 
            Which extension to read within the file.  If the file type
            supports multiple extensions, such as FITS, use this keyword to
            select the one to be read. Default is the first extension with
            data.

        rows:  
            For numpy record-type files such as FITS binary tables or simple
            REC files, setting this keyword will return a subset of the rows.
            For FITS, this requires reading the entire file and selecting a
            subset.  For REC files only the requested rows are read from disk
            by using the recfile package.  Default is all rows.

        fields=, columns=:  
            For numpy record-type files such as FITS binary tables or simple
            REC files, return a subset of the columns or fields.  The keywords
            "fields" and "columns" are synonyms.  For FITS, this requires
            reading the entire file and selecting a subset.  For REC files only
            the requested rows are read from disk by using the recfile package.
            Default is all columns.

        header:  
            If True, and the file type supports header+data, return a tuple
            (data, header).  Can also be 'only' in which case only the header
            is read and returned (rec and fits only for now).  Default is
            False.

        combine:  If a list of filenames/fileobjects is sent, the default
            behavior is to return a list of data.  If combine=True and the
            data are numpy arrays, attempt to combine them into a single
            array.  Only works if the data types match.  Default True
        view:  If the result is derived from a numpy array, set this to
            pick the view.  E.g. pyfits returns a special pyfits type for
            binary table.  You can request a simple numpy array with fields
            by setting view=numpy.ndarray, or a numpy recarray type with
            view=numpy.recarray

        lower,upper:  For FITS files, if true, convert the field names to
            all lower or all upper case.  Certain FITS writers tend to
            write all field names in capitals, which can be annoying.

        noroot:  For XML files, do not return the root name as the base
            name in the dictionary.  Default is True
        seproot: For XML files, return a tuple (data, rootname) instead of
            just the data under the root.

        ensure_native: For numpy arrays, make sure data is in native
            byte ordering.

    Currently Supported File Types:
        fits
            Flexible Image Transport System
        rec
            Simple ascii header followed by data in binary or text form. These
            files can be written/read using the esutil.sfile module.  REC files
            support appending rows.  Also supports reading sub-selections of
            rows and columns.
        xml
            Extensible Markup Language
        json
            JavaScript Object Notation.  Less flexible than XML but more useful
            in most practical situations such as storing inhomogeneous data in
            a portable way. 
        yaml
            A nice, human readable markup language, especially useful
            for configuration files.  YAML stands for
                YAML Ain't Markup Language
        pyobj
            A straight dump of an object to disk using its repr().  Files are
            written using pprint and read simply using eval(open(file).read()).

            This is not secure so use with caution.


    Revision History:
        Use **keywords for input and for sending to all called methods. Much
        more flexible when adding new keywords and file types.
        2010
    """


    verbose = keywords.get('verbose', False)

    # If input is a sequence, read them all.
    if isinstance(fileobj, (list,tuple)):


        flist = fileobj
        nfiles=len(flist)

        if nfiles==1:
            return read(flist[0], **keywords) 

        combine = keywords.get('combine', True)

        # default to showing progress when reading a list of files
        verbose_progress = keywords.get('verbose', True)

        # a list was given
        alldata = []
        for i,f in enumerate(flist):
            if verbose_progress:
                print("reading %d/%d %s" % (i+1,nfiles,f))

            # note: all keywords, including fields/columns, are passed on
            # to each individual read
            data = read(f, **keywords) 
            alldata.append(data)

        if combine:
            fn,fobj,type,fs = _get_fname_ftype_from_inputs(fileobj[0], **keywords)
            if type == 'fits' or type == 'rec':
                # this will only work if all the data has the
                # same structure
                alldata = numpy_util.combine_arrlist(alldata)
        return alldata

    # a scalar was input
    fname,fobj,type,fs = _get_fname_ftype_from_inputs(fileobj, **keywords)

    if fs == 'hdfs':
        with hdfs.HDFSFile(fname, verbose=verbose) as hdfs_file:
            data = hdfs_file.read(read, **keywords)
        return data
    else:
        if verbose:
            print("reading:",fname)

    # pick the right reader based on type
    if type == 'fits':
        data = read_fits(fobj, **keywords)
    elif type == 'json':
        data = json_util.read(fobj, **keywords)
    elif type == 'yaml':
        data = read_yaml(fobj, **keywords)
    elif type == 'rec':
        data = read_rec(fobj, **keywords)
    elif type == 'xml':
        data = read_xml(fobj, **keywords)
    elif type == 'pyobj':
        data = read_pyobj(fobj, **keywords)
    else:
        raise ValueError("Don't know about file type '%s'" % type)

    return data
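A short usage sketch for the interface documented above; the file names are hypothetical, and the behavior follows the docstring (a list input returns a list of results unless combine=True merges matching record arrays):

import esutil

# single file: the type is inferred from the .fits extension
data = esutil.io.read('scat-01.fits')

# list of REC files, combined into one array when the dtypes match
flist = ['chunk1.rec', 'chunk2.rec']
data = esutil.io.read(flist, combine=True, verbose=True)

# header='only' returns just the header (rec and fits only)
hdr = esutil.io.read('scat-01.fits', header='only')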
Example #7
def read_output_set(run, psfnums, shnums, 
                    objtype=None, 
                    s2n_field='s2n_w',
                    s2n_min=None,
                    s2n_max=None,
                    s2_max=None,
                    gsens_min=None,
                    gerr_max=None,
                    columns=None,
                    subtract_mean=False,
                    progress=False):
    """
    Read some data based on the input.
    
    Multiple files may be read; missing files are skipped.

    Note that only a single shear number is typically expected, but many
    psfnums can be sent.

    Only objects with flags==0, or with only the 2**16 flag bit set, are
    kept.

    parameters
    ----------
    run: string
        run id
    psfnums: integers
        the psf numbers to read
    shnums: integers
        The shear numbers to read.
    objtype: string, optional
        optionally select only objects with this best-fit model
    s2n_field: string, optional
        field used for the S/N cuts, default 's2n_w'
    s2n_min, s2n_max: numbers, optional
        keep objects with s2n_field in (s2n_min, s2n_max)
    s2_max: number, optional
        keep objects with s2 below this value
    gsens_min: number, optional
        keep objects with both g sensitivities above this value
    gerr_max: number, optional
        keep objects with both g errors below this value
    columns: optional
        only return these columns
    subtract_mean: bool, optional
        Calculate the mean g and subtract it
    progress: bool, optional
        if True, show a progress bar
    """
    from esutil.numpy_util import strmatch, combine_arrlist
    psfnums=get_psfnums(psfnums)
    shnums=get_shnums(shnums)

    ntot=len(shnums)*len(psfnums)*62

    itot=1
    if progress:
        from progressbar import ProgressBar
        prog=ProgressBar(width=70, color='green')

    datalist=[]
    for shnum in shnums:
        shlist=[]
        for psfnum in psfnums:
            for ccd in range(1, 62+1):
                if progress:
                    prog.update(frac=float(itot)/ntot)
                    itot += 1

                fname=get_output_path(run=run, psfnum=psfnum, shnum=shnum, 
                                      ccd=ccd, ftype='shear')
                if os.path.exists(fname):
                    data0=read_fits_output(run=run, psfnum=psfnum, 
                                           shnum=shnum, ccd=ccd, 
                                           ftype='shear',
                                           columns=columns, 
                                           verbose=False)

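                    # keep flags==0, or objects where only the 2**16
                    # bit is set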
                    logic=(data0['flags']==0) | (data0['flags']==65536)
                    if objtype:
                        logic=logic & strmatch(data0['model'],objtype)

                    if s2n_min is not None:
                        logic=logic & (data0[s2n_field] > s2n_min)
                    if s2n_max is not None:
                        logic=logic & (data0[s2n_field] < s2n_max)

                    if s2_max is not None:
                        logic=logic & (data0['s2'] < s2_max)
                    if gsens_min is not None:
                        logic=logic \
                            & (data0['gsens'][:,0] > gsens_min) \
                            & (data0['gsens'][:,1] > gsens_min)
                    if gerr_max is not None:
                        g1err=sqrt(data0['gcov'][:,0,0])
                        g2err=sqrt(data0['gcov'][:,1,1])
                        logic=logic \
                            & (g1err < gerr_max) & (g2err < gerr_max)



                    wkeep,=where(logic)
                    if wkeep.size==0:
                        print 'No objects passed cuts'
                    else:
                        data0=data0[wkeep]
                        shlist.append(data0)

        if len(shlist) == 0:
            # nothing passed the cuts for this shear number
            continue

        shdata=combine_arrlist(shlist)

        if subtract_mean:
            g1mean = shdata['g'][:,0].mean()
            g2mean = shdata['g'][:,1].mean()
            shdata['g'][:,0] -= g1mean
            shdata['g'][:,1] -= g2mean
        datalist.append(shdata)

    if len(datalist)==0:
        raise RuntimeError("no outputs were found")
    data=combine_arrlist(datalist)
    return data
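The cuts above accumulate one boolean mask, AND-ing each optional criterion onto the base flags selection. A minimal sketch of the pattern with hypothetical values:

import numpy as np

s2n = np.array([5., 20., 50., 80.])
flags = np.array([0, 0, 4, 0])

logic = (flags == 0)             # base selection
s2n_min, s2n_max = 10., 100.     # optional cuts
if s2n_min is not None:
    logic = logic & (s2n > s2n_min)
if s2n_max is not None:
    logic = logic & (s2n < s2n_max)

wkeep, = np.where(logic)
print(wkeep)   # -> [1 3]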
Example #8
def read(fileobj, **keywords): 
    """
    Name:
        io.read

    Usage:
        import esutil
        data = esutil.io.read(
            filename/fileobject,
            type=None,
            ext=0,
            rows=None, fields=None, columns=None,
            header=False, 
            combine=False, 
            view=None,
            lower=False, upper=False,
            noroot=True, seproot=False,
            verbose=False, 
            ensure_native=False)

    Purpose:
        Provide a single interface to read from a variety of file types.
        Supports reading from a list of files.


    Inputs:
        filename/fileobject:  
            File name or an open file object.  Can also be a sequence.  If a
            sequence is input, the return value will, by default, be a list of
            results.  If the return types are numpy arrays, one can send the
            combine=True keyword to combine them into a single array as long
            as the data types match.

    Keywords:
        type: 
            A string describing the file type, see below.  If this is not sent,
            then the file type is determined from the file extension.
        ext: 
            Which extension to read within the file.  If the file type
            supports multiple extensions, such as FITS, use this keyword to
            select the one to be read. Default is the first extension with
            data.

        rows:  
            For numpy record-type files such as FITS binary tables or simple
            REC files, setting this keyword will return a subset of the rows.
            For FITS, this requires reading the entire file and selecting a
            subset.  For REC files only the requested rows are read from disk
            by using the recfile package.  Default is all rows.

        fields=, columns=:  
            For numpy record-type files such as FITS binary tables or simple
            REC files, return a subset of the columns or fields.  The keywords
            "fields" and "columns" are synonyms.  For FITS, this requires
            reading the entire file and selecting a subset.  For REC files only
            the requested rows are read from disk by using the recfile package.
            Default is all columns.

        header:  
            If True, and the file type supports header+data, return a tuple
            (data, header).  Can also be 'only' in which case only the header
            is read and returned (rec and fits only for now).  Default is
            False.

        combine:  If a list of filenames/fileobjects is sent, the default
            behavior is to return a list of data.  If combine=True and the
            data are numpy arrays, attempt to combine them into a single
            array.  Only works if the data types match.
        view:  If the result is derived from a numpy array, set this to
            pick the view.  E.g. pyfits returns a special pyfits type for
            binary table.  You can request a simple numpy array with fields
            by setting view=numpy.ndarray, or a numpy recarray type with
            view=numpy.recarray

        lower,upper:  For FITS files, if true, convert the field names to
            all lower or all upper case.  Certain FITS writers tend to
            write all field names in capitals, which can be annoying.

        noroot:  For XML files, do not return the root name as the base
            name in the dictionary.  Default is True
        seproot: For XML files, return a tuple (data, rootname) instead of
            just the data under the root.

        ensure_native: For numpy arrays, make sure data is in native
            byte ordering.

    Currently Supported File Types:
        fits
            Flexible Image Transport System
        rec
            Simple ascii header followed by data in binary or text form. These
            files can be written/read using the esutil.sfile module.  REC files
            support appending rows.  Also supports reading sub-selections of
            rows and columns.
        xml
            Extensible Markup Language
        json
            JavaScript Object Notation.  Less flexible than XML but more useful
            in most practical situations such as storing inhomogeneous data in
            a portable way. 
        yaml
            A nice, human readable markup language, especially useful
            for configuration files.  YAML stands for
                YAML Ain't Markup Language
        pyobj
            A straight dump of an object to disk using its repr().  Files are
            written using pprint and read simply using eval(open(file).read()).

            This is not secure so use with caution.


    Revision History:
        Use **keywords for input and for sending to all called methods. Much
        more flexible when adding new keywords and file types.
        2010
    """


    verbose = keywords.get('verbose', False)

    # If input is a sequence, read them all.
    if isinstance(fileobj, (list,tuple)):
        combine = keywords.get('combine', False)

        # a list was given
        alldata = []
        for f in fileobj:
            # note: all keywords, including fields/columns, are passed on
            # to each individual read
            data = read(f, **keywords) 
            alldata.append(data)

        if combine:
            if len(fileobj) == 1:
                alldata = alldata[0]
            else:
                fn,fobj,type,fs = _get_fname_ftype_from_inputs(fileobj[0], **keywords)
                if type == 'fits' or type == 'rec':
                    # this will only work if all the data has the
                    # same structure
                    if verbose:
                        stderr.write("Combining arrays\n")
                    alldata = numpy_util.combine_arrlist(alldata)
        return alldata

    # a scalar was input
    fname,fobj,type,fs = _get_fname_ftype_from_inputs(fileobj, **keywords)

    if fs == 'hdfs':
        with hdfs.HDFSFile(fname, verbose=verbose) as hdfs_file:
            data = hdfs_file.read(read, **keywords)
        return data
    else:
        if verbose:
            stderr.write("Reading: %s\n" % fname)

    # pick the right reader based on type
    if type == 'fits':
        data = read_fits(fobj, **keywords)
    elif type == 'json':
        data = json_util.read(fobj, **keywords)
    elif type == 'yaml':
        data = read_yaml(fobj, **keywords)
    elif type == 'rec':
        data = read_rec(fobj, **keywords)
    elif type == 'xml':
        data = read_xml(fobj, **keywords)
    elif type == 'pyobj':
        data = read_pyobj(fobj, **keywords)
    else:
        raise ValueError("Don't know about file type '%s'" % type)

    return data
Example #9
def doplot(serun, exposurename, example_wcs_byccd, nx=3, ny=3, diff=False,
           imageformat='png', ptypes=['unbinned','comparebinned']):

    stdout.write("serun: %s\n" % serun)
    stdout.write("exposurename: %s\n" % exposurename)
    stdout.write("\tReading checkpsf data\n")

    alldata = []
    allstats = []

    for ccd in range(1,62+1):

        try:
            data = deswl.files.wlse_read(exposurename,ccd,'checkpsf',
                                         serun=serun)

            fields=['e1','e2','e1interp','e2interp']
            stats = du.stats_xy(data, fields, nx=nx, ny=ny, typ='median')

            ccd_wcs = example_wcs_byccd[ccd]
            fx,fy = ccd_wcs.image2sky(data['x'], data['y'])
            mfx,mfy = ccd_wcs.image2sky(stats['mx'],stats['my'])

            newdata = numpy_util.add_fields(data, [('fx','f4'),('fy','f4')])
            newdata['fx'] = fx - 337.3
            newdata['fy'] = fy + 15.0

            fadd=[('mfx','f4'),('mfy','f4'),('me1diff','f4'),('me2diff','f4')]
            newstats = numpy_util.add_fields(stats,fadd)
            newstats['mfx'] = mfx - 337.3
            newstats['mfy'] = mfy + 15.0
            newstats['me1diff'] = newstats['me1interp']-newstats['me1']
            newstats['me2diff'] = newstats['me2interp']-newstats['me2']

            alldata.append(newdata)
            allstats.append(newstats)

        except Exception:
            stdout.write("Failed ccd=%s\n" % ccd)
            print(sys.exc_info())

    data = numpy_util.combine_arrlist(alldata)
    stats = numpy_util.combine_arrlist(allstats)

    stdout.write("\tPlotting whiskers\n")
    plt = du.setuplot('Agg')
    plt.clf()

    nplots = len(ptypes)
    xsize=8*nplots
    fig=plt.figure(figsize=(xsize,7))

    iplot = 1

    u, v = polar2whisker(data['e1'], data['e2'])
    uinterp, vinterp = polar2whisker(data['e1interp'], data['e2interp'])
    mu, mv = polar2whisker(stats['me1'],stats['me2'])
    muinterp, mvinterp = polar2whisker(stats['me1interp'],stats['me2interp'])
    mudiff, mvdiff = polar2whisker(stats['me1diff'],stats['me2diff'])



    # example size to plot
    psize=0.01

    if 'unbinned' in ptypes or 'compareunbinned' in ptypes:
        stdout.write("\t\tDoing plot type 'unbinned'\n")
        ax = fig.add_subplot(1,nplots,iplot)
        iplot += 1

        # plot individual stars
        scale=3.
        whiskers(ax, data['fx'], data['fy'], u, v, scale=scale,
                 linewidth=0.25)

        # a measure of scale
        xtext=-1.2
        ax.text(xtext, 0.9, str(psize), verticalalignment='center')
        whiskers(ax, xtext+0.18, 0.9, psize, 0.0, color='blue', scale=scale)

        if 'compareunbinned' in ptypes:
            stdout.write("\t\tOverplotting 'compareunbinned'\n")
            whiskers(ax, data['fx'], data['fy'], uinterp, vinterp, 
                     scale=scale, color='red', linewidth=0.25)
            
            # add a legend

            ypos = 1.1
            ystep = 0.07
            xtext = -1.1

            ax.text(xtext, ypos-ystep, "data", verticalalignment='center')
            whiskers(ax, xtext+0.23, ypos-ystep, psize, 0.0, 
                     color='black', scale=scale)
            ax.text(xtext, ypos-2*ystep, "interp", verticalalignment='center')
            whiskers(ax, xtext+0.23, ypos-2*ystep, psize, 0.0, 
                     color='red', scale=scale)


        ax.set_xlim(-1.3,1.3)
        ax.set_ylim(-1.3,1.3)
        set_minor_ticks(ax)

    if 'comparebinned' in ptypes:
        # comparison plot for the binned data
        ax = fig.add_subplot(1,nplots,iplot)
        iplot += 1

        scale=10.
        whiskers(ax, stats['mfx'], stats['mfy'], mu, mv, scale=scale)
        whiskers(ax, stats['mfx'], stats['mfy'], muinterp, mvinterp, 
                 scale=scale, color='red')

        # legend
        ypos = 1.1
        ystep = 0.07
        xtext = -1.1

        ax.text(xtext, ypos, str(psize), verticalalignment='center')
        whiskers(ax, xtext+0.23, ypos, psize, 0.0, color='blue', scale=scale)

        ax.text(xtext, ypos-ystep, "data", verticalalignment='center')
        whiskers(ax, xtext+0.23, ypos-ystep, psize, 0.0, 
                 color='black', scale=scale)
        ax.text(xtext, ypos-2*ystep, "interp", verticalalignment='center')
        whiskers(ax, xtext+0.23, ypos-2*ystep, psize, 0.0, 
                 color='red', scale=scale)

        ax.set_xlim(-1.3,1.3)
        ax.set_ylim(-1.3,1.3)
        set_minor_ticks(ax)

    if 'diffbinned' in ptypes:
        # comparison plot for the binned data
        ax = fig.add_subplot(1,nplots,iplot)
        iplot += 1

        scale=50.
        psize=0.001
        # legend position; defined here since 'comparebinned' may not run
        ypos = 1.1
        xtext = -1.1
        whiskers(ax, stats['mfx'], stats['mfy'], mudiff, mvdiff, scale=scale)

        ax.text(xtext, ypos, str(psize), verticalalignment='center')
        whiskers(ax, xtext+0.23, ypos, psize, 0.0, color='blue', scale=scale)

        ax.set_xlim(-1.3,1.3)
        ax.set_ylim(-1.3,1.3)
        set_minor_ticks(ax)


    outdir='/home/users/esheldon/www/tmp/plots'
    outdir=os.path.join(outdir, serun)
    if not os.path.exists(outdir):
        os.makedirs(outdir)

    ptypes_string = '-'.join(ptypes)
    outfile = "%s-checkpsf-%s.%s" % (exposurename,ptypes_string, imageformat)
    outfile=os.path.join(outdir,outfile)
    stdout.write("\tWriting plot file: %s\n" % outfile)
    plt.savefig(outfile, bbox_inches='tight', pad_inches=0.1)
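polar2whisker is not shown in this listing, but its conversion can be read off Example #10 below: a spin-2 ellipticity (e1, e2) becomes a headless whisker of length sqrt(e1**2 + e2**2) at position angle 0.5*arctan2(e2, e1). A sketch under that assumption:

import numpy as np

def polar2whisker_sketch(e1, e2):
    # spin-2 quantity -> vector: half the polar angle, same magnitude
    angle = 0.5 * np.arctan2(e2, e1)
    length = np.sqrt(e1**2 + e2**2)
    u = length * np.cos(angle)   # x component of the whisker
    v = length * np.sin(angle)   # y component of the whisker
    return u, v

u, v = polar2whisker_sketch(np.array([0.01]), np.array([0.0]))
print(u, v)   # -> [0.01] [0.]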
Example #10
def plot_shearxy_byccd(serun, region, 
                       example_wcs_byccd=None, outfile=None, typ='pdf'):
    """
    Take the input data, split by ccd, and plot each separately.  The x,y
    are converted to a ra,dec-like system that preserves the layout of
    the camera.  We don't use the actual ra/dec here because we want to
    be able to combine multiple exposures on the same plot.
    """

    from numpy import arctan2, pi

    fpath=deswl.files.wlse_collated_path(serun,'gal',ftype='rec',region=region)
    data = esutil.io.read(fpath, verbose=True)
    if data.size == 0:
        stdout.write("No objects in region: %s\n" % region)
        return


    outdir=os.path.dirname(fpath)
    outdir=os.path.join(outdir, 'plots')
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    outfile=os.path.basename(fpath).replace('.rec', '-shear-byccd.'+typ)
    outfile=os.path.join(outdir, outfile)
    stdout.write("Will write image: %s\n" % outfile)


    plt=setuplot('Agg')
    plt.clf()
    if example_wcs_byccd is None:
        example_wcs_byccd = get_exposure_wcs_example()

    h,rev = esutil.stat.histogram(data['ccd'], rev=True, min=1,max=62)
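    # rev holds IDL-style reverse indices: the rows that fell into
    # bin iccd are data[ rev[ rev[iccd]:rev[iccd+1] ] ]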

    allstats = []

    running_shear1=0.0
    running_shear2=0.0
    running_shear_weight=0.0
    for iccd in range(len(h)):
        if rev[iccd] != rev[iccd+1]:
            w = rev[ rev[iccd]:rev[iccd+1] ]

            ccd = data['ccd'][w[0]]

            stdout.write('\tccd=%s\n' % ccd)

            # note: data[w] is a copy (fancy indexing), not a view
            stats = stats_shearxy(data[w], nx=3, ny=3)

            ccd_wcs = example_wcs_byccd[ccd]
            xx,yy = ccd_wcs.image2sky(stats['mx'],stats['my'])

            xx -= 337.3
            yy += 15.0
            stats['mx'] = xx
            stats['my'] = yy

            allstats.append(stats)

            weights = 1.0/(stats['mshear1_err']**2 + stats['mshear2_err']**2)
            s1 = stats['mshear1']*weights
            s2 = stats['mshear2']*weights
            s1sum=s1.sum()
            s2sum=s2.sum()
            wsum = weights.sum()

            running_shear1 += s1sum
            running_shear2 += s2sum
            running_shear_weight += wsum

            thise1 = s1sum/wsum
            thise2 = s2sum/wsum
            thisangle = 0.5*arctan2(thise2, thise1)*180./pi
            stdout.write("\t\t<e1>=%s\n" % thise1 )
            stdout.write("\t\t<e2>=%s\n" % thise2 )
            stdout.write("\t\t<angle>=%s\n" % thisangle )




    stats = numpy_util.combine_arrlist(allstats)

    mshear1 = running_shear1/running_shear_weight
    mshear2 = running_shear2/running_shear_weight
    mangle = 0.5*arctan2(mshear2, mshear1)*180.0/pi
    mshear = numpy.sqrt( mshear1**2 + mshear2**2 )

    mangle_err = stats['mangle'].std()/numpy.sqrt(stats['mangle'].size)

    print "mins:",stats['mshear1'].min(), stats['mshear2'].min(), \
            stats['mshear'].min()
    print "maxs:",stats['mshear1'].max(), stats['mshear2'].max(), \
            stats['mshear'].max()
    stdout.write("overall averages: \n\tshear1: "
                 "%s\n\tshear2: %s\n\tangle: %s\n" % (mshear1, mshear2,mangle))
    stdout.write('\tangle error approximately %s\n' % mangle_err)

    # x component of the "vector" version
    u = stats['mshear']*numpy.cos(stats['mangle'])
    # y component of the "vector" version
    v = stats['mshear']*numpy.sin(stats['mangle'])

    # scale=1 means a vector of length 1 will cover essentially
    # all the plot.  I want them slightly smaller, so I'm using
    # scale=1.5
    scale=1.5
    ax=plt.axes()
    from matplotlib.ticker import MultipleLocator as ml
    ax.xaxis.set_minor_locator(ml(0.1))
    ax.yaxis.set_minor_locator(ml(0.1))

    whiskers(plt, stats['mx'], stats['my'], u, v)


    xtext=336.0 - 337.3
    whiskers(plt, 336.18-337.3, -14.1+15.0, 0.05, 0.0, color='blue')
    plt.text(xtext, -14.1+15, "0.05", verticalalignment='center')


    ystart=-15.6 + 15
    ystep=-0.08
    istep = 0
    ytext=ystart+istep*ystep

    plt.text(xtext, ytext, r"$\langle \gamma_1 \rangle=%0.3f$" % mshear1,
               verticalalignment='center')
    istep+=1
    ytext=ystart+istep*ystep
    plt.text(xtext, ytext, r"$\langle \gamma_2 \rangle=%0.3f$" % mshear2,
               verticalalignment='center')
    istep+=1
    ytext=ystart+istep*ystep
    plt.text(xtext,ytext,r"$\langle \theta \rangle=%0.2f^{\circ}$" % mangle,
               verticalalignment='center')


    # plot a whisker representing the average
    istep+=1
    ytext=ystart+istep*ystep
    svec1 = mshear*numpy.cos( mangle*pi/180. )
    svec2 = mshear*numpy.sin( mangle*pi/180. )
    plt.text(xtext, ytext, r"$\langle \gamma \rangle=%0.3f$" % mshear, 
               verticalalignment='center')
    istep+=1
    ytext=ystart+istep*ystep
    whiskers(plt, 336.225-337.3, ytext, svec1, svec2, color='red')


    label = 'region%s' % region
    ax=plt.axes()
    plt.text(0.90, 0.9, label, 
               horizontalalignment='center', 
               verticalalignment='center', 
               transform=ax.transAxes, 
               fontsize=18)
    # this is so the arrows have the right angle

    plt.axis('equal')
    plt.ylim(-16.1+15.0,-13.9+15.0)

    if outfile is not None:
        stdout.write("Writing file: %s\n" % outfile)
        plt.savefig(outfile, bbox_inches='tight', pad_inches=0.2)
    else:
        plt.show()

    return example_wcs_byccd
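The running averages above form an inverse-variance weighted mean accumulated over ccds. The same arithmetic in a compact, self-contained sketch (the per-bin values are made up):

import numpy as np

shear1 = np.array([0.010, 0.014])
shear2 = np.array([-0.002, 0.001])
err1 = np.array([0.003, 0.004])
err2 = np.array([0.003, 0.004])

# one weight per bin, combining both component errors
weights = 1.0 / (err1**2 + err2**2)

mshear1 = (shear1 * weights).sum() / weights.sum()
mshear2 = (shear2 * weights).sum() / weights.sum()
mangle = 0.5 * np.arctan2(mshear2, mshear1) * 180.0 / np.pi
mshear = np.sqrt(mshear1**2 + mshear2**2)
print(mshear1, mshear2, mangle, mshear)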