Code example #1
 def _export_set(self, fname):
     """Export raw to EEGLAB file."""
     data = self.current["data"].get_data() * 1e6  # convert to microvolts
     fs = self.current["data"].info["sfreq"]
     times = self.current["data"].times
     ch_names = self.current["data"].info["ch_names"]
     chanlocs = fromarrays([ch_names], names=["labels"])
     events = fromarrays([
         self.current["data"].annotations.description,
         self.current["data"].annotations.onset * fs + 1,
         self.current["data"].annotations.duration * fs
     ],
                         names=["type", "latency", "duration"])
     savemat(fname,
             dict(EEG=dict(data=data,
                           setname=fname,
                           nbchan=data.shape[0],
                           pnts=data.shape[1],
                           trials=1,
                           srate=fs,
                           xmin=times[0],
                           xmax=times[-1],
                           chanlocs=chanlocs,
                           event=events,
                           icawinv=[],
                           icasphere=[],
                           icaweights=[])),
             appendmat=False)
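For reference, savemat turns the events record array above into a 1×n MATLAB struct array with fields type, latency and duration. A minimal, self-contained sketch of just that step (the sampling rate and annotation values here are made up):

import numpy as np
from numpy.core.records import fromarrays
from scipy.io import savemat

fs = 250.0  # hypothetical sampling rate in Hz
description = np.array(['stim', 'resp'])
onset = np.array([0.5, 1.2])       # seconds
duration = np.array([0.0, 0.0])    # seconds

# EEGLAB latencies are 1-based sample indices, hence the +1
events = fromarrays([description, onset * fs + 1, duration * fs],
                    names=['type', 'latency', 'duration'])
savemat('events.mat', {'event': events}, appendmat=False)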
Code example #2
File: mbdb.py, Project: ssamuroff/mbii
def classify_subhalos(data, snapshot=85):
    # Setup the database connection
    sqlserver='localhost'
    user='******'
    password='******'
    dbname='mb2_hydro'
    unix_socket='/home/rmandelb.proj/flanusse/mysql/mysql.sock'
    db = mdb.connect(sqlserver, user, password, dbname, unix_socket=unix_socket)
    names = ['groupId','central']
    add_names = ['x', 'y', 'z'] 

    dt = [('groupId',int),('central',int)] #, ('x',float), ('y',float), ('z',float)]

    out = np.zeros(len(data), dtype=dt)

    for i, subhalo in enumerate(data):
        sql = "SELECT groupId, central FROM subfind_halos WHERE snapnum=%d AND len=%d AND groupId=%d;"%(snapshot, subhalo['len'], math.ceil(subhalo['groupid']))

        #print sql
        cursor = db.cursor()
        cursor.execute(sql)

        # import pdb; pdb.set_trace()  # debug breakpoint, left disabled

        try:  
            results = fromarrays(np.array(cursor.fetchall()).squeeze().T, names=names)
        except:
            print('Could not match results')
            import pdb ; pdb.set_trace()
            continue
        print(i, results)

        if (results.size>1):
            # Now try again, also matching by position
            sql = "SELECT groupId, central, x, y, z FROM subfind_halos WHERE snapnum=%d AND len=%d AND groupId=%d;"%(snapshot, subhalo['len'], math.ceil(subhalo['groupid']))
            cursor = db.cursor()
            cursor.execute(sql)
            results = fromarrays(np.array(cursor.fetchall()).squeeze().T, names=names+add_names)

            # Find something in approximately the right 3D position
            select = np.isclose(results['x'],subhalo['pos'][0]) & np.isclose(results['y'],subhalo['pos'][1]) & np.isclose(results['z'],subhalo['pos'][2])
            results = results[select]
        if (results.size>1):
            # If that doesn't solve the problem then pause here
            import pdb ; pdb.set_trace()
        
        for colname in results.dtype.names:
            out[colname][i] = results[colname]

    outfits = fi.FITS('/home/ssamurof/subhalo_central_flag.fits','rw')
    outfits.write(out)
    outfits.close()
Code example #3
def find_centre(g, snapshot=85, simulation='massiveblackii'):
    """Locate the mass centroid of a particular group of galaxies"""
    if (simulation == 'massiveblackii'):
        import pymysql as mdb
        sql = 'SELECT x,y,z FROM subfind_groups WHERE groupId=%d AND snapnum=%d;' % (
            g, snapshot)

        sqlserver = 'localhost'
        user = '******'
        password = '******'
        dbname = 'mb2_hydro'
        unix_socket = '/home/rmandelb.proj/flanusse/mysql/mysql.sock'
        db = mdb.connect(sqlserver,
                         user,
                         password,
                         dbname,
                         unix_socket=unix_socket)

        c = db.cursor()
        c.execute(sql)
        results = fromarrays(np.array(c.fetchall()).squeeze().T, names='x,y,z')
        return results
    elif (simulation == 'illustris'):
        import illustris_python as il
        root = '/nfs/nas-0-1/vat/Illustris-1'
        info = il.groupcat.loadSingle(root, snapshot, haloID=g)['GroupPos']
        out = np.zeros(1, dtype=[('x', float), ('y', float), ('z', float)])
        out['x'] = info[0]
        out['y'] = info[1]
        out['z'] = info[2]
        return out
Code example #4
    def to_recarray(self, keys=None):
        """
        Converts tabular data types into record arrays, which is useful e.g. for saving as an HDF table.
        In order to be converted, the tabular data source must be flattenable.

        Parameters
        ----------
        keys : list of fields to be copied into output. Defaults to all existing keys.

        Returns
        -------
        numpy recarray version of self

        """
        from numpy.core import records
        if keys is None:
            keys = list(self.keys())

        columns = [self.__getitem__(k) for k in keys]

        filtered_cols = [
            i for i, v in enumerate(columns) if not v.dtype == 'O'
        ]

        cols = [columns[i] for i in filtered_cols]
        keys_ = [keys[i] for i in filtered_cols]

        dt = [(k, v.dtype, v.shape[1:]) for k, v in zip(keys_, cols)]

        return records.fromarrays(cols, names=keys_, dtype=dt)
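The (k, v.dtype, v.shape[1:]) triples above are what let multi-dimensional columns survive the conversion as per-record subarrays. A small stand-alone sketch of that dtype construction (column names invented for illustration):

import numpy as np
from numpy.core import records

x = np.zeros((4, 3))   # each record carries a length-3 vector
t = np.arange(4.0)     # plain scalar column
dt = [('x', x.dtype, x.shape[1:]), ('t', t.dtype)]
rec = records.fromarrays([x, t], names=['x', 't'], dtype=dt)
print(rec['x'].shape)  # (4, 3): the trailing shape is kept per record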
Code example #5
File: mbdb.py, Project: ssamuroff/mbii
    def cross_match(self, source, table, fields, match_column, match_column2='', fatal_errors=True):

        # Build the SQL query
        if match_column2=='':
            match_column2=match_column
        print('Will cross match column %s in the table provided with %s in table %s' % (match_column, match_column2, table))
        print('Building query...')
        sql = "SELECT %s FROM %s WHERE %s IN ("%(fields, table, match_column)

        for row in source[match_column]:
            sql+="'%d',"%int(row)
        sql = sql[:-1] + ')'
        
        try:
            # prepare a cursor for the query
            cursor = self.db.cursor()
            cursor.execute(sql)
            print("Fetching %d entries" % cursor.rowcount)
        except:
            if fatal_errors:
                raise
            else:
                print("Error when runnning the SQL command")
                return

        results = fromarrays(np.array(cursor.fetchall()).squeeze().T,names=fields)

        # Finally match the results
        # Without this the results of the second query are misaligned  
        sm, rm = di.match_results(source, results, name1='subfindId', name2='subfindId')
        return sm, rm
Code example #6
File: save.py, Project: jimmyliao13536/PhD-python
def savedict(filename, results, yaml_kwargs=dict()):
    """ Save a dictionary to a file. Choose file format based
        upon extension to filename. """
    is_results = lambda x: isinstance(x,dict) or isinstance(x,list)

    if isinstance(filename,str) and is_results(results):
        pass
    elif is_results(filename) and isinstance(results, str):
        filename, results = results, filename
    else:
        raise Exception("Unrecoginized types for filename and results")

    filename = expandvars(filename)

    extension = os.path.splitext(filename)[-1]

    if extension == '.yaml':
        open(filename, 'w').write(yaml.dump(tolist(results), **yaml_kwargs))
    elif extension == '.hdf5':
        if not isinstance(results, dict): raise Exception("Can only save dicts to hdf5 format.")
        import h5py
        f=h5py.File(filename,'w')
        for k,v in results.items(): f[k] = v
        f.close()
    elif extension == '.fits':
        if not isinstance(results, dict): raise Exception("Can only save dicts to fits format.")
        rec = fromarrays(list(results.values()), names=list(results.keys()))
        makefits(rec, filename, clobber=True)
    elif extension == '.xml':
        from pyxml2obj import XMLout
        open(filename, 'w').write(XMLout(results))
    else:
        raise Exception("Unrecognized extension %s" % extension)
Code example #7
File: analyse_data.py, Project: jd-au/magmo-HI
def extract_spectra(daydirname, field, continuum_ranges):
    num_edge_chan = 10
    fits_filename = "{0}/1420/magmo-{1}_1420_sl_restor.fits".format(daydirname,
                                                                    field)
    src_filename = "{0}/{1}_src_comp.vot".format(daydirname, field)
    isle_filename = "{0}/{1}_src_isle.vot".format(daydirname, field)

    spectra = dict()
    source_ids = dict()
    if not os.path.exists(fits_filename):
        print ("Warning: File %s does not exist, skipping extraction." % \
              fits_filename)
        return spectra, source_ids, []

    sources = read_sources(src_filename)
    islands = read_islands(isle_filename)
    hdulist = fits.open(fits_filename)
    image = hdulist[0].data
    header = hdulist[0].header
    w = WCS(header)
    index = np.arange(header['NAXIS3'])
    beam_maj = header['BMAJ'] * 60 * 60
    beam_min = header['BMIN'] * 60 * 60
    beam_area = math.radians(header['BMAJ']) * math.radians(header['BMIN'])
    print ("Beam was %f x %f arcsec giving area of %f radians^2." % (beam_maj, beam_min, beam_area))
    ranges = calc_island_ranges(islands, (header['CDELT1'], header['CDELT2']))
    velocities = w.wcs_pix2world(10,10,index[:],0,0)[2]
    for src in sources:
        c = SkyCoord(src['ra'], src['dec'], frame='icrs', unit="deg")

        img_slice = get_integrated_spectrum(image, w, src, velocities, c.galactic.l.value, continuum_ranges)

        l_edge, r_edge = find_edges(img_slice, num_edge_chan)
        print("Using data range %d - %d out of %d channels." % (
            l_edge, r_edge, len(img_slice)))

        # plotSpectrum(np.arange(slice.size), slice)
        spectrum_array = rec.fromarrays(
            [np.arange(img_slice.size)[l_edge:r_edge],
             velocities[l_edge:r_edge],
             img_slice[l_edge:r_edge]],
            names='plane,velocity,flux')
        spectra[c.galactic.l] = spectrum_array

        # isle = islands.get(src['island'], None)
        src_map = {'id': src['id'], 'flux': src['peak_flux'], 'pos': c, 'beam_area': beam_area}
        src_map['a'] = src['a']
        src_map['b'] = src['b']
        src_map['pa'] = src['pa']
        print (src_map)
        source_ids[c.galactic.l] = src_map
    del image
    del header
    hdulist.close()

    return spectra, source_ids, ranges
Code example #8
def __collect_aos(self, f, imo, end):
    '''Collect the AOs in the MO block.'''
    from mfunc import norm, mult
    from numpy import dot

    # Initialize the AO list
    aos = []

    # Loop over each MO sub-block
    for i, start in enumerate(imo):
        
        # Initialize arrays for this MO
        pcent = array([], dtype=float)
        ao_id = array([], dtype=str)
        sym = array([], dtype=str)
        cont_sum = 0.0

        # Define start
        s = start + 4

        # Define end
        try:
            e = imo[i+1] - 1
        except IndexError:
            e = end

        # Loop over the AO lines for this MO
        for x in f[s:e]:
            # Atomic orbital number and symmetry
            s = x[22:36].split()
            ao_s = s[0] + ' ' + s[1]
            ao_id = append(ao_id, ao_s)
            # Coefficient in MO
            coeff = float(x[8:19].strip())
            pcent = append(pcent, coeff)
            # The Symmetry
            sym_s = ''.join(s[2:])
            sym = append(sym, sym_s.strip())
            # If there are two columns then lather, rinse, repeat
            if len(x.strip()) > 38:
                s = x[63:77].split()
                ao_s = s[0] + ' ' + s[1]
                ao_id = append(ao_id, ao_s)
                coeff = float(x[49:60].strip())
                pcent = append(pcent, coeff)
                sym_s = ''.join(s[2:])
                sym = append(sym, sym_s)

        # Normalize
        pcent = (pcent/norm(pcent))**2

        # Place into the ao list
        aos.append(fromarrays([pcent, ao_id, sym], names='pcent,ao_id,sym'))

    return aos
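fromarrays happily mixes numeric and string columns, inferring a fixed-width string dtype for the latter. A tiny sketch of the final call above, with invented values:

import numpy as np
from numpy.core.records import fromarrays

pcent = np.array([0.64, 0.36])
ao_id = np.array(['1 s', '2 p'])
sym = np.array(['A1', 'B2'])
aos = fromarrays([pcent, ao_id, sym], names='pcent,ao_id,sym')
print(aos.dtype)  # e.g. [('pcent', '<f8'), ('ao_id', '<U3'), ('sym', '<U2')]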
Code example #9
File: build_catalogue.py, Project: ssamuroff/mbii
    def __init__(self, snapshot, verbosity=1):
        # This is (hopefully) the one and only time we need to call on the coma DB in the catalogue pipeline

        sqlserver = 'localhost'
        user = '******'
        password = '******'
        dbname = 'mb2_hydro'
        unix_socket = '/home/rmandelb.proj/flanusse/mysql/mysql.sock'
        db = mdb.connect(sqlserver,
                         user,
                         password,
                         dbname,
                         unix_socket=unix_socket)

        # Setup the database connection and query for the centroids of all the halos
        c = db.cursor()
        sql = 'SELECT x,y,z,groupId,mass,len,subfindId FROM subfind_halos WHERE snapnum=%d;' % snapshot
        if verbosity > 0:
            print('Submitting query...')
        c.execute(sql)
        self.info = fromarrays(np.array(c.fetchall()).squeeze().T,
                               names='x,y,z,groupId,mass,len,subfindId')

        # Do the same for groups
        sql = 'SELECT x,y,z,groupId,subfindId FROM subfind_groups WHERE snapnum=%d;' % snapshot
        if verbosity > 0:
            print('Submitting group query...')
        c.execute(sql)
        self.group_info = fromarrays(np.array(c.fetchall()).squeeze().T,
                                     names='x,y,z,groupId,subfindId')

        # Convert to Mpc h^-1
        for name in ['x', 'y', 'z']:
            self.info[name] /= 1e3
            self.group_info[name] /= 1e3

        self.ids = np.arange(0, len(self.info), 1)

        if verbosity > 0:
            print('Done.')

        self.verbosity = verbosity
Code example #10
 def test_get_field_asattribute(self):
     "Tests item retrieval"
     [d, m, mrec, dlist, dates, mts, rts] = self.data
     self.failUnless(isinstance(rts.f0, TimeSeries))
     self.failUnless(not isinstance(rts[0], TimeSeriesRecords))
     assert_equal(rts.f0, time_series(d, dates=dates, mask=m))
     assert_equal(rts.f1, time_series(d[::-1], dates=dates, mask=m[::-1]))
     self.failUnless((rts._mask == nr.fromarrays([m, m[::-1]])).all())
     # Was _mask, now is recordmask
     assert_equal(rts.recordmask, np.r_[[m, m[::-1]]].all(0))
     assert_equal(rts.f0[1], rts[1].f0)
Code example #11
def blankRecordArray(desc, elements):
    """
    Accept a descriptor describing a recordarray, and return one that's full of
    zeros

    This seems like it should be in the numpy distribution...
    """
    blanks = []
    for atype in desc['formats']:
        blanks.append(np.zeros(elements, dtype=atype))
    return rec.fromarrays(blanks, **desc)
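A quick usage sketch, assuming the descriptor is a dict with parallel 'names' and 'formats' lists (field names invented):

import numpy as np
import numpy.core.records as rec

desc = {'names': ['time', 'flux'], 'formats': ['f8', 'f4']}
table = blankRecordArray(desc, 5)   # five zeroed records
table['time'][:] = np.linspace(0.0, 1.0, 5)
print(table.flux)                   # recarrays also expose fields as attributes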
Code example #12
File: mbdb.py, Project: ssamuroff/mbii
 def get_sql(self, sql, fields):
     try:
         # prepare a cursor for the query
         cursor = self.db.cursor()
         cursor.execute(sql)
         print("Fetching %d entries" % cursor.rowcount)
     except:
         print("Error when runnning the SQL command")
         return
     results = fromarrays(np.array(cursor.fetchall()).squeeze().T,names=fields)
     return results
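The np.array(...).squeeze().T idiom above turns the cursor's list of row tuples into one array per column, which is the layout fromarrays expects. A stand-alone sketch; note that np.array upcasts mixed columns to a common dtype, so the integer ids come back as floats here:

import numpy as np
from numpy.core.records import fromarrays

rows = [(1, 0.25), (2, 0.50), (3, 0.75)]   # what cursor.fetchall() might return
cols = np.array(rows).squeeze().T          # shape (n_fields, n_rows)
results = fromarrays(cols, names='id,score')
print(results['id'], results['score'])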
Code example #13
def __collect_aos(self, f, imo, e):
    '''Collect the AOs in the MO block.'''

    # Each AO list for an MO starts in the same line as an MO, then
    # continues to the line before the next MO.

    # Function to split lines containing AO info and return
    def ao_lines(line):
        ln = line.split()
        try:
            pcent = float(ln[0].rstrip('%')) / 100
        except ValueError:
            pcent = float('nan')
        try:
            ao_id = ln[5] + ' ' + ln[6]
            sym = ln[1] + ' ' + ln[2]
        except IndexError:
            if ln[1].upper() == 'CORE':
                ao_id = 'core'
                sym = 'core'
            else:
                raise IndexError
        return pcent, ao_id, sym

    # Vectorize the above function to work on numpy arrays as a whole
    aolines = vectorize(ao_lines)

    # Loop over each MO index location, which is also the start of the AO list
    aos = []
    for i in range(len(imo)):
        # Find the index range for each set of AOs.
        # The try is if this is the last MO then imo[i+1] does not exist
        try:
            irange = range(imo[i], imo[i + 1])
        except IndexError:
            irange = range(imo[i], e)
        lines = [f[x] for x in irange]
        # The first index needs to be modified to remove the MO information.
        lines[0] = ' '.join(lines[0].split()[4:])
        # Now split and collect the info from each line,
        try:
            pcent, ao_id, sym = aolines(lines)
            # Place into a record array
            aos.append(fromarrays([pcent, ao_id, sym],
                                  names='pcent,ao_id,sym'))
        except IndexError:
            pass

    return tuple(aos)
Code example #14
def vr_make_meta_for_obj_evaluation():
    from numpy.core.records import fromarrays
    from scipy.io import savemat
    m = h5py.File('data/sg_vrd_meta.h5', 'r')
    SG_VRD_ID = []
    WNID = []
    name = []
    description = []
    for i in range(1, 101):
        n = str(m['meta/cls/idx2name/%d'%i][...])
        SG_VRD_ID.append(i)
        WNID.append(n)
        name.append(n)
        description.append(n)
    meta_synset = fromarrays([SG_VRD_ID,WNID,name,description], names=['SG_VRD_ID', 'WNID', 'name', 'description'])
    savemat('data/sg_vrd_meta.mat', {'synsets': meta_synset})
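Reading the table back with struct_as_record=False gives attribute-style access to the saved synsets; a quick sketch:

from scipy.io import loadmat

meta = loadmat('data/sg_vrd_meta.mat', struct_as_record=False, squeeze_me=True)
synsets = meta['synsets']
print(synsets[0].SG_VRD_ID, synsets[0].WNID, synsets[0].name)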
Code example #15
 def test_fromrecords(self):
     "Test from recarray."
     [d, m, mrec, dlist, dates, mts, rts] = self.data
     nrec = nr.fromarrays(np.r_[[d, d[::-1]]])
     mrecfr = fromrecords(nrec.tolist(), dates=dates)
     assert_equal(mrecfr.f0, mrec.f0)
     assert_equal(mrecfr.dtype, mrec.dtype)
     #....................
     altrec = [tuple([d, ] + list(r)) for (d, r) in zip(dlist, nrec)]
     mrecfr = fromrecords(altrec, names='dates,f0,f1')
     assert_equal(mrecfr.f0, mrec.f0)
     assert_equal(mrecfr.dtype, mrec.dtype)
     #....................
     tmp = time_records(rts._series[::-1], dates=rts.dates)
     mrecfr = fromrecords(tmp)
     assert_equal(mrecfr.f0, mrec.f0[::-1])
     #....................
     mrecfr = fromrecords(mrec.data, dates=dates, mask=m)
     assert_equal(mrecfr.recordmask, m)
Code example #16
 def fromfile(self, filename):
     self.data = {}
     self.param = {}
     if hasattr(filename, '__iter__'):
         file = filename
     else:
         if filename[-3:] == '.gz':
             file = gzip.open(filename)
         else:
             try:
                 file = open(filename)
             except IOError:
                 file = gzip.open(filename + '.gz')
     for line in file:
         if line.strip():
             f = line.split()
             if (f[0] == '@'):  # descriptor lines
                 try:
                     self.param[f[1]] = conv(f[2], f[3])
                 except:
                     print "bad descriptor", " ".join(f)
             elif (f[0] == '*'):  # self.labels lines
                 f.pop(0)
                 f = [pythonname(l) for l in f]
                 self.labels = f
                 for l in self.labels:
                     self.data[l] = []
             elif (f[0] == '$'):  # type lines
                 f.pop(0)
                 self.types = f
             elif (f[0].startswith('#')):  # comment lines
                 pass
             else:  # data lines
                  f = list(map(conv, self.types, f))  # list() so pop() works on Python 3
                 for l in self.labels:
                     d = f.pop(0)
                     self.data[l].append(d)
     data = [self.data[n] for n in self.labels]
     self.data = fromarrays(data, names=','.join(self.labels))
     return self
Code example #17
File: mbdb.py, Project: ssamuroff/mbii
    def get(self, table, fields, cond="", fatal_errors=True):
        """
        Returns the sql query as a nice numpy recarray
        expects the list of fields in the format fields='a,b,c'
        """

        sql = "SELECT %s FROM %s WHERE %s;"%(fields, table, cond)
        print(sql)
        try:
            # prepare a cursor for the query
            cursor = self.db.cursor()
            cursor.execute(sql)
            print("Fetching %d entries" % cursor.rowcount)
        except:
            if fatal_errors:
                raise
            else:
                print("Error when runnning the SQL command")
                return

        results = fromarrays(np.array(cursor.fetchall()).squeeze().T,names=fields)
        return results
Code example #18
File: coord.py, Project: eddienko/SamPy
def match(ra1, dec1, ra2, dec2, tol, allmatches=False):
    """
    Given two sets of numpy arrays of ra,dec and a tolerance tol
    (float), returns an array of integers with the same length as the
    first input array.  If integer > 0, it is the index of the closest
    matching second array element within tol arcsec.  If -1, then there
    was no matching ra/dec within tol arcsec.

    if allmatches = True, then for each object in the first array,
    return the index of everything in the second arrays within the
    search tolerance, not just the closest match.

    :note: does not force one-to-one mapping

    Note to get the indices of objects in ra2, dec2 without a match:
    imatch = match(ra1, dec1, ra2, dec2, 2.)
    inomatch = numpy.setdiff1d(np.arange(len(ra2)), set(imatch))
    """
    DEG_PER_HR = 360. / 24.             # degrees per hour
    DEG_PER_MIN = DEG_PER_HR / 60.      # degrees per min
    DEG_PER_S = DEG_PER_MIN / 60.       # degrees per sec
    DEG_PER_AMIN = 1. / 60.             # degrees per arcmin
    DEG_PER_ASEC = DEG_PER_AMIN / 60.   # degrees per arcsec
    RAD_PER_DEG = math.pi / 180.        # radians per degree

    isorted = ra2.argsort()
    sdec2 = dec2[isorted]
    sra2 = ra2[isorted]

    LIM = tol * DEG_PER_ASEC

    match = []

    #this is faster but less accurate
    # use mean dec, assumes decs similar
    #decav = np.mean(sdec2.mean() + dec1.mean())
    #RA_LIM = LIM / np.cos(decav * RAD_PER_DEG)

    for ra, dec in zip(ra1, dec1):
        #slower but more accurate
        RA_LIM = LIM / np.cos(dec * RAD_PER_DEG)

        i1 = sra2.searchsorted(ra - RA_LIM)
        i2 = i1 + sra2[i1:].searchsorted(ra + RA_LIM)
        close = []
        for j in range(i1, i2):
            decdist = np.abs(dec - sdec2[j])
            if decdist > LIM:
                continue
            else:
                # if ras and decs are within LIM, then
                # calculate actual separation
                disq = astCoords.calcAngSepDeg(ra, dec, sra2[j], sdec2[j])
                close.append((disq, j))

        close.sort()
        if not allmatches:
            # Choose the object with the closest separation inside the
            # requested tolerance, if one was found.
            if len(close) > 0:
                min_dist, jmin = close[0]
                if min_dist < LIM:
                    match.append((isorted[jmin], min_dist))
                    continue
            # otherwise no match
            match.append((-1, -1))
        else:
            # append all the matching objects
            jclose = []
            seps = []
            for dist, j in close:
                if dist < LIM:
                    jclose.append(j)
                    seps.append(dist)
                else:
                    break
            match.append(fromarrays([isorted[jclose], seps],
                                    dtype=[('ind', 'i8'), ('sep', 'f8')]))

    if not allmatches:
        # return both indices and separations in a recarray
        temp = np.rec.fromrecords(match, names='ind,sep')
        # change to arcseconds
        temp.sep *= 3600.
        temp.sep[temp.sep < 0] = -1.
        return temp
    else:
        return match
Code example #19
 def bounds(self):
     if not hasattr(self, '_bounds'):
         start = (self.header['t_start'] - self.starttime) * self.sampling_rate
         self._bounds = fromarrays([start, start + self.sectorsizes - 1], names='start,end')
     return self._bounds
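A sketch of what that property produces, with made-up sector offsets; fromarrays returns a recarray, so the fields read as attributes:

import numpy as np
from numpy.core.records import fromarrays

start = np.array([0, 1024, 2048])
sizes = np.array([1024, 1024, 512])
bounds = fromarrays([start, start + sizes - 1], names='start,end')
print(bounds.start, bounds.end)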
Code example #20
File: DataLoader.py, Project: DennisHub/pyrelacs
def recload(filename):
    for meta, key, dat in iload(filename):
        yield meta, fromarrays(dat.T, names=key[0])
Code example #21
def match_radec(ra1, dec1, ra2, dec2, tol, allmatches=False):
    """
    match_radec(ra1, dec1, ra2, dec2, tol)

    Given two sets of numpy arrays of ra,dec and a tolerance tol
    (float), returns an array of integers with the same length as the
    first input array.  If integer > 0, it is the index of the closest
    matching second array element within tol arcsec.  If -1, then there
    was no matching ra/dec within tol arcsec.

    if allmatches = True, then for each object in the first array,
    return the index of everything in the second arrays within the
    search tolerance, not just the closest match.

    Note to get the indices of objects in ra2, dec2 without a match, use

    imatch = match_radec(ra1, dec1, ra2, dec2, 2.)
    inomatch = numpy.setdiff1d(np.arange(len(ra2)), set(imatch))

    doctests:

    >>> npts = 10
    >>> ra1 = np.linspace(340, 341, npts)
    >>> dec1 = np.linspace(20, 21, npts)
    >>> ra2 = ra1 + (1.-2*np.random.random(npts)) * DEG_PER_ASEC
    >>> dec2 = dec1 + (1.-2*np.random.random(npts)) * DEG_PER_ASEC
    >>> match(ra1, dec1, ra2, dec2, 2.)
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    """
    from numpy.core.records import fromarrays
    
    ra1,ra2,dec1,dec2 = map(np.asarray, (ra1, ra2, dec1, dec2))

    abs = np.abs

    isorted = ra2.argsort()
    sdec2 = dec2[isorted]
    sra2 = ra2[isorted]

    LIM = tol * DEG_PER_ASEC

    match = []
    # use mean dec, assumes decs similar
    decav = np.mean(sdec2.mean() + dec1.mean())
    RA_LIM = LIM / cos(decav * RAD_PER_DEG)

    for ra,dec in zip(ra1,dec1):
        i1 = sra2.searchsorted(ra - RA_LIM)
        #i2 = sra2.searchsorted(ra + RA_LIM)
        i2 = i1 + sra2[i1:].searchsorted(ra + RA_LIM)
        #print i1,i2
        close = []
        for j in range(i1,i2):
            if abs(dec - sdec2[j]) > LIM:
                continue
            else:
                # if ras and decs are within LIM arcsec, then
                # calculate actual separation:
                disq = ang_sep(ra, dec, sra2[j], sdec2[j])
                close.append((disq, j))

        close.sort()
        if not allmatches:
            # Choose the object with the closest separation inside the
            # requested tolerance, if one was found.
            if len(close) > 0:
                min_dist, jmin = close[0]
                if min_dist < LIM:
                    match.append((isorted[jmin], min_dist))
                    continue
            # otherwise no match
            match.append((-1,-1))
        else:
            # append all the matching objects
            jclose = []
            seps = []
            for dist,j in close:
                if dist < LIM:
                    jclose.append(j)
                    seps.append(dist)
                else:
                    break
            match.append(fromarrays([isorted[jclose], seps],
                                    dtype=[('ind','i8'),('sep','f8')]))

    if not allmatches:
        # return both indices and separations in a recarray
        temp = np.rec.fromrecords(match, names='ind,sep')
        # change to arcseconds
        # import pdb; pdb.set_trace()  # debug breakpoint, left disabled
        temp.sep *= 3600.
        temp.sep[temp.sep < 0] = -1.
        return temp
    else:
        return match
Code example #22
File: coord.py, Project: nhmc/LAE
def match(ra1, dec1, ra2, dec2, tol, allmatches=False):
    """ Given two sets of numpy arrays of ra,dec and a tolerance tol,
    returns an array of indices and separations with the same length
    as the first input array.

    If an index is > 0, it is the index of the closest matching second
    array element within tol arcsec.  If it's -1, then there was no
    matching ra/dec within tol arcsec.

    If allmatches = True, then for each object in the first array,
    return the index and separation of everything in the second array
    within the search tolerance, not just the closest match.

    See Also
    --------
    indmatch, unique_radec

    Notes
    -----
    To get the indices of objects in ra2, dec2 without a match, use

    >>> imatch = match(ra1, dec1, ra2, dec2, 2.)
    >>> inomatch = numpy.setdiff1d(np.arange(len(ra2)), set(imatch))
    """

    ra1, ra2, dec1, dec2 = map(np.asarray, (ra1, ra2, dec1, dec2))

    abs = np.abs

    isorted = ra2.argsort()
    sdec2 = dec2[isorted]
    sra2 = ra2[isorted]

    LIM = tol * DEG_PER_ASEC

    match = []
    # use mean dec, assumes decs similar
    decav = np.mean(sdec2.mean() + dec1.mean())
    RA_LIM = LIM / cos(decav * RAD_PER_DEG)

    for ra, dec in zip(ra1, dec1):
        i1 = sra2.searchsorted(ra - RA_LIM)
        i2 = i1 + sra2[i1:].searchsorted(ra + RA_LIM)
        #print(i1,i2)
        close = []
        for j in range(i1, i2):
            if abs(dec - sdec2[j]) > LIM:
                continue
            else:
                # if ras and decs are within LIM arcsec, then
                # calculate actual separation:
                disq = ang_sep(ra, dec, sra2[j], sdec2[j])
                close.append((disq, j))

        close.sort()
        if not allmatches:
            # Choose the object with the closest separation inside the
            # requested tolerance, if one was found.
            if len(close) > 0:
                min_dist, jmin = close[0]
                if min_dist < LIM:
                    match.append((isorted[jmin], min_dist))
                    continue
            # otherwise no match
            match.append((-1, -1))
        else:
            # append all the matching objects
            jclose = []
            seps = []
            for dist, j in close:
                if dist < LIM:
                    jclose.append(j)
                    seps.append(dist)
                else:
                    break
            match.append(
                fromarrays([isorted[jclose], seps],
                           dtype=[(str('ind'), str('i8')),
                                  (str('sep'), str('f8'))]))

    if not allmatches:
        # return both indices and separations in a recarray
        temp = np.rec.fromrecords(match, names=str('ind,sep'))
        # change to arcseconds
        temp.sep *= 3600.
        temp.sep[temp.sep < 0] = -1.
        return temp
    else:
        return match
Code example #23
# https://stackoverflow.com/questions/33212855/how-can-i-create-a-matlab-struct-array-from-scipy-io

from numpy.core.records import fromarrays
from scipy.io import loadmat, savemat
import numpy as np
myrec = fromarrays([[1, 10], [2, 20]], names=['field1', 'field2'])
savemat('p.mat', {'myrec': myrec})
mat = loadmat('p.mat', struct_as_record=False, squeeze_me=True)
rec = mat['myrec']
print(rec[0].field1)
print(rec[0].field2)
print(rec[1].field1)
rec[1].field2 = -100
import copy
sample = copy.copy(rec[1])
sample.field2 = -900
rec = np.append(rec, sample)
savemat('pp.mat', {'myrec': rec})
mat = loadmat('pp.mat', struct_as_record=False, squeeze_me=True)
rec = mat['myrec']
print(rec[1].field2)
print(rec[2].field2)
Code example #24
def components(
    channel,
    channel_name,
    unique_names,
    prefix="",
    master=None,
    only_basenames=False,
):
    """ yield pandas Series and unique name based on the ndarray object

    Parameters
    ----------
    channel : numpy.ndarray
        channel to be used for Series
    channel_name : str
        channel name
    unique_names : UniqueDB
        unique names object
    prefix : str
        prefix used in case of nested recarrays
    master : np.array
        optional index for the Series
    only_basenames (False) : bool
        use just the field names, without prefix, for structures and channel
        arrays

        .. versionadded:: 5.13.0

    Returns
    -------
    name, series : (str, values)
        tuple of unique name and values
    """
    names = channel.dtype.names

    # channel arrays
    if names[0] == channel_name:
        name = names[0]

        if not only_basenames:
            if prefix:
                name_ = unique_names.get_unique_name(f"{prefix}.{name}")
            else:
                name_ = unique_names.get_unique_name(name)
        else:
            name_ = unique_names.get_unique_name(name)

        values = channel[name]
        if len(values.shape) > 1:
            values = Series(
                list(values),
                index=master,
            )
        else:
            values = Series(
                values,
                index=master,
            )

        yield name_, values

        for name in names[1:]:
            values = channel[name]
            if not only_basenames:
                axis_name = unique_names.get_unique_name(f"{name_}.{name}")
            else:
                axis_name = unique_names.get_unique_name(name)
            if len(values.shape) > 1:
                arr = [values]
                types = [("", values.dtype, values.shape[1:])]
                values = Series(
                    fromarrays(arr, dtype=types),
                    index=master,
                )
                del arr
            else:
                values = Series(
                    values,
                    index=master,
                )

            yield axis_name, values

    # structure composition
    else:

        for name in channel.dtype.names:
            values = channel[name]

            if values.dtype.names:
                yield from components(
                    values,
                    name,
                    unique_names,
                    prefix=f"{prefix}.{channel_name}"
                    if prefix else f"{channel_name}",
                    master=master,
                    only_basenames=only_basenames,
                )

            else:
                if not only_basenames:
                    name_ = unique_names.get_unique_name(
                        f"{prefix}.{channel_name}.{name}"
                        if prefix else f"{channel_name}.{name}")
                else:
                    name_ = unique_names.get_unique_name(name)
                if len(values.shape) > 1:
                    values = Series(
                        list(values),
                        index=master,
                    )
                else:
                    values = Series(
                        values,
                        index=master,
                    )

                yield name_, values
Code example #25
def dict_to_numpy_array(d):
    """
    Convert a dict of 1-d arrays to a numpy recarray
    """
    return fromarrays(d.values(), np.dtype([(str(k), v.dtype) for k, v in d.items()]))
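A usage sketch with hypothetical columns; the helper keys each field's dtype off the corresponding value's own dtype:

import numpy as np
from numpy.core.records import fromarrays

d = {'x': np.array([1.0, 2.0, 3.0]), 'n': np.array([1, 2, 3])}
ra = dict_to_numpy_array(d)
print(ra.dtype)  # e.g. [('x', '<f8'), ('n', '<i8')]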
Code example #26
def extract_spectra(dir_name, field_name):

    print("Extracting spectra...")

    num_edge_chan = 10
    fits_filename = dir_name + field_name + "/" + field_name + ".1419.5.2.restor.fits"
    src_filename = dir_name + 'extracted_spectra/' + field_name + "_src_comp.vot"
    isle_filename = dir_name + 'extracted_spectra/' + field_name + "_src_isle.vot"

    spectra = dict()
    source_ids = dict()
    if not os.path.exists(fits_filename):
        print ("Warning: File %s does not exist, skipping extraction." % \
              fits_filename)
        return spectra, source_ids, []

    sources = read_sources(src_filename, dir_name, field_name)
    islands = read_islands(isle_filename)
    hdulist = fits.open(fits_filename)
    image = hdulist[0].data
    header = hdulist[0].header
    CDELT = np.abs(header['CDELT2'])
    w = WCS(header)
    index = np.arange(header['NAXIS3'])
    beam_maj = header['BMAJ'] * 60 * 60
    beam_min = header['BMIN'] * 60 * 60
    beam_size = beam_maj / CDELT
    beam_area = math.radians(header['BMAJ']) * math.radians(header['BMIN'])
    print("Beam was %f x %f arcsec giving area of %f radians^2." %
          (beam_maj, beam_min, beam_area))
    ranges = calc_island_ranges(islands, (header['CDELT1'], header['CDELT2']))
    velocities = w.wcs_pix2world(10, 10, index[:], 0, 0)[2]
    for src in sources:
        c = SkyCoord(src['ra'], src['dec'], frame='icrs', unit="deg")
        rms = get_channel_rms(image, w, src, beam_size)
        img_slice = get_integrated_spectrum(image, w, src)
        l_edge, r_edge = find_edges(img_slice, num_edge_chan)
        print("Using data range %d - %d out of %d channels." %
              (l_edge, r_edge, len(img_slice)))

        # plotSpectrum(np.arange(slice.size), slice)
        spectrum_array = rec.fromarrays([
            np.arange(
                img_slice.size)[l_edge:r_edge], velocities[l_edge:r_edge],
            img_slice[l_edge:r_edge], rms[l_edge:r_edge]
        ],
                                        names='plane,velocity,flux,rms')
        spectra[c.ra] = spectrum_array

        # isle = islands.get(src['island'], None)
        src_map = {
            'id': src['id'],
            'flux': src['peak_flux'],
            'pos': c,
            'beam_area': beam_area,
            'ra_str': src['ra_str'],
            'dec_str': src['dec_str']
        }
        src_map['a'] = src['a']
        src_map['b'] = src['b']
        src_map['pa'] = src['pa']
        print(src_map)
        source_ids[c.ra] = src_map

    # Channel width and reference velocity in km/s. These depend only on the
    # header, so compute them once here rather than once per source (this also
    # avoids a NameError in the return below when no sources are found).
    CDELT3 = header['CDELT3'] / 1000.
    CRVAL3 = header['CRVAL3'] / 1000.

    del image
    del header
    hdulist.close()

    return spectra, source_ids, ranges, CDELT3, CRVAL3
Code example #27
     psfMagList_i.append(x.i[j])
     psfMagList_z.append(x.z[j])
     psfMagErrList_u.append(x.psfMagErr_u[j])
     psfMagErrList_g.append(x.psfMagErr_g[j])
     psfMagErrList_r.append(x.psfMagErr_r[j])
     psfMagErrList_i.append(x.psfMagErr_i[j])
     psfMagErrList_z.append(x.psfMagErr_z[j])
     mjdList_u.append(x.mjd_u[j])
     mjdList_g.append(x.mjd_g[j])
     mjdList_r.append(x.mjd_r[j])
     mjdList_i.append(x.mjd_i[j])
     mjdList_z.append(x.mjd_z[j])
     if j == (len(x) - 1):
         psfMag = rec.fromarrays([
             psfMagList_u, psfMagList_g, psfMagList_r, psfMagList_i,
             psfMagList_z
         ],
                                 names=','.join(SDSS_FILTERS))
         psfMagErr = rec.fromarrays([
             psfMagErrList_u, psfMagErrList_g, psfMagErrList_r,
             psfMagErrList_i, psfMagErrList_z
         ],
                                    names=','.join(SDSS_FILTERS))
         mjd = rec.fromarrays(
             [mjdList_u, mjdList_g, mjdList_r, mjdList_i, mjdList_z],
             names=','.join(SDSS_FILTERS))
         A_fit, gamma_fit = calculate_variability_parameters(
             mjd, psfMag, psfMagErr)
         A = np.append(A, A_fit)
         gamma = np.append(gamma, gamma_fit)
 psfMag_u.append(psfMagList_u)
Code example #28
def save_mppe_results_to_mpi_format(mp_pose_list, save_path='./exps/preds/mat_results/pred_keypoints_mpii_multi.mat'):
    pred = fromarrays([mp_pose_list], names=['annorect'])
    sio.savemat(save_path, {'pred': pred})
Code example #29
File: coord.py, Project: ishivvers/astro
def match(ra1, dec1, ra2, dec2, tol, allmatches=False):
    """
    match(ra1, dec1, ra2, dec2, tol)

    Given two sets of numpy arrays of ra,dec and a tolerance tol
    (float), returns an array of indices and separations with the same
    length as the first input array.  If an index is > 0, it is the
    index of the closest matching second array element within tol
    arcsec.  If it's -1, then there was no matching ra/dec within tol
    arcsec.

    if allmatches = True, then for each object in the first array,
    return the index and separation of everything in the second array
    within the search tolerance, not just the closest match.

    Note to get the indices of objects in ra2, dec2 without a match, use

    imatch = match(ra1, dec1, ra2, dec2, 2.)
    inomatch = numpy.setdiff1d(np.arange(len(ra2)), set(imatch))

    """
    from numpy.core.records import fromarrays
    
    ra1,ra2,dec1,dec2 = map(np.asarray, (ra1, ra2, dec1, dec2))

    abs = np.abs

    isorted = ra2.argsort()
    sdec2 = dec2[isorted]
    sra2 = ra2[isorted]

    LIM = tol * DEG_PER_ASEC

    match = []
    # use mean dec, assumes decs similar
    decav = np.mean(sdec2.mean() + dec1.mean())
    RA_LIM = LIM / cos(decav * RAD_PER_DEG)

    for ra,dec in zip(ra1,dec1):
        i1 = sra2.searchsorted(ra - RA_LIM)
        i2 = i1 + sra2[i1:].searchsorted(ra + RA_LIM)
        #print i1,i2
        close = []
        for j in range(i1,i2):
            if abs(dec - sdec2[j]) > LIM:
                continue
            else:
                # if ras and decs are within LIM arcsec, then
                # calculate actual separation:
                disq = ang_sep(ra, dec, sra2[j], sdec2[j])
                close.append((disq, j))

        close.sort()
        if not allmatches:
            # Choose the object with the closest separation inside the
            # requested tolerance, if one was found.
            if len(close) > 0:
                min_dist, jmin = close[0]
                if min_dist < LIM:
                    match.append((isorted[jmin], min_dist))
                    continue
            # otherwise no match
            match.append((-1,-1))
        else:
            # append all the matching objects
            jclose = []
            seps = []
            for dist,j in close:
                if dist < LIM:
                    jclose.append(j)
                    seps.append(dist)
                else:
                    break
            match.append(fromarrays([isorted[jclose], seps],
                                    dtype=[('ind','i8'),('sep','f8')]))

    if not allmatches:
        # return both indices and separations in a recarray
        temp = np.rec.fromrecords(match, names='ind,sep')
        # change to arcseconds
        temp.sep *= 3600.
        temp.sep[temp.sep < 0] = -1.
        return temp
    else:
        return match
Code example #30
                        float(int(y2))
                    ]
                    boxes.append(cord)

                    bbox = patches.Rectangle((x1, y1),
                                             x2 - x1,
                                             y2 - y1,
                                             linewidth=2,
                                             edgecolor='blue',
                                             facecolor='none')
                    ax.add_patch(bbox)

            plt.axis('off')
            plt.gca().xaxis.set_major_locator(NullLocator())
            plt.gca().yaxis.set_major_locator(NullLocator())
            plt.savefig('/home/yeezy/Desktop/vis/{}'.format(img_fn),
                        bbox_inches='tight',
                        pad_inches=0.0)
            plt.close()

            box_cnt += len(boxes)
            boxes_list.append(boxes)

    # Save as .mat file
    matfn = '/home/yeezy/Src/matlab/OP/evaluation-metrics/data/coco_gt_data.mat'
    record = fromarrays([img_fn_list, boxes_list], names=['im', 'boxes'])
    scipy.io.savemat(matfn, {
        'num_annotations': np.array([float(box_cnt)]),
        'impos': record
    })
Code example #31
def extract_spectra(fits_filename, continuum_ranges):
    num_edge_chan = 0
    #fits_filename = "{0}/1420/magmo-{1}_1420_sl_restor.fits".format(daydirname,
    #                                                                field)
    src_filename = 'catalog.dat'
    #isle_filename = "{0}/{1}_src_isle.vot".format(daydirname, field)

    spectra = dict()
    source_ids = dict()
    if not os.path.exists(fits_filename):
        print ("Warning: File %s does not exist, skipping extraction." % \
              fits_filename)
        return spectra, source_ids, []

    sources = read_sources(src_filename)
    #islands = read_islands(isle_filename)
    hdulist = fits.open(fits_filename)
    image = hdulist[0].data
    print("Image shape is", image.shape)
    header = hdulist[0].header
    w = WCS(header)
    index = np.arange(header['NAXIS3'])
    beam_maj = header['BMAJ'] * 60 * 60
    beam_min = header['BMIN'] * 60 * 60
    beam_area = math.radians(header['BMAJ']) * math.radians(header['BMIN'])
    print("Beam was %f x %f arcsec giving area of %f radians^2." %
          (beam_maj, beam_min, beam_area))
    ranges = [
    ]  #calc_island_ranges(islands, (header['CDELT1'], header['CDELT2']))
    velocities = w.wcs_pix2world(10, 10, index[:], 0)[2]
    print("Found {} sources".format(len(sources)))
    for src in sources:
        c = SkyCoord(src['ra'], src['dec'], frame='icrs', unit="deg")

        img_slice = get_integrated_spectrum(image, w, src, c, velocities,
                                            c.galactic.l.value,
                                            continuum_ranges)
        if img_slice is None:
            continue

        l_edge, r_edge = find_edges(img_slice, num_edge_chan)
        print("Using data range %d - %d out of %d channels." %
              (l_edge, r_edge, len(img_slice)))

        # plotSpectrum(np.arange(slice.size), slice)
        spectrum_array = rec.fromarrays([
            np.arange(img_slice.size)[l_edge:r_edge],
            velocities[l_edge:r_edge], img_slice[l_edge:r_edge]
        ],
                                        names='plane,velocity,flux')
        spectra[c.galactic.l] = spectrum_array

        # isle = islands.get(src['island'], None)
        src_map = {
            'id': src['id'],
            'flux': src['peak_flux'],
            'pos': c,
            'beam_area': beam_area
        }
        src_map['a'] = src['a']
        src_map['b'] = src['b']
        src_map['pa'] = src['pa']
        print(src_map)
        source_ids[c.galactic.l] = src_map
    del image
    del header
    hdulist.close()

    return spectra, source_ids, ranges
Code example #32
def components(channel, channel_name, unique_names, prefix="", master=None):
    """ yield pandas Series and unique name based on the ndarray object

    Parameters
    ----------
    channel : numpy.ndarray
        channel to be used for Series
    channel_name : str
        channel name
    unique_names : UniqueDB
        unique names object
    prefix : str
        prefix used in case of nested recarrays

    Returns
    -------
    name, series : (str, pandas.Series)
        tuple of unique name and Series object
    """
    names = channel.dtype.names

    # channel arrays
    if names[0] == channel_name:
        name = names[0]

        if prefix:
            name_ = unique_names.get_unique_name(f"{prefix}.{name}")
        else:
            name_ = unique_names.get_unique_name(name)

        values = channel[name]
        if len(values.shape) > 1:
            values = list(values)
        yield name_, Series(values, index=master)

        for name in names[1:]:
            values = channel[name]
            axis_name = unique_names.get_unique_name(f"{name_}.{name}")
            if len(values.shape) > 1:
                arr = [values]
                types = [("", values.dtype, values.shape[1:])]
                values = fromarrays(arr, dtype=types)
                del arr

            yield axis_name, Series(values, index=master, dtype="O")

    # structure composition
    else:

        for name in channel.dtype.names:
            values = channel[name]

            if values.dtype.names:
                yield from components(values,
                                      name,
                                      unique_names,
                                      prefix=f"{prefix}.{channel_name}"
                                      if prefix else f"{channel_name}",
                                      master=master)

            else:
                name_ = unique_names.get_unique_name(
                    f"{prefix}.{channel_name}.{name}"
                    if prefix else f"{channel_name}.{name}")
                if len(values.shape) > 1:
                    values = list(values)

                yield name_, Series(values, index=master)
Code example #33
from datetime import datetime

import numpy.core.records as records

# Testing various incompatible args for fromarrays
records.fromarrays(
    dict(a=1)
)  # E: Argument 1 to "fromarrays" has incompatible type "Dict[str, int]"
records.fromarrays(datetime(
    1970, 1,
    1))  # E: Argument 1 to "fromarrays" has incompatible type "datetime"

records.fromarrays([[1]], dtype=dict(
    a=1))  # E: Argument "dtype" to "fromarrays" has incompatible type
records.fromarrays([[1]], formats=dict(
    a=1))  # E: Argument "formats" to "fromarrays" has incompatible type
records.fromarrays([[1]], names=dict(
    a=1))  # E: Argument "names" to "fromarrays" has incompatible type
records.fromarrays([[1]], titles=dict(
    a=1))  # E: Argument "titles" to "fromarrays" has incompatible type
records.fromarrays(
    [[1]],
    aligned=1)  # E: Argument "aligned" to "fromarrays" has incompatible type
records.fromarrays(
    [[1]], byteorder=1
)  # E: Argument "byteorder" to "fromarrays" has incompatible type

# Testing various incompatible args for fromrecords
records.fromrecords(
    dict(a=1)
)  # E: Argument 1 to "fromrecords" has incompatible type "Dict[str, int]"
Code example #34
def single_image_testing_on_mpi_mp_dataset(net, im, objpos=None, \
                                                    scale_provided=None, \
                                                    center_box=None, \
                                                    center_box_extend_pixels=50, \
                                                    transform=None, \
                                                    stride=4, \
                                                    crop_size=256, \
                                                    training_crop_size=256, \
                                                    scale_multiplier=[1], \
                                                    num_of_joints=16, \
                                                    conf_th=0.1, \
                                                    dist_th=120, \
                                                    visualization=False, \
                                                    vis_im_path='./exps/preds/vis_results/mppe_vis_result.jpg'):
    
         
    # Get the original image size
    im_height = im.shape[0]
    im_width = im.shape[1]
    long_edge = max(im_height, im_width) 

    # Get the group center
    if objpos is not None and scale_provided is not None and center_box is not None:
        ori_center = np.array([[objpos[0], objpos[1]]])
        base_scale = 1.1714 / scale_provided
    else:
        ori_center = np.array([[im_width / 2.0, im_height / 2.0]])
        scale_provided = long_edge * 1.0 / crop_size
        base_scale = 1 / scale_provided

    # Variables to store multi-scale test images and their crop parameters
    cropped_im_list = []
    cropped_param_list = []
    flipped_cropped_im_list = []
    flipped_cropped_param_list = []

    for sm in scale_multiplier:
        # Resized image to base scales
        scale = base_scale * sm
        resized_im = cv2.resize(im, None, fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
        scaled_center = np.zeros([1, 2])
        scaled_center[0, 0] = int(ori_center[0, 0] * scale)
        scaled_center[0, 1] = int(ori_center[0, 1] * scale)

        # Get flipped images
        flipped_resized_im = cv2.flip(resized_im, 1)

        # Crop image for testing
        cropped_im, cropped_param = augmentation_cropped(resized_im, scaled_center, crop_x=crop_size, crop_y=crop_size, max_center_trans=0)
        cropped_im_list.append(cropped_im)
        cropped_param_list.append(cropped_param)

        scaled_flipped_center = np.zeros([1,2])
        scaled_flipped_center[0,0] = resized_im.shape[1] - scaled_center[0,0]
        scaled_flipped_center[0,1] = scaled_center[0,1]

        # Crop flipped image for testing
        flipped_cropped_im, flipped_cropped_param = augmentation_cropped(flipped_resized_im, scaled_flipped_center, crop_x=crop_size, crop_y=crop_size, max_center_trans=0)
        flipped_cropped_im_list.append(flipped_cropped_im)
        flipped_cropped_param_list.append(flipped_cropped_param)

    # Transform image
    input_im_list = []
    flipped_input_im_list = []
    if transform is not None:
        for cropped_im in cropped_im_list:
            input_im = transform(cropped_im)
            input_im_list.append(input_im)
        for flipped_cropped_im in flipped_cropped_im_list:
            flipped_input_im = transform(flipped_cropped_im)
            flipped_input_im_list.append(flipped_input_im)
    else:
        for cropped_im in cropped_im_list:
            input_im =cropped_im.copy()
            input_im_list.append(input_im)
        for flipped_cropped_im in flipped_cropped_im_list:
            flipped_input_im = flipped_cropped_im.copy()
            flipped_input_im_list.append(flipped_input_im)

    # Preparing input variable
    batch_input_im = input_im_list[0].view(-1, 3, crop_size, crop_size)
    for smi in range(1, len(input_im_list)):
        batch_input_im = torch.cat((batch_input_im, input_im_list[smi].view(-1, 3, crop_size, crop_size)), 0)
    batch_input_im = batch_input_im.cuda(non_blocking=True)  # `async` became a reserved word in Python 3.7
    batch_input_var = torch.autograd.Variable(batch_input_im, volatile=True)  # pre-0.4 PyTorch idiom; use torch.no_grad() on modern versions

    # Preparing flipped input variable
    batch_flipped_input_im = flipped_input_im_list[0].view(-1, 3, crop_size, crop_size)
    for smi in range(1, len(flipped_input_im_list)):
        batch_flipped_input_im = torch.cat((batch_flipped_input_im, flipped_input_im_list[smi].view(-1, 3, crop_size, crop_size)), 0)
    batch_flipped_input_im = batch_flipped_input_im.cuda(non_blocking=True)  # `async` is reserved in Python 3
    batch_flipped_input_var = batch_flipped_input_im

    # Get predicted heatmaps and convert them to numpy arrays
    # (inference only, so run the forward pass under torch.no_grad(),
    # the modern replacement for the removed volatile=True flag)
    with torch.no_grad():
        pose_outputs, orie_outputs = net(batch_input_var)
    pose_output = pose_outputs[-1].cpu().numpy()
    orie_output = orie_outputs[-1].cpu().numpy()
    
    # Get predicted flipped heatmaps and convert them to numpy arrays
    with torch.no_grad():
        flipped_pose_outputs, flipped_orie_outputs = net(batch_flipped_input_var)
    flipped_pose_output = flipped_pose_outputs[-1].cpu().numpy()
    flipped_orie_output = flipped_orie_outputs[-1].cpu().numpy()

    # First fuse the original prediction with flipped prediction
    fused_pose_output = np.zeros((pose_output.shape[0], pose_output.shape[1] - 1, crop_size, crop_size))
    flipped_idx = [0, 1, 5, 6, 7, 2, 3, 4, 11, 12, 13, 8, 9, 10, 14, 15]
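    # flipped_idx permutes heatmap channels so left/right joint pairs predicted
    # on the horizontally flipped image line up with the original ones
    # (2,3,4 <-> 5,6,7 and 8,9,10 <-> 11,12,13; symmetric joints stay fixed).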
    for smi in range(0, len(scale_multiplier)):
        # Get single scale output
        single_scale_output = pose_output[smi, :, :, :].copy()
        single_scale_flipped_output = flipped_pose_output[smi, :, :, :].copy()

        # fuse each joint's heatmap
        for ji in range(0, 16):
            # Get the original heatmap
            heatmap = single_scale_output[ji, :, :].copy()
            heatmap = cv2.resize(heatmap, (crop_size, crop_size), interpolation=cv2.INTER_LINEAR)

            # Get the flipped heatmap
            flipped_heatmap = single_scale_flipped_output[flipped_idx[ji], :, :].copy()
            flipped_heatmap = cv2.resize(flipped_heatmap, (crop_size, crop_size), interpolation=cv2.INTER_LINEAR)
            flipped_heatmap = cv2.flip(flipped_heatmap, 1)

            # Average the original heatmap with flipped heatmap
            heatmap += flipped_heatmap
            heatmap *= 0.5

            fused_pose_output[smi, ji, :, :] = heatmap

    # Second fuse multi-scale predictions
    base_pose_output_list = []
    base_crop_param_list = []
    for smi in range(0, len(scale_multiplier)):
        single_scale_output = fused_pose_output[smi, :, :, :]
        crop_param = cropped_param_list[smi]

        # Crop the heatmaps without padding
        cropped_single_scale_output = single_scale_output[:, crop_param[0, 3]:crop_param[0, 7], crop_param[0, 2]:crop_param[0, 6]]

        # Resize the cropped heatmaps to base scale
        cropped_single_scale_output = cropped_single_scale_output.transpose((1, 2, 0))
        base_single_scale_output = cv2.resize(cropped_single_scale_output, None, fx=1.0/scale_multiplier[smi], fy=1.0/scale_multiplier[smi], interpolation=cv2.INTER_LINEAR)
        base_single_scale_output = base_single_scale_output.transpose((2, 0, 1))

        # Resize the cropping parameters
        base_crop_param = crop_param * (1.0 / scale_multiplier[smi])

        # Add to list
        base_pose_output_list.append(base_single_scale_output)
        base_crop_param_list.append(base_crop_param)

    # Multi-scale fusion results
    ms_fused_pose_output = np.zeros((base_pose_output_list[0].shape))

    # Accumulate map for division
    accumulate_map = np.zeros((base_pose_output_list[0].shape)) + 1
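    # accumulate_map counts how many scales contribute at each pixel; it starts
    # at 1, so the `smi >= 1` update below only adds the extra scales.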

    # Use the smallest image as reference
    base_start_x = int(base_crop_param_list[0][0, 0])
    base_start_y = int(base_crop_param_list[0][0, 1])
    for smi in range(0, len(scale_multiplier)):
        # Get base parameters and pose output
        base_crop_param = base_crop_param_list[smi]
        base_pose_output = base_pose_output_list[smi]

        # Temporary pose heatmaps
        temp_pose_output = np.zeros_like(ms_fused_pose_output)

        # Relative location for reference image
        store_start_x = int(base_crop_param[0, 0]) - base_start_x
        store_start_y = int(base_crop_param[0, 1]) - base_start_y
        store_end_x = int(min(store_start_x + base_pose_output.shape[2], ms_fused_pose_output.shape[2]))
        store_end_y = int(min(store_start_y + base_pose_output.shape[1], ms_fused_pose_output.shape[1]))

        temp_pose_output[:, store_start_y:store_end_y, store_start_x:store_end_x] = base_pose_output[:, 0:(store_end_y - store_start_y), 0:(store_end_x - store_start_x)]
        ms_fused_pose_output += temp_pose_output

        # Update the accumulate map
        if smi >= 1:
            accumulate_map[:, store_start_y:store_end_y, store_start_x:store_end_x] += 1
    
    # Average by the accumulate map
    # Every position is counted at least once (avoids division by zero); this also
    # prevents the fused map from being dominated by the centre crop
    accumulate_map[accumulate_map == 0] = len(scale_multiplier)
    ms_fused_pose_output = np.divide(ms_fused_pose_output, accumulate_map)
    
    # Get the final prediction results
    pred_joints = np.zeros((num_of_joints, 3))

    # Perform NMS to find joint candidates
    all_peaks = []
    peak_counter = 0
    for ji in range(0, num_of_joints):
        heatmap_ori = ms_fused_pose_output[ji, :, :]
        heatmap = gaussian_filter(heatmap_ori, sigma=3)
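        # Compare the smoothed heatmap with four one-pixel shifted copies of
        # itself: a pixel that is >= all four neighbours and above conf_th is a
        # local maximum, i.e. a joint candidate.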

        heatmap_left = np.zeros(heatmap.shape)
        heatmap_left[1:, :] = heatmap[:-1, :]
        heatmap_right = np.zeros(heatmap.shape)
        heatmap_right[:-1, :] = heatmap[1:, :]
        heatmap_up = np.zeros(heatmap.shape)
        heatmap_up[:, 1:] = heatmap[:, :-1]
        heatmap_down = np.zeros(heatmap.shape)
        heatmap_down[:, :-1] = heatmap[:, 1:]
    
        peaks_binary = np.logical_and.reduce((heatmap >= heatmap_left, heatmap >= heatmap_right, heatmap >= heatmap_up, heatmap >= heatmap_down, heatmap > conf_th))
        peaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0]))
        peaks_with_score = [x + (heatmap_ori[x[1], x[0]], ) for x in peaks]
        peak_ids = range(peak_counter, peak_counter + len(peaks))  # avoid shadowing built-in id()
        peaks_with_score_and_id = [peaks_with_score[i] + (peak_ids[i], ) for i in range(len(peaks))]

        all_peaks.append(peaks_with_score_and_id)
        peak_counter += len(peaks)

    # Recover the peaks to locations in original image
    cropped_param = base_crop_param_list[0]
    all_joint_candi_list = []
    for ji in range(0, len(all_peaks)):
        joint_candi_list = []
        peaks_base = all_peaks[ji]
        for ci in range(0, len(peaks_base)):
            joint_candi = np.zeros((1, 4))
            joint_candi[0, :] = np.array(peaks_base[ci])
            joint_candi[0, 0] = (joint_candi[0, 0] + cropped_param[0, 0]) / base_scale
            joint_candi[0, 1] = (joint_candi[0, 1] + cropped_param[0, 1]) / base_scale
            joint_candi_list.append(joint_candi)
        all_joint_candi_list.append(joint_candi_list)

    # Get the center embedding results
    start = stride / 2.0 - 0.5
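    # `start` converts heatmap grid coordinates to image pixels: grid cell
    # (g_x, g_y) is centred at (start + g_x * stride, start + g_y * stride).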
    all_embedding_list = []
    for ji in range(0, len(all_joint_candi_list)):
        joint_candi_list = all_joint_candi_list[ji]
        embedding_list = []
        for ci in range(0, len(joint_candi_list)):
            joint_candi = joint_candi_list[ci][0, 0:2]
            offset_x_avg = 0.0
            offset_y_avg = 0.0
            valid_offset_count = 0.0
            embedding = np.zeros((1, 2))
            for si in range(0, len(scale_multiplier)):
                orie_maps = orie_output[si, :, :, :]

                flipped_orie_maps = flipped_orie_output[si, :, :, :]

                joint_candi_scaled = joint_candi * scale_multiplier[si] * base_scale
                joint_candi_scaled[0] = joint_candi_scaled[0] - cropped_param_list[si][0, 0] + cropped_param_list[si][0, 2]
                joint_candi_scaled[1] = joint_candi_scaled[1] - cropped_param_list[si][0, 1] + cropped_param_list[si][0, 3]
                g_x = int((joint_candi_scaled[0] - start) / stride)
                g_y = int((joint_candi_scaled[1] - start) / stride)
                if g_x >= 0 and g_x < crop_size / stride and g_y >= 0 and g_y < crop_size / stride:
                    offset_x = orie_maps[ji * 2, g_y, g_x]
                    offset_y = orie_maps[ji * 2 + 1, g_y, g_x]

                    flipped_offset_x = flipped_orie_maps[flipped_idx[ji] * 2, g_y, crop_size // stride - g_x - 1]
                    flipped_offset_y = flipped_orie_maps[flipped_idx[ji] * 2 + 1, g_y, crop_size // stride - g_x - 1]
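                    # A horizontal flip negates x-offsets but leaves y-offsets
                    # unchanged, hence the minus sign on the flipped x term.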
                    offset_x = (offset_x - flipped_offset_x) / 2.0
                    offset_y = (offset_y + flipped_offset_y) / 2.0

                    offset_x *= training_crop_size / 2.0  
                    offset_y *= training_crop_size / 2.0 
                    offset_x = offset_x / (scale_multiplier[si] * base_scale)
                    offset_y = offset_y / (scale_multiplier[si] * base_scale)
                    offset_x_avg += offset_x
                    offset_y_avg += offset_y
                    valid_offset_count += 1

            if valid_offset_count > 0:
                offset_x_avg /= valid_offset_count
                offset_y_avg /= valid_offset_count
            embedding[0, 0] = joint_candi[0] + offset_x_avg
            embedding[0, 1] = joint_candi[1] + offset_y_avg
            embedding_list.append(embedding)
        all_embedding_list.append(embedding_list)
        
    # Convert to np array
    all_embedding_np_array = np.empty((0, 2))
    for ji in range(0, len(all_embedding_list)):
        embedding_list = all_embedding_list[ji]
        for ci in range(0, len(embedding_list)):
            embedding = embedding_list[ci]
            all_embedding_np_array = np.vstack((all_embedding_np_array, embedding))

    all_joint_candi_np_array = np.empty((0, 5))
    for ji in range(0, len(all_joint_candi_list)):
        joint_candi_list = all_joint_candi_list[ji]
        for ci in range(0, len(joint_candi_list)):
            joint_candi_with_type = np.zeros((1, 5))
            joint_candi = joint_candi_list[ci]
            joint_candi_with_type[0, 0:4] = joint_candi[0, :]
            joint_candi_with_type[0, 4] = ji
            all_joint_candi_np_array = np.vstack((all_joint_candi_np_array, joint_candi_with_type))

    # Cluster the embeddings
    if all_embedding_np_array.shape[0] < 2:
        clusters = [-1]
    else:
        Z = hcluster.linkage(all_embedding_np_array, method='centroid')
        clusters = hcluster.fcluster(Z, dist_th, criterion='distance')
        clusters = clusters - 1
    
    # Get people structure by greedy search
    num_of_people = max(clusters) + 1
    joint_idx_list = [1, 
                      0, 2, 5, 8, 11,
                         3, 6, 9, 12,
                         4, 7, 10, 13]
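    # Traverse joints from the neck outwards so that each limb joint is attached
    # to the person whose accumulated embedding centre is nearest.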

    people = []
    for pi in range(0, num_of_people):
        joint_of_person_idx = np.where(clusters == pi)[0]
        joint_candi_cur_persons = all_joint_candi_np_array[joint_of_person_idx, 0:3]
        end_candi_cur_persons = all_embedding_np_array[joint_of_person_idx, :]
        joint_type_cur_person = all_joint_candi_np_array[joint_of_person_idx, 4]
        if len(joint_type_cur_person) > len(np.unique(joint_type_cur_person)):
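            # Duplicate joint types inside one cluster mean several people were
            # merged; split them with a greedy nearest-centre assignment.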
            persons = []    
            persons_ends_list = []
            for joint_idx in joint_idx_list:
                # If the joint is neck, do initialization
                if joint_idx == 1:
                    neck_candi = np.where(joint_type_cur_person == joint_idx)[0]
                    for ni in range(len(neck_candi)):
                        person = {}
                        person[str(joint_idx)] = joint_candi_cur_persons[neck_candi[ni], :]
                        persons.append(person)
                        persons_ends = np.zeros((1, 2))
                        persons_ends[0, :] = end_candi_cur_persons[neck_candi[ni], :]
                        persons_ends_list.append(persons_ends)
                # For other joints, do connection
                else:
                    other_candi = np.where(joint_type_cur_person == joint_idx)[0]
                    other_pos = end_candi_cur_persons[other_candi]
                    person_centers = np.zeros((len(persons), 2))
                    person_idx = np.zeros((len(persons), 1), dtype=int)  # np.int was removed in NumPy 1.24
                    for mi in range(len(persons_ends_list)):
                        person_centers[mi, :] = np.mean(persons_ends_list[mi], axis=0)
                        person_idx[mi] = mi
                    while (other_candi.shape[0] > 0 and person_centers.shape[0] > 0):
                        dist_matrix = np.zeros((other_candi.shape[0], person_centers.shape[0]))
                        for hi in range(other_candi.shape[0]):
                            for ci in range(person_centers.shape[0]):
                                offset_vec = other_pos[hi, :] - person_centers[ci, :]
                                dist = math.sqrt(offset_vec[0] * offset_vec[0] + offset_vec[1] * offset_vec[1])
                                dist_matrix[hi, ci] = dist
                        connection = np.where(dist_matrix == dist_matrix.min())
                        persons[person_idx[connection[1][0], 0]][str(joint_idx)] = joint_candi_cur_persons[other_candi[connection[0][0]], :]
                        persons_ends_list[person_idx[connection[1][0], 0]] = np.vstack((persons_ends_list[person_idx[connection[1][0], 0]], 
                                                                                        end_candi_cur_persons[other_candi[connection[0][0]], :]))

                        other_candi = np.delete(other_candi, connection[0][0], axis=0)
                        other_pos = np.delete(other_pos, connection[0][0], axis=0)
                        person_centers = np.delete(person_centers, connection[1][0], axis=0)
                        person_idx = np.delete(person_idx, connection[1][0], axis=0)
                    if other_candi.shape[0] > 0 and joint_idx < 2:
                        # Add new person to list
                        for hi in range(other_candi.shape[0]):
                            person = {}
                            person[str(joint_idx)] = joint_candi_cur_persons[other_candi[hi], :]
                            persons.append(person)
                            persons_ends = np.zeros((1, 2))
                            persons_ends[0, :] = end_candi_cur_persons[other_candi[hi], :]
                            persons_ends_list.append(persons_ends)
            for person in persons:
                people.append(person)
        else:
            person = {}
            for ji in range(0, len(joint_of_person_idx)):
                person[str(int(all_joint_candi_np_array[joint_of_person_idx[ji], 4]))] = all_joint_candi_np_array[joint_of_person_idx[ji], :]
            people.append(person)

    
    if objpos is not None and scale_provided is not None and center_box is not None:
        # Exclude out of group persons
        extend_pixels = center_box_extend_pixels
        extend_pixels = extend_pixels / base_scale
        extend_center_box = np.zeros((4, 1)) 
        extend_center_box[0] = max(0, int(center_box[0] - extend_pixels))
        extend_center_box[1] = max(0, int(center_box[1] - extend_pixels))
        extend_center_box[2] = min(im_width, int(center_box[2] + extend_pixels))
        extend_center_box[3] = min(im_height, int(center_box[3] + extend_pixels))

        num_of_people = len(people)
        center_of_mass = np.zeros((num_of_people, 2))

        for pi in range(0, num_of_people):
            person = people[pi]
            point = {}
            point['x'] = []
            point['y'] = []
            for ji in range(0, num_of_joints):
                if str(ji) in person:
                    point['x'].append(person[str(ji)][0])
                    point['y'].append(person[str(ji)][1])
            if len(point['x']) > 0 and len(point['y']) > 0:
                center_of_mass[pi, 0] = np.mean(point['x'])
                center_of_mass[pi, 1] = np.mean(point['y'])

        isInExtendedBBox = np.zeros((num_of_people, 1))
        for pi in range(0, num_of_people):
            com = center_of_mass[pi, :]
            if (com[0] >= extend_center_box[0] and com[1] >= extend_center_box[1]) and (com[0] <= extend_center_box[2] and com[1] <= extend_center_box[3]):
                isInExtendedBBox[pi] = 1

        people_in_center_box = []
        for pi in range(0, num_of_people):
            if isInExtendedBBox[pi] == 1:
                people_in_center_box.append(people[pi])
    else:
        people_in_center_box = people

    # Return prediction results
    joint_idx_mapping = [9, 8, 12, 11, 10, 13, 14, 15, 2, 1, 0, 3, 4, 5]
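    # Remap the network's internal joint order to the evaluation format's ids.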
    annopoints_array = []
    
    for pi in range(0, len(people_in_center_box)):
        person = people_in_center_box[pi]
        point = {}
        point['x'] = []
        point['y'] = []
        point['score'] = []
        point['id'] = []
        for ji in range(0, 14):
            if str(ji) in person:
                point['x'].append(person[str(ji)][0])
                point['y'].append(person[str(ji)][1])
                point['score'].append(person[str(ji)][2])
                point['id'].append(joint_idx_mapping[ji])
        points_struct = fromarrays([point['x'], point['y'], point['id'], point['score']], names=['x', 'y', 'id', 'score'])
        if len(points_struct) < 4:
            continue
        annopoints = {}
        annopoints['point'] = points_struct
        annopoints_array.append(annopoints)    
    
    # If no person was detected, add a few random dummy persons
    dummy_joint_id = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
    if len(annopoints_array) == 0:
        for pi in range(0, np.random.randint(2, 5)):
            point = {}
            point['x'] = []
            point['y'] = []
            point['score'] = []
            point['id'] = []
            for ji in range(0, np.random.randint(2, len(dummy_joint_id))):
                point['x'].append(np.float64(crop_size / 2.0))
                point['y'].append(np.float64(crop_size / 2.0))
                point['score'].append(np.float64(0.5))
                point['id'].append(int(dummy_joint_id[ji]))
            points_struct = fromarrays([point['x'], point['y'], point['id'], point['score']], names=['x', 'y', 'id', 'score'])
            annopoints = {}
            annopoints['point'] = points_struct
            annopoints_array.append(annopoints)
    
    mp_pose = fromarrays([annopoints_array], names=['annopoints'])

    if visualization:
        vis_mppe_results(im, people_in_center_box, save_im=True, save_path=vis_im_path)    

    return mp_pose
コード例 #39
0
import numpy as np
import pymysql as mdb
from numpy.rec import fromarrays

snapshot = 68

sqlserver = 'localhost'
user = '******'
password = '******'
dbname = 'mb2_hydro'
unix_socket = '/home/rmandelb.proj/flanusse/mysql/mysql.sock'
db = mdb.connect(sqlserver, user, password, dbname, unix_socket=unix_socket)

# query for halo masses
sql = "SELECT mass,groupId FROM subfind_groups WHERE snapnum=%d;" % (snapshot)

print('Submitting query for halo masses')
cursor = db.cursor()
cursor.execute(sql)
results = fromarrays(np.array(cursor.fetchall()).squeeze().T,
                     names="mass,groupId")

# query for galaxies
sql2 = "SELECT groupId,haloId FROM subfind_halos WHERE snapnum=%d;" % (
    snapshot)

print('Submitting query for galaxy IDs')
cursor = db.cursor()
cursor.execute(sql2)
results2 = fromarrays(np.array(cursor.fetchall()).squeeze().T,
                      names="groupId,haloId")

ind = np.argsort(results['groupId'])

M = results['mass'][ind][results2['groupId'].astype(int)]
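# Note: indexing the groupId-sorted masses directly with the subhalo groupIds
# assumes group ids are contiguous integers 0..N-1 at this snapshot; M then
# holds the parent-group mass for every subhalo in results2.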
コード例 #40
0
import numpy as np
from numpy.rec import fromarrays


def _dict_to_ndarray(d):
    return fromarrays(d.values(),
                      np.dtype([(str(k), v.dtype) for k, v in d.items()]))
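
# A minimal usage sketch (hypothetical data): every dict key becomes a named
# field of the resulting record array.
# >>> d = {'x': np.array([1.0, 2.0]), 'flag': np.array([True, False])}
# >>> _dict_to_ndarray(d)['x']
# array([1., 2.])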
コード例 #41
0
ファイル: coord.py プロジェクト: RainW7/SamPy
import math

import numpy as np
from astLib import astCoords  # provides calcAngSepDeg
from numpy.rec import fromarrays


def match(ra1, dec1, ra2, dec2, tol, allmatches=False):
    """
    Given two sets of numpy arrays of ra, dec and a tolerance tol in
    arcsec (float), returns a record array with fields 'ind' and 'sep'
    of the same length as the first input array.  If ind >= 0, it is
    the index of the closest matching second-array element within tol
    arcsec and sep is the separation; ind == -1 means there was no
    matching ra/dec within tol arcsec.

    if allmatches = True, then for each object in the first array,
    return the index of everything in the second arrays within the
    search tolerance, not just the closest match.

    :note: does not force one-to-one mapping

    Note to get the indices of objects in ra2, dec2 without a match:
    imatch = match(ra1, dec1, ra2, dec2, 2.)
    inomatch = np.setdiff1d(np.arange(len(ra2)), imatch.ind)
    """
    DEG_PER_HR = 360. / 24.  # degrees per hour
    DEG_PER_MIN = DEG_PER_HR / 60.  # degrees per min
    DEG_PER_S = DEG_PER_MIN / 60.  # degrees per sec
    DEG_PER_AMIN = 1. / 60.  # degrees per arcmin
    DEG_PER_ASEC = DEG_PER_AMIN / 60.  # degrees per arcsec
    RAD_PER_DEG = math.pi / 180.  # radians per degree

    isorted = ra2.argsort()
    sdec2 = dec2[isorted]
    sra2 = ra2[isorted]

    LIM = tol * DEG_PER_ASEC

    match = []

    #this is faster but less accurate
    # use mean dec, assumes decs similar
    #decav = np.mean(sdec2.mean() + dec1.mean())
    #RA_LIM = LIM / np.cos(decav * RAD_PER_DEG)

    for ra, dec in zip(ra1, dec1):
        #slower but more accurate
        RA_LIM = LIM / np.cos(dec * RAD_PER_DEG)

        i1 = sra2.searchsorted(ra - RA_LIM)
        i2 = i1 + sra2[i1:].searchsorted(ra + RA_LIM)
        close = []
        for j in range(i1, i2):  # xrange is Python 2 only
            decdist = np.abs(dec - sdec2[j])
            if decdist > LIM:
                continue
            else:
                # if ras and decs are within LIM, then
                # calculate actual separation
                disq = astCoords.calcAngSepDeg(ra, dec, sra2[j], sdec2[j])
                close.append((disq, j))

        close.sort()
        if not allmatches:
            # Choose the object with the closest separation inside the
            # requested tolerance, if one was found.
            if len(close) > 0:
                min_dist, jmin = close[0]
                if min_dist < LIM:
                    match.append((isorted[jmin], min_dist))
                    continue
            # otherwise no match
            match.append((-1, -1))
        else:
            # append all the matching objects
            jclose = []
            seps = []
            for dist, j in close:
                if dist < LIM:
                    jclose.append(j)
                    seps.append(dist)
                else:
                    break
            match.append(
                fromarrays([isorted[jclose], seps],
                           dtype=[('ind', 'i8'), ('sep', 'f8')]))

    if not allmatches:
        # return both indices and separations in a recarray
        temp = np.rec.fromrecords(match, names='ind,sep')
        # change to arcseconds
        temp.sep *= 3600.
        temp.sep[temp.sep < 0] = -1.
        return temp
    else:
        return match
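
# Example usage (hypothetical catalogues): match each object in (ra1, dec1) to
# its nearest neighbour in (ra2, dec2) within 2 arcsec.
# >>> m = match(ra1, dec1, ra2, dec2, 2.)
# >>> good = m.ind >= 0       # objects with a counterpart in (ra2, dec2)
# >>> m.sep[good]             # separations in arcsec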