Example #1
    def __init__(self, path=None, every=1, raw=False):
        """ 
            path is the dirname of the path to the blackhole_details_?.txt files
            everything will be in internal units

            .trees is sorted by final blackhole mass [0] the most massive
            .trees.byid is a dictionary accessing the trees by id.
            .blackholes is sorted by final bh mass [0] the most massive
            .blackholes.byid is a dictionary accessing the blackholes by id.
        """
        # prefer the plain-text details files when they exist; otherwise
        # fall back to the raw binary reader
        if not raw and os.path.exists(os.path.join(path, "blackhole_details_%d.txt" % 0)):
            data, merger = self._readtxt(path, every)
        else:
            data, merger = self._readraw(path, every)
        self.data = data
        self.merger = merger
        self._fillmain()
        self._fillparent()
        self.data.sort(order=['mainid', 'id', 'time']) 

        # data is already clustered by mainid and id
        treeids, start, end = uniqueclustered(self.data['mainid'])
        trees = packarray(self.data, start=start, end=end)
        arg = numpy.argsort([tree['mass'].max() for tree in trees])[::-1]
        self.trees = packarray(self.data, start=start[arg], end=end[arg])
        self.trees.byid = dict(zip(treeids, trees))
        bhids, start, end = uniqueclustered(self.data['id'])
        blackholes = packarray(self.data, start=start, end=end) 
        arg = numpy.argsort([blackhole['mass'].max() for blackhole in
            blackholes])[::-1]
        self.blackholes = packarray(self.data, start=start[arg], end=end[arg])
        self.blackholes.byid = dict(zip(bhids, blackholes))
        # self._fillmergermass()
        self.merger2 = self.merger.copy()
        self.merger2.sort(order=['after', 'time'])

        if len(merger) > 0:
            # mark detail rows recorded exactly at a merger event and
            # invalidate their masses
            t = merger['time']
            arg = t.argsort()
            t = t[arg]
            after = merger['after'][arg]
            swallowed = merger['swallowed'][arg]
            ind = t.searchsorted(self.data['time'])
            bad = (t.take(ind, mode='clip') == self.data['time'])
            bad &= after.take(ind, mode='clip') == self.data['id']
            bad &= swallowed.take(ind, mode='clip') == self.data['id']
            self.data['mass'][bad] = numpy.nan
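
The excerpt above shows only the constructor, so the class name is not visible. A minimal usage sketch, assuming a hypothetical class name BHDetail and the attribute layout described in the docstring:

    # BHDetail is a hypothetical name; the real class name is not shown above
    details = BHDetail(path='output', every=10)
    tree = details.trees[0]                # most massive merger tree
    bh = details.blackholes.byid[12345]    # history of one black hole, by id
    print(bh['time'], bh['mass'])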
Example #2
import argparse
import numpy
import sharedmem
# Snapshot, fillingcurve, MeshIndex and packarray come from the
# surrounding package; their exact import paths are not shown here.

def main():
  parser = argparse.ArgumentParser()
  parser.add_argument("-f", dest='format', default='genic', required=False)
  parser.add_argument("filename")
  # n is the number of bits per axis of the filling-curve key
  parser.add_argument("-n", dest='bitsperaxis', default=6, required=False, type=int)
  parser.add_argument("-o", dest='output', default=None, required=True)
  args = parser.parse_args()
  
  bits = args.bitsperaxis * 3

  if '%d' in args.filename:
    snap0 = Snapshot(args.filename % 0, args.format)
    Nfile = snap0.C['Nfiles']
  else:
    snap0 = Snapshot(args.filename, args.format)
    Nfile = 1

  boxsize = snap0.C['boxsize']
  C = []
  F = []
  with sharedmem.Pool(use_threads=True) as pool:
    def work(fid):
      if '%d' in args.filename:
        snap = Snapshot(args.filename % fid, args.format)
      else:
        snap = Snapshot(args.filename, args.format)
      N = snap.C['N'].sum()
      x,y,z = snap[None, 'pos'].T
      del snap
      scale = fillingcurve.scale(0, boxsize)
      zkey = fillingcurve.encode(x, y, z, scale=scale)
      del x, y, z
      # keep the top bits of the space-filling-curve key as the coarse cell id
      dig = numpy.uint32(zkey >> (fillingcurve.bits * 3 - bits))
  
      bincount = numpy.bincount(dig)
      
      return (fid, bincount.nonzero()[0])

    def reduce(res):
      fid, active = res
      F.append(fid)
      C.append(list(active))
      print(fid, len(active))
    pool.map(work, range(Nfile), callback=reduce)

  F = numpy.array(F, dtype='u4')
  l = [len(a) for a in C]
  C = numpy.concatenate(C)
  fid = numpy.repeat(F, l)
  arg = C.argsort()
  fid = fid[arg]
  count = numpy.bincount(C, minlength=1<<bits)
  
  index = packarray(fid, count)
  meshindex = MeshIndex(0, boxsize, args.bitsperaxis, index)
  meshindex.tofile(args.output)
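
The reduce step above builds a cell-to-file inverted index from numpy primitives alone: repeat expands every file id over the cells it touched, argsort clusters the pairs by cell, and bincount yields the per-cell segment lengths that packarray slices by. A self-contained sketch of the same pattern, with the project-local packarray replaced by an explicit offset loop:

    import numpy

    F = numpy.array([0, 1, 2], dtype='u4')   # three hypothetical files
    C = [[0, 2], [2, 3], [0, 3]]             # active cells seen in each file

    l = [len(a) for a in C]
    cells = numpy.concatenate(C)
    fid = numpy.repeat(F, l)                 # file id of every (file, cell) pair
    fid = fid[cells.argsort()]               # cluster file ids by cell
    count = numpy.bincount(cells, minlength=4)

    offsets = numpy.concatenate(([0], count.cumsum()))
    for k in range(len(count)):              # files whose particles fall in cell k
        print(k, fid[offsets[k]:offsets[k + 1]])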
Example #3
  def __init__(self, tabfilename, format, count=10, **kwargs):
    """ 
       tabfilename is like groups_019/group_tab_019.%d.
    """
    g = Snapshot(tabfilename % 0, format + '.GroupTab',
              **kwargs)
    if count < 0 or count > g.C['Ntot'][0]:
        count = g.C['Ntot'][0]
    i = 0
    # decide number of files to open
    nread = 0
    tabs = []
    while nread < count:
      g = Snapshot(tabfilename % i, format + '.GroupTab',
              **kwargs)
      nread += g.C['N'][0] 
      i = i + 1
      tabs.append(g)
    # print('will read', len(tabs), 'files')
    Field.__init__(self, numpoints=count, components={'offset':'i8',
        'length':'i8', 'massbytype':('f8', 6), 'mass':'f8', 'pos':('f8', 3),
        'vel':('f8', 3)})
    if len(tabs) > 0:
        self.take_snapshots(tabs, ptype=0)
        del tabs

        # fix the offset which may overflow for large halos
        self['offset'][1:] = self['length'].cumsum()[:-1]

        nread = 0
        nshallread = self['length'].sum()
        i = 0
        idslen = numpy.zeros(g.C['Nfiles'], dtype='i8')
        while nread < nshallread:
          # the third i4 of the header is the id count in this file
          idslen[i] = numpy.fromfile(tabfilename.replace('_tab_', '_ids_')
                  % i, dtype='i4', count=3)[2]
          nread += idslen[i]
          i = i + 1
        idsoffset = numpy.concatenate(([0], idslen.cumsum()))

        ids = sharedmem.empty(idslen.sum(), dtype=g.C['idtype'])

        # print('reading', i, 'id files')

        with sharedmem.Pool() as pool:
          def work(i):
            # skip the 28-byte file header, then copy this file's ids
            # into its slice of the shared output array
            more = numpy.memmap(tabfilename.replace('_tab_', '_ids_')
                  % i, dtype=g.C['idtype'], mode='r', offset=28)
            ids[idsoffset[i]:idsoffset[i] + idslen[i]] = more
          pool.map(work, range(i))
        self.ids = packarray(ids, self['length'])
        for i in range(self.numpoints):
          self.ids[i].sort()
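
The offset repair near the top of this constructor is an exclusive prefix sum: group k starts where the cumulative length of groups 0..k-1 ends, which sidesteps the i4 overflow the original comment mentions. The same fix on a toy array:

    import numpy

    length = numpy.array([3, 5, 2], dtype='i8')
    offset = numpy.empty_like(length)
    offset[0] = 0
    offset[1:] = length.cumsum()[:-1]    # exclusive prefix sum: [0, 3, 8]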
Example #4
    @classmethod
    def fromfile(cls, filename):
        f = F77File(filename, 'r')
        # headerdtype is defined elsewhere in the module
        header = f.read_record(headerdtype, 1)[0]
        N = f.read_record('i4', header['Nd'])
        boxsize = f.read_record('f8', header['Nd'])
        self = cls(N=N, boxsize=boxsize, Nd=header['Nd'])

        # collect the records in lists and concatenate once, rather than
        # growing the arrays with numpy.append on every iteration
        sizes = []
        for i in range(self.N[0]):
            sizes.append(f.read_record('i4', numpy.prod(self.N[1:])))
        size = numpy.concatenate(sizes)

        size = size.reshape(self.N[0], -1)
        chunks = []
        for i in range(self.N[0]):
            chunks.append(f.read_record('i4', size[i].sum(dtype='i8')))
        data = numpy.concatenate(chunks)

        self.data = packarray(data, size.reshape(-1))
        return self
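
packarray, used in all four examples, wraps a flat array plus per-row lengths into a ragged array. It is local to this codebase; a rough stand-in that performs the same slicing, assuming only the (data, sizes) layout visible above:

    import numpy

    data = numpy.arange(9, dtype='i4')   # flat storage
    size = numpy.array([2, 4, 3])        # length of each logical row

    end = size.cumsum()
    start = end - size
    rows = [data[s:e] for s, e in zip(start, end)]
    # rows == [array([0, 1]), array([2, 3, 4, 5]), array([6, 7, 8])]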