Code Example #1
File: readcube.py  Project: neurodata/ndstore
def main():

    parser = argparse.ArgumentParser(
        description='Read a single cube and write images')
    parser.add_argument('token',
                        action="store",
                        help='Token for the annotation project.')
    parser.add_argument('zindex', type=int, action="store", help='Z-index of the cube to read.')
    parser.add_argument('resolution', type=int, action="store", help='Resolution at which to read the cube.')

    result = parser.parse_args()

    # load a database
    [db, proj, projdb] = ocpcarest.loadDBProj(result.token)

    # get the dataset configuration
    (xcubedim, ycubedim,
     zcubedim) = cubedim = proj.datasetcfg.cubedim[result.resolution]
    (startslice, endslice) = proj.datasetcfg.slicerange

    import pdb
    pdb.set_trace()

    sql = "SELECT zindex, cube FROM res{} WHERE zindex={}".format(
        result.resolution, result.zindex)
    db.cursor.execute(sql)
    idx, datastring = db.cursor.fetchone()

    incube = imagecube.ImageCube8(cubedim)
    incube.fromNPZ(datastring[:])
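
For context: the ingest scripts later in this listing build these cube blobs with np.save plus zlib, and imagecube's fromNPZ presumably performs the inverse. A minimal sketch of the round trip (illustrative only; it uses io.BytesIO, while the Python 2 sources use cStringIO):

import io
import zlib
import numpy as np

def cube_to_blob(cubedata):
    # serialize with np.save, then zlib-compress, as the ingest loops do
    fileobj = io.BytesIO()
    np.save(fileobj, cubedata)
    return zlib.compress(fileobj.getvalue())

def blob_to_cube(blob):
    # hypothetical inverse, mirroring what fromNPZ likely does
    return np.load(io.BytesIO(zlib.decompress(blob)))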
Code Example #2
    def loadDB(self):
        """Load the database if it hasn't been load."""

        if self.db is None:
            # load the database/token
            [self.db, self.proj,
             self.projdb] = ocpcarest.loadDBProj(self.token)
Code Example #3
  def getTile ( self, webargs ):
    """Either fetch the file from mocpcache or get a mcfc image"""

    # parse the web args
    self.token, tileszstr, self.channels, resstr, xtilestr, ytilestr, zslicestr, rest = webargs.split('/',8)

    [ self.db, self.proj, projdb ] = ocpcarest.loadDBProj ( self.token )

    # convert args to ints
    xtile = int(xtilestr)
    ytile = int(ytilestr)
    res = int(resstr)
    # modify the zslice to the offset
    zslice = int(zslicestr)-self.proj.datasetcfg.slicerange[0]
    self.tilesz = int(tileszstr)

    # mocpcache key
    mckey = self.buildKey(res,xtile,ytile,zslice)

    # do something to sanitize the webargs??
    # if tile is in mocpcache, return it
    tile = self.mc.get(mckey)
    if tile is None:
      img=self.cacheMiss(res,xtile,ytile,zslice)
      fobj = cStringIO.StringIO ( )
      img.save ( fobj, "PNG" )
      self.mc.set(mckey,fobj.getvalue())
    else:
      fobj = cStringIO.StringIO(tile)

    fobj.seek(0)
    return fobj
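
getTile above follows the cache-aside pattern: try mocpcache first, render on a miss, then populate the cache. The same shape reduced to its essentials (an illustrative sketch; mc is assumed to be a python-memcached style client and render_tile is a hypothetical callback):

def cached_tile(mc, key, render_tile):
    blob = mc.get(key)           # None on a cache miss
    if blob is None:
        blob = render_tile()     # build the PNG bytes for this tile
        mc.set(key, blob)        # populate the cache for the next request
    return blob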
Code Example #4
def main():

  parser = argparse.ArgumentParser(description='Create a mergemap file.')
  parser.add_argument('baseurl', action="store", help='Baseurl for OCP service')
  parser.add_argument('token', action="store", help='Database token to access')
  parser.add_argument('csvfile', action="store", help='File containing csv list of merges.')

  # customize for kasthuri11 
  resolution = 1

  result = parser.parse_args()

  # Convert the csv file into a dictionary
  f = open ( result.csvfile )

  # default dictionary of integers
  d = defaultdict(int)

  csvr = csv.reader(f, delimiter=',')
  for r in csvr:
    for s in r[1:]:
      d[int(s)]=int(r[0])

  # load the input database
  [ db, proj, projdb ] = ocpcarest.loadDBProj ( result.token )

  # get the dataset configuration
  (xcubedim,ycubedim,zcubedim) = proj.datasetcfg.cubedim[resolution]
  (ximagesz,yimagesz) = proj.datasetcfg.imagesz[resolution] 
  (startslice,endslice) = proj.datasetcfg.slicerange
  slices = endslice - startslice + 1

  # iterate over the defined set of cubes in the database
  sql = "SELECT zindex from res1;"
  cursor = db.conn.cursor()
  cursor.execute(sql)
  zindexes = np.array([item[0] for item in cursor.fetchall()])

  i=0

  # iterate over the cubes in morton order
  for mortonidx in zindexes:

    print "Working on cube {}".format(i)
    i = i+1

    cuboid = db.getCube ( mortonidx, resolution )
    annoids =  np.unique ( cuboid.data )
    for id in annoids:
      if d[id]!=0:
        print "Rewrite id {} to {} for cube {}".format(id,d[id],mortonidx)
        vec_func = np.vectorize ( lambda x: x if x != id else d[id] )
        cuboid.data = vec_func ( cuboid.data )
      else:
        if id != 0:
          print "Id {} is one of the target identifiers".format(id)

    db.putCube ( mortonidx, resolution, cuboid )
    db.commit()
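
The CSV parsing above treats column 0 of each row as the id to keep and the remaining columns as ids merged into it. A small self-contained illustration of the resulting dictionary (io.StringIO here is Python 3; the source itself is Python 2):

from collections import defaultdict
import csv
import io

rows = io.StringIO("7,12,15\n9,21\n")
d = defaultdict(int)
for r in csv.reader(rows):
    for s in r[1:]:
        d[int(s)] = int(r[0])

# d[12] == 7, d[15] == 7, d[21] == 9; any unmapped id returns 0,
# which the rewrite loop above treats as "not merged".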
Code Example #5
def main():

  parser = argparse.ArgumentParser( description='Check the zscale for the dataset')
  parser.add_argument('dataset', action="store", help='Dataset name for the project')

  result = parser.parse_args()

  [ db, proj, projdb ] = ocpcarest.loadDBProj ( result.dataset )
  ( xcubedim, ycubedim, zcubedim ) = proj.datasetcfg.cubedim [ 0 ]
  for res in proj.datasetcfg.resolutions:
    print "Resolution ", res, "Zscale ", proj.datasetcfg.zscale[res], "Dims ", proj.datasetcfg.cubedim[res]
Code Example #6
def main():

    parser = argparse.ArgumentParser(
        description='Check the zscale for the dataset')
    parser.add_argument('dataset',
                        action="store",
                        help='Dataset name for the project')

    result = parser.parse_args()

    [db, proj, projdb] = ocpcarest.loadDBProj(result.dataset)
    (xcubedim, ycubedim, zcubedim) = proj.datasetcfg.cubedim[0]
    for res in proj.datasetcfg.resolutions:
        print "Resolution ", res, "Zscale ", proj.datasetcfg.zscale[
            res], "Dims ", proj.datasetcfg.cubedim[res]
Code Example #7
  def __init__(self, token, path, resolution, channel):

    self.path = path
    self.resolution = resolution

    [ self.db, self.proj, self.projdb ] = ocpcarest.loadDBProj ( token )

    (self.xcubedim, self.ycubedim, self.zcubedim) = self.proj.datasetcfg.cubedim[ resolution ]
    (self.startslice, self.endslice) = self.proj.datasetcfg.slicerange
    self.batchsz = self.zcubedim
    
    self.channel = channel

    self._ximgsz = self.proj.datasetcfg.imagesz[resolution][0]
    self._yimgsz = self.proj.datasetcfg.imagesz[resolution][1]

    self.cursor = self.db.conn.cursor()
Code Example #8
File: mitrachannel.py  Project: neurodata/ndstore
  def __init__(self, token, path, resolution, channel):

    self.path = path
    self.resolution = resolution

    [ self.db, self.proj, self.projdb ] = ocpcarest.loadDBProj ( token )

    (self.xcubedim, self.ycubedim, self.zcubedim) = self.proj.datasetcfg.cubedim[ resolution ]
    (self.startslice, self.endslice) = self.proj.datasetcfg.slicerange
    self.batchsz = self.zcubedim
    
    self.channel = channel

    self._ximgsz = self.proj.datasetcfg.imagesz[resolution][0]
    self._yimgsz = self.proj.datasetcfg.imagesz[resolution][1]

    self.cursor = self.db.conn.cursor()
Code Example #9
  def __init__ (self, token, channels, centroid):

    # arguments
    self.token = token
    self.channels = channels
    self.centroid = centroid
    
    # parameter defaults.  set by accessors.
    self.sog_width = 200
    self.sog_frame = 20
    self.width = 11
    self.normalize = True
    self.normalize2 = False
    self.resolution = 0
    self.refchannels = []
    self.emchannels = []
    self.enhance = None

    [ self.db, self.proj, self.projdb ] = ocpcarest.loadDBProj ( self.token )
Code Example #10
def main():

  parser = argparse.ArgumentParser(description='Read a single cube and write images')
  parser.add_argument('token', action="store", help='Token for the annotation project.')
  parser.add_argument('zindex', type=int, action="store", help='Z-index of the cube to read.')
  parser.add_argument('resolution', type=int, action="store", help='Resolution at which to read the cube.')

  result = parser.parse_args()

  # load a database
  [ db, proj, projdb ] = ocpcarest.loadDBProj ( result.token )

  # get the dataset configuration
  (xcubedim,ycubedim,zcubedim)=cubedim=proj.datasetcfg.cubedim[result.resolution]
  (startslice,endslice)=proj.datasetcfg.slicerange

  import pdb; pdb.set_trace()

  sql = "SELECT zindex, cube FROM res{} WHERE zindex={}".format(result.resolution,result.zindex) 
  db.cursor.execute(sql)
  idx, datastring = db.cursor.fetchone()

  incube = imagecube.ImageCube8 ( cubedim )
  incube.fromNPZ ( datastring[:] )
Code Example #11
def main():

  parser = argparse.ArgumentParser(description='Ingest the TIFF data')
  parser.add_argument('token', action="store", help='Token for the project')
  parser.add_argument('path', action="store", help='Directory with the image files')
  
  result = parser.parse_args()
  
  resolution = 0

  #Load a database
  [ db, proj, projdb ] = ocpcarest.loadDBProj ( result.token )

  # get the dataset configuration
  (xcubedim,ycubedim,zcubedim) = proj.datasetcfg.cubedim[resolution]
  (startslice,endslice)=proj.datasetcfg.slicerange
  batchsz = zcubedim

  batchsz = 16
  (ximagesz,yimagesz)=proj.datasetcfg.imagesz[resolution]

  # Set the image size to that of the actual image
  ximagesz = 6144
  yimagesz = 6144
  tilesz = 6144

  filelist = glob.glob( "{}*.tif".format(result.path) )

  # Get a list of the files in the directories
  for sl in range (startslice, endslice+1, batchsz):

    slab = np.zeros ( [ batchsz, yimagesz, ximagesz ], dtype=np.uint8 )

    for b in range ( batchsz ):

      if ( sl + b <= endslice ):

        # raw data
        #filenm = result.path + '{:0>4}_r1-c1_w01_s01_sec01_'.format(sl+b) + '{*}.tif'
        print "Opening filenm" + filelist[sl+b-1]

        img = Image.open (filelist[sl+b-1], 'r')
        imgdata = np.asarray ( img )
        slab[b,:,:] = imgdata

        # the last z offset that we ingest, if the batch ends before batchsz
        endz = b

    for y in range ( 0, yimagesz+1, ycubedim ):
      for x in range ( 0, ximagesz+1, xcubedim ):

        mortonidx = zindex.XYZMorton ( [x/xcubedim, y/ycubedim, (sl-startslice)/zcubedim] )
        cubedata = np.zeros ( [zcubedim, ycubedim, xcubedim], dtype=np.uint8 )

        xmin = x
        ymin = y
        xmax = ((min(ximagesz-1,x+xcubedim-1))%tilesz)+1
        ymax = ((min(yimagesz-1,y+ycubedim-1))%tilesz)+1
        
        #xmax = min ( ximagesz, x+xcubedim )
        #ymax = min ( yimagesz, y+ycubedim )
        zmin = 0
        zmax = min(sl+zcubedim, endslice+1)

        cubedata[0:zmax-zmin,0:ymax-ymin,0:xmax-xmin] = slab[zmin:zmax, ymin:ymax, xmin:xmax]
        
        # Create the DB Blob
        fileobj = cStringIO.StringIO()
        np.save ( fileobj, cubedata )
        cdz = zlib.compress ( fileobj.getvalue() )

        # insert the blob into the database
        cursor = db.conn.cursor()
        sql = "INSERT INTO res{} (zindex, cube) VALUES (%s, %s)".format(int(resolution))
        cursor.execute(sql, (mortonidx, cdz))
        cursor.close()

      print " Commiting at x={}, y={}, z={}".format(x, y, sl)
    db.conn.commit()

    slab = None
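
Every ingest loop in this listing keys cubes by a Morton (Z-order) index via zindex.XYZMorton, which linearizes the 3-D cube grid while keeping spatially nearby cubes close in the index. A bit-interleaving sketch of the idea (illustrative; the actual zindex module may use a different bit layout):

def xyz_morton(x, y, z, bits=21):
    # interleave the bits: bit i of x lands at position 3*i, y at 3*i+1, z at 3*i+2
    idx = 0
    for i in range(bits):
        idx |= ((x >> i) & 1) << (3 * i)
        idx |= ((y >> i) & 1) << (3 * i + 1)
        idx |= ((z >> i) & 1) << (3 * i + 2)
    return idx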
Code Example #12
def main():

    parser = argparse.ArgumentParser(
        description='Ingest the FlyEM image data.')
    parser.add_argument('token',
                        action="store",
                        help='Token for the annotation project.')
    parser.add_argument('path',
                        action="store",
                        help='Directory with annotation PNG files.')

    result = parser.parse_args()

    # convert to an argument
    resolution = 0

    # load a database
    [db, proj, projdb] = ocpcarest.loadDBProj(result.token)

    # get the dataset configuration
    (xcubedim, ycubedim, zcubedim) = proj.datasetcfg.cubedim[resolution]
    (startslice, endslice) = proj.datasetcfg.slicerange
    batchsz = zcubedim

    # This doesn't work because the image size does not match exactly the cube size
    #(ximagesz,yimagesz)=proj.datasetcfg.imagesz[resolution]
    ximagesz = 49152
    yimagesz = 32768
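    # tilesz, xtiles and ytiles are presumably module-level constants in the source file (not shown in this excerpt)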

    # add all of the tiles to the image
    for sl in range(startslice, endslice + 1, batchsz):
        for ytile in range(ytiles):
            for xtile in range(xtiles):

                slab = np.zeros([batchsz, tilesz, tilesz], dtype=np.uint32)

                for b in range(batchsz):
                    if (sl + b <= endslice):

                        # raw data
                        filenm = result.path + '/S1Column_Localcellbodies_97-classified_export_s{:0>3}_Y{}_X{}.png'.format(
                            sl + b, ytile, xtile)
                        print "Opening filenm " + filenm

                        img = Image.open(filenm, 'r')
                        imgdata = np.asarray(img)

                        slab[b, :, :] = kanno_cy.pngto32(imgdata)

                    # the last z offset that we ingest, if the batch ends before batchsz
                    endz = b

                # Now we have a 8192x8192x16 z-aligned cube.
                # Send it to the database.
                for y in range(ytile * tilesz, (ytile + 1) * tilesz, ycubedim):
                    for x in range(xtile * tilesz, (xtile + 1) * tilesz,
                                   xcubedim):

                        mortonidx = zindex.XYZMorton([
                            x / xcubedim, y / ycubedim,
                            (sl - startslice) / zcubedim
                        ])
                        cubedata = np.zeros([zcubedim, ycubedim, xcubedim],
                                            dtype=np.uint32)

                        xmin = x % tilesz
                        ymin = y % tilesz
                        xmax = (
                            (min(ximagesz - 1, x + xcubedim - 1)) % tilesz) + 1
                        ymax = (
                            (min(yimagesz - 1, y + ycubedim - 1)) % tilesz) + 1
                        zmin = 0
                        zmax = min(sl + zcubedim, endslice + 1)

                        cubedata[0:zmax - zmin, 0:ymax - ymin,
                                 0:xmax - xmin] = slab[zmin:zmax, ymin:ymax,
                                                       xmin:xmax]

                        # check if there's anything to store
                        if (np.count_nonzero(cubedata) == 0):
                            continue

                        # create the DB BLOB
                        fileobj = cStringIO.StringIO()
                        np.save(fileobj, cubedata)
                        cdz = zlib.compress(fileobj.getvalue())

                        # insert the blob into the database
                        cursor = db.conn.cursor()
                        sql = "INSERT INTO res{} (zindex, cube) VALUES (%s, %s)".format(
                            int(resolution))
                        print mortonidx
                        cursor.execute(sql, (mortonidx, cdz))
                        cursor.close()

                    print "Commiting at x=%s, y=%s, z=%s" % (x, y, sl)
                    db.conn.commit()
Code Example #13
def main():

  parser = argparse.ArgumentParser(description='Ingest the FlyEM image data.')
  parser.add_argument('token', action="store", help='Token for the annotation project.')
  parser.add_argument('path', action="store", help='Directory with annotation PNG files.')

  result = parser.parse_args()

  # convert to an argument
  resolution = 0

  # load a database
  [ db, proj, projdb ] = ocpcarest.loadDBProj ( result.token )

  # get the dataset configuration
  (xcubedim,ycubedim,zcubedim)=proj.datasetcfg.cubedim[resolution]
  (startslice,endslice)=proj.datasetcfg.slicerange
  batchsz=zcubedim

  batchsz=1

  # This doesn't work because the image size does not match exactly the cube size
  #(ximagesz,yimagesz)=proj.datasetcfg.imagesz[resolution]
  ximagesz = 49152
  yimagesz = 32768

  startslice = 100

  # add all of the tiles to the image
  for sl in range (startslice,endslice+1,batchsz):
    for ytile in range(ytiles):
      for xtile in range(xtiles):

        slab = np.zeros ( [ batchsz, tilesz, tilesz ], dtype=np.uint32 )

        for b in range ( batchsz ):
          if ( sl + b <= endslice ):

            # raw data
            filenm = result.path + '/S1Column_Localcellbodies_97-classified_export_s{:0>3}_Y{}_X{}.png'.format(sl+b,ytile,xtile) 
            print "Opening filenm " + filenm

          
            img = Image.open ( filenm, 'r' )
            imgdata = np.asarray ( img )

            slab[b,:,:] = kanno_cy.pngto32 ( imgdata )

            

          # the last z offset that we ingest, if the batch ends before batchsz
          endz = b

        # Now we have a 8192x8192x16 z-aligned cube.  
        # Send it to the database.
        for y in range ( ytile*tilesz, (ytile+1)*tilesz, ycubedim ):
          for x in range ( xtile*tilesz, (xtile+1)*tilesz, xcubedim ):

            mortonidx = zindex.XYZMorton ( [ x/xcubedim, y/ycubedim, (sl-startslice)/zcubedim] )
            cubedata = np.zeros ( [zcubedim, ycubedim, xcubedim], dtype=np.uint32 )

            xmin = x%tilesz
            ymin = y%tilesz
            xmax = ((min(ximagesz-1,x+xcubedim-1))%tilesz)+1
            ymax = ((min(yimagesz-1,y+ycubedim-1))%tilesz)+1
            zmin = 0
            zmax = min(sl+zcubedim,endslice+1)

            cubedata[0:zmax-zmin,0:ymax-ymin,0:xmax-xmin] = slab[zmin:zmax,ymin:ymax,xmin:xmax]

            if y == 8064:
              print x,y,xmin,xmax,ymin,ymax,zmin,zmax

            # check if there's anything to store
            if ( np.count_nonzero(cubedata) == 0 ): 
              continue

            # create the DB BLOB
            fileobj = cStringIO.StringIO ()
            np.save ( fileobj, cubedata )
            cdz = zlib.compress (fileobj.getvalue())

            # insert the blob into the database
            cursor = db.conn.cursor()
            sql = "INSERT INTO res{} (zindex, cube) VALUES (%s, %s)".format(int(resolution))
            cursor.execute(sql, (mortonidx, cdz))
            cursor.close()

          print "Commiting at x=%s, y=%s, z=%s" % (x,y,sl)
          db.conn.commit()
Code Example #14
    # Connection info
    try:
        mmconn = MySQLdb.connect(host='localhost',
                                 user=result.dbuser,
                                 passwd=result.dbpass,
                                 db=result.dbname)
    except MySQLdb.Error, e:
        print("Failed to connect to database: %s" % (result.dbname))
        raise

    # get a cursor of the mergemap
    mmcursor = mmconn.cursor()

    # load the input database
    [indb, inproj, inprojdb] = ocpcarest.loadDBProj(result.intoken)

    # load the out database
    [outdb, outproj, outprojdb] = ocpcarest.loadDBProj(result.outtoken)

    # get the dataset configuration
    (xcubedim, ycubedim,
     zcubedim) = inproj.datasetcfg.cubedim[result.resolution]
    (ximagesz, yimagesz) = inproj.datasetcfg.imagesz[result.resolution]
    (startslice, endslice) = inproj.datasetcfg.slicerange
    slices = endslice - startslice + 1

    # Set the limits for iteration on the number of cubes in each dimension
    xlimit = (ximagesz - 1) / xcubedim + 1
    ylimit = (yimagesz - 1) / ycubedim + 1
    #  Round up the zlimit to the next larger multiple of zcubedim
    zlimit = (((slices - 1) / zcubedim + 1) * zcubedim) / zcubedim
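
The limit computations above are integer ceiling division. A tiny worked sketch (// floors in both Python 2 and 3):

def cube_count(image_extent, cube_extent):
    # e.g. a 12000-pixel axis in 128-pixel cubes: (12000 - 1) // 128 + 1 == 94
    return (image_extent - 1) // cube_extent + 1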
Code Example #15
def main():

    parser = argparse.ArgumentParser(description='Ingest the Rohanna data.')
    parser.add_argument('token',
                        action="store",
                        help='Token for the annotation project.')
    parser.add_argument('path',
                        action="store",
                        help='Directory with annotation TIF files.')
    parser.add_argument('resolution',
                        action="store",
                        type=int,
                        help='Resolution')

    result = parser.parse_args()

    # convert to an argument
    resolution = result.resolution

    # load a database
    [db, proj, projdb] = ocpcarest.loadDBProj(result.token)

    # get the dataset configuration
    (xcubedim, ycubedim, zcubedim) = proj.datasetcfg.cubedim[resolution]
    (startslice, endslice) = proj.datasetcfg.slicerange
    batchsz = zcubedim

    # This doesn't work because the image size does not match exactly the cube size
    #(ximagesz,yimagesz)=proj.datasetcfg.imagesz[resolution]
    ximagesz = 5120
    yimagesz = 5120
    endslice = 1123
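    # xoffsetsz, yoffsetsz and zoffsetsz are presumably module-level constants in the source file (not shown in this excerpt)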

    # add all of the tiles to the image
    for sl in range(startslice, endslice + 1, batchsz):

        slab = np.zeros([batchsz, yimagesz, ximagesz], dtype=np.uint32)

        for b in range(batchsz):

            if (sl + b <= endslice):

                # raw data
                filenm = result.path + '/labels_{:0>5}_ocp.tif'.format(sl + b)
                print "Opening filenm " + filenm

                img = Image.open(filenm, 'r')
                imgdata = np.asarray(img)

                slab[b, :, :] = (imgdata)
                # the last z offset that we ingest, if the batch ends before batchsz
                endz = b

        # Now we have a 5120x5120x16 z-aligned cube.
        # Send it to the database.
        for y in range(0, yimagesz, ycubedim):
            for x in range(0, ximagesz, xcubedim):

                mortonidx = zindex.XYZMorton([
                    (x + xoffsetsz) / xcubedim, (y + yoffsetsz) / ycubedim,
                    (sl + zoffsetsz - startslice) / zcubedim
                ])
                cubedata = np.zeros([zcubedim, ycubedim, xcubedim],
                                    dtype=np.uint32)
                test = zindex.MortonXYZ(mortonidx)
                pdb.set_trace()
                xmin = x
                ymin = y
                xmax = min(ximagesz, x + xcubedim)
                ymax = min(yimagesz, y + ycubedim)
                zmin = 0
                zmax = min(sl + zcubedim, endslice + 1)
                cubedata[0:zmax - zmin, 0:ymax - ymin,
                         0:xmax - xmin] = slab[zmin:zmax, ymin:ymax, xmin:xmax]

                # check if there's anything to store
                if (np.count_nonzero(cubedata) == 0):
                    continue

                # create the DB BLOB
                fileobj = cStringIO.StringIO()
                np.save(fileobj, cubedata)
                cdz = zlib.compress(fileobj.getvalue())

                # insert the blob into the database
                cursor = db.conn.cursor()
                sql = "INSERT INTO res{} (zindex, cube) VALUES (%s, %s)".format(
                    int(resolution))
                #        cursor.execute(sql, (mortonidx, cdz))
                cursor.close()

                print "Commiting at x=%s, y=%s, z=%s" % (
                    x + xoffsetsz, y + yoffsetsz, sl + b + zoffsetsz)
            db.conn.commit()
Code Example #16
def main():

  parser = argparse.ArgumentParser(description='Ingest the JP2 data')
  parser.add_argument('baseurl', action="store", help='Base URL of the OCP service, without http://, e.g. openconnecto.me')
  parser.add_argument('token', action="store", help='Token for the annotation project.')
  parser.add_argument('path', action="store", help='Directory with annotation TIF files.')
  parser.add_argument('resolution', action="store", type=int, help="Resolution for the project")

  result = parser.parse_args()

  #Load a database
  [ db, proj, projdb ] = ocpcarest.loadDBProj ( result.token ) 
  
  # get the dataset configuration
  (xcubedim,ycubedim,zcubedim) = proj.datasetcfg.cubedim[result.resolution]
  (startslice,endslice)=proj.datasetcfg.slicerange
  
  batchsz = 1
  (ximagesz,yimagesz)=proj.datasetcfg.imagesz[result.resolution]
  
  yimagesz = 18000
  ximagesz = 24000

  ytilesz = 3600
  xtilesz = 4800

  startslice = 174
  # Get a list of the files in the directories
  for sl in range (startslice,endslice+1,batchsz):
  
    for b in range ( batchsz ):
    
      slab = np.zeros ( [ batchsz, ytilesz, xtilesz ], dtype=np.uint64 ) 
      
      if ( sl + b <= endslice ):
       
        # raw data
        try:
          filenm = result.path + '{:0>4}'.format(sl+b) + '.tiff'
          print "Opening filenm" + filenm
          imgdata = cv2.imread( filenm, -1 )

          if imgdata is not None:
            newimgdata = np.left_shift(65535, 48, dtype=np.uint64) | np.left_shift(imgdata[:,:, 0], 32, dtype=np.uint64) | np.left_shift(imgdata[:,:,1], 16, dtype=np.uint64) | np.uint64(imgdata[:,:,2])
          else:
            newimgdata = np.zeros( [ yimagesz, ximagesz ], dtype=np.uint64 )
        except IOError, e:
          print e
          newimgdata = np.zeros( [ yimagesz, ximagesz ], dtype=np.uint64 )
        
        newytilesz = 0
        newxtilesz = 0

        for tile in range(0,25):

          if tile%5==0 and tile!=0:
            newytilesz = newytilesz + ytilesz
            newxtilesz = 0
          elif tile!=0:
            # Updating the value
            newxtilesz = newxtilesz + xtilesz


          if newimgdata is None:
            print "Skipping Slice {} as it does not exist".format(sl+b)
            continue
          else:
            slab[b,:,:] = newimgdata[newytilesz:(tile/5+1)*ytilesz,newxtilesz:(tile%5+1)*xtilesz]
       
          # the last z offset that we ingest, if the batch ends before batchsz
          endz = b

          # Now we have a 3600x4800 tile to the server  
          # Construct the URL
          url = 'http://{}/ocp/ca/{}/npz/{}/{},{}/{},{}/{},{}/'.format(result.baseurl, result.token,result.resolution, newxtilesz, (tile%5+1)*xtilesz, newytilesz, (tile/5+1)*ytilesz, sl+zoffset, sl+batchsz)

          print url
          # Encode the voxel list and pickle it
          fileobj = cStringIO.StringIO ()
          np.save ( fileobj, slab )
          cdz = zlib.compress (fileobj.getvalue())
 
          
          print " Sending URL"
          # Build the post request
          try:
            req = urllib2.Request(url = url, data = cdz)
            response = urllib2.urlopen(req)
            the_page = response.read()
          except Exception, e:
            print "Failed ", e
Code Example #17
def main():

    parser = argparse.ArgumentParser(description='Ingest the JP2 data')
    parser.add_argument(
        'baseurl',
        action="store",
        help='Base URL of the OCP service, without http://, e.g. openconnecto.me')
    parser.add_argument('token',
                        action="store",
                        help='Token for the annotation project.')
    parser.add_argument('path',
                        action="store",
                        help='Directory with annotation TIF files.')
    parser.add_argument('resolution',
                        action="store",
                        type=int,
                        help="Resolution for the project")

    result = parser.parse_args()

    #Load a database
    [db, proj, projdb] = ocpcarest.loadDBProj(result.token)

    # get the dataset configuration
    (xcubedim, ycubedim, zcubedim) = proj.datasetcfg.cubedim[result.resolution]
    (startslice, endslice) = proj.datasetcfg.slicerange

    batchsz = 1
    (ximagesz, yimagesz) = proj.datasetcfg.imagesz[result.resolution]

    yimagesz = 18000
    ximagesz = 24000

    ytilesz = 3600
    xtilesz = 4800

    startslice = 174
    # Get a list of the files in the directories
    for sl in range(startslice, endslice + 1, batchsz):

        for b in range(batchsz):

            slab = np.zeros([batchsz, ytilesz, xtilesz], dtype=np.uint64)

            if (sl + b <= endslice):

                # raw data
                try:
                    filenm = result.path + '{:0>4}'.format(sl + b) + '.tiff'
                    print "Opening filenm" + filenm
                    imgdata = cv2.imread(filenm, -1)

                    if imgdata is not None:
                        newimgdata = (np.left_shift(65535, 48, dtype=np.uint64)
                                      | np.left_shift(imgdata[:, :, 0], 32, dtype=np.uint64)
                                      | np.left_shift(imgdata[:, :, 1], 16, dtype=np.uint64)
                                      | np.uint64(imgdata[:, :, 2]))
                    else:
                        newimgdata = np.zeros([yimagesz, ximagesz],
                                              dtype=np.uint64)
                except IOError, e:
                    print e
                    newimgdata = np.zeros([yimagesz, ximagesz],
                                          dtype=np.uint64)

                newytilesz = 0
                newxtilesz = 0

                for tile in range(0, 25):

                    if tile % 5 == 0 and tile != 0:
                        newytilesz = newytilesz + ytilesz
                        newxtilesz = 0
                    elif tile != 0:
                        # Updating the value
                        newxtilesz = newxtilesz + xtilesz

                    if newimgdata is None:
                        print "Skipping Slice {} as it does not exist".format(
                            sl + b)
                        continue
                    else:
                        slab[b, :, :] = newimgdata[newytilesz:(tile / 5 + 1) *
                                                   ytilesz,
                                                   newxtilesz:(tile % 5 + 1) *
                                                   xtilesz]

                    # the last z offset that we ingest, if the batch ends before batchsz
                    endz = b

                    # Now we have a 3600x4800 tile to the server
                    # Construct the URL
                    url = 'http://{}/ocp/ca/{}/npz/{}/{},{}/{},{}/{},{}/'.format(
                        result.baseurl, result.token, result.resolution,
                        newxtilesz, (tile % 5 + 1) * xtilesz, newytilesz,
                        (tile / 5 + 1) * ytilesz, sl + zoffset, sl + batchsz)

                    print url
                    # Encode the voxel list and pickle it
                    fileobj = cStringIO.StringIO()
                    np.save(fileobj, slab)
                    cdz = zlib.compress(fileobj.getvalue())

                    print " Sending URL"
                    # Build the post request
                    try:
                        req = urllib2.Request(url=url, data=cdz)
                        response = urllib2.urlopen(req)
                        the_page = response.read()
                    except Exception, e:
                        print "Failed ", e
Code Example #18
def main():

  parser = argparse.ArgumentParser(description='Ingest the TIFF data')
  parser.add_argument('token', action="store", help='Token for the project')
  parser.add_argument('path', action="store", help='Directory with the image files')
  parser.add_argument('resolution', action="store", type=int, help='Resolution of data')

  result = parser.parse_args()
  
  #Load a database
  [ db, proj, projdb ] = ocpcarest.loadDBProj ( result.token )

  # get the dataset configuration
  (xcubedim,ycubedim,zcubedim) = proj.datasetcfg.cubedim[result.resolution]
  (startslice,endslice)=proj.datasetcfg.slicerange
  batchsz = zcubedim

  batchsz = 16
  (ximagesz,yimagesz)=proj.datasetcfg.imagesz[result.resolution]

  yimagesz = 18000
  ximagesz = 24000
  
  # Get a list of the files in the directories
  for sl in range ( startslice, endslice+1, batchsz):

    slab = np.zeros ( [ batchsz, yimagesz, ximagesz ], dtype=np.uint32 )

    for b in range ( batchsz ):

      if ( sl + b <= endslice ):

        # raw data
        try:
          filenm = result.path + '{:0>4}'.format(sl+b) + '.tiff'
          print "Opening filenm" + filenm
          img = Image.open (filenm, 'r').convert("RGBA")
          imgdata = np.asarray ( img )
          slab[b,:,:] = np.left_shift(imgdata[:,:,3], 24, dtype=np.uint32) | np.left_shift(imgdata[:,:,2], 16, dtype=np.uint32) | np.left_shift(imgdata[:,:,1], 8, dtype=np.uint32) | np.uint32(imgdata[:,:,0])
        except IOError, e:
          print e
          imgdata = np.zeros((yimagesz, ximagesz), dtype=np.uint32)
          slab[b,:,:] = imgdata

        # the last z offset that we ingest, if the batch ends before batchsz
        endz = b

    for y in range ( 0, yimagesz+1, ycubedim ):
      for x in range ( 0, ximagesz+1, xcubedim ):

        # Getting a Cube id and ingesting the data one cube at a time
        mortonidx = zindex.XYZMorton ( [x/xcubedim, y/ycubedim, (sl-startslice)/zcubedim] )
        cubedata = np.zeros ( [zcubedim, ycubedim, xcubedim], dtype=np.uint32 )

        xmin = x
        ymin = y
        xmax = min ( ximagesz, x+xcubedim )
        ymax = min ( yimagesz, y+ycubedim )
        zmin = 0
        zmax = min(sl+zcubedim, endslice+1)

        cubedata[0:zmax-zmin,0:ymax-ymin,0:xmax-xmin] = slab[zmin:zmax, ymin:ymax, xmin:xmax]
        
        # Create the DB Blob
        fileobj = cStringIO.StringIO()
        np.save ( fileobj, cubedata )
        cdz = zlib.compress ( fileobj.getvalue() )

        # insert the blob into the database
        cursor = db.conn.cursor()
        sql = "INSERT INTO res{} (zindex, cube) VALUES (%s, %s)".format(int(result.resolution))
        cursor.execute(sql, (mortonidx, cdz))
        cursor.close()

      print " Commiting at x={}, y={}, z={}".format(x, y, sl)
      db.conn.commit()
  
    # Freeing memory
    slab = None
Code Example #19
def main():

  parser = argparse.ArgumentParser(description='Ingest the Rohanna data.')
  parser.add_argument('token', action="store", help='Token for the annotation project.')
  parser.add_argument('path', action="store", help='Directory with annotation TIF files.')
  parser.add_argument('resolution', action="store", type=int, help='Resolution')

  result = parser.parse_args()

  # convert to an argument
  resolution = result.resolution

  # load a database
  [ db, proj, projdb ] = ocpcarest.loadDBProj ( result.token )

  # get the dataset configuration
  (xcubedim,ycubedim,zcubedim)=proj.datasetcfg.cubedim[resolution]
  (startslice,endslice)=proj.datasetcfg.slicerange
  batchsz=zcubedim

  # This doesn't work because the image size does not match exactly the cube size
  #(ximagesz,yimagesz)=proj.datasetcfg.imagesz[resolution]
  ximagesz = 10748
  yimagesz = 12896
  
  # add all of the tiles to the image
  for sl in range (993,endslice+1,batchsz):
  
    slab = np.zeros ( [ batchsz, yimagesz, ximagesz ], dtype=np.uint32 )

    for b in range ( batchsz ):
        
      if ( sl + b <= endslice ):

        # raw data
        filenm = result.path + 'RCvesiclescleaned_s{:0>4}.png'.format(sl+b-1)
        print "Opening filenm " + filenm
        
        try:
          img = Image.open ( filenm, 'r' )
          imgdata = np.asarray ( img )
          imgdata = np.left_shift(imgdata[:,:,0], 16, dtype=np.uint32) | np.left_shift(imgdata[:,:,1], 8, dtype=np.uint32) | np.uint32(imgdata[:,:,2])
        except IOError, e:
          print e
          imgdata = np.zeros((yimagesz,ximagesz), dtype=np.uint32)
        
        slab[b,:,:] = ( imgdata )
      
        # the last z offset that we ingest, if the batch ends before batchsz
        endz = b
   
    # Now we have a 5120x5120x16 z-aligned cube.  
    # Send it to the database.
    for y in range ( 0, yimagesz, ycubedim ):
      for x in range ( 0, ximagesz, xcubedim ):

        mortonidx = zindex.XYZMorton ( [ (x)/xcubedim, (y)/ycubedim, (sl-startslice)/zcubedim] )
        cubedata = np.zeros ( [zcubedim, ycubedim, xcubedim], dtype=np.uint32 )
        test = zindex.MortonXYZ (mortonidx )
        xmin = x
        ymin = y
        xmax = min ( ximagesz, x+xcubedim )
        ymax = min ( yimagesz, y+ycubedim )
        zmin = 0
        zmax = min(sl+zcubedim,endslice+1)
        cubedata[0:zmax-zmin,0:ymax-ymin,0:xmax-xmin] = slab[zmin:zmax,ymin:ymax,xmin:xmax]

        # check if there's anything to store
        if ( np.count_nonzero(cubedata) == 0 ):
          continue

        # create the DB BLOB
        fileobj = cStringIO.StringIO ()
        np.save ( fileobj, cubedata )
        cdz = zlib.compress (fileobj.getvalue())

        # insert the blob into the database
        cursor = db.conn.cursor()
        sql = "INSERT INTO res{} (zindex, cube) VALUES (%s, %s)".format(int(resolution))
        cursor.execute(sql, (mortonidx, cdz))
        cursor.close()
        print "Commiting at x=%s, y=%s, z=%s" % (x,y,sl+b)
      db.conn.commit()
Code Example #20
File: flyem_anno.py  Project: neurodata/ndstore
def main():

    parser = argparse.ArgumentParser(
        description='Ingest the FlyEM image data.')
    parser.add_argument(
        'baseurl',
        action="store",
        help='Base URL of the OCP service, without http://, e.g. neurodata.io')
    parser.add_argument('token',
                        action="store",
                        help='Token for the annotation project.')
    parser.add_argument('path',
                        action="store",
                        help='Directory with annotation PNG files.')

    result = parser.parse_args()

    # convert to an argument
    resolution = 0

    # load a database
    [db, proj, projdb] = ocpcarest.loadDBProj(result.token)

    # get the dataset configuration
    (xcubedim, ycubedim, zcubedim) = proj.datasetcfg.cubedim[resolution]
    (realstartslice, realendslice) = proj.datasetcfg.slicerange
    batchsz = zcubedim

    # This doesn't work because the image size does not match exactly the cube size
    #(ximagesz,yimagesz)=proj.datasetcfg.imagesz[resolution]
    ximagesz = 12000
    yimagesz = 12000

    # Adding the startslice and endslice manually
    startslice = 979
    endslice = 1000

    batchsz = 1

    # Accessing the dict in dbm
    anydb = anydbm.open('bodydict', 'r')

    # Get a list of the files in the directories
    for sl in range(startslice, endslice + 1, batchsz):

        slab = np.zeros([batchsz, yimagesz, ximagesz], dtype=np.uint32)

        for b in range(batchsz):

            if (sl + b <= endslice):

                #  Reading the raw data
                filenm = result.path + '/superpixel.' + '{:0>5}'.format(
                    sl + b) + '.png'
                print "Opening filenm " + filenm

                img = Image.open(filenm, 'r')
                imgdata = np.asarray(img)
                superpixelarray = imgdata[:, :, 0] + (
                    np.uint32(imgdata[:, :, 1]) << 8)
                uniqueidlist = np.unique(superpixelarray)
                smalldict = {}
                # Creating a small dict of all the ids in the given slice
                for i in uniqueidlist:
                    smalldict[i] = int(anydb.get(str(sl) + ',' + str(i)))
                # Looking the in the small dictionary for the body ids
                for i in range(superpixelarray.shape[0]):
                    for j in range(superpixelarray.shape[1]):
                        superpixelarray[i, j] = smalldict.get(superpixelarray[i, j])
                slab[b, :, :] = superpixelarray
                print "end on slice:", sl
                # the last z offset that we ingest, if the batch ends before batchsz
                endz = b

        # Now we have a 1024x1024x16 z-aligned cube.
        # Send it to the database.
        for y in range(0, yimagesz, ycubedim):
            for x in range(0, ximagesz, xcubedim):

                mortonidx = zindex.XYZMorton(
                    [x / xcubedim, y / ycubedim, (sl - startslice) / zcubedim])
                cubedata = np.zeros([1, ycubedim, xcubedim], dtype=np.uint32)
                #cubedata = np.zeros ( [zcubedim, ycubedim, xcubedim], dtype=np.uint32 )

                xmin = x
                ymin = y
                xmax = min(ximagesz, x + xcubedim)
                ymax = min(yimagesz, y + ycubedim)
                zmin = 0
                zmax = min(sl + zcubedim, endslice + 1)
                cubedata[0:zmax - zmin, 0:ymax - ymin,
                         0:xmax - xmin] = slab[zmin:zmax, ymin:ymax, xmin:xmax]

                # insert the blob into the database
                db.annotateDense((x, y, sl - realstartslice), resolution,
                                 cubedata, 'O')

                print "Commiting at x=%s, y=%s, z=%s" % (x, y,
                                                         sl - realstartslice)
            db.conn.commit()
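
The per-pixel double loop above performs a Python dict lookup for each of the 12000x12000 pixels. A vectorized sketch of the same remap using a NumPy lookup table (illustrative; assumes the superpixel ids are small enough to index a dense array):

import numpy as np

def remap_superpixels(superpixelarray, smalldict):
    # identity-sized table over the ids present, then one fancy-indexing pass
    lut = np.zeros(int(superpixelarray.max()) + 1, dtype=np.uint32)
    for i in np.unique(superpixelarray):
        lut[i] = smalldict[i]    # smalldict: superpixel id -> body id
    return lut[superpixelarray]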
Code Example #21
  # Connection info 
  try:
    mmconn = MySQLdb.connect (host = 'localhost',
                          user = result.dbuser,
                          passwd = result.dbpass,
                          db = result.dbname )
  except MySQLdb.Error, e:
    print("Failed to connect to database: %s" % (result.dbname))
    raise

  # get a cursor of the mergemap
  mmcursor = mmconn.cursor()

  # load the input database
  [ indb, inproj, inprojdb ] = ocpcarest.loadDBProj ( result.intoken )

  # load the out database
  [ outdb, outproj, outprojdb ] = ocpcarest.loadDBProj ( result.outtoken )

  # get the dataset configuration
  (xcubedim,ycubedim,zcubedim) = inproj.datasetcfg.cubedim[result.resolution]
  (ximagesz,yimagesz) = inproj.datasetcfg.imagesz[result.resolution] 
  (startslice,endslice) = inproj.datasetcfg.slicerange
  slices = endslice - startslice + 1

  # Set the limits for iteration on the number of cubes in each dimension
  xlimit = (ximagesz-1)/xcubedim+1
  ylimit = (yimagesz-1)/ycubedim+1
  #  Round up the zlimit to the next larger multiple of zcubedim
  zlimit = (((slices-1)/zcubedim+1)*zcubedim)/zcubedim
Code Example #22
def main():

    parser = argparse.ArgumentParser(
        description='Ingest the FlyEM image data.')
    parser.add_argument('token',
                        action="store",
                        help='Token for the annotation project.')
    parser.add_argument('path',
                        action="store",
                        help='Directory with annotation PNG files.')

    result = parser.parse_args()

    # convert to an argument
    resolution = 0

    # load a database
    [db, proj, projdb] = ocpcarest.loadDBProj(result.token)

    # get the dataset configuration
    (xcubedim, ycubedim, zcubedim) = proj.datasetcfg.cubedim[resolution]
    (startslice, endslice) = proj.datasetcfg.slicerange
    batchsz = zcubedim

    # This doesn't work because the image size does not match exactly the cube size
    #(ximagesz,yimagesz)=proj.datasetcfg.imagesz[resolution]
    ximagesz = 49152
    yimagesz = 32768

    xtilesz = 8192
    ytilesz = 8192
    numxtiles = (6 + 1)
    numytiles = (4 + 1)

    # Get a list of the files in the directories
    for sl in range(startslice, endslice + 1, batchsz):
        for ytile in range(1, numytiles):
            for xtile in range(1, numxtiles):

                slab = np.zeros([batchsz, ytilesz, xtilesz], dtype=np.uint8)

                for b in range(batchsz):

                    if (sl + b <= endslice):

                        # raw data
                        filenm = result.path + 'S1Column_affine_s{:0>3}_x{:0>2}_y{:0>2}.png'.format(
                            sl + b + 1, xtile, ytile)
                        print "Opening filenm " + filenm

                        img = Image.open(filenm, 'r')
                        imgdata = np.asarray(img)
                        slab[b, :, :] = imgdata

                        # the last z offset that we ingest, if the batch ends before batchsz
                        endz = b

                # Now we have a 1024x1024x16 z-aligned cube.
                # Send it to the database.
                for y in range((ytile - 1) * ytilesz, (ytile) * ytilesz,
                               ycubedim):
                    for x in range((xtile - 1) * xtilesz, (xtile) * xtilesz,
                                   xcubedim):

                        mortonidx = zindex.XYZMorton([
                            x / xcubedim, y / ycubedim,
                            (sl - startslice) / zcubedim
                        ])
                        cubedata = np.zeros([zcubedim, ycubedim, xcubedim],
                                            dtype=np.uint8)

                        xmin = x % xtilesz
                        ymin = y % ytilesz
                        xmax = (min(ximagesz - 1, x + xcubedim - 1) %
                                xtilesz) + 1
                        ymax = (min(yimagesz - 1, y + ycubedim - 1) %
                                ytilesz) + 1
                        zmin = 0
                        zmax = min(sl + zcubedim, endslice + 1)

                        cubedata[0:zmax - zmin, 0:ymax - ymin,
                                 0:xmax - xmin] = slab[zmin:zmax, ymin:ymax,
                                                       xmin:xmax]

                        # Don't ingest empty cubes
                        if len(np.unique(cubedata)) == 1:
                            continue

                        # create the DB BLOB
                        fileobj = cStringIO.StringIO()
                        np.save(fileobj, cubedata)
                        cdz = zlib.compress(fileobj.getvalue())

                        # insert the blob into the database
                        cursor = db.conn.cursor()
                        sql = "INSERT INTO res{} (zindex, cube) VALUES (%s, %s)".format(
                            int(resolution))
                        cursor.execute(sql, (mortonidx, cdz))
                        cursor.close()

                    print "Commiting at x=%s, y=%s, z=%s" % (x, y, sl)
                    db.conn.commit()

                slab = None
                import gc
                gc.collect()
Code Example #23
  def loadDB ( self ):
    """Load the database if it hasn't been load."""

    if self.db is None:
      # load the database/token
      [ self.db, self.proj, self.projdb ] = ocpcarest.loadDBProj ( self.token )
Code Example #24
# Stuff we may take from a config or the command line in the future
#ximagesz = 12000
#yimagesz = 12000

parser = argparse.ArgumentParser(description='Ingest the FlyEM image data.')
parser.add_argument('baseurl', action="store", help='Base URL of the OCP service, without http://, e.g. openconnecto.me')
parser.add_argument('token', action="store", help='Token for the annotation project.')
parser.add_argument('path', action="store", help='Directory with annotation PNG files.')
parser.add_argument('process', action="store", help='Number of processes.')

result = parser.parse_args()
# convert to an argument
resolution = 0

# load a database
[ db, proj, projdb ] = ocpcarest.loadDBProj ( result.token )

# get the dataset configuration
(xcubedim,ycubedim,zcubedim)=proj.datasetcfg.cubedim[resolution]
(startslice,endslice)=proj.datasetcfg.slicerange
batchsz=zcubedim

# This doesn't work because the image size does not match exactly the cube size
#(ximagesz,yimagesz)=proj.datasetcfg.imagesz[resolution]
ximagesz = 12000
yimagesz = 12000

batchsz=16
totalslices = range(startslice,endslice,16)
totalprocs = int(result.process)
#global anydb
Code Example #25
File: flyem_image.py  Project: neurodata/ndstore
def main():

  parser = argparse.ArgumentParser(description='Ingest the FlyEM image data.')
  parser.add_argument('baseurl', action="store", help='Base URL of the OCP service, without http://, e.g. neurodata.io')
  parser.add_argument('token', action="store", help='Token for the annotation project.')
  parser.add_argument('path', action="store", help='Directory with annotation PNG files.')

  result = parser.parse_args()

  # convert to an argument
  resolution = 0

  # load a database
  [ db, proj, projdb ] = ocpcarest.loadDBProj ( result.token )

  # get the dataset configuration
  (xcubedim,ycubedim,zcubedim)=proj.datasetcfg.cubedim[resolution]
  (startslice,endslice)=proj.datasetcfg.slicerange
  batchsz=zcubedim

  # This doesn't work because the image size does not match exactly the cube size
  #(ximagesz,yimagesz)=proj.datasetcfg.imagesz[resolution]
  ximagesz = 12000
  yimagesz = 12000

  # Get a list of the files in the directories
  for sl in range (startslice,endslice+1,batchsz):

    slab = np.zeros ( [ batchsz, yimagesz, ximagesz ], dtype=np.uint8 )
   
    for b in range ( batchsz ):

      if ( sl + b <= endslice ):

        # raw data
        filenm = result.path + '/grayscale.' + '{:0>5}'.format(sl+b) + '.png'
        print "Opening filenm " + filenm
        
        img = Image.open ( filenm, 'r' )
        imgdata = np.asarray ( img )
        slab[b,:,:]  = imgdata

        # the last z offset that we ingest, if the batch ends before batchsz
        endz = b

    # Now we have a 1024x1024x16 z-aligned cube.  
    # Send it to the database.
    for y in range ( 0, yimagesz, ycubedim ):
      for x in range ( 0, ximagesz, xcubedim ):

        mortonidx = zindex.XYZMorton ( [ x/xcubedim, y/ycubedim, (sl-startslice)/zcubedim] )
        cubedata = np.zeros ( [zcubedim, ycubedim, xcubedim], dtype=np.uint8 )

        xmin = x
        ymin = y
        xmax = min ( ximagesz, x+xcubedim )
        ymax = min ( yimagesz, y+ycubedim )
        zmin = 0
        zmax = min(sl+zcubedim,endslice+1)

        cubedata[0:zmax-zmin,0:ymax-ymin,0:xmax-xmin] = slab[zmin:zmax,ymin:ymax,xmin:xmax]

        # create the DB BLOB
        fileobj = cStringIO.StringIO ()
        np.save ( fileobj, cubedata )
        cdz = zlib.compress (fileobj.getvalue())

        # insert the blob into the database
        cursor = db.conn.cursor()
        sql = "INSERT INTO res{} (zindex, cube) VALUES (%s, %s)".format(int(resolution))
        cursor.execute(sql, (mortonidx, cdz))
        cursor.close()

      print "Commiting at x=%s, y=%s, z=%s" % (x,y,sl)
      db.conn.commit()
Code Example #26
def parallelwrite(slicenumber):
  
  # Accessing the dict in dbm
  #anydb = anydbm.open('bodydict','r')

  [ db, proj, projdb ] = ocpcarest.loadDBProj ( result.token )

  #print slicenumber
  startslice = slicenumber
  endslice = startslice+16

  # Get a list of the files in the directories
  for sl in range (startslice, endslice+1, batchsz):

    slab = np.zeros ( [ batchsz, yimagesz, ximagesz ], dtype=np.uint32 )
   
    for b in range ( batchsz ):

      if ( sl + b <= endslice and sl + b<=1460 ):

        # raw data
        filenm = result.path + '/superpixel.' + '{:0>5}'.format(sl+b) + '.png'
        #print "Opening filenm " + filenm
        
        img = Image.open ( filenm, 'r' )
        imgdata = np.asarray ( img )
        #Adding new lines
        anydb = anydbm.open('bodydict2','r')
        superpixelarray = imgdata[:,:,0] + (np.uint32(imgdata[:,:,1])<<8)
        newdata = np.zeros([superpixelarray.shape[0],superpixelarray.shape[1]], dtype=np.uint32)
        #print "slice",sl+b,"batch",sl
        print sl+b,multiprocessing.current_process()
        for i in range(superpixelarray.shape[0]):
          for j in range(superpixelarray.shape[1]):
            key = str(sl)+','+str(superpixelarray[i,j])
            if( key not in anydb):
              f = open('missing_keys', 'a')
              f.write(key+'\n')
              f.close()
              print "Error Detected Writing to File"
              dictvalue = '0'
            else:
              dictvalue = anydb.get( key )
            newdata[i,j] = int(dictvalue)
        slab[b,:,:] = newdata
        print "end of slice:",sl+b
        anydb.close()
        
    print "Entering commit phase"

    # Now we have a 1024x1024x16 z-aligned cube.  
    # Send it to the database.
    for y in range ( 0, yimagesz, ycubedim ):
      for x in range ( 0, ximagesz, xcubedim ):

        mortonidx = zindex.XYZMorton ( [ x/xcubedim, y/ycubedim, (sl-startslice)/zcubedim] )
        cubedata = np.zeros ( [zcubedim, ycubedim, xcubedim], dtype=np.uint32 )

        xmin = x
        ymin = y
        xmax = min ( ximagesz, x+xcubedim )
        ymax = min ( yimagesz, y+ycubedim )
        zmin = 0
        zmax = min(sl+zcubedim,endslice+1)

        cubedata[0:zmax-zmin,0:ymax-ymin,0:xmax-xmin] = slab[zmin:zmax,ymin:ymax,xmin:xmax]

        # insert the blob into the database
        db.annotateDense ((x,y,sl-startslice), resolution, cubedata, 'O')

      print "Commiting at x=%s, y=%s, z=%s" % (x,y,sl)
      db.conn.commit()
  return None
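
Code Example #24 sets up totalslices and totalprocs at module level, and parallelwrite above consumes one 16-slice offset per call; the driver itself is not shown in these excerpts. A hedged sketch of how they are likely wired together:

import multiprocessing

if __name__ == '__main__':
    # fan the slabs out over totalprocs worker processes
    pool = multiprocessing.Pool(totalprocs)
    pool.map(parallelwrite, totalslices)
    pool.close()
    pool.join()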
Code Example #27
def main():

  parser = argparse.ArgumentParser(description='Ingest the TIFF data')
  parser.add_argument('token', action="store", help='Token for the project')
  parser.add_argument('path', action="store", help='Directory with the image files')
  parser.add_argument('resolution', action="store", type=int, help='Resolution of data')

  result = parser.parse_args()
  
  #Load a database
  [ db, proj, projdb ] = ocpcarest.loadDBProj ( result.token )

  # get the dataset configuration
  (xcubedim,ycubedim,zcubedim) = proj.datasetcfg.cubedim[result.resolution]
  (startslice,endslice)=proj.datasetcfg.slicerange
  batchsz = zcubedim

  batchsz = 16
  (ximagesz,yimagesz)=proj.datasetcfg.imagesz[result.resolution]

  yimagesz = 18000
  ximagesz = 24000

  # Get a list of the files in the directories
  for sl in range (startslice, endslice+1, batchsz):

    slab = np.zeros ( [ batchsz, yimagesz, ximagesz ], dtype=np.uint64 )

    for b in range ( batchsz ):

      if ( sl + b <= endslice ):

        # raw data
        try:
          filenm = result.path + '{:0>4}'.format(sl+b) + '.tiff'
          print "Opening filenm" + filenm
          
          # Returns the image in BGR order. In the 8-bit script PIL returns it in the correct order.
          imgdata = cv2.imread( filenm, -1 )
          if imgdata is not None:
            slab[b,:,:] = np.left_shift(65535, 48, dtype=np.uint64) | np.left_shift(imgdata[:,:,0], 32, dtype=np.uint64) | np.left_shift(imgdata[:,:,1], 16, dtype=np.uint64) | np.uint64(imgdata[:,:,2])
          else:
            slab[b,:,:] = np.zeros( [ yimagesz, ximagesz ], dtype=np.uint64)
        except IOError, e:
          slab[b,:,:] = np.zeros( [ yimagesz, ximagesz ], dtype=np.uint64)
          print e

        # the last z offset that we ingest, if the batch ends before batchsz
        endz = b

    for y in range ( 0, yimagesz+1, ycubedim ):
      for x in range ( 0, ximagesz+1, xcubedim ):

        mortonidx = zindex.XYZMorton ( [x/xcubedim, y/ycubedim, (sl-startslice)/zcubedim] )
        cubedata = np.zeros ( [zcubedim, ycubedim, xcubedim], dtype=np.uint64 )

        xmin = x
        ymin = y
        xmax = min ( ximagesz, x+xcubedim )
        ymax = min ( yimagesz, y+ycubedim )
        zmin = 0
        zmax = min(sl+zcubedim, endslice+1)

        cubedata[0:zmax-zmin,0:ymax-ymin,0:xmax-xmin] = slab[zmin:zmax, ymin:ymax, xmin:xmax]
        
        # Create the DB Blob
        fileobj = cStringIO.StringIO()
        np.save ( fileobj, cubedata )
        cdz = zlib.compress ( fileobj.getvalue() )

        # insert the blob into the database
        cursor = db.conn.cursor()
        sql = "INSERT INTO res{} (zindex, cube) VALUES (%s, %s)".format(int(result.resolution))
        cursor.execute(sql, (mortonidx, cdz))
        cursor.close()

      print " Commiting at x={}, y={}, z={}".format(x, y, sl)
    db.conn.commit()
  
    # Freeing memory
    slab = None
Code Example #28
def main():

  parser = argparse.ArgumentParser(description='Ingest the TIFF data')
  parser.add_argument('token', action="store", help='Token for the project')
  parser.add_argument('path', action="store", help='Directory with the image files')
  parser.add_argument('resolution', action="store", type=int, help='Resolution of data')

  result = parser.parse_args()
  
  #Load a database
  [ db, proj, projdb ] = ocpcarest.loadDBProj ( result.token )

  # get the dataset configuration
  (xcubedim,ycubedim,zcubedim) = proj.datasetcfg.cubedim[result.resolution]
  (startslice,endslice)=proj.datasetcfg.slicerange
  batchsz = zcubedim

  batchsz = 16
  (ximagesz,yimagesz)=proj.datasetcfg.imagesz[result.resolution]

  ximagesz = 3992
  yimagesz = 4144
  
  pdb.set_trace()

  # Get a list of the files in the directories
  for sl in range (startslice, endslice+1, batchsz):

    slab = np.zeros ( [ batchsz, yimagesz, ximagesz ], dtype=np.uint16 )

    for b in range ( batchsz ):

      if ( sl + b <= endslice ):
        # raw data
        try:
          filenm = result.path + 'x0.25_unspmask3-0.6_s_{:0>4}'.format(sl+b-1) + '.tif'
          print "Opening filenm" + filenm
          img = Image.open (filenm, 'r')
          imgdata = np.asarray ( img )
          slab[b,:,:] = imgdata
        except IOError, e:
          print e
          imgdata = np.zeros((yimagesz, ximagesz), dtype=np.uint16)
          slab[b,:,:] = imgdata

        # the last z offset that we ingest, if the batch ends before batchsz
        endz = b

    for y in range ( 0, yimagesz+1, ycubedim ):
      for x in range ( 0, ximagesz+1, xcubedim ):

        # Getting a Cube id and ingesting the data one cube at a time
        mortonidx = zindex.XYZMorton ( [x/xcubedim, y/ycubedim, (sl-startslice)/zcubedim] )
        # use a 16-bit cube to match the uint16 slab
        cubedata = np.zeros ( [zcubedim, ycubedim, xcubedim], dtype=np.uint16 )

        xmin = x
        ymin = y
        xmax = min ( ximagesz, x+xcubedim )
        ymax = min ( yimagesz, y+ycubedim )
        zmin = 0
        zmax = min(sl+zcubedim, endslice+1)

        cubedata[0:zmax-zmin,0:ymax-ymin,0:xmax-xmin] = slab[zmin:zmax, ymin:ymax, xmin:xmax]
        
        # Create the DB Blob
        fileobj = cStringIO.StringIO()
        np.save ( fileobj, cubedata )
        cdz = zlib.compress ( fileobj.getvalue() )

        # insert the blob into the database
        cursor = db.conn.cursor()
        sql = "INSERT INTO res{} (zindex, cube) VALUES (%s, %s)".format(int(result.resolution))
        cursor.execute(sql, (mortonidx, cdz))
        cursor.close()

      print " Commiting at x={}, y={}, z={}".format(x, y, sl)
    db.conn.commit()
  
    # Freeing memory
    slab = None
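
Each cube is keyed by zindex.XYZMorton, which interleaves the bits of the three cube coordinates into a single z-order index so that spatially adjacent cubes get nearby keys. A minimal sketch of that interleaving, assuming plain bit interleaving with x in the lowest position; the real zindex module may order bits differently:

def xyzMorton ( x, y, z, bits=21 ):
  """Interleave the bits of x, y, z into one Morton (z-order) index."""
  idx = 0
  for b in range ( bits ):
    idx |= ((x >> b) & 1) << (3*b)
    idx |= ((y >> b) & 1) << (3*b + 1)
    idx |= ((z >> b) & 1) << (3*b + 2)
  return idx

# e.g. xyzMorton(1, 0, 0) == 1, xyzMorton(0, 1, 0) == 2, xyzMorton(1, 1, 1) == 7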
Code example #29
def main():

    parser = argparse.ArgumentParser(description="Ingest the FlyEM image data.")
    parser.add_argument("token", action="store", help="Token for the annotation project.")
    parser.add_argument("path", action="store", help="Directory with annotation PNG files.")

    result = parser.parse_args()

    # fixed at 0 for now; could be promoted to a command-line argument
    resolution = 0

    # load a database
    [db, proj, projdb] = ocpcarest.loadDBProj(result.token)

    # get the dataset configuration
    (xcubedim, ycubedim, zcubedim) = proj.datasetcfg.cubedim[resolution]
    (startslice, endslice) = proj.datasetcfg.slicerange
    batchsz = zcubedim

    # The stored image size does not exactly match the cube size, so the
    # dimensions are hard-coded instead of read from the configuration:
    # (ximagesz,yimagesz)=proj.datasetcfg.imagesz[resolution]
    ximagesz = 49152
    yimagesz = 32768

    xtilesz = 8192
    ytilesz = 8192
    # tiles are numbered from 1, so add one for the range() bounds below
    numxtiles = 6 + 1
    numytiles = 4 + 1

    # Ingest the slices in batches of batchsz
    for sl in range(startslice, endslice + 1, batchsz):
        for ytile in range(1, numytiles):
            for xtile in range(1, numxtiles):

                slab = np.zeros([batchsz, ytilesz, xtilesz], dtype=np.uint8)

                for b in range(batchsz):

                    if sl + b <= endslice:

                        # raw data
                        filenm = result.path + "S1Column_affine_s{:0>3}_x{:0>2}_y{:0>2}.png".format(
                            sl + b + 1, xtile, ytile
                        )
                        print "Opening filenm " + filenm

                        img = Image.open(filenm, "r")
                        imgdata = np.asarray(img)
                        slab[b, :, :] = imgdata

                        # the last z offset that we ingest, if the batch ends before batchsz
                        endz = b

                # Now we have an xtilesz x ytilesz x batchsz z-aligned slab.
                # Send it to the database.
                for y in range((ytile - 1) * ytilesz, (ytile) * ytilesz, ycubedim):
                    for x in range((xtile - 1) * xtilesz, (xtile) * xtilesz, xcubedim):

                        mortonidx = zindex.XYZMorton([x / xcubedim, y / ycubedim, (sl - startslice) / zcubedim])
                        cubedata = np.zeros([zcubedim, ycubedim, xcubedim], dtype=np.uint8)

                        xmin = x % xtilesz
                        ymin = y % ytilesz
                        xmax = (min(ximagesz - 1, x + xcubedim - 1) % xtilesz) + 1
                        ymax = (min(yimagesz - 1, y + ycubedim - 1) % ytilesz) + 1
                        zmin = 0
                        # number of valid z planes in this slab; trailing planes stay zero
                        zmax = min(batchsz, endslice - sl + 1)

                        cubedata[0 : zmax - zmin, 0 : ymax - ymin, 0 : xmax - xmin] = slab[
                            zmin:zmax, ymin:ymax, xmin:xmax
                        ]

                        # Don't ingest empty (single-valued) cubes
                        if len(np.unique(cubedata)) == 1:
                            continue

                        # create the DB BLOB
                        fileobj = cStringIO.StringIO()
                        np.save(fileobj, cubedata)
                        cdz = zlib.compress(fileobj.getvalue())

                        # insert the blob into the database
                        cursor = db.conn.cursor()
                        sql = "INSERT INTO res{} (zindex, cube) VALUES (%s, %s)".format(int(resolution))
                        cursor.execute(sql, (mortonidx, cdz))
                        cursor.close()

                    print "Commiting at x=%s, y=%s, z=%s" % (x, y, sl)
                    db.conn.commit()

                # release the slab and force a collection before the next tile
                slab = None
                import gc
                gc.collect()
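
This example converts global cube coordinates into tile-local offsets with modulo arithmetic. A worked check using the constants from the example and a hypothetical x:

xtilesz, xcubedim, ximagesz = 8192, 128, 49152
x = 8192 + 256                 # a cube in the second tile
xmin = x % xtilesz             # -> 256, local offset within the tile
xmax = (min(ximagesz - 1, x + xcubedim - 1) % xtilesz) + 1   # -> 384
assert xmax - xmin == xcubedim # a full cube width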
Code example #30
def main():

    parser = argparse.ArgumentParser(
        description='Ingest the FlyEM image data.')
    parser.add_argument(
        'baseurl',
        action="store",
        help='Base URL of the OCP service, without http://, e.g. openconnecto.me')
    parser.add_argument('token',
                        action="store",
                        help='Token for the annotation project.')
    parser.add_argument('path',
                        action="store",
                        help='Directory with annotation PNG files.')

    result = parser.parse_args()

    # fixed at 0 for now; could be promoted to a command-line argument
    resolution = 0

    # load a database
    [db, proj, projdb] = ocpcarest.loadDBProj(result.token)

    # get the dataset configuration
    (xcubedim, ycubedim, zcubedim) = proj.datasetcfg.cubedim[resolution]
    (startslice, endslice) = proj.datasetcfg.slicerange
    batchsz = zcubedim

    # The stored image size does not exactly match the cube size, so the
    # dimensions are hard-coded instead of read from the configuration:
    # (ximagesz,yimagesz)=proj.datasetcfg.imagesz[resolution]
    ximagesz = 12000
    yimagesz = 12000

    # Ingest the slices in batches of batchsz
    for sl in range(startslice, endslice + 1, batchsz):

        slab = np.zeros([batchsz, yimagesz, ximagesz], dtype=np.uint8)

        for b in range(batchsz):

            if (sl + b <= endslice):

                # raw data
                filenm = result.path + '/grayscale.' + '{:0>5}'.format(
                    sl + b) + '.png'
                print "Opening filenm " + filenm

                img = Image.open(filenm, 'r')
                imgdata = np.asarray(img)
                slab[b, :, :] = imgdata

                # the last z offset that we ingest, if the batch ends before batchsz
                endz = b

        # Now we have a ximagesz x yimagesz x batchsz z-aligned slab.
        # Send it to the database.
        for y in range(0, yimagesz, ycubedim):
            for x in range(0, ximagesz, xcubedim):

                mortonidx = zindex.XYZMorton(
                    [x / xcubedim, y / ycubedim, (sl - startslice) / zcubedim])
                cubedata = np.zeros([zcubedim, ycubedim, xcubedim],
                                    dtype=np.uint8)

                xmin = x
                ymin = y
                xmax = min(ximagesz, x + xcubedim)
                ymax = min(yimagesz, y + ycubedim)
                zmin = 0
                # number of valid z planes in this slab; trailing planes stay zero
                zmax = min(batchsz, endslice - sl + 1)

                cubedata[0:zmax - zmin, 0:ymax - ymin,
                         0:xmax - xmin] = slab[zmin:zmax, ymin:ymax, xmin:xmax]

                # create the DB BLOB
                fileobj = cStringIO.StringIO()
                np.save(fileobj, cubedata)
                cdz = zlib.compress(fileobj.getvalue())

                # insert the blob into the database
                cursor = db.conn.cursor()
                sql = "INSERT INTO res{} (zindex, cube) VALUES (%s, %s)".format(
                    int(resolution))
                cursor.execute(sql, (mortonidx, cdz))
                cursor.close()

            print "Commiting at x=%s, y=%s, z=%s" % (x, y, sl)
            db.conn.commit()
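
In all of these loops, zmax bounds how many z planes of the slab are valid when the final batch runs past endslice; the untouched planes remain zero from the np.zeros allocation. A worked check with hypothetical numbers:

endslice, batchsz = 1000, 16
sl = 992                                  # the last batch starts here
zmax = min ( batchsz, endslice - sl + 1 )
assert zmax == 9                          # slices 992..1000 are valid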
Code example #31
File: flyem_anno.py Project: neurodata/ndstore
def main():

  parser = argparse.ArgumentParser(description='Ingest the FlyEM image data.')
  parser.add_argument('baseurl', action="store", help='Base URL of the OCP service, without http://, e.g. neurodata.io')
  parser.add_argument('token', action="store", help='Token for the annotation project.')
  parser.add_argument('path', action="store", help='Directory with annotation PNG files.')

  result = parser.parse_args()

  # fixed at 0 for now; could be promoted to a command-line argument
  resolution = 0

  # load a database
  [ db, proj, projdb ] = ocpcarest.loadDBProj ( result.token )

  # get the dataset configuration
  (xcubedim,ycubedim,zcubedim)=proj.datasetcfg.cubedim[resolution]
  (realstartslice,realendslice)=proj.datasetcfg.slicerange
  batchsz=zcubedim

  # The stored image size does not exactly match the cube size, so the
  # dimensions are hard-coded instead of read from the configuration:
  #(ximagesz,yimagesz)=proj.datasetcfg.imagesz[resolution]
  ximagesz = 12000
  yimagesz = 12000

  # Override the slice range by hand for this ingest run
  startslice = 979
  endslice = 1000

  batchsz = 1
  
  # Open the superpixel-to-body mapping stored in a dbm file
  anydb = anydbm.open('bodydict','r')

  # Ingest the slices in batches of batchsz
  for sl in range (startslice,endslice+1,batchsz):

    slab = np.zeros ( [ batchsz, yimagesz, ximagesz ], dtype=np.uint32 )
   
    for b in range ( batchsz ):

      if ( sl + b <= endslice ):

        #  Reading the raw data
        filenm = result.path + '/superpixel.' + '{:0>5}'.format(sl+b) + '.png'
        print "Opening filenm " + filenm
        
        
        img = Image.open ( filenm, 'r' )
        imgdata = np.asarray ( img )
        # combine the two low PNG channels into a superpixel id (red is the low byte)
        superpixelarray = imgdata[:,:,0] + (np.uint32(imgdata[:,:,1])<<8)
        uniqueidlist = np.unique ( superpixelarray )
        smalldict = {}
        # Build a small dict of the body id for every superpixel id in this slice
        # (batchsz is 1, so sl is also the index of this plane)
        for i in uniqueidlist:
          smalldict[i] = int( anydb.get( str(sl)+','+str(i) ) )
        # Look up the body id for each pixel; a vectorized alternative is
        # sketched after this example
        for i in range(superpixelarray.shape[0]):
          for j in range(superpixelarray.shape[1]):
            superpixelarray[i,j] = smalldict.get(superpixelarray[i,j])
        slab[b,:,:] = superpixelarray
        print "end on slice:",sl
        # the last z offset that we ingest, if the batch ends before batchsz
        endz = b

    # Now we have a ximagesz x yimagesz x batchsz z-aligned slab.
    # Send it to the database.
    for y in range ( 0, yimagesz, ycubedim ):
      for x in range ( 0, ximagesz, xcubedim ):

        mortonidx = zindex.XYZMorton ( [ x/xcubedim, y/ycubedim, (sl-startslice)/zcubedim] )
        # one z plane per cube, since batchsz is 1
        cubedata = np.zeros ( [1, ycubedim, xcubedim], dtype=np.uint32 )

        xmin = x
        ymin = y
        xmax = min ( ximagesz, x+xcubedim )
        ymax = min ( yimagesz, y+ycubedim )
        zmin = 0
        # number of valid z planes (one, since batchsz is 1)
        zmax = min(batchsz,endslice-sl+1)
        cubedata[0:zmax-zmin,0:ymax-ymin,0:xmax-xmin] = slab[zmin:zmax,ymin:ymax,xmin:xmax]

        # insert the blob into the database
        db.annotateDense ((x,y,sl-realstartslice),resolution,cubedata,'O')

        print "Commiting at x=%s, y=%s, z=%s" % (x,y,sl-realstartslice)
      db.conn.commit()
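
The per-pixel double loop above is correct but very slow on a 12000x12000 slice. A vectorized alternative, sketched under the assumption that smalldict maps every id that occurs in the array:

import numpy as np

def remapToBodies ( superpixelarray, smalldict ):
  """Map superpixel ids to body ids in one pass via np.unique's inverse index."""
  ids, inverse = np.unique ( superpixelarray, return_inverse=True )
  bodies = np.array ( [smalldict[i] for i in ids], dtype=np.uint32 )
  return bodies[inverse].reshape ( superpixelarray.shape )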