Example #1
    def getVoxel(self, resolution, voxel):
        """Return the identifier at a voxel"""

        # get the size of the image and cube
        [xcubedim, ycubedim,
         zcubedim] = cubedim = self.dbcfg.cubedim[resolution]

        # convert the voxel into a cube coordinate and an in-cube offset
        xyzcube = [
            voxel[0] / xcubedim, voxel[1] / ycubedim,
            (voxel[2] - self.startslice) / zcubedim
        ]
        xyzoffset = [
            voxel[0] % xcubedim, voxel[1] % ycubedim,
            (voxel[2] - self.startslice) % zcubedim
        ]

        # Create a cube object
        cube = anncube.AnnotateCube(cubedim)

        mortonidx = zindex.XYZMorton(xyzcube)

        # get the block from the database
        sql = "SELECT cube FROM " + self.annoproj.getTable(
            resolution) + " WHERE zindex = " + str(mortonidx)
        try:
            self.cursor.execute(sql)
        except MySQLdb.Error, e:
            logger.warning("Error reading annotation data: %d: %s. sql=%s" %
                           (e.args[0], e.args[1], sql))
            raise
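A minimal sketch of the cube/offset decomposition getVoxel performs before building its Morton key. The cube dimensions and voxel below are invented for illustration, and // is used so the arithmetic reads the same under Python 2 and 3:

# hypothetical cube dimensions; the real values come from dbcfg.cubedim
xcubedim, ycubedim, zcubedim = 128, 128, 16
startslice = 0

voxel = (300, 70, 35)   # arbitrary (x, y, z)

# which cube the voxel falls in ...
xyzcube = [voxel[0] // xcubedim, voxel[1] // ycubedim,
           (voxel[2] - startslice) // zcubedim]
# ... and where it sits inside that cube
xyzoffset = [voxel[0] % xcubedim, voxel[1] % ycubedim,
             (voxel[2] - startslice) % zcubedim]

print(xyzcube)    # [2, 0, 2]
print(xyzoffset)  # [44, 70, 3]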
Example #2
  def upload ( self, channel, sl, imarray ):
    """Transfer the array to the database"""

    # get the size of the cube
    xcubedim,ycubedim,zcubedim = self.proj.datasetcfg.cubedim[self.resolution]

    # and the limits of iteration
    xlimit = (self._ximgsz-1) / xcubedim + 1
    ylimit = (self._yimgsz-1) / ycubedim + 1


    for y in range(ylimit):
      for x in range(xlimit):

        # z index of the cube that holds this batch of slices
        z = sl/zcubedim

        # zindex
        key = zindex.XYZMorton ( [x,y,z] )

        # Create a channel cube
        cube = imagecube.ImageCube16 ( [xcubedim,ycubedim,zcubedim] )

        # data for this key
        cube.data = imarray[:,y*ycubedim:(y+1)*ycubedim,x*xcubedim:(x+1)*xcubedim]
        # compress the cube
        npz = cube.toNPZ ()

        # add the cube to the database
        sql = "INSERT INTO " + self.proj.getTable(self.resolution) +  "(channel, zindex, cube) VALUES (%s, %s, %s)"
        try:
          self.cursor.execute ( sql, (channel, key, npz))
        except MySQLdb.Error, e:
          print ("Error updating data cube: %d: %s. sql=%s" % (e.args[0], e.args[1], sql))
          raise 
Example #3
def main():

    parser = argparse.ArgumentParser(
        description='Ingest a drosophila tiff stack.')
    parser.add_argument('dbname', action="store")
    parser.add_argument('file', action="store")

    result = parser.parse_args()

    conn = MySQLdb.connect(host='localhost',
                           user='******',
                           passwd='88brain88',
                           db=result.dbname)

    cursor = conn.cursor()

    #Load the TIFF stack
    im = Image.open(result.file)

    imarray = np.zeros([_endslice - _startslice + 1, _ytilesize, _xtilesize],
                       dtype=np.uint8)
    for i in range(_startslice, _endslice + 1):
        im.seek(i)
        # index relative to _startslice so the array starts at 0
        imarray[i - _startslice, :, :] = np.array(im)

    for z in range(1, 2):
        conn.commit()
        for y in range(0, 4):
            for x in range(0, 4):

                zmin = z * 16
                zmax = min((z + 1) * 16, 30)
                zmaxrel = ((zmax - 1) % 16) + 1
                ymin = y * 128
                ymax = (y + 1) * 128
                xmin = x * 128
                xmax = (x + 1) * 128

                key = zindex.XYZMorton([x, y, z])

                dataout = np.zeros([16, 128, 128], dtype=np.uint8)

                dataout[0:zmaxrel, 0:128,
                        0:128] = imarray[zmin:zmax, ymin:ymax, xmin:xmax]

                # Compress the data
                outfobj = cStringIO.StringIO()
                np.save(outfobj, dataout)
                zdataout = zlib.compress(outfobj.getvalue())
                outfobj.close()

                # Put in the database
                sql = "INSERT INTO res0 (zindex, cube) VALUES (%s, %s)"
                try:
                    cursor.execute(sql, (key, zdataout))
                    conn.commit()
                except MySQLdb.Error, e:
                    print "Failed insert %d: %s. sql=%s" % (e.args[0],
                                                            e.args[1], sql)
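The np.save-plus-zlib blob built above is the storage format that the cutout code in later examples decompresses. A self-contained round trip, with invented data:

import cStringIO
import zlib
import numpy as np

dataout = np.random.randint(0, 255, (16, 128, 128)).astype(np.uint8)

# serialize and compress, as in the ingest loop above
outfobj = cStringIO.StringIO()
np.save(outfobj, dataout)
zdataout = zlib.compress(outfobj.getvalue())
outfobj.close()

# decompress and deserialize, as the cutout read path does
infobj = cStringIO.StringIO(zlib.decompress(zdataout))
restored = np.load(infobj)
assert (restored == dataout).all()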
Example #4
def main():

  parser = argparse.ArgumentParser(description='Build an aerospike DB from mysql data.')
  parser.add_argument('token', action="store", help='Token for the project.')
  parser.add_argument('resolution', action="store", type=int)
  
  result = parser.parse_args()

  # as database
  ascfg = { 'hosts': [ ('127.0.0.1', 3000) ] }
  ascli = aerospike.client(ascfg).connect()

  # mysql database
  projdb = ocpcaproj.OCPCAProjectsDB()
  proj = projdb.loadProject ( result.token )

  # Bind the annotation database
  imgDB = ocpcadb.OCPCADB ( proj )

  # Get the source database sizes
  [ximagesz, yimagesz] = proj.datasetcfg.imagesz [ result.resolution ]
  [xcubedim, ycubedim, zcubedim] = cubedim = proj.datasetcfg.cubedim [ result.resolution ]

  # Get the slices
  [ startslice, endslice ] = proj.datasetcfg.slicerange
  slices = endslice - startslice + 1

  # Set the limits for iteration on the number of cubes in each dimension
  # RBTODO These limits may be wrong for even (see channelingest.py)
  xlimit = ximagesz / xcubedim
  ylimit = yimagesz / ycubedim
  #  Round up the zlimit to the next larger
  zlimit = (((slices-1)/zcubedim+1)*zcubedim)/zcubedim 

  cursor = imgDB.conn.cursor()

  for z in range(zlimit):
    for y in range(ylimit):
      for x in range(xlimit):

        mysqlcube = imgDB.cutout ( [ x*xcubedim, y*ycubedim, z*zcubedim ], cubedim, result.resolution )
        zidx = zindex.XYZMorton ( [x,y,z] )

        tmpfile = tempfile.NamedTemporaryFile ()
        h5tocass = h5py.File ( tmpfile.name ) 
        h5tocass.create_dataset ( "cuboid", tuple(mysqlcube.data.shape), mysqlcube.data.dtype,
                                 compression='gzip',  data=mysqlcube.data )
        h5tocass.close()
        tmpfile.seek(0)

        askey = ("ocp",str(result.token)+":"+str(result.resolution),str(zidx))

        print askey
        ascli.put ( askey, { 'cuboid' : tmpfile.read().encode('hex') } )

        try:
          ascli.get ( askey )
        except Exception, e:
          print "Readback failed for", askey, ":", e
Example #5
    def get_3d_cc(self,shape):
        """Color a new 3-d image of the given shape by connected component.

        Input
        =====
        shape -- 3-tuple

        Output
        ======
        cc3d -- array with shape=shape, colored so that cc3d[x,y,z]=vertexCC[i]
                where i is the Morton index of the XYZ coordinates (x,y,z)
        """

        cc3d = np.NaN*np.zeros(shape)
        allCoord = itt.product(*[xrange(sz) for sz in shape])

        # fill in only the voxels whose connected-component label is nonzero
        for xyz in allCoord:
            cc = self.vertexCC[zindex.XYZMorton(xyz)]
            if cc != 0:
                cc3d.itemset(xyz, cc)
        return cc3d
Example #6
    def annoCutout(self, entityid, resolution, corner, dim):
        """Fetch a volume cutout with only the specified annotation"""

        cube = self.cutout(corner, dim, resolution)
        vec_func = np.vectorize(lambda x: np.uint32(0)
                                if x != entityid else np.uint32(entityid))
        cube.data = vec_func(cube.data)

        # And get the exceptions
        # get the size of the image and cube
        [xcubedim, ycubedim,
         zcubedim] = cubedim = self.dbcfg.cubedim[resolution]

        # Round to the nearest larger cube in all dimensions
        zstart = corner[2] / zcubedim
        ystart = corner[1] / ycubedim
        xstart = corner[0] / xcubedim

        znumcubes = (corner[2] + dim[2] + zcubedim - 1) / zcubedim - zstart
        ynumcubes = (corner[1] + dim[1] + ycubedim - 1) / ycubedim - ystart
        xnumcubes = (corner[0] + dim[0] + xcubedim - 1) / xcubedim - xstart

        zoffset = corner[2] % zcubedim
        yoffset = corner[1] % ycubedim
        xoffset = corner[0] % xcubedim

        for z in range(znumcubes):
            for y in range(ynumcubes):
                for x in range(xnumcubes):

                    key = zindex.XYZMorton(
                        [x + xstart, y + ystart, z + zstart])

                    # Get exceptions if this DB supports it
                    if self.EXCEPT_FLAG:
                        exceptions = self.getExceptions(
                            key, resolution, entityid)
                        if exceptions != []:
                            # write as a loop first, then figure out how to optimize
                            for e in exceptions:
                                xloc = e[0] + (x + xstart) * xcubedim
                                yloc = e[1] + (y + ystart) * ycubedim
                                zloc = e[2] + (z + zstart) * zcubedim
                                if (corner[0] <= xloc < corner[0] + dim[0] and
                                        corner[1] <= yloc < corner[1] + dim[1] and
                                        corner[2] <= zloc < corner[2] + dim[2]):
                                    cube.data[e[2] - zoffset + z * zcubedim,
                                              e[1] - yoffset + y * ycubedim,
                                              e[0] - xoffset + x * xcubedim] = entityid

        return cube
Example #7
    def removeBB(self):
        """Remove bounding-box annotations from all cubes"""

        # Get the source database sizes
        [ximagesz, yimagesz] = self.dbcfg.imagesz[self._resolution]
        [xcubedim, ycubedim, zcubedim] = self.dbcfg.cubedim[self._resolution]

        # Get the slices
        [startslice, endslice] = self.dbcfg.slicerange
        slices = endslice - startslice + 1

        # Set the limits for iteration on the number of cubes in each dimension
        xlimit = (ximagesz - 1) / xcubedim + 1
        ylimit = (yimagesz - 1) / ycubedim + 1
        #  Round up the zlimit to the next larger
        zlimit = (((slices - 1) / zcubedim + 1) * zcubedim) / zcubedim

        lastzindex = (zindex.XYZMorton([xlimit, ylimit, zlimit]))

        # call the range query
        #    self.annodb.queryRange ( 0, lastzindex, self._resolution );
        # RB restart
        self.annodb.queryRange(0, lastzindex, self._resolution)

        # get the first cube
        [key, cube] = self.annodb.getNextCube()

        count = 0

        while key != None:

            if len(np.intersect1d(np.unique(cube.data), self.BBIDS)) != 0:
                print "Found bounding box data in cube ", zindex.MortonXYZ(key)
                # Remove annotations
                vector_func = np.vectorize(lambda a: np.uint32(0)
                                           if a in self.BBIDS else a)
                cube.data = vector_func(cube.data)
                assert (type(cube.data[0, 0, 0]) == np.uint32)
                # Put the cube
                self.annodb.putCube(key, self._resolution, cube)
                count = count + 1

            else:
                print "No matching data in ", key

            # Get the next cube
            [key, cube] = self.annodb.getNextCube()

            if count == 100:
                self.annodb.conn.commit()
                count = 0

        print "No more cubes"
Example #8
def get_3d_cc(vcc,shape):
    """Color a new 3-d image of the given shape by connected component.

    For some reason this is about 3 times as fast as the equivalent method
    on the ConnectedComponent class.

    Input
    =====
    vcc -- 1-d array of component labels
    shape -- 3-tuple

    Output
    ======
    cc3d -- array with shape=shape, colored so that cc3d[x,y,z]=vcc[i]
            where i is the Morton index of the XYZ coordinates (x,y,z)
    """

    cc3d = np.NaN*np.zeros(shape)
    allCoord = itt.product(*[xrange(sz) for sz in shape])

    # fill in only the voxels whose connected-component label is nonzero
    for xyz in allCoord:
        cc = vcc[zindex.XYZMorton(xyz)]
        if cc != 0:
            cc3d.itemset(xyz, cc)
    return cc3d
Example #9
    def getVoxels (self):
      """Return the set of voxels in this fiber, as Morton indices."""

      voxels = []

      #  This is corrected to match the logic of MRCAP
      # extract a path of vertices
      for fbrpt in self.path:
          voxels.append ( zindex.XYZMorton ( [ int(fbrpt[0]), int(fbrpt[1]), int(fbrpt[2]) ] ))

      # eliminate duplicates
      return set ( voxels )
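zindex is the project's Morton-order module. As a rough stand-in for readers without it, a toy bit-interleaving pair can be sketched; whether the real XYZMorton uses the same bit order and width is an assumption here:

def xyz_morton(xyz):
    """Toy encoder: interleave the bits of x, y, z (x in the lowest position)."""
    x, y, z = xyz
    idx = 0
    for bit in range(21):              # enough for coordinates < 2**21
        idx |= ((x >> bit) & 1) << (3 * bit)
        idx |= ((y >> bit) & 1) << (3 * bit + 1)
        idx |= ((z >> bit) & 1) << (3 * bit + 2)
    return idx

def morton_xyz(idx):
    """Toy decoder: invert xyz_morton."""
    x = y = z = 0
    for bit in range(21):
        x |= ((idx >> (3 * bit)) & 1) << bit
        y |= ((idx >> (3 * bit + 1)) & 1) << bit
        z |= ((idx >> (3 * bit + 2)) & 1) << bit
    return [x, y, z]

assert morton_xyz(xyz_morton([5, 9, 2])) == [5, 9, 2]

Because each coordinate triple maps to a single scalar, deduplicating voxels through a set of Morton indices, as getVoxels does above, is equivalent to deduplicating the triples themselves.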
Example #10
def main():

    parser = argparse.ArgumentParser(
        description='Build an aerospike DB from mysql data.')
    parser.add_argument('intoken',
                        action="store",
                        help='Token for the input (mysql) project.')
    parser.add_argument('outtoken',
                        action="store",
                        help='Token for the output project.')
    parser.add_argument('resolution', action="store", type=int)

    result = parser.parse_args()

    # cassandra database
    outprojdb = ocpcaproj.OCPCAProjectsDB()
    outproj = outprojdb.loadProject(result.outtoken)

    # mysql database
    inprojdb = ocpcaproj.OCPCAProjectsDB()
    inproj = inprojdb.loadProject(result.intoken)

    # Bind the databases
    inDB = ocpcadb.OCPCADB(inproj)
    outDB = ocpcadb.OCPCADB(outproj)

    # Get the source database sizes
    [ximagesz, yimagesz] = inproj.datasetcfg.imagesz[result.resolution]
    [xcubedim, ycubedim,
     zcubedim] = cubedim = inproj.datasetcfg.cubedim[result.resolution]

    # Get the slices
    [startslice, endslice] = inproj.datasetcfg.slicerange
    slices = endslice - startslice + 1

    # Set the limits for iteration on the number of cubes in each dimension
    # and the limits of iteration
    xlimit = (ximagesz - 1) / xcubedim + 1
    ylimit = (yimagesz - 1) / ycubedim + 1
    #  Round up the zlimit to the next larger
    zlimit = (((slices - 1) / zcubedim + 1) * zcubedim) / zcubedim

    for z in range(zlimit):
        for y in range(ylimit):
            for x in range(xlimit):

                zidx = zindex.XYZMorton([x, y, z])
                outDB.putCube(zidx, result.resolution,
                              inDB.getCube(zidx, result.resolution))
                print "Ingesting {}".format(zidx)
Example #11
    def removeExceptions(self, key, resolution, entityid, exceptions):
        """Remove a list of exceptions"""

        curexlist = self.getExceptions(key, resolution, entityid)

        table = 'exc' + str(resolution)

        if curexlist != []:

            oldexlist = set([zindex.XYZMorton(trpl) for trpl in curexlist])
            newexlist = set([zindex.XYZMorton(trpl) for trpl in exceptions])
            exlist = oldexlist - newexlist
            exlist = [zindex.MortonXYZ(zidx) for zidx in exlist]

            sql = "UPDATE " + table + " SET exlist=(%s) WHERE zindex=%s AND id=%s"
            try:
                fileobj = cStringIO.StringIO()
                np.save(fileobj, exlist)
                self.cursor.execute(
                    sql, (zlib.compress(fileobj.getvalue()), key, entityid))
            except MySQLdb.Error, e:
                logger.error("Error removing exceptions %d: %s. sql=%s" %
                             (e.args[0], e.args[1], sql))
                raise
Example #12
    def upload(self, channel, sl, imarray):
        """Transfer the array to the database"""

        for y in range(0, self._yimgsz + 1, self.ycubedim):
            for x in range(0, self._ximgsz + 1, self.xcubedim):

                # zindex
                mortonidx = zindex.XYZMorton([
                    x / self.xcubedim, y / self.ycubedim,
                    (sl - self.startslice) / self.zcubedim
                ])

                # Create a channel cube
                cube = imagecube.ImageCube8(
                    [self.xcubedim, self.ycubedim, self.zcubedim])

                xmin = x
                ymin = y
                xmax = min(self._ximgsz, x + self.xcubedim)
                ymax = min(self._yimgsz, y + self.ycubedim)
                zmin = 0
                zmax = min(sl + self.zcubedim, self.endslice + 1)

                # data for this key
                #cube.data = imarray[:,y*self.ycubedim:(y+1)*self.ycubedim,x*self.xcubedim:(x+1)*self.xcubedim]
                cube.data = imarray[zmin:zmax, ymin:ymax, xmin:xmax]
                # compress the cube
                #npz = cube.toNPZ ()

                fileobj = cStringIO.StringIO()
                np.save(fileobj, cube.data)
                cdz = zlib.compress(fileobj.getvalue())

                # add the cube to the database
                sql = "INSERT INTO {} (channel, zindex, cube) VALUES (%s, %s, %s)".format(
                    self.proj.getTable(self.resolution))
                try:
                    #print xmin,xmax,ymin,ymax,zmin,zmax
                    self.cursor.execute(sql, (channel, mortonidx, cdz))
                except MySQLdb.Error, e:
                    print("Error updating data cube: %d: %s. sql=%s" %
                          (e.args[0], e.args[1], sql))
                    raise
            print " Commiting at x={}, y={}, z={}".format(x, y, sl)
Example #13
    def writeImageCuboid(self, corner, resolution, imgdata):
        """Write an image through the Web service"""

        # dim is in xyz, data is in zyx
        dim = [imgdata.shape[2], imgdata.shape[1], imgdata.shape[0]]

        # get the size of the image and cube
        [xcubedim, ycubedim,
         zcubedim] = cubedim = self.dbcfg.cubedim[resolution]

        # Round to the nearest larger cube in all dimensions
        zstart = corner[2] / zcubedim
        ystart = corner[1] / ycubedim
        xstart = corner[0] / xcubedim

        znumcubes = (corner[2] + dim[2] + zcubedim - 1) / zcubedim - zstart
        ynumcubes = (corner[1] + dim[1] + ycubedim - 1) / ycubedim - ystart
        xnumcubes = (corner[0] + dim[0] + xcubedim - 1) / xcubedim - xstart

        zoffset = corner[2] % zcubedim
        yoffset = corner[1] % ycubedim
        xoffset = corner[0] % xcubedim

        databuffer = np.zeros(
            [znumcubes * zcubedim, ynumcubes * ycubedim, xnumcubes * xcubedim],
            dtype=np.uint8)
        databuffer[zoffset:zoffset + dim[2], yoffset:yoffset + dim[1],
                   xoffset:xoffset + dim[0]] = imgdata

        for z in range(znumcubes):
            for y in range(ynumcubes):
                for x in range(xnumcubes):

                    key = zindex.XYZMorton(
                        [x + xstart, y + ystart, z + zstart])
                    cube = self.getCube(key, resolution, True)

                    cube.data = databuffer[z * zcubedim:(z + 1) * zcubedim,
                                           y * ycubedim:(y + 1) * ycubedim,
                                           x * xcubedim:(x + 1) * xcubedim]

                    self.putCube(key, resolution, cube)
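A reduced one-dimensional picture of the staging buffer used above, with invented sizes: the written region lands at its offset inside whole-cube extents, then is carved back out cube by cube.

import numpy as np

cubedim = 16
corner, dim = 20, 40    # an unaligned write of 40 voxels starting at 20

start = corner // cubedim                                    # first cube: 1
numcubes = (corner + dim + cubedim - 1) // cubedim - start   # cubes touched: 3
offset = corner % cubedim                                    # in-cube offset: 4

buf = np.zeros(numcubes * cubedim, dtype=np.uint8)
buf[offset:offset + dim] = 1                                 # stage the data

for c in range(numcubes):
    chunk = buf[c * cubedim:(c + 1) * cubedim]
    print(int(chunk.sum()))    # 12, 16, 12 voxels written per cube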
Example #14
    def __init__(self, matrixdim, rois, mask):

        # Regions of interest
        self.rois = rois

        # Brainmask
        #    self.mask = mask

        # Round up to the nearest power of 2
        xdim = int(math.pow(2, math.ceil(math.log(matrixdim[0], 2))))
        ydim = int(math.pow(2, math.ceil(math.log(matrixdim[1], 2))))
        zdim = int(math.pow(2, math.ceil(math.log(matrixdim[2], 2))))

        # Need the dimensions to be the same shape for zindex
        xdim = ydim = zdim = max(xdim, ydim, zdim)

        # the largest index is dim-1 in each dimension; add one so _maxval is an exclusive bound (as in range)
        self._maxval = zindex.XYZMorton([xdim - 1, ydim - 1, zdim - 1]) + 1

        # list of list matrix for one by one insertion
        self.spedgemat = lil_matrix((self._maxval, self._maxval), dtype=float)

        # empty CSC matrix
        self.spcscmat = csc_matrix((self._maxval, self._maxval), dtype=float)
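A quick check of the power-of-two rounding used above; the values are arbitrary:

import math

def next_pow2(n):
    # round n up to the nearest power of two, as in __init__ above
    return int(math.pow(2, math.ceil(math.log(n, 2))))

assert next_pow2(100) == 128
assert next_pow2(129) == 256
# caveat: the float log can misround near exact powers of two; the
# original code carries the same caveat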
Example #15
def main():

    parser = argparse.ArgumentParser(
        description='Ingest the FlyEM image data.')
    parser.add_argument(
        'baseurl',
        action="store",
        help='Base URL of the OCP service, without http://, e.g. openconnecto.me')
    parser.add_argument('token',
                        action="store",
                        help='Token for the annotation project.')
    parser.add_argument('path',
                        action="store",
                        help='Directory with annotation PNG files.')

    result = parser.parse_args()

    # convert to an argument
    resolution = 0

    # load a database
    [db, proj, projdb] = ocpcarest.loadDBProj(result.token)

    # get the dataset configuration
    (xcubedim, ycubedim, zcubedim) = proj.datasetcfg.cubedim[resolution]
    (startslice, endslice) = proj.datasetcfg.slicerange
    batchsz = zcubedim

    # This doesn't work because the image size does not match exactly the cube size
    #(ximagesz,yimagesz)=proj.datasetcfg.imagesz[resolution]
    ximagesz = 12000
    yimagesz = 12000

    # Get a list of the files in the directories
    for sl in range(startslice, endslice + 1, batchsz):

        slab = np.zeros([batchsz, yimagesz, ximagesz], dtype=np.uint8)

        for b in range(batchsz):

            if (sl + b <= endslice):

                # raw data
                filenm = result.path + '/grayscale.' + '{:0>5}'.format(
                    sl + b) + '.png'
                print "Opening filenm " + filenm

                img = Image.open(filenm, 'r')
                imgdata = np.asarray(img)
                slab[b, :, :] = imgdata

                # the last z offset that we ingest, if the batch ends before batchsz
                endz = b

        # Now we have a z-aligned slab of batchsz slices.
        # Send it to the database.
        for y in range(0, yimagesz, ycubedim):
            for x in range(0, ximagesz, xcubedim):

                mortonidx = zindex.XYZMorton(
                    [x / xcubedim, y / ycubedim, (sl - startslice) / zcubedim])
                cubedata = np.zeros([zcubedim, ycubedim, xcubedim],
                                    dtype=np.uint8)

                xmin = x
                ymin = y
                xmax = min(ximagesz, x + xcubedim)
                ymax = min(yimagesz, y + ycubedim)
                zmin = 0
                zmax = min(sl + zcubedim, endslice + 1)

                cubedata[0:zmax - zmin, 0:ymax - ymin,
                         0:xmax - xmin] = slab[zmin:zmax, ymin:ymax, xmin:xmax]

                # create the DB BLOB
                fileobj = cStringIO.StringIO()
                np.save(fileobj, cubedata)
                cdz = zlib.compress(fileobj.getvalue())

                # insert the blob into the database
                cursor = db.conn.cursor()
                sql = "INSERT INTO res{} (zindex, cube) VALUES (%s, %s)".format(
                    int(resolution))
                cursor.execute(sql, (mortonidx, cdz))
                cursor.close()

            print "Commiting at x=%s, y=%s, z=%s" % (x, y, sl)
            db.conn.commit()
Example #16
    def shaveDense(self, entityid, corner, resolution, annodata):
        """Process all the annotations in the dense volume"""

        index_dict = defaultdict(set)

        # dim is in xyz, data is in zyx
        dim = [annodata.shape[2], annodata.shape[1], annodata.shape[0]]

        # get the size of the image and cube
        [xcubedim, ycubedim,
         zcubedim] = cubedim = self.dbcfg.cubedim[resolution]

        # Round to the nearest larger cube in all dimensions
        zstart = corner[2] / zcubedim
        ystart = corner[1] / ycubedim
        xstart = corner[0] / xcubedim

        znumcubes = (corner[2] + dim[2] + zcubedim - 1) / zcubedim - zstart
        ynumcubes = (corner[1] + dim[1] + ycubedim - 1) / ycubedim - ystart
        xnumcubes = (corner[0] + dim[0] + xcubedim - 1) / xcubedim - xstart

        zoffset = corner[2] % zcubedim
        yoffset = corner[1] % ycubedim
        xoffset = corner[0] % xcubedim

        databuffer = np.zeros(
            [znumcubes * zcubedim, ynumcubes * ycubedim, xnumcubes * xcubedim],
            dtype=np.uint32)
        databuffer[zoffset:zoffset + dim[2], yoffset:yoffset + dim[1],
                   xoffset:xoffset + dim[0]] = annodata

        for z in range(znumcubes):
            for y in range(ynumcubes):
                for x in range(xnumcubes):

                    key = zindex.XYZMorton(
                        [x + xstart, y + ystart, z + zstart])
                    cube = self.getCube(key, resolution, True)

                    exdata = cube.shaveDense(
                        databuffer[z * zcubedim:(z + 1) * zcubedim,
                                   y * ycubedim:(y + 1) * ycubedim,
                                   x * xcubedim:(x + 1) * xcubedim])
                    for exid in np.unique(exdata):
                        if exid != 0:
                            # get the offsets
                            exoffsets = np.nonzero(exdata == exid)
                            # assemble into 3-tuples zyx->xyz
                            exceptions = np.array(zip(exoffsets[2],
                                                      exoffsets[1],
                                                      exoffsets[0]),
                                                  dtype=np.uint32)
                            # update the exceptions
                            self.removeExceptions(key, resolution, exid,
                                                  exceptions)
                            # add to the index
                            index_dict[exid].add(key)

                    self.putCube(key, resolution, cube)

                    #update the index for the cube
                    # get the unique elements that are being added to the data
                    uniqueels = np.unique(
                        databuffer[z * zcubedim:(z + 1) * zcubedim,
                                   y * ycubedim:(y + 1) * ycubedim,
                                   x * xcubedim:(x + 1) * xcubedim])
                    for el in uniqueels:
                        index_dict[el].add(key)

                    # remove 0; no reason to index the background
                    del (index_dict[0])

        # Update all indexes
        self.annoIdx.updateIndexDense(index_dict, resolution)
Example #17
    def cutout(self, corner, dim, resolution, channel=None):
        """Extract a cube of arbitrary size.  Need not be aligned."""

        # get the size of the image and cube
        [xcubedim, ycubedim,
         zcubedim] = cubedim = self.dbcfg.cubedim[resolution]

        # Round to the nearest larger cube in all dimensions
        zstart = corner[2] / zcubedim
        ystart = corner[1] / ycubedim
        xstart = corner[0] / xcubedim

        znumcubes = (corner[2] + dim[2] + zcubedim - 1) / zcubedim - zstart
        ynumcubes = (corner[1] + dim[1] + ycubedim - 1) / ycubedim - ystart
        xnumcubes = (corner[0] + dim[0] + xcubedim - 1) / xcubedim - xstart

        if (self.annoproj.getDBType() == emcaproj.ANNOTATIONS):

            # input cube is the database size
            incube = anncube.AnnotateCube(cubedim)

            # output cube is as big as was asked for and zero it.
            outcube = anncube.AnnotateCube ( [xnumcubes*xcubedim,\
                                              ynumcubes*ycubedim,\
                                              znumcubes*zcubedim] )
            outcube.zeros()

        elif (self.annoproj.getDBType() == emcaproj.IMAGES):

            incube = imagecube.ImageCube(cubedim)
            outcube = imagecube.ImageCube ( [xnumcubes*xcubedim,\
                                              ynumcubes*ycubedim,\
                                              znumcubes*zcubedim] )

        elif (self.annoproj.getDBType() == emcaproj.CHANNELS):

            incube = chancube.ChanCube(cubedim)
            outcube = chancube.ChanCube ( [xnumcubes*xcubedim,\
                                              ynumcubes*ycubedim,\
                                              znumcubes*zcubedim] )

        # Build a list of indexes to access
        listofidxs = []
        for z in range(znumcubes):
            for y in range(ynumcubes):
                for x in range(xnumcubes):
                    mortonidx = zindex.XYZMorton(
                        [x + xstart, y + ystart, z + zstart])
                    listofidxs.append(mortonidx)

        # Sort the indexes in Morton order
        listofidxs.sort()

        # Batch query for all cubes
        dbname = self.annoproj.getTable(resolution)

        # Customize query to the database (include channel or not)
        if (self.annoproj.getDBType() == emcaproj.CHANNELS):
            sql = "SELECT zindex, cube FROM " + dbname + " WHERE channel= " + str(
                channel) + " AND zindex in (%s)"
        else:
            sql = "SELECT zindex, cube FROM " + dbname + " WHERE zindex IN (%s)"

        # creates a %s for each list element
        in_p = ', '.join(['%s'] * len(listofidxs))
        # replace the single %s with the in_p string
        sql = sql % in_p
        rc = self.cursor.execute(sql, listofidxs)

        # xyz offset stored for later use
        lowxyz = zindex.MortonXYZ(listofidxs[0])

        # Get the objects and add to the cube
        while True:
            row = self.cursor.fetchone()
            if row is None:
                break
            idx, datastring = row

            #add the query result cube to the bigger cube
            curxyz = zindex.MortonXYZ(int(idx))
            offset = [
                curxyz[0] - lowxyz[0], curxyz[1] - lowxyz[1],
                curxyz[2] - lowxyz[2]
            ]

            incube.fromNPZ(datastring[:])
            # add it to the output cube
            outcube.addData(incube, offset)

        # need to trim down the array to size
        #  only if the dimensions are not the same
        if dim[0] % xcubedim  == 0 and\
           dim[1] % ycubedim  == 0 and\
           dim[2] % zcubedim  == 0 and\
           corner[0] % xcubedim  == 0 and\
           corner[1] % ycubedim  == 0 and\
           corner[2] % zcubedim  == 0:
            pass
        else:
            outcube.trim ( corner[0]%xcubedim,dim[0],\
                            corner[1]%ycubedim,dim[1],\
                            corner[2]%zcubedim,dim[2] )

        return outcube
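The placeholder expansion above is plain string work done before the driver ever sees the query. A standalone illustration with an invented table name:

listofidxs = [3, 17, 53]

sql = "SELECT zindex, cube FROM res0 WHERE zindex IN (%s)"
in_p = ', '.join(['%s'] * len(listofidxs))    # "%s, %s, %s"
sql = sql % in_p

print(sql)
# SELECT zindex, cube FROM res0 WHERE zindex IN (%s, %s, %s)
# cursor.execute(sql, listofidxs) then binds the three values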
Example #18
def main():

    parser = argparse.ArgumentParser(description='Ingest the Rohanna data.')
    parser.add_argument('token',
                        action="store",
                        help='Token for the annotation project.')
    parser.add_argument('path',
                        action="store",
                        help='Directory with annotation TIF files.')
    parser.add_argument('resolution',
                        action="store",
                        type=int,
                        help='Resolution')

    result = parser.parse_args()

    # convert to an argument
    resolution = result.resolution

    # load a database
    [db, proj, projdb] = ocpcarest.loadDBProj(result.token)

    # get the dataset configuration
    (xcubedim, ycubedim, zcubedim) = proj.datasetcfg.cubedim[resolution]
    (startslice, endslice) = proj.datasetcfg.slicerange
    batchsz = zcubedim

    # This doesn't work because the image size does not match exactly the cube size
    #(ximagesz,yimagesz)=proj.datasetcfg.imagesz[resolution]
    ximagesz = 5120
    yimagesz = 5120
    endslice = 1123

    # add all of the tiles to the image
    for sl in range(startslice, endslice + 1, batchsz):

        slab = np.zeros([batchsz, yimagesz, ximagesz], dtype=np.uint32)

        for b in range(batchsz):

            if (sl + b <= endslice):

                # raw data
                filenm = result.path + '/labels_{:0>5}_ocp.tif'.format(sl + b)
                print "Opening filenm " + filenm

                img = Image.open(filenm, 'r')
                imgdata = np.asarray(img)

                slab[b, :, :] = (imgdata)
                # the last z offset that we ingest, if the batch ends before batchsz
                endz = b

        # Now we have a 5120x5120x16 z-aligned cube.
        # Send it to the database.
        for y in range(0, yimagesz, ycubedim):
            for x in range(0, ximagesz, xcubedim):

                mortonidx = zindex.XYZMorton([
                    (x + xoffsetsz) / xcubedim, (y + yoffsetsz) / ycubedim,
                    (sl + zoffsetsz - startslice) / zcubedim
                ])
                cubedata = np.zeros([zcubedim, ycubedim, xcubedim],
                                    dtype=np.uint32)
                xmin = x
                ymin = y
                xmax = min(ximagesz, x + xcubedim)
                ymax = min(yimagesz, y + ycubedim)
                zmin = 0
                zmax = min(sl + zcubedim, endslice + 1)
                cubedata[0:zmax - zmin, 0:ymax - ymin,
                         0:xmax - xmin] = slab[zmin:zmax, ymin:ymax, xmin:xmax]

                # check if there's anything to store
                if (np.count_nonzero(cubedata) == 0):
                    continue

                # create the DB BLOB
                fileobj = cStringIO.StringIO()
                np.save(fileobj, cubedata)
                cdz = zlib.compress(fileobj.getvalue())

                # insert the blob into the database
                cursor = db.conn.cursor()
                sql = "INSERT INTO res{} (zindex, cube) VALUES (%s, %s)".format(
                    int(resolution))
                cursor.execute(sql, (mortonidx, cdz))
                cursor.close()

                print "Commiting at x=%s, y=%s, z=%s" % (
                    x + xoffsetsz, y + yoffsetsz, sl + b + zoffsetsz)
            db.conn.commit()
Example #19
def parallelwrite(slicenumber):
  
  # Accessing the dict in dbm
  #anydb = anydbm.open('bodydict','r')

  [ db, proj, projdb ] = ocpcarest.loadDBProj ( result.token )

  #print slicenumber
  startslice = slicenumber
  endslice = startslice+16

  # Get a list of the files in the directories
  for sl in range (startslice, endslice+1, batchsz):

    slab = np.zeros ( [ batchsz, yimagesz, ximagesz ], dtype=np.uint32 )
   
    for b in range ( batchsz ):

      if ( sl + b <= endslice and sl + b<=1460 ):

        # raw data
        filenm = result.path + '/superpixel.' + '{:0>5}'.format(sl+b) + '.png'
        #print "Opening filenm " + filenm
        
        img = Image.open ( filenm, 'r' )
        imgdata = np.asarray ( img )
        # map superpixel ids to body ids via the dbm dictionary
        anydb = anydbm.open('bodydict2','r')
        superpixelarray = imgdata[:,:,0] + (np.uint32(imgdata[:,:,1])<<8)
        newdata = np.zeros([superpixelarray.shape[0],superpixelarray.shape[1]], dtype=np.uint32)
        #print "slice",sl+b,"batch",sl
        print sl+b,multiprocessing.current_process()
        for i in range(superpixelarray.shape[0]):
          for j in range(superpixelarray.shape[1]):
            key = str(sl)+','+str(superpixelarray[i,j])
            if( key not in anydb):
              f = open('missing_keys', 'a')
              f.write(key+'\n')
              f.close()
              print "Error Detected Writing to File"
              dictvalue = '0'
            else:
              dictvalue = anydb.get( key )
            newdata[i,j] = int(dictvalue)
        slab[b,:,:] = newdata
        print "end of slice:",sl+b
        anydb.close()
        
    print "Entering commit phase"

    # Now we have a z-aligned slab of batchsz slices.
    # Send it to the database.
    for y in range ( 0, yimagesz, ycubedim ):
      for x in range ( 0, ximagesz, xcubedim ):

        mortonidx = zindex.XYZMorton ( [ x/xcubedim, y/ycubedim, (sl-startslice)/zcubedim] )
        cubedata = np.zeros ( [zcubedim, ycubedim, xcubedim], dtype=np.uint32 )

        xmin = x
        ymin = y
        xmax = min ( ximagesz, x+xcubedim )
        ymax = min ( yimagesz, y+ycubedim )
        zmin = 0
        zmax = min(sl+zcubedim,endslice+1)

        cubedata[0:zmax-zmin,0:ymax-ymin,0:xmax-xmin] = slab[zmin:zmax,ymin:ymax,xmin:xmax]

        # insert the blob into the database
        db.annotateDense ((x,y,sl-startslice), resolution, cubedata, 'O')

      print "Commiting at x=%s, y=%s, z=%s" % (x,y,sl)
      db.conn.commit()
  return None
Example #20
def main():

  parser = argparse.ArgumentParser(description='Ingest a tiff stack.')
  parser.add_argument('token', action="store" )
  parser.add_argument('channel', type=int, action="store" )
  parser.add_argument('path', action="store" )
  parser.add_argument('numslices', type=int, action="store" )

  result = parser.parse_args()

  projdb = emcaproj.EMCAProjectsDB()
  proj = projdb.getProj ( result.token )
  dbcfg = dbconfig.switchDataset ( proj.getDataset() )

  _ximgsz = None
  _yimgsz = None

  for sl in range(result.numslices):

    filenm = result.path + '/' + '{:0>4}'.format(sl) + '.png'
    print filenm
    img = Image.open ( filenm, "r" )

    if _ximgsz is None and _yimgsz is None:
      _ximgsz,_yimgsz = img.size
      imarray = np.zeros ( [result.numslices, _yimgsz, _ximgsz], dtype=np.uint16 )
    else:
      assert _ximgsz == img.size[0] and _yimgsz == img.size[1]

    imarray[sl,:,:] = np.asarray ( img )

  # get the size of the cube
  xcubedim,ycubedim,zcubedim = dbcfg.cubedim[0]
  
  # and the limits of iteration
  xlimit = (_ximgsz-1) / xcubedim + 1
  ylimit = (_yimgsz-1) / ycubedim + 1
  zlimit = (result.numslices-1) / zcubedim + 1

  # open the database
  db = emcadb.EMCADB ( dbcfg, proj )

  # get a db cursor 
  cursor = db.conn.cursor()

  for z in range(zlimit):
    db.commit()
    for y in range(ylimit):
      for x in range(xlimit):

        zmin = z*zcubedim
        zmax = min((z+1)*zcubedim,result.numslices)
        zmaxrel = ((zmax-1)%zcubedim)+1 
        ymin = y*ycubedim
        ymax = min((y+1)*ycubedim,_yimgsz)
        ymaxrel = ((ymax-1)%ycubedim)+1
        xmin = x*xcubedim
        xmax = min((x+1)*xcubedim,_ximgsz)
        xmaxrel = ((xmax-1)%xcubedim)+1

        # morton key
        key = zindex.XYZMorton ( [x,y,z] )

        # Create a channel cube
        cube = chancube.ChanCube ( [xcubedim,ycubedim,zcubedim] )

        # data for this key
        cube.data[0:zmaxrel,0:ymaxrel,0:xmaxrel] = imarray[zmin:zmax,ymin:ymax,xmin:xmax]

        # compress the cube
        npz = cube.toNPZ ()


        # add the cube to the database
        sql = "INSERT INTO " + proj.getTable(RESOLUTION) +  "(zindex, channel, cube) VALUES (%s, %s, %s)"
        print sql
        try:
          cursor.execute ( sql, (key, result.channel, npz))
        except MySQLdb.Error, e:
          raise ANNError ( "Error updating data cube: %d: %s. sql=%s" % (e.args[0], e.args[1], sql))
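The *maxrel computations above turn an absolute exclusive bound into an extent within the current cube. For instance, with an assumed zcubedim of 16 and a 30-slice stack:

zcubedim, numslices = 16, 30

z = 1                                        # second cube along z
zmax = min((z + 1) * zcubedim, numslices)    # 30: the stack ends early
zmaxrel = ((zmax - 1) % zcubedim) + 1        # 14 slices land in this cube
print(zmaxrel)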
Example #21
resolution = 1

for zstart in range(1, 1850, 16):

    zend = min(zstart + 16, 1851)

    slab = np.fromfile(fid,
                       count=_ximagesz * _yimagesz * (zend - zstart),
                       dtype=np.uint8)
    slab = slab.reshape([(zend - zstart), _yimagesz, _ximagesz])

    for y in range(0, _yimagesz, _ycubedim):
        for x in range(0, _ximagesz, _xcubedim):

            mortonidx = zindex.XYZMorton(
                [x / _xcubedim, y / _ycubedim, (zstart - 1) / _zcubedim])
            cubedata = np.zeros([_zcubedim, _ycubedim, _xcubedim],
                                dtype=np.uint8)

            xmin = x
            ymin = y
            xmax = min(_ximagesz, x + _xcubedim)
            ymax = min(_yimagesz, y + _ycubedim)
            zmin = 0
            zmax = zend - zstart

            cubedata[0:zmax - zmin, 0:ymax - ymin,
                     0:xmax - xmin] = slab[zmin:zmax, ymin:ymax, xmin:xmax]

            #      print "zindex,x,y,z,xmax,ymax,zmax,shape",mortonidx,x,y,zstart,xmax,ymax,zend,cubedata.shape
Example #22
def main():

  parser = argparse.ArgumentParser(description='Ingest the TIFF data')
  parser.add_argument('token', action="store", help='Token for the project')
  parser.add_argument('path', action="store", help='Directory with the image files')
  
  result = parser.parse_args()
  
  resolution = 0

  #Load a database
  [ db, proj, projdb ] = ocpcarest.loadDBProj ( result.token )

  # get the dataset configuration
  (xcubedim,ycubedim,zcubedim) = proj.datasetcfg.cubedim[resolution]
  (startslice,endslice)=proj.datasetcfg.slicerange
  batchsz = zcubedim

  batchsz = 16
  (ximagesz,yimagesz)=proj.datasetcfg.imagesz[resolution]

  # Set the image size to that of the actual image
  ximagesz = 6144
  yimagesz = 6144
  tilesz = 6144

  filelist = glob.glob( "{}*.tif".format(result.path) )

  # Get a list of the files in the directories
  for sl in range (startslice, endslice+1, batchsz):

    slab = np.zeros ( [ batchsz, yimagesz, ximagesz ], dtype=np.uint8 )

    for b in range ( batchsz ):

      if ( sl + b <= endslice ):

        # raw data
        #filenm = result.path + '{:0>4}_r1-c1_w01_s01_sec01_'.format(sl+b) + '{*}.tif'
        print "Opening filenm" + filelist[sl+b-1]

        img = Image.open (filelist[sl+b-1], 'r')
        imgdata = np.asarray ( img )
        slab[b,:,:] = imgdata

        # the last z offset that we ingest, if the batch ends before batchsz
        endz = b

    for y in range ( 0, yimagesz+1, ycubedim ):
      for x in range ( 0, ximagesz+1, xcubedim ):

        mortonidx = zindex.XYZMorton ( [x/xcubedim, y/ycubedim, (sl-startslice)/zcubedim] )
        cubedata = np.zeros ( [zcubedim, ycubedim, xcubedim], dtype=np.uint8 )

        xmin = x
        ymin = y
        xmax = ((min(ximagesz-1,x+xcubedim-1))%tilesz)+1
        ymax = ((min(yimagesz-1,y+ycubedim-1))%tilesz)+1
        
        #xmax = min ( ximagesz, x+xcubedim )
        #ymax = min ( yimagesz, y+ycubedim )
        zmin = 0
        zmax = min(sl+zcubedim, endslice+1)

        cubedata[0:zmax-zmin,0:ymax-ymin,0:xmax-xmin] = slab[zmin:zmax, ymin:ymax, xmin:xmax]
        
        # Create the DB Blob
        fileobj = cStringIO.StringIO()
        np.save ( fileobj, cubedata )
        cdz = zlib.compress ( fileobj.getvalue() )

        # insert the blob into the database
        cursor = db.conn.cursor()
        sql = "INSERT INTO res{} (zindex, cube) VALUES (%s, %s)".format(int(resolution))
        cursor.execute(sql, (mortonidx, cdz))
        cursor.close()

      print " Commiting at x={}, y={}, z={}".format(x, y, sl)
    db.conn.commit()

    slab = None
Example #23
    [outdb, outproj, outprojdb] = ocpcarest.loadDBProj(result.outtoken)

    # get the dataset configuration
    (xcubedim, ycubedim,
     zcubedim) = inproj.datasetcfg.cubedim[result.resolution]
    (ximagesz, yimagesz) = inproj.datasetcfg.imagesz[result.resolution]
    (startslice, endslice) = inproj.datasetcfg.slicerange
    slices = endslice - startslice + 1

    # Set the limits for iteration on the number of cubes in each dimension
    xlimit = (ximagesz - 1) / xcubedim + 1
    ylimit = (yimagesz - 1) / ycubedim + 1
    #  Round up the zlimit to the next larger
    zlimit = (((slices - 1) / zcubedim + 1) * zcubedim) / zcubedim

    lastzindex = (zindex.XYZMorton([xlimit, ylimit, zlimit]) / 64 + 1) * 64

    # iterate over the cubes in morton order
    for mortonidx in range(lastzindex):

        x, y, z = zindex.MortonXYZ(mortonidx)

        # only process cubes in the space
        if x >= xlimit or y >= ylimit or z >= zlimit:
            continue

        incube = indb.getCube(mortonidx, result.resolution)
        annoids = np.unique(incube.data)

        sql = "select * from {} where source in ({})".format(
            mergemap, ','.join(annoids))
Example #24
    def cutout(self, corner, dim, resolution):
        """Extract a cube of arbitrary size.  Need not be aligned."""

        [xcubedim, ycubedim, zcubedim] = self.dbcfg.cubedim[resolution]

        # Round to the nearest larger cube in all dimensions
        start = [ corner[0]/xcubedim,\
                        corner[1]/ycubedim,\
                        corner[2]/zcubedim ]

        numcubes = [ (corner[0]+dim[0]+xcubedim-1)/xcubedim - start[0],\
                    (corner[1]+dim[1]+ycubedim-1)/ycubedim - start[1],\
                    (corner[2]+dim[2]+zcubedim-1)/zcubedim - start[2] ]

        inbuf = imagecube.ImageCube(self.dbcfg.cubedim[resolution])
        outbuf = imagecube.ImageCube([
            numcubes[0] * xcubedim, numcubes[1] * ycubedim,
            numcubes[2] * zcubedim
        ])

        # Build a list of indexes to access
        listofidxs = []
        for z in range(numcubes[2]):
            for y in range(numcubes[1]):
                for x in range(numcubes[0]):
                    mortonidx = zindex.XYZMorton(
                        [x + start[0], y + start[1], z + start[2]])
                    listofidxs.append(mortonidx)

        # Sort the indexes in Morton order
        listofidxs.sort()

        # Batch query for all cubes
        dbname = self.dbcfg.tablebase + str(resolution)
        cursor = self.conn.cursor()
        sql = "SELECT zindex, cube from " + dbname + " where zindex in (%s)"
        # creates a %s for each list element
        in_p = ', '.join(['%s'] * len(listofidxs))
        # replace the single %s with the in_p string
        sql = sql % in_p
        cursor.execute(sql, listofidxs)

        # xyz offset stored for later use
        lowxyz = zindex.MortonXYZ(listofidxs[0])

        # Get the objects and add to the cube
        for i in range(len(listofidxs)):
            idx, datastring = cursor.fetchone()

            # get the data out of the compressed blob
            newstr = zlib.decompress(datastring[:])
            newfobj = cStringIO.StringIO(newstr)
            inbuf.data = np.load(newfobj)

            #add the query result cube to the bigger cube
            curxyz = zindex.MortonXYZ(int(idx))
            offsetxyz = [
                curxyz[0] - lowxyz[0], curxyz[1] - lowxyz[1],
                curxyz[2] - lowxyz[2]
            ]
            outbuf.addData(inbuf, offsetxyz)

        # need to trim down the array to size
        #  only if the dimensions are not the same
        if dim[0] % xcubedim  == 0 and\
           dim[1] % ycubedim  == 0 and\
           dim[2] % zcubedim  == 0 and\
           corner[0] % xcubedim  == 0 and\
           corner[1] % ycubedim  == 0 and\
           corner[2] % zcubedim  == 0:
            pass
        else:
            outbuf.trim ( corner[0]%xcubedim,dim[0],\
                            corner[1]%ycubedim,dim[1],\
                            corner[2]%zcubedim,dim[2] )

        return outbuf
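The trim test above only asks whether the request was cube-aligned at both ends. A compact version of the same predicate, with assumed cube dimensions:

xcubedim, ycubedim, zcubedim = 128, 128, 16
corner, dim = [200, 0, 0], [300, 128, 16]

aligned = all(c % s == 0 and d % s == 0
              for c, d, s in zip(corner, dim,
                                 (xcubedim, ycubedim, zcubedim)))
print(aligned)    # False: x is unaligned, so outbuf.trim() would run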
Example #25
def main():

  parser = argparse.ArgumentParser(description='Ingest the TIFF data')
  parser.add_argument('token', action="store", help='Token for the project')
  parser.add_argument('path', action="store", help='Directory with the image files')
  parser.add_argument('resolution', action="store", type=int, help='Resolution of data')

  result = parser.parse_args()
  
  #Load a database
  [ db, proj, projdb ] = ocpcarest.loadDBProj ( result.token )

  # get the dataset configuration
  (xcubedim,ycubedim,zcubedim) = proj.datasetcfg.cubedim[result.resolution]
  (startslice,endslice)=proj.datasetcfg.slicerange
  batchsz = zcubedim

  batchsz = 16
  (ximagesz,yimagesz)=proj.datasetcfg.imagesz[result.resolution]

  yimagesz = 18000
  ximagesz = 24000
  
  # Get a list of the files in the directories
  for sl in range ( startslice, endslice+1, batchsz):

    slab = np.zeros ( [ batchsz, yimagesz, ximagesz ], dtype=np.uint32 )

    for b in range ( batchsz ):

      if ( sl + b <= endslice ):

        # raw data
        try:
          filenm = result.path + '{:0>4}'.format(sl+b) + '.tiff'
          print "Opening filenm" + filenm
          img = Image.open (filenm, 'r').convert("RGBA")
          imgdata = np.asarray ( img )
          slab[b,:,:] = np.left_shift(imgdata[:,:,3], 24, dtype=np.uint32) | np.left_shift(imgdata[:,:,2], 16, dtype=np.uint32) | np.left_shift(imgdata[:,:,1], 8, dtype=np.uint32) | np.uint32(imgdata[:,:,0])
        except IOError, e:
          print e
          imgdata = np.zeros((yimagesz, ximagesz), dtype=np.uint32)
          slab[b,:,:] = imgdata

        # the last z offset that we ingest, if the batch ends before batchsz
        endz = b

    for y in range ( 0, yimagesz+1, ycubedim ):
      for x in range ( 0, ximagesz+1, xcubedim ):

        # Getting a Cube id and ingesting the data one cube at a time
        mortonidx = zindex.XYZMorton ( [x/xcubedim, y/ycubedim, (sl-startslice)/zcubedim] )
        cubedata = np.zeros ( [zcubedim, ycubedim, xcubedim], dtype=np.uint32 )

        xmin = x
        ymin = y
        xmax = min ( ximagesz, x+xcubedim )
        ymax = min ( yimagesz, y+ycubedim )
        zmin = 0
        zmax = min(sl+zcubedim, endslice+1)

        cubedata[0:zmax-zmin,0:ymax-ymin,0:xmax-xmin] = slab[zmin:zmax, ymin:ymax, xmin:xmax]
        
        # Create the DB Blob
        fileobj = cStringIO.StringIO()
        np.save ( fileobj, cubedata )
        cdz = zlib.compress ( fileobj.getvalue() )

        # insert the blob into the database
        cursor = db.conn.cursor()
        sql = "INSERT INTO res{} (zindex, cube) VALUES (%s, %s)".format(int(result.resolution))
        cursor.execute(sql, (mortonidx, cdz))
        cursor.close()

      print " Commiting at x={}, y={}, z={}".format(x, y, sl)
      db.conn.commit()
  
    # Freeing memory
    slab = None
Example #26
def main():

    parser = argparse.ArgumentParser(
        description='Ingest a drosophila tiff stack.')
    parser.add_argument('dbname', action="store")
    parser.add_argument('token', action="store")
    parser.add_argument('file', action="store")

    result = parser.parse_args()

    conn = MySQLdb.connect(host='localhost',
                           user='******',
                           passwd='88brain88',
                           db=result.dbname)

    cursor = conn.cursor()

    #Load the TIFF stack
    im = Image.open(result.file)

    # RBTODO this is a 16bit image.  Don't know how to read it.
    #  It won't convert into a numpy array

    imarray = np.zeros([_endslice - _startslice + 1, _ytilesize, _xtilesize],
                       dtype=np.uint32)
    for i in range(_startslice, _endslice + 1):
        im.seek(i)
        print i
        # index relative to _startslice so the array starts at 0
        imarray[i - _startslice, :, :] = np.array(im.getdata()).reshape([512, 512])

    print np.unique(imarray)
    print np.nonzero(imarray)

    for z in range(0, 2):
        conn.commit()
        for y in range(0, 4):
            for x in range(0, 4):

                zmin = z * 16
                zmax = min((z + 1) * 16, 30)
                zmaxrel = ((zmax - 1) % 16) + 1
                ymin = y * 128
                ymax = (y + 1) * 128
                xmin = x * 128
                xmax = (x + 1) * 128

                key = zindex.XYZMorton([x, y, z])

                dataout = np.zeros([16, 128, 128], dtype=np.uint8)

                dataout[0:zmaxrel, 0:128,
                        0:128] = imarray[zmin:zmax, ymin:ymax, xmin:xmax]

                url = 'http://localhost:8000/emca/%s/npdense/%s/%s,%s/%s,%s/%s,%s/' % (
                    result.token, 0, xmin, xmax, ymin, ymax, zmin, zmax)

                print url, dataout.shape, np.unique(dataout)

                # Serialize the cube with np.save
                fileobj = cStringIO.StringIO()
                np.save(fileobj, dataout)

                cdz = zlib.compress(fileobj.getvalue())

                # Build the post request
                req = urllib2.Request(url, cdz)
                response = urllib2.urlopen(req)
                the_page = response.read()
Example #27
    def findLocations(self, channel, resolution, threshhold, outfile):
        """Build the hierarchy of annotations"""

        # Get the source database sizes
        [ximagesz, yimagesz] = self.proj.datasetcfg.imagesz[resolution]
        [xcubedim, ycubedim,
         zcubedim] = self.proj.datasetcfg.cubedim[resolution]

        # Get the slices
        [startslice, endslice] = self.proj.datasetcfg.slicerange
        slices = endslice - startslice + 1

        # Set the limits for iteration on the number of cubes in each dimension
        xlimit = ximagesz / xcubedim
        ylimit = yimagesz / ycubedim

        #  Round up the zlimit to the next larger
        zlimit = (((slices - 1) / zcubedim + 1) * zcubedim) / zcubedim

        # Round up to the top of the range
        lastzindex = (zindex.XYZMorton([xlimit, ylimit, zlimit]) / 64 + 1) * 64

        locs = []

        with closing(open(outfile, 'wb')) as csvfile:
            csvwriter = csv.writer(csvfile)

            # Iterate over the cubes in morton order
            for mortonidx in range(0, lastzindex, 64):

                print "Working on batch %s at %s" % (
                    mortonidx, zindex.MortonXYZ(mortonidx))

                # call the range query
                self.annoDB.queryRange(mortonidx, mortonidx + 64, resolution,
                                       channel)

                # Flag to indicate no data.  No update query
                somedata = False

                # get the first cube
                [key, cube] = self.annoDB.getNextCube()

                #  if there's a cube, there's data
                if key != None:
                    somedata = True

                while key != None:

                    xyz = zindex.MortonXYZ(key)

                    # Compute the global offset of this cube in the output space
                    offset = [
                        xyz[0] * xcubedim, xyz[1] * ycubedim, xyz[2] * zcubedim
                    ]

                    nzlocs = np.nonzero(cube.data > threshhold)

                    if len(nzlocs[1]) != 0:

                        print "res : zindex = ", resolution, ":", key, ", location", zindex.MortonXYZ(
                            key)
                        # zip together the x y z and value
                        cubelocs = zip(
                            nzlocs[0], nzlocs[1], nzlocs[2],
                            cube.data[nzlocs[0], nzlocs[1], nzlocs[2]])
                        # translate from z,y,x,value to x,y,z,value and add global offset
                        locs = [(pt[2] + offset[0], pt[1] + offset[1],
                                 pt[0] + offset[2], pt[3]) for pt in cubelocs]

                        csvwriter.writerows([x for x in locs])

                    [key, cube] = self.annoDB.getNextCube()
Example #28
 def get_coord_cc(self,xyz):
     return self.get_cc(zindex.XYZMorton(xyz))
Example #29
def main():

    parser = argparse.ArgumentParser(
        description='Ingest the FlyEM image data.')
    parser.add_argument('token',
                        action="store",
                        help='Token for the annotation project.')
    parser.add_argument('path',
                        action="store",
                        help='Directory with annotation PNG files.')

    result = parser.parse_args()

    # convert to an argument
    resolution = 0

    # load a database
    [db, proj, projdb] = ocpcarest.loadDBProj(result.token)

    # get the dataset configuration
    (xcubedim, ycubedim, zcubedim) = proj.datasetcfg.cubedim[resolution]
    (startslice, endslice) = proj.datasetcfg.slicerange
    batchsz = zcubedim

    # This doesn't work because the image size does not match exactly the cube size
    #(ximagesz,yimagesz)=proj.datasetcfg.imagesz[resolution]
    ximagesz = 49152
    yimagesz = 32768

    xtilesz = 8192
    ytilesz = 8192
    numxtiles = (6 + 1)
    numytiles = (4 + 1)

    # Get a list of the files in the directories
    for sl in range(startslice, endslice + 1, batchsz):
        for ytile in range(1, numytiles):
            for xtile in range(1, numxtiles):

                slab = np.zeros([batchsz, ytilesz, xtilesz], dtype=np.uint8)

                for b in range(batchsz):

                    if (sl + b <= endslice):

                        # raw data
                        filenm = result.path + 'S1Column_affine_s{:0>3}_x{:0>2}_y{:0>2}.png'.format(
                            sl + b + 1, xtile, ytile)
                        print "Opening filenm " + filenm

                        img = Image.open(filenm, 'r')
                        imgdata = np.asarray(img)
                        slab[b, :, :] = imgdata

                        # the last z offset that we ingest, if the batch ends before batchsz
                        endz = b

                # Now we have a z-aligned slab of batchsz slices.
                # Send it to the database.
                for y in range((ytile - 1) * ytilesz, (ytile) * ytilesz,
                               ycubedim):
                    for x in range((xtile - 1) * xtilesz, (xtile) * xtilesz,
                                   xcubedim):

                        mortonidx = zindex.XYZMorton([
                            x / xcubedim, y / ycubedim,
                            (sl - startslice) / zcubedim
                        ])
                        cubedata = np.zeros([zcubedim, ycubedim, xcubedim],
                                            dtype=np.uint8)

                        xmin = x % xtilesz
                        ymin = y % ytilesz
                        xmax = (min(ximagesz - 1, x + xcubedim - 1) %
                                xtilesz) + 1
                        ymax = (min(yimagesz - 1, y + ycubedim - 1) %
                                ytilesz) + 1
                        zmin = 0
                        zmax = min(sl + zcubedim, endslice + 1)

                        cubedata[0:zmax - zmin, 0:ymax - ymin,
                                 0:xmax - xmin] = slab[zmin:zmax, ymin:ymax,
                                                       xmin:xmax]

                        # Don't ingest empty cubes
                        if len(np.unique(cubedata)) == 1:
                            continue

                        # create the DB BLOB
                        fileobj = cStringIO.StringIO()
                        np.save(fileobj, cubedata)
                        cdz = zlib.compress(fileobj.getvalue())

                        # insert the blob into the database
                        cursor = db.conn.cursor()
                        sql = "INSERT INTO res{} (zindex, cube) VALUES (%s, %s)".format(
                            int(resolution))
                        cursor.execute(sql, (mortonidx, cdz))
                        cursor.close()

                    print "Commiting at x=%s, y=%s, z=%s" % (x, y, sl)
                    db.conn.commit()

                slab = None
                import gc
                gc.collect()
Example #30
def main():

  parser = argparse.ArgumentParser(description='Ingest the TIFF data')
  parser.add_argument('token', action="store", help='Token for the project')
  parser.add_argument('resolution', action="store", type=int, help='Resolution')
  parser.add_argument('path', action="store", help='Directory with the image files')
  
  result = parser.parse_args()
  
  #Load a database
  with closing ( ocpcaproj.OCPCAProjectsDB() ) as projdb:
    proj = projdb.loadProject ( result.token )

  with closing ( ocpcadb.OCPCADB(proj) ) as db:

    # get the dataset configuration
    (xcubedim,ycubedim,zcubedim) = proj.datasetcfg.cubedim[result.resolution]
    (startslice,endslice)=proj.datasetcfg.slicerange
    (starttime,endtime)=proj.datasetcfg.timerange
    (ximagesz,yimagesz)=proj.datasetcfg.imagesz[result.resolution]
    batchsz = zcubedim

    dims = (2048,1172,31)

    # Set the image size to that of the actual image
    ximagesz = 2048
    yimagesz = 1172
    endslice = 31

    # Get a list of the files in the directories
    for ts in range ( starttime, endtime ):

      filenm = "{}TM{:0>5}_CM0_CHN00.stack".format(result.path,ts)
      print "Opening file", filenm

      img = open(filenm,'r')
      imgdata = np.frombuffer(img.read(), dtype=np.int16, count=int(np.prod(dims))).reshape(dims, order='F')
      imgdata = np.swapaxes(imgdata,0,2)

      for sl in range (startslice, endslice+1, batchsz):

        slab = np.zeros ( [ batchsz, yimagesz, ximagesz ], dtype=np.uint16 )

        for b in range ( batchsz ):

          if ( sl + b <= endslice ):

            slab[b,:,:] = imgdata[b,:,:]

            # the last z offset that we ingest, if the batch ends before batchsz
            endz = b

        for y in range ( 0, yimagesz+1, ycubedim ):
          for x in range ( 0, ximagesz+1, xcubedim ):

            mortonidx = zindex.XYZMorton ( [x/xcubedim, y/ycubedim, (sl-startslice)/zcubedim] )
            cubedata = np.zeros ( [zcubedim, ycubedim, xcubedim], dtype=np.uint16 )

            xmin = x
            ymin = y
            xmax = min ( ximagesz, x+xcubedim )
            ymax = min ( yimagesz, y+ycubedim )
            zmin = 0
            zmax = min(sl+zcubedim, endslice+1)

            cubedata[0:zmax-zmin,0:ymax-ymin,0:xmax-xmin] = slab[zmin:zmax, ymin:ymax, xmin:xmax]
            
            # Create the DB Blob
            fileobj = cStringIO.StringIO()
            np.save ( fileobj, cubedata )
            cdz = zlib.compress ( fileobj.getvalue() )

            # insert the blob into the database
            cursor = db.conn.cursor()
            sql = "INSERT INTO res{} (zindex, timestamp, cube) VALUES (%s, %s, %s)".format(int(result.resolution))
            cursor.execute(sql, (mortonidx, ts, cdz))
            cursor.close()

          print " Commiting at x={}, y={}, z={}".format(x, y, sl)
        db.conn.commit()

        slab = None