def main():

    parser = argparse.ArgumentParser(description='Build an aerospike DB from mysql data.')
    parser.add_argument('intoken', action="store", help='Token for the input project.')
    parser.add_argument('outtoken', action="store", help='Token for the output project.')
    parser.add_argument('resolution', action="store", type=int)

    result = parser.parse_args()

    # output project database
    outprojdb = ocpcaproj.OCPCAProjectsDB()
    outproj = outprojdb.loadProject(result.outtoken)

    # input (mysql) project database
    inprojdb = ocpcaproj.OCPCAProjectsDB()
    inproj = inprojdb.loadProject(result.intoken)

    # Bind the databases
    inDB = ocpcadb.OCPCADB(inproj)
    outDB = ocpcadb.OCPCADB(outproj)

    # Get the source database sizes
    [ximagesz, yimagesz] = inproj.datasetcfg.imagesz[result.resolution]
    [xcubedim, ycubedim, zcubedim] = cubedim = inproj.datasetcfg.cubedim[result.resolution]

    # Get the slices
    [startslice, endslice] = inproj.datasetcfg.slicerange
    slices = endslice - startslice + 1

    # Set the limits for iteration on the number of cubes in each dimension
    xlimit = (ximagesz - 1) / xcubedim + 1
    ylimit = (yimagesz - 1) / ycubedim + 1
    # Round up the zlimit to the next larger cube boundary
    zlimit = (((slices - 1) / zcubedim + 1) * zcubedim) / zcubedim

    # copy the data one cube at a time
    for z in range(zlimit):
        for y in range(ylimit):
            for x in range(xlimit):
                zidx = zindex.XYZMorton([x, y, z])
                outDB.putCube(zidx, result.resolution, inDB.getCube(zidx, result.resolution))
                print "Ingesting {}".format(zidx)

def createProject(self, project):

    try:
        existing_proj = Project.objects.get(project_name=project['name'])
        print "Project {} already exists! Skipping creation of project and token...".format(project['name'])
    except Project.DoesNotExist, e:
        new_project = Project()
        new_project.project_name = project['name']
        new_project.user = User.objects.get(id=1)
        new_project.description = "Test Project for CAJAL v{}".format(self.version)
        new_project.public = 0
        new_project.dataset = self.getDataset(project['dataset'])
        new_project.host = 'localhost'
        new_project.kvengine = 'MySQL'
        new_project.kvserver = 'localhost'
        new_project.ocp_version = '0.6'
        new_project.schema_version = '0.6'
        new_project.save()

        pd = ocpcaproj.OCPCAProjectsDB()
        pd.newOCPCAProject(new_project.project_name)

        tk = Token(token_name=new_project.project_name,
                   token_description='Default token for project {}'.format(new_project.project_name),
                   project_id=new_project, public=0, user=new_project.user)
        tk.save()

def ingest(token, resolution):
    """Read the stack and ingest"""

    with closing(ocpcaproj.OCPCAProjectsDB()) as projdb:
        proj = projdb.loadProject(token)

    with closing(ocpcadb.OCPCADB(proj)) as db:

        (xcubedim, ycubedim, zcubedim) = cubedims = proj.datasetcfg.cubedim[resolution]

        zidx = 0

        # write a cube of sequential test data
        cube = imagecube.ImageCube16(cubedims)
        cube.zeros()
        cube.data = np.array(range(xcubedim * ycubedim * zcubedim), dtype=np.uint8).reshape(cubedims)
        db.putCube(zidx, resolution, cube)
        db.conn.commit()

        # read it back and print
        c = db.getCube(zidx, resolution)
        print c.data

        # overwrite the cube with zeros and verify
        cube2 = imagecube.ImageCube16(cubedims)
        cube2.data = np.zeros(cubedims, dtype=np.uint8)
        db.putCube(zidx, resolution, cube2, True)
        db.conn.commit()

        c = db.getCube(zidx, resolution)

        import pdb
        pdb.set_trace()
        print c.data

def main():

    parser = argparse.ArgumentParser(description='Create a new dataset.')
    parser.add_argument('dsname', action="store", help='Name of the dataset')
    parser.add_argument('ximagesize', type=int, action="store")
    parser.add_argument('yimagesize', type=int, action="store")
    parser.add_argument('zimagesize', type=int, action="store")
    parser.add_argument('xoffset', type=int, action="store")
    parser.add_argument('yoffset', type=int, action="store")
    parser.add_argument('zoffset', type=int, action="store")
    parser.add_argument('xvoxelres', type=float, action="store")
    parser.add_argument('yvoxelres', type=float, action="store")
    parser.add_argument('zvoxelres', type=float, action="store")
    parser.add_argument('scalinglevels', type=int, action="store")
    parser.add_argument('scalingoption', type=str, action="store", help='should be isotropic or zslices', default='zslices')
    parser.add_argument('--startwindow', type=int, action="store", default=0)
    parser.add_argument('--endwindow', type=int, action="store", default=0)
    parser.add_argument('--starttime', type=int, action="store", default=0)
    parser.add_argument('--endtime', type=int, action="store", default=0)

    result = parser.parse_args()

    # Get database info
    pd = ocpcaproj.OCPCAProjectsDB()

    imagesize = (result.ximagesize, result.yimagesize, result.zimagesize)
    offset = (result.xoffset, result.yoffset, result.zoffset)
    voxelres = (result.xvoxelres, result.yvoxelres, result.zvoxelres)

    pd.newDataset(result.dsname, imagesize, offset, voxelres, result.scalinglevels, result.scalingoption,
                  result.startwindow, result.endwindow, result.starttime, result.endtime)

def main():

    parser = argparse.ArgumentParser(description='Create a new annotation project.')
    parser.add_argument('token', action="store")
    parser.add_argument('openid', action="store")
    parser.add_argument('host', action="store")
    parser.add_argument('project', action="store")
    parser.add_argument('datatype', action="store", type=int, help='1 8-bit data or 2 32-bit annotations')
    parser.add_argument('dataset', action="store")
    parser.add_argument('dataurl', action="store")
    parser.add_argument('--kvserver', action="store", default='localhost')
    parser.add_argument('--kvengine', action="store", default='MySQL')
    parser.add_argument('--readonly', action='store_true', help='Project is readonly')
    parser.add_argument('--public', action='store_true', help='Project is public')
    parser.add_argument('--noexceptions', action='store_true', help='Project has no exceptions. (FASTER).')
    parser.add_argument('--nocreate', action='store_true', help='Do not create a database. Just make a project entry.')
    parser.add_argument('--resolution', action='store', type=int, help='Maximum resolution for an annotation project', default=0)

    result = parser.parse_args()

    # Get database info
    pd = ocpcaproj.OCPCAProjectsDB()
    pd.newOCPCAProj(result.token, result.openid, result.host, result.project, result.datatype, result.dataset,
                    result.dataurl, result.readonly, not result.noexceptions, result.nocreate, result.resolution,
                    result.public, result.kvserver, result.kvengine, False)

def createChannel(self, channel, project):

    proj_obj = Project.objects.get(project_name=project['name'])

    try:
        existing_channel = Channel.objects.get(channel_name=channel['name'], project=proj_obj)
        print "Channel {} already exists for project {}! Skipping creation...".format(channel['name'], project['name'])
    except Channel.DoesNotExist, e:
        new_channel = Channel()
        new_channel.project = proj_obj
        new_channel.channel_name = channel['name']
        new_channel.description = channel['desc']
        new_channel.channel_type = channel['type']
        new_channel.resolution = channel['res']
        new_channel.propagate = channel['propagate']
        new_channel.channel_datatype = channel['datatype']
        new_channel.readonly = 0
        new_channel.exceptions = channel['exceptions']
        new_channel.save()

        try:
            pd = ocpcaproj.OCPCAProjectsDB()
            pd.newOCPCAChannel(project['name'], channel['name'])
        except Exception, e:
            print e
            exit()

def importChannels(self):
    """Import channels from the old project to the new project."""

    # make sure the ocp project exists
    pr = Project.objects.get(project_name=self.newproject_name)

    for channel in self.oldchannels.keys():
        ch = Channel()
        ch.project = pr
        ch.channel_name = channel
        ch.channel_description = 'Imported from oldchannel schema.'
        ch.channel_type = 'image'
        ch.resolution = 0
        ch.propagate = self.propagate
        ch.channel_datatype = self.datatype
        ch.readonly = self.readonly
        ch.exceptions = 0
        ch.startwindow = 0
        ch.endwindow = 0
        ch.default = False

        try:
            ch.save()
            pd = ocpcaproj.OCPCAProjectsDB()
            pd.newOCPCAChannel(pr.project_name, ch.channel_name)
            print "Created channel {}".format(channel)
        except Exception, e:
            print "[ERROR]: {}".format(e)
            exit()

def getAnnoIds(proj, ch, Xmin, Xmax, Ymin, Ymax, Zmin, Zmax):
    """Return a list of anno ids restricted by equality predicates.
    Equalities are alternating in field/value in the url."""

    with closing(ocpcaproj.OCPCAProjectsDB()) as projdb:
        proj = projdb.loadToken(proj.getToken())

    db = (ocpcadb.OCPCADB(proj))
    resolution = ch.getResolution()

    mins = (int(Xmin), int(Ymin), int(Zmin))
    maxs = (int(Xmax), int(Ymax), int(Zmax))
    offset = proj.datasetcfg.offset[resolution]

    from operator import sub
    corner = map(sub, mins, offset)
    dim = map(sub, maxs, mins)

    if not proj.datasetcfg.checkCube(resolution, corner, dim):
        logger.warning("Illegal cutout corner={}, dim={}".format(corner, dim))
        raise OCPCAError("Illegal cutout corner={}, dim={}".format(corner, dim))

    cutout = db.cutout(ch, corner, dim, resolution)

    if cutout.isNotZeros():
        annoids = np.unique(cutout.data)
    else:
        annoids = np.asarray([], dtype=np.uint32)

    return annoids[1:]

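A minimal usage sketch for getAnnoIds, following the loadToken/getChannelObj pattern used elsewhere in this code and assuming the same module imports; the token 'mytoken', the channel name 'annotations', and the cutout bounds are hypothetical placeholders:

def example_get_anno_ids():
    # hypothetical token and channel names; substitute real ones
    with closing(ocpcaproj.OCPCAProjectsDB()) as projdb:
        proj = projdb.loadToken('mytoken')
    ch = proj.getChannelObj('annotations')
    # ids present in a 512 x 512 x 16 region, in dataset coordinates
    ids = getAnnoIds(proj, ch, 0, 512, 0, 512, 0, 16)
    print "Found {} annotation ids".format(len(ids))
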
def createChannel(self, channel_name, index):
    """create the channels"""

    new_channel = Channel()
    new_channel.project = Project.objects.get(project_name=self.proj.getProjectName())
    new_channel.channel_name = channel_name
    new_channel.channel_description = channel_name
    new_channel.channel_type = 'images'
    new_channel.resolution = self.resolution
    new_channel.propagate = 0
    new_channel.channel_datatype = 'uint16'
    new_channel.readonly = 0
    new_channel.exceptions = 0
    new_channel.startwindow = 0
    new_channel.endwindow = END_WINDOWS[index]
    new_channel.save()

    try:
        # create tables for the channel
        pd = ocpcaproj.OCPCAProjectsDB()
        pd.newOCPCAChannel(self.proj.getProjectName(), new_channel.channel_name)
    except Exception, e:
        print e
        exit()

def ingest(self):
    """Read the stack and ingest"""

    with closing(ocpcaproj.OCPCAProjectsDB()) as projdb:
        proj = projdb.loadToken(self.token)

    with closing(ocpcadb.OCPCADB(proj)) as db:

        ch = proj.getChannelObj(self.channel)

        # get the dataset configuration
        [[ximagesz, yimagesz, zimagesz], (starttime, endtime)] = proj.datasetcfg.imageSize(self.resolution)
        [xcubedim, ycubedim, zcubedim] = cubedim = proj.datasetcfg.getCubeDims()[self.resolution]
        [xoffset, yoffset, zoffset] = proj.datasetcfg.getOffset()[self.resolution]

        # for all specified resolutions
        for resolution in range(0, 1, 1):

            # extract parameters for iteration
            numxtiles = ximagesz / self.tilesz[0]
            numytiles = yimagesz / self.tilesz[1]

            # Ingest in database aligned slabs in the z dimension
            for slice_number in range(0, zimagesz, zcubedim):

                slab = np.zeros([zcubedim, yimagesz, ximagesz], dtype=np.uint32)

                # over all tiles in that slice
                for b in range(zcubedim):
                    for ytile in range(numytiles):
                        for xtile in range(numxtiles):

                            # only process slices that lie within the image space
                            if slice_number + b <= zimagesz:
                                try:
                                    filename = '{}{}/{}/{}/{}.png'.format(self.tilepath, resolution, slice_number + b + zoffset, ytile + 17, xtile + 16)
                                    print "Opening filename {}".format(filename)
                                    # add tile to stack, packing RGBA channels into a uint32
                                    imgdata = np.asarray(Image.open(filename, 'r').convert('RGBA'))
                                    imgdata = np.left_shift(imgdata[:, :, 3], 24, dtype=np.uint32) | np.left_shift(imgdata[:, :, 2], 16, dtype=np.uint32) | np.left_shift(imgdata[:, :, 1], 8, dtype=np.uint32) | np.uint32(imgdata[:, :, 0])
                                    slab[b, ytile * self.tilesz[1]:(ytile + 1) * self.tilesz[1], xtile * self.tilesz[0]:(xtile + 1) * self.tilesz[0]] = imgdata
                                except IOError, e:
                                    print "Failed to open file {}".format(filename)
                                    slab[b, ytile * self.tilesz[1]:(ytile + 1) * self.tilesz[1], xtile * self.tilesz[0]:(xtile + 1) * self.tilesz[0]] = np.zeros([self.tilesz[1], self.tilesz[0]], dtype=np.uint32)

                for y in range(0, yimagesz + 1, ycubedim):
                    for x in range(0, ximagesz + 1, xcubedim):

                        # getting the cube id and ingesting the data one cube at a time
                        zidx = ocplib.XYZMorton([x / xcubedim, y / ycubedim, slice_number / zcubedim])
                        cube = Cube.getCube(cubedim, ch.getChannelType(), ch.getDataType())
                        cube.zeros()

                        xmin, ymin = x, y
                        xmax = min(ximagesz, x + xcubedim)
                        ymax = min(yimagesz, y + ycubedim)
                        zmin = 0
                        zmax = min(slice_number + zcubedim, zimagesz + 1)

                        cube.data[0:zmax - zmin, 0:ymax - ymin, 0:xmax - xmin] = slab[zmin:zmax, ymin:ymax, xmin:xmax]
                        if cube.isNotZeros():
                            db.putCube(ch, zidx, self.resolution, cube, update=True)

def main():

    parser = argparse.ArgumentParser(description='Build an aerospike DB from mysql data.')
    parser.add_argument('token', action="store", help='Token for the project.')
    parser.add_argument('resolution', action="store", type=int)

    result = parser.parse_args()

    # aerospike database
    ascfg = {'hosts': [('127.0.0.1', 3000)]}
    ascli = aerospike.client(ascfg).connect()

    # mysql database
    projdb = ocpcaproj.OCPCAProjectsDB()
    proj = projdb.loadProject(result.token)

    # Bind the annotation database
    imgDB = ocpcadb.OCPCADB(proj)

    # Get the source database sizes
    [ximagesz, yimagesz] = proj.datasetcfg.imagesz[result.resolution]
    [xcubedim, ycubedim, zcubedim] = cubedim = proj.datasetcfg.cubedim[result.resolution]

    # Get the slices
    [startslice, endslice] = proj.datasetcfg.slicerange
    slices = endslice - startslice + 1

    # Set the limits for iteration on the number of cubes in each dimension
    # RBTODO These limits may be wrong for even (see channelingest.py)
    xlimit = ximagesz / xcubedim
    ylimit = yimagesz / ycubedim
    # Round up the zlimit to the next larger cube boundary
    zlimit = (((slices - 1) / zcubedim + 1) * zcubedim) / zcubedim

    cursor = imgDB.conn.cursor()

    for z in range(zlimit):
        for y in range(ylimit):
            for x in range(xlimit):

                # cutout the cube from mysql
                mysqlcube = imgDB.cutout([x * xcubedim, y * ycubedim, z * zcubedim], cubedim, result.resolution)
                zidx = zindex.XYZMorton([x, y, z])

                # serialize the cube as HDF5 into a temporary file
                tmpfile = tempfile.NamedTemporaryFile()
                h5tocass = h5py.File(tmpfile.name)
                h5tocass.create_dataset("cuboid", tuple(mysqlcube.data.shape), mysqlcube.data.dtype, compression='gzip', data=mysqlcube.data)
                h5tocass.close()
                tmpfile.seek(0)

                # write the hex-encoded HDF5 file into aerospike
                askey = ("ocp", str(result.token) + ":" + str(result.resolution), str(zidx))
                print askey
                ascli.put(askey, {'cuboid': tmpfile.read().encode('hex')})

                try:
                    ascli.get(askey)
                except:
                    print "Aerospike get failed for key {}".format(askey)

def ingest(self):
    """Read the stack and ingest"""

    with closing(ocpcaproj.OCPCAProjectsDB()) as projdb:
        proj = projdb.loadProject(self.token)

    with closing(ocpcadb.OCPCADB(proj)) as db:

        (startslice, endslice) = proj.datasetcfg.slicerange
        (xcubedim, ycubedim, zcubedim) = cubedims = proj.datasetcfg.cubedim[self.resolution]
        (ximagesz, yimagesz) = proj.datasetcfg.imagesz[self.resolution]
        batchsz = zcubedim

        # Ingest in database aligned slabs in the z dimension
        for sl in range(startslice, endslice, batchsz):

            slab = np.zeros([zcubedim, yimagesz, ximagesz], dtype=np.uint8)

            # over each slice
            for b in range(batchsz):

                # only process slices that lie within the range
                if sl + b <= endslice:

                    filename = '{}{:0>3}____z{}.0.tif'.format(self.path, sl + b, (sl + b - 1) * 25)
                    # filename = '{}{:0>4}____z{}.0.tif'.format(self.path, sl+b, (sl+b-1)*25)
                    print filename

                    try:
                        img = Image.open(filename, 'r')
                        slab[b, :, :] = np.asarray(img)
                    except IOError, e:
                        print "Failed to open file %s" % (e)
                        img = np.zeros((yimagesz, ximagesz), dtype=np.uint8)
                        slab[b, :, :] = img

            for y in range(0, yimagesz, ycubedim):
                for x in range(0, ximagesz, xcubedim):

                    zidx = ndlib.XYZMorton([x / xcubedim, y / ycubedim, (sl - startslice) / zcubedim])
                    cubedata = np.zeros([zcubedim, ycubedim, xcubedim], dtype=np.uint8)

                    xmin = x
                    ymin = y
                    xmax = (min(ximagesz - 1, x + xcubedim - 1)) + 1
                    ymax = (min(yimagesz - 1, y + ycubedim - 1)) + 1
                    zmin = 0
                    zmax = min(sl + zcubedim, endslice)

                    cubedata[0:zmax - zmin, 0:ymax - ymin, 0:xmax - xmin] = slab[zmin:zmax, ymin:ymax, xmin:xmax]

                    cube = imagecube.ImageCube16(cubedims)
                    cube.zeros()
                    cube.data = cubedata

                    # only store non-empty cubes
                    if np.count_nonzero(cube.data) != 0:
                        print zidx, ndlib.MortonXYZ(zidx)
                        db.putCube(zidx, self.resolution, cube)

            print "Committing at x=%s, y=%s, z=%s" % (x, y, sl)
            db.conn.commit()

            slab = None

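The cube addressing in these ingest routines relies on Morton (Z-order) indices via XYZMorton and MortonXYZ; a small round-trip sketch with arbitrary example coordinates, assuming the same ndlib bindings used above:

# a cube's (x, y, z) grid position maps to a single Morton-order key
zidx = ndlib.XYZMorton([3, 5, 2])
print zidx                   # interleaved-bit index used as the cuboid key
print ndlib.MortonXYZ(zidx)  # recovers [3, 5, 2]
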
def __init__(self, token):
    """Load the annotation database and project"""

    projdb = ocpcaproj.OCPCAProjectsDB()
    self.proj = projdb.loadProject(token)

    # Bind the annotation database
    self.annoDB = ocpcadb.OCPCADB(self.proj)

def main():

    parser = argparse.ArgumentParser(description='Ingest the TIFF data')
    parser.add_argument('token', action="store", type=str, help='Token for the project')
    parser.add_argument('channel', action="store", type=str, help='Channel for the project')
    parser.add_argument('path', action="store", type=str, help='Directory with the image files')
    parser.add_argument('resolution', action="store", type=int, help='Resolution of data')
    parser.add_argument('--offset', action="store", type=int, default=0, help='Offset on disk')

    result = parser.parse_args()

    # Load a database
    with closing(ocpcaproj.OCPCAProjectsDB()) as projdb:
        proj = projdb.loadToken(result.token)

    with closing(ocpcadb.OCPCADB(proj)) as db:

        ch = proj.getChannelObj(result.channel)

        # get the dataset configuration
        [[ximagesz, yimagesz, zimagesz], (starttime, endtime)] = proj.datasetcfg.imageSize(result.resolution)
        [xcubedim, ycubedim, zcubedim] = cubedim = proj.datasetcfg.getCubeDims()[result.resolution]
        [xoffset, yoffset, zoffset] = proj.datasetcfg.getOffset()[result.resolution]

        # Get a list of the files in the directories
        for slice_number in range(zoffset, zimagesz + 1, zcubedim):

            slab = np.zeros([zcubedim, yimagesz, ximagesz], dtype=np.uint8)

            for b in range(zcubedim):
                if slice_number + b <= zimagesz:
                    try:
                        # reading the raw data
                        file_name = "{}{:0>5}.tif".format(result.path, slice_number + b)
                        # silvestri15
                        # file_name = "{}full_{:0>6}.tif".format(result.path, slice_number + b + result.offset)
                        print "Open filename {}".format(file_name)
                        slab[b, :, :] = np.asarray(Image.open(file_name, 'r'))
                    except IOError, e:
                        print e
                        slab[b, :, :] = np.zeros((yimagesz, ximagesz), dtype=np.uint8)

            for y in range(0, yimagesz + 1, ycubedim):
                for x in range(0, ximagesz + 1, xcubedim):

                    # Getting a Cube id and ingesting the data one cube at a time
                    zidx = ocplib.XYZMorton([x / xcubedim, y / ycubedim, (slice_number - zoffset) / zcubedim])
                    cube = Cube.getCube(cubedim, ch.getChannelType(), ch.getDataType())
                    cube.zeros()

                    xmin, ymin = x, y
                    xmax = min(ximagesz, x + xcubedim)
                    ymax = min(yimagesz, y + ycubedim)
                    zmin = 0
                    zmax = min(slice_number + zcubedim, zimagesz + 1)

                    cube.data[0:zmax - zmin, 0:ymax - ymin, 0:xmax - xmin] = slab[zmin:zmax, ymin:ymax, xmin:xmax]
                    if cube.isNotZeros():
                        db.putCube(ch, zidx, result.resolution, cube, update=True)

            slab = None

def __init__(self, token, tilesz, tilepath):
    """Load the CATMAID stack into an OCP database"""

    # Get the database
    self.projdb = ocpcaproj.OCPCAProjectsDB()
    self.proj = self.projdb.loadProject(token)
    self.db = ocpcadb.OCPCADB(self.proj)

    self.tilesz = tilesz
    self.prefix = tilepath

def deleteTestDBList(project_name_list):

    try:
        for project_name in project_name_list:
            pr = Project.objects.get(project_name=project_name)
            pd = ocpcaproj.OCPCAProjectsDB()
            pd.deleteOCPCADB(pr.project_name)
        ds = Dataset.objects.get(dataset_name=pr.dataset_id)
        ds.delete()
    except Exception, e:
        pass

def ingest(self):
    """Read image stack and ingest"""

    # Load a database
    with closing(ocpcaproj.OCPCAProjectsDB()) as projdb:
        proj = projdb.loadToken(self.token)

    with closing(ocpcadb.OCPCADB(proj)) as db:

        ch = proj.getChannelObj(self.channel_name)

        # get the dataset configuration
        [[ximagesz, yimagesz, zimagesz], (starttime, endtime)] = proj.datasetcfg.imageSize(self.resolution)
        [xcubedim, ycubedim, zcubedim] = cubedim = proj.datasetcfg.getCubeDims()[self.resolution]
        [xoffset, yoffset, zoffset] = proj.datasetcfg.getOffset()[self.resolution]

        # Get a list of the files in the directories
        for slice_number in range(zoffset, zimagesz, zcubedim):

            slab = np.zeros([zcubedim, yimagesz, ximagesz], dtype=np.uint32)

            for b in range(zcubedim):
                if slice_number + b <= zimagesz:
                    file_name = "{}{}{:0>4}.tif".format(self.path, self.token, slice_number + b)
                    print "Open filename {}".format(file_name)
                    try:
                        img = Image.open(file_name, 'r')
                        slab[b, :, :] = np.asarray(img)
                    except IOError, e:
                        print "Failed to open file %s" % (e)
                        img = np.zeros((yimagesz, ximagesz), dtype=np.uint8)
                        slab[b, :, :] = img

            for y in range(0, yimagesz + 1, ycubedim):
                for x in range(0, ximagesz + 1, xcubedim):

                    # Getting a Cube id and ingesting the data one cube at a time
                    zidx = ocplib.XYZMorton([x / xcubedim, y / ycubedim, (slice_number - zoffset) / zcubedim])
                    cube = Cube.getCube(cubedim, ch.getChannelType(), ch.getDataType())
                    cube.zeros()

                    xmin = x
                    ymin = y
                    xmax = min(ximagesz, x + xcubedim)
                    ymax = min(yimagesz, y + ycubedim)
                    zmin = 0
                    zmax = min(slice_number + zcubedim, zimagesz + 1)

                    cube.data[0:zmax - zmin, 0:ymax - ymin, 0:xmax - xmin] = slab[zmin:zmax, ymin:ymax, xmin:xmax]

                    from operator import sub
                    corner = map(sub, [x, y, slice_number], [xoffset, yoffset, zoffset])
                    if cube.data.any():
                        db.annotateDense(ch, corner, self.resolution, cube.data, 'O')

def __init__(self, path, resolution, token_name, channel_name):
    """Load image stack into OCP, creating tokens and channels as needed"""

    print "In initialization"

    self.token = token_name
    self.resolution = resolution
    self.path = path

    with closing(ocpcaproj.OCPCAProjectsDB()) as projdb:
        self.proj = projdb.loadToken(self.token)

    self.channel_name = channel_name
    self.ingest()

def __init__(self, token, tilesz, tilepath, reslimit, totalprocs):
    """Load the CATMAID stack into an OCP database"""

    # Get the database
    self.projdb = ocpcaproj.OCPCAProjectsDB()
    self.proj = self.projdb.loadProject(token)
    self.db = ocpcadb.OCPCADB(self.proj)

    self.tilesz = tilesz
    self.prefix = tilepath
    self.reslimit = reslimit
    self.totalprocs = totalprocs
    self.token = token

def buildStack(token, channel, res):
    """Build the hierarchy of images"""

    with closing(ocpcaproj.OCPCAProjectsDB()) as projdb:
        proj = projdb.loadToken(token)

    with closing(ocpcadb.OCPCADB(proj)) as db:

        ch = proj.getChannelObj(channel)
        high_res = proj.datasetcfg.scalinglevels

        for cur_res in range(res, high_res + 1):

            # Get the source database sizes
            [[ximagesz, yimagesz, zimagesz], timerange] = proj.datasetcfg.imageSize(cur_res)
            [xcubedim, ycubedim, zcubedim] = cubedim = proj.datasetcfg.getCubeDims()[cur_res]
            [xoffset, yoffset, zoffset] = proj.datasetcfg.getOffset()[cur_res]

            biggercubedim = [xcubedim * 2, ycubedim * 2, zcubedim]

            # Set the limits for iteration on the number of cubes in each dimension
            xlimit = (ximagesz - 1) / xcubedim + 1
            ylimit = (yimagesz - 1) / ycubedim + 1
            zlimit = (zimagesz - 1) / zcubedim + 1

            for z in range(zlimit):
                for y in range(ylimit):
                    for x in range(xlimit):

                        # cutout the data at the -1 resolution
                        olddata = db.cutout(ch, [x * 2 * xcubedim, y * 2 * ycubedim, z * zcubedim], biggercubedim, cur_res - 1).data

                        # target array for the new data (z,y,x) order
                        newdata = np.zeros([zcubedim, ycubedim, xcubedim], dtype=np.uint16)

                        for sl in range(zcubedim):

                            # Convert each slice to an image
                            slimage = Image.frombuffer('I;16', (xcubedim * 2, ycubedim * 2), olddata[sl, :, :].flatten(), 'raw', 'I;16', 0, 1)

                            # Resize the image
                            newimage = slimage.resize([xcubedim, ycubedim])

                            # Put to a new cube
                            newdata[sl, :, :] = np.asarray(newimage)

                        zidx = ocplib.XYZMorton([x, y, z])
                        cube = Cube.getCube(cubedim, ch.getChannelType(), ch.getDataType())
                        cube.zeros()
                        cube.data = newdata

                        print "Inserting Cube {} at res {}".format(zidx, cur_res)
                        db.putCube(ch, zidx, cur_res, cube, update=True)

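A minimal sketch of the per-slice 2x downsampling step used in buildStack above, isolated from the database; it mirrors the frombuffer/resize conversion in the loop, but the array sizes are arbitrary and the data is synthetic:

import numpy as np
from PIL import Image

def downsample_slice(slice16, xcubedim, ycubedim):
    # slice16 is a (2*ycubedim, 2*xcubedim) uint16 array; returns (ycubedim, xcubedim)
    img = Image.frombuffer('I;16', (xcubedim * 2, ycubedim * 2), slice16.flatten(), 'raw', 'I;16', 0, 1)
    return np.asarray(img.resize((xcubedim, ycubedim)))

# synthetic example slab
slab = np.arange(256 * 256, dtype=np.uint16).reshape(256, 256)
small = downsample_slice(slab, 128, 128)
print small.shape  # (128, 128)
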
def getHist(self):

    with closing(ocpcaproj.OCPCAProjectsDB()) as projdb:
        proj = projdb.loadToken(self.token)

    with closing(ocpcadb.OCPCADB(proj)) as db:

        ch = proj.getChannelObj(self.channel)

        # Get the source database sizes
        [[ximagesz, yimagesz, zimagesz], timerange] = proj.datasetcfg.imageSize(self.res)
        [xcubedim, ycubedim, zcubedim] = cubedim = proj.datasetcfg.getCubeDims()[self.res]
        [xoffset, yoffset, zoffset] = proj.datasetcfg.getOffset()[self.res]

        # Set the limits for iteration on the number of cubes in each dimension
        xlimit = (ximagesz - 1) / xcubedim + 1
        ylimit = (yimagesz - 1) / ycubedim + 1
        zlimit = (zimagesz - 1) / zcubedim + 1

        numbins = 2**16
        hist = []
        bins = np.zeros(numbins + 1)
        count = 0

        # compute a histogram for each cube
        for z in range(zlimit):
            for y in range(ylimit):
                for x in range(xlimit):

                    # cutout the data for the cube
                    data = db.cutout(ch, [x * xcubedim, y * ycubedim, z * zcubedim], cubedim, self.res).data

                    # compute the histogram and store it
                    hist.append(np.histogram(data[data > 0], bins=numbins, range=(0, 2**16)))
                    print "Processed cube {} {} {}".format(x, y, z)

        # sum the individual histograms
        hist_sum = np.zeros(numbins)
        bins = hist[0][1]  # all bins should be the same
        for i in range(len(hist)):
            hist_sum += hist[i][0]

        return (hist_sum, bins)

def __init__(self, token, cutout):
    """Load the annotation database and project"""

    projdb = ocpcaproj.OCPCAProjectsDB()
    self.proj = projdb.loadProject(token)

    # Bind the annotation database
    self.annoDB = ocpcadb.OCPCADB(self.proj)

    # Perform argument processing
    try:
        args = restargs.BrainRestArgs()
        args.cutoutArgs(cutout + "/", self.proj.datasetcfg)
    except restargs.RESTArgsError, e:
        raise OCPCAError(e.value)

def deleteTestDB(project_name):

    try:
        tk = Token.objects.get(token_name=project_name)
        pr = Project.objects.get(project_name=project_name)
        ds = Dataset.objects.get(dataset_name=pr.dataset_id)
        channel_list = Channel.objects.filter(project_id=pr)
        pd = ocpcaproj.OCPCAProjectsDB()
        pd.deleteOCPCADB(pr.project_name)
        for ch in channel_list:
            ch.delete()
        tk.delete()
        pr.delete()
        ds.delete()
    except Exception, e:
        pass

def buildStack(token, channel, res, base_res):
    """Build a zoom hierarchy of images"""

    scaling = 2**(base_res - res)

    with closing(ocpcaproj.OCPCAProjectsDB()) as projdb:
        proj = projdb.loadToken(token)

    with closing(ocpcadb.OCPCADB(proj)) as db:

        ch = proj.getChannelObj(channel)

        # get db sizes
        [[ximagesz, yimagesz, zimagesz], timerange] = proj.datasetcfg.imageSize(base_res)
        [xcubedim, ycubedim, zcubedim] = cubedim = proj.datasetcfg.getCubeDims()[base_res]
        [xoffset, yoffset, zoffset] = proj.datasetcfg.getOffset()[base_res]

        newcubedim = proj.datasetcfg.getCubeDims()[res]

        xlimit = (ximagesz - 1) / xcubedim + 1
        ylimit = (yimagesz - 1) / ycubedim + 1
        zlimit = (zimagesz - 1) / zcubedim + 1

        # iterate over the old cubes
        for z in range(zlimit):
            for y in range(ylimit):
                for x in range(xlimit):

                    # cutout data
                    old_data = db.cutout(ch, [x * xcubedim, y * ycubedim, z * zcubedim], cubedim, base_res).data

                    # new_data = zoomIn(old_data, scaling)
                    new_data = cZoomIn(old_data, base_res - res)

                    # number of new cubes produced in each dimension
                    newzsize = new_data.shape[0] / newcubedim[2]
                    newysize = new_data.shape[1] / newcubedim[1]
                    newxsize = new_data.shape[2] / newcubedim[0]

                    for z2 in range(newzsize):
                        for y2 in range(newysize):
                            for x2 in range(newxsize):

                                zidx = ndlib.XYZMorton([x * newxsize + x2, y * newysize + y2, z * newzsize + z2])
                                cube = Cube.getCube(newcubedim, ch.getChannelType(), ch.getDataType())
                                cube.zeros()
                                cube.data = new_data[z2 * newcubedim[2]:(z2 + 1) * newcubedim[2], y2 * newcubedim[1]:(y2 + 1) * newcubedim[1], x2 * newcubedim[0]:(x2 + 1) * newcubedim[0]]

                                print "Inserting Cube {} at res {}".format(zidx, res)
                                db.putCube(ch, zidx, res, cube, update=True)

def __init__(self, path, resolution, token_name):
    """Load image stack into OCP, creating tokens and channels as needed"""

    print "In initialization"

    self.token = token_name
    self.resolution = resolution
    self.path = path

    with closing(ocpcaproj.OCPCAProjectsDB()) as projdb:
        self.proj = projdb.loadToken(self.token)

    # discover channel names from the tiff files in the stack directory
    channel_list = glob.glob("{}*.tif".format(self.path))
    channel_list = [i.split('/')[-1][:-len('.tif')] for i in channel_list]
    channel_list.sort()

    for index, channel_name in enumerate(channel_list):
        self.createChannel(channel_name, index)
        self.ingest(channel_name)

def getTile(self, webargs):
    """Either fetch the file from mocpcache or load a new region into mocpcache by cutout"""

    # parse the web args
    self.token, tileszstr, self.channel, resstr, xtilestr, ytilestr, zslicestr, color, brightnessstr, rest = webargs.split('/', 9)

    # load the database
    self.loadDB()

    with closing(ocpcaproj.OCPCAProjectsDB()) as projdb:
        self.proj = projdb.loadProject(self.token)

    with closing(ocpcadb.OCPCADB(self.proj)) as self.db:

        # convert args to ints
        xtile = int(xtilestr)
        ytile = int(ytilestr)
        res = int(resstr)
        # modify the zslice to the offset
        zslice = int(zslicestr) - self.proj.datasetcfg.slicerange[0]
        self.tilesz = int(tileszstr)
        brightness = float(brightnessstr)

        # memcache key
        mckey = self.buildKey(res, xtile, ytile, zslice, color, brightness)

        # do something to sanitize the webargs??

        # if tile is in mocpcache, return it
        tile = self.mc.get(mckey)
        if tile != None:
            fobj = cStringIO.StringIO(tile)
        # otherwise render the tile by cutout and store it in mocpcache
        else:
            img = self.cacheMiss(res, xtile, ytile, zslice, color, brightness)
            fobj = cStringIO.StringIO()
            img.save(fobj, "PNG")
            self.mc.set(mckey, fobj.getvalue())

        fobj.seek(0)
        return fobj

def __init__(self, token, path, resolution, channel):

    self.token = token
    self.path = path
    self.resolution = resolution

    with closing(ocpcaproj.OCPCAProjectsDB()) as self.projdb:
        self.proj = self.projdb.loadProject(token)

    with closing(ocpcadb.OCPCADB(self.proj)) as self.db:
        (self.xcubedim, self.ycubedim, self.zcubedim) = self.cubedims = self.proj.datasetcfg.cubedim[resolution]
        (self.startslice, self.endslice) = self.proj.datasetcfg.slicerange
        self.batchsz = self.zcubedim

        self.channel = channel

        (self._ximgsz, self._yimgsz) = self.proj.datasetcfg.imagesz[resolution]

def __init__(self, token, path, resolution):

    self.path = path
    self.resolution = resolution

    projdb = ocpcaproj.OCPCAProjectsDB()
    self.proj = projdb.loadProject(token)

    # Bind the database
    self.db = ocpcadb.OCPCADB(self.proj)

    # get spatial information
    self._ximgsz = self.proj.datasetcfg.imagesz[resolution][0]
    self._yimgsz = self.proj.datasetcfg.imagesz[resolution][1]
    self.startslice = self.proj.datasetcfg.slicerange[0]
    self.endslice = self.proj.datasetcfg.slicerange[1]
    self.batchsz = self.proj.datasetcfg.cubedim[resolution][2]

    # get a db cursor
    self.cursor = self.db.conn.cursor()

def __init__(self, token, resolution, path):

    self.path = path
    self.resolution = resolution

    self.projdb = ocpcaproj.OCPCAProjectsDB()
    self.proj = self.projdb.loadProject(token)

    (self._ximgsz, self._yimgsz) = self.proj.datasetcfg.imagesz[resolution]
    (self.startslice, self.endslice) = self.proj.datasetcfg.slicerange
    (self.ximagesz, self.yimagesz) = (9888, 7936)
    self.batchsz = self.proj.datasetcfg.cubedim[resolution][2]

    self.alldirs = os.listdir(path)

    # open the database
    self.db = ocpcadb.OCPCADB(self.proj)

    # get a db cursor
    self.cursor = self.db.conn.cursor()

def getHist(self):

    with closing(ocpcaproj.OCPCAProjectsDB()) as projdb:
        proj = projdb.loadToken(self.token)

    with closing(ocpcadb.OCPCADB(proj)) as db:

        ch = proj.getChannelObj(self.channel)

        # Get the source database sizes
        [[ximagesz, yimagesz, zimagesz], timerange] = proj.datasetcfg.imageSize(self.res)
        [xcubedim, ycubedim, zcubedim] = cubedim = proj.datasetcfg.getCubeDims()[self.res]
        [xoffset, yoffset, zoffset] = proj.datasetcfg.getOffset()[self.res]

        # Set the limits for iteration on the number of cubes in each dimension
        xlimit = (ximagesz - 1) / xcubedim + 1
        ylimit = (yimagesz - 1) / ycubedim + 1
        zlimit = (zimagesz - 1) / zcubedim + 1

        hist_sum = np.zeros(self.numbins, dtype=np.uint32)

        # sum the histograms
        for z in range(zlimit):
            for y in range(ylimit):
                for x in range(xlimit):

                    # cutout the data for the cube
                    data = db.cutout(ch, [x * xcubedim, y * ycubedim, z * zcubedim], cubedim, self.res).data

                    # compute the histogram and add it to the running total
                    (hist, bins) = np.histogram(data[data > 0], bins=self.numbins, range=(0, self.numbins))
                    hist_sum = np.add(hist_sum, hist)

                    print "Processed cube {} {} {}".format(x, y, z)

        return (hist_sum, bins)

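A minimal sketch of how the summed histogram returned by getHist might be post-processed, for example to estimate an intensity window; the small hand-made histogram below is a stand-in for a real (hist_sum, bins) result:

import numpy as np

# stand-in for (hist_sum, bins) = obj.getHist() on a real channel
hist_sum = np.array([0, 10, 30, 60], dtype=np.uint32)
bins = np.array([0, 1, 2, 3, 4])

total = float(hist_sum.sum())
cdf = np.cumsum(hist_sum) / total          # cumulative distribution over bins
p99 = bins[np.searchsorted(cdf, 0.99)]     # bin holding the 99th percentile
print "99th percentile intensity is approximately {}".format(p99)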