def process_slice(z):
    """Copy 1-indexed source plane ``z-1`` of `image` into ``vol[:, :, z]``.

    Consistency fix: added the progress-marker skip that the other
    process_slice variants in this file already have, so re-runs are
    idempotent.
    """
    if os.path.exists(os.path.join(progress_dir, str(z))):
        print(f"Slice {z} already processed, skipping ")
        return
    print('Processing slice z=', z)
    # z-1 because the files in to_upload are 1-indexed
    array = image[z - 1].reshape((1, y_dim, x_dim)).T
    vol[:, :, z] = array
    touch(os.path.join(progress_dir, str(z)))
def process_slice(z):
    """Copy plane z of the global `image` stack into ``vol[:, :, z]``.

    Skips work when a progress marker for z already exists or z is out of
    range. Returns "success" on completion, None when skipped.
    """
    if os.path.exists(os.path.join(progress_dir, str(z))):
        print(f"Slice {z} already processed, skipping ")
        return
    if z >= z_dim:
        # BUG FIX: message previously printed the literal "{z}" (missing f-prefix).
        print(f"Index {z} >= z_dim of volume, skipping")
        return
    print('Processing slice z=', z)
    array = image[z].reshape((1, y_dim, x_dim)).T
    vol[:, :, z] = array
    touch(os.path.join(progress_dir, str(z)))
    return "success"
def process_paxinos_slice(y):
    """Insert coronal plane y of `downsized_vol` into `vol` and mark progress.

    Returns "success" when written, None when skipped (already done or out
    of range).
    """
    if os.path.exists(os.path.join(progress_dir, str(y))):
        print(f"Slice {y} already processed, skipping ")
        return
    if y >= y_dim:
        # BUG FIX: message previously printed the literal "{y}" (missing f-prefix).
        print(f"Index {y} >= y_dim of volume, skipping")
        return
    print('Processing slice y=', y)
    array = downsized_vol[:, y, :].reshape((z_dim, 1, x_dim)).T
    vol[:, y, :] = array
    touch(os.path.join(progress_dir, str(y)))
    return "success"
def process_slice(z):
    """Copy `cell_map` plane z into ``vol[:, :, z]`` and touch its marker.

    Skips the slice when already processed or when z is past the last slice.
    """
    if os.path.exists(os.path.join(progress_dir, str(z))):
        print(f"Slice {z} already processed, skipping ")
        return
    if z > (z_dim - 1):
        # BUG FIX: message previously printed the literal "{z}" (missing f-prefix).
        print(f"Index {z} is larger than (number of slices - 1), skipping")
        return
    print('Processing slice z=', z)
    array = cell_map[z].reshape((1, y_dim, x_dim)).T
    vol[:, :, z] = array
    touch(os.path.join(progress_dir, str(z)))
    print("success")
def test_mesh_manifests():
    """Exercise MeshManifestTask over a local file:// layer for two LODs."""
    directory = '/tmp/removeme/mesh_manifests/'
    layer_path = 'file://' + directory
    mesh_dir = 'mesh_mip_3_error_40'
    delete_layer(layer_path)
    to_path = lambda filename: os.path.join(directory, mesh_dir, filename)

    n_segids = 100
    n_lods = 2
    n_fragids = 5

    with Storage(layer_path) as stor:
        stor.put_file('info', '{"mesh":"mesh_mip_3_error_40"}'.encode('utf8'))
    for segid in range(n_segids):
        for lod in range(n_lods):
            for fragid in range(n_fragids):
                lib.touch(to_path('{}:{}:{}'.format(segid, lod, fragid)))

    for i in range(10):
        MeshManifestTask(layer_path=layer_path, prefix=i, lod=0).execute()
    # Clarity fix: the assertions never used fragid, so the redundant inner
    # fragid loop (which repeated identical asserts 5x) was removed.
    for segid in range(n_segids):
        assert os.path.exists(to_path('{}:0'.format(segid)))
        assert not os.path.exists(to_path('{}:1'.format(segid)))

    for i in range(10):
        MeshManifestTask(layer_path=layer_path, prefix=i, lod=1).execute()
    for segid in range(n_segids):
        assert os.path.exists(to_path('{}:0'.format(segid)))
        assert os.path.exists(to_path('{}:1'.format(segid)))

    with open(to_path('50:0'), 'r') as f:
        content = json.loads(f.read())
    assert content == {
        "fragments": ["50:0:0", "50:0:1", "50:0:2", "50:0:3", "50:0:4"]
    }

    if os.path.exists(directory):
        shutil.rmtree(directory)
def process(args):
    """Load the raw-space annotation TIFF for plane z and write it into vol.

    args -- (vol, z) tuple, where vol is the destination volume and z the
    plane index. Returns "success" when the plane was written.
    """
    vol, z = args
    # Format image filename for raw-space annotation output (temp fix for atlas):
    # the brain name is two directory levels above tif_dir.
    brain = os.path.basename(os.path.dirname(os.path.dirname(tif_dir)))
    img_name = os.path.join(tif_dir, brain + "_annotation_Z%04d.tif" % z)
    # Idiom fix: `assert x == True` -> bare truthiness test, with the path
    # in the failure message.
    assert os.path.exists(img_name), img_name
    image = Image.open(img_name)
    width, height = image.size
    array = np.array(list(image.getdata()), dtype=np.uint16, order="F")
    array = array.reshape((1, height, width)).T
    vol[:, :, z] = array
    image.close()
    touch(os.path.join(progress_dir, str(z)))
    return "success"
def process_simple_slice(self, file_key):
    """Load one image file and copy it into self.precomputed_vol at `index`.

    file_key -- (index, infile) tuple. Touches a per-file progress marker
    on success.
    """
    index, infile = file_key
    print(index, infile)
    try:
        image = Image.open(infile)
    except OSError:
        # BUG FIX: the original bare `except` printed and then fell through,
        # hitting a NameError on `image.size`. Return instead, and catch only
        # I/O / decode errors (PIL raises OSError subclasses here).
        print('Could not open', infile)
        return
    width, height = image.size
    array = np.array(image, dtype=self.data_type, order='F')
    array = array.reshape((1, height, width)).T
    self.precomputed_vol[:, :, index] = array
    touchfile = os.path.join(self.progress_dir, os.path.basename(infile))
    touch(touchfile)
    image.close()
    return
def process_slice(z):
    """Open sorted_files[z] and copy it into ``vol[:, :, z]``.

    Skips already-processed or out-of-range indices; returns "success"
    when the plane was written, None when skipped.
    """
    if os.path.exists(os.path.join(progress_dir, str(z))):
        print(f"Slice {z} already processed, skipping ")
        return
    if z > (len(sorted_files) - 1):
        # BUG FIX: message previously printed the literal "{z}" (missing f-prefix).
        print(f"Index {z} is larger than (number of slices - 1), skipping")
        return
    print('Processing slice z=', z)
    img_name = sorted_files[z]
    image = Image.open(img_name)
    width, height = image.size
    array = np.array(image, dtype=np.uint16, order='F')
    array = array.reshape((1, height, width)).T
    vol[:, :, z] = array
    image.close()
    touch(os.path.join(progress_dir, str(z)))
    return "success"
def test_mesh_manifests_filesystem():
    """Exercise MeshManifestFilesystemTask over a local file:// layer."""
    directory = '/tmp/removeme/mesh_manifests_fs/'
    layer_path = 'file://' + directory
    mesh_dir = 'mesh_mip_3_error_40'
    delete_layer(layer_path)
    to_path = lambda filename: os.path.join(directory, mesh_dir, filename)

    n_segids = 100
    n_lods = 2
    n_fragids = 5

    CloudFiles(layer_path).put_json('info', {"mesh": "mesh_mip_3_error_40"})
    for segid in range(n_segids):
        for lod in range(n_lods):
            for fragid in range(n_fragids):
                lib.touch(to_path('{}:{}:{}'.format(segid, lod, fragid)))

    # BUG FIX: the task was instantiated but never run, so the manifests
    # were never generated. Call .execute() as test_mesh_manifests does.
    MeshManifestFilesystemTask(layer_path=layer_path, lod=0).execute()
    # The assertions never used fragid; the redundant inner loop was removed.
    for segid in range(n_segids):
        filename = '{}:0'.format(segid)
        assert os.path.exists(to_path(filename)), filename
        filename = '{}:1'.format(segid)
        assert not os.path.exists(to_path(filename)), filename

    MeshManifestFilesystemTask(layer_path=layer_path, lod=1).execute()
    for segid in range(n_segids):
        filename = '{}:0'.format(segid)
        assert os.path.exists(to_path(filename)), filename
        filename = '{}:1'.format(segid)
        assert os.path.exists(to_path(filename)), filename

    with open(to_path('50:0'), 'r') as f:
        content = json.loads(f.read())
    content["fragments"].sort()
    assert content == {"fragments": [
        "50:0:0", "50:0:1", "50:0:2", "50:0:3", "50:0:4"
    ]}

    if os.path.exists(directory):
        shutil.rmtree(directory)
def process(args):
    """Load the TIFF frame for plane z and write it into vol.

    args -- (vol, z) tuple. Frame numbering in the filenames advances by
    Z_STEP per plane starting at frame Z_OFFSET. Returns "success".
    """
    vol, z = args
    # Named constants instead of bare magic numbers 20 / 380.
    Z_STEP, Z_OFFSET = 20, 380
    img_name = os.path.join(
        tif_dir,
        os.path.basename(tif_dir) + "_%06d.tif" % int(z * Z_STEP + Z_OFFSET))
    print("Processing ", img_name)
    # Idiom fix: `assert x == True` -> bare truthiness test with a message.
    assert os.path.exists(img_name), img_name
    image = Image.open(img_name)
    width, height = image.size
    array = np.array(list(image.getdata()), dtype=np.uint16, order="F")
    array = array.reshape((1, height, width)).T
    print(array.shape)
    vol[:, :, z] = array
    image.close()
    touch(os.path.join(progress_dir, str(z)))
    return "success"
def upload_chunk(vol, ranges, image, progress_dir, to_upload):
    """Push tif image as a chunk in CloudVolume object.

    Arguments:
        vol {cloudvolume.CloudVolume} -- volume that will contain image data
        ranges {tuple} -- 3 tuple of (min, max) lists giving the stitch bounds
        image {numpy array} -- 3D image array
        progress_dir {str} -- directory where a per-chunk marker is touched
            after a validated upload
        to_upload -- unused; kept for backward compatibility with callers

    Relies on the module-level `done_files` set (keyed by str(ranges)) to
    skip chunks that were already uploaded, and on `validate_upload` to
    confirm the write before marking progress.
    """
    # Guard clause: already-uploaded chunks are skipped outright.
    if str(ranges) in done_files:
        print("already uploaded")
        return
    vol[ranges[0][0]:ranges[0][1],
        ranges[1][0]:ranges[1][1],
        ranges[2][0]:ranges[2][1]] = image.T
    print("uploaded")
    if validate_upload(vol, ranges, image):
        print("valid")
        touch(os.path.join(progress_dir, str(ranges)))
    else:
        warnings.warn("Invalid chunk, all 0")
def process_coronal_slice(self, file_key):
    """Crop one coronal section and insert it into the precomputed volume.

    file_key -- (section index, input file path) tuple. A per-file marker
    in self.progress_dir makes the operation idempotent.
    """
    section_index, filepath = file_key
    marker = os.path.join(self.progress_dir, os.path.basename(filepath))
    if os.path.exists(marker):
        print(f"Slice {section_index} already processed, skipping ")
        return
    section = io.imread(filepath)
    # Crop to the configured bounding box, then add a singleton z axis.
    starty, endy, startx, endx = self.starting_points
    section = section[starty:endy, startx:endx]
    section = section.reshape(section.shape[0], section.shape[1], 1)
    self.precomputed_vol[:, :, section_index] = section
    touch(marker)
    del section
    return
def process(z):
    """Write plane z of the appropriate TIFF series into `vol`.

    The filename pattern depends on which pipeline stage `tif_dir`
    belongs to: raw images, transformed atlas annotations, or cell maps.
    """
    if "full_sizedatafld" in tif_dir:
        # Raw images.
        img_name = os.path.join(
            tif_dir, os.path.basename(tif_dir)[:-5] + "_C01_Z%04d.tif" % z)
    elif "transformed_annotations" in tif_dir:
        # Atlas annotation volume; brain name sits two directories up.
        brain = os.path.basename(os.path.dirname(os.path.dirname(tif_dir)))
        img_name = os.path.join(tif_dir, brain + "_annotation_Z%04d.tif" % z)
    else:
        # Cell-detection output.
        img_name = os.path.join(tif_dir, "cells_%04d.tif" % z)
    print("Processing ", img_name)
    page = Image.open(img_name)
    width, height = page.size
    plane = np.array(list(page.getdata()), dtype=np.uint16, order="F")
    vol[:, :, z] = plane.reshape((1, height, width)).T
    page.close()
    touch(os.path.join(progress_dir, str(z)))
def process(args):
    """Convert one terastitcher output plane into the volume.

    args -- (vol, z) tuple. The frame number in the filename advances by
    20 per plane, starting at the first frame present in `tif_dir`.
    Returns "success".
    """
    vol, z = args
    # Derive the starting frame number from the first (sorted) file name;
    # the 6-digit frame id occupies the last 6 chars before ".tif".
    imgs = sorted(os.path.join(tif_dir, xx) for xx in os.listdir(tif_dir))
    first_frame = int(imgs[0][-10:-4])
    img_name = os.path.join(
        tif_dir,
        os.path.basename(tif_dir) + "_%06d.tif" % int(z * 20 + first_frame))
    # Idiom fix: `assert x == True` -> bare truthiness test with a message.
    assert os.path.exists(img_name), img_name
    image = Image.open(img_name)
    width, height = image.size
    array = np.array(list(image.getdata()), dtype=np.uint16, order="F")
    array = array.reshape((1, height, width)).T
    vol[:, :, z] = array
    image.close()
    touch(os.path.join(progress_dir, str(z)))
    return "success"
def process_mesh(self, file_key):
    """Quantize a section image into 8-wide label bins and store it.

    Values in each range [8k+1, 8k+8] collapse to the bin's upper bound;
    anything above 248 saturates to 255. file_key is (index, infile);
    a per-file marker in self.progress_dir makes this idempotent.
    """
    index, infile = file_key
    marker = os.path.join(self.progress_dir, os.path.basename(infile))
    if os.path.exists(marker):
        print(f"Section {index} already processed, skipping ")
        return
    section = io.imread(infile)
    binned = np.copy(section)
    # Bin boundaries: (1,8), (9,16), ..., (241,248).
    for low, high in ((v - 8, v - 1) for v in range(9, 256, 8)):
        binned[(binned >= low) & (binned <= high)] = high
    binned[binned > 248] = 255
    section = binned.T
    del binned
    self.precomputed_vol[:, :, index] = section.reshape(
        section.shape[0], section.shape[1], 1)
    touch(marker)
    del section
    return
def ingest(args):
    """Ingest an HDF file to a CloudVolume bucket.

    Reads the dataset at args.hdf_keys_to_dataset from either a local HDF
    file or one fetched from cloud storage, creates the destination
    CloudVolume, then uploads it chunk by chunk. Per-chunk progress markers
    in "progress/" make the upload resumable.
    """
    if args.local_hdf_path:
        hdf_file = h5py.File(args.local_hdf_path, "r")
    else:
        with Storage(args.cloud_src_path) as storage:
            hdf_file = h5py.File(storage.get_file(args.cloud_hdf_filename), "r")
    cur_hdf_group = hdf_file
    for group_name in args.hdf_keys_to_dataset:
        cur_hdf_group = cur_hdf_group[group_name]
    hdf_dataset = cur_hdf_group

    if args.zyx:
        # Dataset is stored z,y,x; CloudVolume wants x,y,z.
        dataset_shape = np.array(
            [hdf_dataset.shape[2], hdf_dataset.shape[1], hdf_dataset.shape[0]])
    else:
        dataset_shape = np.array([*hdf_dataset.shape])
    if args.layer_type == "image":
        data_type = "uint8"
    else:
        data_type = "uint64"
    voxel_offset = args.voxel_offset
    info = CloudVolume.create_new_info(
        num_channels=1,
        layer_type=args.layer_type,
        data_type=data_type,
        encoding="raw",
        resolution=args.resolution,
        voxel_offset=voxel_offset,
        chunk_size=args.chunk_size,
        volume_size=dataset_shape,
    )
    provenance = {
        "description": args.provenance_description,
        "owners": [args.owner]
    }
    vol = CloudVolume(args.dst_path, info=info, provenance=provenance)
    vol.commit_info()
    vol.commit_provenance()

    all_files = set()
    for x in np.arange(voxel_offset[0], voxel_offset[0] + dataset_shape[0],
                       args.chunk_size[0]):
        for y in np.arange(voxel_offset[1], voxel_offset[1] + dataset_shape[1],
                           args.chunk_size[1]):
            for z in np.arange(voxel_offset[2],
                               voxel_offset[2] + dataset_shape[2],
                               args.chunk_size[2]):
                # Store plain Python ints so the tuples round-trip through
                # str() / parsing below regardless of numpy version.
                all_files.add((int(x), int(y), int(z)))

    progress_dir = mkdir(
        "progress/")  # unlike os.mkdir doesn't crash on prexisting
    done_files = set()
    for done_file in os.listdir(progress_dir):
        # BUG FIX: markers are written as str((x, y, z)), but the old code
        # split that raw string on "," producing string tuples like
        # ('(0', ' 0', ' 0)') which never matched the numeric tuples in
        # all_files -- so resuming re-uploaded every chunk. Parse the marker
        # back into an int tuple; skip any foreign files in the directory.
        try:
            done_files.add(
                tuple(int(part) for part in done_file.strip("()").split(",")))
        except ValueError:
            continue
    to_upload = all_files.difference(done_files)

    for chunk_start_tuple in to_upload:
        chunk_start = np.array(list(chunk_start_tuple))
        end_of_dataset = np.array(voxel_offset) + dataset_shape
        chunk_end = chunk_start + np.array(args.chunk_size)
        chunk_end = Vec(*chunk_end)
        # Clamp the final chunk so it doesn't run past the dataset edge.
        chunk_end = Vec.clamp(chunk_end, Vec(0, 0, 0), end_of_dataset)
        chunk_hdf_start = chunk_start - voxel_offset
        chunk_hdf_end = chunk_end - voxel_offset
        if args.zyx:
            chunk = hdf_dataset[chunk_hdf_start[2]:chunk_hdf_end[2],
                                chunk_hdf_start[1]:chunk_hdf_end[1],
                                chunk_hdf_start[0]:chunk_hdf_end[0]]
            chunk = chunk.T
        else:
            chunk = hdf_dataset[chunk_hdf_start[0]:chunk_hdf_end[0],
                                chunk_hdf_start[1]:chunk_hdf_end[1],
                                chunk_hdf_start[2]:chunk_hdf_end[2]]
        print("Processing ", chunk_start_tuple)
        array = np.array(chunk, dtype=np.dtype(data_type), order="F")
        vol[chunk_start[0]:chunk_end[0],
            chunk_start[1]:chunk_end[1],
            chunk_start[2]:chunk_end[2]] = array
        touch(os.path.join(progress_dir, str(chunk_start_tuple)))