def provide(self, request):
    """Provide the points of this CSV source that fall into the requested ROI.

    Filters ``self.data`` (an (N, ndims) array of point locations) down to
    the rows inside ``request[self.points].roi`` and returns them in a
    :class:`Batch`, keyed by their row index in ``self.data``.
    """
    timing = Timing(self)
    timing.start()

    min_bb = request[self.points].roi.get_begin()
    max_bb = request[self.points].roi.get_end()

    logger.debug(
        "CSV points source got request for %s",
        request[self.points].roi)

    # Keep only points inside [min_bb, max_bb) in every dimension.
    # FIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
    # use the builtin bool instead.
    point_filter = np.ones((self.data.shape[0],), dtype=bool)
    for d in range(self.ndims):
        point_filter = np.logical_and(
            point_filter, self.data[:, d] >= min_bb[d])
        point_filter = np.logical_and(
            point_filter, self.data[:, d] < max_bb[d])

    filtered = self.data[point_filter]
    # point IDs are the row indices into the original data array
    ids = np.arange(len(self.data))[point_filter]

    points_data = {i: Point(p) for i, p in zip(ids, filtered)}
    points_spec = PointsSpec(roi=request[self.points].roi.copy())

    batch = Batch()
    batch.points[self.points] = Points(points_data, points_spec)

    timing.stop()
    batch.profiling_stats.add(timing)

    return batch
def __setup_batch(self, batch_spec, chunk):
    '''Allocate a batch matching the sizes of ``batch_spec``, using
    ``chunk`` as template.'''

    batch = Batch()

    for array_key, array_spec in batch_spec.array_specs.items():

        roi = array_spec.roi
        voxel_size = self.spec[array_key].voxel_size

        # the 'non-spatial' leading dimensions come from the chunk
        # template, the spatial ones from the requested ROI in voxel units
        template = chunk.arrays[array_key]
        shape = template.data.shape[:-roi.dims()] + (
            roi.get_shape() // voxel_size)

        spec = self.spec[array_key].copy()
        spec.roi = roi
        logger.info("allocating array of shape %s for %s", shape, array_key)
        batch.arrays[array_key] = Array(data=np.zeros(shape), spec=spec)

    for points_key, points_spec in batch_spec.points_specs.items():
        # points start out empty and get filled chunk by chunk
        spec = self.spec[points_key].copy()
        spec.roi = points_spec.roi
        batch.points[points_key] = Points(data={}, spec=spec)

    logger.debug("setup batch to fill %s", batch)

    return batch
def provide(self, request):
    """Serve the TEST_POINTS inside the requested ROI and a GT_LABELS
    array whose first half along axis 0 is labeled 2, the rest 1."""

    batch = Batch()

    roi_points = request[PointsKeys.TEST_POINTS].roi

    # collect all points that fall into the requested ROI
    selected = {
        point_id: point
        for point_id, point in self.all_points.items()
        if roi_points.contains(point.location)
    }
    batch.points[PointsKeys.TEST_POINTS] = Points(
        selected, PointsSpec(roi=roi_points))

    roi_array = request[ArrayKeys.GT_LABELS].roi
    image = np.ones(roi_array.get_shape() / self.voxel_size, dtype=np.uint64)
    # label half of GT_LABELS differently
    depth = image.shape[0]
    image[:depth // 2] = 2

    spec = self.spec[ArrayKeys.GT_LABELS].copy()
    spec.roi = roi_array
    batch.arrays[ArrayKeys.GT_LABELS] = Array(image, spec=spec)

    return batch
def provide(self, request):
    """Serve TEST_LABELS (a striped background with point IDs burned in
    at their voxel locations) plus the TEST_POINTS inside the ROI."""

    batch = Batch()

    roi_points = request[PointsKeys.TEST_POINTS].roi
    roi_array = request[ArrayKeys.TEST_LABELS].roi
    roi_voxel = roi_array // self.spec[ArrayKeys.TEST_LABELS].voxel_size

    # striped background: every second slice along axis 1 is 100
    data = np.zeros(roi_voxel.get_shape(), dtype=np.uint32)
    data[:, ::2] = 100

    # burn each point's ID into the array at its voxel location
    for point_id, point in self.points.items():
        voxel = self.point_to_voxel(roi_array, point.location)
        data[voxel] = point_id

    spec = self.spec[ArrayKeys.TEST_LABELS].copy()
    spec.roi = roi_array
    batch.arrays[ArrayKeys.TEST_LABELS] = Array(data, spec=spec)

    selected = {
        point_id: point
        for point_id, point in self.points.items()
        if roi_points.contains(point.location)
    }
    batch.points[PointsKeys.TEST_POINTS] = Points(
        selected, PointsSpec(roi=roi_points))

    return batch
def provide(self, request):
    """Serve an empty PRESYN point set for the requested ROI."""

    batch = Batch()
    presyn_roi = request[PointsKeys.PRESYN].roi
    batch.points[PointsKeys.PRESYN] = Points({}, PointsSpec(roi=presyn_roi))
    return batch
def provide(self, request):
    """Serve pre-/postsynaptic partner points that fall into the
    requested ROIs and, if requested, the object mask array."""

    batch = Batch()

    pre_roi = request[PointsKeys.PRESYN].roi
    post_roi = request[PointsKeys.POSTSYN].roi

    pre_points = {}
    post_points = {}

    # every partner pair gets its own synapse ID, whether or not either
    # side falls into the requested ROIs
    for syn_id, (pre_id, post_id) in enumerate(self.partners):

        pre_loc = self.points[pre_id]
        if pre_roi.contains(pre_loc):
            pre_points[pre_id] = PreSynPoint(
                location=pre_loc,
                partner_ids=[post_id],
                location_id=pre_id,
                synapse_id=syn_id)

        post_loc = self.points[post_id]
        if post_roi.contains(post_loc):
            post_points[post_id] = PostSynPoint(
                location=post_loc,
                partner_ids=[pre_id],
                location_id=post_id,
                synapse_id=syn_id)

    batch.points[PointsKeys.PRESYN] = Points(
        pre_points, PointsSpec(roi=pre_roi))
    batch.points[PointsKeys.POSTSYN] = Points(
        post_points, PointsSpec(roi=post_roi))

    if ArrayKeys.OBJECTMASK in request:
        mask_roi = request[ArrayKeys.OBJECTMASK].roi
        batch.arrays[ArrayKeys.OBJECTMASK] = Array(
            self.objectmask[(mask_roi / self.voxel_size).to_slices()],
            spec=ArraySpec(roi=mask_roi, voxel_size=self.voxel_size))

    return batch
def provide(self, request):
    """Read synapse points from the HDF5 file for each requested points key.

    For a 'synapse' source (PRESYN and/or POSTSYN requested), pre- and
    postsynaptic points are read together so their IDs stay unique and
    partner lookups remain possible. For a 'presyn'/'postsyn' source,
    only the matching side is returned under the source's single key.
    """
    timing_process = Timing(self)
    timing_process.start()

    batch = Batch()

    with h5py.File(self.filename, 'r') as hdf_file:

        # if pre and postsynaptic locations required, their id
        # SynapseLocation dictionaries should be created together s.t. ids
        # are unique and allow to find partner locations
        if PointsKeys.PRESYN in request.points_specs or PointsKeys.POSTSYN in request.points_specs:
            assert self.kind == 'synapse'
            # If only PRESYN or POSTSYN requested, assume PRESYN ROI = POSTSYN ROI.
            pre_key = PointsKeys.PRESYN if PointsKeys.PRESYN in request.points_specs else PointsKeys.POSTSYN
            post_key = PointsKeys.POSTSYN if PointsKeys.POSTSYN in request.points_specs else PointsKeys.PRESYN
            presyn_points, postsyn_points = self.__get_syn_points(
                pre_roi=request.points_specs[pre_key].roi,
                post_roi=request.points_specs[post_key].roi,
                syn_file=hdf_file)
            points = {
                PointsKeys.PRESYN: presyn_points,
                PointsKeys.POSTSYN: postsyn_points
            }
        else:
            assert self.kind == 'presyn' or self.kind == 'postsyn'
            synkey = list(self.datasets.items())[0][0]  # only key of dict
            presyn_points, postsyn_points = self.__get_syn_points(
                pre_roi=request.points_specs[synkey].roi,
                post_roi=request.points_specs[synkey].roi,
                syn_file=hdf_file)
            points = {
                synkey: presyn_points if self.kind == 'presyn' else postsyn_points
            }

        for (points_key, request_spec) in request.points_specs.items():
            logger.debug("Reading %s in %s...", points_key, request_spec.roi)
            points_spec = self.spec[points_key].copy()
            points_spec.roi = request_spec.roi
            # FIX: the original format string had no placeholder
            # ("Number of points len()".format(...)), so the point count
            # was never actually logged.
            logger.debug("Number of points %d", len(points[points_key]))
            batch.points[points_key] = Points(
                data=points[points_key], spec=points_spec)

    timing_process.stop()
    batch.profiling_stats.add(timing_process)

    return batch
def provide(self, request):
    """Read requested synapse points and return them in a batch.

    If both PRESYN and POSTSYN are requested, their ROIs must be equal so
    that the id : SynapseLocation dictionaries can be created together,
    keeping IDs unique and partner lookups possible.

    Raises:
        RuntimeError: if a requested key is not provided by this source,
            or its ROI lies outside this source's ROI.
    """
    timing = Timing(self)
    timing.start()

    batch = Batch()

    # if pre and postsynaptic locations requested, their id : SynapseLocation
    # dictionaries should be created together s.t. the ids are unique and
    # allow to find partner locations
    if PointsKeys.PRESYN in request.points or PointsKeys.POSTSYN in request.points:

        # FIX: this was a bare ``try/except`` around an assert, which also
        # swallowed the KeyError raised when one key was missing and could
        # mask unrelated errors (even KeyboardInterrupt). Express the
        # intent directly: if both keys are present, their ROIs must match.
        if (PointsKeys.PRESYN in request.points
                and PointsKeys.POSTSYN in request.points):
            assert request.points[PointsKeys.PRESYN] == \
                request.points[PointsKeys.POSTSYN], \
                "PRESYN and POSTSYN ROIs differ"

        if PointsKeys.PRESYN in request.points:
            presyn_points, postsyn_points = self.__read_syn_points(
                roi=request.points[PointsKeys.PRESYN])
        else:
            presyn_points, postsyn_points = self.__read_syn_points(
                roi=request.points[PointsKeys.POSTSYN])

    for (points_key, roi) in request.points.items():

        # check if requested points can be provided
        if points_key not in self.spec:
            raise RuntimeError(
                "Asked for %s which this source does not provide" %
                points_key)

        # check if request roi lies within provided roi
        if not self.spec[points_key].roi.contains(roi):
            raise RuntimeError(
                "%s's ROI %s outside of my ROI %s" %
                (points_key, roi, self.spec[points_key].roi))

        logger.debug("Reading %s in %s..." % (points_key, roi))
        id_to_point = {
            PointsKeys.PRESYN: presyn_points,
            PointsKeys.POSTSYN: postsyn_points
        }[points_key]
        batch.points[points_key] = Points(
            data=id_to_point, spec=PointsSpec(roi=roi))

    logger.debug("done")

    timing.stop()
    batch.profiling_stats.add(timing)

    return batch
def provide(self, request):
    """Read the requested pre-/postsynaptic points from the HDF5 file.

    Both point sets are read together so their IDs stay unique and
    partner locations can be found; PRESYN and POSTSYN must therefore be
    requested with identical ROIs.
    """
    timing = Timing(self)
    timing.start()

    batch = Batch()

    with h5py.File(self.filename, 'r') as hdf_file:

        # if pre and postsynaptic locations required, their id
        # SynapseLocation dictionaries should be created together s.t. ids
        # are unique and allow to find partner locations
        if PointsKeys.PRESYN in request.points_specs or PointsKeys.POSTSYN in request.points_specs:

            assert request.points_specs[PointsKeys.PRESYN].roi == \
                request.points_specs[PointsKeys.POSTSYN].roi

            # Cremi specific, ROI offset corresponds to offset present in
            # the synapse location relative to the raw data.
            dataset_offset = self.spec[PointsKeys.PRESYN].roi.get_offset()
            presyn_points, postsyn_points = self.__get_syn_points(
                roi=request.points_specs[PointsKeys.PRESYN].roi,
                syn_file=hdf_file,
                dataset_offset=dataset_offset)

            per_key = {
                PointsKeys.PRESYN: presyn_points,
                PointsKeys.POSTSYN: postsyn_points,
            }

        for points_key, request_spec in request.points_specs.items():
            logger.debug("Reading %s in %s...", points_key, request_spec.roi)
            result_spec = self.spec[points_key].copy()
            result_spec.roi = request_spec.roi
            batch.points[points_key] = Points(
                data=per_key[points_key], spec=result_spec)

    logger.debug("done")

    timing.stop()
    batch.profiling_stats.add(timing)

    return batch
def provide(self, request: BatchRequest) -> Batch:
    """Serve the SWC points inside the requested ROI.

    Queries the kd-tree for speed, builds the corresponding subgraph,
    patches up edges that cross the ROI boundary, and returns the result
    in Points format.
    """
    timing = Timing(self)
    timing.start()

    roi = request[self.points].roi
    logger.debug("Swc points source got request for %s", roi)

    # Retrieve all points in the requested region using a kdtree for speed
    bounds = (np.array(roi.get_begin()), np.array(roi.get_end()))
    located = self._query_kdtree(self.data.tree, bounds)

    # Obtain subgraph that contains these points. Keep track of edges that
    # are present in the main graph, but not the subgraph
    sub_graph, predecessors, successors = self._points_to_graph(located)

    # Handle boundary cases
    self._handle_boundary_crossings(sub_graph, predecessors, successors, roi)

    # Convert graph into Points format
    batch = Batch()
    batch.points[self.points] = Points(
        self._graph_to_data(sub_graph), PointsSpec(roi=roi.copy()))

    timing.stop()
    batch.profiling_stats.add(timing)

    return batch
def provide(self, request):
    """Serve the requested volumes and synapse point sets from the HDF5 file.

    NOTE(review): legacy gunpowder API (VolumeTypes/PointsTypes,
    ``get_spec()``, Volume/Points with ``roi``/``resolution`` arguments).
    """
    timing = Timing(self)
    timing.start()

    spec = self.get_spec()

    batch = Batch()

    with h5py.File(self.filename, 'r') as f:

        for (volume_type, roi) in request.volumes.items():

            # check that this source provides the requested volume
            if volume_type not in spec.volumes:
                raise RuntimeError(
                    "Asked for %s which this source does not provide" %
                    volume_type)

            # check that the request ROI lies within the provided ROI
            if not spec.volumes[volume_type].contains(roi):
                raise RuntimeError(
                    "%s's ROI %s outside of my ROI %s" %
                    (volume_type, roi, spec.volumes[volume_type]))

            # interpolatable-ness per volume type; currently unused, see
            # the commented-out 'interpolate=' argument below
            interpolate = {
                VolumeTypes.RAW: True,
                VolumeTypes.GT_LABELS: False,
                VolumeTypes.GT_MASK: False,
                VolumeTypes.ALPHA_MASK: True,
            }[volume_type]

            # shift the request ROI by this volume's physical offset,
            # converted to voxels via the volume's resolution
            if volume_type in self.volume_phys_offset:
                offset_shift = np.array(
                    self.volume_phys_offset[volume_type]) / np.array(
                        self.resolutions[volume_type])
                roi_offset = roi.shift(tuple(-offset_shift))
            else:
                roi_offset = roi

            logger.debug("Reading %s in %s..." % (volume_type, roi_offset))
            batch.volumes[volume_type] = Volume(
                self.__read(f, self.datasets[volume_type], roi_offset),
                roi=roi,
                resolution=self.resolutions[volume_type])
            # interpolate=interpolate)

        # if pre and postsynaptic locations required, their
        # id : SynapseLocation dictionaries should be created together
        # s.t. ids are unique and allow to find partner locations
        if PointsTypes.PRESYN in request.points or PointsTypes.POSTSYN in request.points:
            # assert request.points[PointsTypes.PRESYN] == request.points[PointsTypes.POSTSYN]
            # Cremi specific, ROI offset corresponds to offset present in the
            # synapse location relative to the raw data.
            # TODO: Make this generic and in the same style as done for volume_phys_offst.
            dataset_offset = self.get_spec().points[
                PointsTypes.PRESYN].get_offset()
            # NOTE(review): uses the PRESYN ROI even when only POSTSYN was
            # requested — that would raise KeyError; confirm callers always
            # request PRESYN.
            presyn_points, postsyn_points = self.__get_syn_points(
                roi=request.points[PointsTypes.PRESYN],
                syn_file=f,
                dataset_offset=dataset_offset)

        for (points_type, roi) in request.points.items():

            # check that this source provides the requested points
            if points_type not in spec.points:
                raise RuntimeError(
                    "Asked for %s which this source does not provide" %
                    points_type)

            # check that the request ROI lies within the provided ROI
            if not spec.points[points_type].contains(roi):
                raise RuntimeError(
                    "%s's ROI %s outside of my ROI %s" %
                    (points_type, roi, spec.points[points_type]))

            logger.debug("Reading %s in %s..." % (points_type, roi))
            id_to_point = {
                PointsTypes.PRESYN: presyn_points,
                PointsTypes.POSTSYN: postsyn_points
            }[points_type]
            # TODO: so far assumed that all points have resolution of raw volume
            batch.points[points_type] = Points(
                data=id_to_point,
                roi=roi,
                resolution=self.resolutions[VolumeTypes.RAW])

    logger.debug("done")

    timing.stop()
    batch.profiling_stats.add(timing)

    return batch
def provide(self, request):
    """Serve the requested volumes and synapse point sets from the HDF5 file.

    NOTE(review): legacy gunpowder API (VolumeTypes/PointsTypes,
    ``volume_specs``/``points_specs`` on the request).
    """
    timing = Timing(self)
    timing.start()

    batch = Batch()

    with h5py.File(self.filename, 'r') as hdf_file:

        for (volume_type, request_spec) in request.volume_specs.items():

            logger.debug("Reading %s in %s...", volume_type,
                         request_spec.roi)

            voxel_size = self.spec[volume_type].voxel_size

            # scale request roi to voxel units
            dataset_roi = request_spec.roi / voxel_size

            # shift request roi into dataset
            dataset_roi = dataset_roi - self.spec[
                volume_type].roi.get_offset() / voxel_size

            # create volume spec
            volume_spec = self.spec[volume_type].copy()
            volume_spec.roi = request_spec.roi

            # add volume to batch
            batch.volumes[volume_type] = Volume(
                self.__read(hdf_file, self.datasets[volume_type],
                            dataset_roi),
                volume_spec)

        # if pre and postsynaptic locations required, their id
        # SynapseLocation dictionaries should be created together s.t. ids
        # are unique and allow to find partner locations
        if PointsTypes.PRESYN in request.points_specs or PointsTypes.POSTSYN in request.points_specs:
            # assert request.points_specs[PointsTypes.PRESYN].roi == request.points_specs[PointsTypes.POSTSYN].roi
            # Cremi specific, ROI offset corresponds to offset present in
            # the synapse location relative to the raw data.
            assert self.spec[PointsTypes.PRESYN].roi.get_offset() == self.spec[PointsTypes.POSTSYN].roi.get_offset(),\
                "Pre and Post synaptic offsets are not the same"
            # pdb.set_trace()
            # assert request[PointsTypes.PRESYN].roi == request[PointsTypes.POSTSYN].roi,\
            #     "Pre and Post synaptic roi requests are not the same"
            dataset_offset = self.spec[PointsTypes.PRESYN].roi.get_offset()
            # NOTE(review): uses the PRESYN ROI even when only POSTSYN was
            # requested — that would raise KeyError; confirm callers always
            # request PRESYN.
            presyn_points, postsyn_points = self.__get_syn_points(
                roi=request.points_specs[PointsTypes.PRESYN].roi,
                syn_file=hdf_file,
                dataset_offset=dataset_offset)

        for (points_type, request_spec) in request.points_specs.items():

            logger.debug("Reading %s in %s...", points_type,
                         request_spec.roi)

            id_to_point = {
                PointsTypes.PRESYN: presyn_points,
                PointsTypes.POSTSYN: postsyn_points
            }[points_type]

            # TODO: so far assumed that all points have resolution of raw volume
            points_spec = self.spec[points_type].copy()
            points_spec.roi = request_spec.roi
            batch.points[points_type] = Points(data=id_to_point,
                                               spec=points_spec)

    logger.debug("done")

    timing.stop()
    batch.profiling_stats.add(timing)

    return batch