Example #1
    def execute(self, slot, subindex, roi, result):
        featMatrix=[]
        labelsMatrix=[]
        for i,labels in enumerate(self.inputs["Labels"]):
            if labels.meta.shape is not None:
                labels=labels[:].wait()

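                # Collect the coordinates of all labeled (non-zero) pixels in this image.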
                indexes=numpy.nonzero(labels[...,0].view(numpy.ndarray))
                #Maybe later request only part of the region?

                image=self.inputs["Images"][i][:].wait()

                features=image[indexes]
                labels=labels[indexes]

                featMatrix.append(features)
                labelsMatrix.append(labels)


        featMatrix=numpy.concatenate(featMatrix,axis=0)
        labelsMatrix=numpy.concatenate(labelsMatrix,axis=0)

        # train and store self._forest_count forests in parallel
        pool = RequestPool()
        for i in range(self._forest_count):
            def train_and_store(number):
                result[number] = vigra.learning.RandomForest(self._tree_count)
                result[number].learnRF(featMatrix.astype(numpy.float32),labelsMatrix.astype(numpy.uint32))
            req = pool.request(partial(train_and_store, i))

        pool.wait()

        return result
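Every example on this page follows the same parallelization pattern: wrap each unit of work in a closure, submit it to a RequestPool via functools.partial, and block on pool.wait(). Below is a minimal, self-contained sketch of that pattern (assuming lazyflow's RequestPool and vigra's RandomForest, with the feature and label matrices already assembled):

from functools import partial

import numpy
import vigra
from lazyflow.request import RequestPool


def train_forests(featMatrix, labelsMatrix, forest_count=8, tree_count=100):
    """Train forest_count random forests in parallel and return them as a list."""
    forests = [None] * forest_count

    def train_and_store(number):
        # Each closure trains one forest and stores it at its own index.
        forests[number] = vigra.learning.RandomForest(tree_count)
        forests[number].learnRF(featMatrix.astype(numpy.float32),
                                labelsMatrix.astype(numpy.uint32))

    pool = RequestPool()
    for i in range(forest_count):
        pool.request(partial(train_and_store, i))
    pool.wait()   # block until every queued request has finished
    pool.clean()
    return forests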
Example #2
    def execute(self, slot, subindex, roi, result):
        with self._lock:
            if self.cache is None:
                fullBlockShape = numpy.array([self.blockShape.value for i in self.Input.meta.shape])
                fun = self.inputs["Function"].value
                #data = self.inputs["Input"][:].wait()
                #split up requests into blocks
                shape = self.Input.meta.shape
                numBlocks = numpy.ceil(shape/(1.0*fullBlockShape)).astype("int")
                blockCache = numpy.ndarray(shape = numpy.prod(numBlocks), dtype=self.Output.meta.dtype)
                pool = RequestPool()
                #blocks holds the different roi keys for each of the blocks
                blocks = itertools.product(*[range(i) for i in numBlocks])
                blockKeys = []
                for b in blocks:
                    start = b * fullBlockShape
                    stop = b * fullBlockShape + fullBlockShape
                    stop = numpy.min(numpy.vstack((stop, shape)), axis=0)
                    blockKey = roiToSlice(start, stop)
                    blockKeys.append(blockKey)
                
                def predict_block(i):
                    data = self.Input[blockKeys[i]].wait()
                    blockCache[i] = fun(data)
                    
                for i,f in enumerate(blockCache):
                    req = pool.request(partial(predict_block,i))

                pool.wait()
                pool.clean()

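                # Second pass: apply the same function across the per-block results.
                # This assumes 'fun' is an associative reduction (e.g. min or max), so the
                # block-wise values can be combined into a single global result.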
                self.cache = [fun(blockCache)]
            return self.cache
Example #3
    def execute(self, slot, subindex, roi, result):
        t1 = time.perf_counter()
        key = roi.toSlice()
        nlabels = self.inputs["LabelsCount"].value

        traceLogger.debug(
            "OpPredictRandomForest: Requesting classifier. roi={}".format(roi))
        forests = self.inputs["Classifier"][:].wait()

        if any(forest is None for forest in forests):
            # Training operator may return 'None' if there was no data to train with
            return np.zeros(np.subtract(roi.stop, roi.start),
                            dtype=np.float32)[...]

        traceLogger.debug("OpPredictRandomForest: Got classifier")
        #assert RF.labelCount() == nlabels, "ERROR: OpPredictRandomForest, labelCount differs from true labelCount! %r vs. %r" % (RF.labelCount(), nlabels)

        newKey = key[:-1]
        newKey += (slice(0, self.inputs["Image"].meta.shape[-1], None), )

        res = self.inputs["Image"][newKey].wait()

        shape = res.shape
        prod = np.prod(shape[:-1])
        res.shape = (prod, shape[-1])
        features = res

        predictions = [0] * len(forests)

        t2 = time.perf_counter()

        pool = RequestPool()

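        # Each request runs one forest's prediction; results land in the shared 'predictions' list.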
        def predict_forest(i):
            predictions[i] = forests[i].predict(
                np.asarray(features, dtype=np.float32))
            predictions[i] = predictions[i].reshape(result.shape[:-1])

        for i, f in enumerate(forests):
            req = pool.request(partial(predict_forest, i))

        pool.wait()
        pool.clean()
        #predictions[0] = forests[0].predict(np.asarray(features, dtype = np.float32), normalize = False)
        #predictions[0] = predictions[0].reshape(result.shape)
        prediction = np.dstack(predictions)
        result[...] = prediction

        # If our LabelsCount is higher than the number of labels in the training set,
        # then our results aren't really valid.  FIXME !!!
        # Duplicate the last label's predictions
        #for c in range(result.shape[-1]):
        #    result[...,c] = prediction[...,min(c+key[-1].start, prediction.shape[-1]-1)]

        t3 = time.perf_counter()

        logger.debug(
            "Predict took %fseconds, actual RF time was %fs, feature time was %fs"
            % (t3 - t1, t3 - t2, t2 - t1))
        return result
Example #4
    def execute(self, slot, subindex, roi, result):
        t1 = time.time()
        key = roi.toSlice()
        nlabels=self.inputs["LabelsCount"].value

        traceLogger.debug("OpPredictRandomForest: Requesting classifier. roi={}".format(roi))
        forests=self.inputs["Classifier"][:].wait()

        if forests is None or any(x is None for x in forests):
            # Training operator may return 'None' if there was no data to train with
            return numpy.zeros(numpy.subtract(roi.stop, roi.start), dtype=numpy.float32)[...]

        traceLogger.debug("OpPredictRandomForest: Got classifier")
        #assert RF.labelCount() == nlabels, "ERROR: OpPredictRandomForest, labelCount differs from true labelCount! %r vs. %r" % (RF.labelCount(), nlabels)

        newKey = key[:-1]
        newKey += (slice(0,self.inputs["Image"].meta.shape[-1],None),)

        res = self.inputs["Image"][newKey].wait()

        shape=res.shape
        prod = numpy.prod(shape[:-1])
        res.shape = (prod, shape[-1])
        features=res

        predictions = [0]*len(forests)

        def predict_forest(number):
            predictions[number] = forests[number].predictProbabilities(numpy.asarray(features, dtype=numpy.float32))

        t2 = time.time()

        # predict the data with all the forests in parallel
        pool = RequestPool()

        for i,f in enumerate(forests):
            req = pool.request(partial(predict_forest, i))

        pool.wait()
        pool.clean()

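        # Average the per-forest probability maps into a single prediction.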
        prediction=numpy.dstack(predictions)
        prediction = numpy.average(prediction, axis=2)
        prediction.shape =  shape[:-1] + (forests[0].labelCount(),)
        #prediction = prediction.reshape(*(shape[:-1] + (forests[0].labelCount(),)))

        # If our LabelsCount is higher than the number of labels in the training set,
        # then our results aren't really valid.  FIXME !!!
        # Duplicate the last label's predictions
        for c in range(result.shape[-1]):
            result[...,c] = prediction[...,min(c+key[-1].start, prediction.shape[-1]-1)]

        t3 = time.time()

        self.logger.debug("predict roi=%r took %fseconds, actual RF time was %fs, feature time was %fs" % (key, t3-t1, t3-t2, t2-t1))
        
        return result
Example #5
    def execute(self, slot, subindex, roi, result):
        with self._lock:
            if self.cache is None:
                shape = self.Input.meta.shape
                # self.blockshape has None in the last dimension to indicate that it should not be
                # handled block-wise. None is replaced with the image shape in the respective axis.
                fullBlockShape = []
                for u, v in zip(self.blockShape.value, shape):
                    if u is not None:
                        fullBlockShape.append(u)
                    else:
                        fullBlockShape.append(v)
                fullBlockShape = numpy.array(fullBlockShape,
                                             dtype=numpy.float64)

                # data = self.inputs["Input"][:].wait()
                # split up requests into blocks

                numBlocks = numpy.ceil(shape / fullBlockShape).astype("int")
                blockCache = numpy.ndarray(shape=numpy.prod(numBlocks),
                                           dtype=self.Output.meta.dtype)
                pool = RequestPool()
                # blocks holds the different roi keys for each of the blocks
                blocks = itertools.product(
                    *[list(range(i)) for i in numBlocks])
                blockKeys = []
                for b in blocks:
                    start = b * fullBlockShape
                    stop = b * fullBlockShape + fullBlockShape
                    stop = numpy.min(numpy.vstack((stop, shape)), axis=0)
                    blockKey = roiToSlice(start, stop)
                    blockKeys.append(blockKey)

                fun = self.inputs["Function"].value

                def predict_block(i):
                    data = self.Input[blockKeys[i]].wait()
                    blockCache[i] = fun(data)

                for i, f in enumerate(blockCache):
                    req = pool.request(partial(predict_block, i))

                pool.wait()
                pool.clean()

                self.cache = [fun(blockCache)]
            return self.cache
Example #6
    def execute(self, slot, subindex, roi, result):
        with self._lock:
            if self.cache is None:
                shape = self.Input.meta.shape
                # self.blockshape has None in the last dimension to indicate that it should not be
                # handled block-wise. None is replaced with the image shape in the respective axis.
                fullBlockShape = []
                for u, v in zip(self.blockShape.value, shape):
                    if u is not None:
                        fullBlockShape.append(u)
                    else:
                        fullBlockShape.append(v)
                fullBlockShape = numpy.array(fullBlockShape, dtype=numpy.float64)

                #data = self.inputs["Input"][:].wait()
                #split up requests into blocks

                numBlocks = numpy.ceil(shape / fullBlockShape).astype("int")
                blockCache = numpy.ndarray(shape = numpy.prod(numBlocks), dtype=self.Output.meta.dtype)
                pool = RequestPool()
                #blocks holds the different roi keys for each of the blocks
                blocks = itertools.product(*[list(range(i)) for i in numBlocks])
                blockKeys = []
                for b in blocks:
                    start = b * fullBlockShape
                    stop = b * fullBlockShape + fullBlockShape
                    stop = numpy.min(numpy.vstack((stop, shape)), axis=0)
                    blockKey = roiToSlice(start, stop)
                    blockKeys.append(blockKey)

                fun = self.inputs["Function"].value
                def predict_block(i):
                    data = self.Input[blockKeys[i]].wait()
                    blockCache[i] = fun(data)

                for i,f in enumerate(blockCache):
                    req = pool.request(partial(predict_block,i))

                pool.wait()
                pool.clean()

                self.cache = [fun(blockCache)]
            return self.cache
Example #7
    def execute(self, slot, subindex, roi, result):
        with self._lock:
            if self.cache is None:
                fullBlockShape = numpy.array(
                    [self.blockShape.value for i in self.Input.meta.shape])
                fun = self.inputs["Function"].value
                #data = self.inputs["Input"][:].wait()
                #split up requests into blocks
                shape = self.Input.meta.shape
                numBlocks = numpy.ceil(shape /
                                       (1.0 * fullBlockShape)).astype("int")
                blockCache = numpy.ndarray(shape=numpy.prod(numBlocks),
                                           dtype=self.Output.meta.dtype)
                pool = RequestPool()
                #blocks holds the different roi keys for each of the blocks
                blocks = itertools.product(*[range(i) for i in numBlocks])
                blockKeys = []
                for b in blocks:
                    start = b * fullBlockShape
                    stop = b * fullBlockShape + fullBlockShape
                    stop = numpy.min(numpy.vstack((stop, shape)), axis=0)
                    blockKey = roiToSlice(start, stop)
                    blockKeys.append(blockKey)

                def predict_block(i):
                    data = self.Input[blockKeys[i]].wait()
                    blockCache[i] = fun(data)

                for i, f in enumerate(blockCache):
                    req = pool.request(partial(predict_block, i))

                pool.wait()
                pool.clean()

                self.cache = [fun(blockCache)]
            return self.cache
Example #8
    def execute(self, slot, subindex, slot_roi, target):
        assert slot == self.Features or slot == self.Output
        if slot == self.Features:
            feature_slice = roiToSlice(slot_roi.start, slot_roi.stop)
            index = subindex[0]
            feature_slice = list(feature_slice)

            # Translate channel slice of this feature to the channel slice of the output slot.
            output_channel_offset = self.featureOutputChannels[index][0]
            feature_slice[1] = slice(
                output_channel_offset + feature_slice[1].start,
                output_channel_offset + feature_slice[1].stop)
            slot_roi = SubRegion(self.Output, pslice=feature_slice)

            # Get output slot region for this channel
            return self.execute(self.Output, (), slot_roi, target)
        elif slot == self.Output:
            # Correlation of variable 'families' representing reference frames:
            #  ______________________________
            # | input/output frame           |  input/output shape given by slots
            # |  _________________________   |
            # | | smooth frame            |  |  pre-smoothing op needs halo around filter roi
            # | |  ____________________   |  |
            # | | |filter frame        |  |  |  filter needs halo around target roi
            # | | |  _______________   |  |  |
            # | | | | target frame  |  |  |  |  target is given by output_roi

            # note: The 'full_' variable prefix refers to the full 5D shape (tczyx), without 'full_' variables mostly
            #       refer to the 3D space subregion (zyx)

            full_output_slice = slot_roi.toSlice()

            logger.debug(
                f"OpPixelFeaturesPresmoothed: request {slot_roi.pprint()}")

            assert (slot_roi.stop <= self.Output.meta.shape).all()

            full_output_shape = self.Output.meta.shape
            full_output_start, full_output_stop = sliceToRoi(
                full_output_slice, full_output_shape)
            assert len(full_output_shape) == 5
            if all(self.ComputeIn2d.value
                   ):  # todo: check for this particular slice
                axes2enlarge = (0, 1, 1)
            else:
                axes2enlarge = (1, 1, 1)

            output_shape = full_output_shape[2:]
            output_start = full_output_start[2:]
            output_stop = full_output_stop[2:]

            axistags = self.Output.meta.axistags
            target = target.view(vigra.VigraArray)
            target.axistags = copy.copy(axistags)

            # filter roi in input frame
            # sigma = 0.7, because the features receive a pre-smoothed array and don't need much of a neighborhood
            input_filter_start, input_filter_stop = roi.enlargeRoiForHalo(
                output_start,
                output_stop,
                output_shape,
                0.7,
                self.WINDOW_SIZE,
                enlarge_axes=axes2enlarge)

            # smooth roi in input frame
            input_smooth_start, input_smooth_stop = roi.enlargeRoiForHalo(
                input_filter_start,
                input_filter_stop,
                output_shape,
                self.max_sigma,
                self.WINDOW_SIZE,
                enlarge_axes=axes2enlarge,
            )

            # target roi in filter frame
            filter_target_start = roi.TinyVector(output_start -
                                                 input_filter_start)
            filter_target_stop = roi.TinyVector(output_stop -
                                                input_filter_start)

            # filter roi in smooth frame
            smooth_filter_start = roi.TinyVector(input_filter_start -
                                                 input_smooth_start)
            smooth_filter_stop = roi.TinyVector(input_filter_stop -
                                                input_smooth_start)

            filter_target_slice = roi.roiToSlice(filter_target_start,
                                                 filter_target_stop)
            input_smooth_slice = roi.roiToSlice(input_smooth_start,
                                                input_smooth_stop)

            # pre-smooth for all requested time slices and all channels
            full_input_smooth_slice = (full_output_slice[0], slice(None),
                                       *input_smooth_slice)
            req = self.Input[full_input_smooth_slice]
            source = req.wait()
            req.clean()
            req.destination = None
            if source.dtype != numpy.float32:
                sourceF = source.astype(numpy.float32)
                try:
                    source.resize((1, ), refcheck=False)
                except Exception:
                    pass
                del source
                source = sourceF

            sourceV = source.view(vigra.VigraArray)
            sourceV.axistags = copy.copy(self.Input.meta.axistags)

            dimCol = len(self.scales)
            dimRow = self.matrix.shape[0]

            presmoothed_source = [None] * dimCol

            source_smooth_shape = tuple(smooth_filter_stop -
                                        smooth_filter_start)
            full_source_smooth_shape = (
                full_output_stop[0] - full_output_start[0],
                self.Input.meta.shape[1],
            ) + source_smooth_shape
            try:
                for j in range(dimCol):
                    for i in range(dimRow):
                        if self.matrix[i, j]:
                            # There is at least one filter op with this scale
                            break
                    else:
                        # There is no filter op at this scale
                        continue

                    if self.scales[j] > 1.0:
                        tempSigma = math.sqrt(self.scales[j]**2 - 1.0)
                    else:
                        tempSigma = self.scales[j]

                    presmoothed_source[j] = numpy.ndarray(
                        full_source_smooth_shape, numpy.float32)

                    droi = (
                        (0, *tuple(smooth_filter_start._asint())),
                        (sourceV.shape[1],
                         *tuple(smooth_filter_stop._asint())),
                    )
                    for i, vsa in enumerate(sourceV.timeIter()):
                        presmoothed_source[j][
                            i, ...] = self._computeGaussianSmoothing(
                                vsa,
                                tempSigma,
                                droi,
                                in2d=self.ComputeIn2d.value[j])

            except RuntimeError as e:
                if "kernel longer than line" in str(e):
                    raise RuntimeError(
                        "Feature computation error:\nYour image is too small to apply a filter with "
                        f"sigma={self.scales[j]:.1f}. Please select features with smaller sigmas."
                    )
                else:
                    raise e

            del sourceV
            try:
                source.resize((1, ), refcheck=False)
            except ValueError:
                # Sometimes this fails, but that's okay.
                logger.debug("Failed to free array memory.")
            del source

            cnt = 0
            written = 0
            closures = []
            # connect individual operators
            for i in range(dimRow):
                for j in range(dimCol):
                    if self.matrix[i, j]:
                        oslot = self.featureOps[i][j].Output
                        req = None
                        slices = oslot.meta.shape[1]
                        if (cnt + slices >= slot_roi.start[1]
                                and slot_roi.start[1] - cnt < slices
                                and slot_roi.start[1] + written <
                                slot_roi.stop[1]):
                            begin = 0
                            if cnt < slot_roi.start[1]:
                                begin = slot_roi.start[1] - cnt
                            end = slices
                            if cnt + end > slot_roi.stop[1]:
                                end = slot_roi.stop[1] - cnt

                            # feature slice in output frame
                            feature_slice = (slice(None),
                                             slice(
                                                 written, written + end -
                                                 begin)) + (slice(None), ) * 3

                            subtarget = target[feature_slice]
                            # readjust the roi for the new source array
                            full_filter_target_slice = [
                                full_output_slice[0],
                                slice(begin, end), *filter_target_slice
                            ]
                            filter_target_roi = SubRegion(
                                oslot, pslice=full_filter_target_slice)

                            closure = partial(
                                oslot.operator.execute,
                                oslot,
                                (),
                                filter_target_roi,
                                subtarget,
                                sourceArray=presmoothed_source[j],
                            )
                            closures.append(closure)

                            written += end - begin
                        cnt += slices
            pool = RequestPool()
            for c in closures:
                pool.request(c)
            pool.wait()
            pool.clean()

            for i in range(len(presmoothed_source)):
                if presmoothed_source[i] is not None:
                    try:
                        presmoothed_source[i].resize((1, ))
                    except Exception:
                        presmoothed_source[i] = None
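The nested 'frames' in the diagram above differ only by halo padding: each ROI is enlarged by roughly sigma * window_size and clipped to the image bounds before being handed to the next stage. A small self-contained sketch of that arithmetic (a simplification for illustration, not lazyflow's actual roi.enlargeRoiForHalo):

import numpy

def enlarge_roi_for_halo(start, stop, shape, sigma, window_size=3.5):
    """Pad the [start, stop) ROI by ceil(sigma * window_size) voxels, clipped to the image."""
    halo = int(numpy.ceil(sigma * window_size))
    enlarged_start = numpy.maximum(numpy.array(start) - halo, 0)
    enlarged_stop = numpy.minimum(numpy.array(stop) + halo, numpy.array(shape))
    return enlarged_start, enlarged_stop

# Example: a 3D target ROI padded for a filter with sigma=0.7 inside a 100**3 volume.
start, stop = enlarge_roi_for_halo((40, 40, 40), (60, 60, 60), (100, 100, 100), sigma=0.7)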
Example #9
    def execute(self, slot, subindex, slot_roi, target):
        assert slot == self.Features or slot == self.Output
        if slot == self.Features:
            feature_slice = roiToSlice(slot_roi.start, slot_roi.stop)
            index = subindex[0]
            feature_slice = list(feature_slice)

            # Translate channel slice of this feature to the channel slice of the output slot.
            output_channel_offset = self.featureOutputChannels[index][0]
            feature_slice[1] = slice(
                output_channel_offset + feature_slice[1].start, output_channel_offset + feature_slice[1].stop
            )
            slot_roi = SubRegion(self.Output, pslice=feature_slice)

            # Get output slot region for this channel
            return self.execute(self.Output, (), slot_roi, target)
        elif slot == self.Output:
            # Correlation of variable 'families' representing reference frames:
            #  ______________________________
            # | input/output frame           |  input/output shape given by slots
            # |  _________________________   |
            # | | smooth frame            |  |  pre-smoothing op needs halo around filter roi
            # | |  ____________________   |  |
            # | | |filter frame        |  |  |  filter needs halo around target roi
            # | | |  _______________   |  |  |
            # | | | | target frame  |  |  |  |  target is given by output_roi

            # note: The 'full_' variable prefix refers to the full 5D shape (tczyx), without 'full_' variables mostly
            #       refer to the 3D space subregion (zyx)

            full_output_slice = slot_roi.toSlice()

            logger.debug(f"OpPixelFeaturesPresmoothed: request {slot_roi.pprint()}")

            assert (slot_roi.stop <= self.Output.meta.shape).all()

            full_output_shape = self.Output.meta.shape
            full_output_start, full_output_stop = sliceToRoi(full_output_slice, full_output_shape)
            assert len(full_output_shape) == 5
            if all(self.ComputeIn2d.value):  # todo: check for this particular slice
                axes2enlarge = (0, 1, 1)
            else:
                axes2enlarge = (1, 1, 1)

            output_shape = full_output_shape[2:]
            output_start = full_output_start[2:]
            output_stop = full_output_stop[2:]

            axistags = self.Output.meta.axistags
            target = target.view(vigra.VigraArray)
            target.axistags = copy.copy(axistags)

            # filter roi in input frame
            # sigma = 0.7, because the features receive a pre-smoothed array and don't need much of a neighborhood
            input_filter_start, input_filter_stop = roi.enlargeRoiForHalo(
                output_start, output_stop, output_shape, 0.7, self.WINDOW_SIZE, enlarge_axes=axes2enlarge
            )

            # smooth roi in input frame
            input_smooth_start, input_smooth_stop = roi.enlargeRoiForHalo(
                input_filter_start,
                input_filter_stop,
                output_shape,
                self.max_sigma,
                self.WINDOW_SIZE,
                enlarge_axes=axes2enlarge,
            )

            # target roi in filter frame
            filter_target_start = roi.TinyVector(output_start - input_filter_start)
            filter_target_stop = roi.TinyVector(output_stop - input_filter_start)

            # filter roi in smooth frame
            smooth_filter_start = roi.TinyVector(input_filter_start - input_smooth_start)
            smooth_filter_stop = roi.TinyVector(input_filter_stop - input_smooth_start)

            filter_target_slice = roi.roiToSlice(filter_target_start, filter_target_stop)
            input_smooth_slice = roi.roiToSlice(input_smooth_start, input_smooth_stop)

            # pre-smooth for all requested time slices and all channels
            full_input_smooth_slice = (full_output_slice[0], slice(None), *input_smooth_slice)
            req = self.Input[full_input_smooth_slice]
            source = req.wait()
            req.clean()
            req.destination = None
            if source.dtype != numpy.float32:
                sourceF = source.astype(numpy.float32)
                try:
                    source.resize((1,), refcheck=False)
                except Exception:
                    pass
                del source
                source = sourceF

            sourceV = source.view(vigra.VigraArray)
            sourceV.axistags = copy.copy(self.Input.meta.axistags)

            dimCol = len(self.scales)
            dimRow = self.matrix.shape[0]

            presmoothed_source = [None] * dimCol

            source_smooth_shape = tuple(smooth_filter_stop - smooth_filter_start)
            full_source_smooth_shape = (
                full_output_stop[0] - full_output_start[0],
                self.Input.meta.shape[1],
            ) + source_smooth_shape
            try:
                for j in range(dimCol):
                    for i in range(dimRow):
                        if self.matrix[i, j]:
                            # There is at least one filter op with this scale
                            break
                    else:
                        # There is no filter op at this scale
                        continue

                    if self.scales[j] > 1.0:
                        tempSigma = math.sqrt(self.scales[j] ** 2 - 1.0)
                    else:
                        tempSigma = self.scales[j]

                    presmoothed_source[j] = numpy.ndarray(full_source_smooth_shape, numpy.float32)

                    droi = (
                        (0, *tuple(smooth_filter_start._asint())),
                        (sourceV.shape[1], *tuple(smooth_filter_stop._asint())),
                    )
                    for i, vsa in enumerate(sourceV.timeIter()):
                        presmoothed_source[j][i, ...] = self._computeGaussianSmoothing(
                            vsa, tempSigma, droi, in2d=self.ComputeIn2d.value[j]
                        )

            except RuntimeError as e:
                if "kernel longer than line" in str(e):
                    raise RuntimeError(
                        "Feature computation error:\nYour image is too small to apply a filter with "
                        f"sigma={self.scales[j]:.1f}. Please select features with smaller sigmas."
                    )
                else:
                    raise e

            del sourceV
            try:
                source.resize((1,), refcheck=False)
            except ValueError:
                # Sometimes this fails, but that's okay.
                logger.debug("Failed to free array memory.")
            del source

            cnt = 0
            written = 0
            closures = []
            # connect individual operators
            for i in range(dimRow):
                for j in range(dimCol):
                    if self.matrix[i, j]:
                        oslot = self.featureOps[i][j].Output
                        req = None
                        slices = oslot.meta.shape[1]
                        if (
                            cnt + slices >= slot_roi.start[1]
                            and slot_roi.start[1] - cnt < slices
                            and slot_roi.start[1] + written < slot_roi.stop[1]
                        ):
                            begin = 0
                            if cnt < slot_roi.start[1]:
                                begin = slot_roi.start[1] - cnt
                            end = slices
                            if cnt + end > slot_roi.stop[1]:
                                end = slot_roi.stop[1] - cnt

                            # feature slice in output frame
                            feature_slice = (slice(None), slice(written, written + end - begin)) + (slice(None),) * 3

                            subtarget = target[feature_slice]
                            # readjust the roi for the new source array
                            full_filter_target_slice = [full_output_slice[0], slice(begin, end), *filter_target_slice]
                            filter_target_roi = SubRegion(oslot, pslice=full_filter_target_slice)

                            closure = partial(
                                oslot.operator.execute,
                                oslot,
                                (),
                                filter_target_roi,
                                subtarget,
                                sourceArray=presmoothed_source[j],
                            )
                            closures.append(closure)

                            written += end - begin
                        cnt += slices
            pool = RequestPool()
            for c in closures:
                pool.request(c)
            pool.wait()
            pool.clean()

            for i in range(len(presmoothed_source)):
                if presmoothed_source[i] is not None:
                    try:
                        presmoothed_source[i].resize((1,))
                    except Exception:
                        presmoothed_source[i] = None
Example #10
    def execute(self, slot, subindex, roi, result):

        progress = 0
        numImages = len(self.Images)
        self.progressSignal(progress)
        featMatrix=[]
        labelsMatrix=[]
        tagList = []

        
        #result[0] = self._svr

        for i,labels in enumerate(self.inputs["ForegroundLabels"]):
            if labels.meta.shape is not None:
                opGaussian = OpGaussianSmoothing(parent = self, graph = self.graph)
                opGaussian.Sigma.setValue(self.Sigma.value)
                opGaussian.Input.connect(self.ForegroundLabels[i])
                blocks = self.inputs["nonzeroLabelBlocks"][i][0].wait()
                
                reqlistlabels = []
                reqlistbg = []
                reqlistfeat = []
                progress += 10 / numImages
                self.progressSignal(progress)
                
                for b in blocks[0]:
                    request = opGaussian.Output[b]
                    #request = labels[b]
                    featurekey = list(b)
                    featurekey[-1] = slice(None, None, None)
                    request2 = self.Images[i][featurekey]
                    request3 = self.inputs["BackgroundLabels"][i][b]
                    reqlistlabels.append(request)
                    reqlistfeat.append(request2)
                    reqlistbg.append(request3)

                traceLogger.debug("Requests prepared")

                numLabelBlocks = len(reqlistlabels)
                progress_outer = [progress]
                if numLabelBlocks > 0:
                    progressInc = (80 - 10)/(numLabelBlocks * numImages)

                def progressNotify(req):
                    progress_outer[0] += progressInc/2
                    self.progressSignal(progress_outer[0])

                for ir, req in enumerate(reqlistfeat):
                    req.notify_finished(progressNotify)
                    req.submit()

                for ir, req in enumerate(reqlistlabels):
                    req.notify_finished(progressNotify)
                    req.submit()

                for ir, req in enumerate(reqlistbg):
                    req.notify_finished(progressNotify)
                    req.submit()
                
                traceLogger.debug("Requests fired")
                

                #Fixme: Maybe later request only part of the region?

                #image=self.inputs["Images"][i][:].wait()
                for ir, req in enumerate(reqlistlabels):
                    
                    labblock = req.wait()
                    
                    image = reqlistfeat[ir].wait()
                    labbgblock = reqlistbg[ir].wait()
                    labblock = labblock.reshape((image.shape[:-1]))
                    image = image.reshape((-1, image.shape[-1]))
                    labbgindices = np.where(labbgblock == 2)            
                    labbgindices = np.ravel_multi_index(labbgindices, labbgblock.shape)
                    
                    newDot, mapping, tags = self._svr.prepareDataRefactored(labblock, labbgindices)
                    #self._svr.prepareData(labblock, smooth = True)

                    labels   = newDot[mapping]
                    features = image[mapping]

                    featMatrix.append(features)
                    labelsMatrix.append(labels)
                    tagList.append(tags)
                
                progress = progress_outer[0]

                traceLogger.debug("Requests processed")


        self.progressSignal(80 / numImages)
        if len(featMatrix) == 0 or len(labelsMatrix) == 0:
            result[:] = None

        else:
            posTags = [tag[0] for tag in tagList]
            negTags = [tag[1] for tag in tagList]
            numPosTags = np.sum(posTags)
            numTags = np.sum(posTags) + np.sum(negTags)
            fullFeatMatrix = np.ndarray((numTags, self.Images[0].meta.shape[-1]), dtype = np.float64)
            fullLabelsMatrix = np.ndarray((numTags), dtype = np.float64)
            fullFeatMatrix[:] = np.nan
            fullLabelsMatrix[:] = np.nan
            currPosCount = 0
            currNegCount = numPosTags
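            # Positive samples are packed into the front of the full matrices,
            # negative samples into the back, preserving per-image order.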
            for i, posCount in enumerate(posTags):
                fullFeatMatrix[currPosCount:currPosCount + posTags[i],:] = featMatrix[i][:posCount,:]
                fullLabelsMatrix[currPosCount:currPosCount + posTags[i]] = labelsMatrix[i][:posCount]
                fullFeatMatrix[currNegCount:currNegCount + negTags[i],:] = featMatrix[i][posCount:,:]
                fullLabelsMatrix[currNegCount:currNegCount + negTags[i]] = labelsMatrix[i][posCount:]
                currPosCount += posTags[i]
                currNegCount += negTags[i]


            assert(not np.isnan(np.sum(fullFeatMatrix)))

            fullTags = [np.sum(posTags), np.sum(negTags)]
            #pool = RequestPool()

            maxima = np.max(fullFeatMatrix, axis=0)
            minima = np.min(fullFeatMatrix, axis=0)
            normalizationFactors = (minima,maxima)
            



            boxConstraintList = []
            boxConstraints = None
            if self.BoxConstraintRois.ready() and self.BoxConstraintValues.ready():
                for i, slot in enumerate(zip(self.BoxConstraintRois,self.BoxConstraintValues)):
                    for constr, val in zip(slot[0].value, slot[1].value):
                        boxConstraintList.append((i, constr, val))
                if len(boxConstraintList) > 0:
                    boxConstraints = self.constructBoxConstraints(boxConstraintList)

            params = self._svr.get_params() 
            try:
                pool = RequestPool()
                def train_and_store(i):
                    result[i] = SVR(minmax = normalizationFactors, **params)
                    result[i].fitPrepared(fullFeatMatrix, fullLabelsMatrix, tags = fullTags, boxConstraints = boxConstraints,
                                          numRegressors = self.numRegressors, trainAll = False)
                for i in range(self.numRegressors):
                    req = pool.request(partial(train_and_store, i))
                
                pool.wait()
                pool.clean()
            
            except:
                logger.error("ERROR: could not learn regressor")
                logger.error("fullFeatMatrix shape = {}, dtype = {}".format(fullFeatMatrix.shape, fullFeatMatrix.dtype) )
                logger.error("fullLabelsMatrix shape = {}, dtype = {}".format(fullLabelsMatrix.shape, fullLabelsMatrix.dtype) )
                raise
            finally:
                self.progressSignal(100) 

        return result
Example #11
    def execute(self, slot, subindex, roi, result):
        progress = 0
        self.progressSignal(progress)
        numImages = len(self.Images)

        key = roi.toSlice()
        featMatrix=[]
        labelsMatrix=[]
        for i,labels in enumerate(self.inputs["Labels"]):
            if labels.meta.shape is not None:
                #labels=labels[:].wait()
                blocks = self.inputs["nonzeroLabelBlocks"][i][0].wait()

                progress += 10/numImages
                self.progressSignal(progress)

                reqlistlabels = []
                reqlistfeat = []
                traceLogger.debug("Sending requests for {} non-zero blocks (labels and data)".format( len(blocks[0])) )
                for b in blocks[0]:

                    request = labels[b]
                    featurekey = list(b)
                    featurekey[-1] = slice(None, None, None)
                    request2 = self.inputs["Images"][i][featurekey]

                    reqlistlabels.append(request)
                    reqlistfeat.append(request2)

                traceLogger.debug("Requests prepared")

                numLabelBlocks = len(reqlistlabels)
                progress_outer = [progress] # Store in list for closure access
                if numLabelBlocks > 0:
                    progressInc = (80-10)/numLabelBlocks/numImages

                def progressNotify(req):
                    # Note: If we wanted perfect progress reporting, we could use lock here
                    #       to protect the progress from being incremented simultaneously.
                    #       But that would slow things down and imperfect reporting is okay for our purposes.
                    progress_outer[0] += progressInc/2
                    self.progressSignal(progress_outer[0])

                for ir, req in enumerate(reqlistfeat):
                    req.notify_finished(progressNotify)

                for ir, req in enumerate(reqlistlabels):
                    req.notify_finished(progressNotify)

                traceLogger.debug("Requests fired")

                for ir, req in enumerate(reqlistlabels):
                    traceLogger.debug("Waiting for a label block...")
                    labblock = req.wait()

                    traceLogger.debug("Waiting for an image block...")
                    image = reqlistfeat[ir].wait()

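                    # Keep only the labeled (non-zero) pixels: their feature vectors and label values.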
                    indexes=numpy.nonzero(labblock[...,0].view(numpy.ndarray))
                    features=image[indexes]
                    labbla=labblock[indexes]

                    featMatrix.append(features)
                    labelsMatrix.append(labbla)

                progress = progress_outer[0]

                traceLogger.debug("Requests processed")

        self.progressSignal(80/numImages)

        if len(featMatrix) == 0 or len(labelsMatrix) == 0:
            # If there was no actual data for the random forest to train with, we return None
            result[:] = None
        else:
            featMatrix=numpy.concatenate(featMatrix,axis=0)
            labelsMatrix=numpy.concatenate(labelsMatrix,axis=0)
            maxLabel = self.inputs["MaxLabel"].value
            labelList = list(range(1, maxLabel+1)) if maxLabel > 0 else list()

            try:
                logger.debug("Learning with Vigra...")
                # train and store self._forest_count forests in parallel
                pool = RequestPool()

                for i in range(self._forest_count):
                    def train_and_store(number):
                        result[number] = vigra.learning.RandomForest(self._tree_count, labels=labelList)
                        result[number].learnRF( numpy.asarray(featMatrix, dtype=numpy.float32),
                                                numpy.asarray(labelsMatrix, dtype=numpy.uint32))
                    req = pool.request(partial(train_and_store, i))

                pool.wait()
                pool.clean()

                logger.debug("Vigra finished")
            except:
                logger.error( "ERROR: could not learn classifier" )
                logger.error( "featMatrix shape={}, max={}, dtype={}".format(featMatrix.shape, featMatrix.max(), featMatrix.dtype) )
                logger.error( "labelsMatrix shape={}, max={}, dtype={}".format(labelsMatrix.shape, labelsMatrix.max(), labelsMatrix.dtype ) )
                raise
            finally:
                self.progressSignal(100)

        return result
Example #12
    def execute(self, slot, subindex, roi, result):
        progress = 0
        self.progressSignal(progress)
        numImages = len(self.Images)

        featMatrix=[]
        labelsMatrix=[]
        tagsMatrix = []

        result[0] = SVR(self.UnderMult.value, self.OverMult.value, limitDensity = True, **self.SelectedOption.value)
        for i,labels in enumerate(self.inputs["Labels"]):
            if labels.meta.shape is not None:
                #labels=labels[:].wait()
                blocks = self.inputs["nonzeroLabelBlocks"][i][0].wait()

                progress += 10/numImages
                self.progressSignal(progress)

                reqlistlabels = []
                reqlistfeat = []
                traceLogger.debug("Sending requests for {} non-zero blocks (labels and data)".format( len(blocks[0])) )
                for b in blocks[0]:

                    request = labels[b]
                    featurekey = list(b)
                    featurekey[-1] = slice(None, None, None)
                    request2 = self.inputs["Images"][i][featurekey]

                    reqlistlabels.append(request)
                    reqlistfeat.append(request2)

                traceLogger.debug("Requests prepared")

                numLabelBlocks = len(reqlistlabels)
                progress_outer = [progress] # Store in list for closure access
                if numLabelBlocks > 0:
                    progressInc = (80-10)/numLabelBlocks/numImages

                def progressNotify(req):
                    # Note: If we wanted perfect progress reporting, we could use lock here
                    #       to protect the progress from being incremented simultaneously.
                    #       But that would slow things down and imperfect reporting is okay for our purposes.
                    progress_outer[0] += progressInc/2
                    self.progressSignal(progress_outer[0])

                for ir, req in enumerate(reqlistfeat):
                    req.notify_finished(progressNotify)

                for ir, req in enumerate(reqlistlabels):
                    req.notify_finished(progressNotify)

                traceLogger.debug("Requests fired")

                for ir, req in enumerate(reqlistlabels):
                    traceLogger.debug("Waiting for a label block...")
                    labblock = req.wait()

                    traceLogger.debug("Waiting for an image block...")
                    image = reqlistfeat[ir].wait()

                    newImg, newDot, mapping, tags = result[0].prepareData(
                        image, labblock, sigma = self.Sigma.value, smooth = True, normalize = False)

                    features = newImg[mapping]
                    labbla = newDot[mapping]

                    #indexes=np.nonzero(labblock[...,0].view(np.ndarray))
                    #features=image[indexes]
                    #labbla=labblock[indexes]

                    featMatrix.append(features)
                    labelsMatrix.append(labbla)
                    tagsMatrix.append(tags)

                progress = progress_outer[0]

                traceLogger.debug("Requests processed")

        self.progressSignal(80/numImages)

        if len(featMatrix) == 0 or len(labelsMatrix) == 0:
            # If there was no actual data for the random forest to train with, we return None
            result[:] = None
        else:
            featMatrix=np.concatenate(featMatrix,axis=0)
            labelsMatrix=np.concatenate(labelsMatrix,axis=0)
            tagsMatrix=np.concatenate(tagsMatrix,axis=0)

            try:
                logger.debug("Learning with Vigra...")

                pool = RequestPool()

                #result[0].fitPrepared(featMatrix, labelsMatrix, tagsMatrix, self.Epsilon.value)
                req = pool.request(partial(result[0].fitPrepared, featMatrix, labelsMatrix, tagsMatrix, self.Epsilon.value))
                pool.wait()
                pool.clean()

                logger.debug("Vigra finished")
            except:
                logger.error( "ERROR: could not learn classifier" )
                logger.error( "featMatrix shape={}, max={}, dtype={}".format(featMatrix.shape, featMatrix.max(), featMatrix.dtype) )
                logger.error( "labelsMatrix shape={}, max={}, dtype={}".format(labelsMatrix.shape, labelsMatrix.max(), labelsMatrix.dtype ) )
                raise
            finally:
                self.progressSignal(100)

        return result
Example #13
    def execute(self, slot, subindex, rroi, result):
        assert slot == self.Features or slot == self.Output
        if slot == self.Features:
            key = roiToSlice(rroi.start, rroi.stop)
            index = subindex[0]
            key = list(key)
            channelIndex = self.Input.meta.axistags.index('c')
            
            # Translate channel slice to the correct location for the output slot.
            key[channelIndex] = slice(self.featureOutputChannels[index][0] + key[channelIndex].start,
                                      self.featureOutputChannels[index][0] + key[channelIndex].stop)
            rroi = SubRegion(self.Output, pslice=key)

            # Get output slot region for this channel
            return self.execute(self.Output, (), rroi, result)
        elif slot == self.outputs["Output"]:
            key = rroi.toSlice()
            
            logger.debug("OpPixelFeaturesPresmoothed: request %s" % (rroi.pprint(),))
            
            cnt = 0
            written = 0
            assert (rroi.stop<=self.outputs["Output"].meta.shape).all()
            flag = 'c'
            channelAxis=self.inputs["Input"].meta.axistags.index('c')
            axisindex = channelAxis
            oldkey = list(key)
            oldkey.pop(axisindex)


            inShape  = self.inputs["Input"].meta.shape
            hasChannelAxis = (self.Input.meta.axistags.axisTypeCount(vigra.AxisType.Channels) > 0)
            #if (self.Input.meta.axistags.axisTypeCount(vigra.AxisType.Channels) == 0):
            #    noChannels = True
            inAxistags = self.inputs["Input"].meta.axistags
                
            shape = self.outputs["Output"].meta.shape
            axistags = self.outputs["Output"].meta.axistags

            result = result.view(vigra.VigraArray)
            result.axistags = copy.copy(axistags)


            hasTimeAxis = self.inputs["Input"].meta.axistags.axisTypeCount(vigra.AxisType.Time)
            timeAxis=self.inputs["Input"].meta.axistags.index('t')

            subkey = popFlagsFromTheKey(key,axistags,'c')
            subshape=popFlagsFromTheKey(shape,axistags,'c')
            at2 = copy.copy(axistags)
            at2.dropChannelAxis()
            subshape=popFlagsFromTheKey(subshape,at2,'t')
            subkey = popFlagsFromTheKey(subkey,at2,'t')

            oldstart, oldstop = roi.sliceToRoi(key, shape)

            start, stop = roi.sliceToRoi(subkey, subshape)
            maxSigma = max(0.7,self.maxSigma)  #we use 0.7 as an approximation of not doing any smoothing
            #smoothing was already applied previously
            
            # The region of the smoothed image we need to give to the feature filter (in terms of INPUT coordinates)
            # 0.7, because the features receive a pre-smoothed array and don't need much of a neighborhood 
            vigOpSourceStart, vigOpSourceStop = roi.enlargeRoiForHalo(start, stop, subshape, 0.7, self.WINDOW_SIZE)
            
            
            # The region of the input that we need to give to the smoothing operator (in terms of INPUT coordinates)
            newStart, newStop = roi.enlargeRoiForHalo(vigOpSourceStart, vigOpSourceStop, subshape, maxSigma, self.WINDOW_SIZE)
            
            newStartSmoother = roi.TinyVector(start - vigOpSourceStart)
            newStopSmoother = roi.TinyVector(stop - vigOpSourceStart)
            roiSmoother = roi.roiToSlice(newStartSmoother, newStopSmoother)

            # Translate coordinates (now in terms of smoothed image coordinates)
            vigOpSourceStart = roi.TinyVector(vigOpSourceStart - newStart)
            vigOpSourceStop = roi.TinyVector(vigOpSourceStop - newStart)

            readKey = roi.roiToSlice(newStart, newStop)

            writeNewStart = start - newStart
            writeNewStop = writeNewStart +  stop - start

            treadKey=list(readKey)

            if hasTimeAxis:
                if timeAxis < channelAxis:
                    treadKey.insert(timeAxis, key[timeAxis])
                else:
                    treadKey.insert(timeAxis-1, key[timeAxis])
            if  self.inputs["Input"].meta.axistags.axisTypeCount(vigra.AxisType.Channels) == 0:
                treadKey =  popFlagsFromTheKey(treadKey,axistags,'c')
            else:
                treadKey.insert(channelAxis, slice(None,None,None))

            treadKey=tuple(treadKey)

            req = self.inputs["Input"][treadKey]
            
            sourceArray = req.wait()
            req.clean()
            #req.result = None
            req.destination = None
            if sourceArray.dtype != numpy.float32:
                sourceArrayF = sourceArray.astype(numpy.float32)
                try:
                    sourceArray.resize((1,), refcheck = False)
                except Exception:
                    pass
                del sourceArray
                sourceArray = sourceArrayF
                
            #if (self.Input.meta.axistags.axisTypeCount(vigra.AxisType.Channels) == 0):
                #add a channel dimension to make the code afterwards more uniform
            #    sourceArray = sourceArray.view(numpy.ndarray)
            #    sourceArray = sourceArray.reshape(sourceArray.shape+(1,))
            sourceArrayV = sourceArray.view(vigra.VigraArray)
            sourceArrayV.axistags =  copy.copy(inAxistags)
            
            dimCol = len(self.scales)
            dimRow = self.matrix.shape[0]

            sourceArraysForSigmas = [None]*dimCol

            #connect individual operators
            try:
                for j in range(dimCol):
                    hasScale = False
                    for i in range(dimRow):
                        if self.matrix[i,j]:
                            hasScale = True
                    if not hasScale:
                        continue
                    destSigma = 1.0
                    if self.scales[j] > destSigma:
                        tempSigma = math.sqrt(self.scales[j]**2 - destSigma**2)
                    else:
                        destSigma = 0.0
                        tempSigma = self.scales[j]
                    vigOpSourceShape = list(vigOpSourceStop - vigOpSourceStart)
                                        
                    if hasTimeAxis:
                        if timeAxis < channelAxis:
                            vigOpSourceShape.insert(timeAxis, ( oldstop - oldstart)[timeAxis])
                        else:
                            vigOpSourceShape.insert(timeAxis-1, ( oldstop - oldstart)[timeAxis])
                        vigOpSourceShape.insert(channelAxis, inShape[channelAxis])
    
                        sourceArraysForSigmas[j] = numpy.ndarray(tuple(vigOpSourceShape),numpy.float32)
                        
                        for i,vsa in enumerate(sourceArrayV.timeIter()):
                            droi = (tuple(vigOpSourceStart._asint()), tuple(vigOpSourceStop._asint()))
                            tmp_key = getAllExceptAxis(len(sourceArraysForSigmas[j].shape),timeAxis, i) 
                            sourceArraysForSigmas[j][tmp_key] = self._computeGaussianSmoothing(vsa, tempSigma, droi)

                    else:
                        droi = (tuple(vigOpSourceStart._asint()), tuple(vigOpSourceStop._asint()))                            
                        sourceArraysForSigmas[j] = self._computeGaussianSmoothing(sourceArrayV, tempSigma, droi)
            
            except RuntimeError as e:
                if 'kernel longer than line' in str(e):
                    message = "Feature computation error:\nYour image is too small to apply a filter with sigma=%.1f. Please select features with smaller sigmas." % self.scales[j]
                    raise RuntimeError(message)
                else:
                    raise e

            del sourceArrayV
            try:
                sourceArray.resize((1,), refcheck = False)
            except ValueError:
                # Sometimes this fails, but that's okay.
                logger.debug("Failed to free array memory.")                
            del sourceArray

            closures = []

            #connect individual operators
            for i in range(dimRow):
                for j in range(dimCol):
                    val=self.matrix[i,j]
                    if val:
                        vop= self.featureOps[i][j]
                        oslot = vop.outputs["Output"]
                        req = None
                        #inTagKeys = [ax.key for ax in oslot.meta.axistags]
                        #print inTagKeys, flag
                        if hasChannelAxis:
                            slices = oslot.meta.shape[axisindex]
                            if cnt + slices >= rroi.start[axisindex] and rroi.start[axisindex]-cnt<slices and rroi.start[axisindex]+written<rroi.stop[axisindex]:
                                begin = 0
                                if cnt < rroi.start[axisindex]:
                                    begin = rroi.start[axisindex] - cnt
                                end = slices
                                if cnt + end > rroi.stop[axisindex]:
                                    end -= cnt + end - rroi.stop[axisindex]
                                key_ = copy.copy(oldkey)
                                key_.insert(axisindex, slice(begin, end, None))
                                reskey = [slice(None, None, None) for x in range(len(result.shape))]
                                reskey[axisindex] = slice(written, written+end-begin, None)
                                
                                destArea = result[tuple(reskey)]
                                #readjust the roi for the new source array
                                roiSmootherList = list(roiSmoother)
                                
                                roiSmootherList.insert(axisindex, slice(begin, end, None))
                                
                                if hasTimeAxis:
                                    # The time slice in the ROI doesn't matter:
                                    # The sourceArrayParameter below overrides the input data to be used.
                                    roiSmootherList.insert(timeAxis, 0)
                                roiSmootherRegion = SubRegion(oslot, pslice=roiSmootherList)
                                
                                closure = partial(oslot.operator.execute, oslot, (), roiSmootherRegion, destArea, sourceArray = sourceArraysForSigmas[j])
                                closures.append(closure)

                                written += end - begin
                            cnt += slices
                        else:
                            if cnt>=rroi.start[axisindex] and rroi.start[axisindex] + written < rroi.stop[axisindex]:
                                reskey = [slice(None, None, None) for x in range(len(result.shape))]
                                slices = oslot.meta.shape[axisindex]
                                reskey[axisindex]=slice(written, written+slices, None)
                                #print "key: ", key, "reskey: ", reskey, "oldkey: ", oldkey, "resshape:", result.shape
                                #print "roiSmoother:", roiSmoother
                                destArea = result[tuple(reskey)]
                                #print "destination area:", destArea.shape
                                logger.debug("%s %s %s" % (oldkey, destArea.shape, sourceArraysForSigmas[j].shape))
                                oldroi = SubRegion(oslot, pslice=oldkey)
                                #print "passing roi:", oldroi
                                closure = partial(oslot.operator.execute, oslot, (), oldroi, destArea, sourceArray = sourceArraysForSigmas[j])
                                closures.append(closure)

                                written += 1
                            cnt += 1
            pool = RequestPool()
            for c in closures:
                r = pool.request(c)
            pool.wait()
            pool.clean()

            for i in range(len(sourceArraysForSigmas)):
                if sourceArraysForSigmas[i] is not None:
                    try:
                        sourceArraysForSigmas[i].resize((1,))
                    except Exception:
                        sourceArraysForSigmas[i] = None