Example #1
class DarkCurrentMap(Iteratives):
    '''
    Averages the given background images
    while removing single-time effects (STE)
    '''
    
    def __init__(self, twoImages, noise_level_function=None, 
                 calcVariance=False, **kwargs):
        Iteratives.__init__(self, **kwargs)
        
        assert len(twoImages) > 1, 'need at least 2 images'

        self.det = SingleTimeEffectDetection(twoImages, noise_level_function, nStd=3)
        self._map = MaskedMovingAverage(shape=twoImages[0].shape, calcVariance=calcVariance)
        self._map.update(self.det.noSTE)


    def addImg(self, img, raiseIfConvergence=False):
        self._map.update(img, self.det.addImage(img).mask_clean)
        if raiseIfConvergence:
            return self.checkConvergence(self._map.var**0.5)


    def map(self):
        return self._map.avg
    
    
    def uncertaintyMap(self):
        return self._map.var**0.5


    def uncertainty(self):
        return np.mean(self._map.var)**0.5
def flatFieldFromCalibration(bgImages, images, calcStd=False):
    '''
    returns a flat-field correction map
    from a conditional average of multiple images, each reduced by a background image

    calcStd -> set to True to also return the standard deviation map
    '''
    #AVERAGE BACKGROUND IMAGES IF MULTIPLE ARE GIVEN:
    if (type(bgImages) in (tuple, list)
            or type(bgImages) is np.ndarray and bgImages.ndim == 3):
        if len(bgImages) > 1:
            avgBg = averageSameExpTimes(bgImages)
        else:
            avgBg = imread(bgImages[0])
    else:
        avgBg = imread(bgImages)

    i0 = imread(images[0]) - avgBg
    noise_level_function, _ = oneImageNLF(i0)

    m = MaskedMovingAverage(shape=i0.shape, calcVariance=calcStd)
    m.update(i0)
    
    for i in images[1:]:
        i = imread(i)
        thresh = m.avg - noise_level_function(m.avg) * 3
        m.update(i, i > thresh)

    mx = m.avg.max()
    if calcStd:
        return m.avg / mx, m.var**0.5 / mx
    return m.avg / mx
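
A minimal usage sketch for the flatFieldFromCalibration() helper above; the file names are hypothetical, and imread() is assumed to come from the same imgProcessor-style environment as the rest of the example:

# hypothetical calibration frames: two dark frames and three bright flat frames
bg_paths = ['dark_0.tif', 'dark_1.tif']
flat_paths = ['flat_0.tif', 'flat_1.tif', 'flat_2.tif']

# the returned map is normalised to its maximum; with calcStd=True a
# per-pixel standard-deviation map (same normalisation) is returned as well
ff, ff_std = flatFieldFromCalibration(bg_paths, flat_paths, calcStd=True)

# flat-field correction of a raw frame would then be a per-pixel division, e.g.:
# corrected = (imread('raw.tif') - imread('dark_0.tif')) / ff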
Example #3
class DarkCurrentMap(Iteratives):
    '''
    Averages the given background images
    while removing single-time effects (STE)
    '''
    def __init__(self,
                 twoImages,
                 noise_level_function=None,
                 calcVariance=False,
                 **kwargs):
        Iteratives.__init__(self, **kwargs)

        assert len(twoImages) > 1, 'need at least 2 images'

        self.det = SingleTimeEffectDetection(twoImages,
                                             noise_level_function,
                                             nStd=3)
        self._map = MaskedMovingAverage(shape=twoImages[0].shape,
                                        calcVariance=calcVariance)
        self._map.update(self.det.noSTE)

    def addImg(self, img, raiseIfConvergence=False):
        self._map.update(img, self.det.addImage(img).mask_clean)
        if raiseIfConvergence:
            return self.checkConvergence(self._map.var**0.5)

    def map(self):
        return self._map.avg

    def uncertaintyMap(self):
        return self._map.var**0.5

    def uncertainty(self):
        return np.mean(self._map.var)**0.5
    def __init__(self,
                 images,
                 noise_level_function=None,
                 nStd=4,
                 save_ste_indices=False,
                 calcVariance=False,
                 dtype=float):
        self.save_ste_indices = save_ste_indices

        i1 = imread(images[0], 'gray', dtype=dtype)
        i2 = imread(images[1], 'gray')

        self.mask_STE = None
        if save_ste_indices:
            self.mask_STE = np.zeros(shape=i1.shape, dtype=bool)

        self.mma = MaskedMovingAverage(shape=i1.shape,
                                       calcVariance=calcVariance,
                                       dtype=i1.dtype)

        # MINIMUM OF BOTH IMAGES:
        self.mma.update(np.min((i1, i2), axis=0))

        if noise_level_function is None:
            noise_level_function = oneImageNLF(self.mma.avg)[0]
        self.noise_level_function = noise_level_function
        self.threshold = noise_level_function(self.mma.avg) * nStd

        self.addImage(np.max((i1, i2), axis=0))

        for i in images[2:]:
            self.addImage(imread(i, 'gray'))
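
All of these snippets revolve around MaskedMovingAverage, whose implementation is not shown on this page. Judging from the calls above (update(img, mask), .avg, .var, .n and the shape/calcVariance/dtype keywords), it behaves like a per-pixel running mean over the masked pixels. A minimal, purely hypothetical sketch of such a class:

import numpy as np

class MaskedMovingAverageSketch(object):
    # hypothetical stand-in mirroring the interface used in the examples
    def __init__(self, shape, calcVariance=False, dtype=float):
        self.avg = np.zeros(shape, dtype=dtype)   # per-pixel running mean
        self.n = np.zeros(shape, dtype=int)       # per-pixel number of updates
        self.calcVariance = calcVariance
        self.var = np.zeros(shape, dtype=dtype) if calcVariance else None
        self._m2 = np.zeros(shape, dtype=dtype)   # running sum of squared deviations

    def update(self, img, mask=None):
        # if no mask is given, include every pixel (several calls above omit it)
        if mask is None:
            mask = np.ones(self.avg.shape, dtype=bool)
        self.n[mask] += 1
        delta = img[mask] - self.avg[mask]
        self.avg[mask] += delta / self.n[mask]
        if self.calcVariance:
            # Welford-style update; sample variance where n > 1
            self._m2[mask] += delta * (img[mask] - self.avg[mask])
            self.var[mask] = self._m2[mask] / np.maximum(self.n[mask] - 1, 1)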
Example #5
    def _firstImg(self, img):

        if self.scale_factor is None:
            # determine so that the smaller image dimension has 100 px
            self.scale_factor = 100 / min(img.shape)
        img = rescale(img, self.scale_factor)

        self._m = MaskedMovingAverage(shape=img.shape)
        if self.ksize is None:
            self.ksize = max(3, int(min(img.shape) / 10))
        self._first = False
        return img
    def __init__(self, img, bg=None, maxDev=1e-4, maxIter=10, remove_border_size=0,
                 # feature_size=5,
                 cameraMatrix=None, distortionCoeffs=None):  # 20
        """
        Args:
            img (path or array): Reference image
        Kwargs:
            bg (path or array): background image - same for all given images
            maxDev (float): Relative deviation between the last two iteration steps
                            Stop iterative refinement, if deviation is smaller
            maxIter (int): Stop iterative refinement after maxIter steps
        """
        self.lens = None
        if cameraMatrix is not None:
            self.lens = LensDistortion()
            self.lens._coeffs['distortionCoeffs'] = distortionCoeffs
            self.lens._coeffs['cameraMatrix'] = cameraMatrix

        self.maxDev = maxDev
        self.maxIter = maxIter
        self.remove_border_size = remove_border_size
        #self.feature_size = feature_size
        img = imread(img, 'gray')

        self.bg = bg
        if bg is not None:
            self.bg = getBackground(bg)
            if not isinstance(self.bg, np.ndarray):
                self.bg = np.full_like(img, self.bg, dtype=img.dtype)
            else:
                self.bg = self.bg.astype(img.dtype)
            img = cv2.subtract(img, self.bg)

        if self.lens is not None:
            img = self.lens.correct(img, keepSize=True)
        # CREATE TEMPLATE FOR PATTERN COMPARISON:
        pos = self._findObject(img)
        self.obj_shape = img[pos].shape

        PatternRecognition.__init__(self, img[pos])

        self._ff_mma = MaskedMovingAverage(shape=img.shape,
                                           dtype=np.float64)

        self.object = None

        self.Hs = []    # Homography matrices of all fitted images
        self.Hinvs = []  # same, but inverse
        self.fits = []  # all images, fitted to the reference
        self._fit_masks = []

        self._refined = False
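
The constructor above only wires up a LensDistortion instance when a cameraMatrix is given, and it undistorts the reference image before the object template is extracted. A hedged construction sketch with hypothetical calibration values and file names:

import numpy as np

# hypothetical pinhole camera matrix and distortion coefficients
cameraMatrix = np.array([[1.2e3, 0.0, 640.0],
                         [0.0, 1.2e3, 480.0],
                         [0.0, 0.0, 1.0]])
distortionCoeffs = np.array([-0.10, 0.02, 0.0, 0.0, 0.0])

# bg may be a path or an array (see the docstring above); it is subtracted
# from the reference image before the object template is located
o = ObjectVignettingSeparation('reference.tif', bg='dark.tif',
                               cameraMatrix=cameraMatrix,
                               distortionCoeffs=distortionCoeffs)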
Example #7
    def __init__(self,
                 twoImages,
                 noise_level_function=None,
                 calcVariance=False,
                 **kwargs):
        Iteratives.__init__(self, **kwargs)

        assert len(twoImages) > 1, 'need at least 2 images'

        self.det = SingleTimeEffectDetection(twoImages,
                                             noise_level_function,
                                             nStd=3)
        self._map = MaskedMovingAverage(shape=twoImages[0].shape,
                                        calcVariance=calcVariance)
        self._map.update(self.det.noSTE)
    def __init__(self, images, noise_level_function=None, nStd=4,
                 save_ste_indices=False, calcVariance=False, dtype=float):
        self.save_ste_indices = save_ste_indices

        i1 = imread(images[0], 'gray', dtype=dtype)
        i2 = imread(images[1], 'gray')

        self.mask_STE = None
        if save_ste_indices:
            self.mask_STE = np.zeros(shape=i1.shape, dtype=bool)

        self.mma = MaskedMovingAverage(shape=i1.shape,
                                       calcVariance=calcVariance,
                                       dtype=i1.dtype)

        # MINIMUM OF BOTH IMAGES:
        self.mma.update(np.min((i1, i2), axis=0))

        if noise_level_function is None:
            noise_level_function = oneImageNLF(self.mma.avg)[0]
        self.noise_level_function = noise_level_function
        self.threshold = noise_level_function(self.mma.avg) * nStd

        self.addImage(np.max((i1, i2), axis=0))

        for i in images[2:]:
            self.addImage(imread(i, 'gray'))
Example #9
    def __init__(self, twoImages, noise_level_function=None, 
                 calcVariance=False, **kwargs):
        Iteratives.__init__(self, **kwargs)
        
        assert len(twoImages) > 1, 'need at least 2 images'

        self.det = SingleTimeEffectDetection(twoImages, noise_level_function, nStd=3)
        self._map = MaskedMovingAverage(shape=twoImages[0].shape, calcVariance=calcVariance)
        self._map.update(self.det.noSTE)
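
Note that DarkCurrentMap expects image arrays rather than file paths, since twoImages[0].shape is accessed directly in the constructor above. A hedged driving loop using the methods from Example #1 (file names are hypothetical):

# read all dark frames up front
dark_frames = [imread(p, 'gray', dtype=float)
               for p in ('dark_0.tif', 'dark_1.tif', 'dark_2.tif', 'dark_3.tif')]

d = DarkCurrentMap(dark_frames[:2], calcVariance=True)
for img in dark_frames[2:]:
    d.addImg(img)   # raiseIfConvergence=True would additionally run checkConvergence()

dark_map = d.map()               # STE-free averaged dark-current map
sigma_map = d.uncertaintyMap()   # per-pixel standard deviation (needs calcVariance=True)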
Example #10
    def addImg(self, i):
        img = imread(i, 'gray', dtype=float)
        img -= self.bg
        self._orig_shape = img.shape

        if self.scale_factor is None:
            # determine so that the smaller image dimension has 100 px
            self.scale_factor = 100.0 / min(img.shape)
        s = [int(s * self.scale_factor) for s in img.shape]

        img = resize(img, s)

        if self._m is None:
            self._m = MaskedMovingAverage(shape=img.shape)
            if self.ksize is None:
                self.ksize = max(3, int(min(img.shape) / 10))

        f = FitHistogramPeaks(img)
        sp = getSignalPeak(f.fitParams)

        # non-background indices:
        ind = img > sp[1] - self.nstd * sp[2]
        # blur:
        blurred = minimum_filter(img, 3)
        blurred = maximum_filter(blurred, self.ksize)
        gblurred = gaussian_filter(blurred, self.ksize)
        blurred[ind] = gblurred[ind]

        # scale [0-1]:
        mn = img[~ind].mean()
        if np.isnan(mn):
            mn = 0
        mx = blurred.max()
        blurred -= mn
        blurred /= (mx - mn)

        ind = blurred > self._m.avg

        self._m.update(blurred, ind)
        self.bglevel += mn
        self._mx += mx

        self._n += 1
def flatFieldFromCalibration(bgImages, images, calcStd=False):
    '''
    returns a flat-field correction map
    from a conditional average of multiple images, each reduced by a background image

    calcStd -> set to True to also return the standard deviation map
    '''
    #AVERAGE BACKGROUND IMAGES IF MULTIPLE ARE GIVEN:
    if (type(bgImages) in (tuple, list)
            or type(bgImages) is np.ndarray and bgImages.ndim == 3):
        if len(bgImages) > 1:
            avgBg = averageSameExpTimes(bgImages)
        else:
            avgBg = imread(bgImages[0])
    else:
        avgBg = imread(bgImages)

    i0 = imread(images[0]) - avgBg
    noise_level_function, _ = oneImageNLF(i0)

    m = MaskedMovingAverage(shape=i0.shape, calcVariance=calcStd)
    m.update(i0)

    for i in images[1:]:
        i = imread(i)
        thresh = m.avg - noise_level_function(m.avg) * 3
        m.update(i, i > thresh)

    mx = m.avg.max()
    if calcStd:
        return m.avg / mx, m.var**0.5 / mx
    return m.avg / mx
    def __next__(self):
        # THE IMAGED OBJECT WILL BE AVERAGED FROM ALL
        # INDIVIDUAL IMAGES SHOWING THIS OBJECT FROM DIFFERENT POSITIONS:
        obj = MaskedMovingAverage(shape=self.obj_shape)

        with np.errstate(divide='ignore', invalid='ignore'):
            for f, h in zip(self.fits, self.Hinvs):
                warpedflatField = cv2.warpPerspective(self.flatField,
                                                      h, (f.shape[1], f.shape[0]))
                obj.update(f / warpedflatField, warpedflatField != 0)

        self.object = obj.avg

        # THE NEW flatField WILL BE OBTAINED FROM THE WARPED QUOTIENT
        # OF EVERY IMAGE AND THE ESTIMATED IMAGE OBJECT
        sh = self.flatField.shape
        s = MaskedMovingAverage(shape=sh)

        for f, mask, h in zip(self.fits, self._fit_masks, self.Hs):
            div = f / self.object
            # ->do not interpolate between background and image border
            div[mask] = np.nan
            div = cv2.warpPerspective(div, h, (sh[1], sh[0]),  # borderMode=cv2.BORDER_TRANSPARENT
                                      )
            div = np.nan_to_num(div)
            s.update(div, div != 0)

        new_flatField = s.avg

        # STOP ITERATION?
        # RMSE excluding NaNs:
        dev = np.nanmean((new_flatField[::10, ::10] -
                          self.flatField[::10, ::10])**2)**0.5
        print('residuum: %s' % dev)
        if self.n >= self.maxIter or (self._last_dev and (
                (self.n > 4 and dev > self._last_dev) or
                dev < self.maxDev)):
            raise StopIteration

        # remove erroneous values:
        self.flatField = np.clip(new_flatField, 0, 1)

        self.n += 1
        self._last_dev = dev
        return self.n
Example #13
    def addImg(self, i):
        img = imread(i, 'gray', dtype=float)
        img -= self.bg
        self._orig_shape = img.shape

        if self.scale_factor is None:
            # determine so that the smaller image dimension has 100 px
            self.scale_factor = 100.0 / min(img.shape)
        s = [int(s * self.scale_factor) for s in img.shape]

        img = resize(img, s)

        if self._m is None:
            self._m = MaskedMovingAverage(shape=img.shape)
            if self.ksize is None:
                self.ksize = max(3, int(min(img.shape) / 10))

        f = FitHistogramPeaks(img)
        sp = getSignalPeak(f.fitParams)

        # non-background indices:
        ind = img > sp[1] - self.nstd * sp[2]
        # blur:
        blurred = minimum_filter(img, 3)
        blurred = maximum_filter(blurred, self.ksize)
        gblurred = gaussian_filter(blurred, self.ksize)
        blurred[ind] = gblurred[ind]

        # scale [0-1]:
        mn = img[~ind].mean()
        if np.isnan(mn):
            mn = 0
        mx = blurred.max()
        blurred -= mn
        blurred /= (mx - mn)

        ind = blurred > self._m.avg

        self._m.update(blurred, ind)
        self.bglevel += mn
        self._mx += mx

        self._n += 1
class SingleTimeEffectDetection(object):
    '''
    Detect and remove Single-time-effects (STE) using min. 2 equivalent images
    public attributes:

    .mask_clean --> STE-free indices
    .mask_STE   --> STE indices (only avail. if save_ste_indices=True)
    .noSTE      --> STE free average image
    '''
    def __init__(self,
                 images,
                 noise_level_function=None,
                 nStd=4,
                 save_ste_indices=False,
                 calcVariance=False,
                 dtype=float):
        self.save_ste_indices = save_ste_indices

        i1 = imread(images[0], 'gray', dtype=dtype)
        i2 = imread(images[1], 'gray')

        self.mask_STE = None
        if save_ste_indices:
            self.mask_STE = np.zeros(shape=i1.shape, dtype=bool)

        self.mma = MaskedMovingAverage(shape=i1.shape,
                                       calcVariance=calcVariance,
                                       dtype=i1.dtype)

        # MINIMUM OF BOTH IMAGES:
        self.mma.update(np.min((i1, i2), axis=0))

        if noise_level_function is None:
            noise_level_function = oneImageNLF(self.mma.avg)[0]
        self.noise_level_function = noise_level_function
        self.threshold = noise_level_function(self.mma.avg) * nStd

        self.addImage(np.max((i1, i2), axis=0))

        for i in images[2:]:
            self.addImage(imread(i, 'gray'))

    @property
    def noSTE(self):
        return self.mma.avg

    def addImage(self, image, mask=None):
        '''
        add a further image to the running average
        mask -- optional boolean array of pixels to include
        '''
        self._last_diff = diff = image - self.noSTE

        ste = diff > self.threshold
        removeSinglePixels(ste)

        self.mask_clean = clean = ~ste

        if mask is not None:
            clean = np.logical_and(mask, clean)

        self.mma.update(image, clean)

        if self.save_ste_indices:
            self.mask_STE += ste

        return self

    def countSTE(self):
        '''
        return number of found STE
        '''
        return label(self.mask_STE)[1]

    def relativeAreaSTE(self):
        '''
        return STE area - relative to image area
        '''
        s = self.noSTE.shape
        return np.sum(self.mask_STE) / (s[0] * s[1])

    def intensityDistributionSTE(self, bins=10, range=None):
        '''
        return distribution of STE intensity
        '''
        v = np.abs(self._last_diff[self.mask_STE])
        return np.histogram(v, bins, range)
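
SingleTimeEffectDetection can also be used on its own; a short hedged sketch using only the public attributes and methods defined above (file names are hypothetical, and save_ste_indices=True is required for the STE statistics):

det = SingleTimeEffectDetection(('shot_0.tif', 'shot_1.tif', 'shot_2.tif'),
                                nStd=4, save_ste_indices=True)

clean_avg = det.noSTE                                   # STE-free average image
print('number of STE found: %i' % det.countSTE())
print('relative STE area: %.5f' % det.relativeAreaSTE())
hist, bin_edges = det.intensityDistributionSTE(bins=20)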
class ObjectVignettingSeparation(PatternRecognition):
    """
    If an imaged object is superimposed by a flat field map
    (often determined by vignetting) the actual object signal can
    be separated from the cameras flatField using multiple imaged of the object
    at different positions. For this the following steps are needed:
    1. Set the first given image as reference.
   For every other image ... ( .addImg() )
    2. Calculate translation, rotation, shear - difference through pattern recognition
    3. Warp every image in order to fit the reference one.
    4. Set an initial flatField image from the local maximum of every image
   Iterate:
    5. Divide every warped image by its flatField.
    6. Define .object as the average of all fitted and flatField corrected images
    7. Extract .flatField as the ratio of (fitted) .object to every given image

    Usage:

    >>> o = ObjectFlatFieldSeparation(ref_img)
    >>> for img in imgs:
    >>>     o.addImg(img)
    >>> flatField, obj = o.separate()
    """

    def __init__(self, img, bg=None, maxDev=1e-4, maxIter=10, remove_border_size=0,
                 # feature_size=5,
                 cameraMatrix=None, distortionCoeffs=None):  # 20
        """
        Args:
            img (path or array): Reference image
        Kwargs:
            bg (path or array): background image - same for all given images
            maxDev (float): Relative deviation between the last two iteration steps
                            Stop iterative refinement, if deviation is smaller
            maxIter (int): Stop iterative refinement after maxIter steps
        """
        self.lens = None
        if cameraMatrix is not None:
            self.lens = LensDistortion()
            self.lens._coeffs['distortionCoeffs'] = distortionCoeffs
            self.lens._coeffs['cameraMatrix'] = cameraMatrix

        self.maxDev = maxDev
        self.maxIter = maxIter
        self.remove_border_size = remove_border_size
        #self.feature_size = feature_size
        img = imread(img, 'gray')

        self.bg = bg
        if bg is not None:
            self.bg = getBackground(bg)
            if not isinstance(self.bg, np.ndarray):
                self.bg = np.full_like(img, self.bg, dtype=img.dtype)
            else:
                self.bg = self.bg.astype(img.dtype)
            img = cv2.subtract(img, self.bg)

        if self.lens is not None:
            img = self.lens.correct(img, keepSize=True)
        # CREATE TEMPLATE FOR PATTERN COMPARISON:
        pos = self._findObject(img)
        self.obj_shape = img[pos].shape

        PatternRecognition.__init__(self, img[pos])

        self._ff_mma = MaskedMovingAverage(shape=img.shape,
                                           dtype=np.float64)

        self.object = None

        self.Hs = []    # Homography matrices of all fitted images
        self.Hinvs = []  # same, but inverse
        self.fits = []  # all images, fitted to the reference
        self._fit_masks = []

        self._refined = False

    # TODO: remove that property?
    @property
    def flatField(self):
        return self._ff_mma.avg

    @flatField.setter
    def flatField(self, arr):
        self._ff_mma.avg = arr

    def addImg(self, img, maxShear=0.015, maxRot=100, minMatches=12,
               borderWidth=3):  # borderWidth=100
        """
        Args:
            img (path or array): image containing the same object as in the reference image
        Kwargs:
            maxShear (float): In order to define a good fit, reject fits with a higher shear
                              between this and the reference image
            maxRot (float): Same for rotation
            minMatches (int): Minimum number of matching points found in both this and the reference image
        """
        try:
            fit, img, H, H_inv, nmatched = self._fitImg(img)
        except Exception as e:
            print(e)
            return

        # CHECK WHETHER FIT IS GOOD ENOUGH:
        (translation, rotation, scale, shear) = decompHomography(H)
        print('Homography ...\n\ttranslation: %s\n\trotation: %s\n\tscale: %s\n\tshear: %s'
              % (translation, rotation, scale, shear))
        if (nmatched > minMatches
                and abs(shear) < maxShear
                and abs(rotation) < maxRot):
            print('==> img added')
            # HOMOGRAPHY:
            self.Hs.append(H)
            # INVERSE HOMOGRAPHY
            self.Hinvs.append(H_inv)
            # IMAGES WARPED TO THE BASE IMAGE
            self.fits.append(fit)
            # ADD IMAGE TO THE INITIAL flatField ARRAY:
            i = img > self.signal_ranges[-1][0]

            # remove borders (that might have erroneous light):
            i = minimum_filter(i, borderWidth)

            self._ff_mma.update(img, i)

            # create fit img mask:
            mask = fit < self.signal_ranges[-1][0]
            mask = maximum_filter(mask, borderWidth)
            # IGNORE BORDER
            r = self.remove_border_size
            if r:
                mask[:r, :] = 1
                mask[-r:, :] = 1
                mask[:, -r:] = 1
                mask[:, :r] = 1
            self._fit_masks.append(mask)

            # image added
            return fit
        return False

    def error(self, nCells=15):
        '''
        calculate the standard deviation of all fitted images, 
        averaged to a grid
        '''
        s0, s1 = self.fits[0].shape
        aR = s0 / s1
        if aR > 1:
            ss0 = int(nCells)
            ss1 = int(ss0 / aR)
        else:
            ss1 = int(nCells)
            ss0 = int(ss1 * aR)
        L = len(self.fits)

        arr = np.array(self.fits)
        arr[np.array(self._fit_masks)] = np.nan
        avg = np.tile(np.nanmean(arr, axis=0), (L, 1, 1))
        arr = (arr - avg) / avg

        out = np.empty(shape=(L, ss0, ss1))

        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=RuntimeWarning)

            for n, f in enumerate(arr):
                out[n] = subCell2DFnArray(f, np.nanmean, (ss0, ss1))

        return np.nanmean(out**2)**0.5

    def separate(self):
        self.flatField = self._createInitialflatField()

        # todo: remove following
#         self.init_ff = self.flatField.copy()

        for step in self:
            print('iteration step %s/%s' % (step, self.maxIter))

        # TODO: remove smooth from here - better done in post-processing.
        smoothed_ff, mask = self.smooth()

        if self.lens is not None:
            smoothed_ff = self.lens.distortImage(smoothed_ff)
            mask = self.lens.distortImage(mask.astype(np.uint8)).astype(bool)

        return smoothed_ff, mask, self.flatField, self.object

    def smooth(self):
        # TODO: there should be no NaN in the ff img, or?
        mask = self.flatField == 0
        from skimage.filters.rank import median, mean
        from skimage.morphology import disk

        ff = mean(median(self.flatField, disk(5), mask=~mask),
                  disk(13), mask=~mask)

        return ff.astype(float) / ff.max(), mask

    def __iter__(self):
        # use iteration to refine the flatField array

        # keep track of deviation between two iteration steps
        # for break criterion:
        self._last_dev = None
        self.n = 0  # iteration number

        return self

    def __next__(self):
        # THE IMAGED OBJECT WILL BE AVERAGED FROM ALL
        # INDIVIDUAL IMAGES SHOWING THIS OBJECT FROM DIFFERENT POSITIONS:
        obj = MaskedMovingAverage(shape=self.obj_shape)

        with np.errstate(divide='ignore', invalid='ignore'):
            for f, h in zip(self.fits, self.Hinvs):
                warpedflatField = cv2.warpPerspective(self.flatField,
                                                      h, (f.shape[1], f.shape[0]))
                obj.update(f / warpedflatField, warpedflatField != 0)

        self.object = obj.avg

        # THE NEW flatField WILL BE OBTAINED FROM THE WARPED QUOTIENT
        # OF EVERY IMAGE AND THE ESTIMATED IMAGE OBJECT
        sh = self.flatField.shape
        s = MaskedMovingAverage(shape=sh)

        for f, mask, h in zip(self.fits, self._fit_masks, self.Hs):
            div = f / self.object
            # ->do not interpolate between background and image border
            div[mask] = np.nan
            div = cv2.warpPerspective(div, h, (sh[1], sh[0]),  # borderMode=cv2.BORDER_TRANSPARENT
                                      )
            div = np.nan_to_num(div)
            s.update(div, div != 0)

        new_flatField = s.avg

        # STOP ITERATION?
        # RMSE excluding NaNs:
        dev = np.nanmean((new_flatField[::10, ::10] -
                          self.flatField[::10, ::10])**2)**0.5
        print('residuum: %s' % dev)
        if self.n >= self.maxIter or (self._last_dev and (
                (self.n > 4 and dev > self._last_dev) or
                dev < self.maxDev)):
            raise StopIteration

        # remove erroneous values:
        self.flatField = np.clip(new_flatField, 0, 1)

        self.n += 1
        self._last_dev = dev
        return self.n

    def _createInitialflatField(self, downscale_size=9):
        s0, s1 = self.flatField.shape
        f = int(max(s0, s1) / downscale_size)
        every = int(f / 3.5)

        s = fastFilter(self.flatField, f, every)
        # make relative
        s /= s.max()
        return s

    def _fitImg(self, img):
        '''
        fit perspective and size of the input image to the reference image
        '''
        img = imread(img, 'gray')
        if self.bg is not None:
            img = cv2.subtract(img, self.bg)

        if self.lens is not None:
            img = self.lens.correct(img, keepSize=True)

        (H, _, _, _, _, _, _, n_matches) = self.findHomography(img)
        H_inv = self.invertHomography(H)

        s = self.obj_shape
        fit = cv2.warpPerspective(img, H_inv, (s[1], s[0]))
        return fit, img, H, H_inv, n_matches

    def _findObject(self, img):
        '''
        Create a bounding box around the object within an image
        '''
        from imgProcessor.imgSignal import signalMinimum
        # img is scaled already
        i = img > signalMinimum(img)  # img.max()/2.5
        # filter noise, single-time-effects etc. from mask:
        i = minimum_filter(i, 4)
        return boundingBox(i)
class SingleTimeEffectDetection(object):
    '''
    Detect and remove Single-time-effects (STE) using min. 2 equivalent images
    public attributes:

    .mask_clean --> STE-free indices
    .mask_STE   --> STE indices (only avail. if save_ste_indices=True)
    .noSTE      --> STE free average image
    '''

    def __init__(self, images, noise_level_function=None, nStd=4,
                 save_ste_indices=False, calcVariance=False, dtype=float):
        self.save_ste_indices = save_ste_indices

        i1 = imread(images[0], 'gray', dtype=dtype)
        i2 = imread(images[1], 'gray')

        self.mask_STE = None
        if save_ste_indices:
            self.mask_STE = np.zeros(shape=i1.shape, dtype=bool)

        self.mma = MaskedMovingAverage(shape=i1.shape,
                                       calcVariance=calcVariance,
                                       dtype=i1.dtype)

        # MINIMUM OF BOTH IMAGES:
        self.mma.update(np.min((i1, i2), axis=0))

        if noise_level_function is None:
            noise_level_function = oneImageNLF(self.mma.avg)[0]
        self.noise_level_function = noise_level_function
        self.threshold = noise_level_function(self.mma.avg) * nStd

        self.addImage(np.max((i1, i2), axis=0))

        for i in images[2:]:
            self.addImage(imread(i, 'gray'))

    @property
    def noSTE(self):
        return self.mma.avg

    def addImage(self, image, mask=None):
        '''
        add a further image to the running average
        mask -- optional boolean array of pixels to include
        '''
        self._last_diff = diff = image - self.noSTE

        ste = diff > self.threshold
        removeSinglePixels(ste)

        self.mask_clean = clean = ~ste

        if mask is not None:
            clean = np.logical_and(mask, clean)

        self.mma.update(image, clean)

        if self.save_ste_indices:
            self.mask_STE += ste

        return self

    def countSTE(self):
        '''
        return number of found STE
        '''
        return label(self.mask_STE)[1]

    def relativeAreaSTE(self):
        '''
        return STE area - relative to image area
        '''
        s = self.noSTE.shape
        return np.sum(self.mask_STE) / (s[0] * s[1])

    def intensityDistributionSTE(self, bins=10, range=None):
        '''
        return distribution of STE intensity
        '''
        v = np.abs(self._last_diff[self.mask_STE])
        return np.histogram(v, bins, range)
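
Putting the ObjectVignettingSeparation pieces above together, a hedged end-to-end sketch (image paths are hypothetical; separate() returns the smoothed flat field, its validity mask, the raw flat field and the reconstructed object, as in the code above):

img_paths = ['object_pos%i.tif' % i for i in range(5)]  # hypothetical

o = ObjectVignettingSeparation(img_paths[0], maxDev=1e-4, maxIter=10)
for p in img_paths[1:]:
    fit = o.addImg(p)            # returns the warped fit, or False if the fit was rejected
    if fit is False:
        print('rejected %s' % p)

smoothed_ff, mask, flatField, obj = o.separate()
print('grid RMSE of the fitted images: %s' % o.error())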
Example #17
class FlatFieldFromImgFit(object):

    def __init__(self, images=None, bg_images=None,
                 ksize=None, scale_factor=None):
        '''
        calculate flat field from multiple non-calibration images
        through:
        * blurring each image
        * masked moving average of all images to even out individual deviations
        * fitting a vignetting function OR a 2d-polynomial to the average
        '''
        #self.nstd = nstd
        self.ksize = ksize
        self.scale_factor = scale_factor

        self.bglevel = []  # average background level
        self._mx = 0
        self._n = 0
#         self._m = None
        self._small_shape = None
        self._first = True

        self.bg = getBackground(bg_images)

        if images is not None:
            for n, i in enumerate(images):
                print('%s/%s' % (n + 1, len(images)))
                self.addImg(i)

    def _firstImg(self, img):

        if self.scale_factor is None:
            # determine so that the smaller image dimension has 100 px
            self.scale_factor = 100 / min(img.shape)
        img = rescale(img, self.scale_factor)

        self._m = MaskedMovingAverage(shape=img.shape)
        if self.ksize is None:
            self.ksize = max(3, int(min(img.shape) / 10))
        self._first = False
        return img

    def _read(self, img):
        img = imread(img, 'gray', dtype=float)
        img -= self.bg
        return img

    @property
    def result(self):
        return self._m.avg
#         return minimum_filter(self._m.avg,self.ksize)

    @property
    def mask(self):
        return self._m.n > 0
#         return minimum_filter(self._m.n>0,self.ksize)

    def addImg(self, i):
        img = self._read(i)

        if self._first:
            img = self._firstImg(img)
        elif self.scale_factor != 1:
            img = rescale(img, self.scale_factor)
        try:
            f = FitHistogramPeaks(img)
        except AssertionError:
            return
        #sp = getSignalPeak(f.fitParams)
        mn = getSignalMinimum(f.fitParams)
        # non-background indices:
        ind = img > mn  # sp[1] - self.nstd * sp[2]
        # blur:
        # blurred = minimum_filter(img, 3)#remove artefacts
        #blurred = maximum_filter(blurred, self.ksize)
#         blurred = img
#         gblurred = gaussian_filter(img, self.ksize)
#         ind = minimum_filter(ind, self.ksize)
        nind = np.logical_not(ind)
        gblurred = maskedFilter(img, nind, ksize=2 * self.ksize,
                                fill_mask=False,
                                fn="mean")

        #blurred[ind] = gblurred[ind]
        # scale [0-1]:
        mn = img[nind].mean()
        if np.isnan(mn):
            mn = 0
        mx = gblurred[ind].max()
        gblurred -= mn
        gblurred /= (mx - mn)
#         img -= mn
#         img /= (mx - mn)
#         ind = np.logical_and(ind, img > self._m.avg)

        self._m.update(gblurred, ind)
        self.bglevel.append(mn)
        self._mx += mx

        self._n += 1

#         import pylab as plt
#         plt.imshow(self._m.avg)
#         plt.show()

    def background(self):
        return np.median(self.bglevel)
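
A hedged usage sketch for this FlatFieldFromImgFit variant; the file names are hypothetical and bg_images is resolved through getBackground() in the constructor above (a single background frame path is assumed here):

ff = FlatFieldFromImgFit(images=['scene_%i.tif' % i for i in range(8)],
                         bg_images='dark.tif')

small_ff = ff.result        # masked moving average of the blurred, rescaled images
valid = ff.mask             # pixels that received at least one update (._m.n > 0)
bg_level = ff.background()  # median of the per-image background levels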
Example #18
class FlatFieldFromImgFit(object):
    def __init__(self, images=None, nstd=3, ksize=None, scale_factor=None):
        '''
        calculate flat field from multiple non-calibration images
        through:
        * blurring each image
        * masked moving average of all images to even out individual deviations
        * fitting a vignetting function OR a 2d-polynomial to the average
        '''
        self.nstd = nstd
        self.ksize = ksize
        self.scale_factor = scale_factor

        self.bglevel = 0  #average background level
        self._mx = 0
        self._n = 0
        self._m = None
        if images is not None:
            for n, i in enumerate(images):
                print('%s/%s' % (n + 1, len(images)))
                self.addImg(i)

    def addImg(self, i):
        img = imread(i, 'gray', dtype=float)
        self._orig_shape = img.shape

        if self.scale_factor is None:
            # determine so that the smaller image dimension has 100 px
            self.scale_factor = 100.0 / min(img.shape)
        s = [int(s * self.scale_factor) for s in img.shape]

        img = resize(img, s)

        if self._m is None:
            self._m = MaskedMovingAverage(shape=img.shape)
            if self.ksize is None:
                self.ksize = max(3, int(min(img.shape) / 10))

        f = FitHistogramPeaks(img)
        sp = getSignalPeak(f.fitParams)

        # non-background indices:
        ind = img > sp[1] - self.nstd * sp[2]
        # blur:
        blurred = minimum_filter(img, 3)
        blurred = maximum_filter(blurred, self.ksize)
        gblurred = gaussian_filter(blurred, self.ksize)
        blurred[ind] = gblurred[ind]

        #scale [0-1]:
        mn = img[~ind].mean()
        if np.isnan(mn):
            mn = 0
        mx = blurred.max()
        blurred -= mn
        blurred /= (mx - mn)

        ind = blurred > self._m.avg

        self._m.update(blurred, ind)
        self.bglevel += mn
        self._mx += mx

        self._n += 1

    def flatFieldFromFunction(self):
        '''
        calculate flatField from fitting vignetting function to averaged fit-image
        returns flatField, average background level, fitted image, valid indices mask
        '''
        s0, s1 = self._m.avg.shape
        # f-value, alpha, fx, cx, cy
        guess = (s1 * 0.7, 0, 1, s0 / 2.0, s1 / 2.0)

        # assume a normal plane - no tilt and rotation:
        def fn(xy, f, alpha, fx, cx, cy):
            return vignetting((xy[0] * fx, xy[1]), f, alpha, cx=cx, cy=cy)

        fitimg = self._m.avg
        mask = fitimg > 0.5

        flatfield = fit2dArrayToFn(fitimg,
                                   fn,
                                   mask=mask,
                                   guess=guess,
                                   output_shape=self._orig_shape)[0]

        return flatfield, self.bglevel / self._n, fitimg, mask

    def flatFieldFromFit(self):
        '''
        calculate flatField from 2d-polynomial fit filling
        all high gradient areas within averaged fit-image

        returns flatField, average background level, fitted image, valid indices mask
        '''

        fitimg = self._m.avg
        #replace all dark and high gradient variations:
        mask = np.logical_or(fitimg < 0.5, highGrad(fitimg))

        out = fitimg.copy()
        lastm = 0

        for _ in range(10):
            out = polyfit2dGrid(out, mask, 2)
            mask = highGrad(out)
            m = mask.sum()
            if m == lastm:
                break
            lastm = m

        out = np.clip(out, 0.1, 1)

        out = resize(out, self._orig_shape, mode='reflect')
        return out, self.bglevel / self._n, fitimg, mask
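
A hedged usage sketch for this FlatFieldFromImgFit variant; the file names are hypothetical, and both fit methods return (flatField, average background level, fitted image, mask) as their docstrings state:

ff = FlatFieldFromImgFit(images=['img_%i.tif' % i for i in range(6)], nstd=3)

# 2d-polynomial route: iteratively fills high-gradient areas of the averaged image
flat, bglevel, fitimg, mask = ff.flatFieldFromFit()

# alternative: fit an analytic vignetting function instead
# flat, bglevel, fitimg, mask = ff.flatFieldFromFunction()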
Example #19
class FlatFieldFromImgFit(object):
    def __init__(self, images=None, nstd=3, ksize=None, scale_factor=None):
        '''
        calculate flat field from multiple non-calibration images
        through:
        * blurring each image
        * masked moving average of all images to even out individual deviations
        * fitting a vignetting function OR a 2d-polynomial to the average
        '''
        self.nstd = nstd
        self.ksize = ksize
        self.scale_factor = scale_factor

        self.bglevel = 0  # average background level
        self._mx = 0
        self._n = 0
        self._m = None
        if images is not None:
            for n, i in enumerate(images):
                print('%s/%s' % (n + 1, len(images)))
                self.addImg(i)
            
            
    def addImg(self, i):
        img = imread(i, 'gray', dtype=float)
        self._orig_shape = img.shape

        if self.scale_factor is None:
            # determine so that the smaller image dimension has 100 px
            self.scale_factor = 100.0 / min(img.shape)
        s = [int(s * self.scale_factor) for s in img.shape]

        img = resize(img, s)

        if self._m is None:
            self._m = MaskedMovingAverage(shape=img.shape)
            if self.ksize is None:
                self.ksize = max(3, int(min(img.shape) / 10))

        f = FitHistogramPeaks(img)
        sp = getSignalPeak(f.fitParams)

        # non-background indices:
        ind = img > sp[1] - self.nstd * sp[2]
        # blur:
        blurred = minimum_filter(img, 3)
        blurred = maximum_filter(blurred, self.ksize)
        gblurred = gaussian_filter(blurred, self.ksize)
        blurred[ind] = gblurred[ind]

        # scale [0-1]:
        mn = img[~ind].mean()
        if np.isnan(mn):
            mn = 0
        mx = blurred.max()
        blurred -= mn
        blurred /= (mx - mn)

        ind = blurred > self._m.avg

        self._m.update(blurred, ind)
        self.bglevel += mn
        self._mx += mx

        self._n += 1


    def flatFieldFromFunction(self):
        '''
        calculate flatField from fitting vignetting function to averaged fit-image
        returns flatField, average background level, fitted image, valid indices mask
        '''
        s0, s1 = self._m.avg.shape
        # f-value, alpha, fx, cx, cy
        guess = (s1 * 0.7, 0, 1, s0 / 2.0, s1 / 2.0)

        # assume a normal plane - no tilt and rotation:
        def fn(xy, f, alpha, fx, cx, cy):
            return vignetting((xy[0] * fx, xy[1]), f, alpha, cx=cx, cy=cy)

        fitimg = self._m.avg
        mask = fitimg > 0.5

        flatfield = fit2dArrayToFn(fitimg, fn, mask=mask,
                                   guess=guess, output_shape=self._orig_shape)[0]

        return flatfield, self.bglevel / self._n, fitimg, mask



    def flatFieldFromFit(self):
        '''
        calculate flatField from 2d-polynomial fit filling
        all high gradient areas within averaged fit-image

        returns flatField, average background level, fitted image, valid indices mask
        '''
        fitimg = self._m.avg
        # replace all dark and high-gradient variations:
        mask = np.logical_or(fitimg < 0.5, highGrad(fitimg))

        out = fitimg.copy()
        lastm = 0

        for _ in range(10):
            out = polyfit2dGrid(out, mask, 2)
            mask = highGrad(out)
            m = mask.sum()
            if m == lastm:
                break
            lastm = m

        out = np.clip(out, 0.1, 1)

        out = resize(out, self._orig_shape, mode='reflect')
        return out, self.bglevel / self._n, fitimg, mask
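
However the map is obtained, flat-field correction itself is a per-pixel division of the raw image by the map; a minimal hedged sketch with hypothetical names:

raw = imread('raw_scene.tif', 'gray', dtype=float)  # hypothetical path
# 'flat' as returned e.g. by flatFieldFromFit() or flatFieldFromCalibration();
# a dark/background frame would typically be subtracted from 'raw' first
corrected = raw / flat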