Example 1
def _featuresim(reference_img, distorted_img):

    img = reference_img
    dst = _preprocess(distorted_img, 25)
    
    r, g, b = img[:,:,0], img[:,:,1], img[:,:,2]
    imgY = 0.299 * r + 0.587 * g + 0.114 * b
    imgI = 0.596 * r - 0.275 * g - 0.321 * b
    imgQ = 0.212 * r - 0.523 * g + 0.311 * b
    
    r_d, g_d, b_d = dst[:,:,0], dst[:,:,1], dst[:,:,2]
    dstY = 0.299 * r_d + 0.587 * g_d + 0.114 * b_d
    dstI = 0.596 * r_d - 0.275 * g_d - 0.321 * b_d
    dstQ = 0.212 * r_d - 0.523 * g_d + 0.311 * b_d
    
    t1 = 0.85
    t2 = 160
    t3 = 200
    t4 = 200
    
    s_Q = ( 2*imgQ*dstQ + t4 )  / ( imgQ**2 + dstQ**2 + t4 )
    
    s_I = ( 2*imgI*dstI + t3 )  / ( imgI**2 + dstI**2 + t3 )
    
    pc1 = phasepack.phasecong(imgY, nscale = 4, norient = 4, minWaveLength = 6, mult = 2, sigmaOnf=0.55)
    pc2 = phasepack.phasecong(dstY, nscale = 4, norient = 4, minWaveLength = 6, mult = 2, sigmaOnf=0.55)
    pc1 = pc1[0]
    pc2 = pc2[0]
    
    s_PC = ( 2*pc1*pc2 + t1 )  / ( pc1**2 + pc2**2 + t1 )
    
    g1 = scharr( imgY )
    g2 = scharr( dstY )
    s_G = ( 2*g1*g2 + t2 )  / ( g1**2 + g2**2 + t2 )
    
    s_L = s_PC * s_G
    s_C = s_I * s_Q
    
    pcM = np.maximum(pc1,pc2)
    
    
    fsim = round( np.nansum( s_L * pcM) / np.nansum(pcM), 3)
    
    fsimc = round( np.nansum( s_L * s_C**0.3 * pcM) / np.nansum(pcM), 3)
    
    print('FSIM: ' + str(fsim))
    print('FSIMC: ' + str(fsimc))
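All four similarity terms above (s_PC, s_G, s_I, s_Q) follow the same pattern from the FSIM paper, S(a, b) = (2ab + T) / (a^2 + b^2 + T). A small helper, not part of the original code, makes that shared structure explicit:

def _similarity(a, b, t):
    # Generic FSIM-style similarity term on two feature maps (ndarrays):
    # equals 1 where the maps agree and decays toward 0 as they diverge;
    # t is a stability constant.
    return (2 * a * b + t) / (a ** 2 + b ** 2 + t)

# e.g. s_PC = _similarity(pc1, pc2, t1), s_G = _similarity(g1, g2, t2)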
Example 2
def dsqm(img1,original,blkSize = 100):
    img1 = img1.astype(np.float32) / 256.0
    original = original.astype(np.float32) / 256.0

    h,w,p = original.shape
    imgL = original
    imgV = img1
    cp = np.zeros((h,w))
    offsetX = 2
    offsetY = 2
    imgLYIQ = rgb2yiq(imgL).astype(np.float32)
    imgLY = imgLYIQ[:, :, 0]  # Y (luma) channel

    imgVYIQ = rgb2yiq(imgV).astype(np.float32)
    imgVY = imgVYIQ[:, :, 0]  # Y (luma) channel
    
    brow = blkSize
    bcol = brow
    blkV = makeBlocks(imgVY, brow, bcol)
    blkRows, blkCols = blkV.shape[0:2]
    # Use object arrays so that every cell holds its own dict (np.full would
    # share a single dict instance across all cells).
    bestMatch = np.empty((blkRows, blkCols), dtype=object)
    blkVmatch = np.empty((blkRows, blkCols), dtype=object)
    score = np.zeros((blkRows, blkCols))
    for i in range(blkCols):
        for j in range(blkRows):
            T = blkV[j,i]
            Tx = i * bcol
            Ty = j * brow
            Bx = (i+1) * bcol
            By = (j+1) * brow

            img = imgLY[max(0, Ty-offsetY):min(h, By+offsetY),max(0,Tx-offsetX):min(w, Bx+offsetX)]
            orig = original[max(0, Ty-offsetY):min(h, By+offsetY),max(0,Tx-offsetX):min(w, Bx+offsetX)]
            warped = imgV[max(0, Ty-offsetY):min(h, By+offsetY),max(0,Tx-offsetX):min(w, Bx+offsetX)]
            b = imgV[j*bcol:(j+1)*bcol,i*brow:(i+1)*brow]
            mp = cv2.matchTemplate(orig, b, cv2.TM_CCORR_NORMED)
            #mp = cv2.matchTemplate(img, T, cv2.TM_CCORR_NORMED)
            y,x = np.unravel_index(np.argmax(mp),mp.shape)
            bestMatch[j,i] = {'v': mp[y,x], 'x': x, 'y': y}
            blkVmatch[j,i] = {'arr': img[y:(y+brow), x:(x+bcol)]}
            a = orig[y:y+bcol,x:x+brow]
            x = phasecong(T)
            y = phasecong(blkVmatch[j,i]['arr'])
            score[j,i] = np.abs(x[0].mean()-y[0].mean())
            cp[j*bcol:(j+1)*bcol,i*brow:(i+1)*brow] = (y[0]-x[0])**2*256
    return cp.mean(), cp
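The example depends on a makeBlocks helper that is not shown. A minimal sketch consistent with how blkV is indexed above (a 4-D array of non-overlapping brow x bcol patches) could look like this; the name and layout are assumptions, not the original implementation:

import numpy as np

def makeBlocks(img, brow, bcol):
    # Tile a 2-D image into non-overlapping brow x bcol patches, returned as
    # an array indexed [block_row, block_col] -> (brow, bcol) patch.
    h, w = img.shape
    n_rows, n_cols = h // brow, w // bcol
    blocks = np.zeros((n_rows, n_cols, brow, bcol), dtype=img.dtype)
    for j in range(n_rows):
        for i in range(n_cols):
            blocks[j, i] = img[j * brow:(j + 1) * brow, i * bcol:(i + 1) * bcol]
    return blocks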
Example 3
    def _apply_phasecong(self) -> Tuple[np.ndarray, np.ndarray]:
        """Apply phasecong to image and return (M, m)

        Notes:
        - M (Maximum moment of phase congruency covariance - edge strength)
        - m (minimum moment of phase congruency covariance - corner strength)
        """
        results = phasecong(self.img, self.config.pc_nscale,
                            self.config.pc_norient,
                            self.config.pc_minWaveLength, self.config.pc_mult,
                            self.config.pc_sigmaOnf, self.config.pc_k,
                            self.config.pc_cutOff, self.config.pc_g,
                            self.config.pc_noiseMethod)
        # results[0] = M, results[1] = m
        return results[0], results[1]
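As a hypothetical illustration of how the two moment maps might be consumed (the detector instance and the thresholds below are assumptions, not part of the original class):

# M highlights edges, m highlights corners; a crude global threshold turns
# them into binary masks for visualisation.
M, m = detector._apply_phasecong()
edge_mask = M > M.mean() + 2 * M.std()
corner_mask = m > m.mean() + 2 * m.std()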
Example 4
def measure_pc_2d(img, pcn=1, pbflag=True, epsilon=0.001):
    # wrapper function to take care of the different pc measurement types

    if pcn == 1:
        pc_vals, phibar = measure_pc1_2d(img, epsilon)
    elif pcn == 2:
        pc_vals, phibar = measure_pc2_2d(img, epsilon)
    elif pcn == 3:
        pc_vals = pp.phasecong(img)[0]
        phibar = np.zeros_like(pc_vals)  # this method does not produce phibar values
    else:
        raise ValueError('unknown pc measurement type: {}'.format(pcn))

    if not pbflag:
        return pc_vals
    return pc_vals, phibar
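A minimal usage sketch, assuming img is a 2-D grayscale ndarray and the measure_pc*_2d helpers are importable as in the example:

# Phase congruency map only, using the phasepack backend (pcn=3); phibar is
# skipped by passing pbflag=False.
pc = measure_pc_2d(img, pcn=3, pbflag=False)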
Example 5
    def calc_edges(self, t):
        self.layer_to_numpy(self.layer)
        if self.layer.bandCount() > 1:
            # print("returning false")
            return False
        if t == 0:
            return filters.sobel(self.arrays[0].astype(float))
        if t == 1:
            return filters.sobel_h(self.arrays[0].astype(float))
        if t == 2:
            return filters.sobel_v(self.arrays[0].astype(float))
        if t == 3:
            return filters.prewitt(self.arrays[0].astype(float))
        if t == 4:
            return filters.roberts(self.arrays[0].astype(float))
        if t == 5:
            return filters.scharr(self.arrays[0].astype(float))
        if t == 6 and phasepack_:
            out = phasepack.phasecong(self.arrays[0].astype(float))
            return out[0]
Example 6
    def _apply_log_gabor_filters(self, image):
        """Apply the log-Gabor filters to the input image.

        Args:
            image (ndarray): input image.

        Returns:
            ndarray: image responses to Log-Gabor filters.
                     Shape: (Scales X Orientations X Rows X Cols).
        """

        # The values of the parameters for the log-Gabor filters were defined as
        # suggested in a previous study [2], because they demonstrated good
        # results in texture extraction when Log-Gabor filters were used for
        # image description.
        min_wavelen = 3
        scale_factor = 2
        sigma_on_f = 0.65

        # Build a filter bank with Log-Gabor filters
        _, _, _, _, _, image_responses, _ = phasepack.phasecong(
            image,
            nscale=self._n_scales,
            norient=self._n_orient,
            minWaveLength=min_wavelen,
            mult=scale_factor,
            sigmaOnf=sigma_on_f,
            k=2.0,
            g=3)

        # Compute the magnitude (absolute values) of the image responses
        image_responses = np.abs(image_responses)

        # Convert the list of lists into a single array
        image_responses_array = np.swapaxes(image_responses, 0, 1)

        return image_responses_array
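For context, a hypothetical caller might check the documented response shape; extractor stands in for an instance of the class this method belongs to and is an assumption:

# responses has shape (n_scales, n_orientations, rows, cols) after the axis swap.
responses = extractor._apply_log_gabor_filters(image)
assert responses.shape[:2] == (extractor._n_scales, extractor._n_orient)
assert responses.shape[2:] == image.shape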
Example 7
def feature_sim(data):
    """
    Return the Feature Similarity Index (FSIM).
    Can also return FSIMc for color images
    
    Zhang, L., Zhang, L., Mou, X., & Zhang, D. (2011). 
    FSIM: A feature similarity index for image quality assessment. 
    IEEE Transactions on Image Processing, 20(8), 2378–2386. 
    http://doi.org/10.1109/TIP.2011.2109730
    """

    # Convert the input images to YIQ color space
    # Y is the luma component, i.e. B & W
    # imgY = 0.299 * r + 0.587 * g + 0.114 * b

    reference = data[0]
    fsim_vals = []

    for d in data:

        # Constants provided by the authors
        t1 = 0.85
        t2 = 160

        # Phase congruency (PC) images. "PC...a dimensionless measure for the
        # significance of local structure."

        pc1 = phasepack.phasecong(reference,
                                  nscale=4,
                                  norient=4,
                                  minWaveLength=6,
                                  mult=2,
                                  sigmaOnf=0.55)

        pc2 = phasepack.phasecong(d,
                                  nscale=4,
                                  norient=4,
                                  minWaveLength=6,
                                  mult=2,
                                  sigmaOnf=0.55)

        pc1 = pc1[0]  # Reference PC map
        pc2 = pc2[0]  # Distorted PC map

        # Similarity of PC components
        s_PC = (2 * pc1 * pc2 + t1) / (pc1**2 + pc2**2 + t1)

        # compute the Scharr gradient magnitude representation of the images
        # in both the x and y direction
        refgradX = cv2.Sobel(reference, cv2.CV_64F, dx=1, dy=0, ksize=-1)
        refgradY = cv2.Sobel(reference, cv2.CV_64F, dx=0, dy=1, ksize=-1)

        targradX = cv2.Sobel(d, cv2.CV_64F, dx=1, dy=0, ksize=-1)
        targradY = cv2.Sobel(d, cv2.CV_64F, dx=0, dy=1, ksize=-1)

        refgradient = np.maximum(refgradX, refgradY)
        targradient = np.maximum(targradX, targradY)

        #refgradient = np.sqrt(( refgradX**2 ) + ( refgradY**2 ))

        #targradient = np.sqrt(( targradX**2 ) + ( targradY**2 ))

        # The gradient magnitude similarity

        s_G = (2 * refgradient * targradient + t2) / (refgradient**2 +
                                                      targradient**2 + t2)

        s_L = s_PC * s_G  # luma similarity

        pcM = np.maximum(pc1, pc2)

        fsim = round(np.nansum(s_L * pcM) / np.nansum(pcM), 3)

        fsim_vals.append(fsim)

    return fsim_vals
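A hypothetical usage sketch, assuming data is a sequence of equally sized grayscale frames with the reference frame first (paths below is a placeholder):

from skimage import io

frames = [io.imread(p, as_gray=True).astype('float64') for p in paths]
scores = feature_sim(frames)   # one FSIM value per frame, reference first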
Example 8
def EdgeMapExtraction(im, **kwargs):
    """Function to apply different filters to obtain maps based on edge detection

    Parameters
    ----------
    im: ndarray 2D or 3D
        Input image.
    edge_detector: str
        Selection of the edge detector wanted: ['Sobel1stDev', 'Prewitt1stDev', 'Sobel2ndDev', 
        'Prewitt2ndDev', 'GaborBank', 'PhaseCong'].

    Returns
    -------
    maps: ndarray of np.double
        If edge_detector is either 'Sobel1stDev', 'Prewitt1stDev', 'Sobel2ndDev', 'Prewitt2ndDev',
        maps is of size of im.
        If edge_detector is 'GaborBank', the size depends of the configuration of the filter bank.
        If edge_detector is 'PhaseCong', two maps of the same size as im are returned. The first map
        is an edge-based detection whereas the second map is a blob-based detector.
    
    """

    # Check the dimension of the input image
    if len(im.shape) == 2:
        nd_im = 2
    elif len(im.shape) == 3:
        nd_im = 3
    else:
        raise ValueError('mahotas.edge: Can only handle 2D and 3D images.')

    # Assign the edge detector
    edge_detector = kwargs.pop('edge_detector', 'Sobel1stDev')
    detector_list = ['Sobel1stDev', 'Prewitt1stDev', 'Sobel2ndDev', 'Prewitt2ndDev', 'GaborBank', 'PhaseCong']
    if edge_detector not in detector_list:
        raise ValueError('mahotas.edge: The name of the detector is unknown.')

    if edge_detector == 'Sobel1stDev':
        edge_im = generic_gradient_magnitude(im, sobel)
    elif edge_detector == 'Prewitt1stDev':
        edge_im = generic_gradient_magnitude(im, prewitt)
    elif edge_detector == 'Sobel2ndDev':
        edge_im = generic_laplace(im, sobel)
    elif edge_detector == 'Prewitt2ndDev':
        edge_im = generic_laplace(im, prewitt)
    elif edge_detector == 'GaborBank':        
        # Check that the input image is only 2D
        if nd_im != 2:
            raise ValueError('mahotas.edge.phase-congruency: Cannot handle 3D image yet. Go for 2D.')
        # Extract the value of the parameters
        n_freq = kwargs.pop('n_freq', 10)
        freq_range = kwargs.pop('freq_range', (.05, .2))
        n_theta = kwargs.pop('n_theta', 6)
        win_size = kwargs.pop('win_size', (15., 15.))
        # Generate the different kernel which are needed
        kernels_gabor, kernels_gabor_params = GaborKernelBank(n_freq=n_freq, 
                                                              freq_range=freq_range, 
                                                              n_theta=n_theta, 
                                                              win_size=win_size)
        # Extract the maps from Gabor
        edge_im = BuildMaps2DGabor(im, kernels_gabor)
        
        # Return the maps and the parameters of the Gabor kernels
        return (edge_im, kernels_gabor_params)
    elif edge_detector == 'PhaseCong':
        # Check that the input image is only 2D
        if nd_im != 2:
            raise ValueError('mahotas.edge.phase-congruency: Cannot handle 3D image yet. Go for 2D.')
        # Extract the value of the parameters
        nscale = kwargs.pop('nscale', 5)
        norient = kwargs.pop('norient', 6)
        minWaveLength = kwargs.pop('minWaveLength', 3)
        mult = kwargs.pop('mult', 2.1)
        sigmaOnf = kwargs.pop('sigmaOnf', .55)
        k = kwargs.pop('k', 2.)
        cutOff = kwargs.pop('cutOff', .5)
        g = kwargs.pop('g', 10)
        noiseMethod = kwargs.pop('noiseMethod', -1)

        M, m, ori, ft, PC, EO, T = phc.phasecong(im, 
                                                 nscale=nscale, 
                                                 norient=norient, 
                                                 minWaveLength=minWaveLength, 
                                                 mult=mult,
                                                 sigmaOnf=sigmaOnf, 
                                                 k=k, 
                                                 cutOff=cutOff, 
                                                 g=g, 
                                                 noiseMethod=noiseMethod)
        return [M, m]

    return edge_im
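A brief hypothetical usage, assuming im is a 2-D grayscale ndarray:

# 'PhaseCong' returns two maps: an edge-based map (M) and a blob/corner-based
# map (m), as described in the docstring.
edge_map, blob_map = EdgeMapExtraction(im, edge_detector='PhaseCong', nscale=4, norient=6)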
Example 9
def compute_fsim(im1, im2):
    """Compute the Feature Similarity Index (FSIM) between two images.
    Parameters
    ----------
    im1, im2 : ndarray
        Image.  Any dimensionality.
    Returns
    -------
    fsim : float
        The FSIM metric.
    pc_max : ndarray
        Maximum Phase Congruency Feature Map Image.
        A map combining the important structures in each image.
    References
    ----------
    Zhang, L., Zhang, L., Mou, X., & Zhang, D. (2011).
    FSIM: A feature similarity index for image quality assessment.
    IEEE Transactions on Image Processing, 20(8), 2378–2386.
    http://doi.org/10.1109/TIP.2011.2109730
    Notes
    -------
    """

    # print("Computing Feature Similarity...")
    start = timer()

    # Stability constants
    t1 = 0.85
    t2 = 160

    # First we construct Phase Congruency (PC) images.
    # PC is a dimensionless measure of the significance of local structure.
    # We rely on the phasepack library (https://github.com/alimuldal/phasepack) for this computation.
    # The parameters may vary from the implementation of Zhang et al.
    pc1 = phasepack.phasecong(im1,
                              nscale=4,
                              norient=4,
                              minWaveLength=6,
                              mult=2,
                              sigmaOnf=0.55)[0]
    pc2 = phasepack.phasecong(im2,
                              nscale=4,
                              norient=4,
                              minWaveLength=6,
                              mult=2,
                              sigmaOnf=0.55)[0]

    # Next we compute the similarity of the PC images
    s_pc = (2 * pc1 * pc2 + t1) / (pc1**2 + pc2**2 + t1)

    # Next we compute the gradient of each image in the x and y directions (cv2.Sobel with ksize=-1, i.e. the 3x3 Scharr kernel).
    im1_gradient_x = cv2.Sobel(im1, cv2.CV_64F, dx=1, dy=0, ksize=-1)
    im1_gradient_y = cv2.Sobel(im1, cv2.CV_64F, dx=0, dy=1, ksize=-1)
    im2_gradient_x = cv2.Sobel(im2, cv2.CV_64F, dx=1, dy=0, ksize=-1)
    im2_gradient_y = cv2.Sobel(im2, cv2.CV_64F, dx=0, dy=1, ksize=-1)

    # These gradients are used to construct a gradient magnitude feature map for im1 and im2.
    im1_gm = np.sqrt((im1_gradient_x**2) + (im1_gradient_y**2))
    im2_gm = np.sqrt((im2_gradient_x**2) + (im2_gradient_y**2))

    # Now we have two feature maps: Phase Congruency and Gradient Magnitude (GM)
    # We will now compute the similarity of the GM maps.
    s_gm = (2 * im1_gm * im2_gm + t2) / (im1_gm**2 + im2_gm**2 + t2)

    # We simply combine the GM and PC similarity to compute the total similarity
    if len(s_gm.shape) == 3:
        s_pc = s_pc[:, :, np.newaxis]
        s_pc = np.repeat(s_pc, 3, axis=2)
    # s_gm = np.sum(s_gm, axis=2)
    s_total = s_pc * s_gm

    # However, different locations have different contributions to the perception of image similarity.
    # For example, edges are more important than smooth areas, so high PC values indicate important structures.
    # We then weight the importance using the maximum values of the PC image pair.
    pc_max = np.maximum(pc1, pc2)
    if len(s_gm.shape) == 3:
        pc_max = pc_max[:, :, np.newaxis]
        pc_max = np.repeat(pc_max, 3, axis=2)
    fsim = np.sum(s_total * pc_max) / np.sum(pc_max)
    end = timer()
    # print("Computing Feature Similarity...Complete. Elapsed Time: [s] " + str(end - start))

    return fsim, pc_max
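A quick hypothetical sanity check (img is a placeholder grayscale array): comparing an image against a blurred copy should score lower than comparing it against itself.

import cv2

blurred = cv2.GaussianBlur(img, (9, 9), 2.0)
score_self, _ = compute_fsim(img, img)        # ~1.0 up to numerical error
score_blur, _ = compute_fsim(img, blurred)    # expected to be lower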
Example 10
import phasepack
from skimage import io
import matplotlib.pyplot as plt
import ftdetect.cleanedges as cleanedges
from skimage import filters
import numpy as np

img_fn = "/home/moi/Work/19-04-14 DF12 Morten Iversen/DF12/400m/IMG_6427.JPG"

img = io.imread(img_fn, as_gray=True)

img = img[500:1000, 3500:4000]

# TODO: You may want to experiment with the values of 'nscale' and 'k', the noise compensation factor.
M, m, ori, ft, PC, EO, T = phasepack.phasecong(img)

# TODO: M or M + m?
edges = M

# TODO: tlo, thi
edges_clean = cleanedges.hystThresh(edges)

fig, axes = plt.subplots(ncols=4, sharex=True, sharey=True)

axes[0].imshow(img)
axes[1].imshow(edges)
axes[3].imshow(edges_clean > 0)

# Experitur: start from single values for parameters and infer suitable range
# How to do that? -> p * [1 - epsilon, 1 + epsilon]
# jitter=True
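One way to fill in the missing tlo/thi step without leaving skimage would be its built-in hysteresis threshold; the cut-off values here are illustrative only, not the author's choice:

from skimage.filters import apply_hysteresis_threshold

# Illustrative thresholds on the maximum-moment map M; tune per image.
edges_clean_alt = apply_hysteresis_threshold(M, low=0.1, high=0.3)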
Example 11
    def fit(self, modality, ground_truth=None, cat=None):
        """Compute the phase congruency images.

        Parameters
        ----------
        modality : object of type TemporalModality
            The modality object of interest.

        ground_truth : object of type GTModality or None
            The ground-truth of GTModality. If None, the whole data will be
            considered.

        cat : str or None
            String corresponding at the ground-truth of interest. Cannot be
            None if ground-truth is not None.

        Returns
        -------
        self : object
             Return self.

        """
        super(PhaseCongruencyExtraction, self).fit(modality=modality,
                                                   ground_truth=ground_truth,
                                                   cat=cat)

        # Check that the filter provided is known
        if self.type_filter not in KNOWN_FILTER:
            raise ValueError('{} filter is unknown'.format(self.type_filter))

        # Regular filters
        if self.type_filter == 'regular':

            # Extract the parameters for the function
            nscale = self.dict_params.pop('n_scale', 5)
            norient = self.dict_params.pop('n_orient', 6)
            minWaveLength = self.dict_params.pop('minWaveLength', 3)
            mult = self.dict_params.pop('mult', 2.1)
            sigmaOnf = self.dict_params.pop('sigmaOnf', .55)
            k = self.dict_params.pop('k', 2.)
            cutOff = self.dict_params.pop('cutOff', .5)
            g = self.dict_params.pop('g', 10)
            noiseMethod = self.dict_params.pop('noiseMethod', -1)

            # Create a list to catch the different outputs
            M_vol = []
            ori_vol = []
            ft_vol = []

            # Compute the convolution for each slice
            for sl in range(modality.data_.shape[2]):
                M, _, ori, ft, _, _, _ = phasecong(modality.data_[:, :, sl],
                                                   nscale=nscale,
                                                   norient=norient,
                                                   minWaveLength=minWaveLength,
                                                   mult=mult,
                                                   sigmaOnf=sigmaOnf,
                                                   k=k,
                                                   cutOff=cutOff,
                                                   g=g,
                                                   noiseMethod=noiseMethod)
                # Append the data
                M_vol.append(M)
                ori_vol.append(ori)
                ft_vol.append(ft)

        # Monogenic filters
        elif self.type_filter == 'monogenic':

            # Extract the parameters for the function
            nscale = self.dict_params.pop('n_scale', 5)
            minWaveLength = self.dict_params.pop('minWaveLength', 3)
            mult = self.dict_params.pop('mult', 2.1)
            sigmaOnf = self.dict_params.pop('sigmaOnf', .55)
            k = self.dict_params.pop('k', 2.)
            cutOff = self.dict_params.pop('cutOff', .5)
            g = self.dict_params.pop('g', 10)
            noiseMethod = self.dict_params.pop('noiseMethod', -1)
            deviationGain = self.dict_params.pop('deviationGain', 1.5)

            # Create a list to catch the different outputs
            M_vol = []
            ori_vol = []
            ft_vol = []

            # Compute the convolution for each slice
            for sl in range(modality.data_.shape[2]):
                M, ori, ft, _ = phasecongmono(modality.data_[:, :, sl],
                                              nscale=nscale,
                                              minWaveLength=minWaveLength,
                                              mult=mult,
                                              sigmaOnf=sigmaOnf,
                                              k=k,
                                              cutOff=cutOff,
                                              g=g,
                                              noiseMethod=noiseMethod,
                                              deviationGain=deviationGain)
                # Append the data
                M_vol.append(M)
                ori_vol.append(ori)
                ft_vol.append(ft)

        # Convert all the list to numpy array
        M_vol = np.array(M_vol)
        ori_vol = np.array(ori_vol)
        ft_vol = np.array(ft_vol)

        # Roll the axis
        M_vol = np.rollaxis(M_vol, 0, 3)
        ori_vol = np.rollaxis(ori_vol, 0, 3)
        ft_vol = np.rollaxis(ft_vol, 0, 3)

        self.data_ = np.array([M_vol, ori_vol, ft_vol])

        return self
Example 12
def feature_sim(reference, target):
    
    """
    Return the Feature Similarity Index (FSIM).
    Can also return FSIMc for color images
    
    Zhang, L., Zhang, L., Mou, X., & Zhang, D. (2011). 
    FSIM: A feature similarity index for image quality assessment. 
    IEEE Transactions on Image Processing, 20(8), 2378–2386. 
    http://doi.org/10.1109/TIP.2011.2109730
    """
    
    # Convert the input images to YIQ color space
    # Y is the luma component, i.e. B & W
    # imgY = 0.299 * r + 0.587 * g + 0.114 * b

    # Constants provided by the authors
    t1 = 0.85
    t2 = 160
    
    # Phase congruency (PC) images. "PC...a dimensionless measure for the
    # significance of local structure."
    
    pc1 = phasepack.phasecong(reference, nscale = 4, norient = 4, 
                              minWaveLength = 6, mult = 2, sigmaOnf=0.55)
                              
    pc2 = phasepack.phasecong(target, nscale = 4, norient = 4,
                              minWaveLength = 6, mult = 2, sigmaOnf=0.55)
                              
    pc1 = pc1[0]  # Reference PC map
    pc2 = pc2[0]  # Distorted PC map
    
    # Similarity of PC components
    s_PC = ( 2*pc1*pc2 + t1 )  / ( pc1**2 + pc2**2 + t1 )
    
    # compute the Scharr gradient magnitude representation of the images
    # in both the x and y direction
    refgradX = cv2.Sobel(reference, cv2.CV_64F, dx = 1, dy = 0, ksize = -1)
    refgradY = cv2.Sobel(reference, cv2.CV_64F, dx = 0, dy = 1, ksize = -1)
    
    targradX = cv2.Sobel(target, cv2.CV_64F, dx = 1, dy = 0, ksize = -1)
    targradY = cv2.Sobel(target, cv2.CV_64F, dx = 0, dy = 1, ksize = -1)
    
    refgradient = np.maximum(refgradX, refgradY)    
    targradient = np.maximum(targradX, targradY)   
    
    #refgradient = np.sqrt(( refgradX**2 ) + ( refgradY**2 ))
    
    #targradient = np.sqrt(( targradX**2 ) + ( targradY**2 ))

    # The gradient magnitude similarity

    s_G = (2*refgradient*targradient + t2) / (refgradient**2 + targradient**2 + t2)
    
    s_L = s_PC * s_G  # luma similarity
    
    pcM = np.maximum(pc1,pc2)
        
    fsim = round( np.nansum( s_L * pcM) / np.nansum(pcM), 3)
    
    return fsim
Example 13
r_d, g_d, b_d = dst[:,:,0], dst[:,:,1], dst[:,:,2]
dstY = 0.299 * r_d + 0.587 * g_d + 0.114 * b_d
dstI = 0.596 * r_d - 0.275 * g_d - 0.321 * b_d
dstQ = 0.212 * r_d - 0.523 * g_d + 0.311 * b_d

t1 = 0.85
t2 = 160
t3 = 200
t4 = 200

s_Q = ( 2*imgQ*dstQ + t4 )  / ( imgQ**2 + dstQ**2 + t4 )

s_I = ( 2*imgI*dstI + t3 )  / ( imgI**2 + dstI**2 + t3 )

pc1 = phasepack.phasecong(imgY, nscale = 4, norient = 4, minWaveLength = 6, mult = 2, sigmaOnf=0.55)
pc2 = phasepack.phasecong(dstY, nscale = 4, norient = 4, minWaveLength = 6, mult = 2, sigmaOnf=0.55)
pc1 = pc1[0]
pc2 = pc2[0]

s_PC = ( 2*pc1*pc2 + t1 )  / ( pc1**2 + pc2**2 + t1 )

g1 = scharr( imgY )
g2 = scharr( dstY )
s_G = ( 2*g1*g2 + t2 )  / ( g1**2 + g2**2 + t2 )

s_L = s_PC * s_G
s_C = s_I * s_Q

pcM = np.maximum(pc1,pc2)
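The fragment stops before the pooling step; following Example 1 (the same computation), the final scores would be obtained roughly as:

# Weighted pooling of the combined similarity maps by the maximum PC map.
fsim = np.nansum(s_L * pcM) / np.nansum(pcM)
fsimc = np.nansum(s_L * s_C ** 0.3 * pcM) / np.nansum(pcM)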
Example 14
def phasecong_Mm(roi):
    r = phasecong(roi,PC_NSCALE,PC_NORIENT,PC_MIN_WAVELENGTH,PC_MULT,PC_SIGMA_ONF,PC_K,PC_CUTOFF,PC_G,PC_NOISEMETHOD)

    M, m = r[0:2]

    return M + m