Example #1
def my_ssr(src_img, size):
    '''
    SSR implemented directly from the definition; the results are poor and still need work
    :param src_img: input image
    :param size: Gaussian kernel size
    :return: SSR-enhanced image
    '''
    # Estimate the illumination component
    L_blur = cv2.GaussianBlur(src_img, (size, size), 0)
    L_blur = np.float32(L_blur)
    L_blur_min = np.min(L_blur)
    L_blur = (L_blur - L_blur_min)

    # Estimate the reflectance component
    src_img = src_img / 255.0
    L_blur = L_blur / 255.0
    src_img = np.float32(src_img)
    log_blur = cv2.log(L_blur + 1.0)
    log_img = cv2.log(src_img + 1.0)

    log_r = log_img - log_blur

    ssr = (log_r - np.min(log_r)) / (np.max(log_r) - np.min(log_r))
    ssr = np.uint8(ssr * 255)
    return ssr
Example #2
def MSRCR(img, scales, s1, s2):
    h, w = img.shape[:2]
    weight = 1 / 3.0
    alpha = 125.0
    beta = 46.0
    scales_size = len(scales)
    log_R = np.zeros((h, w), dtype=np.float32)

    img_sum = np.sum(img, axis=2, keepdims=True)
    img_sum = replaceZeroes(img_sum)
    gray_img = []

    for i in range(3):  # iterate over the three color channels
        img[:, :, i] = replaceZeroes(img[:, :, i])
        for j in range(scales_size):
            L_blur = cv.GaussianBlur(img[:, :, i], (scales[j], scales[j]), 0)
            L_blur = replaceZeroes(L_blur)

            dst_img = cv.log(img[:, :, i] / 255.0)
            dst_Lblur = cv.log(L_blur / 255.0)
            dst_ixl = cv.multiply(dst_img, dst_Lblur)
            log_R += weight * cv.subtract(dst_img, dst_ixl)

        MSRCR = beta * (cv.log(alpha * img[:, :, i]) - cv.log(img_sum))
        gray = simplestColorBalance(MSRCR, s1, s2)
        gray_img.append(gray)
    return gray_img
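Several of these Retinex examples call a replaceZeroes helper that is not reproduced here. A minimal sketch of what such a helper is usually assumed to do (replace zero pixels with the smallest non-zero value so that the later cv2.log calls stay finite):

def replaceZeroes(data):
    # Assumed helper: substitute zeros with the smallest non-zero value
    # so that log(data) does not produce -inf.
    min_nonzero = min(data[np.nonzero(data)])
    data[data == 0] = min_nonzero
    return data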
Example #3
def MSRCP(img, scales, s1, s2):
    h, w = img.shape[:2]
    scales_size = len(scales)
    B_chan = img[:, :, 0]
    G_chan = img[:, :, 1]
    R_chan = img[:, :, 2]
    log_R = np.zeros((h, w), dtype=np.float32)
    array_255 = np.full((h, w), 255.0, dtype=np.float32)

    I_array = (np.float32(B_chan) + G_chan + R_chan) / 3.0  # float sum to avoid uint8 overflow
    I_array = replaceZeroes(I_array)

    for i in range(0, scales_size):
        L_blur = cv2.GaussianBlur(I_array, (scales[i], scales[i]), 0)
        L_blur = replaceZeroes(L_blur)
        dst_I = cv2.log(I_array / 255.0)
        dst_Lblur = cv2.log(L_blur / 255.0)
        dst_ixl = cv2.multiply(dst_I, dst_Lblur)
        log_R += cv2.subtract(dst_I, dst_ixl)
    MSR = log_R / 3.0
    Int1 = simple_color_balance(MSR, s1, s2)

    B_array = np.maximum(np.maximum(B_chan, G_chan), R_chan)  # per-pixel maximum of the three channels
    A = np.minimum(array_255 / B_array, Int1 / I_array)
    R_channel_out = A * R_chan
    G_channel_out = A * G_chan
    B_channel_out = A * B_chan

    MSRCP_Out_img = cv2.merge([B_channel_out, G_channel_out, R_channel_out])
    MSRCP_Out = cv2.convertScaleAbs(MSRCP_Out_img)

    return MSRCP_Out
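The MSRCR/MSRCP examples also depend on a simplestColorBalance / simple_color_balance helper that is not shown. A minimal sketch under the usual interpretation (clip the darkest s1 percent and brightest s2 percent of values, then stretch linearly to 0-255); the exact percentile convention is an assumption:

def simple_color_balance(img, s1, s2):
    # Assumed helper: clip the darkest s1% and brightest s2% of pixels,
    # then linearly stretch the remaining range to 0-255.
    flat = np.sort(img.flatten())
    n = flat.size
    low = flat[int(n * s1 / 100.0)]
    high = flat[min(n - 1, int(n * (1.0 - s2 / 100.0)))]
    out = np.clip(img, low, high)
    out = (out - low) / max(high - low, 1e-6) * 255.0
    return out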
Example #4
def singleScaleRetinex(img, sigma):
    img = np.float64(img)
    #retinex = np.log10(img) - np.log10(cv2.GaussianBlur(img, (0, 0), sigma))
    logI = cv2.log(img)
    logGI = cv2.log(cv2.GaussianBlur(img, (0, 0), sigma))
    retinex = cv2.subtract(logI, logGI)
    return retinex
Example #5
    def ShowHomomorphicFilter(imgSrc):
        # imgSrc = cv.resize(imgSrc, (int(imgSrc.shape[1]/4), int(imgSrc.shape[0]/4)))
        imgLnSrc = imgSrc.copy()
        # Constrain the value range first; otherwise zero values turn into infinities after the log
        cv.normalize(imgLnSrc, imgLnSrc, 1, 255, cv.NORM_MINMAX)
        imgLnSrc = np.float64(imgLnSrc)
        cv.log(imgLnSrc, imgLnSrc)
        b, g, r = cv.split(imgLnSrc)
        H = ImageHandler.CreateHomomorphicFilterTemplate(
            imgLnSrc.shape[0] * 2, imgLnSrc.shape[1] * 2)
        b = ImageHandler.HandleFFTPerChannel(b, H)
        g = ImageHandler.HandleFFTPerChannel(g, H)
        r = ImageHandler.HandleFFTPerChannel(r, H)

        merged = cv.merge((b, g, r))
        imgOut = merged[0:imgSrc.shape[0], 0:imgSrc.shape[1],
                        0:imgSrc.shape[2]]
        # Normalize twice: the output values are too large, and exp would overflow to infinity
        cv.normalize(imgOut, imgOut, 0, 1, cv.NORM_MINMAX)
        cv.exp(imgOut, imgOut)
        cv.normalize(imgOut, imgOut, 0, 1, cv.NORM_MINMAX)

        cv.cvtColor(imgSrc, cv.COLOR_BGR2RGB, imgSrc)
        cv.cvtColor(imgOut, cv.COLOR_BGR2RGB, imgOut)
        plt.figure(1), plt.imshow(imgSrc)
        plt.figure(2), plt.imshow(imgOut)
        plt.show()
Example #6
def singleScaleRetinex_with_return(img, sigma):
    print(img[1:5,1:5])
    img = np.float64(img)
    print(img[1:5, 1:5])
    #retinex = np.log10(img) - np.log10(cv2.GaussianBlur(img, (0, 0), sigma))
    logI = cv2.log(img)
    logGI = cv2.log(cv2.GaussianBlur(img, (0, 0), sigma))
    retinex = cv2.subtract(logI, logGI)

    m = np.argmax(retinex)
    r, c = divmod(m, retinex.shape[1])
    # print ('max',np.argmax(r))
    # print(r, c)
    print(retinex.dtype)
    print(retinex.shape)

    #img_retinex = np.uint8(retinex)
    print(retinex[10:30, 10:30])
    retinex_return = np.exp(retinex)
    print('aMSRCR dtype', retinex_return .dtype)
    print(retinex_return[10:30,10:30])
    dst_R = cv2.normalize(retinex_return, None, 0, 255, cv2.NORM_MINMAX)
    log_uint8 = cv2.convertScaleAbs(dst_R)
    print(retinex_return[10:20,10:20])
    return log_uint8
Example #7
def deilluminate2(img):
    b, g, r = cv2.split(img)
    h, s, v = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
    log_v = cv2.log(np.float32(v))
    blur_v = cv2.log(np.float32(cv2.GaussianBlur(v, (63, 63), 41)))
    res = np.exp(log_v - blur_v)
    return cv2.cvtColor(np.uint8(res * 255), cv2.COLOR_GRAY2BGR)
Example #8
 def dftcv(self):
     h = cv2.getOptimalDFTSize(self.grayimage.shape[0])
     w = cv2.getOptimalDFTSize(self.grayimage.shape[1])
     padding = cv2.copyMakeBorder(self.grayimage,
                                  0,
                                  h - len(self.image),
                                  0,
                                  w - len(self.image[0]),
                                  cv2.BORDER_CONSTANT,
                                  value=[0, 0, 0])
     planes = [np.float32(padding), np.zeros(padding.shape, np.float32)]
     complexI = cv2.merge(planes)
     cv2.dft(complexI, complexI)
     cv2.split(complexI, planes)
     cv2.magnitude(planes[0], planes[1], planes[0])
     magI = planes[0]
     matOfOnes = np.ones(magI.shape, dtype=magI.dtype)
     cv2.add(matOfOnes, magI, magI)
     cv2.log(magI, magI)
     magI_rows, magI_cols = magI.shape
     magI = magI[0:(magI_rows & -2), 0:(magI_cols & -2)]
     cx = int(magI_rows / 2)
     cy = int(magI_cols / 2)
     q0 = magI[0:cx, 0:cy]
     q1 = magI[cx:cx + cx, 0:cy]
     q2 = magI[0:cx, cy:cy + cy]
     q3 = magI[cx:cx + cx, cy:cy + cy]
     tmp = np.copy(q0)
     magI[0:cx, 0:cy] = q3
     magI[cx:cx + cx, cy:cy + cy] = tmp
     tmp = np.copy(q1)
     magI[cx:cx + cx, 0:cy] = q2
     magI[0:cx, cy:cy + cy] = tmp
     cv2.normalize(magI, magI, 0, 1, cv2.NORM_MINMAX)
     return magI
Example #9
def MSRCR(img, scales, s1, s2):
    h, w = img.shape[:2]
    scales_size = len(scales)
    log_R = np.zeros((h, w), dtype=np.float32)

    # sum of the three channels in float to avoid uint8 overflow
    img_sum = np.float32(img[:, :, 0]) + img[:, :, 1] + img[:, :, 2]
    img_sum = replaceZeroes(img_sum)
    gray_img = []

    for j in range(3):
        img[:, :, j] = replaceZeroes(img[:, :, j])
        for i in range(0, scales_size):
            L_blur = cv2.GaussianBlur(img[:, :, j], (scales[i], scales[i]), 0)
            L_blur = replaceZeroes(L_blur)

            dst_img = cv2.log(img[:, :, j] / 255.0)
            dst_Lblur = cv2.log(L_blur / 255.0)
            dst_ixl = cv2.multiply(dst_img, dst_Lblur)
            log_R += cv2.subtract(dst_img, dst_ixl)

        MSR = log_R / 3.0
        MSRCR = MSR * (cv2.log(125.0 * img[:, :, j]) - cv2.log(img_sum))
        gray = simple_color_balance(MSRCR, s1, s2)
        gray_img.append(gray)
    return gray_img
Example #10
def main(argv):
    filename = argv[0] if len(argv) > 0 else "images/g2-200px.jpg"
    I = cv.imread(filename, cv.IMREAD_GRAYSCALE)
    if I is None:
        print('error opening image')
        return -1

    rows, cols = I.shape
    m = cv.getOptimalDFTSize(rows)
    n = cv.getOptimalDFTSize(cols)
    padded = cv.copyMakeBorder(I,
                               0,
                               m - rows,
                               0,
                               n - cols,
                               cv.BORDER_CONSTANT,
                               value=[0, 0, 0])

    planes = [np.float32(padded), np.zeros(padded.shape, np.float32)]
    complexI = cv.merge(planes)
    cv.dft(complexI, complexI)

    cv.split(complexI, planes)
    cv.magnitude(planes[0], planes[1], planes[0])
    magI = planes[0]

    matOfOnes = np.ones(magI.shape, dtype=magI.dtype)
    cv.add(matOfOnes, magI, magI)
    cv.log(magI, magI)
    magI_rows, magI_cols = magI.shape
    magI = magI[0:(magI_rows & -2), 0:(magI_cols & -2)]
    cx = int(magI_rows / 2)
    cy = int(magI_cols / 2)
    q0 = magI[0:cx, 0:cy]

    q1 = magI[cx:cx + cx, 0:cy]
    q2 = magI[0:cx, cy:cy + cy]
    q3 = magI[cx:cx + cx, cy:cy + cy]
    tmp = np.copy(q0)

    magI[0:cx, 0:cy] = q3
    magI[cx:cx + cx, cy:cy + cy] = tmp
    tmp = np.copy(q1)

    magI[cx:cx + cx, 0:cy] = q2
    magI[0:cx, cy:cy + cy] = tmp

    cv.normalize(magI, magI, 0, 1, cv.NORM_MINMAX)

    cv.imshow("input image", I)
    print('Grayscale image pixel values:')
    print('')
    print(I)
    print('')
    cv.imshow("spectrum magnitude", magI)
    print('Spectrum magnitude values:')
    print('')
    print(magI)
    cv.waitKey()
Example #11
def deilluminate_single(gray):
    blur = cv2.GaussianBlur(gray, (63, 63), 41)
    gray = cv2.log(np.float32(gray))
    blur = cv2.log(np.float32(blur))
    res = np.exp(gray - blur)
    res = cv2.normalize(res, None, 0, 255, cv2.NORM_MINMAX)
    gray = np.uint8(res)
    return gray
Example #12
def deilluminate(img):
    h, s, gray = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
    blur = cv2.GaussianBlur(gray, (63, 63), 41)
    gray = cv2.log(np.float32(gray))
    blur = cv2.log(np.float32(blur))
    res = np.exp(gray - blur)
    res = cv2.normalize(res, None, 0, 255, cv2.NORM_MINMAX)
    v = np.uint8(res)
    return cv2.cvtColor(cv2.merge((h, s, v)), cv2.COLOR_HSV2BGR)
Example #13
def motion_deflicker(frames, img):
    log_median = cv2.log(np.float32(np.median(frames, axis=0)))
    log_img = cv2.log(np.float32(img))
    diff = cv2.GaussianBlur(log_img - log_median, (21, 21), 0)
    res = img / np.exp(diff)
    res = res.clip(max=255)
    blur = cv2.GaussianBlur(np.uint8(res), (5, 5), 0)
    res = cv2.addWeighted(np.uint8(res), 1.5, blur, -0.5, 0)
    return res
Example #14
def split_r_part(hsv_v, nkernelSize, mean_illumination, ADJUST_DELTA=10):
    v_blur = cv2.GaussianBlur(hsv_v, (nkernelSize, nkernelSize), 0)
    v_log = cv2.log(hsv_v)
    v_blur_log = cv2.log(v_blur)
    r_part_log = v_log - v_blur_log
    r_part = cv2.exp(r_part_log)
    r_part = cv2.convertScaleAbs(r_part, alpha=mean_illumination)
    r_part_32F = r_part.astype(np.float32)
    r_part_32F -= ADJUST_DELTA
    return r_part_32F
Example #15
def main(argv):
    print_help()
    filename = argv[0] if len(argv) > 0 else 'lena.jpg'
    I = cv.imread(cv.samples.findFile(filename), cv.IMREAD_GRAYSCALE)
    if I is None:
        print('Error opening image')
        return -1
    
    rows, cols = I.shape
    m = cv.getOptimalDFTSize( rows )
    n = cv.getOptimalDFTSize( cols )
    padded = cv.copyMakeBorder(I, 0, m - rows, 0, n - cols, cv.BORDER_CONSTANT, value=[0, 0, 0])
    
    planes = [np.float32(padded), np.zeros(padded.shape, np.float32)]
    complexI = cv.merge(planes)         # Add to the expanded another plane with zeros
    
    cv.dft(complexI, complexI)         # this way the result may fit in the source matrix
    
    cv.split(complexI, planes)                   # planes[0] = Re(DFT(I), planes[1] = Im(DFT(I))
    cv.magnitude(planes[0], planes[1], planes[0])# planes[0] = magnitude
    magI = planes[0]
    
    matOfOnes = np.ones(magI.shape, dtype=magI.dtype)
    cv.add(matOfOnes, magI, magI) #  switch to logarithmic scale
    cv.log(magI, magI)
    np.set_printoptions(threshold=1024**2)
   
    magI_rows, magI_cols = magI.shape
    # crop the spectrum, if it has an odd number of rows or columns
    magI = magI[0:(magI_rows & -2), 0:(magI_cols & -2)]
    cx = int(magI_rows/2)
    cy = int(magI_cols/2)
    q0 = magI[0:cx, 0:cy]         # Top-Left - Create a ROI per quadrant
    q1 = magI[cx:cx+cx, 0:cy]     # Top-Right
    q2 = magI[0:cx, cy:cy+cy]     # Bottom-Left
    q3 = magI[cx:cx+cx, cy:cy+cy] # Bottom-Right
    tmp = np.copy(q0)               # swap quadrants (Top-Left with Bottom-Right)
    magI[0:cx, 0:cy] = q3
    magI[cx:cx + cx, cy:cy + cy] = tmp
    tmp = np.copy(q1)               # swap quadrant (Top-Right with Bottom-Left)
    magI[cx:cx + cx, 0:cy] = q2
    magI[0:cx, cy:cy + cy] = tmp
    print(magI.shape)
    matrix = magI[::64,::64].astype(int).tolist()
    s = [[str(e) for e in row] for row in matrix]
    lens = [max(map(len, col)) for col in zip(*s)]
    fmt = '\t'.join('{{:{}}}'.format(x) for x in lens)
    table = [fmt.format(*row) for row in s]
    print('\n'.join(table))
    
    cv.normalize(magI, magI, 0, 1, cv.NORM_MINMAX) # Transform the matrix with float values into a viewable image form (float between values 0 and 1)
    
    cv.imshow("Input Image"       , I   )    # Show the result
    cv.imshow("spectrum magnitude", magI)
    cv.waitKey()
Example #16
 def SSR(self, src_img, size):
     L_blur = cv2.GaussianBlur(src_img, (size, size), 0)
     img = self.replaceZeroes(src_img)
     L_blur = self.replaceZeroes(L_blur)
     dst_Img = cv2.log(img / 255.0)
     dst_Lblur = cv2.log(L_blur / 255.0)
     dst_IxL = cv2.multiply(dst_Img, dst_Lblur)
     log_R = cv2.subtract(dst_Img, dst_IxL)
     dst_R = cv2.normalize(log_R, None, 0, 255, cv2.NORM_MINMAX)
     log_uint8 = cv2.convertScaleAbs(dst_R)
     return log_uint8
Example #17
    def __init__(self, image, parent=None):
        super(FrequencyWidget, self).__init__(parent)

        self.ampl_radio = QRadioButton(self.tr('Amplitude'))
        self.ampl_radio.setChecked(True)
        self.phase_radio = QRadioButton(self.tr('Phase'))
        self.dct_radio = QRadioButton(self.tr('DCT Map'))
        self.last_radio = self.ampl_radio
        self.thr_spin = QSpinBox()
        self.thr_spin.setRange(0, 255)
        self.thr_spin.setSpecialValueText(self.tr('Off'))
        self.ratio_label = QLabel()
        self.filter_check = QCheckBox(self.tr('Filter'))

        self.ampl_radio.clicked.connect(self.process)
        self.phase_radio.clicked.connect(self.process)
        self.dct_radio.clicked.connect(self.process)
        self.thr_spin.valueChanged.connect(self.process)
        self.filter_check.stateChanged.connect(self.process)

        gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
        rows, cols = gray.shape
        height = cv.getOptimalDFTSize(rows)
        width = cv.getOptimalDFTSize(cols)
        padded = cv.copyMakeBorder(gray, 0, height - rows, 0, width - cols,
                                   cv.BORDER_CONSTANT).astype(np.float32)
        planes = cv.merge([padded, np.zeros_like(padded)])
        dft = cv.split(np.fft.fftshift(cv.dft(planes)))
        mag, phase = cv.cartToPolar(dft[0], dft[1])
        dct = cv.dct(padded)
        self.result = [
            normalize_mat(img)
            for img in [cv.log(mag), phase, cv.log(dct)]
        ]

        self.image = image
        self.viewer = ImageViewer(self.image, None)
        self.process()

        top_layout = QHBoxLayout()
        top_layout.addWidget(QLabel(self.tr('Coefficients:')))
        top_layout.addWidget(self.ampl_radio)
        top_layout.addWidget(self.phase_radio)
        top_layout.addWidget(self.dct_radio)
        top_layout.addWidget(self.filter_check)
        top_layout.addStretch()
        top_layout.addWidget(QLabel(self.tr('Threshold:')))
        top_layout.addWidget(self.thr_spin)
        top_layout.addWidget(self.ratio_label)

        main_layout = QVBoxLayout()
        main_layout.addLayout(top_layout)
        main_layout.addWidget(self.viewer)
        self.setLayout(main_layout)
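normalize_mat is not defined in this snippet; a plausible minimal version (assumed) that maps a float matrix to an 8-bit image for display:

def normalize_mat(mat):
    # Assumed helper: scale a float matrix to the 0-255 range and convert to uint8 for display.
    norm = cv.normalize(mat, None, 0, 255, cv.NORM_MINMAX)
    return cv.convertScaleAbs(norm)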
Example #18
def discriteFourierTransform(img):
    rows, columns = np.shape(img)
    m = cv2.getOptimalDFTSize(rows)
    n = cv2.getOptimalDFTSize(columns)

    imBorder = cv2.copyMakeBorder(img,
                                  0,
                                  m - rows,
                                  0,
                                  n - columns,
                                  cv2.BORDER_CONSTANT,
                                  value=0)

    planes = [np.float32(imBorder), np.zeros(imBorder.shape, np.float32)]
    complexI = cv2.merge(
        planes)  # Add to the expanded another plane with zeros

    #    dft1 = cv2.dft(np.float32(img), flags=cv2.DFT_COMPLEX_OUTPUT)
    #    dft2 = cv2.dft(np.float32(imBorder), flags=cv2.DFT_COMPLEX_OUTPUT)
    cv2.dft(complexI,
            complexI)  # this way the result may fit in the source matrix
    cv2.split(complexI,
              planes)  # planes[0] = Re(DFT(I), planes[1] = Im(DFT(I))

    cv2.magnitude(planes[0], planes[1], planes[0])  # planes[0] = magnitude

    magI = planes[0]

    matOfOnes = np.ones(magI.shape, dtype=magI.dtype)
    cv2.add(matOfOnes, magI, magI)  #  switch to logarithmic scale
    cv2.log(magI, magI)

    magI_rows, magI_cols = magI.shape
    # crop the spectrum, if it has an odd number of rows or columns
    magI = magI[0:(magI_rows & -2), 0:(magI_cols & -2)]
    cx = int(magI_rows / 2)
    cy = int(magI_cols / 2)
    q0 = magI[0:cx, 0:cy]  # Top-Left - Create a ROI per quadrant
    q1 = magI[cx:cx + cx, 0:cy]  # Top-Right
    q2 = magI[0:cx, cy:cy + cy]  # Bottom-Left
    q3 = magI[cx:cx + cx, cy:cy + cy]  # Bottom-Right
    tmp = np.copy(q0)  # swap quadrants (Top-Left with Bottom-Right)
    magI[0:cx, 0:cy] = q3
    magI[cx:cx + cx, cy:cy + cy] = tmp
    tmp = np.copy(q1)  # swap quadrant (Top-Right with Bottom-Left)
    magI[cx:cx + cx, 0:cy] = q2
    magI[0:cx, cy:cy + cy] = tmp

    cv2.normalize(
        magI, magI, 0, 1,
        cv2.NORM_MINMAX)  # Transform the matrix with float values into a viewable image form (float between values 0 and 1)
    plt.imshow(magI)
    plt.show()
Example #19
def spectrum_magnitude(img):
    # Pad image to optimal size.
    #img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    rows, cols = img.shape[:2]
    nrows = cv2.getOptimalDFTSize(rows)
    ncols = cv2.getOptimalDFTSize(cols)
    padded = cv2.copyMakeBorder(img,
                                0,
                                nrows - rows,
                                0,
                                ncols - cols,
                                cv2.BORDER_CONSTANT,
                                value=[0, 0, 0])

    # Make space for the complex and real values.
    planes = [np.float32(padded), np.zeros(padded.shape, np.float32)]
    Icomplex = cv2.merge(planes)

    # Compute the DFT.
    dft = cv2.dft(Icomplex, Icomplex)

    # Transform real and complex values to magnitude.
    cv2.split(Icomplex, planes)
    cv2.magnitude(planes[0], planes[1], planes[0])
    Imag = planes[0]

    # Convert to log scale.
    ones = np.ones(Imag.shape, dtype=Imag.dtype)
    cv2.add(ones, Imag, Imag)
    cv2.log(Imag, Imag)

    Imag_rows, Imag_cols = Imag.shape
    Imag = Imag[0:Imag_rows & -2, 0:Imag_cols & -2]
    cx = int(Imag_rows / 2)
    cy = int(Imag_cols / 2)

    q0 = Imag[0:cx, 0:cy]  # Top left
    q1 = Imag[cx:cx + cx, 0:cy]  # Top right
    q2 = Imag[0:cx, cy:cy + cy]
    q3 = Imag[cx:cx + cx, cy:cy + cy]

    # Swap top left with bottom right.
    tmp = np.copy(q0)
    Imag[0:cx, 0:cy] = q3
    Imag[cx:cx + cx, cy:cy + cy] = tmp

    # Swap top right with bottom left.
    tmp = np.copy(q1)
    Imag[cx:cx + cx, 0:cy] = q2
    Imag[0:cx, cy:cy + cy] = tmp

    cv2.normalize(Imag, Imag, 0, 1, cv2.NORM_MINMAX)
    return Imag
Example #20
def moire_image(I, debug=1):
    rows, cols = I.shape
    m = cv2.getOptimalDFTSize(rows)
    n = cv2.getOptimalDFTSize(cols)
    padded = cv2.copyMakeBorder(I,
                                0,
                                m - rows,
                                0,
                                n - cols,
                                cv2.BORDER_CONSTANT,
                                value=[0, 0, 0])

    planes = [np.float32(padded), np.zeros(padded.shape, np.float32)]
    complexI = cv2.merge(
        planes)  # Add to the expanded another plane with zeros

    cv2.dft(complexI,
            complexI)  # this way the result may fit in the source matrix

    cv2.split(complexI,
              planes)  # planes[0] = Re(DFT(I), planes[1] = Im(DFT(I))
    cv2.magnitude(planes[0], planes[1], planes[0])  # planes[0] = magnitude
    magI = planes[0]

    matOfOnes = np.ones(magI.shape, dtype=magI.dtype)
    cv2.add(matOfOnes, magI, magI)  # switch to logarithmic scale
    cv2.log(magI, magI)

    magI_rows, magI_cols = magI.shape
    # crop the spectrum, if it has an odd number of rows or columns
    magI = magI[0:(magI_rows & -2), 0:(magI_cols & -2)]
    cx = int(magI_rows / 2)
    cy = int(magI_cols / 2)
    q0 = magI[0:cx, 0:cy]  # Top-Left - Create a ROI per quadrant
    q1 = magI[cx:cx + cx, 0:cy]  # Top-Right
    q2 = magI[0:cx, cy:cy + cy]  # Bottom-Left
    q3 = magI[cx:cx + cx, cy:cy + cy]  # Bottom-Right
    tmp = np.copy(q0)  # swap quadrants (Top-Left with Bottom-Right)
    magI[0:cx, 0:cy] = q3
    magI[cx:cx + cx, cy:cy + cy] = tmp
    tmp = np.copy(q1)  # swap quadrant (Top-Right with Bottom-Left)
    magI[cx:cx + cx, 0:cy] = q2
    magI[0:cx, cy:cy + cy] = tmp
    # print(magI)
    magII = cv2.normalize(
        magI, None, 0, 1,
        cv2.NORM_MINMAX)  # Transform the matrix with float values into a viewable image form (float between values 0 and 1)

    if debug == 1:
        cv2.imshow("fourier", magII)

    return magI
Example #21
def main(argv):

    print_help()

    filename = argv[0] if len(argv) > 0 else 'abc.png'

    I = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
    if I is None:
        print('Error Opening Image')
        return -1

    rows, cols = I.shape
    m = cv2.getOptimalDFTSize(rows)
    n = cv2.getOptimalDFTSize(cols)
    padded = cv2.copyMakeBorder(I, 0, m - rows, 0, n - cols, cv2.BORDER_CONSTANT, value=[0, 0, 0])

    planes = [np.float32(padded), np.zeros(padded.shape, np.float32)]
    complexI = cv2.merge(planes)

    cv2.dft(complexI, complexI)

    cv2.split(complexI, planes)
    cv2.magnitude(planes[0], planes[1], planes[0])
    magI = planes[0]

    matOfOnes = np.ones(magI.shape, dtype=magI.dtype)
    cv2.add(matOfOnes, magI, magI)  # switch to logarithmic scale
    cv2.log(magI, magI)

    magI_rows, magI_cols = magI.shape
    # crop the spectrum, if it has an odd number of rows or columns
    magI = magI[0:(magI_rows & -2), 0:(magI_cols & -2)]
    cx = int(magI_rows / 2)
    cy = int(magI_cols / 2)
    q0 = magI[0:cx, 0:cy]  # Top-Left - Create a ROI per quadrant
    q1 = magI[cx:cx + cx, 0:cy]  # Top-Right
    q2 = magI[0:cx, cy:cy + cy]  # Bottom-Left
    q3 = magI[cx:cx + cx, cy:cy + cy]  # Bottom-Right
    tmp = np.copy(q0)  # swap quadrants (Top-Left with Bottom-Right)
    magI[0:cx, 0:cy] = q3
    magI[cx:cx + cx, cy:cy + cy] = tmp
    tmp = np.copy(q1)  # swap quadrant (Top-Right with Bottom-Left)
    magI[cx:cx + cx, 0:cy] = q2
    magI[0:cx, cy:cy + cy] = tmp

    cv2.normalize(magI, magI, 0, 1, cv2.NORM_MINMAX)  # Transform the matrix with float values into a viewable image form (float between values 0 and 1)

    cv2.imshow("Input Image", I)  # Show the result
    cv2.imshow("spectrum magnitude", magI)
    cv2.waitKey()
Example #22
def noniternorm(img):
    b,g,r = cv2.split(img)
    b = np.float32(b)
    g = np.float32(g)
    r = np.float32(r)
    log_b = cv2.log(b) 
    log_g = cv2.log(g) 
    log_r = cv2.log(r) 
    b = cv2.exp(log_b - cv2.mean(log_b)[0])
    g = cv2.exp(log_g - cv2.mean(log_g)[0])
    r = cv2.exp(log_r - cv2.mean(log_r)[0])
    b = cv2.normalize(b, None, 0, 255, cv2.NORM_MINMAX)
    g = cv2.normalize(g, None, 0, 255, cv2.NORM_MINMAX)
    r = cv2.normalize(r, None, 0, 255, cv2.NORM_MINMAX)
    return cv2.merge((np.uint8(b),np.uint8(g),np.uint8(r)))
Example #23
    def __init__(self, img, time, **config):
        self.config = self.default_config
        # TODO: use utils.tools import dict_update
        # self.config = dict_update(self.config, dict(config))

        assert len(img.shape) == 2, 'Event Simulator takes only gray image'


        if self.config["use_log_image"]:
            img = cv.log(self.config["log_eps"] + img)

        self.last_img = img.copy()
        self.ref_values = img.copy()
        self.last_event_timestamp = np.zeros_like(img)
        self.current_time = time
        self.H, self.W = img.shape


        cp = self.config["contrast_threshold_pos"]
        cm = self.config["contrast_threshold_neg"]
        sigma_cp = self.config["contrast_threshold_sigma_pos"]
        sigma_cm = self.config["contrast_threshold_sigma_neg"]
        minimum_contrast_threshold = 0.01


        stepsize_pos = np.full_like(img, cp) + np.random.normal(0, sigma_cp, [self.H, self.W])
        stepsize_neg = np.full_like(img, cm) + np.random.normal(0, sigma_cm, [self.H, self.W])
        self.stepsize_pos = np.maximum(minimum_contrast_threshold, stepsize_pos)
        self.stepsize_neg = np.maximum(minimum_contrast_threshold, stepsize_neg)
Example #24
def main():
    threshold = 0.61

    image = cv2.imread('video_images/frame0000.png')
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    image = image.astype('float64')
    image = cv2.log(cv2.add(image, 1))
    #image = cv2.imread('data/slider_depth/images/frame_00000000.png')

    np.savetxt("CSV_files/image_init.csv",
               np.asarray(image),
               fmt='%i',
               delimiter=",")
    #read the first image (initial image) / assume it is log intensity

    events = utils.read_data('event_output/sim_events.txt')
    #events = utils.read_data('data/slider_depth/events.txt')

    for event in events:
        if event[3] == 1:
            image[int(event[1]), int(event[2])] -= threshold
        else:
            image[int(event[1]), int(event[2])] += threshold

    #add events to first image
    image = np.exp(image).astype('uint8')
    cv2.imwrite('Images/final_frame_delta_mod.png',
                cv2.cvtColor(image, cv2.COLOR_GRAY2RGB))
    np.savetxt("CSV_files/image_final.csv",
               np.asarray(image),
               fmt='%i',
               delimiter=",")
    plt.title('Final frame of reconstruction')
    plt.imshow(image, cmap='gray')
    plt.show()
Example #25
def fft_image(fft_mat):
    '''Convert the frequency-domain matrix into a viewable image'''
    # Add 1 inside the log to avoid log(0).
    log_mat = cv.log(1 + cv.magnitude(fft_mat[:, :, 0], fft_mat[:, :, 1]))
    # Normalize to the 0-255 range
    cv.normalize(log_mat, log_mat, 0, 255, cv.NORM_MINMAX)
    return np.uint8(np.around(log_mat))
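fft_image expects a two-channel (real, imaginary) DFT result. A small usage sketch; the file names are only illustrative:

# Usage sketch: compute a complex DFT and visualize its log-magnitude spectrum.
img = cv.imread('input.png', cv.IMREAD_GRAYSCALE)            # hypothetical input file
fft_mat = cv.dft(np.float32(img), flags=cv.DFT_COMPLEX_OUTPUT)
spectrum = fft_image(np.fft.fftshift(fft_mat, axes=(0, 1)))   # shift so low frequencies sit at the center
cv.imwrite('spectrum.png', spectrum)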
Example #26
def saliency_feature(img):
    img_orig = img
    img = cv2.resize(img, (64, 64))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # h = cv2.getOptimalDFTSize(img.shape[0])
    # w = cv2.getOptimalDFTSize(img.shape[1])
    # print "Resizing (%d, %d) to (%d, %d)" % (img.shape[0], img.shape[1], h, w)
    # h = (h - img.shape[0])/2.0
    # w = (w - img.shape[1])/2.0
    # img = cv2.copyMakeBorder(img, int(math.floor(h)), int(math.ceil(h)), int(math.floor(w)), int(math.ceil(w)), cv2.BORDER_CONSTANT, value=0)

    dft = cv2.dft(np.float32(img), flags=cv2.DFT_COMPLEX_OUTPUT)
    A, P = cv2.cartToPolar(dft[:, :, 0], dft[:, :, 1])
    L = cv2.log(A)
    h_n = (1. / 3**2) * np.ones((3, 3))
    R = L - cv2.filter2D(L, -1, h_n)
    S = cv2.GaussianBlur(
        cv2.idft(np.dstack(cv2.polarToCart(cv2.exp(R), P)),
                 flags=cv2.DFT_REAL_OUTPUT)**2, (0, 0), 8)
    S = cv2.resize(cv2.normalize(S, None, 0, 1, cv2.NORM_MINMAX),
                   (img_orig.shape[1], img_orig.shape[0]))

    # cv2.namedWindow('tmp1', cv2.WINDOW_NORMAL)
    # cv2.imshow('tmp1', img_orig)
    # cv2.namedWindow('tmp', cv2.WINDOW_NORMAL)
    # cv2.imshow('tmp', S)
    # cv2.waitKey()

    return S
Example #27
def measure_blurriness_DFT(img):
    
    """ More complex blurriness measure averaging top 90% of frequencies in image
    """
    
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    blur_img = cv2.GaussianBlur(img, (3, 3), 0)
    
    dftHeight = cv2.getOptimalDFTSize(blur_img.shape[0])
    dftWidth = cv2.getOptimalDFTSize(blur_img.shape[1])
    
    complexImg = np.zeros([dftHeight, dftWidth, 2], dtype=float)
    complexImg[0:img.shape[0], 0:img.shape[1], 0] = img / 255.0
            
    dft_img = cv2.dft(complexImg)
    dft_img = cv2.magnitude(dft_img[:, :, 0], dft_img[:, :, 1])
    dft_img = cv2.log(dft_img + 1)
    cv2.normalize(dft_img, dft_img, 0, 1, cv2.NORM_MINMAX)
    
    dft_img_h, dft_img_w = dft_img.shape[:2]
    win_size = int(dft_img_w * 0.55)
    y0 = max(dft_img_h // 2 - win_size, 0)  # clamp the window to the image bounds
    x0 = max(dft_img_w // 2 - win_size, 0)
    window = dft_img[y0:dft_img_h // 2 + win_size,
                     x0:dft_img_w // 2 + win_size]
 
    return np.mean(np.abs(window))
Example #28
def saliency_feature(img):
    img_orig = img
    img = cv2.resize(img, (64, 64))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # h = cv2.getOptimalDFTSize(img.shape[0])
    # w = cv2.getOptimalDFTSize(img.shape[1])
    # print "Resizing (%d, %d) to (%d, %d)" % (img.shape[0], img.shape[1], h, w)
    # h = (h - img.shape[0])/2.0
    # w = (w - img.shape[1])/2.0
    # img = cv2.copyMakeBorder(img, int(math.floor(h)), int(math.ceil(h)), int(math.floor(w)), int(math.ceil(w)), cv2.BORDER_CONSTANT, value=0)

    dft = cv2.dft(np.float32(img), flags=cv2.DFT_COMPLEX_OUTPUT)
    A, P = cv2.cartToPolar(dft[:,:,0], dft[:,:,1])
    L = cv2.log(A)
    h_n = (1./3**2)*np.ones((3,3))
    R = L - cv2.filter2D(L, -1, h_n)
    S = cv2.GaussianBlur(cv2.idft(np.dstack(cv2.polarToCart(cv2.exp(R), P)), flags=cv2.DFT_REAL_OUTPUT)**2, (0,0), 8)
    S = cv2.resize(cv2.normalize(S, None, 0, 1, cv2.NORM_MINMAX), (img_orig.shape[1],img_orig.shape[0]))

    # cv2.namedWindow('tmp1', cv2.WINDOW_NORMAL)
    # cv2.imshow('tmp1', img_orig)
    # cv2.namedWindow('tmp', cv2.WINDOW_NORMAL)
    # cv2.imshow('tmp', S)
    # cv2.waitKey()

    return S
Example #29
def compute_hash_pattern_correction(folder):
    fns = glob.glob(os.path.join(folder, "*.tif*"))

    if len(fns) == 0:
        print "No tif files found in: %s" % (folder)
        sys.exit()

    if True:
        ims = [ip.open_image(fn).astype(np.float32) for fn in fns]
        im_mean = ims[0].copy()
        for im in ims[1:]:
            im_mean += im
        im_mean /= len(ims)
        background = cv2.GaussianBlur(im_mean, (0, 0),
                                      8,
                                      borderType=cv2.BORDER_REPLICATE)
        pattern = im_mean - background
        pattern -= pattern.mean()
    else:
        background = ip.open_image(
            r"C:\Users\Neil\BT\Data\R2 FFT\FF Wafer Images\precomputed\std - ff.tif"
        ).astype(np.float32) / 4.0
        im_mean = ip.open_image(
            r"C:\Users\Neil\BT\Data\R2 FFT\FF Wafer Images\precomputed\SUM_Stack.tif"
        ).astype(np.float32) / 4.0
        pattern = im_mean - background
        pattern -= pattern.mean()

    if False:
        view = ImageViewer(im_mean)
        ImageViewer(background)
        ImageViewer(pattern)
        view.show()
        sys.exit()

    # find a mask of the peaks
    fft = fftshift(cv2.dft(pattern, flags=cv2.DFT_COMPLEX_OUTPUT))
    fft_mag = cv2.magnitude(fft[:, :, 0], fft[:, :, 1])
    fft_smooth = cv2.GaussianBlur(cv2.medianBlur(fft_mag, ksize=5),
                                  ksize=(0, 0),
                                  sigmaX=5)
    fft_log = cv2.log(fft_smooth)
    THRESH = 13.75
    mask = fft_log > THRESH

    # ignore middle (low frequency stuff)
    RADIUS = 35

    h, w = pattern.shape
    ys, xs = draw.circle(h // 2, w // 2, RADIUS)
    mask[ys, xs] = 0

    np.save("hash_fft_mask.npy", mask)
    print "FFT mask saved to 'hash_fft_mask.npy'"

    if False:
        view = ImageViewer(fft_log)
        view = ImageViewer(mask)
        view.show()
Example #30
def single_deflicker(grayimgs):
    logimgs = [cv2.log(np.float32(x)) for x in grayimgs]
    median = np.median(logimgs, axis=0)
    diff = np.abs(logimgs[-1] - median)
    blur = cv2.GaussianBlur(diff, (3,3), 1, 1)
    illumination_est = np.exp(blur)
    output = grayimgs[-1]/(illumination_est)
    return output
Example #31
def SSR2(src_img, size):
    """
    使用高斯滤波
    :param src_img: 输入的图像
    :param size: 卷积核大小
    :return: SSR算法之后的矩阵
    """
    L_blur = cv.GaussianBlur(src_img, (size, size), 0)
    img = replaceZeroes(src_img)
    L_blur = replaceZeroes(L_blur)
    dst_Img = cv.log(img / 255.0)
    dst_Lblur = cv.log(L_blur / 255.0)
    dst_IxL = cv.multiply(dst_Img, dst_Lblur)
    log_R = cv.subtract(dst_Img, dst_IxL)
    dst_R = cv.normalize(log_R, None, 0, 255, cv.NORM_MINMAX)
    log_uint8 = cv.convertScaleAbs(dst_R)
    return log_uint8
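A short usage sketch for SSR2, applying it per channel; the file name and kernel size are illustrative assumptions:

# Usage sketch: apply single-scale Retinex to each channel of a BGR image.
src = cv.imread('low_light.jpg')                  # hypothetical input file
channels = [SSR2(c, 101) for c in cv.split(src)]  # size must be an odd kernel width
result = cv.merge(channels)
cv.imwrite('ssr_result.jpg', result)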
Example #32
def single_deflicker(grayimgs):
    logimgs = [cv2.log(np.float32(x)) for x in grayimgs]
    median = np.median(logimgs, axis=0)
    diff = np.abs(logimgs[-1] - median)
    blur = cv2.GaussianBlur(diff, (3, 3), 1, 1)
    illumination_est = np.exp(blur)
    output = grayimgs[-1] / (illumination_est)
    return output
Example #33
def gamma(rng, img, gamma_range):
    gamma = rng_between(rng, gamma_range[0], gamma_range[1])
    k = 1.0 / gamma
    img = cv2.exp(k * cv2.log(img.astype('float32') + 1e-15))
    f = math.pow(255.0, 1 - k)
    img = img * f
    img = cv2.add(img, np.zeros_like(img), dtype=0)  # clip
    return img
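rng_between is not shown in this snippet; a minimal assumed version drawing a uniform value from the given range with a numpy random generator:

def rng_between(rng, low, high):
    # Assumed helper: uniform random value in [low, high) from a numpy RandomState/Generator.
    return rng.uniform(low, high)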
Example #34
def log_chroma(img): 
    """Log-chromacity"""
    b,g,r = cv2.split(img)
    b = np.float32(b)
    g = np.float32(g)
    r = np.float32(r)
    sum = cv2.pow(b+g+r+0.1, 1/3.0)
    b = b/sum
    g = g/sum
    r = r/sum
    b = cv2.log(b)
    g = cv2.log(g)
    r = cv2.log(r)
    b = cv2.normalize(b, None, 0, 255, cv2.NORM_MINMAX)
    g = cv2.normalize(g, None, 0, 255, cv2.NORM_MINMAX)
    r = cv2.normalize(r, None, 0, 255, cv2.NORM_MINMAX)
    out = cv2.merge((np.uint8(b),np.uint8(g),np.uint8(r)))
    return out
Example #35
    def hue_histogram_as_image(self, hist):
        """ Returns a nice representation of a hue histogram """

        # Build an HSV image where each column's hue matches its histogram bin
        histimg_hsv = np.zeros((200, 320, 3), dtype=np.uint8)

        mybins = np.asarray(hist.bins, dtype=np.float32).flatten()  # contains all bin values
        mybins = np.log(mybins + 1.0)  # log scale so small bins stay visible
        hi = max(float(mybins.max()), 1e-6)
        mybins *= 255.0 / hi  # rescale so the highest bin maps to 255

        h, w = histimg_hsv.shape[:2]
        hdims = mybins.shape[0]
        for x in range(w):
            xh = (180 * x) // (w - 1)  # hue sweeps from 0-180 across the image
            val = int(mybins[int(hdims * x / w)] * h / 255)
            cv2.rectangle(histimg_hsv, (x, 0), (x, h - val), (xh, 255, 64), -1)
            cv2.rectangle(histimg_hsv, (x, h - val), (x, h), (xh, 255, 255), -1)

        histimg = cv2.cvtColor(histimg_hsv, cv2.COLOR_HSV2BGR)  # convert from HSV to BGR
        return histimg
Example #36
def logTransform(filename):
	img = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)

	# Convert the image to CV_32F
	transformed = np.float32(img)
	# Add 1, following the formula c*log(r + 1)
	transformed = transformed + 1
	# Apply the logarithm
	transformed = cv2.log(transformed)
	# Normalize the values to the 0-255 range
	cv2.normalize(src=transformed, dst=transformed, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)
	# Scale back down to 8 bits
	transformed = cv2.convertScaleAbs(transformed)

	return [img, transformed]
Example #37
def normalize_data_frames(data_frames,empty_frame, dark_frame,image_process_config=None):
    """
    Build single frame from list of frames. Frame normalized to empty frame

    :param data_frames:
    :param empty_frame:
    :param dark_frame:
    :return: list of frames ()each frame - dict{angle, data}
    """
    ed_frame=empty_frame
    ed_frame=ed_frame*(ed_frame>=1)+1.0*(ed_frame<1)
    mean_data=get_mean_prepoces_frame(data_frames,image_process_config,dark_frame=dark_frame)
    dd_frame=mean_data
    dd_frame=dd_frame*(dd_frame>=1)+1.0*(dd_frame<1)
    tmp_data=dd_frame/ed_frame
    tmp_data=tmp_data*(0<tmp_data)*(tmp_data<=1)+1.0*(tmp_data>1)
    tmp_data=-cv2.log(tmp_data)
#    tmp_data=tmp_data*(0<tmp_data)*(tmp_data<=5)+5.0*(tmp_data>5)
    tmp_angle=float(data_frames[0]['angle'])
    return {'angle':tmp_angle,'data':tmp_data}
Example #38
def cv_fft_shift_orig(complexImg):
    planes = cv2.split(complexImg)
    magImg = cv2.magnitude(planes[0], planes[1])
    magImg = magImg + 1
    magImg = cv2.log(magImg)
    cx = magImg.shape[1] // 2
    cy = magImg.shape[0] // 2
    q0 = magImg[0:cy, 0:cx] # top-left
    q1 = magImg[0:cy, cx:] # top-right
    q2 = magImg[cy:, 0:cx] # bottom-left
    q3 = magImg[cy:, cx:] # bottom-right
    tmp = q0.copy()
    np.copyto(q0, q3)
    np.copyto(q3, tmp)

    tmp = q1.copy()
    np.copyto(q1, q2)
    np.copyto(q2, tmp)

    # magImg = cv2.normalize(magImg, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX)
    return magImg
Example #39
    def capture(self, frame):
        self.fn += 1

        if (self.fn%60==0):
            r1 = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            hist_img = cv2.calcHist([r1],[0],None,[256],[0,256])
            hist_img /= (640*480)
            lhist = cv2.log(hist_img)
            hist_img = (lhist*hist_img)
            if self.tamper_entropy_mid:
                self.tamper_entropy_old = self.tamper_entropy_mid
            if self.tamper_entropy_now:
                self.tamper_entropy_mid = self.tamper_entropy_now
            self.tamper_entropy_now = -(hist_img.sum())

            if self.tamper_entropy_now and self.tamper_entropy_mid and self.tamper_entropy_old:
                if abs(self.tamper_entropy_now - self.tamper_entropy_mid) > min(self.tamper_entropy_now, self.tamper_entropy_mid) / 10 \
                and abs(self.tamper_entropy_now - self.tamper_entropy_old) > min(self.tamper_entropy_now,self.tamper_entropy_old) / 10:
                    self.tamper = True
                else:
                    self.tamper = False
Example #40
    def show_specturm(self, dft_result):
        """
        Show spectrum graph.
        """
        # Split fourier into real and imaginary parts
        image_Re, image_Im = cv2.split(dft_result)

        # Compute the magnitude of the spectrum Mag = sqrt(Re^2 + Im^2)
        magnitude = cv2.sqrt(image_Re ** 2.0 + image_Im ** 2.0)

        # Compute log(1 + Mag)
        log_spectrum = cv2.log(1.0 + magnitude)

        # Rearrange the quadrants of Fourier image so that the origin is at
        # the image center
        # shift_dft(log_spectrum, log_spectrum)

        # normalize and display the results as rgb
        cv2.normalize(log_spectrum, log_spectrum, 0.0, 1.0, cv2.NORM_MINMAX)
        # plt.imshow(log_spectrum)
        # plt.show()
        cv2.imshow(self.spectrum_winname, log_spectrum)
Example #41
def log_correction(img):
    result   = np.copy(img)
    result   = result / 255.0
    result = np.ones(result.shape) + result
    result = cv2.log(result)
    return np.uint8(result * 255)
Example #42
def calcRotAngle(srcImgOrg, srcImgGray):
    angleD = 0
    opWidth = cv.getOptimalDFTSize(srcImgGray.shape[1])
    opHeight = cv.getOptimalDFTSize(srcImgGray.shape[0])

    padded = cv.copyMakeBorder(srcImgGray, 0, opHeight - srcImgGray.shape[0], 0, opWidth - srcImgGray.shape[1], cv.BORDER_CONSTANT)
    plane = np.zeros(padded.shape, dtype=np.float32)
    planes = [np.float32(padded), plane]
    #Merge into a double-channel image
    comImg = cv.merge(planes)
    cv.dft(comImg,comImg)
    cv.split(comImg, planes)

    planes[0] = cv.magnitude(planes[0], planes[1]);
    magMat = planes[0]
    magMat += np.ones(magMat.shape)
    cv.log(magMat,magMat);

    cx = magMat.shape[1] // 2
    cy = magMat.shape[0] // 2
    q0 = magMat[0:cy, 0:cx]   # top-left
    q1 = magMat[cy:, 0:cx]    # bottom-left
    q2 = magMat[0:cy, cx:]    # top-right
    q3 = magMat[cy:, cx:]     # bottom-right
    c1 = np.vstack((q3, q2))
    c2 = np.vstack((q1, q0))
    magMat2 = np.hstack((c1, c2))

    cv.normalize(magMat2, magMat, 0, 1, cv.NORM_MINMAX)
    magMat = cv.resize(magMat, (magMat.shape[1] // 2, magMat.shape[0] // 2))
    magMat = magMat * 255
    magMat = cv.threshold(magMat,GRAY_THRESH,255,cv.THRESH_BINARY)[1].astype(np.uint8)
    lines = cv.HoughLines(magMat,1,np.pi/180, HOUGH_VOTE);
    #cv.imshow("mag_binary", magMat);
    #lineImg = np.ones(magMat.shape,dtype=np.uint8)
    angle = 0
    if lines is not None and len(lines) != 0:
        for line in lines[0]:
            #print(line)
            rho = line[0]
            theta = line[1]
            if (theta < (np.pi / 4.)) or (theta > (3. * np.pi / 4.0)):
                print('Vertical line, rho: %f, theta: %f' % (rho, theta))
                pt1 = (int(rho / np.cos(theta)), 0)
                pt2 = (int((rho - magMat.shape[0] * np.sin(theta)) / np.cos(theta)), magMat.shape[0])
                #cv.line(lineImg, pt1, pt2, (255))
                angle = theta
            else:
                print('Horizontal line, rho: %f, theta: %f' % (rho, theta))
                pt1 = (0, int(rho / np.sin(theta)))
                pt2 = (magMat.shape[1], int((rho - magMat.shape[1] * np.cos(theta)) / np.sin(theta)))
                #cv.line(lineImg, pt1, pt2, (255), 1)
                angle = theta + np.pi / 2
        #cv.imshow('lineImg',lineImg)
        #Find the proper angel
        if angle > (np.pi / 2):
            angle = angle - np.pi

        #Calculate the rotation angel
        #The image has to be square,
        #so that the rotation angel can be calculate right
        print('angle : %f' % angle)

        #print srcImgOrg.shape
        alpha = float(srcImgOrg.shape[1]) / float(srcImgOrg.shape[0])
        print('alpha : %f' % alpha)
        if alpha > 1:
            angleT = srcImgOrg.shape[1] * np.tan(angle) / srcImgOrg.shape[0];
            angleD = np.arctan(angleT) * 180 / np.pi;
        else:
            angleD = angle * 180 / np.pi
        print('angleD : %f' % angleD)
    return angleD
Example #43
def dftSkew(im):

    # convert to grayscale
    # im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    h, w = im.shape[:2]

    realInput = im.astype(np.float64)

    # perform an optimally sized dft
    dft_M = cv2.getOptimalDFTSize(w)
    dft_N = cv2.getOptimalDFTSize(h)

    # copy A to dft_A and pad dft_A with zeros
    dft_A = np.zeros((dft_N, dft_M, 2), dtype=np.float64)
    dft_A[:h, :w, 0] = realInput

    # no need to pad bottom part of dft_A with zeros because of
    # use of nonzeroRows parameter in cv2.dft()
    cv2.dft(dft_A, dst=dft_A, nonzeroRows=h)

    cv2.imshow("win", im)

    # Split fourier into real and imaginary parts
    image_Re, image_Im = cv2.split(dft_A)

    # Compute the magnitude of the spectrum Mag = sqrt(Re^2 + Im^2)
    magnitude = cv2.sqrt(image_Re ** 2.0 + image_Im ** 2.0)

    # Compute log(1 + Mag)
    log_spectrum = cv2.log(1.0 + magnitude)

    # Rearrange the quadrants of Fourier image so that the origin is at
    # the image center
    shift_dft(log_spectrum, log_spectrum)

    # normalize and display the results as rgb
    cv2.normalize(log_spectrum, log_spectrum, 0.0, 1.0, cv2.NORM_MINMAX)
    magMat = log_spectrum * 255
    magMat = np.uint8(np.around(magMat))
    cv2.imwrite("dft.png", magMat)
    rows = h
    cols = w
    #    //imwrite("imageText_mag.jpg",magImg);

    #    //Turn into binary image
    (_, magImg) = cv2.threshold(magMat, 160, 255, cv2.THRESH_BINARY)
    cv2.imwrite("dft1.png", magImg)
    #    //imwrite("imageText_bin.jpg",magImg);

    #    //Find lines with Hough Transformation
    pi180 = np.pi / 180
    linImg = np.zeros(magImg.shape)
    lines = cv2.HoughLines(magImg, 1, pi180, 100, 0, 0)
    print(lines)
    for line in lines[0]:
        rho = line[0]
        theta = line[1]
        a = np.cos(theta)
        b = np.sin(theta)
        x0 = a * rho
        y0 = b * rho
        pt1 = (int(x0 + 1000 * (-b)), int(y0 + 1000 * (a)))
        pt2 = (int(x0 - 1000 * (-b)), int(y0 - 1000 * (a)))
        cv2.line(linImg, pt1, pt2, (255), 1)
    cv2.imwrite("dlines.png", linImg)
    #    //imwrite("imageText_line.jpg",linImg);
    #    if(lines.size() == 3){
    #        cout << "found three angels:" << endl;
    #            cout << lines[0][1]*180/CV_PI << endl << lines[1][1]*180/CV_PI << endl << lines[2][1]*180/CV_PI << endl << endl;
    #    }

    #    //Find the proper angel from the three found angels
    angel = 0
    piThresh = np.pi / 90
    pi2 = np.pi / 2
    for line in lines[0]:
        theta = line[1]
        if abs(theta) < piThresh or abs(theta - pi2) < piThresh:
            continue
        else:
            angel = theta
            break

    #    //Calculate the rotation angel
    #    //The image has to be square,
    #    //so that the rotation angel can be calculate right
    if angel < pi2:
        angel = angel
    else:
        angel = angel - np.pi

    if angel != pi2:
        angelT = rows * np.tan(angel) / cols
        angel = np.arctan(angelT)
    angelD = angel * 180 / np.pi

    # Rotate the image to recover
    rotMat = cv2.getRotationMatrix2D((cols / 2, rows / 2), angelD, 1.0)
    dstImg = cv2.warpAffine(im, rotMat, (cols, rows))
    cv2.imwrite("dresult.png", dstImg)
Example #44
    // crop the spectrum, if it has an odd number of rows or columns
    magI = magI(Rect(0, 0, magI.cols & -2, magI.rows & -2));

    // rearrange the quadrants of Fourier image  so that the origin is at the image center
    int cx = magI.cols/2;
    int cy = magI.rows/2;

    Mat q0(magI, Rect(0, 0, cx, cy));   // Top-Left - Create a ROI per quadrant
    Mat q1(magI, Rect(cx, 0, cx, cy));  // Top-Right
    Mat q2(magI, Rect(0, cy, cx, cy));  // Bottom-Left
    Mat q3(magI, Rect(cx, cy, cx, cy)); // Bottom-Right
'''
planes = cv2.split(complexImg)
magImg = cv2.magnitude(planes[0], planes[1])
magImg = magImg + 1
magImg = cv2.log(magImg)

cx = magImg.shape[1] // 2
cy = magImg.shape[0] // 2

q0 = magImg[0:cy, 0:cx] # top-left
q1 = magImg[0:cy, cx:] # top-right
q2 = magImg[cy:, 0:cx] # bottom-left
q3 = magImg[cy:, cx:] # bottom-right

'''
    Mat tmp;                           // swap quadrants (Top-Left with Bottom-Right)
    q0.copyTo(tmp);
    q3.copyTo(q0);
    tmp.copyTo(q3);
Example #45
    def capture(self, frame):
        self.fn += 1
        if (self.fn % 60) == 0:
            r, img2 = cv2.threshold(self.rgbfilter_gray(frame, self.THRESHOLD_RGB), self.THRESHOLD_RGB, 255, cv2.THRESH_BINARY)
            r = (cv2.countNonZero(img2) * 100) / (frame.shape[0] * frame.shape[1])
            if r < 20:
                if self.THRESHOLD_RGB < 50:
                    self.THRESHOLD_RGB += 1
            elif r > 30:
                if self.THRESHOLD_RGB > 1:
                    self.THRESHOLD_RGB -= 1

        # Smoke detection processing here
        imgbg = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
        imgbg = cv2.equalizeHist(imgbg)
        r,mask = self.filters(frame, self.bground, self.THRESHOLD_RGB)
        mask1 = cv2.bitwise_and(imgbg,mask)
        mask2 = cv2.bitwise_and(self.bground,mask)

        self.frames.append(imgbg)

        self.s_fgmask = cv2.absdiff(mask1,mask2)
        r,fgmask1 = cv2.threshold(self.s_fgmask, self.THRESHOLD_HIGH, 255, cv2.THRESH_BINARY_INV)
        r,fgmask2 = cv2.threshold(self.s_fgmask, self.THRESHOLD_LOW, 255, cv2.THRESH_BINARY)
        self.s_fgmask = cv2.bitwise_and(fgmask1,fgmask2)
        res = imgbg

        contours, hierarchy = cv2.findContours(self.s_fgmask,cv2.RETR_CCOMP,cv2.CHAIN_APPROX_SIMPLE)

        self.contours2 = []
        extents_t = []
        entropies_t = []
        areas_t = []
        points_t = []
        cx_t = []
        cy_t = []

        extents_n = []
        entropies_n = []
        areas_n = []
        points_n = []
        quality_n = []
        self.index_n = []
        curs_n = []
        cx_n = []
        cy_n = []


        nowc = 0
        # Get data on our contours
        self.bground2 = self.bground

        for cnt in contours:
            area = cv2.contourArea(cnt)
            nowc+=area
            if area > self.MINCONTOUR:
                empty = self.empty_src.copy()
                cv2.drawContours(empty, cnt, -1, (255), -1)

                res1 = cv2.bitwise_and(self.bground2,empty)
                r,msk = cv2.threshold(res1, 0, 255, cv2.THRESH_BINARY)

                hist_img = cv2.calcHist([res1],[0],msk,[256],[0,256])
                hist_img /= area
                bgmax = hist_img.max()
                lhist = cv2.log(hist_img)
                hist_img = (lhist*hist_img)
                entropy_bg = -(hist_img.sum())

                res1 = res
                res1 = cv2.bitwise_and(res1,empty)
                r,msk = cv2.threshold(res1, 0, 255, cv2.THRESH_BINARY)

                hist = cv2.calcHist([res1],[0],msk,[256],[0,256])
                hist  /= area
                fgmax = hist.max()
                lhist = cv2.log(hist)
                hist = (lhist*hist)
                entropy_fg = -(hist.sum())

                res1 = cv2.absdiff(res,self.bground2)
                res1 = cv2.bitwise_and(res1,empty)
                r,msk = cv2.threshold(res1, 0, 255, cv2.THRESH_BINARY)


                hist2 = cv2.calcHist([res1],[0],msk,[256],[0,256])
                hist2  /= area
                fgmax = hist2.max()
                lhist = cv2.log(hist2)
                hist2 = (lhist*hist2)
                entropy_diff = -(hist2.sum())

                if (entropy_diff<entropy_fg and entropy_diff<entropy_bg):
                    self.contours2.append(cnt)
                    points_t.append(len(cnt))
                    (x,y,w,h) = cv2.boundingRect(cnt)

                    entropies_t.append((entropy_bg,entropy_fg,entropy_diff))
                    extents_t.append((x,y,w,h))
                    areas_t.append(area)
                    M = cv2.moments(cnt)
                    cx_t.append(int(M['m10']/M['m00']))
                    cy_t.append(int(M['m01']/M['m00']))


        if nowc>(((frame.shape[0]*frame.shape[1])*2)/3) and self.prevc!=0:
            for a in range(0,self.FRAMES_BACK):
                self.frames.pop(0)
                self.frames.append(mask1)
            extents_t = []
            entropies_t = []
            points_t = []
            areas_t = []
            curs_t = []
            cx_t = []
            cy_t = []
            self.extents = []
            self.entropies = []
            self.points = []
            self.areas = []
            self.curs = []
            self.cx = []
            self.cy = []
            self.prevc = nowc
        self.prevc = nowc

        for a in range(0,len(self.cx)):
            found = False
            for b in range(0,len(cx_t[:])):
                if not found and cx_t[b]>=0 and self.curs[a]>=0 and ((abs(self.cx[a]-cx_t[b])<self.THRESHOLD_GEO/2 \
                and abs(self.cy[a]-cy_t[b])<self.THRESHOLD_GEO/2) or (abs(self.cx[a]-cx_t[b])<self.THRESHOLD_GEO  \
                and abs(self.cy[a]-cy_t[b])<self.THRESHOLD_GEO)):
                    if ((self.areas[a]!=areas_t[b] or (extents_t[b]!=self.extents[a])) or self.points[a]!=points_t[b]) \
                    and abs(entropies_t[b][2]-self.entropies[a][2])<min(entropies_t[b][2],self.entropies[a][2]):
                        self.index_n.append(b)
                        extents_n.append(extents_t[b])
                        areas_n.append(areas_t[b])
                        points_n.append(points_t[b])
                        entropies_n.append(entropies_t[b])
                        cx_n.append(cx_t[b])
                        cy_n.append(cy_t[b])

                        #if curs[a]>=(FRAMES)/2:
                        #    print fn,entropies_t[b],entropies[a],curs[a]
                        curs_n.append(self.curs[a]+1)

                    extents_t[b] = -200
                    areas_t[b] = -200
                    points_t[b] = -200
                    cx_t[b] = -200
                    cy_t[b] = -200
                    found = True

        # Get new ones
        for a in range(0,len(extents_t)):
            if extents_t[a]>=0 and areas_t[a]>=0 and points_t[a]>0:
                curs_n.append(0)
                extents_n.append(extents_t[a])
                areas_n.append(areas_t[a])
                points_n.append(points_t[a])
                entropies_n.append((entropies_t[a][0],entropies_t[a][1],entropies_t[a][2]))
                self.index_n.append(a)
                cx_n.append(cx_t[a])
                cy_n.append(cy_t[a])

        # Copy over the new frames
        self.extents = extents_n[:]
        self.entropies = entropies_n[:]
        self.areas = areas_n[:]
        self.points = points_n[:]
        self.curs = curs_n[:]
        self.cx = cx_n[:]
        self.cy = cy_n[:]
Example #46
	def describe(self, image):
		# Convert to grayscale
		im = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
		features = []
		h, w = im.shape[:2]

		realInput = im.astype(np.float64)

		# perform an optimally sized dft
		dft_M = cv2.getOptimalDFTSize(w)
		dft_N = cv2.getOptimalDFTSize(h)

		# copy A to dft_A and pad dft_A with zeros
		dft_A = np.zeros((dft_N, dft_M, 2), dtype=np.float64)
		dft_A[:h, :w, 0] = realInput

		# no need to pad bottom part of dft_A with zeros because of
		# use of nonzeroRows parameter in cv2.dft()
		cv2.dft(dft_A, dst=dft_A, nonzeroRows=h)

		# Split fourier into real and imaginary parts
		image_Re, image_Im = cv2.split(dft_A)

		# Compute the magnitude of the spectrum Mag = sqrt(Re^2 + Im^2)
		magnitude = cv2.sqrt(image_Re**2.0 + image_Im**2.0)

		# Compute log(1 + Mag)
		log_spectrum = cv2.log(1.0 + magnitude)

		# Rearrange the quadrants of Fourier image so that the origin is at
		# the image center
		self.shift_dft(log_spectrum, log_spectrum)

		# normalize and display the results as rgb
		cv2.normalize(log_spectrum, log_spectrum, 0.0, 1.0, cv2.NORM_MINMAX)

		h, w = log_spectrum.shape[:2]
		
		# Average the log spectrum over non-overlapping maskSize x maskSize blocks
		# (mask sizes 3, 5 or 7) and use the block means as the feature vector
		m = self.maskSize
		half = m // 2
		i_h = half
		while i_h + half < h:
			i_w = half
			while i_w + half < w:
				block = log_spectrum[i_h - half:i_h + half + 1, i_w - half:i_w + half + 1]
				features.append(block.sum() / float(m * m))
				i_w += m
			i_h += m

		return features
Example #47
    # copy A to dft_A and pad dft_A with zeros
    dft_A = np.zeros((dft_N, dft_M, 2), dtype=np.float64)
    dft_A[:h, :w, 0] = realInput

    # no need to pad bottom part of dft_A with zeros because of
    # use of nonzeroRows parameter in cv2.dft()
    cv2.dft(dft_A, dst=dft_A, nonzeroRows=h)

    cv2.imshow("win", im)

    # Split fourier into real and imaginary parts
    image_Re, image_Im = cv2.split(dft_A)

    # Compute the magnitude of the spectrum Mag = sqrt(Re^2 + Im^2)
    magnitude = cv2.sqrt(image_Re**2.0 + image_Im**2.0)

    # Compute log(1 + Mag)
    log_spectrum = cv2.log(1.0 + magnitude)

    # Rearrange the quadrants of Fourier image so that the origin is at
    # the image center
    shift_dft(log_spectrum, log_spectrum)

    # normalize and display the results as rgb
    cv2.normalize(log_spectrum, log_spectrum, 0.0, 1.0, cv2.NORM_MINMAX)
    cv2.imshow("magnitude", log_spectrum)

    cv2.waitKey(0)
    cv2.destroyAllWindows()
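Several of the DFT snippets above call a shift_dft helper that is not reproduced here. A minimal sketch of the usual quadrant swap (same effect as np.fft.fftshift), assuming src and dst have even dimensions:

def shift_dft(src, dst):
    # Assumed helper: swap diagonally opposite quadrants so the zero-frequency
    # component ends up at the center of the spectrum.
    h, w = src.shape[:2]
    cy, cx = h // 2, w // 2
    top_left = src[0:cy, 0:cx].copy()
    top_right = src[0:cy, cx:w].copy()
    bottom_left = src[cy:h, 0:cx].copy()
    bottom_right = src[cy:h, cx:w].copy()
    dst[0:cy, 0:cx] = bottom_right
    dst[cy:h, cx:w] = top_left
    dst[0:cy, cx:w] = bottom_left
    dst[cy:h, 0:cx] = top_right
    return dst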
Example #48
def main(argv):

    print_help()

    filename = argv[0] if len(argv) > 0 else 'lena.jpg'

    I = cv.imread(cv.samples.findFile(filename), cv.IMREAD_GRAYSCALE)
    if I is None:
        print('Error opening image')
        return -1
    ## [expand]
    rows, cols = I.shape
    m = cv.getOptimalDFTSize( rows )
    n = cv.getOptimalDFTSize( cols )
    padded = cv.copyMakeBorder(I, 0, m - rows, 0, n - cols, cv.BORDER_CONSTANT, value=[0, 0, 0])
    ## [expand]
    ## [complex_and_real]
    planes = [np.float32(padded), np.zeros(padded.shape, np.float32)]
    complexI = cv.merge(planes)         # Add to the expanded another plane with zeros
    ## [complex_and_real]
    ## [dft]
    cv.dft(complexI, complexI)         # this way the result may fit in the source matrix
    ## [dft]
    # compute the magnitude and switch to logarithmic scale
    # = > log(1 + sqrt(Re(DFT(I)) ^ 2 + Im(DFT(I)) ^ 2))
    ## [magnitude]
    cv.split(complexI, planes)                   # planes[0] = Re(DFT(I), planes[1] = Im(DFT(I))
    cv.magnitude(planes[0], planes[1], planes[0])# planes[0] = magnitude
    magI = planes[0]
    ## [magnitude]
    ## [log]
    matOfOnes = np.ones(magI.shape, dtype=magI.dtype)
    cv.add(matOfOnes, magI, magI) #  switch to logarithmic scale
    cv.log(magI, magI)
    ## [log]
    ## [crop_rearrange]
    magI_rows, magI_cols = magI.shape
    # crop the spectrum, if it has an odd number of rows or columns
    magI = magI[0:(magI_rows & -2), 0:(magI_cols & -2)]
    cx = int(magI_rows/2)
    cy = int(magI_cols/2)

    q0 = magI[0:cx, 0:cy]         # Top-Left - Create a ROI per quadrant
    q1 = magI[cx:cx+cx, 0:cy]     # Top-Right
    q2 = magI[0:cx, cy:cy+cy]     # Bottom-Left
    q3 = magI[cx:cx+cx, cy:cy+cy] # Bottom-Right

    tmp = np.copy(q0)               # swap quadrants (Top-Left with Bottom-Right)
    magI[0:cx, 0:cy] = q3
    magI[cx:cx + cx, cy:cy + cy] = tmp

    tmp = np.copy(q1)               # swap quadrant (Top-Right with Bottom-Left)
    magI[cx:cx + cx, 0:cy] = q2
    magI[0:cx, cy:cy + cy] = tmp
    ## [crop_rearrange]
    ## [normalize]
    cv.normalize(magI, magI, 0, 1, cv.NORM_MINMAX) # Transform the matrix with float values into a
    ## viewable image form(float between values 0 and 1).
    ## [normalize]
    cv.imshow("Input Image"       , I   )    # Show the result
    cv.imshow("spectrum magnitude", magI)
    cv.waitKey()