Example #1
def _equalize_images(distractor_images_dict, targets):
    mean_lm_list = []
    # calculate mean luminance of each image
    # TODO: add contrast equalization
    for key in distractor_images_dict:
        for im in distractor_images_dict[key]:
            data = np.asarray(im)
            non_transparent_indices = (data[:, :, 3] != 0)
            yiq = rgb2yiq(data[:, :, 0:3] / 255)
            mean_lm_list.append(np.mean(yiq[non_transparent_indices, 0]))
    for im in targets:
        data = np.asarray(im)
        non_transparent_indices = (data[:, :, 3] != 0)
        yiq = rgb2yiq(data[:, :, 0:3] / 255)
        mean_lm_list.append(np.mean(yiq[non_transparent_indices, 0]))
    mean_lm = np.mean(mean_lm_list)
    for key in distractor_images_dict:
        for j in range(len(distractor_images_dict[key])):
            new_im = _equalize_luminance(distractor_images_dict[key][j],
                                         mean_lm)
            new_im.info = distractor_images_dict[key][j].info
            distractor_images_dict[key][j] = new_im
    for j in range(len(targets)):
        new_im = _equalize_luminance(targets[j], mean_lm)
        new_im.info = targets[j].info
        targets[j] = new_im
    return distractor_images_dict, targets
Example #2
 def test_yuv(self):
     rgb = np.array([[[1.0, 1.0, 1.0]]])
     assert_array_almost_equal(rgb2yuv(rgb), np.array([[[1, 0, 0]]]))
     assert_array_almost_equal(rgb2yiq(rgb), np.array([[[1, 0, 0]]]))
     assert_array_almost_equal(rgb2ypbpr(rgb), np.array([[[1, 0, 0]]]))
     assert_array_almost_equal(rgb2ycbcr(rgb), np.array([[[235, 128, 128]]]))
     rgb = np.array([[[0.0, 1.0, 0.0]]])
     assert_array_almost_equal(rgb2yuv(rgb), np.array([[[0.587, -0.28886916, -0.51496512]]]))
     assert_array_almost_equal(rgb2yiq(rgb), np.array([[[0.587, -0.27455667, -0.52273617]]]))
     assert_array_almost_equal(rgb2ypbpr(rgb), np.array([[[0.587, -0.331264, -0.418688]]]))
     assert_array_almost_equal(rgb2ycbcr(rgb), np.array([[[144.553, 53.797, 34.214]]]))
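A note on where the expected values above come from: a pure-green input simply selects the G column of the RGB-to-YIQ matrix. The sketch below illustrates this; the matrix constants are rounded approximations of skimage's yiq_from_rgb, not quoted from its source.

import numpy as np

# approximate RGB -> YIQ matrix (per skimage's definition; constants rounded)
yiq_from_rgb = np.array([[0.299, 0.587, 0.114],
                         [0.596, -0.2746, -0.3213],
                         [0.2115, -0.5227, 0.3112]])
# a pure-green pixel picks out the middle column
print(yiq_from_rgb @ np.array([0.0, 1.0, 0.0]))  # ~[0.587, -0.2746, -0.5227]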
Example #3
def match_luminance(content, style):
    content = content / 255
    style = style / 255
    content = color.rgb2yiq(content)
    style = color.rgb2yiq(style)
    mean_c = np.mean(content)
    mean_s = np.mean(style)
    stddev_c = np.std(content)
    stddev_s = np.std(style)
    style = (stddev_c / stddev_s) * (style - mean_s) + mean_c
    style = np.clip(color.yiq2rgb(style), 0, 1) * 255
    return style
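A quick smoke test for the function above (hypothetical, random data, not from the original repo): note the function matches mean and standard deviation over all three YIQ channels jointly, and the final clipping can shift the statistics slightly.

import numpy as np
from skimage import color

rng = np.random.default_rng(0)
content = rng.uniform(0, 255, (32, 32, 3))
style = rng.uniform(0, 255, (32, 32, 3))
matched = match_luminance(content, style)
# the matched style's overall YIQ mean should sit near the content's
print(color.rgb2yiq(content / 255).mean(), color.rgb2yiq(matched / 255).mean())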
Example #4
 def test_yuv_roundtrip(self):
     img_rgb = img_as_float(self.img_rgb)[::16, ::16]
     assert_array_almost_equal(yuv2rgb(rgb2yuv(img_rgb)), img_rgb)
     assert_array_almost_equal(yiq2rgb(rgb2yiq(img_rgb)), img_rgb)
     assert_array_almost_equal(ypbpr2rgb(rgb2ypbpr(img_rgb)), img_rgb)
     assert_array_almost_equal(ycbcr2rgb(rgb2ycbcr(img_rgb)), img_rgb)
     assert_array_almost_equal(ydbdr2rgb(rgb2ydbdr(img_rgb)), img_rgb)
Example #5
 def processingLayer1(self):
     """
     Processing for YIQ, LAB, YCBC color models
     """
     print(" ")
     print("Preprocessing - Layer 1 processing")
     widgets = [
         progressbar.Percentage(), ' ',
         progressbar.Bar(), ' ',
         progressbar.ETA(), ' ',
         progressbar.AdaptiveETA()
     ]
     bar = progressbar.ProgressBar(widgets=widgets, maxval=3)
     bar.start()
     self.yiq = rgb2yiq(self.image)  ## 1
     self.lab = rgb2lab(self.image)  ## 2
     self.ycb = rgb2ycbcr(self.image)  ## 3
     bar.update(1)
     ## morph procedure
     binary_yiq = self.morfprocess(self.yiq, 2, 1, 1, 0, self.percent)
     binary_lab = self.morfprocess(self.lab, 2, 1, 0, 0, self.percent)
     binary_ycb = self.morfprocess(self.ycb, 2, 1, 0, 0, self.percent)
     bar.update(2)
     ## Arrange the information
     self.Array_layer1[:, :, 0] = binary_yiq
     self.Array_layer1[:, :, 1] = binary_lab
     self.Array_layer1[:, :, 2] = binary_ycb
     bar.update(3)
     ## Return the information
     return self.Array_layer1
Example #6
 def test_rgb2yiq_conversion(self):
     rgb = img_as_float(self.img_rgb)[::16, ::16]
     yiq = rgb2yiq(rgb).reshape(-1, 3)
     gt = np.array([colorsys.rgb_to_yiq(pt[0], pt[1], pt[2])
                    for pt in rgb.reshape(-1, 3)]
                   )
     assert_almost_equal(yiq, gt, decimal=2)
Example #7
def dsqm(img1,original,blkSize = 100):
    img1 = img1.astype(np.float32) / 256.0
    original = original.astype(np.float32) / 256.0

    h,w,p = original.shape
    imgL = original
    imgV = img1
    cp = np.zeros((h,w))
    offsetX = 2
    offsetY = 2
    imgLYIQ = rgb2yiq(imgL).astype(np.float32)
    imgLY = imgLYIQ[:, :, 0]  # Y (luminance) is channel 0

    imgVYIQ = rgb2yiq(imgV).astype(np.float32)
    imgVY = imgVYIQ[:, :, 0]
    
    brow = blkSize
    bcol = brow
    blkV = makeBlocks(imgVY, brow, bcol)
    blkRows, blkCols = blkV.shape[0:2]
    # np.full with a dict literal would share one object across every cell;
    # build independent per-cell dicts instead
    bestMatch = np.empty((blkRows, blkCols), dtype=object)
    blkVmatch = np.empty((blkRows, blkCols), dtype=object)
    for j in range(blkRows):
        for i in range(blkCols):
            blkVmatch[j, i] = {}
    score = np.zeros(blkV.shape)
    for i in range(blkCols):
        for j in range(blkRows):
            T = blkV[j,i]
            Tx = i * bcol
            Ty = j * brow
            Bx = (i+1) * bcol
            By = (j+1) * brow

            img = imgLY[max(0, Ty-offsetY):min(h, By+offsetY),max(0,Tx-offsetX):min(w, Bx+offsetX)]
            orig = original[max(0, Ty-offsetY):min(h, By+offsetY),max(0,Tx-offsetX):min(w, Bx+offsetX)]
            warped = imgV[max(0, Ty-offsetY):min(h, By+offsetY),max(0,Tx-offsetX):min(w, Bx+offsetX)]
            b = imgV[j*bcol:(j+1)*bcol,i*brow:(i+1)*brow]
            mp = cv2.matchTemplate(orig, b, cv2.TM_CCORR_NORMED)
            #mp = cv2.matchTemplate(img, T, cv2.TM_CCORR_NORMED)
            y,x = np.unravel_index(np.argmax(mp),mp.shape)
            bestMatch[j,i] = {'v': mp[y,x], 'x': x, 'y': y}
            blkVmatch[j,i]['arr'] = img[y:(y+bcol),x:(x+brow)]
            a = orig[y:y+bcol,x:x+brow]
            x = phasecong(T)
            y = phasecong(blkVmatch[j,i]['arr'])
            score[j,i] = np.abs(x[0].mean()-y[0].mean())
            cp[j*bcol:(j+1)*bcol,i*brow:(i+1)*brow] = (y[0]-x[0])**2*256
    return cp.mean(), cp
Example #8
 def extract(self, x):
     YIQ = rgb2yiq(x)  # From RGB to YIQ
     Y = YIQ[:, :, 0]
     (hist, _) = np.histogram(Y.ravel(), bins=self.numPointsHistogram)
     # normalize the histogram
     hist = hist.astype('float')
     hist /= (hist.sum() + 1e-7)
     return hist
Example #9
def YIQ(image):
    image_yiq = rgb2yiq(image)
    image_yiq = np.around(image_yiq * 255, decimals=0)

    #print(image_yiq)
    #print(type(image_yiq))
    # note: I and Q can be negative, so this uint8 cast wraps those values
    image_yiq = image_yiq.astype(np.uint8)
    #image_yiq = image_yiq.astype(np.float32)
    return image_yiq
Example #10
def compute_priorities(front, C, D, cote_patch, mask, img, color):
    (n, m) = C.shape
    P = np.zeros((n, m))
    coordonnees = np.stack((np.nonzero(front)[0], np.nonzero(front)[1]),
                           axis=-1)
    if (color):
        imgYIQ = rgb2yiq(img)
        imgY = imgYIQ[:, :, 0]

        raw_gradient = np.gradient(imgY)
        gradient = np.stack((raw_gradient[0], raw_gradient[1]), axis=-1)
    else:
        raw_gradient = np.gradient(img)
        gradient = np.stack((raw_gradient[0], raw_gradient[1]), axis=-1)

    raw_normal_vectors = compute_n(mask)
    normal_vectors = np.stack((raw_normal_vectors[0], raw_normal_vectors[1]),
                              axis=-1)

    element_structurant = np.ones((3, 3))
    #temp_mask = scnd.morphology.binary_dilation(mask, element_structurant).astype('float')
    old_front_approx = abs(
        mask - scnd.morphology.binary_dilation(mask, element_structurant))

    for psi_p in coordonnees:
        gradient[psi_p[0],
                 psi_p[1]] = calculate_grad_approx(psi_p[0], psi_p[1],
                                                   gradient, old_front_approx)
        D[psi_p[0], psi_p[1]] = abs(
            np.vdot(
                np.array([
                    gradient[psi_p[0], psi_p[1]][1], gradient[psi_p[0],
                                                              psi_p[1]][0]
                ]), normal_vectors[psi_p[0], psi_p[1]]))

        p_x_min = max(0, psi_p[0] - cote_patch // 2)
        p_x_max = min(psi_p[0] + cote_patch // 2, n - 1) + 1
        p_y_min = max(0, psi_p[1] - cote_patch // 2)
        p_y_max = min(psi_p[1] + cote_patch // 2, m - 1) + 1

        #confidence = np.sum(C[p_x_min: p_x_max, p_y_min: p_y_max]) / ((p_x_max - p_x_min) * (p_y_max - p_y_min))
        norme = (p_x_max - p_x_min) * (p_y_max - p_y_min)  # Approximation
        total = 0  # renamed from `sum` to avoid shadowing the built-in
        for i in range(p_x_min, p_x_max):
            for j in range(p_y_min, p_y_max):
                if mask[i, j] == 0.0:
                    total += C[i, j]

        C[psi_p[0], psi_p[1]] = total / norme

    for i in range(n):
        for j in range(m):
            if front[i, j] == 1.0:
                P[i, j] = C[i, j] * D[i, j]

    return P, C
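For reference, the priority computed above matches the exemplar-based inpainting scheme of Criminisi et al. (my reading of the code; the source itself is unattributed): for each front pixel p with patch \Psi_p,

P(p) = C(p)\,D(p), \qquad
C(p) = \frac{1}{|\Psi_p|} \sum_{q \in \Psi_p \setminus \Omega} C(q), \qquad
D(p) = \left| \nabla I_p \cdot \vec{n}_p \right|

where \Omega is the masked region; the code approximates |\Psi_p| by the clipped patch area (norme) and omits Criminisi's normalization constant \alpha in the data term.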
Example #11
def yiq_blend(im1, im2, mask, max_levels, filter_size_im, filter_size_mask):
    """
    :param im1: input rgb image to be blended
    :param im2: input rgb image to be blended
    :param mask: boolean mask containing True and False representing which parts of im1 and im2 should
                appear in the resulting im_blend
    :param max_levels: maximum number of levels to use when generating the Gaussian and Laplacian pyramids.
    :param filter_size_im: size of the Gaussian filter (an odd scalar representing a square filter)
                    used in the construction of the Laplacian pyramids of im1 and im2
    :param filter_size_mask: size of the Gaussian filter (an odd scalar representing a square filter)
                    used in the construction of the Gaussian pyramid of mask.
    :return: the blended image
    """
    y_im1 = rgb2yiq(im1)
    y_im2 = rgb2yiq(im2)
    channel = 0
    y_im2[:, :, channel] = pyramid_blending(y_im1[:, :, channel],
                                            y_im2[:, :,
                                                  channel], mask, max_levels,
                                            filter_size_im, filter_size_mask)

    blended_i = yiq2rgb(y_im2)
    return blended_i
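The core of the function above is that only the Y (luminance) channel is pyramid-blended while im2's chroma (I, Q) is kept. A minimal sketch of that idea without the pyramid machinery (naive_yiq_blend is hypothetical; it assumes same-shape float RGB inputs in [0, 1] and a boolean mask):

import numpy as np
from skimage.color import rgb2yiq, yiq2rgb

def naive_yiq_blend(im1, im2, mask):
    # hard-select Y from im1 where mask is True, im2 elsewhere; keep im2's I, Q
    y1, y2 = rgb2yiq(im1), rgb2yiq(im2)
    y2[:, :, 0] = np.where(mask, y1[:, :, 0], y2[:, :, 0])
    return np.clip(yiq2rgb(y2), 0, 1)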
Example #12
def preprocess(original, marked):
    """
    Preprocesses input images: the original grayscale image, and the
    version marked with colored scribbles. Converts images to YUV space
    and trims based on (???).
    """
    # read in images from files
    grayscale = cv2.imread(original)
    if grayscale is None:
        raise Exception(f"Could not read from image file '{original}'")

    marked = cv2.imread(marked)
    if marked is None:
        raise Exception(f"Could not read from image file '{marked}'")

    marked = cv2.cvtColor(marked, cv2.COLOR_BGR2RGB)

    # scale to float
    grayscale = grayscale / 255.
    marked = marked / 255.

    # isolate colored markings
    marks = np.sum(np.abs(grayscale - marked), axis=2) > PRECISION
    marks = marks.astype('double')

    # convert to YIQ
    gray_ntsc = rgb2yiq(grayscale)
    marked_ntsc = rgb2yiq(marked)

    # create image to be colorized
    h, w, c = gray_ntsc.shape
    im = np.empty((h, w, c))
    im[:, :, 0] = gray_ntsc[:, :, 0]
    im[:, :, 1] = marked_ntsc[:, :, 1]
    im[:, :, 2] = marked_ntsc[:, :, 2]

    return (marks, im)
Example #13
def extract_superpixel_features(stats, img, adj_matrix, binaryCropMask):

    imgHSV = rgb2hsv(img)
    imgLAB = rgb2lab(img)
    imgYCbCr = rgb2ycbcr(img) / PIXEL_MAX_VAL
    imgYIQ = rgb2yiq(img)

    # set pixel range to [0,1]
    imgRGB = img / PIXEL_MAX_VAL
    # per-pixel L2 norm over the channel axis; keepdims lets broadcasting
    # divide all three channels (epsilon guards black pixels)
    imgRGBsum = np.sqrt(np.sum(np.square(imgRGB), axis=2, keepdims=True)) + 1e-12
    imgRGBnorm = np.divide(imgRGB, imgRGBsum)

    gradient_maps = get_7_gradient_maps(imgRGB)
    minCoverPercentage = 0.5
    color_spaces = [imgRGB, imgRGBnorm, imgLAB, imgHSV, imgYCbCr, imgYIQ]

    features = []
    for i, s in enumerate(stats):
        # get features of superpixels inside a mask
        if s.area <= 0 or np.mean(binaryCropMask[s.coords[:, 0], s.coords[:, 1]]) <= minCoverPercentage:
            continue
        feature = []
        # put coords of superpixel as a feature
        feature.append(s.centroid[0])
        feature.append(s.centroid[1])

        # get features of 8 neighbors and itself
        neighbors = get8neighbors4_superpixel(stats, adj_matrix, i)
        neighbors.append(i)
        for color_space in color_spaces:
            for nei in neighbors:
                for channel in range(3):
                    coords = stats[nei].coords
                    color_values = color_space[coords[:,
                                                      0], coords[:, 1], channel]
                    feature.append(np.mean(color_values))
                    feature.append(np.var(color_values))

        for gradient_map in gradient_maps:
            for nei in neighbors:
                coords = stats[nei].coords
                values = gradient_map[coords[:, 0], coords[:, 1]]
                feature.append(np.mean(values))
                feature.append(np.var(values))
        features.append(feature)

    return features
Example #14
def _equalize_luminance(im, new_mean):
    im_arr = np.asarray(im)
    ret_im = np.zeros_like(im_arr, dtype=np.uint8)  # asarray is const
    ret_im[:, :, 3] = im_arr[:, :, 3]
    non_transparent_indices = (im_arr[:, :, 3] != 0)
    rgb = im_arr[:, :, 0:3] / 255  # turn to 0-1 image instead of 0-255
    yiq = rgb2yiq(rgb)
    cur_mean = np.mean(yiq[non_transparent_indices,
                           0])  # disregard pixels with alpha=0
    yiq[non_transparent_indices, 0] -= (cur_mean - new_mean)
    rgb = np.round(yiq2rgb(yiq) * 255)  # convert back to RGB, 0-255 range
    rgb /= np.max(rgb)  # normalize
    rgb = np.round(rgb * 255)  # back to 0-255
    ret_im[:, :, 0:3] = rgb
    ret_im = Image.fromarray(ret_im)
    return ret_im
Example #15
 def test_yuv_roundtrip(self, channel_axis):
     img_rgb = img_as_float(self.img_rgb)[::16, ::16]
     img_rgb = np.moveaxis(img_rgb, source=-1, destination=channel_axis)
     assert_array_almost_equal(
         yuv2rgb(rgb2yuv(img_rgb, channel_axis=channel_axis),
                 channel_axis=channel_axis), img_rgb)
     assert_array_almost_equal(
         yiq2rgb(rgb2yiq(img_rgb, channel_axis=channel_axis),
                 channel_axis=channel_axis), img_rgb)
     assert_array_almost_equal(
         ypbpr2rgb(rgb2ypbpr(img_rgb, channel_axis=channel_axis),
                   channel_axis=channel_axis), img_rgb)
     assert_array_almost_equal(
         ycbcr2rgb(rgb2ycbcr(img_rgb, channel_axis=channel_axis),
                   channel_axis=channel_axis), img_rgb)
     assert_array_almost_equal(
         ydbdr2rgb(rgb2ydbdr(img_rgb, channel_axis=channel_axis),
                   channel_axis=channel_axis), img_rgb)
Example #16
def histogram_equalize(im_orig):
    """
    performs histogram equalization to the image
    :param im_orig: an ndimage array
    :return: array of equalized image, the original histogram and the cumulative histogram
    """
    if len(im_orig.shape) > 2:
        # rgb
        YIQim = rgb2yiq(im_orig) * 255
        hist_orig, bin_edges = np.histogram(YIQim[:, :, 0], 256)
        rows, columns, dim = im_orig.shape
        cum_hist = np.cumsum(hist_orig)
        cum_hist = cum_hist.astype(np.float64)
    else:
        # grayscale
        im_orig *= 255
        hist_orig, bin_edges = np.histogram(im_orig, 256)
        cum_hist = np.cumsum(hist_orig)
        cum_hist = cum_hist.astype(np.float64)
        rows, columns = im_orig.shape

    tot_pixels = rows * columns
    cum_hist = (cum_hist / tot_pixels)
    minimum = min(np.nonzero(cum_hist)[0])
    maximum = np.nonzero(cum_hist)[0][-1]
    minVal = cum_hist[minimum]
    maxVal = cum_hist[maximum]
    cum_hist = (255 * ((cum_hist - minVal) / (maxVal - minVal)))
    cum_hist = np.around(cum_hist)
    if len(im_orig.shape) > 2:
        im_eq = np.copy(YIQim)
        y_values = cum_hist[YIQim[:, :, 0].astype(np.uint8)]  # uint8: values are 0-255
        im_eq[:, :, 0] = y_values
        im_eq = yiq2rgb(im_eq / 255)
    else:
        im_eq = cum_hist[im_orig.astype(np.uint8)]
    cum_hist /= 255
    cum_hist = np.clip(cum_hist, 0, 1)
    return [im_eq, hist_orig, cum_hist]
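A hedged usage sketch for the function above (synthetic data, grayscale branch; note the function mutates its input in place and returns the equalized grayscale image on a 0-255 scale):

import numpy as np

im = np.random.uniform(0.4, 0.6, (64, 64))  # low-contrast image in [0, 1]
im_eq, hist_orig, cum_hist = histogram_equalize(im)
# equalization stretches the range: im_eq should span roughly 0..255
print(im_eq.min(), im_eq.max(), cum_hist[0], cum_hist[-1])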
Example #17
import cv2
from skimage import color as scl
from matplotlib import pyplot as plt

imgRGB = cv2.cvtColor(cv2.imread("Lenna.jpg", cv2.IMREAD_COLOR),
                      cv2.COLOR_BGR2RGB)
imgHSV = scl.rgb2hsv(imgRGB)
imgYIQ = scl.rgb2yiq(imgRGB)

plt.subplot(221)
plt.imshow(imgRGB)
plt.title("Original Image")

imgRGB[:, :, 0] = cv2.equalizeHist(imgRGB[:, :, 0])
imgRGB[:, :, 1] = cv2.equalizeHist(imgRGB[:, :, 1])
imgRGB[:, :, 2] = cv2.equalizeHist(imgRGB[:, :, 2])
plt.subplot(222)
plt.imshow(imgRGB)
plt.title("RGB")

# imgHSV[:, :, 2] = cv2.equalizeHist(imgHSV[:, :, 2])
plt.subplot(223)
plt.imshow(scl.hsv2rgb(imgHSV))
plt.title("HSV")

# imgYIQ[:, :, 0] = cv2.normalize(cv2.equalizeHist(cv2.normalize(imgYIQ[:, :, 0], None, 0, 255, cv2.NORM_MINMAX)), None, 0.0, 1.0, cv2.NORM_MINMAX)
# imgYIQ[:, :, 0] = yiq1
plt.subplot(224)
plt.imshow(scl.yiq2rgb(imgYIQ))
print(imgYIQ[:, :, 0].max())
plt.title("YIQ")
Example #18
def signal_from_ROI(input_fullname):

    # Open video
    vid = cv2.VideoCapture(input_fullname)

    counter = 0
    frameNumber = 0
    amplified_signal = [0, 0]
    length = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))
    FPS = int(vid.get(cv2.CAP_PROP_FPS))
    print("Frames per second: ", FPS)

    temp_cdata = vid.read()

    # ROI selection
    r = cv2.selectROI(temp_cdata[1])
    row_ini = int(r[1])
    row_fin = int(r[1] + r[3])
    cols_ini = int(r[0])
    cols_fin = int(r[0] + r[2])

    # Shows interactive image
    fig = plt.figure()
    ax = fig.add_subplot(111)
    Ln, = ax.plot(amplified_signal)
    plt.axis([0, 1, 0, 0.1])
    ax.set_xlim([0, length + 2])
    plt.ion()
    plt.show()

    # Loop
    while vid.isOpened():
        print("Procesando: %.1f%%" % (100 * counter / length))
        temp_cdata = vid.read()
        if temp_cdata[0]:
            # creates ROI and convert to RGB
            temp_BGR_ROI = temp_cdata[1][row_ini:row_fin, cols_ini:cols_fin, :]
            temp_RGB_ROI = cv2.cvtColor(temp_BGR_ROI, cv2.COLOR_BGR2RGB)
            currentFrame_ROI = temp_RGB_ROI.astype('float') / 255

            # Show ROI in video
            cv2.imshow("ROI", temp_BGR_ROI)

            # Reference frame
            if counter == 2:
                RGB_ini_ROI = currentFrame_ROI

            # Creates signal
            if counter > 2:
                diff_ROI_RGB = currentFrame_ROI - RGB_ini_ROI
                # diff_ROI_RGB = np.clip(diff_ROI_RGB, a_min=0, a_max=1)
                diff_ROI_RGB = np.abs(diff_ROI_RGB)
                diff_ROI_YIQ = color.rgb2yiq(diff_ROI_RGB)
                mean_Y = np.mean(diff_ROI_YIQ[:, :, 0])
                amplified_signal = np.append(amplified_signal, mean_Y)

                # shows peaks every 2 seconds
                buffer_length = int(round(2 * FPS))  # int: used as a slice index below
                if frameNumber % buffer_length == 0:
                    # search and draw peaks
                    peaks, _ = find_peaks(amplified_signal[(
                        len(amplified_signal) -
                        buffer_length):len(amplified_signal)],
                                          height=0)
                    plt.plot(
                        len(amplified_signal) - buffer_length + peaks,
                        amplified_signal[len(amplified_signal) -
                                         buffer_length + peaks], "x")

                # Update amplified signal in figure
                Ln.set_ydata(amplified_signal)
                Ln.set_xdata(range(len(amplified_signal)))
                plt.pause(0.01)

                frameNumber = frameNumber + 1
        else:
            break

        counter = counter + 1
        ### end of WHILE ####

    # Shows remaining peaks in the signal
    peaks, _ = find_peaks(
        amplified_signal[(len(amplified_signal) -
                          buffer_length):len(amplified_signal)],
        height=0)
    plt.plot(
        len(amplified_signal) - buffer_length + peaks,
        amplified_signal[len(amplified_signal) - buffer_length + peaks], "x")
    plt.pause(5.0)
    # Release video
    vid.release()
Example #19
def vidmag_fn(input_fullname, parameters):

    alpha = parameters['alpha']
    lambda_c = parameters['lambda_c']
    fl = parameters['fl']
    fh = parameters['fh']
    samplingRate = parameters['samplingRate']
    chromAttenuation = parameters['chromAttenuation']
    nlevels = parameters['nlevels']

    # Butterworth coefficients; NB scipy returns (b, a), so these names are
    # swapped, but they are used consistently in the temporal filter below
    [low_a, low_b] = signal.butter(1, fl / samplingRate, 'low')
    [high_a, high_b] = signal.butter(1, fh / samplingRate, 'low')

    # output fullname format
    input_filename = os.path.splitext(os.path.basename(input_fullname))[0]
    output_fullname = c.PROCESSED_VIDEO_DIR+input_filename+'-butter-from-'+str(fl)+'-to-'+str(fh)+'Hz'+\
              '-alpha-'+str(alpha)+'-lambda_c-'+str(lambda_c)+\
              '-chromAtn-'+str(chromAttenuation)+'.mp4'

    input_video = cv2.VideoCapture(input_fullname)
    vidHeight = int(input_video.get(cv2.CAP_PROP_FRAME_HEIGHT))
    vidWidth = int(input_video.get(cv2.CAP_PROP_FRAME_WIDTH))
    fr = int(input_video.get(cv2.CAP_PROP_FPS))
    length = int(input_video.get(cv2.CAP_PROP_FRAME_COUNT))

    fourcc = cv2.VideoWriter_fourcc(*'MP4V')
    output_video = cv2.VideoWriter(output_fullname, fourcc, fr,
                                   (vidWidth, vidHeight), 1)

    # First frame
    temp_cdata = input_video.read()
    rgbframe = temp_cdata[1].astype('float') / 255.0

    # get desired sizes used in all the Laplacian pyramid levels
    dsizes = np.zeros((nlevels + 1, 2))
    for k in range(0, nlevels + 1):
        dsizes[k, :] = [
            np.floor(rgbframe.shape[0] / (2**k)),
            np.floor(rgbframe.shape[1] / (2**k))
        ]
    desired_sizes = tuple(map(tuple, dsizes))
    print(desired_sizes)

    # first frame processing (initial conditions)
    frame = color.rgb2yiq(rgbframe)
    lpyr = buildlpyr(frame, nlevels,
                     desired_sizes)  # creates Laplacian pyramid
    lowpass1 = lpyr
    lowpass2 = lpyr
    pyr_prev = lpyr

    output_frame = color.yiq2rgb(frame)  # yiq color space
    output_frame = output_frame * 255
    output_video.write(output_frame.astype('uint8'))

    # processing remaining frames
    counter = 1
    while input_video.isOpened():
        print("Processing: %.1f%%" % (100 * counter / length))

        temp_cdata = input_video.read()
        if not (temp_cdata[0]):
            break

        #from rgb to yiq
        rgbframe = temp_cdata[1].astype('float') / 255
        frame = color.rgb2yiq(rgbframe)

        # Laplacian pyramid (expansion)
        lpyr = buildlpyr(frame, nlevels, desired_sizes)

        # Temporal filter
        lowpass1 = (-high_b[1] * lowpass1 + high_a[0] * lpyr +
                    high_a[1] * pyr_prev) / high_b[0]
        lowpass2 = (-low_b[1] * lowpass2 + low_a[0] * lpyr +
                    low_a[1] * pyr_prev) / low_b[0]
        filtered = lowpass1 - lowpass2
        pyr_prev = lpyr

        # Amplification
        delta = lambda_c / 8 / (1 + alpha)
        exaggeration_factor = 2
        lambda_ = (vidHeight**2 + vidWidth**2)**0.5 / 3
        filtered[0] = np.zeros_like(filtered[0])
        filtered[-1] = np.zeros_like(filtered[-1])

        for i in range(nlevels - 1, 1, -1):
            # equation 14 (paper vidmag, see references)
            currAlpha = lambda_ / delta / 8 - 1
            currAlpha = currAlpha * exaggeration_factor
            # from figure 6 (paper vidmag, see references)
            if currAlpha > alpha:
                filtered[i] = alpha * filtered[i]
            else:
                filtered[i] = currAlpha * filtered[i]
            lambda_ = lambda_ / 2

        # Laplacian pyramid (contraction): accumulate upward from the
        # coarsest level (filtered[0], per the desired_sizes indexing)
        pyr_contraida = filtered[0]
        for i in range(nlevels - 1):
            aux = cv2.pyrUp(pyr_contraida,
                            dstsize=(int(desired_sizes[nlevels - 2 - i][1]),
                                     int(desired_sizes[nlevels - 2 - i][0])))
            pyr_contraida = cv2.add(aux, filtered[i + 1])

        # Components chrome attenuation
        pyr_contraida[:, :, 1] = pyr_contraida[:, :, 1] * chromAttenuation
        pyr_contraida[:, :, 2] = pyr_contraida[:, :, 2] * chromAttenuation

        # adding contracted pyramid to current frame
        output_frame = pyr_contraida + frame

        # recovering rgb frame
        output_frame = color.yiq2rgb(output_frame)
        output_frame = np.clip(output_frame, a_min=0, a_max=1)
        output_frame = output_frame * 255

        # saving processed frame to output video
        output_video.write(output_frame.astype('uint8'))
        counter = counter + 1

        ### end of WHILE ####

    # release video
    output_video.release()
    input_video.release()

    return output_fullname
Example #20
def main(args):
    content_image = Image.open(FLAGS.content)
    content_image = resize(content_image)
    style_image = Image.open(FLAGS.style).resize(content_image.size)

    content_image = np.asarray(content_image, dtype=np.float32)
    style_image = np.asarray(style_image, dtype=np.float32)

    # match luminance between style and content image
    style_image = match_luminance(content_image, style_image)

    content_image = np.expand_dims(content_image, 0)
    style_image = np.expand_dims(style_image, 0)

    img_shape = content_image.shape
    with tf.name_scope('image'):
        random_image = tf.random_normal(mean=1, stddev=.01, shape=img_shape)
        image = tf.Variable(initial_value=random_image,
                            name='image',
                            dtype=tf.float32)
        tf.summary.image('img', tf.clip_by_value(image, 0, 255), max_outputs=1)
        # subtract mean
        inputs = image - IMAGENET_MEAN
        # convert to BGR, because VGG16 was trained on BGR images
        channels = tf.unstack(inputs, axis=-1)
        inputs = tf.stack([channels[2], channels[1], channels[0]], axis=-1)
    _, endpoints = vgg_16(inputs, is_training=False, scope='vgg_16')

    saver = tf.train.Saver(var_list=tf.get_collection(
        tf.GraphKeys.GLOBAL_VARIABLES, scope='vgg_16'))

    style_tensors = [endpoints[l] for l in STYLE_LAYERS]
    content_tensors = [endpoints[l] for l in CONTENT_LAYERS]

    image_style_tensors = [endpoints[l] for l in STYLE_LAYERS]
    image_content_tensors = [endpoints[l] for l in CONTENT_LAYERS]

    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        saver.restore(sess, FLAGS.ckpt_file)
        style_features = sess.run(style_tensors,
                                  feed_dict={image: style_image})
        content_features = sess.run(content_tensors,
                                    feed_dict={image: content_image})

    # define style loss
    style_losses = []
    for image_layer, style_layer in zip(image_style_tensors, style_features):
        _, height, width, channels = image_layer.get_shape().as_list()
        size = height * width * channels

        # compute Gram matrices
        image_feats_reshape = tf.reshape(image_layer, [-1, channels])
        image_gram = tf.matmul(tf.transpose(image_feats_reshape),
                               image_feats_reshape) / size
        style_feats_reshape = tf.reshape(style_layer, [-1, channels])
        style_gram = tf.matmul(tf.transpose(style_feats_reshape),
                               style_feats_reshape) / size

        loss = tf.square(
            tf.norm(image_gram - style_gram, ord='fro', axis=(0, 1)))
        style_losses.append(loss)

    style_loss = STYLE_WEIGHT * tf.add_n(style_losses)

    # define content loss
    content_losses = []
    for image_layer, content_layer in zip(image_content_tensors,
                                          content_features):
        _, height, width, channels = image_layer.get_shape().as_list()
        size = height * width * channels
        loss = tf.nn.l2_loss(image_layer - content_layer) / size
        content_losses.append(loss)

    content_loss = CONTENT_WEIGHT * tf.add_n(content_losses)

    # total variation denoising loss
    tvd_loss = TVD_WEIGHT * tf.reduce_sum(tf.image.total_variation(image))

    loss = style_loss + content_loss + tvd_loss

    global_step = tf.train.get_or_create_global_step()
    optim = tf.train.AdamOptimizer(LEARNING_RATE)
    train_op = optim.minimize(loss, global_step=global_step, var_list=[image])

    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        saver.restore(sess, FLAGS.ckpt_file)

        noise = tf.random_normal(mean=1, stddev=.01, shape=img_shape)
        rand_init = tf.clip_by_value(content_image * noise, 0, 255)
        image.assign(rand_init).eval()
        for step in range(FLAGS.steps):
            t0 = time.time()
            _, style_loss_val, content_loss_val, tvd_loss_val, loss_val = sess.run(
                [train_op, style_loss, content_loss, tvd_loss, loss])
            t = time.time() - t0
            if step % 10 == 0:
                format_str = 'step: {}/{} loss: style: {}, content: {}, tvd: {}, total: {} | time: {:.2f} s/step'
                print(
                    format_str.format(step, FLAGS.steps, style_loss_val,
                                      content_loss_val, tvd_loss_val, loss_val,
                                      t))

        img = sess.run(image)[0]

    # transfer luminance
    img = np.clip(img, 0, 255) / 255
    content_image = content_image[0] / 255
    result_y = np.expand_dims(color.rgb2yiq(img)[:, :, 0], 2)
    content_iq = color.rgb2yiq(content_image)[:, :, 1:]
    img = np.dstack((result_y, content_iq))
    img = np.clip(color.yiq2rgb(img), 0, 1)
    imsave(FLAGS.result_file, img)
Example #21
def pruebacolor(img):
    print("rgb")
    figure(0)
    imshow(img[:, :, 0])
    title("rgb r")
    figure(1)
    imshow(img[:, :, 1])
    title("rgb g")
    figure(2)
    imshow(img[:, :, 2])
    title("rgb b")
    yiq = rgb2yiq(img)
    print("yiq")
    figure(3)
    imshow(yiq[:, :, 0])
    title("yiq y")
    figure(4)
    imshow(yiq[:, :, 1])
    title("yiq i")
    figure(5)
    imshow(yiq[:, :, 2])
    title("yiq q")
    hsv = rgb2hsv(img)
    print("hsv")
    figure(6)
    imshow(hsv[:, :, 0])
    title("hsv h")
    figure(7)
    imshow(hsv[:, :, 1])
    title("hsv s")
    figure(8)
    imshow(hsv[:, :, 2])
    title("hsv v")
    xyz = rgb2xyz(img)
    print("xyz")
    figure(9)
    imshow(xyz[:, :, 0])
    title("xyz x")
    figure(10)
    imshow(xyz[:, :, 1])
    title("xyz y")
    figure(11)
    imshow(xyz[:, :, 2])
    title("xyz z")
    lab = rgb2lab(img)
    print("lab")
    figure(12)
    imshow(lab[:, :, 0])
    title("lab l")
    figure(13)
    imshow(lab[:, :, 1])
    title("lab a")
    figure(14)
    imshow(lab[:, :, 2])
    title("lab b")
    ycbcr = rgb2ycbcr(img)
    print("ycbcr")
    figure(15)
    imshow(ycbcr[:, :, 0])
    title("ycbcr y")
    figure(16)
    imshow(ycbcr[:, :, 1])
    title("ycbcr cb")
    figure(17)
    imshow(ycbcr[:, :, 2])
    title("ycbcr cr")
Example #22
def main(content_image_path, style_image_path, iterations, content_img_height,
         style_img_height, tv_weight,
         style_weight, content_weight, save_gif, preserve_color, learning_rate,
         beta_1, beta_2, epsilon):
    """Performs neural style transfer on a content image and style image."""
    model = get_model()

    # Load images
    content_image = load_and_process_img(content_image_path, content_img_height)
    style_image = load_and_process_img(style_image_path, style_img_height)
    content_image_yiq = rgb2yiq(
        load_img(content_image_path, content_img_height))

    # Compute content and style features
    style_outputs = model(style_image)
    content_outputs = model(content_image)

    # Get the style and content feature representations from our model
    style_features = [style_layer[0] for style_layer in
                      style_outputs[:NUM_STYLE_LAYERS]]
    content_features = [content_layer[0] for content_layer in
                        content_outputs[NUM_STYLE_LAYERS:]]
    gram_style_features = [gram_matrix(style_feature) for style_feature in
                           style_features]

    # Set initial image
    init_image = load_and_process_img(content_image_path, content_img_height,
                                      as_gray=preserve_color)
    init_image = tf.Variable(init_image, dtype=tf.float32)

    # Create our optimizer
    optimizer = tf.optimizers.Adam(learning_rate=learning_rate, beta_1=beta_1,
                                   beta_2=beta_2, epsilon=epsilon)

    # Create config dictionary
    loss_weights = (style_weight, content_weight, tv_weight)
    config = {
        'model': model,
        'loss_weights': loss_weights,
        'init_image': init_image,
        'gram_style_features': gram_style_features,
        'content_features': content_features
    }

    images = []

    # Optimization loop
    for step in range(1, iterations + 1):
        start_time = time.time()
        grads, loss = compute_grads(config)
        optimizer.apply_gradients([(grads, init_image)])
        clipped = tf.clip_by_value(init_image, MIN_VALS, MAX_VALS)
        init_image.assign(clipped)
        img = deprocess_image(init_image.numpy())
        if preserve_color:
            img = rgb2yiq(img)
            img[:, :, 1:] = content_image_yiq[:, :, 1:]
            img = yiq2rgb(img)
            img = np.clip(img, 0, 1)
            img = (img * 255).astype('uint8')
        images.append(img)
        end_time = time.time()
        print('Finished step {} ({:.03} seconds)\nLoss: {}\n'.format(
            step, end_time - start_time, loss))

    # Save final image
    imsave('stylized.jpg', images[-1])

    if save_gif:
        create_gif(images, 'transformation.gif')
Example #23
def pixelmatch(img1, img2):
    yuv1 = rgb2yiq(img1)
    yuv2 = rgb2yiq(img2)
    delta2 = np.square(yuv1 - yuv2)  # why square?
    return delta2 @ [0.5053, 0.299, 0.1957]
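An illustrative check for the function above (not from the source): identical inputs produce an all-zero difference map, with one weighted squared YIQ difference per pixel.

import numpy as np
from skimage.color import rgb2yiq

img = np.random.rand(8, 8, 3)
diff = pixelmatch(img, img)
print(diff.shape, np.allclose(diff, 0))  # (8, 8) True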
Example #24
        for i_b in range(B_prime_pyramid[l].shape[0]):
            print("\tRow %d" % i_b)
            for j_b in range(B_prime_pyramid[l].shape[1]):
                i_a, j_a = best_match(A_pyramid, A_prime_pyramid, B_pyramid,
                                      B_prime_pyramid, s, l, i_b, j_b)
                B_prime_pyramid[l][i_b][j_b] = A_prime_pyramid[l][i_a][j_a]
                s[(i_b, j_b)] = (i_a, j_a)
    return B_prime_pyramid[0]


if __name__ == '__main__':
    A = plt.imread(INPUT + A_NAME)
    A_prime = plt.imread(INPUT + A_PRIME_NAME)
    B = plt.imread(INPUT + B_NAME)
    if USE_LUMINANCE:
        A, A_prime, B = rgb2yiq(A), rgb2yiq(A_prime), rgb2yiq(B)
        transform_func, inverse_transform_func = compute_luminance_transforms(
            A, B)
        A[:, :, 0] = transform_func(A[:, :, 0])
        A_prime[:, :, 0] = transform_func(A_prime[:, :, 0])
        B[:, :, 0] = transform_func(B[:, :, 0])

    B_prime = create_image_analogy(A, A_prime, B)
    if USE_LUMINANCE:
        B_prime[:, :, 0] = inverse_transform_func(B_prime[:, :, 0])
        B_prime = yiq2rgb(B_prime)

    os.makedirs(OUTPUT, exist_ok=True)
Example #25
print(img_gray_01)
print("変換後: [0, 255]")
print(img_gray)

io.imshow(img_gray.astype(np.uint8))


# #### Grayscale conversion, take 2
# Let's try another way to produce a grayscale image: convert from RGB space to YIQ space and use the Y channel. Because YIQ separates the grayscale information from the color data, the same signal can be used for both color and black-and-white output.
# 
# (Note: depending on the grayscale algorithm, `img_gray_01` and `img_yiq[:, :, 0]` can coincide, but in skimage they differ)

# In[38]:


img_yiq = color.rgb2yiq(img_true)
img_conb = np.concatenate(
    (img_yiq[:, :, 0], img_yiq[:, :, 1], img_yiq[:, :, 2]), axis=1)
io.imshow(img_conb)


# In[33]:


# Compare with skimage.color.rgb2gray
img_conb2 = np.concatenate((img_yiq[:, :, 0], img_gray_01), axis=1)
io.imshow(img_conb2)


# In[47]:
Example #26
 def __call__(self, img):
     img = np.asarray(img, np.uint8)
     img = color.rgb2yiq(img)
     return img
Example #27
def quantize(im_orig, n_quant, n_iter):
    """
    a function that performs optimal quantization of a given grayscale or RGB image.

    :param im_orig: is the input grayscale or RGB image to be quantized (float64 image with values in [0, 1]).
    :param n_quant: is the number of intensities your output im_quant image should have.
    :param n_iter: is the maximum number of iterations of the optimization procedure
    :return: im_quant - is the quantized output image.
            error - is an array with shape (n_iter,) (or less) of the total intensities error for each iteration of the
            quantization procedure
    """
    error = []
    q = np.array([0] * n_quant, dtype=np.float64)
    z = [0] * (n_quant + 1)
    if len(im_orig.shape) > 2:
        # rgb
        YIQim = rgb2yiq(im_orig)
        hist = np.histogram(YIQim[:, :, 0] * 255, bins=256)[0]
    else:
        # grayscale
        hist = np.histogram(im_orig * 255, bins=256)[0]
    cum_hist = np.cumsum(hist)
    for i in range(1, n_quant):
        z[i] = np.where(cum_hist > (i / n_quant) * cum_hist[255])[0][0] - 1
    z[n_quant] = 255

    q_1 = 0
    q_2 = 0
    for i in range(n_iter):
        change = False  # boolean to see if z changed

        # calculate new q
        for j in range(n_quant):
            for x in range(z[j], z[j + 1] + 1):
                q_2 += hist[x]
                q_1 += (x * hist[x])
            q[j] = int(q_1 / q_2)
            q_1 = 0
            q_2 = 0

            # calculate new z
        for j in range(1, n_quant):
            z_1 = int((q[j - 1] + q[j]) / 2)
            if z_1 != z[j]:
                z[j] = z_1
                change = True

        error.append(compute_error(n_quant, z, q, hist))
        if not change:
            lut = create_lut(z, q)
            if len(im_orig.shape) > 2:
                YIQim *= 255
                im_quant = np.copy(YIQim)
                y_values = lut[YIQim[:, :, 0].astype(np.uint8)]  # uint8: values are 0-255
                im_quant[:, :, 0] = y_values
                im_quant = yiq2rgb(im_quant / 255)  # rescale to [0, 1] before converting
            else:
                im_orig *= 255
                im_quant = lut[im_orig.astype(np.uint8)]
            return [im_quant, error]

    lut = create_lut(z, q)
    if len(im_orig.shape) > 2:
        YIQim *= 255
        im_quant = np.copy(YIQim)
        y_values = lut[YIQim[:, :, 0].astype(np.uint8)]
        im_quant[:, :, 0] = y_values
        im_quant = yiq2rgb(im_quant / 255)
    else:
        im_orig *= 255
        im_quant = lut[im_orig.astype(np.uint8)]
    return [im_quant, error]
Example #28
def toYCbCr(rgb):
    # note: despite its name, this returns YIQ, not YCbCr
    return color.rgb2yiq(rgb)
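If the name is to be taken at face value, skimage has a direct conversion (a suggested correction, not the original author's code):

from skimage import color

def toYCbCr_fixed(rgb):
    # true RGB -> YCbCr: Y in [16, 235], Cb/Cr in [16, 240] for float input
    return color.rgb2ycbcr(rgb)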
Example #29
def generateFeaturesImages(img_rgb, fparams):
    
    f_imgs = dict()
    
    f_imgs['gray'] = color.rgb2gray(img_rgb)
    
    f_imgs['RGB'] = img_rgb

    if fparams['LAB']['use']:
        f_imgs['LAB'] = color.rgb2lab(img_rgb)
    if fparams['HSV']['use']:
        f_imgs['HSV'] = color.rgb2hsv(img_rgb)
    if fparams['YCbCr']['use']:
        f_imgs['YCbCr'] = color.rgb2ycbcr(img_rgb)
    if fparams['xyz']['use']:
        f_imgs['xyz'] = color.rgb2xyz(img_rgb)
    if fparams['yiq']['use']:
        f_imgs['yiq'] = color.rgb2yiq(img_rgb)
    if fparams['yuv']['use']:
        f_imgs['yuv'] = color.rgb2yuv(img_rgb)

    if fparams['entropy']['use']:
        f_imgs['entropy'] = entropy(img_as_ubyte(f_imgs['gray']), disk(5))
    # texton block below disabled for testing
    '''
    if fparams['texton']['use']:    # appende gabor filter responses to extract mean and std
        kernels = generateKernels()   #generate gabor filters
        texton_imgs = np.empty((f_imgs['gray'].shape[0], f_imgs['gray'].shape[1], fparams['texton']['n_kernels']))
        for k, kernel in enumerate(kernels):
            texton_imgs[:,:,k] = nd.convolve(f_imgs['gray'], kernel)
            
        if fparams['texton']['mean'] or fparams['texton']['std']:
            f_imgs['texton'] = texton_imgs
        
        if fparams['texton']['hist_max']:
            f_imgs['hist_max'] = np.argmax(texton_imgs, 2)
            
        if fparams['texton']['hist_kmeans']:
            data_t = np.empty((f_imgs['gray'].size, fparams['texton']['n_kernels']))
        
            for k in range(fparams['texton']['n_kernels']):
                data_t [:, k] = texton_imgs[:,:,k].flatten()
            
            textons = kmeans.predict(data_t)

            f_imgs['hist_kmeans'] = textons.reshape(f_imgs['gray'].shape[0], f_imgs['gray'].shape[1])
    ''' 
    '''          
    if fparams['grayD']['use'] or fparams['grayA']['use']:
        f_imgs['grayMeans'] = generateGrayMeans(f_imgs['gray'], 6, 6, 1)  #gray diff between mean gray of the block and some areas
    '''
    if fparams['LBP']['use']:
        if fparams['LBP']['gray']:
            f_imgs['LBP_gray'] = local_binary_pattern(f_imgs['gray'], fparams['LBP']['n_neibor'], fparams['LBP']['radius'])  #lbp image
        if fparams['LBP']['red']:
            f_imgs['LBP_r'] = local_binary_pattern(f_imgs['RGB'][:,:,0], fparams['LBP']['n_neibor'], fparams['LBP']['radius'])  #lbp image
        if fparams['LBP']['green']:
            f_imgs['LBP_g'] = local_binary_pattern(f_imgs['RGB'][:,:,1], fparams['LBP']['n_neibor'], fparams['LBP']['radius'])  #lbp image
        if fparams['LBP']['blue']:
            f_imgs['LBP_b'] = local_binary_pattern(f_imgs['RGB'][:,:,2], fparams['LBP']['n_neibor'], fparams['LBP']['radius'])  #lbp image            
   
    
    """ if fparams['VegetationIndex']['use']:
        img_nir = img_rgb.astype(float)[:,:,0]
        img_g = img_rgb.astype(float)[:,:,1]
        img_r = img_rgb.astype(float)[:,:,2]
        if fparams['VegetationIndex']['NDVI']:
            f_imgs['NDVI'] = ((img_nir-img_r)/(img_nir+img_r)) 
        
        if fparams['VegetationIndex']['NNIR']: 
            f_imgs['NNIR'] =  ((img_nir)/(img_nir+img_g+img_r))
            
        if fparams['VegetationIndex']['NGREEN']:
            f_imgs['NGREEN'] = ((img_g)/(img_nir+img_g+img_r))
            
        if fparams['VegetationIndex']['NRED']:
            f_imgs['NRED'] = ((img_r)/(img_nir+img_g+img_r))
            
        if fparams['VegetationIndex']['PVI']:
            b,a = np.polyfit(img_r[0,], img_nir[0,], 1) #regression to find values for b and a 
            f_imgs['PVI'] = ((b*img_nir*img_r)+a)/(np.sqrt(b+1)) """
   
    return f_imgs
Example #30
import cv2
from skimage import color as scl
from matplotlib import pyplot as plt

img = cv2.cvtColor(cv2.imread("Lenna.jpg", cv2.IMREAD_COLOR),
                   cv2.COLOR_BGR2RGB)
imgYIQ = scl.rgb2yiq(img)

plt.subplot(221)
plt.imshow(img)
plt.title("Colorful image")
plt.subplot(222)
plt.imshow(imgYIQ[:, :, 0], cmap='gray')
plt.title("Y")
plt.subplot(223)
plt.imshow(imgYIQ[:, :, 1], cmap='gray')
plt.title("I")
plt.subplot(224)
plt.imshow(imgYIQ[:, :, 2], cmap='gray')
plt.title("Q")
plt.subplots_adjust(left=0.1,
                    bottom=0.1,
                    right=0.9,
                    top=0.9,
                    wspace=0.6,
                    hspace=0.6)
plt.show()