import cv2


def denoising_video_colored(itdi, sq, vid, fs, hs, tps, ws, tws):
    # the first itdi frames have no full temporal window, so denoise them
    # individually with the single-frame method
    for i in range(itdi):
        sq.append(
            cv2.fastNlMeansDenoisingColored(vid[i], None, fs, hs, tps, ws))

    # denoise every frame that has a full temporal window, considering tws frames
    for k in range(len(vid) - itdi * 2):
        # build a list of temporalWindowSize consecutive frames
        l_tws = [vid[k + i] for i in range(tws)]

        # 7 is the recommended size in pixels of the template patch used to
        # compute weights (it should be odd); more method syntax details:
        # https://shimat.github.io/opencvsharp/html/d12fad98-53b0-c14a-6496-5c52ee633019.htm
        sq.append(
            cv2.fastNlMeansDenoisingColoredMulti(l_tws, itdi, tws, None, fs,
                                                 hs, tps, ws))

        # CUDA could be used instead, but it would be less accurate: there is
        # no CUDA method for frame sequences, so it would have to run per
        # frame (see the sketch after this function). More here:
        # http://docs.opencv.org/trunk/d1/d79/group__photo__denoise.html#ga21abc1c8b0e15f78cd3eff672cb6c476

        # merge all filtered images into the output sequence, resizing to the
        # needed resolution; more about resizing:
        # http://docs.opencv.org/2.4/modules/imgproc/doc/geometric_transformations.html#resize

    # denoise the last itdi frames individually
    for i in range(itdi):
        sq.append(
            cv2.fastNlMeansDenoisingColored(vid[len(vid) - itdi + i], None, fs,
                                            hs, tps, ws))
Example #2
import cv2 as cv


def reduceNoise(images, greyScale):
    if len(images) > 1:
        # denoise the middle frame using an odd-sized temporal window
        middle = len(images) // 2
        window = len(images) if len(images) % 2 == 1 else len(images) - 1
        if greyScale == 1:
            return cv.fastNlMeansDenoisingMulti(images, middle, window)
        else:
            return cv.fastNlMeansDenoisingColoredMulti(images, middle, window)
    elif len(images) == 1:
        if greyScale == 1:
            return cv.fastNlMeansDenoising(images[0], None, 10)
        else:
            return cv.fastNlMeansDenoisingColored(images[0], None, 10)
Example #3
    def denoise(self):
        temporalWindowSize = 3
        totalFrames = len(self.frames)
        half = temporalWindowSize // 2

        # the first and last frames have no full temporal window,
        # so denoise them individually
        for frameNo in range(half):
            self.frames[frameNo] = cv2.fastNlMeansDenoisingColored(self.frames[frameNo])

        for frameNo in range(totalFrames - half, totalFrames):
            self.frames[frameNo] = cv2.fastNlMeansDenoisingColored(self.frames[frameNo])

        for frameNo in range(half, totalFrames - half):
            newFrame = cv2.fastNlMeansDenoisingColoredMulti(
                srcImgs=self.frames, imgToDenoiseIndex=frameNo,
                temporalWindowSize=temporalWindowSize)
            self.frames[frameNo] = newFrame
            print(frameNo)
Example #4
    def denoise_img(image_list):
        """Denoise one or multiple images"""

        print("denoising")

        if len(image_list) == 1:
            denoised = cv2.fastNlMeansDenoisingColored(image_list[0], None, 10,
                                                       10, 7, 21)

        else:

            # make the list length odd (temporalWindowSize must be odd)
            if len(image_list) % 2 == 0:
                image_list.pop()
            # index of the middle element
            middle = len(image_list) // 2

            width = sys.maxsize
            height = sys.maxsize

            # find the smallest image size
            for img in image_list:
                size = tuple(img.shape[1::-1])
                if size[0] < width: width = size[0]
                if size[1] < height: height = size[1]

            # resize all images to the smallest one
            image_list = [
                cv2.resize(elem, (width, height)) for elem in image_list
            ]

            imgToDenoiseIndex = middle
            temporalWindowSize = len(image_list)
            hColor = 3  # the binding's keyword for the color strength is hColor
            searchWindowSize = 17
            # print(temporalWindowSize, imgToDenoiseIndex)

            denoised = cv2.fastNlMeansDenoisingColoredMulti(
                image_list,
                imgToDenoiseIndex,
                temporalWindowSize,
                hColor=hColor,
                searchWindowSize=searchWindowSize)
        print("denoised")

        return denoised
Example #5
import os

import cv2
import numpy as np
from PIL import Image


def fastNlMeansDenoisingMulti(input_path, output_path):
    # keep a sliding buffer of 10 frames and denoise the middle one each step
    imgs = []
    files = sorted(os.listdir(input_path))
    for file in files[:10]:
        img = Image.open(input_path + file)
        img = np.array(img)
        imgs.append(img)
    for target_file, append_file in zip(files[5:-5], files[10:]):
        img = cv2.fastNlMeansDenoisingColoredMulti(imgs, 5, 5, None, 6, 10, 7,
                                                   21)
        img = Image.fromarray(img)
        img.save(output_path + target_file)
        img = Image.open(input_path + append_file)
        img = np.array(img)
        imgs.append(img)
        imgs.pop(0)
Example #6
    def calc_mean_color(self, rect):
        """calculate mean color in sub rectangle"""

        patch = self.get_subpicture(rect)  # renamed from `slice` to avoid shadowing the builtin
        self.input_buf.append(patch)
        self.resize_bufs()

        if self.denoise and len(self.input_buf) == self.input_buf_size:
            patch = cv2.fastNlMeansDenoisingColoredMulti(
                self.input_buf, self.input_buf_size // 2, self.input_buf_size,
                None, 3, 3, 7, 35)

        # we like the green channel more
        # weights = [1.15, 2.9, 0.8]
        weights = [0, 0.85, 0.4]

        return sum(weight * np.mean(patch[:, :, c])
                   for c, weight in enumerate(weights)) / sum(weights)
# Initialize the frame buffer
frames = []

# Load all frames into the buffer
while vidcap.isOpened():
    success, image = vidcap.read()
    if success:
        frames.append(image)
    else:
        break

# Set up the start frame index (each frame needs 5 neighbours on either side
# for the temporal window of 11 used below)
start_frame = 5

# Enhance and save the video frame by frame
for i in range(start_frame, len(frames) - (start_frame + 1)):
    output = frames[i]
    if args["denoise"]:
        output = cv2.fastNlMeansDenoisingColoredMulti(frames, i, 11)
    if args["sharpen"]:
        output = np.array(Image.fromarray(output).filter(ImageFilter.DETAIL))
    enhanced_video.write(output)
    update_progress(i, num_frames - (start_frame + 1))

# Finish video enhancement
print('\n\nSuccessfully enhanced video!\n')
'''
Sample run:
python enhance.py --input_video results/secret_outvid_300.avi --denoise --sharpen
'''
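The loop above relies on vidcap, args, enhanced_video, num_frames, and update_progress being set up earlier in enhance.py; a minimal sketch of that setup, matching the flags in the sample run (the output codec, output file name, and the body of update_progress are assumptions, not part of the source):

import argparse

import cv2
import numpy as np
from PIL import Image, ImageFilter

parser = argparse.ArgumentParser()
parser.add_argument("--input_video", required=True)
parser.add_argument("--denoise", action="store_true")
parser.add_argument("--sharpen", action="store_true")
args = vars(parser.parse_args())

vidcap = cv2.VideoCapture(args["input_video"])
fps = vidcap.get(cv2.CAP_PROP_FPS)
width = int(vidcap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
num_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))

# assumed writer settings; the original output configuration is not shown
fourcc = cv2.VideoWriter_fourcc(*"XVID")
enhanced_video = cv2.VideoWriter("enhanced.avi", fourcc, fps, (width, height))


def update_progress(current, total):
    # hypothetical helper: print a one-line percentage progress indicator
    print("\rprogress: %d%%" % (100 * current // max(total, 1)), end="")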
Example #8
    h = cv2.getTrackbarPos('h', 'image')  # filter strength
    t = cv2.getTrackbarPos('t', 'image')  # template window size
    s = cv2.getTrackbarPos('s', 'image')  # search window size
    # both window sizes must be odd and at least 1
    if t < 1:
        t = 1
    if s < 1:
        s = 1
    if t % 2 == 0:
        t += 1
    if s % 2 == 0:
        s += 1
    # collect 5 noised copies of the frame, then denoise them as a sequence
    if count < 5:
        float64 = np.float64(img)
        float64 = float64 + noise
        noised = np.uint8(np.clip(float64, 0, 255))
        imglist.append(noised)
        continue
    else:
        count = 0
        # positional order after dst is: h, hColor, templateWindowSize,
        # searchWindowSize (h is reused here for the color strength)
        dst = cv2.fastNlMeansDenoisingColoredMulti(imglist, 2, 3, None, h, h,
                                                   t, s)
        img3 = np.hstack((img, dst))
        cv2.imshow('image', img3)
        imglist = []  # start collecting a fresh batch of frames
    if cv2.waitKey(1) == 27:  # Esc quits
        break
    if cv2.waitKey(1) == ord('p'):
        print()
cv2.destroyAllWindows()
if a == 1:
    cap.release()
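The trackbar loop above reads controls named 'h', 't', and 's' from a window called 'image', but the setup is not part of the fragment. A minimal sketch of what it assumes (the value ranges, initial values, and the noise construction are assumptions):

import cv2
import numpy as np

img = cv2.imread("input.png")               # hypothetical test image
noise = np.random.normal(0, 15, img.shape)  # assumed additive Gaussian noise

cv2.namedWindow('image')
cv2.createTrackbar('h', 'image', 10, 50, lambda v: None)  # filter strength
cv2.createTrackbar('t', 'image', 7, 31, lambda v: None)   # template window size
cv2.createTrackbar('s', 'image', 21, 61, lambda v: None)  # search window size

imglist = []
count = 0

The loop body shown above is then expected to run inside a `while True:` loop that increments count on each iteration.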
    def get_packet(self):
        ctr = self.data['image_ctr']
        rate = rospy.Rate(50)
        while ctr + NUM_FRAMES > self.data['image_ctr']:
            rate.sleep()
        images = []
        with self.lock['image_raw']:
            for image in self.data['image_raw']:
                images.append(copy.deepcopy(image))
        with self.lock['global_gps']:
            global_gps = copy.deepcopy(self.data['global_gps'])
        """
        with self.lock['image_raw']:
            image_raw = copy.deepcopy(self.data['image_raw'])
        try:
            image = self.cv_bridge.imgmsg_to_cv2(image_raw, desired_encoding="bgr8")
        except CvBridgeError as e:
            rospy.logerr(e)
            return ['ERR']
        """
        cv_images = []
        for image in images:
            try:
                cv_images.append(
                    self.cv_bridge.imgmsg_to_cv2(image,
                                                 desired_encoding="bgr8"))
            except CvBridgeError as e:
                rospy.logerr(e)
                return ['ERR']

        image = cv_images[0]
        if NUM_FRAMES > 1:
            # note: with imgToDenoiseIndex=1 and temporalWindowSize=1 only
            # cv_images[1] itself is used; a larger odd window would make use
            # of the neighbouring frames
            image = cv2.fastNlMeansDenoisingColoredMulti(cv_images, 1, 1)

        params = [cv2.IMWRITE_JPEG_QUALITY, self.params['jpeg_quality']]
        name = '%s_pegasus_video_streamer.jpg' % (os.path.dirname(
            self.params['mavros_namespace'])[1:], )
        filename = os.path.join(tempfile.gettempdir(), name)
        cv2.imwrite(filename, image, params)
        if global_gps is not None:
            exif_dict = piexif.load(filename)
            exif_dict['GPS'] = {
                piexif.GPSIFD.GPSLatitudeRef:
                "N",
                piexif.GPSIFD.GPSLatitude:
                self._get_degree_minute_second(global_gps.latitude),
                piexif.GPSIFD.GPSLongitudeRef:
                "W",
                piexif.GPSIFD.GPSLongitude:
                self._get_degree_minute_second(global_gps.longitude),
                piexif.GPSIFD.GPSAltitudeRef:
                0,
                piexif.GPSIFD.GPSAltitude:
                self._get_altitude(global_gps.altitude + 50)
            }
            print(exif_dict)
            exif_bytes = piexif.dump(exif_dict)
            piexif.insert(exif_bytes, filename)
        data = []
        buf_size = sock_buffsize - 64 - verify_data.HASH_SIZE  # buffer size to fit in 1024 protobuf packet
        with open(filename, 'rb') as f:
            d = f.read(buf_size)
            while d:
                data.append(d)
                d = f.read(buf_size)
        return data
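get_packet returns the JPEG split into chunks of buf_size bytes; a hypothetical receiver-side helper that writes them back into a single file (the function and file names are assumptions, not part of the source):

def reassemble_packet(chunks, output_path):
    """Concatenate the chunks returned by get_packet into one JPEG file."""
    if chunks == ['ERR']:
        raise RuntimeError("image conversion failed on the sender side")
    with open(output_path, 'wb') as f:
        for chunk in chunks:
            f.write(chunk)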
Color image algorithm

fastNlMeansDenoisingColored(src[, dst[, h[, hColor[, templateWindowSize[, searchWindowSize]]]]]) -> dst
Defaults: h=3, hColor=3, templateWindowSize=7, searchWindowSize=21

Recommended parameters (a helper implementing this lookup is sketched after the example below)
Noise ∂       | Patch size s | Search window | Filter strength h
0 < ∂ <= 25   | 3 x 3        | 21 x 21       | 0.55 * ∂
25 < ∂ <= 55  | 5 x 5        | 35 x 35       | 0.40 * ∂
55 < ∂ <= 100 | 7 x 7        | 35 x 35       | 0.35 * ∂

"""
color = cv2.fastNlMeansDenoisingColored(img)
"""
Denoising methods for sequences of consecutive frames

1.
fastNlMeansDenoisingMulti(srcImgs, imgToDenoiseIndex, temporalWindowSize[, dst[, h[, templateWindowSize[, searchWindowSize]]]]) -> dst

2.
fastNlMeansDenoisingColoredMulti(srcImgs, imgToDenoiseIndex, temporalWindowSize[, dst[, h[, hColor[, templateWindowSize[, searchWindowSize]]]]]) -> dst

"""
denoising = cv2.fastNlMeansDenoisingMulti()
denoising = cv2.fastNlMeansDenoisingColoredMulti()
"""
Histogram equalization

Note that the source image must be 8-bit and single-channel, so a color image has to be processed channel by channel (see the sketch after this block).
"""
Hist = cv2.equalizeHist(img)
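Because cv2.equalizeHist only accepts 8-bit single-channel input, a color image can be split, equalized channel by channel, and merged back (a sketch; equalizing only the luminance channel of a YCrCb conversion is a common alternative that avoids color shifts):

import cv2

img = cv2.imread("input.png")      # hypothetical BGR color image
b, g, r = cv2.split(img)
equalized = cv2.merge([cv2.equalizeHist(b),
                       cv2.equalizeHist(g),
                       cv2.equalizeHist(r)])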
Example #11
cv2.waitKey(0)

"""
Python: cv2.fastNlMeansDenoisingMulti(srcImgs, imgToDenoiseIndex, temporalWindowSize[, dst[, h[, templateWindowSize[, searchWindowSize]]]]) -> dst
Parameters:	
srcImgs – Input 8-bit 1-channel, 2-channel or 3-channel images sequence. All images 
should have the same type and size.
imgToDenoiseIndex – Target image to denoise index in srcImgs sequence
temporalWindowSize – Number of surrounding images to use for target image denoising. 
Should be odd. 
Images from imgToDenoiseIndex - temporalWindowSize / 2 to imgToDenoiseIndex + temporalWindowSize / 2 
from srcImgs will be used to denoise srcImgs[imgToDenoiseIndex] image.
dst – Output image with the same size and type as srcImgs images.
templateWindowSize – Size in pixels of the template patch that is used to compute weights. 
Should be odd. Recommended value 7 pixels
searchWindowSize – Size in pixels of the window that is used to compute weighted average 
for given pixel. Should be odd. Affects performance linearly: 
    greater searchWindowSize - greater denoising time. Recommended value 21 pixels
h – Parameter regulating filter strength for luminance component. Bigger h value perfectly 
removes noise but also removes image details, smaller h value preserves details but also 
preserves some noise
"""
dst2 = cv2.fastNlMeansDenoisingMulti([gray], 0, 1, None, 6, 7, 21)
cv2.imshow('Fast Means Denoising Multi', dst2)
cv2.waitKey(0)

dst3 = cv2.fastNlMeansDenoisingColoredMulti([image], 0, 1, None, 6, 6, 7, 21)
cv2.imshow('Fast Means Denoising Multi (color)', dst3)
cv2.waitKey(0)

cv2.destroyAllWindows()
Example #12
kernel_3x3 = np.ones((3, 3), np.float32) / 9
kernel_5x5 = np.ones((5, 5), np.float32) / 25
kernel_7x7 = np.ones((7, 7), np.float32) / 49

# Blur - Convolution
blurred = cv2.filter2D(image, -1, kernel_3x3)

# Blur - Averaging
blurred_box = cv2.blur(image, (3, 3))

# Blur - Gaussian
blurred_gauss = cv2.GaussianBlur(image, (7, 7), 0)

# Blur - Median
blurred_median = cv2.medianBlur(image, 5)  # painted-look effect

# Blur - Bilateral
blurred_bilateral = cv2.bilateralFilter(image, 9, 75, 75)  # very effective at removing noise while keeping edges sharp

# Image Denoising
# Non-local Means Denoising (cleans noise for a smoother, DSLR-like result)
dst = cv2.fastNlMeansDenoisingColored(image, None, 6, 6, 7, 21)  # for a color image
# cv2.fastNlMeansDenoising()              # works with a single grayscale image
# cv2.fastNlMeansDenoisingMulti()         # works with a grayscale image sequence captured in a short period of time
# cv2.fastNlMeansDenoisingColoredMulti()  # works with a color image sequence captured in a short period of time


# Sharpening kernel
# |-1 -1 -1|
# |-1 +9 -1|   entries sum to 1, so no normalization is needed
# |-1 -1 -1|
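The 3 x 3 sharpening kernel shown above can be applied with cv2.filter2D, just like the box kernels at the top of this example (a sketch, reusing the same `image` input):

import numpy as np
import cv2

kernel_sharpen = np.array([[-1, -1, -1],
                           [-1,  9, -1],
                           [-1, -1, -1]], dtype=np.float32)  # entries sum to 1
sharpened = cv2.filter2D(image, -1, kernel_sharpen)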
Example #13
# start = time.time()
# dst = cv2.fastNlMeansDenoisingColored(img,None,10,10,7,21)
# print('time: ', time.time() - start)

# plt.subplot(121),plt.imshow(img)
# plt.subplot(122),plt.imshow(dst)
# plt.show()

import numpy as np
import cv2
import matplotlib.pyplot as plt
import time

cap = cv2.VideoCapture('/home/ljj/1code/13926303.mp4')

# create a list of first 5 frames
img = [cap.read()[1] for i in range(5)]

# Denoise 3rd frame considering all the 5 frames
start = time.time()
dst = cv2.fastNlMeansDenoisingColoredMulti(img, 2, 5, None, 4, 7, 35)
print('time: ', time.time() - start)

# plt.subplot(131),plt.imshow(img[2],'gray')
# plt.subplot(133),plt.imshow(dst,'gray')
# plt.show()

# convert from OpenCV's BGR order to RGB before displaying with matplotlib
plt.imshow(cv2.cvtColor(img[2], cv2.COLOR_BGR2RGB))
plt.savefig('ori.jpg')
plt.imshow(cv2.cvtColor(dst, cv2.COLOR_BGR2RGB))
plt.savefig('after.jpg')
Example #14
import cv2 as cv

def mp_process(index: int, data: list) -> tuple:
    # denoise the middle frame of the (odd-length) window; return (index, result)
    return index, cv.fastNlMeansDenoisingColoredMulti(data, len(data) // 2,
                                                      len(data))
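mp_process returns its index together with the result so that frames denoised in parallel worker processes can be put back in order; a minimal driver sketch using multiprocessing (the overlapping-window chunking below is an assumption, not part of the source):

import multiprocessing as mp


def denoise_frames_parallel(frames, window=5):
    """Denoise each eligible frame with `window` neighbours, in parallel."""
    half = window // 2
    jobs = [(i, frames[i - half:i + half + 1])
            for i in range(half, len(frames) - half)]
    with mp.Pool() as pool:
        results = pool.starmap(mp_process, jobs)
    # restore the original order using the returned indices
    return [frame for _, frame in sorted(results, key=lambda r: r[0])]

On platforms that spawn worker processes, the driver call would need to sit under an `if __name__ == "__main__":` guard.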
# -*- coding: utf-8 -*-
# @Author: lcl1026504480
# @Date:   2019-06-12 15:36:42
# @Last Modified by:   lcl1026504480
# @Last Modified time: 2019-06-12 15:37:31
import numpy as np
import cv2
from matplotlib import pyplot as plt
import imnoise
img = [cv2.imread(str(i) + ".png") for i in range(1, 4)]
noisy = [imnoise.gauss(i, 0, 10) for i in img]
# the line below adds salt-and-pepper noise instead
# noisy = [imnoise.sp(i) for i in img]
dst = cv2.fastNlMeansDenoisingColoredMulti(noisy, 1, 3, None, 4, 7, 35)
noisy = [cv2.cvtColor(i, cv2.COLOR_BGR2RGB) for i in noisy]
dst = cv2.cvtColor(dst, cv2.COLOR_BGR2RGB)
plt.subplot(221), plt.imshow(noisy[0])
plt.subplot(222), plt.imshow(noisy[1])
plt.subplot(223), plt.imshow(noisy[2])
plt.subplot(224), plt.imshow(dst)
plt.show()
Example #16

import cv2
import numpy as np
import matplotlib.pyplot as plt

# `path` (the image directory) is assumed to be defined earlier
def load(name):
    # read the image and swap the B and R channels (BGR -> RGB) for matplotlib
    a = cv2.imread(path + name)
    b = a.copy()
    b[:, :, 0] = a[:, :, 2]
    b[:, :, 2] = a[:, :, 0]

    return b


#%%
b = []
for i in range(3):
    b.append(load("b" + str(i) + ".png"))

b = np.array(b)

plt.imshow(b[0])
#%%
# with temporalWindowSize=1 only b[1] itself is used; set it to 3 to
# exploit all three loaded frames
b_n = cv2.fastNlMeansDenoisingColoredMulti(b,
                                           imgToDenoiseIndex=1,
                                           temporalWindowSize=1,
                                           h=3,
                                           templateWindowSize=7,
                                           searchWindowSize=21)
#%%
plt.figure(figsize=(9, 9))
plt.imshow(b_n)
#%%
plt.imsave(path + "b_n.png", b_n)