Code Example #1
import cv2


def b_clahe_nlm_de(image_path,
                   sigma_s,
                   sigma_r,
                   denoise=False,
                   verbose=False,
                   limit=2.0):  # CLAHE clip limit; must be numeric
    bgr = cv2.imread(image_path)
    if bgr is None:  # cv2.imread returns None when the file cannot be read
        raise FileNotFoundError(image_path)

    if denoise:
        bgr = cv2.bilateralFilter(bgr, 3, 3, 2)

    lab = cv2.cvtColor(bgr, cv2.COLOR_BGR2LAB)
    # cv2.split may return a tuple, so make the planes list mutable
    lab_planes = list(cv2.split(lab))
    clahe = cv2.createCLAHE(clipLimit=limit)
    # Apply CLAHE to the lightness channel only; equalizing a/b would shift colors
    lab_planes[0] = clahe.apply(lab_planes[0])
    lab = cv2.merge(lab_planes)
    bgr = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)

    if denoise:
        bgr = cv2.fastNlMeansDenoisingColored(bgr, None, 10, 10, 3, 9)
        bgr = cv2.detailEnhance(bgr, sigma_s=sigma_s, sigma_r=sigma_r)

    if verbose:
        cv2.imshow("test", bgr)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    return bgr
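A minimal usage sketch for the function above. The file names and parameter values here are assumptions for illustration, not part of the original example:

# Hypothetical usage of b_clahe_nlm_de (file names and values are assumed)
enhanced = b_clahe_nlm_de("sample.jpg",
                          sigma_s=10,
                          sigma_r=0.15,
                          denoise=True,
                          limit=2.0)
cv2.imwrite("sample_enhanced.jpg", enhanced)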
Code Example #2
    def __init__(self, imdb):
        self.imdb = imdb

        # all filters
        self.filters = odict([
            ('Background Subtraction (mean)',
             imdb.pipeline().use_window().single_bgsub3(method='mean')),
            ('Background Subtraction (median)',
             imdb.pipeline().use_window().single_bgsub3(method='median')),
            ('Background Subtraction (min)',
             imdb.pipeline().use_window().single_bgsub3(method='min')),
            ('Background Subtraction (max)',
             imdb.pipeline().use_window().single_bgsub3(method='max')),
            ('Original', imdb.pipeline()),
            ('Greyscale', imdb.pipeline().grey()),
            ('Edges', imdb.pipeline().grey().pipe(
                lambda im: cv2.Laplacian(im, cv2.CV_64F)).invert()),

            # https://www.learnopencv.com/non-photorealistic-rendering-using-opencv-python-c/
            ('Stylization', imdb.pipeline().pipe(
                lambda im: cv2.stylization(im, sigma_s=10, sigma_r=0.4))),
            ('Pencil Sketch', imdb.pipeline().pipe(lambda im: cv2.pencilSketch(
                im, sigma_s=10, sigma_r=0.1, shade_factor=0.02)[1])),
            ('Detail Enhance', imdb.pipeline().pipe(
                lambda im: cv2.detailEnhance(im, sigma_s=20, sigma_r=0.15))),
            ('Edge Preserving',
             imdb.pipeline().pipe(lambda im: cv2.edgePreservingFilter(
                 im, flags=1, sigma_s=30, sigma_r=0.4))),
        ])

        for name in self.filters:
            self.filters[name].fake_crop()
Code Example #3
def load_train_image(data_dir,
                     img_name,
                     is_hflip=False,
                     hshift=0,
                     vshift=0,
                     rotate=0,
                     scale_size=0,
                     is_color_trans=False,
                     is_fancy_pca_trans=False,
                     is_edge_enh_trans=False,
                     test_time_aug=None,
                     paddings=None,
                     tile_size=None):
    '''
    Load a training image and apply the requested augmentations.
    '''
    img_file_name = tile.get_img_name(img_name)
    img_ext = 'jpg'
    img = load_image_file(data_dir, img_file_name, img_ext, rotate)
    # img.shape: (height, width, 3)

    if is_color_trans:
        img = color.transform(img)
    if is_fancy_pca_trans:
        img = fancy_pca.rgb_shift(img)
    if is_edge_enh_trans:
        img = cv2.detailEnhance(img, sigma_s=5, sigma_r=0.1)

    img = np.moveaxis(img, 2, 0)
    # img.shape: (3, height, width)

    return preprocess(img, img_name, is_hflip, hshift, vshift, scale_size,
                      paddings, tile_size, test_time_aug)
Code Example #4
import cv2
import numpy as np


def is_fake(img):
    # img.shape is (rows, cols, channels): rows = height, cols = width
    h, w = img.shape[0], img.shape[1]
    print(h, w)
    if h > w * 2:
        print("is_fake")
        return -1
    if w < h / 10:
        print("is_fake")
        return -1
    img = cv2.fastNlMeansDenoisingColored(img, None, 5, 5, 3, 5)

    img = cv2.detailEnhance(img, sigma_s=3, sigma_r=0.7)

    # Edge-density check: an almost edge-free image is treated as fake
    img_copy = img.copy()
    kernel = np.ones((3, 3), np.float32) / 9
    blur_img = cv2.filter2D(img_copy, -1, kernel)
    img_gray = cv2.cvtColor(blur_img, cv2.COLOR_BGR2GRAY)
    edged = cv2.Canny(img_gray, 0, 255)
    size = img.shape[0] * img.shape[1]
    index = np.count_nonzero(edged)  # number of edge pixels
    print(index)
    if index == 0 or index / size < 0.001:
        print("is_fake")
        return -1
    return 1
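A minimal, hypothetical call of is_fake; the file name is assumed:

# Hypothetical usage of is_fake (file name is assumed)
img = cv2.imread("candidate.jpg")
if img is not None and is_fake(img) == -1:
    print("image rejected as fake")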
Code Example #5
def dehaze(I,
           tmin=0.1,
           w=15,
           alpha=0.4,
           omega=0.75,
           p=0.1,
           eps=1e-3,
           reduce=False):
    m, n, _ = I.shape
    Idark, Ibright = get_illumination_channel(I, w)
    A = get_atmosphere(I, Ibright, p)

    init_t = get_initial_transmission(A, Ibright)
    if reduce:
        init_t = reduce_init_t(init_t)
    corrected_t = get_corrected_transmission(I, A, Idark, Ibright, init_t,
                                             alpha, omega, w)

    normI = (I - I.min()) / (I.max() - I.min())
    refined_t = guided_filter(normI, corrected_t, w, eps)
    J_refined = get_final_image(I, A, refined_t, tmin)

    enhanced = (J_refined * 255).astype(np.uint8)
    f_enhanced = cv2.detailEnhance(enhanced, sigma_s=10, sigma_r=0.15)
    f_enhanced = cv2.edgePreservingFilter(f_enhanced,
                                          flags=1,
                                          sigma_s=64,
                                          sigma_r=0.2)
    return f_enhanced
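A sketch of how dehaze might be called, assuming the helper functions it uses (get_illumination_channel, get_atmosphere, get_initial_transmission, get_corrected_transmission, guided_filter, get_final_image, reduce_init_t) are defined elsewhere in the source project, and that the input is a float image in [0, 1]; the file names and the float conversion are assumptions:

# Hypothetical usage of dehaze (file names and float input are assumed)
import cv2
import numpy as np

I = cv2.imread("hazy.jpg").astype(np.float64) / 255.0
result = dehaze(I)
cv2.imwrite("dehazed.jpg", result)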
Code Example #6
def img_preprocessing(img_dst):
    img = crop_black_edge(img_dst)
    img = cv2.detailEnhance(img, sigma_s=10, sigma_r=0.15)

    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    mask_all = get_red_mask(hsv)
    # Per-axis thresholds: stricter (0.9) over the central half, looser (0.5) outside
    thresholds = [np.array([0.5] * img.shape[0]),
                  np.array([0.5] * img.shape[1])]
    for axis in range(len(thresholds)):
        strict_range = (img.shape[axis] * np.array([1 / 4, 3 / 4])).astype(int)
        thresholds[axis][strict_range[0]:strict_range[1] + 1] = 0.9
        thresholds[axis] = thresholds[axis] * img.shape[1 - axis]

    # Mark rows/columns whose red-pixel count exceeds the threshold
    # (summing over axis 0 gives per-column counts, axis 1 per-row counts)
    mask_hv = np.zeros(mask_all.shape, dtype=np.uint8)
    cols = np.where(np.sum(mask_all == 255, 0) - thresholds[1] > 0)[0]
    rows = np.where(np.sum(mask_all == 255, 1) - thresholds[0] > 0)[0]
    mask_hv[rows, :] = 255
    mask_hv[:, cols] = 255
    element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    mask_hv = cv2.dilate(mask_hv, element, iterations=2)
    mask_hv = cv2.bitwise_and(mask_all, mask_hv)

    im_trunc = cv2.inpaint(img, mask_hv, 3, cv2.INPAINT_TELEA)
    im_gray = cv2.cvtColor(im_trunc, cv2.COLOR_BGR2GRAY)
    (thresh, im_bw) = cv2.threshold(im_gray, 127, 255,
                                    cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    im_bw = cv2.bitwise_or(im_bw, mask_hv)
    kernel = np.ones((2, 2), np.uint8)
    # MORPH_CLOSE fills small gaps in the binary image (a closing, not an opening)
    closed = cv2.morphologyEx(im_bw, cv2.MORPH_CLOSE, kernel)
    closed = crop_black_edge_result(closed)
    return closed
Code Example #7
File: image.py Project: zephyract/my_repo
 def detailEnhance(self, sigma_s=50, sigma_r=0.15):
     '''
     Detail enhancement
     Parameters are the same as above
     '''
     out = cv2.detailEnhance(self.img, sigma_s=sigma_s, sigma_r=sigma_r)
     self.showImage("detailEnhance.png", out)
Code Example #8
async def savepdf(event):
    ok = await event.get_reply_message()
    if not (ok and (ok.media)):
        await eor(
            event, "`Reply to Images/pdf which u want to merge as a single pdf..`"
        )
        return
    ultt = await ok.download_media()
    if ultt.endswith(("png", "jpg", "jpeg", "webp")):
        xx = await eor(event, "`Processing...`")
        image = cv2.imread(ultt)
        original_image = image.copy()
        ratio = image.shape[0] / 500.0
        image = imutils.resize(image, height=500)
        image_yuv = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)
        image_y = np.zeros(image_yuv.shape[0:2], np.uint8)
        image_y[:, :] = image_yuv[:, :, 0]
        image_blurred = cv2.GaussianBlur(image_y, (3, 3), 0)
        edges = cv2.Canny(image_blurred, 50, 200, apertureSize=3)
        contours, hierarchy = cv2.findContours(
            edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
        )
        polygons = []
        for cnt in contours:
            hull = cv2.convexHull(cnt)
            polygons.append(
                cv2.approxPolyDP(hull, 0.01 * cv2.arcLength(hull, True), False)
            )
        # Sort once after collecting all polygons and guard against an empty list
        simplified_cnt = None
        if polygons:
            sortedPoly = sorted(polygons, key=cv2.contourArea, reverse=True)
            cv2.drawContours(image, sortedPoly[0], -1, (0, 0, 255), 5)
            simplified_cnt = sortedPoly[0]
        if simplified_cnt is not None and len(simplified_cnt) == 4:
            cropped_image = four_point_transform(
                original_image, simplified_cnt.reshape(4, 2) * ratio
            )
            gray_image = cv2.cvtColor(cropped_image, cv2.COLOR_BGR2GRAY)
            T = threshold_local(gray_image, 11, offset=10, method="gaussian")
            ok = (gray_image > T).astype("uint8") * 255
        else:
            ok = cv2.detailEnhance(original_image, sigma_s=10, sigma_r=0.15)
        cv2.imwrite("o.png", ok)
        image1 = PIL.Image.open("o.png")
        im1 = image1.convert("RGB")
        a = dani_ck("pdf/scan.pdf")
        im1.save(a)
        await xx.edit(
            f"Done, Now Reply Another Image/pdf if completed then use {hndlr}pdsend to merge nd send all as pdf",
        )
        os.remove("o.png")
    elif ultt.endswith(".pdf"):
        a = dani_ck("pdf/scan.pdf")
        await ultroid_bot.download_media(ok, a)
        await eor(
            event,
            f"Done, Now Reply Another Image/pdf if completed then use {hndlr}pdsend to merge nd send all as pdf",
        )
    else:
        await eor(event, "`Reply to a Image/pdf only...`")
    os.remove(ultt)
Code Example #9
async def imgscan(event):
    ok = await event.get_reply_message()
    if not (ok and (ok.media)):
        await event.eor("`Reply The pdf u Want to Download..`")
        return
    ultt = await ok.download_media()
    if not ultt.endswith(("png", "jpg", "jpeg", "webp")):
        await event.eor("`Reply to a Image only...`")
        os.remove(ultt)
        return
    xx = await event.eor(get_string("com_1"))
    image = cv2.imread(ultt)
    original_image = image.copy()
    ratio = image.shape[0] / 500.0
    hi, wid = image.shape[:2]
    ra = 500 / float(hi)
    dmes = (int(wid * ra), 500)
    image = cv2.resize(image, dmes, interpolation=cv2.INTER_AREA)
    image_yuv = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)
    image_y = np.zeros(image_yuv.shape[:2], np.uint8)
    image_y[:, :] = image_yuv[:, :, 0]
    image_blurred = cv2.GaussianBlur(image_y, (3, 3), 0)
    edges = cv2.Canny(image_blurred, 50, 200, apertureSize=3)
    contours, hierarchy = cv2.findContours(
        edges,
        cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE,
    )
    polygons = []
    for cnt in contours:
        hull = cv2.convexHull(cnt)
        polygons.append(
            cv2.approxPolyDP(hull, 0.01 * cv2.arcLength(hull, True), False))
    # Sort once after collecting all polygons and guard against an empty list
    simplified_cnt = None
    if polygons:
        sortedPoly = sorted(polygons, key=cv2.contourArea, reverse=True)
        cv2.drawContours(image, sortedPoly[0], -1, (0, 0, 255), 5)
        simplified_cnt = sortedPoly[0]
    if simplified_cnt is not None and len(simplified_cnt) == 4:
        cropped_image = four_point_transform(
            original_image,
            simplified_cnt.reshape(4, 2) * ratio,
        )
        gray_image = cv2.cvtColor(cropped_image, cv2.COLOR_BGR2GRAY)
        T = threshold_local(gray_image, 11, offset=10, method="gaussian")
        ok = (gray_image > T).astype("uint8") * 255
    else:
        ok = cv2.detailEnhance(original_image, sigma_s=10, sigma_r=0.15)
    cv2.imwrite("o.png", ok)
    image1 = Image.open("o.png")
    im1 = image1.convert("RGB")
    scann = f"Scanned {ultt.split('.')[0]}.pdf"
    im1.save(scann)
    await event.client.send_file(event.chat_id,
                                 scann,
                                 reply_to=event.reply_to_msg_id)
    await xx.delete()
    os.remove(ultt)
    os.remove("o.png")
    os.remove(scann)
Code Example #10
File: main.py Project: nwnightwind/Smart-camera
 def detail_enhance(self):
     if self.raw_image is None:
         return 0
     if self.ui.horizontalSlider_6.value() == 0:
         self.current_img = self.raw_image
         self.show_image()
         return 0
     value = self.ui.horizontalSlider_6.value() * 0.05
     # Enhance from the untouched raw image so repeated slider moves do not compound
     self.current_img = cv2.detailEnhance(self.raw_image, sigma_s=50, sigma_r=value)
Code Example #11
 def _py_enhance_shape(img):
     images = []
     for i in range(len(img)):
         # assuming the input tensors are RGB; convert to BGR for OpenCV
         bgr = cv2.cvtColor(img[i].numpy().astype(np.uint8), cv2.COLOR_RGB2BGR)
         # sigma_s/sigma_r must be passed by keyword: the second positional
         # argument of detailEnhance is dst; prc comes from the enclosing scope
         images += [cv2.detailEnhance(bgr, sigma_s=10, sigma_r=prc)]
     return np.array(images)
Code Example #12
async def scan_pdf(message: Message):
    """image to pdf conversion"""
    reply = message.reply_to_message
    if not reply or not reply.media:
        await message.edit("Please reply to an image...", del_in=5)
        return
    media = await reply.download()
    if not media.endswith((".jpg", ".jpeg", ".png", ".webp")):
        await message.edit("Please reply to an image...", del_in=5)
        os.remove(media)
        return
    await message.edit("Processing...")
    image = cv2.imread(media)
    original_image = image.copy()
    ratio = image.shape[0] / 500.0
    image = imutils.resize(image, height=500)
    image_yuv = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)
    image_y = np.zeros(image_yuv.shape[0:2], np.uint8)
    image_y[:, :] = image_yuv[:, :, 0]
    image_blurred = cv2.GaussianBlur(image_y, (3, 3), 0)
    edges = cv2.Canny(image_blurred, 50, 200, apertureSize=3)
    contours, hierarchy = cv2.findContours(
        edges,
        cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE,
    )
    polygons = []
    for cnt in contours:
        hull = cv2.convexHull(cnt)
        polygons.append(cv2.approxPolyDP(hull, 0.01 * cv2.arcLength(hull, True), False))
    # Sort once after collecting all polygons and guard against an empty list
    simplified_cnt = None
    if polygons:
        sortedPoly = sorted(polygons, key=cv2.contourArea, reverse=True)
        cv2.drawContours(image, sortedPoly[0], -1, (0, 0, 255), 5)
        simplified_cnt = sortedPoly[0]
    if simplified_cnt is not None and len(simplified_cnt) == 4:
        cropped_image = four_point_transform(
            original_image,
            simplified_cnt.reshape(4, 2) * ratio,
        )
        gray_image = cv2.cvtColor(cropped_image, cv2.COLOR_BGR2GRAY)
        T = threshold_local(gray_image, 11, offset=10, method="gaussian")
        ok = (gray_image > T).astype("uint8") * 255  # binarize to 0/255
    else:
        ok = cv2.detailEnhance(original_image, sigma_s=10, sigma_r=0.15)
    cv2.imwrite("png.png", ok)
    image1 = PIL.Image.open("png.png")
    im1 = image1.convert("RGB")
    scann = os.path.basename(media)  # avoid assuming a fixed directory depth
    scann = f"Scanned {scann.split('.')[0]}.pdf"
    im1.save(scann)
    await userge.send_document(
        message.chat.id, scann, reply_to_message_id=reply.message_id
    )
    await message.delete()
    os.remove(media)
    os.remove("png.png")
    os.remove(scann)
Code Example #13
def detailenhance(img):
    smooth = st.slider('Smoothness', 3, 99, 5, step=2)
    kernel = st.slider('Sharpness', 1, 40, 3, step=2)
    edge_preserve = st.slider('Tune Color Averaging effects', 0.0, 1.0, 0.05)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = cv2.medianBlur(gray, kernel)
    edges = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                  cv2.THRESH_BINARY, 9, 9)
    color = cv2.detailEnhance(img, sigma_s=smooth, sigma_r=edge_preserve)
    # cartoon masks the enhanced image with the detected edges; return it
    # instead of color if the cartoon effect is wanted
    cartoon = cv2.bitwise_and(color, color, mask=edges)
    return color
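A sketch of how this Streamlit widget function might be wired into an app; the uploader and page layout are assumptions, not part of the original:

# Hypothetical Streamlit wiring (uploader and layout are assumed)
import cv2
import numpy as np
import streamlit as st

uploaded = st.file_uploader("Choose an image", type=["jpg", "jpeg", "png"])
if uploaded is not None:
    data = np.frombuffer(uploaded.read(), np.uint8)
    img = cv2.imdecode(data, cv2.IMREAD_COLOR)  # decoded as BGR
    result = detailenhance(img)
    st.image(cv2.cvtColor(result, cv2.COLOR_BGR2RGB))  # st.image expects RGB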
Code Example #14
File: image_service.py Project: luiz-vinholi/py-ocr
    def treat(self):
        image = cv2.detailEnhance(self._original_image, sigma_s=100, sigma_r=1)

        image_gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
        thresh = cv2.threshold(image_gray, 127, 255, cv2.THRESH_BINARY)[1]

        treated_image = pillow.Image.fromarray(thresh)
        treated_image.save('./src/images/treated-cpfl.jpeg')

        self.treated_image = treated_image

        return self
Code Example #15
File: final.py Project: jll2884/SnapchatLikeGUI
def noFilter():
    cancel()
    ret, frame = cap.read()
    frame = cv2.flip(frame, 1)
    # detailEnhance expects a 3-channel 8-bit image, so convert to RGB, not RGBA
    cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    enhanced = cv2.detailEnhance(cv2image, sigma_s=10, sigma_r=0.15)
    img = Image.fromarray(enhanced)
    imgtk = ImageTk.PhotoImage(image=img)
    lmain.imgtk = imgtk
    lmain.configure(image=imgtk)
    global job
    job = lmain.after(10, noFilter)
Code Example #16
    def predict(self, input1, input2):

        size = 128
        optimizer = Adam(self.lr)
        model_path = os.path.join(self.outputFolder, 'model.h5')
        weights_path = os.path.join(self.outputFolder, 'weights.h5')

        # Read left image
        img = []
        image1 = cv2.imread(input1)
        image1 = cv2.resize(image1, (size, size), interpolation=cv2.INTER_AREA)
        image1 = cv2.fastNlMeansDenoisingColored(image1, None, 10, 10, 21, 7)
        image1 = cv2.detailEnhance(image1, sigma_s=10, sigma_r=0.15)
        image1 = img_to_array(image1)
        img.append(image1)

        # Read right image
        image2 = cv2.imread(input2)
        image2 = cv2.resize(image2, (size, size), interpolation=cv2.INTER_AREA)
        image2 = cv2.fastNlMeansDenoisingColored(image2, None, 10, 10, 21, 7)
        image2 = cv2.detailEnhance(image2, sigma_s=10, sigma_r=0.15)
        image2 = img_to_array(image2)
        img.append(image2)
        r_img_pp = np.array([img], dtype="float") / 255.0

        # Load model
        with tf.device('/cpu:0'):
            model = load_model(model_path,
                               compile=False,
                               custom_objects={'tf': tf})
            model.load_weights(weights_path)
            model.compile(loss=self.loss,
                          optimizer=optimizer,
                          metrics=['accuracy'])
            input_shape = (size, size, 3)
            chanDim = -1

            predict = model.predict([r_img_pp[:, 0], r_img_pp[:, 1]])

        return predict
Code Example #17
def show(i):
    #image = Image.open(str(i)+".jpg")
    #cimg = cv.CreateImageHeader(image.size, cv.IPL_DEPTH_8U, 3)  # CV Image
    img = cv.imread(str(i) + ".jpg", 1)
    res = cv.resize(img, (170, 250))
    #dst = cv.edgePreservingFilter(res, flags=1, sigma_s=60, sigma_r=0.4)
    dst = cv.detailEnhance(res, sigma_s=10, sigma_r=0.1)
    cv.namedWindow("Image")
    cv.moveWindow("Image",120,150)
    cv.imshow("Image",dst)
    cv.waitKey(0)
    cv.destroyAllWindows()
Code Example #18
def convert(inputfile, outputfile):
    img = cv.imread(inputfile)
    img = cv.GaussianBlur(img, (3, 3), 0)
    # hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)
    # h, s, v = cv.split(hsv)
    # lim = 255 - 50
    # v[v > lim] = 255
    # v[v <= lim] += 50
    # final_hsv = cv.merge((h, s, v))
    # img = cv.cvtColor(final_hsv, cv.COLOR_HSV2BGR)
    cartoon_image = cv.stylization(img, sigma_s=60, sigma_r=0.25)
    dst = cv.detailEnhance(cartoon_image, sigma_s=30, sigma_r=0.1)
    cv.imwrite(outputfile, dst)
Code Example #19
async def hehe(event):
    if not event.reply_to_msg_id:
        await event.reply("Reply to any Image.")
        return
    reply = await event.get_reply_message()
    await event.edit('`Processing...`')
    image = await bot.download_media(reply.media, path)
    # Read the downloaded image directly instead of decoding it via VideoCapture
    frame = cv2.imread(image)
    dtl = cv2.detailEnhance(frame, sigma_s=10, sigma_r=0.15)
    cv2.imwrite("danish.jpg", dtl)
    await event.client.send_file(event.chat_id, "danish.jpg", force_document=False, reply_to=event.reply_to_msg_id)
    await event.delete()
    shutil.rmtree(path)
    os.remove("danish.jpg")
Code Example #20
def get_flatten_document(img):
    # img.shape is (rows, cols): index 0 is the height, index 1 the width
    heightImg = img.shape[0]
    widthImg = img.shape[1]
    # sigma_s/sigma_r must be keywords: the second positional argument
    # of detailEnhance is the dst array
    img = cv2.detailEnhance(img, sigma_s=10, sigma_r=0.25)
    imgThres = preProcessing(img)
    biggest = getContours(imgThres)
    imgContour = img.copy()
    cv2.drawContours(imgContour, biggest, -1, (0, 0, 255), 20)
    if biggest.size != 0:
        imgWarped = getWarp(img, biggest, widthImg, heightImg)
    else:
        imgWarped = img

    return cv2.resize(imgWarped, (widthImg, heightImg))
Code Example #21
    def apply_filter(self, image, *args):
        img = numpy.array(image)
        # the array from a PIL image is RGB, so use the RGB grayscale conversion
        gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)

        # Blur the grayscale image with median blur
        gray_blur = cv2.medianBlur(gray, 3)

        # Apply adaptive thresholding to detect edges
        edges = cv2.adaptiveThreshold(gray_blur, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 9, 9)

        # Sharpen the image
        color = cv2.detailEnhance(img, sigma_s=5, sigma_r=0.5)

        # Keep the enhanced colors only where the edge mask is white
        cartoon = cv2.bitwise_and(color, color, mask=edges)
        result = PIL.Image.fromarray(cartoon, 'RGB')
        return result
Code Example #22
def cartoonify():
    global image

    image_copy = image.copy()

    image_copy = cv2.edgePreservingFilter(image_copy,
                                          flags=1,
                                          sigma_s=sigmaS_edgePreservingFilter,
                                          sigma_r=sigmaR_edgePreservingFilter)

    image_copy = cv2.detailEnhance(image_copy,
                                   sigma_s=sigmaS_detailEnhance,
                                   sigma_r=sigmaR_detailEnhance)

    grayImage = cv2.cvtColor(image_copy, cv2.COLOR_BGR2GRAY)

    grayImageInv = 255 - grayImage.copy()

    if blurAmount > 0:
        gaussian = cv2.GaussianBlur(grayImageInv,
                                    (2 * blurAmount + 1, 2 * blurAmount + 1),
                                    0)
    else:
        gaussian = grayImageInv

    # gaussian = cv2.GaussianBlur(grayImageInv, (47, 47), 0)
    blend = cv2.divide(grayImage, 255 - gaussian, scale=256.0)

    # Sharpen kernel
    sharpen = np.array(([0, -1, 0], [-1, 5, -1], [0, -1, 0]), dtype="int")
    # Using 2D filter by applying the sharpening kernel
    sharpenOutput = cv2.filter2D(blend, -1, sharpen)

    print("sigmaS_edgePreservingFilter: ", sigmaS_edgePreservingFilter)
    print("sigmaR_edgePreservingFilter: ", sigmaR_edgePreservingFilter)
    print("sigmaS_detailEnhance: ", sigmaS_detailEnhance)
    print("sigmaR_detailEnhance: ", sigmaR_detailEnhance)
    print("blurAmount: ", 2 * blurAmount + 1)

    cv2.imshow("Cartoonify", sharpenOutput)
    k = cv2.waitKey(0)

    if k == 27:
        cv2.destroyAllWindows()
Code Example #23
def live():
    video = cv.VideoCapture(0, cv.CAP_DSHOW)
    video.set(cv.CAP_PROP_FRAME_WIDTH, 1280)
    video.set(cv.CAP_PROP_FRAME_HEIGHT, 720)
    while True:
        ret, frame = video.read()
        frame = cv.GaussianBlur(frame, (5, 5), 0)
        # hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
        # h, s, v = cv.split(hsv)
        # lim = 255 - 50
        # v[v > lim] = 255
        # v[v <= lim] += 50
        # final_hsv = cv.merge((h, s, v))
        # img = cv.cvtColor(final_hsv, cv.COLOR_HSV2BGR)
        cartoon_image = cv.stylization(frame, sigma_s=60, sigma_r=0.25)
        dst = cv.detailEnhance(cartoon_image, sigma_s=30, sigma_r=0.1)
        cv.imshow('cartoon', dst)
        if cv.waitKey(1) & 0xFF == ord('q'):
            break
    # Release the camera and close the window once the loop exits
    video.release()
    cv.destroyAllWindows()
Code Example #24
def segment_dot_ijazah(og, val=47, dot_size=3, min_width=32):
    img = og.copy()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    
    dot = DotsSegmentation(rlsa_val=val)
    
    rects = dot.segment(gray, dot_size=dot_size, min_width=min_width)
    segmented_imgs = []
    model = load_model('trained_models/engchars-sgd-100-90.h5')
    for rect in rects:
        x, y, w, h = rect
        segmented_img = gray[y:y+h, x:x+w]
        label = ''

        cv2.rectangle(img, (x,y), (x+w,y+h), (255, 0, 0), 2)
        
        # get label
        if 200 < x < 400:
            # segment from colored image. for detailEnhance process.
            label_img = og[y:y+h+10, 0:x]
            
            label_img = cv2.cvtColor(cv2.detailEnhance(label_img, sigma_s=10, sigma_r=0.15),
                                     cv2.COLOR_BGR2GRAY)
            
            chars = segment_characters(label_img)
            test_set = []
            for entry in chars:
                char_img = entry[1]  # entry[0] is the bounding box, unused here
                mnist_like = to_mnist(char_img, aspect_ratio=False)
                
                test_set.append(mnist_like)

            test_set = np.asarray(test_set).reshape(-1, 28, 28, 1)
            predicted_y = model.predict(test_set)
        
            for prediction in predicted_y:
                label += string.ascii_letters[prediction.argmax()]
                
        segmented_imgs.append((segmented_img,
                               process_label(label, metrics='ratio', tolerance=0.4),
                               rect))

    return img, segmented_imgs
Code Example #25
def parse_img(path, draw):
    cropped = cv2.imread(path)

    h, w, _ = cropped.shape

    cropped = cv2.resize(cropped, (w * 2, h * 2))
    # sigma_r is documented to lie in [0, 1]; use the maximum, 1.0
    cropped = cv2.detailEnhance(cropped, sigma_s=60, sigma_r=1.0)
    cropped = cv2.resize(cropped, (400, 800))

    text = ocr.recog(cropped)

    # parse output
    lines = text.split("\n")

    parsed = {}
    for line in lines:

        # use no spaces for logic
        no_spaces = line.replace(" ", "")
        if no_spaces != "":  # checks for empty strinmgs or empty strings with spaces
            if "=" in no_spaces or "-" in no_spaces or ":" in no_spaces:
                sides = []
                if "=" in line:
                    sides = line.split("=")
                elif "-" in line:
                    sides = line.split("-")
                elif ":" in line:
                    sides = line.split(":")

                if not ("" in sides):

                    # print(sides)
                    stripped = []
                    for side in sides:
                        stripped.append(side.strip())
                    parsed[stripped[0]] = stripped[1]

                # parsed.append(line)
    if draw:
        display(cropped)
    return parsed
Code Example #26
        def transform(self, renderer_type_id):
            print("transform {}".format(renderer_type_id))
            try:
                src_image = self.get_opencv_image()
                self.transformed_image = None

                if src_image is not None:
                    if renderer_type_id == MainView.EdgePreserveSmoothingByNormalizedConvolutionFilter:
                        self.transformed_image = cv2.edgePreservingFilter(
                            src_image, flags=1)

                    if renderer_type_id == MainView.EdgePreserveSmoothingByRecursiveFilter:
                        self.transformed_image = cv2.edgePreservingFilter(
                            src_image, flags=2)

                    if renderer_type_id == MainView.DetailEnhancement:
                        self.transformed_image = cv2.detailEnhance(src_image)

                    if renderer_type_id == MainView.MonochromePencilSketch:
                        self.transformed_image, _ = cv2.pencilSketch(
                            src_image,
                            sigma_s=10,
                            sigma_r=0.1,
                            shade_factor=0.03)

                    if renderer_type_id == MainView.ColorPencilSketch:
                        _, self.transformed_image = cv2.pencilSketch(
                            src_image,
                            sigma_s=10,
                            sigma_r=0.1,
                            shade_factor=0.03)

                    if renderer_type_id == MainView.Stylization:
                        self.transformed_image = cv2.stylization(src_image)

                    if self.transformed_image is not None:
                        self.set_opencv_image(self.transformed_image)
                        self.update()
            except Exception:
                traceback.print_exc()
Code Example #27
def cv2_filter(img, flag):
    if flag == 'None':
        return img
    elif flag == 'Pencil':
        dst1_gray, dst1_color = cv2.pencilSketch(img,
                                                 sigma_s=50,
                                                 sigma_r=0.15,
                                                 shade_factor=0.04)
        return dst1_color
    elif flag == 'Style':
        dst2 = cv2.stylization(img, sigma_s=50, sigma_r=0.15)
        return dst2
    elif flag == 'Detail':
        dst3 = cv2.detailEnhance(img, sigma_s=50, sigma_r=0.15)
        return dst3
    elif flag == 'Edge':
        dst4 = cv2.edgePreservingFilter(img, flags=1, sigma_s=50, sigma_r=0.15)
        return dst4
    elif flag == 'udinverse':
        return img[::-1]
    elif flag == 'lrinverse':
        return cv2.flip(img, flipCode=1)
    # Unknown flags fall through to the unmodified image rather than None
    return img
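A short, hypothetical driver for cv2_filter; the file name and flag value are assumed:

# Hypothetical usage of cv2_filter (file name and flag are assumed)
import cv2

img = cv2.imread("photo.jpg")
out = cv2_filter(img, 'Detail')
cv2.imwrite("photo_detail.jpg", out)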
Code Example #28
def im_filter(im_bgr, filter_name, value):
    """
    使用指定滤镜对图片进行处理
    :param im_bgr: BGR图片
    :param filter_name: 滤镜名
    :param value: 设定值(0~10)
    :param gray: 取得灰度图
    @return: 处理后BGR图片
    """
    if im_bgr is None:
        return 0
    if value == 0:
        return im_bgr
    value = value * 0.05

    # 铅笔灰度滤镜
    if filter_name == "pencil_gray":
        im_gray, im_color = cv2.pencilSketch(im_bgr, sigma_s=50, sigma_r=value, shade_factor=0.04)
        im_new = cv2.cvtColor(im_gray, cv2.COLOR_GRAY2BGR)

    # 铅笔彩色滤镜
    if filter_name == "pencil_color":
        im_gray, im_new = cv2.pencilSketch(im_bgr, sigma_s=50, sigma_r=value, shade_factor=0.04)

    # 风格化滤镜
    if filter_name == "stylize":
        im_new = cv2.stylization(im_bgr, sigma_s=50, sigma_r=value)

    # 细节增强滤镜
    if filter_name == "detail_enhance":
        im_new = cv2.detailEnhance(im_bgr, sigma_s=50, sigma_r=value)

    # 边缘保持
    if filter_name == "edge_preserve":
        im_new = cv2.edgePreservingFilter(im_bgr, flags=1, sigma_s=50, sigma_r=value)

    return im_new
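A short, hypothetical call of im_filter; the file name and arguments are assumed:

# Hypothetical usage of im_filter (file name and arguments are assumed)
import cv2

im = cv2.imread("portrait.jpg")
result = im_filter(im, "detail_enhance", 3)  # value 3 maps to sigma_r = 0.15
cv2.imwrite("portrait_detail.jpg", result)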
Code Example #29
import cv2

img = cv2.imread("bob_esponja.jpg", cv2.IMREAD_COLOR)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # computed but never used
enhance = cv2.detailEnhance(img)
cv2.imshow('BOB', enhance)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.imwrite("bob_result.jpg", enhance)  # save the enhanced result
Code Example #30
File: npr.py Project: AlexSnet/learnopencv
'''
OpenCV Non-Photorealistic Rendering Python Example

Copyright 2015 by Satya Mallick <*****@*****.**>
'''

import cv2

# Read image
im = cv2.imread("cow.jpg");

# Edge preserving filter with two different flags.
imout = cv2.edgePreservingFilter(im, flags=cv2.RECURS_FILTER);
cv2.imwrite("edge-preserving-recursive-filter.jpg", imout);

imout = cv2.edgePreservingFilter(im, flags=cv2.NORMCONV_FILTER);
cv2.imwrite("edge-preserving-normalized-convolution-filter.jpg", imout);

# Detail enhance filter
imout = cv2.detailEnhance(im);
cv2.imwrite("detail-enhance.jpg", imout);

# Pencil sketch filter
imout_gray, imout = cv2.pencilSketch(im, sigma_s=60, sigma_r=0.07, shade_factor=0.05);
cv2.imwrite("pencil-sketch.jpg", imout_gray);
cv2.imwrite("pencil-sketch-color.jpg", imout);

# Stylization filter
cv2.stylization(im,imout);
cv2.imwrite("stylization.jpg", imout);

Code Example #31
        # bbox can be None if detected fail

        # if bbox is not None:
        #     x, y, a, b, _ = bbox
        #     ## face only from original image
        #     img = image[y:(y+b), x:(x+a)]
            # cv2.imshow("ok", img)
            # cv2.waitKey(0)

        conf = None
#         check_result = fake_detection(img.copy(), sigma_, sigmaMax, k, thresh, ctx, queue, mf, prg, delta, device_id)

#         if check_result:
#             print(link_image, "is fake with score=", 1)
#             results.append([link_image, "fake_detected_by_opencv", 1])
#         else:
        # img comes from earlier in the surrounding loop (the bbox crop above is commented out)
        img = cv2.detailEnhance(img, sigma_s=10, sigma_r=0.15)
        check_result, conf, image, bbox = dl_face_spoof_detect(img.copy(), model_dir, model_test, image_cropper, img_heights, exact_thresh)
        if check_result:
            print(link_image, "is fake")
            results.append([link_image, "fake_detected_by_dl", conf])
        else:
            print(link_image, "is truth")
            results.append([link_image, 0, conf])

    if not os.path.exists(save_dir):
        os.mkdir(save_dir)
    pd.DataFrame(results, columns=["path_to_image", "is_fake", "conf"]).to_csv(
        os.path.join(save_dir, "{}_result.csv".format(datetime.now().strftime("%d_%m_%Y_%H_%M_%S"))))
Code Example #32
def sketch_img(img):
    # outimg = cv2.stylization(img, sigma_s=60, sigma_r=0.07)
    outimg = cv2.edgePreservingFilter(img, flags=1, sigma_s=60, sigma_r=0.4)
    outimg = cv2.detailEnhance(outimg, sigma_s=10, sigma_r=0.15)
    dst_gray, dst_color = cv2.pencilSketch(outimg, sigma_s=60, sigma_r=0.07, shade_factor=0.05)  # only the gray sketch is returned
    return dst_gray
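A minimal, hypothetical driver for sketch_img; the file names are assumed:

# Hypothetical usage of sketch_img (file names are assumed)
import cv2

img = cv2.imread("face.jpg")
sketch = sketch_img(img)
cv2.imwrite("face_sketch.jpg", sketch)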