Code example #1
import cv2

# FACIAL_LANDMARKS_IDXS maps facial-region names to (start, end) landmark
# index ranges, e.g. imutils.face_utils.FACIAL_LANDMARKS_IDXS.
def highlighter(image, shape, colors, alpha=0.75):
    overlay = image.copy()
    output = image.copy()
    for (i, name) in enumerate(FACIAL_LANDMARKS_IDXS.keys()):
        # Take the landmark points for this region and fill its convex hull.
        (j, k) = FACIAL_LANDMARKS_IDXS[name]
        pts = shape[j:k]
        hull = cv2.convexHull(pts)
        cv2.drawContours(overlay, [hull], -1, colors[i], -1)

    # Blend the filled overlay onto the original image.
    cv2.addWeighted(overlay, alpha, output, 1 - alpha, 0, output)
    return output
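A minimal usage sketch, assuming dlib's 68-point landmark model and imutils' face_utils helpers; the model path and image name are placeholders:

import cv2
import dlib
import numpy as np
from imutils import face_utils
from imutils.face_utils import FACIAL_LANDMARKS_IDXS

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

image = cv2.imread("face.jpg")  # placeholder path
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
rect = detector(gray, 1)[0]  # assumes at least one detected face
shape = face_utils.shape_to_np(predictor(gray, rect))
colors = [tuple(int(c) for c in np.random.randint(0, 255, 3))
          for _ in FACIAL_LANDMARKS_IDXS]
result = highlighter(image, shape, colors)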
Code example #2
import numpy as np


def bilateral_filter(img,
                     kernel=3,
                     sigmaSpace=10,
                     sigmaColor=100,
                     padding="VALID"):  # accepted for compatibility, unused
    row, col, channels = img.shape
    result = img.copy()
    half_kernel = (kernel - 1) // 2
    src = img.astype(np.float64)  # avoid uint8 wrap-around in the differences
    for c in range(channels):
        for i in range(half_kernel, row - half_kernel):  # process each pixel
            for j in range(half_kernel, col - half_kernel):
                weightSum = 0.0
                filterValue = 0.0
                # Full window centred on (i, j).
                for row_d in range(-half_kernel, half_kernel + 1):
                    for col_d in range(-half_kernel, half_kernel + 1):
                        distance_square = row_d * row_d + col_d * col_d
                        position_x = i + row_d
                        position_y = j + col_d
                        value_square = (src[i, j, c] -
                                        src[position_x, position_y, c]) ** 2
                        # Combined spatial and range Gaussian weights.
                        weight = np.exp(-(distance_square /
                                          (2 * sigmaSpace ** 2) +
                                          value_square /
                                          (2 * sigmaColor ** 2)))
                        weightSum += weight  # accumulate for normalisation
                        filterValue += weight * src[position_x, position_y, c]
                result[i, j, c] = filterValue / weightSum

    return result
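As a sanity check, the loop version can be compared against OpenCV's built-in bilateral filter; a sketch (agreement is only approximate, since cv2.bilateralFilter uses its own weighting and border handling):

import cv2
import numpy as np

img = (np.random.rand(32, 32, 3) * 255).astype(np.uint8)
ours = bilateral_filter(img, kernel=5, sigmaSpace=10, sigmaColor=100)
ref = cv2.bilateralFilter(img, d=5, sigmaColor=100, sigmaSpace=10)
# Compare only the interior; the loop version leaves the border untouched.
diff = ours[2:-2, 2:-2].astype(int) - ref[2:-2, 2:-2].astype(int)
print(np.abs(diff).mean())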
Code example #3
def flip2(image, direction):
    inversed = image.copy()
    if direction == 'horizontal':
        inversed = inversed[:, ::-1]  # mirror left-right by reversing columns
    else:
        inversed = inversed[::-1, :]  # mirror top-bottom by reversing rows
    return inversed
Code example #4
import numpy as np


def flip1(image, direction):
    inversed = image.copy()
    if direction == 'horizontal':
        inversed = np.fliplr(inversed)  # mirror left-right
    else:
        inversed = np.flipud(inversed)  # mirror top-bottom
    return inversed
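Both helpers can be checked against cv2.flip, where flip code 1 mirrors horizontally and 0 vertically; a quick sketch:

import cv2
import numpy as np

img = np.arange(24, dtype=np.uint8).reshape(4, 6)
assert np.array_equal(flip1(img, 'horizontal'), cv2.flip(img, 1))
assert np.array_equal(flip1(img, 'vertical'), cv2.flip(img, 0))
assert np.array_equal(flip2(img, 'horizontal'), cv2.flip(img, 1))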
Code example #5
def run_detection_image(path):
    # preprocess_image, resize_image, label_color, draw_box and draw_caption
    # are presumably keras-retinanet utilities; np_to_base64, base64_to_pil
    # and send_file come from the surrounding Flask app.
    image = cv2.imread(path)

    # Copy to draw on, converted to RGB for visualisation.
    draw = image.copy()
    draw = cv2.cvtColor(draw, cv2.COLOR_BGR2RGB)

    # Preprocess image for the network.
    image = preprocess_image(image)
    image, scale = resize_image(image)

    # Process image.
    start = time.time()
    boxes, scores, labels = model.predict_on_batch(
        np.expand_dims(image, axis=0))
    print("processing time: ", time.time() - start)

    # Correct for image scale.
    boxes /= scale

    # Visualise detections.
    for box, score, label in zip(boxes[0], scores[0], labels[0]):
        # Scores are sorted, so we can break at the first low one.
        if score < 0.3:
            break

        color = label_color(label)

        b = box.astype(int)
        draw_box(draw, b, color=color)

        caption = "{} {:.3f}".format(labels_to_names[label], score)
        draw_caption(draw, b, caption)

    # Round-trip through base64 to a PIL image, save it, and serve the file.
    retimag = np_to_base64(draw)
    strimg = base64_to_pil(retimag)
    strimg.save("testimg.png")
    return send_file('testimg.png', mimetype="image/png")
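The send_file call suggests this runs inside a Flask app; a minimal sketch of the wiring (the /detect route and upload handling are assumptions, not part of the original code):

from flask import Flask, request, send_file

app = Flask(__name__)

@app.route("/detect", methods=["POST"])  # hypothetical route
def detect():
    upload = request.files["image"]
    upload.save("upload.png")
    return run_detection_image("upload.png")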
Code example #6
    def __call__(self, sample: Sample) -> Sample:
        """
        Apply transformation on provided sample.

        :param sample: dict with an "image" array and an (N, 2) "keypoints" array
        :return: transformed sample
        """
        # Decide whether we flip the image:
        if random.uniform(0., 1.) <= self.probability:
            image = sample["image"]
            key_points = sample["keypoints"].copy()
            w = image.shape[1]
            # Mirror x-coordinates; w - 1 keeps pixel indices in [0, w - 1].
            key_points[:, 0] = w - 1 - key_points[:, 0]
            image = cv2.flip(image, 1)  # returns a new array, no copy needed
            sample = {"image": image, "keypoints": key_points}
        return sample
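A quick sketch of applying the transform; the enclosing class name and its constructor are assumptions, since only __call__ is shown above:

import numpy as np

sample = {"image": np.zeros((100, 200, 3), dtype=np.uint8),
          "keypoints": np.array([[10.0, 20.0], [150.0, 80.0]])}
flip = RandomHorizontalFlip(probability=1.0)  # hypothetical class name
flipped = flip(sample)
print(flipped["keypoints"])  # x-coordinates mirrored across the image width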
Code example #7
def run_detection_image(video_path, vwriter, output_path):
    vcapture = cv2.VideoCapture(video_path)
    count = 0
    success = True
    start = time.time()
    while success:
        print("frame: ", count)
        count += 1  # see what frame you are at
        # Read the next frame.
        success, image = vcapture.read()

        if success:
            draw = image.copy()
            draw = cv2.cvtColor(draw, cv2.COLOR_BGR2RGB)

            # Preprocess image for the network.
            image = preprocess_image(image)
            image, scale = resize_image(image)

            boxes, scores, labels = model.predict_on_batch(
                np.expand_dims(image, axis=0))

            # Correct for image scale.
            boxes /= scale

            for box, score, label in zip(boxes[0], scores[0], labels[0]):
                # Scores are sorted, so we can break at the first low one.
                if score < 0.3:
                    break

                color = label_color(label)

                b = box.astype(int)
                draw_box(draw, b, color=color)

                caption = "{} {:.3f}".format(labels_to_names[label], score)
                draw_caption(draw, b, caption)

            # VideoWriter expects BGR frames, so convert back before writing.
            vwriter.write(cv2.cvtColor(draw, cv2.COLOR_RGB2BGR))
    vcapture.release()
    vwriter.release()
    end = time.time()
    print("Total Time: ", end - start)
    return send_file(output_path)
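A sketch of constructing the vwriter argument from the capture's own properties; the mp4v codec choice and file names are assumptions:

import cv2

vcapture = cv2.VideoCapture("input.mp4")  # placeholder path
fps = vcapture.get(cv2.CAP_PROP_FPS)
width = int(vcapture.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vcapture.get(cv2.CAP_PROP_FRAME_HEIGHT))
vcapture.release()

fourcc = cv2.VideoWriter_fourcc(*"mp4v")  # codec choice is an assumption
vwriter = cv2.VideoWriter("output.mp4", fourcc, fps, (width, height))
run_detection_image("input.mp4", vwriter, "output.mp4")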
Code example #8
File: runner.py Project: atashfeshan/Melanoma
    def __init__(self, image: np.ndarray, n_cluster=3, n_s=4, m_s=0.5):
        self.n_s = n_s
        self.m_s = m_s
        self.n_cluster = n_cluster
        self.image = image
        self.raw_image = image.copy()
        # Pipeline stages, stored as classes to be instantiated later.
        self.Clustering = clustering.Clustering
        self.Smooth = smoothing.Smooth
        self.Median = find_median.Median
        self.FindMole = find_mole.FindMole
        self.Compute = compute_ratio.Computer
        self.Perimeter = perimeter.Perimeter
        self.Filter = filter_perimeter.FilterPerimeter
        # Results filled in as the pipeline runs.
        self.p = 0
        self.s = []
        self.label = 0
        self.median = [0, 0]
        self.mole = []
        self.ratio = 0
        self.perimeter = 0
Code example #9
# n10, add_padding_k and average_filter_k are defined earlier in the
# notebook; median_filter is presumably scipy.ndimage.median_filter.
n10_median = median_filter(n10, 3)
plt.imshow(n10_median)

n10_padded_k = add_padding_k(n10, 5)
n10_temp5 = average_filter_k(n10_padded_k, 5)
plt.imshow(n10_temp5)

n10_padded_k7 = add_padding_k(n10, 7)
n10_temp7 = average_filter_k(n10_padded_k7, 7)
plt.imshow(n10_temp7)

import numpy as np
import cv2

# Manual 7x7 median filter: sort the 49 neighbours of each pixel and keep
# the middle value.
img = cv2.imread('lena.png', cv2.IMREAD_GRAYSCALE)
img_out = img.copy()

height = img.shape[0]
width = img.shape[1]

for i in np.arange(3, height - 3):
    for j in np.arange(3, width - 3):
        neighbors = []
        for k in np.arange(-3, 4):
            for l in np.arange(-3, 4):
                neighbors.append(img.item(i + k, j + l))
        neighbors.sort()
        img_out.itemset((i, j), neighbors[24])  # median of 49 values
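The hand-rolled loop should agree with OpenCV's built-in median blur away from the 3-pixel border it leaves untouched; a quick check, continuing the script above:

ref = cv2.medianBlur(img, 7)
# Compare away from the untouched border.
print(np.array_equal(img_out[3:-3, 3:-3], ref[3:-3, 3:-3]))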
Code example #10
import cv2
import numpy


def bounding_box(image):
    roi_copy = image.copy()
    roi_hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)

    # ## mask of green (36,0,0) ~ (70, 255,255)
    # mask1 = cv2.inRange(hsv, (36, 0, 0), (70, 255,255))

    # ## mask of yellow (15,0,0) ~ (36, 255, 255)
    # mask2 = cv2.inRange(hsv, (15,0,0), (36, 255, 255))

    # filter black color
    # mask1 = cv2.inRange(roi_hsv, numpy.array([0, 0, 0]), numpy.array([180, 255, 125]))
    mask1 = cv2.inRange(roi_hsv, numpy.array([0, 0, 0]),
                        numpy.array([70, 255, 255]))
    mask1 = cv2.morphologyEx(
        mask1, cv2.MORPH_CLOSE,
        cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)))
    mask1 = cv2.Canny(mask1, 100, 300)
    # Note: a (1, 1) Gaussian kernel is a no-op; use e.g. (3, 3) to actually blur.
    mask1 = cv2.GaussianBlur(mask1, (1, 1), 0)
    mask1 = cv2.Canny(mask1, 100, 300)

    # Find contours.
    # cv2.findContours() changed from OpenCV 3 to OpenCV 4: it now returns
    # two values instead of three.
    cv2MajorVersion = cv2.__version__.split(".")[0]
    if int(cv2MajorVersion) >= 4:
        ctrs, hier = cv2.findContours(mask1.copy(), cv2.RETR_EXTERNAL,
                                      cv2.CHAIN_APPROX_SIMPLE)
    else:
        im2, ctrs, hier = cv2.findContours(mask1.copy(), cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)

    bbox_file = open("bbox_points.txt", "a")
    # Sort contours left to right by their bounding-box x-coordinate.
    sorted_ctrs = sorted(ctrs, key=lambda ctr: cv2.boundingRect(ctr)[0])
    for i, ctr in enumerate(sorted_ctrs):
        if cv2.contourArea(ctr) > 50:
            peri = cv2.arcLength(ctr, True)
            approx = cv2.approxPolyDP(ctr, 0.02 * peri, True)
            x, y, w, h = cv2.boundingRect(approx)
            print(x, y, cv2.contourArea(ctr))

            # Getting the ROI.
            roi = image[y:y + h, x:x + w]
            if w > 15 and h > 15:
                cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
                bbox_file.write(
                    str(x) + " " + str(y) + " " + str(w) + " " + str(h) + " " +
                    "\n")

    bbox_file.close()
    cv2.imshow('marked areas', image)
    cv2.imwrite('out_check_out.png', image)
    cv2.waitKey(0)
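A usage sketch; note the function expects an RGB array, given the COLOR_RGB2HSV conversion ('frame.png' is a placeholder):

import cv2

img = cv2.imread("frame.png")  # placeholder file name
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # the function assumes RGB input
bounding_box(img)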
Code example #11
def bounding_box(imageName, FrameName, display_width, display_height):
    image = cv2.imread(imageName)
    roi_copy = image.copy()
    roi_hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)

    # ## mask of green (36,0,0) ~ (70, 255,255)
    # mask1 = cv2.inRange(hsv, (36, 0, 0), (70, 255,255))

    # ## mask of yellow (15,0,0) ~ (36, 255, 255)
    # mask2 = cv2.inRange(hsv, (15,0,0), (36, 255, 255))

    # filter black color
    # mask1 = cv2.inRange(roi_hsv, numpy.array([0, 0, 0]), numpy.array([180, 255, 125]))
    mask1 = cv2.inRange(roi_hsv, numpy.array([1, 1, 1]), numpy.array([255, 255, 255]))
    mask1 = cv2.morphologyEx(mask1, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)))
    mask1 = cv2.Canny(mask1, 100, 300)
    # Note: a (1, 1) Gaussian kernel is a no-op; use e.g. (3, 3) to actually blur.
    mask1 = cv2.GaussianBlur(mask1, (1, 1), 0)
    mask1 = cv2.Canny(mask1, 100, 300)

    # Find contours.
    # cv2.findContours() changed from OpenCV 3 to OpenCV 4: it now returns
    # two values instead of three.
    cv2MajorVersion = cv2.__version__.split(".")[0]
    if int(cv2MajorVersion) >= 4:
        ctrs, hier = cv2.findContours(mask1.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    else:
        im2, ctrs, hier = cv2.findContours(mask1.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    bbox_file = open("./File_out/bbox_points.txt", "a")
    bbox_file.write(FrameName + " | ")
    # Sort contours left to right by their bounding-box x-coordinate.
    sorted_ctrs = sorted(ctrs, key=lambda ctr: cv2.boundingRect(ctr)[0])
    valid_boxes = []
    for i, ctr in enumerate(sorted_ctrs):
        if cv2.contourArea(ctr) > 50:
            peri = cv2.arcLength(ctr, True)
            approx = cv2.approxPolyDP(ctr, 0.02 * peri, True)
            x, y, w, h = cv2.boundingRect(approx)
            # Getting the ROI.
            roi = image[y:y + h, x:x + w]
            if w > 70 and h > 30:
                valid_boxes.append([x, y, w, h])

                # Pad the drawn box by 50 px on the right and bottom.
                cv2.rectangle(image, (x, y), (x + w + 50, y + h + 50), (0, 255, 0), 2)
                bbox_file.write(
                    str(x) + " " + str(y) + " " + str(w + 50) + " " + str(
                        h + 50) + " " + " | ")

    bbox_file.write("\n")
    bbox_file.close()
    cv2.imwrite("./Image_out/" + FrameName + 'box.png', image)
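Since this variant takes a frame name and writes one annotated image per frame, looping over extracted frames is the natural usage (the frames directory and display size are placeholders):

import glob
import os

for path in sorted(glob.glob("./frames/*.png")):  # placeholder directory
    name = os.path.splitext(os.path.basename(path))[0]
    bounding_box(path, name, 1920, 1080)  # placeholder display size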
Code example #12
    def findTemplate(self, image, template):
        # imread here is presumably imageio.imread, which returns RGB arrays.
        image = imread(image, pilmode="RGB")

        # Try all four 90-degree orientations of the template.
        template_original = imread(template, pilmode="RGB")
        template_rotate_90_counterclockwise = cv2.rotate(
            template_original, cv2.ROTATE_90_COUNTERCLOCKWISE)
        template_rotate_90_clockwise = cv2.rotate(template_original,
                                                  cv2.ROTATE_90_CLOCKWISE)
        template_rotate_180 = cv2.rotate(template_original, cv2.ROTATE_180)

        templateArray = [
            template_original,
            template_rotate_90_counterclockwise,
            template_rotate_90_clockwise,
            template_rotate_180,
        ]

        maxMaxVal = 0
        for template in templateArray:
            # The arrays are RGB (pilmode="RGB"), so convert with RGB2GRAY.
            template = cv2.cvtColor(template, cv2.COLOR_RGB2GRAY)
            template = cv2.Canny(template, 50, 200)
            (tH, tW) = template.shape[:2]
            gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
            found = None

            # Loop over the scales of the image.
            for scale in np.linspace(0.2, 1.0, 15)[::-1]:
                # Resize the image according to the scale, and keep track
                # of the resizing ratio.
                resized = imutils.resize(gray,
                                         width=int(gray.shape[1] * scale))
                r = gray.shape[1] / float(resized.shape[1])
                # If the resized image is smaller than the template, stop.
                if resized.shape[0] < tH or resized.shape[1] < tW:
                    break

                # Detect edges in the resized, grayscale image and apply
                # template matching to find the template in the image.
                edged = cv2.Canny(resized, 50, 200)
                result = cv2.matchTemplate(edged, template,
                                           cv2.TM_CCOEFF_NORMED)
                (_, maxVal, _, maxLoc) = cv2.minMaxLoc(result)

                # Keep the best correlation seen so far for this template.
                if found is None or maxVal > found[0]:
                    found = (maxVal, maxLoc, r)

            if found is not None and found[0] > maxMaxVal:
                maxMaxVal = found[0]

            # 0.1 was selected as a good-match threshold based on testing;
            # return early once any orientation clears it.
            if maxMaxVal > 0.1:
                return maxMaxVal

            # Uncomment below to draw the detected object (Mongolian ID):
            # (_, maxLoc, r) = found
            # (startX, startY) = (int(maxLoc[0] * r), int(maxLoc[1] * r))
            # (endX, endY) = (int((maxLoc[0] + tW) * r), int((maxLoc[1] + tH) * r))
            # cv2.rectangle(image, (startX, startY), (endX, endY), (0, 0, 255), 2)
            # cv2.imshow("Image", image)
            # cv2.waitKey(0)

        return maxMaxVal
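A usage sketch; the enclosing class name and the file paths are placeholders, since only the method is shown above:

matcher = TemplateMatcher()  # hypothetical class name
score = matcher.findTemplate("id_photo.jpg", "id_template.jpg")
print("match" if score > 0.1 else "no match", score)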
Code example #13
        # Second pass of a separable 2D convolution: convolve each row with
        # the 1D kernel, then transpose back (the function head is omitted
        # in this snippet).
        for i in range(0, convolved.shape[0], 1):
            convolved_y.append(
                np.convolve(convolved[i, :], kernel, mode='same'))

        convolved = np.array(convolved_y)
        convolved = np.transpose(convolved)

    return convolved
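Only the tail of the function survives here, so for reference, a compact self-contained sketch of the same separable idea (convolve columns, then rows, with one 1D kernel):

import numpy as np

def separable_convolve(img, kernel):
    # Convolve every column, then every row, with the same 1D kernel.
    out = np.apply_along_axis(np.convolve, 0, img, kernel, mode='same')
    out = np.apply_along_axis(np.convolve, 1, out, kernel, mode='same')
    return out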


# ------------------------ 2D CONVOLUTION ------------------------
img_path = "Experiment-118-cut4.tiff"

# Load image as a pixel array (image here is presumably matplotlib.image).
image = image.imread(img_path)
image_original = image.copy()

image = np.dot(image[..., :3], [0.299, 0.587, 0.114])  # convert RGB to grayscale

#image = np.log(image+0.000000001)

# Formulas from http://devernay.free.fr/cours/vision/pdf/c3.pdf
filter_radius = 64
alpha = 0.25

# ------------------------ SMOOTHING ------------------------
kernel_smooth = np.zeros((filter_radius * 2 + 1))
c = (1 - np.exp(-alpha)) / (1 + np.exp(-alpha))
kernel_fct = (lambda c_param, alpha_param, x_param: c_param * np.exp(
    -alpha_param * np.fabs(x_param)))
for x in range(-filter_radius, filter_radius + 1, 1):
    # Sample the exponential smoothing kernel at each integer offset.
    kernel_smooth[x + filter_radius] = kernel_fct(c, alpha, x)

def post_processing(img):
    # Threshold every channel of every pixel to 0 or 255, in place.
    x, y = img.shape[:2]
    for i in range(x):
        for j in range(y):
            for k in range(3):
                if img[i, j][k] > 127:
                    img[i, j][k] = 255
                else:
                    img[i, j][k] = 0
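The same thresholding can be written as one vectorized assignment; a sketch:

import numpy as np

def post_processing_vectorized(img):
    # Threshold all channels at once, in place.
    img[:] = np.where(img > 127, 255, 0)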


img = cv2.imread("00_img/img_01.jpg")
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
split_img = img.copy()
x, y = split_img.shape[:2]
split(split_img, 30, 0, x - 1, 0, y - 1)

plot1 = plt.figure("Original Image")
plt.imshow(img)
plot2 = plt.figure("After splitting : ")
plt.imshow(split_img)

quadtree = QuadTree().insert(img)
merge_img = quadtree.get_image(7)
post_processing(merge_img)
plot3 = plt.figure("After applying  Merging : ")
plt.imshow(merge_img)

plt.show()