Example #1
import cv2
import matplotlib.pyplot as plt
import numpy as np


def cv_show(img, name):
    cv2.imshow(name, img)
    cv2.waitKey()
    cv2.destroyAllWindows()


img = cv2.imread('lena.jpg', cv2.IMREAD_GRAYSCALE)
cv_show(img, 'img')

img = cv2.imread('lena.jpg', cv2.IMREAD_GRAYSCALE)
sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=3)
sobelx = cv2.convertScaleAbs(sobelx)
sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=3)
sobely = cv2.convertScaleAbs(sobely)
sobelxy = cv2.addWeighted(sobelx, 0.5, sobely, 0.5, 0)
cv_show(sobelxy, 'sobelxy')
# Differences between the operators
img = cv2.imread('lena.jpg', cv2.IMREAD_GRAYSCALE)
sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=3)
sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=3)
sobelx = cv2.convertScaleAbs(sobelx)
sobely = cv2.convertScaleAbs(sobely)
sobelxy = cv2.addWeighted(sobelx, 0.5, sobely, 0.5, 0)

scharrx = cv2.Scharr(img, cv2.CV_64F, 1, 0)
scharry = cv2.Scharr(img, cv2.CV_64F, 0, 1)
scharrx = cv2.convertScaleAbs(scharrx)
scharry = cv2.convertScaleAbs(scharry)
scharrxy = cv2.addWeighted(scharrx, 0.5, scharry, 0.5, 0)
cv_show(scharrxy, 'scharrxy')
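A side note on the comparison: OpenCV implements Scharr as a special case of Sobel, so passing ksize=-1 (the cv2.FILTER_SCHARR constant) selects the same kernel. A minimal sketch:

# Equivalent to cv2.Scharr(img, cv2.CV_64F, 1, 0)
scharrx_alt = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=-1)  # ksize=-1 selects the 3x3 Scharr kernel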
Example #2
    else:
        # (len(array))
        start = time.time()
        roi_gray_bgra = array[0]
        # cv2.imshow("roi_gray_bgra", roi_gray_bgra)

        # Increase the brightness
        # value = 150
        # roi_gray_bgra = np.where((255 - roi_gray_bgra) < value, 255, roi_gray_bgra + value)
        # cv2.imshow("grey_new", roi_gray_bgra)

        alpha = 1.5  # Contrast control (1.0-3.0)
        beta = 100  # Brightness control (0-100)

        # cv2.imshow("roi_gray_bgra", roi_gray_bgra)
        # Note: beta is declared above but not passed, so only the contrast
        # term is applied here; add beta=beta to also shift brightness.
        roi_gray_bgra = cv2.convertScaleAbs(roi_gray_bgra, alpha=alpha)
        # cv2.imshow("roi_gray_bgra_contrast", roi_gray_bgra)

        # print("base_shape")
        # print(base.shape)
        # print("roi_gray")
        # print(roi_gray_scaled.shape)
        # print("roi_gray_scaled")
        # print(roi_gray_scaled.shape)

        w_2 = int(roi_gray_bgra.shape[1] / 2)
        h_2 = int(roi_gray_bgra.shape[0] / 2)

        if w_2 % 2 != 0:
            w_2 = w_2 - 1
        if h_2 % 2 != 0:
            h_2 = h_2 - 1
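For reference on the alpha/beta controls used above: cv2.convertScaleAbs computes saturate(|src * alpha + beta|) and returns uint8. A minimal sketch verifying that against NumPy:

import cv2
import numpy as np

a = np.array([[10, 200]], dtype=np.uint8)
out = cv2.convertScaleAbs(a, alpha=1.5, beta=100)
# Recompute by hand: scale, shift, absolute value, saturate to [0, 255]
ref = np.clip(np.abs(a.astype(np.float64) * 1.5 + 100), 0, 255).astype(np.uint8)
assert (out == ref).all()  # [[115, 255]]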
Example #3
    def getObjectBoxes(self):
        import pyrealsense2 as rs
        # Import Numpy for easy array manipulation
        import numpy as np
        # Import OpenCV for easy image rendering
        import cv2
        rs_intrinsics_obj, depth_image, depth_scale = self.intrinsics, self.depth, .001  #Need to change real sense code to get scale
        clipping_distance_in_meters = 4  # 4 meters
        clipping_distance = clipping_distance_in_meters / depth_scale

        # Pixels farther than clipping_distance are zeroed out below

        #No scaling necessary because already scaled
        middle_distance_in_meters = depth_image[420][
            200]  #Depth of white line, 480 x 640 (y,x)

        print("Depth of pixel near middle of image:",
              middle_distance_in_meters)
        # depth_image_3d = np.dstack((depth_image,depth_image,depth_image)) #depth image is 1 channel, color is 3 channels
        bg_removed = np.where(
            (depth_image > clipping_distance) |
            (depth_image <= middle_distance_in_meters + .1 / .001), 0, 255
        )  #middle_depth/depth_scale, yield 255(white) if inside range, objects need to be white colored
        # Render images
        depth_colormap = cv2.applyColorMap(
            cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)

        array = np.array(bg_removed, dtype='uint8')

        img_cv = cv2.resize(array, (640, 480))

        ret, thresh = cv2.threshold(img_cv, 200, 255, 0)

        # find contours
        coins_contours, _ = cv2.findContours(thresh, cv2.RETR_TREE,
                                             cv2.CHAIN_APPROX_SIMPLE)

        # make copy of image

        # find contours of large enough area
        min_coin_area = 1500
        large_contours = [
            cnt for cnt in coins_contours
            if cv2.contourArea(cnt) > min_coin_area
        ]

        # print number of contours
        print('number of objects: %d' % len(large_contours))

        # bounding_img = np.copy(depth_colormap)
        bounding_boxes = []

        # for each contour find bounding box and draw rectangle
        for contour in large_contours:
            x, y, w, h = cv2.boundingRect(contour)
            center = (x + w // 2, y + h // 2)
            x_range = (center[0] - w // 4, center[0] + w // 4)
            y_range = (center[1] - h // 4, center[1] + h // 4)
            d = np.average(depth_image[y_range[0]:y_range[1],
                                       x_range[0]:x_range[1]])
            middle_position = rs.rs2_deproject_pixel_to_point(
                rs_intrinsics_obj, [x + w // 2, y + h // 2], d)
            pos = [middle_position[0], middle_position[2]]
            bounding_boxes.append(pos)
        return bounding_boxes
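A hedged usage sketch for the method above; `cam` (an instance of this class with intrinsics and depth already populated) is an assumption, and each returned entry is an [X, Z] position in meters:

import math

boxes = cam.getObjectBoxes()  # hypothetical instance of the class above
for x, z in boxes:
    print('object at x=%.2f m, z=%.2f m, range=%.2f m' % (x, z, math.hypot(x, z)))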
Example #4
def equalize_brightness(image, ref_image, percentile=98, image_gamma=1):
    image = ImageProc.adjust_gamma(image, 1 / image_gamma)
    ip = np.percentile(image, percentile)
    rp = np.percentile(ImageProc.adjust_gamma(ref_image, 1 / image_gamma), percentile)
    image = cv2.convertScaleAbs(image, None, rp / ip, 0)
    return ImageProc.adjust_gamma(image, image_gamma)
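ImageProc.adjust_gamma is not shown in this snippet; a minimal LUT-based sketch of what such a helper typically looks like (an assumption, not the project's actual implementation):

import cv2
import numpy as np

def adjust_gamma(image, gamma):
    # Hypothetical helper: map each uint8 value v to 255 * (v / 255) ** gamma
    table = (255.0 * (np.arange(256) / 255.0) ** gamma).astype(np.uint8)
    return cv2.LUT(image, table)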
Example #5
import cv2
import numpy as np
import os

loc = os.path.abspath('')


img = cv2.imread(loc+"/trafficCounter/backgrounds/625_bg.jpg",0)

kernel = np.ones((5,5),np.uint8)

blur = cv2.bilateralFilter(img, 11, 3, 3)  # note: unused below; Canny runs on the raw image
edges = cv2.Canny(img, 0, 820)
edges2 = cv2.Canny(img, 0, 800)


diff = cv2.absdiff(edges, cv2.convertScaleAbs(edges2))

laplacian = cv2.Laplacian(diff, cv2.CV_8UC1)

dilated = cv2.dilate(laplacian, kernel, iterations = 2)
erosion = cv2.erode(dilated,kernel,iterations = 3)


cv2.imshow("ero", erosion)
cv2.waitKey(0)


im2, contours, hierarchy =  cv2.findContours(erosion,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)


cnts = sorted(contours, key = cv2.contourArea, reverse = True)[:10]
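Note that the three-value unpacking from cv2.findContours above targets OpenCV 3.x; OpenCV 4.x returns two values, so there the call would be:

contours, hierarchy = cv2.findContours(erosion, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)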
Example #6
  # Find the contours
  cntImg, cnts, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

  for c in cnts:
    # Ignore regions that are too small
    if cv2.contourArea(c) < 2500:
      continue

    # An object was detected; add your own handling code here...

    # Compute the bounding box of the contour
    (x, y, w, h) = cv2.boundingRect(c)

    # Draw the bounding box
    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

  # Draw the contours (for debugging)
  cv2.drawContours(frame, cnts, -1, (0, 255, 255), 2)

  # Display the detection result
  cv2.imshow('frame', frame)

  if cv2.waitKey(1) & 0xFF == ord('q'):
    break

  # Update the running average image
  cv2.accumulateWeighted(blur, avg_float, 0.01)
  avg = cv2.convertScaleAbs(avg_float)

cap.release()
cv2.destroyAllWindows()
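cv2.accumulateWeighted requires a floating-point accumulator, so earlier in this script avg_float is presumably seeded from the first frame, along these lines:

ret, frame = cap.read()
blur = cv2.GaussianBlur(frame, (5, 5), 0)   # assumed: 'blur' is the smoothed frame
avg_float = np.float32(blur)                # 32-bit float running-average buffer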
Example #7
        break

    gray_frame = cv2.cvtColor(orig_frame, cv2.COLOR_BGR2GRAY)
    frame = gaussian_blur(gray_frame)

    # Compute gradient wrt x, y, and t
    gx = signal.convolve2d(frame, sobel_x, mode='same')
    gy = signal.convolve2d(frame, sobel_y, mode='same')
    # frame = normalize(frame, 0, 255)

    # Time gradient
    # Mask image 1: [-1, -1, -1, -1], mask image 2: [1, 1, 1, 1]
    gt = time_grad(prev, frame)
    #gt = frame - prev

    gx = cv2.convertScaleAbs(gx)
    gy = cv2.convertScaleAbs(gy)
    gt = cv2.convertScaleAbs(gt)
    # frame = cv2.convertScaleAbs(frame)

    concat = np.concatenate((gray_frame, gt, gx, gy), axis=1)  # axis=1 for horizontal concat
    cv2.imshow('Resized Window', concat)
    #cv2.imshow('flow', frame)

    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
    elif k == ord('s'):
        cv2.imwrite('opticalfb.png', frame)
        #cv2.imwrite('opticalhsv.png', bgr)
    prev = frame
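This fragment relies on helpers defined earlier in the script; hedged sketches of what they plausibly look like (the kernels are the standard Sobel pair, but the exact definitions are assumptions):

import cv2
import numpy as np

sobel_x = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])  # horizontal-gradient kernel
sobel_y = sobel_x.T                                       # vertical-gradient kernel

def gaussian_blur(frame, ksize=5, sigma=1.0):
    return cv2.GaussianBlur(frame, (ksize, ksize), sigma)

def time_grad(prev, curr):
    # Temporal gradient: difference between consecutive smoothed frames
    return curr.astype(np.float64) - prev.astype(np.float64)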
Example #8
    def processImage_(self, image_msg):

        self.stats.processed()

        if self.intermittent_log_now():
            self.intermittent_log(self.stats.info())
            self.stats.reset()

        tk = TimeKeeper(image_msg)

        self.intermittent_counter += 1

        # Decode from compressed image with OpenCV
        try:
            image_cv = image_cv_from_jpg(image_msg.data)
        except ValueError as e:
            self.loginfo('Could not decode image: %s' % e)
            return

        tk.completed('decoded')

        # Resize and crop image
        hei_original, wid_original = image_cv.shape[0:2]

        if self.image_size[0] != hei_original or self.image_size[
                1] != wid_original:
            # image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
            image_cv = cv2.resize(image_cv,
                                  (self.image_size[1], self.image_size[0]),
                                  interpolation=cv2.INTER_NEAREST)
        image_cv = image_cv[self.top_cutoff:, :, :]

        tk.completed('resized')

        # apply color correction: AntiInstagram
        image_cv_corr = self.ai.applyTransform(image_cv)
        image_cv_corr = cv2.convertScaleAbs(image_cv_corr)

        tk.completed('corrected')

        # Set the image to be detected
        self.detector.setImage(image_cv_corr)

        # Detect lines and normals

        white = self.detector.detectLines('white')
        yellow = self.detector.detectLines('yellow')
        red = self.detector.detectLines('red')

        tk.completed('detected')

        # SegmentList constructor
        segmentList = SegmentList()
        segmentList.header.stamp = image_msg.header.stamp

        # Convert to normalized pixel coordinates, and add segments to segmentList
        arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
        arr_ratio = np.array(
            (1. / self.image_size[1], 1. / self.image_size[0],
             1. / self.image_size[1], 1. / self.image_size[0]))
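        # lines are (x1, y1, x2, y2) in the cropped image: adding top_cutoff
        # restores full-image y coordinates, and arr_ratio rescales both axes
        # into normalized [0, 1] coordinates.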
        if len(white.lines) > 0:
            lines_normalized_white = ((white.lines + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(
                self.toSegmentMsg(lines_normalized_white, white.normals,
                                  Segment.WHITE))
        if len(yellow.lines) > 0:
            lines_normalized_yellow = ((yellow.lines + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(
                self.toSegmentMsg(lines_normalized_yellow, yellow.normals,
                                  Segment.YELLOW))
        if len(red.lines) > 0:
            lines_normalized_red = ((red.lines + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(
                self.toSegmentMsg(lines_normalized_red, red.normals,
                                  Segment.RED))

        self.intermittent_log(
            '# segments: white %3d yellow %3d red %3d' %
            (len(white.lines), len(yellow.lines), len(red.lines)))

        tk.completed('prepared')

        # Publish segmentList
        self.pub_lines.publish(segmentList)
        tk.completed('--pub_lines--')

        # VISUALIZATION only below

        if self.verbose:

            # Draw lines and normals
            image_with_lines = np.copy(image_cv_corr)
            drawLines(image_with_lines, white.lines, (0, 0, 0))
            drawLines(image_with_lines, yellow.lines, (255, 0, 0))
            drawLines(image_with_lines, red.lines, (0, 255, 0))

            tk.completed('drawn')

            # Publish the frame with lines
            image_msg_out = self.bridge.cv2_to_imgmsg(image_with_lines, "bgr8")
            image_msg_out.header.stamp = image_msg.header.stamp
            self.pub_image.publish(image_msg_out)

            tk.completed('pub_image')

            #         if self.verbose:
            colorSegment = color_segment(white.area, red.area, yellow.area)
            edge_msg_out = self.bridge.cv2_to_imgmsg(self.detector.edges,
                                                     "mono8")
            colorSegment_msg_out = self.bridge.cv2_to_imgmsg(
                colorSegment, "bgr8")
            self.pub_edge.publish(edge_msg_out)
            self.pub_colorSegment.publish(colorSegment_msg_out)

            tk.completed('pub_edge/pub_segment')

        self.intermittent_log(tk.getall())
Example #9
import numpy as np
import cv2
image = cv2.imread('/home/pi/book/dataset/barcode.jpeg', 1)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
hor_der = cv2.Sobel(gray, ddepth=-1, dx=1, dy=0, ksize=5)
ver_der = cv2.Sobel(gray, ddepth=-1, dx=0, dy=1, ksize=5)
diff = cv2.subtract(hor_der, ver_der)
diff = cv2.convertScaleAbs(diff)

blur = cv2.GaussianBlur(diff, (3, 3), 0)
ret, th = cv2.threshold(blur, 225, 255, cv2.THRESH_BINARY)
dilated = cv2.dilate(th, None, iterations=10)
eroded = cv2.erode(dilated, None, iterations=15)
(contours, hierarchy) = cv2.findContours(eroded, cv2.RETR_TREE,
                                         cv2.CHAIN_APPROX_SIMPLE)
areas = [cv2.contourArea(temp) for temp in contours]
max_index = np.argmax(areas)
largest_contour = contours[max_index]

x, y, width, height = cv2.boundingRect(largest_contour)
cv2.rectangle(image, (x, y), (x + width, y + height), (255, 0, 0), 2)
cv2.imshow('Detected Barcode', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
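One caveat: with ddepth=-1 on a uint8 input, negative Sobel responses are clipped to zero before the subtraction, so the final convertScaleAbs has little left to do. A variant that keeps signed gradients until the end:

hor_der = cv2.Sobel(gray, cv2.CV_32F, 1, 0, ksize=5)
ver_der = cv2.Sobel(gray, cv2.CV_32F, 0, 1, ksize=5)
diff = cv2.convertScaleAbs(cv2.subtract(hor_der, ver_der))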
Example #10
def img_process(image):

    # img=cv2.resize(image,(900,300))
    # cv2.imshow('img',img)
    # cv2.waitKey()
    # img = cv2.imwrite("/home/shreyasbyndoor/Indoor_localization_CSL/color_fp_resize.jpg", img)
    """
    Apply sobel edge detection on input image in x, y direction
    """
    # 1. Convert the image to gray scale
    img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # 2. Gaussian blur the image
    img = cv2.GaussianBlur(img, (3, 3), 0)

    # 3. Use cv2.Sobel() to find derivatives for both X and Y axes
    grad_x = cv2.Sobel(img, cv2.CV_8U, 1, 0, ksize=3)
    grad_y = cv2.Sobel(img, cv2.CV_8U, 0, 1, ksize=3)

    # 4. Use cv2.addWeighted() to combine the results
    grad = cv2.addWeighted(grad_x, 0.5, grad_y, 0.5, 0)

    grad = cv2.convertScaleAbs(grad)
    # Apply threshold
    binary_output = cv2.threshold(grad, 25, 100, cv2.THRESH_BINARY)[1]

    # Invert the thresholded image: edge pixels (100) become 0, background 255
    binary_output = np.where(binary_output == 100, 0, 255).astype(np.uint8)

    # Display the binarized floorplan (before skeletonization)

    cv2.imshow('img', binary_output)
    cv2.waitKey()

    skel = np.zeros(binary_output.shape, np.uint8)
    element = cv2.getStructuringElement(cv2.MORPH_CROSS, (5, 5))
    # Skeletonize the image
    while True:
        #Step 2: Open the image
        open_img = cv2.morphologyEx(binary_output, cv2.MORPH_OPEN, element)
        #Step 3: Subtract the opened image from the original
        temp = cv2.subtract(binary_output, open_img)
        #Step 4: Erode the original image and refine the skeleton
        eroded = cv2.erode(binary_output, element)
        skel = cv2.bitwise_or(skel, temp)
        binary_output = eroded.copy()
        # Step 5: If there are no white pixels left, i.e. the image has been completely eroded, quit the loop
        if cv2.countNonZero(binary_output) == 0:
            break

    cv2.imshow('img', skel)
    cv2.waitKey()
    cv2.imwrite('/home/shreyasbyndoor/Indoor_localization_CSL/skel_fp.jpg',
                skel)
    print('exported')
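If opencv-contrib-python is available, the whole erosion loop above can be replaced by a built-in thinning call; a sketch, applied to the binary image from before the loop:

skel = cv2.ximgproc.thinning(binary_output)  # Zhang-Suen thinning by default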
Example #11
    def getDepthFrame(self):
        frames = self.pipeline.wait_for_frames()
        depthFrame = frames.get_depth_frame()
        depthImage = np.asanyarray(depthFrame.get_data())
        depthColorMap = cv2.applyColorMap(cv2.convertScaleAbs(depthImage, alpha=0.025), cv2.COLORMAP_JET)
        return depthColorMap
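The hard-coded alpha=0.025 maps raw 16-bit depth units (about 0-10200 here) onto the 8-bit range before colormapping. A sketch of deriving it from an expected maximum range instead:

max_depth_units = 10000          # e.g. ~10 m at a 1 mm depth scale (assumption)
alpha = 255.0 / max_depth_units  # = 0.0255, close to the hard-coded value
depthColorMap = cv2.applyColorMap(cv2.convertScaleAbs(depthImage, alpha=alpha), cv2.COLORMAP_JET)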
Example #12
def main(argv=None):
    if tf.gfile.Exists(FLAGS.save_dir):
        tf.gfile.DeleteRecursively(FLAGS.save_dir)
    tf.gfile.MakeDirs(FLAGS.save_dir)
    if tf.gfile.Exists(FLAGS.gen_frm_dir):
        tf.gfile.DeleteRecursively(FLAGS.gen_frm_dir)
    tf.gfile.MakeDirs(FLAGS.gen_frm_dir)

    # load data
    train_input_handle, test_input_handle = datasets_factory.data_provider(
        FLAGS.dataset_name,
        FLAGS.train_data_paths,
        FLAGS.valid_data_paths,
        FLAGS.batch_size * FLAGS.n_gpu,
        FLAGS.img_width,
        is_training=True)

    print('Initializing models')
    model = Model()
    lr = FLAGS.lr

    delta = FLAGS.sampling_delta_per_iter
    eta = FLAGS.sampling_start_value

    for itr in range(1, FLAGS.max_iterations + 1):
        if train_input_handle.no_batch_left():
            train_input_handle.begin(do_shuffle=True)
        ims = train_input_handle.get_batch()
        ims = preprocess.reshape_patch(ims, FLAGS.patch_size)
        ims_list = np.split(ims, FLAGS.n_gpu)

        if itr < FLAGS.sampling_stop_iter:
            eta -= delta
        else:
            eta = 0.0
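        # Scheduled sampling: with probability eta, feed the ground-truth
        # frame (mask of ones); otherwise feed the model's own prediction
        # (mask of zeros). eta decays linearly until sampling_stop_iter.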
        random_flip = np.random.random_sample(
            (FLAGS.batch_size, FLAGS.seq_length - FLAGS.input_length - 1))
        true_token = (random_flip < eta)
        ones = np.ones((FLAGS.img_width // FLAGS.patch_size,
                        FLAGS.img_width // FLAGS.patch_size,
                        FLAGS.patch_size**2 * FLAGS.img_channel))
        zeros = np.zeros((FLAGS.img_width // FLAGS.patch_size,
                          FLAGS.img_width // FLAGS.patch_size,
                          FLAGS.patch_size**2 * FLAGS.img_channel))

        mask_true = []
        for i in range(FLAGS.batch_size):
            for j in range(FLAGS.seq_length - FLAGS.input_length - 1):
                if true_token[i, j]:
                    mask_true.append(ones)
                else:
                    mask_true.append(zeros)
        mask_true = np.array(mask_true)
        mask_true = np.reshape(
            mask_true,
            (FLAGS.batch_size, FLAGS.seq_length - FLAGS.input_length - 1,
             FLAGS.img_width // FLAGS.patch_size, FLAGS.img_width //
             FLAGS.patch_size, FLAGS.patch_size**2 * FLAGS.img_channel))

        cost = model.train(ims_list, lr, mask_true)

        if FLAGS.reverse_input:
            ims_rev = np.split(ims[:, ::-1], FLAGS.n_gpu)
            cost += model.train(ims_rev, lr, mask_true)
            cost = cost / 2

        if itr % FLAGS.display_interval == 0:
            print('itr: ' + str(itr))
            print('training loss: ' + str(cost))

        if itr % FLAGS.test_interval == 0:
            print('test...')
            test_input_handle.begin(do_shuffle=False)
            res_path = os.path.join(FLAGS.gen_frm_dir, str(itr))
            os.mkdir(res_path)
            avg_mse = 0
            batch_id = 0
            img_mse, ssim, psnr, fmae, sharp = [], [], [], [], []
            for i in range(FLAGS.seq_length - FLAGS.input_length):
                img_mse.append(0)
                ssim.append(0)
                psnr.append(0)
                fmae.append(0)
                sharp.append(0)
            mask_true = np.zeros(
                (FLAGS.batch_size, FLAGS.seq_length - FLAGS.input_length - 1,
                 FLAGS.img_width // FLAGS.patch_size,
                 FLAGS.img_width // FLAGS.patch_size,
                 FLAGS.patch_size**2 * FLAGS.img_channel))
            while not test_input_handle.no_batch_left():
                batch_id = batch_id + 1
                test_ims = test_input_handle.get_batch()
                test_dat = preprocess.reshape_patch(test_ims, FLAGS.patch_size)
                test_dat = np.split(test_dat, FLAGS.n_gpu)
                img_gen = model.test(test_dat, mask_true)

                # concat outputs of different gpus along batch
                img_gen = np.concatenate(img_gen)
                img_gen = preprocess.reshape_patch_back(
                    img_gen, FLAGS.patch_size)
                # MSE per frame
                for i in range(FLAGS.seq_length - FLAGS.input_length):
                    x = test_ims[:, i + FLAGS.input_length, :, :, 0]
                    gx = img_gen[:, i, :, :, 0]
                    fmae[i] += metrics.batch_mae_frame_float(gx, x)
                    gx = np.maximum(gx, 0)
                    gx = np.minimum(gx, 1)
                    mse = np.square(x - gx).sum()
                    img_mse[i] += mse
                    avg_mse += mse

                    real_frm = np.uint8(x * 255)
                    pred_frm = np.uint8(gx * 255)
                    psnr[i] += metrics.batch_psnr(pred_frm, real_frm)
                    for b in range(FLAGS.batch_size):
                        sharp[i] += np.max(
                            cv2.convertScaleAbs(cv2.Laplacian(pred_frm[b], 3)))
                        score, _ = compare_ssim(pred_frm[b],
                                                real_frm[b],
                                                full=True)
                        ssim[i] += score

                # save prediction examples
                if batch_id <= FLAGS.num_save_samples:
                    path = os.path.join(res_path, str(batch_id))
                    os.mkdir(path)
                    for i in range(FLAGS.seq_length):
                        name = 'gt' + str(i + 1) + '.png'
                        file_name = os.path.join(path, name)
                        img_gt = np.uint8(test_ims[0, i, :, :, :] * 255)
                        cv2.imwrite(file_name, img_gt)
                    for i in range(FLAGS.seq_length - FLAGS.input_length):
                        name = 'pd' + str(i + 1 + FLAGS.input_length) + '.png'
                        file_name = os.path.join(path, name)
                        img_pd = img_gen[0, i, :, :, :]
                        img_pd = np.maximum(img_pd, 0)
                        img_pd = np.minimum(img_pd, 1)
                        img_pd = np.uint8(img_pd * 255)
                        cv2.imwrite(file_name, img_pd)
                test_input_handle.next()

            avg_mse = avg_mse / (batch_id * FLAGS.batch_size * FLAGS.n_gpu)
            print('mse per seq: ' + str(avg_mse))
            for i in range(FLAGS.seq_length - FLAGS.input_length):
                print(img_mse[i] / (batch_id * FLAGS.batch_size * FLAGS.n_gpu))

            psnr = np.asarray(psnr, dtype=np.float32) / batch_id
            fmae = np.asarray(fmae, dtype=np.float32) / batch_id
            ssim = np.asarray(ssim,
                              dtype=np.float32) / (FLAGS.batch_size * batch_id)
            sharp = np.asarray(
                sharp, dtype=np.float32) / (FLAGS.batch_size * batch_id)

            print('psnr per frame: ' + str(np.mean(psnr)))
            for i in range(FLAGS.seq_length - FLAGS.input_length):
                print(psnr[i])
            print('fmae per frame: ' + str(np.mean(fmae)))
            for i in range(FLAGS.seq_length - FLAGS.input_length):
                print(fmae[i])
            print('ssim per frame: ' + str(np.mean(ssim)))
            for i in range(FLAGS.seq_length - FLAGS.input_length):
                print(ssim[i])
            print('sharpness per frame: ' + str(np.mean(sharp)))
            for i in range(FLAGS.seq_length - FLAGS.input_length):
                print(sharp[i])

        if itr % FLAGS.snapshot_interval == 0:
            model.save(itr)

        train_input_handle.next()
Example #13
    h = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)  # find the contours
    contour = h[0]
    contour = sorted(contour, key=cv2.contourArea, reverse=True)  # sort contours by area
    #contourmax = contour[0][:, 0, :]  # keep the point coordinates of the largest contour
    bg = np.ones(dst.shape, np.uint8) * 255  # create a white canvas
    ret = cv2.drawContours(bg, contour[0], -1, (0, 0, 0), 3)  # draw the contour in black
    return ret


while (True):
    ret, frame = cap.read()
    # The next three lines can be tuned for your machine
    src = cv2.resize(frame, (400, 350), interpolation=cv2.INTER_CUBIC)  # window size
    cv2.rectangle(src, (90, 60), (300, 300), (0, 255, 0))  # outline the capture region
    roi = src[60:300, 90:300]  # grab the gesture ROI

    res = A(roi)  # run skin detection
    cv2.imshow("0", roi)

    gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
    dst = cv2.Laplacian(gray, cv2.CV_16S, ksize=3)
    Laplacian = cv2.convertScaleAbs(dst)

    contour = B(Laplacian)  # contour processing
    cv2.imshow("2", contour)

    key = cv2.waitKey(50) & 0xFF
    if key == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
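The skin-detection helper A is defined elsewhere in the original script; a hedged sketch of a typical implementation using a YCrCb threshold (an assumption, not the author's actual code):

def A(img):
    # Hypothetical skin detector: threshold in YCrCb space
    ycrcb = cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)
    mask = cv2.inRange(ycrcb, (0, 133, 77), (255, 173, 127))
    return cv2.bitwise_and(img, img, mask=mask)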
Example #14
def main(argv):
    ## [variables]
    # First we declare the variables we are going to use
    window_name = ('Sobel Demo - Simple Edge Detector')
    scale = 1
    delta = 0
    ddepth = cv.CV_16S
    ## [variables]

    ## [load]
    # As usual we load our source image (src)
    # Check number of arguments
    if len(argv) < 1:
        print('Not enough parameters')
        print('Usage:\nsobel_demo.py < path_to_image >')
        return -1

    # Load the image
    src = cv.imread(argv[0], cv.IMREAD_COLOR)

    # Check if image is loaded fine
    if src is None:
        print('Error opening image: ' + argv[0])
        return -1
    ## [load]

    ## [reduce_noise]
    # Remove noise by blurring with a Gaussian filter ( kernel size = 3 )
    src = cv.GaussianBlur(src, (3, 3), 0)
    ## [reduce_noise]

    ## [convert_to_gray]
    # Convert the image to grayscale
    gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
    ## [convert_to_gray]

    ## [sobel]
    # Gradient-X
    # grad_x = cv.Scharr(gray,ddepth,1,0)
    grad_x = cv.Sobel(gray,
                      ddepth,
                      1,
                      0,
                      ksize=3,
                      scale=scale,
                      delta=delta,
                      borderType=cv.BORDER_DEFAULT)

    # Gradient-Y
    # grad_y = cv.Scharr(gray,ddepth,0,1)
    grad_y = cv.Sobel(gray,
                      ddepth,
                      0,
                      1,
                      ksize=3,
                      scale=scale,
                      delta=delta,
                      borderType=cv.BORDER_DEFAULT)
    ## [sobel]

    ## [convert]
    # converting back to uint8
    abs_grad_x = cv.convertScaleAbs(grad_x)
    abs_grad_y = cv.convertScaleAbs(grad_y)
    ## [convert]

    ## [blend]
    ## Total Gradient (approximate)
    grad = cv.addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0)
    ## [blend]

    ## [display]
    cv.imshow(window_name, grad)
    cv.waitKey(0)
    ## [display]

    return 0
Example #15
           cmap='gray')
plt.show()

# Roberts
kernelx = np.array([[1, 0], [0, -1]])
kernely = np.array([[0, 1], [-1, 0]])
img_robertsx = cv2.filter2D(I_data[0], -1, kernelx)
img_robertsy = cv2.filter2D(I_data[0], -1, kernely)
plt.imshow(np.sqrt(np.power(img_robertsx, 2) + np.power(img_robertsy, 2)),
           cmap='gray')
plt.show()

# LOG
I2 = cv2.GaussianBlur(I_data[0], (5, 5), 0)
I3 = cv2.Laplacian(I2, cv2.CV_32F, ksize=5)  # ksize must be a keyword; positionally, 5 would land in the dst slot
I3 = cv2.convertScaleAbs(I3)
plt.imshow(I3, cmap='gray')
plt.show()

TH1 = 20
TH2 = 180
I4 = np.where((I2 >= TH1) & (I2 <= TH2), 1, 0)
plt.imshow(I4, cmap='gray')
plt.show()


def RegionGrow(im_arr,
               seedlist,
               iter_num=5,
               multiplier=2.5,
               initialRadius=2,
Example #16
# coding=utf-8

# The first two parameters are required:
#
# the first is the image to process;
# the second is the depth of the result; -1 means the same depth as the
# source image (the destination depth must be greater than or equal to it).
# The remaining parameters are optional:
#
# dst needs no explanation;
# ksize is the aperture size of the operator and must be 1, 3, 5 or 7 (default 1);
# scale is a factor applied to the computed derivative values (no scaling by default);
# delta is an optional offset added to the result before it is stored in dst (none by default);
# borderType selects how image borders are handled (default cv2.BORDER_DEFAULT).

import cv2
import numpy as np

img = cv2.imread("pics/test.png", 0)

gray_lap = cv2.Laplacian(img, cv2.CV_16S, ksize=3)
dst = cv2.convertScaleAbs(gray_lap)  # convert back to uint8

cv2.imshow("orign", img)
cv2.imshow('laplacian', dst)
cv2.waitKey(0)
cv2.destroyAllWindows()
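Since the comment block above documents ksize, scale, delta, and borderType, a small sketch exercising them (the values are arbitrary):

# Double the derivative values and add 10 before taking the absolute value
gray_lap = cv2.Laplacian(img, cv2.CV_16S, ksize=3, scale=2, delta=10,
                         borderType=cv2.BORDER_REPLICATE)
dst = cv2.convertScaleAbs(gray_lap)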
Example #17
data = conn.recv(1024)

while True:
    fs = pipeline.wait_for_frames()
    aligned_frames = alignedFs.process(fs)

    color_frame = aligned_frames.get_color_frame()
    depth_frame = aligned_frames.get_depth_frame()

    if not depth_frame or not color_frame:
        continue

    color_image = np.asanyarray(color_frame.get_data())
    depth_image = np.asanyarray(depth_frame.get_data())

    depth_image = cv.applyColorMap(cv.convertScaleAbs(depth_image, alpha=0.1),
                                   cv.COLORMAP_JET)

    color_image = cv.cvtColor(color_image, cv.COLOR_BGR2RGB)
    # convert to Image
    frame = Image.fromarray(np.uint8(color_image))
    # start to detect
    tuxiang, zuobiao, label = yolo.detect_image(frame)  # tuxiang = image, zuobiao = box coordinates

    change = zuobiao
    if len(zuobiao) != 0:
        if zuobiao[0] == zuobiao[1] == zuobiao[2] == zuobiao[3] == 0:
            pass
        else:
            # obtain object depth coordinate
            dist_to_center = depth_frame.get_distance(
Example #18
import cv2
import numpy as np
import os

path1 = "./file/before/"
path2 = "./file/data/"

for root, dirs, files in os.walk(path1):
    num = 0
    for file in files:
        print(file, end=' - - - ')
        try:
            img = cv2.imread(path1 + file, cv2.IMREAD_GRAYSCALE)
            img = cv2.GaussianBlur(img, (3, 3), 0)
            x = cv2.Sobel(img, cv2.CV_16S, 1, 0)
            y = cv2.Sobel(img, cv2.CV_16S, 0, 1)
            absX = cv2.convertScaleAbs(x)
            absY = cv2.convertScaleAbs(y)
            img = cv2.addWeighted(absX, 0.5, absY, 0.5, 0)
            img = cv2.resize(img, (255, 255))
            cv2.imwrite(path2 + file[:len(file) - 4] + '!.jpg',
                        cv2.bitwise_not(img))
            img = cv2.imread(path1 + file, cv2.IMREAD_COLOR)
            img = cv2.resize(img, (255, 255))
            cv2.imwrite(path2 + file, img)
        except Exception as e:
            print('error')
            print(e)
        else:
            num += 1
            print('ok')
    print('Total : ' + str(num))
Example #19
def LaplacianOfGaussian(image):
    LoG_image = cv2.GaussianBlur(image, (3, 3), 0)  # parameter
    gray = cv2.cvtColor(LoG_image, cv2.COLOR_BGR2GRAY)
    # (3, 3, 2) were passed positionally and would land in the dst slot; keywords fix that
    LoG_image = cv2.Laplacian(gray, cv2.CV_8U, ksize=3, scale=3, delta=2)
    LoG_image = cv2.convertScaleAbs(LoG_image)
    return LoG_image
Example #20
import cv2
import numpy as np

if __name__ == '__main__':
    # Sharpening strength
    k = 10.0
    # Operator (kernel) used for sharpening
    shape_operator = np.array([[0, -k, 0], [-k, 1 + 4 * k, -k], [0, -k, 0]])

    # Load the image
    img_src = cv2.imread("AA10.png", 3)

    # Sharpen the image with the operator built above
    img_tmp = cv2.filter2D(img_src, -1, shape_operator)
    img_shape = cv2.convertScaleAbs(img_tmp)

    # Display
    cv2.imshow("Show SHAPE Image", img_shape)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
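The kernel above is just the identity plus k times a (sign-flipped) Laplacian, i.e. classic sharpening; an equivalent construction:

identity = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]])
laplacian = np.array([[0, -1, 0], [-1, 4, -1], [0, -1, 0]])  # -1 * standard Laplacian
shape_operator = identity + k * laplacian  # same matrix as in the example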