Example #1
    def process(self, *mats):
        # x = time.perf_counter()
        DOWNSIZE_CAMERA = self.options['downsize_camera']

        mat = cv2.cvtColor(mats[0], cv2.COLOR_BGR2GRAY)

        img2 = resize(
            to_umat(mat), int(mat.shape[1] * DOWNSIZE_CAMERA),
            int(mat.shape[0] *
                DOWNSIZE_CAMERA)) if DOWNSIZE_CAMERA else mat  # trainImage

        board = self.static_process('upper', 'lower')

        # find the keypoints and descriptors with SIFT
        kp2, des2 = self.detector.detectAndCompute(img2, None)
        cam = {"img": img2, "kp": kp2, "des": des2}

        # Guard against a zero downsize factor, mirroring the img2 resize above
        p = resize(mats[0],
                   int(mats[0].shape[1] * DOWNSIZE_CAMERA),
                   int(mats[0].shape[0] *
                       DOWNSIZE_CAMERA)) if DOWNSIZE_CAMERA else mats[0]
        p_mat = p.copy()

        p, M = self.match(board, cam, p, (255, 0, 0))
        if self.options['show_keypoints']:
            p = cv2.drawKeypoints(p, kp2, None, (255, 255, 0))

        assert p is not None

        self.post_shm(p_mat, p, M)

        self.post("outline", p)
Example #2
 def find_key_descriptors(im):
     if self.options['source_x_scale_%s' % image] != 0 and self.options[
             'source_y_scale_%s' % image] != 0:
         scaledim = resize(
             im,
             int(im.shape[1] *
                 self.options['source_x_scale_%s' % image]),
             int(im.shape[0] *
                 self.options['source_y_scale_%s' % image]))
         scaledim = np.pad(scaledim,
                           ((PADDING, PADDING), (PADDING, PADDING)),
                           'constant',
                           constant_values=255)
         rx = self.options['source_x_scale_%s' % image]
         ry = self.options['source_y_scale_%s' % image]
     else:
         scaledim = im
         rx = 1
         ry = 1
     kp, des = self.detector.detectAndCompute(scaledim, None)
     self.static[image] = {
         "name": image,
         "org": im,
         "img": scaledim,
         "rx": rx,
         "ry": ry,
         "kp": kp,
         "des": des
     }
     self.post(
         image,
         cv2.drawKeypoints(scaledim, kp, None, (0, 0, 255), flags=0))
     return self.static[image]
Example #3
    def process(self, mat):
        mat = resize(mat, mat.shape[1] // 4, mat.shape[0] // 4)

        col = cv2.cvtColor(mat, cv2.COLOR_BGR2LAB).astype(np.int16)
        dst1 = dst_thresh(col,
                          (self.options['t_l_trg'], self.options['t_a_trg'],
                           self.options['t_b_trg']))
        dst2 = dst_thresh(col,
                          (self.options['b_l_trg'], self.options['b_a_trg'],
                           self.options['b_b_trg']))
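        # Multiply the two distance maps; thresholding the product against
        # combined_thresh**2 is equivalent to thresholding their geometric
        # mean against combined_thresh.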
        cdst = dst1.astype(np.uint16) * dst2.astype(np.uint16)
        t3 = cdst < (self.options['combined_thresh']**2)
        self.post('dst1', dst1)
        self.post('dst2', dst2)
        self.post('cdst', (cdst**.5).astype(np.uint8))
        #t2 = dst < self.options['d_thresh']
        #t1 = t1.astype(np.uint8) * 255
        #t2 = t2.astype(np.uint8) * 255
        t3 = t3.astype(np.uint8) * 255
        #self.post('t1', t1)
        #self.post('t2', t2)
        self.post('t3', t3)
        img, contours, hierarchy = cv2.findContours(t3, cv2.RETR_CCOMP,
                                                    cv2.CHAIN_APPROX_SIMPLE)
        if contours is None or hierarchy is None:
            shm.bins_status.lever_visible.set(False)
        else:
            valid_contours = (hierarchy[0, :, 3] < 0)  # (hierarchy[0,:,2] < 0)
            #print(valid_contours.shape, contours.shape)
            #print(hierarchy)
            #print(len(contours), valid_contours)
            #contours = contours[valid_contours]
            #print([(cv2.contourArea(x), cv2.arcLength(x, True) ** 2 / 2) for x in contours])
            contours = [
                x for i, x in enumerate(contours)
                if valid_contours[i] and cv2.contourArea(x) >
                cv2.arcLength(x, True)**2 / 2 * self.options['compactness']
            ]
            #print(contours)
            if contours:
                mx = max(contours, key=cv2.contourArea)
                mm = cv2.moments(mx)
                if mm['m00'] != 0:
                    cx = mm['m10'] / mm['m00']
                    cy = mm['m01'] / mm['m00']
                    cv2.drawContours(mat, [mx], -1, (0, 255, 0), 2)
                    norm_x = cx / mat.shape[1] - .5
                    norm_y = (cy - mat.shape[0] / 2) / mat.shape[1]
                    shm.bins_status.lever_x.set(norm_x)
                    shm.bins_status.lever_y.set(norm_y)
                    shm.bins_status.lever_sz.set(cv2.contourArea(mx)**.5)
                    shm.bins_status.lever_visible.set(True)
                else:
                    shm.bins_status.lever_visible.set(False)
            else:
                shm.bins_status.lever_visible.set(False)
        self.post('mat', mat)
Example #4
    def static_process(self, image):
        def find_key_descriptors(im):
            if self.options['source_x_scale_%s' % image] != 0 and self.options[
                    'source_y_scale_%s' % image] != 0:
                scaledim = resize(
                    im,
                    int(im.shape[1] *
                        self.options['source_x_scale_%s' % image]),
                    int(im.shape[0] *
                        self.options['source_y_scale_%s' % image]))
                scaledim = np.pad(scaledim,
                                  ((PADDING, PADDING), (PADDING, PADDING)),
                                  'constant',
                                  constant_values=255)
                rx = self.options['source_x_scale_%s' % image]
                ry = self.options['source_y_scale_%s' % image]
            else:
                scaledim = im
                rx = 1
                ry = 1
            kp, des = self.detector.detectAndCompute(scaledim, None)
            self.static[image] = {
                "name": image,
                "org": im,
                "img": scaledim,
                "rx": rx,
                "ry": ry,
                "kp": kp,
                "des": des
            }
            self.post(
                image,
                cv2.drawKeypoints(scaledim, kp, None, (0, 0, 255), flags=0))
            return self.static[image]

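        # Reuse cached keypoints/descriptors unless the scale options changed;
        # on a scale change, recompute from the stored original image.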
        if image in self.static:
            if self.static[image]['rx'] == self.options['source_x_scale_%s'%image] and \
               self.static[image]['ry'] == self.options['source_y_scale_%s'%image]:
                return self.static[image]
            else:
                im = self.static[image]["org"]
                return find_key_descriptors(im)
        else:
            im = simple_gaussian_blur(
                cv2.imread('buoy_images/%s.png' % image, 0), 11, 3)
            im = resize(im, im.shape[1] // 2, im.shape[0] // 2)
            return find_key_descriptors(im)
Example #5
def crop_by_mask(cvtmat, mask, x, y, r, shrink=False):
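    # Drop the first channel of cvtmat, crop a 2r-square window centered on
    # (x, y) from both image and mask, apply the mask, and optionally resize
    # the crop down to at most `shrink` pixels per side.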
    import time
    t = time.time()
    cvtmat = cvtmat[:, :, 1:]  # TODO: make this adjustable
    print('foo a', time.time() - t)
    t = time.time()
    mask = mask[y - r:y + r, x - r:x + r]
    print('foo b', time.time() - t)
    t = time.time()
    cropped = cvtmat[y - r:y + r, x - r:x + r]
    print('foo c', time.time() - t)
    t = time.time()
    cropped = cv2.bitwise_and(cropped, cropped, mask=mask)
    print('foo d', time.time() - t)
    if shrink:
        size = min(cropped.shape[0], cropped.shape[1], shrink)
        cropped = resize(cropped, size, size)
    return cropped
Example #6
 def process(self, mat):
     self.post('org', mat)
     mat = resize(mat, mat.shape[1]//2, mat.shape[0]//2)
     shm.bins_garlic.center_x.set(mat.shape[1]//2)  # shape is (h, w): width -> x
     shm.bins_garlic.center_y.set(mat.shape[0]//2)  # height -> y
     cvtmat, split = bgr_to_lab(mat)
     self.circles = find_yellow_circle(split,
                                       color=[self.options['yellow_{}'.format(s)] for s in COLORSPACE],
                                       distance=self.options['circle_color_distance'],
                                       erode_kernel=self.options['circle_erode_kernel'],
                                       erode_iterations=self.options['circle_erode_iterations'],
                                       dilate_kernel=self.options['circle_dilate_kernel'],
                                       dilate_iterations=self.options['circle_dilate_iterations'],
                                       min_contour_size=self.options['circle_min_contour_size'],
                                       min_circularity=self.options['circle_min_circularity'],
                                       radius_offset=self.options['garlic_circle_r_offset'])
     cv2.drawContours(mat, [c['contour'] for c in self.circles], -1, (255, 0, 0), 10)  # -1 draws all contours
     for c in self.circles:
         cv2.circle(mat, *c['circle'], (0, 255, 0), 10)
     self.post('circle', mat)
     self.find_red_garlic(cvtmat, split)
Example #7
    def process(self, mat):
        t = time.perf_counter()
        self.post('org', mat)
        mat = resize(mat, mat.shape[1]//2, mat.shape[0]//2)
        shm.recovery_vampire.cam_x.set(mat.shape[1]//2)
        shm.recovery_vampire.cam_y.set(mat.shape[0]//2)
        # tt = time.perf_counter()
        # print('1 %f' % (tt - t))
        # print(mat.shape)
        _, split = bgr_to_lab(mat)
        d = self.options['vampire_color_distance']
        color = [self.options["yellow_%s" % c] for c in COLORSPACE]
        self.rectangles = self.find_yellow_rectangle(
            split, color, d,
            self.options['erode_kernel_size'],
            self.options['erode_iterations'],
            self.options['dilate_kernel_size'],
            self.options['dilate_iterations'],
            self.options['contour_size_min'],
            self.options['rectangularity_thresh'],
            -self.options['rectangle_padding'])
        # t = time.perf_counter()
        # print('2 %f' % (t - tt))

        for y in self.rectangles:
            rectangle = cv2.boxPoints(y['rectangle'])
            mat = cv2.drawContours(mat, [np.int0(rectangle)], 0, (0, 0, 255), 10)

        color = [self.options["purple_%s" % c] for c in COLORSPACE]
        # purple = self.find_color(mat, color, d, use_first_channel=False, erode_mask=True, dilate_mask=True, iterations=3, rectangular=False)
        # self.post('purple', purple)
        # purple_contours = self.contours_and_filter(purple, self.options['contour_size_min'])
        self.find_vampire(mat, split, color, d)

        # tt = time.perf_counter()
        # print('3 %f' % (tt-t))

        mat = cv2.drawContours(mat, [r['contour'] for r in self.rectangles], -1, (0, 255, 0), 10)
        # mat = cv2.drawContours(mat, purple_contours, -1, (0, 255, 0), 10)
        self.post('yellow_contours', mat)
Example #8
 def find_key_descriptors(im):
     if self.options['source_x_scale_%s' % image] != 0 and self.options[
             'source_y_scale_%s' % image] != 0:
         scaledim = resize(
             im,
             int(im.shape[1] *
                 self.options['source_x_scale_%s' % image]),
             int(im.shape[0] *
                 self.options['source_y_scale_%s' % image]))
         scaledim = np.pad(scaledim,
                           ((PADDING, PADDING), (PADDING, PADDING)),
                           'constant',
                           constant_values=255)
         rx = self.options['source_x_scale_%s' % image]
         ry = self.options['source_y_scale_%s' % image]
     else:
         scaledim = im
         rx = 1
         ry = 1
     scaledim = simple_gaussian_blur(scaledim, BLUR_KERNEL, BLUR_SD)
     kp, des = self.detector.detectAndCompute(scaledim, None)
     self.static[image] = {
         "name": image,
         "org": im,
         "img": scaledim,
         "rx": rx,
         "ry": ry,
         "kp": kp,
         "des": des,
         "separation": self.options['board_separation']
     }
     keypoints = cv2.drawKeypoints(scaledim.copy(),
                                   kp,
                                   None, (0, 0, 255),
                                   flags=0)
     self.post(image, keypoints)
     return self.static[image]
Example #9
        def find_key_descriptors(im):
            scaledim = resize(
                im,
                int(im.shape[1] * self.options['source_x_scale_%s' % image]),
                int(im.shape[0] * self.options['source_y_scale_%s' % image]))

            #mean = 0
            #sigma = 20
            #gauss = np.random.normal(mean,sigma,scaledim.shape)
            #scaledim = scaledim.astype(np.int16) + gauss.astype(np.int8)
            #np.clip(scaledim, 0, 255, out=scaledim)
            #scaledim = scaledim.astype(np.uint8)

            scaledim = np.pad(scaledim,
                              ((PADDING, PADDING), (PADDING, PADDING)),
                              'constant',
                              constant_values=255)
            rx = self.options['source_x_scale_%s' % image]
            ry = self.options['source_y_scale_%s' % image]
            scaledim = simple_gaussian_blur(scaledim, BLUR_KERNEL, BLUR_SD)
            kp, des = self.detector.detectAndCompute(scaledim, None)
            self.static[image] = {
                "name": image,
                "org": im,
                "img": scaledim,
                "rx": rx,
                "ry": ry,
                "kp": kp,
                "des": des
            }
            keypoints = cv2.drawKeypoints(scaledim.copy(),
                                          kp,
                                          None, (0, 0, 255),
                                          flags=0)
            self.post(image, keypoints)
            return self.static[image]
Example #10
    def process(self, *mats):
        x = time.perf_counter()
        DOWNSIZE_CAMERA = self.options['downsize_camera']

        img2 = resize(
            to_umat(mats[0]), int(mats[0].shape[1] * DOWNSIZE_CAMERA),
            int(mats[0].shape[0] *
                DOWNSIZE_CAMERA)) if DOWNSIZE_CAMERA else mats[0]  # trainImage

        #img2 = cv2.copyMakeBorder(img2, PADDING,PADDING,PADDING,PADDING, cv2.BORDER_REPLICATE)

        draugr = self.static_process('draugr')
        vetalas = self.static_process('vetalas')
        aswang = self.static_process('aswang')
        jiangshi = self.static_process('jiangshi')

        # find the keypoints and descriptors with SIFT
        kp2, des2 = self.detector.detectAndCompute(img2, None)
        cam = {"img": img2, "kp": kp2, "des": des2}

        p = self.match(draugr, cam, None, (255, 0, 0))
        p = self.match(vetalas, cam, p, (0, 255, 0))
        p = self.match(aswang, cam, p, (0, 0, 255))
        p = self.match(jiangshi, cam, p, (255, 0, 255))

        if self.options['show_keypoints']:
            p = cv2.drawKeypoints(p, kp2, None, (255, 255, 0))

        p = from_umat(p)

        self.post_shm()
        shm.vamp_buoy_results.camera_x.set(p.shape[1] // 2)
        shm.vamp_buoy_results.camera_y.set(p.shape[0] // 2)

        self.post("outline", p)
        print(time.perf_counter() - x)
Example #11
    def process(self, *mats):
        results = shm.gate_vision.get()
        h, w, _ = mats[0].shape
        h = int(h * self.options['resize_height_scale'])
        w = int(w * self.options['resize_width_scale'])
        results.img_height = h
        results.img_width = w
        mat = resize(mats[0], w, h)
        #print(np.mean(mat))
        avg_brightness_ratio = np.mean(mat) / REFERENCE_BRIGHTNESS
        nonblack_thresh_dist = self.options['nonblack_thresh'] * avg_brightness_ratio
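        # Scale the non-black threshold by the frame's mean brightness
        # relative to REFERENCE_BRIGHTNESS so it adapts to lighting changes.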

        lab, lab_split = bgr_to_lab(mat)
        median_a = np.median(lab_split[1])
        median_b = np.median(lab_split[2])
        median_filter_a = range_threshold(lab_split[1], median_a - self.options['water_a_thresh'], median_a + self.options['water_a_thresh'])
        median_filter_b = range_threshold(lab_split[2], median_b - self.options['water_b_thresh'], median_b + self.options['water_b_thresh'])
        if self.options['debug']:
            self.post('median filter a', median_filter_a)
            self.post('median filter b', median_filter_b)
        nonwater_mask, _ = gray_to_bgr(255 - (median_filter_a & median_filter_b))
        self.post('nonwater', nonwater_mask)
        # Tuned for a 320x256 image
        vehicle_depth = shm.kalman.depth.get()
        reflection_cutoff = min(h, int(max(0, 3 - vehicle_depth)**2 * CUTOFF_SCALAR))
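        # Zero out the top rows to suppress surface reflections; the cutoff
        # grows quadratically as the vehicle gets shallower than 3 m.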
        mat[:reflection_cutoff] *= 0
        tmp = mat.copy()
        draw_text(tmp, 'Depth: {:.2f}'.format(vehicle_depth), (30, 30), 0.5, color=(255, 255, 255))
        self.post('mat', tmp)
        #lab, lab_split = bgr_to_lab(mat)
        #nonblack_mask, _ = gray_to_bgr(np.uint8(255 * (lab_split[0] > self.options['nonblack_thresh'])))
        nonblack_mask, _ = gray_to_bgr(np.uint8(255 * (np.var(mat, axis=2) > nonblack_thresh_dist)))
        self.post('nonblack', nonblack_mask)
        mat &= nonblack_mask
        mat &= nonwater_mask
        mat = to_umat(mat)
        mat = simple_gaussian_blur(mat, to_odd(self.options['blur_kernel']),
                                   self.options['blur_std'])
        lab, lab_split = bgr_to_lab(mat)
        threshed, dists = thresh_color_distance(
            [lab_split[0], lab_split[1], lab_split[2]],
            [self.options['lab_l_ref'], self.options['lab_a_ref'],
             self.options['lab_b_ref']],
            self.options['color_dist_thresh'],
            auto_distance_percentile=self.options['auto_distance_percentile'],
            ignore_channels=[0], weights=[2, 0, 15])
        if self.options['debug']:
            self.post('threshed', threshed)
            self.post('dists', dists)
        dilated = dilate(threshed, rect_kernel(self.options['dilate_kernel']))
        if self.options['debug']:
            self.post('dilated', dilated)
        eroded = erode(dilated, rect_kernel(self.options['erode_kernel']))
        if self.options['debug']:
            self.post('eroded', eroded)
        contours = outer_contours(eroded)
        areas = [*map(contour_area, contours)]
        centroids = [*map(contour_centroid, contours)]
        xs = [c[0] for c in centroids]
        ys = [c[1] for c in centroids]
        rects = [*map(min_enclosing_rect, contours)]
        lengths = [max(r[1]) for r in rects]
        ratios = [max(r[1]) / (1e-30 + min(r[1])) for r in rects]
        vehicle_roll = shm.kalman.roll.get()
        lines = [cv2.fitLine(c, cv2.DIST_L2, 0, 0.01, 0.01) for c in contours]
        angles = [np.degrees(np.arctan2(line[1], line[0]))[0] for line in lines]
        angles = [min(abs(90 - a - vehicle_roll), abs(-90 - a - vehicle_roll)) for a in angles]
        rectangularities = [a / (1e-30 + rect[1][0] * rect[1][1]) for (c, a, rect) in zip(contours, areas, rects)]
        contours = [ContourFeats(*feats) for feats in zip(contours, areas, xs, ys, rectangularities, angles, lengths, ratios)]
        contours = [*filter(lambda c: c.area > self.options['min_contour_area'], contours)]
        self.post_contours('area', h, w, contours)
        contours = [*filter(lambda c: c.angle < self.options['max_angle_from_vertical'], contours)]
        self.post_contours('angle', h, w, contours)
        contours = [*filter(lambda c: c.length > self.options['min_length'], contours)]
        self.post_contours('length', h, w, contours)
        #contours = [*filter(lambda c: c.rect > self.options['min_contour_rect'], contours)]
        #self.post_contours('rect', h, w, contours)
        contours = [*filter(lambda c: c.ratio > self.options['min_contour_ratio'], contours)]
        self.post_contours('ratio', h, w, contours)
        contours = sorted(contours, key=lambda c: -c.area)[:6]  # keep the six largest
        contours_by_x = sorted(contours, key=lambda c: c.x)
        contours_by_x = filter_duplicates_sorted_by_x(contours_by_x)
        leftmost = try_index(contours_by_x, 0)
        middle = try_index(contours_by_x, 1)
        rightmost = try_index(contours_by_x, 2)
        tmp = np.zeros((h, w, 3))
        results.leftmost_visible = leftmost is not None
        results.middle_visible = middle is not None
        results.rightmost_visible = rightmost is not None
        draw_text(tmp, 'Roll: {:.2f}'.format(vehicle_roll), (30, 30), 0.5, color=(255, 255, 255))
        if leftmost is not None:
            draw_contours(tmp, [leftmost.contour], color=(255, 0, 0), thickness=-1)
            draw_circle(tmp, (leftmost.x, leftmost.y), 5, color=(255, 255, 255), thickness=-1)
            results.leftmost_x = leftmost.x
            results.leftmost_y = leftmost.y
            results.leftmost_len = leftmost.length
        if middle is not None:
            draw_contours(tmp, [middle.contour], color=(0, 255, 0), thickness=-1)
            draw_circle(tmp, (middle.x, middle.y), 5, color=(255, 255, 255), thickness=-1)
            results.middle_x = middle.x
            results.middle_y = middle.y
            results.middle_len = middle.length
        if rightmost is not None:
            draw_contours(tmp, [rightmost.contour], color=(0, 0, 255), thickness=-1)
            draw_circle(tmp, (rightmost.x, rightmost.y), 5, color=(255, 255, 255), thickness=-1)
            results.rightmost_x = rightmost.x
            results.rightmost_y = rightmost.y
            results.rightmost_len = rightmost.length
        shm.gate_vision.set(results)
        self.post('contours', tmp)
Example #12
    def process(self, mat):
        camera_scale = self.options['camera_scale']
        debug = self.options['debug']
        if camera_scale != 0:
            mat = resize(mat, int(mat.shape[1] * camera_scale),
                         int(mat.shape[0] * camera_scale))

        l_mat = cv2.cvtColor(mat, cv2.COLOR_BGR2Lab)
        if debug:
            mm = l_mat.astype(np.int32)
            dst_lid = (mm[:,:,0] - self.options['lid_l_trg']) ** 2 + \
                (mm[:,:,1] - self.options['lid_a_trg']) ** 2 + \
                (mm[:,:,2] - self.options['lid_b_trg']) ** 2
            self.post('yellowness_lid', ((dst_lid / 3)**.5).astype(np.uint8))
            #np.clip(dst_lid, 0, 255, out=dst_lid)
            #dst_lid = dst_lid.astype(np.uint8)
            #res, yellow_mask_lid = cv2.threshold(to_umat(dst_lid), self.options['lid_d_thresh'], 255, cv2.THRESH_BINARY_INV)
            yellow_mask_lid = (dst_lid <
                               self.options['lid_d_thresh']**2).astype(np.uint8) * 255
            self.post('yellow_mask_lid', yellow_mask_lid)
        img2 = cv2.cvtColor(to_umat(mat), cv2.COLOR_BGR2GRAY)
        edg = cv2.Canny(img2,
                        self.options['canny1'],
                        self.options['canny2'],
                        apertureSize=3)
        #yellow_edg_msk = cv2.erode(yellow_mask_lid, kernel)
        #yellow_edg_msk = cv2.dilate(yellow_edg_msk, kernel, iterations=4)
        #self.post('edg0', edg)
        #edg = cv2.bitwise_and(edg, yellow_edg_msk)
        self.post('edg', edg)

        lines = cv2.HoughLines(edg, 1, np.pi / 180,
                               self.options['houghness']).get()
        clrs = [(0, 0, 255), (0, 255, 0), (255, 0, 0), (0, 255, 255),
                (255, 255, 0), (255, 0, 255), (0, 0, 128), (0, 128, 0),
                (128, 0, 0)]

        if lines is None:
            self.post('lines', mat)
            shm.bins_status.cover_visible.set(False)
            return

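        # Normalize (rho, theta) so rho >= 0, then square each line's unit
        # normal vector: lines whose angles differ by 180 degrees map to the
        # same doubled-angle vector.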
        lines[lines[:, 0, 0] < 0, :, 1] += np.pi
        lines[:, :, 0] = np.abs(lines[:, :, 0])
        lvecs = np.exp(1j * lines[:, :, 1])
        mlvecs = lvecs**2
        #print(lvecs)
        if debug:
            for i, (dst, vect) in enumerate(zip(lines[:, 0, 0], lvecs[:, 0])):
                ctr = vect * dst
                vc = vect * 1j
                p1 = ctr + 1000 * vc
                p2 = ctr - 1000 * vc
                mat = cv2.line(mat, (int(p1.real), int(p1.imag)),
                               (int(p2.real), int(p2.imag)), (0, 0, 200), 2)
            #print(vect, dst)
        m = cv2.BFMatcher(cv2.NORM_L2)
        cc = mlvecs.astype(np.complex64).view(np.float32)
        centers = []
        if len(mlvecs) > 1:
            dsst = 2.83 - shm.kalman.depth.get()
            plen = 75 / dsst if dsst > 0 else 1000
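            # Matching doubled-angle vectors against their negations pairs up
            # roughly perpendicular lines: a 90-degree angle difference
            # doubles to a 180-degree (sign) flip.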
            res = m.match(cc, -cc)
            for mt in res:  # avoid shadowing the BFMatcher bound to m
                if mt.distance > .05: continue
                d1 = lvecs[mt.trainIdx, 0], lines[mt.trainIdx, 0, 0]
                d2 = lvecs[mt.queryIdx, 0], lines[mt.queryIdx, 0, 0]
                try:
                    center = np.linalg.solve(
                        np.complex64([d1[0] / abs(d1[0]), d2[0] / abs(d2[0])
                                      ]).view(np.float32).reshape(2, -1),
                        [[d1[1]], [d2[1]]])[:, 0].astype(
                            np.float64)  #.view(np.complex64)[0]
                except np.linalg.linalg.LinAlgError:
                    print('singular matrix')
                    continue
                cC = complex(*center)
                mz = np.zeros(mat.shape[:-1], dtype=np.uint8)
                mz2 = np.zeros(mat.shape[:-1], dtype=np.uint8)
                poly = make_poly(d1[0], d2[0])
                rpoly = make_poly(d1[0], -d2[0])

                mz = cv2.fillConvexPoly(mz, (cC + plen * poly).view(
                    np.float32).astype(np.int32), 255)
                mz = cv2.fillConvexPoly(mz, (cC - plen * poly).view(
                    np.float32).astype(np.int32), 255)
                mz2 = cv2.fillConvexPoly(mz2, (cC + plen * rpoly).view(
                    np.float32).astype(np.int32), 255)
                mz2 = cv2.fillConvexPoly(mz2, (cC - plen * rpoly).view(
                    np.float32).astype(np.int32), 255)
                m1, sd1 = cv2.meanStdDev(img2, mask=mz)
                m2, sd2 = cv2.meanStdDev(img2, mask=mz2)

                m1, sd1, m2, sd2 = (x.get()[0, 0] for x in (m1, sd1, m2, sd2))
                score = abs(m1 - m2) / (sd1 * sd2)
                if score < self.options['min_cross_score']: continue

                no_swap = np.cross(
                    *np.complex128([[d1[0]], [d2[0]]]).view(np.float64)) > 0
                d1, d2 = (d1, d2) if no_swap else (d2, d1)
                long_axis, short_axis = (d1[0], d2[0]) if m1 < m2 else (d2[0],
                                                                        d1[0])
                sm = mz if (m1 > m2) else mz2  #  ^ no_swap
                m_color = cv2.mean(l_mat, mask=sm)[0:3]
                passes_color = cv2.norm(
                    np.float32(m_color),
                    np.float32([
                        self.options['lid_l_trg'], self.options['lid_a_trg'],
                        self.options['lid_b_trg']
                    ])) < self.options['lid_d_thresh']
                if passes_color:
                    centers.append(
                        (cC, d1, d2, score, long_axis, short_axis, sm))
                try:
                    mat = cv2.circle(mat, (int(center[0]), int(center[1])), 10,
                                     (255, 0, 0) if passes_color else
                                     (0, 200, 200), 2)
                except (ValueError, OverflowError):
                    pass
            if centers:
                mx = max(centers, key=lambda h: h[3])
                center, d1, d2, score, long_axis, short_axis, sm = mx
                #dsst = 4.26 - shm.kalman.depth.get() - .762
                pts = (np.complex64([
                    long_axis + short_axis, long_axis - short_axis,
                    -long_axis - short_axis, -long_axis + short_axis
                ])[:, np.newaxis] * plen + center).view(np.float32).astype(
                    np.int32)
                print(pts)
                mat = cv2.polylines(mat, [pts], True, (255, 255, 255))
                print(score, shm.kalman.depth.get())
                if long_axis.imag < 0: long_axis *= -1
                if short_axis.real < 0: short_axis *= -1
                #self.post('sm', sm)
                shm.bins_status.cover_x.set(center.real / mat.shape[1] - .5)
                shm.bins_status.cover_y.set(
                    (center.imag - mat.shape[0] / 2) / mat.shape[1])
                shm.bins_status.cover_maj_x.set(long_axis.real)
                shm.bins_status.cover_maj_y.set(long_axis.imag)
                shm.bins_status.cover_min_x.set(short_axis.real)
                shm.bins_status.cover_min_y.set(short_axis.imag)
                shm.bins_status.cover_visible.set(True)

                for ax, clr in ((short_axis, (255, 0, 0)), (long_axis, (0, 255,
                                                                        0))):
                    p1 = center + ax * 1000
                    p2 = center - ax * 1000
                    mat = cv2.line(mat, (int(p1.real), int(p1.imag)),
                                   (int(p2.real), int(p2.imag)), clr, 2)
                mat = cv2.circle(mat, (int(center.real), int(center.imag)), 10,
                                 (0, 255, 0), 2)
            else:
                shm.bins_status.cover_visible.set(False)
        self.post('lines', mat)
Example #13
    def process(self, *mats):
        # x = time.perf_counter()
        CAMERA_SCALE = self.options['camera_scale']
        t = time.perf_counter()
        print('a',
              time.perf_counter() - t)
        t = time.perf_counter()

        mat = resize(mats[0], int(mats[0].shape[1] * CAMERA_SCALE),
                     int(mats[0].shape[0] *
                         CAMERA_SCALE)) if CAMERA_SCALE else mats[0]
        temp = mat.astype(
            np.uint16) * 2  #self.options_dict['PPX_contrast'].value
        mat = np.clip(temp, 0, 255).astype(np.uint8)
        p = mat.copy()
        #rv, ccs = cv2.findChessboardCorners(mat, (1, 1))
        #print(rv)
        #l_mat = cv2.cvtColor(mat, cv2.COLOR_BGR2Lab)
        #mm = l_mat.astype(np.int16)
        #dst = np.abs(mm[:,:,0] - self.options['img_l_trg']) + \
        #    np.abs(mm[:,:,1] - self.options['img_a_trg']) + \
        #    np.abs(mm[:,:,2] - self.options['img_b_trg'])
        #self.post('yellowness', (dst // 3).astype(np.uint8))
        #np.clip(dst, 0, 255, out=dst)
        #dst = dst.astype(np.uint8)
        #res, yellow_mask = cv2.threshold(dst, self.options['img_d_thresh'], 255, cv2.THRESH_BINARY_INV)
        #self.post('yellow_mask', yellow_mask)

        img2 = cv2.cvtColor(to_umat(mat), cv2.COLOR_BGR2GRAY)
        print('b',
              time.perf_counter() - t)
        t = time.perf_counter()
        #corners = cv2.cornerHarris(img2, 2, 3, .04)
        #print(corners.get().dtype)
        #cg = corners.get()
        #cg[cg <= 0] = cg[cg > 0].min()
        #print(cg.max())
        #print(cg.min(), cg.max())
        #ll = np.log(cg)
        #print(ll.min(), ll.max())
        #self.post('harris', np.clip((ll * 5 + 128), 0, 255).astype(np.uint8))
        #img2 = to_umat(np.pad(cv2.cvtColor(mat, cv2.COLOR_BGR2GRAY), ((PADDING, PADDING), (PADDING, PADDING)), 'constant', constant_values=255))
        #mat = to_umat(np.pad(img2.get(), ((PADDING, PADDING), (PADDING, PADDING)), 'constant', constant_values=255))

        #img2 = resize(to_umat(mat), int(mat.shape[1]*camera_scale), int(mat.shape[0]*camera_scale)) if DOWNSIZE_CAMERA else mat  # trainImage

        print('h',
              time.perf_counter() - t)
        t = time.perf_counter()
        bat = self.static_process('bat')
        wolf = self.static_process('wolf')
        print('i',
              time.perf_counter() - t)
        t = time.perf_counter()

        #black_areas = img2 < self.options['min_gray']
        res, black_areas = cv2.threshold(img2, self.options['min_gray'], 255,
                                         cv2.THRESH_BINARY_INV)
        black_areas = cv2.erode(black_areas, kernel)
        black_areas = cv2.dilate(black_areas, kernel, iterations=2)
        self.post('black_areas', black_areas)
        img, contours, hierarchy = cv2.findContours(black_areas,
                                                    cv2.RETR_EXTERNAL,
                                                    cv2.CHAIN_APPROX_SIMPLE)
        msk = np.zeros(mat.shape[:-1], dtype=np.uint8)
        #szcs = sorted(contours, key=cv2.contourArea, reverse=True)
        print('j',
              time.perf_counter() - t)
        t = time.perf_counter()

        for i, x in enumerate(contours):  #szcs[:2]:
            pts = cv2.boxPoints(cv2.minAreaRect(x))
            #mm = np.mean(x, axis=0)
            mpp = np.mean(pts, axis=0)
            #print(pts)
            #print(np.mean(pts, axis=0))
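            # Shrink each box corner ~2% toward the box centroid, then fill
            # the shrunken box into the mask with label i + 1.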
            cv2.drawContours(msk,
                             [np.int0(pts - (pts - (pts * 4 + mpp) / 5) * .1)],
                             -1, i + 1, -1)
            #print(x * 9 + mm, mm)
        # print(x)
        #cv2.drawContours(msk, [np.int0((x * 2 + mm) / 3)], -1, i+1, -1)

        #for p in szcs[0:2]:
        #    msk = cv2.fillPoly(msk, p, 255)
        colors = np.uint8([(0, 0, 0), (0, 0, 255), (0, 255, 0), (255, 0, 0),
                           (0, 255, 255), (255, 255, 0), (255, 0, 255),
                           (255, 255, 255)])
        self.post('blk_fill', colors[np.clip(msk, 0, 7)])
        print('k',
              time.perf_counter() - t)
        t = time.perf_counter()

        #print(szcs)
        #black_areas = black_areas.astype(np.uint8)
        #target_area = cv2.dilate(black_areas, kernel, iterations=10)
        #target_area = target_area.get()
        #self.post('target_area', target_area)

        #target_area = cv2.erode(yellow_mask, kernel)
        #target_area = cv2.dilate(target_area, kernel, iterations=10)
        #target_area &= msk
        target_area = msk
        #self.post('target_area', target_area)

        # find the keypoints and descriptors with SIFT

        #img2 = cv2.UMat(np.pad(img2.get(), ((PADDING, PADDING), (PADDING, PADDING)), 'constant', constant_values=255)) # boo
        print('l',
              time.perf_counter() - t)
        t = time.perf_counter()
        kp2, des2 = self.detector.detectAndCompute(img2, None)
        print('l1',
              time.perf_counter() - t)
        t = time.perf_counter()
        if des2 is None:
            print('No points found')
            return
        dg = des2.get()
        if dg is None:
            print('No points found')
            return
        #cv2.UMat(des2, [0, 1, 2])
        #print(dir(des2))
        #print(des2.get([0, 1, 2]))
        #print(kp2)
        #print(max(x.pt[1] for x in kp2))
        #print(max(x.pt[0] for x in kp2))
        #p = resize(mats[0], int(mats[0].shape[1] * self.options['camera_scale']), int(mats[0].shape[0] * self.options['camera_scale']))
        #p = cv2.copyMakeBorder(p, PADDING, PADDING, PADDING, PADDING, cv2.BORDER_CONSTANT, None, (255, 255, 255))
        if self.options['show_keypoints']:
            p = cv2.drawKeypoints(p, kp2, None, (0, 255, 255))
        #print([x.pt for x in kp2])
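        # Keep only keypoints that fall inside the labeled target mask built
        # from the detected black regions.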
        idxs = [
            i for (i, x) in enumerate(kp2)
            if (0 < x.pt[1] < target_area.shape[0]) and
            (0 < x.pt[0] < target_area.shape[1]) and target_area[int(x.pt[1]),
                                                                 int(x.pt[0])]
        ]
        kp2 = [kp2[i] for i in idxs]
        des2 = cv2.UMat(dg[idxs])
        #print(kp2[0].pt)
        cam = {"img": img2, "kp": kp2, "des": des2}

        if self.options['show_keypoints']:
            p = cv2.drawKeypoints(p, kp2, None, (255, 255, 0))
        p_mat = p.copy()

        def get_center_ang(img, mat):
            ii = img['img']
            pts = np.float32([[ii.shape[1] / 2, ii.shape[0] / 2],
                              [ii.shape[1] / 2, 0]]).reshape(-1, 1,
                                                             2) + PADDING
            middle, top = cv2.perspectiveTransform(pts, mat)[:, 0, :]
            up_vec = top - middle
            return middle, np.arctan2(up_vec[1], up_vec[0])

        if kp2:
            print('m',
                  time.perf_counter() - t)
            t = time.perf_counter()
            p, M1 = self.match(bat, cam, p, (0, 0, 255), msk, len(contours))
            p, M2 = self.match(wolf, cam, p, (0, 255, 0), msk, len(contours))
            if M1 is not None:
                ctr, ang = get_center_ang(bat, M1)
                shm.bins_status.bat_x.set(ctr[0] / mat.shape[1] - .5)
                shm.bins_status.bat_y.set(
                    (ctr[1] - mat.shape[0] / 2) / mat.shape[1])
                shm.bins_status.bat_angle.set(ang)
                shm.bins_status.bat_visible_frames.set(
                    shm.bins_status.bat_visible_frames.get() + 1)
            shm.bins_status.bat_visible.set(M1 is not None)

            if M2 is not None:
                ctr, ang = get_center_ang(wolf, M2)
                shm.bins_status.wolf_x.set(ctr[0] / mat.shape[1] - .5)
                shm.bins_status.wolf_y.set(
                    (ctr[1] - mat.shape[0] / 2) / mat.shape[1])
                shm.bins_status.wolf_angle.set(ang)
                shm.bins_status.wolf_visible_frames.set(
                    shm.bins_status.wolf_visible_frames.get() + 1)
            shm.bins_status.wolf_visible.set(M2 is not None)

        print('n',
              time.perf_counter() - t)
        t = time.perf_counter()

        assert p is not None

        #try:
        #    self.post_shm(p_mat, p, M)
        #except cv2.error as e:
        #    print(e)

        self.post("outline", p)
Example #14
    def match(self, img, min_match=10, ratio=0.7, draw=False):
        """
        Find all instances of source images in an image.

        img:        The image to be matched.
        min_match:  The minimum number of keypoint matches required to
                    identify a source. Increasing this number identifies
                    sources more accurately, but less reliably.
        ratio:      A number in [0..1] used to identify a "good" match via
                    the ratio test. A higher number means more matches are
                    considered "good", but they are also more likely to be
                    noise.
        draw:       Whether to draw the matches. Drawing is usually not
                    required, since keypoints can be compared by eye, but it
                    is a useful debugging tool.

        The function returns a tuple containing, in order:
        - a list of identified sources, each of which is a tuple of:
            - the name of the source,
            - the good matches found,
            - a single contour specifying the area of the match as a
              transformed rectangle,
            - a mask of the matched area,
            - an image showing all matches to the source image;
        - the keypoints of the image passed;
        - the feature descriptors of the image passed.
        """
        kp, des = self.sift.detectAndCompute(img, None)
        matched = []
        draw_params = dict(matchColor=(0, 255, 0),
                           singlePointColor=None,
                           matchesMask=None,
                           flags=2)
        for name, val in self.sources.items():
            matches = self.matcher.knnMatch(val["des"], des, k=2)

            good = SIFT._ratio_test(matches, ratio=ratio)
            if len(good) < min_match:
                continue

            # TODO: What if we don't do a homogeneous transform?
            src_pts = np.float32([val["kp"][m.queryIdx].pt for m in good])\
                .reshape(-1,1,2)
            dst_pts = np.float32([kp[m.trainIdx].pt for m in good])\
                .reshape(-1,1,2)

            matrix, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,
                                              5.0)
            matchesMask = mask.ravel().tolist()

            h, w = val["source"].shape
            pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                              [w - 1, 0]]).reshape(-1, 1, 2)

            try:
                # OpenCV sometimes throws assertion errors here. It should have
                # been fixed by updating opencv, but it's here just in case
                dst = np.int32(cv2.perspectiveTransform(pts, matrix))
            except cv2.error as e:
                print(e)
                continue

            drawim = None
            if draw:
                draw_params["matchesMask"] = matchesMask
                drawim = cv2.drawMatches(val["source"], val["kp"], img, kp,
                                         good, None, **draw_params)
                drawim = resize(drawim, int(drawim.shape[1] * 0.5),
                                int(drawim.shape[0] * 0.5))

            matched.append((val["name"], good, dst, matchesMask, drawim))

        return matched, kp, des
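
    # A minimal usage sketch. The SIFT wrapper's constructor and the query
    # image path are assumptions for illustration; they are not shown in
    # this snippet.
    #
    #   finder = SIFT(...)                      # instance owning sources/matcher
    #   query = cv2.imread('query.png', 0)      # hypothetical grayscale input
    #   matched, kp, des = finder.match(query, min_match=10, ratio=0.7, draw=True)
    #   for name, good, dst, matches_mask, drawim in matched:
    #       print(name, len(good))              # source name and good-match count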