Code example #1
from perception.tasks.TaskPerceiver import TaskPerceiver
from typing import Dict
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt

class TestAlgo(TaskPerceiver):
	def __init__(self):
		# Each debug slider is declared as name=((min, max), default).
		super().__init__(canny_low=((0, 255), 100), canny_high=((0, 255), 200))
		self.t = .1

	def analyze(self, frame: np.ndarray, debug: bool, slider_vals: Dict[str, int]):
		# Plot a decaying cosine whose phase tracks the canny_low slider;
		# the figure itself is returned as one of the debug outputs.
		fig = plt.figure()
		x = np.linspace(0.0, 5.0)
		y = np.cos(2 * np.pi * (x + slider_vals['canny_low'] * np.pi / 2)) * np.exp(-x * self.t)
		plt.plot(x, y, 'ko-')
		fig.canvas.draw()

		self.t *= 1.01
		gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
		edges = cv.Canny(frame, slider_vals['canny_low'], slider_vals['canny_high'])
		# Debug views: raw frame, grayscale, a 180-degree rotation,
		# Canny edges, vertically flipped edges, and the matplotlib figure.
		return frame, [frame,
					   gray,
					   cv.rotate(gray, cv.ROTATE_180),
					   edges,
					   cv.flip(edges, 0),
					   fig]

if __name__ == '__main__':
	from perception.vis.vis import run
	run(['webcam'], TestAlgo(), True)
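
For readers without the perception package on hand, the sketch below is a hypothetical stand-in for TaskPerceiver, inferred only from how the example above uses it (keyword slider specs of the form ((min, max), default) and an analyze hook); the real base class in perception.tasks.TaskPerceiver may differ.

from typing import Dict, Tuple

class TaskPerceiver:
    def __init__(self, **sliders: Tuple[Tuple[int, int], int]):
        # Each keyword argument declares a debug slider as ((min, max), default).
        self.sliders = sliders
        # Start every slider at its default value.
        self.slider_vals: Dict[str, int] = {name: spec[1] for name, spec in sliders.items()}

    def analyze(self, frame, debug: bool, slider_vals: Dict[str, int]):
        # Subclasses return (main_output, [debug outputs...]).
        raise NotImplementedError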
Code example #2
        area_cnts = []
        area_diff = []

        # Drop degenerate contours with zero area, then record each
        # remaining contour's area and its relative mismatch with its
        # bounding box (a proxy for how rectangular the contour is).
        cnt = [c for c in cnt if cv.contourArea(c) > 0]
        for c in cnt:
            area_cnt = cv.contourArea(c)
            area_cnts.append(area_cnt)
            x, y, w, h = cv.boundingRect(c)
            area_diff.append(abs((w * h - area_cnt) / area_cnt))

        rect1 = rect2 = None
        if len(area_diff) >= 2:
            # Indices of the (up to) three largest contours by area.
            largest_area_idx = [area_cnts.index(a) for a in sorted(area_cnts, reverse=True)[:min(3, len(cnt))]]
            # Among those, pick the two most rectangle-like contours.
            area_diff_copy = sorted([area_diff[i] for i in largest_area_idx])
            min_i1, min_i2 = area_diff.index(area_diff_copy[0]), area_diff.index(area_diff_copy[1])

            rect1 = cv.boundingRect(cnt[min_i1])
            rect2 = cv.boundingRect(cnt[min_i2])
            x1, y1, w1, h1 = rect1
            x2, y2, w2, h2 = rect2
            cv.rectangle(debug_filter, (x1, y1), (x1 + w1, y1 + h1), (0, 255, 0), 2)
            cv.rectangle(debug_filter, (x2, y2), (x2 + w2, y2 + h2), (0, 255, 0), 2)

        if debug:
            return (rect1, rect2), (frame, debug_filter)
        return (rect1, rect2)
        

if __name__ == '__main__':
    from perception.vis.vis import run
    run(['../../../data/GOPR1142.MP4'], GateSegmentationAlgoA(), False)
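
The snippet above enters mid-method: cnt and debug_filter must already exist when it runs. A minimal sketch of how they might be produced, assuming a standard grayscale-blur-Canny-findContours pipeline; the helper name and thresholds here are illustrative, not the repo's actual preprocessing.

import cv2 as cv
import numpy as np

def find_candidate_contours(frame: np.ndarray):
    # Hypothetical preprocessing: grayscale -> blur -> edges -> contours.
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    blurred = cv.GaussianBlur(gray, (5, 5), 0)
    edges = cv.Canny(blurred, 100, 200)
    # OpenCV 4.x returns (contours, hierarchy).
    cnt, _ = cv.findContours(edges, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    debug_filter = frame.copy()  # canvas the candidate rectangles are drawn onto
    return cnt, debug_filter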
Code example #3
    def dense_optical_flow(self, frame):
        # Compute Farneback dense optical flow between the previous
        # grayscale frame (self.prvs) and the current one.
        next_frame = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
        flow = cv.calcOpticalFlowFarneback(self.prvs, next_frame, None, 0.5, 3,
                                           15, 3, 5, 1.2, 0)
        mag, ang = cv.cartToPolar(flow[..., 0], flow[..., 1])
        mag = cv.normalize(mag, None, 0, 255, cv.NORM_MINMAX)
        # Optional visualization (disabled): encode angle as hue and
        # magnitude as value in an HSV image, convert to BGR, and imshow it.
        return next_frame, mag, ang

    def get_center(self, rect1, rect2, frame):
        # Estimate the gate center from the two bounding rectangles.
        x1, y1, w1, h1 = rect1
        x2, y2, w2, h2 = rect2
        center_x = (x1 + x2) // 2
        center_y = ((y1 + h1 // 2) + (y2 + h2 // 2)) // 2
        self.prvs, mag, ang = self.dense_optical_flow(frame)

        # Fall back to the smoothed center while the first 25 samples are
        # being collected, or when motion is small and the new estimate
        # stays close to the previous gate center.
        small_motion = np.mean(mag) < 40
        near_previous = (center_x - self.gate_center[0]) ** 2 + \
                        (center_y - self.gate_center[1]) ** 2 < 50
        if len(self.center_x_locs) < 25 or (small_motion and (not self.use_optical_flow or near_previous)):
            self.use_optical_flow = False
            return self.center_without_optical_flow(center_x, center_y)
        # Otherwise shift the previous gate center along the mean flow vector.
        self.use_optical_flow = True
        return (int(self.gate_center[0] + self.optical_flow_c * np.mean(mag * np.cos(ang))),
                int(self.gate_center[1] + self.optical_flow_c * np.mean(mag * np.sin(ang))))


if __name__ == '__main__':
    from perception.vis.vis import run
    run(['../../../data/GOPR1142.MP4'], GateCenterAlgo(), False)
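
This excerpt reads several attributes (self.prvs, self.center_x_locs, self.gate_center, self.optical_flow_c, self.use_optical_flow) that must be initialized elsewhere in GateCenterAlgo. The sketch below is a hypothetical initializer consistent with how those fields are used above; the class name, default values, and constructor signature are assumptions, not the repo's actual code.

import cv2 as cv
import numpy as np

class GateCenterStateSketch:
    # Hypothetical illustration only: mirrors the attributes the snippet
    # reads and writes, not the actual GateCenterAlgo.__init__.
    def __init__(self, first_frame: np.ndarray, optical_flow_c: float = 1.0):
        self.prvs = cv.cvtColor(first_frame, cv.COLOR_BGR2GRAY)  # previous gray frame
        self.center_x_locs = []                # history of accepted center-x samples
        self.gate_center = (0, 0)              # last published gate center (x, y)
        self.optical_flow_c = optical_flow_c   # scale from mean flow to pixels
        self.use_optical_flow = False          # True when flow drove the last estimate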