def resetValues():
    """Return a fresh set of tracker state values.

    NOTE(review): the original version only assigned *local* variables, so
    calling it had no effect whatsoever on the caller's state.  It now
    returns the reset values so callers can rebind their own state::

        (kalman2d0, measured_points0, measured0, delta,
         prev_estx, prev_esty, frames_processed, insideLoop) = resetValues()

    Call sites that ignore the return value behave exactly as before
    (i.e. nothing happens), so this change is backward compatible.
    """
    kalman2d0 = Kalman2D()      # fresh Kalman filter for the tracker
    measured_points0 = []       # history of raw (x, y) measurements
    measured0 = (0, 0)          # last raw measurement
    delta = 0                   # horizontal movement of the estimate
    prev_estx = 0               # previous estimated x
    prev_esty = 0               # previous estimated y
    frames_processed = 0        # frames consumed for background averaging
    insideLoop = False          # inner tracking-loop flag
    return (kalman2d0, measured_points0, measured0, delta,
            prev_estx, prev_esty, frames_processed, insideLoop)
def __init__(self, video_src):
    """Set up the tracker: optical-flow bookkeeping, Kalman filter and capture.

    Parameters
    ----------
    video_src : the video source identifier.
        NOTE(review): currently unused -- the capture path is hard-coded
        below; confirm whether it should be passed to cv2.VideoCapture.
    """
    self.track_len = 30            # max length of a feature track
    self.detect_interval = 1       # re-detect features every N frames
    self.tracks = []               # active feature tracks
    # self.cam = video.create_capture(video_src)
    self.frame_idx = 0             # running frame counter
    self.kf = Kalman2D()           # 2-D Kalman filter for the target position
    self.firsttime = True          # first-frame initialization flag
    # self.cam = cv2.VideoCapture('mouse_tracking.mp4')
    self.cam = cv2.VideoCapture('videos/mv2_001.avi')
    # Background subtractor used to isolate moving objects.
    self.fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows=False)
def main(): HUE_LOWERB = 40 HUE_UPPERB = 100 SAT_LOWERB = 50 SAT_UPPERB = 170 BRI_LOWERB = 50 BRI_UPPERB = 255 EROSION = 3 DILATION = 4 FOLLOW_WIDTH = 50 DRONE_SPEED = 10 / 100. CIRCLE_SPEED = 10 / 100. TURN_SPEED = 50 / 100. HFUZZ = 100 VFUZZ = 100 testing = False if len(sys.argv) > 1: if sys.argv[1] == 'testing': testing = True elif sys.argv[1] == 'control': control = True pygame.init() print "initialized" screen = pygame.display.set_mode((320, 240)) navdata_0 = None if not testing: drone = libardrone.ARDrone(True) drone.reset() navdata_0 = drone.navdata.copy() tracking_center = False k2d = Kalman2D(1, 0.0001, 0.1) estimated = False namedWindow("output", 1) namedWindow("threshold", 1) moveWindow("threshold", 1000, 0) namedWindow("control", WINDOW_NORMAL) resizeWindow("control", 1024, 300) moveWindow("control", 0, 1000) addControl("hue lower", HUE_LOWERB, 255) addControl("hue upper", HUE_UPPERB, 255) addControl("sat lower", SAT_LOWERB, 255) addControl("sat upper", SAT_UPPERB, 255) addControl("bri lower", BRI_LOWERB, 255) addControl( "bri upper", BRI_UPPERB, 255, ) addControl("erosion", EROSION, 10) addControl("dilation", DILATION, 10) addControl("dilation", DILATION, 10) addControl("follow width", FOLLOW_WIDTH, 100) addControl("drone speed", int(DRONE_SPEED * 100), 100) addControl("turn speed", int(TURN_SPEED * 100), 100) addControl("circle speed", int(CIRCLE_SPEED * 100), 100) if testing: capture = VideoCapture(0) while (not capture.isOpened()): print "not opened" running = True while running: HUE_LOWERB = getTrackbarPos("hue lower", "control") HUE_UPPERB = getTrackbarPos("hue upper", "control") SAT_LOWERB = getTrackbarPos("sat lower", "control") SAT_UPPERB = getTrackbarPos("sat upper", "control") BRI_LOWERB = getTrackbarPos("bri lower", "control") BRI_UPPERB = getTrackbarPos("bri upper", "control") EROSION = getTrackbarPos("erosion", "control") DILATION = getTrackbarPos("dilation", "control") FOLLOW_WIDTH = getTrackbarPos("follow width", "control") DRONE_SPEED = 
getTrackbarPos("drone speed", "control") / 100. CIRCLE_SPEED = getTrackbarPos("circle speed", "control") / 100. TURN_SPEED = getTrackbarPos("turn speed", "control") / 100. for event in pygame.event.get(): if event.type == pygame.QUIT: running = False elif event.type == pygame.KEYUP: drone.hover() elif event.type == pygame.KEYDOWN: if event.key == pygame.K_ESCAPE: drone.reset() running = False # takeoff / land elif event.key == pygame.K_RETURN: print("take off") drone.takeoff() elif event.key == pygame.K_SPACE: print("land") drone.land() # emergency elif event.key == pygame.K_BACKSPACE: drone.reset() # forward / backward elif event.key == pygame.K_w: drone.move_forward() elif event.key == pygame.K_s: drone.move_backward() # left / right elif event.key == pygame.K_a: drone.move_left() elif event.key == pygame.K_d: drone.move_right() # up / down elif event.key == pygame.K_UP: drone.move_up() elif event.key == pygame.K_DOWN: drone.move_down() # turn left / turn right elif event.key == pygame.K_LEFT: drone.turn_left() elif event.key == pygame.K_RIGHT: drone.turn_right() elif event.key == pygame.K_p: psi_i = drone.navdata['psi'] while (drone.navdata['psi'] - psi_i) % 360 < 90: drone.turn_right() if testing: ret, frame = capture.read() frame = flip(frame, 1) else: frame = drone.get_image() frame = cvtColor(frame, COLOR_RGB2BGR) if frame != None: processed_frame = cvtColor(frame, COLOR_BGR2HSV) processed_frame = inRange(processed_frame, (HUE_LOWERB, SAT_LOWERB, BRI_LOWERB), (HUE_UPPERB, SAT_UPPERB, BRI_UPPERB)) kernel = np.ones((5, 5)) processed_frame = GaussianBlur(processed_frame, (5, 5), 0, 0) processed_frame = erode(processed_frame, kernel, iterations=EROSION) processed_frame = dilate(processed_frame, kernel, iterations=DILATION) imshow("threshold", processed_frame) contours = findContours(processed_frame, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE, (0, 0))[0] center = (frame.shape[1] / 2, frame.shape[0] / 2) hat = (center[0] - 1, center[1] - 1, 2, 2) hat_distance = 90000 for 
contour in contours: (x, y, w, h) = boundingRect(contour) distance = np.linalg.norm( np.array((x + w / 2, y + h / 2)) - estimated) if distance < hat_distance: hat = (x, y, w, h) hat_distance = distance (x1, y1, w, h) = hat (x2, y2) = (x1 + w, y1 + h) k2d.update(x1 + w / 2, y1 + h / 2) estimated = [int(c) for c in k2d.getEstimate()] tracking_center = estimated circle(frame, (estimated[0], estimated[1]), 4, (0, 255, 0)) rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0)) action = "" if not (testing or control): drone.speed = DRONE_SPEED if (center[1] + VFUZZ < tracking_center[1]): action += "move down " drone.move_down() elif (center[1] - VFUZZ > tracking_center[1]): action += "move up " drone.move_up() elif (center[0] + HFUZZ < tracking_center[0]): action += "turn right " drone.speed = TURN_SPEED drone.turn_right() drone.speed = DRONE_SPEED elif (center[0] - HFUZZ > tracking_center[0]): action += "turn left " drone.speed = TURN_SPEED drone.turn_left() drone.speed = DRONE_SPEED elif w < FOLLOW_WIDTH and w > 25: action += "move forward " drone.move_forward() elif w > 2 * FOLLOW_WIDTH: action += "move backward " drone.move_backward() elif w > 25: action += "circle" drone.speed = CIRCLE_SPEED drone.move_right() drone.speed = DRONE_SPEED else: action += "hover" drone.turn_left() putText(frame, action, (0, 20), FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2) try: putText(frame, str(drone.navdata.get(0, dict())["battery"]), (0, 50), FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2) except KeyError: pass imshow("output", frame) else: print "frame is none" print("Shutting down...") drone.halt() print("Ok.")
def run(self):
    """Tracker-thread main loop.

    Consumes frames from ``self.frameQ``, feeds measured target positions
    into a Kalman filter, and asks the parent controller to pan left/right
    when the smoothed estimate crosses the 10%/90% screen-width triggers.
    Exits when the target position stops changing for ``self.timerDelay``
    seconds or when the user presses ESC.
    """
    print str(self.name) + ': Initializing the tracker thread.'
    # Initialize parameters for the Kalman filter.
    kalman2d0 = Kalman2D()
    measured_points0 = []       # raw measurement history
    measured0 = (0, 0)          # last accepted measurement
    delta = 0                   # horizontal motion of the Kalman estimate
    prev_estx = 0
    prev_esty = 0
    frames_processed = 0
    outsideLoop = True
    while outsideLoop:
        self.startTime = time.time()
        # Busy-wait (yielding the GIL) until a frame is available.
        while self.frameQ.empty():
            time.sleep(0)
        frame = self.getFrame()
        gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        avg_frame = np.float32(gray)
        # Warm up the background model with the first 15 frames.
        while (frames_processed < 15):
            if (not self.frameQ.empty()):
                frame = self.getFrame()
                track(frame, avg_frame)
                frames_processed += 1
        insideLoop = True
        # Pan triggers at 10% / 90% of the frame width.
        triggerWidthRight = frame.shape[1] * 0.9
        triggerWidthLeft = frame.shape[1] * 0.1
        centerx = int(frame.shape[1] / 2)
        centery = int(frame.shape[0] / 2)
        prev_x = centerx
        prev_y = centery
        while insideLoop:
            while self.frameQ.empty():
                time.sleep(0)
            frame = self.getFrame()
            # NOTE(review): '!=' on a numpy array is elementwise; this None
            # check relies on getFrame() returning None (not an array) when
            # empty -- confirm, or use 'is not None'.
            if (frame != None):
                track_img, x, y = track(frame, avg_frame)
                if (x == self.timerOldX and y == self.timerOldY):
                    #print "NOT different"
                    # Position unchanged: give up after timerDelay seconds.
                    deltaTime = time.time() - self.startTime
                    if deltaTime > self.timerDelay:
                        outsideLoop = False
                        insideLoop = False
                        break
                else:
                    self.startTime = time.time()
                    self.timerOldX = x
                    self.timerOldY = y
                # (-1, -1) means "no detection": reuse the previous point.
                # NOTE(review): '|' is bitwise -- works for these ints but
                # 'or' is the intended logical operator.
                if ((x != -1) | (y != -1)):
                    prev_x = x
                    prev_y = y
                    measured0 = (x, y)
                    drawCross(frame, (x, y), 0, 0, 255)
                else:
                    measured0 = (prev_x, prev_y)
                measured_points0.append(measured0)
                kalman2d0.update(measured0[0], measured0[1])
                estimated0 = [int(c) for c in kalman2d0.getEstimate()]
                delta = estimated0[0] - prev_estx   # estimate's x velocity sign
                prev_estx = estimated0[0]
                prev_esty = estimated0[1]
                drawCross(frame, estimated0, 255, 255, 255)
                # Target moving left AND already in the left trigger zone:
                # flush stale frames and ask the controller to pan left.
                if ((delta < 0) and (estimated0[0] < triggerWidthLeft)
                        and (prev_x < triggerWidthLeft)):
                    self.lock.acquire()
                    self.frameQ.queue.clear()
                    self.lock.release()
                    # NOTE(review): resetValues() assigns only its own locals,
                    # so this call does not actually reset this loop's state.
                    resetValues()
                    prev_x = centerx
                    prev_y = centery
                    self.parent.moveLeft()
                # Symmetric case for the right trigger zone.
                if ((delta > 0) and (estimated0[0] > triggerWidthRight)
                        and (prev_x > triggerWidthRight)):
                    self.lock.acquire()
                    self.frameQ.queue.clear()
                    self.lock.release()
                    resetValues()
                    prev_x = centerx
                    prev_y = centery
                    self.parent.moveRight()
            cv2.imshow("frame", frame)
            key = cv2.waitKey(1)
            if key == 27:  # ESC: stop tracking entirely
                cv2.destroyWindow("0")
                insideLoop = False
                outsideLoop = False
                break
    self.parent.finish(
    )  # Tell the Controller Thread that tracking is finished.
    print str(self.name) + ": DONE"
def main():
    """Control an AR.Drone with a Myo armband and track a person on camera.

    Modes driven by armband gestures after calibration (press 'c'):
    arm poses arm/cancel "tracking" and "controlled" states; in controlled
    mode yaw/pitch/roll deltas steer the drone, and hand poses trigger
    actions.  When tracking, a Haar cascade finds the person and the drone
    turns/moves to keep them at the remembered tracking center.
    """
    testing = False
    if len(sys.argv) > 1 and sys.argv[1] == 'testing':
        testing = True
    pygame.init()
    drone = libardrone.ARDrone(True)
    drone.reset()
    navdata_0 = drone.navdata.copy()
    # State-machine flags for gesture arming/engagement.
    calibrated = False
    control_start = False
    controlled = False
    tracking_start = False
    tracking = False
    tracking_center = False
    k2d = Kalman2D(1, 0.0001, 0.1)
    estimated = False  # replaced by an [x, y] estimate after the first frame
    armband = Armband.Armband()
    # Reference orientation for computing deltas (re-captured on 'c').
    yaw_0 = armband.collector.yaw_w
    roll_0 = armband.collector.roll_w
    pitch_0 = armband.collector.pitch_w
    clock = pygame.time.Clock()
    namedWindow("output", 1)
    person_cascade = CascadeClassifier(CASCADE_FILE)
    if testing:
        capture = VideoCapture(0)
        while (not capture.isOpened()):
            print "not opened"
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            elif event.type == pygame.KEYUP:
                drone.hover()
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    drone.reset()
                    running = False
                # takeoff / land
                elif event.key == pygame.K_RETURN:
                    print("return")
                    drone.takeoff()
                elif event.key == pygame.K_SPACE:
                    print("space")
                    drone.land()
                # emergency
                elif event.key == pygame.K_BACKSPACE:
                    drone.reset()
                # forward / backward
                elif event.key == pygame.K_w:
                    drone.move_forward()
                elif event.key == pygame.K_s:
                    drone.move_backward()
                # left / right
                elif event.key == pygame.K_a:
                    drone.move_left()
                elif event.key == pygame.K_d:
                    drone.move_right()
                # up / down
                elif event.key == pygame.K_UP:
                    drone.move_up()
                elif event.key == pygame.K_DOWN:
                    drone.move_down()
                # turn left / turn right
                elif event.key == pygame.K_LEFT:
                    drone.turn_left()
                elif event.key == pygame.K_RIGHT:
                    drone.turn_right()
                elif event.key == pygame.K_p:
                    # Rotate roughly 90 degrees clockwise via the compass.
                    psi_i = drone.navdata['psi']
                    while (drone.navdata['psi'] - psi_i) % 360 < 90:
                        drone.turn_right()
                elif event.key == pygame.K_t:
                    tracking = not tracking
                elif event.key == pygame.K_c:
                    # Capture the current armband pose as the zero reference.
                    yaw_0 = armband.collector.yaw_w
                    roll_0 = armband.collector.roll_w
                    pitch_0 = armband.collector.pitch_w
                    calibrated = True
                    print "calibrated!"
                # speed
                elif event.key == pygame.K_1:
                    drone.speed = 0.1
                elif event.key == pygame.K_2:
                    drone.speed = 0.2
                elif event.key == pygame.K_3:
                    drone.speed = 0.3
                elif event.key == pygame.K_4:
                    drone.speed = 0.4
                elif event.key == pygame.K_5:
                    drone.speed = 0.5
                elif event.key == pygame.K_6:
                    drone.speed = 0.6
                elif event.key == pygame.K_7:
                    drone.speed = 0.7
                elif event.key == pygame.K_8:
                    drone.speed = 0.8
                elif event.key == pygame.K_9:
                    drone.speed = 0.9
                elif event.key == pygame.K_0:
                    drone.speed = 1.0
        # MUST WEAR armband with light to outside
        if (not calibrated):
            # No reference pose yet: just show video and wait.
            frame = drone.get_image()
            imshow("output", frame)
            continue
        # Orientation deltas relative to the calibration reference.
        yaw_d = armband.collector.yaw_w - yaw_0
        pitch_d = armband.collector.pitch_w - pitch_0
        roll_d = armband.collector.roll_w - roll_0
        if (roll_d > 270):
            roll_d = roll_d - 360  # unwrap across the 360-degree boundary
        # Arm "tracking" when the arm is raised and rolled out.
        if (not controlled and not tracking and pitch_d > 40 and roll_d > 60):
            tracking_start = True
            armband.collector.myo.vibrate('short')
        if (tracking_start and not (pitch_d > 40 or roll_d > 60)):
            tracking_start = False
        # Confirm tracking with a fist.
        if (tracking_start and armband.collector.current_pose == pose_t.fist):
            tracking_start = False
            tracking = True
            armband.collector.myo.vibrate('long')
        # Arm "controlled" mode with a level, rolled-out arm.
        if (not controlled and roll_d > 60
                and not (pitch_d > 10 or pitch_d < -10)):
            control_start = True
            armband.collector.myo.vibrate('short')
        if (control_start and (pitch_d > 10 or pitch_d < -10)):
            control_start = False
        # Engage controlled mode by rolling back level.
        if (control_start and roll_d < 10):
            yaw_0 = armband.collector.yaw_w
            roll_0 = armband.collector.roll_w
            controlled = True
            control_start = False
            tracking = False
            armband.collector.myo.vibrate('long')
        #print yaw_d, pitch_d, roll_d
        if (controlled):
            # Direct steering from armband orientation deltas.
            if (yaw_d < -20):
                print "turn right"
                drone.turn_right()
            if (yaw_d > 20):
                print "turn left"
                drone.turn_left()
            if (pitch_d < -15):
                print "move backward"
                drone.move_backward()
            if (pitch_d > 20):
                print "move forward"
                drone.move_forward()
            if (roll_d > 30):
                print "move right"
                drone.move_right()
            if (roll_d < -30):
                print "move left"
                drone.move_left()
            if (pitch_d > 30):
                # Raising the arm high disengages controlled mode.
                controlled = False
            if (armband.collector.current_pose == pose_t.double_tap):
                print "shoot"
                urllib2.urlopen("http://droneduino.local/arduino/shoot/lj")
                armband.collector.current_pose = pose_t.rest
            if (armband.collector.current_pose == pose_t.fingers_spread):
                print "rise"
                armband.collector.current_pose = pose_t.rest
            if (armband.collector.current_pose == pose_t.fist):
                print "descend"
                armband.collector.current_pose = pose_t.rest
            frame = drone.get_image()
            imshow("output", frame)
            continue
        frame = drone.get_image()
        if testing:
            ret, frame = capture.read()
            frame = flip(frame, 1)
        # NOTE(review): '!=' on a numpy array is elementwise; 'is not None'
        # is the safe form of this check.
        if frame != None:
            if not estimated:
                # Seed the estimate at the bottom-right corner coordinates.
                estimated = np.array((frame.shape[1], frame.shape[0]))
            b, g, gray = split(frame)  # third channel used as "gray" input
            people = person_cascade.detectMultiScale(gray, 1.05, 3, 0,
                                                     (150, 150))
            best_candidate = (0, 0, 0, 0)
            best_distance = 9000
            for (x, y, w, h) in people:
                rectangle(frame, (x, y), (x + w, y + h), 0)
                # Choose the detection closest to the previous estimate.
                distance = np.linalg.norm(
                    np.array((x + w / 2, y + h / 2)) - estimated)
                if distance < best_distance:
                    best_candidate = (x, y, w, h)
                    best_distance = distance
            (x, y, w, h) = best_candidate
            k2d.update(x + w / 2, y + h / 2)
            estimated = [int(c) for c in k2d.getEstimate()]
            circle(frame, (estimated[0], estimated[1]), 4, 1234)
            rectangle(frame, (x, y), (x + w, y + h), 1234)
            if tracking:
                if not tracking_center:
                    # Remember the first detected center as the setpoint.
                    tracking_center = [x + w / 2, y + h / 2]
                circle(frame, (tracking_center[0], tracking_center[1]), 15,
                       0x00FF00)
                circle(frame, (x + w / 2, y + w / 2), 10, 0xFF0000)
                print tracking_center, x + w / 2, y + h / 2,
                #if (y + h/2 < frame.shape[0]/2):
                if False:
                    print "unable to track"
                else:
                    FUZZ = 100  # dead-zone around the setpoint (pixels)
                    if (y + h / 2 + FUZZ < tracking_center[1]):
                        print "move forward"
                        drone.move_forward()
                    elif (y + h / 2 - FUZZ > tracking_center[1]):
                        print "move backward"
                        drone.move_backward()
                    if (x + w / 2 + FUZZ < tracking_center[0]):
                        print "move left"
                        drone.turn_left()
                    elif (x + w / 2 - FUZZ > tracking_center[0]):
                        print "move right"
                        drone.turn_right()
            imshow("output", frame)
        else:
            print "frame is none"
        #t = getTickCount() - t
        #print "detection time = %gms" % (t/(getTickFrequency()*1000.))
    print("Shutting down...")
    drone.halt()
    print("Ok.")
# Loop until mouse inside window while True: if mouse_info.x > 0 and mouse_info.y > 0: break cv2.imshow(WINDOW_NAME, img) if cv2.waitKey(1) == 27: exit(0) # These will get the trajectories for mouse location and Kalman estiamte measured_points = [] kalman_points = [] # Create a new Kalman2D filter and initialize it with starting mouse location kalman2d = Kalman2D() # Loop till user hits escape while True: # Serve up a fresh image img = newImage() # Grab current mouse position and add it to the trajectory measured = (mouse_info.x, mouse_info.y) measured_points.append(measured) # Update the Kalman filter with the mouse point kalman2d.update(mouse_info.x, mouse_info.y) # Get the current Kalman estimate and add it to the trajectory
def main():
    """Track a green-hued object in the AR.Drone feed (movement disabled).

    Green hue bounds are tunable via trackbars.  The Kalman-smoothed target
    is drawn on "output"; the actual drone movement commands in the
    follow logic are commented out, so this only visualizes tracking.
    """
    GREENHUE_LOWERB = 50
    GREENHUE_UPPERB = 100
    testing = False
    if len(sys.argv) > 1 and sys.argv[1] == 'testing':
        testing = True
    pygame.init()
    print "initialized"
    screen = pygame.display.set_mode((320, 240))
    navdata_0 = None
    if not testing:
        drone = libardrone.ARDrone(True)
        drone.reset()
        navdata_0 = drone.navdata.copy()
    tracking_center = False
    k2d = Kalman2D(1, 0.0001, 0.1)
    estimated = False  # becomes the [x, y] Kalman estimate after first frame
    clock = pygame.time.Clock()
    namedWindow("threshold", 1)
    namedWindow("control", 1)
    namedWindow("output", 1)
    createTrackbar("green hue lower", "control", GREENHUE_LOWERB, 255,
                   report_lower_change)
    createTrackbar("green hue upper", "control", GREENHUE_UPPERB, 255,
                   report_upper_change)
    if testing:
        capture = VideoCapture(0)
        while (not capture.isOpened()):
            print "not opened"
    running = True
    while running:
        # Refresh thresholds from the trackbars every frame.
        GREENHUE_LOWERB = getTrackbarPos("green hue lower", "control")
        GREENHUE_UPPERB = getTrackbarPos("green hue upper", "control")
        for event in pygame.event.get():
            # NOTE(review): these handlers use 'drone', which is unbound in
            # testing mode -- key presses would raise NameError there.
            if event.type == pygame.QUIT:
                running = False
            elif event.type == pygame.KEYUP:
                drone.hover()
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    drone.reset()
                    running = False
                # takeoff / land
                elif event.key == pygame.K_RETURN:
                    print("return")
                    drone.takeoff()
                elif event.key == pygame.K_SPACE:
                    print("space")
                    drone.land()
                # emergency
                elif event.key == pygame.K_BACKSPACE:
                    drone.reset()
                # forward / backward
                elif event.key == pygame.K_w:
                    drone.move_forward()
                elif event.key == pygame.K_s:
                    drone.move_backward()
                # left / right
                elif event.key == pygame.K_a:
                    drone.move_left()
                elif event.key == pygame.K_d:
                    drone.move_right()
                # up / down
                elif event.key == pygame.K_UP:
                    drone.move_up()
                elif event.key == pygame.K_DOWN:
                    drone.move_down()
                # turn left / turn right
                elif event.key == pygame.K_LEFT:
                    drone.turn_left()
                elif event.key == pygame.K_RIGHT:
                    drone.turn_right()
                elif event.key == pygame.K_p:
                    # Rotate roughly 90 degrees clockwise via the compass.
                    psi_i = drone.navdata['psi']
                    while (drone.navdata['psi'] - psi_i) % 360 < 90:
                        drone.turn_right()
                elif event.key == pygame.K_1:
                    drone.speed = 0.1
                elif event.key == pygame.K_2:
                    drone.speed = 0.2
                elif event.key == pygame.K_3:
                    drone.speed = 0.3
                elif event.key == pygame.K_4:
                    drone.speed = 0.4
                elif event.key == pygame.K_5:
                    drone.speed = 0.5
                elif event.key == pygame.K_6:
                    drone.speed = 0.6
                elif event.key == pygame.K_7:
                    drone.speed = 0.7
                elif event.key == pygame.K_8:
                    drone.speed = 0.8
                elif event.key == pygame.K_9:
                    drone.speed = 0.9
                elif event.key == pygame.K_0:
                    drone.speed = 1.0
        if testing:
            ret, frame = capture.read()
            frame = flip(frame, 1)
        else:
            frame = drone.get_image()
            frame = cvtColor(frame, COLOR_RGB2BGR)
        # NOTE(review): 'is not None' would be the safe form of this check.
        if frame != None:
            processed_frame = cvtColor(frame, COLOR_BGR2HSV)
            processed_frame = inRange(processed_frame,
                                      (GREENHUE_LOWERB, 25, 25),
                                      (GREENHUE_UPPERB, 190, 190))
            kernel = np.ones((5, 5))
            #processed_frame = erode(processed_frame, kernel, iterations=2)
            #processed_frame = dilate(processed_frame, kernel, iterations=2)
            imshow("threshold", processed_frame)
            contours = findContours(processed_frame, RETR_EXTERNAL,
                                    CHAIN_APPROX_SIMPLE, (0, 0))[0]
            hat = (0, 0, 0, 0)
            hat_distance = 90000
            for contour in contours:
                (x, y, w, h) = boundingRect(contour)
                # Prefer the contour closest to the previous Kalman estimate.
                distance = np.linalg.norm(
                    np.array((x + w / 2, y + h / 2)) - estimated)
                if distance < hat_distance:
                    hat = (x, y, w, h)
                    hat_distance = distance
            # NOTE(review): dead code -- 'hat' is initialized to a tuple
            # above and never set to None, so this branch never runs.
            if hat is None:
                continue
            (x1, y1, w, h) = hat
            (x2, y2) = (x1 + w, y1 + h)
            k2d.update(x1 + w / 2, y1 + h / 2)
            estimated = [int(c) for c in k2d.getEstimate()]
            tracking_center = ((x1 + x2) / 2, (y1 + y2) / 2)
            circle(frame, (estimated[0], estimated[1]), 4, 1234)
            rectangle(frame, (x1, y1), (x2, y2), 0xFF0000)
            if not testing:
                center = (frame.shape[1] / 2, frame.shape[0] / 2)
                FUZZ = 100  # dead-zone around the image center (pixels)
                # Movement commands are deliberately commented out: this
                # build only prints/visualizes what it would do.
                if (center[1] + FUZZ < tracking_center[1]):
                    #print "move forward"
                    #drone.move_forward()
                    pass
                elif (center[1] - FUZZ > tracking_center[1]):
                    #print "move backward"
                    #drone.move_backward()
                    pass
                if (center[0] + FUZZ < tracking_center[0]):
                    #print "move right"
                    #drone.turn_right()
                    pass
                elif (center[0] - FUZZ > tracking_center[0]):
                    #print "move left"
                    #drone.turn_left()
                    pass
            imshow("output", frame)
        else:
            print "frame is none"
    print("Shutting down...")
    # NOTE(review): 'drone' is unbound in testing mode -- this halt() would
    # raise NameError there; confirm testing exits via ESC only.
    drone.halt()
    print("Ok.")
def main():
    """PiCamera CamShift tracker with Kalman smoothing and distance warning.

    Tracks a user-selected ROI (4 mouse clicks after pressing 'i') with
    CamShift, smooths the center with a Kalman filter, periodically sends
    the zero-padded center X over serial, and measures the distance from a
    detected reference contour to the tracked center, warning when close.

    NOTE(review): this block was reconstructed from whitespace-mangled
    source; the nesting of the contour-measurement section is inferred
    (it depends on ``pts`` from the CamShift branch) -- verify against the
    original layout.
    """
    lower = [17, 15, 100]
    upper = [50, 56, 200]
    lower = np.array(lower, dtype = "uint8")
    upper = np.array(upper, dtype = "uint8")
    global frame, roiPts, inputMode, roiBoxWidth, roiBoxHeight
    cnt = 0  # counts frames before applying the Kalman-shifted roiBox
    centerX = 0
    centerY = 0
    toggle = True   # toggles imshow on/off ('r' key)
    flag = False    # in-target-box flag
    kalman2d = Kalman2D()
    cv2.namedWindow("frame")
    cv2.setMouseCallback("frame",selectROI)
    # CamShift termination criteria.
    termination = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,10,3)
    roiBox = None
    time2 = datetime.now()  # <<<check time>>>
    serial_flag_bit = 1     # send-data flag for the serial link
    delta = timedelta(seconds = 1)
    # Use capture_continuous for streaming frames.
    with picamera.PiCamera() as camera:
        stream = picamera.array.PiRGBArray(camera)
        camera.resolution = (Width,Height)  # set the resolution
        time3 = datetime.now()
        try:
            # Grab frames continuously from the camera.
            for foo in enumerate(camera.capture_continuous(stream,'bgr',use_video_port = True)):
                time1 = datetime.now()
                # print(time1 - time2)  # cycle period per loop iteration
                time2 = time1
                frame = stream.array  # store the image array
                # Color mask + edge map used for the reference contour.
                mask = cv2.inRange(frame, lower, upper)
                output = cv2.bitwise_and(frame, frame, mask = mask)
                gray = cv2.cvtColor(output, cv2.COLOR_BGR2GRAY)
                gray = cv2.GaussianBlur(gray, (7, 7), 0)
                edged = cv2.Canny(gray, 50, 100)
                edged = cv2.dilate(edged, None, iterations=1)
                edged = cv2.erode(edged, None, iterations=1)
                stream.seek(0)      # reset the stream for the next iteration
                stream.truncate()
                if roiBox is not None:  # ROI has been selected
                    # Convert to HSV and back-project the ROI histogram.
                    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
                    # print ('hsv: ', hsv)
                    backProj = cv2.calcBackProject([hsv],[0],roiHist, [0,180], 1)
                    # print ('backproj', backProj)
                    if roiBoxWidth > 0 and roiBoxHeight > 0:  # CamShift still valid
                        # r is the rotated rect; roiBox is (x, y, w, h).
                        (r,roiBox) = cv2.CamShift(backProj, roiBox, termination)
                        roiBoxWidth = roiBox[2]
                        roiBoxHeight = roiBox[3]
                        serial_flag_bit = 1
                    else :
                        # Re-initialize roiBox to the whole frame when
                        # CamShift collapsed to an empty box.
                        # print "roiBox init!!!!!!!!!!!!!!!!"
                        tl[0] = 1
                        tl[1] = 1
                        br[0] = Width -1
                        br[1] = Height -1
                        roiBox = (tl[0], tl[1], br[0], br[1])
                        (r,roiBox) = cv2.CamShift(backProj, roiBox, termination)
                        roiBoxWidth = roiBox[2]
                        roiBoxHeight = roiBox[3]
                        serial_flag_bit = 0
                        time3 = datetime.now()
                    # Transform r to pts and draw the box with polylines.
                    pts = np.int0(cv2.cv.BoxPoints(r))
                    cv2.polylines(frame,[pts],True, (0,255,0),2)
                    # Compute the center x,y of the tracked box.
                    centerX = (pts[0][0] + pts[2][0])/2
                    print ('Center X', centerX)
                    centerY = (pts[0][1] + pts[2][1])/2
                    CenterInfo.x = centerX
                    CenterInfo.y = centerY
                    # Zero-pad the center coordinates to 3 digits.
                    # NOTE(review): 'x / 10 == 0' relies on Python 2 integer
                    # division (true only for x < 10).
                    if CenterInfo.x / 10 == 0:
                        tempCenterX = '00' + str(CenterInfo.x)
                    elif CenterInfo.x / 100 == 0:
                        tempCenterX = '0' + str(CenterInfo.x)
                    else:
                        tempCenterX = str(CenterInfo.x)
                    if CenterInfo.y / 10 == 0:
                        tempCenterY = '00' + str(CenterInfo.y)
                    elif CenterInfo.y / 100 == 0:
                        tempCenterY = '0' + str(CenterInfo.y)
                    else:
                        tempCenterY = str(CenterInfo.y)
                    # centerData = str(int(centerX))
                    # print ("Center Data", centerData)
                    # Throttle serial sends to once per second.
                    if datetime.now() > time3 + delta :
                        if serial_flag_bit == 1:
                            ser.write(tempCenterX )
                            # print ser.write(tempCenterX)
                    # Update the Kalman filter with the measured center.
                    kalman2d.update(CenterInfo.x, CenterInfo.y)
                    estimated = [int (c) for c in kalman2d.getEstimate()]
                    estimatedX = estimated[0]
                    print ('Estimasi x', estimatedX)
                    estimatedY = estimated[1]
                    # Delta between estimate and raw measurement.
                    deltaX = estimatedX - centerX
                    deltaY = estimatedY - centerY
                    # Apply the Kalman-shifted roiBox every other frame.
                    if cnt > 1:
                        roiBox = (roiBox[0]+deltaX, roiBox[1]+deltaY, br[0], br[1])
                        cnt = 0
                    #cv2.circle(frame,(estimatedX,estimatedY), 4, (0, 255, 255),2)
                    #if wl < centerX and wr > centerX and centerY < hb and centerY > ht :
                    #cv2.circle(frame,(centerX,centerY), 4, (255,0,0),2)
                    #flag = False
                    #else :
                    #cv2.circle(frame,(centerX,centerY), 4, (0,255,0),2)
                    #flag = True
                    cnt = cnt+1  # frame counter for the Kalman box update
                    #cv2.circle(frame,(roiBox[0],roiBox[1]), 4, (0,0,255),2)
                    # Draw the target box and frame center marker.
                    cv2.circle(frame,(Width/2,Height/2) , 4, (255,255,255),2)
                    cv2.polylines(frame, np.int32([targetBox]), 1, (255,255,255))
                    # print(len(cnts))
                    colors = ((0, 0, 255), (240, 0, 159), (0, 165, 255),
                              (255, 255, 0), (255, 0, 255))
                    # refObj = None
                    # find contours in the edge map
                    cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                                            cv2.CHAIN_APPROX_SIMPLE)
                    # print("cnts1",cnts)
                    if not cnts[0] == []:
                        # findContours returns differently in cv2 vs cv3.
                        cnts = cnts[0] if imutils.is_cv2() else cnts[1]
                        # print("cnts2",cnts)
                        # Sort contours left-to-right before measuring.
                        (cnts, _) = contours.sort_contours(cnts)
                        for c in cnts:
                            # Ignore contours that are too small.
                            if cv2.contourArea(c) < 100:
                                continue
                            # Rotated bounding box of the contour.
                            box = cv2.minAreaRect(c)
                            box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
                            box = np.array(box, dtype="int")
                            # Order corners tl, tr, br, bl and draw it.
                            box = perspective.order_points(box)
                            # Center of the bounding box.
                            cX = np.average(box[:, 0])
                            cY = np.average(box[:, 1])
                            (tl, tr, br, bl) = box
                            (tlblX, tlblY) = midpoint(tl, bl)
                            (trbrX, trbrY) = midpoint(tr, br)
                            # Pixel height of the reference object; D / 3.5
                            # is the pixels-per-unit calibration.
                            D = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))
                            refObj = (box, (cX, cY), D / 3.5)
                            # print("refObj",refObj)
                            cv2.drawContours(frame, [box.astype("int")], -1,
                                             (0, 255, 0), 2)
                            refCoords = np.vstack([refObj[0], refObj[1]])
                            objCoords = np.vstack([pts, (centerX, centerY)])
                            # print("refCoords", refCoords)
                            # print("objCoords",objCoords)
                            # for ((xA, yA), (xB, yB), color) in zip(refCoords, objCoords, colors):
                            # Only the last point pair (the centers) is used.
                            xA, yA = refCoords[-1]
                            xB, yB = objCoords[-1]
                            color = colors[-1]
                            # Mark both centers and connect them.
                            cv2.circle(frame, (int(xA), int(yA)), 5, color, -1)
                            cv2.circle(frame, (int(xB), int(yB)), 5, color, -1)
                            cv2.line(frame, (int(xA), int(yA)),
                                     (int(xB), int(yB)), color, 2)
                            # Convert the pixel distance to physical units.
                            D = dist.euclidean((xA, yA), (xB, yB)) / refObj[2]
                            hasil = D/29.5276
                            print("Distance", hasil)
                            if D <= 29.5276:
                                print("Anda Mendekati Area Berbahaya")
                            (mX, mY) = midpoint((xA, yA), (xB, yB))
                            # NOTE(review): draws on 'orig', which only exists
                            # after the first 'i' ROI selection -- verify.
                            cv2.putText(orig, "{:.1f}in".format(D),
                                        (int(mX), int(mY - 10)),cv2.FONT_HERSHEY_SIMPLEX,
                                        0.55, color, 2)
                # Show or skip the frame display ('r' toggles it).
                #cnts = []
                if toggle is True:
                    cv2.imshow("frame",frame)  # if you want speed up, delete this line
                key=cv2.waitKey(1) & 0xFF
                # 'i' enters ROI-selection mode.
                if key == ord("i") and len(roiPts) < 4:
                    inputMode = True
                    orig = frame.copy()
                    # Wait for the 4th click.
                    while len(roiPts) < 4:
                        cv2.imshow("frame",frame)
                        cv2.waitKey(0)
                    # Derive top-left / bottom-right from the 4 clicks.
                    roiPts = np.array(roiPts)
                    s = roiPts.sum(axis=1)
                    tl = roiPts[np.argmin(s)]
                    br = roiPts[np.argmax(s)]
                    # Build the color histogram from the ROI.
                    roi = orig[tl[1]:br[1], tl[0]:br[0]]
                    roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
                    # if ser.isOpen():
                    #     read_data = ser.read()
                    #     #response = ser.readline()
                    #     print "Data received : " + read_data
                    # else:
                    #     print "Can not open serial port"
                    roiHist = cv2.calcHist([roi],[0],None,[16],[0,180])
                    roiHist = cv2.normalize(roiHist, roiHist,0,255,cv2.NORM_MINMAX)
                    roiBox = (tl[0], tl[1], br[0], br[1])
                    # Record the roiBox width and height.
                    roiBoxWidth = roiBox[2]
                    roiBoxHeight = roiBox[3]
                    # Compute the initial center x,y.
                    CenterInfo.x = tl[0]+br[0]/2
                    CenterInfo.y = tl[1]+br[1]/2
                    # Seed the Kalman filter and set the first roiBox.
                    # NOTE(review): this immediately overwrites the computed
                    # center with (centerX, centerY), which is (0, 0) before
                    # any CamShift iteration -- looks unintentional; verify.
                    CenterInfo.x = centerX
                    CenterInfo.y = centerY
                    kalman2d.update(CenterInfo.x, CenterInfo.y)
                    estimated = [int (c) for c in kalman2d.getEstimate()]
                    print kalman2d
                    print estimated
                    estimatedX = estimated[0]
                    estimatedY = estimated[1]
                    # Calculate delta x,y.
                    deltaX = estimatedX - centerX
                    print ('Delta x2', deltaX)
                    deltaY = estimatedY - centerY
                    # Set the first roiBox.
                    roiBox = (tl[0]+deltaX, tl[1]+deltaY, br[0], br[1])
                # 'r' toggles imshow.
                elif key == ord("r"):
                    toggle = not toggle
                # 'q' quits.
                elif key == ord("q"):
                    break
        finally:
            cv2.destroyAllWindows()
def main():
    """CamShift object tracker over webcam or a video file.

    Press 'i' then click 4 points to select the ROI; CamShift then tracks
    it, a Kalman filter smooths the center, and the ROI box is nudged by
    the estimate's delta every other frame.  'q' quits.
    """
    # construct the argument parse and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-v", "--video", help="path to the (optional) video file")
    args = vars(ap.parse_args())
    # grab the reference to the current frame, list of ROI
    # points and whether or not it is ROI selection mode
    global frame, roiPts, inputMode
    cnt = 0         # count for new roiBox from kalmanfilter
    centerX = 0
    centerY = 0
    toggle = True   # toggle for imshow
    flag = False    # flag for moving
    kalman2d = Kalman2D()  # Create new Kalman filter and initialize
    # if the video path was not supplied, grab the reference to the
    # camera
    if not args.get("video", False):
        camera = cv2.VideoCapture(0)
    # otherwise, load the video
    else:
        camera = cv2.VideoCapture(args["video"])
    # setup the mouse callback
    cv2.namedWindow("frame")
    cv2.setMouseCallback("frame", selectROI)
    # initialize the termination criteria for cam shift, indicating
    # a maximum of ten iterations or movement by a least one pixel
    # along with the bounding box of the ROI
    termination = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
    roiBox = None
    # keep looping over the frames
    while True:
        # grab the current frame
        (grabbed, frame) = camera.read()
        # check to see if we have reached the end of the
        # video
        if not grabbed:
            break
        # if the see if the ROI has been computed
        if roiBox is not None:
            # convert the current frame to the HSV color space
            # and perform mean shift
            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            backProj = cv2.calcBackProject([hsv], [0], roiHist, [0, 180], 1)
            # apply cam shift to the back projection, convert the
            # points to a bounding box, and then draw them
            (r, roiBox) = cv2.CamShift(backProj, roiBox, termination)
            pts = np.int0(cv2.cv.BoxPoints(r))
            cv2.polylines(frame, [pts], True, (0, 255, 0), 2)
            # Calculate center x,y (midpoint of two opposite corners).
            centerX = (pts[0][0] + pts[2][0]) / 2
            centerY = (pts[0][1] + pts[2][1]) / 2
            # Update x,y to kalman filter and get estimated x,y
            CenterInfo.x = centerX
            CenterInfo.y = centerY
            # Zero-pad the coordinates to 3 digits for the Arduino message.
            # NOTE(review): 'x / 10 == 0' relies on Python 2 integer
            # division (true only for x < 10).
            if CenterInfo.x / 10 == 0:
                tempCenterX = '00' + str(CenterInfo.x)
            elif CenterInfo.x / 100 == 0:
                tempCenterX = '0' + str(CenterInfo.x)
            else:
                tempCenterX = str(CenterInfo.x)
            if CenterInfo.y / 10 == 0:
                tempCenterY = '00' + str(CenterInfo.y)
            elif CenterInfo.y / 100 == 0:
                tempCenterY = '0' + str(CenterInfo.y)
            else:
                tempCenterY = str(CenterInfo.y)
            centerData = str(int(flag)) + tempCenterX + tempCenterY
            #print centerData
            # Update Kalman with the measured center.
            kalman2d.update(CenterInfo.x, CenterInfo.y)
            estimated = [int(c) for c in kalman2d.getEstimate()]
            estimatedX = estimated[0]
            estimatedY = estimated[1]
            # Calculate delta x,y between estimate and measurement.
            deltaX = estimatedX - centerX
            deltaY = estimatedY - centerY
            # Apply new roiBox from kalmanfilter every other frame.
            # NOTE(review): 'br' is only bound after the first 'i'
            # selection; this relies on the ROI having been chosen.
            if cnt > 1:
                roiBox = (roiBox[0] + deltaX, roiBox[1] + deltaY, br[0], br[1])
                cnt = 0
            #cv2.circle(frame,(estimatedX,estimatedY), 4, (0, 255, 255),2)
            # Change color when the target is in the target box.
            # NOTE(review): wl/wr/hb/ht, Width/Height and targetBox are
            # module-level values defined outside this chunk.
            if wl < centerX and wr > centerX and centerY < hb and centerY > ht:
                cv2.circle(frame, (centerX, centerY), 4, (255, 0, 0), 2)
                flag = False
            else:
                cv2.circle(frame, (centerX, centerY), 4, (0, 255, 0), 2)
                flag = True
            cnt = cnt + 1  # count for apply new box from kalman filter
            #cv2.circle(frame,(roiBox[0],roiBox[1]), 4, (0,0,255),2)
            # Draw target box and frame-center marker.
            cv2.circle(frame, (Width / 2, Height / 2), 4, (255, 255, 255), 2)
            cv2.polylines(frame, np.int32([targetBox]), 1, (255, 255, 255))
        # show the frame and record if the user presses a key
        cv2.imshow("frame", frame)
        key = cv2.waitKey(1) & 0xFF
        # handle if the 'i' key is pressed, then go into ROI
        # selection mode
        if key == ord("i") and len(roiPts) < 4:
            # indicate that we are in input mode and clone the
            # frame
            inputMode = True
            orig = frame.copy()
            # keep looping until 4 reference ROI points have
            # been selected; press any key to exit ROI selction
            # mode once 4 points have been selected
            while len(roiPts) < 4:
                cv2.imshow("frame", frame)
                cv2.waitKey(0)
            # determine the top-left and bottom-right points
            roiPts = np.array(roiPts)
            s = roiPts.sum(axis=1)
            tl = roiPts[np.argmin(s)]
            br = roiPts[np.argmax(s)]
            # grab the ROI for the bounding box and convert it
            # to the HSV color space
            roi = orig[tl[1]:br[1], tl[0]:br[0]]
            roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
            #roi = cv2.cvtColor(roi, cv2.COLOR_BGR2LAB)
            # compute a HSV histogram for the ROI and store the
            # bounding box
            roiHist = cv2.calcHist([roi], [0], None, [16], [0, 180])
            roiHist = cv2.normalize(roiHist, roiHist, 0, 255, cv2.NORM_MINMAX)
            roiBox = (tl[0], tl[1], br[0], br[1])
        # if the 'q' key is pressed, stop the loop
        elif key == ord("q"):
            break
    # cleanup the camera and close any open windows
    camera.release()
    cv2.destroyAllWindows()
# Wait until the mouse has entered the window before starting tracking.
while True:
    if mouse_info.x > 0 and mouse_info.y > 0:
        break
    cv2.imshow(WINDOW_NAME, img)
    if cv2.waitKey(1) == 27:  # ESC aborts before tracking starts
        exit(0)
# These will get the trajectories for mouse location and Kalman estimate
measured_points = []
kalman_points = []
# Create a new Kalman filter and initialize it with starting mouse location
kalfilt = Kalman(2)
# Loop till user hits escape
while True:
    # Serve up a fresh image
    img = newImage()
    # Grab current mouse position and add it to the trajectory
    measured = (mouse_info.x, mouse_info.y)
    measured_points.append(measured)
    # Update the Kalman filter with the mouse point
    kalfilt.update((mouse_info.x, mouse_info.y))
    # Get the current Kalman estimate and add it to the trajectory
    # NOTE(review): the loop body is truncated here in this chunk -- the
    # estimate-retrieval and drawing code presumably follows elsewhere.