def threadLoop():
    global curCorners
    global curPos
    global curAng
    global curCamera
    global targetAng
    global targetPos

    if curCamera is None:
        curCamera = init_camera()
    img = capture_image(curCamera)
    if curCorners is None:
        curCorners = findCorners(img)
    img = transform(img, curCorners)
    (img, curPos, curAng, curDiam) = detectRobot(img)

    if targetPos is not None:
        # Heading (in degrees) from the robot's current position to the target.
        targetAng = math.atan2(targetPos[1] - curPos[1],
                               targetPos[0] - curPos[0]) * 180 / math.pi

#    print("The robot's position is: {}".format(curPos))
#    print("The robot's angle is: {}".format(curAng))
#    print("The robot's diameter is: {}".format(curDiam))

    # Run this loop again in 10 ms.
    threading.Timer(0.01, threadLoop).start()
def threadLoop():
    global curCorners
    global curPos
    global curAng
    global curCamera
    global curImage
    global curDiam
    global targetAng
    global targetPos
    global targetPath
    global targetPoint

##    if(counter == 0):
##        targetpath()
##    else:
##    if (curCamera == None):
##        curCamera = init_camera()

    pathTime = 0
    camera, rawCapture = init_camera()
    shown = False
    lastMapTime = 0
    rawTargetPoint = None
    m = None
    rawPos = None

    while True:
#        print("Image capture took {}".format(captureTimeEnd - captureTimeStart))
        captureTimeStart = time.time()
        img = capture_image(camera, rawCapture)
        captureTimeEnd = time.time()

        if curCorners is None:
            curCorners = findCorners(img)

        transTime = time.time()
        imageLock.acquire()
        img = transform(img, curCorners)
#        plt.imshow(img)
#        plt.show()
        imageLock.release()
#        print("Transformation took {}".format(time.time() - transTime))

#        if not shown:
#            plt.imshow(img)
#            plt.show()
#            shown = True

        detectTimeStart = time.time()
        imageLock.acquire()
        (img, rawPos, curAng, curDiam) = detectRobot(img)
#        print("current angle: {}".format(curAng))
#        if rawPos == None:
#            (img, rawPos, curAng, curDiam) = detectRobot(img)
#        else:
##            print("X-range: {}. Y-range: {}".format((xL, xH), (yL, yH)))
#            (img, rawPos, curAng, curDiam) = detectRobot(img, yL, yH, xL, xH)
        imageLock.release()

        print("Current position: {} Target Position: {}".format(rawPos, targetPoint))

        # Search window around the last known position, clamped to the
        # 1600x1200 transformed image.
        xL = rawPos[0] - 150
        xH = rawPos[0] + 150
        yL = rawPos[1] - 150
        yH = rawPos[1] + 150
        if xL < 0:
            xL = 0
        if yL < 0:
            yL = 0
        if yH >= 1200:
            yH = 1200
        if xH >= 1600:
            xH = 1600
#        print("X-range: {}. Y-range: {}".format((xL, xH), (yL, yH)))

        detectTimeEnd = time.time()
        print("Robot detection took {}".format(detectTimeEnd - detectTimeStart))

        imageLock.acquire()
        curImage = img
        # Scale down from image coordinates to map cells (20 px per cell).
        curPos = (int(rawPos[0] / 20), int(rawPos[1] / 20))
        if curPos == (0, 0):
            rawPos = None
        imageLock.release()
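
# Illustrative sketch only, not part of the original source: the loop above
# assumes picamera-based helpers init_camera() and capture_image(camera,
# rawCapture), which are defined elsewhere in the project. The commented code
# below shows one plausible shape for them, built from the PiCamera/PiRGBArray
# calls used later in this file; the 1600x1200 resolution is an assumption
# taken from the clamping bounds above.
#
# from picamera import PiCamera
# from picamera.array import PiRGBArray
#
# def init_camera():
#     camera = PiCamera()
#     camera.resolution = (1600, 1200)            # assumed capture size
#     rawCapture = PiRGBArray(camera)
#     return camera, rawCapture
#
# def capture_image(camera, rawCapture):
#     rawCapture.truncate(0)                      # reuse the buffer between frames
#     camera.capture(rawCapture, format="bgr")    # BGR so the array works with OpenCV
#     return rawCapture.array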
# print("THe robot's diameter is: {}".format(curDiam)) threading.Timer(0.01, threadLoop).start() if __name__ == '__main__': # camera = init_camera() threadLoop() moveRobotForward() time.sleep(60) exit() camera = init_camera() img = capture_image(camera) corners = findCorners(img) img = transform(img, corners) # img = cv2.GaussianBlur(img, robotStart = time.time() (img, pos, ang, diam) = detectRobot(img) robotEnd = time.time() print("Robot detection duration: {}".format(robotEnd - robotStart)) smallPos = (int(pos[0] / 20), int(pos[1] / 20)) mapStart = time.time() m = generateMap(img, diam / 40) mapEnd = time.time() print("Map generation duration: {}".format(mapEnd - mapStart)) pathStart = time.time() path = findPath(m, smallPos, (70, 10), int(diam / 40))
#    sleep(60)
#    camera.stop_preview()
#
#    exit()

    rawCapture = PiRGBArray(camera)
    camera.capture(rawCapture, format="bgr")
    img = rawCapture.array
    cv2.imwrite('/home/pi/mat.png', img)
#    img = cv2.imread('/home/pi/best_course.png')
##    print("Raw image")
##    plt.imshow(img)
##    plt.show()

    start = time.time()
    img = transform(img)
#    print("Transformed image")
#    im = ax.imshow(img, interpolation='none')
#    ax.format_coord = Formatter(im)
#    plt.imshow(img)
#    plt.show()

    img = cv2.GaussianBlur(img, (11, 11), 0)

    robotStart = time.time()
    (img, pos, ang, diam) = detectRobot(img)
    robotEnd = time.time()
    print("Robot detection duration: {}".format(robotEnd - robotStart))

    # Not cool. Change later
    pos = (int(pos[0] / 20), int(pos[1] / 20))

    mapStart = time.time()