def _view_camera(self, camera_id):
    cm = CameraManager()
    camera = cm.get_camera_by_id(camera_id)
    self._render_view('view', camera=camera)
class Event(object):
    def __init__(self):
        self.id = 0
        self.feed_amount = 0
        self.event_time = ''
        self.has_run = 'false'
        self.images = []
        self.servo_man = ServoManager()
        self.camera_man = CameraManager()

    def add_image(self, image):
        self.images.append(image)

    def clear_images(self):
        self.images = []

    def try_execute(self):
        if self.has_run == 'false' and self.event_time <= datetime.now():
            self.execute()
            self.servo_man.cleanup()  # Clean up even if the servo was not activated
            self.camera_man.cleanup()  # Clean up even if the camera was not used

    def execute(self):
        try:
            if self.feed_amount > 0:
                self.servo_man.run()
            image_file_names = self.camera_man.take_picture_series(3)
            self.clear_images()
            for image_file_name in image_file_names:
                image = Image()
                image.file_name = image_file_name
                self.add_image(image)
            self.has_run = 'true'
        except Exception:
            # On any failure, discard partial results and leave the event
            # eligible to run again.
            self.clear_images()
            self.has_run = 'false'

    def serialize_to_json_string(self):
        json_string = ('{"event_time": "'
                       + self.event_time.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
                       + '", "id": "' + str(self.id)
                       + '", "feed_amount": "' + str(self.feed_amount)
                       + '", "has_run": "' + str(self.has_run)
                       + '", "images": [')
        for index, image in enumerate(self.images):
            json_string += image.serialize_to_json_string()
            if index != len(self.images) - 1:
                json_string += ', '
        json_string += ']}'
        return json_string
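# A minimal usage sketch for the Event class above. It assumes ServoManager,
# CameraManager, and Image from this project are importable and that the
# attached hardware is present; the scheduling values are hypothetical.
from datetime import datetime, timedelta

event = Event()
event.id = 1
event.feed_amount = 2
event.event_time = datetime.now() - timedelta(seconds=1)  # due one second ago

event.try_execute()  # runs the servo, takes three pictures, marks has_run 'true'
print(event.serialize_to_json_string())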
def start_secure():
    for i in range(retry_count):
        try:
            logger.info('retrying')
            time.sleep(5)
            new_socket = socket.socket()
            logger.info('connecting to {}:{}...'.format(tilda_ip, tilda_port))
            new_socket.connect((tilda_ip, tilda_port))
            new_connection = new_socket.makefile('wb')
            logger.info('connected to {}:{}'.format(tilda_ip, tilda_port))
            new_streamer = ImageStreamer(new_connection)
            logger.info('Image streamer created')
            camera_manager = CameraManager(new_streamer)
            logger.info('camera_manager created')
            camera_manager.start_capturing()
            new_streamer.terminated = True
            new_streamer.join()
            logger.info('new_connection write last')
            new_connection.write(struct.pack('<L', 0))
            logger.info('connection write ends')
        except Exception as e:
            t, value, t2 = sys.exc_info()
            logger.error('Exception when retrying streaming {} {}'.format(t, value),
                         exc_info=True)
            traceback.print_tb(e.__traceback__)
            try:
                logger.info(
                    f'Total images sent {camera_manager.get_total_images_count()} '
                    f'on fps {camera_manager.get_fps()}')
            except Exception:
                # camera_manager may not exist if the failure happened early.
                pass
        finally:
            try:
                new_socket.close()
                new_connection.close()
            except Exception as e:
                t, value, t2 = sys.exc_info()
                logger.error(
                    'Exception when retrying to close connection and socket {} {}'
                    .format(t, value), exc_info=True)
                traceback.print_tb(e.__traceback__)
def do_POST(self):
    ctype, pdict = cgi.parse_header(self.headers.getheader('content-type'))
    if ctype == 'multipart/form-data':
        query = cgi.parse_multipart(self.rfile, pdict)
        user = query.get('user')
        password = query.get('password')
        conf = Configuration.get()
        self.usuario_valido = (user[0] == conf['user']
                               and password[0] == conf['password'])
        if self.usuario_valido:
            cm = CameraManager()
            self._render_view('cameras', cameras=cm.get_cameras())
        else:
            self._render_view('auth_form')
def __init__(self, baudrate, camera=None, timeout=1):
    # Avoid CameraManager((512, 512)) as a default argument: defaults are
    # evaluated once at definition time, so the camera would be opened at
    # import and shared across all instances.
    self.baudrate = baudrate
    self.camera = camera if camera is not None else CameraManager((512, 512))
    self.timeout = timeout
    self.mapping = {0: 'r', 1: 'l', 2: 'b', 3: 'f', 4: 'None'}
    self.model = None
    self.input_details = None
    self.output_details = None
    self.port = None
    self.serial_connection = None
    self._start_port_conn()
    self._load_model()
def get_images():
    cMgr = CameraManager()
    json_data = cMgr.getImages()
    return json.dumps(json_data)
class Vision:
    """Main vision class.

    An instance should be created, with test=False (default). As long as the
    cameras are configured correctly via the GUI interface, everything will
    work without modification required.
    This will not work on most machines, so tests of the main process function
    are the only tests that can be done without a Pi running the FRC vision
    image.
    """

    entries = None

    def __init__(
        self,
        test_img=None,
        test_video=None,
        test_display=False,
        using_nt=False,
    ):
        # Memory allocation
        # Numpy takes rows then cols as dimensions: height x width
        self.hsv = np.zeros(shape=(FRAME_HEIGHT, FRAME_WIDTH, 3), dtype=np.uint8)
        self.image = self.hsv.copy()
        self.mask = np.zeros(shape=(FRAME_HEIGHT, FRAME_WIDTH), dtype=np.uint8)

        # Camera configuration
        self.CameraManager = CameraManager(test_img=test_img,
                                           test_video=test_video,
                                           test_display=test_display)
        self.Connection = Connection(using_nt=using_nt,
                                     test=test_video or test_img)
        # We are in test mode whenever a test image or a test video was given.
        self.testing = test_img is not None or test_video is not None

    def find_loading_bay(self, frame: np.ndarray):
        cnts, hierarchy = cv2.findContours(self.mask, cv2.RETR_CCOMP,
                                           cv2.CHAIN_APPROX_SIMPLE)
        cnts = np.array(cnts)
        hierarchy = np.array(hierarchy)[0]
        outer_rects = {}
        inner_rects = {}
        for i, cnt in enumerate(cnts):
            if hierarchy[i][3] == -1:
                outer_rects[i] = (
                    get_corners_from_contour(cnt),
                    hierarchy[i],
                    cv2.contourArea(cnt),
                )
            else:
                inner_rects[i] = (
                    get_corners_from_contour(cnt),
                    hierarchy[i],
                    cv2.contourArea(cnt),
                )
        if not (inner_rects and outer_rects):
            return None
        good = []
        for i in outer_rects:
            if outer_rects[i][2] > MIN_CONTOUR_AREA:
                current_inners = []
                next_child = outer_rects[i][1][2]
                while next_child != -1:
                    current_inners.append(inner_rects[next_child])
                    next_child = inner_rects[next_child][1][0]
                largest = max(current_inners, key=lambda x: x[2])
                if (abs((outer_rects[i][2] / largest[2]) - INNER_OUTER_RATIO) < 0.5
                        and abs((cv2.contourArea(outer_rects[i][0]) /
                                 outer_rects[i][2]) - 1) < RECT_AREA_RATIO
                        and abs((cv2.contourArea(largest[0]) / largest[2]) - 1)
                        < RECT_AREA_RATIO):
                    good.append((outer_rects[i], largest))
        self.image = frame.copy()
        for pair in good:
            self.image = cv2.drawContours(self.image,
                                          pair[0][0].reshape((1, 4, 2)), -1,
                                          (255, 0, 0), thickness=2)
            self.image = cv2.drawContours(
                self.image,
                pair[1][0].reshape((1, 4, 2)),
                -1,
                (255, 0, 255),
                thickness=1,
            )
        return (0.0, 0.0)

    def find_power_port(self, frame: np.ndarray) -> tuple:
        _, cnts, _ = cv2.findContours(frame, cv2.RETR_EXTERNAL,
                                      cv2.CHAIN_APPROX_SIMPLE)
        if len(cnts) >= 1:
            acceptable_cnts = []
            for cnt in cnts:
                area = cv2.contourArea(cnt)
                box = cv2.boundingRect(cnt)
                hull_area = cv2.contourArea(cv2.convexHull(cnt))
                if (area > MIN_CONTOUR_AREA and area / hull_area > 0.2
                        and box[2] > box[3]):
                    acceptable_cnts.append(cnt)
            if acceptable_cnts:
                power_port_contour = max(acceptable_cnts, key=cv2.contourArea)
                power_port_points = get_corners_from_contour(power_port_contour)
                return power_port_points
        return None

    def create_annotated_display(self, frame: np.ndarray, points: np.ndarray,
                                 printing=False):
        for i in range(len(points)):
            cv2.circle(frame, (points[i][0][0], points[i][0][1]), 5, (0, 255, 0))
        if printing:
            print(points)

    def get_vertical_angle(self, p: int):
        """Gets angle of point p above the horizontal.

        Parameter p should have 0 at the bottom of the frame and FRAME_HEIGHT
        at the top.
        """
        return math.atan2(p - FRAME_HEIGHT, FY)

    # get_angle and get_distance will be replaced with solvePnP eventually
    def get_horizontal_angle(self, X: float) -> float:
        """Gets the horizontal angle of the target."""
        return ((X / FRAME_WIDTH) - 0.5) * MAX_FOV_WIDTH  # 33.18 degrees

    def get_distance(self, Y: float) -> float:
        target_angle = self.get_vertical_angle(Y)
        return (TARGET_HEIGHT - CAMERA_HEIGHT) / math.tan(GROUND_ANGLE +
                                                          target_angle)

    def get_middles(self, contour: np.ndarray) -> tuple:
        """Use the cv2 moments to find the centre x of the contour.

        We just copied it from the opencv reference. The y is just the lowest
        pixel in the image.
        """
        M = cv2.moments(contour)
        if M["m00"] != 0:
            cX = int(M["m10"] / M["m00"])
        else:
            cX = 160
        cY = max(list(contour[:, :, 1]))
        return cX, cY

    def get_image_values(self, frame: np.ndarray) -> tuple:
        """Takes a frame, returns a tuple of results, or None."""
        self.hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV, dst=self.hsv)
        self.mask = cv2.inRange(self.hsv, HSV_LOWER_BOUND, HSV_UPPER_BOUND,
                                dst=self.mask)
        self.mask = cv2.erode(self.mask, None, dst=self.mask, iterations=1)
        self.mask = cv2.dilate(self.mask, None, dst=self.mask, iterations=2)
        self.mask = cv2.erode(self.mask, None, dst=self.mask, iterations=1)
        power_port = self.find_power_port(self.mask)
        self.image = self.mask
        if power_port is not None:
            self.create_annotated_display(frame, power_port)
            midX, midY = self.get_middles(power_port)
            angle = self.get_horizontal_angle(midX)
            distance = self.get_distance(midY)
            return (distance, angle)
        return None

    def run(self):
        """Main process function.

        When ran, takes image, processes image, and sends results to RIO.
        """
        if self.Connection.using_nt:
            self.Connection.pong()
        frame_time, self.frame = self.CameraManager.get_frame(0)
        if frame_time == 0:
            print(self.CameraManager.sinks[0].getError(), file=sys.stderr)
            self.CameraManager.source.notifyError(
                self.CameraManager.sinks[0].getError())
        else:
            self.frame = cv2.rotate(self.frame, cv2.ROTATE_180)
            results = self.get_image_values(self.frame)
            if results is not None:
                distance, angle = results
                # distance (metres), angle (radians), timestamp
                self.Connection.send_results((distance, angle, time.monotonic()))
            self.CameraManager.send_frame(self.image)

    def translate(self, value, leftMin, leftMax, rightMin, rightMax):
        # Linear range mapping, from
        # https://stackoverflow.com/questions/1969240/mapping-a-range-of-values-to-another
        # Figure out how 'wide' each range is
        leftSpan = leftMax - leftMin
        rightSpan = rightMax - rightMin
        # Convert the left range into a 0-1 range (float)
        valueScaled = float(value - leftMin) / float(leftSpan)
        # Convert the 0-1 range into a value in the right range.
        return rightMin + (valueScaled * rightSpan)
class World(object):
    def __init__(self, carla_world, args):
        self.world = carla_world
        self.actor_role_name = args.rolename
        try:
            self.map = self.world.get_map()
        except RuntimeError as error:
            print('RuntimeError: {}'.format(error))
            print('  The server could not send the OpenDRIVE (.xodr) file:')
            sys.exit(1)
        self.vehicle = None
        self.camera_manager = None
        self.players = [None, None]
        self.players_location = [[5, 45, 0], [5, 70, 0]]
        self.restart()

    def restart(self):
        self.world.unload_map_layer(carla.MapLayer.All)
        # Get a random blueprint.
        blueprint = random.choice(
            self.world.get_blueprint_library().filter('vehicle.audi.a2'))
        blueprint.set_attribute('role_name', self.actor_role_name)
        if blueprint.has_attribute('color'):
            blueprint.set_attribute('color', '224,0,0')
        if blueprint.has_attribute('is_invincible'):
            blueprint.set_attribute('is_invincible', 'true')
        # Spawn the vehicle.
        while self.vehicle is None:
            if not self.map.get_spawn_points():
                print('There are no spawn points available in your map/town.')
                print('Please add some Vehicle Spawn Point to your UE4 scene.')
                sys.exit(1)
            spawn_point = carla.Transform(
                carla.Location(x=initial_state["x"],
                               y=initial_state["y"],
                               z=initial_state["z"]),
                carla.Rotation(yaw=initial_state["yaw"]),
            )
            self.vehicle = self.world.try_spawn_actor(blueprint, spawn_point)
        # Spawning objects.
        for player_id in range(1):
            while self.players[player_id] is None:
                spawn_point = carla.Transform(
                    carla.Location(
                        x=initial_state["x"] + self.players_location[player_id][0],
                        y=initial_state["y"] + self.players_location[player_id][1],
                        z=initial_state["z"],
                    ),
                    carla.Rotation(yaw=initial_state["yaw"] +
                                   self.players_location[player_id][2]),
                )
                self.players[player_id] = self.world.try_spawn_actor(
                    blueprint, spawn_point)
        # Set up the sensors.
        self.camera_manager = CameraManager(self.vehicle)
        # self.camera_manager.set_sensor(0, 0, notify=False)  # main camera
        self.camera_manager.set_sensor(0, 1, notify=False)  # front camera
        self.camera_manager.set_sensor(1, 1, notify=False)  # depth camera
        self.camera_manager.set_sensor(2, 1, notify=False)  # segmentation camera
        print("[+] World setup completed")

    def destroy_sensors(self):
        if self.camera_manager is not None:
            for sensor in self.camera_manager.sen:
                sensor.destroy()
            self.camera_manager.sensor = None
            self.camera_manager.index = None

    def destroy(self):
        self.destroy_sensors()
        if self.vehicle is not None:
            self.vehicle.destroy()
        for actor in self.players:
            if actor is not None:
                actor.destroy()
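# A minimal sketch of wiring World up to a CARLA server. It assumes a server
# listening on localhost:2000, that the surrounding script defines the
# module-level initial_state dict used by restart(), and that args carries a
# rolename attribute; the Namespace below is only an illustration.
import argparse
import carla

client = carla.Client('localhost', 2000)
client.set_timeout(10.0)
args = argparse.Namespace(rolename='hero')
world = World(client.get_world(), args)
try:
    pass  # drive / record here
finally:
    world.destroy()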
        if not (
            abs(inner["area"] / cv2.contourArea(inner["rect"]) - 1) < RAW_RECT_AREA_ERROR
        ):
            return False, inner, outer
        if not (
            abs(outer["area"] / cv2.contourArea(outer["rect"]) - 1) < RAW_RECT_AREA_ERROR
        ):
            return False, inner, outer
        return True, inner, outer

    def draw_contour_pair(self, inner: dict, outer: dict) -> None:
        """Draw the inner and outer contours on self.image"""
        self.image = cv2.drawContours(
            self.image, inner["rect"].reshape((1, 4, 2)), -1, (255, 0, 0), thickness=2
        )
        self.image = cv2.drawContours(
            self.image, outer["rect"].reshape((1, 4, 2)), -1, (0, 0, 255), thickness=2
        )


if __name__ == "__main__":
    vision = Vision(
        CameraManager("Loading Bay Camera", "/dev/video0", 240, 320, 30, "kYUYV"),
        NTConnection(),
    )
    while True:
        vision.run()
import asyncio

# monkey patch for timer coroutines
import nest_asyncio
nest_asyncio.apply()

from aiohttp import web
from aiojobs.aiohttp import atomic
import aiohttp_cors
import socketio
import json
import time

from camera_manager import CameraManager

mgmt = CameraManager()

sio = socketio.AsyncServer(async_mode='aiohttp', cors_allowed_origins="*")
app = web.Application()
cors = aiohttp_cors.setup(app, defaults={
    "*": aiohttp_cors.ResourceOptions(
        allow_credentials=True,
        expose_headers="*",
        allow_headers="*",
    )
})
sio.attach(app)
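# A minimal sketch of how a REST route and a Socket.IO event could hang off
# the app configured above. The '/status' path, its payload, and the event
# body are hypothetical, not from the original module; the @atomic decorator
# imported above would additionally require aiojobs.aiohttp.setup(app).
async def status(request):
    return web.json_response({'cameras': 'ok'})

cors.add(app.router.add_get('/status', status))


@sio.event
async def connect(sid, environ):
    print('client connected:', sid)


if __name__ == '__main__':
    web.run_app(app, port=8080)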
from camera_manager import CameraManager

camera_man = CameraManager()
camera_man.take_picture()
def test_camera_manager_get_images(self):
    cMgr = CameraManager()
    json_data = cMgr.getImages()
    self.assertTrue(type(json_data) is list)
def test_camera_manager_get_normal_view(self):
    cMgr = CameraManager()
    print(cMgr.getNormalView())
class Vision:
    """Main vision class.

    An instance should be created, with test=False (default). As long as the
    cameras are configured correctly via the GUI interface, everything will
    work without modification required.
    This will not work on most machines, so tests of the main process function
    are the only tests that can be done without a Pi running the FRC vision
    image.
    """

    entries = None

    def __init__(
        self,
        test_img=(),      # tuples, not lists: a mutable default argument is
        test_video=(),    # shared between calls and is a classic pitfall
        test_display=False,
        using_nt=False,
    ):
        # Memory allocation
        # Numpy takes rows then cols as dimensions: height x width
        self.hsv = np.zeros(shape=(FRAME_HEIGHT, FRAME_WIDTH, 3), dtype=np.uint8)
        self.image = self.hsv.copy()
        self.display = self.hsv.copy()
        self.mask = np.zeros(shape=(FRAME_HEIGHT, FRAME_WIDTH), dtype=np.uint8)

        # Camera configuration
        self.CameraManager = CameraManager(test_img=test_img,
                                           test_video=test_video,
                                           test_display=test_display)
        self.testing = bool(test_video or test_img)
        if not self.testing:
            self.CameraManager.setCameraProperty(
                0, "white_balance_temperature_auto", 0)
            self.CameraManager.setCameraProperty(0, "exposure_auto", 1)
            self.CameraManager.setCameraProperty(0, "focus_auto", 0)
            self.CameraManager.setCameraProperty(0, "exposure_absolute", 1)
        self.Connection = Connection(using_nt=using_nt, test=self.testing)
        self.avg_horiz_angle = 0
        self.avg_dist = 0
        self.prev_dist = 0
        self.prev_horiz_angle = 0
        self.old_fps_time = 0

    def find_loading_bay(self, frame: np.ndarray):
        cnts, hierarchy = cv2.findContours(self.mask, cv2.RETR_CCOMP,
                                           cv2.CHAIN_APPROX_SIMPLE)
        cnts = np.array(cnts)
        hierarchy = np.array(hierarchy)[0]
        outer_rects = {}
        inner_rects = {}
        for i, cnt in enumerate(cnts):
            if hierarchy[i][3] == -1:
                outer_rects[i] = (
                    get_corners_from_contour(cnt),
                    hierarchy[i],
                    cv2.contourArea(cnt),
                )
            else:
                inner_rects[i] = (
                    get_corners_from_contour(cnt),
                    hierarchy[i],
                    cv2.contourArea(cnt),
                )
        if not (inner_rects and outer_rects):
            return None
        good = []
        for i in outer_rects:
            if outer_rects[i][2] > MIN_CONTOUR_AREA:
                current_inners = []
                next_child = outer_rects[i][1][2]
                while next_child != -1:
                    current_inners.append(inner_rects[next_child])
                    next_child = inner_rects[next_child][1][0]
                largest = max(current_inners, key=lambda x: x[2])
                if (abs((outer_rects[i][2] / largest[2]) -
                        LOADING_INNER_OUTER_RATIO) < 0.5
                        and abs((cv2.contourArea(outer_rects[i][0]) /
                                 outer_rects[i][2]) - 1) < LOADING_RECT_AREA_RATIO
                        and abs((cv2.contourArea(largest[0]) / largest[2]) - 1)
                        < LOADING_RECT_AREA_RATIO):
                    good.append((outer_rects[i], largest))
        self.image = frame.copy()
        for pair in good:
            self.image = cv2.drawContours(self.image,
                                          pair[0][0].reshape((1, 4, 2)), -1,
                                          (255, 0, 0), thickness=2)
            self.image = cv2.drawContours(
                self.image,
                pair[1][0].reshape((1, 4, 2)),
                -1,
                (255, 0, 255),
                thickness=1,
            )
        return (0.0, 0.0)

    def find_power_port(self, frame: np.ndarray) -> tuple:
        hullList = []
        # Convert to RGB to draw contours on - shouldn't recreate every time
        self.display = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR, dst=self.display)
        _, cnts, _ = cv2.findContours(frame, cv2.RETR_EXTERNAL,
                                      cv2.CHAIN_APPROX_SIMPLE)
        if len(cnts) >= 1:
            acceptable_cnts = []
            # Check if the found contour is possibly a target
            for cnt in cnts:
                area = cv2.contourArea(cnt)
                if PP_MAX_CONTOUR_AREA > area > PP_MIN_CONTOUR_AREA:
                    box = cv2.boundingRect(cnt)  # x, y, w, h
                    # Convex hull gives the bounding polygon of the contour
                    # with no interior angles greater than 180 degrees
                    hull = cv2.convexHull(cnt)
                    hull_area = cv2.contourArea(hull)
                    # If the contour takes up more than X% of the hull and
                    # width is greater than height
                    if (PP_MAX_AREA_RATIO > area / hull_area > PP_MIN_AREA_RATIO
                            and box[2] > box[3]):
                        acceptable_cnts.append(cnt)
                        hullList.append(hull)
            # This section of code displays the possible targets
            for i in range(len(acceptable_cnts)):
                color_G = (0, 255, 0)
                color_B = (255, 0, 0)
                cv2.drawContours(self.display, acceptable_cnts, i, color_G)
                cv2.drawContours(self.display, hullList, i, color_B)
            if acceptable_cnts:
                if len(acceptable_cnts) > 1:
                    # Pick the largest found 'power port'
                    power_port_contour = max(acceptable_cnts,
                                             key=cv2.contourArea)
                else:
                    power_port_contour = acceptable_cnts[0]
                power_port_points = get_corners_from_contour(power_port_contour)
                for i in range(4):
                    cv2.circle(self.display, tuple(power_port_points[i][0]), 3,
                               (0, 0, 255))
                return power_port_points
        return None

    def create_annotated_display(self, frame: np.ndarray, points: np.ndarray,
                                 printing=False):
        for i in range(len(points)):
            cv2.circle(frame, (points[i][0][0], points[i][0][1]), 5, (0, 255, 0))
        if printing:
            print(points)

    def get_mid(self, contour: np.ndarray) -> tuple:
        """Use the cv2 moments to find the centre x of the contour.

        We just copied it from the opencv reference.
        """
        M = cv2.moments(contour)
        if M["m00"] != 0:
            cX = int(M["m10"] / M["m00"])
        else:
            cX = 160
        return cX

    def get_image_values(self, frame: np.ndarray) -> tuple:
        """Takes a frame, returns a tuple of results, or None."""
        self.hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV, dst=self.hsv)
        self.mask = cv2.inRange(self.hsv, HSV_LOWER_BOUND, HSV_UPPER_BOUND,
                                dst=self.mask)
        power_port = self.find_power_port(self.mask)
        self.image = self.mask
        if power_port is not None:
            self.prev_dist = self.avg_dist
            self.prev_horiz_angle = self.avg_horiz_angle
            self.create_annotated_display(frame, power_port)
            midX = self.get_mid(power_port)
            target_top = min(list(power_port[:, :, 1]))
            target_bottom = max(list(power_port[:, :, 1]))
            horiz_angle = get_horizontal_angle(midX, FRAME_WIDTH,
                                               MAX_FOV_WIDTH / 2, True)
            vert_angles = [
                get_vertical_angle_linear(target_bottom, FRAME_HEIGHT,
                                          MAX_FOV_HEIGHT / 2, True),
                get_vertical_angle_linear(target_top, FRAME_HEIGHT,
                                          MAX_FOV_HEIGHT / 2, True),
            ]
            distances = [
                get_distance(vert_angles[0], TARGET_HEIGHT_BOTTOM,
                             CAMERA_HEIGHT, GROUND_ANGLE),
                get_distance(vert_angles[1], TARGET_HEIGHT_TOP, CAMERA_HEIGHT,
                             GROUND_ANGLE),
            ]
            # Use the readings derived from the top of the target
            distance = distances[1]
            vert_angle = vert_angles[1]
            print("angle: ", math.degrees(vert_angle), " distance: ", distance)
            # Exponentially smooth the distance and angle between frames
            self.avg_dist = (distance * (1 - DIST_SMOOTHING_AMOUNT) +
                             self.prev_dist * DIST_SMOOTHING_AMOUNT)
            self.avg_horiz_angle = (horiz_angle * (1 - ANGLE_SMOOTHING_AMOUNT) +
                                    self.prev_horiz_angle * ANGLE_SMOOTHING_AMOUNT)
            if self.testing:
                return (distance, horiz_angle)
            return (self.avg_dist, self.avg_horiz_angle)
        return None

    def run(self):
        """Main process function.

        When ran, takes image, processes image, and sends results to RIO.
        """
        if not self.testing:
            if self.Connection.using_nt:
                self.Connection.pong()
        frame_time, self.frame = self.CameraManager.get_frame(0)
        if frame_time == 0:
            print(self.CameraManager.sinks[0].getError(), file=sys.stderr)
            self.CameraManager.source.notifyError(
                self.CameraManager.sinks[0].getError())
        else:
            # Flip the image because it is originally upside down.
            self.frame = cv2.rotate(self.frame, cv2.ROTATE_180)
            results = self.get_image_values(self.frame)
            if self.Connection.using_nt:
                self.time = time.monotonic()
                self.fps = 1 / (self.time - self.old_fps_time)
                self.old_fps_time = self.time
                self.Connection.fps_entry.setDouble(self.fps)
            if results is not None:
                distance, angle = results
                # distance (metres), angle (radians), timestamp
                self.Connection.send_results((distance, angle, time.monotonic()))
            self.CameraManager.send_frame(self.image)
        When ran, takes image, processes image, and sends results to RIO.
        """
        self.connection.pong()
        frame_time, self.frame = self.camera_manager.get_frame()
        if frame_time == 0:
            self.camera_manager.notify_error(self.camera_manager.get_error())
            return
        # Flip the image because it is originally upside down.
        self.frame = cv2.rotate(self.frame, cv2.ROTATE_180)
        results = self.get_image_values(self.frame)
        self.connection.set_fps()
        if results is not None:
            distance, angle = results
            # distance (metres), angle (radians), timestamp
            self.connection.send_results((distance, angle, time.monotonic()))
        self.camera_manager.send_frame(self.display)


if __name__ == "__main__":
    vision = Vision(
        CameraManager("Power Port Camera", "/dev/video0", 240, 320, 30, "kYUYV"),
        NTConnection(),
    )
    while True:
        vision.run()
class MainWindow:
    controls = [
        'win_main', 'btn_config_events', 'btn_recognize_face', 'lst_events',
        'cmb_cameras', 'btn_view_camera', 'txt_port', 'btn_start_server',
        'lst_cameras', 'tr_vw_col_date', 'tr_vw_col_event', 'tr_vw_events'
    ]

    def __init__(self, config):
        self.config = config
        self.cm = CameraManager()
        self.builder = gtk.Builder()
        self.builder.add_from_file('win_main.xml')
        self.dbh = DatabaseHelper()
        self._init_controls()
        self._connect_events()

    def _init_controls(self):
        for c in self.controls:
            self.__dict__[c] = self.builder.get_object(c)
        cell = gtk.CellRendererText()
        self.cmb_cameras.pack_start(cell, True)
        self.cmb_cameras.add_attribute(cell, "text", 0)
        self._fill_cameras()
        cell = gtk.CellRendererText()
        self.tr_vw_col_date.pack_start(cell, True)
        self.tr_vw_col_date.add_attribute(cell, 'text', 0)
        cell = gtk.CellRendererText()
        self.tr_vw_col_event.pack_start(cell, True)
        self.tr_vw_col_event.add_attribute(cell, 'text', 1)
        self._fill_events()

    def _fill_cameras(self):
        self.cmb_cameras.get_model().clear()
        for cam in self.cm.get_cameras():
            self.cmb_cameras.append_text(cam.device_path)
        self.cmb_cameras.set_active(0)

    def _connect_events(self):
        self.win_main.connect('delete_event', self._win_main_delete_event)
        self.btn_view_camera.connect('clicked', self._view_camera)
        self.btn_start_server.connect('clicked', self._start_server)

    def _fill_events(self, *args):
        self.lst_events.clear()
        for a in self.dbh.get_activities():
            row = time.strftime('%d/%m/%Y', time.localtime(a[1])), a[2]
            self.lst_events.append(row)

    def _start_server(self, *args):
        port = self.txt_port.get_text()
        print 'Server started on port', port
        proc = Process(target=MainWindow._start_monitor_server, args=(port,))
        proc.start()

    def _win_main_delete_event(self, *args):
        gtk.main_quit()

    def _view_camera(self, *args):
        device = self.cmb_cameras.get_active_text()
        proc = Process(target=MainWindow._start_camera_monitor, args=(device,))
        proc.start()

    @staticmethod
    def _start_monitor_server(port):
        os.popen(' '.join(('python', 'monitor_server.py', '-p', port)))

    @staticmethod
    def _start_camera_monitor(device):
        os.popen(' '.join(('python', 'camera_manager.py', '-d', device, '-f')))

    def main(self):
        self.win_main.show_all()
        gtk.main()
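# A minimal launch sketch for the Python 2 / PyGTK window above. The
# Configuration source for `config` is an assumption borrowed from this
# project's HTTP handler, not shown in the class itself.
if __name__ == '__main__':
    config = Configuration.get()
    window = MainWindow(config)
    window.main()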
    'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',
    'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack',
    'umbrella', 'N/A', 'N/A', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',
    'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
    'skateboard', 'surfboard', 'tennis racket', 'bottle', 'N/A', 'wine glass',
    'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',
    'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',
    'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table', 'N/A',
    'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard',
    'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator',
    'N/A', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier',
    'toothbrush'
]

colorizer = rs.colorizer()
cam_manager = CameraManager()
model = jetson.inference.detectNet('ssd-mobilenet-v2', threshold=0.4)
time.sleep(2)

while True:
    start_time = time.time()
    rgb_frame, depth_frame = cam_manager.get_frames()
    detections = model.Detect(jetson.utils.cudaFromNumpy(rgb_frame))
    for detection in detections:
        text = "{} {}%".format(
            COCO_INSTANCE_CATEGORY_NAMES[detection.ClassID],
            detection.Confidence)
        print(text)
    barcodes = pyzbar.decode(rgb_frame)
    # loop over the detected barcodes
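    # A hedged sketch of how the barcode loop could continue; it follows the
    # standard pyzbar/cv2 pattern and is not from the original script (it
    # assumes cv2 is imported in the full module).
    for barcode in barcodes:
        (x, y, w, h) = barcode.rect              # bounding box of the barcode
        data = barcode.data.decode('utf-8')      # payload bytes -> str
        label = '{} ({})'.format(data, barcode.type)
        cv2.rectangle(rgb_frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
        cv2.putText(rgb_frame, label, (x, y - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)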
outer["rect"] = get_corners_from_contour(outer["contour"]) if not len(outer["rect"]) == 4: return False, inner, outer if not (abs(inner["area"] / cv2.contourArea(inner["rect"]) - 1) < RAW_RECT_AREA_ERROR): return False, inner, outer if not (abs(outer["area"] / cv2.contourArea(outer["rect"]) - 1) < RAW_RECT_AREA_ERROR): return False, inner, outer return True, inner, outer def draw_contour_pair(self, inner: dict, outer: dict) -> None: """Draw the inner and outer contours on self.image""" self.image = cv2.drawContours(self.image, inner["rect"].reshape((1, 4, 2)), -1, (255, 0, 0), thickness=2) self.image = cv2.drawContours(self.image, outer["rect"].reshape((1, 4, 2)), -1, (0, 0, 255), thickness=2) if __name__ == "__main__": vision = Vision(CameraManager(), NTConnection()) while True: vision.run()