def match_lis(self):
    # For the first pass, we only want to use the brightest MAX_FALSE_STARS + REQUIRED_STARS
    img_stars_n_brightest = self.img_stars.copy_n_brightest(
        beast.cvar.MAX_FALSE_STARS + beast.cvar.REQUIRED_STARS)
    img_const_n_brightest = beast.constellation_db(
        img_stars_n_brightest, beast.cvar.MAX_FALSE_STARS + 2, 1)
    lis = beast.db_match(C_DB, img_const_n_brightest)
    # TODO: uncomment once p_match is fixed
    # if lis.p_match > P_MATCH_THRESH:
    if lis.p_match > P_MATCH_THRESH and lis.winner.size() >= beast.cvar.REQUIRED_STARS:
        x = lis.winner.R11
        y = lis.winner.R21
        z = lis.winner.R31
        self.match_near(x, y, z, beast.cvar.MAXFOV / 2)
def match_near(self, x, y, z, r):
    # Search the catalog for stars within radius r of the unit vector (x, y, z)
    SQ_RESULTS.kdsearch(x, y, z, r, beast.cvar.THRESH_FACTOR * beast.cvar.IMAGE_VARIANCE)
    # Estimate density for constellation generation
    C_DB.results.kdsearch(x, y, z, r, beast.cvar.THRESH_FACTOR * beast.cvar.IMAGE_VARIANCE)
    fov_stars = SQ_RESULTS.from_kdresults()
    self.fov_db = beast.constellation_db(fov_stars, C_DB.results.r_size(), 1)
    C_DB.results.clear_kdresults()
    SQ_RESULTS.clear_kdresults()
    img_const = beast.constellation_db(self.img_stars, beast.cvar.MAX_FALSE_STARS + 2, 1)
    near = beast.db_match(self.fov_db, img_const)
    if near.p_match > P_MATCH_THRESH:
        self.match = near
        self.db_stars = near.winner.from_match()
def match_rel(self, last_match):
    # Make a copy of the stars from the last match
    img_stars_from_lm = last_match.img_stars.copy()
    w = last_match.match.winner
    # Convert the stars to ECI
    for i in range(img_stars_from_lm.size()):
        s = img_stars_from_lm.get_star(i)
        x = s.x * w.R11 + s.y * w.R12 + s.z * w.R13
        y = s.x * w.R21 + s.y * w.R22 + s.z * w.R23
        z = s.x * w.R31 + s.y * w.R32 + s.z * w.R33
        s.x = x
        s.y = y
        s.z = z
    # Create a constellation from the last match
    self.const_from_lm = beast.constellation_db(
        img_stars_from_lm, beast.cvar.MAX_FALSE_STARS + 2, 1)
    # Match between the last and current image
    img_const = beast.constellation_db(self.img_stars, beast.cvar.MAX_FALSE_STARS + 2, 1)
    rel = beast.db_match(self.const_from_lm, img_const)
    if rel.p_match > P_MATCH_THRESH:
        self.match_from_lm = rel
        self.db_stars_from_lm = rel.winner.from_match()
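# --- Hypothetical usage sketch (illustration only, not part of the original module) ---
# The three matchers above cover the usual star-tracker cases: match_lis() does a
# lost-in-space solve against the full catalog, match_near() refines around a known
# boresight, and match_rel() matches the current frame against the previous solution.
# A driver loop might chain them roughly as below; the `star_image` constructor name
# and the `last_match` bookkeeping are assumptions for illustration.
#
#     current = star_image(frame)            # hypothetical wrapper for these methods' class
#     current.match_lis()                    # coarse lost-in-space attempt (calls match_near on success)
#     if last_match is not None:
#         current.match_rel(last_match)      # also try matching against the previous frame
#     if current.match is not None:
#         last_match = current               # remember this solution for the next frame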
def solve(self, orig_img):
    global STACK_LAYERS

    if DEBUG_ENABLED:
        print(f"{STACK_LAYERS * ' '}Inside StarTracker.solve")
        STACK_LAYERS += 1

    # Keep track of solution time
    starttime = time.time()

    # Create and initialize variables
    img_stars = beast.star_db()
    match = None
    fov_db = None

    # Process the image for solving
    img = np.clip(orig_img.astype(np.int16) - self.MEDIAN_IMAGE,
                  a_min=0, a_max=255).astype(np.uint8)
    img_grey = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)

    # Remove areas of the image that don't meet our brightness threshold and then extract contours
    ret, thresh = cv2.threshold(img_grey,
                                beast.cvar.THRESH_FACTOR * beast.cvar.IMAGE_VARIANCE,
                                255, cv2.THRESH_BINARY)
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

    # Process the contours
    for c in contours:
        M = cv2.moments(c)
        if M["m00"] > 0:
            # This is how the x and y position are defined by cv2
            cx = M["m10"] / M["m00"]
            cy = M["m01"] / M["m00"]
            # See https://alyssaq.github.io/2015/computing-the-axes-or-orientation-of-a-blob/
            # for how to convert these into eigenvectors/values (computed but not currently used)
            u20 = M["m20"] / M["m00"] - cx**2
            u02 = M["m02"] / M["m00"] - cy**2
            u11 = M["m11"] / M["m00"] - cx * cy
            # The center pixel is used as the approximation of the brightest pixel
            img_stars += beast.star(
                cx - beast.cvar.IMG_X / 2.0,
                cy - beast.cvar.IMG_Y / 2.0,
                float(cv2.getRectSubPix(img_grey, (1, 1), (cx, cy))[0, 0]),
                -1)

    # We only want to use the brightest MAX_FALSE_STARS + REQUIRED_STARS
    img_stars_n_brightest = img_stars.copy_n_brightest(
        beast.cvar.MAX_FALSE_STARS + beast.cvar.REQUIRED_STARS)
    img_const_n_brightest = beast.constellation_db(
        img_stars_n_brightest, beast.cvar.MAX_FALSE_STARS + 2, 1)
    lis = beast.db_match(self.C_DB, img_const_n_brightest)

    # Generate the match
    if lis.p_match > self.P_MATCH_THRESH and lis.winner.size() >= beast.cvar.REQUIRED_STARS:
        x = lis.winner.R11
        y = lis.winner.R21
        z = lis.winner.R31
        r = beast.cvar.MAXFOV / 2

        self.SQ_RESULTS.kdsearch(
            x, y, z, r, beast.cvar.THRESH_FACTOR * beast.cvar.IMAGE_VARIANCE)
        # Estimate density for constellation generation
        self.C_DB.results.kdsearch(
            x, y, z, r, beast.cvar.THRESH_FACTOR * beast.cvar.IMAGE_VARIANCE)
        fov_stars = self.SQ_RESULTS.from_kdresults()
        fov_db = beast.constellation_db(fov_stars, self.C_DB.results.r_size(), 1)
        self.C_DB.results.clear_kdresults()
        self.SQ_RESULTS.clear_kdresults()

        img_const = beast.constellation_db(img_stars, beast.cvar.MAX_FALSE_STARS + 2, 1)
        near = beast.db_match(fov_db, img_const)

        if near.p_match > self.P_MATCH_THRESH:
            match = near

    # Get solution -- for reference:
    # - dec - rotation about the y-axis
    # - ra - rotation about the z-axis
    # - ori - rotation about the camera axis
    if match is not None:
        match.winner.calc_ori()
        dec = match.winner.get_dec()
        ra = match.winner.get_ra()
        ori = match.winner.get_ori()
    else:
        dec, ra, ori = 0.0, 0.0, 0.0

    # Calculate how long it took to process
    runtime = time.time() - starttime

    if DEBUG_ENABLED:
        STACK_LAYERS -= 1
        print(f"{STACK_LAYERS * ' '}End StarTracker.solve")

    # Return solution
    return dec, ra, ori, runtime
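# --- Hypothetical usage sketch (illustration only, not part of the original module) ---
# Minimal sketch of calling StarTracker.solve() on one frame, assuming a tracker
# instance has already been constructed and calibrated elsewhere (MEDIAN_IMAGE,
# C_DB, SQ_RESULTS and the beast.cvar camera parameters loaded). The image path
# below is an assumption for illustration.
#
#     frame = cv2.imread("samples/test_frame.png")
#     dec, ra, ori, runtime = tracker.solve(frame)
#     print(f"dec={dec:.4f} ra={ra:.4f} ori={ori:.4f} ({runtime:.3f} s)")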
def solve_image(filepath, connection):
    # Keep track of solution time
    starttime = time()

    # Create and initialize variables
    img_stars = beast.star_db()
    match = None
    fov_db = None

    # Start output for iteration
    connection.send(filepath)
    print(filepath)

    # Load the image
    orig_img = cv2.imread(filepath)
    if orig_img is None:
        connection.send("\nInvalid filepath\n\n")
        return

    # Check the image to see if it is fit for processing
    result = check_image(orig_img, connection)
    if result == 0:
        connection.send("\nTime: " + str(time() - starttime) + "\n\n")
        return

    # Process the image for solving
    img = np.clip(orig_img.astype(np.int16) - MEDIAN_IMAGE,
                  a_min=0, a_max=255).astype(np.uint8)
    img_grey = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)

    # Remove areas of the image that don't meet our brightness threshold and then extract contours
    ret, thresh = cv2.threshold(img_grey,
                                beast.cvar.THRESH_FACTOR * beast.cvar.IMAGE_VARIANCE,
                                255, cv2.THRESH_BINARY)
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

    # Process the contours
    for c in contours:
        M = cv2.moments(c)
        if M["m00"] > 0:
            # This is how the x and y position are defined by cv2
            cx = M["m10"] / M["m00"]
            cy = M["m01"] / M["m00"]
            # See https://alyssaq.github.io/2015/computing-the-axes-or-orientation-of-a-blob/
            # for how to convert these into eigenvectors/values (computed but not currently used)
            u20 = M["m20"] / M["m00"] - cx ** 2
            u02 = M["m02"] / M["m00"] - cy ** 2
            u11 = M["m11"] / M["m00"] - cx * cy
            # The center pixel is used as the approximation of the brightest pixel
            img_stars += beast.star(
                cx - beast.cvar.IMG_X / 2.0,
                cy - beast.cvar.IMG_Y / 2.0,
                float(cv2.getRectSubPix(img_grey, (1, 1), (cx, cy))[0, 0]),
                -1)

    # For the first pass, we only want to use the brightest MAX_FALSE_STARS + REQUIRED_STARS
    img_stars_n_brightest = img_stars.copy_n_brightest(
        beast.cvar.MAX_FALSE_STARS + beast.cvar.REQUIRED_STARS)
    img_const_n_brightest = beast.constellation_db(
        img_stars_n_brightest, beast.cvar.MAX_FALSE_STARS + 2, 1)
    lis = beast.db_match(C_DB, img_const_n_brightest)

    # Generate the match
    if lis.p_match > P_MATCH_THRESH and lis.winner.size() >= beast.cvar.REQUIRED_STARS:
        x = lis.winner.R11
        y = lis.winner.R21
        z = lis.winner.R31
        r = beast.cvar.MAXFOV / 2

        SQ_RESULTS.kdsearch(x, y, z, r, beast.cvar.THRESH_FACTOR * beast.cvar.IMAGE_VARIANCE)
        # Estimate density for constellation generation
        C_DB.results.kdsearch(x, y, z, r, beast.cvar.THRESH_FACTOR * beast.cvar.IMAGE_VARIANCE)
        fov_stars = SQ_RESULTS.from_kdresults()
        fov_db = beast.constellation_db(fov_stars, C_DB.results.r_size(), 1)
        C_DB.results.clear_kdresults()
        SQ_RESULTS.clear_kdresults()

        img_const = beast.constellation_db(img_stars, beast.cvar.MAX_FALSE_STARS + 2, 1)
        near = beast.db_match(fov_db, img_const)

        if near.p_match > P_MATCH_THRESH:
            match = near

    # Print solution -- for reference:
    # - DEC - rotation about the y-axis
    # - RA - rotation about the z-axis
    # - ORIENTATION - rotation about the camera axis
    if match is not None:
        match.winner.print_ori()
    else:
        connection.send("\nImage could not be processed; no match found\n")
        return

    # Calculate how long it took to process
    connection.send("\nTime: " + str(time() - starttime) + "\n\n")

    # Grab the latest result from file
    fields = open("last_results.txt").read().splitlines()

    # Annotate image
    img = cv2.resize(img, (1280, 960))
    font = cv2.FONT_HERSHEY_SIMPLEX
    fontScale = 0.75
    fontColor = (255, 255, 255)
    lineType = 2
    cv2.putText(img, fields[0], (25, 50), font, fontScale, fontColor, lineType)
    cv2.putText(img, fields[1], (25, 85), font, fontScale, fontColor, lineType)
    cv2.putText(img, fields[2], (25, 120), font, fontScale, fontColor, lineType)

    # Show image
    window_name = "Result - " + filepath
    cv2.namedWindow(window_name)
    cv2.moveWindow(window_name, 0, 0)
    cv2.imshow(window_name, img)
    cv2.waitKey(3000)
    cv2.destroyAllWindows()
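# --- Hypothetical usage sketch (illustration only, not part of the original script) ---
# solve_image() only needs an object with a send() method for its status text, so a
# minimal stand-in works when running it outside the normal server loop. The class
# name and sample path below are assumptions for illustration.
#
#     class PrintConnection:
#         def send(self, msg):
#             print(msg, end="")
#
#     solve_image("samples/test_frame.png", PrintConnection())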