def __init__(self, imagefile, median_image):
    b_conf = [time(), beast.cvar.PIXSCALE, beast.cvar.BASE_FLUX]
    self.img_stars = beast.star_db()
    self.img_data = []
    self.match = None
    self.db_stars = None
    self.match_from_lm = None
    self.db_stars_from_lm = None
    # Placeholders so that these don't get garbage collected by SWIG
    self.fov_db = None
    self.const_from_lm = None
    # TODO: improve memory efficiency
    if "://" in imagefile:
        from urllib.request import urlopen
        img = cv2.imdecode(
            np.asarray(bytearray(urlopen(imagefile).read()), dtype="uint8"),
            cv2.IMREAD_COLOR)
    else:
        img = cv2.imread(imagefile)
    if img is None:
        print("Invalid image, using blank dummy image", file=sys.stderr)
        img = median_image
    # Subtract the median (background) image and clip to the valid 8-bit range
    img = np.clip(img.astype(np.int16) - median_image, a_min=0,
                  a_max=255).astype(np.uint8)
    img_grey = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Removes areas of the image that don't meet our brightness threshold
    ret, thresh = cv2.threshold(
        img_grey, beast.cvar.THRESH_FACTOR * beast.cvar.IMAGE_VARIANCE, 255,
        cv2.THRESH_BINARY)
    # mode=1 is cv2.RETR_LIST, method=2 is cv2.CHAIN_APPROX_SIMPLE
    contours, hierarchy = cv2.findContours(thresh, 1, 2)
    for c in contours:
        M = cv2.moments(c)
        if M['m00'] > 0:
            # this is how the x and y position are defined by cv2
            cx = M['m10'] / M['m00']
            cy = M['m01'] / M['m00']
            # see https://alyssaq.github.io/2015/computing-the-axes-or-orientation-of-a-blob/
            # for how to convert these into eigenvectors/values
            u20 = M["m20"] / M["m00"] - cx**2
            u02 = M["m02"] / M["m00"] - cy**2
            u11 = M["m11"] / M["m00"] - cx * cy
            # The center pixel is used as the approximation of the brightest pixel
            self.img_stars += beast.star(
                cx - beast.cvar.IMG_X / 2.0, cy - beast.cvar.IMG_Y / 2.0,
                float(cv2.getRectSubPix(img_grey, (1, 1), (cx, cy))[0, 0]), -1)
            self.img_data.append(
                b_conf + [cx, cy, u20, u02, u11] +
                cv2.getRectSubPix(img, (1, 1), (cx, cy))[0, 0].tolist())
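# The second-order central moments u20, u02, u11 gathered above form the
# blob's 2x2 covariance matrix, and the blog post linked in the comments
# shows how its eigenvalues/eigenvectors give the blob's elongation and
# orientation. A minimal sketch of that conversion -- `blob_axes` is a
# hypothetical helper for illustration, not part of this module:
def blob_axes(u20, u02, u11):
    """Recover a blob's principal axes from its second-order central
    moments (hypothetical helper; assumes numpy is imported as np)."""
    cov = np.array([[u20, u11],
                    [u11, u02]])
    eigvals, eigvecs = np.linalg.eigh(cov)  # eigenvalues in ascending order
    # Standard deviations along the minor and major axes of the blob
    minor, major = np.sqrt(np.maximum(eigvals, 0.0))
    # Orientation of the major axis, in radians from the image x-axis
    angle = np.arctan2(eigvecs[1, 1], eigvecs[0, 1])
    return major, minor, angle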
def solve(self, orig_img):
    if DEBUG_ENABLED:
        global STACK_LAYERS
        print(f"{STACK_LAYERS * ' '}Inside StarTracker.solve")
        STACK_LAYERS += 1
    # Keep track of solution time
    starttime = time.time()
    # Create and initialize variables
    img_stars = beast.star_db()
    match = None
    fov_db = None
    # Process the image for solving
    img = np.clip(orig_img.astype(np.int16) - self.MEDIAN_IMAGE, a_min=0,
                  a_max=255).astype(np.uint8)
    img_grey = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Remove areas of the image that don't meet our brightness threshold
    # and then extract contours
    ret, thresh = cv2.threshold(
        img_grey, beast.cvar.THRESH_FACTOR * beast.cvar.IMAGE_VARIANCE, 255,
        cv2.THRESH_BINARY)
    contours, hierarchy = cv2.findContours(thresh, 1, 2)
    # Process the contours
    for c in contours:
        M = cv2.moments(c)
        if M["m00"] > 0:
            # this is how the x and y position are defined by cv2
            cx = M["m10"] / M["m00"]
            cy = M["m01"] / M["m00"]
            # see https://alyssaq.github.io/2015/computing-the-axes-or-orientation-of-a-blob/
            # for how to convert these into eigenvectors/values
            u20 = M["m20"] / M["m00"] - cx**2
            u02 = M["m02"] / M["m00"] - cy**2
            u11 = M["m11"] / M["m00"] - cx * cy
            # The center pixel is used as the approximation of the brightest pixel
            img_stars += beast.star(
                cx - beast.cvar.IMG_X / 2.0, cy - beast.cvar.IMG_Y / 2.0,
                float(cv2.getRectSubPix(img_grey, (1, 1), (cx, cy))[0, 0]), -1)
    # We only want to use the brightest MAX_FALSE_STARS + REQUIRED_STARS
    img_stars_n_brightest = img_stars.copy_n_brightest(
        beast.cvar.MAX_FALSE_STARS + beast.cvar.REQUIRED_STARS)
    img_const_n_brightest = beast.constellation_db(
        img_stars_n_brightest, beast.cvar.MAX_FALSE_STARS + 2, 1)
    lis = beast.db_match(self.C_DB, img_const_n_brightest)
    # Generate the match
    if (lis.p_match > self.P_MATCH_THRESH and
            lis.winner.size() >= beast.cvar.REQUIRED_STARS):
        x = lis.winner.R11
        y = lis.winner.R21
        z = lis.winner.R31
        r = beast.cvar.MAXFOV / 2
        self.SQ_RESULTS.kdsearch(
            x, y, z, r, beast.cvar.THRESH_FACTOR * beast.cvar.IMAGE_VARIANCE)
        # Estimate density for constellation generation
        self.C_DB.results.kdsearch(
            x, y, z, r, beast.cvar.THRESH_FACTOR * beast.cvar.IMAGE_VARIANCE)
        fov_stars = self.SQ_RESULTS.from_kdresults()
        fov_db = beast.constellation_db(fov_stars,
                                        self.C_DB.results.r_size(), 1)
        self.C_DB.results.clear_kdresults()
        self.SQ_RESULTS.clear_kdresults()
        img_const = beast.constellation_db(img_stars,
                                           beast.cvar.MAX_FALSE_STARS + 2, 1)
        near = beast.db_match(fov_db, img_const)
        if near.p_match > self.P_MATCH_THRESH:
            match = near
    # Get solution -- for reference:
    # - dec - rotation about the y-axis
    # - ra - rotation about the z-axis
    # - ori - rotation about the camera axis
    if match is not None:
        match.winner.calc_ori()
        dec = match.winner.get_dec()
        ra = match.winner.get_ra()
        ori = match.winner.get_ori()
    else:
        dec, ra, ori = 0.0, 0.0, 0.0
    # Calculate how long it took to process
    runtime = time.time() - starttime
    if DEBUG_ENABLED:
        STACK_LAYERS -= 1
        print(f"{STACK_LAYERS * ' '}End StarTracker.solve")
    # Return solution
    return dec, ra, ori, runtime
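# The angles returned by solve() follow the convention noted above: dec and
# ra fix the camera boresight on the celestial sphere, and ori is the roll
# about that axis. A minimal sketch of the standard spherical-to-Cartesian
# conversion for the pointing direction -- this assumes the angles come back
# in degrees, and `boresight_vector` is a hypothetical helper:
def boresight_vector(dec_deg, ra_deg):
    """Unit pointing vector for the camera axis (hypothetical helper;
    the roll angle ori does not change the boresight direction)."""
    dec, ra = np.radians(dec_deg), np.radians(ra_deg)
    return np.array([np.cos(dec) * np.cos(ra),
                     np.cos(dec) * np.sin(ra),
                     np.sin(dec)])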
def solve_image(filepath, connection):
    # Keep track of solution time
    starttime = time()
    # Create and initialize variables
    img_stars = beast.star_db()
    match = None
    fov_db = None
    # Start output for iteration
    connection.send(filepath)
    print(filepath)
    # Load the image
    orig_img = cv2.imread(filepath)
    if orig_img is None:
        connection.send("\nInvalid filepath\n\n")
        return
    # Check the image to see if it is fit for processing
    result = check_image(orig_img, connection)
    if result == 0:
        connection.send("\nTime: " + str(time() - starttime) + "\n\n")
        return
    # Process the image for solving
    img = np.clip(orig_img.astype(np.int16) - MEDIAN_IMAGE, a_min=0,
                  a_max=255).astype(np.uint8)
    img_grey = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Remove areas of the image that don't meet our brightness threshold
    # and then extract contours
    ret, thresh = cv2.threshold(
        img_grey, beast.cvar.THRESH_FACTOR * beast.cvar.IMAGE_VARIANCE, 255,
        cv2.THRESH_BINARY)
    contours, hierarchy = cv2.findContours(thresh, 1, 2)
    # Process the contours
    for c in contours:
        M = cv2.moments(c)
        if M['m00'] > 0:
            # this is how the x and y position are defined by cv2
            cx = M['m10'] / M['m00']
            cy = M['m01'] / M['m00']
            # see https://alyssaq.github.io/2015/computing-the-axes-or-orientation-of-a-blob/
            # for how to convert these into eigenvectors/values
            u20 = M["m20"] / M["m00"] - cx**2
            u02 = M["m02"] / M["m00"] - cy**2
            u11 = M["m11"] / M["m00"] - cx * cy
            # The center pixel is used as the approximation of the brightest pixel
            img_stars += beast.star(
                cx - beast.cvar.IMG_X / 2.0, cy - beast.cvar.IMG_Y / 2.0,
                float(cv2.getRectSubPix(img_grey, (1, 1), (cx, cy))[0, 0]), -1)
    # For the first pass, we only want to use the brightest
    # MAX_FALSE_STARS + REQUIRED_STARS
    img_stars_n_brightest = img_stars.copy_n_brightest(
        beast.cvar.MAX_FALSE_STARS + beast.cvar.REQUIRED_STARS)
    img_const_n_brightest = beast.constellation_db(
        img_stars_n_brightest, beast.cvar.MAX_FALSE_STARS + 2, 1)
    lis = beast.db_match(C_DB, img_const_n_brightest)
    # Generate the match
    if (lis.p_match > P_MATCH_THRESH and
            lis.winner.size() >= beast.cvar.REQUIRED_STARS):
        x = lis.winner.R11
        y = lis.winner.R21
        z = lis.winner.R31
        r = beast.cvar.MAXFOV / 2
        SQ_RESULTS.kdsearch(
            x, y, z, r, beast.cvar.THRESH_FACTOR * beast.cvar.IMAGE_VARIANCE)
        # Estimate density for constellation generation
        C_DB.results.kdsearch(
            x, y, z, r, beast.cvar.THRESH_FACTOR * beast.cvar.IMAGE_VARIANCE)
        fov_stars = SQ_RESULTS.from_kdresults()
        fov_db = beast.constellation_db(fov_stars, C_DB.results.r_size(), 1)
        C_DB.results.clear_kdresults()
        SQ_RESULTS.clear_kdresults()
        img_const = beast.constellation_db(img_stars,
                                           beast.cvar.MAX_FALSE_STARS + 2, 1)
        near = beast.db_match(fov_db, img_const)
        if near.p_match > P_MATCH_THRESH:
            match = near
    # Print solution -- for reference:
    # - DEC - rotation about the y-axis
    # - RA - rotation about the z-axis
    # - ORIENTATION - rotation about the camera axis
    if match is not None:
        match.winner.print_ori()
    else:
        connection.send("\nImage could not be processed; no match found\n")
        return
    # Calculate how long it took to process
    connection.send("\nTime: " + str(time() - starttime) + "\n\n")
    # Grab latest result from file
    with open("last_results.txt") as f:
        fields = f.read().splitlines()
    # Annotate image
    img = cv2.resize(img, (1280, 960))
    font = cv2.FONT_HERSHEY_SIMPLEX
    fontScale = 0.75
    fontColor = (255, 255, 255)
    thickness = 2  # passed as the thickness argument of cv2.putText
    cv2.putText(img, fields[0], (25, 50), font, fontScale, fontColor, thickness)
    cv2.putText(img, fields[1], (25, 85), font, fontScale, fontColor, thickness)
    cv2.putText(img, fields[2], (25, 120), font, fontScale, fontColor, thickness)
    # Show image
    window_name = "Result - " + filepath
    cv2.namedWindow(window_name)
    cv2.moveWindow(window_name, 0, 0)
    cv2.imshow(window_name, img)
    cv2.waitKey(3000)
    cv2.destroyAllWindows()
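# solve_image() reports its progress through the `connection` argument
# rather than a return value, so one end of a multiprocessing.Pipe can act
# as the sink while results are read from the other end. A minimal usage
# sketch -- the file path below is an illustrative placeholder:
if __name__ == "__main__":
    import multiprocessing
    parent_conn, child_conn = multiprocessing.Pipe()
    solve_image("samples/img_0001.png", child_conn)  # hypothetical path
    while parent_conn.poll():
        print(parent_conn.recv(), end="")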