def startup(self, median_path, config_path, db_path, sample_dir=None):
    """Initialize the star tracker: load the median image, config, and star
    catalog, then build the filtered star and constellation databases.

    median_path -- path to the median (background) image read via cv2
    config_path -- beast configuration file
    db_path     -- star catalog file (loaded for self.YEAR)
    sample_dir  -- optional directory of sample images, stored as-is
    """
    if DEBUG_ENABLED:
        global STACK_LAYERS
        print(f"{STACK_LAYERS * ' '}Inside StarTracker.startup")
        STACK_LAYERS += 1

    logger.info("Beginning startup sequence...")

    # Remember where sample imagery lives (may be None).
    self.SAMPLE_DIR = sample_dir

    # Background image subtracted from every frame before star extraction.
    self.MEDIAN_IMAGE = cv2.imread(median_path)
    logger.info("Loaded median image from {}".format(median_path))

    # Configuration must be loaded before any catalog work.
    beast.load_config(config_path)
    logger.info("Loaded configuration from {}".format(config_path))

    # Full star catalog, propagated to the configured epoch year.
    self.S_DB = beast.star_db()
    self.S_DB.load_catalog(db_path, self.YEAR)
    logger.info("Loaded star database from {}".format(db_path))

    # Filter the catalog down to a uniform-density set of usable stars.
    self.SQ_RESULTS = beast.star_query(self.S_DB)
    self.SQ_RESULTS.kdmask_filter_catalog()
    self.SQ_RESULTS.kdmask_uniform_density(beast.cvar.REQUIRED_STARS)
    self.S_FILTERED = self.SQ_RESULTS.from_kdmask()
    logger.info("Filtered stars")

    # Constellation database built from the filtered stars.
    self.C_DB = beast.constellation_db(self.S_FILTERED,
                                       2 + beast.cvar.DB_REDUNDANCY, 0)
    logger.info("Startup sequence complete!")

    if DEBUG_ENABLED:
        STACK_LAYERS -= 1
        print(f"{STACK_LAYERS * ' '}End StarTracker.startup")
def __init__(self, imagefile, median_image):
    """Load an image (local path or URL), subtract the median background,
    and extract star centroids plus blob statistics from it.

    imagefile    -- local path, or a URL containing "://"
    median_image -- background image (same shape) subtracted before thresholding
    """
    b_conf = [time(), beast.cvar.PIXSCALE, beast.cvar.BASE_FLUX]
    self.img_stars = beast.star_db()
    self.img_data = []
    self.match = None
    self.db_stars = None
    self.match_from_lm = None
    self.db_stars_from_lm = None
    # Placeholders so that these don't get garbage collected by SWIG
    self.fov_db = None
    self.const_from_lm = None
    # TODO: improve memory efficiency
    if "://" in imagefile:
        # FIX: urllib.urlopen was Python 2 only; Python 3 moved it to
        # urllib.request.urlopen.
        from urllib.request import urlopen
        img = cv2.imdecode(
            np.asarray(bytearray(urlopen(imagefile).read()), dtype="uint8"),
            cv2.IMREAD_COLOR)
    else:
        img = cv2.imread(imagefile)
    if img is None:
        print("Invalid image, using blank dummy image", file=sys.stderr)
        img = median_image
    # Background-subtract and clamp to uint8 range.
    img = np.clip(img.astype(np.int16) - median_image,
                  a_min=0, a_max=255).astype(np.uint8)
    img_grey = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Removes areas of the image that don't meet our brightness threshold.
    ret, thresh = cv2.threshold(
        img_grey, beast.cvar.THRESH_FACTOR * beast.cvar.IMAGE_VARIANCE, 255,
        cv2.THRESH_BINARY)
    contours, hierarchy = cv2.findContours(thresh, 1, 2)
    for c in contours:
        M = cv2.moments(c)
        if M['m00'] > 0:
            # This is how the x and y position are defined by cv2.
            cx = M['m10'] / M['m00']
            cy = M['m01'] / M['m00']
            # See https://alyssaq.github.io/2015/computing-the-axes-or-orientation-of-a-blob/
            # for how to convert these into eigenvectors/values.
            u20 = M["m20"] / M["m00"] - cx**2
            u02 = M["m02"] / M["m00"] - cy**2
            u11 = M["m11"] / M["m00"] - cx * cy
            # The center pixel is used as the approximation of the
            # brightest pixel.
            self.img_stars += beast.star(
                cx - beast.cvar.IMG_X / 2.0, (cy - beast.cvar.IMG_Y / 2.0),
                float(cv2.getRectSubPix(img_grey, (1, 1), (cx, cy))[0, 0]),
                -1)
            self.img_data.append(
                b_conf + [cx, cy, u20, u02, u11] +
                cv2.getRectSubPix(img, (1, 1), (cx, cy))[0, 0].tolist())
def main():
    """Main function: load catalogs, then serve an epoll-driven command
    loop on 127.0.0.1:8010, feeding the systemd watchdog."""
    args = parse_args()
    P_MATCH_THRESH = 0.99
    SIMULATE = 0
    if 'WATCHDOG_USEC' not in os.environ:
        os.environ['WATCHDOG_USEC'] = "30000000"
    CONFIGFILE = args.config
    YEAR = args.year
    MEDIAN_FILE = args.median

    # Set up server before we do anything else.
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    try:
        server.bind(('127.0.0.1', 8010))
    except OSError:
        # FIX: was a bare `except:` (also swallowed KeyboardInterrupt);
        # socket.bind failures raise OSError in Python 3.
        print("server socket already open: try terminal command: sudo kill $(sudo lsof -t -i:8010)")
        sys.exit(1)  # FIX: sys.exit instead of the site-provided exit()
    server.listen(5)
    server.setblocking(0)

    print("Loading config")
    print(CONFIGFILE)
    beast.load_config(CONFIGFILE)
    print("Loading hip_main.dat")
    S_DB = beast.star_db()
    S_DB.load_catalog("hip_main.dat", YEAR)
    print("Filtering stars")
    SQ_RESULTS = beast.star_query(S_DB)
    SQ_RESULTS.kdmask_filter_catalog()
    SQ_RESULTS.kdmask_uniform_density(beast.cvar.REQUIRED_STARS)
    S_FILTERED = SQ_RESULTS.from_kdmask()
    print("Generating DB")
    C_DB = beast.constellation_db(S_FILTERED, 2 + beast.cvar.DB_REDUNDANCY, 0)
    print("Ready")

    # Note: SWIG's policy is to garbage collect objects created with
    # constructors, but not objects created by returning from a function
    NONSTARS = {}
    NONSTAR_NEXT_ID = 0
    NONSTAR_DATAFILENAME = "/dev/null"
    NONSTAR_DATAFILE = open(NONSTAR_DATAFILENAME, "w")

    rgb = StarCamera(MEDIAN_FILE)
    # commented below out because there is not yet data for
    # IR (but once there is, we can uncomment it and add another
    # argument for the script)
    #ir = ScienceCamera(sys.argv[3])

    CONNECTIONS = {}
    epoll = select.epoll()
    epoll.register(server.fileno(), select.EPOLLIN)
    try:
        Connection(sys.stdin, epoll)
    except IOError:
        pass

    # Event loop: wake at least twice per watchdog interval so the systemd
    # watchdog is always pinged in time, then dispatch socket activity.
    daemon.notify("WATCHDOG=1")
    last_ping = time.time()
    while True:
        # systemd watchdog: poll with a timeout sized to the remaining
        # half-interval since the last ping.
        events = epoll.poll(float(os.environ['WATCHDOG_USEC']) / 2.0e6 -
                            (time.time() - last_ping))
        if len(events) == 0 or time.time() >= (
                last_ping + float(os.environ['WATCHDOG_USEC']) / 2.0e6):
            daemon.notify("WATCHDOG=1")
            last_ping = time.time()
        for fd, event_type in events:
            # Activity on the master socket means a new connection.
            if fd == server.fileno():
                conn, addr = server.accept()
                Connection(conn, epoll)
            elif fd in CONNECTIONS:
                w = CONNECTIONS[fd]
                data = w.read()
                print(data.decode(encoding='UTF-8'), file=sys.stderr)
                if len(data) > 0:
                    if sys.version_info[0] > 2:
                        stdout_redir = StringIO()
                    else:
                        stdout_redir = BytesIO()
                    stdout_old = sys.stdout
                    sys.stdout = stdout_redir
                    try:
                        # SECURITY: exec() of raw socket data runs arbitrary
                        # code; this service must only ever listen on
                        # localhost (see bind above).
                        exec(data)
                    except SystemExit:
                        w.close()
                        raise
                    except Exception:
                        # FIX: was bare `except:`; narrowed so Ctrl-C and
                        # friends propagate.
                        traceback.print_exc(file=sys.stdout)
                    finally:
                        # FIX: always restore stdout (was skipped on the
                        # SystemExit path).
                        sys.stdout = stdout_old
                    data_out = stdout_redir.getvalue()
                    print(data_out, file=sys.stderr)
                    w.write(data_out)
                else:
                    w.close()
# Script-level setup: parse CLI arguments, load the beast config and the
# Hipparcos catalog, then build the filtered star / constellation databases.
# (`parser` is created earlier in the file, above this chunk.)
parser.add_argument("year", help="The year?", type=float)
parser.add_argument("medianImage", help="A median image?")
parser.add_argument("solveImage", help="The image to solve")
args = parser.parse_args()
configFileName = args.configFile
year = args.year
medianImage = args.medianImage
solveImage = args.solveImage
print("Loading config file {}".format(configFileName))
beast.load_config(configFileName)
# FIX: message said "him_main.dat" but the code loads hip_main.dat.
print("Loading hip_main.dat for year {}".format(year))
S_DB = beast.star_db()
S_DB.load_catalog("hip_main.dat", year)
print("S_DB type: ", type(S_DB))
print("Filtering stars")
SQ_RESULTS = beast.star_query(S_DB)
SQ_RESULTS.kdmask_filter_catalog()
SQ_RESULTS.kdmask_uniform_density(beast.cvar.REQUIRED_STARS)
S_FILTERED = SQ_RESULTS.from_kdmask()
print("SQ_RESULTS type: ", type(SQ_RESULTS))
print("S_FILTERED: ", type(S_FILTERED))
print("Generating DB")
C_DB = beast.constellation_db(S_FILTERED, 2 + beast.cvar.DB_REDUNDANCY, 0)
def solve(self, orig_img):
    """Solve a single image against the loaded star databases.

    orig_img -- raw BGR frame (numpy array) as read by cv2

    Returns (dec, ra, ori, runtime):
      dec -- rotation about the y-axis
      ra  -- rotation about the z-axis
      ori -- rotation about the camera axis
      runtime -- solve duration in seconds
    All angles are 0.0 when no match is found.
    """
    if DEBUG_ENABLED:
        global STACK_LAYERS
        print(f"{STACK_LAYERS * ' '}Inside StarTracker.solve")
        STACK_LAYERS = STACK_LAYERS + 1

    # Keep track of solution time
    starttime = time.time()

    # Create and initialize variables
    img_stars = beast.star_db()
    match = None
    fov_db = None

    # Process the image for solving: subtract background, clamp to uint8.
    img = np.clip(orig_img.astype(np.int16) - self.MEDIAN_IMAGE,
                  a_min=0, a_max=255).astype(np.uint8)
    img_grey = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)

    # Remove areas of the image that don't meet our brightness threshold
    # and then extract contours
    ret, thresh = cv2.threshold(
        img_grey, beast.cvar.THRESH_FACTOR * beast.cvar.IMAGE_VARIANCE, 255,
        cv2.THRESH_BINARY)
    contours, hierarchy = cv2.findContours(thresh, 1, 2)

    # Process the contours: one candidate star per bright blob.
    for c in contours:
        M = cv2.moments(c)
        if M["m00"] > 0:
            # this is how the x and y position are defined by cv2
            cx = M["m10"] / M["m00"]
            cy = M["m01"] / M["m00"]
            # see https://alyssaq.github.io/2015/computing-the-axes-or-orientation-of-a-blob/
            # for how to convert these into eigenvectors/values
            u20 = M["m20"] / M["m00"] - cx**2
            u02 = M["m02"] / M["m00"] - cy**2
            u11 = M["m11"] / M["m00"] - cx * cy
            # The center pixel is used as the approximation of the
            # brightest pixel
            img_stars += beast.star(
                cx - beast.cvar.IMG_X / 2.0, cy - beast.cvar.IMG_Y / 2.0,
                float(cv2.getRectSubPix(img_grey, (1, 1), (cx, cy))[0, 0]),
                -1)

    # We only want to use the brightest MAX_FALSE_STARS + REQUIRED_STARS
    img_stars_n_brightest = img_stars.copy_n_brightest(
        beast.cvar.MAX_FALSE_STARS + beast.cvar.REQUIRED_STARS)
    img_const_n_brightest = beast.constellation_db(
        img_stars_n_brightest, beast.cvar.MAX_FALSE_STARS + 2, 1)
    lis = beast.db_match(self.C_DB, img_const_n_brightest)

    # Generate the match: refine the coarse match against a field-of-view
    # database built around the candidate attitude.
    if lis.p_match > self.P_MATCH_THRESH and lis.winner.size(
    ) >= beast.cvar.REQUIRED_STARS:
        x = lis.winner.R11
        y = lis.winner.R21
        z = lis.winner.R31
        r = beast.cvar.MAXFOV / 2
        self.SQ_RESULTS.kdsearch(
            x, y, z, r, beast.cvar.THRESH_FACTOR * beast.cvar.IMAGE_VARIANCE)
        # Estimate density for constellation generation
        self.C_DB.results.kdsearch(
            x, y, z, r, beast.cvar.THRESH_FACTOR * beast.cvar.IMAGE_VARIANCE)
        fov_stars = self.SQ_RESULTS.from_kdresults()
        fov_db = beast.constellation_db(fov_stars,
                                        self.C_DB.results.r_size(), 1)
        self.C_DB.results.clear_kdresults()
        self.SQ_RESULTS.clear_kdresults()
        img_const = beast.constellation_db(img_stars,
                                           beast.cvar.MAX_FALSE_STARS + 2, 1)
        near = beast.db_match(fov_db, img_const)
        if near.p_match > self.P_MATCH_THRESH:
            match = near

    # Get solution -- for reference:
    # - dec - rotation about the y-axis
    # - ra - rotation about the z-axis
    # - ori - rotation about the camera axis
    if match is not None:
        match.winner.calc_ori()
        dec = match.winner.get_dec()
        ra = match.winner.get_ra()
        ori = match.winner.get_ori()
    else:
        dec, ra, ori = 0.0, 0.0, 0.0

    # Calculate how long it took to process
    runtime = time.time() - starttime

    if DEBUG_ENABLED:
        STACK_LAYERS = STACK_LAYERS - 1
        print(f"{STACK_LAYERS * ' '}End StarTracker.solve")

    # FIX: was `return dec, ra, ori, time`, which returned the `time`
    # module object instead of the computed runtime.
    return dec, ra, ori, runtime
def solve_image(filepath, connection):
    """Solve the image at `filepath`, report progress over `connection`,
    and display an annotated result window for 3 seconds.

    Uses module-level globals: MEDIAN_IMAGE, C_DB, SQ_RESULTS,
    P_MATCH_THRESH, and the helper check_image().
    """
    # Keep track of solution time
    starttime = time()

    # Create and initialize variables
    img_stars = beast.star_db()
    match = None
    fov_db = None

    # Start output for iteration
    connection.send(filepath)
    print(filepath)

    # Load the image
    orig_img = cv2.imread(filepath)
    if orig_img is None:  # FIX: was `type(orig_img) == type(None)`
        connection.send("\nInvalid filepath\n\n")
        return

    # Check the image to see if it is fit for processing
    result = check_image(orig_img, connection)
    if result == 0:
        connection.send("\nTime: " + str(time() - starttime) + "\n\n")
        return

    # Process the image for solving: subtract background, clamp to uint8.
    img = np.clip(orig_img.astype(np.int16) - MEDIAN_IMAGE,
                  a_min=0, a_max=255).astype(np.uint8)
    img_grey = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)

    # Remove areas of the image that don't meet our brightness threshold
    # and then extract contours
    ret, thresh = cv2.threshold(img_grey,
                                beast.cvar.THRESH_FACTOR * beast.cvar.IMAGE_VARIANCE,
                                255, cv2.THRESH_BINARY)
    # FIX: OpenCV >= 4 returns (contours, hierarchy); the old 3-value
    # unpack (OpenCV 3) raised ValueError and disagreed with the other
    # findContours call sites in this file.
    contours, hierarchy = cv2.findContours(thresh, 1, 2)

    # Process the contours: one candidate star per bright blob.
    for c in contours:
        M = cv2.moments(c)
        if M['m00'] > 0:
            # this is how the x and y position are defined by cv2
            cx = M['m10'] / M['m00']
            cy = M['m01'] / M['m00']
            # see https://alyssaq.github.io/2015/computing-the-axes-or-orientation-of-a-blob/
            # for how to convert these into eigenvectors/values
            u20 = M["m20"] / M["m00"] - cx ** 2
            u02 = M["m02"] / M["m00"] - cy ** 2
            u11 = M["m11"] / M["m00"] - cx * cy
            # The center pixel is used as the approximation of the
            # brightest pixel
            img_stars += beast.star(cx - beast.cvar.IMG_X / 2.0,
                                    (cy - beast.cvar.IMG_Y / 2.0),
                                    float(cv2.getRectSubPix(img_grey, (1, 1), (cx, cy))[0, 0]),
                                    -1)

    # For the first pass, we only want to use the brightest
    # MAX_FALSE_STARS + REQUIRED_STARS
    img_stars_n_brightest = img_stars.copy_n_brightest(
        beast.cvar.MAX_FALSE_STARS + beast.cvar.REQUIRED_STARS)
    img_const_n_brightest = beast.constellation_db(
        img_stars_n_brightest, beast.cvar.MAX_FALSE_STARS + 2, 1)
    lis = beast.db_match(C_DB, img_const_n_brightest)

    # Generate the match: refine against a field-of-view database built
    # around the candidate attitude.
    if lis.p_match > P_MATCH_THRESH and lis.winner.size() >= beast.cvar.REQUIRED_STARS:
        x = lis.winner.R11
        y = lis.winner.R21
        z = lis.winner.R31
        r = beast.cvar.MAXFOV / 2
        SQ_RESULTS.kdsearch(x, y, z, r,
                            beast.cvar.THRESH_FACTOR * beast.cvar.IMAGE_VARIANCE)
        # estimate density for constellation generation
        C_DB.results.kdsearch(x, y, z, r,
                              beast.cvar.THRESH_FACTOR * beast.cvar.IMAGE_VARIANCE)
        fov_stars = SQ_RESULTS.from_kdresults()
        fov_db = beast.constellation_db(fov_stars, C_DB.results.r_size(), 1)
        C_DB.results.clear_kdresults()
        SQ_RESULTS.clear_kdresults()
        img_const = beast.constellation_db(img_stars,
                                           beast.cvar.MAX_FALSE_STARS + 2, 1)
        near = beast.db_match(fov_db, img_const)
        if near.p_match > P_MATCH_THRESH:
            match = near

    # Print solution
    if match is not None:
        match.winner.print_ori()
        # For reference:
        # - DEC - rotation about the y-axis
        # - RA - rotation about the z-axis
        # - ORIENTATION - rotation about the camera axis
    else:
        connection.send("\nImage could not be processed; no match found\n")
        return

    # Calculate how long it took to process
    connection.send("\nTime: " + str(time() - starttime) + "\n\n")

    # Grab latest result from file (FIX: close the file handle via `with`)
    with open("last_results.txt") as results_file:
        fields = results_file.read().splitlines()

    # Annotate image
    img = cv2.resize(img, (1280, 960))
    font = cv2.FONT_HERSHEY_SIMPLEX
    fontScale = 0.75
    fontColor = (255, 255, 255)
    lineType = 2
    cv2.putText(img, fields[0], (25, 50), font, fontScale, fontColor, lineType)
    cv2.putText(img, fields[1], (25, 85), font, fontScale, fontColor, lineType)
    cv2.putText(img, fields[2], (25, 120), font, fontScale, fontColor, lineType)

    # Show image
    window_name = "Result - " + filepath
    cv2.namedWindow(window_name)
    cv2.moveWindow(window_name, 0, 0)
    cv2.imshow(window_name, img)
    cv2.waitKey(3000)
    cv2.destroyAllWindows()