def current_observation(self, new_observation):
    """Set the active observation, recording it in the observed list.

    A newly-seen observation is stamped with a flattened ``seq_time`` and
    stored in ``self.observed_list``; a cleared or replaced observation is
    reset first. Re-assigning the same-named observation is a no-op apart
    from the log message.
    """
    previous = self.current_observation

    if previous is None:
        # Nothing in progress: stamp and record the new observation, if any.
        if new_observation is not None:
            new_observation.seq_time = current_time(flatten=True)
            self.observed_list[new_observation.seq_time] = new_observation
    elif new_observation is None:
        # Clearing the current observation.
        previous.reset()
    elif previous.name != new_observation.name:
        # Switching targets: reset the old one, stamp and record the new.
        previous.reset()
        new_observation.seq_time = current_time(flatten=True)
        self.observed_list[new_observation.seq_time] = new_observation

    self.logger.info("Setting new observation to {}".format(new_observation))
    self._current_observation = new_observation
def move_direction(self, direction='north', seconds=1.0):
    """ Move mount in specified `direction` for given amount of `seconds`

    """
    seconds = float(seconds)
    assert direction in ['north', 'south', 'east', 'west']

    cmd = 'move_{}'.format(direction)
    self.logger.debug("Move command: {}".format(cmd))

    try:
        start = current_time()
        self.logger.debug("Moving {} for {} seconds. ".format(direction, seconds))
        self.query(cmd)

        time.sleep(seconds)

        elapsed = (current_time() - start).sec
        self.logger.debug("{} seconds passed before stop".format(elapsed))
        self.query('stop_moving')
        self.logger.debug("{} seconds passed total".format((current_time() - start).sec))
    except KeyboardInterrupt:
        self.logger.warning("Keyboard interrupt, stopping movement.")
    except Exception as e:
        self.logger.warning(
            "Problem moving command!! Make sure mount has stopped moving: {}".format(e))
    finally:
        # Note: We do this twice. That's fine.
        self.logger.debug("Stopping movement")
        self.query('stop_moving')
def current_observation(self, new_observation):
    # Setter for the active observation. New observations get a `seq_time`
    # stamp and are tracked in `observed_list`; an outgoing observation is
    # reset before being replaced or cleared.
    if self.current_observation is None:
        # If we have no current observation but do have a new one, set seq_time
        # and add to the list
        if new_observation is not None:
            # Set the new seq_time for the observation
            new_observation.seq_time = current_time(flatten=True)

            # Add the new observation to the list
            self.observed_list[new_observation.seq_time] = new_observation
    else:
        # If no new observation, simply reset the current
        if new_observation is None:
            self.current_observation.reset()
        else:
            # If we have a new observation, check if same as old observation
            # (same name means we keep the existing seq_time and list entry).
            if self.current_observation.name != new_observation.name:
                self.current_observation.reset()
                new_observation.seq_time = current_time(flatten=True)

                # Add the new observation to the list
                self.observed_list[
                    new_observation.seq_time] = new_observation

    self.logger.info(
        "Setting new observation to {}".format(new_observation))
    self._current_observation = new_observation
def test_pretty_time():
    """Check the three output flavours of `current_time` against POCSTIME."""
    fixed = '2016-08-13 10:00:00'
    os.environ['POCSTIME'] = fixed

    pretty = current_time(pretty=True)
    assert pretty == fixed

    flat = current_time(flatten=True)
    assert flat != fixed
    assert flat == '20160813T100000'

    as_dt = current_time(datetime=True)
    assert as_dt == dt(2016, 8, 13, 10, 0, 0)
def _create_flat_field_observation(self, alt=None, az=None,
                                   dither_pattern_offset=5 * u.arcmin,
                                   dither_random_offset=0.5 * u.arcmin,
                                   n_positions=9):
    """Build a `DitheredObservation` for twilight flat fields.

    Args:
        alt: Altitude for the flats; defaults to the `flat_field.twilight.alt`
            config entry.
        az: Azimuth for the flats; defaults to the `flat_field.twilight.az`
            config entry.
        dither_pattern_offset (Quantity): Spacing of the dice-9 dither pattern.
        dither_random_offset (Quantity): Random jitter added to each position.
        n_positions (int): Number of dither positions to generate.

    Returns:
        DitheredObservation: Observation expanded to one field per position.
    """
    flat_config = self.config['flat_field']['twilight']

    # Fall back to the configured alt/az when none supplied.
    if alt is None:
        alt = flat_config['alt']
    if az is None:
        az = flat_config['az']

    # Convert the fixed alt/az to RA/Dec for right now.
    flat_coords = utils.altaz_to_radec(
        alt=alt, az=az, location=self.earth_location, obstime=utils.current_time())

    self.logger.debug("Creating dithered observation")
    field = Field('Evening Flats', flat_coords.to_string('hmsdms'))
    flat_obs = DitheredObservation(field, exp_time=1. * u.second)
    flat_obs.seq_time = utils.current_time(flatten=True)

    # NOTE(review): isinstance check is always True here (flat_obs was just
    # constructed as a DitheredObservation) — presumably defensive; confirm.
    if isinstance(flat_obs, DitheredObservation):
        dither_coords = utils.dither.get_dither_positions(
            flat_obs.field.coord,
            n_positions=n_positions,
            pattern=utils.dither.dice9,
            pattern_offset=dither_pattern_offset,
            random_offset=dither_random_offset)

        self.logger.debug(
            "Dither Coords for Flat-field: {}".format(dither_coords))

        # One named field per dither position, all sharing the same exp_time.
        fields = [
            Field('Dither{:02d}'.format(i), coord)
            for i, coord in enumerate(dither_coords)
        ]
        exp_times = [flat_obs.exp_time for coord in dither_coords]

        flat_obs.field = fields
        flat_obs.exp_time = exp_times
        flat_obs.min_nexp = len(fields)
        flat_obs.exp_set_size = len(fields)

    self.logger.debug("Flat-field observation: {}".format(flat_obs))

    return flat_obs
def _make_pretty_from_cr2(fname, timeout=15, **kwargs):
    # Shell out to `$POCS/scripts/cr2_to_jpg.sh` to produce a JPEG preview of
    # a CR2 frame. Returns the *expected* JPEG filename; the script is started
    # with Popen and not waited on, so the file may not exist yet on return.
    # NOTE(review): `timeout` is accepted but never used — confirm intent.
    verbose = kwargs.get('verbose', False)

    # Title shown on the image: optional caller title plus ISO timestamp.
    title = '{} {}'.format(kwargs.get('title', ''), current_time().isot)

    solve_field = "{}/scripts/cr2_to_jpg.sh".format(os.getenv('POCS'))

    cmd = [solve_field, fname, title]

    if kwargs.get('primary', False):
        # 'link' asks the script to also link this as the latest image.
        cmd.append('link')

    if verbose:
        print(cmd)

    try:
        proc = subprocess.Popen(cmd, stdout=subprocess.DEVNULL,
                                stderr=subprocess.DEVNULL)

        if verbose:
            print(proc)
    except OSError as e:
        # NOTE(review): these messages mention gphoto2 / plate solving, but
        # the command being run is cr2_to_jpg.sh — they look copy-pasted
        # from other subprocess wrappers; confirm and reword.
        raise error.InvalidCommand("Can't send command to gphoto2."
                                   " {} \t {}".format(e, cmd))
    except ValueError as e:
        raise error.InvalidCommand("Bad parameters to gphoto2."
                                   " {} \t {}".format(e, cmd))
    except Exception as e:
        raise error.PanError("Timeout on plate solving: {}".format(e))

    return fname.replace('cr2', 'jpg')
def _make_pretty_from_fits(fname, timeout=15, **kwargs):
    """Render a FITS frame to a JPEG and re-point the 'latest' symlink at it."""
    config = load_config()

    plot_title = '{} {}'.format(kwargs.get('title', ''), current_time().isot)
    jpg_fname = fname.replace('.fits', '.jpg')

    # Plot the image data and save next to the FITS file.
    image_data = fits.getdata(fname)
    plt.imshow(image_data, cmap='cubehelix_r', origin='lower')
    plt.title(plot_title)
    plt.savefig(jpg_fname)

    # Replace the "latest" link; removal may fail harmlessly if absent.
    latest_link = '{}/latest.jpg'.format(config['directories']['images'])

    try:
        os.remove(latest_link)
    except FileNotFoundError:
        pass

    try:
        os.symlink(jpg_fname, latest_link)
    except Exception as e:
        warn("Can't link latest image: {}".format(e))

    return jpg_fname
def observe(self):
    """Take individual images for the current observation

    This method gets the current observation and takes the next
    corresponding exposure.

    """
    # Observatory-wide metadata, shared by every camera in this set;
    # all cameras get the same start time.
    headers = self.get_standard_headers()
    headers['start_time'] = current_time(flatten=True)

    # Per-camera events that signal when each exposure has been processed.
    camera_events = dict()

    for cam_name, camera in self.cameras.items():
        self.logger.debug("Exposing for camera: {}".format(cam_name))

        try:
            camera_events[cam_name] = camera.take_observation(
                self.current_observation, headers)
        except Exception as e:
            self.logger.error("Problem waiting for images: {}".format(e))

    return camera_events
def send_message(self, channel, message):
    """ Responsible for actually sending message across a channel

    Args:
        channel(str): Name of channel to send on.
        message(str): Message to be sent.

    """
    assert channel > '', self.logger.warning("Cannot send blank channel")

    # Plain strings get wrapped with a human-readable timestamp;
    # everything else is sanitised by scrub_message.
    if isinstance(message, str):
        stamp = current_time().isot.replace('T', ' ').split('.')[0]
        message = {'message': message, 'timestamp': stamp}
    else:
        message = self.scrub_message(message)

    payload = dumps(message, skipkeys=True)
    full_message = '{} {}'.format(channel, payload)

    if channel == 'PANCHAT':
        self.logger.info("{} {}".format(channel, message['message']))

    # Send the message
    self.socket.send_string(full_message, flags=zmq.NOBLOCK)
def make_pretty_image(fname, timeout=15, verbose=False, **kwargs):
    """ Make a pretty picture

    Calls the external `$POCS/scripts/cr2_to_jpg.sh` script to produce a
    JPEG preview of the given file. The script is started with Popen and
    not waited on, so the returned filename may not exist yet.

    Args:
        fname(str, required):       Filename to solve in either .cr2 or .fits extension.
        timeout(int, optional):     Timeout for the solve-field command,
                                    defaults to 60 seconds.
        verbose(bool, optional):    Show output, defaults to False.

    Returns:
        str: Expected filename of the JPEG preview.

    Raises:
        error.InvalidCommand: If the script cannot be started or receives
            bad parameters.
        error.PanError: For any other failure while starting the script.
    """
    assert os.path.exists(fname), warnings.warn(
        "File doesn't exist, can't make pretty: {}".format(fname))

    title = '{} {}'.format(kwargs.get('title', ''), current_time().isot)

    # Bug fix: the default install path was being passed as an ignored extra
    # argument to str.format; it belongs to os.getenv as the fallback value.
    solve_field = "{}/scripts/cr2_to_jpg.sh".format(
        os.getenv('POCS', '/var/panoptes/POCS'))

    cmd = [solve_field, fname, title]

    if kwargs.get('primary', False):
        # 'link' asks the script to also link this as the latest image.
        cmd.append('link')

    if verbose:
        print(cmd)

    try:
        proc = subprocess.Popen(cmd, stdout=subprocess.DEVNULL,
                                stderr=subprocess.DEVNULL)
        if verbose:
            print(proc)
    except OSError as e:
        # Bug fix: these handlers referenced undefined `run_cmd`, raising a
        # NameError that masked the real error; the variable is `cmd`.
        raise error.InvalidCommand(
            "Can't send command to gphoto2. {} \t {}".format(e, cmd))
    except ValueError as e:
        raise error.InvalidCommand(
            "Bad parameters to gphoto2. {} \t {}".format(e, cmd))
    except Exception as e:
        raise error.PanError("Timeout on plate solving: {}".format(e))

    return fname.replace('cr2', 'jpg')
def send_message(self, channel, message):
    """ Responsible for actually sending message across a channel

    Args:
        channel(str): Name of channel to send on.
        message(str): Message to be sent.

    """
    assert channel > '', self.logger.warning("Cannot send blank channel")

    # Plain strings are wrapped in a dict with a human-readable timestamp
    # (ISO format with 'T' replaced and sub-second precision dropped);
    # anything else is passed through scrub_message for sanitising.
    if isinstance(message, str):
        message = {
            'message': message,
            'timestamp': current_time().isot.replace(
                'T',
                ' ').split('.')[0]}
    else:
        message = self.scrub_message(message)

    # Wire format: "<channel> <json-payload>".
    msg_object = dumps(message, skipkeys=True)

    full_message = '{} {}'.format(channel, msg_object)

    if channel == 'PANCHAT':
        self.logger.info("{} {}".format(channel, message['message']))

    # Send the message; NOBLOCK avoids stalling if no peer is connected.
    self.socket.send_string(full_message, flags=zmq.NOBLOCK)
def process_exposure(self, info, signal_event, exposure_process=None):
    """Processes the exposure

    Converts the CR2 to a FITS file. If the camera is a primary camera,
    extract the jpeg image and save metadata to mongo `current` collection.
    Saves metadata to mongo `observations` collection for all images

    Args:
        info (dict): Header metadata saved for the image
        signal_event (threading.Event): An event that is set signifying that the
            camera is done with this exposure
        exposure_process: Optional process-like object to wait on before
            processing begins (must expose a ``wait()`` method).
    """
    # Block until the exposure subprocess (if any) has finished.
    if exposure_process:
        exposure_process.wait()

    image_id = info['image_id']
    seq_id = info['sequence_id']
    file_path = info['file_path']

    self.logger.debug("Processing {}".format(image_id))

    # Preview extraction is best-effort; a failure must not abort processing.
    try:
        self.logger.debug("Extracting pretty image")
        images.make_pretty_image(file_path,
                                 title=image_id,
                                 primary=info['is_primary'])
    except Exception as e:
        self.logger.warning(
            'Problem with extracting pretty image: {}'.format(e))

    self.logger.debug("Converting CR2 -> FITS: {}".format(file_path))
    fits_path = images.cr2_to_fits(file_path, headers=info, remove_cr2=True)

    # Replace the path name with the FITS file
    info['file_path'] = fits_path

    if info['is_primary']:
        self.logger.debug(
            "Adding current observation to db: {}".format(image_id))
        self.db.insert_current('observations', info, include_collection=False)
    else:
        # Non-primary frames are compressed instead of tracked as "current".
        self.logger.debug('Compressing {}'.format(file_path))
        images.fpack(fits_path)

    self.logger.debug("Adding image metadata to db: {}".format(image_id))
    self.db.observations.insert_one({
        'data': info,
        'date': current_time(datetime=True),
        'type': 'observations',
        'sequence_id': seq_id,
    })

    # Mark the event as done
    signal_event.set()
def _update_status(self):
    """Return a status snapshot for the simulated mount."""
    self.logger.debug("Getting mount simulator status")

    return {
        'timestamp': current_time(),
        'tracking_rate_ra': self.tracking_rate,
        'state': self.state,
    }
def take_observation(self, observation, headers=None, **kwargs):
    """Simulate an exposure for `observation`.

    Returns a `threading.Event` that is set once the (simulated) exposure
    has been processed by `process_exposure`.
    """
    camera_event = Event()

    if headers is None:
        headers = {}

    start_time = headers.get('start_time', current_time(flatten=True))

    # The simulator always "captures" a canned, pre-solved test image.
    filename = "solved.{}".format(self.file_extension)
    file_path = "{}/pocs/tests/data/{}".format(os.getenv('POCS'), filename)

    image_id = '{}_{}_{}'.format(
        self.config['name'],
        self.uid,
        start_time
    )
    self.logger.debug("image_id: {}".format(image_id))

    sequence_id = '{}_{}_{}'.format(
        self.config['name'],
        self.uid,
        observation.seq_time
    )

    # Camera metadata
    metadata = {
        'camera_name': self.name,
        'camera_uid': self.uid,
        'field_name': observation.field.field_name,
        'file_path': file_path,
        'filter': self.filter_type,
        'image_id': image_id,
        'is_primary': self.is_primary,
        'sequence_id': sequence_id,
        'start_time': start_time,
    }
    metadata.update(headers)

    # The requested exposure time is deliberately overridden: the simulator
    # caps every exposure at 5 seconds.
    exp_time = kwargs.get('exp_time', observation.exp_time.value)
    exp_time = 5
    self.logger.debug("Trimming camera simulator exposure to 5 s")

    self.take_exposure(seconds=exp_time, filename=file_path)

    # Add most recent exposure to list
    observation.exposure_list[image_id] = file_path.replace('.cr2', '.fits')

    # Process the image after a set amount of time (exposure + readout).
    wait_time = exp_time + self.readout_time
    t = Timer(wait_time, self.process_exposure, (metadata, camera_event,))
    t.name = '{}Thread'.format(self.name)
    t.start()

    return camera_event
def status(self):
    """Get status information for various parts of the observatory

    Collects mount, dome, current-observation and site/ephemeris info into a
    single dict. Any failure is logged and whatever was gathered so far is
    returned (possibly empty).
    """
    status = {}
    try:
        t = current_time()
        # Local wall-clock time without sub-second precision.
        local_time = str(datetime.now()).split('.')[0]

        if self.mount.is_initialized:
            status['mount'] = self.mount.status()
            # Hour angle of wherever the mount currently points.
            status['mount'][
                'current_ha'] = self.observer.target_hour_angle(
                t, self.mount.get_current_coordinates())
            if self.mount.has_target:
                status['mount'][
                    'mount_target_ha'] = self.observer.target_hour_angle(
                    t, self.mount.get_target_coordinates())

        if self.dome:
            status['dome'] = self.dome.status

        if self.current_observation:
            status['observation'] = self.current_observation.status()
            status['observation'][
                'field_ha'] = self.observer.target_hour_angle(
                t, self.current_observation.field)

        # Site-level ephemeris: twilight, sun and moon for the coming night.
        status['observer'] = {
            'siderealtime': str(self.sidereal_time),
            'utctime': t,
            'localtime': local_time,
            'local_evening_astro_time': self.observer.twilight_evening_astronomical(
                t, which='next'),
            'local_morning_astro_time': self.observer.twilight_morning_astronomical(
                t, which='next'),
            'local_sun_set_time': self.observer.sun_set_time(t),
            'local_sun_rise_time': self.observer.sun_rise_time(t),
            'local_moon_alt': self.observer.moon_altaz(t).alt,
            'local_moon_illumination': self.observer.moon_illumination(t),
            'local_moon_phase': self.observer.moon_phase(t),
        }

    except Exception as e:  # pragma: no cover
        self.logger.warning("Can't get observatory status: {}".format(e))

    return status
def is_dark(self):
    """Return True when the sun is below the configured twilight horizon."""
    horizon = self.location.get('twilight_horizon', -18 * u.degree)

    now = current_time()
    dark = self.observer.is_night(now, horizon=horizon)

    if not dark:
        # Log how far above the horizon the sun currently sits.
        sun_pos = self.observer.altaz(now, target=get_sun(now)).alt
        self.logger.debug("Sun {:.02f} > {}".format(sun_pos, horizon))

    return dark
def make_pretty_image(fname, timeout=15, **kwargs):  # pragma: no cover
    """ Make a pretty image

    This calls out to an external script which will try to extract the JPG
    directly from the CR2 file, otherwise will do an actual conversion

    Notes:
        See `$POCS/scripts/cr2_to_jpg.sh`

    Arguments:
        fname {str} -- Name of CR2 file
        **kwargs {dict} -- Additional arguments to be passed to external script

    Keyword Arguments:
        timeout {number} -- Process timeout (default: {15})
            NOTE(review): currently unused — confirm intent.

    Returns:
        str -- Filename of image that was created
    """
    assert os.path.exists(fname),\
        warn("File doesn't exist, can't make pretty: {}".format(fname))

    verbose = kwargs.get('verbose', False)

    title = '{} {}'.format(kwargs.get('title', ''), current_time().isot)

    solve_field = "{}/scripts/cr2_to_jpg.sh".format(os.getenv('POCS'))

    cmd = [solve_field, fname, title]

    if kwargs.get('primary', False):
        # 'link' asks the script to also link this as the latest image.
        cmd.append('link')

    if verbose:
        print(cmd)

    try:
        # Fire and forget: the script is not waited on, so the returned
        # filename may not exist yet when this function returns.
        proc = subprocess.Popen(cmd, stdout=subprocess.DEVNULL,
                                stderr=subprocess.DEVNULL)

        if verbose:
            print(proc)
    except OSError as e:
        # NOTE(review): these messages mention gphoto2 / plate solving but the
        # command is cr2_to_jpg.sh — looks copy-pasted; confirm and reword.
        raise error.InvalidCommand("Can't send command to gphoto2."
                                   " {} \t {}".format(e, cmd))
    except ValueError as e:
        raise error.InvalidCommand("Bad parameters to gphoto2."
                                   " {} \t {}".format(e, cmd))
    except Exception as e:
        raise error.PanError("Timeout on plate solving: {}".format(e))

    return fname.replace('cr2', 'jpg')
def take_observation(self, observation, headers=None, **kwargs):
    """Simulate an exposure for `observation`.

    Returns a `threading.Event` that is set once the (simulated) exposure
    has been processed.
    """
    camera_event = Event()

    if headers is None:
        headers = {}

    start_time = headers.get('start_time', current_time(flatten=True))

    # The simulator always "captures" a canned, pre-solved test frame.
    filename = "solved.{}".format(self.file_extension)
    file_path = "{}/pocs/tests/data/{}".format(os.getenv('POCS'), filename)

    image_id = '{}_{}_{}'.format(self.config['name'], self.uid, start_time)
    self.logger.debug("image_id: {}".format(image_id))

    sequence_id = '{}_{}_{}'.format(
        self.config['name'], self.uid, observation.seq_time)

    # Camera metadata; caller-supplied headers win on key collisions.
    metadata = dict(
        camera_name=self.name,
        camera_uid=self.uid,
        field_name=observation.field.field_name,
        file_path=file_path,
        filter=self.filter_type,
        image_id=image_id,
        is_primary=self.is_primary,
        sequence_id=sequence_id,
        start_time=start_time,
    )
    metadata.update(headers)

    # The requested exposure time is deliberately overridden at 5 seconds.
    exp_time = kwargs.get('exp_time', observation.exp_time.value)
    exp_time = 5
    self.logger.debug("Trimming camera simulator exposure to 5 s")

    self.take_exposure(seconds=exp_time, filename=file_path)

    # Record the most recent exposure against this observation.
    observation.exposure_list[image_id] = file_path.replace('.cr2', '.fits')

    # Schedule processing once exposure plus readout would have elapsed.
    processing_timer = Timer(exp_time + self.readout_time,
                             self.process_exposure,
                             (metadata, camera_event,))
    processing_timer.name = '{}Thread'.format(self.name)
    processing_timer.start()

    return camera_event
def process_exposure(self, info, signal_event, exposure_process=None):
    """Processes the exposure

    Converts the CR2 to a FITS file. If the camera is a primary camera,
    extract the jpeg image and save metadata to mongo `current` collection.
    Saves metadata to mongo `observations` collection for all images

    Args:
        info (dict): Header metadata saved for the image
        signal_event (threading.Event): An event that is set signifying that the
            camera is done with this exposure
    """
    # Wait for the exposure subprocess, if one was handed to us.
    if exposure_process:
        exposure_process.wait()

    image_id = info['image_id']
    seq_id = info['sequence_id']
    file_path = info['file_path']

    self.logger.debug("Processing {}".format(image_id))

    # Preview extraction is best-effort; never let it abort processing.
    try:
        self.logger.debug("Extracting pretty image")
        images.make_pretty_image(
            file_path, title=image_id, primary=info['is_primary'])
    except Exception as e:
        self.logger.warning('Problem with extracting pretty image: {}'.format(e))

    self.logger.debug("Converting CR2 -> FITS: {}".format(file_path))
    fits_path = images.cr2_to_fits(file_path, headers=info, remove_cr2=True)

    # From here on the metadata points at the FITS file instead of the CR2.
    info['file_path'] = fits_path

    if info['is_primary']:
        self.logger.debug("Adding current observation to db: {}".format(image_id))
        self.db.insert_current('observations', info, include_collection=False)
    else:
        # Non-primary frames just get compressed.
        self.logger.debug('Compressing {}'.format(file_path))
        images.fpack(fits_path)

    self.logger.debug("Adding image metadata to db: {}".format(image_id))
    record = {
        'data': info,
        'date': current_time(datetime=True),
        'type': 'observations',
        'sequence_id': seq_id,
    }
    self.db.observations.insert_one(record)

    # Signal the caller that this exposure is fully processed.
    signal_event.set()
def is_weather_safe(self, stale=180):
    """Determines whether current weather conditions are safe or not

    Args:
        stale (int, optional): Number of seconds before record is stale,
            defaults to 180

    Returns:
        bool: Conditions are safe (True) or unsafe (False)
    """
    assert self.db.current, self.logger.warning(
        "No connection to sensors, can't check weather safety")

    # Always assume False
    is_safe = False
    record = {'safe': False}

    # A configured weather simulator short-circuits to "safe".
    try:
        if 'weather' in self.config['simulator']:
            is_safe = True
            self.logger.debug("Weather simulator always safe")
            return is_safe
    except KeyError:
        pass

    try:
        record = self.db.current.find_one({'type': 'weather'})

        is_safe = record['data'].get('safe', False)
        timestamp = record['date']
        # Age of the newest weather record, in seconds.
        age = (current_time().datetime - timestamp).total_seconds()

        self.logger.debug(
            "Weather Safety: {} [{:.0f} sec old - {}]".format(is_safe, age, timestamp))

    except TypeError as e:
        # find_one returned None (no weather record), so subscripting failed.
        self.logger.warning("No record found in Mongo DB")
        self.logger.debug('DB: {}'.format(self.db.current))
    else:
        # Only runs when the lookup above succeeded (so `age` is defined):
        # a stale record is treated as unsafe regardless of its contents.
        if age > stale:
            self.logger.warning("Weather record looks stale, marking unsafe.")
            is_safe = False

    self._is_safe = is_safe

    return self._is_safe
def _setup_location_for_mount(self):
    """
    Sets the mount up to the current location. Mount must be initialized first.

    This uses mount.location (an astropy.coords.EarthLocation) to set
    most of the params and the rest is read from a config file.  Users
    should not call this directly.

    Includes:
    * Latitude set_long
    * Longitude set_lat
    * Daylight Savings disable_daylight_savings
    * Universal Time Offset set_gmt_offset
    * Current Date set_local_date
    * Current Time set_local_time
    """
    assert self.is_initialized, self.logger.warning(
        'Mount has not been initialized')
    assert self.location is not None, self.logger.warning(
        'Please set a location before attempting setup')

    self.logger.info('Setting up mount for location')

    # Location
    # Adjust the lat/long for format expected by iOptron
    # (signed, zero-padded arcseconds).
    lat = '{:+07.0f}'.format(self.location.latitude.to(u.arcsecond).value)
    lon = '{:+07.0f}'.format(self.location.longitude.to(u.arcsecond).value)

    self.query('set_long', lon)
    self.query('set_lat', lat)

    # Time
    self.query('disable_daylight_savings')

    gmt_offset = self.config.get('location').get('gmt_offset', 0)
    self.query('set_gmt_offset', gmt_offset)

    # Send the mount its local time/date, offset from UTC by gmt_offset
    # (NOTE(review): gmt_offset is applied in minutes — confirm config units).
    now = current_time() + gmt_offset * u.minute

    self.query('set_local_time', now.datetime.strftime("%H%M%S"))
    self.query('set_local_date', now.datetime.strftime("%y%m%d"))
def get_standard_headers(self, observation=None):
    """Get a set of standard headers

    Args:
        observation (`~pocs.scheduler.observation.Observation`, optional):
            The observation to use for header values. If None is given,
            use the `current_observation`

    Returns:
        dict: The standard headers
    """
    if observation is None:
        observation = self.current_observation

    assert observation is not None, self.logger.warning(
        "No observation, can't get headers")

    field = observation.field

    self.logger.debug("Getting headers for : {}".format(observation))

    now = current_time()
    moon = get_moon(now, self.observer.location)

    headers = {
        'airmass': self.observer.altaz(now, field).secz.value,
        'creator': "POCSv{}".format(self.__version__),
        'elevation': self.location.get('elevation').value,
        'ha_mnt': self.observer.target_hour_angle(now, field).value,
        'latitude': self.location.get('latitude').value,
        'longitude': self.location.get('longitude').value,
        'moon_fraction': self.observer.moon_illumination(now),
        'moon_separation': field.coord.separation(moon).value,
        'observer': self.config.get('name', ''),
        'origin': 'Project PANOPTES',
        'tracking_rate_ra': self.mount.tracking_rate,
    }

    # Add observation metadata
    headers.update(observation.status())

    return headers
def _setup_location_for_mount(self):
    """
    Sets the mount up to the current location. Mount must be initialized first.

    This uses mount.location (an astropy.coords.EarthLocation) to set
    most of the params and the rest is read from a config file.  Users
    should not call this directly.

    Includes:
    * Latitude set_long
    * Longitude set_lat
    * Daylight Savings disable_daylight_savings
    * Universal Time Offset set_gmt_offset
    * Current Date set_local_date
    * Current Time set_local_time
    """
    assert self.is_initialized, self.logger.warning('Mount has not been initialized')
    assert self.location is not None, self.logger.warning(
        'Please set a location before attempting setup')

    self.logger.info('Setting up mount for location')

    # Location: iOptron wants signed, zero-padded arcseconds.
    lat_arcsec = self.location.latitude.to(u.arcsecond).value
    lon_arcsec = self.location.longitude.to(u.arcsecond).value

    self.query('set_long', '{:+07.0f}'.format(lon_arcsec))
    self.query('set_lat', '{:+07.0f}'.format(lat_arcsec))

    # Time: disable DST, then push the UTC offset and local time/date.
    self.query('disable_daylight_savings')

    gmt_offset = self.config.get('location').get('gmt_offset', 0)
    self.query('set_gmt_offset', gmt_offset)

    local_now = current_time() + gmt_offset * u.minute
    self.query('set_local_time', local_now.datetime.strftime("%H%M%S"))
    self.query('set_local_date', local_now.datetime.strftime("%y%m%d"))
def connect(self):
    """Connect to Canon DSLR

    Gets the serial number from the camera and sets various settings
    """
    self.logger.debug('Connecting to camera')

    # Get serial number; only overwrite if the camera returned a
    # non-empty string.
    _serial_number = self.get_property('serialnumber')
    if _serial_number > '':
        self._serial_number = _serial_number

    # Properties to be set upon init.
    # Values are gphoto2 choice *indices* for each config path — they are
    # camera-model specific; confirm against `gphoto2 --get-config` output.
    prop2index = {
        '/main/actions/viewfinder': 1,       # Screen off
        '/main/settings/autopoweroff': 0,     # Don't power off
        '/main/settings/reviewtime': 0,       # Screen off after taking pictures
        '/main/settings/capturetarget': 0,    # Capture to RAM, for download
        '/main/imgsettings/imageformat': 9,   # RAW
        '/main/imgsettings/imageformatsd': 9,  # RAW
        '/main/imgsettings/imageformatcf': 9,  # RAW
        '/main/imgsettings/iso': 1,           # ISO 100
        '/main/capturesettings/focusmode': 0,        # Manual (don't try to focus)
        '/main/capturesettings/continuousaf': 0,     # No auto-focus
        '/main/capturesettings/autoexposuremode': 3,  # 3 - Manual; 4 - Bulb
        '/main/capturesettings/drivemode': 0,        # Single exposure
        '/main/capturesettings/shutterspeed': 0,     # Bulb
    }

    # String-valued properties; copyright embeds the current year.
    prop2value = {
        '/main/settings/artist': 'Project PANOPTES',
        '/main/settings/ownername': 'Project PANOPTES',
        '/main/settings/copyright': 'Project PANOPTES {}'.format(
            current_time().datetime.year),
    }

    self.set_properties(prop2index, prop2value)
    self._connected = True
def set_park_coordinates(self, ha=-170 * u.degree, dec=-10 * u.degree):
    """ Calculates the RA-Dec for the the park position.

    This method returns a location that points the optics of the unit
    down toward the ground.

    The RA is calculated from subtracting the desired hourangle from the
    local sidereal time. This requires a proper location be set.

    Note:
        Mounts usually don't like to track or slew below the horizon so this
        will most likely require a configuration item be set on the mount
        itself.

    Args:
        ha (Optional[astropy.units.degree]): Hourangle of desired parking
            position. Defaults to -170 degrees.
        dec (Optional[astropy.units.degree]): Declination of desired parking
            position. Defaults to -10 degrees.

    Returns:
        park_skycoord (astropy.coordinates.SkyCoord): A SkyCoord object
            representing current parking position.
    """
    self.logger.debug('Setting park position')

    # Sidereal time requires the observing location to be attached.
    park_time = current_time()
    park_time.location = self.location

    lst = park_time.sidereal_time('apparent')
    self.logger.debug("LST: {}".format(lst))
    self.logger.debug("HA: {}".format(ha))

    # RA = LST - HA by definition of hour angle.
    ra = lst - ha
    self.logger.debug("RA: {}".format(ra))
    self.logger.debug("Dec: {}".format(dec))

    self._park_coordinates = SkyCoord(ra, dec)

    self.logger.debug("Park Coordinates RA-Dec: {}".format(self._park_coordinates))
def insert_current(self, collection, obj, include_collection=True):
    """Insert an object into both the `current` collection and the collection provided

    Args:
        collection (str): Name of valid collection within panoptes db
        obj (dict or str): Object to be inserted
        include_collection (bool): Whether to also update the collection,
            defaults to True

    Returns:
        str: Mongo object ID of record in `collection`
    """
    if include_collection:
        assert collection in self.collections, warn("Collection not available")

    _id = None
    try:
        record = {
            'type': collection,
            'data': obj,
            'date': current_time(datetime=True),
        }

        # Upsert into `current` — one record per collection type.
        self.current.replace_one({'type': collection}, record, True)

        if include_collection:
            # Also append the record to the named collection's history.
            _id = getattr(self, collection).insert_one(record).inserted_id
    except AttributeError:
        warn("Collection does not exist in db: {}".format(collection))
    except Exception as e:
        warn("Problem inserting object into collection: {}".format(e))

    return _id
def insert_current(self, collection, obj, include_collection=True):
    """Insert an object into both the `current` collection and the collection provided

    Args:
        collection (str): Name of valid collection within panoptes db
        obj (dict or str): Object to be inserted
        include_collection (bool): Whether to also update the collection,
            defaults to True

    Returns:
        str: Mongo object ID of record in `collection`, or None when the
            insert failed or `include_collection` is False.
    """
    if include_collection:
        assert collection in self.collections, warn("Collection not available")

    _id = None
    try:
        current_obj = {
            'type': collection,
            'data': obj,
            'date': current_time(datetime=True),
        }

        # Update `current` record (upsert=True: one record per type).
        self.current.replace_one({'type': collection}, current_obj, True)

        if include_collection:
            # Insert record into db
            col = getattr(self, collection)
            _id = col.insert_one(current_obj).inserted_id
    except AttributeError:
        # getattr failed — the named collection attribute doesn't exist.
        warn("Collection does not exist in db: {}".format(collection))
    except Exception as e:
        warn("Problem inserting object into collection: {}".format(e))

    return _id
'gs://panoptes-survey/PAN001/{}/'.format(d)] try: completed_process = subprocess.run(run_cmd, stdout=subprocess.PIPE) if completed_process.returncode != 0: print("Problem uploading") print(completed_process.stdout) except Exception as e: print("Problem uploading: {}".format(e)) if __name__ == '__main__': import argparse parser = argparse.ArgumentParser( description="Uploader for image directory") parser.add_argument('--date', default=None, help='Export start date, e.g. 2016-01-01, defaults to yesterday') parser.add_argument('--auto-confirm', action='store_true', default=False, help='Auto-confirm upload') args = parser.parse_args() if args.date is None: args.date = (current_time() - 1. * u.day).isot else: args.date = Time(args.date) main(**vars(args))
def _autofocus(self, seconds, focus_range, focus_step, thumbnail_size, keep_files,
               dark_thumb, merit_function, merit_function_kwargs, coarse, plots,
               start_event, finished_event, smooth=0.4, *args, **kwargs):
    """Run a focus sweep: expose at a range of encoder positions, score each
    frame with `merit_function`, and move to the best position.

    Returns:
        tuple: (initial_focus, final_focus) encoder positions.
    """
    # If passed a start_event wait until Event is set before proceeding
    # (e.g. wait for coarse focus to finish before starting fine focus).
    if start_event:
        start_event.wait()

    initial_focus = self.position
    if coarse:
        self.logger.debug(
            "Beginning coarse autofocus of {} - initial position: {}",
            self._camera, initial_focus)
    else:
        self.logger.debug(
            "Beginning autofocus of {} - initial position: {}",
            self._camera, initial_focus)

    # Set up paths for temporary focus files, and plots if requested.
    image_dir = self.config['directories']['images']
    start_time = current_time(flatten=True)
    file_path_root = "{}/{}/{}/{}".format(image_dir,
                                          'focus',
                                          self._camera.uid,
                                          start_time)

    # Take an image before focusing, grab a thumbnail from the centre and add it to the plot
    file_path = "{}/{}_{}.{}".format(file_path_root, initial_focus,
                                     "initial", self._camera.file_extension)
    thumbnail = self._camera.get_thumbnail(
        seconds, file_path, thumbnail_size, keep_file=True)

    if plots:
        thumbnail = images.mask_saturated(thumbnail)
        if dark_thumb is not None:
            thumbnail = thumbnail - dark_thumb
        fig = plt.figure(figsize=(9, 18), tight_layout=True)
        ax1 = fig.add_subplot(3, 1, 1)
        im1 = ax1.imshow(thumbnail, interpolation='none',
                         cmap=palette, norm=colours.LogNorm())
        fig.colorbar(im1)
        ax1.set_title('Initial focus position: {}'.format(initial_focus))

    # Set up encoder positions for autofocus sweep, truncating at focus travel
    # limits if required.
    # focus_range/focus_step arrive as (fine, coarse) pairs; pick one.
    if coarse:
        focus_range = focus_range[1]
        focus_step = focus_step[1]
    else:
        focus_range = focus_range[0]
        focus_step = focus_step[0]

    # NOTE(review): np.int is deprecated/removed in modern NumPy — migrate to
    # int or np.int_ when the project's NumPy is upgraded.
    focus_positions = np.arange(max(initial_focus - focus_range / 2, self.min_position),
                                min(initial_focus + focus_range / 2,
                                    self.max_position) + 1,
                                focus_step, dtype=np.int)
    n_positions = len(focus_positions)

    metric = np.empty((n_positions))

    for i, position in enumerate(focus_positions):
        # Move focus, updating focus_positions with actual encoder position after move.
        focus_positions[i] = self.move_to(position)

        # Take exposure
        file_path = "{}/{}_{}.{}".format(file_path_root,
                                         focus_positions[i], i,
                                         self._camera.file_extension)
        thumbnail = self._camera.get_thumbnail(
            seconds, file_path, thumbnail_size, keep_file=keep_files)
        thumbnail = images.mask_saturated(thumbnail)
        if dark_thumb is not None:
            thumbnail = thumbnail - dark_thumb
        # Calculate focus metric
        metric[i] = images.focus_metric(
            thumbnail, merit_function, **merit_function_kwargs)
        self.logger.debug("Focus metric at position {}: {}".format(position, metric[i]))

    fitted = False

    # Find maximum values
    imax = metric.argmax()

    if imax == 0 or imax == (n_positions - 1):
        # TODO: have this automatically switch to coarse focus mode if this happens
        self.logger.warning(
            "Best focus outside sweep range, aborting autofocus on {}!".format(self._camera))
        best_focus = focus_positions[imax]

    elif not coarse:
        # Fit data around the maximum value to determine best focus position.
        # Initialise models

        # Crude guess at a standard deviation for focus metric, 40% of the maximum value
        weights = np.ones(len(focus_positions)) / (smooth * metric.max())

        # Fit smoothing spline to focus metric data
        fit = UnivariateSpline(focus_positions, metric, w=weights, k=4, ext='raise')

        try:
            stationary_points = fit.derivative().roots()
        except ValueError as err:
            self.logger.warning(
                'Error finding extrema of spline fit: {}'.format(err))
            best_focus = focus_positions[imax]
        else:
            extrema = fit(stationary_points)
            if len(extrema) > 0:
                best_focus = stationary_points[extrema.argmax()]
                fitted = True
            # NOTE(review): if the spline fit succeeds but finds no extrema,
            # `best_focus` is never assigned and the move_to below raises
            # UnboundLocalError — confirm and add a fallback to
            # focus_positions[imax].
    else:
        # Coarse focus, just use max value.
        best_focus = focus_positions[imax]

    if plots:
        ax2 = fig.add_subplot(3, 1, 2)
        ax2.plot(focus_positions, metric, 'bo', label='{}'.format(merit_function))
        if fitted:
            fs = np.arange(focus_positions[0], focus_positions[-1] + 1)
            ax2.plot(fs, fit(fs), 'b-', label='Smoothing spline fit')

        ax2.set_xlim(focus_positions[0] - focus_step / 2,
                     focus_positions[-1] + focus_step / 2)
        u_limit = 1.10 * metric.max()
        l_limit = min(0.95 * metric.min(), 1.05 * metric.min())
        ax2.set_ylim(l_limit, u_limit)
        ax2.vlines(initial_focus, l_limit, u_limit, colors='k', linestyles=':',
                   label='Initial focus')
        ax2.vlines(best_focus, l_limit, u_limit, colors='k', linestyles='--',
                   label='Best focus')
        ax2.set_xlabel('Focus position')
        ax2.set_ylabel('Focus metric')
        if coarse:
            ax2.set_title('{} coarse focus at {}'.format(self._camera, start_time))
        else:
            ax2.set_title('{} fine focus at {}'.format(self._camera, start_time))
        ax2.legend(loc='best')

    final_focus = self.move_to(best_focus)

    # Confirming exposure at the chosen focus position.
    file_path = "{}/{}_{}.{}".format(file_path_root, final_focus,
                                     "final", self._camera.file_extension)
    thumbnail = self._camera.get_thumbnail(
        seconds, file_path, thumbnail_size, keep_file=True)

    if plots:
        thumbnail = images.mask_saturated(thumbnail)
        if dark_thumb is not None:
            thumbnail = thumbnail - dark_thumb
        ax3 = fig.add_subplot(3, 1, 3)
        im3 = ax3.imshow(thumbnail, interpolation='none',
                         cmap=palette, norm=colours.LogNorm())
        fig.colorbar(im3)
        ax3.set_title('Final focus position: {}'.format(final_focus))

        if coarse:
            plot_path = file_path_root + '_coarse.png'
        else:
            plot_path = file_path_root + '_fine.png'

        fig.savefig(plot_path)
        plt.close(fig)
        if coarse:
            self.logger.info('Coarse focus plot for camera {} written to {}'.format(
                self._camera, plot_path))
        else:
            self.logger.info('Fine focus plot for camera {} written to {}'.format(
                self._camera, plot_path))

    self.logger.debug(
        'Autofocus of {} complete - final focus position: {}',
        self._camera, final_focus)

    if finished_event:
        finished_event.set()

    return initial_focus, final_focus
def get_observation(self, time=None, show_all=False, reread_fields_file=False):
    """Get a valid observation

    Scores every known observation against the scheduler constraints,
    adds each observation's priority, and returns the best-ranked result.

    Args:
        time (astropy.time.Time, optional): Time at which scheduler
            applies, defaults to time called
        show_all (bool, optional): Return all valid observations along
            with merit value, defaults to False to only get top value
        reread_fields_file (bool, optional): If targets file should be
            reread before getting observation, default False.

    Returns:
        tuple or list: A tuple (or list of tuples) with name and score of
        ranked observations
    """
    if reread_fields_file:
        self.logger.debug("Rereading fields file")
        # The setter method on `fields_file` will force a reread
        self.fields_file = self.fields_file

    if time is None:
        time = current_time()

    # Every observation starts with a baseline score of 1.0; constraints
    # add to it or veto (remove) the observation entirely.
    valid_obs = {obs: 1.0 for obs in self.observations}
    best_obs = []

    # Properties shared by every constraint evaluation in this pass
    common_properties = {
        'end_of_night': self.observer.tonight(time=time, horizon=-18 * u.degree)[-1],
        'moon': get_moon(time, self.observer.location)
    }

    for constraint in listify(self.constraints):
        self.logger.debug("Checking Constraint: {}".format(constraint))
        for obs_name, observation in self.observations.items():
            # Skip observations already vetoed by an earlier constraint
            if obs_name in valid_obs:
                self.logger.debug("\tObservation: {}".format(obs_name))

                veto, score = constraint.get_score(
                    time, self.observer, observation, **common_properties)

                self.logger.debug("\t\tScore: {}\tVeto: {}".format(score, veto))

                if veto:
                    self.logger.debug("\t\t{} vetoed by {}".format(obs_name, constraint))
                    del valid_obs[obs_name]
                    continue

                valid_obs[obs_name] += score

    # Fold in per-observation priority. Only values are mutated (no keys
    # added/removed), so assigning while iterating is safe.
    for obs_name, score in valid_obs.items():
        valid_obs[obs_name] += self.observations[obs_name].priority

    if len(valid_obs) > 0:
        # Sort the list by highest score (reverse puts in correct order)
        best_obs = sorted(valid_obs.items(), key=lambda x: x[1])[::-1]

        top_obs = best_obs[0]

        # Check new best against current_observation
        if self.current_observation is not None \
                and top_obs[0] != self.current_observation.name:

            # Favor the current observation if still available
            # NOTE(review): unlike the else-branch below, end_of_next_set is
            # not compared against 'end_of_night' here - confirm intended
            end_of_next_set = time + self.current_observation.set_duration
            if self.observation_available(self.current_observation, end_of_next_set):

                # If current is better or equal to top, use it
                # NOTE(review): this inserts the Observation object itself
                # into a list of (name, score) tuples - callers receive
                # mixed types; verify downstream handling before changing
                if self.current_observation.merit >= top_obs[1]:
                    best_obs.insert(0, self.current_observation)

        # Set the current
        self.current_observation = self.observations[top_obs[0]]
        self.current_observation.merit = top_obs[1]
    else:
        if self.current_observation is not None:
            # Favor the current observation if still available
            end_of_next_set = time + self.current_observation.set_duration
            if end_of_next_set < common_properties['end_of_night'] and \
                    self.observation_available(self.current_observation, end_of_next_set):

                self.logger.debug("Reusing {}".format(self.current_observation))
                best_obs = [(self.current_observation.name,
                             self.current_observation.merit)]
            else:
                self.logger.warning("No valid observations found")
                self.current_observation = None

    # By default collapse to just the single top-ranked entry
    if not show_all and len(best_obs) > 0:
        best_obs = best_obs[0]

    return best_obs
def take_evening_flats(self,
                       alt=None,
                       az=None,
                       min_counts=5000,
                       max_counts=15000,
                       bias=1000,
                       max_exptime=60.,
                       camera_list=None,
                       target_adu_percentage=0.5,
                       max_num_exposures=10,
                       *args, **kwargs):
    """Take flat fields

    Slews to the flat-field coordinates and repeatedly exposes each camera,
    scaling the next exposure time from the measured counts, until exposures
    get too long or too numerous. A matching set of dark frames with the
    same exposure times is then taken.

    Args:
        alt (float, optional): Altitude for flats
        az (float, optional): Azimuth for flats
        min_counts (int, optional): Minimum ADU count
        max_counts (int, optional): Maximum ADU count
        bias (int, optional): Default bias for the cameras
        max_exptime (float, optional): Maximum exposure time before stopping
        camera_list (list, optional): List of cameras to use for
            flat-fielding; defaults to all cameras
        target_adu_percentage (float, optional): Exposure time will be
            adjusted so that counts are close to:
            target * (`min_counts` + `max_counts`). Default to 0.5
        max_num_exposures (int, optional): Maximum number of flats to take
        *args (TYPE): Description
        **kwargs (TYPE): Description
    """
    # Fix: a missing camera_list previously raised TypeError when building
    # the exposure-time table below; default to every camera.
    if camera_list is None:
        camera_list = list(self.cameras.keys())

    target_adu = target_adu_percentage * (min_counts + max_counts)

    image_dir = self.config['directories']['images']

    flat_obs = self._create_flat_field_observation(alt=alt, az=az)

    # Per-camera history of exposure times, starting at one second each
    exp_times = {cam_name: [1. * u.second] for cam_name in camera_list}

    # Loop until conditions are met for flat-fielding
    while True:
        self.logger.debug("Slewing to flat-field coords: {}".format(flat_obs.field))
        self.mount.set_target_coordinates(flat_obs.field)
        self.mount.slew_to_target()
        self.status()  # Seems to help with reading coords

        fits_headers = self.get_standard_headers(observation=flat_obs)

        start_time = utils.current_time()
        fits_headers['start_time'] = utils.flatten_time(
            start_time)  # Common start time for cameras

        camera_events = dict()

        # Take the observations
        for cam_name in camera_list:
            camera = self.cameras[cam_name]

            filename = "{}/flats/{}/{}/{}.{}".format(
                image_dir,
                camera.uid,
                flat_obs.seq_time,
                'flat_{:02d}'.format(flat_obs.current_exp),
                camera.file_extension)

            # Take picture and get event
            camera_event = camera.take_observation(
                flat_obs, fits_headers, filename=filename,
                exp_time=exp_times[cam_name][-1])

            camera_events[cam_name] = {
                'event': camera_event,
                'filename': filename,
            }

        # Block until done exposing on all cameras
        while not all([info['event'].is_set() for info in camera_events.values()]):
            self.logger.debug('Waiting for flat-field image')
            time.sleep(1)

        # Check the counts for each image
        for cam_name, info in camera_events.items():
            img_file = info['filename']
            self.logger.debug("Checking counts for {}".format(img_file))

            # Unpack fits if compressed
            if not os.path.exists(img_file) and \
                    os.path.exists(img_file.replace('.fits', '.fits.fz')):
                img_utils.fpack(img_file.replace('.fits', '.fits.fz'), unpack=True)

            data = fits.getdata(img_file)

            mean, median, stddev = stats.sigma_clipped_stats(data)

            counts = mean - bias

            # This is in the original DragonFly code so copying
            if counts <= 0:
                counts = 10

            self.logger.debug("Counts: {}".format(counts))

            if counts < min_counts or counts > max_counts:
                self.logger.debug("Counts outside min/max range, should be discarded")

            elapsed_time = (utils.current_time() - start_time).sec
            self.logger.debug("Elapsed time: {}".format(elapsed_time))

            # Get suggested exposure time: scale by how far counts are from
            # the target and by elapsed twilight time (sky doubles every 180s)
            exp_time = int(exp_times[cam_name][-1].value *
                           (target_adu / counts) *
                           (2.0 ** (elapsed_time / 180.0)) + 0.5)
            self.logger.debug("Suggested exp_time for {}: {}".format(cam_name, exp_time))
            exp_times[cam_name].append(exp_time * u.second)

        self.logger.debug("Checking for long exposures")
        # Stop flats if any time is greater than max
        if any([t[-1].value >= max_exptime for t in exp_times.values()]):
            self.logger.debug("Exposure times greater than max, stopping flat fields")
            break

        self.logger.debug("Checking for too many exposures")
        # Stop flats if we are going on too long
        if any([len(t) >= max_num_exposures for t in exp_times.values()]):
            self.logger.debug("Too many flats, quitting")
            break

        self.logger.debug("Incrementing exposure count")
        flat_obs.current_exp += 1

    # Add a bias exposure
    for cam_name in camera_list:
        exp_times[cam_name].append(0 * u.second)

    # Record how many exposures we took
    num_exposures = flat_obs.current_exp

    # Reset to first exposure so we can loop through again taking darks
    flat_obs.current_exp = 0

    # Take darks
    for i in range(num_exposures):
        self.logger.debug("Slewing to dark-field coords: {}".format(flat_obs.field))
        self.mount.set_target_coordinates(flat_obs.field)
        self.mount.slew_to_target()
        self.status()

        for cam_name in camera_list:
            camera = self.cameras[cam_name]

            filename = "{}/flats/{}/{}/{}.{}".format(
                image_dir,
                camera.uid,
                flat_obs.seq_time,
                'dark_{:02d}'.format(flat_obs.current_exp),
                camera.file_extension)

            # Take picture and wait for result
            camera_event = camera.take_observation(
                flat_obs, fits_headers, filename=filename,
                exp_time=exp_times[cam_name][i], dark=True)

            camera_events[cam_name] = {
                'event': camera_event,
                'filename': filename,
            }

        # Will block here until done exposing on all cameras
        while not all([info['event'].is_set() for info in camera_events.values()]):
            self.logger.debug('Waiting for dark-field image')
            time.sleep(1)

        flat_obs.current_exp = i
def take_observation(self, observation, headers=None, filename=None, *args, **kwargs):
    """Take an observation

    Gathers various header information, sets the file path, and calls
    `take_exposure`. Also creates a `threading.Event` object and a
    `threading.Thread` object. The Thread calls `process_exposure` after the
    exposure had completed and the Event is set once `process_exposure`
    finishes.

    Args:
        observation (~pocs.scheduler.observation.Observation): Object
            describing the observation
        headers (dict): Header data to be saved along with the file.
        filename (str, optional): Filename for saving; may be a full path
            or a basename, to which directory and extension are added
        **kwargs (dict): Optional keyword arguments (`exp_time`, dark)

    Returns:
        threading.Event: An event to be set when the image is done processing
    """
    # To be used for marking when exposure is complete (see `process_exposure`)
    camera_event = Event()

    if headers is None:
        headers = {}

    start_time = headers.get('start_time', current_time(flatten=True))

    # Directory where this observation's images are collected
    image_dir = "{}/fields/{}/{}/{}/".format(
        self.config['directories']['images'],
        observation.field.field_name,
        self.uid,
        observation.seq_time,
    )

    # Get full file path
    if filename is None:
        file_path = "{}/{}.{}".format(image_dir, start_time, self.file_extension)
    else:
        # Add extension
        if '.' not in filename:
            filename = '{}.{}'.format(filename, self.file_extension)

        # Add directory (anything not absolute is placed under image_dir)
        if not filename.startswith('/'):
            filename = '{}/{}'.format(image_dir, filename)

        file_path = filename

    image_id = '{}_{}_{}'.format(self.config['name'], self.uid, start_time)
    self.logger.debug("image_id: {}".format(image_id))

    sequence_id = '{}_{}_{}'.format(self.config['name'], self.uid, observation.seq_time)

    # Camera metadata, merged with (and overridable by) caller headers
    metadata = {
        'camera_name': self.name,
        'camera_uid': self.uid,
        'field_name': observation.field.field_name,
        'file_path': file_path,
        'filter': self.filter_type,
        'image_id': image_id,
        'is_primary': self.is_primary,
        'sequence_id': sequence_id,
        'start_time': start_time,
    }
    metadata.update(headers)
    exp_time = kwargs.get('exp_time', observation.exp_time)
    exposure_event = self.take_exposure(seconds=exp_time, filename=file_path, **kwargs)

    # Process the exposure once readout is complete
    t = Thread(target=self.process_exposure, args=(metadata, camera_event, exposure_event))
    t.name = '{}Thread'.format(self.name)
    t.start()

    return camera_event
def get_observation(self, time=None, show_all=False, reread_fields_file=False):
    """Get a valid observation

    Scores every known observation against the scheduler constraints,
    adds each observation's priority, and returns the best-ranked result.

    Args:
        time (astropy.time.Time, optional): Time at which scheduler
            applies, defaults to time called
        show_all (bool, optional): Return all valid observations along
            with merit value, defaults to False to only get top value
        reread_fields_file (bool, optional): If targets file should be
            reread before getting observation, default False.

    Returns:
        tuple or list: A tuple (or list of tuples) with name and score of
        ranked observations
    """
    if reread_fields_file:
        self.logger.debug("Rereading fields file")
        # The setter method on `fields_file` will force a reread
        self.fields_file = self.fields_file

    if time is None:
        time = current_time()

    # Every observation starts with a baseline score of 1.0; constraints
    # add to it or veto (remove) the observation entirely.
    valid_obs = {obs: 1.0 for obs in self.observations}
    best_obs = []

    # Properties shared by every constraint evaluation in this pass
    common_properties = {
        'end_of_night': self.observer.tonight(time=time, horizon=-18 * u.degree)[-1],
        'moon': get_moon(time, self.observer.location)
    }

    for constraint in listify(self.constraints):
        self.logger.debug("Checking Constraint: {}".format(constraint))
        for obs_name, observation in self.observations.items():
            # Skip observations already vetoed by an earlier constraint
            if obs_name in valid_obs:
                self.logger.debug("\tObservation: {}".format(obs_name))

                veto, score = constraint.get_score(time, self.observer, observation,
                                                   **common_properties)

                self.logger.debug("\t\tScore: {}\tVeto: {}".format(score, veto))

                if veto:
                    self.logger.debug("\t\t{} vetoed by {}".format(obs_name, constraint))
                    del valid_obs[obs_name]
                    continue

                valid_obs[obs_name] += score

    # Fold in per-observation priority. Only values are mutated (no keys
    # added/removed), so assigning while iterating is safe.
    for obs_name, score in valid_obs.items():
        valid_obs[obs_name] += self.observations[obs_name].priority

    if len(valid_obs) > 0:
        # Sort the list by highest score (reverse puts in correct order)
        best_obs = sorted(valid_obs.items(), key=lambda x: x[1])[::-1]

        top_obs = best_obs[0]

        # Check new best against current_observation
        if self.current_observation is not None \
                and top_obs[0] != self.current_observation.name:

            # Favor the current observation if still available
            # NOTE(review): unlike the else-branch below, end_of_next_set is
            # not compared against 'end_of_night' here - confirm intended
            end_of_next_set = time + self.current_observation.set_duration
            if self.observation_available(self.current_observation, end_of_next_set):

                # If current is better or equal to top, use it
                # NOTE(review): this inserts the Observation object itself
                # into a list of (name, score) tuples - callers receive
                # mixed types; verify downstream handling before changing
                if self.current_observation.merit >= top_obs[1]:
                    best_obs.insert(0, self.current_observation)

        # Set the current
        self.current_observation = self.observations[top_obs[0]]
        self.current_observation.merit = top_obs[1]
    else:
        if self.current_observation is not None:
            # Favor the current observation if still available
            end_of_next_set = time + self.current_observation.set_duration
            if end_of_next_set < common_properties['end_of_night'] and \
                    self.observation_available(self.current_observation, end_of_next_set):

                self.logger.debug("Reusing {}".format(self.current_observation))
                best_obs = [(self.current_observation.name,
                             self.current_observation.merit)]
            else:
                self.logger.warning("No valid observations found")
                self.current_observation = None

    # By default collapse to just the single top-ranked entry
    if not show_all and len(best_obs) > 0:
        best_obs = best_obs[0]

    return best_obs
def sidereal_time(self): return self.observer.local_sidereal_time(current_time())
def capture(self):
    """ Capture an image from a webcam

    Given a webcam, this attempts to capture an image using the subprocess
    command. Also creates a thumbnail of the image

    Args:
        webcam (dict): Entry for the webcam. Example::

            {
                'name': 'Pier West',
                'port': '/dev/video0',
                'params': {
                    'rotate': 270
                },
            }

        The values for the `params` key will be passed directly to fswebcam

    Returns:
        int or dict: fswebcam return code on a completed call, or a dict
        with the static output filename if the subprocess call itself fails
    """
    webcam = self.webcam_config
    assert isinstance(webcam, dict)

    self.logger.debug("Capturing image for {}...".format(webcam.get('name')))

    camera_name = self.port_name

    # Create the directory for storing images
    timestamp = current_time(flatten=True)
    today_dir = timestamp.split('T')[0]
    today_path = "{}/{}".format(self.webcam_dir, today_dir)

    try:
        if today_path != self._today_dir:
            # If yesterday is not None, archive it
            if self._today_dir is not None:
                self.logger.debug("Making timelapse for webcam")
                self.create_timelapse(
                    self._today_dir,
                    out_file="{}/{}_{}.mp4".format(
                        self.webcam_dir, today_dir, self.port_name),
                    remove_after=True)

            # If today doesn't exist, make it
            if not os.path.exists(today_path):
                self.logger.debug("Making directory for day's webcam")
                os.makedirs(today_path, exist_ok=True)
            self._today_dir = today_path
    except OSError as err:
        self.logger.warning("Cannot create new dir: {} \t {}".format(today_path, err))

    # Output file names
    out_file = '{}/{}_{}.jpeg'.format(today_path, camera_name, timestamp)

    # We also create a thumbnail and always link it to the same image
    # name so that it is always current.
    thumbnail_file = '{}/tn_{}.jpeg'.format(self.webcam_dir, camera_name)

    options = self.base_params
    if 'params' in webcam:
        for opt, val in webcam.get('params').items():
            # Fix: a leading space is required so successive options (and the
            # base params) don't run together into one shell token
            options += " --{}={}".format(opt, val)

    # Assemble all the parameters
    params = " -d {} --title \"{}\" {} --save {} --scale {} {}".format(
        webcam.get('port'),
        webcam.get('name'),
        options,
        out_file,
        self._thumbnail_resolution,
        thumbnail_file
    )

    static_out_file = ''

    # Actually call the command.
    # NOTE: This is a blocking call (within this process). See `start_capturing`
    try:
        self.logger.debug("Webcam subproccess command: {} {}".format(self.cmd, params))
        with open(os.devnull, 'w') as devnull:
            retcode = subprocess.call(self.cmd + params, shell=True,
                                      stdout=devnull, stderr=devnull)

        if retcode < 0:
            self.logger.warning(
                "Image captured terminated for {}. Return code: {} \t Error: {}".format(
                    webcam.get('name'),
                    retcode,
                    sys.stderr
                )
            )
        else:
            self.logger.debug("Image captured for {}".format(webcam.get('name')))

            # Static files (always points to most recent)
            static_out_file = '{}/{}.jpeg'.format(self.webcam_dir, camera_name)
            static_tn_out_file = '{}/tn_{}.jpeg'.format(self.webcam_dir, camera_name)

            # Symlink the latest image and thumbnail
            if os.path.lexists(static_out_file):
                os.remove(static_out_file)
            os.symlink(out_file, static_out_file)

            if os.path.lexists(static_tn_out_file):
                os.remove(static_tn_out_file)
            # NOTE(review): this links the full image, not thumbnail_file, and
            # static_tn_out_file equals the path fswebcam just wrote the
            # thumbnail to - confirm intent before changing
            os.symlink(out_file, static_tn_out_file)

        return retcode
    except OSError as e:
        # Fix: the message previously had no placeholder and passed
        # `file=sys.stderr` to str.format; include the actual exception
        self.logger.warning("Execution failed: {}".format(e))

    return {'out_fn': static_out_file}
def take_observation(self, observation, headers=None, filename=None, **kwargs):
    """Take an observation

    Gathers various header information, sets the file path, and calls
    `take_exposure`. Also creates a `threading.Event` object and a
    `threading.Timer` object. The timer calls `process_exposure` after the
    set amount of time is expired (`observation.exp_time + self.readout_time`).

    Note:
        If a `filename` is passed in it can either be a full path that
        includes the extension, or the basename of the file, in which case
        the directory path and extension will be added to the `filename`
        for output

    Args:
        observation (~pocs.scheduler.observation.Observation): Object
            describing the observation
        headers (dict): Header data to be saved along with the file
        filename (str, optional): Filename for saving, defaults to ISOT
            time stamp
        **kwargs (dict): Optional keyword arguments (`exp_time`)

    Returns:
        threading.Event: An event to be set when the image is done processing
    """
    # To be used for marking when exposure is complete (see `process_exposure`)
    camera_event = Event()

    if headers is None:
        headers = {}

    start_time = headers.get('start_time', current_time(flatten=True))

    # Directory where this observation's images are collected
    image_dir = "{}/fields/{}/{}/{}/".format(
        self.config['directories']['images'],
        observation.field.field_name,
        self.uid,
        observation.seq_time,
    )

    # Get full file path
    if filename is None:
        file_path = "{}/{}.{}".format(image_dir, start_time, self.file_extension)
    else:
        # Add extension
        if '.' not in filename:
            filename = '{}.{}'.format(filename, self.file_extension)

        # Add directory. Fix: match the sibling take_observation
        # implementations, which treat any non-absolute path as relative to
        # image_dir (the previous `'/' not in filename` test skipped
        # relative sub-paths entirely).
        if not filename.startswith('/'):
            filename = '{}/{}'.format(image_dir, filename)

        file_path = filename

    image_id = '{}_{}_{}'.format(
        self.config['name'],
        self.uid,
        start_time
    )
    self.logger.debug("image_id: {}".format(image_id))

    sequence_id = '{}_{}_{}'.format(
        self.config['name'],
        self.uid,
        observation.seq_time
    )

    # Camera metadata, merged with (and overridable by) caller headers
    metadata = {
        'camera_name': self.name,
        'camera_uid': self.uid,
        'field_name': observation.field.field_name,
        'file_path': file_path,
        'filter': self.filter_type,
        'image_id': image_id,
        'is_primary': self.is_primary,
        'sequence_id': sequence_id,
        'start_time': start_time,
    }
    metadata.update(headers)
    exp_time = kwargs.get('exp_time', observation.exp_time.value)
    proc = self.take_exposure(seconds=exp_time, filename=file_path)

    # Add most recent exposure to list
    observation.exposure_list[image_id] = file_path.replace('.cr2', '.fits')

    # Process the image after a set amount of time
    wait_time = exp_time + self.readout_time
    t = Timer(wait_time, self.process_exposure, (metadata, camera_event, proc))
    t.name = '{}Thread'.format(self.name)
    t.start()

    return camera_event
def take_observation(self, observation, headers=None, filename=None, *args, **kwargs):
    """Take an observation

    Gathers various header information, sets the file path, and calls
    `take_exposure`. Also creates a `threading.Event` object and a
    `threading.Thread` object. The Thread calls `process_exposure` after the
    exposure had completed and the Event is set once `process_exposure`
    finishes.

    Args:
        observation (~pocs.scheduler.observation.Observation): Object
            describing the observation
        headers (dict): Header data to be saved along with the file.
        filename (str, optional): Filename for saving; may be a full path
            or a basename, to which directory and extension are added
        **kwargs (dict): Optional keyword arguments (`exp_time`, dark)

    Returns:
        threading.Event: An event to be set when the image is done processing
    """
    # To be used for marking when exposure is complete (see `process_exposure`)
    camera_event = Event()

    if headers is None:
        headers = {}

    start_time = headers.get('start_time', current_time(flatten=True))

    # Directory where this observation's images are collected
    image_dir = "{}/fields/{}/{}/{}/".format(
        self.config['directories']['images'],
        observation.field.field_name,
        self.uid,
        observation.seq_time,
    )

    # Get full file path
    if filename is None:
        file_path = "{}/{}.{}".format(image_dir, start_time, self.file_extension)
    else:
        # Add extension
        if '.' not in filename:
            filename = '{}.{}'.format(filename, self.file_extension)

        # Add directory (anything not absolute is placed under image_dir)
        if not filename.startswith('/'):
            filename = '{}/{}'.format(image_dir, filename)

        file_path = filename

    image_id = '{}_{}_{}'.format(
        self.config['name'],
        self.uid,
        start_time
    )
    self.logger.debug("image_id: {}".format(image_id))

    sequence_id = '{}_{}_{}'.format(
        self.config['name'],
        self.uid,
        observation.seq_time
    )

    # Camera metadata, merged with (and overridable by) caller headers
    metadata = {
        'camera_name': self.name,
        'camera_uid': self.uid,
        'field_name': observation.field.field_name,
        'file_path': file_path,
        'filter': self.filter_type,
        'image_id': image_id,
        'is_primary': self.is_primary,
        'sequence_id': sequence_id,
        'start_time': start_time,
    }
    metadata.update(headers)
    exp_time = kwargs.get('exp_time', observation.exp_time)
    exposure_event = self.take_exposure(seconds=exp_time, filename=file_path, **kwargs)

    # Process the exposure once readout is complete
    t = Thread(target=self.process_exposure, args=(metadata, camera_event, exposure_event))
    t.name = '{}Thread'.format(self.name)
    t.start()

    return camera_event
def process_exposure(self, info, signal_event, exposure_event=None):
    """ Processes the exposure

    Writes observation metadata into the FITS header of the saved image,
    optionally generates a pretty image (primary camera) or compresses the
    file, records the metadata in the database, then signals completion.

    Args:
        info (dict): Header metadata saved for the image
        signal_event (threading.Event): An event that is set signifying that
            the camera is done with this exposure
        exposure_event (threading.Event, optional): An event that should be
            set when the exposure is complete, triggering the processing.
    """
    # If passed an Event that signals the end of the exposure wait for it to be set
    if exposure_event:
        exposure_event.wait()

    image_id = info['image_id']
    file_path = info['file_path']
    self.logger.debug("Processing {}".format(image_id))

    # Explicity convert the equinox for FITS header
    # NOTE(review): assumes info['equinox'] has a `.value` string like
    # 'J2000' - an AttributeError/ValueError here is NOT caught, only a
    # missing key; confirm upstream format
    try:
        equinox = float(info['equinox'].value.replace('J', ''))
    except KeyError:
        equinox = ''

    # Add FITS headers from info the same as images.cr2_to_fits()
    self.logger.debug("Updating FITS headers: {}".format(file_path))
    with fits.open(file_path, 'update') as f:
        hdu = f[0]
        hdu.header.set('IMAGEID', info.get('image_id', ''))
        hdu.header.set('SEQID', info.get('sequence_id', ''))
        hdu.header.set('FIELD', info.get('field_name', ''))
        hdu.header.set('RA-MNT', info.get('ra_mnt', ''), 'Degrees')
        hdu.header.set('HA-MNT', info.get('ha_mnt', ''), 'Degrees')
        hdu.header.set('DEC-MNT', info.get('dec_mnt', ''), 'Degrees')
        hdu.header.set('EQUINOX', equinox)
        hdu.header.set('AIRMASS', info.get('airmass', ''), 'Sec(z)')
        hdu.header.set('FILTER', info.get('filter', ''))
        hdu.header.set('LAT-OBS', info.get('latitude', ''), 'Degrees')
        hdu.header.set('LONG-OBS', info.get('longitude', ''), 'Degrees')
        hdu.header.set('ELEV-OBS', info.get('elevation', ''), 'Meters')
        hdu.header.set('MOONSEP', info.get('moon_separation', ''), 'Degrees')
        hdu.header.set('MOONFRAC', info.get('moon_fraction', ''))
        hdu.header.set('CREATOR', info.get('creator', ''), 'POCS Software version')
        hdu.header.set('INSTRUME', info.get('camera_uid', ''), 'Camera ID')
        hdu.header.set('OBSERVER', info.get('observer', ''), 'PANOPTES Unit ID')
        hdu.header.set('ORIGIN', info.get('origin', ''))
        hdu.header.set('RA-RATE', info.get('tracking_rate_ra', ''), 'RA Tracking Rate')

    # Only the primary camera gets a pretty image and a "current" db record;
    # secondary cameras just get compressed
    if info['is_primary']:
        self.logger.debug("Extracting pretty image")
        images.make_pretty_image(file_path, title=info['field_name'], primary=True)

        self.logger.debug("Adding current observation to db: {}".format(image_id))
        self.db.insert_current('observations', info, include_collection=False)
    else:
        self.logger.debug('Compressing {}'.format(file_path))
        images.fpack(file_path)

    self.logger.debug("Adding image metadata to db: {}".format(image_id))
    self.db.observations.insert_one({
        'data': info,
        'date': current_time(datetime=True),
        'type': 'observations',
        'image_id': image_id,
    })

    # Mark the event as done
    signal_event.set()
def export(self,
           yesterday=True,
           start_date=None,
           end_date=None,
           collections=None,
           backup_dir=None,
           compress=True):  # pragma: no cover
    """Exports the mongodb to an external file

    Args:
        yesterday (bool, optional): Export only yesterday, defaults to True
        start_date (str, optional): Start date for export if `yesterday` is
            False, defaults to None, e.g. 2016-01-01
        end_date (None, optional): End date for export if `yesterday` is
            False, defaults to None, e.g. 2016-01-31
        collections (list, optional): Which collections to include,
            defaults to all
        backup_dir (str, optional): Backup directory, defaults to /backups
        compress (bool, optional): Compress output file with gzip,
            defaults to True

    Returns:
        list: List of saved files
    """
    # Avoid a mutable default argument; None is equivalent to the old
    # default of ['all']
    if collections is None:
        collections = ['all']

    if backup_dir is None:
        backup_dir = '{}/backups/'.format(
            os.getenv('PANDIR', default='/var/panoptes/'))

    if not os.path.exists(backup_dir):
        warn("Creating backup dir")
        os.makedirs(backup_dir)

    if yesterday:
        # Cover the whole of yesterday (UTC of current_time minus one day)
        start_dt = (current_time() - 1. * u.day).datetime
        start = datetime(start_dt.year, start_dt.month, start_dt.day, 0, 0, 0, 0)
        end = datetime(start_dt.year, start_dt.month, start_dt.day, 23, 59, 59, 0)
    else:
        # Fix: the message was previously `warn(...)`, whose None return was
        # used as the assert message; use a plain string
        assert start_date, "start-date required if not using yesterday"

        y, m, d = [int(x) for x in start_date.split('-')]
        start_dt = date(y, m, d)

        if end_date is None:
            end_dt = start_dt
        else:
            y, m, d = [int(x) for x in end_date.split('-')]
            end_dt = date(y, m, d)

        start = datetime.fromordinal(start_dt.toordinal())
        end = datetime(end_dt.year, end_dt.month, end_dt.day, 23, 59, 59, 0)

    if 'all' in collections:
        collections = self.collections

    # Filename component for the date range covered
    date_str = start.strftime('%Y-%m-%d')
    end_str = end.strftime('%Y-%m-%d')
    if end_str != date_str:
        date_str = '{}_to_{}'.format(date_str, end_str)

    out_files = list()

    console.color_print(
        "Exporting collections: ", 'default',
        "\t{}".format(date_str.replace('_', ' ')), 'yellow')

    for collection in collections:
        # Fix: this was a bare `next`, which is a no-op expression statement;
        # `continue` actually skips unknown collections
        if collection not in self.collections:
            continue
        console.color_print("\t{}".format(collection))

        out_file = '{}{}_{}.json'.format(
            backup_dir, date_str.replace('-', ''), collection)

        col = getattr(self, collection)
        try:
            entries = [
                x for x in col.find({
                    'date': {
                        '$gt': start,
                        '$lt': end
                    }
                }).sort([('date', pymongo.ASCENDING)])
            ]
        except pymongo.errors.OperationFailure:
            # Sorting can fail (e.g. exceeds memory limit); fall back to
            # the unsorted result
            entries = [
                x for x in col.find({'date': {
                    '$gt': start,
                    '$lt': end
                }})
            ]

        if len(entries):
            console.color_print(
                "\t\t{} records exported".format(len(entries)), 'yellow')
            content = json.dumps(entries, default=json_util.default)
            write_type = 'w'

            if compress:
                console.color_print("\t\tCompressing...", 'lightblue')
                content = gzip.compress(bytes(content, 'utf8'))
                out_file = out_file + '.gz'
                write_type = 'wb'

            with open(out_file, write_type) as f:
                console.color_print("\t\tWriting file: ", 'lightblue', out_file, 'yellow')
                f.write(content)

            out_files.append(out_file)
        else:
            console.color_print("\t\tNo records found", 'yellow')

    console.color_print("Output file: {}".format(out_files))
    return out_files
def autofocus(self,
              seconds=None,
              focus_range=None,
              focus_step=None,
              thumbnail_size=None,
              keep_files=None,
              take_dark=None,
              merit_function=None,
              merit_function_kwargs=None,
              coarse=False,
              plots=True,
              blocking=False,
              *args, **kwargs):
    """
    Focuses the camera using the specified merit function. Optionally
    performs a coarse focus first before performing the default fine focus.
    The expectation is that coarse focus will only be required for first use
    of a optic to establish the approximate position of infinity focus and
    after updating the intial focus position in the config only fine focus
    will be required.

    Args:
        seconds (scalar, optional): Exposure time for focus exposures, if not
            specified will use value from config.
        focus_range (2-tuple, optional): Coarse & fine focus sweep range, in
            encoder units. Specify to override values from config.
        focus_step (2-tuple, optional): Coarse & fine focus sweep steps, in
            encoder units. Specify to override values from config.
        thumbnail_size (int, optional): Size of square central region of image
            to use, default 500 x 500 pixels.
        keep_files (bool, optional): If True will keep all images taken during
            focusing. If False (default) will delete all except the first and
            last images from each focus run.
        take_dark (bool, optional): If True will attempt to take a dark frame
            before the focus run, and use it for dark subtraction and hot
            pixel masking, default True.
        merit_function (str/callable, optional): Merit function to use as a
            focus metric, default vollath_F4.
        merit_function_kwargs (dict, optional): Dictionary of additional
            keyword arguments for the merit function.
        coarse (bool, optional): Whether to begin with coarse focusing,
            default False.
        plots (bool, optional): Whether to write focus plots to images folder,
            default True.
        blocking (bool, optional): Whether to block until autofocus complete,
            default False.

    Returns:
        threading.Event: Event that will be set when autofocusing is complete

    Raises:
        ValueError: If any of exposure time, focus range, focus step or
            thumbnail size is neither given nor available from the config.
    """
    # NOTE(review): logger.error here runs only when the assert fails and
    # its None return becomes the assert message - confirm intent
    assert self._camera.is_connected, self.logger.error(
        "Camera must be connected for autofocus!")

    assert self.is_connected, self.logger.error("Focuser must be connected for autofocus!")

    if not focus_range:
        if self.autofocus_range:
            focus_range = self.autofocus_range
        else:
            raise ValueError(
                "No focus_range specified, aborting autofocus of {}!".format(self._camera))

    if not focus_step:
        if self.autofocus_step:
            focus_step = self.autofocus_step
        else:
            raise ValueError(
                "No focus_step specified, aborting autofocus of {}!".format(self._camera))

    if not seconds:
        if self.autofocus_seconds:
            seconds = self.autofocus_seconds
        else:
            # Fix: message was passed as a second ValueError argument and
            # never formatted; use str.format like the other checks
            raise ValueError(
                "No focus exposure time specified, aborting autofocus of {}!".format(
                    self._camera))

    if not thumbnail_size:
        if self.autofocus_size:
            thumbnail_size = self.autofocus_size
        else:
            # Fix: same unformatted-message defect as above
            raise ValueError(
                "No focus thumbnail size specified, aborting autofocus of {}!".format(
                    self._camera))

    if keep_files is None:
        if self.autofocus_keep_files:
            keep_files = True
        else:
            keep_files = False

    if take_dark is None:
        if self.autofocus_take_dark is not None:
            take_dark = self.autofocus_take_dark
        else:
            take_dark = True

    if not merit_function:
        if self.autofocus_merit_function:
            merit_function = self.autofocus_merit_function
        else:
            merit_function = 'vollath_F4'

    if not merit_function_kwargs:
        if self.autofocus_merit_function_kwargs:
            merit_function_kwargs = self.autofocus_merit_function_kwargs
        else:
            merit_function_kwargs = {}

    # Default to no dark thumbnail; set below only if one is actually taken
    dark_thumb = None

    if take_dark:
        image_dir = self.config['directories']['images']
        start_time = current_time(flatten=True)
        file_path = "{}/{}/{}/{}/{}.{}".format(image_dir,
                                               'focus',
                                               self._camera.uid,
                                               start_time,
                                               "dark",
                                               self._camera.file_extension)
        self.logger.debug('Taking dark frame {} on camera {}'.format(file_path, self._camera))
        try:
            dark_thumb = self._camera.get_thumbnail(seconds,
                                                    file_path,
                                                    thumbnail_size,
                                                    keep_file=True,
                                                    dark=True)
            # Mask 'saturated' with a low threshold to remove hot pixels
            dark_thumb = images.mask_saturated(dark_thumb, threshold=0.3)
        except TypeError:
            # Fix: previously dark_thumb was left unbound on this path,
            # causing a NameError later; it now stays None
            self.logger.warning("Camera {} does not support dark frames!".format(self._camera))

    # Optionally run a coarse sweep first; the fine sweep then waits for it
    if coarse:
        coarse_event = Event()
        coarse_thread = Thread(target=self._autofocus,
                               args=args,
                               kwargs={'seconds': seconds,
                                       'focus_range': focus_range,
                                       'focus_step': focus_step,
                                       'thumbnail_size': thumbnail_size,
                                       'keep_files': keep_files,
                                       'dark_thumb': dark_thumb,
                                       'merit_function': merit_function,
                                       'merit_function_kwargs': merit_function_kwargs,
                                       'coarse': True,
                                       'plots': plots,
                                       'start_event': None,
                                       'finished_event': coarse_event,
                                       **kwargs})
        coarse_thread.start()
    else:
        coarse_event = None

    fine_event = Event()
    fine_thread = Thread(target=self._autofocus,
                         args=args,
                         kwargs={'seconds': seconds,
                                 'focus_range': focus_range,
                                 'focus_step': focus_step,
                                 'thumbnail_size': thumbnail_size,
                                 'keep_files': keep_files,
                                 'dark_thumb': dark_thumb,
                                 'merit_function': merit_function,
                                 'merit_function_kwargs': merit_function_kwargs,
                                 'coarse': False,
                                 'plots': plots,
                                 'start_event': coarse_event,
                                 'finished_event': fine_event,
                                 **kwargs})
    fine_thread.start()

    if blocking:
        fine_event.wait()

    return fine_event
def target_down(location): return altaz_to_radec(obstime=current_time(), location=location, alt=5, az=90)
def take_observation(self, observation, headers=None, filename=None, **kwargs):
    """Take an observation

    Gathers various header information, sets the file path, and calls
    `take_exposure`. Also creates a `threading.Event` object and a
    `threading.Timer` object. The timer calls `process_exposure` after the
    set amount of time is expired (`observation.exp_time + self.readout_time`).

    Note:
        If a `filename` is passed in it can either be a full path that
        includes the extension, or the basename of the file, in which case
        the directory path and extension will be added to the `filename`
        for output

    Args:
        observation (~pocs.scheduler.observation.Observation): Object
            describing the observation
        headers (dict): Header data to be saved along with the file
        filename (str, optional): Filename for saving, defaults to ISOT
            time stamp
        **kwargs (dict): Optional keyword arguments (`exp_time`)

    Returns:
        threading.Event: An event to be set when the image is done processing
    """
    # To be used for marking when exposure is complete (see `process_exposure`)
    camera_event = Event()

    if headers is None:
        headers = {}

    # Prefer a caller-supplied start time so all cameras in one observation
    # can share a single timestamp; otherwise stamp with "now".
    start_time = headers.get('start_time', current_time(flatten=True))

    # Get the filename
    image_dir = "{}/fields/{}/{}/{}/".format(
        self.config['directories']['images'],
        observation.field.field_name,
        self.uid,
        observation.seq_time,
    )

    # Get full file path
    if filename is None:
        file_path = "{}/{}.{}".format(image_dir, start_time, self.file_extension)
    else:
        # Add extension
        if '.' not in filename:
            filename = '{}.{}'.format(filename, self.file_extension)

        # Add directory
        if '/' not in filename:
            filename = '{}/{}'.format(image_dir, filename)

        file_path = filename

    # Per-image and per-sequence identifiers: <unit name>_<camera uid>_<time>
    image_id = '{}_{}_{}'.format(
        self.config['name'],
        self.uid,
        start_time
    )
    self.logger.debug("image_id: {}".format(image_id))

    sequence_id = '{}_{}_{}'.format(
        self.config['name'],
        self.uid,
        observation.seq_time
    )

    # Camera metadata (merged below with caller-provided headers, which win
    # on key collisions because `update` is applied last)
    metadata = {
        'camera_name': self.name,
        'camera_uid': self.uid,
        'field_name': observation.field.field_name,
        'file_path': file_path,
        'filter': self.filter_type,
        'image_id': image_id,
        'is_primary': self.is_primary,
        'sequence_id': sequence_id,
        'start_time': start_time,
    }
    metadata.update(headers)

    exp_time = kwargs.get('exp_time', observation.exp_time.value)
    proc = self.take_exposure(seconds=exp_time, filename=file_path)

    # Add most recent exposure to list
    # NOTE(review): records the eventual FITS name (raw `.cr2` is converted
    # downstream), not the raw file written by `take_exposure`.
    observation.exposure_list[image_id] = file_path.replace('.cr2', '.fits')

    # Process the image after a set amount of time
    wait_time = exp_time + self.readout_time

    t = Timer(wait_time, self.process_exposure, (metadata, camera_event, proc))
    t.name = '{}Thread'.format(self.name)
    t.start()

    return camera_event
def process_exposure(self, info, signal_event, exposure_event=None):
    """ Processes the exposure

    Writes the metadata from `info` into the FITS header of the finished
    exposure, then either generates a pretty image and records the current
    observation (primary camera) or fpack-compresses the file (non-primary),
    and finally inserts the image metadata into the database.

    Args:
        info (dict): Header metadata saved for the image
        signal_event (threading.Event): An event that is set signifying that
            the camera is done with this exposure
        exposure_event (threading.Event, optional): An event that should be set
            when the exposure is complete, triggering the processing.
    """
    # If passed an Event that signals the end of the exposure wait for it to be set
    if exposure_event:
        exposure_event.wait()

    image_id = info['image_id']
    file_path = info['file_path']
    self.logger.debug("Processing {}".format(image_id))

    # Explicitly convert the equinox for the FITS header. The value is
    # expected to have a `.value` that is a string like 'J2000.0'.
    # BUGFIX: also catch AttributeError/ValueError so that a present but
    # malformed 'equinox' entry degrades to the empty-string default instead
    # of raising inside this Timer-callback thread (only KeyError was
    # caught before, i.e. only the missing-key case).
    try:
        equinox = float(info['equinox'].value.replace('J', ''))
    except (KeyError, AttributeError, ValueError):
        equinox = ''

    # Add FITS headers from info the same as images.cr2_to_fits()
    self.logger.debug("Updating FITS headers: {}".format(file_path))
    with fits.open(file_path, 'update') as f:
        hdu = f[0]
        hdu.header.set('IMAGEID', info.get('image_id', ''))
        hdu.header.set('SEQID', info.get('sequence_id', ''))
        hdu.header.set('FIELD', info.get('field_name', ''))
        hdu.header.set('RA-MNT', info.get('ra_mnt', ''), 'Degrees')
        hdu.header.set('HA-MNT', info.get('ha_mnt', ''), 'Degrees')
        hdu.header.set('DEC-MNT', info.get('dec_mnt', ''), 'Degrees')
        hdu.header.set('EQUINOX', equinox)
        hdu.header.set('AIRMASS', info.get('airmass', ''), 'Sec(z)')
        hdu.header.set('FILTER', info.get('filter', ''))
        hdu.header.set('LAT-OBS', info.get('latitude', ''), 'Degrees')
        hdu.header.set('LONG-OBS', info.get('longitude', ''), 'Degrees')
        hdu.header.set('ELEV-OBS', info.get('elevation', ''), 'Meters')
        hdu.header.set('MOONSEP', info.get('moon_separation', ''), 'Degrees')
        hdu.header.set('MOONFRAC', info.get('moon_fraction', ''))
        hdu.header.set('CREATOR', info.get('creator', ''), 'POCS Software version')
        hdu.header.set('INSTRUME', info.get('camera_uid', ''), 'Camera ID')
        hdu.header.set('OBSERVER', info.get('observer', ''), 'PANOPTES Unit ID')
        hdu.header.set('ORIGIN', info.get('origin', ''))
        hdu.header.set('RA-RATE', info.get('tracking_rate_ra', ''), 'RA Tracking Rate')

    if info['is_primary']:
        # Primary camera: make a preview image and store the current
        # observation record for the unit.
        self.logger.debug("Extracting pretty image")
        images.make_pretty_image(file_path, title=info['field_name'], primary=True)

        self.logger.debug("Adding current observation to db: {}".format(image_id))
        self.db.insert_current('observations', info, include_collection=False)
    else:
        # Non-primary cameras: just compress the file in place.
        self.logger.debug('Compressing {}'.format(file_path))
        images.fpack(file_path)

    self.logger.debug("Adding image metadata to db: {}".format(image_id))
    self.db.observations.insert_one({
        'data': info,
        'date': current_time(datetime=True),
        'type': 'observations',
        'image_id': image_id,
    })

    # Mark the event as done
    signal_event.set()
def export(self,
           yesterday=True,
           start_date=None,
           end_date=None,
           collections=['all'],
           backup_dir=None,
           compress=True):  # pragma: no cover
    """Exports the mongodb to an external file

    Args:
        yesterday (bool, optional): Export only yesterday, defaults to True
        start_date (str, optional): Start date for export if `yesterday` is
            False, defaults to None, e.g. 2016-01-01
        end_date (None, optional): End date for export if `yesterday` is
            False, defaults to None, e.g. 2016-01-31
        collections (list, optional): Which collections to include,
            defaults to all (note: the default list is never mutated, so the
            shared mutable default is safe here)
        backup_dir (str, optional): Backup directory, defaults to /backups
        compress (bool, optional): Compress output file with gzip,
            defaults to True

    Returns:
        list: List of saved files
    """
    if backup_dir is None:
        backup_dir = '{}/backups/'.format(os.getenv('PANDIR', default='/var/panoptes/'))

    if not os.path.exists(backup_dir):
        warn("Creating backup dir")
        os.makedirs(backup_dir)

    if yesterday:
        # Full calendar day for "yesterday": midnight to 23:59:59.
        start_dt = (current_time() - 1. * u.day).datetime
        start = datetime(start_dt.year, start_dt.month, start_dt.day, 0, 0, 0, 0)
        end = datetime(start_dt.year, start_dt.month, start_dt.day, 23, 59, 59, 0)
    else:
        assert start_date, warn("start-date required if not using yesterday")

        y, m, d = [int(x) for x in start_date.split('-')]
        start_dt = date(y, m, d)

        if end_date is None:
            end_dt = start_dt
        else:
            y, m, d = [int(x) for x in end_date.split('-')]
            end_dt = date(y, m, d)

        start = datetime.fromordinal(start_dt.toordinal())
        end = datetime(end_dt.year, end_dt.month, end_dt.day, 23, 59, 59, 0)

    if 'all' in collections:
        collections = self.collections

    date_str = start.strftime('%Y-%m-%d')
    end_str = end.strftime('%Y-%m-%d')

    if end_str != date_str:
        date_str = '{}_to_{}'.format(date_str, end_str)

    out_files = list()

    console.color_print(
        "Exporting collections: ", 'default',
        "\t{}".format(date_str.replace('_', ' ')), 'yellow')

    for collection in collections:
        # BUGFIX: the original had a bare `next` here — just an expression
        # referencing the builtin, which does nothing — so unknown
        # collection names were never skipped and fell through to the
        # getattr below. `continue` actually skips them.
        if collection not in self.collections:
            continue

        console.color_print("\t{}".format(collection))
        out_file = '{}{}_{}.json'.format(
            backup_dir, date_str.replace('-', ''), collection)

        col = getattr(self, collection)
        try:
            # Prefer date-sorted output; fall back to unsorted if the
            # server rejects the sort (e.g. exceeds the sort memory limit).
            entries = [x for x in col.find({'date': {'$gt': start, '$lt': end}}
                                           ).sort([('date', pymongo.ASCENDING)])]
        except pymongo.errors.OperationFailure:
            entries = [x for x in col.find({'date': {'$gt': start, '$lt': end}})]

        if len(entries):
            console.color_print("\t\t{} records exported".format(len(entries)), 'yellow')
            content = json.dumps(entries, default=json_util.default)
            write_type = 'w'

            if compress:
                console.color_print("\t\tCompressing...", 'lightblue')
                content = gzip.compress(bytes(content, 'utf8'))
                out_file = out_file + '.gz'
                write_type = 'wb'

            with open(out_file, write_type) as f:
                console.color_print("\t\tWriting file: ", 'lightblue', out_file, 'yellow')
                f.write(content)

            out_files.append(out_file)
        else:
            console.color_print("\t\tNo records found", 'yellow')

    console.color_print("Output file: {}".format(out_files))
    return out_files