Example 1
            def satellite_angular_offset(index, clock_offset):
                # Fetch observed position of object at this time point
                pt_utc = path_x_y[index][3]
                pt_alt = path_alt_az[index][0]
                pt_az = path_alt_az[index][1]

                # Project position of this satellite in space at this time point
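                # The observation timestamp is UTC; adding tai_utc_offset converts
                # it to TAI before the Skyfield time object is built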
                t = ts.tai_jd(jd=jd_from_unix(utc=pt_utc + tai_utc_offset +
                                              clock_offset))

                # Project position of this satellite in the observer's sky
                sight_line = sat - observer
                topocentric = sight_line.at(t)
                sat_alt, sat_az, sat_distance = topocentric.altaz()

                # Work out offset of satellite's position from observed moving object
                ang_mismatch = ang_dist(ra0=pt_az * pi / 180,
                                        dec0=pt_alt * pi / 180,
                                        ra1=sat_az.radians,
                                        dec1=sat_alt.radians) * 180 / pi

                return ang_mismatch, sat_distance
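
The snippet above leans on a helper, ang_dist, to measure the great-circle separation between two directions on the sky. That helper is not shown in this excerpt; the following is a minimal sketch of what such a function might look like (the name and keyword arguments are taken from the call sites above, but this implementation is an assumption), using the spherical law of cosines:

from math import sin, cos, acos

def ang_dist(ra0, dec0, ra1, dec1):
    """Great-circle separation (radians) between two sky directions.

    All inputs are in radians. The formula treats the two coordinates
    symmetrically as longitude/latitude on a sphere, which is why the
    call above can pass azimuth/altitude in place of RA/Dec.
    """
    # Spherical law of cosines; clamp to guard against rounding errors
    c = (sin(dec0) * sin(dec1) +
         cos(dec0) * cos(dec1) * cos(ra1 - ra0))
    return acos(max(-1.0, min(1.0, c)))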
Example 2
def shower_determination(utc_min, utc_max):
    """
    Estimate the parent showers of all meteors observed between the unix times <utc_min> and <utc_max>.

    :param utc_min:
        The start of the time period in which we should determine the parent showers of meteors (unix time).
    :type utc_min:
        float
    :param utc_max:
        The end of the time period in which we should determine the parent showers of meteors (unix time).
    :type utc_max:
        float
    :return:
        None
    """

    # Load list of meteor showers
    shower_list = read_shower_list()

    # Open connection to image archive
    db = obsarchive_db.ObservationDatabase(file_store_path=settings['dbFilestore'],
                                           db_host=installation_info['mysqlHost'],
                                           db_user=installation_info['mysqlUser'],
                                           db_password=installation_info['mysqlPassword'],
                                           db_name=installation_info['mysqlDatabase'],
                                           obstory_id=installation_info['observatoryId'])

    logging.info("Starting meteor shower identification.")

    # Count how many images we manage to successfully fit
    outcomes = {
        'successful_fits': 0,
        'error_records': 0,
        'rescued_records': 0,
        'insufficient_information': 0
    }

    # Status update
    logging.info("Searching for meteors within period {} to {}".format(date_string(utc_min), date_string(utc_max)))

    # Open direct connection to database
    conn = db.con

    # Search for meteors within this time period
    conn.execute("""
SELECT ao.obsTime, ao.publicId AS observationId, f.repositoryFname, l.publicId AS observatory
FROM archive_observations ao
LEFT OUTER JOIN archive_files f ON (ao.uid = f.observationId AND
    f.semanticType=(SELECT uid FROM archive_semanticTypes WHERE name="pigazing:movingObject/video"))
INNER JOIN archive_observatories l ON ao.observatory = l.uid
INNER JOIN archive_metadata am2 ON ao.uid = am2.observationId AND
    am2.fieldId=(SELECT uid FROM archive_metadataFields WHERE metaKey="web:category")
WHERE ao.obsType=(SELECT uid FROM archive_semanticTypes WHERE name='pigazing:movingObject/') AND
      ao.obsTime BETWEEN %s AND %s AND
      am2.stringValue = "Meteor"
ORDER BY ao.obsTime;
""", (utc_min, utc_max))
    results = conn.fetchall()

    # Log the number of images we are about to work on
    logging.info("Estimating the parent showers of {:d} meteors.".format(len(results)))

    # Count how many meteors we find in each shower
    meteor_count_by_shower = {}

    # Analyse each meteor in turn
    for item_index, item in enumerate(results):
        # Fetch metadata about this object, some of which might be on the file, and some on the observation
        obs_obj = db.get_observation(observation_id=item['observationId'])
        obs_metadata = {item.key: item.value for item in obs_obj.meta}
        if item['repositoryFname']:
            file_obj = db.get_file(repository_fname=item['repositoryFname'])
            file_metadata = {item.key: item.value for item in file_obj.meta}
        else:
            file_metadata = {}
        all_metadata = {**obs_metadata, **file_metadata}

        # Check we have all required metadata
        if 'pigazing:path' not in all_metadata:
            logging.info("Cannot process <{}> due to inadequate metadata.".format(item['observationId']))
            continue

        # Make ID string to prefix to all logging messages about this event
        logging_prefix = "{date} [{obs}]".format(
            date=date_string(utc=item['obsTime']),
            obs=item['observationId']
        )

        # Project path from (x,y) coordinates into (RA, Dec)
        projector = PathProjection(
            db=db,
            obstory_id=item['observatory'],
            time=item['obsTime'],
            logging_prefix=logging_prefix
        )

        path_x_y, path_ra_dec_at_epoch, path_alt_az, sight_line_list_this = projector.ra_dec_from_x_y(
            path_json=all_metadata['pigazing:path'],
            path_bezier_json=all_metadata['pigazing:pathBezier'],
            detections=all_metadata['pigazing:detectionCount'],
            duration=all_metadata['pigazing:duration']
        )

        # Check for error
        if projector.error is not None:
            if projector.error in outcomes:
                outcomes[projector.error] += 1
            continue

        # Check for notifications
        for notification in projector.notifications:
            if notification in outcomes:
                outcomes[notification] += 1

        # Check number of points in path
        path_len = len(path_x_y)

        # List of candidate showers this meteor might belong to
        candidate_showers = []

        # Test for each candidate meteor shower in turn
        for shower in shower_list:
            # Work out celestial coordinates of the shower radiant: RA (hours) / Dec (degrees) at the epoch of observation
            radiant_ra_at_epoch, radiant_dec_at_epoch = ra_dec_from_j2000(ra0=shower['RA'],
                                                                          dec0=shower['Decl'],
                                                                          utc_new=item['obsTime'])

            # Work out the alt-az of the shower's radiant using the known location of the camera. Results are in degrees.
            alt_az_pos = alt_az(ra=radiant_ra_at_epoch, dec=radiant_dec_at_epoch,
                                utc=item['obsTime'],
                                latitude=projector.obstory_info['latitude'],
                                longitude=projector.obstory_info['longitude'])

            # Work out position of the Sun (J2000)
            sun_ra_j2000, sun_dec_j2000 = sun_pos(utc=item['obsTime'])

            # Work out position of the Sun (RA, Dec of epoch)
            sun_ra_at_epoch, sun_dec_at_epoch = ra_dec_from_j2000(ra0=sun_ra_j2000, dec0=sun_dec_j2000,
                                                                  utc_new=item['obsTime'])

            # Offset from peak of shower
            year = 365.2524
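            # The Sun's RA (hours) is converted into degrees (multiply by 15) and
            # compared with the shower's quoted peak position; the angular offset
            # is then rescaled from degrees into days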
            peak_offset = (sun_ra_at_epoch * 180 / 12. - shower['peak']) * year / 360  # days
            while peak_offset < -year / 2:
                peak_offset += year
            while peak_offset > year / 2:
                peak_offset -= year

            start_offset = peak_offset + shower['start'] - 4
            end_offset = peak_offset + shower['end'] + 4

            # Estimate ZHR of shower at the time the meteor was observed
            zhr = 0
            if abs(peak_offset) < 2:
                zhr = shower['zhr']  # Shower is within 2 days of maximum; use quoted peak ZHR value
            if start_offset < 0 < end_offset:
                zhr = max(zhr, 5)  # Shower is not at peak, but is active; assume ZHR=5

            # Correct hourly rate for the altitude of the shower radiant
            hourly_rate = zhr * sin(alt_az_pos[0] * pi / 180)

            # If hourly rate is zero, this shower is not active
            if hourly_rate <= 0:
                # logging.info("Meteor shower <{}> has zero rate".format(shower['name']))
                continue

            # Work out angular distance of meteor from radiant (radians)
            path_radiant_sep = [ang_dist(ra0=pt[0], dec0=pt[1],
                                         ra1=radiant_ra_at_epoch * pi / 12, dec1=radiant_dec_at_epoch * pi / 180)
                                for pt in path_ra_dec_at_epoch]
            change_in_radiant_dist = path_radiant_sep[-1] - path_radiant_sep[0]  # radians

            # Reject meteors that travel *towards* the radiant
            if change_in_radiant_dist < 0:
                continue

            # Convert path to Cartesian coordinates on a unit sphere
            path_cartesian = [Vector.from_ra_dec(ra=ra * 12 / pi, dec=dec * 180 / pi)
                              for ra, dec in path_ra_dec_at_epoch]

            # Work out the cross product of the first and last points, which is normal to the great circle containing the meteor's path
            first = path_cartesian[0]
            last = path_cartesian[-1]
            path_normal = first.cross_product(last)

            # Work out angle of path normal to meteor shower radiant
            radiant_cartesian = Vector.from_ra_dec(ra=radiant_ra_at_epoch, dec=radiant_dec_at_epoch)
            theta = path_normal.angle_with(radiant_cartesian)  # degrees

            if theta > 90:
                theta = 180 - theta

            # Angular separation between the shower radiant and the closest
            # approach of the meteor's (extended) path to it
            radiant_angle = 90 - theta

            # Work out likelihood metric that this meteor belongs to this shower
            radiant_angle_std_dev = 2  # Allow 2 degree mismatch in radiant pos
            likelihood = hourly_rate * scipy.stats.norm(loc=0, scale=radiant_angle_std_dev).pdf(radiant_angle)

            # Store information about the likelihood this meteor belongs to this shower
            candidate_showers.append({
                'name': shower['name'],
                'likelihood': likelihood,
                'offset': radiant_angle,
                'change_radiant_dist': change_in_radiant_dist,
                'shower_rate': hourly_rate
            })

        # Add model possibility for sporadic meteor
        hourly_rate = 5
        likelihood = hourly_rate * (1. / 90.)  # Flat density over the 0-90 degree range of possible radiant offsets
        candidate_showers.append({
            'name': "Sporadic",
            'likelihood': likelihood,
            'offset': 0,
            'shower_rate': hourly_rate
        })

        # Renormalise likelihoods into percentages, summing to 100
        sum_likelihood = sum(shower['likelihood'] for shower in candidate_showers)
        for shower in candidate_showers:
            shower['likelihood'] *= 100 / sum_likelihood

        # Sort candidates by likelihood
        candidate_showers.sort(key=itemgetter('likelihood'), reverse=True)

        # Report possible meteor shower identifications
        logging.info("{date} [{obs}] -- {showers}".format(
            date=date_string(utc=item['obsTime']),
            obs=item['observationId'],
            showers=", ".join([
                "{} {:.1f}% ({:.1f} deg offset)".format(shower['name'], shower['likelihood'], shower['offset'])
                for shower in candidate_showers
            ])
        ))

        # Identify most likely shower
        most_likely_shower = candidate_showers[0]['name']

        # Update tally of meteors
        if most_likely_shower not in meteor_count_by_shower:
            meteor_count_by_shower[most_likely_shower] = 0
        meteor_count_by_shower[most_likely_shower] += 1

        # Store meteor identification
        user = settings['pigazingUser']
        timestamp = time.time()
        db.set_observation_metadata(user_id=user, observation_id=item['observationId'], utc=timestamp,
                                    meta=mp.Meta(key="shower:name", value=most_likely_shower))
        db.set_observation_metadata(user_id=user, observation_id=item['observationId'], utc=timestamp,
                                    meta=mp.Meta(key="shower:radiant_offset", value=candidate_showers[0]['offset']))
        db.set_observation_metadata(user_id=user, observation_id=item['observationId'], utc=timestamp,
                                    meta=mp.Meta(key="shower:path_length",
                                                 value=ang_dist(ra0=path_ra_dec_at_epoch[0][0],
                                                                dec0=path_ra_dec_at_epoch[0][1],
                                                                ra1=path_ra_dec_at_epoch[-1][0],
                                                                dec1=path_ra_dec_at_epoch[-1][1]
                                                                ) * 180 / pi
                                                 ))
        db.set_observation_metadata(user_id=user, observation_id=item['observationId'], utc=timestamp,
                                    meta=mp.Meta(key="shower:path_ra_dec",
                                                 value="[[{:.3f},{:.3f}],[{:.3f},{:.3f}],[{:.3f},{:.3f}]]".format(
                                                     path_ra_dec_at_epoch[0][0] * 12 / pi,
                                                     path_ra_dec_at_epoch[0][1] * 180 / pi,
                                                     path_ra_dec_at_epoch[int(path_len / 2)][0] * 12 / pi,
                                                     path_ra_dec_at_epoch[int(path_len / 2)][1] * 180 / pi,
                                                     path_ra_dec_at_epoch[-1][0] * 12 / pi,
                                                     path_ra_dec_at_epoch[-1][1] * 180 / pi,
                                                 )
                                                 ))

        # Meteor successfully identified
        outcomes['successful_fits'] += 1

        # Update database
        db.commit()

    # Report how many fits we achieved
    logging.info("{:d} meteors successfully identified.".format(outcomes['successful_fits']))
    logging.info("{:d} malformed database records.".format(outcomes['error_records']))
    logging.info("{:d} rescued database records.".format(outcomes['rescued_records']))
    logging.info("{:d} meteors with incomplete data.".format(outcomes['insufficient_information']))

    # Report tally of meteors
    logging.info("Tally of meteors by shower:")
    for shower in sorted(meteor_count_by_shower.keys()):
        logging.info("    * {:32s}: {:6d}".format(shower, meteor_count_by_shower[shower]))

    # Clean up and exit
    db.commit()
    db.close_db()
    return
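
The heart of the classification above is the per-shower likelihood: the altitude-corrected hourly rate multiplied by a Gaussian penalty on the radiant offset, with a flat-prior "Sporadic" candidate added before the scores are renormalised into percentages. The following is a self-contained sketch of that scoring step; the candidate names and numbers are purely illustrative:

import scipy.stats

# Gaussian penalty on the mismatch between path and radiant (degrees)
radiant_angle_std_dev = 2
gaussian = scipy.stats.norm(loc=0, scale=radiant_angle_std_dev)

# Illustrative shower candidate: rate 40/hour, radiant offset 1.2 degrees
candidates = [{'name': "Perseids", 'likelihood': 40.0 * gaussian.pdf(1.2)}]

# Sporadic background: flat density over the 0-90 degree offset range
candidates.append({'name': "Sporadic", 'likelihood': 5.0 * (1.0 / 90.0)})

# Renormalise into percentages and pick the most likely candidate
total = sum(c['likelihood'] for c in candidates)
for c in candidates:
    c['likelihood'] *= 100 / total
candidates.sort(key=lambda c: c['likelihood'], reverse=True)
print("{} {:.1f}%".format(candidates[0]['name'], candidates[0]['likelihood']))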
Example 3
def plane_determination(utc_min, utc_max, source):
    """
    Estimate the identity of aircraft observed between the unix times <utc_min> and <utc_max>.

    :param utc_min:
        The start of the time period in which we should determine the identity of aircraft (unix time).
    :type utc_min:
        float
    :param utc_max:
        The end of the time period in which we should determine the identity of aircraft (unix time).
    :type utc_max:
        float
    :param source:
        The source we should use for plane trajectories. Either 'adsb' or 'fr24'.
    :type source:
        str
    :return:
        None
    """

    # Open connection to image archive
    db = obsarchive_db.ObservationDatabase(
        file_store_path=settings['dbFilestore'],
        db_host=installation_info['mysqlHost'],
        db_user=installation_info['mysqlUser'],
        db_password=installation_info['mysqlPassword'],
        db_name=installation_info['mysqlDatabase'],
        obstory_id=installation_info['observatoryId'])

    logging.info("Starting aircraft identification.")

    # Count how many images we manage to successfully fit
    outcomes = {
        'successful_fits': 0,
        'unsuccessful_fits': 0,
        'error_records': 0,
        'rescued_records': 0,
        'insufficient_information': 0
    }

    # Status update
    logging.info("Searching for aircraft within period {} to {}".format(
        date_string(utc_min), date_string(utc_max)))

    # Open direct connection to database
    conn = db.con

    # Search for planes and satellites within this time period
    conn.execute(
        """
SELECT ao.obsTime, ao.publicId AS observationId, f.repositoryFname, l.publicId AS observatory
FROM archive_observations ao
LEFT OUTER JOIN archive_files f ON (ao.uid = f.observationId AND
    f.semanticType=(SELECT uid FROM archive_semanticTypes WHERE name="pigazing:movingObject/video"))
INNER JOIN archive_observatories l ON ao.observatory = l.uid
INNER JOIN archive_metadata am2 ON ao.uid = am2.observationId AND
    am2.fieldId=(SELECT uid FROM archive_metadataFields WHERE metaKey="web:category")
WHERE ao.obsType=(SELECT uid FROM archive_semanticTypes WHERE name='pigazing:movingObject/') AND
      ao.obsTime BETWEEN %s AND %s AND
      (am2.stringValue='Plane' OR am2.stringValue='Satellite' OR am2.stringValue='Junk')
ORDER BY ao.obsTime
""", (utc_min, utc_max))
    results = conn.fetchall()

    # Log the number of images we are about to work on
    logging.info("Estimating the identity of {:d} aircraft.".format(
        len(results)))

    # Analyse each aircraft in turn
    for item_index, item in enumerate(results):
        # Fetch metadata about this object, some of which might be on the file, and some on the observation
        obs_obj = db.get_observation(observation_id=item['observationId'])
        obs_metadata = {item.key: item.value for item in obs_obj.meta}
        if item['repositoryFname']:
            file_obj = db.get_file(repository_fname=item['repositoryFname'])
            file_metadata = {item.key: item.value for item in file_obj.meta}
        else:
            file_metadata = {}
        all_metadata = {**obs_metadata, **file_metadata}

        # Check we have all required metadata
        if 'pigazing:path' not in all_metadata:
            logging.info(
                "Cannot process <{}> due to inadequate metadata.".format(
                    item['observationId']))
            continue

        # Make ID string to prefix to all logging messages about this event
        logging_prefix = "{date} [{obs}]".format(
            date=date_string(utc=item['obsTime']), obs=item['observationId'])

        # Project path from (x,y) coordinates into (RA, Dec)
        projector = PathProjection(db=db,
                                   obstory_id=item['observatory'],
                                   time=item['obsTime'],
                                   logging_prefix=logging_prefix)

        path_x_y, path_ra_dec_at_epoch, path_alt_az, sight_line_list = projector.ra_dec_from_x_y(
            path_json=all_metadata['pigazing:path'],
            path_bezier_json=all_metadata['pigazing:pathBezier'],
            detections=all_metadata['pigazing:detectionCount'],
            duration=all_metadata['pigazing:duration'])

        # Check for error
        if projector.error is not None:
            if projector.error in outcomes:
                outcomes[projector.error] += 1
            continue

        # Check for notifications
        for notification in projector.notifications:
            if notification in outcomes:
                outcomes[notification] += 1

        # Check number of points in path
        path_len = len(path_x_y)

        # Look up list of aircraft tracks at the time of this sighting
        if source == 'adsb':
            aircraft_list = fetch_planes_from_adsb(utc=item['obsTime'])
        elif source == 'fr24':
            aircraft_list = fetch_planes_from_fr24(utc=item['obsTime'])
        else:
            raise ValueError("Unknown source <{}>".format(source))

        # List of aircraft this moving object might be
        candidate_aircraft = []

        # Check that we found a list of aircraft
        if aircraft_list is None:
            logging.info("{date} [{obs}] -- No aircraft records found.".format(
                date=date_string(utc=item['obsTime']),
                obs=item['observationId']))
            outcomes['insufficient_information'] += 1
            continue

        # Logging message about how many aircraft we're testing
        # logging.info("{date} [{obs}] -- Matching against {count:7d} aircraft.".format(
        #     date=date_string(utc=item['obsTime']),
        #     obs=item['observationId'],
        #     count=len(aircraft_list)
        # ))

        # Test for each candidate aircraft in turn
        for aircraft in aircraft_list:
            # Fetch aircraft position at each time point along trajectory
            ang_mismatch_list = []
            distance_list = []
            altitude_list = []

            def aircraft_angular_offset(index, clock_offset):
                # Fetch observed position of object at this time point
                pt_utc = sight_line_list[index]['utc']
                observatory_position = sight_line_list[index]['obs_position']
                observed_sight_line = sight_line_list[index]['line'].direction

                # Project position of this aircraft in space at this time point
                aircraft_position = path_interpolate(aircraft=aircraft,
                                                     utc=pt_utc + clock_offset)
                if aircraft_position is None:
                    return np.nan, np.nan, np.nan

                # Convert position to Cartesian coordinates
                aircraft_point = Point.from_lat_lng(
                    lat=aircraft_position['lat'],
                    lng=aircraft_position['lon'],
                    alt=aircraft_position['altitude'] * feet,
                    utc=None)

                # Work out offset of plane's position from observed moving object
                aircraft_sight_line = aircraft_point.to_vector(
                ) - observatory_position.to_vector()
                angular_offset = aircraft_sight_line.angle_with(
                    other=observed_sight_line)  # degrees
                distance = abs(aircraft_sight_line)
                altitude = aircraft_position['altitude'] * feet

                return angular_offset, distance, altitude

            def time_offset_objective(p):
                """
                Objective function that we minimise in order to find the best fit clock offset between the observed
                and model paths.

                :param p:
                    Vector with a single component: the clock offset
                :return:
                    Metric to minimise
                """

                # Turn input parameters into a time offset
                clock_offset = p[0]

                # Look up angular offset
                ang_mismatch, distance, altitude = aircraft_angular_offset(
                    index=0, clock_offset=clock_offset)

                # Return metric to minimise
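                # (the exp() weighting penalises fits with large positive clock
                # offsets, and mildly favours negative ones)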
                return ang_mismatch * exp(clock_offset / 8)

            # Work out the optimum time offset between the plane's path and the observed path
            # See <http://www.scipy-lectures.org/advanced/mathematical_optimization/>
            # for more information about how this works
            parameters_initial = [0]
            parameters_optimised = scipy.optimize.minimize(
                time_offset_objective,
                np.asarray(parameters_initial),
                options={
                    'disp': False,
                    'maxiter': 100
                }).x

            # Construct best-fit linear trajectory for best-fitting parameters
            clock_offset = float(parameters_optimised[0])

            # Check clock offset is reasonable
            if abs(clock_offset) > global_settings['max_clock_offset']:
                continue

            # Measure the offset between the plane's position and the observed position at each time point
            for index in range(path_len):
                # Look up angular mismatch at this time point
                ang_mismatch, distance, altitude = aircraft_angular_offset(
                    index=index, clock_offset=clock_offset)

                # Keep list of the offsets at each recorded time point along the trajectory
                ang_mismatch_list.append(ang_mismatch)
                distance_list.append(distance)
                altitude_list.append(altitude)

            # Consider adding this plane to list of candidates
            mean_ang_mismatch = np.mean(
                np.asarray(ang_mismatch_list))  # degrees
            distance_mean = np.mean(np.asarray(distance_list))  # metres
            altitude_mean = np.mean(np.asarray(altitude_list))  # metres

            if mean_ang_mismatch < global_settings['max_mean_angular_mismatch']:
                start_time = sight_line_list[0]['utc']
                end_time = sight_line_list[-1]['utc']
                start_point = path_interpolate(aircraft=aircraft,
                                               utc=start_time + clock_offset)
                end_point = path_interpolate(aircraft=aircraft,
                                             utc=end_time + clock_offset)
                candidate_aircraft.append({
                    'call_sign': aircraft['call_sign'],  # string
                    'hex_ident': aircraft['hex_ident'],  # string
                    'distance': distance_mean / 1e3,  # km
                    'altitude': altitude_mean / 1e3,  # km
                    'clock_offset': clock_offset,  # seconds
                    'offset': mean_ang_mismatch,  # degrees
                    'start_point': start_point,
                    'end_point': end_point
                })

        # Add model possibility for null aircraft
        if len(candidate_aircraft) == 0:
            candidate_aircraft.append({
                'call_sign': "Unidentified",
                'hex_ident': "Unidentified",
                'distance': 0,
                'altitude': 0,
                'clock_offset': 0,
                'offset': 0,
                'start_point': None,
                'end_point': None
            })

        # Sort candidates by score
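        # Note the two components have different units (degrees and seconds), so
        # combining them in quadrature is a pragmatic heuristic rather than a
        # formally meaningful metric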
        for candidate in candidate_aircraft:
            candidate['score'] = hypot(
                candidate['offset'],
                candidate['clock_offset'],
            )
        candidate_aircraft.sort(key=itemgetter('score'))

        # Report possible aircraft identifications
        logging.info("{prefix} -- {aircraft}".format(
            prefix=logging_prefix,
            aircraft=", ".join([
                "{} ({:.1f} deg offset; clock offset {:.1f} sec; distance {:.1f} km)"
                .format(aircraft['call_sign'], aircraft['offset'],
                        aircraft['clock_offset'], aircraft['distance'])
                for aircraft in candidate_aircraft
            ])))

        # Identify most likely aircraft
        most_likely_aircraft = candidate_aircraft[0]

        # Fetch extra information about plane
        plane_info = fetch_aircraft_data(
            hex_ident=most_likely_aircraft['hex_ident'])

        # Store aircraft identification
        user = settings['pigazingUser']
        timestamp = time.time()
        db.set_observation_metadata(
            user_id=user,
            observation_id=item['observationId'],
            utc=timestamp,
            meta=mp.Meta(key="plane:call_sign",
                         value=most_likely_aircraft['call_sign']))
        db.set_observation_metadata(
            user_id=user,
            observation_id=item['observationId'],
            utc=timestamp,
            meta=mp.Meta(key="plane:hex_ident",
                         value=most_likely_aircraft['hex_ident']))
        db.set_observation_metadata(
            user_id=user,
            observation_id=item['observationId'],
            utc=timestamp,
            meta=mp.Meta(key="plane:clock_offset",
                         value=most_likely_aircraft['clock_offset']))
        db.set_observation_metadata(user_id=user,
                                    observation_id=item['observationId'],
                                    utc=timestamp,
                                    meta=mp.Meta(
                                        key="plane:angular_offset",
                                        value=most_likely_aircraft['offset']))
        db.set_observation_metadata(
            user_id=user,
            observation_id=item['observationId'],
            utc=timestamp,
            meta=mp.Meta(key="plane:distance",
                         value=most_likely_aircraft['distance']))
        db.set_observation_metadata(
            user_id=user,
            observation_id=item['observationId'],
            utc=timestamp,
            meta=mp.Meta(key="plane:mean_altitude",
                         value=most_likely_aircraft['altitude']))
        db.set_observation_metadata(
            user_id=user,
            observation_id=item['observationId'],
            utc=timestamp,
            meta=mp.Meta(key="plane:path",
                         value=json.dumps([
                             most_likely_aircraft['start_point'],
                             most_likely_aircraft['end_point']
                         ])))
        db.set_observation_metadata(
            user_id=user,
            observation_id=item['observationId'],
            utc=timestamp,
            meta=mp.Meta(key="plane:path_length",
                         value=ang_dist(ra0=path_ra_dec_at_epoch[0][0],
                                        dec0=path_ra_dec_at_epoch[0][1],
                                        ra1=path_ra_dec_at_epoch[-1][0],
                                        dec1=path_ra_dec_at_epoch[-1][1]) *
                         180 / pi))

        aircraft_operator = ""
        if 'operator' in plane_info and plane_info['operator']:
            aircraft_operator = plane_info['operator']
        elif 'owner' in plane_info and plane_info['owner']:
            aircraft_operator = plane_info['owner']

        db.set_observation_metadata(user_id=user,
                                    observation_id=item['observationId'],
                                    utc=timestamp,
                                    meta=mp.Meta(key="plane:operator",
                                                 value=aircraft_operator))
        db.set_observation_metadata(user_id=user,
                                    observation_id=item['observationId'],
                                    utc=timestamp,
                                    meta=mp.Meta(key="plane:model",
                                                 value=plane_info.get(
                                                     'model', '')))
        db.set_observation_metadata(user_id=user,
                                    observation_id=item['observationId'],
                                    utc=timestamp,
                                    meta=mp.Meta(key="plane:manufacturer",
                                                 value=plane_info.get(
                                                     'manufacturername', '')))

        # Aircraft successfully identified
        if most_likely_aircraft['call_sign'] == "Unidentified":
            outcomes['unsuccessful_fits'] += 1
        else:
            outcomes['successful_fits'] += 1

        # Update database
        db.commit()

    # Report how many fits we achieved
    logging.info("{:d} aircraft successfully identified.".format(
        outcomes['successful_fits']))
    logging.info("{:d} aircraft not identified.".format(
        outcomes['unsuccessful_fits']))
    logging.info("{:d} malformed database records.".format(
        outcomes['error_records']))
    logging.info("{:d} rescued database records.".format(
        outcomes['rescued_records']))
    logging.info("{:d} aircraft with incomplete data.".format(
        outcomes['insufficient_information']))

    # Clean up and exit
    db.commit()
    db.close_db()
    return
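
The clock-offset fit in the example above follows a common scipy pattern: wrap the mismatch in a one-parameter objective function, hand it to scipy.optimize.minimize, and read the fitted value off the .x attribute of the result. Here is a self-contained sketch of the same pattern on a toy objective; the quadratic mismatch and the 3-second "true" offset are purely illustrative:

from math import exp

import numpy as np
import scipy.optimize

def time_offset_objective(p):
    """Toy objective: quadratic mismatch with the exp() weighting used above."""
    clock_offset = p[0]
    ang_mismatch = (clock_offset - 3.0) ** 2  # pretend the true offset is +3 s
    return ang_mismatch * exp(clock_offset / 8)

parameters_optimised = scipy.optimize.minimize(
    time_offset_objective, np.asarray([0.0]),
    options={'disp': False, 'maxiter': 100}).x
print("Best-fit clock offset: {:.2f} s".format(float(parameters_optimised[0])))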
Example 4
def calibrate_lens(filenames: list,
                   verbose: bool = True,
                   diagnostics_run_id=None):
    """
    Calibrate the radial distortion and sky orientation of one image, or a group of images, taken from a particular
    lens / camera setup. If multiple images are passed, they may have different orientations on the sky, but are
    assumed to share the same radial distortions.

    The best fit parameters are displayed to stdout, and also stored in the global variable <parameters_final>.

    :param filenames:
        List of the JSON files describing the images we are to calibrate.
    :type filenames:
        List<str>
    :param verbose:
        Boolean switch indicating whether we produce diagnostic messages listing the quality of fit of
        each star listed.
    :type verbose:
        bool
    :param diagnostics_run_id:
        If set, we produce a series of PNG graphs in </tmp> illustrating the quality of the fit achieved in terms
        of the radial offset of the position of each star from its theoretically predicted position. If this shows
        systematic trends away from zero, it suggests that the radial distortion has been poorly fitted. The string
        value supplied here is appended as a suffix to all the image files, to distinguish them from other runs of
        this tool.
    :type diagnostics_run_id:
        str|int
    :return:
        None
    """

    # We store the details of the list of parameters we are currently fitting in global variables (yuck!) as they
    # need to be accessed by the objective function which does the function fitting.
    global parameters_radial, parameters_image
    global fitting_parameters, fitting_parameter_indices, fitting_parameter_names
    global fitting_star_list, fitting_filename_list
    global parameters_final

    # Details of all the images we are currently fitting
    img_size_x = []  # Horizontal pixel width of each image we are fitting
    img_size_y = []  # Vertical pixel height of each image we are fitting
    input_config = []  # The full JSON structures describing each image we are fitting
    star_list = []  # Lists of the stars in each image we are fitting

    # Start creating list of parameters we are to fit
    fitting_parameters = {}  # fitting_parameters[parameter_name] = dictionary of info about the parameter (see above)
    fitting_parameter_indices = {}  # fitting_parameter_indices[parameter_name] = index
    fitting_parameter_names = []  # fitting_parameter_names[index] = parameter_name
    fitting_star_list = None  # global copy of <star_list>

    # Compile a list of all the free parameters in this fitting run
    # All fitting runs require a single set of radial distortion parameters
    # We create descriptors for these parameters in <fitting_parameters> now
    for item in parameters_radial:
        parameter_index = len(fitting_parameters)
        fitting_parameters[item['name']] = item.copy()
        # These parameters do not refer to any single image
        fitting_parameters[item['name']]['image'] = None
        fitting_parameter_indices[item['name']] = parameter_index
        fitting_parameter_names.append(item['name'])

    # For every image we are fitting, we need a separate set of parameters describing the image's orientation on the sky
    # We create descriptors for these parameters now
    for filename in filenames:
        # Read the JSON description for this image
        image_info = read_input_data(filename=filename, show_warnings=verbose)

        # Add this image to the list of images we are fitting
        img_size_x.append(image_info[0])
        img_size_y.append(image_info[1])
        input_config.append(image_info[2])
        star_list.append(image_info[3])

        # Create all the parameters in <fitting_parameters> which describe the orientation of this image on the sky
        for item in parameters_image:
            # Calculate the index that this parameter will take in the vector of trial values in the optimiser
            parameter_index = len(fitting_parameters)

            # Create a unique name for this parameter. We may have multiple images, so we prepend the image's filename
            # to the parameter name, to distinguish it from other images.
            key = "{}_{}".format(filename, item['name'])

            # Create parameter
            fitting_parameters[key] = item.copy()
            fitting_parameters[key]['image'] = filename
            fitting_parameter_indices[key] = parameter_index
            fitting_parameter_names.append(key)

    # Create global list of all the images we are fitting, and the stars within each image
    fitting_star_list = star_list
    fitting_filename_list = filenames

    # Set a sensible default RA / Dec for the centre of each image
    for index, filename in enumerate(filenames):
        # Use the first reference star in each image as a guesstimate of celestial coordinates of the centre
        ra0 = star_list[index][0]['ra']
        dec0 = star_list[index][0]['dec']
        key_ra = "{}_{}".format(filename, 'ra')
        fitting_parameters[key_ra]['default'] = ra0
        key_dec = "{}_{}".format(filename, 'dec')
        fitting_parameters[key_dec]['default'] = dec0

    # Create vectors of initial values, and step sizes, for each parameter we are to fit
    parameters_scale = [
        fitting_parameters[key]['step_size'] for key in fitting_parameter_names
    ]

    parameters_initial = [
        fitting_parameters[key]['default'] /
        fitting_parameters[key]['step_size'] for key in fitting_parameter_names
    ]

    # Solve the system of equations
    # See <http://www.scipy-lectures.org/advanced/mathematical_optimization/> for more information about how this works
    parameters_optimised = scipy.optimize.minimize(
        mismatch,
        numpy.asarray(parameters_initial),
        options={
            'disp': verbose,
            'maxiter': 1e8
        }).x

    # Extract the best-fitting set of parameter values, in physical units (not in units of optimiser step size)
    parameters_final = [
        parameters_optimised[i] * parameters_scale[i]
        for i in range(len(fitting_parameter_names))
    ]

    # Display best fit parameter values
    if verbose:
        logging.info("Observatory: {}".format(
            [item['observatory'] for item in input_config]))
        logging.info("Lens: {}".format([item['lens']
                                        for item in input_config]))
        logging.info("Best fit parameters were:")
        for index, key in enumerate(fitting_parameter_names):
            logging.info("{:30s} : {:.8f}".format(
                fitting_parameters[key]['title'], parameters_final[index] *
                fitting_parameters[key]['display_scale']))

        # Print JSON string describing the radial distortion model
        for index, filename in enumerate(filenames):
            scale_x = parameters_final[fitting_parameter_indices[
                "{}_{}".format(filename, 'width')]]
            scale_y = scale_x * parameters_final[
                fitting_parameter_indices['aspect']]
            k1 = parameters_final[fitting_parameter_indices['k1']]
            k2 = parameters_final[fitting_parameter_indices['k2']]
            k3 = parameters_final[fitting_parameter_indices['k3']]
            logging.info(
                "Barrel parameters: [{:12.8f}, {:12.8f}, {:14.10f}, {:14.10f}, {:14.10f}]"
                .format(scale_x * 180 / pi, scale_y * 180 / pi, k1, k2, k3))

    # In verbose mode, print detailed information about how well each star was fitted
    if verbose:
        # Loop over all of the images we are fitting
        for index, filename in enumerate(filenames):
            # Extract the parameter values relevant to this image
            ra0 = parameters_final[fitting_parameter_indices["{}_{}".format(
                filename, 'ra')]]
            dec0 = parameters_final[fitting_parameter_indices["{}_{}".format(
                filename, 'dec')]]
            scale_x = parameters_final[fitting_parameter_indices[
                "{}_{}".format(filename, 'width')]]
            scale_y = scale_x * parameters_final[
                fitting_parameter_indices['aspect']]
            pos_ang = parameters_final[fitting_parameter_indices[
                "{}_{}".format(filename, 'pa')]]
            k1 = parameters_final[fitting_parameter_indices['k1']]
            k2 = parameters_final[fitting_parameter_indices['k2']]
            k3 = parameters_final[fitting_parameter_indices['k3']]

            logging.info("Image <{}>".format(filename))
            logging.info("Stars used in fitting process:")

            # Print the quality of fit to each star individually
            for star in star_list[index]:
                # Calculate the theoretically predicted position of this star
                pos = gnomonic_project(ra=star['ra'],
                                       dec=star['dec'],
                                       ra0=ra0,
                                       dec0=dec0,
                                       size_x=1,
                                       size_y=1,
                                       scale_x=scale_x,
                                       scale_y=scale_y,
                                       pos_ang=pos_ang,
                                       barrel_k1=k1,
                                       barrel_k2=k2,
                                       barrel_k3=k3)

                # Calculate the offset between the theoretically predicted and observed positions of this star
                distance = hypot((star['xpos'] - pos[0]) * img_size_x[index],
                                 (star['ypos'] - pos[1]) * img_size_y[index])

                # Print diagnostic details for this star
                logging.info("""
User-supplied position ({:4.0f},{:4.0f}). Model position ({:4.0f},{:4.0f}). Mismatch {:5.1f} pixels.
""".format(star['xpos'] * img_size_x[index], star['ypos'] * img_size_y[index],
                pos[0] * img_size_x[index], pos[1] * img_size_y[index],
                distance).strip())

    # Debugging: output a data file listing the observed and theoretical positions of each star
    if diagnostics_run_id is not None:
        # Create diagnostic data file in /tmp
        output_filename = "/tmp/point_offsets_{}.dat".format(
            diagnostics_run_id)
        with open(output_filename, "w") as output:
            # Column headings
            output.write("# x_user_input y_user_input x_model y_model\n")

            # Loop over all the images we are simultaneously fitting
            for index, filename in enumerate(filenames):
                # Leave a blank line between data points from different images
                output.write("\n\n")

                # Extract the parameter values relevant to this image
                ra0 = parameters_final[fitting_parameter_indices[
                    "{}_{}".format(filename, 'ra')]]
                dec0 = parameters_final[fitting_parameter_indices[
                    "{}_{}".format(filename, 'dec')]]
                scale_x = parameters_final[fitting_parameter_indices[
                    "{}_{}".format(filename, 'width')]]
                scale_y = scale_x * parameters_final[
                    fitting_parameter_indices['aspect']]
                pos_ang = parameters_final[fitting_parameter_indices[
                    "{}_{}".format(filename, 'pa')]]
                k1 = parameters_final[fitting_parameter_indices['k1']]
                k2 = parameters_final[fitting_parameter_indices['k2']]
                k3 = parameters_final[fitting_parameter_indices['k3']]

                # Loop over all the stars in this image
                for star in star_list[index]:
                    # Calculate the theoretically predicted position of this star
                    pos = gnomonic_project(ra=star['ra'],
                                           dec=star['dec'],
                                           ra0=ra0,
                                           dec0=dec0,
                                           size_x=1,
                                           size_y=1,
                                           scale_x=scale_x,
                                           scale_y=scale_y,
                                           pos_ang=pos_ang,
                                           barrel_k1=k1,
                                           barrel_k2=k2,
                                           barrel_k3=k3)

                    # Output the theoretically predicted and observed positions of this star
                    output.write("{:4.0f} {:4.0f}    {:4.0f} {:4.0f}\n".format(
                        star['xpos'] * img_size_x[index],
                        star['ypos'] * img_size_y[index],
                        pos[0] * img_size_x[index],
                        pos[1] * img_size_y[index]))

    # Debugging: output a data file listing the theoretical and observed radial positions of each star
    if diagnostics_run_id is not None:
        # Create diagnostic data file in /tmp
        logging.info(
            "Producing diagnostic file <{}>".format(diagnostics_run_id))
        output_filename = "/tmp/radial_distortion_{}.dat".format(
            diagnostics_run_id)
        with open(output_filename, "w") as output:
            # Column headings
            output.write(
                "# x/pixel, y/pixel, offset/pixel, radius/pixel , Angular distance/rad , "
                "Tangent-space distance , Barrel-corrected tan-space dist\n")

            # Loop over all the images we are simultaneously fitting
            for index, filename in enumerate(filenames):
                # Leave a blank line between data points from different images
                output.write("\n\n")

                # Extract the parameter values relevant to this image
                ra0 = parameters_final[fitting_parameter_indices[
                    "{}_{}".format(filename, 'ra')]]
                dec0 = parameters_final[fitting_parameter_indices[
                    "{}_{}".format(filename, 'dec')]]
                scale_x = parameters_final[fitting_parameter_indices[
                    "{}_{}".format(filename, 'width')]]
                scale_y = scale_x * parameters_final[
                    fitting_parameter_indices['aspect']]
                pos_ang = parameters_final[fitting_parameter_indices[
                    "{}_{}".format(filename, 'pa')]]
                k1 = parameters_final[fitting_parameter_indices['k1']]
                k2 = parameters_final[fitting_parameter_indices['k2']]
                k3 = parameters_final[fitting_parameter_indices['k3']]

                # Loop over all the stars in this image
                for star in star_list[index]:
                    # Calculate the theoretically predicted position of this star
                    pos = gnomonic_project(ra=star['ra'],
                                           dec=star['dec'],
                                           ra0=ra0,
                                           dec0=dec0,
                                           size_x=1,
                                           size_y=1,
                                           scale_x=scale_x,
                                           scale_y=scale_y,
                                           pos_ang=pos_ang,
                                           barrel_k1=k1,
                                           barrel_k2=k2,
                                           barrel_k3=k3)

                    # Calculate the offset in the projected position of this star (pixels)
                    offset = hypot((star['xpos'] - pos[0]) * img_size_x[index],
                                   (star['ypos'] - pos[1]) * img_size_y[index])

                    # Calculate the angular distance of this star from the centre of the field (rad)
                    angular_distance = ang_dist(ra1=star['ra'],
                                                dec1=star['dec'],
                                                ra0=ra0,
                                                dec0=dec0)

                    # Calculate the pixel distance of this star from the centre of the field
                    # (horizontal pixels; after radial distortion)
                    pixel_distance = hypot(
                        (star['xpos'] - 0.5) * img_size_x[index],
                        (star['ypos'] - 0.5) * img_size_x[index] *
                        tan(scale_y / 2.) / tan(scale_x / 2.))

                    # Calculate the distance of this star from the centre of the field
                    # (tangent space; before radial distortion)
                    tan_distance = tan(angular_distance)

                    # Apply radial correction to the position this star in tangent space
                    r = tan_distance / tan(scale_x / 2)
                    bc_kn = 1. - k1 - k2 - k3
                    r2 = r * (bc_kn + k1 * (r**2) + k2 * (r**4) + k3 * (r**6))

                    # Calculate the distance of this star from the centre of the field
                    # (tangent space; after radial distortion)
                    barrel_corrected_tan_dist = r2 * tan(scale_x / 2)

                    # Display diagnostic table of calculated values
                    output.write(
                        "{:4.0f} {:4.0f} {:8.4f} {:8.4f} {:8.4f} {:8.4f} {:8.4f}\n"
                        .format(
                            star['xpos'] * img_size_x[index],
                            star['ypos'] * img_size_y[index], offset,
                            pixel_distance, angular_distance * 180 / pi,
                            tan_distance / tan(scale_x / 2) *
                            img_size_x[index] / 2, barrel_corrected_tan_dist /
                            tan(scale_x / 2) * img_size_x[index] / 2))

        # Write pyxplot script to plot quality of fit
        output_filename = "/tmp/radial_distortion_{}.ppl".format(
            diagnostics_run_id)
        with open(output_filename, "w") as output:
            output.write("""
set width 30 ; set term png dpi 100
set xlabel 'Distance from centre of field / pixels'
set ylabel 'Observed pixel distance from centre (after distortion) - Tan space distance (before distortion)'
set output '/tmp/radial_distortion_{0}_a.png'
""".format(diagnostics_run_id))
            for index, filename in enumerate(filenames):
                output.write("""
f_{1}(x) = a + b * x ** 2
# fit f_{1}() withouterrors '/tmp/radial_distortion_{0}.dat' using 4:$6/$4 index {1} via a, b
""".format(diagnostics_run_id, index))
            output.write("plot ")
            for index, filename in enumerate(filenames):
                output.write(
                    "'/tmp/radial_distortion_{0}.dat' using 4:$6-$4 index {1}, "
                    .format(diagnostics_run_id, index))
                # output.write("f_{1}(x), ".format(diagnostics_run_id, index))
                output.write(
                    "'/tmp/radial_distortion_{0}.dat' using 4:$6-$7 index {1}, "
                    .format(diagnostics_run_id, index))
            output.write(
                "-1 notitle w col green lt 2, 0 notitle w col green lt 2, 1 notitle w col green lt 2\n"
            )
            output.write("""
set ylabel 'Observed pixel distance from centre (after distortion) - Tan space distance (after distortion)'
set output '/tmp/radial_distortion_{0}_b.png'
""".format(diagnostics_run_id))
            output.write("plot ")
            for index, filename in enumerate(filenames):
                output.write(
                    "'/tmp/radial_distortion_{0}.dat' using 4:$7-$4 index {1}, "
                    .format(diagnostics_run_id, index))
                # output.write("'/tmp/radial_distortion_{0}.dat' using 4:$6/f_{1}($4)-$4 index {1}, ".format(diagnostics_run_id, index))
            output.write(
                "-1 notitle w col green lt 2, 0 notitle w col green lt 2, 1 notitle w col green lt 2\n"
            )

        # Run pyxplot
        os.system(
            "pyxplot /tmp/radial_distortion_{}.ppl".format(diagnostics_run_id))
Example 5
def satellite_determination(utc_min, utc_max):
    """
    Estimate the identity of spacecraft observed between the unix times <utc_min> and <utc_max>.

    :param utc_min:
        The start of the time period in which we should determine the identity of spacecraft (unix time).
    :type utc_min:
        float
    :param utc_max:
        The end of the time period in which we should determine the identity of spacecraft (unix time).
    :type utc_max:
        float
    :return:
        None
    """

    # Open connection to image archive
    db = obsarchive_db.ObservationDatabase(
        file_store_path=settings['dbFilestore'],
        db_host=installation_info['mysqlHost'],
        db_user=installation_info['mysqlUser'],
        db_password=installation_info['mysqlPassword'],
        db_name=installation_info['mysqlDatabase'],
        obstory_id=installation_info['observatoryId'])

    logging.info("Starting satellite identification.")

    # Count how many images we manage to successfully fit
    outcomes = {
        'successful_fits': 0,
        'unsuccessful_fits': 0,
        'error_records': 0,
        'rescued_records': 0,
        'insufficient_information': 0
    }

    # Status update
    logging.info("Searching for satellites within period {} to {}".format(
        date_string(utc_min), date_string(utc_max)))

    # Open direct connection to database
    conn = db.con

    # Search for satellite candidates (objects categorised as Plane, Satellite or Junk) within this time period
    conn.execute(
        """
SELECT ao.obsTime, ao.publicId AS observationId, f.repositoryFname, l.publicId AS observatory
FROM archive_observations ao
LEFT OUTER JOIN archive_files f ON (ao.uid = f.observationId AND
    f.semanticType=(SELECT uid FROM archive_semanticTypes WHERE name="pigazing:movingObject/video"))
INNER JOIN archive_observatories l ON ao.observatory = l.uid
INNER JOIN archive_metadata am2 ON ao.uid = am2.observationId AND
    am2.fieldId=(SELECT uid FROM archive_metadataFields WHERE metaKey="web:category")
WHERE ao.obsType=(SELECT uid FROM archive_semanticTypes WHERE name='pigazing:movingObject/') AND
      ao.obsTime BETWEEN %s AND %s AND
      (am2.stringValue='Plane' OR am2.stringValue='Satellite' OR am2.stringValue='Junk')
ORDER BY ao.obsTime
""", (utc_min, utc_max))
    results = conn.fetchall()

    # Log the number of images we are about to work on
    logging.info("Estimating the identity of {:d} spacecraft.".format(
        len(results)))

    # Analyse each spacecraft in turn
    for item_index, item in enumerate(results):
        # Fetch metadata about this object, some of which might be on the file, and some on the observation
        obs_obj = db.get_observation(observation_id=item['observationId'])
        obs_metadata = {item.key: item.value for item in obs_obj.meta}
        if item['repositoryFname']:
            file_obj = db.get_file(repository_fname=item['repositoryFname'])
            file_metadata = {item.key: item.value for item in file_obj.meta}
        else:
            file_metadata = {}
        all_metadata = {**obs_metadata, **file_metadata}

        # Check we have all required metadata
        if 'pigazing:path' not in all_metadata:
            logging.info(
                "Cannot process <{}> due to inadequate metadata.".format(
                    item['observationId']))
            continue

        # Make ID string to prefix to all logging messages about this event
        logging_prefix = "{date} [{obs}]".format(
            date=date_string(utc=item['obsTime']), obs=item['observationId'])

        # Project path from (x,y) coordinates into (RA, Dec)
        projector = PathProjection(db=db,
                                   obstory_id=item['observatory'],
                                   time=item['obsTime'],
                                   logging_prefix=logging_prefix)

        path_x_y, path_ra_dec_at_epoch, path_alt_az, sight_line_list_this = projector.ra_dec_from_x_y(
            path_json=all_metadata['pigazing:path'],
            path_bezier_json=all_metadata['pigazing:pathBezier'],
            detections=all_metadata['pigazing:detectionCount'],
            duration=all_metadata['pigazing:duration'])

        # Check for error
        if projector.error is not None:
            if projector.error in outcomes:
                outcomes[projector.error] += 1
            continue

        # Check for notifications
        for notification in projector.notifications:
            if notification in outcomes:
                outcomes[notification] += 1

        # Count the number of points in the path
        path_len = len(path_x_y)

        # Look up list of satellite orbital elements at the time of this sighting
        spacecraft_list = fetch_satellites(utc=item['obsTime'])

        # List of candidate satellites this object might be
        candidate_satellites = []

        # Check that we found a list of spacecraft
        if spacecraft_list is None:
            logging.info(
                "{date} [{obs}] -- No spacecraft records found.".format(
                    date=date_string(utc=item['obsTime']),
                    obs=item['observationId']))
            outcomes['insufficient_information'] += 1
            continue

        # Logging message about how many spacecraft we're testing
        # logging.info("{date} [{obs}] -- Matching against {count:7d} spacecraft.".format(
        #     date=date_string(utc=item['obsTime']),
        #     obs=item['observationId'],
        #     count=len(spacecraft_list)
        # ))

        # Test for each candidate satellite in turn
        for spacecraft in spacecraft_list:
            # Unit scaling
            deg2rad = pi / 180.0  # 0.0174532925199433
            xpdotp = 1440.0 / (2.0 * pi)  # 229.1831180523293

            # Model the path of this spacecraft
            model = Satrec()
            model.sgp4init(
                # whichconst: gravity model
                WGS72,

                # opsmode: 'a' = old AFSPC mode, 'i' = improved mode
                'i',

                # satnum: Satellite number
                spacecraft['noradId'],

                # epoch: days since 1949 December 31 00:00 UT
                jd_from_unix(spacecraft['epoch']) - 2433281.5,

                # bstar: drag coefficient (/earth radii)
                spacecraft['bStar'],

                # ndot (NOT USED): ballistic coefficient (revs/day)
                spacecraft['meanMotionDot'] / (xpdotp * 1440.0),

                # nddot (NOT USED): mean motion 2nd derivative (revs/day^3)
                spacecraft['meanMotionDotDot'] / (xpdotp * 1440.0 * 1440),

                # ecco: eccentricity
                spacecraft['ecc'],

                # argpo: argument of perigee (radians)
                spacecraft['argPeri'] * deg2rad,

                # inclo: inclination (radians)
                spacecraft['incl'] * deg2rad,

                # mo: mean anomaly (radians)
                spacecraft['meanAnom'] * deg2rad,

                # no_kozai: mean motion (radians/minute)
                spacecraft['meanMotion'] / xpdotp,

                # nodeo: right ascension of ascending node (radians)
                spacecraft['RAasc'] * deg2rad)

            # Wrap within Skyfield to convert to topocentric coordinates
            ts = load.timescale()
            sat = EarthSatellite.from_satrec(model, ts)
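            # (note: load.timescale() is re-created for every candidate and could be hoisted out of this loop)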

            # Fetch spacecraft position at each time point along trajectory
            ang_mismatch_list = []
            distance_list = []

            # e, r, v = model.sgp4(jd_from_unix(utc=item['obsTime']), 0)
            # logging.info("{} {} {}".format(str(e), str(r), str(v)))
            tai_utc_offset = 39  # seconds
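            # (fixed offset used by this pipeline; for reference, the IERS TAI-UTC value has been 37 seconds since 2017-01-01)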

            def satellite_angular_offset(index, clock_offset):
                # Fetch observed position of object at this time point
                pt_utc = path_x_y[index][3]
                pt_alt = path_alt_az[index][0]
                pt_az = path_alt_az[index][1]

                # Project position of this satellite in space at this time point
                t = ts.tai_jd(jd=jd_from_unix(utc=pt_utc + tai_utc_offset +
                                              clock_offset))

                # Project position of this satellite in the observer's sky
                sight_line = sat - observer
                topocentric = sight_line.at(t)
                sat_alt, sat_az, sat_distance = topocentric.altaz()

                # Work out offset of satellite's position from observed moving object
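                # (ang_dist computes a generic great-circle separation, so (azimuth, altitude) pairs are passed in place of (RA, Dec))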
                ang_mismatch = ang_dist(ra0=pt_az * pi / 180,
                                        dec0=pt_alt * pi / 180,
                                        ra1=sat_az.radians,
                                        dec1=sat_alt.radians) * 180 / pi

                return ang_mismatch, sat_distance

            def time_offset_objective(p):
                """
                Objective function that we minimise in order to find the best-fit clock offset between the observed
                and model paths.

                :param p:
                    Vector with a single component: the clock offset
                :return:
                    Metric to minimise
                """

                # Turn input parameters into a time offset
                clock_offset = p[0]

                # Look up angular offset
                ang_mismatch, sat_distance = satellite_angular_offset(
                    index=0, clock_offset=clock_offset)

                # Return metric to minimise
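                # (the exponential factor discourages the optimiser from drifting to large positive clock offsets)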
                return ang_mismatch * exp(clock_offset / 8)

            # First, chuck out satellites with large angular offsets
            observer = wgs84.latlon(
                latitude_degrees=projector.obstory_info['latitude'],
                longitude_degrees=projector.obstory_info['longitude'],
                elevation_m=0)

            ang_mismatch, sat_distance = satellite_angular_offset(
                index=0, clock_offset=0)

            # Check angular offset is reasonable
            if ang_mismatch > global_settings['max_angular_mismatch']:
                continue

            # Work out the optimum time offset between the satellite's path and the observed path
            # See <http://www.scipy-lectures.org/advanced/mathematical_optimization/>
            # for more information about how this works
            parameters_initial = [0]
            parameters_optimised = scipy.optimize.minimize(
                time_offset_objective,
                np.asarray(parameters_initial),
                options={
                    'disp': False,
                    'maxiter': 100
                }).x

            # Extract the best-fitting clock offset
            clock_offset = float(parameters_optimised[0])

            # Check clock offset is reasonable
            if abs(clock_offset) > global_settings['max_clock_offset']:
                continue

            # Measure the offset between the satellite's position and the observed position at each time point
            for index in range(path_len):
                # Look up angular mismatch at this time point
                ang_mismatch, sat_distance = satellite_angular_offset(
                    index=index, clock_offset=clock_offset)

                # Keep list of the offsets at each recorded time point along the trajectory
                ang_mismatch_list.append(ang_mismatch)
                distance_list.append(sat_distance.km)

            # Consider adding this satellite to the list of candidates
            mean_ang_mismatch = np.mean(np.asarray(ang_mismatch_list))
            distance_mean = np.mean(np.asarray(distance_list))

            if mean_ang_mismatch < global_settings['max_mean_angular_mismatch']:
                candidate_satellites.append({
                    'name': spacecraft['name'],  # string
                    'noradId': spacecraft['noradId'],  # int
                    'distance': distance_mean,  # km
                    'clock_offset': clock_offset,  # seconds
                    'offset': mean_ang_mismatch,  # degrees
                    'absolute_magnitude': spacecraft['mag']
                })

        # Add a model possibility for a null (unidentified) satellite
        candidate_satellites.append({
            'name': "Unidentified",
            'noradId': 0,
            'distance': 35.7e3 * 0.25,  # km; nothing is visible beyond 25% of geostationary orbit distance
            'clock_offset': 0,
            'offset': 0,
            'absolute_magnitude': None
        })

        # Sort candidates by score - use absolute mag = 20 for satellites with no mag
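        # (smaller scores are better: nearby, well-synchronised, bright candidates rank first;
        # distance is scaled to thousands of km to make the three terms comparable)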
        for candidate in candidate_satellites:
            candidate['score'] = hypot(
                candidate['distance'] / 1e3,
                candidate['clock_offset'],
                (20 if candidate['absolute_magnitude'] is None else
                 candidate['absolute_magnitude']),
            )
        candidate_satellites.sort(key=itemgetter('score'))

        # Report possible satellite identifications
        logging.info("{prefix} -- {satellites}".format(
            prefix=logging_prefix,
            satellites=", ".join([
                "{} ({:.1f} deg offset; clock offset {:.1f} sec)".format(
                    satellite['name'], satellite['offset'],
                    satellite['clock_offset'])
                for satellite in candidate_satellites
            ])))

        # Identify most likely satellite
        most_likely_satellite = candidate_satellites[0]

        # Store satellite identification
        user = settings['pigazingUser']
        timestamp = time.time()
        db.set_observation_metadata(user_id=user,
                                    observation_id=item['observationId'],
                                    utc=timestamp,
                                    meta=mp.Meta(
                                        key="satellite:name",
                                        value=most_likely_satellite['name']))
        db.set_observation_metadata(
            user_id=user,
            observation_id=item['observationId'],
            utc=timestamp,
            meta=mp.Meta(key="satellite:norad_id",
                         value=most_likely_satellite['noradId']))
        db.set_observation_metadata(
            user_id=user,
            observation_id=item['observationId'],
            utc=timestamp,
            meta=mp.Meta(key="satellite:clock_offset",
                         value=most_likely_satellite['clock_offset']))
        db.set_observation_metadata(user_id=user,
                                    observation_id=item['observationId'],
                                    utc=timestamp,
                                    meta=mp.Meta(
                                        key="satellite:angular_offset",
                                        value=most_likely_satellite['offset']))
        db.set_observation_metadata(
            user_id=user,
            observation_id=item['observationId'],
            utc=timestamp,
            meta=mp.Meta(key="satellite:path_length",
                         value=ang_dist(ra0=path_ra_dec_at_epoch[0][0],
                                        dec0=path_ra_dec_at_epoch[0][1],
                                        ra1=path_ra_dec_at_epoch[-1][0],
                                        dec1=path_ra_dec_at_epoch[-1][1]) *
                         180 / pi))
        db.set_observation_metadata(
            user_id=user,
            observation_id=item['observationId'],
            utc=timestamp,
            meta=mp.Meta(
                key="satellite:path_ra_dec",
                value="[[{:.3f},{:.3f}],[{:.3f},{:.3f}],[{:.3f},{:.3f}]]".format(
                    path_ra_dec_at_epoch[0][0] * 12 / pi,  # RA in hours
                    path_ra_dec_at_epoch[0][1] * 180 / pi,  # Dec in degrees
                    path_ra_dec_at_epoch[int(path_len / 2)][0] * 12 / pi,
                    path_ra_dec_at_epoch[int(path_len / 2)][1] * 180 / pi,
                    path_ra_dec_at_epoch[-1][0] * 12 / pi,
                    path_ra_dec_at_epoch[-1][1] * 180 / pi,
                )))

        # Record whether the satellite was successfully identified
        if most_likely_satellite['name'] == "Unidentified":
            outcomes['unsuccessful_fits'] += 1
        else:
            outcomes['successful_fits'] += 1

        # Update database
        db.commit()

    # Report how many fits we achieved
    logging.info("{:d} satellites successfully identified.".format(
        outcomes['successful_fits']))
    logging.info("{:d} satellites not identified.".format(
        outcomes['unsuccessful_fits']))
    logging.info("{:d} malformed database records.".format(
        outcomes['error_records']))
    logging.info("{:d} rescued database records.".format(
        outcomes['rescued_records']))
    logging.info("{:d} satellites with incomplete data.".format(
        outcomes['insufficient_information']))

    # Clean up and exit
    db.commit()
    db.close_db()
    return
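
For orientation, the ranking step above reduces each candidate to a single scalar score. Below is a minimal, self-contained sketch of that metric with invented candidate records mirroring the dictionaries built above (the names and numbers are illustrative only; math.hypot accepts three arguments from Python 3.8 onwards):

from math import hypot
from operator import itemgetter

# Invented candidates with the same fields as those assembled above
candidates = [
    {'name': 'ISS (ZARYA)', 'distance': 420.0, 'clock_offset': 1.2, 'absolute_magnitude': -1.5},
    {'name': 'Unidentified', 'distance': 35.7e3 * 0.25, 'clock_offset': 0, 'absolute_magnitude': None},
]
for c in candidates:
    c['score'] = hypot(c['distance'] / 1e3,
                       c['clock_offset'],
                       20 if c['absolute_magnitude'] is None else c['absolute_magnitude'])
candidates.sort(key=itemgetter('score'))
print(candidates[0]['name'])  # -> ISS (ZARYA)

The null "Unidentified" entry always scores hypot(8.925, 0, 20) ≈ 21.9, so any candidate with a closer, better-synchronised, brighter fit outranks it and the event is counted as a successful identification.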
Example no. 6
def average_daily_fits(conn, db, obstory_id, utc_max, utc_min):
    """
    Average all of the orientation fixes within a given time period, excluding extreme fits. Update the observatory's
    status with the altitude and azimuth of the average fit, if it has a suitably small error bar.

    :param conn:
        Database connection object.
    :param db:
        Database object.
    :param obstory_id:
        Observatory publicId.
    :param utc_max:
        Unix time of the end of the time period.
    :param utc_min:
        Unix time of the beginning of the time period.
    :return:
        None
    """

    # Divide the time period we are working on into individual nights, and then work on each night in turn
    logging.info("Averaging daily fits within period {} to {}".format(
        date_string(utc_min), date_string(utc_max)))

    # Each night is an 86400-second period
    daily_block_size = 86400

    # Make sure that blocks start at noon
    utc_min = (floor(utc_min / daily_block_size + 0.5) -
               0.5) * daily_block_size
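    # Worked example: utc_min = 1,600,000,000 (2020-09-13 12:26:40 UTC) gives
    # (floor(18518.52 + 0.5) - 0.5) * 86400 = 18518.5 * 86400 = 1,599,998,400,
    # i.e. 12:00 UTC on the same day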
    time_blocks = list(
        np.arange(start=utc_min,
                  stop=utc_max + daily_block_size,
                  step=daily_block_size))

    # Start new block whenever we have a hardware refresh, even if it's in the middle of the night!
    conn.execute(
        """
SELECT time FROM archive_metadata
WHERE observatory=(SELECT uid FROM archive_observatories WHERE publicId=%s)
      AND fieldId=(SELECT uid FROM archive_metadataFields WHERE metaKey='refresh')
      AND time BETWEEN %s AND %s
""", (obstory_id, utc_min, utc_max))
    results = conn.fetchall()
    for item in results:
        time_blocks.append(item['time'])

    # Make sure that start points for time blocks are in order
    time_blocks.sort()

    # Work on each time block (i.e. night) in turn
    for block_index, utc_block_min in enumerate(time_blocks[:-1]):
        # End point for this time block
        utc_block_max = time_blocks[block_index + 1]

        # Search for observations with orientation fits
        conn.execute(
            """
SELECT am1.floatValue AS altitude, am2.floatValue AS azimuth, am3.floatValue AS pa, am4.floatValue AS tilt,
       am5.floatValue AS width_x_field, am6.floatValue AS width_y_field,
       am7.stringValue AS fit_quality
FROM archive_observations o
INNER JOIN archive_metadata am1 ON o.uid = am1.observationId AND
    am1.fieldId=(SELECT uid FROM archive_metadataFields WHERE metaKey="orientation:altitude")
INNER JOIN archive_metadata am2 ON o.uid = am2.observationId AND
    am2.fieldId=(SELECT uid FROM archive_metadataFields WHERE metaKey="orientation:azimuth")
INNER JOIN archive_metadata am3 ON o.uid = am3.observationId AND
    am3.fieldId=(SELECT uid FROM archive_metadataFields WHERE metaKey="orientation:pa")
INNER JOIN archive_metadata am4 ON o.uid = am4.observationId AND
    am4.fieldId=(SELECT uid FROM archive_metadataFields WHERE metaKey="orientation:tilt")
INNER JOIN archive_metadata am5 ON o.uid = am5.observationId AND
    am5.fieldId=(SELECT uid FROM archive_metadataFields WHERE metaKey="orientation:width_x_field")
INNER JOIN archive_metadata am6 ON o.uid = am6.observationId AND
    am6.fieldId=(SELECT uid FROM archive_metadataFields WHERE metaKey="orientation:width_y_field")
INNER JOIN archive_metadata am7 ON o.uid = am7.observationId AND
    am7.fieldId=(SELECT uid FROM archive_metadataFields WHERE metaKey="orientation:fit_quality")
WHERE
    o.observatory = (SELECT uid FROM archive_observatories WHERE publicId=%s) AND
    o.obsTime BETWEEN %s AND %s;
""", (obstory_id, utc_block_min, utc_block_max))
        results = conn.fetchall()

        # Remove results with poor fit
        results_filtered = []
        fit_threshold = 2  # pixels
        for item in results:
            fit_quality = float(json.loads(item['fit_quality'])[0])
            if fit_quality > fit_threshold:
                continue
            item['weight'] = 1 / (fit_quality + 0.1)  # weight by inverse fit error; the +0.1 avoids division by zero
            results_filtered.append(item)
        results = results_filtered

        # Report how many fits we found
        logging.info(
            "Averaging fits within period {} to {}: Found {} fits.".format(
                date_string(utc_block_min), date_string(utc_block_max),
                len(results)))

        # Average the fits we found
        if len(results) < 4:
            logging.info("Insufficient images to reliably average.")
            continue

        # What fraction of the worst fits do we reject?
        rejection_fraction = 0.25

        # Reject the 25% of fits which are furthest from the average
        rejection_count = int(len(results) * rejection_fraction)

        # Convert alt-az fits into radians and average
        # Iteratively remove the point furthest from the mean
        results_filtered = results

        # Iteratively take the average of the fits, reject the furthest outlier, and then take a new average
        for iteration in range(rejection_count):
            # Average the (alt, az) measurements for this observatory by finding their centroid on a sphere
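            # (a weighted centroid on the sphere avoids the 0/360-degree wrap-around problem in azimuth)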
            alt_az_list = [[i['altitude'] * deg, i['azimuth'] * deg]
                           for i in results_filtered]
            weights_list = [i['weight'] for i in results_filtered]
            alt_az_best = mean_angle_2d(pos_list=alt_az_list,
                                        weights=weights_list)[0]

            # Work out the offset of each fit from the average
            fit_offsets = [
                ang_dist(ra0=alt_az_best[1],
                         dec0=alt_az_best[0],
                         ra1=fitted_alt_az[1],
                         dec1=fitted_alt_az[0])
                for fitted_alt_az in alt_az_list
            ]

            # Reject the single fit which is furthest from the average
            fits_with_weights = list(zip(fit_offsets, results_filtered))
            fits_with_weights.sort(key=operator.itemgetter(0))
            fits_with_weights.reverse()

            # Create a new list of orientation fits, with the worst outlier excluded
            results_filtered = [item[1] for item in fits_with_weights[1:]]

        # Convert alt-az fits into radians and average by finding their centroid on a sphere
        alt_az_list = [[i['altitude'] * deg, i['azimuth'] * deg]
                       for i in results_filtered]
        weights_list = [i['weight'] for i in results_filtered]
        [alt_az_best, alt_az_error] = mean_angle_2d(pos_list=alt_az_list,
                                                    weights=weights_list)

        # Average other angles by finding their centroid on a circle
        output_values = {}
        for quantity in ['tilt', 'pa', 'width_x_field', 'width_y_field']:
            # Iteratively remove the point furthest from the mean
            results_filtered = results

            # Iteratively take the average of the values for each parameter, reject the furthest outlier,
            # and then take a new average
            for iteration in range(rejection_count):
                # Average quantity measurements
                quantity_values = [i[quantity] * deg for i in results_filtered]
                weights_list = [i['weight'] for i in results_filtered]
                quantity_mean = mean_angle(angle_list=quantity_values,
                                           weights=weights_list)[0]

                # Work out the offset of each fit from the average
                fit_offsets = []
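                # Wrap each offset into the range [-pi, pi] before taking its magnitude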
                for index, quantity_value in enumerate(quantity_values):
                    offset = quantity_value - quantity_mean
                    if offset < -pi:
                        offset += 2 * pi
                    if offset > pi:
                        offset -= 2 * pi
                    fit_offsets.append(abs(offset))

                # Reject the worst fit which is furthest from the average
                fits_with_weights = list(zip(fit_offsets, results_filtered))
                fits_with_weights.sort(key=operator.itemgetter(0))
                fits_with_weights.reverse()
                results_filtered = [item[1] for item in fits_with_weights[1:]]

            # Filtering finished; now convert each fit into radians and average
            values_filtered = [i[quantity] * deg for i in results_filtered]
            weights_list = [i['weight'] for i in results_filtered]
            value_best = mean_angle(angle_list=values_filtered,
                                    weights=weights_list)[0]
            output_values[quantity] = value_best * rad

        # Print fit information
        # Only accept determinations with better precision than 0.1 deg
        success = (alt_az_error * rad < 0.1)
        adjective = "SUCCESSFUL" if success else "REJECTED"
        logging.info("""\
{} ORIENTATION FIT from {:2d} images: Alt: {:.2f} deg. Az: {:.2f} deg. PA: {:.2f} deg. \
ScaleX: {:.2f} deg. ScaleY: {:.2f} deg. Uncertainty: {:.2f} deg.\
""".format(adjective, len(results_filtered), alt_az_best[0] * rad,
           alt_az_best[1] * rad, output_values['pa'],
           output_values['width_x_field'], output_values['width_y_field'],
           alt_az_error * rad))

        # Update observatory status
        if success:
            # Flush any previous observation status
            flush_orientation(obstory_id=obstory_id,
                              utc_min=utc_block_min - 1,
                              utc_max=utc_block_min + 1)

            user = settings['pigazingUser']
            timestamp = time.time()
            db.register_obstory_metadata(obstory_id=obstory_id,
                                         key="orientation:altitude",
                                         value=alt_az_best[0] * rad,
                                         time_created=timestamp,
                                         metadata_time=utc_block_min,
                                         user_created=user)
            db.register_obstory_metadata(obstory_id=obstory_id,
                                         key="orientation:azimuth",
                                         value=alt_az_best[1] * rad,
                                         time_created=timestamp,
                                         metadata_time=utc_block_min,
                                         user_created=user)
            db.register_obstory_metadata(obstory_id=obstory_id,
                                         key="orientation:pa",
                                         value=output_values['pa'],
                                         time_created=timestamp,
                                         metadata_time=utc_block_min,
                                         user_created=user)
            db.register_obstory_metadata(obstory_id=obstory_id,
                                         key="orientation:tilt",
                                         value=output_values['tilt'],
                                         time_created=timestamp,
                                         metadata_time=utc_block_min,
                                         user_created=user)
            db.register_obstory_metadata(obstory_id=obstory_id,
                                         key="orientation:width_x_field",
                                         value=output_values['width_x_field'],
                                         time_created=timestamp,
                                         metadata_time=utc_block_min,
                                         user_created=user)
            db.register_obstory_metadata(obstory_id=obstory_id,
                                         key="orientation:width_y_field",
                                         value=output_values['width_y_field'],
                                         time_created=timestamp,
                                         metadata_time=utc_block_min,
                                         user_created=user)
            db.register_obstory_metadata(obstory_id=obstory_id,
                                         key="orientation:uncertainty",
                                         value=alt_az_error * rad,
                                         time_created=timestamp,
                                         metadata_time=utc_block_min,
                                         user_created=user)
            db.register_obstory_metadata(obstory_id=obstory_id,
                                         key="orientation:image_count",
                                         value=len(results),
                                         time_created=timestamp,
                                         metadata_time=utc_block_min,
                                         user_created=user)
            db.commit()
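
mean_angle and mean_angle_2d above are project helpers whose implementations are not shown here. As a rough stand-in for the one-dimensional case, a weighted circular mean can be computed from the resultant vector; this is what makes averaging angles near the 0/360-degree wrap safe (a sketch under that assumption, not the project's actual code):

import numpy as np

def circular_mean(angles, weights):
    # Weighted mean of angles (in radians) via the resultant vector -- a
    # hypothetical stand-in for this project's mean_angle helper
    angles = np.asarray(angles, dtype=float)
    weights = np.asarray(weights, dtype=float)
    s = np.sum(weights * np.sin(angles))
    c = np.sum(weights * np.cos(angles))
    return np.arctan2(s, c)

# Two angles straddling the wrap average to ~0 rather than to pi:
print(circular_mean([0.1, 2 * np.pi - 0.1], [1.0, 1.0]))

A naive arithmetic mean of those two angles would return pi, on the opposite side of the circle, which is why the code above averages orientations with angle-aware helpers rather than plain means.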