Example #1
def sync_stat_archive(opt, msid_files, logger, content, stat, index_tbl):
    """
    Sync the archive for ``content``.

    This wraps the call to ``_sync_stat_archive`` in order to catch an exception
    related to the ``last_date_id`` file being out of sync.

    :param opt: command-line options namespace
    :param msid_files: dict of file path templates for the archive files
    :param logger: logger object
    :param content: content type name
    :param stat: stat interval '5min' or 'daily'
    :param index_tbl: table of sync file entries
    :return: None
    """
    try:
        _sync_stat_archive(opt, msid_files, logger, content, stat, index_tbl)
    except RowMismatchError as err:
        pth = Path(fetch.msid_files['last_date_id'].abs)
        if pth.exists():
            msg = f'File {pth} was out of sync with stats archive, generating exception: {err}'
            logger.warn(msg)
            logger.warn('Attempting to fix by removing that file and trying to sync again.')
            pth.unlink()
            _sync_stat_archive(opt, msid_files, logger, content, stat, index_tbl)
            process_errors.append(
                f'WARNING: file {pth} was out of sync with stats archive, fixed by deleting it')
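A minimal usage sketch, assuming ``opt``, ``msid_files``, ``logger``, ``content`` and ``index_tbl`` are already supplied by the surrounding sync machinery; the wrapper is simply called once per stat interval:

# Hypothetical driver loop (all inputs come from the enclosing sync code)
for stat in ('5min', 'daily'):
    sync_stat_archive(opt, msid_files, logger, content, stat, index_tbl)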
Example #2
def get_stats_over_time(start, stop=None, sp=False, dp=None, ir=False, ms=None,
                        slots='combined', t_samp=1000):
    """
    Equivalent to get_stats_per_interval, but concatenates the results for all
    obsids within the specified time interval.
    """
    # Get obsids in time range and collect all the per-interval statistics
    obsids = events.obsids.filter(start, stop, dur__gt=2000)
    stats_list = []
    for obsid in obsids:
        set_FILES_context(obsid.obsid, sp, dp, ir, ms, t_samp, slots)

        # First check that the raw dat file for this obsid exists.  Nothing
        # can be done without it.
        dat_file = FILES['dat.pkl'].rel
        if not os.path.exists(dat_file):
            logger.info('Skipping {}: {} not in archive'.format(obsid, dat_file))
            continue

        # Now get the stats for this obsid.  Hopefully it has already been computed and
        # is cached as a file.  If not, try to compute the stats (and cache).  If that
        # fails then press on but touch a file to indicate failure so subsequent attempts
        # don't bother.
        logger.info('Processing obsid {}'.format(obsid))
        try:
            stats = get_cached_stats()  # depends on the context set previously
        except FailedStatsFile:
            # Previously failed
            logger.info('  Skipping {}: failed statistics'.format(obsid.obsid))
            continue
        except NoStatsFile:
            logger.info('  Reading pickled data file {}'.format(dat_file))
            with open(dat_file, 'rb') as fh:  # pickle files must be read in binary mode
                dat = pickle.load(fh)
            try:
                logger.info('  Computing statistics')
                if slots == 'combined':
                    stats = get_stats_per_interval_combined(dat, sp, dp, ir, ms, t_samp)
                else:
                    stats = get_stats_per_interval_per_slot(dat, sp, dp, ir, ms, slots, t_samp)
            except ValueError as err:
                # Touch a file to indicate failure so subsequent attempts skip this obsid
                open(FILES['stats.ERR'].rel, 'w').close()
                logger.warn('  ERROR: {}'.format(err))
                continue

        stats['obsid'] = obsid.obsid
        stats_list.append(stats)

    stats = {}
    for case in STAT_CASES:
        stats[case] = {}
        for stat_type in STAT_TYPES:
            stats[case][stat_type] = np.hstack([x[case][stat_type] for x in stats_list])

    # Set corresponding array of obsids for back-tracing outliers etc
    stats['obsid'] = np.hstack([np.ones(len(x['obc']['std']), dtype=int) * x['obsid']
                                for x in stats_list])

    return stats
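A minimal sketch of consuming the result, assuming 'obc' and 'std' are valid entries of STAT_CASES and STAT_TYPES (both names appear in the code above); the dates are illustrative:

import numpy as np

# stats[case][stat_type] is a 1-d array concatenated over all processed obsids,
# and stats['obsid'] maps each sample back to its originating obsid.
stats = get_stats_over_time('2015:001', '2015:032', slots='combined')
obc_std = stats['obc']['std']
mask = np.abs(obc_std - np.median(obc_std)) > 3 * obc_std.std()
print('Obsids with outlier samples:', np.unique(stats['obsid'][mask]))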
Example #3
def get_kalman_predicted_over_time(start, stop=None, sp=False, dp=None, ir=False, ms=None):
    obsids = events.obsids.filter(start, stop, dur__gt=2000)
    tlm_durs = []
    pred_durs = []
    for obsid in obsids:
        logger.info('Reading data for obsid {}'.format(obsid))
        try:
            dat = get_obsid_data(obsid.obsid)
        except Exception as err:
            logger.warn('Failed: {}'.format(err))
            continue
        tlm_drops, pred_drops = get_kalman_predicted(dat, sp, dp, ir, ms)[-2:]
        tlm_durs.append(tlm_drops['duration'])
        pred_durs.append(pred_drops['duration'])
    tlm_durs = np.concatenate(tlm_durs)
    pred_durs = np.concatenate(pred_durs)
    return tlm_durs, pred_durs
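A rough usage sketch comparing observed and predicted Kalman-drop durations; the date range is illustrative:

import numpy as np

tlm_durs, pred_durs = get_kalman_predicted_over_time('2015:001', '2015:100')
print('Observed drops : n={} median duration={:.1f}'.format(len(tlm_durs), np.median(tlm_durs)))
print('Predicted drops: n={} median duration={:.1f}'.format(len(pred_durs), np.median(pred_durs)))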
def update_observed_aimpoints():
    """
    Update the ``OBSERVED_AIMPOINTS_FILE`` table (ascii ECSV format) in
    place to reflect information about observed aimpoints, and in particular
    the delta offset from the planned value.
    """
    # Default is between NOW and NOW - 14 days
    start = DateTime(opt.start) - (14 if opt.start is None else 0)
    stop = DateTime(opt.stop)

    # Get science obsids
    obsids = [
        evt.obsid for evt in events.obsids.filter(start, stop)
        if evt.obsid < 40000
    ]

    # Read in existing file if it exists and make a set of already-processed obsids
    filename = os.path.join(opt.data_root, OBSERVED_AIMPOINTS_FILE)
    if os.path.exists(filename):
        logger.info('Reading {}'.format(filename))
        dat_old = Table.read(filename, format='ascii.ecsv', guess=False)
        processed_obsids = set(dat_old['obsid'])
    else:
        dat_old = None
        processed_obsids = set()

    rows = []
    for obsid in obsids:
        if obsid in processed_obsids:
            logger.info('Skipping obsid {}: already processed'.format(obsid))
            continue

        try:
            vals = get_observed_aimpoint_offset(obsid)
        except NoObsidError:  # not yet in archive
            logger.info('Skipping obsid {}: not in archive yet'.format(obsid))
            continue
        except Exception as err:
            logger.info('ERROR: {}'.format(err))
            continue

        logger.info(
            'Obsid={obsid:5d} detector={detector:6s} '
            'chipx={chipx:.1f} chipy={chipy:.1f} dx={dx:.1f} dy={dy:.1f} dr={dr:.1f}'
            .format(**vals))
        if abs(vals['dx']) > 10 or abs(vals['dy']) > 10:
            logger.warn('WARNING: large dx or dy')

        rows.append(vals)

    if rows:
        dat = Table(rows=rows, names=sorted(rows[0]))
        if dat_old is not None:
            dat = vstack([dat_old, dat])
        logger.info('Writing {}'.format(filename))

        for name in 'dr dx dy obs_chipx obs_chipy sim_z_off'.split():
            dat[name].format = '.2f'

        dat.sort('mean_date')
        dat.write(filename, format='ascii.ecsv', overwrite=True)
    else:
        dat = dat_old

    return dat
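An illustrative follow-up, assuming ``opt`` is already configured and that the column names match the fields logged above (obsid, detector, dx, dy, dr):

# Update the table and show the rows with the largest radial offset
dat = update_observed_aimpoints()
if dat is not None:
    dat.sort('dr')
    print(dat['obsid', 'detector', 'dx', 'dy', 'dr'][-5:])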
Example #6
def update(EventModel, date_stop):
    import django.db
    from django.core.exceptions import ObjectDoesNotExist
    from .events import models

    date_stop = DateTime(date_stop)
    cls_name = EventModel.__name__

    try:
        update = models.Update.objects.get(name=cls_name)
    except ObjectDoesNotExist:
        logger.info('No previous update for {} found'.format(cls_name))
        duration = EventModel.lookback
        update = models.Update(name=cls_name, date=date_stop.date)
        date_start = date_stop - EventModel.lookback
    else:
        duration = date_stop - DateTime(update.date)
        date_start = DateTime(update.date) - EventModel.lookback
        update.date = date_stop.date

        # Some events like LoadSegment or DsnComm might change in the database after
        # having been ingested.  Use lookback_delete (less than lookback) to
        # always remove events in that range and re-ingest.
        if duration >= 0.5 and hasattr(EventModel, 'lookback_delete'):
            delete_date = DateTime(update.date) - EventModel.lookback_delete
            delete_from_date(EventModel, delete_date, set_update_date=False)

    if duration < 0.5:
        logger.info(
            'Skipping {} events because update duration={:.1f} is < 0.5 day'.
            format(cls_name, duration))
        return

    # Some events like LoadSegment, DsnComm are defined into the future, so
    # modify date_stop accordingly.  Note that update.date is set to the
    # nominal date_stop (typically NOW), and this refers more to the last date
    # of processing rather than the actual last date in the archive.
    if hasattr(EventModel, 'lookforward'):
        date_stop = date_stop + EventModel.lookforward

    logger.info('Updating {} events from {} to {}'.format(
        cls_name, date_start.date[:-4], date_stop.date[:-4]))

    # Get events for this model from telemetry.  This is returned as a list
    # of dicts with key/val pairs corresponding to model fields.
    events_in_dates = EventModel.get_events(date_start, date_stop)

    # Determine which of the events is not already in the database and
    # put them in a list for saving.
    event_models, events = get_events_and_event_models(EventModel, cls_name,
                                                       events_in_dates)

    # Save the new events in an atomic fashion
    with django.db.transaction.atomic():
        for event, event_model in zip(events, event_models):
            try:
                # In order to catch an IntegrityError here and press on, need to
                # wrap this in atomic().  This was driven by bad data in iFOT, namely
                # duplicate PassPlans that point to the same DsnComm, which gives an
                # IntegrityError because those are related as one-to-one.
                with django.db.transaction.atomic():
                    save_event_to_database(cls_name, event, event_model,
                                           models)
            except django.db.utils.IntegrityError:
                import traceback
                logger.warn(f'WARNING: IntegrityError skipping {event_model}')
                logger.warn(f'Event dict:\n{event}')
                logger.warn(f'Traceback:\n{traceback.format_exc()}')
                continue

        # Processing completed (events that hit an IntegrityError were skipped),
        # so save the event update information to the database.
        update.save()
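A rough driver sketch, assuming the ``kadi.events.models`` import path (mirroring ``from .events import models`` above) and that DsnComm and LoadSegment are among the available event model classes, as the comments above suggest:

from kadi.events import models  # assumed import path

date_stop = DateTime()  # defaults to the current time
for EventModel in (models.DsnComm, models.LoadSegment):  # illustrative subset
    update(EventModel, date_stop)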