Example #1
def get_week_states(tstart, tstop, bs_cmds, tlm):
    """
    Make states from last available telemetry through the end of the backstop commands

    :param tstart: start time from first backstop command
    :param tstop: stop time from last backstop command
    :param bs_cmds: backstop commands for products under review
    :param tlm: available pitch and aacccdpt telemetry recarray from fetch
    :returns: numpy recarray of states
    """
    cstates = Table(get_cmd_states.fetch_states(DateTime(tstart) - 30,
                                                tstop,
                                                vals=['obsid',
                                                      'pitch',
                                                      'q1', 'q2', 'q3', 'q4']))
    # Get the last state at least 3 days before tstart and at least one hour
    # before the last available telemetry
    cstate0 = cstates[(cstates['tstart'] < (DateTime(tstart) - 3).secs)
                      & (cstates['tstart'] < (tlm[-1]['date'] - 3600))][-1]
    # get temperature data in a range around that initial state
    ok = ((tlm['date'] >= cstate0['tstart'] - 700) &
          (tlm['date'] <= cstate0['tstart'] + 700))
    init_aacccdpt = np.mean(tlm['aacccdpt'][ok])

    pre_bs_states = cstates[(cstates['tstart'] >= cstate0['tstart'])
                            & (cstates['tstart'] < tstart)]
    # cmd_states.get_states needs an initial state dictionary, so
    # construct one from the last pre-backstop state
    last_pre_bs_state = {col: pre_bs_states[-1][col]
                         for col in pre_bs_states[-1].colnames}
    # Get the commanded states from last cmd_state through the end of backstop commands
    states = Table(cmd_states.get_states(last_pre_bs_state, bs_cmds))
    states[-1]['datestop'] = bs_cmds[-1]['date']
    states[-1]['tstop'] = bs_cmds[-1]['time']
    # Truncate the last pre_bs_state at the new states start
    pre_bs_states[-1]['datestop'] = states[0]['datestart']
    pre_bs_states[-1]['tstop'] = states[0]['tstart']
    logger.info('Constructed %d commanded states from %s to %s' %
                (len(states), states[0]['datestart'], states[-1]['datestop']))
    # Combine the pre-backstop states with the commanded states
    all_states = vstack([pre_bs_states, states])
    # Add a column for temperature and pre-fill all to be the initial temperature
    # (the first state temperature is the only one used anyway)
    all_states['aacccdpt'] = init_aacccdpt
    return all_states
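The key step in Example #1 is splicing the pre-backstop commanded states onto the backstop-derived states and pre-filling a temperature column. A minimal, self-contained sketch of that splice using astropy tables; the values and the fill temperature are invented, not real Ska data.

from astropy.table import Table, vstack

# Toy pre-backstop states; the last one is open-ended (very large tstop)
pre_bs_states = Table({'tstart': [0.0, 100.0],
                       'tstop': [100.0, 1e12],
                       'obsid': [1, 2]})
# Toy states derived from backstop commands
states = Table({'tstart': [150.0, 300.0],
                'tstop': [300.0, 400.0],
                'obsid': [3, 4]})

# Truncate the open-ended last pre-backstop state at the new states' start
pre_bs_states[-1]['tstop'] = states[0]['tstart']

# Stack and pre-fill the temperature column (only the first value is used downstream)
all_states = vstack([pre_bs_states, states])
all_states['aacccdpt'] = -9.5
print(all_states)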
Example #2
def make_week_predict(opt, tstart, tstop, bs_cmds, tlm, db):
    print "In make_week_predict"
    
    # Try to make initial state0 from cmd line options
    state0 = dict((x, getattr(opt, x))
                  for x in ('pitch', 'simpos', 'ccd_count', 'fep_count',
                            'vid_board', 'clocking', 'T_dea'))
    print(state0)
    state0.update({'tstart': tstart - 30,
                   'tstop': tstart,
                   'datestart': DateTime(tstart - 30).date,
                   'datestop': DateTime(tstart).date,
                   'q1': 0.0, 'q2': 0.0, 'q3': 0.0, 'q4': 1.0,
                   }
                  )
    print(state0)
    # If cmd lines options were not fully specified then get state0 as last
    # cmd_state that starts within available telemetry.  Update with the
    # mean temperatures at the start of state0.
    if None in state0.values():
        state0 = cmd_states.get_state0(tlm['date'][-5], db,
                                       datepar='datestart')
        ok = ((tlm['date'] >= state0['tstart'] - 700) &
              (tlm['date'] <= state0['tstart'] + 700))
        state0.update({'T_dea': np.mean(tlm['1deamzt'][ok])})

    # TEMPORARY HACK: core model doesn't actually support predictive
    # active heater yet.  Initial temperature determines active heater
    # state for predictions now.
    if state0['T_dea'] < 15:
        state0['T_dea'] = 15.0

    logger.debug('state0 at %s is\n%s' % (DateTime(state0['tstart']).date,
                                           pformat(state0)))

    # Get commands after end of state0 through first backstop command time
    cmds_datestart = state0['datestop']
    cmds_datestop = bs_cmds[0]['date']

    # Get timeline load segments including state0 and beyond.
    timeline_loads = db.fetchall("""SELECT * from timeline_loads
                                 WHERE datestop > '%s'
                                 and datestart < '%s'"""
                                 % (cmds_datestart, cmds_datestop))
    logger.info('Found {} timeline_loads after {}'.format(
            len(timeline_loads), cmds_datestart))

    # Get cmds since datestart within timeline_loads
    db_cmds = cmd_states.get_cmds(cmds_datestart, db=db, update_db=False,
                                  timeline_loads=timeline_loads)

    # Delete non-load cmds that are within the backstop time span
    # => Keep if timeline_id is not None or date < bs_cmds[0]['time']
    db_cmds = [x for x in db_cmds if (x['timeline_id'] is not None or
                                      x['time'] < bs_cmds[0]['time'])]

    logger.info('Got %d cmds from database between %s and %s' %
                  (len(db_cmds), cmds_datestart, cmds_datestop))

    # Get the commanded states from state0 through the end of backstop commands
    states = cmd_states.get_states(state0, db_cmds + bs_cmds)
    states[-1].datestop = bs_cmds[-1]['date']
    states[-1].tstop = bs_cmds[-1]['time']
    logger.info('Found %d commanded states from %s to %s' %
                 (len(states), states[0]['datestart'], states[-1]['datestop']))

    # Create array of times at which to calculate DEA temps, then do it.
    logger.info('Calculating DEA thermal model')
    print(state0)
    model = calc_model(opt.model_spec, states, state0['tstart'], tstop,
                       state0['T_dea'])

    # Make the DEA limit check plots and data files
    plt.rc("axes", labelsize=10, titlesize=12)
    plt.rc("xtick", labelsize=10)
    plt.rc("ytick", labelsize=10)
    temps = {'dea': model.comp['1deamzt'].mvals}
    plots = make_check_plots(opt, states, model.times, temps, tstart)
    viols = make_viols(opt, states, model.times, temps)
    write_states(opt, states)
    write_temps(opt, model.times, temps)

    return dict(opt=opt, states=states, times=model.times, temps=temps,
               plots=plots, viols=viols)
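The opening of make_week_predict builds a trial state0 from command-line options and falls back to the database when anything is missing. A toy, self-contained sketch of that pattern; SimpleNamespace stands in for the real options object and all values are invented.

from pprint import pformat
from types import SimpleNamespace

opt = SimpleNamespace(pitch=150.0, simpos=75616, ccd_count=4, fep_count=4,
                      vid_board=1, clocking=1, T_dea=None)
tstart = 1.0e8   # CXC seconds, made up

state0 = {x: getattr(opt, x)
          for x in ('pitch', 'simpos', 'ccd_count', 'fep_count',
                    'vid_board', 'clocking', 'T_dea')}
state0.update({'tstart': tstart - 30, 'tstop': tstart,
               'q1': 0.0, 'q2': 0.0, 'q3': 0.0, 'q4': 1.0})

if None in state0.values():
    # In the real code this is where cmd_states.get_state0() takes over and the
    # missing temperature is filled from a telemetry mean around the state start.
    print('state0 incomplete, would fall back to cmd_states.get_state0()')
print(pformat(state0))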
    def get_prediction_states(self, tbegin):
        """
        Get the states used for the prediction.  This includes both the
        states from the review load backstop file and all the 
        states between the latest telemetry data and the beginning 
        of that review load backstop file.

        The review load backstop commands have already been obtained, and
        telemetry from 21 days back through the latest available in Ska has
        been fetched.

        The task now is to backchain through the loads and assemble any
        states missing between the end of telemetry and the start of the
        review load.

        Parameters
        ----------
        tbegin : string
            The starting date/time from which to obtain states for
            prediction. This is tlm['date'][-5], i.e. the date five entries
            back from the end of the fetched telemetry.
        """
        """
        Get state0 as last cmd_state that starts within available telemetry. 
        The original logic in get_state0() is to return a state that
        is absolutely, positively reliable by insisting that the
        returned state is at least ``date_margin`` days old, where the
        default is 10 days. That is too conservative (given the way
        commanded states are actually managed) and not what is desired
        here, which is a recent state from which to start thermal propagation.

        Instead we supply ``date_margin=None`` so that get_state0 will
        find the newest state consistent with the ``date`` criterion
        and pcad_mode == 'NPNT'.
        """
        # If an OFLS directory has been specified, get the backstop commands
        # stored in the backstop file in that directory

        # Ok ready to start the collection of continuity commands
        #
        # Make a copy of the Review Load Commands. This will have
        # Continuity commands concatenated to it and will be the final product

        import copy

        bs_cmds = copy.copy(self.bs_cmds)
        bs_start_time = bs_cmds[0]['time']
        present_ofls_dir = copy.copy(self.backstop_file)

        # So long as the earliest command in bs_cmds is after the state0
        # time, keep concatenating continuity commands to bs_cmds based upon
        # the type of load.
        # Note that as you march back in time along the load chain, "ofls_dir" will change.

        # First we need a State0 because cmd_states.get_states cannot translate
        # backstop commands into commanded states without one. cmd_states.get_state0
        # is written such that if you don't give it a database object it will
        # create one for itself and use that. Here, all that really matters
        # is the value of 'tbegin', the specification of the date parameter to be used
        # and the date_margin.
        state0 = cmd_states.get_state0(tbegin,
                                       self.db,
                                       datepar='datestart',
                                       date_margin=None)
        # WHILE
        # The big while loop that backchains through previous loads and concatenates the
        # proper load sections to the review load.
        while state0['tstart'] < bs_start_time:

            # Read the Continuity information of the present ofls directory
            cont_load_path, present_load_type, scs107_date = self.BSC.get_continuity_file_info(
                present_ofls_dir)

            #---------------------- NORMAL ----------------------------------------
            # If the load type is "normal" then grab the continuity command
            # set and concatenate those commands to the start of bs_cmds
            if present_load_type.upper() == 'NORMAL':
                # Obtain the continuity load commands
                cont_bs_cmds, cont_bs_name = self.BSC.get_bs_cmds(
                    cont_load_path)

                # Combine the continuity commands with the bs_cmds
                bs_cmds = self.BSC.CombineNormal(cont_bs_cmds, bs_cmds)

                # Reset the backstop collection start time for the While loop
                bs_start_time = bs_cmds[0]['time']
                # Now point the operative ofls directory to the Continuity directory
                present_ofls_dir = cont_load_path

            #---------------------- TOO ----------------------------------------
            # If the load type is "too" then grab the continuity command
            # set and concatenate those commands to the start of bs_cmds
            elif present_load_type.upper() == 'TOO':
                # Obtain the continuity load commands
                cont_bs_cmds, cont_bs_name = self.BSC.get_bs_cmds(
                    cont_load_path)

                # Combine the continuity commands with the bs_cmds
                bs_cmds = self.BSC.CombineTOO(cont_bs_cmds, bs_cmds)

                # Reset the backstop collection start time for the While loop
                bs_start_time = bs_cmds[0]['time']
                # Now point the operative ofls directory to the Continuity directory
                present_ofls_dir = cont_load_path

            #---------------------- STOP ----------------------------------------
            # If the load type is "STOP" then grab the continuity command
            # set and concatenate those commands to the start of bs_cmds
            # Take into account the SCS-107 commands which shut ACIS down
            # and any LTCTI run
            elif present_load_type.upper() == 'STOP':

                # Obtain the continuity load commands
                cont_bs_cmds, cont_bs_name = self.BSC.get_bs_cmds(
                    cont_load_path)

                # CombineSTOP the continuity commands with the bs_cmds
                bs_cmds = self.BSC.CombineSTOP(cont_bs_cmds, bs_cmds,
                                               scs107_date)

                # Reset the backstop collection start time for the While loop
                bs_start_time = bs_cmds[0]['time']
                # Now point the operative ofls directory to the Continuity directory
                present_ofls_dir = cont_load_path

            #---------------------- SCS-107 ----------------------------------------
            # If the load type is "STOP" then grab the continuity command
            # set and concatenate those commands to the start of bs_cmds
            # Take into account the SCS-107 commands which shut ACIS down
            # and any LTCTI run
            elif present_load_type.upper() == 'SCS-107':
                # Obtain the continuity load commands
                cont_bs_cmds, cont_bs_name = self.BSC.get_bs_cmds(
                    cont_load_path)
                # Store the continuity bs commands as a chunk in the chunk list

                # Obtain the CONTINUITY load Vehicle-Only file
                vo_bs_cmds, vo_bs_name = self.BSC.get_vehicle_only_bs_cmds(
                    cont_load_path)

                # Combine107 the continuity commands with the bs_cmds
                bs_cmds = self.BSC.Combine107(cont_bs_cmds, vo_bs_cmds,
                                              bs_cmds, scs107_date)

                # Reset the backstop collection start time for the While loop
                bs_start_time = bs_cmds[0]['time']
                # Now point the operative ofls directory to the Continuity directory
                present_ofls_dir = cont_load_path

        # Convert the assembled backstop command history into commanded states
        # from state0 through the end of the Review Load backstop commands.
        # get_states trims the list to any command whose time is AFTER the state0 START
        # time and then converts each relevant backstop command, in that resultant list,
        # into a pseudo-commanded states state
        states = cmd_states.get_states(state0, bs_cmds)

        # Get rid of the 2099 placeholder stop date
        states[-1].datestop = bs_cmds[-1]['date']
        states[-1].tstop = bs_cmds[-1]['time']

        self.logger.debug('state0 at %s is\n%s' %
                          (DateTime(state0['tstart']).date, pformat(state0)))

        return states, state0
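The while loop above backchains through continuity loads until the assembled command list reaches back to state0. A condensed sketch of the same control flow as a standalone function, assuming BSC exposes the same methods used above (get_continuity_file_info, get_bs_cmds, get_vehicle_only_bs_cmds and the Combine* methods):

def backchain_backstop(bs_cmds, ofls_dir, state0_tstart, BSC):
    """Prepend continuity commands until bs_cmds starts at or before state0_tstart."""
    while state0_tstart < bs_cmds[0]['time']:
        cont_dir, load_type, scs107_date = BSC.get_continuity_file_info(ofls_dir)
        cont_cmds, _ = BSC.get_bs_cmds(cont_dir)
        load_type = load_type.upper()
        if load_type == 'NORMAL':
            bs_cmds = BSC.CombineNormal(cont_cmds, bs_cmds)
        elif load_type == 'TOO':
            bs_cmds = BSC.CombineTOO(cont_cmds, bs_cmds)
        elif load_type == 'STOP':
            bs_cmds = BSC.CombineSTOP(cont_cmds, bs_cmds, scs107_date)
        elif load_type == 'SCS-107':
            vo_cmds, _ = BSC.get_vehicle_only_bs_cmds(cont_dir)
            bs_cmds = BSC.Combine107(cont_cmds, vo_cmds, bs_cmds, scs107_date)
        # Step one load back along the chain
        ofls_dir = cont_dir
    return bs_cmds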
    def get_prediction_states(self, tbegin):
        """
        Get the states used for the prediction.

        Parameters
        ----------
        tbegin : string
            The starting date/time from which to obtain states for
            prediction.
        """
        """
        Get state0 as last cmd_state that starts within available telemetry. 
        The original logic in get_state0() is to return a state that
        is absolutely, positively reliable by insisting that the
        returned state is at least ``date_margin`` days old, where the
        default is 10 days. That is too conservative (given the way
        commanded states are actually managed) and not what is desired
        here, which is a recent state from which to start thermal propagation.

        Instead we supply ``date_margin=None`` so that get_state0 will
        find the newest state consistent with the ``date`` criterion
        and pcad_mode == 'NPNT'.
        """

        state0 = cmd_states.get_state0(tbegin,
                                       self.db,
                                       datepar='datestart',
                                       date_margin=None)

        self.logger.debug('state0 at %s is\n%s' %
                          (DateTime(state0['tstart']).date, pformat(state0)))

        # Get commands after end of state0 through first backstop command time
        cmds_datestart = state0['datestop']
        cmds_datestop = self.bs_cmds[0]['date']

        # Get timeline load segments including state0 and beyond.
        timeline_loads = self.db.fetchall("""SELECT * from timeline_loads
                                          WHERE datestop >= '%s'
                                          and datestart < '%s'""" %
                                          (cmds_datestart, cmds_datestop))
        self.logger.info('Found {} timeline_loads after {}'.format(
            len(timeline_loads), cmds_datestart))

        # Get cmds since datestart within timeline_loads
        db_cmds = cmd_states.get_cmds(cmds_datestart,
                                      db=self.db,
                                      update_db=False,
                                      timeline_loads=timeline_loads)

        # Delete non-load cmds that are within the backstop time span
        # => Keep if timeline_id is not None (if a normal load)
        # or date < bs_cmds[0]['time']

        # If this is an interrupt load, we don't want to include the end
        # commands from the continuity load since not all of them will be valid,
        # and we could end up evolving on states which would not be present in
        # the load under review. However, once the load has been approved and is
        # running / has run on the spacecraft, the states in the database will
        # be correct, and we will want to include all relevant commands from the
        # continuity load. To check for this, we find the current time and see
        # the load under review is still in the future. If it is, we then treat
        # this as an interrupt if requested, otherwise, we don't.
        current_time = DateTime().secs
        interrupt = self.interrupt and self.bs_cmds[0]["time"] > current_time

        db_cmds = [
            x for x in db_cmds
            if ((x['timeline_id'] is not None and not interrupt)
                or x['time'] < self.bs_cmds[0]['time'])
        ]

        self.logger.info('Got %d cmds from database between %s and %s' %
                         (len(db_cmds), cmds_datestart, cmds_datestop))

        # Get the commanded states from state0 through the end of backstop commands
        states = cmd_states.get_states(state0, db_cmds + self.bs_cmds)
        states[-1].datestop = self.bs_cmds[-1]['date']
        states[-1].tstop = self.bs_cmds[-1]['time']
        self.logger.info(
            'Found %d commanded states from %s to %s' %
            (len(states), states[0]['datestart'], states[-1]['datestop']))

        return states, state0
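The db_cmds filter in the second variant is the subtle part, so here it is pulled out as a small pure function with toy dict commands; the helper name and the values are illustrative only.

def filter_db_cmds(db_cmds, bs_start_time, interrupt=False):
    """Keep load commands (unless interrupting) plus anything before the backstop start."""
    return [cmd for cmd in db_cmds
            if ((cmd['timeline_id'] is not None and not interrupt)
                or cmd['time'] < bs_start_time)]

cmds = [{'timeline_id': 42, 'time': 100.0},    # load cmd inside the backstop span
        {'timeline_id': None, 'time': 100.0},  # non-load cmd inside the backstop span
        {'timeline_id': None, 'time': 10.0}]   # non-load cmd before the backstop start
print(filter_db_cmds(cmds, bs_start_time=50.0))                  # keeps 1st and 3rd
print(filter_db_cmds(cmds, bs_start_time=50.0, interrupt=True))  # keeps only the 3rd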
Example #5
    for table in tables:
        sqldef = open(table + '_def.sql').read()
        db.execute(sqldef, commit=True)

datestart = state0['datestart']
timeline_loads = db.fetchall("""SELECT * from timeline_loads
                                WHERE datestop > '%s'""" % datestart)

timeline_loads_mod = timeline_loads.copy()[:-2]
timeline_loads_mod[-1].datestop = '2009:053:00:00:00.000'

print('=' * 40)
print('Processing with timeline_loads')
cmds = cmd_states.get_cmds(datestart, db=db, update_db=True, timeline_loads=timeline_loads)
states = cmd_states.get_states(state0, cmds)
print('len(cmds) =', len(cmds))
cmd_states.update_states_db(states, db)

if False:
    print('=' * 40)
    print('Processing with timeline_loads_mod')
    cmds = cmd_states.get_cmds(datestart, db=db, update_db=True, timeline_loads=timeline_loads_mod)
    states = cmd_states.get_states(state0, cmds)
    print('len(cmds) =', len(cmds))
    print(states[0])
    print(states[-1])
    cmd_states.update_states_db(states, db)

    print('=' * 40)
    print('Processing with timeline_loads')
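The table-creation loop at the top of Example #5 runs against a Ska.DBI handle. A standard-library sketch of the same idea with sqlite3; the table names and the *_def.sql files are hypothetical.

import sqlite3

conn = sqlite3.connect(':memory:')
for table in ['cmds', 'cmd_states']:       # hypothetical table names
    with open(table + '_def.sql') as fh:   # each file holds the CREATE TABLE definition
        sqldef = fh.read()
    conn.executescript(sqldef)             # a def file may contain several statements
conn.commit()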
Example #6
def make_week_predict(opt, tstart, tstop, bs_cmds, tlm, db):
    logger.debug("In make_week_predict")

    # Try to make initial state0 from cmd line options
    state0 = dict((x, getattr(opt, x))
                  for x in ('pitch', 'simpos', 'ccd_count', 'fep_count',
                            'vid_board', 'clocking', 'T_psmc', 'T_pin1at',
                            'dh_heater'))
    
    state0.update({'tstart': tstart - 30,
                   'tstop': tstart,
                   'datestart': DateTime(tstart - 30).date,
                   'datestop': DateTime(tstart).date,
                   'q1': 0.0, 'q2': 0.0, 'q3': 0.0, 'q4': 1.0,
                   }
                  )

    logger.debug("Completed state0 update")
    # If cmd lines options were not fully specified then get state0 as last
    # cmd_state that starts within available telemetry.  Update with the
    # mean temperatures at the start of state0.
    if None in state0.values():
        state0 = cmd_states.get_state0(tlm['date'][-5], db,
                                       datepar='datestart')
        ok = ((tlm['date'] >= state0['tstart'] - 700) &
              (tlm['date'] <= state0['tstart'] + 700))
        state0.update({'T_psmc': np.mean(tlm['1pdeaat'][ok])})
        # state0.update({'T_pin1at': np.mean(tlm['1pin1at'][ok]) + 3.0 })
        state0.update({'T_pin1at': np.mean(tlm['1pdeaat'][ok]) - 10.0 })

        

    # TEMPORARY HACK: core model doesn't actually support predictive
    # active heater yet.  Initial temperature determines active heater
    # state for predictions now.
    if state0['T_psmc'] < 15:
        state0['T_psmc'] = 15.0

    logger.info('state0 at %s is\n%s' % (DateTime(state0['tstart']).date,
                                           pformat(state0)))

    # Get commands after end of state0 through first backstop command time
    cmds_datestart = state0['datestop']
    cmds_datestop = bs_cmds[0]['date']

    # Get timeline load segments including state0 and beyond.
    timeline_loads = db.fetchall("""SELECT * from timeline_loads
                                 WHERE datestop > '%s'
                                 and datestart < '%s'"""
                                 % (cmds_datestart, cmds_datestop))
    logger.info('Found {} timeline_loads after {}'.format(
            len(timeline_loads), cmds_datestart))

    # Get cmds since datestart within timeline_loads
    db_cmds = cmd_states.get_cmds(cmds_datestart, db=db, update_db=False,
                                  timeline_loads=timeline_loads)

    # Delete non-load cmds that are within the backstop time span
    # => Keep if timeline_id is not None or date < bs_cmds[0]['time']
    db_cmds = [x for x in db_cmds if (x['timeline_id'] is not None or
                                      x['time'] < bs_cmds[0]['time'])]

    logger.info('Got %d cmds from database between %s and %s' %
                  (len(db_cmds), cmds_datestart, cmds_datestop))

    # Get the commanded states from state0 through the end of backstop commands
    states = cmd_states.get_states(state0, db_cmds + bs_cmds)
    states[-1].datestop = bs_cmds[-1]['date']
    states[-1].tstop = bs_cmds[-1]['time']
    logger.info('Found %d commanded states from %s to %s' %
                 (len(states), states[0]['datestart'], states[-1]['datestop']))

    # htrbfn='/home/edgar/acis/thermal_models/dhheater_history/dahtbon_history.rdb'
    htrbfn = 'dahtbon_history.rdb'
    logger.info('Reading file of dahtrb commands from file %s' % htrbfn)
    htrb = Ska.Table.read_ascii_table(htrbfn, headerrow=2, headertype='rdb')
    dh_heater_times = Chandra.Time.date2secs(htrb['time'])
    dh_heater = htrb['dahtbon'].astype(bool)

    # Create array of times at which to calculate PSMC temps, then do it.
    logger.info('Calculating PSMC thermal model')
    logger.info('state0 at start of calc is\n%s' % (pformat(state0)))

    model = calc_model(opt.model_spec, states, state0['tstart'], tstop,
                       state0['T_psmc'], None, state0['T_pin1at'], None,
                       dh_heater, dh_heater_times)

    # Make the PSMC limit check plots and data files
    plt.rc("axes", labelsize=10, titlesize=12)
    plt.rc("xtick", labelsize=10)
    plt.rc("ytick", labelsize=10)
    temps = dict(psmc=model.comp['1pdeaat'].mvals, pin=model.comp['pin1at'].mvals)
    plots = make_check_plots(opt, states, model.times, temps, tstart)
    viols = make_viols(opt, states, model.times, temps)
    write_states(opt, states)
    write_temps(opt, model.times, temps)

    return dict(opt=opt, states=states, times=model.times, temps=temps,
               plots=plots, viols=viols)
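The DH heater history above is read with Ska.Table. An equivalent sketch using astropy's ascii reader, assuming the file has a standard two-row RDB header and the same 'time' and 'dahtbon' columns:

from astropy.io import ascii
from Chandra.Time import DateTime

htrb = ascii.read('dahtbon_history.rdb', format='rdb')
dh_heater_times = DateTime(htrb['time']).secs   # date strings -> CXC seconds
dh_heater = htrb['dahtbon'].astype(bool)        # heater on/off flag per sample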
    def get_prediction_states(self, tbegin):
        """
        Get the states used for the prediction.  This includes both the
        states from the review load backstop file and all the 
        states between the latest telemetry data and the beginning 
        of that review load backstop file.

        The review load backstop commands have already been obtained, and
        telemetry from 21 days back through the latest available in Ska has
        been fetched.

        The task now is to backchain through the loads and assemble any
        states missing between the end of telemetry and the start of the
        review load.

        Parameters
        ----------
        tbegin : string
            The starting date/time from which to obtain states for
            prediction. This is tlm['date'][-5], i.e. the date five entries
            back from the end of the fetched telemetry.
        """

        """
        Get state0 as last cmd_state that starts within available telemetry. 
        The original logic in get_state0() is to return a state that
        is absolutely, positively reliable by insisting that the
        returned state is at least ``date_margin`` days old, where the
        default is 10 days. That is too conservative (given the way
        commanded states are actually managed) and not what is desired
        here, which is a recent state from which to start thermal propagation.

        Instead we supply ``date_margin=None`` so that get_state0 will
        find the newest state consistent with the ``date`` criterion
        and pcad_mode == 'NPNT'.
        """
        # If an OFLS directory has been specified, get the backstop commands
        # stored in the backstop file in that directory

        # Ok ready to start the collection of continuity commands
        #
        # Make a copy of the Review Load Commands. This will have 
        # Continuity commands concatenated to it and will be the final product

        import copy

        bs_cmds = copy.copy(self.bs_cmds)
        bs_start_time = bs_cmds[0]['time']
        present_ofls_dir = copy.copy(self.backstop_file)
        
        # So long as the earliest command in bs_cmds is after the state0
        # time, keep concatenating continuity commands to bs_cmds based upon
        # the type of load.
        # Note that as you march back in time along the load chain, "ofls_dir" will change.

        # First we need a State0 because cmd_states.get_states cannot translate
        # backstop commands into commanded states without one. cmd_states.get_state0
        # is written such that if you don't give it a database object it will 
        # create one for itself and use that. Here, all that really matters 
        # is the value of 'tbegin', the specification of the date parameter to be used
        # and the date_margin.
        state0 = cmd_states.get_state0(tbegin, self.db, datepar='datestart',
                                       date_margin=None)
        # WHILE
        # The big while loop that backchains through previous loads and concatenates the
        # proper load sections to the review load.
        while state0['tstart'] < bs_start_time:
    
            # Read the Continuity information of the present ofls directory
            cont_load_path, present_load_type, scs107_date = self.BSC.get_continuity_file_info(present_ofls_dir)
    
            #---------------------- NORMAL ----------------------------------------
            # If the load type is "normal" then grab the continuity command
            # set and concatenate those commands to the start of bs_cmds
            if present_load_type.upper() == 'NORMAL':
                # Obtain the continuity load commands
                cont_bs_cmds, cont_bs_name = self.BSC.get_bs_cmds(cont_load_path)
    
                # Combine the continuity commands with the bs_cmds
                bs_cmds = self.BSC.CombineNormal(cont_bs_cmds, bs_cmds)
        
                # Reset the backstop collection start time for the While loop
                bs_start_time = bs_cmds[0]['time']
                # Now point the operative ofls directory to the Continuity directory
                present_ofls_dir = cont_load_path
        
            #---------------------- TOO ----------------------------------------
            # If the load type is "too" then grab the continuity command
            # set and concatenate those commands to the start of bs_cmds
            elif present_load_type.upper() == 'TOO':
                # Obtain the continuity load commands
                cont_bs_cmds, cont_bs_name = self.BSC.get_bs_cmds(cont_load_path)
    
                # Combine the continuity commands with the bs_cmds
                bs_cmds = self.BSC.CombineTOO(cont_bs_cmds, bs_cmds)
                
                # Reset the backstop collection start time for the While loop
                bs_start_time = bs_cmds[0]['time']
                # Now point the operative ofls directory to the Continuity directory
                present_ofls_dir = cont_load_path
        
        
            #---------------------- STOP ----------------------------------------
            # If the load type is "STOP" then grab the continuity command
            # set and concatenate those commands to the start of bs_cmds
            # Take into account the SCS-107 commands which shut ACIS down
            # and any LTCTI run
            elif present_load_type.upper() == 'STOP':
        
                # Obtain the continuity load commands
                cont_bs_cmds, cont_bs_name = self.BSC.get_bs_cmds(cont_load_path)
    
                # CombineSTOP the continuity commands with the bs_cmds
                bs_cmds = self.BSC.CombineSTOP(cont_bs_cmds, bs_cmds, scs107_date )
                
                # Reset the backstop collection start time for the While loop
                bs_start_time = bs_cmds[0]['time']
                # Now point the operative ofls directory to the Continuity directory
                present_ofls_dir = cont_load_path
        
            #---------------------- SCS-107 ----------------------------------------
            # If the load type is "STOP" then grab the continuity command
            # set and concatenate those commands to the start of bs_cmds
            # Take into account the SCS-107 commands which shut ACIS down
            # and any LTCTI run
            elif present_load_type.upper() == 'SCS-107':
                # Obtain the continuity load commands
                cont_bs_cmds, cont_bs_name = self.BSC.get_bs_cmds(cont_load_path)
                # Store the continuity bs commands as a chunk in the chunk list
        
                # Obtain the CONTINUITY load Vehicle-Only file
                vo_bs_cmds, vo_bs_name = self.BSC.get_vehicle_only_bs_cmds(cont_load_path)
        
                # Combine107 the continuity commands with the bs_cmds
                bs_cmds = self.BSC.Combine107(cont_bs_cmds, vo_bs_cmds, bs_cmds, scs107_date )
        
                # Reset the backstop collection start time for the While loop
                bs_start_time = bs_cmds[0]['time']
                # Now point the operative ofls directory to the Continuity directory
                present_ofls_dir = cont_load_path
        

        # Convert the assembled backstop command history into commanded states
        # from state0 through the end of the Review Load backstop commands.
        # get_states trims the list to any command whose time is AFTER the state0 START
        # time and then converts each relevant backstop command, in that resultant list,
        # into a pseudo-commanded states state
        states = cmd_states.get_states(state0, bs_cmds)
    
        # Get rid of the 2099 placeholder stop date
        states[-1].datestop = bs_cmds[-1]['date']
        states[-1].tstop = bs_cmds[-1]['time']

        self.logger.debug('state0 at %s is\n%s' % (DateTime(state0['tstart']).date,
                                                   pformat(state0)))

        return states, state0
    def get_prediction_states(self, tbegin):
        """
        Get the states used for the prediction.

        Parameters
        ----------
        tbegin : string
            The starting date/time from which to obtain states for
            prediction.
        """

        """
        Get state0 as last cmd_state that starts within available telemetry. 
        The original logic in get_state0() is to return a state that
        is absolutely, positively reliable by insisting that the
        returned state is at least ``date_margin`` days old, where the
        default is 10 days. That is too conservative (given the way
        commanded states are actually managed) and not what is desired
        here, which is a recent state from which to start thermal propagation.

        Instead we supply ``date_margin=None`` so that get_state0 will
        find the newest state consistent with the ``date`` criterion
        and pcad_mode == 'NPNT'.
        """

        state0 = cmd_states.get_state0(tbegin, self.db, datepar='datestart',
                                       date_margin=None)

        self.logger.debug('state0 at %s is\n%s' % (DateTime(state0['tstart']).date,
                                                   pformat(state0)))

        # Get commands after end of state0 through first backstop command time
        cmds_datestart = state0['datestop']
        cmds_datestop = self.bs_cmds[0]['date']

        # Get timeline load segments including state0 and beyond.
        timeline_loads = self.db.fetchall("""SELECT * from timeline_loads
                                          WHERE datestop >= '%s'
                                          and datestart < '%s'"""
                                          % (cmds_datestart, cmds_datestop))
        self.logger.info('Found {} timeline_loads after {}'.format(
                         len(timeline_loads), cmds_datestart))

        # Get cmds since datestart within timeline_loads
        db_cmds = cmd_states.get_cmds(cmds_datestart, db=self.db, update_db=False,
                                      timeline_loads=timeline_loads)

        # Delete non-load cmds that are within the backstop time span
        # => Keep if timeline_id is not None (if a normal load)
        # or date < bs_cmds[0]['time']

        # If this is an interrupt load, we don't want to include the end 
        # commands from the continuity load since not all of them will be valid,
        # and we could end up evolving on states which would not be present in 
        # the load under review. However, once the load has been approved and is
        # running / has run on the spacecraft, the states in the database will 
        # be correct, and we will want to include all relevant commands from the
        # continuity load. To check for this, we find the current time and see 
        # the load under review is still in the future. If it is, we then treat
        # this as an interrupt if requested, otherwise, we don't. 
        current_time = DateTime().secs
        interrupt = self.interrupt and self.bs_cmds[0]["time"] > current_time

        db_cmds = [x for x in db_cmds
                   if ((x['timeline_id'] is not None and not interrupt) or
                       x['time'] < self.bs_cmds[0]['time'])]

        self.logger.info('Got %d cmds from database between %s and %s' %
                         (len(db_cmds), cmds_datestart, cmds_datestop))

        # Get the commanded states from state0 through the end of backstop commands
        states = cmd_states.get_states(state0, db_cmds + self.bs_cmds)
        states[-1].datestop = self.bs_cmds[-1]['date']
        states[-1].tstop = self.bs_cmds[-1]['time']
        self.logger.info('Found %d commanded states from %s to %s' %
                         (len(states), states[0]['datestart'], 
                          states[-1]['datestop']))

        return states, state0
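Both get_prediction_states variants finish by overwriting the open-ended ('2099') stop of the final state with the last backstop command time. A toy numpy illustration of that record fixup with invented values:

import numpy as np

states = np.rec.fromrecords(
    [(0.0, 400.0, '2023:001', '2023:005'),
     (400.0, 3.2e9, '2023:005', '2099:365')],   # open-ended final state
    names=['tstart', 'tstop', 'datestart', 'datestop'])

last_bs_time, last_bs_date = 900.0, '2023:010'
states[-1].tstop = last_bs_time                 # same attribute-style assignment as above
states[-1].datestop = last_bs_date
print(states[-1])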
Example #9
def make_week_predict(opt, tstart, tstop, bs_cmds, tlm, db):

    # Try to make initial state0 from cmd line options
    state0 = dict((x, getattr(opt, x)) for x in ("pitch", "simpos", "power", "T_dea", "T_pin"))
    state0.update(
        {
            "tstart": tstart - 30,
            "tstop": tstart,
            "datestart": DateTime(tstart - 30).date,
            "datestop": DateTime(tstart).date,
        }
    )

    # If cmd lines options were not fully specified then get state0 as last
    # cmd_state that starts within available telemetry.  Update with the
    # mean temperatures at the start of state0.
    if None in state0.values():
        state0 = cmd_states.get_state0(tlm[-5].date, db, datepar="datestart")
        ok = (tlm.date >= state0["tstart"] - 150) & (tlm.date <= state0["tstart"] + 150)
        state0.update({"T_dea": np.mean(tlm["1pdeaat"][ok]), "T_pin": np.mean(tlm["1pin1at"][ok])})

    logger.debug("state0 at %s is\n%s" % (DateTime(state0["tstart"]).date, pformat(state0)))

    if opt.old_cmds:
        cmds_datestart = DateTime(state0["tstop"]).date
        cmds_datestop = DateTime(bs_cmds[0]["time"]).date
        db_cmds = cmd_states.get_cmds(cmds_datestart, cmds_datestop, db)
    else:
        # Get the commands after end of state0 through first backstop command time
        cmds_datestart = state0["datestop"]
        cmds_datestop = bs_cmds[0]["date"]  # *was* DateTime(bs_cmds[0]['time']).date

        # Get timeline load segments including state0 and beyond.
        timeline_loads = db.fetchall(
            """SELECT * from timeline_loads
                                        WHERE datestop > '%s' and datestart < '%s'"""
            % (cmds_datestart, cmds_datestop)
        )
        logger.info("Found %s timeline_loads  after %s" % (len(timeline_loads), cmds_datestart))

        # Get cmds since datestart within timeline_loads
        db_cmds = cmd_states.get_cmds(cmds_datestart, db=db, update_db=False, timeline_loads=timeline_loads)

        # Delete non-load cmds that are within the backstop time span
        # => Keep if timeline_id is not None or date < bs_cmds[0]['time']
        db_cmds = [x for x in db_cmds if (x["timeline_id"] is not None or x["time"] < bs_cmds[0]["time"])]

    logger.info("Got %d cmds from database between %s and %s" % (len(db_cmds), cmds_datestart, cmds_datestop))

    # Get the commanded states from state0 through the end of the backstop commands
    states = cmd_states.get_states(state0, db_cmds + bs_cmds)
    states[-1].datestop = bs_cmds[-1]["date"]
    states[-1].tstop = bs_cmds[-1]["time"]
    logger.info(
        "Found %d commanded states from %s to %s" % (len(states), states[0]["datestart"], states[-1]["datestop"])
    )

    # Add power column based on ACIS commanding in states
    states = Ska.Numpy.add_column(states, "power", get_power(states))

    # Create array of times at which to calculate PSMC temperatures, then do it.
    times = np.arange(state0["tstart"], tstop, opt.dt)
    logger.info("Calculating PSMC thermal model")
    T_pin, T_dea = twodof.calc_twodof_model(states, state0["T_pin"], state0["T_dea"], times, characteristics.model_par)

    # Make the PSMC limit check plots and data files
    plt.rc("axes", labelsize=10, titlesize=12)
    plt.rc("xtick", labelsize=10)
    plt.rc("ytick", labelsize=10)
    temps = dict(dea=T_dea, pin=T_pin)
    plots = make_check_plots(opt, states, times, temps, tstart)
    viols = make_viols(opt, states, times, temps)
    write_states(opt, states)
    write_temps(opt, times, temps)

    return dict(opt=opt, states=states, times=times, temps=temps, plots=plots, viols=viols)
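Several of these examples seed the model with a short telemetry average around the state0 start time (±150 s here, ±700 s in the earlier examples). A self-contained toy version of that windowed mean with made-up numbers:

import numpy as np

tlm = np.rec.fromarrays(
    [np.arange(0.0, 1000.0, 50.0),    # 'date' in CXC seconds
     np.linspace(21.0, 23.0, 20)],    # '1pdeaat' in degC
    names=['date', '1pdeaat'])

t0 = 500.0                            # stand-in for state0['tstart']
ok = (tlm.date >= t0 - 150) & (tlm.date <= t0 + 150)
T_dea0 = np.mean(tlm['1pdeaat'][ok])
print(T_dea0)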