# Example #1: IncidentPoolEventCheckpoint.__init__ (method excerpt)
    def __init__(self, name, msg_target=None):
        ''' Initialize the checkpoint.
            Need the ruleset to enable getting to the current pool.
        '''
        # Fall back to a prefixed logging target when none supplied
        # (only happens under test).
        self.msg_target = MsgTargetLogger(prefix='TESTING: ') if msg_target is None else msg_target
        self.msg_target.debug('Creating IncidentPoolEventCheckpoint {0}'.format(name))

        # Parent populates self.data / self.start_rec_id.
        EventAnalyzerCheckpoint.__init__(self, name)
        if self.data is not None:
            try:
                # data layout: [start_time, last_in_pool_rec_id, [primes...]]
                self.pool_rec_id = long(json.loads(self.data)[1])
            except ThreadKilled:
                # never swallow a thread shutdown request
                raise
            except:
                # corrupt checkpoint: fall back to start and drop the data
                self.msg_target.warning('Checkpoint data invalid')
                self.pool_rec_id = self.start_rec_id
                self.data = None
        else:
            # No checkpoint data, so the pool end was the start.
            self.pool_rec_id = self.start_rec_id
        # rec_ids of priming events still to re-collect during recovery
        self.rec_ids = self._get_rec_ids_from_data()
        # priming events gathered so far during recovery
        self.prime_incidents = []
        return
# Example #2: full IncidentPoolEventCheckpoint class
class IncidentPoolEventCheckpoint(EventAnalyzerCheckpoint):
    ''' Handle checkpointing for an incident pool.

    self.start_rec_id is the min( last_in_pool, (all priming event rec_ids - 1))
    data is a JSON-encoded list of:
       -- pool start time
       -- last in pool
       -- list of primes
            -- compacted entry <rec_id><U|S><time_left>
               -- optional +<dur_ext_time>

    Note that if there are no primes then the start_rec_id is the last in pool,
    so data is not recorded and the start_rec_id is used as the last in pool
    rec_id.
    '''

    # Python 2 style abstract base class; subclasses must implement get_pool().
    __metaclass__ = ABCMeta

    def __init__(self, name, msg_target=None):
        ''' Initialize the checkpoint.

            name       -- checkpoint name, passed to EventAnalyzerCheckpoint
            msg_target -- message/logging target; when None (test only) a
                          MsgTargetLogger with a 'TESTING: ' prefix is used
        '''
        # setup the message target information
        if msg_target is None:
            # This only happens when testing
            self.msg_target = MsgTargetLogger(prefix='TESTING: ')
        else:
            self.msg_target = msg_target
        self.msg_target.debug('Creating IncidentPoolEventCheckpoint {0}'.format(name))

        # Parent __init__ is expected to populate self.data / self.start_rec_id.
        EventAnalyzerCheckpoint.__init__(self, name)
        if self.data is None:
            # No data so know pool end was the start
            self.pool_rec_id = self.start_rec_id
        else:
            try:
                # data layout: [start_time, last_in_pool_rec_id, [primes...]]
                self.pool_rec_id = long(json.loads(self.data)[1])
            except ThreadKilled:
                # never swallow a thread shutdown request
                raise
            except:
                # Any other parse failure: best-effort recovery -- fall back
                # to start_rec_id and drop the corrupt checkpoint data.
                self.msg_target.warning('Checkpoint data invalid')
                self.pool_rec_id = self.start_rec_id
                self.data = None
        # rec_ids of priming events still to re-collect during recovery
        self.rec_ids = self._get_rec_ids_from_data()
        # priming events gathered so far (filled by need_to_analyze)
        self.prime_incidents = []
        return

    @abstractmethod
    def get_pool(self):
        ''' Get the pool.
            Allows subclasses to control where the pool is managed.
        '''
        pass

    def need_to_analyze(self, event):
        ''' If before my checkpointed rec_id then don't need to process.

            Returns False for every event seen during recovery; collects
            priming events along the way.  Once an event past the
            checkpointed pool rec_id arrives, restarts the pool and raises
            CheckpointRecoveryComplete to signal recovery is done.
        '''
        chk_rec_id = event.rec_id
        # Check if into new pool
        if chk_rec_id > self.pool_rec_id:
            if len(self.rec_ids) != 0:
                # Some priming events never showed up during replay
                self.msg_target.warning('Not all priming events were available.  Missing: {0}'.format(str(self.rec_ids)))
            self._start_pool()
            raise CheckpointRecoveryComplete(self.pool_rec_id, '{0}'.format(self.name))
        # Check if one of the priming events
        if chk_rec_id in self.rec_ids:
            self.prime_incidents.append(event)
            self.rec_ids.remove(chk_rec_id)
        return False

    def set_checkpoint_from_pool(self):
        ''' Set the checkpoint based on the pool returned by get_pool().
            Should only be called once the pool has been closed.
        '''
        # Get the current pool from the ruleset
        pool = self.get_pool()
        # Gather information from pool; nothing processed means nothing to save
        if pool.last_incident is None:
            self.msg_target.debug('Not updated checkpoint because nothing processed (last_incident is None)')
            return
        t_min, t_data = self._gen_min_and_data(pool)
        self.set_checkpoint(t_min, t_data)
        return

    def _start_pool(self):
        ''' Initialize and start the pool using the checkpoint data.

            Re-applies the checkpointed priming entries (suppressed vs
            unsuppressed, remaining time, duration extension) to the pool,
            then starts it at the checkpointed start time.
        '''
        # Get the current pool from the ruleset
        pool = self.get_pool()
        # Can't checkpoint unless pool is closed
        if pool.state != POOL_STATE_NEW:
            self.msg_target.warning('Unable start from checkpoint with pool that is not NEW')
            return
        if self.data is not None:
            t_list = json.loads(self.data)
            parse_data = re.compile(PARSE_DATA_DEF)
            # Prime: parse each compacted entry <rec_id><U|S><time_left>[+<ext>]
            # into rec_id -> (sup_flag, time_left, ext_time)
            t_dict = {}
            for t_ent in t_list[2]:
                t_p = parse_data.search(t_ent)
                # Get extension time (the '+<dur_ext_time>' suffix is optional)
                if t_p.group('ext') is None:
                    t_ext = 0
                else:
                    t_ext = long(t_p.group('ext'))
                t_dict[long(t_p.group('rec_id'))] = (t_p.group('sup'), long(t_p.group('left')), t_ext)

            # Know there will be one for each of these because they were used to create it
            # NOTE(review): self.rec_ids holds plain rec_id longs (see
            # _get_rec_ids_from_data), yet t_event is used below with
            # .rec_id / .event_id attributes; this looks like it should
            # iterate self.prime_incidents instead -- confirm against callers.
            for t_event in self.rec_ids:
                t_sup, t_min, t_dur = t_dict[t_event.rec_id]
                pool.moved_forward.append(t_event)
                if t_sup == 'S':
                    pool.suppressed[t_event.event_id].append(t_event)
                else:
                    pool.incidents[t_event.event_id].append(t_event)
                # Duration extensions accumulate but are capped at max_duration
                pool.duration = min(pool.duration + t_dur, pool.max_duration)
                pool.min_time_incidents[t_event] = (t_min, 0, t_dur)

            # Going to start pool so don't have to start when next incident added
            pool._add_incident = pool._add_incident_ACTIVE_SUBSEQUENT
            # Now start the pool with the checkpoint start time
            pool.start(datetime.strptime(t_list[0], '%Y-%m-%d %H:%M:%S.%f'))

        return

    def _get_rec_ids_from_data(self):
        ''' Read the priming rec_ids from the checkpoint data.

            Returns a list of rec_id longs parsed from the third element of
            the JSON data, or [] when there is no checkpoint data.
        '''
        if self.data is None:
            return []
        parse_data = re.compile(PARSE_DATA_DEF)
        return [long(parse_data.search(t_list).group('rec_id')) for t_list in json.loads(self.data)[2]]

    def _gen_min_and_data(self, pool):
        ''' Generate the checkpoint data string and the min rec_id.

            Returns (min_rec_id, data_string), or (None, None) when the pool
            never started (pool.start_time is None).  The data string is
            JSON assembled by hand, hence the manual quoting of entries.
        '''
        mf_list = []
        min_rec_id = pool.last_incident.rec_id
        for incident in pool.min_time_incidents.keys():
            # Entry format: <rec_id><U|S><time_left>[+<dur_ext_time>]
            mf_entry = str(incident.rec_id)
            # Note that generated rec_id always starts with 1, so OK to subtract 1
            min_rec_id = min(min_rec_id, incident.rec_id - 1)
            tid = incident.get_incident_id()
            if tid in pool.suppressed.keys() and incident in pool.suppressed[tid]:
                mf_entry += 'S'
            else:
                mf_entry += 'U'
            # For events moving forward: update duration, add to moving forward list
            min_left, added_at_sec, dur_ext = pool.min_time_incidents[incident]

            # Calculate new min time in pool  (note: we know positive because if it wasn't, it
            #  would not have moved forward -- cleanup in close)
            mf_entry += str(min_left - (pool.duration - added_at_sec))
            if dur_ext != 0:
                mf_entry += '+{0}'.format(str(dur_ext))
            mf_list.append('"{0}"'.format(mf_entry))
        if pool.start_time is None:
            # Pool never started: nothing to checkpoint
            return (None, None)
        else:
            t_time = pool.start_time + timedelta(seconds=pool.duration)
        return (min_rec_id, '["{0}", "{1}", [{2}]]'.format(str(t_time), pool.last_incident.rec_id, ','.join(mf_list)))