Example 1
 def start(self):
     CompositeNode.start(self)
     self._set_zip_file()
     thread = Thread(name="AutoDiscovery", target=self.kick_start_discovery)
     scheduler.after(2, thread.start)
     self.ad = AutoDiscovery()
     self.ad.configure({"parent":self,"name":"AutoDiscover"})
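
All of these examples call scheduler.after(delay, callable, args) and keep the returned handle so it can later be cancelled or rescheduled. Below is a minimal sketch of that idiom, assuming an mpx-style scheduler module; the import path and the Poller class are illustrative assumptions, not code from the examples (the same self-rescheduling pattern appears in the poll_alarms() examples further down, and the swap-and-cancel pattern in Examples 23 and 24).

from mpx.lib import scheduler  # assumed import path for the scheduler used above

class Poller:
    # Illustrative class: runs some work every `period` seconds by
    # rescheduling itself, since scheduler.after() fires only once.
    def __init__(self, period=15):
        self.period = period
        self.scheduled = None  # handle returned by scheduler.after()

    def start(self):
        # Schedule the first poll; poll() reschedules each subsequent one.
        self.scheduled = scheduler.after(self.period, self.poll, ())

    def poll(self):
        self.do_work()
        self.scheduled = scheduler.after(self.period, self.poll, ())

    def stop(self):
        # Swap the handle out first, then cancel the pending callback.
        scheduled, self.scheduled = self.scheduled, None
        if scheduled is not None:
            scheduled.cancel()

    def do_work(self):
        pass  # placeholder for the real polling work
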
Example 2
 def _output(self, value):
     value = value ^ self.reverse_output
     if self.enabled and (value is not self._value):
         self.DEBUG3("_drive_output:  self.output=%s\n", self.output)
         if self._output_state == self._SAFETY:
             self._waiting_value = value
         else:
             self.output.set(value)
             self._waiting_value = None
             self._value = value
             if (value == 1) and self.min_on_time > 0:
                 self._output_state = self._SAFETY
                 scheduler.after(self.min_on_time,
                                 self._clear_safety,())
                 self._changes_at = time.time() + self.min_on_time
             elif (value == 0) and self.min_off_time > 0:
                 self._output_state = self._SAFETY
                 scheduler.after(self.min_off_time,
                                 self._clear_safety,())
                 self._changes_at = time.time() + self.min_off_time
         self.DEBUG2("_drive_output:  set:%r, got:%r\n",
                    value, self.output.get)
     else:
         self.DEBUG3("_drive_output:  No change\n")
     self.DEBUG3("_drive_output: return\n")
     return
Example 3
 def export(self, alarm, attempt=0):
     self._lock.acquire()
     try:
         if (not self._started):
             self._alarm.append(alarm)
             # No need to set scheduler here; start() will call
             # export_waiting_alarm()...
             return
         # Even if this node is already started, do not attempt to
         # export alarm unless the linked log node and its collector
         # object are extant and started:
         if (self.log.collector is None):
             self._alarm.append(alarm)
             if (self._waiting_alarm_sid is
                     None):  # if we're not already scheduled, do it:
                 # Need to wait long enough for log.start() to finish creating
                 # and starting collector. ***GUESS*** 10.0 sec. Symptom of not
                 # waiting long enough: ENotStarted error raised below:
                 self._waiting_alarm_sid = scheduler.after(
                     10.0, self.export_waiting_alarm, ())
             return
     finally:
         self._lock.release()
     self.log.collector.pause()
     try:
         try:
             if not self.log.collector.running:
                 raise ENotStarted('Collector not started yet.')
             entry = self.log.collector.get_entry()
             entry[self.ts_position] = time.time()
             # Stamp source, if target log columns support it:
             if isinstance(self.trigger_node_url_posn, int):
                 entry[self.trigger_node_url_posn] = as_node_url(
                     alarm.source)
             if isinstance(self.trigger_node_msg_posn, int):
                 entry[self.trigger_node_msg_posn] = str(alarm)
             self.log.add_entry(entry)
             t = time.time()
             for child in self.log.get_child('exporters').children_nodes():
                 child.go(t)  # starts threads for long ops
         except:
             msglog.exception()
             if attempt > alarm.source.send_retries:
                 msglog.log('broadway', msglog.types.WARN,
                            'Export of alarm failed, aborting send.')
                 raise MpxException('Log and export failed.')
             else:
                 msglog.log('broadway', msglog.types.WARN,
                            'Log on alarm failed, delaying 1.0 sec.')
                 self._lock.acquire()
                 try:
                     if self._scheduled != None:
                         scheduler.cancel(self._scheduled)
                     self._scheduled = scheduler.after(
                         1, self.export, (alarm, attempt + 1))
                 finally:
                     self._lock.release()
     finally:
         self.log.collector.play()
     return
Example 4
def start(period=1.0, *args):
    global should_run
    if not should_run:
        should_run = 1
        first_time = math.floor(time.time()) + period
        scheduler.at(first_time, absolute_callback, (period,first_time,))
        scheduler.after(first_time-time.time(),
                        relative_callback, (period,first_time,))
    return
Example 5
 def export(self, alarm, attempt=0):
     self._lock.acquire()
     try:
         if (not self._started):
             self._alarm.append(alarm)
             # No need to set scheduler here; start() will call 
             # export_waiting_alarm()...
             return
         # Even if this node is already started, do not attempt to 
         # export alarm unless the linked log node and its collector 
         # object are extant and started:
         if (self.log.collector is None):
             self._alarm.append(alarm)
             if (self._waiting_alarm_sid is None): # if we're not already scheduled, do it:
                 # Need to wait long enough for log.start() to finish creating
                 # and starting collector. ***GUESS*** 10.0 sec. Symptom of not
                 # waiting long enough: ENotStarted error raised below:
                 self._waiting_alarm_sid = scheduler.after(10.0, self.export_waiting_alarm, ())
             return
     finally:
         self._lock.release()
     self.log.collector.pause()
     try:
         try:
             if not self.log.collector.running:
                 raise ENotStarted('Collector not started yet.')
             entry = self.log.collector.get_entry()
             entry[self.ts_position] = time.time()
             # Stamp source, if target log columns support it:
             if isinstance(self.trigger_node_url_posn, int):
                 entry[self.trigger_node_url_posn] = as_node_url(alarm.source)
             if isinstance(self.trigger_node_msg_posn, int):
                 entry[self.trigger_node_msg_posn] = str(alarm)
             self.log.add_entry(entry)
             t = time.time()
             for child in self.log.get_child('exporters').children_nodes():
                 child.go(t) # starts threads for long ops
         except:
             msglog.exception()
             if attempt > alarm.source.send_retries:
                 msglog.log('broadway',msglog.types.WARN,
                            'Export of alarm failed, aborting send.')
                 raise MpxException('Log and export failed.')
             else:
                 msglog.log('broadway',msglog.types.WARN,
                            'Log on alarm failed, delaying 1.0 sec.')
                 self._lock.acquire()
                 try:
                     if self._scheduled != None:
                         scheduler.cancel(self._scheduled)
                     self._scheduled = scheduler.after(1,self.export,
                                                       (alarm,attempt+1))
                 finally:
                     self._lock.release()
     finally:
         self.log.collector.play()
     return
Example 6
 def scan(self):
     print 'scan'
     try:
         if self.running:
             self.scan_nodes_for_changes()
             self.scan_spread_sheet_for_changes()
             scheduler.after(self.period, self.scan)
     except:
         msglog.exception()
         scheduler.after(self.period, self.restart)
 def _schedule(self):
     try:
         float(self.DELAY)
     except:
         print "WARNING:  Bad DELAY (%r), setting to 1.0" % self.DELAY
         self.DELAY=1.0
     if self.DELAY > 0.0:
         scheduler.after(self.DELAY, self._queue)
     else:
         self._queue()
     return
Example 8
def relative_callback(period,*args):
    global should_run
    this_time = time.time()
    if should_run:
        next_time = math.floor(time.time()) + period
        scheduler.after(next_time-time.time(),
                        relative_callback, (period,next_time,))
        print 'R: %s %s' % (this_time, args)
    else:
        print 'R: %s %s - STOPPED' % (this_time, args)
    return
Example 9
 def handle_propogation_failure(self, notifier):
     cloudevent = notifier.cloudevent
     target_peer = notifier.peer
     # TODO: generate comm failure error to propagate as well.
     # Propagate the event to the Cloud Managers that target_peer would have notified.
     '''
     The target_peer can be a portal or a peer.
     If it is a portal, we do not put it in unreachables and we do not
     propagate the event; we only log that the portal is not reachable.
     '''
     portal = self.nformation.get_portal()
     if (portal is not None) and utils.same_host(target_peer, portal):
         msg = 'Portal %s is not reachable.' % portal
         self.message(msg)
         return
     
     scheduled = self.unreachable.get(target_peer)
     if scheduled is not None:
         scheduled.cancel()
         self.message('Host %s already listed unreachable, reset scheduled retry.' % target_peer)
     self.unreachable[target_peer] = scheduler.after(5 * 60, self._remove_from_unreachable, (target_peer,))
     self.message('Host %s added to list of unreachable peers.' % target_peer)
     self.propogate(cloudevent, target_peer)
Example 10
 def _dispatch(self, event):
     if self.hysteresis != 0:
         comparison = self._comparison_operator
         if isinstance(event, TriggerActivated):
             # Comparison is True
             if comparison == '>':
                 # Input is > constant
                 self.set_constant(self._constant - self.hysteresis)
             elif comparison == '<':
                 # Input is < constant
                 self.set_constant(self._constant + self.hysteresis)
             else: raise ValueError('Operator %s unknown.' % comparison)
         elif isinstance(event, TriggerCleared):
             # Comparison is False, clear hysteresis
             self.set_constant(self._constant)
         else: raise TypeError('Event of unknown type.')
     if self.alarm_delay:
         self.synclock.acquire()
         try:
             if isinstance(event, TriggerActivated):
                 self.deferred = scheduler.after(self.alarm_delay, 
                                                 self._deferred_dispatch,
                                                 (event,))
             elif isinstance(event, TriggerCleared) and self.deferred:
                 scheduled, self.deferred = self.deferred, None
                 if scheduled:
                     scheduled.cancel()
             else:
                 super(ComparisonTrigger, self)._dispatch(event)
         finally: self.synclock.release()
     else:
         super(ComparisonTrigger, self)._dispatch(event)
Example 11
 def _setup_formation(self, formation, portal):
     scheduled, self._scheduled = self._scheduled, None
     if scheduled is not None:
         try: scheduled.cancel()
         except: pass
         else: self.message('Canceled pending dispatch of formation update.')
     self.nformation.set_portal(portal)
     self.nformation.set_formation(formation)
     self.target_formation = self.nformation.compute_targets()
     self.message('Resetting unreachables during Cloud setup.')
     self.reset_unreachables()
     dispatch, delay = self.nformation.compute_dispatch_info()
     if dispatch:
         self._scheduled = scheduler.after(delay, self.dispatcher.dispatch,
                                           (FormationUpdated(self),))
         self.message('Scheduled dispatch in %s seconds.' % delay)
     else:
         self.message('Formation of one peer, no Updated event generated.')

     # Save the PDO if the formation or portal has changed.
     if ((self._pdo.formation != formation) or (self._pdo.portal != portal)
             or (self._pdo.peer != self.peer)):
         self.message('New formation/portal found, pickling. New formation: %s, portal: %s'
                      % (str(formation), portal))
         self._pdo.formation = formation[:]
         self._pdo.portal = portal
         self._pdo.peer = self.peer
         tstart = time.time()
         self._pdo.save()
         tend = time.time()
         self.message('New formation pickled and saved in %s seconds.' % (tend - tstart))
     else:
         self.message('Formation/Portal has not changed; not pickling it.')
Example 12
 def note_modified(self, persistent, modified):
     self.lock.acquire()
     try:
         super(TimedPolicy, self).notify_modified(persistent, modified)
         if self.scheduled is None:
             self.scheduled = scheduler.after(self.seconds, self.commit)
     finally: self.lock.release()
Example 13
 def _dispatch(self, event):
     if self.hysteresis != 0:
         comparison = self._comparison_operator
         if isinstance(event, TriggerActivated):
             # Comparison is True
             if comparison == '>':
                 # Input is > constant
                 self.set_constant(self._constant - self.hysteresis)
             elif comparison == '<':
                 # Input is < constant
                 self.set_constant(self._constant + self.hysteresis)
             else:
                 raise ValueError('Operator %s unknown.' % comparison)
         elif isinstance(event, TriggerCleared):
             # Comparison is False, clear hysteresis
             self.set_constant(self._constant)
         else:
             raise TypeError('Event of unknown type.')
     if self.alarm_delay:
         self.synclock.acquire()
         try:
             if isinstance(event, TriggerActivated):
                 self.deferred = scheduler.after(self.alarm_delay,
                                                 self._deferred_dispatch,
                                                 (event, ))
             elif isinstance(event, TriggerCleared) and self.deferred:
                 scheduled, self.deferred = self.deferred, None
                 if scheduled:
                     scheduled.cancel()
             else:
                 super(ComparisonTrigger, self)._dispatch(event)
         finally:
             self.synclock.release()
     else:
         super(ComparisonTrigger, self)._dispatch(event)
Example 14
 def note_modified(self, persistent, modified):
     self.lock.acquire()
     try:
         super(TimedPolicy, self).notify_modified(persistent, modified)
         if self.scheduled is None:
             self.scheduled = scheduler.after(self.seconds, self.commit)
     finally:
         self.lock.release()
Example 15
 def _schedule(self):
     self.DEBUG3("_schedule():\n")
     self._cancel_polling()
     if self.__running:
         self._poll_event = scheduler.after(self._period,
                                            self._lock_and_poll_input)
     self.DEBUG3("_schedule: return\n")
     return
Example 16
 def __schedule_discover(self):
     delay = self.__discover_delays[min(self.__discover_attempts,
                                        len(self.__discover_delays)-1)]
     msglog.log("SNMP", msglog.types.INFO,
                "Discover of %r scheduled in %r seconds." %
                (self.as_node_url(), delay))
     self.__discover_event = scheduler.after(delay, self.__queue_discover)
     return
Example 17
 def application_change_detector(self, starting=0):
     try:
         if self.check_and_load_application_files(starting):
             self._status = 'Starting control applications'
             msglog.log(self.as_node_url(), msglog.types.INFO,
                        'Stage 5:  Start Application templates.')
             self._start()
             self._status = 'Running'
             msglog.log(self.as_node_url(), msglog.types.INFO,
                        'Application templates started.')
             self.prune_orphaned_schedules()
             # schedule manager must see control service as running to work
         scheduler.after(13, self.application_change_detector)
     except:
         self._status = 'ERROR: check message log'
         msglog.exception()
         scheduler.after(60, self.application_change_detector)
Example 18
 def _schedule(self):
     self.DEBUG3("_schedule():\n")
     self._cancel_polling()
     if self.__running:
         self._poll_event = scheduler.after(
             self._period, self._lock_and_poll_input)
     self.DEBUG3("_schedule: return\n")
     return
Example 19
 def application_change_detector(self, starting=0):
     try:
         if self.check_and_load_application_files(starting):
             self._status = 'Starting control applications'
             msglog.log(self.as_node_url(), msglog.types.INFO,
                        'Stage 5:  Start Application templates.')
             self._start()
             self._status = 'Running'
             msglog.log(self.as_node_url(), msglog.types.INFO,
                        'Application templates started.')
             self.prune_orphaned_schedules()
             # schedule manager must see control service as running to work
         scheduler.after(13, self.application_change_detector)
     except:
         self._status = 'ERROR: check message log'
         msglog.exception()
         scheduler.after(60, self.application_change_detector)
Example 20
    def poll_alarms(self):
        if not self._running:
            return
        ret_alarms = []
        new_alarms = self.h_alarms.get_new_alarms()

        if new_alarms:
            for rsp in new_alarms:
                if rsp.is_critical():
                    al_type = "Critical"
                else:
                    al_type = "Non-Critical"
                a = Alarm(
                    id=rsp.unitnum(),
                    type=al_type,
                    source=rsp.unitnum(),
                    data=rsp.message(),
                    state=rsp.code(),
                    timestamp=rsp.time(),
                )
                ret_alarms.append(a)

        if self.generatebogus:
            # Generate bogus alarms roughly 1/16 of the time
            roll = self.rand.randint(1, 16)
            if roll == 16:
                how_many = 1
                do_multiple = self.rand.randint(0, 1)
                if do_multiple:
                    how_many = self.rand.randint(1, 10)
                if self.debug:
                    print "%f: Generating %d random alarm(s)." % (time.time(), how_many)
                for i in range(0, how_many):
                    is_not_crit = self.rand.randint(0, 4)
                    if is_not_crit == 0:
                        al_type = "Critical"
                    else:
                        al_type = "Non-Critical"
                    a = Alarm(
                        id="test_%.2d" % (i + 1),
                        type=al_type,
                        source=1,
                        data="This is test alarm #%d." % (i + 1),
                        state=i,
                        timestamp=time.time(),
                    )
                    ret_alarms.append(a)

        if ret_alarms:
            ae = NewAlarmsEvent(self, ret_alarms)
            self.parent.event_generate(ae)

            # While we are at it, acknowledge any critical alarms.
            self.h_alarms.ack_critical_alarms()

        self.sid = scheduler.after(15, self.poll_alarms)
Example 21
 def check_startup(self):
     self.synclock.acquire()
     try:
         if self.manager.is_running():
             self.scheduled_startup = None
             NORMAL.queue_noresult(self._initialize)
         else:
             self.scheduled_startup = scheduler.after(5,self.check_startup)
     finally:
         self.synclock.release()
Example 22
 def start(self):
     if self.is_running():
         self.stop()
     self.synclock.acquire()
     try:
         self.manager = as_node("/services/Alarm Manager")
         self.scheduled_startup = scheduler.after(0, self.check_startup)
     finally:
         self.synclock.release()
     return super(EventStore, self).start()
Example 23
 def reschedule(self, delay=None):
     scheduled, self.scheduled = self.scheduled, None
     if scheduled:
         scheduled.cancel()
     if self._running.isSet():
         if delay is None:
             delay = self.poll_period
         self.scheduled = scheduler.after(delay, self.manager.queue_trigger,
                                          (self, ))
     return self.scheduled
Example 24
 def reschedule(self, delay = None):
     scheduled, self.scheduled = self.scheduled, None
     if scheduled: 
         scheduled.cancel()
     if self._running.isSet():
         if delay is None:
             delay = self.poll_period
         self.scheduled = scheduler.after(
             delay, self.manager.queue_trigger, (self,))
     return self.scheduled
Example 25
 def _reschedule(self):
     self._schedule_lock.acquire()
     try:
         if (self._scheduled is None or 
             self._scheduled.executing() or
             self._scheduled.expired()):
             self._scheduled = scheduler.after(
                 self.poll_period,self._manager.queue_alarm_check,(self,))
     finally:
         self._schedule_lock.release()
     return
Example 26
 def propagate(self, data):
     """
         To be called by asyncore-polling loop.  Use 
         notify to trigger the propagate event.
     """
     self.message("%s propagate creating socket." % self)
     self.create_socket(socket.AF_INET,socket.SOCK_STREAM)
     self.message("%s propagate connecting %s:%d" % (self, self.peer, self.port))
     self.connect((self.peer,self.port))
     self.scheduled_timeout = scheduler.after(30, self.handle_timeout)
     self.ac_out_buffer = data
Example 27
 def _reschedule(self):
     self._schedule_lock.acquire()
     try:
         if (self._scheduled is None or self._scheduled.executing()
                 or self._scheduled.expired()):
             self._scheduled = scheduler.after(
                 self.poll_period, self._manager.queue_alarm_check,
                 (self, ))
     finally:
         self._schedule_lock.release()
     return
Example 28
    def poll_alarms(self):
        if not self._running:
            return
        ret_alarms = []
        new_alarms = self.h_alarms.get_new_alarms()

        if new_alarms:
            for rsp in new_alarms:
                if rsp.is_critical():
                    al_type = 'Critical'
                else:
                    al_type = 'Non-Critical'
                a = Alarm(id=rsp.unitnum(),
                          type=al_type,
                          source=rsp.unitnum(),
                          data=rsp.message(),
                          state=rsp.code(),
                          timestamp=rsp.time())
                ret_alarms.append(a)

        if self.generatebogus:
            # Generate bogus alarms roughly 1/16 of the time
            roll = self.rand.randint(1, 16)
            if roll == 16:
                how_many = 1
                do_multiple = self.rand.randint(0, 1)
                if do_multiple:
                    how_many = self.rand.randint(1, 10)
                if self.debug:
                    print '%f: Generating %d random alarm(s).' % (time.time(),
                                                                  how_many)
                for i in range(0, how_many):
                    is_not_crit = self.rand.randint(0, 4)
                    if is_not_crit == 0:
                        al_type = 'Critical'
                    else:
                        al_type = 'Non-Critical'
                    a = Alarm(id='test_%.2d' % (i + 1),
                              type=al_type,
                              source=1,
                              data='This is test alarm #%d.' % (i + 1),
                              state=i,
                              timestamp=time.time())
                    ret_alarms.append(a)

        if ret_alarms:
            ae = NewAlarmsEvent(self, ret_alarms)
            self.parent.event_generate(ae)

            # While we are at it, acknowledge any critical alarms.
            self.h_alarms.ack_critical_alarms()

        self.sid = scheduler.after(15, self.poll_alarms)
Example 29
 def restart(self):
     self.shutdown() #start from a known state
     if self.open_socket():
         if self.login():
             if self.debug: print 'login successful - start polling'
             self.service_thread = threading.ImmortalThread(None, self.poll_screens, 'CPC', reincarnate=self.poll_error)
             self.service_thread.start()
             self.__running = 1
             return 1
     self.shutdown()
     self.retry_schedule = scheduler.after(60.0,self.restart)
     msglog.log(INFO, 'CPC', 'retry starting CPC device in 60 seconds')
     return 0
Example 30
   def poll_alarms(self):
      if not self._running:
         return

      if debug > 1:
         print '%f: In poll_alarms().' % time.time()

      alarms = self.get_alarms()
  
      ret_alarms = []
      
      for x in alarms:
         if not x in self.old_alarms:
            if debug:
               print 'Found a new alarm: %s' % str(x)

            # Note: @fixme:  At some point we will probably do some filtering
            #       on the "alarms" we get back from the BCU because apparently
            #       they may include some event type information for which we
            #       don't want to create an Alarm Event.  FredD apparently
            #       has the information for differentiating between events
            #       and real alarms from a BCU.

            # Note: x looks like:
            # {'priority': '', 'ack': 'No', 'from': 'BCU-01', 'SN': 0,
            #  'date': 1068221677.0, 'type': 'Watchdog Timeout',
            #  'detail': ''
            # }
            a = Alarm(id=x['SN'],
                      type=x['type'],
                      source=x['from'],
                      timestamp=x['date'],
                      data=x['detail'],
                      priority=x['priority'],
                      acked=x['ack'])
            if debug:
               print 'Got new alarm: %s.' % str(a)
            ret_alarms.append(a)
         else:
            if debug:
               print 'Ignoring alarm, it has already been reported'
      if ret_alarms:
         self.parent.put_alarms(ret_alarms)
      # Note: @fixme: old_alarms should be a PDO so that we don't resend alarms
      #       that have already been seen every time we start up.  For now this
      #       behavior may actually be useful for testing but probably will
      #       not be a "feature" in the real world.
      self.old_alarms = alarms
      self.sid = scheduler.after(15, self.poll_alarms)
Example 31
    def poll_alarms(self):
        if not self._running:
            return

        if debug > 1:
            print '%f: In poll_alarms().' % time.time()

        alarms = self.get_alarms()

        ret_alarms = []

        for x in alarms:
            if not x in self.old_alarms:
                if debug:
                    print 'Found a new alarm: %s' % str(x)

                # Note: @fixme:  At some point we will probably do some filtering
                #       on the "alarms" we get back from the BCU because apparently
                #       they may include some event type information for which we
                #       don't want to create an Alarm Event.  FredD apparently
                #       has the information for differentiating between events
                #       and real alarms from a BCU.

                # Note: x looks like:
                # {'priority': '', 'ack': 'No', 'from': 'BCU-01', 'SN': 0,
                #  'date': 1068221677.0, 'type': 'Watchdog Timeout',
                #  'detail': ''
                # }
                a = Alarm(id=x['SN'],
                          type=x['type'],
                          source=x['from'],
                          timestamp=x['date'],
                          data=x['detail'],
                          priority=x['priority'],
                          acked=x['ack'])
                if debug:
                    print 'Got new alarm: %s.' % str(a)
                ret_alarms.append(a)
            else:
                if debug:
                    print 'Ignoring alarm, it has already been reported'
        if ret_alarms:
            self.parent.put_alarms(ret_alarms)
        # Note: @fixme: old_alarms should be a PDO so that we don't resend alarms
        #       that have already been seen every time we start up.  For now this
        #       behavior may actually be useful for testing but probably will
        #       not be a "feature" in the real world.
        self.old_alarms = alarms
        self.sid = scheduler.after(15, self.poll_alarms)
Example 32
 def _output(self, value):
     value = value ^ self.reverse_output
     if self.enabled and (value is not self._value):
         self.DEBUG3("_drive_output:  self.output=%s\n", self.output)
         if self._output_state == self._SAFETY:
             self._waiting_value = value
         else:
             self.output.set(value)
             self._waiting_value = None
             self._value = value
             if (value == 1) and self.min_on_time > 0:
                 self._output_state = self._SAFETY
                 scheduler.after(self.min_on_time, self._clear_safety, ())
                 self._changes_at = time.time() + self.min_on_time
             elif (value == 0) and self.min_off_time > 0:
                 self._output_state = self._SAFETY
                 scheduler.after(self.min_off_time, self._clear_safety, ())
                 self._changes_at = time.time() + self.min_off_time
         self.DEBUG2("_drive_output:  set:%r, got:%r\n", value,
                     self.output.get)
     else:
         self.DEBUG3("_drive_output:  No change\n")
     self.DEBUG3("_drive_output: return\n")
     return
Example 33
 def wait(self, timeout=None):
     if not self._lock.locked():
         raise AssertionError('wait called on un-acquire()d lock')
     lock = _ThreadWrapper(_allocate_lock())
     lock.acquire()
     id = None
     if timeout is not None:
         id = scheduler.after(timeout, self._timeout, (lock,))
     lock.set_id(id)
     self._locks.append(lock)
     self.release()
     try:
         lock.acquire()
     finally:
         self.acquire()
Example 34
 def wait(self, timeout=None):
     if not self._lock.locked():
         raise AssertionError('wait called on un-acquire()d lock')
     lock = _ThreadWrapper(_allocate_lock())
     lock.acquire()
     id = None
     if timeout is not None:
         id = scheduler.after(timeout, self._timeout, (lock, ))
     lock.set_id(id)
     self._locks.append(lock)
     self.release()
     try:
         lock.acquire()
     finally:
         self.acquire()
Example 35
 def restart(self):
     self.shutdown()  #start from a known state
     if self.open_socket():
         if self.login():
             if self.debug: print 'login successful - start polling'
             self.service_thread = threading.ImmortalThread(
                 None,
                 self.poll_screens,
                 'CPC',
                 reincarnate=self.poll_error)
             self.service_thread.start()
             self.__running = 1
             return 1
     self.shutdown()
     self.retry_schedule = scheduler.after(60.0, self.restart)
     msglog.log(INFO, 'CPC', 'retry starting CPC device in 60 seconds')
     return 0
Example 36
 def _tz_change_detector(self):
     sched_after = 13
     try:
         if os.path.islink(P.TIMEZONE_FILE):
             mtime = os.lstat(P.TIMEZONE_FILE)[8]
             if mtime != self._tz_mtime:
                 time.tzset()
                 self._tz_mtime = mtime
         self._in_err = False
     except:
         if not self._in_err:
             msglog.log(self.as_node_url(), msglog.types.WARN,
                        'Error monitoring TZ file')
             msglog.exception()
             self._in_err = True
         sched_after = 61
     self._scheduled = scheduler.after(sched_after, self._tz_change_detector)
Example 37
class GetViaCOV(EventConsumerAbstract):
    def __init__(self, source_node, timeout=960):
        EventConsumerAbstract.__init__(self)
        self.__node = as_internal_node(source_node)
        self.__cond = Condition()
        self.__event = None
        self.__sched = None  #scheduled action to unsubscribe the point
        self.__timeout = timeout  #number of seconds to maintain subscription
        self._event_received = False
        return

    def get_event(self, **keywords):
        if self.__node._pruned_url:
            try:
                self.__node = as_internal_node(self.__node._pruned_url)
            except ENoSuchName, e:
                raise ENoSuchNode(self.__node._pruned_url)
        self.__cond.acquire()
        try:
            if self.__event is None:
                self.__node.event_subscribe(self, ChangeOfValueEvent)
                #@todo  add a way to get subscription started without blocking  - ie return a value of None - based on keyword?
                do_not_wait = keywords.get('do_not_wait', None)
                if do_not_wait:
                    #create a dummy event and put None in the value and return that
                    self.__event = ChangeOfValueEvent(self.__node, None, None)
                else:
                    self.__cond.wait()  #block until the value shows up
            elif not self._event_received:  #if event not nil and no event received it means 'do_not_wait' was used on first call
                do_not_wait = keywords.get('do_not_wait', None)
                if not do_not_wait:
                    # if do_not_wait was used on the last call, now wait for the COV
                    self.__cond.wait()
            if self.__sched:
                # if it's too late to reschedule, reset() returns None
                sched = self.__sched.reset()
                if sched is None:  #too late
                    self.__sched.cancel()  #try to cancel it
                self.__sched = sched
            if self.__sched is None:  #first time or after expiration
                self.__sched = scheduler.after(self.__timeout,
                                               self.unsubscribe)
            return self.__event
        finally:
            self.__cond.release()
Example 38
 def export_waiting_alarm(self):
     if (self._started == 1) \
        and (not self.log.collector is None):
         if not self._waiting_alarm_sid is None:
             try:
                 scheduler.remove(self._waiting_alarm_sid)
             except: # SID may already have expired and been removed...
                 msglog.exception()
             self._waiting_alarm_sid = None
         while len(self._alarm) > 0:
             init_len = len(self._alarm)
             alarm = self._alarm.pop(0)
             self.export(alarm) # should leave alarm off of list...
             if init_len <= len(self._alarm):
                 break # failed to keep alarm off the list
     elif (len(self._alarm) > 0):
         self._waiting_alarm_sid = scheduler.after(10.0, self.export_waiting_alarm, ())
     return
Example 39
 def export_waiting_alarm(self):
     if (self._started == 1) \
        and (not self.log.collector is None):
         if not self._waiting_alarm_sid is None:
             try:
                 scheduler.remove(self._waiting_alarm_sid)
             except:  # SID may already have expired and been removed...
                 msglog.exception()
             self._waiting_alarm_sid = None
         while len(self._alarm) > 0:
             init_len = len(self._alarm)
             alarm = self._alarm.pop(0)
             self.export(alarm)  # should leave alarm off of list...
             if init_len <= len(self._alarm):
                 break  # failed to keep alarm off the list
     elif (len(self._alarm) > 0):
         self._waiting_alarm_sid = scheduler.after(
             10.0, self.export_waiting_alarm, ())
     return
Example 40
 def _tz_change_detector(self):
     sched_after = 13
     try:
         if os.path.islink(P.TIMEZONE_FILE):
             mtime = os.lstat(P.TIMEZONE_FILE)[8] 
             if mtime != self._tz_mtime:
                 time.tzset()
                 self._tz_mtime = mtime           
         self._in_err = False
     except:
         if not self._in_err:
             msglog.log(
                 self.as_node_url(), 
                 msglog.types.WARN,
                 'Error monitoring TZ file'
                 )
             msglog.exception()
             self._in_err = True
         sched_after = 61
     self._scheduled = scheduler.after(sched_after, self._tz_change_detector)
Example 41
 def _heartbeat_action(self):
     self._safe_log('Scheduler Heartbeat')
     try:
         self._sid = scheduler.after(self.heartbeat_period,self._heartbeat_action)
     except Exception, e:
         self._safe_log('Failed to re-schedule Scheduler Heartbeat: %s' % str(e))
Example 42
 def restart(self):
     print 'restart'
     SpreadSheet.restart(self) #connect
     scheduler.after(self.period, self.resync)
Example 43
 def start(self):
     self._sid = scheduler.after(self.heartbeat_period,self._heartbeat_action)
     assert not self._sid is None, 'SchedHeartbeat.start(): Failed to get non-None sid from scheduler'
     CompositeNode.start(self)
     self._running = 1
     return
Example 44
    def start(self):
        # Bad self IP address
        if self.peer == '127.0.0.1':
            msg = ('Cloud facility will not function properly because '
                   'the local IP address is 127.0.0.1.')
            self.message(msg, msglog.types.WARN)
            return
        if not self.channel_monitor.is_running():
            self.channel_monitor.start_monitor()
        self._pdo = PersistentDataObject(self)
        self.message('The Cloud Manager persistent object is in the file %s'
                     % str(self._pdo.filename()), msglog.types.INFO)
        migration = False
        if os.path.exists(self._pdo.filename()):
            # Already migrated
            self._pdo.formation = [self.peer]
            self._pdo.portal = None
            self._pdo.peer = self.peer
            self._pdo.load()
        else:
            # Save a 'default' formation and expect the Cloud Configurator to
            # update _pdo.formation via the update_information API.
            # _setup_formation gets called internally from update_information.
            self._pdo.portal = None
            self._pdo.formation = [self.peer]
            self._pdo.peer = self.peer
            self._pdo.save()
            self._pdo.load()
            migration = True
        
        # Bad formation/peer in the PDO
        if self._pdo.peer not in self._pdo.formation:
            # Bad formation/peer
            self.message('The Cloud Manager PDO in the file %s is corrupted; '
                         'defaulting to a safe configuration.'
                         % str(self._pdo.filename()), msglog.types.WARN)
            self._pdo.portal = None
            self._pdo.formation = [self.peer]
            self._pdo.peer = self.peer
            self._pdo.save()
            self._pdo.load()

        self.message('Hosts are %s, portal=%s, self=%s'
                     % (str(self._pdo.formation), self._pdo.portal, self._pdo.peer),
                     msglog.types.INFO)
        self.nformation = NFormation(self._pdo.formation, self.peer)
        self.nformation.set_portal(self._pdo.portal)

        # IP address change case
        if not utils.same_host(self.peer, self._pdo.peer):
            self.message('Self address change detected: old=%s new=%s. '
                         'Fixing the Cloud Formation accordingly.'
                         % (str(self._pdo.peer), self.peer), msglog.types.INFO)
            formation = self.nformation.get_formation()
            norm_form = self.nformation.normalize_formation(formation)
            # IP address swap
            self_index = norm_form.index(self._pdo.peer)
            formation.pop(self_index)
            formation.insert(0, self.peer)
            self.nformation.set_formation(formation)
               
        '''
        In the case of migration, the update_formation() API is called by
        the Cloud Configurator. In the already-migrated case, we call
        update_formation() with the PDO formation and portal.
        '''

        self.target_formation = self.nformation.compute_targets()

        if not migration:
            self.update_formation(self.nformation.get_formation(),
                                  self.nformation.get_portal())
        
        if self.subscription is None:
            self.subscription = self.add_listener(
                self.handle_formation_update, 'CloudFormation')

        # Send a Cloud event to all the hosts to re-send the alarm events
        # to the Portal again - if we are nbmm.
        if self.is_host_nbmm():
            scheduler.after(10, self.request_for_resending_alarm_events)

        super(CloudManager, self).start()
Example 45
 def schedule(self, delay, callback, args=()):
     return scheduler.after(delay, self.queue, (callback, args))
Example 46
 def start(self):
     Client.start(self)
     self.h_node = as_node(self.node)
     self.h_alarms = self.h_node.get_child("tracer_alarm_points")
     self.sid = scheduler.after(15, self.poll_alarms)
Example 47
 def start(self):
     Client.start(self)
     self.h_node = as_node(self.node)
     self.h_alarms = self.h_node.get_child('tracer_alarm_points')
     self.sid = scheduler.after(15, self.poll_alarms)
Example 48
 def schedule(self):
     self.scheduled = scheduler.after(15, self.trigger)
Example 49
 def start(self):
    Client.start(self)
    self.h_node = as_node(self.node)
    self.sid = scheduler.after(15, self.poll_alarms)
Example 50
 def _schedule_print(self):
     self._scheduled = scheduler.after(60,self._print,(self._stack,))
Example 51
 def reschedule(self, period = None):
     if period is None:
         period = self.period
     if self._running.isSet():
         self._next_run = scheduler.after(period, self._queue_refresh)
Example 52
 def start(self):
     Client.start(self)
     self.h_node = as_node(self.node)
     self.sid = scheduler.after(15, self.poll_alarms)
Example 53
 def reschedule(self, period=None):
     if period is None:
         period = self.period
     if self._running.isSet():
         self._next_run = scheduler.after(period, self._queue_refresh)
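
Several of the examples (the _reschedule and reschedule variants in Examples 25 and 27, and the refresh examples above) guard the scheduler handle with a lock so that at most one callback is pending at a time. Below is a minimal sketch of that guarded pattern, again assuming an mpx-style scheduler handle that exposes cancel(), executing(), and expired(); the RefreshJob class and its names are illustrative assumptions, not code from the examples.

from threading import Lock

from mpx.lib import scheduler  # assumed import path, as above

class RefreshJob:
    # Illustrative class: schedules a refresh callback at most once at a time.
    def __init__(self, period=30):
        self.period = period
        self._scheduled = None
        self._schedule_lock = Lock()

    def reschedule(self):
        # Only schedule a new callback if there is no pending one, or if the
        # pending one is already executing or has expired (as in Examples 25 and 27).
        self._schedule_lock.acquire()
        try:
            if (self._scheduled is None or
                    self._scheduled.executing() or
                    self._scheduled.expired()):
                self._scheduled = scheduler.after(self.period, self.refresh, ())
        finally:
            self._schedule_lock.release()

    def refresh(self):
        pass  # placeholder for the real refresh work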