Example #1
 def get_bdt_from(self, network, ip_address, ip_port):
     a = _ip_format_str_as_ulong(ip_address)
     p = int(ip_port)
     addr = Addr(_ulong_as_str(a) + _ushort_as_str(p))
     try:
         _module_lock.acquire()
         self.reports = {}
     finally:
         _module_lock.release()
     if debug > 1:
         _dump_send(network, addr.address,
                    ReadBroadcastDistributionTable().encoding)
     send(network, addr.address, ReadBroadcastDistributionTable().encoding)
     timeout = 0
     _module_lock.acquire()
     try:
         while (len(self.reports.values()) < 1) and (timeout < 10):
             _module_lock.release()
             pause(0.5)
             timeout += 1
             _module_lock.acquire()
     finally:
         try:
             _module_lock.release()
         except:
             pass
     _module_lock.acquire()
     try:
         if (len(self.reports.values()) > 0):
             self.entries = self.reports.values()[0]
     finally:
         _module_lock.release()
     return self.report(network)
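Example #1 (and its duplicate, Example #10 below) polls for replies by repeatedly releasing the module lock, calling pause(0.5), and re-acquiring the lock until a report arrives or ten half-second waits have elapsed. The same wait-for-reply-or-timeout idea can be written against the standard library's threading.Condition; the sketch below is purely illustrative, uses hypothetical names, and is not part of the MPX API.

import threading
import time

_cond = threading.Condition()
_reports = {}

def deliver_report(key, value):
    # Called from the receive thread when a reply arrives.
    with _cond:
        _reports[key] = value
        _cond.notify_all()

def wait_for_reports(timeout=5.0):
    # Block until at least one report is present or the timeout elapses,
    # then return a snapshot of whatever arrived.
    deadline = time.time() + timeout
    with _cond:
        while not _reports:
            remaining = deadline - time.time()
            if remaining <= 0:
                break
            _cond.wait(remaining)
        return dict(_reports)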
Example #2
 def test_exporter_v1(self):
     import _test_lib
     log = _test_lib.trap_log_factory()
     exporter = _test_lib.trap_exporter_factory(parent=log)
     log.start()
     self.assert_comparison("tuple(self.msglog_object[:])", "==", "()")
     version='1'
     context_engine_id='bogus-id'
     context_name='bogus-context'
     address='192.168.1.1'
     sysUpTime='1 day 03:04:05.06'
     trap='Test-MIB::logTestTrap'
     trap_enterprise='Test-MIB::RZ'
     varBinds=(('Test-MIB::Payload', 0),)
     logtime=time.time()
     log.log_trap(version, context_engine_id, context_name, address,
                  sysUpTime, trap,
                  trap_enterprise,
                  varBinds,
                  logtime)
     from mpx.lib import pause
     pause(1)
     log.stop()
     self.assert_comparison("tuple(self.msglog_object[:])", "==", "()")
     return
Example #3
 def test_data_loss(self):
     self.log.trim_ge('_seq', 0)
     self.log._write_queue()
     self.log._trim_lock.acquire()
     self.log._write_lock.acquire()
     try:
         entries = []
         reverse = -1
         for i in range(0, 5):
             t = time.time()
             entries.append([t, reverse, 2, 3])
             self.log.add_entry(entries[-1])
             reverse -= 1
             pause(.1)
     finally:
         self.log._write_lock.release()
         self.log._trim_lock.release()
     self.log._write_queue()
     data = self.log.get_range('timestamp', 0, time.time())
     self.failUnless(
         data[0]['timestamp'] == entries[0][0]
         and data[-1]['timestamp'] == entries[-1][0],
         'Log failed to get correct beginning or end')
     self.failUnless(
         len(data[:]) == len(entries), 'Some entries lost during trim')
     return
Example #4
 def test_cross_thread(self):
     # @note:  This test is relying on the write being large enough to
     #         fill all the OS buffers and block.
     #
     # @note:  Methinks this test relies on too many side effects...
     too_big_for_one_write = 1000000
     some_of_but_not_all_of_it = 65536
     stream = CrossThreadStream()
     cv = Condition()
     t1 = Thread(target=_writer, args=(cv,stream,too_big_for_one_write))
     cv.acquire()
     t1.start()
     # @note:  This pause should cause the _writer to block since it is
     #         trying to write too_big_for_one_write.
     pause(2)
     data = stream.read(some_of_but_not_all_of_it)
     count = len(data)
     self.failUnless(data == 'c'*count and
                     count <= some_of_but_not_all_of_it, 'First read ' + 
                     'failed to return the correct data or returned ' + 
                     'too much data')
     while count < too_big_for_one_write:
         data += stream.read(too_big_for_one_write - count)
         count = len(data)
     self.failUnless(data == 'c'*too_big_for_one_write,
                     'Overall stream did not return the data written to ' +
                     'it correctly, or returned the wrong amount of data')
     self.failUnless(stream.read(100) == '', 'Read did not return empty ' + 
                     'string even though no more data should have been ' + 
                     'waiting and the stream closed')
     cv.wait()
     try:
         self.failIf(_failed, _reason)
     finally:
         cv.release()
Example #5
 def _tock(self):
     while 1:
         try:
             self.semaphore.acquire()
             #scheduler.seconds_from_now_do(1.0, self._tick)
             _module_lock.acquire()
             try:
                 if debug > 4:
                     print 'FDT tock'
                 if (self.entries):
                     for key in self.entries.keys():
                         entry = self.entries[key]
                         if entry.tick():  #time to remove
                             try:
                                 del self.entries[key]
                                 if debug > 4:
                                     print "timeout on foreign device"
                             except:
                                 pass
             finally:
                 _module_lock.release()
         except:
             if msglog:
                 msglog.exception()
                 msglog.log('broadway', msglog.types.INFO,
                            'FDT timer thread restarting\n')
                 pause(10.0)
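Examples #5 and #11, like the RAFD loops in Examples #42 and #44, share one shape: an endless timer loop that does its work under the module lock, logs any exception through msglog, and pauses before retrying so the thread never dies. Stripped of the framework, that keep-alive pattern looks roughly like the sketch below, where time.sleep stands in for pause() and the names are illustrative only.

import logging
import time

def run_forever(do_work, interval=1.0, retry_delay=10.0):
    # Call do_work() every `interval` seconds; if it raises, log the
    # traceback and back off for `retry_delay` so the loop keeps running.
    while True:
        try:
            do_work()
            time.sleep(interval)
        except Exception:
            logging.exception('worker failed, restarting')
            time.sleep(retry_delay)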
Example #6
 def DISABLED_test_authenticating(self):
     unique = time.time()
     self._transporter.configure({'host':'tacos.dyndns.org',
                                  'sender':'*****@*****.**' % unique,
                                  'recipients':'*****@*****.**',
                                  'subject':str(unique),'timeout':'1',
                                  'authenticate':'1',
                                  'username':'******',
                                  'password':'******',
                                  'name':'test','parent':self._parent})
     self._transporter.start()
     self._transporter.transport('Test')
     pause(30)
     pop = poplib.POP3('tacos.dyndns.org')
     try:
         pop.user('mailreceiver')
         pop.pass_('env123')
         count = len(pop.list()[1])
         received = 0
         for i in range(0,count):
             message = pop.retr(i+1)[1]
             pop.dele(i+1)
             for line in message:
                 if (line.startswith('Subject: ') and 
                     line[9:].strip() == str(unique)):
                     received = 1
                     break
     finally:
         pop.quit()
     self.failUnless(received,'Failed to retrieve email, it may be a ' + 
                     'problem with the SMTP server and not the transporter.')
Example #7
 def test_FDT(self):
     n = network.open_interface('IP', 'lo', 1)
     a = '\x81\x05\x00\x06\x00\x05'
     b = Addr('\x55\xAA\x05\xA0\xBA\xC0')
     c = RegisterForeignDevice(b, decode=a)
     f = ForeignDeviceTable()
     f.register_foreign_device(c)
     for x in range(30):
         pause(1.0)
         if len(f.entries) == 0:
             raise 'Foreign device table tick failure, early removal'
     pause(10.0)
     if len(f.entries) != 0:
         raise 'Foreign device table tick failure to remove device'
     #e = '\x81\x0B\x20\x000123456789012345678901234567'
     #o = OriginalBroadcastNPDU(decode=e)
     #r = Addr('\xAA\x55\x05\xA0\xBA\xC0')
     #f.forward_original_broadcast_message(n.network, r, o.npdu)
     #how do I check to see if it made it?
     #g = '\x81\x04\x20\x00\x55\xAA\x05\xA0\xBA\xC01234567890123456789012'
     #h = ForwardedNPDU(decode=g)
     #f.broadcast_forwarded_message(n.network, r, h)
     #f.distribute(n.network, r, h)
     f._stop_ticking()
     network.close_interface(n)
     pass
Example #8
 def _stop(self):
     while self.state is self.PENDING:
         pause(.1)
     if self.state is not self.STOPPED:
         self.state = self.HALTING
         msg = 'RNA service stopping on %s.'
         msglog.log('broadway', msglog.types.INFO, msg % self.transport)
         try:
             # Hack to wake up the thread...
             t = self.transportClass(**self.configuration())
             # TODO: THIS CANNOT WORK. Neither SimpleTcpService nor 
             # SrnaService has a connect() method:
             t.connect()
             i = mpx.lib.rna._InvokeCommand("BOGUS")
             i.pack(ProtocolCommand('/','no_such_method_i_hope',()))
             i.totransport(t.send)
             # TODO: THIS CANNOT WORK. Neither SimpleTcpService nor 
             # SrnaService has a disconnect() method:
             t.disconnect()
             while self.state is not self.STOPPED:
                 pause(.1)
                 return 1
         except:
             msglog.exception()
     return 0
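Example #8 (duplicated in Example #12) waits for the service thread to leave PENDING and reach STOPPED by polling self.state with pause(.1). Outside the framework, that handshake is commonly expressed with threading.Event objects so stop() blocks, with a bound, until the loop has really exited. The class below is a generic sketch, not the RNA service's actual shutdown path.

import threading

class StoppableWorker(object):
    def __init__(self):
        self._stop_requested = threading.Event()
        self._stopped = threading.Event()
        self._thread = threading.Thread(target=self._run)

    def start(self):
        self._thread.start()

    def _run(self):
        while not self._stop_requested.is_set():
            # One unit of work would go here; wake at least once a second.
            self._stop_requested.wait(1.0)
        self._stopped.set()

    def stop(self, timeout=5.0):
        # Ask the worker to exit and wait (bounded) for confirmation.
        self._stop_requested.set()
        return self._stopped.wait(timeout)

stop() returns True once _run has set the _stopped event, or False if the timeout expired first.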
Example #9
 def DISABLED_test_non_authenticating(self):
     unique = time.time()
     self._transporter.configure({
         'host': 'localhost',
         'sender': '*****@*****.**' % unique,
         'recipients': '*****@*****.**',
         'subject': str(unique),
         'timeout': '1',
         'authenticate': '0',
         'name': 'test',
         'parent': self._parent
     })
     self._transporter.start()
     self._transporter.transport('Test')
     pause(30)
     pop = poplib.POP3('tacos.dyndns.org')
     try:
         pop.user('mailreceiver')
         pop.pass_('env123')
         count = len(pop.list()[1])
         received = 0
         for i in range(0, count):
             message = pop.retr(i + 1)[1]
             pop.dele(i + 1)
             for line in message:
                 if (line.startswith('Subject: ')
                         and line[9:].strip() == str(unique)):
                     received = 1
                     break
     finally:
         pop.quit()
     self.failUnless(
         received, 'Failed to retrieve email, it may be a ' +
         'problem with the SMTP server and not the transporter.')
Example #10
 def get_bdt_from(self, network, ip_address, ip_port):
     a = _ip_format_str_as_ulong(ip_address)
     p = int(ip_port)
     addr = Addr(_ulong_as_str(a) + _ushort_as_str(p))
     try:
         _module_lock.acquire()
         self.reports = {}
     finally:
         _module_lock.release()
     if debug > 1:
         _dump_send(network, addr.address,
                    ReadBroadcastDistributionTable().encoding)
     send(network, addr.address, ReadBroadcastDistributionTable().encoding)
     timeout = 0
     _module_lock.acquire()
     try:
         while (len(self.reports.values()) < 1) and (timeout < 10):
             _module_lock.release()
             pause(0.5)
             timeout += 1
             _module_lock.acquire()
     finally:
         try:
             _module_lock.release()
         except:
             pass
     _module_lock.acquire()
     try:
         if (len(self.reports.values()) > 0):
             self.entries = self.reports.values()[0]
     finally:
         _module_lock.release()
     return self.report(network)
Example #11
 def _tock(self):
     while 1:
         try:
             self.semaphore.acquire()
             #scheduler.seconds_from_now_do(1.0, self._tick)
             _module_lock.acquire()
             try:
                 if debug > 4:
                     print 'FDT tock'
                 if (self.entries):
                     for key in self.entries.keys():
                         entry = self.entries[key]
                         if entry.tick():  #time to remove
                             try:
                                 del self.entries[key]
                                 if debug > 4:
                                     print "timeout on foreign device"
                             except:
                                 pass
             finally:
                 _module_lock.release()
         except:
             if msglog:
                 msglog.exception()
                 msglog.log('broadway', msglog.types.INFO,
                    'FDT timer thread restarting\n')
                 pause(10.0)
Example #12
 def _stop(self):
     while self.state is self.PENDING:
         pause(.1)
     if self.state is not self.STOPPED:
         self.state = self.HALTING
         msg = 'RNA service stopping on %s.'
         msglog.log('broadway', msglog.types.INFO, msg % self.transport)
         try:
             # Hack to wake up the thread...
             t = self.transportClass(**self.configuration())
             # TODO: THIS CANNOT WORK. Neither SimpleTcpService nor
             # SrnaService has a connect() method:
             t.connect()
             i = mpx.lib.rna._InvokeCommand("BOGUS")
             i.pack(ProtocolCommand('/', 'no_such_method_i_hope', ()))
             i.totransport(t.send)
             # TODO: THIS CANNOT WORK. Neither SimpleTcpService nor
             # SrnaService has a disconnect() method:
             t.disconnect()
             while self.state is not self.STOPPED:
                 pause(.1)
                 return 1
         except:
             msglog.exception()
     return 0
Example #13
 def test_data_loss(self):
     self.log.trim_ge('_seq',0)
     self.log._write_queue()
     self.log._trim_lock.acquire()
     self.log._write_lock.acquire()
     try:
         entries = []
         reverse = -1
         for i in range(0, 5):
             t = time.time()
             entries.append([t, reverse, 2, 3])
             self.log.add_entry(entries[-1])
             reverse -= 1
             pause(.1)
     finally:
         self.log._write_lock.release()
         self.log._trim_lock.release()
     self.log._write_queue()
     data = self.log.get_range('timestamp',0,time.time())
     self.failUnless(data[0]['timestamp'] == entries[0][0] and 
                     data[-1]['timestamp'] == entries[-1][0], 
                     'Log failed to get correct beginning or end')
     self.failUnless(len(data[:]) == len(entries), 
                     'Some entries lost during trim')
     return
Example #14
 def _convert_temperature_sensor_list(self):
     try:
         if len(self.conversion_list):
             if self.debug:
                 print "DALLASBUS: _read_temperature_sensors_for", self
             _read_temperature_sensors_for(self)
         else:
             if self.debug:
                 print "DALLASBUS: sleep for one second because there are no sensors", self
             pause(4)
     finally:
         self.request(self._convert_temperature_sensor_list)
Example #15
 def _start(self):
     while self.running:
         start_time = self.last_time
         end_time = time.time()
         data = self.log.get_range("timestamp", start_time, end_time)
         try:
             self._send(data, start_time, end_time)
             self.last_time = end_time
             self.success_count += 1
         except:
             msglog.exception()
         pause(self.upload_interval)
Example #16
 def _exercise_auto(self, pts):
     for v in (0, 1, 0, 1, 1, 1, 0, 1, 0):
         self.PRINT("pts.input.set(%r)\n", v)
         pts.input.set(v)
         for i in range(0,10):	# Maximum time to wait is pts.period*2
             o = pts.output.get()
             if o != v:
                 pause(pts.period/5)
                 continue
             break
         self._fault_on_difference(pts, 0)
     return
Example #17
 def _convert_temperature_sensor_list(self):
     try:
         if len(self.conversion_list):
             if self.debug:
                 print "DALLASBUS: _read_temperature_sensors_for", self
             _read_temperature_sensors_for(self)
         else:
             if self.debug:
                 print "DALLASBUS: sleep for one second because there are no sensors", self
             pause(4)
     finally:
         self.request(self._convert_temperature_sensor_list)
Example #18
 def _start(self):
     while self.running:
         start_time = self.last_time
         end_time = time.time()
         data = self.log.get_range('timestamp', start_time, end_time)
         try:
             self._send(data, start_time, end_time)
             self.last_time = end_time
             self.success_count += 1
         except:
             msglog.exception()
         pause(self.upload_interval)
Example #19
 def _exercise_auto(self, pts):
     for v in (0, 1, 0, 1, 1, 1, 0, 1, 0):
         self.PRINT("pts.input.set(%r)\n", v)
         pts.input.set(v)
         for i in range(0, 10):  # Maximum time to wait is pts.period*2
             o = pts.output.get()
             if o != v:
                 pause(pts.period / 5)
                 continue
             break
         self._fault_on_difference(pts, 0)
     return
Example #20
 def _run(self):
     while self.running:
         temperature = self.temperature.get()
         if temperature > self.maximum:
             self.heating.set(0)
             self.cooling.set(1)
         elif temperature < self.minimum:
             self.cooling.set(0)
             self.heating.set(1)
         else:
             self.cooling.set(0)
             self.heating.set(0)
         pause(120)
Example #21
 def _run(self):
     while self.running:
         temperature = self.temperature.get()
         if temperature > self.maximum:
             self.heating.set(0)
             self.cooling.set(1)
         elif temperature < self.minimum:
             self.cooling.set(0)
             self.heating.set(1)
         else:
             self.cooling.set(0)
             self.heating.set(0)
         pause(120)
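Examples #20 and #21 implement a bang-bang controller with a deadband: above the maximum the loop cools, below the minimum it heats, and in between both outputs are switched off, re-evaluated every 120 seconds. Factoring the decision out of the loop makes the logic easy to check without nodes, threads, or pause(); the helper below is illustrative only.

def decide_outputs(temperature, minimum, maximum):
    # Returns (heating, cooling) for the deadband controller above.
    if temperature > maximum:
        return (0, 1)   # too hot: heating off, cooling on
    if temperature < minimum:
        return (1, 0)   # too cold: heating on, cooling off
    return (0, 0)       # inside the deadband: both off

assert decide_outputs(80, 68, 75) == (0, 1)
assert decide_outputs(60, 68, 75) == (1, 0)
assert decide_outputs(72, 68, 75) == (0, 0)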
Example #22
 def test_release_by_other_thread(self):
     def acquire_it_elsewhere(lock):
         lock.acquire()
     l = allocate1()
     test_thread = Thread(target=acquire_it_elsewhere,args=(l,))
     test_thread.start()
     while l not in l.locked_list:
         pause(0.1)
     try:
         l.release()
     except _WrongThreadAssertion:
         return
     raise "Failed to detect a release of another thread's acquire."
Example #23
 def test_alias_input(self):
     driver = PeriodicDriver()
     driver.configure({'name':'driver','parent':self.output_value,
                       'input':self.alias_input})
     self.assert_(self.output_value.get() == 0,
                  "Output already non-zero, bogus test...")
     driver.start()
     timeout_at = time.time() + 1.0
     while self.output_value.get() == 0:
         if time.time() > timeout_at:
             self.fail("self.output_value never driven to 1.")
         pause(0.01)
     return
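The PeriodicDriver tests here and below (Examples #26 and #41, among others) repeat the same deadline loop: poll a getter with pause(0.01) and fail once time.time() passes a one-second timeout. A small test-side helper captures that pattern; it is a convenience sketch, not something provided by mpx.lib.

import time

def wait_until(predicate, timeout=1.0, interval=0.01):
    # Poll `predicate` until it returns a truthy value or `timeout` elapses.
    # Returns True on success, False if the deadline passed first.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    return bool(predicate())

A test could then assert wait_until(lambda: self.output_value.get() != 0) instead of restating the loop each time.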
Example #24
    def test_release_by_other_thread(self):
        def acquire_it_elsewhere(lock):
            lock.acquire()

        l = allocate1()
        test_thread = Thread(target=acquire_it_elsewhere, args=(l,))
        test_thread.start()
        while l not in l.locked_list:
            pause(0.1)
        try:
            l.release()
        except _WrongThreadAssertion:
            return
        raise "Failed to detect a release of another thread's acquire."
Example #25
 def test_at_do_past(self):
     if disable_others:
         return
     s = mpx.lib.scheduler.AutoStartScheduler()
     s.setdebug(debug)
     #this one should execute immediately
     a = s.at_time_do(_time.time() - 1, self.callback, 'a')
     b = s.at_time_do(_time.time() + 3, self.callback, 'b')
     pause(0.1)
     if len(s._entries) != 2:
         raise 'There should be exactly one entry in schedule list'
     b.cancel()
     if len(s._entries) != 1:
         raise 'There should be exactly zero entries in schedule list'
     s.stop()
Example #26
 def test_simple_case(self):
     driver = PeriodicDriver()
     driver.configure({
         'name': 'driver',
         'parent': self.output_value,
         'input': self.input_value
     })
     self.assert_(self.output_value.get() == 0,
                  "Output already non-zero, bogus test...")
     driver.start()
     timeout_at = time.time() + 1.0
     while self.output_value.get() == 0:
         if time.time() > timeout_at:
             self.fail("self.output_value never driven to 1.")
         pause(0.01)
     return
Example #27
 def test_recurring_items(self):
     if disable_others:
         return
     exp_count = 6
     s = mpx.lib.scheduler.AutoStartScheduler()
     s.setdebug(debug)
     #mpx.lib.scheduler.debug = 1
     a = s.every(1, self.callback2)
     pause(exp_count - 1)
     s.stop()
     s.cancel(a)
     # Our callback should be called around exp_count times
     diff = abs(exp_count - self.callback_count)
     if diff > 1:
         cbcount = self.callback_count
         raise "Callback count should be %d, not %d." % (exp_count, cbcount)
Example #28
 def test_seconds_from_now_do(self):
     if disable_others:
         return
     s = mpx.lib.scheduler.AutoStartScheduler()
     s.setdebug(debug)
     a = s.seconds_from_now_do(2, self.callback, 'a')
     if len(s._entries) - 1:
         if s._entries[0] != a:
             raise 'Schedule entry mismatch after one add'
     else:
         raise 'Schedule entry not added'
     pause(3)
     if len(s._entries) - 1:
         raise 'Schedule entry did not disappear'
     s.stop()
     s.cancel(a)
Example #29
 def test_deferred_input(self):
     driver = PeriodicDriver()
     driver.configure({'name':'driver','parent':self.output_value,
                       'input':'/aliases/deferred_input',
                       'period':0.01})
     self.assert_(self.output_value.get() == 0,
                  "Output already non-zero, bogus test...")
     driver.start()
     pause(0.1)
     Alias().configure({'parent':self.aliases, 'name':'deferred_input',
                        'node_url':'/input/value'})
     timeout_at = time.time() + 1.0
     while self.output_value.get() == 0:
         if time.time() > timeout_at:
             self.fail("self.output_value never driven to 1.")
         pause(0.01)
     return
Example #30
    def _run(self):
        global commands
        x = xml.sax.make_parser()
        x.setContentHandler(ContentHandler(self.debug))

        while self._running:
            # all in try statement/catch-all so that
            #  service continues to run indefinitely.
            try:
                if self.connection.acquire(self.timeout):
                    try:
                        server_url = self.server_url
                        command_url = server_url + 'get?nodeid=' + self.node
                        if self.debug:
                            print "Executing %s" % command_url
                        x.parse(command_url)
                        for c in commands:
                            if self.debug:
                                print "Setting %s to %f with seq %s" % (
                                    c[0], c[1], c[2])
                            try:
                                node = as_node(c[0])
                                node.set(_holistic_conversion(c[1]))
                            except (KeyError):
                                msglog.log('sie', msglog.types.ERR,
                                           'Point %s does not exist.' % c[0])

                            if self.debug:
                                print "Acknowledging setting point %s to %d with sequence %s" % \
                                      (c[0], c[1], c[2])
                            encoded_name = urllib.quote_plus(c[0])
                            encoded_param = urllib.quote_plus(c[3])
                            ack_url = server_url + 'ack?PointID=%s&SeqNum=%s&varParam=%s' % \
                                      (encoded_name, c[2], encoded_param)
                            if self.debug:
                                print "Acknowledging with %s" % ack_url
                            # uses the parser's ability to retrieve url content
                            #  so we don't have to put http logic here.
                            x.parse(ack_url)
                    finally:
                        self.connection.release()
            except:
                msglog.exception()
            pause(self.period)
Example #31
 def test_30_PeriodicRelayDriver_auto(self, quite=0):
     self.PRINT("ENTER: test_30_PeriodicRelayDriver_set: %s\n",
                "*"*15, quite=quite)
     pts = self.test_20_PeriodicRelayDriver_config(1)
     pts.input.set(0)
     pts.output.set(0)
     pts.start()
     self.PRINT("AUTO MODE:\n", quite=quite)
     pts.set(2)
     # It's in AUTO...
     self.PRINT("pts.input.set(1)\n", quite=quite)
     pts.input.set(1)
     pause(pts.period*2)
     self._fault_on_difference(pts, quite)
     self._exercise_auto(pts)
     self.PRINT("EXIT:  test_30_PeriodicRelayDriver_set: %s\n",
                "*"*15, quite=quite)
     pts.stop()
     return
Example #32
 def _run(self):
     global commands
     x = xml.sax.make_parser()
     x.setContentHandler(ContentHandler(self.debug))
     
     while self._running:
         # all in try statement/catch-all so that
         #  service continues to run indefinitely.
         try:
             if self.connection.acquire(self.timeout):
                 try:
                     server_url = self.server_url
                     command_url = server_url + 'get?nodeid=' + self.node
                     if self.debug:
                         print "Executing %s" % command_url
                     x.parse(command_url)
                     for c in commands:
                         if self.debug:
                             print "Setting %s to %f with seq %s" % (c[0], c[1], c[2])
                         try:
                             node = as_node(c[0])
                             node.set(_holistic_conversion(c[1]))
                         except(KeyError):
                             msglog.log('sie', msglog.types.ERR, 'Point %s does not exist.' % c[0])
                         
                         if self.debug:
                             print "Acknowledging setting point %s to %d with sequence %s" % \
                                   (c[0], c[1], c[2])
                         encoded_name = urllib.quote_plus(c[0])
                         encoded_param = urllib.quote_plus(c[3])
                         ack_url = server_url + 'ack?PointID=%s&SeqNum=%s&varParam=%s' % \
                                   (encoded_name, c[2], encoded_param)
                         if self.debug:
                             print "Acknowledging with %s" % ack_url
                         # uses the parser's ability to retrieve url content
                         #  so we don't have to put http logic here.
                         x.parse(ack_url)
                 finally:
                     self.connection.release()
         except:
             msglog.exception()
         pause(self.period)
Example #33
 def test_run_many(self):
     cursystime = _time.time()
     curuptime = uptime.secs()
     # Create 1000 entries
     count = 1000
     # Earliest entry executes in 20 seconds.
     offset = 5
     # Schedule for random value between offset and offset + range
     variation = 5
     # Generate offset list
     offsets = [offset +  (variation * random()) for i in range(count)]
     sched = mpx.lib.scheduler.Scheduler()
     sched.setdebug(debug)
     sched.start()
     entries = [sched.after(offset, self.callback2) for offset in offsets]
     pause(offset + variation)
     callbacks = self.callback_count
     assert callbacks == count, 'Counted %d, not %d' % (callbacks, count)
     assert len(sched._entries) == 1, 'More than one entry left'
     sched.stop()
Example #34
 def report(self, network):
     self.reports = {}
     answer = []
     if (self.entries):
         msg = ReadBroadcastDistributionTable().encoding
         for entry in self.entries:
             if debug > 1:
                 _dump_send(network, entry.asBBMDaddress().address, msg)
             a = network
             b = entry.asBBMDaddress().address
             c = msg
             send(a, b, c)
         timeout = 0
         _module_lock.acquire()
         try:
             while (len(self.reports.keys()) < len(self.entries)) and \
                   (timeout < 10):
                 _module_lock.release()
                 pause(0.5)
                 timeout += 1
                 _module_lock.acquire()
             for entry in self.entries:
                 status = 'no response'
                 if self.reports.has_key(entry.asBBMDaddress().address):
                     status = 'ok'
                     if self.entries != \
                        self.reports[entry.asBBMDaddress().address]:
                         status = 'mismatch'
                 string = entry.as_text()
                 answer.append((
                     string[0],
                     string[1],
                     string[2],
                     status,
                 ))
         finally:
             try:
                 _module_lock.release()
             except:
                 pass
     return answer
Example #35
 def test_at_do_callback(self):
     if disable_others:
         return
     s = mpx.lib.scheduler.AutoStartScheduler()
     s.setdebug(debug)
     b=[] #add something to this list in the callback
     a = s.at_time_do(_time.time() + 2, self.callback, 'c', b)
     if len(s._entries) - 1:
         if s._entries[0] != a:
             raise 'Schedule entry mismatch after one add'
     else:
         raise 'Schedule entry not added'
     pause(3)
     if len(s._entries) - 1:
         raise 'Schedule entry did not disappear'
     if len(b) == 0:
         raise 'Callback did not run'
     if b[0] != 'c':
         raise 'No match on added object'
     s.stop()
     s.cancel(a)
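The scheduler tests (Examples #25, #27, #28, #33 and #35) exercise mpx.lib.scheduler's at_time_do, seconds_from_now_do, after and every entry points. For readers who only want the flavour of one-shot delayed callbacks, the standard library's sched module covers the simple cases; the snippet below is a rough analogue, not a drop-in replacement for the mpx scheduler.

import sched
import time

s = sched.scheduler(time.time, time.sleep)

def callback(tag):
    print('%s fired at %.2f' % (tag, time.time()))

# seconds_from_now_do(2, callback, 'a') corresponds roughly to:
event_a = s.enter(2, 1, callback, ('a',))
# at_time_do(time.time() + 3, callback, 'b') corresponds roughly to:
event_b = s.enterabs(time.time() + 3, 1, callback, ('b',))
# s.cancel(event_b) would remove it before it fires.
s.run()   # blocks until all pending events have run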
Example #36
 def test_30_PeriodicRelayDriver_auto(self, quite=0):
     self.PRINT("ENTER: test_30_PeriodicRelayDriver_set: %s\n",
                "*" * 15,
                quite=quite)
     pts = self.test_20_PeriodicRelayDriver_config(1)
     pts.input.set(0)
     pts.output.set(0)
     pts.start()
     self.PRINT("AUTO MODE:\n", quite=quite)
     pts.set(2)
     # It's in AUTO...
     self.PRINT("pts.input.set(1)\n", quite=quite)
     pts.input.set(1)
     pause(pts.period * 2)
     self._fault_on_difference(pts, quite)
     self._exercise_auto(pts)
     self.PRINT("EXIT:  test_30_PeriodicRelayDriver_set: %s\n",
                "*" * 15,
                quite=quite)
     pts.stop()
     return
Example #37
 def _scan_sensors(self):
     self.children_nodes()  # trigger autodiscovery to get the ball rolling
     while 1:
         try:
             while self.running:
                 sensors = self.children_nodes(auto_discover=0)[:]
                 for sensor in sensors:
                     # add a little pause based on number of sensors
                     delay = 11.0 / len(
                         sensors)  # roughly one sensor per sec
                     if delay < 0.5:
                         delay = 0.5
                     pause(delay)
                     try:
                         if not self.running: break
                         sensor.result = self._read_temperature_for(sensor)
                         if sensor.bad_crc_count > 10:  #was logged as bad
                             msglog.log('DallasBus', 'information', 'Resumed: %s' % \
                                     as_node_url(sensor))
                         sensor.bad_crc_count = 0
                         # update the scan_period that shows up in configuration
                         t = uptime_secs()
                         sensor.scan_period = t - sensor._last_read_time
                         sensor._last_read_time = t
                     except:
                         if sensor.bad_crc_count < 11:
                             sensor.bad_crc_count += 1
                             if sensor.bad_crc_count == 10:
                                 sensor.result = None  #return ETimeout to gets
                                 msglog.log('DallasBus', 'information', 'Failed: %s' % \
                                            as_node_url(sensor))
                 if not sensors: pause(17)  # slow down if no dallas bus
         except:
             if self.debug: msglog.exception()
         pause(30)
Example #38
 def _scan_sensors(self):
     self.children_nodes() # trigger autodiscovery to get the ball rolling
     while 1:
         try:
             while self.running:
                 sensors = self.children_nodes(auto_discover=0)[:]
                 for sensor in sensors:
                     # add a little pause based on number of sensors
                     delay = 11.0 / len(sensors) # roughly one sensor per sec
                     if delay < 0.5:
                         delay = 0.5
                     pause(delay)
                     try:
                         if not self.running: break
                         sensor.result = self._read_temperature_for(sensor)
                         if sensor.bad_crc_count > 10: #was logged as bad
                             msglog.log('DallasBus', 'information', 'Resumed: %s' % \
                                     as_node_url(sensor))
                         sensor.bad_crc_count = 0
                         # update the scan_period that shows up in configuration
                         t = uptime_secs()
                         sensor.scan_period = t - sensor._last_read_time
                         sensor._last_read_time = t
                     except:
                         if sensor.bad_crc_count < 11:
                             sensor.bad_crc_count += 1
                             if sensor.bad_crc_count == 10:
                                 sensor.result = None #return ETimeout to gets
                                 msglog.log('DallasBus', 'information', 'Failed: %s' % \
                                            as_node_url(sensor))
                 if not sensors: pause(17) # slow down if no dallas bus
         except:
             if self.debug: msglog.exception()
         pause(30)
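Examples #37 and #38 pace the Dallas bus scan so that one pass over the sensor list takes roughly eleven seconds, while never polling a single sensor more often than every half second. The pacing rule in isolation, as an illustrative helper:

def scan_delay(sensor_count, cycle_seconds=11.0, minimum=0.5):
    # Spread one pass over the bus across about cycle_seconds, with a floor
    # so a short sensor list is not hammered.
    if sensor_count <= 0:
        return cycle_seconds
    return max(minimum, cycle_seconds / float(sensor_count))

assert scan_delay(11) == 1.0
assert scan_delay(44) == 0.5   # the floor kicks in on large buses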
Example #39
 def test_exporter_v1(self):
     import _test_lib
     log = _test_lib.trap_log_factory()
     exporter = _test_lib.trap_exporter_factory(parent=log)
     log.start()
     self.assert_comparison("tuple(self.msglog_object[:])", "==", "()")
     version = '1'
     context_engine_id = 'bogus-id'
     context_name = 'bogus-context'
     address = '192.168.1.1'
     sysUpTime = '1 day 03:04:05.06'
     trap = 'Test-MIB::logTestTrap'
     trap_enterprise = 'Test-MIB::RZ'
     varBinds = (('Test-MIB::Payload', 0), )
     logtime = time.time()
     log.log_trap(version, context_engine_id, context_name, address,
                  sysUpTime, trap, trap_enterprise, varBinds, logtime)
     from mpx.lib import pause
     pause(1)
     log.stop()
     self.assert_comparison("tuple(self.msglog_object[:])", "==", "()")
     return
Example #40
def run(filename=None, interactive_debug=0):
    if is_running():
        raise EAlreadyRunning
    __system_dict['run_func'] = run
    try:
        process.register_named_process('broadway', os.getpid())
        # clear out any shared memory used by the framework nodes,
        # and C processes, e.g., C event router and/or C statistics
        if os.path.exists('/usr/bin/shmrm'):
            os.system('/usr/bin/shmrm | /usr/bin/logger -t shmrm')
        
        ## if configuration file not specified, use file from properties
        if filename == None:
            filename = properties.CONFIGURATION_FILE
        configure(filename)
        gc.collect()
        if not interactive_debug:
            # The main process enters a 'do nothing' loop.  It's an easy way
            # to keep the main thread safely active.
            while should_run():
                pause(1)
    except Exception, e:
        msglog.exception()
Example #41
 def test_deferred_input(self):
     driver = PeriodicDriver()
     driver.configure({
         'name': 'driver',
         'parent': self.output_value,
         'input': '/aliases/deferred_input',
         'period': 0.01
     })
     self.assert_(self.output_value.get() == 0,
                  "Output already non-zero, bogus test...")
     driver.start()
     pause(0.1)
     Alias().configure({
         'parent': self.aliases,
         'name': 'deferred_input',
         'node_url': '/input/value'
     })
     timeout_at = time.time() + 1.0
     while self.output_value.get() == 0:
         if time.time() > timeout_at:
             self.fail("self.output_value never driven to 1.")
         pause(0.01)
     return
Example #42
 def _tock(self):
     while 1:
         try:
             self.semaphore.acquire()
             _module_lock.acquire()
             try:
                 if debug > 4:
                     print 'RAFD tock'
                 for k in BBMD_servers.keys():
                     b = BBMD_servers[k]
                     if b.register_as_foreign_device:
                         # send a new RAFD packet to remote BBMD
                         msg = RegisterForeignDevice(None, self.refresh_rate)
                         if debug > 4: print 'RAFD update TTL'
                         b.bdt.distribute(k, None, msg)
             finally:
                 _module_lock.release()
         except:
             if msglog:
                 msglog.exception()
                 msglog.log('broadway', msglog.types.INFO,
                    'RAFD timer thread restarting\n')
                 pause(10.0)
Example #43
 def test_cross_thread(self):
     # @note:  This test is relying on the write being large enough to
     #         fill all the OS buffers and block.
     #
     # @note:  Methinks this test relies on too many side effects...
     too_big_for_one_write = 1000000
     some_of_but_not_all_of_it = 65536
     stream = CrossThreadStream()
     cv = Condition()
     t1 = Thread(target=_writer, args=(cv, stream, too_big_for_one_write))
     cv.acquire()
     t1.start()
     # @note:  This pause should cause the _writer to block since it is
     #         trying to write too_big_for_one_write.
     pause(2)
     data = stream.read(some_of_but_not_all_of_it)
     count = len(data)
     self.failUnless(
         data == 'c' * count and count <= some_of_but_not_all_of_it,
         'First read ' + 'failed to return the correct data or returned ' +
         'too much data')
     while count < too_big_for_one_write:
         data += stream.read(too_big_for_one_write - count)
         count = len(data)
     self.failUnless(
         data == 'c' * too_big_for_one_write,
         'Overall stream did not return the data written to it ' +
         'correctly, or returned the wrong amount of data')
     self.failUnless(
         stream.read(100) == '', 'Read did not return empty ' +
         'string even though no more data should have been ' +
         'waiting and the stream closed')
     cv.wait()
     try:
         self.failIf(_failed, _reason)
     finally:
         cv.release()
Example #44
 def _tock(self):
     while 1:
         try:
             self.semaphore.acquire()
             _module_lock.acquire()
             try:
                 if debug > 4:
                     print 'RAFD tock'
                 for k in BBMD_servers.keys():
                     b = BBMD_servers[k]
                     if b.register_as_foreign_device:
                         # send a new RAFD packet to remote BBMD
                         msg = RegisterForeignDevice(
                             None, self.refresh_rate)
                         if debug > 4: print 'RAFD update TTL'
                         b.bdt.distribute(k, None, msg)
             finally:
                 _module_lock.release()
         except:
             if msglog:
                 msglog.exception()
                 msglog.log('broadway', msglog.types.INFO,
                            'RAFD timer thread restarting\n')
                 pause(10.0)
Example #45
 def report(self, network):
     self.reports = {}
     answer = []
     if (self.entries):
         msg = ReadBroadcastDistributionTable().encoding
         for entry in self.entries:
             if debug > 1:
                 _dump_send(network, entry.asBBMDaddress().address, msg)
             a=network
             b=entry.asBBMDaddress().address
             c=msg
             send(a,b,c)
         timeout = 0
         _module_lock.acquire()
         try:
             while (len(self.reports.keys()) < len(self.entries)) and \
                   (timeout < 10):
                 _module_lock.release()
                 pause(0.5)
                 timeout += 1
                 _module_lock.acquire()
             for entry in self.entries:
                 status = 'no response'
                 if self.reports.has_key(entry.asBBMDaddress().address):
                     status = 'ok'
                     if self.entries != \
                        self.reports[entry.asBBMDaddress().address]:
                         status = 'mismatch'
                 string = entry.as_text()
                 answer.append((string[0], string[1], string[2], status,))
         finally:
             try:
                 _module_lock.release()
             except:
                 pass
     return answer
Example #46
 def wait_and_do(seconds, function):
     pause(seconds)
     function()
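wait_and_do (Examples #46 and #47) simply sleeps on the calling thread and then invokes the function. When the delay should not block the caller, the standard library's threading.Timer provides the same deferred call on its own thread; this is an alternative sketch, not the helper's actual implementation.

import threading

def wait_and_do_async(seconds, function):
    # Run `function` after `seconds` without blocking the calling thread.
    timer = threading.Timer(seconds, function)
    timer.start()
    return timer   # timer.cancel() aborts the call if it has not fired yet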
Example #47
 def wait_and_do(seconds,function):
     pause(seconds)
     function()
Example #48
    def test_add_entry_event(self):
        class Consumer(EventConsumerAbstract):
            def __init__(self, *args, **kw):
                EventConsumerAbstract.__init__(self, *args, **kw)
                self.entries = []
                self.errors = []
                self.lock = Lock()

            def event_thread(self, event):
                # The values returned in the event:
                values = event.values
                # The column as read from the source Log instance:
                column_dict = event.source[event.seq]
                # A map of COLUMN_DICT keys to VALUES indexes.
                column_value_map = {
                    'timestamp': 0,
                    'reverse': 1,
                    'c2': 2,
                    'c3': 3
                }
                # Validate that the list of values matches the actual column in
                # the log:
                for key, index in column_value_map.items():
                    if not column_dict.has_key(key):
                        self.errors.append('column_dict has no %r key.' % key)
                        return
                    if index >= len(values):
                        self.errors.append('Index(%r) >= len(values:%r).' %
                                           (index, len(values)))
                        return
                    if column_dict[key] != values[index]:
                        self.errors.append(
                            'column_dict[%r]:%r != values[%r]:%r' %
                            (key, column_dict[key], index, values[index]))
                        return
                self.lock.acquire()
                try:
                    # If any entries are left, the test will fail.
                    self.entries.remove(values)
                except:
                    # Also, if errors is not empty the test will fail.
                    self.errors.append("Failed to find %r in entries." %
                                       values)
                self.lock.release()

            def event_handler(self, event):
                t = Thread(target=self.event_thread, args=(event, ))
                t.start()
                return

        consumer = Consumer()
        self.log.event_subscribe(consumer, log.LogAddEntryEvent)
        reverse = -1
        for i in range(0, 5):
            t = time.time()
            entry = [t, reverse, 2, 3]
            consumer.entries.append(entry)
            self.log.add_entry(entry)
            reverse -= 1
            # pause(.1)
        t1 = time.time()
        while consumer.entries:
            if (time.time() - t1) >= 1.0:
                msgs = ["Failed to receive matching events after 1 second."]
                msgs.extend(consumer.errors)
                self.fail('\n'.join(msgs))
            pause(0.1)
        return
Example #49
 def test_add_entry_event(self):
     class Consumer(EventConsumerAbstract):
         def __init__(self, *args, **kw):
             EventConsumerAbstract.__init__(self, *args, **kw)
             self.entries = []
             self.errors = []
             self.lock = Lock()
         def event_thread(self,event):
             # The values returned in the event:
             values = event.values
             # The column as read from the source Log instance:
             column_dict = event.source[event.seq]
             # A map of COLUMN_DICT keys to VALUES indexes.
             column_value_map = {
                 'c0':0,
                 }
             # Validate that the list of values matches the actual column in
             # the log:
             for key,index in column_value_map.items():
                 if not column_dict.has_key(key):
                     self.errors.append('column_dict has no %r key.' % key)
                     return
                 if index >= len(values):
                     self.errors.append('Index(%r) >= len(values:%r).' %
                                        (index, len(values)))
                     return
                 if column_dict[key] != values[index]:
                     self.errors.append(
                         'column_dict[%r]:%r != values[%r]:%r' % (
                         key, column_dict[key], index, values[index]))
                     return
             self.lock.acquire()
             try:
                 # If any entries are left, the test will fail.
                 self.entries.remove(values)
             except:
                 # Also, if errors is not empty the test will fail.
                 self.errors.append("Failed to find %r in entries." %
                                    values)
             self.lock.release()
         def event_handler(self,event):
             t = Thread(target=self.event_thread, args=(event,))
             t.start()
             return
     consumer = Consumer()
     p = PeriodicLog()
     p.configure({'name':'log','parent':None, 'period':0})
     h = CompositeNode()
     h.configure({'name':'columns','parent':p})
     c = PeriodicColumn()
     c.configure({'position':0, 'name':'c0', 'parent':h,
                  'function':'None',})
     p.event_subscribe(consumer, LogAddEntryEvent)
     p.start()
     try:
         for c0 in range(0,10):
             entry = [c0,]
             consumer.entries.append(entry)
             p.add_entry(entry)
         t1 = time.time()
         while consumer.entries:
             if (time.time() - t1) >= 1.0:
                 msgs = [
                     "Failed to recieve matching events after 1 second."
                     ]
                 msgs.extend(consumer.errors)
                 self.fail('\n'.join(msgs))
             pause(0.1)
     finally:
         p.stop()
     return
Example #50
 def test_add_entry_event(self):
     class Consumer(EventConsumerAbstract):
         def __init__(self, *args, **kw):
             EventConsumerAbstract.__init__(self, *args, **kw)
             self.entries = []
             self.errors = []
             self.lock = Lock()
         def event_thread(self,event):
             # The values returned in the event:
             values = event.values
             # The column as read from the source Log instance:
             column_dict = event.source[event.seq]
             # A map of COLUMN_DICT keys to VALUES indexes.
             column_value_map = {
                 'timestamp':0, 'reverse':1, 'c2':2, 'c3':3
                 }
             # Validate that the list of values matches the actual column in
             # the log:
             for key,index in column_value_map.items():
                 if not column_dict.has_key(key):
                     self.errors.append('column_dict has no %r key.' % key)
                     return
                 if index >= len(values):
                     self.errors.append('Index(%r) >= len(values:%r).' %
                                        (index, len(values)))
                     return
                 if column_dict[key] != values[index]:
                     self.errors.append(
                         'column_dict[%r]:%r != values[%r]:%r' % (
                         key, column_dict[key], index, values[index]))
                     return
             self.lock.acquire()
             try:
                 # If any entries are left, the test will fail.
                 self.entries.remove(values)
             except:
                 # Also, if errors is not empty the test will fail.
                 self.errors.append("Failed to find %r in entries." %
                                    values)
             self.lock.release()
         def event_handler(self,event):
             t = Thread(target=self.event_thread, args=(event,))
             t.start()
             return
     consumer = Consumer()
     self.log.event_subscribe(consumer, log.LogAddEntryEvent)
     reverse = -1
     for i in range(0, 5):
         t = time.time()
         entry = [t, reverse, 2, 3]
         consumer.entries.append(entry)
         self.log.add_entry(entry)
         reverse -= 1
         # pause(.1)
     t1 = time.time()
     while consumer.entries:
         if (time.time() - t1) >= 1.0:
             msgs = ["Failed to receive matching events after 1 second."]
             msgs.extend(consumer.errors)
             self.fail('\n'.join(msgs))
         pause(0.1)
     return
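The add-entry event tests above verify asynchronous delivery by appending the expected values to a list and polling it with pause(0.1) until a one-second deadline. A standard-library queue expresses the same "wait for N events or time out" check without the polling loop; the helper below is generic and does not use the mpx event classes.

import time
try:
    import queue                 # Python 3
except ImportError:
    import Queue as queue        # Python 2

def drain_events(q, expected_count, timeout=1.0):
    # Collect `expected_count` items from `q`, or raise queue.Empty if the
    # whole batch has not arrived within `timeout` seconds overall.
    received = []
    deadline = time.time() + timeout
    while len(received) < expected_count:
        remaining = deadline - time.time()
        if remaining <= 0:
            raise queue.Empty()
        received.append(q.get(timeout=remaining))
    return received

An event_handler would then just q.put(event.values), and the test body becomes a single drain_events call followed by the comparisons.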
Example #51
 def test_ttl_collect(self, quite=0):
     # Delete the current SessionManager.
     self._del()
     # Instantiate a new SessionManager, setting ttl to 2 seconds.
     ttl = 2.0
     self._new(ttl=ttl)
     total = 10
     fresh_sids = []
     stagnant_sids = []
     all_sids = []
     # Instantiate <code>total</code> (10) new sessions.
     for i in range(0, total):
         fresh_sid = self.session_manager.create('mpxadmin', 'mpxadmin', 1)
         fresh_sids.append(fresh_sid)
         all_sids.append(fresh_sid)
     # Select half the sessions for expiration.
     stagnate = range(0, total, 2)
     stagnate.reverse()
     for i in stagnate:
         stagnant_sid = fresh_sids.pop(i)
         stagnant_sids.append(stagnant_sid)
     # Wait for half of the session expiration time.
     pause(ttl / 2.0)
     # "Touch" the sessions NOT selected for expiration.
     for fresh_sid in fresh_sids:
         # Validate and touch.
         is_valid = self.session_manager.validate(fresh_sid, touch=1)
         assert is_valid, ("validate(%r) unexpectedly failed." % fresh_sid)
     # Wait "just over" the expiration time for the "untouched" sessions.
     pause(ttl / 2.0 + 0.1)
     # Confirm that the correct sessions are valid/stale.
     for sid in all_sids:
         is_valid = self.session_manager.validate(sid)
         if is_valid:
             assert sid in fresh_sids, (
                 "%r is fresh, but should be stagnant." % sid)
         else:
             assert sid in stagnant_sids, (
                 "%r is stagnant, but should be fresh." % sid)
     # Force the SessionManager to collect "stale" sessions.
     count = self.session_manager.collect()
     assert count == total / 2, (
         "Excepted to collect %d expired sessions, not %d." %
         (total / 2, count))
     # Wait for the all the original sessions to expire.
     pause(ttl / 2.0 + 0.1)
     # Create enough new sessions to force a background collection.
     configuration = self.session_manager.configuration()
     for i in range(0, int(configuration['_collection_threshold']) + 1):
         self.session_manager.create('mpxadmin', 'mpxadmin', 1)
     # Ensure that any pending background collection finishes.
     for i in range(0, 10):
         if self.session_manager._collection_action is not None:
             # Allow the background collection to get scheduled.
             pause(0.1)
         else:
             break
     # Confirm that all the expired sessions were already collected.
     count = self.session_manager.collect()
     assert count == 0, (
         "Crossing the _collection_threshold did not cause the expected"
         " background collection.")
     return
Example #52
 def test_ttl_collect(self, quite=0):
     # Delete the current SessionManager.
     self._del()
     # Instantiate a new SessionManager, setting ttl to 2 seconds.
     ttl=2.0
     self._new(ttl=ttl)
     total = 10
     fresh_sids = []
     stagnant_sids = []
     all_sids = []
     # Instantiate <code>total</code> (10) new sessions.
     for i in range(0,total):
         fresh_sid = self.session_manager.create('mpxadmin','mpxadmin',1)
         fresh_sids.append(fresh_sid)
         all_sids.append(fresh_sid)
     # Select half the sessions for expiration.
     stagnate = range(0,total,2)
     stagnate.reverse()
     for i in stagnate:
         stagnant_sid = fresh_sids.pop(i)
         stagnant_sids.append(stagnant_sid)
     # Wait for half of the session expiration time.
     pause(ttl/2.0)
     # "Touch" the sessions NOT selected for expiration.
     for fresh_sid in fresh_sids:
         # Validate and touch.
         is_valid = self.session_manager.validate(fresh_sid, touch=1)
         assert is_valid, ("validate(%r) unexpectedly failed." % fresh_sid)
     # Wait "just over" the expiration time for the "untouched" sessions.
     pause(ttl/2.0+0.1)
     # Confirm that the correct sessions are valid/stale.
     for sid in all_sids:
         is_valid = self.session_manager.validate(sid)
         if is_valid:
             assert sid in fresh_sids, (
                 "%r is fresh, but should be stagnant." % sid)
         else:
             assert sid in stagnant_sids, (
                 "%r is stagnant, but should be fresh." % sid)
     # Force the SessionManager to collect "stale" sessions.
     count = self.session_manager.collect()
     assert count == total/2, (
         "Excepted to collect %d expired sessions, not %d." % (total/2,
                                                               count)
         )
     # Wait for the all the original sessions to expire.
     pause(ttl/2.0+0.1)
     # Create enough new sessions to force a background collection.
     configuration = self.session_manager.configuration()
     for i in range(0,int(configuration['_collection_threshold'])+1):
         self.session_manager.create('mpxadmin','mpxadmin',1)
     # Ensure that any pending background collection finishes.
     for i in range(0,10):
         if self.session_manager._collection_action is not None:
             # Allow the background collection to get scheduled.
             pause(0.1)
         else:
             break
     # Confirm that all the expired sessions were already collected.
     count = self.session_manager.collect()
     assert count == 0, (
         "Crossing the _collection_threshold did not cause the expected"
         " background collection.")
     return
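The test_ttl_collect examples above drive a SessionManager whose sessions expire a fixed ttl after creation unless they are touched, and whose collect() sweeps out the stale ones. The bookkeeping being exercised boils down to a small time-to-live table; the class below is a sketch of that idea only, not the real SessionManager.

import time

class TTLTable(object):
    def __init__(self, ttl):
        self.ttl = ttl
        self._expires = {}

    def create(self, key):
        # New entries expire `ttl` seconds from now.
        self._expires[key] = time.time() + self.ttl

    def validate(self, key, touch=False):
        # Report whether `key` is still fresh; optionally push its
        # expiration out by another `ttl` seconds.
        expires = self._expires.get(key)
        if expires is None or expires < time.time():
            return False
        if touch:
            self._expires[key] = time.time() + self.ttl
        return True

    def collect(self):
        # Drop expired entries and report how many were removed.
        now = time.time()
        stale = [k for k, exp in self._expires.items() if exp < now]
        for k in stale:
            del self._expires[k]
        return len(stale)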