Example #1
    def checkReadBandwidth(self):
        """Checks if we've passed bandwidth limits."""
        if self.readLimit and self.readThisSecond > self.readLimit:
            self.throttleReads()
            # e.g. reading at twice the limit throttles for one extra second
            throttleTime = (float(self.readThisSecond) / self.readLimit) - 1.0
            self.unthrottleReadsID = reactor.callLater(throttleTime,
                                                       self.unthrottleReads)

        nzbFiles = []
        if Hellanzb.downloading:
            # Update the total download rate and each NZBFile's rate and download percentage
            self.rate = self.readThisSecond
            for nsf in Hellanzb.nsfs:
                for activeClient in nsf.activeClients:
                    if activeClient.currentSegment and \
                            activeClient.currentSegment.nzbFile not in nzbFiles:
                        nzbFile = activeClient.currentSegment.nzbFile
                        nzbFile.rate = nzbFile.readThisSecond

                        talliedBytes = float(nzbFile.totalReadBytes + nzbFile.totalSkippedBytes)
                        percentage = int(talliedBytes / max(1, nzbFile.totalBytes) * 100)
                        nzbFile.downloadPercentage = min(100, percentage)

                        nzbFiles.append(nzbFile)

            Hellanzb.scroller.updateLog()

        # Reset the rate counters
        self.readThisSecond = 0
        for nzbFile in nzbFiles:
            nzbFile.readThisSecond = 0

        self.checkReadBandwidthID = reactor.callLater(1, self.checkReadBandwidth)
Example #2
    def buildFinished(self, text, results):
        """This method must be called when the last Step has completed. It
        marks the Build as complete and returns the Builder to the 'idle'
        state.

        It takes two arguments which describe the overall build status:
        text, results. 'results' is one of SUCCESS, WARNINGS, or FAILURE.

        If 'results' is SUCCESS or WARNINGS, we will permit any dependent
        builds to start. If it is 'FAILURE', those builds will be
        abandoned."""

        self.finished = True
        if self.remote:
            self.remote.dontNotifyOnDisconnect(self.lostRemote)
        self.results = results

        log.msg(" %s: build finished" % self)
        self.build_status.setText(text)
        self.build_status.setResults(results)
        self.build_status.buildFinished()
        if self.progress and results == SUCCESS:
            # XXX: also test a 'timing consistent' flag?
            log.msg(" setting expectations for next time")
            self.builder.setExpectations(self.progress)
        reactor.callLater(0, self.releaseLocks)
        self.deferred.callback(self)
        self.deferred = None
Example #3
def handleError():
    from twisted.python import failure
    global exitStatus
    exitStatus = 2
    reactor.callLater(0.01, _stopReactor)
    log.err(failure.Failure())
    raise
Example #4
 def createDirectory(self, childName):
     d = defer.Deferred()
     d2 = defer.maybeDeferred(inmem.FakeDirectory.createDirectory,
                              self, childName)
     from twisted.internet import reactor
     reactor.callLater(1, d2.chainDeferred, d)
     return d
Example #5
    @defer.inlineCallbacks
    def testStopAndCancelWithOneUnderway(self):
        """
        Start a dispatch queue of width 2, and send it 3 jobs. Verify that
        2 of the jobs are underway. Then stop it before they can complete,
        telling it to cancel the underway jobs. The two jobs that were
        underway should both be cancelled and returned by the stop method.
        The first 2 jobs returned should have state CANCELLED, and the
        final one should still be PENDING.
        """
        def ok(result):
            self.fail('Unexpected success!')

        def checkCancel(failure):
            self.assertEqual(failure.value.state, Job.CANCELLED)

        dq = ResizableDispatchQueue(self.slow, 2)
        dq.put(0).addCallbacks(ok, checkCancel)
        dq.put(1).addCallbacks(ok, checkCancel)
        dq.put(2)
        reactor.callLater(0.01, self._testUnderway, dq, set([0, 1]))
        pendingJobs = yield task.deferLater(
            reactor, 0.1, dq.stop, cancelUnderway=True)
        pendingArgs = [p.jobarg for p in pendingJobs]
        self.assertEqual([0, 1, 2], sorted(pendingArgs))
        self.assertEqual(pendingJobs[0].state, Job.CANCELLED)
        self.assertEqual(pendingJobs[1].state, Job.CANCELLED)
        self.assertEqual(pendingJobs[2].state, Job.PENDING)
Example #6
 def _NH_IncomingRequestRejected(self, notification):
     request = notification.sender
     if request is not self.incoming_request:
         return
     log.msg('Incoming request rejected')
     self.incoming_request = None
     reactor.callLater(30, self._end_current_session)
Example #7
 def launch(howmany):        
     print 'launch' + str(howmany)            
     if howmany > 0:
         loop('test' + str(howmany), Gardener())
         reactor.callLater(1, lambda: launch(howmany - 1))
     elif howmany == 0:
         print 'done!'
Example #8
 def remote_shutdown(self):
     log.msg("worker shutting down on command from master")
     # there's no good way to learn that the PB response has been delivered,
     # so we'll just wait a bit, in hopes the master hears back.  Masters are
     # resilient to workers dropping their connections, so there is no harm
     # if this timeout is too short.
     reactor.callLater(0.2, reactor.stop)
Example #9
 def start(self):       
    myJid = jid.JID(self.username)
    factory = client.XMPPClientFactory(myJid, self.password)
    factory.addBootstrap(xmlstream.STREAM_AUTHD_EVENT, self.authd)       
    connector = SRVConnector(reactor, 'xmpp-client', self.jabberserver, factory)       
    reactor.callLater(5, self.stop)
    connector.connect()    
Example #10
    def render(self, request):
        now = time()
        delta = now - self.lasttime

        # reset stats on high inter-request times caused by client restarts
        if delta > 3:  # seconds
            self._reset_stats()
            return ''

        self.tail.appendleft(delta)
        self.lasttime = now
        self.concurrent += 1

        if now - self.lastmark >= 3:
            self.lastmark = now
            qps = len(self.tail) / sum(self.tail)
            print('samplesize={0} concurrent={1} qps={2:0.2f}'.format(len(self.tail), self.concurrent, qps))

        if 'latency' in request.args:
            latency = float(request.args['latency'][0])
            reactor.callLater(latency, self._finish, request)
            return NOT_DONE_YET

        self.concurrent -= 1
        return ''
Example #11
 def try_login(self, uname):
     if (self.cf.instance != None):
         self.cf.instance.login(uname)
         self.uname = uname
         return True
     
     reactor.callLater(0.25, self.try_login, uname)
Example #12
 def _maybe_retry(e):
     log.err()
     if attempt < self.max_attempts:
         reactor.callLater(attempt * self.retry_multiplier,
                           self._retrying_fetch, u, data, event, attempt + 1)
     else:
         return e
Example #13
 def timeout_checker(self):
     """
     Called periodically to enforce timeout rules on all connections.
     Also checks pings at the same time.
     """
     self.factory.check_timeouts()
     reactor.callLater(2, self.timeout_checker)
Example #14
    def closedown(self):
        self.stopService()
        try:
            reactor.callLater(0.5, reactor.stop)
#            reactor.stop()
        except ReactorNotRunning:
            pass
Example #15
    def testKeyboardInterrupt(self):
        # Test that KeyboardInterrupt is *not* caught by wait -- we
        # want to allow users to Ctrl-C test runs.  And the use of
        # useWaitError should not matter in this case.
        def raiseKeyInt(ignored):

            # XXX Abstraction violation, I suppose.  However: signals are
            # unreliable, so using them to simulate a KeyboardInterrupt
            # would be sketchy too; os.kill() is not available on Windows,
            # so we can't use that and let this run on Win32; raising
            # KeyboardInterrupt itself is wholly unrealistic, as the
            # reactor would normally block SIGINT for its own purposes and
            # not allow a KeyboardInterrupt to happen at all!

            if interfaces.IReactorThreads.providedBy(reactor):
                reactor.callInThread(reactor.sigInt)
            else:
                reactor.callLater(0, reactor.sigInt)
            return defer.Deferred()

        d = defer.Deferred()
        d.addCallback(raiseKeyInt)
        reactor.callLater(0, d.callback, True)
        self.assertRaises(KeyboardInterrupt, util.wait, d, useWaitError=False)

        d = defer.Deferred()
        d.addCallback(raiseKeyInt)
        reactor.callLater(0, d.callback, True)
        self.assertRaises(KeyboardInterrupt, util.wait, d, useWaitError=True)
Example #16
 def buildProtocol(self, addr):
     self.resetDelay()
     proto = self.protocol()
     proto.factory = self
     reactor.callLater(0, self.connected, proto)
     self.proto = proto
     return proto
Example #17
def sync_dirty_attributes(queue, loop=True):
    _l = queue.qsize()
    if _l > 0:
        if loop:
            _times = min(_l, MAX_SYNC_CNT_PER_LOOP) 
        else:
            _times = _l

        i = 0
        while i < _times:
            i += 1

            try:
                attr = queue.get_nowait()
                attr.syncdb()
            except Queue.Empty:
                break
            except:
                pass

        log.info('End sync character to db, total: {0}, dirty attributes length: {1}'.format( _times, queue.qsize() ))

    if loop:
        reactor.callLater(SYNC_DB_INTERVAL, sync_dirty_attributes, queue)
    else:
        log.debug('End sync db, dirty attributes length {0}, loop:{1}'.format(
            queue.qsize(), loop))
Example #18
    @defer.inlineCallbacks
    def update_session(self, updateLog):
        connector = self.pb['smppcm'].getConnector(self.sessionContext['cid'])
        connectorDetails = self.pb['smppcm'].getConnectorDetails(self.sessionContext['cid'])
        for key, value in updateLog.iteritems():
            connector['config'].set(key, value)

        if connector['config'].PendingRestart and connectorDetails['service_status'] == 1:
            self.protocol.sendData('Restarting connector [%s] for updates to take effect ...' % self.sessionContext['cid'], prompt=False)
            st = yield self.pb['smppcm'].perspective_connector_stop(self.sessionContext['cid'])
            if not st:
                self.protocol.sendData('Failed stopping connector, check log for details', prompt=False)
            else:
                st = yield self.pb['smppcm'].perspective_connector_start(self.sessionContext['cid'])
                if not st:
                    self.protocol.sendData('Failed starting connector, will retry in 5 seconds', prompt=False)

                    # Wait before start retrial
                    exitDeferred = defer.Deferred()
                    reactor.callLater(5, exitDeferred.callback, None)
                    yield exitDeferred

                    st = yield self.pb['smppcm'].perspective_connector_start(self.sessionContext['cid'])
                    if not st:
                        self.protocol.sendData('Permanently failed starting connector !', prompt=False)

        self.protocol.sendData('Successfully updated connector [%s]' % self.sessionContext['cid'], prompt=False)
        self.stopSession()
Example #19
    def test_mount(self):
        """
        ``/VolumeDriver.Mount`` sets the primary of the dataset with matching
        name to the current node and then waits for the dataset to
        actually arrive.
        """
        name = u"myvol"
        dataset_id = UUID(dataset_id_from_name(name))
        # Create dataset on a different node:
        d = self.flocker_client.create_dataset(
            self.NODE_B, DEFAULT_SIZE, metadata={u"name": name},
            dataset_id=dataset_id)

        # After two polling intervals the dataset arrives as state:
        reactor.callLater(VolumePlugin._POLL_INTERVAL,
                          self.flocker_client.synchronize_state)

        d.addCallback(lambda _:
                      self.assertResult(
                          b"POST", b"/VolumeDriver.Mount",
                          {u"Name": name}, OK,
                          {u"Err": None,
                           u"Mountpoint": u"/flocker/{}".format(dataset_id)}))
        d.addCallback(lambda _: self.flocker_client.list_datasets_state())
        d.addCallback(lambda ds: self.assertEqual(
            [self.NODE_A], [d.primary for d in ds
                            if d.dataset_id == dataset_id]))
        return d
Example #20
 def print_traffic():
     self.log.info("Traffic {}: {} / {} in / out bytes - {} / {} in / out msgs".format(self.peer,
                                                                                       self.trafficStats.incomingOctetsWireLevel,
                                                                                       self.trafficStats.outgoingOctetsWireLevel,
                                                                                       self.trafficStats.incomingWebSocketMessages,
                                                                                       self.trafficStats.outgoingWebSocketMessages))
     reactor.callLater(1, print_traffic)
Example #21
 def _processQueue(self):
     def processFinished(value, processProtocol):
         self._num_running -= 1
         reactor.callLater(0, self._processQueue)
         
         execTime =  processProtocol.execStopTime - processProtocol.execStartTime
         qTime = processProtocol.queueStopTime - processProtocol.queueStartTime
         self._maxQtime = max(self._maxQtime, qTime)
         self._maxExecTime = max(self._maxExecTime, execTime)
         log.debug("execution time %s seconds; queue time %s seconds; "
                   "process %s" 
                   % ( execTime, qTime, processProtocol))
         if (self._num_running == 0 
             and self._stopped 
             and not self._stopped.called 
             and len(self._processes) == 0):
             self._stopped.callback("process queue is empty and stopped")
     log.debug("Number of process being executed: %s" % self._num_running)
     if self._num_running < self._parallel:
         processQProtocol = None
         if self._processes:
             processQProtocol = self._processes.popleft()
         if processQProtocol:
             self._num_running += 1
             d = processQProtocol.start()
             d.addBoth(processFinished, processQProtocol)
     
     if self._processes and self._num_running < self._parallel:
         reactor.callLater(0, self._processQueue)
     return
Example #22
                    def on_fsevent(evt):
                        worker.watcher.stop()
                        proto.signal('TERM')

                        if options['watch'].get('action', None) == 'restart':
                            log.msg("Restarting guest ..")
                            reactor.callLater(0.1, self.start_guest, id, config, details)
Example #23
 def measure(self):
     # TODO: Improve this with a looping call
     wl = self.dll.CLGetLambdaReading(self.handle)
     power = self.dll.CLGetPowerReading(self.handle)
     self.freqchanged = wl
     self.powerchanged = power
     reactor.callLater(0.1, self.measure)
Example #24
def hold(lock, owner, la, mode="now"):
    if mode == "now":
        lock.release(owner, la)
    elif mode == "very soon":
        reactor.callLater(0, lock.release, owner, la)
    elif mode == "soon":
        reactor.callLater(0.1, lock.release, owner, la)
Example #25
 def __init__(self):
     self.clients = []
     self.game = Game()
     self.mob = Actor("eeeeeeewwwwwww")
     self.game.place(self.mob, (1,1))
     self.game.register(self.on_notify)
     reactor.callLater(1, self.tick)
Example #26
 def commandRestore(self, parts, fromloc, overriderank):
     "/restore worldname number - Op\nRestore world to indicated number."
     if len(parts) < 2:
         self.client.sendServerMessage("Please specify at least a world ID!")
     else:
         world_id = parts[1].lower()
         world_dir = ("worlds/%s/" % world_id)
         if len(parts) < 3:
             backups = os.listdir(world_dir + "backup/")
             backups.sort(lambda x, y: int(x) - int(y))
             backup_number = str(int(backups[-1]))
         else:
             backup_number = parts[2]
         if not os.path.exists(world_dir + "backup/%s/" % backup_number):
             self.client.sendServerMessage("Backup %s does not exist." % backup_number)
         else:
             if not os.path.exists(world_dir + "blocks.gz.new"):
                 shutil.copy((world_dir + "backup/%s/blocks.gz" % backup_number), world_dir)
                 if os.path.exists(world_dir + "backup/%s/world.meta" % backup_number):
                     shutil.copy((world_dir + "backup/%s/world.meta" % backup_number), world_dir)
             else:
                 reactor.callLater(1, self.commandRestore, parts, fromloc, overriderank)
             self.client.factory.unloadWorld(world_id, skiperror=True)
             self.client.sendServerMessage("%s has been restored to %s and booted." % (world_id, backup_number))
             if world_id in self.client.factory.worlds:
                 for client in self.client.factory.worlds[world_id].clients:
                     client.changeToWorld(world_id)
Example #27
 def _work_done(res):
     log.msg("Completed a piece of work")
     self.queue.pop(0)
     if self.queue:
         log.msg("Preparing next piece of work")
         reactor.callLater(0, self._process)
     return res
Example #28
    def connected(self, msg):
        """Once I've connected I want to subscribe to my the message queue.
        """
        stomper.Engine.connected(self, msg)

        self.log.info("senderID:%s Connected: session %s." % (
            self.senderID, 
            msg['headers']['session'])
        )

        # I originally called LoopingCall(self.send) directly; however, it
        # turned out that we had not fully subscribed yet, so we did not
        # receive our first sent message. I fixed this by deferring the
        # start with reactor.callLater.
        def setup_looping_call():
            lc = LoopingCall(self.send)
            lc.start(2)
            
        reactor.callLater(1, setup_looping_call)

        f = stomper.Frame()
        f.unpack(stomper.subscribe(DESTINATION))

        # ActiveMQ specific headers:
        #
        # prevent the messages we send coming back to us.
        f.headers['activemq.noLocal'] = 'true'
        
        return f.pack()
Example #29
    def _postTo(self, callbacks, service, nodeIdentifier,
                      payload=None, contentType=None, eventType=None,
                      redirectURI=None):

        if not callbacks:
            return

        postdata = None
        nodeURI = getXMPPURI(service, nodeIdentifier)
        headers = {'Referer': nodeURI.encode('utf-8'),
                   'PubSub-Service': service.full().encode('utf-8')}

        if payload:
            postdata = payload.toXml().encode('utf-8')
            if contentType:
                headers['Content-Type'] = "%s;charset=utf-8" % contentType

        if eventType:
            headers['Event'] = eventType

        if redirectURI:
            headers['Link'] = '<%s>; rel=alternate' % (
                              redirectURI.encode('utf-8'),
                              )

        def postNotification(callbackURI):
            f = getPageWithFactory(str(callbackURI),
                                   method='POST',
                                   postdata=postdata,
                                   headers=headers)
            d = f.deferred
            d.addErrback(log.err)

        for callbackURI in callbacks:
            reactor.callLater(0, postNotification, callbackURI)
Example #30
    @defer.inlineCallbacks
    def test_quick_restart(self):
        "Testing for #68: restarting a connector too quickly will lose its session state"

        # Add a connector and start it
        extraCommands = [{'command': 'cid operator_1'},
                         {'command': 'port %s' % self.SMSCPort.getHost().port},]
        yield self.add_connector(r'jcli : ', extraCommands)
        yield self.start_connector('operator_1', wait = 3)

        # List and assert it is BOUND
        expectedList = ['#Connector id                        Service Session          Starts Stops', 
                        '#operator_1                          started BOUND_TRX        1      0    ', 
                        'Total connectors: 1']
        commands = [{'command': 'smppccm -l', 'expect': expectedList}]
        yield self._test(r'jcli : ', commands)

        # Stopping and starting very quickly leads to an error starting the
        # connector because there was not enough time for the unbind to complete
        yield self.stop_connector('operator_1', finalPrompt = None, wait = 0)
        yield self.start_connector('operator_1', finalPrompt = None, 
                                    wait = 0, 
                                    expect= 'Failed starting connector, check log for details')

        # Wait
        exitDeferred = defer.Deferred()
        reactor.callLater(2, exitDeferred.callback, None)
        yield exitDeferred

        # List and assert it is stopped (start command errored)
        expectedList = ['#Connector id                        Service Session          Starts Stops', 
                        '#operator_1                          stopped NONE             1      1    ', 
                        'Total connectors: 1']
        commands = [{'command': 'smppccm -l', 'expect': expectedList}]
        yield self._test(r'jcli : ', commands)
Example #31
 def do_stop(self, arg):
     self.clear_all_breaks()
     self.set_continue()
     from twisted.internet import reactor
     reactor.callLater(0, reactor.stop)
     return 1
Example #32
    def _did_timeout(self, deferred):
        if deferred.called:
            return
        deferred.errback(Failure(TimeoutError("OpenSIPS command did timeout")))

    def send(self, request):
        self.deferred = request.deferred
        try:
            self.transport.write(request.command, OpenSIPSConfig.socket_path)
        except socket.error, why:
            log.error("cannot write request to `%s': %s" %
                      (OpenSIPSConfig.socket_path, why[1]))
            self.deferred.errback(
                Failure(CommandError("Cannot send request to OpenSIPS")))
        else:
            reactor.callLater(self.timeout, self._did_timeout, self.deferred)


class UNIXSocketConnectionPool(object):
    """Pool of UNIX socket connection to OpenSIPS"""
    def __init__(self, max_connections=10, pool_id=''):
        assert max_connections > 0, 'maximum should be > 0'
        self.max = max_connections
        self.id = pool_id
        self.workers = 0
        self.waiters = deque()
        self.connections = deque()

    def _create_connections_as_needed(self):
        while self.workers < self.max and len(self.waiters) > len(
                self.connections):
Example #33
 def schedule(self):
     when = self._get_next_time()
     #log.debug("Next prevhash update in %.03f sec" % when)
     #log.debug("Merkle update in next %.03f sec" % \
     #          ((self.registry.last_update + config.MERKLE_REFRESH_INTERVAL)-posix_time()))
     self.clock = reactor.callLater(when, self.run) #@UndefinedVariable
Example #34
    def activate(self, device):
        self.log.info('activating')

        # first we verify that we got parent reference and proxy info
        assert device.parent_id, 'Invalid Parent ID'
        assert device.proxy_address.device_id, 'Invalid Device ID'

        if device.vlan:
            # vlan non-zero if created via legacy method (not xPON). Also
            # Set a random serial number since not xPON based
            self._olt_created = True

        # register for proxied messages right away
        self.proxy_address = device.proxy_address
        self.adapter_agent.register_for_proxied_messages(device.proxy_address)

        # initialize device info
        device.root = True
        device.vendor = 'Adtran Inc.'
        device.model = 'n/a'
        device.hardware_version = 'n/a'
        device.firmware_version = 'n/a'
        device.reason = ''
        device.connect_status = ConnectStatus.UNKNOWN

        ############################################################################
        # Setup PM configuration for this device

        self.pm_metrics = OnuPmMetrics(self, device, grouped=True, freq_override=False)
        pm_config = self.pm_metrics.make_proto()
        self.log.info("initial-pm-config", pm_config=pm_config)
        self.adapter_agent.update_device_pm_config(pm_config, init=True)

        ############################################################################
        # Setup Alarm handler

        self.alarms = AdapterAlarms(self.adapter, device.id)

        # reference of uni_port is required when re-enabling the device if
        # it was disabled previously
        # Need to query ONU for number of supported uni ports
        # For now, temporarily set number of ports to 1 - port #2

        parent_device = self.adapter_agent.get_device(device.parent_id)
        self.logical_device_id = parent_device.parent_id
        assert self.logical_device_id, 'Invalid logical device ID'

        # Register physical ports.  Should have at least one of each

        self._pon = PonPort.create(self, self._next_port_number)
        self.adapter_agent.add_port(device.id, self._pon.get_port())

        if self._olt_created:
            # vlan non-zero if created via legacy method (not xPON). Also
            # Set a random serial number since not xPON based

            uni_port = UniPort.create(self, self._next_port_number, device.vlan,
                                      'deprecated', device.vlan, None)
            self._unis[uni_port.port_number] = uni_port
            self.adapter_agent.add_port(device.id, uni_port.get_port())

            device.serial_number = uuid4().hex
            uni_port.add_logical_port(device.vlan, subscriber_vlan=device.vlan)

            # Start things up for this ONU Handler.
            self.enabled = True

        # Start collecting stats from the device after a brief pause
        reactor.callLater(30, self.start_kpi_collection, device.id)

        self.adapter_agent.update_device(device)
Example #35
 def loop(self, _):
     reactor.callLater(self.delay, self.source.deferredReceived)
Example #36
 def schedule(self, delay=0):
     from twisted.internet import reactor
     if self._call is None:
         self._call = reactor.callLater(delay, self)
Example #37
def newid_no_signal(left, all_done):
    if left > 0:
        print('Waiting %d more seconds.' % left)
        reactor.callLater(1, newid_no_signal, left - 1, all_done)
    else:
        all_done.errback(RuntimeError('no acknowledgement in 10 seconds.'))
Example #38
 def is_ready(self):
     deferred = defer.Deferred()
     reactor.callLater(1, deferred.callback, True)
     return deferred
Example #39
def getTaxiData():
    now = datetime.datetime.now()
    with urllib.request.urlopen(taxiApiUrl) as url:
        data = json.loads(url.read().decode())
        #print(data)
        print(data['features'][0]['properties']['timestamp'])

        #saving
        nowStr = now.strftime("%Y%m%d_%H%M%S_%s")
        fnToSaveTo = 'taxi_%s.json' % (nowStr, )
        print('Saving to %s\n' % fnToSaveTo)
        with open(fnToSaveTo, 'w+') as outfile:
            json.dump(data, outfile)

    reactor.callLater(60.0, getTaxiData)  # start the rest 60s later


def f(s):
    print("this will run 3.5 seconds after it was scheduled: %s" % (s, ))

    reactor.callLater(3.5, f, "hello, world")


#reactor.callLater(3.5, f, "hello, world")
reactor.callLater(10.0, getTaxiData)  # start 1st run 10s later

# f() will only be called if the event loop is started.
reactor.run()

#getTaxiData()
Example #40
 def _ntp_service_start(self):
     deferred = defer.Deferred()
     reactor.callLater(0.5, deferred.callback, 'Enabled')
     return deferred
Example #41
 def start(self):
     # don't complete immediately, or synchronously
     reactor.callLater(0, self.doStuff)
Example #42
def f(s):
    print("this will run 3.5 seconds after it was scheduled: %s" % (s, ))

    reactor.callLater(3.5, f, "hello, world")
Example #43
 def poll(self):
     reactor.callLater(0, self.polledFn)
     return defer.succeed(None)
Example #44
 def schedule_next_crawl(null, sleep_time):
     """
     Schedule the next crawl
     """
     reactor.callLater(sleep_time, crawl)
Example #45
 def on_connect_fail(self, result):
     """Pauses the reactor, returns PluginError. Gets called when connection to deluge daemon fails."""
     log.debug('Connect to deluge daemon failed, result: %s' % result)
     reactor.callLater(0, reactor.pause, plugin.PluginError('Could not connect to deluge daemon', log))
Example #46
 def finishNewLog(self):
     for d in newLogDeferreds:
         reactor.callLater(0, d.callback, None)
Example #47
 def scheduleSend(self):
     if self.deferSendPending and self.deferSendPending.active():
         return
     self.deferSendPending = reactor.callLater(
         settings.TIME_TO_DEFER_SENDING, self.sendQueued)
Example #48
    def on_connect_success(self, result, task, config):
        """Gets called when successfully connected to a daemon."""
        from deluge.ui.client import client
        from twisted.internet import reactor, defer

        if not result:
            log.debug('on_connect_success returned a failed result. BUG?')

        if task.options.test:
            log.debug('Test connection to deluge daemon successful.')
            client.disconnect()
            return

        def format_label(label):
            """Makes a string compliant with deluge label naming rules"""
            return re.sub('[^\w-]+', '_', label.lower())

        def set_torrent_options(torrent_id, entry, opts):
            """Gets called when a torrent was added to the daemon."""
            dlist = []
            if not torrent_id:
                log.error('There was an error adding %s to deluge.' % entry['title'])
                # TODO: Fail entry? How can this happen still now?
                return
            log.info('%s successfully added to deluge.' % entry['title'])
            entry['deluge_id'] = torrent_id

            def create_path(result, path):
                """Creates the specified path if deluge is older than 1.3"""
                from deluge.common import VersionSplit
                # Before 1.3, deluge would not create a non-existent move directory, so we need to.
                if VersionSplit('1.3.0') > VersionSplit(self.deluge_version):
                    if client.is_localhost():
                        if not os.path.isdir(path):
                            log.debug('path %s doesn\'t exist, creating' % path)
                            os.makedirs(path)
                    else:
                        log.warning('If path does not exist on the machine running the daemon, move will fail.')

            if opts.get('movedone'):
                dlist.append(version_deferred.addCallback(create_path, opts['movedone']))
                dlist.append(client.core.set_torrent_move_completed(torrent_id, True))
                dlist.append(client.core.set_torrent_move_completed_path(torrent_id, opts['movedone']))
                log.debug('%s move on complete set to %s' % (entry['title'], opts['movedone']))
            if opts.get('label'):

                def apply_label(result, torrent_id, label):
                    """Gets called after labels and torrent were added to deluge."""
                    return client.label.set_torrent(torrent_id, label)

                dlist.append(label_deferred.addCallback(apply_label, torrent_id, opts['label']))
            if opts.get('queuetotop') is not None:
                if opts['queuetotop']:
                    dlist.append(client.core.queue_top([torrent_id]))
                    log.debug('%s moved to top of queue' % entry['title'])
                else:
                    dlist.append(client.core.queue_bottom([torrent_id]))
                    log.debug('%s moved to bottom of queue' % entry['title'])

            def on_get_torrent_status(status):
                """Gets called with torrent status, including file info.
                Sets the torrent options which require knowledge of the current status of the torrent."""

                main_file_dlist = []

                # Determine where the file should be
                move_now_path = None
                if opts.get('movedone'):
                    if status['progress'] == 100:
                        move_now_path = opts['movedone']
                    else:
                        # Deluge will unset the move completed option if we move the storage, forgo setting proper
                        # path, in favor of leaving proper final location.
                        log.debug('Not moving storage for %s, as this will prevent movedone.' % entry['title'])
                elif opts.get('path'):
                    move_now_path = opts['path']

                if move_now_path and os.path.normpath(move_now_path) != os.path.normpath(status['save_path']):
                    main_file_dlist.append(version_deferred.addCallback(create_path, move_now_path))
                    log.debug('Moving storage for %s to %s' % (entry['title'], move_now_path))
                    main_file_dlist.append(client.core.move_storage([torrent_id], move_now_path))

                if opts.get('content_filename') or opts.get('main_file_only'):

                    def file_exists():
                        # Checks the download path as well as the move completed path for existence of the file
                        if os.path.exists(os.path.join(status['save_path'], filename)):
                            return True
                        elif status.get('move_on_completed') and status.get('move_on_completed_path'):
                            if os.path.exists(os.path.join(status['move_on_completed_path'], filename)):
                                return True
                        else:
                            return False

                    for file in status['files']:
                        # Only rename file if it is > 90% of the content
                        if file['size'] > (status['total_size'] * 0.9):
                            if opts.get('content_filename'):
                                filename = opts['content_filename'] + os.path.splitext(file['path'])[1]
                                counter = 1
                                if client.is_localhost():
                                    while file_exists():
                                        # Try appending a (#) suffix till a unique filename is found
                                        filename = ''.join([opts['content_filename'], '(', str(counter), ')',
                                                            os.path.splitext(file['path'])[1]])
                                        counter += 1
                                else:
                                    log.debug('Cannot ensure content_filename is unique '
                                              'when adding to a remote deluge daemon.')
                                log.debug('File %s in %s renamed to %s' % (file['path'], entry['title'], filename))
                                main_file_dlist.append(
                                    client.core.rename_files(torrent_id, [(file['index'], filename)]))
                            if opts.get('main_file_only'):
                                file_priorities = [1 if f['index'] == file['index'] else 0 for f in status['files']]
                                main_file_dlist.append(
                                    client.core.set_torrent_file_priorities(torrent_id, file_priorities))
                            break
                    else:
                        log.warning('No files in %s are > 90%% of content size, no files renamed.' % entry['title'])

                return defer.DeferredList(main_file_dlist)

            status_keys = ['files', 'total_size', 'save_path', 'move_on_completed_path',
                           'move_on_completed', 'progress']
            dlist.append(client.core.get_torrent_status(torrent_id, status_keys).addCallback(on_get_torrent_status))

            return defer.DeferredList(dlist)

        def on_fail(result, task, entry):
            """Gets called when daemon reports a failure adding the torrent."""
            log.info('%s was not added to deluge! %s' % (entry['title'], result))
            entry.fail('Could not be added to deluge')

        # dlist is a list of deferreds that must complete before we exit
        dlist = []
        # loop through entries to get a list of labels to add
        labels = set([format_label(entry['label']) for entry in task.accepted if entry.get('label')])
        if config.get('label'):
            labels.add(format_label(config['label']))
        label_deferred = defer.succeed(True)
        if labels:
            # Make sure the label plugin is available and enabled, then add appropriate labels

            def on_get_enabled_plugins(plugins):
                """Gets called with the list of enabled deluge plugins."""

                def on_label_enabled(result):
                    """ This runs when we verify the label plugin is enabled. """

                    def on_get_labels(d_labels):
                        """Gets available labels from deluge, and adds any new labels we need."""
                        dlist = []
                        for label in labels:
                            if not label in d_labels:
                                log.debug('Adding the label %s to deluge' % label)
                                dlist.append(client.label.add(label))
                        return defer.DeferredList(dlist)

                    return client.label.get_labels().addCallback(on_get_labels)

                if 'Label' in plugins:
                    return on_label_enabled(True)
                else:
                    # Label plugin isn't enabled, so we check if it's available and enable it.

                    def on_get_available_plugins(plugins):
                        """Gets plugins available to deluge, enables Label plugin if available."""
                        if 'Label' in plugins:
                            log.debug('Enabling label plugin in deluge')
                            return client.core.enable_plugin('Label').addCallback(on_label_enabled)
                        else:
                            log.error('Label plugin is not installed in deluge')

                    return client.core.get_available_plugins().addCallback(on_get_available_plugins)

            label_deferred = client.core.get_enabled_plugins().addCallback(on_get_enabled_plugins)
            dlist.append(label_deferred)

        def on_get_daemon_info(ver):
            """Gets called with the daemon version info, stores it in self."""
            log.debug('deluge version %s' % ver)
            self.deluge_version = ver

        version_deferred = client.daemon.info().addCallback(on_get_daemon_info)
        dlist.append(version_deferred)

        def on_get_session_state(torrent_ids):
            """Gets called with a list of torrent_ids loaded in the deluge session.
            Adds new torrents and modifies the settings for ones already in the session."""
            dlist = []
            # add the torrents
            for entry in task.accepted:

                def add_entry(entry, opts):
                    """Adds an entry to the deluge session"""
                    magnet, filedump = None, None
                    if entry.get('url', '').startswith('magnet:'):
                        magnet = entry['url']
                    else:
                        if not os.path.exists(entry['file']):
                            entry.fail('Downloaded temp file \'%s\' doesn\'t exist!' % entry['file'])
                            del(entry['file'])
                            return
                        with open(entry['file'], 'rb') as f:
                            filedump = base64.encodestring(f.read())

                    log.verbose('Adding %s to deluge.' % entry['title'])
                    if magnet:
                        return client.core.add_torrent_magnet(magnet, opts)
                    else:
                        return client.core.add_torrent_file(entry['title'], filedump, opts)

                # Generate deluge options dict for torrent add
                add_opts = {}
                try:
                    path = entry.render(entry.get('path', config['path']))
                    if path:
                        add_opts['download_location'] = pathscrub(os.path.expanduser(path))
                except RenderError as e:
                    log.error('Could not set path for %s: %s' % (entry['title'], e))
                for fopt, dopt in self.options.iteritems():
                    value = entry.get(fopt, config.get(fopt))
                    if value is not None:
                        add_opts[dopt] = value
                        if fopt == 'ratio':
                            add_opts['stop_at_ratio'] = True
                # Make another set of options, that get set after the torrent has been added
                modify_opts = {'label': format_label(entry.get('label', config['label'])),
                               'queuetotop': entry.get('queuetotop', config.get('queuetotop')),
                               'main_file_only': entry.get('main_file_only', config.get('main_file_only', False))}
                try:
                    movedone = entry.render(entry.get('movedone', config['movedone']))
                    modify_opts['movedone'] = pathscrub(os.path.expanduser(movedone))
                except RenderError as e:
                    log.error('Error setting movedone for %s: %s' % (entry['title'], e))
                try:
                    content_filename = entry.get('content_filename', config.get('content_filename', ''))
                    modify_opts['content_filename'] = pathscrub(entry.render(content_filename))
                except RenderError as e:
                    log.error('Error setting content_filename for %s: %s' % (entry['title'], e))

                torrent_id = entry.get('deluge_id') or entry.get('torrent_info_hash')
                torrent_id = torrent_id and torrent_id.lower()
                if torrent_id in torrent_ids:
                    log.info('%s is already loaded in deluge, setting options' % entry['title'])
                    # Entry has a deluge id, verify the torrent is still in the deluge session and apply options
                    # Since this is already loaded in deluge, we may also need to change the path
                    modify_opts['path'] = add_opts.pop('download_location', None)
                    dlist.extend([set_torrent_options(torrent_id, entry, modify_opts),
                                  client.core.set_torrent_options([torrent_id], add_opts)])
                else:
                    dlist.append(add_entry(entry, add_opts).addCallbacks(
                        set_torrent_options, on_fail, callbackArgs=(entry, modify_opts), errbackArgs=(task, entry)))
            return defer.DeferredList(dlist)
        dlist.append(client.core.get_session_state().addCallback(on_get_session_state))

        def on_complete(result):
            """Gets called when all of our tasks for deluge daemon are complete."""
            client.disconnect()
        tasks = defer.DeferredList(dlist).addBoth(on_complete)

        def on_timeout(result):
            """Gets called if tasks have not completed in 30 seconds.
            Should only happen when something goes wrong."""
            log.error('Timed out while adding torrents to deluge.')
            log.debug('dlist: %s' % result.resultList)
            client.disconnect()

        # Schedule a disconnect to happen if FlexGet hangs while connected to Deluge
        # Leave the timeout long, to give time for possible lookups to occur
        reactor.callLater(600, lambda: tasks.called or on_timeout(tasks))
Example #49
0
            st = headers['st']
        except KeyError:
            Logr.warning("Received message with missing headers")
            return
        except ValueError:
            Logr.warning("Received message with invalid values")
            return

        if man != 'ssdp:discover':
            Logr.warning("Received message where MAN != 'ssdp:discover'")
            return

        if st == 'ssdp:all':
            for target in self.ssdp.targets:
                reactor.callLater(self.rand.randint(1,
                                                    mx), self.respond_MSEARCH,
                                  target, (address, port))
        elif st in self.ssdp.targets:
            reactor.callLater(self.rand.randint(1, mx), self.respond_MSEARCH,
                              st, (address, port))
        else:
            Logr.debug("ignoring %s", st)

    def respond(self, headers, (address, port)):
        Logr.debug("respond() %s:%s", address, port)
        msg = 'HTTP/1.1 200 OK\r\n'
        msg += headers_join(headers)
        msg += '\r\n\r\n'

        try:
            self.transport.write(msg, (address, port))
Example #50
 def on_disconnect(self):
     """Pauses the reactor. Gets called when we disconnect from the daemon."""
     # pause the reactor, so flexget can continue
     reactor.callLater(0, reactor.pause)
Example #51
 def enter_TIME_WAIT(self):
     self._stopRetransmitting()
     self._timeWaitCall = reactor.callLater(self._timeWaitTimeout,
                                            self._do2mslTimeout)
Example #52
 def startedConnecting(self, connector):
     from twisted.internet import reactor
     self.timeout_call = reactor.callLater(self.timeout, self.on_timeout,
                                           connector)
Example #53
 def _writeLater(self):
     if self._nagle is None:
         self._nagle = reactor.callLater(0.001, self._reallyWrite)
Example #54
 def enter_CLOSE_WAIT(self):
     # Twisted automatically reacts to network half-close by issuing a full
     # close.
     self._closeWaitLoseConnection = reactor.callLater(
         0.01, self._loseConnectionBecauseOfCloseWait)
Example #55
0
File: core.py Project: zec/OESS
 def post_callback(self, t, function):
     from twisted.internet import reactor
     reactor.callLater(t, function)
Example #56
 def _retransmitLater(self):
     assert self.state != tcpdfa.CLOSED
     if self._retransmitter is None:
         self._retransmitter = reactor.callLater(self._retransmitTimeout,
                                                 self._reallyRetransmit)
Example #57
 def onResult(self, resp):
     reactor.callLater(
         0.01,
         self.close,
     )
     return resp
Example #58
        model = upnp.upnpInfo.get('friendlyName', 'unknown')
        print("UPnP discovered a %s (%s) device"%(model, manuf))
        print("UPnP controlURL:", upnp.controlURL)
    else:
        print("No UPnP-capable device discovered")
    if sres:
        print("STUN says NAT type: %s"%(stun.name))
        if not upnp:
            if stun.blocked:
                print("You will be unable to make calls to the internet")
            elif not stun.useful:
                print("You will need to use an outbound proxy to make calls to the internet")
    else:
        print("STUN was unable to get a result. This is bad")
    print("And the mapper we'd use is: %r"%(mapper))
    reactor.stop()


if __name__ == "__main__":
    from twisted.internet import reactor
    import sys
    from shtoom import log
    print("Starting",__name__)
    print("="*40)
    if len(sys.argv) > 1 and sys.argv[1] == "-v":
        import shtoom.stun
        shtoom.stun.STUNVERBOSE = True
        log.startLogging(sys.stdout)
    reactor.callLater(0, main)
    reactor.run()
Example #59
 def onMessage(self, msg, binary):
     print "Got echo: " + msg
     reactor.callLater(1, self.sendHello)
Example #60
 def on_ok(result):
     if timeout_call.active():
         timeout_call.cancel()
         self.__next_call = reactor.callLater(self.interval, self.ping)