Ejemplo n.º 1
0
    def __init__(self, delegate, vhost, spec, heartbeat=0, clock=None, insist=False):
        """Initialize the AMQP client protocol.

        delegate  -- event delegate; receives a back-reference to this client.
        vhost     -- AMQP virtual host name.
        spec      -- AMQP protocol spec (also passed to FrameReceiver).
        heartbeat -- heartbeat interval in seconds; 0 disables heartbeating.
        clock     -- scheduler (IReactorTime-like); defaults to the global
                     reactor. Injectable for tests.
        insist    -- AMQP "insist" flag stored for connection setup.
        """
        FrameReceiver.__init__(self, spec)
        self.delegate = delegate

        # XXX Cyclic dependency: the delegate keeps a reference back to us.
        self.delegate.client = self

        self.vhost = vhost

        # Build a concrete channel class mixing our channelClass with the
        # spec-generated class, so channels speak this spec's methods.
        self.channelFactory = type("Channel%s" % self.spec.klass.__name__,
                                   (self.channelClass, self.spec.klass), {})
        self.channels = {}
        self.channelLock = defer.DeferredLock()

        # Frames waiting to be written / work items waiting to be processed.
        self.outgoing = defer.DeferredQueue()
        self.work = defer.DeferredQueue()

        self.started = TwistedEvent()
        self.disconnected = TwistedEvent()  # Fired upon connection shutdown
        self.closed = False

        self.queueLock = defer.DeferredLock()
        self.basic_return_queue = TimeoutDeferredQueue()

        self.queues = {}

        # Start consuming the outgoing/work queues (writer/worker are
        # defined elsewhere on this class).
        self.outgoing.get().addCallback(self.writer)
        self.work.get().addCallback(self.worker)
        self.heartbeatInterval = heartbeat
        self.insist = insist
        if clock is None:
            clock = reactor
        self.clock = clock
        if self.heartbeatInterval > 0:
            # Fail the connection if nothing is heard for
            # MAX_UNSEEN_HEARTBEAT intervals, and send our own heartbeats
            # once the connection has started.
            self.checkHB = self.clock.callLater(self.heartbeatInterval *
                                                self.MAX_UNSEEN_HEARTBEAT, self.check_heartbeat)
            self.sendHB = LoopingCall(self.send_heartbeat)
            self.sendHB.clock = self.clock
            d = self.started.wait()
            d.addCallback(lambda _: self.reschedule_send_heartbeat())
            d.addCallback(lambda _: self.reschedule_check_heartbeat())
            # If self.started fails, don't start the heartbeat.
            d.addErrback(lambda _: None)
Ejemplo n.º 2
0
    def connectionMade(self):
        """Open proxy connections to the master and every slave backend."""
        # Queue feeding data back to the connected client.
        self.srv_queue = defer.DeferredQueue()
        self.srv_queue.get().addCallback(self.clientDataReceived)

        # One outbound queue for the master, plus one per slave.
        self.master_cli_queue = defer.DeferredQueue()
        self.slave_cli_queues = []

        settings = self.factory.master_settings
        reactor.connectTCP(
            settings["host"], settings["port"],
            ProxyClientFactory(self.srv_queue, self.master_cli_queue))

        for settings in self.factory.slave_settings:
            queue = defer.DeferredQueue()
            self.slave_cli_queues.append(queue)
            reactor.connectTCP(
                settings["host"], settings["port"],
                ProxyClientFactory(self.srv_queue, queue))

        # Round-robin iterator over the slave queues.
        self.iter_slave_cli_queues = itertools.cycle(self.slave_cli_queues)
Ejemplo n.º 3
0
 def __init__(self, device):
     """Attach the state machine to *device* and reset all parsing state."""
     super(BufferingStateMachine, self).__init__()
     self.device = device
     self.custom = device.custom
     # Unsolicited messages from the device are queued here.
     self.notifications = defer.DeferredQueue()
     # No command in flight yet; start out idle.
     self.cmd = None
     self.state = 'idle'
     # Plain-string receive buffers for the idle and waiting states
     # (growing a str is efficient on Python 2.5+).
     self.idlebuf = ""
     self.waitbuf = ""
Ejemplo n.º 4
0
 def __init__(self, oper, del_obj=True):
     """Queue-driven dispatcher.

     oper    -- operations/back-end object used by the dispatch helpers.
     del_obj -- stored as self.delete_objects; presumably controls whether
                dispatched objects are deleted afterwards -- confirm in the
                dispatch helpers.
     """
     self.oper = oper
     self.queue = defer.DeferredQueue()
     # Idiomatic defaultdict factories: `list` / `int` instead of lambdas
     # returning [] / 0 (identical behavior, standard form).
     self.presence = defaultdict(list)
     self.qcounter = defaultdict(int)
     # Arm the queue consumer (callback defined elsewhere on this class).
     self.queue.get().addCallback(self._new_data)
     task.LoopingCall(self._auto_dispatch).start(
         1)  # secs between checks
     task.LoopingCall(self._counters_cleanup).start(
         30)  # presence maintenance
     self.delete_objects = del_obj
Ejemplo n.º 5
0
    def __init__(self, factory, poolsize=10, isolated=False):
        """Create an empty connection pool.

        factory  -- used to build new connections on demand.
        poolsize -- maximum number of pooled connections.
        isolated -- opaque flag kept on the instance (semantics defined by
                    the pool's users).
        """
        self.factory = factory
        self.poolsize = poolsize
        self.isolated = isolated

        # Nothing connected yet.
        self.size = 0
        self.pool = []
        self.connectionQueue = defer.DeferredQueue()

        # Deferreds parked until a connection frees up / the pool drains.
        self._waitingForConnection = set()
        self._waitingForEmptyPool = set()
Ejemplo n.º 6
0
    def connectionMade(self):
        """Set up relay queues and resolve the destination for this client."""
        self.srv_queue = defer.DeferredQueue()
        self.cli_queue = defer.DeferredQueue()
        self.srv_queue.get().addCallback(self.clientDataReceived)

        # Identify the connecting peer.
        src_addr = self.transport.getPeer().host
        src_port = self.transport.getPeer().port

        # Destination lookup: (dst_addr, dst_port, flow_id) tuple, or False
        # when no mapping exists yet.
        tuples = self.factory.hosts.get((src_addr, src_port), False)

        # check if we've got a new tuple to connect to and
        # if we're still waiting for connections or not
        # NOTE(review): this busy-wait blocks the calling thread in 0.1 s
        # steps until the mapping appears (or the proxy stops); if this runs
        # in the reactor thread it stalls the whole event loop -- confirm.
        while not tuples and self.factory.proxy.running:
            sleep(0.1)
            tuples = self.factory.hosts.get((src_addr, src_port), False)

        if tuples:
            self.dst_addr, self.dst_port, dst_fid = tuples
            self.dst_flow = self.factory.get_flow(dst_fid)
            print "Connecting to %s:%s" % (self.dst_addr, self.dst_port)
class Globals():
    """Global variables to keep track of experiment flow, image files, etc.

    The following are options, to be edited to suit:
        FULLSCREEN (bool): Toggles fullscreen or windowed mode for pyglet.
        IMAGES_DIR (str): Path to folder with images. Include trailing slash!
        IMAGES_SCALE (float): Scaling factor to resize images.
        LEGAL_FILE_EXTENSIONS (tuple of str): Types of image files to read
            from the directory. Case sensitive! Include leading dot!
            (A tuple, not a list: str.endswith() accepts str or tuple only.)
        LOG_FILE (str): Path to a text file where the log will be written.
            Include extension!
        QTM_PROJECT_DATA_DIR (str): Path to the Data directory in the QTM
            project. Include trailing slash!
        QTM_RT_PASSWORD (str): Password for QTM real-time client control.
            Default is blank ("").
        QTM_RT_PORT (int): Port number (little endian) for QTM real-time
            connection. Default is 22223.
        SCREEN_IDX (int): Index of screen to create pyglet window on. See
            pyglet documentation for more info.


    STATUS_* variables can be changed or added (along with new directories for
    images) to modify experiment flow; this is only a rudimentary example.
    """

    FULLSCREEN = False
    IMAGES_DIR = "PATH_TO_IMAGES_DIR/"
    IMAGES_SCALE = 0.65
    LEGAL_FILE_EXTENSIONS = ('.JPG', '.jpg')
    LOG_FILE = "log.txt"
    QTM_PROJECT_DATA_DIR = "PATH_TO_DATA_DIR/"
    QTM_RT_PASSWORD = ""
    QTM_RT_PORT = 22223
    SCREEN_IDX = 0

    # Experiment flow states.
    STATUS_BEGIN = 0
    STATUS_EXPERIMENT = 1
    STATUS_DONE = 2

    # Collect matching image paths, then shuffle the presentation order.
    # NOTE: this runs at class-definition time, so IMAGES_DIR must exist
    # when the module is imported.
    img_files = []
    for f in os.listdir(IMAGES_DIR):
        if f.endswith(LEGAL_FILE_EXTENSIONS):
            img_files.append(os.path.join(IMAGES_DIR, f))
    print(img_files)  # before shuffle
    random.shuffle(img_files)
    print(img_files)  # after shuffle


    # Runtime state, filled in by the experiment code.
    status = None
    sprite = None
    connection = None
    event_queue = defer.DeferredQueue()

    # Timestamp captured at import time (format MMDDHHMM).
    startTime = datetime.now().strftime('%m%d%H%M')
Ejemplo n.º 8
0
 def __init__(self, func, width=0, size=None, backlog=None):
     """Queue-fed task runner around *func*.

     func    -- callable applied to queued items (by machinery elsewhere
                on this class).
     width   -- maximum concurrency; coerced with int(), must be >= 0.
     size    -- optional size limit for the underlying DeferredQueue.
     backlog -- optional backlog limit for the underlying DeferredQueue.
     """
     self._queue = defer.DeferredQueue(size, backlog)
     self._func = func
     self._pool = DeferredPool()
     self._coop = task.Cooperator()
     # Bookkeeping: current concurrency, pending stop requests, and the
     # set of items currently underway.
     self._currentWidth = 0
     self._pendingStops = 0
     self._underway = set()
     self.stopped = self.paused = False
     self.width = int(width)
     # NOTE(review): assert is stripped under `python -O`; raising
     # ValueError would be sturdier validation.
     assert self.width >= 0
    def start_sniffer_thread(self):
        """Launch the CPU-port sniffer and attach queue consumers."""
        self.packet_queue = defer.DeferredQueue()
        # Sniff on the eth1 side of the switch's CPU port interface pair.
        intf = str(
            self.topo.get_cpu_port_intf(self.sw_name).replace("eth0", "eth1"))
        self.sniffer_thread = SnifferThread(reactor, self.packet_queue, intf)
        self.sniffer_thread.daemon = True  # die when the main thread dies
        self.sniffer_thread.start()

        # Four consumers drain the packet queue concurrently.
        for _ in range(4):
            self._consume_from_packet_queue()
Ejemplo n.º 10
0
 def get_bluetooth_discv_queue(self):
     """Start Bluetooth discovery and return a queue of discovery events."""
     bus = dbus.SystemBus()

     # Route the relevant D-Bus signals to our callbacks.
     bus.add_signal_receiver(self._rem_dev_name_cb, 'RemoteNameUpdated',
                             INTERFACE, SERVICE, DEV)
     bus.add_signal_receiver(self._disc_completed_cb, 'DiscoveryCompleted',
                             INTERFACE, SERVICE, DEV)

     self.queue = defer.DeferredQueue()
     self.adapter.DiscoverDevices()
     return self.queue
Ejemplo n.º 11
0
    def test_cancelQueueAfterSynchronousGet(self):
        """
        When canceling a L{Deferred} from a L{DeferredQueue} that already has
        a result, the cancel should have no effect.
        """
        queue = defer.DeferredQueue()
        waiting = queue.get()
        # Any errback means cancel() incorrectly fired the already-called
        # Deferred.
        waiting.addErrback(lambda _: self.fail("Unexpected errback call!"))
        queue.put(None)
        waiting.cancel()
Ejemplo n.º 12
0
    def __init__(self, nick, connections, *args, **kwargs):
        """
        acts as a connection manager, middle man for incoming events, and processor of outgoing actions.

        :param nick: bot nickname; also used to name this bot's logger.
        :param connections: connection configuration handed to the
            ConnectionManager.
        :param kwargs: 'enabled_plugins' selects which plugins to enable.
        """
        #TODO: maybe support a custom internal key to keep track of bots, not just by 'name'...
        #TODO: if we do it by name, make sure we don't have name dupes, even between networks :-/

        # bot id: nickname plus a unique uuid1-based identifier
        self.nick = nick
        self.id = str(uuid.uuid1())
        self.log = logging.getLogger('{0}.{1}.{2}'.format(
            self.__class__.__module__, self.__class__.__name__, self.nick))
        self.log.info('starting bot')

        #bot manager instance (None until assigned externally)
        self.bot_manager = None

        # setup this bots event queue / consumer, action queue/consumer
        self.event_queue = defer.DeferredQueue()
        self._consume_events(self.event_queue)

        self.action_queue = defer.DeferredQueue()
        self._consume_actions(self.action_queue)

        # setup plugins
        self.enabled_plugins = kwargs.get('enabled_plugins')
        self.plugin_manager = PluginManager(bot=self)
        # self.manager.config.PLUGINS:

        # build connections
        # TODO: create connection manager
        self.connection_manager = ConnectionManager(config=connections,
                                                    bot=self)
        self.log.debug('connections on {0!r}: {1!r}'.format(
            self.nick, self.connection_manager))

        # should have a 'ready' state that we should check before starting?
        self.state = OFF
        self.party_line = None
Ejemplo n.º 13
0
    def __init__(self):
        """Per-connection SMB proxy state: buffers, queues, and counters."""
        self.log = None

        # Raw byte buffers for partially received request/response data.
        self.data_buf = ''
        self.response_data_buf = ''

        self.stats_client = StatsdClient.get()

        # Local connection state: in-flight tree-connect / file open-close
        # requests and what has been established so far.
        self.tree_connect_requests = {}
        self.connected_trees = {}
        self.file_open_requests = {}
        self.file_close_requests = {}
        self.open_files = {}
        self.session_latest_create_request_filename = None
        self.session_latest_tree_connect_path = None

        # Relay queues toward the server and toward the client.
        self.srv_queue = defer.DeferredQueue()
        self.cli_queue = defer.DeferredQueue()
        # Manually bind a reaction to the data that comes from the server
        self.srv_queue.get().addCallback(self.clientDataReceived)

        # The list of NMB packets from the client waiting to be processed
        # (the *_len counter is maintained alongside the queue).
        self.client_pending_packets_queue = defer.DeferredQueue()
        self.client_pending_packets_queue_len = 0
        self.client_pending_packets_queue.get().addCallback(self.process_client_pending_packet)

        # The list of NMB packets from the server waiting to be processed
        self.server_pending_packets_queue = defer.DeferredQueue()
        self.server_pending_packets_queue_len = 0
        self.server_pending_packets_queue.get().addCallback(self.process_server_pending_packet)

        # Whether a shutdown has been requested
        self.shutdown_requested = False
        self.shutdown_deferred = None

        # Misc counters
        self.total_processed_client_packets = 0
        self.total_processed_server_packets = 0
Ejemplo n.º 14
0
 def __init__(self, circuit_manager, request, socks):
     '''
     :param circuit_manager: circuit manager asked for an open circuit to
         attach this stream to
     :param oppy.util.exitrequest.ExitRequest request: connection request
         for this stream
     :param oppy.socks.socks.OppySOCKSProtocol socks: socks protocol
         instance this stream should relay data to and from
     '''
     # No stream id until we're registered on a circuit.
     self.stream_id = None
     # Queues of data read from / to be written to the circuit.
     self._read_queue = defer.DeferredQueue()
     self._write_queue = defer.DeferredQueue()
     self._read_deferred = None
     self._write_deferred = None
     self.request = request
     self.socks = socks
     # Flow-control windows start at the protocol's initial stream window.
     self._deliver_window = STREAM_WINDOW_INIT
     self._package_window = STREAM_WINDOW_INIT
     self.circuit = None
     # set this flag if SOCKS closes our connection before the circuit
     # is done building
     self._closed = False
     # Ask for an open circuit now; _registerNewStream fires when one is
     # available.
     self._circuit_request = circuit_manager.getOpenCircuit(self)
     self._circuit_request.addCallback(self._registerNewStream)
Ejemplo n.º 15
0
    def __init__(self, delegate, vhost, spec, heartbeat=0, clock=None, insist=False):
        """Initialize the AMQP client protocol.

        delegate  -- event delegate; receives a back-reference to this client.
        vhost     -- AMQP virtual host name.
        spec      -- AMQP protocol spec (also passed to FrameReceiver).
        heartbeat -- heartbeat interval in seconds; 0 disables heartbeating.
        clock     -- scheduler; defaults to the reactor (imported lazily).
        insist    -- AMQP "insist" flag stored for connection setup.
        """
        FrameReceiver.__init__(self, spec)
        self.delegate = delegate

        # XXX Cyclic dependency: the delegate keeps a reference back to us.
        self.delegate.client = self

        self.vhost = vhost

        # Concrete channel class mixing channelClass with the spec's class.
        self.channelFactory = type("Channel%s" % self.spec.klass.__name__,
                                    (self.channelClass, self.spec.klass), {})
        self.channels = {}
        self.channelLock = defer.DeferredLock()

        # Frames waiting to be written / work items waiting to be processed.
        self.outgoing = defer.DeferredQueue()
        self.work = defer.DeferredQueue()

        self.started = TwistedEvent()

        self.queueLock = defer.DeferredLock()
        self.basic_return_queue = TimeoutDeferredQueue()

        self.queues = {}

        # Start consuming the outgoing/work queues.
        self.outgoing.get().addCallback(self.writer)
        self.work.get().addCallback(self.worker)
        self.heartbeatInterval = heartbeat
        self.insist = insist
        # NOTE(review): self.clock is only assigned when heartbeats are
        # enabled -- other code reading self.clock should be aware.
        if self.heartbeatInterval > 0:
            if clock is None:
                from twisted.internet import reactor as clock
            self.clock = clock
            # Schedule the liveness check and the periodic heartbeat sender.
            self.checkHB = self.clock.callLater(self.heartbeatInterval *
                          self.MAX_UNSEEN_HEARTBEAT, self.checkHeartbeat)
            self.sendHB = LoopingCall(self.sendHeartbeat)
            d = self.started.wait()
            d.addCallback(lambda _: self.reschedule_sendHB())
            d.addCallback(lambda _: self.reschedule_checkHB())
            # NOTE(review): if self.started fails, this Deferred has no
            # errback, so the failure would go unhandled.
Ejemplo n.º 16
0
 def __init__(self, keyspace=None, retries=0, credentials=None, decoder=None):
     """Pooling client-factory state.

     keyspace    -- initial keyspace for new connections (or None).
     retries     -- number of times a failed request is retried.
     credentials -- optional auth credentials (or None).
     decoder     -- optional response decoder (or None).
     """
     self.deferred = defer.Deferred()
     self.queue = defer.DeferredQueue()
     self.continueTrying = True
     # Live protocol instances and requests awaiting a connection.
     self._protos = []
     self._pending = []
     self.request_retries = retries
     self.keyspace = keyspace
     self.credentials = credentials
     self.decoder = decoder
Ejemplo n.º 17
0
def main():
    """Wire up sensor polling, buffered uploads, and sqlite maintenance."""
    logging.basicConfig(level=logging.DEBUG,
                        format="%(asctime)-15s %(message)s")
    logging.info('Starting up')

    # Worker lists: pollers fill buffers, unloaders push them upstream.
    sensor_pollers, buffer_unloaders, sqlite_unloaders = [], [], []
    upload_tasks = defer.DeferredQueue()

    # Setup monitoring of fake sensors.
    fake_sensors = [fake_sensor()]
    for idx, source in enumerate(fake_sensors, start=1):
        # BUG FIX: the datastream name previously hard-coded index 1
        # ('temp%d' % (1,)) inside the loop, so every sensor would have
        # shared the name "temp1"; number each sensor instead.
        name = 'temp%d' % (idx,)
        # datapointBuffer calls source_method (with optional
        # source_method_args) to retrieve a datapoint for this datastream.
        dp_buffer = sensorup.datapointBuffer(
            source_method=source.get_temp_channel,
            source_method_args=0,
            datastream_id=name)

        # sensorPollAndBuffer: poll for new data, store in an in-memory list.
        sensor_pollers.append(
            task.LoopingCall(sensorup.sensorPollAndBuffer, dp_buffer))
        # bufferUnloadAndUpload: flush the in-memory list of datapoints and
        # attempt uploading to Cosm; failures go into sqlite.
        buffer_unloaders.append(
            task.LoopingCall(sensorup.bufferUnloadAndUpload, dp_buffer,
                             upload_tasks))

    # Collect a datapoint every 1 s.
    for poller in sensor_pollers:
        poller.start(1.0)
    # Upload every 30 s (don't fire immediately on start).
    for poller in buffer_unloaders:
        poller.start(30.0, False)

    # The interval for sqlite_unloaders should be sufficiently large as to
    # prevent double uploading:
    #   sqlite_unloader interval > buffer_unloaders interval
    #   sqlite_unloader interval > client.request_timeout * sensor count

    # Flush old datapoints from the database every 86400 s (daily).
    sqlite_purger = task.LoopingCall(sensorup.sqlite_purge)
    sqlite_purger.start(86400)

    # Only one concurrent upload task.
    task.cooperate(sensorup.worker(upload_tasks))
    reactor.run()
Ejemplo n.º 18
0
    def load(self):
        """Load the test plan and enqueue one Phase per plan entry."""
        # Load a test plan (module-global `plan`).
        self.plan = plan

        # Queue up the phases to be processed.
        self.queue = defer.DeferredQueue()
        for phase, servers in self.plan.iteritems():
            # An optional 'bridage' (throttle) entry caps parallelism for
            # this phase; 0 means no cap.
            if 'bridage' in servers:
                maxparallel = servers['bridage']
                del servers['bridage']
            else:
                maxparallel = 0
            self.queue.put(Phase(phase, servers, maxparallel, self))

        print "Chargement de la premiere phase"
Ejemplo n.º 19
0
 def __init__(self, endpoint, sid, userid):
     """HTTP session resource bound to one user session.

     endpoint -- owning endpoint; its broker is kept at hand for delivery.
     sid      -- session identifier.
     userid   -- identifier of the user owning this session.
     """
     resource.Resource.__init__(self)
     self.endpoint = endpoint
     self.broker = self.endpoint.broker
     self.sessionid = sid
     self.userid = userid
     self.queue = defer.DeferredQueue()
     self.zombie = False
     self.pollingDefer = None
     # Child resources: one BaseRequest per supported action.
     for action, handler in (('logout', self.logout),
                             ('userinfo', self.userinfo),
                             ('pending', self.pending),
                             ('polling', self.polling),
                             ('received', self.received),
                             ('message', self.message)):
         self.putChild(action, BaseRequest(self, handler))
Ejemplo n.º 20
0
 def __init__(self, dispatcher, interval, protocol, address,
              incoming_account, outgoing_account):
     """Bridge one incoming/outgoing mail account pair onto the dispatcher.

     dispatcher       -- service dispatcher; its callServiceMethod handles
                         queued tasks.
     interval         -- polling interval, stored on the instance.
     protocol         -- protocol name used to build the incoming side.
     address          -- our own address, used for outgoing mail.
     incoming_account -- settings for the incoming account.
     outgoing_account -- keyword settings for the SMTP sender.
     """
     self.dispatcher = dispatcher
     self.interval = interval
     self.address = address
     # Incoming fetcher and outgoing SMTP sender for this address.
     self.incoming = self.getIncoming(protocol, incoming_account)
     self.outgoing = ApplicationserverSMTPSender(address=self.address,
                                                 **outgoing_account)
     # Bounded queue feeding the mail processor.
     self.mailQueue = defer.DeferredQueue(size=100)
     self.mailProcessor = MailProcessor(
         self.mailQueue,
         taskMethod=self.dispatcher.callServiceMethod,
         send=self.sendMail)
     self.mailCache = []
     self.run()
Ejemplo n.º 21
0
 def __init__(self, conf, passive=False):
     """File-transfer node: drains a queue of transfers with capped workers.

     conf    -- configuration; 'transport'/'threads' caps the worker count.
     passive -- passive nodes do not schedule transfer resumption.
     """
     node.Node.__init__(self, name='file-transfer')
     self.passive = passive
     self.conf = conf
     # Transfers currently being worked on, plus a running count.
     self.working_queue = {}
     self.working = 0
     self.queue = defer.DeferredQueue()
     # Worker cap (from config) and pending-transfer limit.
     self.QUEUESIZE = int(self.conf.get('transport', 'threads'))
     self.PENDINGLIMIT = 100
     self.timestamp = time()
     self.stopped = False
     # Active nodes resume pending transfers; all nodes schedule stop
     # handling and re-queue leftover work.
     if not self.passive:
         self._sched_resume_ft()
     self._sched_stop_ft()
     self._requeue()
Ejemplo n.º 22
0
    def testQueue(self):
        """Exercise DeferredQueue size/backlog limits and FIFO delivery.

        (Python 2 test: `gotten` is compared against `range(...)` lists.)
        """
        # N = max buffered items (size), M = max waiting getters (backlog).
        N, M = 2, 2
        queue = defer.DeferredQueue(N, M)

        gotten = []

        # Fill the backlog with M waiting getters; one more must underflow.
        for i in range(M):
            queue.get().addCallback(gotten.append)
        self.assertRaises(defer.QueueUnderflow, queue.get)

        # Each put is delivered immediately to a waiting getter...
        for i in range(M):
            queue.put(i)
            self.assertEquals(gotten, range(i + 1))
        # ...then N further puts are buffered; one more must overflow.
        for i in range(N):
            queue.put(N + i)
            self.assertEquals(gotten, range(M))
        self.assertRaises(defer.QueueOverflow, queue.put, None)

        # Buffered items are handed out FIFO to subsequent getters.
        gotten = []
        for i in range(N):
            queue.get().addCallback(gotten.append)
            self.assertEquals(gotten, range(N, N + i + 1))

        # An unbounded queue delivers puts to earlier getters in order.
        queue = defer.DeferredQueue()
        gotten = []
        for i in range(N):
            queue.get().addCallback(gotten.append)
        for i in range(N):
            queue.put(i)
        self.assertEquals(gotten, range(N))

        # size=0 forbids any put; backlog=0 forbids any waiting get.
        queue = defer.DeferredQueue(size=0)
        self.assertRaises(defer.QueueOverflow, queue.put, None)

        queue = defer.DeferredQueue(backlog=0)
        self.assertRaises(defer.QueueUnderflow, queue.get)
Ejemplo n.º 23
0
    def __init__(self, maxParrallel=None, startnow=True):
        """
        @param maxParrallel: the maximum number of tasks that can run
            at a time. None maps to a semaphore limit of 0 (presumably
            "unlimited" -- depends on DynamicDeferredSemaphore semantics).
        @type maxParrallel: int

        @param startnow: Set True to start the executor immediately.
        @type startnow: boolean
        """
        queue = defer.DeferredQueue()
        limit = maxParrallel if maxParrallel is not None else 0
        tokens = DynamicDeferredSemaphore(limit)
        super(TwistedExecutor, self).__init__(queue, tokens, log)
        if startnow:
            self.start(reactor)
Ejemplo n.º 24
0
 def startup(self, remoteReference):
     """
     Complete startup once *remoteReference* is in hand. Kept separate from
     the constructor so subclasses can do their own constructor work first.
     """
     # Resign automatically if the PB connection goes away.
     self.resignators = []
     self.disconnectErrors = (pb.DeadReferenceError, pb.PBConnectionLost)
     remoteReference.notifyOnDisconnect(self.resign)
     # Seed the run-request queue with one token per slot (self.N total).
     self.jobs = []
     self.runRequestQueue = defer.DeferredQueue()
     for _ in xrange(self.N):
         self.runRequestQueue.put(None)
Ejemplo n.º 25
0
 def __init__(self, slots, network_manager_factory, splash_proxy_factory_cls,
              js_profiles_path, verbosity=1):
     """Render-slot pool: at most *slots* renders run concurrently."""
     self.network_manager_factory = network_manager_factory
     # Fall back to a no-op proxy factory when none is supplied.
     self.splash_proxy_factory_cls = splash_proxy_factory_cls or (
         lambda profile_name: None)
     self.js_profiles_path = js_profiles_path
     self.active = set()
     self.queue = defer.DeferredQueue()
     self.verbosity = verbosity
     # Register one waiter per slot; each picks up queued renders.
     for slot_no in range(slots):
         self._wait_for_render(None, slot_no, log=False)
Ejemplo n.º 26
0
 def __init__(self,
              keyspace=None,
              retries=0,
              credentials=None,
              require_api_version=None):
     """Client-factory state.

     keyspace            -- initial keyspace for new connections (or None).
     retries             -- number of times a failed request is retried.
     credentials         -- optional auth credentials mapping; when
                            non-empty, the authenticating protocol is used.
     require_api_version -- optional API version requirement.
     """
     # BUG FIX: the default used to be a shared mutable dict (`{}`), the
     # classic mutable-default-argument pitfall; use None as the sentinel
     # and substitute a fresh dict per instance. Observable behavior for
     # callers is unchanged.
     if credentials is None:
         credentials = {}
     self.deferred = defer.Deferred()
     self.queue = defer.DeferredQueue()
     self.continueTrying = True
     # Live protocol instances and requests awaiting a connection.
     self._protos = []
     self._pending = []
     self.request_retries = retries
     self.keyspace = keyspace
     self.credentials = credentials
     if credentials:
         # Auth supplied: switch to the authenticating protocol.
         self.protocol = AuthenticatedThriftClientProtocol
     self.require_api_version = require_api_version
Ejemplo n.º 27
0
 def __init__(self,
              connection_manager,
              micro_status_entry,
              link_protocol=3):
     """Connection task for the relay described by *micro_status_entry*.

     connection_manager -- manager that maps circuits onto connections.
     micro_status_entry -- status entry for the target relay (has .address).
     link_protocol      -- link protocol version to use (default 3).
     """
     logging.debug(
         "Creating connection task to {}".format(micro_status_entry.address))
     # Map all circuits using this connection.
     self._connection_manager = connection_manager
     self.micro_status_entry = micro_status_entry
     self._link_protocol = link_protocol
     self._read_queue = defer.DeferredQueue()
     # Connection state: all empty until the connection attempt runs.
     self._buffer = ''
     self._connection_cert = None
     self._tasks = None
     self._current_task = None
     self._connection = None
     self._failed = False
Ejemplo n.º 28
0
 def __init__(self, keyspace=None, retries=0, credentials=None,
              sasl_kwargs=None):
     """Client-factory state.

     keyspace    -- initial keyspace for new connections (or None).
     retries     -- number of times a failed request is retried.
     credentials -- optional auth credentials; selects the authenticated
                    protocol when given.
     sasl_kwargs -- optional SASL arguments; selects the SASL protocol
                    when given (credentials takes precedence).
     """
     self.deferred = defer.Deferred()
     self.queue = defer.DeferredQueue()
     self.continueTrying = True
     # Live protocol instances and requests awaiting a connection.
     self._protos = []
     self._pending = []
     self.request_retries = retries
     self.keyspace = keyspace
     self.credentials = credentials
     self.sasl_kwargs = sasl_kwargs
     # Pick the protocol variant matching the supplied auth mechanism.
     if credentials:
         self.protocol = AuthenticatedThriftClientProtocol
     elif sasl_kwargs:
         self.protocol = SASLThriftClientProtocol
Ejemplo n.º 29
0
class InterpreterProtocol(basic.NetstringReceiver):
    """Netstring transport exchanging JSON requests/responses with an
    interpreter process; responses are matched to callers FIFO via a
    DeferredQueue.
    """
    log = Logger()
    # NOTE(review): this queue is a class attribute, so every instance of
    # the protocol shares it. That is only safe while a single connection
    # exists at a time -- confirm before reusing this class.
    queue = defer.DeferredQueue()

    def connectionMade(self):
        # Typo fixed in the log message ("connections eastablished").
        self.log.debug("Interpreter connection established")

    def stringReceived(self, string):
        # Decode leniently; the interpreter's output is assumed UTF-8.
        response = json.loads(string.decode("utf8", "ignore"))
        self.responseReceived(response)

    def sendRequest(self, request):
        """Send *request* as a JSON netstring; return a Deferred that fires
        with the (FIFO-matched) response."""
        request = json.dumps(request).encode("utf-8")
        self.sendString(request)
        return self.queue.get()

    def responseReceived(self, response):
        # Hand the decoded response to the oldest waiting sendRequest call.
        self.queue.put(response)
Ejemplo n.º 30
0
    def test_cancelQueueAfterGet(self):
        """
        When canceling a L{Deferred} from a L{DeferredQueue} that does not
        have a result (i.e., the L{Deferred} has not fired), the cancel
        causes a L{defer.CancelledError} failure. If the queue has a result
        later on, it doesn't try to fire the deferred.
        """
        queue = defer.DeferredQueue()
        d = queue.get()
        # assertFailure arms d to succeed only if it fails with
        # CancelledError; cancel() must produce exactly that failure.
        self.assertFailure(d, defer.CancelledError)
        d.cancel()

        def cb(ignore):
            # If the deferred is still linked with the deferred queue, it will
            # fail with an AlreadyCalledError
            queue.put(None)
            return queue.get().addCallback(self.assertIdentical, None)

        return d.addCallback(cb)