Example #1
    def __init__(self, server_url, login_params):
        self.running = True
        self.agent = True
        self.cmd_out_queue = []
        self.cmd_in_queue = []
        self.out_queue = Queue()
        self.in_queue = Queue()
        self.server_url = server_url
        self.login_params = login_params
        Thread.__init__(self)
Example #2
    def setupDatamodel(self, datamodel):

        self.datamodel = datamodel
        self.doc.datamodel = datamodel_mapping[datamodel]()

        self.dm = self.doc.datamodel
        self.dm.response = Queue()
        self.dm.websocket = Queue()
        self.dm["__event"] = None
        #        self.dm["_x"]["sessions"] = {}

        if datamodel != "xpath":
            self.dm["In"] = self.interpreter.In
Example #3
    def __init__(self):
        self.running = True
        self.exited = False
        self.cancelled = False
        self.configuration = OrderedSet()

        self.internalQueue = Queue()
        self.externalQueue = Queue()

        self.statesToInvoke = OrderedSet()
        self.historyValue = {}
        self.dm = None
        self.invokeId = None
        self.parentId = None
        self.logger = None
Example #4
    def test_blocks_on_pool(self):
        waiter = Queue(0)
        def greedy():
            self.pool.get()
            self.pool.get()
            self.pool.get()
            self.pool.get()
            # No one should be waiting yet.
            self.assertEquals(self.pool.waiting(), 0)
            # The call to the next get will unschedule this routine.
            self.pool.get()
            # So this put should never be called.
            waiter.put('Failed!')

        killable = eventlet.spawn(greedy)

        # No one should be waiting yet.
        self.assertEquals(self.pool.waiting(), 0)

        # Wait for greedy.
        eventlet.sleep(0)

        # Greedy should be blocking on the last get.
        self.assertEquals(self.pool.waiting(), 1)

        # Send will never be called, so balance should be 0.
        self.assertTrue(waiter.full())

        eventlet.kill(killable)
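
Note: a zero-size eventlet Queue, as used for waiter above (and again in Example #11), acts as a rendezvous channel: put() blocks until another greenthread calls get(). A minimal standalone sketch of that behavior, not taken from the project above:

import eventlet
from eventlet import Queue

channel = Queue(0)  # maxsize 0: every put() waits for a matching get()

def producer():
    channel.put('hello')  # blocks here until the consumer is ready

eventlet.spawn(producer)
eventlet.sleep(0)     # let the producer run up to its put()
print(channel.get())  # unblocks the producer; prints 'hello'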
Example #5
    def handler(self, ws):
        self.queue = Queue()
        while True:
            m = ws.wait()
            if m is None:
                break
            self.queue.put(m)
Example #6
    def test_connection_pooling(self):
        with patch('swift.common.memcached.socket') as mock_module:
            # patch socket, stub socket.socket, mock sock
            mock_sock = mock_module.socket.return_value

            # track clients waiting for connections
            connected = []
            connections = Queue()
            errors = []

            def wait_connect(addr):
                connected.append(addr)
                sleep(0.1)  # yield
                val = connections.get()
                if val is not None:
                    errors.append(val)

            mock_sock.connect = wait_connect

            memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'],
                                                     connect_timeout=10)
            # sanity
            self.assertEquals(1, len(memcache_client._client_cache))
            for server, pool in memcache_client._client_cache.items():
                self.assertEqual(2, pool.max_size)

            # make 10 requests "at the same time"
            p = GreenPool()
            for i in range(10):
                p.spawn(memcache_client.set, 'key', 'value')
            for i in range(3):
                sleep(0.1)
                self.assertEqual(2, len(connected))

            # give out a connection
            connections.put(None)

            # at this point, only one connection should have actually been
            # created, the other is in the creation step, and the rest of the
            # clients are not attempting to connect. we let this play out a
            # bit to verify.
            for i in range(3):
                sleep(0.1)
                self.assertEqual(2, len(connected))

            # finish up, this allows the final connection to be created, so
            # that all the other clients can use the two existing connections
            # and no others will be created.
            connections.put(None)
            connections.put('nono')
            self.assertEqual(2, len(connected))
            p.waitall()
            self.assertEqual(2, len(connected))
            self.assertEqual(0, len(errors),
                             "A client was allowed a third connection")
            connections.get_nowait()
            self.assertTrue(connections.empty())
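
Note: the test above parks each mocked connect on a queue so the test decides exactly when every connection attempt completes. A minimal standalone sketch of that gating idea (names are illustrative):

import eventlet
from eventlet import Queue

gate = Queue()

def slow_connect(addr):
    print('connecting to %r' % (addr,))
    gate.get()  # parked here until the test releases it
    print('connected')

g = eventlet.spawn(slow_connect, ('1.2.3.4', 11211))
eventlet.sleep(0)  # let slow_connect run up to its gate.get()
gate.put(None)     # release the gate
g.wait()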
Example #7
    def __init__(self, chunk, conn, write_timeout=None, **_kwargs):
        self._chunk = chunk
        self._conn = conn
        self.failed = False
        self.bytes_transferred = 0
        self.checksum = hashlib.md5()
        self.write_timeout = write_timeout or io.CHUNK_TIMEOUT
        # we use eventlet Queue to pass data to the send coroutine
        self.queue = Queue(io.PUT_QUEUE_DEPTH)
Example #8
    def _restart(self):
        self._pkt_trans = PacketTransceiver(self._node_id, self._dp,
                                            self._flood_ports)
        demux, trans = build_transceiver(self._cps, self._pkt_trans)
        for cpid in self._cps:
            q = Queue()
            self._qs[cpid] = q
            if os.path.exists('/common/pp'):
                v = PushPullECSMgr(self._node_id, q, self._topo, trans[cpid])
            else:
                v = FloodECSMgr(self._node_id, q, self._topo, trans[cpid])
            spawn(v.run)
Example #9
    def test_putting_to_queue(self):
        timer = eventlet.Timeout(0.1)
        try:
            size = 2
            self.pool = IntPool(min_size=0, max_size=size)
            queue = Queue()
            results = []
            def just_put(pool_item, index):
                self.pool.put(pool_item)
                queue.put(index)
            for index in xrange(size + 1):
                pool_item = self.pool.get()
                eventlet.spawn(just_put, pool_item, index)

            for _ in range(size+1):
                x = queue.get()
                results.append(x)
            self.assertEqual(sorted(results), range(size + 1))
        finally:
            timer.cancel()
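
Note: wrapping the whole test in eventlet.Timeout, as above, makes a deadlocked queue fail fast instead of hanging the suite. A minimal standalone sketch of that guard:

import eventlet
from eventlet import Queue

timer = eventlet.Timeout(0.1)  # raises Timeout in this greenthread after 0.1s
try:
    Queue(0).get()             # would block forever without the guard
except eventlet.Timeout:
    print('timed out as expected')
finally:
    timer.cancel()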
Example #10
    def take_action(self, parsed_args):
        self.log.debug('take_action(%s)', parsed_args)
        digits = self.app.client_manager.get_meta1_digits()
        workers_count = parsed_args.workers

        conf = {'namespace': self.app.client_manager.namespace}
        if parsed_args.proxy:
            conf.update({'proxyd_url': parsed_args.proxy})
        else:
            ns_conf = load_namespace_conf(conf['namespace'])
            proxy = ns_conf.get('proxy')
            conf.update({'proxyd_url': proxy})

        workers = list()
        with green.ContextPool(workers_count) as pool:
            pile = GreenPile(pool)
            prefix_queue = Queue(16)

            # Prepare some workers
            for i in range(workers_count):
                w = WarmupWorker(conf, self.log)
                workers.append(w)
                pile.spawn(w.run, prefix_queue)

            # Feed the queue
            trace_increment = 0.01
            trace_next = trace_increment
            sent, total = 0, float(count_prefixes(digits))
            for prefix in generate_prefixes(digits):
                sent += 1
                prefix_queue.put(prefix)
                # Display the progression
                ratio = float(sent) / total
                if ratio >= trace_next:
                    self.log.info("... %d%%", int(ratio * 100.0))
                    trace_next += trace_increment

            self.log.debug("Send the termination marker")
            prefix_queue.join()

        self.log.info("All the workers are done")
Example #11
    def test_exhaustion(self):
        waiter = Queue(0)
        def consumer():
            gotten = None
            try:
                gotten = self.pool.get()
            finally:
                waiter.put(gotten)

        eventlet.spawn(consumer)

        one, two, three, four = (
            self.pool.get(), self.pool.get(), self.pool.get(), self.pool.get())
        self.assertEquals(self.pool.free(), 0)

        # Let consumer run; nothing will be in the pool, so he will wait
        eventlet.sleep(0)

        # Wake consumer
        self.pool.put(one)

        # wait for the consumer
        self.assertEquals(waiter.get(), one)
Example #12
        "/home/pi/craftbeerpi3/modules/plugins/FermentWifiPlugin/roda.txt",
        "w")
    file.write("nao")
    file.close()
    os.system("reboot")
else:
    file.close()

import paho.mqtt.client as mqtt

a_ativado = 0
r_ativado = 0

cache = {}

q = Queue()
cbpi.gpio_compressors = []

cbpi.gpio_compressors2 = []

client = None

mqttc = mqtt.Client()
mqttc.connect("localhost", 1883, 60)
mqttc.loop_start()


@cbpi.actor
class Resfriador_FermentWifi(ActorBase):

    key0 = Property.Text(label="Nome do FermentWifi (ex: FW_0000)",
Example #13
    def _decode_segments(self, fragment_iterators):
        """
        Reads from the fragment iterators and yields full segments
        """
        # we use eventlet Queue to read fragments
        queues = []
        # each iterator has its own queue
        for _j in range(len(fragment_iterators)):
            queues.append(Queue(1))

        def put_in_queue(fragment_iterator, queue):
            """
            Coroutine to read the fragments from the iterator
            """
            try:
                for fragment in fragment_iterator:
                    # put the read fragment in the queue
                    queue.put(fragment)
                    # the queues are of size 1 so this coroutine blocks
                    # until we decode a full segment
            except GreenletExit:
                # ignore
                pass
            except green.ChunkReadTimeout:
                logger.error("Timeout on reading")
            except Exception:
                logger.exception("Exception on reading")
            finally:
                queue.resize(2)
                # put None to tell the decoding loop
                # that this is over
                queue.put(None)
                # close the iterator
                fragment_iterator.close()

        # we use eventlet GreenPool to manage the read of fragments
        with green.ContextPool(len(fragment_iterators)) as pool:
            # spawn coroutines to read the fragments
            for fragment_iterator, queue in zip(fragment_iterators, queues):
                pool.spawn(put_in_queue, fragment_iterator, queue)

            # main decoding loop
            while True:
                data = []
                # get the fragments from the queues
                for queue in queues:
                    fragment = queue.get()
                    queue.task_done()
                    data.append(fragment)

                if not all(data):
                    # one of the readers returned None
                    # impossible to read segment
                    break
                # actually decode the fragments into a segment
                try:
                    segment = self.storage_method.driver.decode(data)
                except exceptions.ECError:
                    # something terrible happened
                    logger.exception("ERROR decoding fragments")
                    raise

                yield segment
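
Note: the size-1 queues above keep every reader in lockstep with the decoder: a reader cannot push its next fragment until the previous one has been consumed. A minimal standalone sketch of that fan-in pattern:

import eventlet
from eventlet import Queue

sources = [iter('abc'), iter('xyz')]
queues = [Queue(1) for _ in sources]  # size 1: readers advance in lockstep

def feed(source, queue):
    for item in source:
        queue.put(item)  # blocks until the consumer took the previous item
    queue.put(None)      # sentinel: this source is exhausted

for source, queue in zip(sources, queues):
    eventlet.spawn(feed, source, queue)

while True:
    row = [q.get() for q in queues]
    if not all(row):
        break
    print(row)  # one item from each source, in step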
Example #14
def main():
    args = options()

    global ACCOUNT, PROXY, QUEUE, NS, VERBOSE, TIMEOUT
    global COUNTERS, ELECTIONS
    ACCOUNT = args.account
    NS = args.namespace
    VERBOSE = args.verbose
    TIMEOUT = args.timeout
    PROXY = ObjectStorageApi(NS)
    ELECTIONS = AtomicInteger()

    num_worker_threads = int(args.max_worker)
    print("Using %d workers" % num_worker_threads)

    total_objects = {'size': 0, 'files': 0, 'elapsed': 0}
    total_containers = {'size': 0, 'files': 0, 'elapsed': 0}

    for path in args.path:
        path = path.rstrip('/')
        if '/' in path:
            bucket, path = path.split('/', 1)
        else:
            bucket = path
            path = ""

        containers = []

        QUEUE = Queue()
        pool = eventlet.GreenPool(num_worker_threads)

        for i in range(num_worker_threads):
            pool.spawn(worker_objects)

        COUNTERS = AtomicInteger()
        _bucket = container_hierarchy(bucket, path)
        # we don't use placeholders; the path serves as the prefix
        for entry in full_list(prefix=container_hierarchy(bucket, path)):
            name, _files, _size, _ = entry
            if name != _bucket and not name.startswith(_bucket + '%2F'):
                continue

            if _files:
                QUEUE.put(name)

            containers.append(name)

        # we have to wait for all the objects
        print("Waiting for objects to be flushed")

        report = args.report

        while not QUEUE.empty():
            ts = time.time()
            while time.time() - ts < report and not QUEUE.empty():
                time.sleep(1)
            diff = time.time() - ts
            val = COUNTERS.reset()
            elections = ELECTIONS.reset()
            print("Objects: %5.2f / Size: %5.2f" %
                  (val[0] / diff, val[1] / diff),
                  "Elections failed: %5.2f/s total: %d" %
                  (elections[0] / diff, ELECTIONS.total()[0]),
                  " " * 20,
                  end='\r')
            sys.stdout.flush()

        print("Waiting end of workers")
        QUEUE.join()

        val = COUNTERS.total()
        total_objects['files'] += val[0]
        total_objects['size'] += val[1]
        total_objects['elapsed'] += COUNTERS.time()

        COUNTERS = AtomicInteger()

        QUEUE = Queue()
        for i in range(num_worker_threads):
            pool.spawn(worker_container)

        print("We have to delete", len(containers), "containers")

        for container in containers:
            QUEUE.put(container)

        while not QUEUE.empty():
            ts = time.time()
            while time.time() - ts < report and not QUEUE.empty():
                time.sleep(1)
            diff = time.time() - ts
            val = COUNTERS.reset()
            elections = ELECTIONS.reset()
            print("Containers: %5.2f" % (val[0] / diff),
                  "Elections failed: %5.2f/s total: %d" %
                  (elections[0] / diff, ELECTIONS.total()[0]),
                  " " * 20,
                  end='\r')
            sys.stdout.flush()

        QUEUE.join()
        val = COUNTERS.total()
        total_containers['files'] += val[0]
        total_containers['size'] += val[1]
        total_containers['elapsed'] += COUNTERS.time()

    print("""
Objects:
    - ran during {o[elapsed]:5.2f}
    - {o[files]} objects removed (size {size})
    - {o_file_avg:5.2f} objects/s ({o_size_avg} avg. size/s)
""".format(o=total_objects,
           size=show(total_objects['size'], True),
           o_file_avg=total_objects['files'] / total_objects['elapsed'],
           o_size_avg=show(total_objects['size'] / total_objects['elapsed'],
                           True)))

    print("""
Containers:
    - ran during {o[elapsed]:5.2f}
    - {o[files]} containers
    - {o_file_avg:5.2f} containers/s
""".format(o=total_containers,
           o_file_avg=total_containers['files'] / total_containers['elapsed']))

    print("Elections failed: %d" % ELECTIONS.total()[0])
Example #15
    def __call__(self):
        """
        :return: httplib.HTTP(S)Connection on success, webob.exc.HTTPException on failure
        """
        if self.headers.has_key('content-length'):
            if int(self.headers['content-length']) >= MAX_FILE_SIZE:
                return HTTPRequestEntityTooLarge(request=self.req)

        parsed = urlparse(self.url)
        if self.proxy:
            proxy_parsed = urlparse(self.proxy)

        if self._proxy_request_check(parsed.path):
            host, port = self.split_netloc(proxy_parsed)
            path = self.url
            ssl = proxy_parsed.scheme == 'https'
        else:
            host, port = self.split_netloc(parsed)
            path = parsed.path
            ssl = parsed.scheme == 'https'
        self.headers['host'] = '%s:%s' % (host, port)

        if self.method == 'PUT' and len(parsed.path.split('/')) >= 5:
            if self.headers.has_key('content-length') and int(
                    self.headers['content-length']) != 0:
                if not self.headers.has_key('expect'):
                    self.headers['expect'] = '100-continue'
            chunked = self.req.headers.get('transfer-encoding')
            if isinstance(self.req.environ['wsgi.input'], str):
                reader = self.req.environ['wsgi.input'].read
                data_source = iter(lambda: reader(self.chunk_size), '')
            else:
                data_source = self.req.environ['wsgi.input']
            bytes_transferred = 0
            try:
                conn = self._connect_put_node(host,
                                              port,
                                              self.method,
                                              path,
                                              headers=self.headers,
                                              query_string=parsed.query,
                                              ssl=ssl)
                if not conn:
                    return HTTPServiceUnavailable(request=self.req)
                with ContextPool(1) as pool:
                    conn.failed = False
                    conn.queue = Queue(10)
                    pool.spawn(self._send_file, conn, path)
                    while True:
                        with ChunkReadTimeout(self.client_timeout):
                            try:
                                chunk = next(data_source)
                            except StopIteration:
                                if chunked:
                                    conn.queue.put('0\r\n\r\n')
                                break
                            except TypeError, err:
                                self.logger.info('Chunk Read Error: %s' % err)
                                break
                            except Exception, err:
                                self.logger.info('Chunk Read Error: %s' % err)
                                return HTTPServerError(request=self.req)
                        bytes_transferred += len(chunk)
                        if bytes_transferred > MAX_FILE_SIZE:
                            return HTTPRequestEntityTooLarge(request=self.req)
                        if not conn.failed:
                            conn.queue.put('%x\r\n%s\r\n' %
                                           (len(chunk),
                                            chunk) if chunked else chunk)
Example #16
    def __call__(self):
        """
        :return: httplib.HTTP(S)Connection on success, webob.exc.HTTPException on failure
        """
        if self.headers.has_key('content-length'):
            if int(self.headers['content-length']) >= MAX_FILE_SIZE:
                return HTTPRequestEntityTooLarge(request=self.req)

        parsed = urlparse(self.url)
        if self.proxy:
            proxy_parsed = urlparse(self.proxy)

        if self._proxy_request_check(parsed.path):
            host, port = self.split_netloc(proxy_parsed)
            path = self.url
        else:
            host, port = self.split_netloc(parsed)
            path = parsed.path
        self.headers['host'] = '%s:%s' % (host, port)

        if self.method == 'PUT' and len(parsed.path.split('/')) == 5:
            chunked = self.req.headers.get('transfer-encoding')
            reader = self.req.environ['wsgi.input'].read
            data_source = iter(lambda: reader(self.chunk_size), '')
            bytes_transferred = 0
            # pile = GreenPile()
            # pile.spawn(self._connect_server, host, port, self.method, path, self.headers, parsed.query)
            # conns = [conn for conn in pile if conn]
            # conn = conns[0]
            try:
                with ConnectionTimeout(self.conn_timeout):
                    conn = http_connect_raw(host,
                                            port,
                                            self.method,
                                            path,
                                            headers=self.headers,
                                            query_string=parsed.query)
                with ContextPool(1) as pool:
                    conn.failed = False
                    conn.queue = Queue(10)
                    pool.spawn(self._send_file, conn, path)
                    while True:
                        with ChunkReadTimeout(self.client_timeout):
                            try:
                                chunk = next(data_source)
                            except StopIteration:
                                if chunked:
                                    conn.queue.put('0\r\n\r\n')
                                break
                            except TypeError, err:
                                self.logger.info('Chunk Read Error: %s' % err)
                                break
                            except Exception, err:
                                self.logger.info('Chunk Read Error: %s' % err)
                                return HTTPServerError(request=self.req)
                        bytes_transferred += len(chunk)
                        if bytes_transferred > MAX_FILE_SIZE:
                            return HTTPRequestEntityTooLarge(request=self.req)
                        if not conn.failed:
                            conn.queue.put('%x\r\n%s\r\n' %
                                           (len(chunk),
                                            chunk) if chunked else chunk)
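
Note: the '%x\r\n%s\r\n' formatting in Examples #15 and #16 is HTTP chunked transfer-encoding framing: a hexadecimal length, CRLF, the payload, CRLF, and a final '0\r\n\r\n' terminator. A tiny standalone illustration of that framing:

def frame_chunk(chunk):
    # chunked transfer-encoding: <hex length>\r\n<payload>\r\n
    return '%x\r\n%s\r\n' % (len(chunk), chunk)

body = frame_chunk('hello') + frame_chunk('world!') + '0\r\n\r\n'
print(repr(body))  # '5\r\nhello\r\n6\r\nworld!\r\n0\r\n\r\n'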
Example #17
    def start(self, pool):
        # we use eventlet Queue to pass data to the send coroutine
        self.queue = Queue(io.PUT_QUEUE_DEPTH)
        # spawn the send coroutine
        pool.spawn(self._send)
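
Note: Examples #7 and #17 both hand data to a background send coroutine through a bounded queue, which gives natural backpressure: once io.PUT_QUEUE_DEPTH chunks are pending, put() blocks the producer. A minimal standalone sketch, with a depth of 8 standing in for io.PUT_QUEUE_DEPTH and a print standing in for the real socket write:

import eventlet
from eventlet import Queue

class Writer(object):
    def __init__(self, pool, depth=8):  # depth stands in for io.PUT_QUEUE_DEPTH
        self.queue = Queue(depth)       # put() blocks once depth chunks are pending
        pool.spawn(self._send)

    def _send(self):
        while True:
            chunk = self.queue.get()
            if chunk is None:  # sentinel ends the coroutine
                break
            print('sending %d bytes' % len(chunk))  # stand-in for the socket write

pool = eventlet.GreenPool()
writer = Writer(pool)
for chunk in (b'a' * 10, b'b' * 20):
    writer.queue.put(chunk)
writer.queue.put(None)
pool.waitall()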
Example #18
    def _get_queue(self):
        from eventlet import Queue
        return Queue()