def __init__(self,
              interval,
              debug_print=False,
              chunk_size=5,
              loop=None,
              serializers=None):
     # XXX is the loop arg useful?
     self.loop = loop or IOLoop.current()
     self.interval = parse_timedelta(interval, default="ms")
     self.waker = locks.Event()
     self.stopped = locks.Event()
     self.please_stop = False
     self.buffer = []
     self.message_count = 0
     self.batch_count = 0
     self.chunk_size = chunk_size
     self.byte_count = 0
     self.total_lambdas_invoked = 0
     self.num_tasks_invoked = 0
     self.next_deadline = None
     self.debug_print = debug_print
     self.lambda_client = None
     self.recent_message_log = deque(maxlen=dask.config.get(
         "distributed.comm.recent-messages-log-length"))
     self.serializers = serializers
 def __init__(self,
              interval,
              use_multiple_invokers=True,
              function_name="WukongTaskExecutor",
              num_invokers=16,
              redis_channel_names=None,
              debug_print=False,
              chunk_size=5,
              loop=None,
              serializers=None,
              minimum_tasks_for_multiple_invokers=8):
     # XXX is the loop arg useful?
     self.loop = loop or IOLoop.current()
     self.interval = parse_timedelta(interval, default="ms")
     self.waker = locks.Event()
     self.stopped = locks.Event()
     self.please_stop = False
     self.redis_channel_names = redis_channel_names
     self.current_redis_channel_index = 0
     self.buffer = []
     self.message_count = 0
     self.lambda_function_name = function_name
     self.batch_count = 0
     self.chunk_size = chunk_size
     self.byte_count = 0
     self.total_lambdas_invoked = 0
     self.minimum_tasks_for_multiple_invokers = minimum_tasks_for_multiple_invokers
     self.num_tasks_invoked = 0
     self.next_deadline = None
     self.debug_print = debug_print 
     self.lambda_client = None 
     self.time_spent_invoking = 0
     self.lambda_invokers = []
     self.lambda_pipes = []
     self.use_multiple_invokers = use_multiple_invokers 
     self.num_invokers = num_invokers
     self.recent_message_log = deque(
         maxlen=dask.config.get("distributed.comm.recent-messages-log-length")
     )
     self.serializers = serializers
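Both constructors above set up the same coordination state: `waker` lets `send()` rouse a background flush coroutine before its interval expires, and `stopped` lets `close()` wait until that coroutine has exited. A minimal sketch of the loop these two Events serve (the `MinimalBatcher` class and its method names are illustrative, not taken from the code above):

from datetime import timedelta

from tornado import gen, locks


class MinimalBatcher:
    """Illustrative batched-send loop built on the waker/stopped Events."""

    def __init__(self, interval=0.005):
        self.interval = interval
        self.buffer = []
        self.please_stop = False
        self.waker = locks.Event()
        self.stopped = locks.Event()

    async def _background_send(self):
        while not self.please_stop:
            try:
                # Sleep until the interval elapses, unless send() wakes us early.
                await self.waker.wait(timeout=timedelta(seconds=self.interval))
            except gen.TimeoutError:
                pass
            self.waker.clear()
            if self.buffer:
                batch, self.buffer = self.buffer, []
                print("flushing %d messages" % len(batch))
        self.stopped.set()  # close() can now stop waiting

    def send(self, msg):
        self.buffer.append(msg)
        self.waker.set()  # wake the flush coroutine immediately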
Example #3
    def __init__(self, interval, loop=None):
        self.loop = loop or IOLoop.current()
        self.interval = interval / 1000.

        self.waker = locks.Event()
        self.stopped = locks.Event()
        self.please_stop = False
        self.buffer = []
        self.stream = None
        self.message_count = 0
        self.batch_count = 0
        self.next_deadline = None
Example #4
 def __init__(self, interval, loop=None):
     # XXX is the loop arg useful?
     self.loop = loop or IOLoop.current()
     self.interval = parse_timedelta(interval, default='ms')
     self.waker = locks.Event()
     self.stopped = locks.Event()
     self.please_stop = False
     self.buffer = []
     self.comm = None
     self.message_count = 0
     self.batch_count = 0
     self.byte_count = 0
     self.next_deadline = None
     self.recent_message_log = deque(maxlen=dask.config.get('distributed.comm.recent-messages-log-length'))
Example #5
    def __init__(self, interval, loop=None):
        # XXX is the loop arg useful?
        self.loop = loop or IOLoop.current()
        self.interval = interval / 1000.

        self.waker = locks.Event()
        self.stopped = locks.Event()
        self.please_stop = False
        self.buffer = []
        self.comm = None
        self.message_count = 0
        self.batch_count = 0
        self.byte_count = 0
        self.next_deadline = None
        self.recent_message_log = deque(maxlen=100)
Example #6
    async def connect(self, address, deserialize=True, **connection_args):
        listener = self.manager.get_listener_for(address)
        if listener is None:
            raise IOError("no endpoint for inproc address %r" % (address, ))

        conn_req = ConnectionRequest(
            c2s_q=Queue(),
            s2c_q=Queue(),
            c_loop=IOLoop.current(),
            c_addr=self.manager.new_address(),
            conn_event=locks.Event(),
        )
        listener.connect_threadsafe(conn_req)
        # Wait for connection acknowledgement
        # (do not pretend we're connected if the other comm never gets
        #  created, for example if the listener was stopped in the meantime)
        await conn_req.conn_event.wait()

        comm = InProc(
            local_addr="inproc://" + conn_req.c_addr,
            peer_addr="inproc://" + address,
            read_q=conn_req.s2c_q,
            write_q=conn_req.c2s_q,
            write_loop=listener.loop,
            deserialize=deserialize,
        )
        return comm
Example #7
 def test_repr(self):
     event = locks.Event()
     self.assertTrue("clear" in str(event))
     self.assertFalse("set" in str(event))
     event.set()
     self.assertFalse("clear" in str(event))
     self.assertTrue("set" in str(event))
Example #8
 def test_repr(self):
     event = locks.Event()
     self.assertTrue('clear' in str(event))
     self.assertFalse('set' in str(event))
     event.set()
     self.assertFalse('clear' in str(event))
     self.assertTrue('set' in str(event))
Example #9
    def __init__(self):
        # TCPServer subclass initializer: create the Event, then bind a port.
        self.event = locks.Event()
        super().__init__()

        sock, port = testing.bind_unused_port()
        self.add_socket(sock)
        self.sockaddr = sock.getsockname()
Example #10
 def test_event_wait_clear(self):
     e = locks.Event()
     f0 = asyncio.ensure_future(e.wait())
     e.clear()
     f1 = asyncio.ensure_future(e.wait())
     e.set()
     self.assertTrue(f0.done())
     self.assertTrue(f1.done())
Example #11
    def test_event_timeout(self):
        e = locks.Event()
        with self.assertRaises(TimeoutError):
            yield e.wait(timedelta(seconds=0.01))

        # After a timed-out waiter, normal operation works.
        self.io_loop.add_timeout(timedelta(seconds=0.01), e.set)
        yield e.wait(timedelta(seconds=1))
Example #12
 def test_event_wait_clear(self):
     e = locks.Event()
     f0 = e.wait()
     e.clear()
     f1 = e.wait()
     e.set()
     self.assertTrue(f0.done())
     self.assertTrue(f1.done())
Example #13
 def __init__(self, udid: str, lock: locks.Lock, callback):
     """
     Args:
         callback: function (str, dict) -> None
     
     Example callback:
         callback("update", {"ip": "1.2.3.4"})
     """
     self.__udid = udid
     self.name = udid2name(udid)
     self.product = udid2product(udid)
     self.wda_directory = "./ATX-WebDriverAgent"
     self._procs = []
     self._wda_proxy_port = None
     self._wda_proxy_proc = None
     self._lock = lock  # only allow one xcodebuild test run
     self._finished = locks.Event()
     self._stop = locks.Event()
     # partial(...) is always truthy, so fall back explicitly when no callback
     self._callback = partial(callback, self) if callback else nop_callback
Example #14
 def __init__(self, udid: str, lock: locks.Lock):
     self.__udid = udid
     self.name = udid2name(udid)
     self.product = udid2product(udid)
     self._stopped = False
     self._procs = []
     self._wda_proxy_port = None
     self._wda_proxy_proc = None
     self._lock = lock  # only allow one xcodebuild test run
     self._stop_event = locks.Event()
Example #15
    def setUp(self):
        super(BaseTestCase, self).setUp()

        # make sure that our logging statements get executed
        amqp.amqp.LOGGER.disabled = False
        amqp.amqp.LOGGER.setLevel(logging.DEBUG)
        amqp.mixins.LOGGER.disabled = False
        amqp.mixins.LOGGER.setLevel(logging.DEBUG)

        self.exchange = str(uuid.uuid4())
        self.queue = str(uuid.uuid4())
        self.routing_key = str(uuid.uuid4())
        self.correlation_id = str(uuid.uuid4())
        self.message = None
        self.test_queue_bound = locks.Event()
        self.get_response = locks.Event()
        self.amqp_ready = locks.Event()
        self.condition = locks.Condition()
        self.config = {
            "url": AMQP_URL,
            "reconnect_delay": 1,
            "timeout": 2,
            "on_ready_callback": self.on_ready,
            "on_unavailable_callback": self.on_unavailable,
            "on_persistent_failure_callback": self.on_persistent_failure,
            "on_message_returned_callback": self.on_message_returned,
            "io_loop": self.io_loop,
        }
        self.app = web.Application()
        self.app.settings = {'service': 'unit_tests', 'version': '0.0'}
        self.handler = TestRequestHandler(self.app)

        self.clear_event_tracking()

        amqp.install(self.app, **self.config)
        yield self.condition.wait(self.io_loop.time() + 5)

        LOGGER.info('Connected to RabbitMQ, declaring exchange %s',
                    self.exchange)
        self.app.amqp.channel.exchange_declare(self.on_exchange_declare_ok,
                                               self.exchange,
                                               auto_delete=True)
Example #16
    def test_event(self):
        e = locks.Event()
        future_0 = asyncio.ensure_future(e.wait())
        e.set()
        future_1 = asyncio.ensure_future(e.wait())
        e.clear()
        future_2 = asyncio.ensure_future(e.wait())

        self.assertTrue(future_0.done())
        self.assertTrue(future_1.done())
        self.assertFalse(future_2.done())
Example #17
    def test_event(self):
        e = locks.Event()
        future_0 = e.wait()
        e.set()
        future_1 = e.wait()
        e.clear()
        future_2 = e.wait()

        self.assertTrue(future_0.done())
        self.assertTrue(future_1.done())
        self.assertFalse(future_2.done())
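The two tests above (asyncio-style and legacy Future-style) pin down the same semantics: `set()` resolves every pending and subsequent `wait()` until `clear()` is called, after which new waiters block again. The same behavior, runnable standalone (a minimal sketch using the asyncio-native API):

import asyncio

from tornado import locks


async def demo():
    e = locks.Event()
    first = asyncio.ensure_future(e.wait())   # pending: event starts clear
    e.set()                                   # resolves `first` synchronously
    second = asyncio.ensure_future(e.wait())  # already set: done immediately
    e.clear()
    third = asyncio.ensure_future(e.wait())   # blocks until the next set()
    print(first.done(), second.done(), third.done())  # True True False
    third.cancel()                            # tidy up the pending waiter


asyncio.run(demo())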
Example #18
 def __init__(self,
              interval="5ms",
              use_multiple_invokers=True,
              function_name="RedisMultipleVMsExecutor",
              num_invokers=8,
              redis_channel_names=None,
              debug_print=False,
              chunk_size=5,
              loop=None,
              serializers=None,
              minimum_tasks_for_multiple_invokers=8,
              redis_channel_names_for_proxy=None):
     # XXX is the loop arg useful?
     self.loop = loop or IOLoop.current()
     self.interval = parse_timedelta(interval, default="ms")
     self.waker = locks.Event()
     self.stopped = locks.Event()
     self.please_stop = False
     self.redis_channel_names = redis_channel_names
     self.redis_channel_names_for_proxy = redis_channel_names_for_proxy
     self.current_redis_channel_index = 0
     self.current_redis_channel_index_for_proxy = 0
     self.buffer = []
     self.message_count = 0
     self.lambda_function_name = function_name
     self.batch_count = 0
     self.chunk_size = chunk_size
     self.byte_count = 0
     self.total_lambdas_invoked = 0
     self.minimum_tasks_for_multiple_invokers = minimum_tasks_for_multiple_invokers
     self.num_tasks_invoked = 0
     self.next_deadline = None
     self.debug_print = debug_print
     self.lambda_client = None
     self.time_spent_invoking = 0
     self.lambda_invokers = []
     self.lambda_pipes = []
     self.use_multiple_invokers = use_multiple_invokers
     self.num_invokers = num_invokers
     self.serializers = serializers
Example #19
 def setUp(self):
     super(AsyncHTTPTestCase, self).setUp()
     self.correlation_id = str(uuid.uuid4())
     self.exchange = str(uuid.uuid4())
     self.get_delivered_message = concurrent.Future()
     self.get_returned_message = concurrent.Future()
     self.queue = str(uuid.uuid4())
     self.routing_key = str(uuid.uuid4())
     self.ready = locks.Event()
     amqp.install(self._app, self.io_loop, **{
         'on_ready_callback': self.on_amqp_ready,
         'enable_confirmations': self.CONFIRMATIONS,
         'on_return_callback': self.on_message_returned,
         'url': 'amqp://*****:*****@127.0.0.1:5672/%2f'})
     self.io_loop.start()
Example #20
async def check_connector_deserialize(addr, deserialize, in_value, check_out):
    done = locks.Event()

    async def handle_comm(comm):
        await comm.write(in_value)
        await done.wait()
        await comm.close()

    async with listen(addr, handle_comm) as listener:
        comm = await connect(listener.contact_address, deserialize=deserialize)

        out_value = await comm.read()
        done.set()
        await comm.close()
        check_out(out_value)
Example #21
def check_connector_deserialize(addr, deserialize, in_value, check_out):
    done = locks.Event()

    @gen.coroutine
    def handle_comm(comm):
        yield comm.write(in_value)
        yield done.wait()
        yield comm.close()

    with listen(addr, handle_comm) as listener:
        comm = yield connect(listener.contact_address, deserialize=deserialize)

        out_value = yield comm.read()
        done.set()
        yield comm.close()
        check_out(out_value)
Example #22
async def main():

    tornado.options.parse_command_line()
    # Create the global connection pool.
    async with aiopg.create_pool(
        host=options.db_host,
        port=options.db_port,
        user=options.db_user,
        password=options.db_password,
        dbname=options.db_database,
    ) as db:
        await create_table(db)
        app = Application(db)
        app.listen(options.port)

        shutdown_event = locks.Event()
        await shutdown_event.wait()
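Here `shutdown_event` is never set inside the function, so `await shutdown_event.wait()` simply parks `main()` forever while the IOLoop serves requests. To make shutdown actually reachable, something such as a signal handler has to set the event; a minimal sketch of that wiring (the handler itself is an assumption here, but compare the `exit_handler` in Example #29 below):

import signal

from tornado import ioloop, locks

shutdown_event = locks.Event()


def exit_handler(sig, frame):
    # Event.set is not signal-safe; hop onto the IOLoop thread first.
    ioloop.IOLoop.current().add_callback_from_signal(shutdown_event.set)


signal.signal(signal.SIGINT, exit_handler)
signal.signal(signal.SIGTERM, exit_handler)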
Example #23
    def test_simple_table_measurements(self):
        definition = self.generic_table_definition()

        wait_for_measurements = locks.Event()

        def instrumentation_check(measurements):
            for attempt, measurement in enumerate(measurements):
                self.assertEqual(measurement.attempt, attempt + 1)
                self.assertEqual(measurement.action, 'CreateTable')
                self.assertEqual(measurement.table, definition['TableName'])
                self.assertEqual(measurement.error, None)
            self.assertEqual(len(measurements), 1)
            wait_for_measurements.set()

        self.client.set_instrumentation_callback(instrumentation_check)
        response = yield self.client.create_table(definition)
        self.assertEqual(response['TableName'], definition['TableName'])
        yield wait_for_measurements.wait()
Example #24
    def test_internal_server_exception_has_max_retries_measurements(self):
        definition = self.generic_table_definition()

        wait_for_measurements = locks.Event()

        def instrumentation_check(measurements):
            for attempt, measurement in enumerate(measurements):
                self.assertEqual(measurement.error, 'InternalServerError')
            self.assertEqual(len(measurements), 3)
            wait_for_measurements.set()

        self.client.set_instrumentation_callback(instrumentation_check)
        with mock.patch('tornado_aws.client.AsyncAWSClient.fetch') as fetch:
            future = concurrent.Future()
            fetch.return_value = future
            future.set_exception(dynamodb.InternalServerError())
            with self.assertRaises(dynamodb.InternalServerError):
                yield self.client.create_table(definition)

        yield wait_for_measurements.wait()
Example #25
 async def start(self):
     # Initialize options from env vars or defaults
     options = self.initOptions()
     mongoClient = motor.motor_tornado.MotorClient(options["MONGO_URL"])
     dbConnection = mongoClient[options["DB_NAME"]]
     app = web.Application(
         [("/api/messages", MessagesController),
          (r"/(.*)", web.StaticFileHandler, {
              'path':
              os.path.join(os.path.dirname(__file__), '../client/build'),
              'default_filename':
              'index.html'
          })],
         dbConnection=dbConnection,
         options=options,
         debug=options["DEBUG"],
     )
     self.server = app.listen(options["SERVER_PORT"], options["ADDRESS"])
     logging.info("Server started on {0}:{1}".format(
         options["ADDRESS"], options["SERVER_PORT"]))
     shutdown_event = locks.Event()
     await shutdown_event.wait()
Example #26
    def test_retriable_exception_has_max_retries_measurements(self):
        definition = self.generic_table_definition()

        wait_for_measurements = locks.Event()

        def instrumentation_check(measurements):
            for attempt, measurement in enumerate(measurements):
                self.assertEqual(measurement.attempt, attempt + 1)
                self.assertEqual(measurement.action, 'CreateTable')
                self.assertEqual(measurement.table, definition['TableName'])
                self.assertEqual(measurement.error, 'RequestException')
            self.assertEqual(len(measurements), 3)
            wait_for_measurements.set()

        self.client.set_instrumentation_callback(instrumentation_check)
        with mock.patch('tornado_aws.client.AsyncAWSClient.fetch') as fetch:
            future = concurrent.Future()
            fetch.return_value = future
            future.set_exception(dynamodb.RequestException())
            with self.assertRaises(dynamodb.RequestException):
                yield self.client.create_table(definition)

        yield wait_for_measurements.wait()
Example #27
    def __init__(self,
                 hosts,
                 on_close=None,
                 io_loop=None,
                 clustering=False,
                 auto_connect=True):
        """Create a new instance of the ``Client`` class.

        :param hosts: A list of host connection values.
        :type hosts: list(dict)
        :param io_loop: Override the current Tornado IOLoop instance
        :type io_loop: tornado.ioloop.IOLoop
        :param method on_close: The method to call if the connection is closed
        :param bool clustering: Toggle the cluster support in the client
        :param bool auto_connect: Toggle the auto-connect on creation feature

        """
        self._buffer = bytes()
        self._busy = locks.Lock()
        self._closing = False
        self._cluster = {}
        self._clustering = clustering
        self._connected = locks.Event()
        self._connect_future = concurrent.Future()
        self._connection = None
        self._discovery = False
        self._hosts = hosts
        self._on_close_callback = on_close
        self._reader = hiredis.Reader()
        self.io_loop = io_loop or ioloop.IOLoop.current()
        if not self._clustering:
            if len(hosts) > 1:
                raise ValueError('Too many hosts for non-clustering mode')
        if auto_connect:
            LOGGER.debug('Auto-connecting')
            self.connect()
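The `_connected` Event above is the client's readiness gate: commands issued before the connection completes can `await` the event instead of failing. A minimal sketch of that gating pattern (the `GatedClient` class is illustrative, not part of the client shown above):

from tornado import locks


class GatedClient:
    """Illustrates gating commands on a `connected` Event plus a busy Lock."""

    def __init__(self):
        self._connected = locks.Event()
        self._busy = locks.Lock()

    async def execute(self, command: bytes) -> None:
        await self._connected.wait()  # park until on_connected() fires
        async with self._busy:        # serialize access to the connection
            print("sending", command)

    def on_connected(self) -> None:
        self._connected.set()         # release every parked execute() call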
Example #28
 def test_event_set_multiple(self):
     e = locks.Event()
     e.set()
     e.set()
     self.assertTrue(e.is_set())
Example #29
# internal packages
from api import api_routes, API_VERSION
from build_api_docs import generate_doc_from_endpoints


class Application(web.Application):
    def __init__(self, routes, pg_pool):
        settings = dict(template_path=os.path.join(os.path.dirname(__file__),
                                                   "templates"), )
        self.SWAGGER_SCHEMA = generate_doc_from_endpoints(
            routes, api_version=API_VERSION)
        super(Application, self).__init__(routes, **settings)


shutdown_event = locks.Event()


async def on_shutdown(app):
    #  cleanup_context
    shutdown_event.set()


def exit_handler(app, sig, frame):
    ioloop.IOLoop.instance().add_callback_from_signal(on_shutdown, app)


async def main():
    routes = api_routes
    app = Application(routes=routes, pg_pool=None)
    # ioloop.IOLoop.current().spawn_callback(listen_to_redis, app)
Example #30
def update(dbname, tname, opcols, oplockcols, matchcols):
    LOG.debug("|-update {0}:{1}, opcols: {2}, oplockcols: {3}, matchcols: {4}".
              format(dbname, tname, opcols, oplockcols, matchcols))
    # # match clause
    match_clause = ""
    if len(matchcols) != 0:
        match_clause = ''' WHERE '''
    for matchcol_num in range(0, len(matchcols)):
        matchcol = matchcols[matchcol_num]
        if isinstance(matchcol[1], str):
            match_clausei = ''' `{0}`{1}"{2}" '''\
                .format(matchcol[0], "=" if matchcol[2] == match_equal else "!=", matchcol[1])
        else:
            match_typei = "="
            if matchcol[2] == match_equal:
                match_typei = "="
            elif matchcol[2] == match_not_equal:
                match_typei = "!="
            elif matchcol[2] == match_greater:
                match_typei = ">"
            elif matchcol[2] == match_greater_equal:
                match_typei = ">="
            elif matchcol[2] == match_lesser:
                match_typei = "<"
            elif matchcol[2] == match_lesser_equal:
                match_typei = "<="
            else:
                raise NotImplementedError
            match_clausei = ''' `{0}`{1}{2} '''\
                .format(matchcol[0], match_typei, matchcol[1])
        if matchcol_num != 0:  # consider match relation
            if matchcol[3] == match_and:
                match_clause += " AND " + match_clausei
            elif matchcol[3] == match_or:
                match_clause += " OR " + match_clausei
            else:
                raise NotImplementedError
        else:
            match_clause += match_clausei
    tablelocks = __col_locks__[dbname][tname]
    opcollocks = []
    if len(oplockcols) != 0:
        sql = '''SELECT ''' + ",".join(
            map(lambda col: "`" + str(col[0]) + "`", oplockcols))
        # acquire lock columns
        # todo: more precise
        for oplockcol in oplockcols:
            if oplockcol[0] not in tablelocks:
                lock = locks.Event()
                opcollocks.append(lock)
                lock.clear()
                tablelocks[oplockcol[0]] = lock
            else:
                lock = tablelocks[oplockcol[0]]
                opcollocks.append(lock)
                yield lock.wait()
                lock.clear()
        sql += " FROM `{0}`.`{1}` ".format(dbname, tname)
        sql += " " + match_clause + " ;"
        # get out lock columns
        LOG.debug("|-execute sql: {0}".format(sql))
        try:
            cursor = yield m_configs_db.pool.execute(sql)
        except Exception as e:
            import traceback
            traceback.print_exc()

            LOG.error(traceback.format_exc())
        LOG.debug("|-executed")
        oplockcols_db_values = list(cursor.fetchone())
    sql = '''UPDATE `{db}`.`{table}` SET '''.format(db=dbname, table=tname)
    update_params = []
    update_opcols = []
    for col_name, col_value, op_code in opcols:
        if op_code == popc_add:  # +
            update_opcols.append(''' `{name}`=`{name}`+{delta} '''.format(
                name=col_name, delta=col_value))
        elif op_code == popc_sub:  # -
            update_opcols.append(''' `{name}`=`{name}`-{delta} '''.format(
                name=col_name, delta=col_value))
        elif op_code == popc_multi:  # *
            update_opcols.append(''' `{name}`=`{name}`*{delta} '''.format(
                name=col_name, delta=col_value))
        elif op_code == popc_divide:  # /
            update_opcols.append(''' `{name}`=`{name}`/{delta} '''.format(
                name=col_name, delta=col_value))
        elif op_code == popc_mod:  # %
            update_opcols.append(''' `{name}`=`{name}`%{delta} '''.format(
                name=col_name, delta=col_value))
        elif op_code == popc_assign:  # =
            update_opcols.append(''' `{name}`=%s '''.format(name=col_name))
            update_params.append(col_value)
        elif op_code == popc_extend:  # extend list
            col_db_value = oplockcols_db_values.pop(0)
            if col_db_value is not None:
                col_db_list, _ = parse_value(col_db_value)
                if col_db_list is None:
                    col_db_list = []
            else:
                col_db_list = []
            col_db_list.extend(col_value)
            col_db_list_bytes = serialize_value(col_db_list)
            update_opcols.append(''' `{name}`=%s '''.format(name=col_name))
            update_params.append(col_db_list_bytes)
        elif op_code == popc_remove:  # remove from list
            col_db_value = oplockcols_db_values.pop(0)
            if col_db_value is not None:
                col_db_list, _ = parse_value(col_db_value)
                if col_db_list is None:
                    col_db_list = []
            else:
                col_db_list = []
            for col_valuei in col_value:
                if col_valuei in col_db_list:
                    col_db_list.remove(col_valuei)
            col_db_list_bytes = serialize_value(col_db_list)
            update_opcols.append(''' `{name}`=%s '''.format(name=col_name))
            update_params.append(col_db_list_bytes)
        elif op_code == popc_append:  # string append
            col_db_value = oplockcols_db_values.pop(0)
            col_db_value = col_db_value if col_db_value is not None else ""
            update_opcols.append(''' `{name}`="{value}" '''.format(
                name=col_name, value=col_db_value + col_value))
        elif op_code == popc_prepend:  # string prepend
            col_db_value = oplockcols_db_values.pop(0)
            col_db_value = col_db_value if col_db_value is not None else ""
            update_opcols.append(''' `{name}`="{value}" '''.format(
                name=col_name, value=col_value + col_db_value))
        elif op_code == popc_replace:  # string replace
            col_db_value = oplockcols_db_values.pop(0)
            update_opcols.append(''' `{name}`="{value}" '''.format(
                name=col_name,
                value=col_db_value.replace(col_value[0], col_value[1])))
        else:
            raise NotImplementedError
    sql += ",".join(update_opcols)
    sql += " " + match_clause + " ;"
    LOG.debug("|-execute sql: {0}, params: {1}".format(sql, update_params))
    try:
        yield m_configs_db.pool.execute(sql, update_params)
    except Exception as e:
        import traceback
        traceback.print_exc()

        LOG.error(traceback.format_exc())
    LOG.debug("|-executed")
    # release lock columns
    for opcollock in opcollocks:
        opcollock.set()
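The `tablelocks` bookkeeping above builds a hand-rolled per-column mutex out of `locks.Event`: set means free, clear means held, and the final loop releases every column by calling `set()`. The acquire/release protocol, isolated (names are illustrative; note that `set()` wakes *all* waiters at once, so a real mutex such as `tornado.locks.Lock` would be the safer primitive here):

from tornado import gen, locks

column_locks = {}  # column name -> Event used as a mutex (set == free)


@gen.coroutine
def acquire_column(name):
    if name not in column_locks:
        lock = locks.Event()   # a fresh Event starts clear, i.e. held by us
        column_locks[name] = lock
    else:
        lock = column_locks[name]
        yield lock.wait()      # wait for the current holder's set()
        lock.clear()           # take the lock ourselves
    return lock


def release_column(lock):
    lock.set()                 # hand the column back, waking any waiters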