Code example #1
class _SessionState(object):
    __slots__ = ("id", "group_id", "ttl", "creation_time", "acquire_count")

    def __init__(self, state_id, group_id, ttl):
        self.id = state_id
        self.ttl = ttl
        self.group_id = group_id
        self.creation_time = time.time()
        self.acquire_count = AtomicInteger()

    def acquire(self, count):
        self.acquire_count.add(count)
        return self.id

    def release(self, count):
        self.acquire_count.add(-count)

    def is_valid(self):
        return self.is_in_use() or not self._is_expired(time.time())

    def is_in_use(self):
        return self.acquire_count.get() > 0

    def _is_expired(self, timestamp):
        expiration_time = self.creation_time + self.ttl
        if expiration_time < 0:
            expiration_time = six.MAXSIZE
        return timestamp > expiration_time

    def __eq__(self, other):
        return isinstance(other, _SessionState) and self.id == other.id

    def __ne__(self, other):
        return not self.__eq__(other)
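
Every snippet on this page leans on the same AtomicInteger counter. For reference, here is a minimal, lock-based sketch that supports only the calls seen in these examples (get, add, get_and_increment, increment_and_get). It is an illustration under that assumption; the real AtomicInteger shipped with the Hazelcast client may be implemented differently.

import threading


class AtomicInteger(object):
    """Minimal thread-safe counter sketch covering only the calls used in these snippets."""

    def __init__(self, initial=0):
        self._lock = threading.Lock()
        self._value = initial

    def get(self):
        with self._lock:
            return self._value

    def add(self, delta):
        # Adds delta (possibly negative) and returns the new value.
        with self._lock:
            self._value += delta
            return self._value

    def get_and_increment(self):
        # Returns the value before incrementing (Java-style semantics).
        with self._lock:
            value = self._value
            self._value += 1
            return value

    def increment_and_get(self):
        # Returns the value after incrementing.
        with self._lock:
            self._value += 1
            return self._value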
Code example #2
    def test_map_listener(self):
        config = {
            "cluster_name": self.cluster.id,
            "compact_serializers": [SomeFieldsSerializer([FieldDefinition(name="int32")])],
        }
        client = self.create_client(config)

        map_name = random_string()
        m = client.get_map(map_name).blocking()

        counter = AtomicInteger()

        def listener(_):
            counter.add(1)

        m.add_entry_listener(include_value=True, added_func=listener)

        # Put the entry from the other client so that a local schema
        # registry is not created in the actual client. This forces it
        # to go to the cluster to fetch the schema.
        other_client = self.create_client(config)
        other_client_map = other_client.get_map(map_name).blocking()
        other_client_map.put(1, SomeFields(int32=42))

        self.assertTrueEventually(lambda: self.assertEqual(1, counter.get()))
Code example #3
    def test_client_state_is_sent_once_if_send_operation_is_successful(self):
        conn_manager = self.client._connection_manager

        counter = AtomicInteger()

        def send_state_to_cluster_fn():
            counter.add(1)
            return ImmediateFuture(None)

        conn_manager._send_state_to_cluster_fn = send_state_to_cluster_fn

        self._restart_cluster()

        self.assertEqual(1, counter.get())
Code example #4
File: invocation.py    Project: OlyaT/FunFic
    def __init__(self, client):
        self._pending = {}
        self._event_handlers = {}
        self._next_correlation_id = AtomicInteger(1)
        self._client = client
        self._event_queue = Queue()
        self._is_redo_operation = client.config.network_config.redo_operation

        if client.config.network_config.smart_routing:
            self.invoke = self.invoke_smart
        else:
            self.invoke = self.invoke_non_smart

        self._client.connection_manager.add_listener(on_connection_closed=self.cleanup_connection)
        client.heartbeat.add_listener(on_heartbeat_stopped=self._heartbeat_stopped)
Code example #5
 def __init__(self, config=None):
     self.config = config or ClientConfig()
     self.properties = ClientProperties(self.config.get_properties())
     self.id = HazelcastClient.CLIENT_ID.get_and_increment()
     self.name = self._create_client_name()
     self._init_logger()
     self._logger_extras = {"client_name": self.name, "group_name": self.config.group_config.name}
     self._log_group_password_info()
     self.lifecycle = LifecycleService(self.config, self._logger_extras)
     self.reactor = AsyncoreReactor(self._logger_extras)
     self._address_providers = self._create_address_providers()
     self._address_translator = self._create_address_translator()
     self.connection_manager = ConnectionManager(self, self.reactor.new_connection, self._address_translator)
     self.heartbeat = Heartbeat(self)
     self.invoker = InvocationService(self)
     self.listener = ListenerService(self)
     self.cluster = ClusterService(self.config, self, self._address_providers)
     self.partition_service = PartitionService(self)
     self.proxy = ProxyManager(self)
     self.load_balancer = RandomLoadBalancer(self.cluster)
     self.serialization_service = SerializationServiceV1(serialization_config=self.config.serialization_config)
     self.transaction_manager = TransactionManager(self)
     self.lock_reference_id_generator = AtomicInteger(1)
     self.near_cache_manager = NearCacheManager(self)
     self.statistics = Statistics(self)
     self._start()
Code example #6
    def test_await_latch_multiple_waiters(self):
        latch = self._get_latch(1)

        completed = AtomicInteger()

        def run():
            latch.await_latch(inf)
            completed.get_and_increment()

        count = 10
        threads = []
        for _ in range(count):
            t = Thread(target=run)
            threads.append(t)
            t.start()

        latch.count_down()

        def assertion():
            self.assertEqual(count, completed.get())

        self.assertTrueEventually(assertion)

        for i in range(count):
            threads[i].join()
Code example #7
    def test_listener_re_register(self):
        member = self.cluster.start_member()
        config = ClientConfig()
        config.network_config.connection_attempt_limit = 10
        client = self.create_client(config)

        map = client.get_map("map")

        collector = event_collector()
        reg_id = map.add_entry_listener(added_func=collector)
        self.logger.info("Registered listener with id %s", reg_id)
        member.shutdown()
        sleep(3)
        self.cluster.start_member()

        count = AtomicInteger()

        def assert_events():
            if client.lifecycle.is_live:
                map.put("key-%d" % count.get_and_increment(), "value").result()
                self.assertGreater(len(collector.events), 0)
            else:
                self.fail("Client disconnected...")

        self.assertTrueEventually(assert_events)
Code example #8
def combine_futures(futures):
    """Combines set of Futures.

    Args:
        futures (list[Future]): List of Futures to be combined.

    Returns:
        Future: Result of the combination.
    """
    expected = len(futures)
    results = []
    if expected == 0:
        return ImmediateFuture(results)

    completed = AtomicInteger()
    combined = Future()

    def done(f):
        if not combined.done():
            if f.is_success():  # TODO: ensure ordering of results as original list
                results.append(f.result())
                if completed.get_and_increment() + 1 == expected:
                    combined.set_result(results)
            else:
                combined.set_exception(f.exception(), f.traceback())

    for future in futures:
        future.add_done_callback(done)

    return combined
Code example #9
    def test_listener_re_register(self):
        member = self.cluster.start_member()
        client = self.create_client({
            "cluster_name": self.cluster.id,
            "cluster_connect_timeout": 5.0,
        })

        map = client.get_map("map").blocking()

        collector = event_collector()
        reg_id = map.add_entry_listener(added_func=collector)
        self.logger.info("Registered listener with id %s", reg_id)
        member.shutdown()
        self.cluster.start_member()

        count = AtomicInteger()

        def assert_events():
            if client.lifecycle_service.is_running():
                while True:
                    try:
                        map.put("key-%d" % count.get_and_increment(), "value")
                        break
                    except TargetDisconnectedError:
                        pass
                self.assertGreater(len(collector.events), 0)
            else:
                self.fail("Client disconnected...")

        self.assertTrueEventually(assert_events)
Code example #10
    def __init__(
        self,
        client,
        config,
        reactor,
        address_provider,
        lifecycle_service,
        partition_service,
        cluster_service,
        invocation_service,
        near_cache_manager,
        send_state_to_cluster_fn,
    ):
        self.live = False
        self.active_connections = {}  # uuid to connection, must be modified under the _lock
        self.client_uuid = uuid.uuid4()

        self._client = client
        self._config = config
        self._reactor = reactor
        self._address_provider = address_provider
        self._lifecycle_service = lifecycle_service
        self._partition_service = partition_service
        self._cluster_service = cluster_service
        self._invocation_service = invocation_service
        self._near_cache_manager = near_cache_manager
        self._send_state_to_cluster_fn = send_state_to_cluster_fn
        self._client_state = _ClientState.INITIAL  # must be modified under the _lock
        self._smart_routing_enabled = config.smart_routing
        self._wait_strategy = self._init_wait_strategy(config)
        self._reconnect_mode = config.reconnect_mode
        self._heartbeat_manager = _HeartbeatManager(self, self._client, config,
                                                    reactor,
                                                    invocation_service)
        self._connection_listeners = []
        self._connect_all_members_timer = None
        self._async_start = config.async_start
        self._connect_to_cluster_thread_running = False
        self._shuffle_member_list = config.shuffle_member_list
        self._lock = threading.RLock()
        self._connection_id_generator = AtomicInteger()
        self._labels = frozenset(config.labels)
        self._cluster_id = None
        self._load_balancer = None
        self._use_public_ip = (isinstance(
            address_provider, DefaultAddressProvider) and config.use_public_ip)
Code example #11
 def __init__(self, **kwargs):
     config = _Config.from_dict(kwargs)
     self._config = config
     self._context = _ClientContext()
     client_id = HazelcastClient._CLIENT_ID.get_and_increment()
     self.name = self._create_client_name(client_id)
     self._reactor = AsyncoreReactor()
     self._serialization_service = SerializationServiceV1(config)
     self._near_cache_manager = NearCacheManager(
         config, self._serialization_service)
     self._internal_lifecycle_service = _InternalLifecycleService(config)
     self.lifecycle_service = LifecycleService(
         self._internal_lifecycle_service)
     self._invocation_service = InvocationService(self, config,
                                                  self._reactor)
     self._address_provider = self._create_address_provider()
     self._internal_partition_service = _InternalPartitionService(self)
     self.partition_service = PartitionService(
         self._internal_partition_service, self._serialization_service)
     self._internal_cluster_service = _InternalClusterService(self, config)
     self.cluster_service = ClusterService(self._internal_cluster_service)
     self._connection_manager = ConnectionManager(
         self,
         config,
         self._reactor,
         self._address_provider,
         self._internal_lifecycle_service,
         self._internal_partition_service,
         self._internal_cluster_service,
         self._invocation_service,
         self._near_cache_manager,
     )
     self._load_balancer = self._init_load_balancer(config)
     self._listener_service = ListenerService(self, config,
                                              self._connection_manager,
                                              self._invocation_service)
     self._proxy_manager = ProxyManager(self._context)
     self.cp_subsystem = CPSubsystem(self._context)
     self._proxy_session_manager = ProxySessionManager(self._context)
     self._transaction_manager = TransactionManager(self._context)
     self._lock_reference_id_generator = AtomicInteger(1)
     self._statistics = Statistics(
         self,
         config,
         self._reactor,
         self._connection_manager,
         self._invocation_service,
         self._near_cache_manager,
     )
     self._cluster_view_listener = ClusterViewListenerService(
         self,
         self._connection_manager,
         self._internal_partition_service,
         self._internal_cluster_service,
         self._invocation_service,
     )
     self._shutdown_lock = threading.RLock()
     self._init_context()
     self._start()
Code example #12
    def test_count_down_retry_on_timeout(self):
        latch = self._get_latch(1)

        original = latch._wrapped._request_count_down
        called_count = AtomicInteger()

        def mock(expected_round, invocation_uuid):
            if called_count.get_and_increment() < 2:
                return ImmediateExceptionFuture(OperationTimeoutError("xx"))
            return original(expected_round, invocation_uuid)

        latch._wrapped._request_count_down = mock

        latch.count_down()
        self.assertEqual(3, called_count.get())  # Resolves on its third call; the first 2 raise a timeout error
        self.assertEqual(0, latch.get_count())
Code example #13
 def __init__(self, client):
     self._is_live = False
     self._pending = {}
     self._listeners = {}
     self._next_correlation_id = AtomicInteger(1)
     self._client = client
     self._event_queue = Queue()
     self._pending_lock = threading.Lock()
Code example #14
    def _mock_request(self, method_name, result, first_call_err):
        called = AtomicInteger()

        def mock(*_, **__):
            if called.get_and_increment() == 0 and first_call_err:
                return ImmediateExceptionFuture(first_call_err)
            return ImmediateFuture(result)

        setattr(self.proxy._wrapped, method_name, MagicMock(side_effect=mock))
Code example #15
    def test_sending_client_state_is_retried_if_send_operation_is_failed_synchronously(self):
        conn_manager = self.client._connection_manager

        counter = AtomicInteger()

        def send_state_to_cluster_fn():
            counter.add(1)
            if counter.get() == 5:
                # Let's pretend it succeeds at some point
                return ImmediateFuture(None)

            raise RuntimeError("expected")

        conn_manager._send_state_to_cluster_fn = send_state_to_cluster_fn

        self._restart_cluster()

        self.assertEqual(5, counter.get())
Code example #16
def combine_futures(futures):
    """Combines set of Futures.

    It waits for the completion of the all input Futures regardless
    of their output.

    The returned Future completes with the list of the results of the input
    Futures, respecting the input order.

    If one of the input Futures completes exceptionally, the returned
    Future also completes exceptionally. In case of multiple exceptional
    completions, the returned Future will be completed with the first
    exceptional result.

    Args:
        futures (list[Future]): List of Futures to be combined.

    Returns:
        Future: Result of the combination.
    """
    count = len(futures)
    results = [None] * count
    if count == 0:
        return ImmediateFuture(results)

    completed = AtomicInteger()
    combined = Future()
    errors = []

    def done(future, index):
        if future.is_success():
            results[index] = future.result()
        else:
            if not errors:
                # We are fine with this check-then-act.
                # At most, we will end up with a couple of
                # errors stored in case of concurrent calls.
                # The idea behind this check is to try to minimize
                # the number of errors we store without
                # synchronization, as we only need the first error.
                errors.append((future.exception(), future.traceback()))

        if count == completed.increment_and_get():
            if errors:
                first_exception, first_traceback = errors[0]
                combined.set_exception(first_exception, first_traceback)
            else:
                combined.set_result(results)

    for index, future in enumerate(futures):
        # Capture the index in the closure or else we
        # will only update the last element.
        future.add_done_callback(
            lambda f, captured_index=index: done(f, captured_index))

    return combined
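
As a quick illustration of the contract described in the docstring above (results in input order, first error wins), the following hypothetical usage combines a few already-completed futures. It assumes that combine_futures and ImmediateFuture can be imported from hazelcast.future; the snippet above does not show its imports, so adjust the import path to your client version.

# Assumed import path; the example above does not show where these names live.
from hazelcast.future import ImmediateFuture, combine_futures

futures = [ImmediateFuture(1), ImmediateFuture(2), ImmediateFuture(3)]
combined = combine_futures(futures)

# All inputs are already complete, so the combined future resolves
# immediately with the results in input order.
print(combined.result())  # [1, 2, 3]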
Code example #17
    def test_timer_cleanup(self, _, cls):
        call_count = AtomicInteger()

        def callback():
            call_count.add(1)

        loop = cls({})
        loop.start()
        loop.add_timer(float('inf'), callback)  # never expired, must be cleaned up
        time.sleep(1)
        try:
            self.assertEqual(0, call_count.get())
        finally:
            loop.shutdown()

        def assertion():
            self.assertEqual(1, call_count.get())

        self.assertTrueEventually(assertion)
Code example #18
 def __init__(self, config=None):
     self._context = _ClientContext()
     self.config = config or ClientConfig()
     self.properties = ClientProperties(self.config.get_properties())
     self._id = HazelcastClient._CLIENT_ID.get_and_increment()
     self.name = self._create_client_name()
     self._init_logger()
     self._logger_extras = {
         "client_name": self.name,
         "cluster_name": self.config.cluster_name
     }
     self._reactor = AsyncoreReactor(self._logger_extras)
     self._serialization_service = SerializationServiceV1(
         serialization_config=self.config.serialization)
     self._near_cache_manager = NearCacheManager(
         self, self._serialization_service)
     self._internal_lifecycle_service = _InternalLifecycleService(
         self, self._logger_extras)
     self.lifecycle_service = LifecycleService(
         self._internal_lifecycle_service)
     self._invocation_service = InvocationService(self, self._reactor,
                                                  self._logger_extras)
     self._address_provider = self._create_address_provider()
     self._internal_partition_service = _InternalPartitionService(
         self, self._logger_extras)
     self.partition_service = PartitionService(
         self._internal_partition_service)
     self._internal_cluster_service = _InternalClusterService(
         self, self._logger_extras)
     self.cluster_service = ClusterService(self._internal_cluster_service)
     self._connection_manager = ConnectionManager(
         self, self._reactor, self._address_provider,
         self._internal_lifecycle_service, self._internal_partition_service,
         self._internal_cluster_service, self._invocation_service,
         self._near_cache_manager, self._logger_extras)
     self._load_balancer = self._init_load_balancer(self.config)
     self._listener_service = ListenerService(self,
                                              self._connection_manager,
                                              self._invocation_service,
                                              self._logger_extras)
     self._proxy_manager = ProxyManager(self._context)
     self._transaction_manager = TransactionManager(self._context,
                                                    self._logger_extras)
     self._lock_reference_id_generator = AtomicInteger(1)
     self._statistics = Statistics(self, self._reactor,
                                   self._connection_manager,
                                   self._invocation_service,
                                   self._near_cache_manager,
                                   self._logger_extras)
     self._cluster_view_listener = ClusterViewListenerService(
         self, self._connection_manager, self._internal_partition_service,
         self._internal_cluster_service, self._invocation_service)
     self._shutdown_lock = threading.RLock()
     self._init_context()
     self._start()
Code example #19
    def _verify_listeners_after_client_disconnected(self, member_address,
                                                    client_address):
        heartbeat_seconds = 2
        cluster = self.create_cluster(
            self.rc,
            self._create_cluster_config(member_address, heartbeat_seconds))
        member = cluster.start_member()

        client = HazelcastClient(
            cluster_name=cluster.id,
            cluster_members=[client_address],
            cluster_connect_timeout=sys.maxsize,
        )

        self.client = client

        test_map = client.get_map("test").blocking()

        event_count = AtomicInteger()

        test_map.add_entry_listener(
            added_func=lambda _: event_count.get_and_increment(),
            include_value=False)

        self.assertTrueEventually(lambda: self.assertEqual(
            1, len(client._connection_manager.active_connections)))

        member.shutdown()

        time.sleep(2 * heartbeat_seconds)

        cluster.start_member()

        def assertion():
            test_map.remove(1)
            test_map.put(1, 2)
            self.assertNotEqual(0, event_count.get())

        self.assertTrueEventually(assertion)

        client.shutdown()
        self.rc.terminateCluster(cluster.id)
Code example #20
    def __init__(self, client, reactor, logger_extras):
        config = client.config
        if config.network.smart_routing:
            self.invoke = self._invoke_smart
        else:
            self.invoke = self._invoke_non_smart

        self._client = client
        self._reactor = reactor
        self._logger_extras = logger_extras
        self._partition_service = None
        self._connection_manager = None
        self._listener_service = None
        self._check_invocation_allowed_fn = None
        self._pending = {}
        self._next_correlation_id = AtomicInteger(1)
        self._is_redo_operation = config.network.redo_operation
        self._invocation_timeout = self._init_invocation_timeout()
        self._invocation_retry_pause = self._init_invocation_retry_pause()
        self._shutdown = False
Code example #21
    def __init__(self, client, config, reactor):
        smart_routing = config.smart_routing
        if smart_routing:
            self._do_invoke = self._invoke_smart
        else:
            self._do_invoke = self._invoke_non_smart

        self._client = client
        self._reactor = reactor
        self._partition_service = None
        self._connection_manager = None
        self._listener_service = None
        self._check_invocation_allowed_fn = None
        self._pending = {}
        self._next_correlation_id = AtomicInteger(1)
        self._is_redo_operation = config.redo_operation
        self._invocation_timeout = config.invocation_timeout
        self._invocation_retry_pause = config.invocation_retry_pause
        self._backup_ack_to_client_enabled = smart_routing and config.backup_ack_to_client_enabled
        self._fail_on_indeterminate_state = config.fail_on_indeterminate_operation_state
        self._backup_timeout = config.operation_backup_timeout
        self._clean_resources_timer = None
        self._shutdown = False
Code example #22
    def __init__(self, client):
        self._pending = {}
        self._event_handlers = {}
        self._next_correlation_id = AtomicInteger(1)
        self._client = client
        self._logger_extras = {
            "client_name": client.name,
            "group_name": client.config.group_config.name
        }
        self._event_queue = queue.Queue()
        self._is_redo_operation = client.config.network_config.redo_operation
        self._invocation_retry_pause = self._init_invocation_retry_pause()
        self.invocation_timeout = self._init_invocation_timeout()

        if client.config.network_config.smart_routing:
            self.invoke = self.invoke_smart
        else:
            self.invoke = self.invoke_non_smart

        self._client.connection_manager.add_listener(
            on_connection_closed=self.cleanup_connection)
        client.heartbeat.add_listener(
            on_heartbeat_stopped=self._heartbeat_stopped)
Code example #23
    def __init__(self, client, reactor, address_provider, lifecycle_service,
                 partition_service, cluster_service, invocation_service,
                 near_cache_manager, logger_extras):
        self.live = False
        self.active_connections = dict()
        self.client_uuid = uuid.uuid4()

        self._client = client
        self._reactor = reactor
        self._address_provider = address_provider
        self._lifecycle_service = lifecycle_service
        self._partition_service = partition_service
        self._cluster_service = cluster_service
        self._invocation_service = invocation_service
        self._near_cache_manager = near_cache_manager
        self._logger_extras = logger_extras
        config = self._client.config
        self._smart_routing_enabled = config.network.smart_routing
        self._wait_strategy = self._init_wait_strategy(config)
        self._reconnect_mode = config.connection_strategy.reconnect_mode
        self._heartbeat_manager = _HeartbeatManager(self, self._client,
                                                    reactor,
                                                    invocation_service,
                                                    logger_extras)
        self._connection_listeners = []
        self._connect_all_members_timer = None
        self._async_start = config.connection_strategy.async_start
        self._connect_to_cluster_thread_running = False
        self._pending_connections = dict()
        props = self._client.properties
        self._shuffle_member_list = props.get_bool(props.SHUFFLE_MEMBER_LIST)
        self._lock = threading.RLock()
        self._connection_id_generator = AtomicInteger()
        self._labels = config.labels
        self._cluster_id = None
        self._load_balancer = None
Code example #24
    def __init__(self, client):
        self._pending = {}
        self._event_handlers = {}
        self._next_correlation_id = AtomicInteger(1)
        self._client = client
        self._event_queue = Queue()
        self._is_redo_operation = client.config.network_config.redo_operation

        if client.config.network_config.smart_routing:
            self.invoke = self.invoke_smart
        else:
            self.invoke = self.invoke_non_smart

        self._client.connection_manager.add_listener(on_connection_closed=self.cleanup_connection)
        client.heartbeat.add_listener(on_heartbeat_stopped=self._heartbeat_stopped)
Code example #25
    def __init__(self, client, reactor, address_provider, lifecycle_service,
                 partition_service, cluster_service, invocation_service,
                 near_cache_manager):
        self.live = False
        self.active_connections = {}  # uuid to connection, must be modified under the _lock
        self.client_uuid = uuid.uuid4()

        self._client = client
        self._reactor = reactor
        self._address_provider = address_provider
        self._lifecycle_service = lifecycle_service
        self._partition_service = partition_service
        self._cluster_service = cluster_service
        self._invocation_service = invocation_service
        self._near_cache_manager = near_cache_manager
        config = self._client.config
        self._smart_routing_enabled = config.smart_routing
        self._wait_strategy = self._init_wait_strategy(config)
        self._reconnect_mode = config.reconnect_mode
        self._heartbeat_manager = _HeartbeatManager(self, self._client,
                                                    reactor,
                                                    invocation_service)
        self._connection_listeners = []
        self._connect_all_members_timer = None
        self._async_start = config.async_start
        self._connect_to_cluster_thread_running = False
        self._pending_connections = dict()  # must be modified under the _lock
        self._addresses_to_connections = {}  # address to connection, must be modified under the _lock
        self._shuffle_member_list = config.shuffle_member_list
        self._lock = threading.RLock()
        self._connection_id_generator = AtomicInteger()
        self._labels = frozenset(config.labels)
        self._cluster_id = None
        self._load_balancer = None
Code example #26
class Connection(object):
    _closed = False
    endpoint = None
    heartbeating = True
    is_owner = False
    counter = AtomicInteger()

    def __init__(self, address, connection_closed_callback, message_callback):
        self._address = (address.host, address.port)
        self.id = self.counter.get_and_increment()
        self.logger = logging.getLogger("Connection[%s](%s:%d)" %
                                        (self.id, address.host, address.port))
        self._connection_closed_callback = connection_closed_callback
        self._message_callback = message_callback
        self._read_buffer = ""
        self.last_read = 0

    def live(self):
        return not self._closed

    def send_message(self, message):
        if not self.live():
            raise IOError("Connection is not live.")

        message.add_flag(BEGIN_END_FLAG)
        self.write(message.buffer)

    def receive_message(self):
        self.last_read = time.time()
        # split frames
        while len(self._read_buffer) >= INT_SIZE_IN_BYTES:
            frame_length = struct.unpack_from(FMT_LE_INT, self._read_buffer,
                                              0)[0]
            if frame_length > len(self._read_buffer):
                return
            # Note: buffer() exists only in Python 2; this snippet is from the legacy client.
            message = ClientMessage(buffer(self._read_buffer, 0, frame_length))
            self._read_buffer = self._read_buffer[frame_length:]
            self._message_callback(message, self)

    def write(self, data):
        # must be implemented by subclass
        pass

    def close(self, cause):
        pass

    def __repr__(self):
        return "Connection(address=%s, id=%s)" % (self._address, self.id)
Code example #27
    def test_add_timer(self, _, cls):
        call_count = AtomicInteger()

        def callback():
            call_count.add(1)

        loop = cls({})
        loop.start()
        loop.add_timer(0, callback)  # already expired, should be run immediately

        def assertion():
            self.assertEqual(1, call_count.get())

        try:
            self.assertTrueEventually(assertion)
        finally:
            loop.shutdown()
Code example #28
    def test_listener_re_register(self):
        member = self.cluster.start_member()
        client = self.create_client()

        map = client.get_map("map")

        collector = event_collector()
        reg_id = map.add_entry_listener(added=collector)
        self.logger.info("Registered listener with id %s", reg_id)
        member.shutdown()
        self.cluster.start_member()

        count = AtomicInteger()

        def assert_events():
            map.put("key-%d" % count.get_and_increment(), "value").result()
            self.assertGreater(len(collector.events), 0)

        self.assertTrueEventually(assert_events)
Code example #29
def combine_futures(*futures):
    expected = len(futures)
    results = []
    completed = AtomicInteger()
    combined = Future()

    def done(f):
        if not combined.done():
            if f.is_success():  # TODO: ensure ordering of results as original list
                results.append(f.result())
                if completed.get_and_increment() + 1 == expected:
                    combined.set_result(results)
            else:
                combined.set_exception(f.exception(), f.traceback())

    for future in futures:
        future.add_done_callback(done)

    return combined
Code example #30
    def test_timer_that_adds_another_timer(self, _, cls):
        loop = cls({})
        loop.start()

        call_count = AtomicInteger()

        def callback():
            if call_count.get() == 0:
                loop.add_timer(0, callback)
            call_count.add(1)

        loop.add_timer(float('inf'), callback)

        loop.shutdown()

        def assertion():
            self.assertEqual(2, call_count.get())  # newly added timer must also be cleaned up

        self.assertTrueEventually(assertion)
Code example #31
class ConnectionManager(object):
    """ConnectionManager is responsible for managing ``Connection`` objects."""
    def __init__(self, client, reactor, address_provider, lifecycle_service,
                 partition_service, cluster_service, invocation_service,
                 near_cache_manager):
        self.live = False
        self.active_connections = {}  # uuid to connection, must be modified under the _lock
        self.client_uuid = uuid.uuid4()

        self._client = client
        self._reactor = reactor
        self._address_provider = address_provider
        self._lifecycle_service = lifecycle_service
        self._partition_service = partition_service
        self._cluster_service = cluster_service
        self._invocation_service = invocation_service
        self._near_cache_manager = near_cache_manager
        config = self._client.config
        self._smart_routing_enabled = config.smart_routing
        self._wait_strategy = self._init_wait_strategy(config)
        self._reconnect_mode = config.reconnect_mode
        self._heartbeat_manager = _HeartbeatManager(self, self._client,
                                                    reactor,
                                                    invocation_service)
        self._connection_listeners = []
        self._connect_all_members_timer = None
        self._async_start = config.async_start
        self._connect_to_cluster_thread_running = False
        self._pending_connections = dict()  # must be modified under the _lock
        self._addresses_to_connections = {}  # address to connection, must be modified under the _lock
        self._shuffle_member_list = config.shuffle_member_list
        self._lock = threading.RLock()
        self._connection_id_generator = AtomicInteger()
        self._labels = frozenset(config.labels)
        self._cluster_id = None
        self._load_balancer = None

    def add_listener(self,
                     on_connection_opened=None,
                     on_connection_closed=None):
        """Registers a ConnectionListener.

        If the same listener is registered multiple times, it will be notified multiple times.

        Args:
            on_connection_opened (function): Function to be called when a connection is opened. (Default value = None)
            on_connection_closed (function): Function to be called when a connection is removed. (Default value = None)
        """
        self._connection_listeners.append(
            (on_connection_opened, on_connection_closed))

    def get_connection(self, member_uuid):
        return self.active_connections.get(member_uuid, None)

    def get_connection_from_address(self, address):
        return self._addresses_to_connections.get(address, None)

    def get_random_connection(self):
        if self._smart_routing_enabled:
            member = self._load_balancer.next()
            if member:
                connection = self.get_connection(member.uuid)
                if connection:
                    return connection

        # We should not get to this point under normal circumstances.
        # Therefore, copying the list should be OK.
        for connection in list(six.itervalues(self.active_connections)):
            return connection

        return None

    def start(self, load_balancer):
        if self.live:
            return

        self.live = True
        self._load_balancer = load_balancer
        self._heartbeat_manager.start()
        self._connect_to_cluster()
        if self._smart_routing_enabled:
            self._start_connect_all_members_timer()

    def shutdown(self):
        if not self.live:
            return

        self.live = False
        if self._connect_all_members_timer:
            self._connect_all_members_timer.cancel()

        self._heartbeat_manager.shutdown()

        with self._lock:
            for connection_future in six.itervalues(self._pending_connections):
                connection_future.set_exception(
                    HazelcastClientNotActiveError(
                        "Hazelcast client is shutting down"))

            # Need to create copy of connection values to avoid modification errors on runtime
            for connection in list(six.itervalues(self.active_connections)):
                connection.close("Hazelcast client is shutting down", None)

            self.active_connections.clear()
            self._addresses_to_connections.clear()
            self._pending_connections.clear()

        del self._connection_listeners[:]

    def connect_to_all_cluster_members(self):
        if not self._smart_routing_enabled:
            return

        for member in self._cluster_service.get_members():
            try:
                self._get_or_connect(member.address).result()
            except:
                pass

    def on_connection_close(self, closed_connection, cause):
        connected_address = closed_connection.connected_address
        remote_uuid = closed_connection.remote_uuid
        remote_address = closed_connection.remote_address

        if not connected_address:
            _logger.debug(
                "Destroying %s, but it has no remote address, hence nothing is "
                "removed from the connection dictionary", closed_connection)

        with self._lock:
            pending = self._pending_connections.pop(connected_address, None)
            connection = self.active_connections.pop(remote_uuid, None)
            self._addresses_to_connections.pop(remote_address, None)

            if pending:
                pending.set_exception(cause)

            if connection:
                _logger.info("Removed connection to %s:%s, connection: %s",
                             connected_address, remote_uuid, connection)
                if not self.active_connections:
                    self._lifecycle_service.fire_lifecycle_event(
                        LifecycleState.DISCONNECTED)
                    self._trigger_cluster_reconnection()

        if connection:
            for _, on_connection_closed in self._connection_listeners:
                if on_connection_closed:
                    try:
                        on_connection_closed(connection, cause)
                    except:
                        _logger.exception("Exception in connection listener")
        else:
            if remote_uuid:
                _logger.debug(
                    "Destroying %s, but there is no mapping for %s in the connection dictionary",
                    closed_connection, remote_uuid)

    def check_invocation_allowed(self):
        if self.active_connections:
            return

        if self._async_start or self._reconnect_mode == ReconnectMode.ASYNC:
            raise ClientOfflineError()
        else:
            raise IOError("No connection found to cluster")

    def _trigger_cluster_reconnection(self):
        if self._reconnect_mode == ReconnectMode.OFF:
            _logger.info("Reconnect mode is OFF. Shutting down the client")
            self._shutdown_client()
            return

        if self._lifecycle_service.running:
            self._start_connect_to_cluster_thread()

    def _init_wait_strategy(self, config):
        return _WaitStrategy(config.retry_initial_backoff,
                             config.retry_max_backoff, config.retry_multiplier,
                             config.cluster_connect_timeout,
                             config.retry_jitter)

    def _start_connect_all_members_timer(self):
        connecting_addresses = set()

        def run():
            if not self._lifecycle_service.running:
                return

            for member in self._cluster_service.get_members():
                address = member.address

                if not self.get_connection_from_address(
                        address) and address not in connecting_addresses:
                    connecting_addresses.add(address)
                    if not self._lifecycle_service.running:
                        break

                    if not self.get_connection(member.uuid):
                        # Bind the address to the value
                        # in this loop iteration
                        def cb(_, address=address):
                            connecting_addresses.discard(address)

                        self._get_or_connect(address).add_done_callback(cb)

            self._connect_all_members_timer = self._reactor.add_timer(1, run)

        self._connect_all_members_timer = self._reactor.add_timer(1, run)

    def _connect_to_cluster(self):
        if self._async_start:
            self._start_connect_to_cluster_thread()
        else:
            self._sync_connect_to_cluster()

    def _start_connect_to_cluster_thread(self):
        with self._lock:
            if self._connect_to_cluster_thread_running:
                return

            self._connect_to_cluster_thread_running = True

        def run():
            try:
                while True:
                    self._sync_connect_to_cluster()
                    with self._lock:
                        if self.active_connections:
                            self._connect_to_cluster_thread_running = False
                            return
            except:
                _logger.exception(
                    "Could not connect to any cluster, shutting down the client"
                )
                self._shutdown_client()

        t = threading.Thread(target=run, name='hazelcast_async_connection')
        t.daemon = True
        t.start()

    def _shutdown_client(self):
        try:
            self._client.shutdown()
        except:
            _logger.exception("Exception during client shutdown")

    def _sync_connect_to_cluster(self):
        tried_addresses = set()
        self._wait_strategy.reset()
        try:
            while True:
                for address in self._get_possible_addresses():
                    self._check_client_active()
                    tried_addresses.add(address)
                    connection = self._connect(address)
                    if connection:
                        return
                # If the address providers load no addresses (which seems to be possible),
                # then the above loop is not entered and the lifecycle check is missing,
                # hence we need to repeat the same check at this point.
                self._check_client_active()
                if not self._wait_strategy.sleep():
                    break
        except (ClientNotAllowedInClusterError, InvalidConfigurationError):
            cluster_name = self._client.config.cluster_name
            _logger.exception("Stopped trying on cluster %s", cluster_name)

        cluster_name = self._client.config.cluster_name
        _logger.info(
            "Unable to connect to any address from the cluster with name: %s. "
            "The following addresses were tried: %s", cluster_name,
            tried_addresses)
        if self._lifecycle_service.running:
            msg = "Unable to connect to any cluster"
        else:
            msg = "Client is being shutdown"
        raise IllegalStateError(msg)

    def _connect(self, address):
        _logger.info("Trying to connect to %s", address)
        try:
            return self._get_or_connect(address).result()
        except (ClientNotAllowedInClusterError,
                InvalidConfigurationError) as e:
            _logger.warning("Error during initial connection to %s: %s",
                            address, e)
            raise e
        except Exception as e:
            _logger.warning("Error during initial connection to %s: %s",
                            address, e)
            return None

    def _get_or_connect(self, address):
        connection = self.get_connection_from_address(address)
        if connection:
            return ImmediateFuture(connection)

        with self._lock:
            connection = self.get_connection_from_address(address)
            if connection:
                return ImmediateFuture(connection)
            else:
                pending = self._pending_connections.get(address, None)
                if pending:
                    return pending
                else:
                    try:
                        translated = self._address_provider.translate(address)
                        if not translated:
                            return ImmediateExceptionFuture(
                                ValueError(
                                    "Address translator could not translate address %s"
                                    % address))

                        factory = self._reactor.connection_factory
                        connection = factory(
                            self,
                            self._connection_id_generator.get_and_increment(),
                            translated, self._client.config,
                            self._invocation_service.handle_client_message)
                    except IOError:
                        error = sys.exc_info()
                        return ImmediateExceptionFuture(error[1], error[2])

                    future = self._authenticate(connection).continue_with(
                        self._on_auth, connection, address)
                    self._pending_connections[address] = future
                    return future

    def _authenticate(self, connection):
        client = self._client
        cluster_name = client.config.cluster_name
        client_name = client.name
        request = client_authentication_codec.encode_request(
            cluster_name, None, None, self.client_uuid, CLIENT_TYPE,
            SERIALIZATION_VERSION, __version__, client_name, self._labels)

        invocation = Invocation(request,
                                connection=connection,
                                urgent=True,
                                response_handler=lambda m: m)
        self._invocation_service.invoke(invocation)
        return invocation.future

    def _on_auth(self, response, connection, address):
        if response.is_success():
            response = client_authentication_codec.decode_response(
                response.result())
            status = response["status"]
            if status == _AuthenticationStatus.AUTHENTICATED:
                return self._handle_successful_auth(response, connection,
                                                    address)

            if status == _AuthenticationStatus.CREDENTIALS_FAILED:
                err = AuthenticationError(
                    "Authentication failed. The configured cluster name on "
                    "the client does not match the one configured in the cluster."
                )
            elif status == _AuthenticationStatus.NOT_ALLOWED_IN_CLUSTER:
                err = ClientNotAllowedInClusterError(
                    "Client is not allowed in the cluster")
            elif status == _AuthenticationStatus.SERIALIZATION_VERSION_MISMATCH:
                err = IllegalStateError(
                    "Server serialization version does not match to client")
            else:
                err = AuthenticationError(
                    "Authentication status code not supported. status: %s" %
                    status)

            connection.close("Failed to authenticate connection", err)
            raise err
        else:
            e = response.exception()
            # This will set the exception for the pending connection future
            connection.close("Failed to authenticate connection", e)
            six.reraise(e.__class__, e, response.traceback())

    def _handle_successful_auth(self, response, connection, address):
        self._check_partition_count(response["partition_count"])

        server_version_str = response["server_hazelcast_version"]
        remote_address = response["address"]
        remote_uuid = response["member_uuid"]

        connection.remote_address = remote_address
        connection.server_version = calculate_version(server_version_str)
        connection.remote_uuid = remote_uuid

        new_cluster_id = response["cluster_id"]

        is_initial_connection = not self.active_connections
        changed_cluster = is_initial_connection and self._cluster_id is not None and self._cluster_id != new_cluster_id
        if changed_cluster:
            _logger.warning(
                "Switching from current cluster: %s to new cluster: %s",
                self._cluster_id, new_cluster_id)
            self._on_cluster_restart()

        with self._lock:
            self.active_connections[remote_uuid] = connection
            self._addresses_to_connections[remote_address] = connection
            self._pending_connections.pop(address, None)

        if is_initial_connection:
            self._cluster_id = new_cluster_id
            self._lifecycle_service.fire_lifecycle_event(
                LifecycleState.CONNECTED)

        _logger.info(
            "Authenticated with server %s:%s, server version: %s, local address: %s",
            remote_address, remote_uuid, server_version_str,
            connection.local_address)

        for on_connection_opened, _ in self._connection_listeners:
            if on_connection_opened:
                try:
                    on_connection_opened(connection)
                except:
                    _logger.exception("Exception in connection listener")

        if not connection.live:
            self.on_connection_close(connection, None)

        return connection

    def _on_cluster_restart(self):
        self._near_cache_manager.clear_near_caches()
        self._cluster_service.clear_member_list_version()

    def _check_partition_count(self, partition_count):
        if not self._partition_service.check_and_set_partition_count(
                partition_count):
            raise ClientNotAllowedInClusterError(
                "Client can not work with this cluster because it has a "
                "different partition count. Expected partition count: %d, "
                "Member partition count: %d" %
                (self._partition_service.partition_count, partition_count))

    def _check_client_active(self):
        if not self._lifecycle_service.running:
            raise HazelcastClientNotActiveError()

    def _get_possible_addresses(self):
        member_addresses = list(
            map(lambda m: (m.address, None),
                self._cluster_service.get_members()))

        if self._shuffle_member_list:
            random.shuffle(member_addresses)

        addresses = OrderedDict(member_addresses)
        primaries, secondaries = self._address_provider.load_addresses()
        if self._shuffle_member_list:
            random.shuffle(primaries)
            random.shuffle(secondaries)

        for address in primaries:
            addresses[address] = None

        for address in secondaries:
            addresses[address] = None

        return six.iterkeys(addresses)
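
The add_listener docstring in the example above describes how connection listeners are registered. A hypothetical usage might look like the sketch below; connection_manager stands for an already-constructed ConnectionManager instance (not shown in the snippet), and the callback signatures follow the way the example invokes them (opened with the connection, closed with the connection and the cause).

def on_opened(connection):
    # Called once a connection is authenticated and added.
    print("connection opened:", connection)

def on_closed(connection, cause):
    # Called when a connection is removed.
    print("connection closed:", connection, "cause:", cause)

# Hypothetical: connection_manager is the client's ConnectionManager instance.
connection_manager.add_listener(on_connection_opened=on_opened,
                                on_connection_closed=on_closed)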
Code example #32
class InvocationService(object):
    _CLEAN_RESOURCES_PERIOD = 0.1

    def __init__(self, client, reactor):
        config = client.config
        smart_routing = config.smart_routing
        if smart_routing:
            self._do_invoke = self._invoke_smart
        else:
            self._do_invoke = self._invoke_non_smart

        self._client = client
        self._reactor = reactor
        self._partition_service = None
        self._connection_manager = None
        self._listener_service = None
        self._check_invocation_allowed_fn = None
        self._pending = {}
        self._next_correlation_id = AtomicInteger(1)
        self._is_redo_operation = config.redo_operation
        self._invocation_timeout = config.invocation_timeout
        self._invocation_retry_pause = config.invocation_retry_pause
        self._backup_ack_to_client_enabled = smart_routing and config.backup_ack_to_client_enabled
        self._fail_on_indeterminate_state = config.fail_on_indeterminate_operation_state
        self._backup_timeout = config.operation_backup_timeout
        self._clean_resources_timer = None
        self._shutdown = False

    def init(self, partition_service, connection_manager, listener_service):
        self._partition_service = partition_service
        self._connection_manager = connection_manager
        self._listener_service = listener_service
        self._check_invocation_allowed_fn = connection_manager.check_invocation_allowed

    def start(self):
        self._start_clean_resources_timer()
        if self._backup_ack_to_client_enabled:
            self._register_backup_listener()

    def handle_client_message(self, message):
        correlation_id = message.get_correlation_id()

        start_frame = message.start_frame
        if start_frame.has_event_flag() or start_frame.has_backup_event_flag():
            self._listener_service.handle_client_message(
                message, correlation_id)
            return

        invocation = self._pending.get(correlation_id, None)
        if not invocation:
            _logger.warning("Got message with unknown correlation id: %s",
                            message)
            return

        if message.get_message_type() == EXCEPTION_MESSAGE_TYPE:
            error = create_error_from_message(message)
            return self._notify_error(invocation, error)

        self._notify(invocation, message)

    def invoke(self, invocation):
        if not invocation.timeout:
            invocation.timeout = self._invocation_timeout + time.time()

        correlation_id = self._next_correlation_id.get_and_increment()
        request = invocation.request
        request.set_correlation_id(correlation_id)
        request.set_partition_id(invocation.partition_id)
        self._do_invoke(invocation)

    def shutdown(self):
        if self._shutdown:
            return

        self._shutdown = True
        if self._clean_resources_timer:
            self._clean_resources_timer.cancel()
        for invocation in list(six.itervalues(self._pending)):
            self._notify_error(invocation, HazelcastClientNotActiveError())

    def _invoke_on_partition_owner(self, invocation, partition_id):
        owner_uuid = self._partition_service.get_partition_owner(partition_id)
        if not owner_uuid:
            _logger.debug("Partition owner is not assigned yet")
            return False
        return self._invoke_on_target(invocation, owner_uuid)

    def _invoke_on_target(self, invocation, owner_uuid):
        connection = self._connection_manager.get_connection(owner_uuid)
        if not connection:
            _logger.debug("Client is not connected to target: %s", owner_uuid)
            return False
        return self._send(invocation, connection)

    def _invoke_on_random_connection(self, invocation):
        connection = self._connection_manager.get_random_connection()
        if not connection:
            _logger.debug("No connection found to invoke")
            return False
        return self._send(invocation, connection)

    def _invoke_smart(self, invocation):
        try:
            if not invocation.urgent:
                self._check_invocation_allowed_fn()

            connection = invocation.connection
            if connection:
                invoked = self._send(invocation, connection)
                if not invoked:
                    self._notify_error(
                        invocation,
                        IOError("Could not invoke on connection %s" %
                                connection))
                return

            if invocation.partition_id != -1:
                invoked = self._invoke_on_partition_owner(
                    invocation, invocation.partition_id)
            elif invocation.uuid:
                invoked = self._invoke_on_target(invocation, invocation.uuid)
            else:
                invoked = self._invoke_on_random_connection(invocation)

            if not invoked:
                invoked = self._invoke_on_random_connection(invocation)

            if not invoked:
                self._notify_error(invocation,
                                   IOError("No connection found to invoke"))
        except Exception as e:
            self._notify_error(invocation, e)

    def _invoke_non_smart(self, invocation):
        try:
            if not invocation.urgent:
                self._check_invocation_allowed_fn()

            connection = invocation.connection
            if connection:
                invoked = self._send(invocation, connection)
                if not invoked:
                    self._notify_error(
                        invocation,
                        IOError("Could not invoke on connection %s" %
                                connection))
                return

            if not self._invoke_on_random_connection(invocation):
                self._notify_error(invocation,
                                   IOError("No connection found to invoke"))
        except Exception as e:
            self._notify_error(invocation, e)

    def _send(self, invocation, connection):
        if self._shutdown:
            raise HazelcastClientNotActiveError()

        if self._backup_ack_to_client_enabled:
            invocation.request.set_backup_aware_flag()

        message = invocation.request
        correlation_id = message.get_correlation_id()
        self._pending[correlation_id] = invocation

        if invocation.event_handler:
            self._listener_service.add_event_handler(correlation_id,
                                                     invocation.event_handler)

        if not connection.send_message(message):
            if invocation.event_handler:
                self._listener_service.remove_event_handler(correlation_id)
            return False

        invocation.sent_connection = connection
        return True

    def _notify_error(self, invocation, error):
        _logger.debug("Got exception for request %s, error: %s",
                      invocation.request, error)

        if not self._client.lifecycle_service.is_running():
            self._complete_with_error(invocation,
                                      HazelcastClientNotActiveError())
            return

        if not self._should_retry(invocation, error):
            self._complete_with_error(invocation, error)
            return

        if invocation.timeout < time.time():
            _logger.debug(
                "Error will not be retried because invocation timed out: %s",
                error)
            error = OperationTimeoutError(
                "Request timed out because an error occurred "
                "after invocation timeout: %s" % error)
            self._complete_with_error(invocation, error)
            return

        invoke_func = functools.partial(self._do_invoke, invocation)
        self._reactor.add_timer(self._invocation_retry_pause, invoke_func)

    def _should_retry(self, invocation, error):
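        # Retry policy, summarised: I/O and disconnect errors are not retried
        # for connection-bound invocations; TargetNotMemberError is not retried
        # for member-targeted invocations; I/O errors, instance-not-active
        # errors and server-declared retryable errors are retried; a target
        # disconnect is retried only when the request is retryable or
        # redo-operation is enabled.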
        if invocation.connection and isinstance(
                error, (IOError, TargetDisconnectedError)):
            return False

        if invocation.uuid and isinstance(error, TargetNotMemberError):
            return False

        if (isinstance(error, (IOError, HazelcastInstanceNotActiveError))
                or is_retryable_error(error)):
            return True

        if isinstance(error, TargetDisconnectedError):
            return invocation.request.retryable or self._is_redo_operation

        return False

    def _complete_with_error(self, invocation, error):
        invocation.set_exception(error, None)
        correlation_id = invocation.request.get_correlation_id()
        self._pending.pop(correlation_id, None)

    def _register_backup_listener(self):
        codec = client_local_backup_listener_codec
        request = codec.encode_request()
        self._listener_service.register_listener(
            request, codec.decode_response, lambda reg_id: None,
            lambda m: codec.handle(m, self._backup_event_handler)).result()

    def _backup_event_handler(self, correlation_id):
        invocation = self._pending.get(correlation_id, None)
        if not invocation:
            _logger.debug(
                "Invocation not found for backup event, invocation id %s",
                correlation_id)
            return
        self._notify_backup_complete(invocation)

    def _notify(self, invocation, client_message):
        expected_backups = client_message.get_number_of_backup_acks()
        if expected_backups > invocation.backup_acks_received:
            invocation.pending_response_received_time = time.time()
            invocation.backup_acks_expected = expected_backups
            invocation.pending_response = client_message
            return

        self._complete(invocation, client_message)

    def _notify_backup_complete(self, invocation):
        invocation.backup_acks_received += 1
        if not invocation.pending_response:
            return

        if invocation.backup_acks_expected != invocation.backup_acks_received:
            return

        self._complete(invocation, invocation.pending_response)

    def _complete(self, invocation, client_message):
        invocation.set_response(client_message)
        correlation_id = invocation.request.get_correlation_id()
        self._pending.pop(correlation_id, None)

    def _start_clean_resources_timer(self):
        def run():
            if self._shutdown:
                return

            now = time.time()
            for invocation in list(self._pending.values()):
                connection = invocation.sent_connection
                if not connection:
                    continue

                if not connection.live:
                    error = TargetDisconnectedError(connection.close_reason)
                    self._notify_error(invocation, error)

                if self._backup_ack_to_client_enabled:
                    self._detect_and_handle_backup_timeout(invocation, now)

            self._clean_resources_timer = self._reactor.add_timer(
                self._CLEAN_RESOURCES_PERIOD, run)

        self._clean_resources_timer = self._reactor.add_timer(
            self._CLEAN_RESOURCES_PERIOD, run)

    def _detect_and_handle_backup_timeout(self, invocation, now):
        if not invocation.pending_response:
            return

        if invocation.backup_acks_expected == invocation.backup_acks_received:
            return

        expiration_time = invocation.pending_response_received_time + self._backup_timeout
        timeout_reached = 0 < expiration_time < now
        if not timeout_reached:
            return

        if self._fail_on_indeterminate_state:
            error = IndeterminateOperationStateError(
                "Invocation failed because the backup acks are missed")
            self._complete_with_error(invocation, error)
            return

        self._complete(invocation, invocation.pending_response)
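
The backup-ack handshake above is easy to lose in the surrounding code: a response that announces more backup acks than have arrived is parked as pending_response, and only the last backup event (or the backup timeout) completes the invocation. The following is a minimal, standalone model of just that bookkeeping; _FakeInvocation, _FakeResponse, notify and notify_backup_complete are illustrative stand-ins, not part of the client API.

import time


class _FakeResponse:
    def __init__(self, backup_acks):
        self._backup_acks = backup_acks

    def get_number_of_backup_acks(self):
        return self._backup_acks


class _FakeInvocation:
    def __init__(self):
        self.backup_acks_received = 0
        self.backup_acks_expected = 0
        self.pending_response = None
        self.pending_response_received_time = 0
        self.completed_with = None

    def set_response(self, message):
        self.completed_with = message


def notify(invocation, message):
    # Mirrors _notify: park the response until every expected backup ack arrives.
    expected = message.get_number_of_backup_acks()
    if expected > invocation.backup_acks_received:
        invocation.pending_response_received_time = time.time()
        invocation.backup_acks_expected = expected
        invocation.pending_response = message
        return
    invocation.set_response(message)


def notify_backup_complete(invocation):
    # Mirrors _notify_backup_complete: the last missing ack releases the parked response.
    invocation.backup_acks_received += 1
    if invocation.pending_response is None:
        return
    if invocation.backup_acks_expected != invocation.backup_acks_received:
        return
    invocation.set_response(invocation.pending_response)


inv = _FakeInvocation()
notify(inv, _FakeResponse(backup_acks=1))  # parked: one backup ack still missing
assert inv.completed_with is None
notify_backup_complete(inv)                # the ack arrives, the invocation completes
assert inv.completed_with is not None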
Code example #33
class InvocationService(object):
    logger = logging.getLogger("InvocationService")

    def __init__(self, client):
        self._pending = {}
        self._event_handlers = {}
        self._next_correlation_id = AtomicInteger(1)
        self._client = client
        self._event_queue = Queue()
        self._is_redo_operation = client.config.network_config.redo_operation

        if client.config.network_config.smart_routing:
            self.invoke = self.invoke_smart
        else:
            self.invoke = self.invoke_non_smart

        self._client.connection_manager.add_listener(on_connection_closed=self.cleanup_connection)
        client.heartbeat.add_listener(on_heartbeat_stopped=self._heartbeat_stopped)

    def invoke_on_connection(self, message, connection, ignore_heartbeat=False):
        return self.invoke(Invocation(message, connection=connection), ignore_heartbeat)

    def invoke_on_partition(self, message, partition_id):
        return self.invoke(Invocation(message, partition_id=partition_id))

    def invoke_on_random_target(self, message):
        return self.invoke(Invocation(message))

    def invoke_on_target(self, message, address):
        return self.invoke(Invocation(message, address=address))

    def invoke_smart(self, invocation, ignore_heartbeat=False):
        if invocation.has_connection():
            self._send(invocation, invocation.connection, ignore_heartbeat)
        elif invocation.has_partition_id():
            addr = self._client.partition_service.get_partition_owner(invocation.partition_id)
            self._send_to_address(invocation, addr)
        elif invocation.has_address():
            self._send_to_address(invocation, invocation.address)
        else:  # send to random address
            addr = self._client.load_balancer.next_address()
            self._send_to_address(invocation, addr)

        return invocation.future

    def invoke_non_smart(self, invocation, ignore_heartbeat=False):
        if invocation.has_connection():
            self._send(invocation, invocation.connection, ignore_heartbeat)
        else:
            addr = self._client.cluster.owner_connection_address
            self._send_to_address(invocation, addr)
        return invocation.future

    def cleanup_connection(self, connection, cause):
        for correlation_id, invocation in dict(self._pending).items():
            if invocation.sent_connection == connection:
                self._handle_exception(invocation, cause)

        if self._client.lifecycle.is_live:
            for correlation_id, invocation in dict(self._event_handlers).items():
                if invocation.sent_connection == connection and invocation.connection is None:
                    self._client.listener.re_register_listener(invocation)

    def _heartbeat_stopped(self, connection):
        for correlation_id, invocation in dict(self._pending).items():
            if invocation.sent_connection == connection:
                self._handle_exception(invocation,
                                       TargetDisconnectedError("%s has stopped heart beating." % connection))

    def _remove_event_handler(self, correlation_id):
        self._event_handlers.pop(correlation_id)

    def _send_to_address(self, invocation, address, ignore_heartbeat=False):
        try:
            conn = self._client.connection_manager.connections[address]
            self._send(invocation, conn, ignore_heartbeat)
        except KeyError:
            if self._client.lifecycle.state != LIFECYCLE_STATE_CONNECTED:
                self._handle_exception(invocation, IOError("Client is not in connected state"))
            else:
                self._client.connection_manager.get_or_connect(address).continue_with(
                    self.on_connect, invocation, ignore_heartbeat)

    def on_connect(self, f, invocation, ignore_heartbeat):
        if f.is_success():
            self._send(invocation, f.result(), ignore_heartbeat)
        else:
            self._handle_exception(invocation, f.exception(), f.traceback())

    def _send(self, invocation, connection, ignore_heartbeat):
        correlation_id = self._next_correlation_id.get_and_increment()
        message = invocation.request
        message.set_correlation_id(correlation_id)
        message.set_partition_id(invocation.partition_id)
        self._pending[correlation_id] = invocation
        if not invocation.timer:
            invocation.timer = self._client.reactor.add_timer_absolute(invocation.timeout, invocation.on_timeout)

        if isinstance(invocation, ListenerInvocation):
            self._event_handlers[correlation_id] = invocation

        self.logger.debug("Sending %s to %s", message, connection)

        if not ignore_heartbeat and not connection.heartbeating:
            self._handle_exception(invocation, TargetDisconnectedError("%s has stopped heart beating." % connection))
            return

        invocation.sent_connection = connection
        try:
            connection.send_message(message)
        except IOError as e:
            self._handle_exception(invocation, e)

    def _handle_client_message(self, message):
        correlation_id = message.get_correlation_id()
        if message.has_flags(LISTENER_FLAG):
            if correlation_id not in self._event_handlers:
                self.logger.warning("Got event message with unknown correlation id: %s", message)
                return
            invocation = self._event_handlers[correlation_id]
            self._handle_event(invocation, message)
            return
        if correlation_id not in self._pending:
            self.logger.warning("Got message with unknown correlation id: %s", message)
            return
        invocation = self._pending.pop(correlation_id)

        if message.get_message_type() == EXCEPTION_MESSAGE_TYPE:
            error = create_exception(ErrorCodec(message))
            return self._handle_exception(invocation, error)

        invocation.set_response(message)

    def _handle_event(self, invocation, message):
        try:
            invocation.event_handler(message)
        except Exception:
            self.logger.warning("Error handling event %s", message, exc_info=True)

    def _handle_exception(self, invocation, error, traceback=None):
        if self.logger.isEnabledFor(logging.DEBUG):
            self.logger.debug("Got exception for request %s: %s: %s", invocation.request,
                              type(error).__name__, error)
        if isinstance(error, (AuthenticationError, IOError, HazelcastInstanceNotActiveError)):
            if self._try_retry(invocation):
                return

        if is_retryable_error(error):
            if invocation.request.is_retryable() or self._is_redo_operation:
                if self._try_retry(invocation):
                    return

        invocation.set_exception(error, traceback)

    def _try_retry(self, invocation):
        if invocation.connection:
            return False
        if invocation.timeout < time.time():
            return False

        invoke_func = functools.partial(self.invoke, invocation)
        self.logger.debug("Rescheduling request %s to be retried in %s seconds", invocation.request,
                          RETRY_WAIT_TIME_IN_SECONDS)
        self._client.reactor.add_timer(RETRY_WAIT_TIME_IN_SECONDS, invoke_func)
        return True
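
The retry rules spread across _handle_exception and _try_retry above reduce to a small decision table. The sketch below restates that table as one function so it can be read (and run) in isolation; the exception classes and is_retryable_error are simplified local stand-ins rather than the ones shipped with the client, and should_retry is not an actual method of the service.

class AuthenticationError(Exception):
    pass


class HazelcastInstanceNotActiveError(Exception):
    pass


class RetryableError(Exception):
    pass


def is_retryable_error(error):
    return isinstance(error, RetryableError)


def should_retry(error, bound_to_connection, timed_out,
                 request_retryable, redo_operation):
    # _try_retry: invocations pinned to a connection, or already past their
    # deadline, are never rescheduled.
    if bound_to_connection or timed_out:
        return False
    # _handle_exception, first branch: connection-level failures are retried.
    if isinstance(error, (AuthenticationError, IOError,
                          HazelcastInstanceNotActiveError)):
        return True
    # Second branch: server-declared retryable errors are retried only for
    # retryable requests or when redo-operation is enabled.
    if is_retryable_error(error):
        return request_retryable or redo_operation
    return False


assert should_retry(IOError(), False, False, False, False)
assert not should_retry(RetryableError(), False, False, False, False)
assert should_retry(RetryableError(), False, False, False, True)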
Code example #34
class InvocationService(object):
    logger = logging.getLogger("InvocationService")

    def __init__(self, client):
        self._is_live = False
        self._pending = {}
        self._listeners = {}
        self._next_correlation_id = AtomicInteger(1)
        self._client = client
        self._event_queue = Queue()
        self._pending_lock = threading.Lock()

    def start(self):
        self._is_live = True
        self._start_event_thread()

    def shutdown(self):
        self._is_live = False
        self._event_thread.join()

    def _start_event_thread(self):
        def handle_event(event):
            try:
                event[0](event[1])
            except Exception:
                self.logger.warning("Error handling event %s", event, exc_info=True)

        def service_timeouts():
            now = time.time()
            for correlation_id, invocation in dict(self._pending).items():
                if invocation.check_timer(now):
                    try:
                        self._pending.pop(correlation_id)
                    except KeyError:
                        pass

        def event_loop():
            self.logger.debug("Starting event thread")
            while self._is_live:
                for _ in range(EVENT_LOOP_COUNT):
                    try:
                        event = self._event_queue.get(timeout=0.01)
                        handle_event(event)
                    except Empty:
                        break
                service_timeouts()
            self.logger.debug("Event thread exited.")

        self._event_thread = threading.Thread(target=event_loop, name="hazelcast-event-handler-loop")
        self._event_thread.daemon = True
        self._event_thread.start()

    def invoke_on_connection(self, message, connection, event_handler=None):
        return self._invoke(Invocation(message, connection=connection), event_handler)

    def invoke_on_partition(self, message, partition_id, event_handler=None):
        return self._invoke(Invocation(message, partition_id=partition_id), event_handler)

    def invoke_on_random_target(self, message, event_handler=None):
        return self._invoke(Invocation(message), event_handler)

    def invoke_on_target(self, message, address, event_handler=None):
        return self._invoke(Invocation(message, address=address), event_handler)

    def _invoke(self, invocation, event_handler):
        if event_handler is not None:
            invocation.handler = event_handler
        if invocation.has_connection():
            self._send(invocation, invocation.connection)
        elif invocation.has_partition_id():
            addr = self._client.partition_service.get_partition_owner(invocation.partition_id)
            self._send_to_address(invocation, addr)
        elif invocation.has_address():
            self._send_to_address(invocation, invocation.address)
        else:  # send to random address
            addr = self._client.load_balancer.next_address()
            self._send_to_address(invocation, addr)

        return invocation

    def _send_to_address(self, invocation, address):
        conn = self._client.connection_manager.get_or_connect(address)
        self._send(invocation, conn)

    def _send(self, invocation, connection):
        correlation_id = self._next_correlation_id.increment_and_get()
        message = invocation.request
        message.set_correlation_id(correlation_id)
        message.set_partition_id(invocation.partition_id)
        with self._pending_lock:
            self._pending[correlation_id] = invocation

        if invocation.has_handler():
            self._listeners[correlation_id] = invocation

        self.logger.debug(
            "Sending message with correlation id %s and type %s", correlation_id, message.get_message_type()
        )
        connection.send_message(message)

    def handle_client_message(self, message):
        correlation_id = message.get_correlation_id()
        self.logger.debug(
            "Received message with correlation id %s and type %s", correlation_id, message.get_message_type()
        )
        if message.has_flags(LISTENER_FLAG):
            self.logger.debug("Got event message with type %d", message.get_message_type())
            if correlation_id not in self._listeners:
                self.logger.warning("Got event message with unknown correlation id: %d", correlation_id)
                return
            invocation = self._listeners[correlation_id]
            self._event_queue.put((invocation.handler, message))
            return
        with self._pending_lock:
            if correlation_id not in self._pending:
                self.logger.warning("Got message with unknown correlation id: %d", correlation_id)
                return
            invocation = self._pending.pop(correlation_id)
        invocation.set_response(message)
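
Both legacy services above, and the newer one at the top of this listing, revolve around the same pattern: give each outgoing request a monotonically increasing correlation id, remember the invocation in a pending map, and resolve it when a message carrying the same id comes back. A minimal, thread-safe standalone sketch of that pattern follows; _MiniInvocationService and PendingCall are illustrative names, and itertools.count stands in for AtomicInteger.

import itertools
import threading


class PendingCall:
    def __init__(self, request):
        self.request = request
        self.event = threading.Event()
        self.response = None

    def set_response(self, response):
        self.response = response
        self.event.set()


class _MiniInvocationService:
    def __init__(self):
        self._ids = itertools.count(1)  # plays the role of AtomicInteger(1)
        self._pending = {}
        self._lock = threading.Lock()

    def invoke(self, request):
        with self._lock:
            correlation_id = next(self._ids)
            call = PendingCall(request)
            self._pending[correlation_id] = call
        # ... the real service would now write the message to a connection ...
        return correlation_id, call

    def handle_response(self, correlation_id, response):
        with self._lock:
            call = self._pending.pop(correlation_id, None)
        if call is None:
            return  # unknown or already-completed correlation id
        call.set_response(response)


svc = _MiniInvocationService()
cid, call = svc.invoke(request={"op": "map.get"})
svc.handle_response(cid, response={"value": 42})
assert call.response == {"value": 42}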
Code example #35
class IdGenerator(Proxy):
    def __init__(self, client, service_name, name, atomic_long):
        super(IdGenerator, self).__init__(client, service_name, name)
        self._atomic_long = atomic_long
        self._residue = AtomicInteger(BLOCK_SIZE)
        self._local = AtomicInteger(-1)
        self._lock = threading.RLock()

    def _on_destroy(self):
        self._atomic_long.destroy()

    def init(self, initial):
        if initial <= 0:
            return False
        step = initial // BLOCK_SIZE
        with self._lock:
            init = self._atomic_long.compare_and_set(0, step + 1)
            if init:
                self._local.set(step)
                self._residue.set((initial % BLOCK_SIZE) + 1)
            return init

    def new_id(self):
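        # Each generated id is local_block * BLOCK_SIZE + residue. When the
        # residue reaches BLOCK_SIZE, the next block index is reserved by
        # incrementing the cluster-side atomic long and the residue restarts
        # from zero.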
        val = self._residue.get_and_increment()
        if val >= BLOCK_SIZE:
            with self._lock:
                val = self._residue.get()
                if val >= BLOCK_SIZE:
                    increment = self._atomic_long.get_and_increment()
                    self._local.set(increment)
                    self._residue.set(0)
                return self.new_id()
        get = self._local.get()
        return get * BLOCK_SIZE + val
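
The block allocation in new_id above can be read as: reserve a block of BLOCK_SIZE consecutive ids with a single cluster round-trip, then hand them out locally until the block is exhausted. The single-process sketch below implements the same scheme with a plain local counter standing in for the cluster-side atomic long; _LocalBlockIdGenerator and its block_size parameter are illustrative, not part of the client.

import threading


class _LocalBlockIdGenerator:
    def __init__(self, block_size=10000):
        self._block_size = block_size
        self._next_block = 0        # stands in for the cluster-side AtomicLong
        self._block = -1            # current block index (self._local above)
        self._residue = block_size  # position inside the block (self._residue above)
        self._lock = threading.RLock()

    def new_id(self):
        with self._lock:
            if self._residue >= self._block_size:
                # Current block exhausted: reserve the next one.
                self._block = self._next_block
                self._next_block += 1
                self._residue = 0
            value = self._block * self._block_size + self._residue
            self._residue += 1
            return value


gen = _LocalBlockIdGenerator()
assert [gen.new_id() for _ in range(3)] == [0, 1, 2]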
Code example #36
 def __init__(self, client, service_name, name, atomic_long):
     super(IdGenerator, self).__init__(client, service_name, name)
     self._atomic_long = atomic_long
     self._residue = AtomicInteger(BLOCK_SIZE)
     self._local = AtomicInteger(-1)
     self._lock = threading.RLock()