def data_serializable_factories(self, value):
    """Validate and store the identified-data-serializable factories.

    ``value`` must be a dict mapping integer factory ids to dicts that map
    integer class ids to ``IdentifiedDataSerializable`` subclasses.
    """
    if not isinstance(value, dict):
        raise TypeError("data_serializable_factories must be a dict")
    for factory_id, factory in six.iteritems(value):
        if not isinstance(factory_id, six.integer_types):
            raise TypeError("Keys of data_serializable_factories must be integers")
        if not isinstance(factory, dict):
            raise TypeError("Values of data_serializable_factories must be dict")
        for class_id, clazz in six.iteritems(factory):
            if not isinstance(class_id, six.integer_types):
                raise TypeError(
                    "Keys of factories of data_serializable_factories must be integers"
                )
            is_ids_subclass = isinstance(clazz, type) and issubclass(
                clazz, IdentifiedDataSerializable
            )
            if not is_ids_subclass:
                raise TypeError(
                    "Values of factories of data_serializable_factories must be "
                    "subclasses of IdentifiedDataSerializable"
                )
    self._data_serializable_factories = value
def _fill_map_and_near_cache(self, count=10):
    """Write ``count`` entries into the map, then read each one back so the
    near cache is populated. Returns the dict that was written."""
    entries = {}
    for index in range(count):
        entries["key-%d" % index] = "value-%d" % index
    for key, value in six.iteritems(entries):
        self.map.put(key, value)
    # Reads after the writes pull the entries into the near cache.
    for key, _ in six.iteritems(entries):
        self.map.get(key)
    return entries
def cleanup_connection(self, connection, cause):
    """Fail every pending invocation sent over ``connection`` and, if the
    client is still live, re-register event listeners bound to it."""
    # Iterate over snapshots: handling an exception / re-registering may
    # mutate the underlying dicts.
    for _, invocation in six.iteritems(dict(self._pending)):
        if invocation.sent_connection == connection:
            self._handle_exception(invocation, cause)
    if not self._client.lifecycle.is_live:
        return
    for _, invocation in six.iteritems(dict(self._event_handlers)):
        if invocation.sent_connection == connection and invocation.connection is None:
            self._client.listener.re_register_listener(invocation)
def _heartbeat_stopped(self, connection):
    """Fail every pending invocation that was sent over a connection whose
    heartbeat has stopped."""
    # Snapshot the pending dict; _handle_exception mutates it.
    for _, invocation in six.iteritems(dict(self._pending)):
        if invocation.sent_connection != connection:
            continue
        self._handle_exception(
            invocation,
            TargetDisconnectedError("%s has stopped heart beating." % connection),
        )
def test_put_all(self):
    """put_all should eventually make entry_set reflect every mapping."""
    # Renamed from ``map`` so the builtin is not shadowed.
    entries = {"key-%d" % x: "value-%d" % x for x in range(0, 10)}
    self.replicated_map.put_all(entries)
    # Replication is asynchronous, so poll until the entry set matches.
    self.assertTrueEventually(
        lambda: six.assertCountEqual(
            self, six.iteritems(entries), self.replicated_map.entry_set()
        )
    )
def __init__(
    self,
    config,
    version=1,
    global_partition_strategy=default_partition_strategy,
    output_buffer_size=DEFAULT_OUT_BUFFER_SIZE,
):
    """Build the serialization service from the client ``config``.

    Registration order matters: the portable and identified-data
    serializers are installed first, then the constant (built-in)
    serializers, then user-provided custom serializers, and finally the
    optional global serializer fallback.
    """
    super(SerializationServiceV1, self).__init__(
        version,
        global_partition_strategy,
        output_buffer_size,
        config.is_big_endian,
        config.default_int_type,
    )
    self._portable_context = PortableContext(self, config.portable_version)
    self.register_class_definitions(config.class_definitions, config.check_class_definition_errors)
    self._registry._portable_serializer = PortableSerializer(
        self._portable_context, config.portable_factories)
    # merge configured factories with built in ones
    factories = {}
    factories.update(config.data_serializable_factories)
    self._registry._data_serializer = IdentifiedDataSerializer(factories)
    self._register_constant_serializers()
    # Register Custom Serializers
    for _type, custom_serializer in six.iteritems(
            config.custom_serializers):
        # custom_serializer is a class; instantiate one per registered type
        self._registry.safe_register_serializer(custom_serializer(), _type)
    # Register Global Serializer
    global_serializer = config.global_serializer
    if global_serializer:
        # instantiated once; used as the fallback for otherwise-unknown types
        self._registry._global_serializer = global_serializer()
def _connection_added(self, connection):
    """Kick off re-registration of every active listener on a newly added
    connection."""
    with self._registration_lock:
        registrations = self._active_registrations
        for user_reg_id, listener_registration in six.iteritems(registrations):
            self._register_on_connection_async(
                user_reg_id, listener_registration, connection
            )
def _fill_map(self, key_count=5, value_count=5):
    """Populate the multi-map with ``value_count`` values per key and return
    the expected mapping.

    The local was renamed from ``map`` so the builtin is not shadowed.
    """
    entries = {
        "key-%d" % x: ["value-%d-%d" % (x, y) for y in range(0, value_count)]
        for x in range(0, key_count)
    }
    for key, values in six.iteritems(entries):
        for value in values:
            self.multi_map.put(key, value)
    return entries
def test_put_all(self):
    """entry_set after put_all should return every written pair."""
    # Renamed from ``map`` so the builtin is not shadowed.
    entries = {"key-%d" % x: "value-%d" % x for x in range(0, 1000)}
    self.map.put_all(entries)
    result = self.map.entry_set()
    six.assertCountEqual(self, result, six.iteritems(entries))
def process_partition_response(self, message):
    """Decode a get-partitions response and refresh the partition table."""
    response = client_get_partitions_codec.decode_response(message)
    for addr, partition_list in six.iteritems(response["partitions"]):
        for partition_id in partition_list:
            self.partitions[partition_id] = addr
    self.logger.debug("Finished updating partitions", extra=self._logger_extras)
def _fill_map(self, count=1000):
    """Fill the map with ``InnerPortable`` key/value pairs and return the
    expected dict (renamed from ``map`` to avoid shadowing the builtin)."""
    entries = {
        InnerPortable("key-%d" % x, x): InnerPortable("value-%d" % x, x)
        for x in range(0, count)
    }
    for key, value in six.iteritems(entries):
        self.map.put(key, value)
    return entries
def enum(**enums):
    """
    Utility method for defining enums.

    :param enums: Parameters of enumeration.
    :return: (Enum), the created enumerations.
    """
    # Reverse lookup table (value -> name), built before 'reverse' is added.
    enums['reverse'] = {value: key for key, value in six.iteritems(enums)}
    return type('Enum', (), enums)
def from_dict(cls, d):
    """Build a config instance from a plain dict, skipping ``None`` values.

    :raises InvalidConfigurationError: if ``d`` contains an unknown option.
    """
    config = cls()
    for k, v in six.iteritems(d):
        if v is not None:
            try:
                # setattr is the idiomatic form of config.__setattr__(k, v)
                setattr(config, k, v)
            except AttributeError:
                raise InvalidConfigurationError("Unrecognized config option: %s" % k)
    return config
def encode(buf, m, key_encoder, value_encoder, is_final=False):
    """Encode map ``m`` into ``buf`` as a framed sequence of key/value pairs,
    terminated by a final or ordinary end frame."""
    buf.extend(BEGIN_FRAME_BUF)
    for key, value in six.iteritems(m):
        key_encoder(buf, key)
        value_encoder(buf, value)
    end_frame = END_FINAL_FRAME_BUF if is_final else END_FRAME_BUF
    buf.extend(end_frame)
def test_entry_set(self):
    """entry_set should contain one (key, value) pair per stored value."""
    mm = self._fill_map()
    expected = []
    # 'values' replaces the original loop variable 'list', which shadowed the builtin.
    for key, values in six.iteritems(mm):
        for value in values:
            expected.append((key, value))
    six.assertCountEqual(self, self.multi_map.entry_set(), expected)
def calculate_size(name, entries):
    """ Calculates the request payload size"""
    # Name string plus the entry-count int, then each serialized key/value.
    data_size = calculate_size_str(name) + INT_SIZE_IN_BYTES
    for key, val in six.iteritems(entries):
        data_size += calculate_size_data(key) + calculate_size_data(val)
    return data_size
def from_dict(cls, d):
    """Create bitmap index options from a plain dict.

    :raises InvalidConfigurationError: on an unrecognized option name.
    """
    options = cls()
    for k, v in six.iteritems(d):
        try:
            # setattr is the idiomatic form of options.__setattr__(k, v)
            setattr(options, k, v)
        except AttributeError:
            raise InvalidConfigurationError(
                "Unrecognized config option for the bitmap index options: %s" % k
            )
    return options
def encode_request(name, entries):
    """ Encode request into client_message"""
    message = ClientMessage(payload_size=calculate_size(name, entries))
    message.set_message_type(REQUEST_TYPE)
    message.set_retryable(RETRYABLE)
    message.append_str(name)
    # Entry count first, then each (key, value) tuple.
    message.append_int(len(entries))
    for entry in six.iteritems(entries):
        message.append_tuple(entry)
    message.update_frame_length()
    return message
def put_all(self, map):
    """
    Copies all of the mappings from the specified map to this map. No atomicity guarantees are given. In the case of a failure,
    some of the key-value tuples may get written, while others are not.

    :param map: (dict), map which includes mappings to be stored in this map.
    """
    entries = {}
    for key, value in six.iteritems(map):
        check_not_none(key, "key can't be None")
        check_not_none(value, "value can't be None")
        serialized_key = self._to_data(key)
        serialized_value = self._to_data(value)
        entries[serialized_key] = serialized_value
    self._encode_invoke(replicated_map_put_all_codec, entries=entries)
def custom_serializers(self, value):
    """Validate and store custom serializers: a dict mapping types to
    ``StreamSerializer`` subclasses."""
    if not isinstance(value, dict):
        raise TypeError("custom_serializers must be a dict")
    for _type, serializer in six.iteritems(value):
        if not isinstance(_type, type):
            raise TypeError("Keys of custom_serializers must be types")
        is_stream_serializer = isinstance(serializer, type) and issubclass(
            serializer, StreamSerializer
        )
        if not is_stream_serializer:
            raise TypeError(
                "Values of custom_serializers must be subclasses of StreamSerializer"
            )
    self._custom_serializers = value
def near_caches(self, value):
    """Validate the mapping of map names to near-cache config dicts and
    store the parsed ``_NearCacheConfig`` objects."""
    if not isinstance(value, dict):
        raise TypeError("near_caches must be a dict")
    parsed = {}
    for name, config in six.iteritems(value):
        if not isinstance(name, six.string_types):
            raise TypeError("Keys of near_caches must be strings")
        if not isinstance(config, dict):
            raise TypeError("Values of near_caches must be dict")
        parsed[name] = _NearCacheConfig.from_dict(config)
    self._near_caches = parsed
def flake_id_generators(self, value):
    """Validate the mapping of generator names to config dicts and store the
    parsed ``_FlakeIdGeneratorConfig`` objects."""
    if not isinstance(value, dict):
        raise TypeError("flake_id_generators must be a dict")
    parsed = {}
    for name, config in six.iteritems(value):
        if not isinstance(name, six.string_types):
            raise TypeError("Keys of flake_id_generators must be strings")
        if not isinstance(config, dict):
            raise TypeError("Values of flake_id_generators must be dict")
        parsed[name] = _FlakeIdGeneratorConfig.from_dict(config)
    self._flake_id_generators = parsed
def reliable_topics(self, value):
    """Validate the mapping of topic names to config dicts and store the
    parsed ``_ReliableTopicConfig`` objects."""
    if not isinstance(value, dict):
        raise TypeError("reliable_topics must be a dict")
    parsed = {}
    for name, config in six.iteritems(value):
        if not isinstance(name, six.string_types):
            raise TypeError("Keys of reliable_topics must be strings")
        if not isinstance(config, dict):
            raise TypeError("Values of reliable_topics must be dict")
        parsed[name] = _ReliableTopicConfig.from_dict(config)
    self._reliable_topics = parsed
def put_all(self, source):
    """
    Copies all of the mappings from the specified map to this map. No atomicity guarantees are given. In the case of a failure,
    some of the key-value tuples may get written, while others are not.

    :param source: (dict), map which includes mappings to be stored in this map.
    """
    entries = []
    for key, value in six.iteritems(source):
        check_not_none(key, "key can't be None")
        check_not_none(value, "value can't be None")
        pair = (self._to_data(key), self._to_data(value))
        entries.append(pair)
    request = replicated_map_put_all_codec.encode_request(self.name, entries)
    return self._invoke(request)
def deregister_listener(self, user_registration_id):
    """Remove the listener registered with ``user_registration_id``.

    Local handlers are removed immediately; remote deregistrations are
    fired best-effort (urgent, effectively no timeout) and only logged on
    unexpected failure.

    :return: ImmediateFuture(False) if no such registration exists,
        ImmediateFuture(True) otherwise.
    """
    check_not_none(user_registration_id, "None user_registration_id is not allowed!")
    with self._registration_lock:
        listener_registration = self._active_registrations.pop(user_registration_id, None)
        if not listener_registration:
            return ImmediateFuture(False)
        for connection, event_registration in six.iteritems(
            listener_registration.connection_registrations
        ):
            # Remove local handler
            self.remove_event_handler(event_registration.correlation_id)
            # The rest is for deleting the remote registration
            server_registration_id = event_registration.server_registration_id
            deregister_request = listener_registration.encode_deregister_request(
                server_registration_id
            )
            if deregister_request is None:
                # None means no remote registration (e.g. for backup acks)
                continue
            invocation = Invocation(
                deregister_request, connection=connection, timeout=six.MAXSIZE, urgent=True
            )
            self._invocation_service.invoke(invocation)

            # connection=connection binds the current loop value as a default;
            # without it the closure would see only the last connection.
            def handler(f, connection=connection):
                e = f.exception()
                if e:
                    # Expected errors on a dying connection/client are ignored.
                    if isinstance(
                        e, (HazelcastClientNotActiveError, IOError, TargetDisconnectedError)
                    ):
                        return
                    _logger.warning(
                        "Deregistration of listener with ID %s has failed for address %s",
                        user_registration_id,
                        connection.remote_address,
                    )

            invocation.future.add_done_callback(handler)
        listener_registration.connection_registrations.clear()
        return ImmediateFuture(True)
def get_random_connection(self, should_get_data_member=False):
    """Return some active connection, preferring the load balancer's pick
    for smart-routing clients; ``None`` when no suitable connection exists."""
    if self._smart_routing_enabled:
        picked = self._get_connection_from_load_balancer(should_get_data_member)
        if picked:
            return picked
    # We should not get to this point under normal circumstances
    # for the smart client. For uni-socket client, there would be
    # a single connection in the dict. Therefore, copying the list
    # should be acceptable.
    snapshot = list(six.iteritems(self.active_connections))
    for member_uuid, connection in snapshot:
        if not should_get_data_member:
            return connection
        member = self._cluster_service.get_member(member_uuid)
        if member and not member.lite_member:
            return connection
    return None
def deregister_listener(self, user_registration_id):
    """Deregister the listener with the given id on every connection it is
    registered on.

    :return: (bool), ``True`` if the listener is fully deregistered,
        ``False`` if at least one remote deregistration failed (the
        registration is kept so that the caller may retry).
    """
    check_not_none(user_registration_id, "None user_registration_id is not allowed!")
    with self._registration_lock:
        listener_registration = self._active_registrations.get(user_registration_id)
        if not listener_registration:
            return False
        successful = True
        # Need to copy items to avoid getting runtime modification errors
        for connection, event_registration in list(
            six.iteritems(listener_registration.connection_registrations)
        ):
            try:
                server_registration_id = event_registration.server_registration_id
                deregister_request = listener_registration.encode_deregister_request(
                    server_registration_id
                )
                invocation = Invocation(deregister_request, connection=connection)
                self._invocation_service.invoke(invocation)
                invocation.future.result()
                self.remove_event_handler(event_registration.correlation_id)
                listener_registration.connection_registrations.pop(connection)
            # Was a bare ``except:``; narrowed so SystemExit/KeyboardInterrupt
            # are not swallowed.
            except Exception:
                if connection.live:
                    successful = False
                    self.logger.warning(
                        "Deregistration for listener with ID %s has failed to address %s ",
                        user_registration_id,
                        # Was the literal string "address"; log the actual connection.
                        connection,
                        exc_info=True,
                        extra=self._logger_extras,
                    )
        if successful:
            self._active_registrations.pop(user_registration_id)
        return successful
def _fill_map(self, count=10):
    """Put ``count`` entries into the map and return the expected dict
    (renamed from ``map`` to avoid shadowing the builtin)."""
    entries = {"key-%d" % x: "value-%d" % x for x in range(0, count)}
    for key, value in six.iteritems(entries):
        self.map.put(key, value)
    return entries
def test_entry_set(self):
    """entry_set should return every pair written by _fill_map."""
    expected = self._fill_map()
    actual = self.map.entry_set()
    six.assertCountEqual(self, actual, list(six.iteritems(expected)))
def get_entry_listener_flags(**kwargs):
    """OR together the EntryEventType flags whose keyword value is truthy."""
    flags = 0
    for name, enabled in six.iteritems(kwargs):
        if enabled:
            flags |= getattr(EntryEventType, name)
    return flags