    def test_discover_set_name_from_primary(self):
        # Discovering a replica set without the setName supplied by the user
        # is not yet supported by MongoClient, but Topology can do it.
        topology_settings = SetNameDiscoverySettings(
            seeds=[address], pool_class=MockPool, monitor_class=DummyMonitor)

        t = Topology(topology_settings)
        self.assertEqual(t.description.replica_set_name, None)
        self.assertEqual(t.description.topology_type,
                         TOPOLOGY_TYPE.ReplicaSetNoPrimary)
        t.open()
        got_ismaster(t, address, {
            'ok': 1,
            'ismaster': True,
            'setName': 'rs',
            'hosts': ['a']
        })

        self.assertEqual(t.description.replica_set_name, 'rs')
        self.assertEqual(t.description.topology_type,
                         TOPOLOGY_TYPE.ReplicaSetWithPrimary)

        # Another response from the primary. Exercises the code that processes
        # a primary's response when the topology type is already
        # ReplicaSetWithPrimary.
        got_ismaster(t, address, {
            'ok': 1,
            'ismaster': True,
            'setName': 'rs',
            'hosts': ['a']
        })

        # No change.
        self.assertEqual(t.description.replica_set_name, 'rs')
        self.assertEqual(t.description.topology_type,
                         TOPOLOGY_TYPE.ReplicaSetWithPrimary)
def create_topology(scenario_def, **kwargs):
    # Initialize topologies.
    if 'heartbeatFrequencyMS' in scenario_def:
        frequency = int(scenario_def['heartbeatFrequencyMS']) / 1000.0
    else:
        frequency = HEARTBEAT_FREQUENCY

    seeds, hosts = get_addresses(
        scenario_def['topology_description']['servers'])

    settings = get_topology_settings_dict(heartbeat_frequency=frequency,
                                          seeds=seeds,
                                          **kwargs)

    # "Eligible servers" is defined in the server selection spec as
    # the set of servers matching both the ReadPreference's mode
    # and tag sets.
    topology = Topology(TopologySettings(**settings))
    topology.open()

    # Update topologies with server descriptions.
    for server in scenario_def['topology_description']['servers']:
        server_description = make_server_description(server, hosts)
        topology.on_change(server_description)

    return topology
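
For reference, a minimal scenario dict that this helper accepts could look like the following. This is a sketch only: the addresses are illustrative, and the dict simply mirrors the scenario_def literals shown later on this page.

example_scenario = {
    'heartbeatFrequencyMS': 10000,
    'topology_description': {
        'type': 'ReplicaSetWithPrimary',
        'servers': [
            {'address': 'a:27017', 'avg_rtt_ms': 5,
             'type': 'RSPrimary', 'tag': {}},
            {'address': 'b:27017', 'avg_rtt_ms': 5,
             'type': 'RSSecondary', 'tag': {}},
        ],
    },
}

# Build and populate a Topology from the scenario.
topology = create_topology(example_scenario)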
Example #3
    def run_scenario(self):
        if 'heartbeatFrequencyMS' in scenario_def:
            frequency = int(scenario_def['heartbeatFrequencyMS']) / 1000.0
        else:
            frequency = HEARTBEAT_FREQUENCY

        # Initialize topologies.
        seeds, hosts = get_addresses(
            scenario_def['topology_description']['servers'])

        topology = Topology(
            TopologySettings(seeds=seeds,
                             monitor_class=MockMonitor,
                             pool_class=MockPool,
                             heartbeat_frequency=frequency))

        # Update topologies with server descriptions.
        for server in scenario_def['topology_description']['servers']:
            server_description = make_server_description(server, hosts)
            topology.on_change(server_description)

        # Create server selector.
        # Make first letter lowercase to match read_pref's modes.
        pref_def = scenario_def['read_preference']
        mode_string = pref_def.get('mode', 'primary')
        mode_string = mode_string[:1].lower() + mode_string[1:]
        mode = read_preferences.read_pref_mode_from_name(mode_string)
        max_staleness = pref_def.get('maxStalenessSeconds', -1)
        tag_sets = pref_def.get('tag_sets')

        if scenario_def.get('error'):
            with self.assertRaises(ConfigurationError):
                # Error can be raised when making Read Pref or selecting.
                pref = read_preferences.make_read_preference(
                    mode, tag_sets=tag_sets, max_staleness=max_staleness)

                topology.select_server(pref)
            return

        expected_addrs = set([
            server['address'] for server in scenario_def['in_latency_window']])

        # Select servers.
        pref = read_preferences.make_read_preference(
            mode, tag_sets=tag_sets, max_staleness=max_staleness)

        if not expected_addrs:
            with self.assertRaises(ConnectionFailure):
                topology.select_servers(pref, server_selection_timeout=0)
            return

        servers = topology.select_servers(pref, server_selection_timeout=0)
        actual_addrs = set(['%s:%d' % s.description.address for s in servers])

        for unexpected in actual_addrs - expected_addrs:
            self.fail("'%s' shouldn't have been selected, but was" % unexpected)

        for unselected in expected_addrs - actual_addrs:
            self.fail("'%s' should have been selected, but wasn't" % unselected)
    def test_latency_threshold_application(self):
        selector = SelectionStoreSelector()

        scenario_def = {
            'topology_description': {
                'type': 'ReplicaSetWithPrimary',
                'servers': [
                    {
                        'address': 'b:27017',
                        'avg_rtt_ms': 10000,
                        'type': 'RSSecondary',
                        'tag': {}
                    },
                    {
                        'address': 'c:27017',
                        'avg_rtt_ms': 20000,
                        'type': 'RSSecondary',
                        'tag': {}
                    },
                    {
                        'address': 'a:27017',
                        'avg_rtt_ms': 30000,
                        'type': 'RSPrimary',
                        'tag': {}
                    },
                ]
            }
        }

        # Create & populate Topology such that all but one server is too slow.
        rtt_times = [
            srv['avg_rtt_ms']
            for srv in scenario_def['topology_description']['servers']
        ]
        min_rtt_idx = rtt_times.index(min(rtt_times))
        seeds, hosts = get_addresses(
            scenario_def["topology_description"]["servers"])
        settings = get_topology_settings_dict(heartbeat_frequency=1,
                                              local_threshold_ms=1,
                                              seeds=seeds,
                                              server_selector=selector)
        topology = Topology(TopologySettings(**settings))
        topology.open()
        for server in scenario_def['topology_description']['servers']:
            server_description = make_server_description(server, hosts)
            topology.on_change(server_description)

        # Invoke server selection and assert no filtering based on latency
        # prior to custom server selection logic kicking in.
        server = topology.select_server(ReadPreference.NEAREST)
        self.assertEqual(len(selector.selection),
                         len(topology.description.server_descriptions()))

        # Ensure proper filtering based on latency after custom selection.
        self.assertEqual(server.description.address, seeds[min_rtt_idx])
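
The SelectionStoreSelector used above is not defined in this excerpt. A plausible minimal stand-in (an assumption, not necessarily the test suite's exact helper) is a callable that records the selection it receives and returns it unchanged, so the test can inspect which servers reached the custom-selector stage:

class SelectionStoreSelector(object):
    """Sketch of a no-op custom server selector that remembers its input."""

    def __init__(self):
        self.selection = None

    def __call__(self, selection):
        # Record the server descriptions passed in, then return them
        # unchanged so selection proceeds normally.
        self.selection = selection
        return selection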
def create_mock_topology(seeds=None,
                         replica_set_name=None,
                         monitor_class=MockMonitor):
    partitioned_seeds = list(imap(common.partition_node, seeds or ['a']))
    topology_settings = TopologySettings(partitioned_seeds,
                                         replica_set_name=replica_set_name,
                                         pool_class=MockPool,
                                         monitor_class=monitor_class)

    t = Topology(topology_settings)
    t.open()
    return t
Example #6
def create_mock_topology(uri, monitor_class=MockMonitor):
    parsed_uri = parse_uri(uri)
    replica_set_name = None
    if 'replicaset' in parsed_uri['options']:
        replica_set_name = parsed_uri['options']['replicaset']

    topology_settings = TopologySettings(parsed_uri['nodelist'],
                                         replica_set_name=replica_set_name,
                                         monitor_class=monitor_class)

    c = Topology(topology_settings)
    c.open()
    return c
    def test_server_selector_bypassed(self):
        selector = FunctionCallCounter(lambda x: x)

        scenario_def = {
            'topology_description': {
                'type': 'ReplicaSetNoPrimary',
                'servers': [
                    {
                        'address': 'b:27017',
                        'avg_rtt_ms': 10000,
                        'type': 'RSSecondary',
                        'tag': {}
                    },
                    {
                        'address': 'c:27017',
                        'avg_rtt_ms': 20000,
                        'type': 'RSSecondary',
                        'tag': {}
                    },
                    {
                        'address': 'a:27017',
                        'avg_rtt_ms': 30000,
                        'type': 'RSSecondary',
                        'tag': {}
                    },
                ]
            }
        }

        # Create & populate Topology such that no server is writeable.
        seeds, hosts = get_addresses(
            scenario_def["topology_description"]["servers"])
        settings = get_topology_settings_dict(heartbeat_frequency=1,
                                              local_threshold_ms=1,
                                              seeds=seeds,
                                              server_selector=selector)
        topology = Topology(TopologySettings(**settings))
        topology.open()
        for server in scenario_def['topology_description']['servers']:
            server_description = make_server_description(server, hosts)
            topology.on_change(server_description)

        # Invoke server selection and assert no calls to our custom selector.
        with self.assertRaisesRegex(ServerSelectionTimeoutError,
                                    'No primary available for writes'):
            topology.select_server(writable_server_selector,
                                   server_selection_timeout=0.1)
        self.assertEqual(selector.call_count, 0)
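
FunctionCallCounter is likewise not shown here; a minimal stand-in (again an assumption) wraps a callable and counts how many times it is invoked, which is all the assertion above relies on:

class FunctionCallCounter(object):
    """Sketch of a wrapper that counts calls to the wrapped function."""

    def __init__(self, fn):
        self.fn = fn
        self.call_count = 0

    def __call__(self, *args, **kwargs):
        self.call_count += 1
        return self.fn(*args, **kwargs)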
def create_mock_topology(uri, monitor_class=MockMonitor):
    # Some tests in the spec include URIs like mongodb://A/?connect=direct,
    # but PyMongo considers any single-seed URI with no setName to be "direct".
    parsed_uri = parse_uri(uri.replace('connect=direct', ''))
    replica_set_name = None
    if 'replicaset' in parsed_uri['options']:
        replica_set_name = parsed_uri['options']['replicaset']

    topology_settings = TopologySettings(parsed_uri['nodelist'],
                                         replica_set_name=replica_set_name,
                                         pool_class=MockPool,
                                         monitor_class=monitor_class)

    c = Topology(topology_settings)
    c.open()
    return c
def create_mock_topology(uri, monitor_class=DummyMonitor):
    parsed_uri = parse_uri(uri)
    replica_set_name = None
    direct_connection = None
    if 'replicaset' in parsed_uri['options']:
        replica_set_name = parsed_uri['options']['replicaset']
    if 'directConnection' in parsed_uri['options']:
        direct_connection = parsed_uri['options']['directConnection']

    topology_settings = TopologySettings(parsed_uri['nodelist'],
                                         replica_set_name=replica_set_name,
                                         monitor_class=monitor_class,
                                         direct_connection=direct_connection)

    c = Topology(topology_settings)
    c.open()
    return c
    def test_timeout_configuration(self):
        pool_options = PoolOptions(connect_timeout=1, socket_timeout=2)
        topology_settings = TopologySettings(pool_options=pool_options)
        t = Topology(topology_settings=topology_settings)
        t.open()

        # Get the default server.
        server = t.get_server_by_address(('localhost', 27017))

        # The pool for application operations obeys our settings.
        self.assertEqual(1, server._pool.opts.connect_timeout)
        self.assertEqual(2, server._pool.opts.socket_timeout)

        # The pool for monitoring operations uses our connect_timeout as both
        # its connect_timeout and its socket_timeout.
        monitor = server._monitor
        self.assertEqual(1, monitor._pool.opts.connect_timeout)
        self.assertEqual(1, monitor._pool.opts.socket_timeout)

        # The monitor, not its pool, is responsible for calling ismaster.
        self.assertFalse(monitor._pool.handshake)
    def test_discover_set_name_from_secondary(self):
        # Discovering a replica set without the setName supplied by the user
        # is not yet supported by MongoClient, but Topology can do it.
        topology_settings = SetNameDiscoverySettings(
            seeds=[address], pool_class=MockPool, monitor_class=DummyMonitor)

        t = Topology(topology_settings)
        self.assertEqual(t.description.replica_set_name, None)
        self.assertEqual(t.description.topology_type,
                         TOPOLOGY_TYPE.ReplicaSetNoPrimary)
        t.open()
        got_ismaster(
            t, address, {
                'ok': 1,
                'ismaster': False,
                'secondary': True,
                'setName': 'rs',
                'hosts': ['a']
            })

        self.assertEqual(t.description.replica_set_name, 'rs')
        self.assertEqual(t.description.topology_type,
                         TOPOLOGY_TYPE.ReplicaSetNoPrimary)
Example #12
def create_topology(scenario_def, **kwargs):
    # Initialize topologies.
    if 'heartbeatFrequencyMS' in scenario_def:
        frequency = int(scenario_def['heartbeatFrequencyMS']) / 1000.0
    else:
        frequency = HEARTBEAT_FREQUENCY

    seeds, hosts = get_addresses(
        scenario_def['topology_description']['servers'])

    topology_type = get_topology_type_name(scenario_def)
    if topology_type == 'LoadBalanced':
        kwargs.setdefault('load_balanced', True)
    # Force topology description to ReplicaSet
    elif topology_type in ['ReplicaSetNoPrimary', 'ReplicaSetWithPrimary']:
        kwargs.setdefault('replica_set_name', 'rs')
    settings = get_topology_settings_dict(heartbeat_frequency=frequency,
                                          seeds=seeds,
                                          **kwargs)

    # "Eligible servers" is defined in the server selection spec as
    # the set of servers matching both the ReadPreference's mode
    # and tag sets.
    topology = Topology(TopologySettings(**settings))
    topology.open()

    # Update topologies with server descriptions.
    for server in scenario_def['topology_description']['servers']:
        server_description = make_server_description(server, hosts)
        topology.on_change(server_description)

    # Assert that descriptions match
    assert (scenario_def['topology_description']['type'] ==
            topology.description.topology_type_name), (
                topology.description.topology_type_name)

    return topology
Example #13
    def __init__(self,
                 host=None,
                 port=None,
                 document_class=dict,
                 tz_aware=False,
                 connect=True,
                 **kwargs):
        """Client for a MongoDB instance, a replica set, or a set of mongoses.

        The client object is thread-safe and has connection-pooling built in.
        If an operation fails because of a network error,
        :class:`~pymongo.errors.ConnectionFailure` is raised and the client
        reconnects in the background. Application code should handle this
        exception (recognizing that the operation failed) and then continue to
        execute.
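
        For example, an operation can be wrapped so that a transient network
        error is reported and the application carries on (a minimal sketch;
        the database and collection names are placeholders)::

            try:
                client.my_database.my_collection.find_one()
            except pymongo.errors.ConnectionFailure:
                # The client reconnects in the background; decide here
                # whether to retry the operation or report the failure.
                pass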

        The `host` parameter can be a full `mongodb URI
        <http://dochub.mongodb.org/core/connections>`_, in addition to
        a simple hostname. It can also be a list of hostnames or
        URIs. Any port specified in the host string(s) will override
        the `port` parameter. If multiple mongodb URIs containing
        database or auth information are passed, the last database,
        username, and password present will be used. For usernames and
        passwords, reserved characters like ':', '/', '+' and '@' must be
        escaped following RFC 2396.
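
        For example, a password containing reserved characters can be escaped
        with ``quote_plus`` (from :mod:`urllib` on Python 2 or
        :mod:`urllib.parse` on Python 3) before it is placed in the URI (the
        credentials and host below are placeholders)::

            password = quote_plus('pass/word')
            MongoClient('mongodb://user:%s@host1.com:27017' % password)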

        .. warning:: When using PyMongo in a multiprocessing context, please
          read :ref:`multiprocessing` first.

        :Parameters:
          - `host` (optional): hostname or IP address of the
            instance to connect to, or a mongodb URI, or a list of
            hostnames / mongodb URIs. If `host` is an IPv6 literal
            it must be enclosed in '[' and ']' characters following
            the RFC2732 URL syntax (e.g. '[::1]' for localhost)
          - `port` (optional): port number on which to connect
          - `document_class` (optional): default class to use for
            documents returned from queries on this client
          - `tz_aware` (optional): if ``True``,
            :class:`~datetime.datetime` instances returned as values
            in a document by this :class:`MongoClient` will be timezone
            aware (otherwise they will be naive)
          - `connect` (optional): if ``True`` (the default), immediately
            begin connecting to MongoDB in the background. Otherwise connect
            on the first operation.

          | **Other optional parameters can be passed as keyword arguments** (a combined usage example follows this parameter list):

          - `maxPoolSize` (optional): The maximum number of connections
            that the pool will open simultaneously. If this is set, operations
            will block if there are `maxPoolSize` outstanding connections
            from the pool. Defaults to 100. Cannot be 0.
          - `socketTimeoutMS`: (integer or None) Controls how long (in
            milliseconds) the driver will wait for a response after sending an
            ordinary (non-monitoring) database operation before concluding that
            a network error has occurred. Defaults to ``None`` (no timeout).
          - `connectTimeoutMS`: (integer or None) Controls how long (in
            milliseconds) the driver will wait during server monitoring when
            connecting a new socket to a server before concluding the server
            is unavailable. Defaults to ``20000`` (20 seconds).
          - `serverSelectionTimeoutMS`: (integer) Controls how long (in
            milliseconds) the driver will wait to find an available,
            appropriate server to carry out a database operation; while it is
            waiting, multiple server monitoring operations may be carried out,
            each controlled by `connectTimeoutMS`. Defaults to ``30000`` (30
            seconds).
          - `waitQueueTimeoutMS`: (integer or None) How long (in milliseconds)
            a thread will wait for a socket from the pool if the pool has no
            free sockets. Defaults to ``None`` (no timeout).
          - `waitQueueMultiple`: (integer or None) Multiplied by maxPoolSize
            to give the number of threads allowed to wait for a socket at one
            time. Defaults to ``None`` (no limit).
          - `socketKeepAlive`: (boolean) Whether to send periodic keep-alive
            packets on connected sockets. Defaults to ``False`` (do not send
            keep-alive packets).

          | **Write Concern options:**
          | (Only set if passed. No default values.)

          - `w`: (integer or string) If this is a replica set, write operations
            will block until they have been replicated to the specified number
            or tagged set of servers. `w=<int>` always includes the replica set
            primary (e.g. w=3 means write to the primary and wait until
            replicated to **two** secondaries). Passing w=0 **disables write
            acknowledgement** and all other write concern options.
          - `wtimeout`: (integer) Used in conjunction with `w`. Specify a value
            in milliseconds to control how long to wait for write propagation
            to complete. If replication does not complete in the given
            timeframe, a timeout exception is raised.
          - `j`: If ``True`` block until write operations have been committed
            to the journal. Cannot be used in combination with `fsync`. Prior
            to MongoDB 2.6 this option was ignored if the server was running
            without journaling. Starting with MongoDB 2.6 write operations will
            fail with an exception if this option is used when the server is
            running without journaling.
          - `fsync`: If ``True`` and the server is running without journaling,
            blocks until the server has synced all data files to disk. If the
            server is running with journaling, this acts the same as the `j`
            option, blocking until write operations have been committed to the
            journal. Cannot be used in combination with `j`.

          | **Replica set keyword arguments for connecting with a replica set
            - either directly or via a mongos:**

          - `replicaSet`: (string or None) The name of the replica set to
            connect to. The driver will verify that all servers it connects to
            match this name. Implies that the hosts specified are a seed list
            and the driver should attempt to find all members of the set.
            Defaults to ``None``.
          - `read_preference`: The read preference for this client. If
            connecting directly to a secondary then a read preference mode
            *other* than PRIMARY is required - otherwise all queries will throw
            :class:`~pymongo.errors.AutoReconnect` "not master".
            See :class:`~pymongo.read_preferences.ReadPreference` for all
            available read preference options. Defaults to ``PRIMARY``.

          | **SSL configuration:**

          - `ssl`: If ``True``, create the connection to the server using SSL.
            Defaults to ``False``.
          - `ssl_keyfile`: The private keyfile used to identify the local
            connection against mongod.  If included with the ``certfile`` then
            only the ``ssl_certfile`` is needed.  Implies ``ssl=True``.
            Defaults to ``None``.
          - `ssl_certfile`: The certificate file used to identify the local
            connection against mongod. Implies ``ssl=True``. Defaults to
            ``None``.
          - `ssl_cert_reqs`: Specifies whether a certificate is required from
            the other side of the connection, and whether it will be validated
            if provided. It must be one of the three values ``ssl.CERT_NONE``
            (certificates ignored), ``ssl.CERT_OPTIONAL``
            (not required, but validated if provided), or ``ssl.CERT_REQUIRED``
            (required and validated). If the value of this parameter is not
            ``ssl.CERT_NONE`` and a value is not provided for ``ssl_ca_certs``
            PyMongo will attempt to load system provided CA certificates.
            If the python version in use does not support loading system CA
            certificates then the ``ssl_ca_certs`` parameter must point
            to a file of CA certificates. Implies ``ssl=True``. Defaults to
            ``ssl.CERT_REQUIRED`` if not provided and ``ssl=True``.
          - `ssl_ca_certs`: The ca_certs file contains a set of concatenated
            "certification authority" certificates, which are used to validate
            certificates passed from the other end of the connection.
            Implies ``ssl=True``. Defaults to ``None``.
          - `ssl_match_hostname`: If ``True`` (the default), and
            `ssl_cert_reqs` is not ``ssl.CERT_NONE``, enables hostname
            verification using the :func:`~ssl.match_hostname` function from
            python's :mod:`~ssl` module. Think very carefully before setting
            this to ``False`` as that could make your application vulnerable to
            man-in-the-middle attacks.
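
        As a combined illustration of the keyword arguments above (the hosts
        and replica set name are placeholders), a replica-set client with a
        majority write concern and SSL enabled could be created as::

            MongoClient('mongodb://host1.com:27017,host2.com:27017',
                        replicaSet='rs0', w='majority', wtimeout=5000,
                        ssl=True)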

        .. mongodoc:: connections

        .. versionchanged:: 3.0
           :class:`~pymongo.mongo_client.MongoClient` is now the one and only
           client class for a standalone server, mongos, or replica set.
           It includes the functionality that had been split into
           :class:`~pymongo.mongo_client.MongoReplicaSetClient`: it can connect
           to a replica set, discover all its members, and monitor the set for
           stepdowns, elections, and reconfigs.

           The :class:`~pymongo.mongo_client.MongoClient` constructor no
           longer blocks while connecting to the server or servers, and it no
           longer raises :class:`~pymongo.errors.ConnectionFailure` if they
           are unavailable, nor :class:`~pymongo.errors.ConfigurationError`
           if the user's credentials are wrong. Instead, the constructor
           returns immediately and launches the connection process on
           background threads.

           Therefore the ``alive`` method is removed since it no longer
           provides meaningful information; even if the client is disconnected,
           it may discover a server in time to fulfill the next operation.

           In PyMongo 2.x, :class:`~pymongo.MongoClient` accepted a list of
           standalone MongoDB servers and used the first it could connect to::

               MongoClient(['host1.com:27017', 'host2.com:27017'])

           A list of multiple standalones is no longer supported; if multiple
           servers are listed they must be members of the same replica set, or
           mongoses in the same sharded cluster.

           The behavior for a list of mongoses is changed from "high
           availability" to "load balancing". Before, the client connected to
           the lowest-latency mongos in the list, and used it until a network
           error prompted it to re-evaluate all mongoses' latencies and
           reconnect to one of them. In PyMongo 3, the client monitors its
           network latency to all the mongoses continuously, and distributes
           operations evenly among those with the lowest latency. See
           :ref:`mongos-load-balancing` for more information.

           The ``connect`` option is added.

           The ``start_request``, ``in_request``, and ``end_request`` methods
           are removed, as well as the ``auto_start_request`` option.

           The ``copy_database`` method is removed, see the
           :doc:`copy_database examples </examples/copydb>` for alternatives.

           The :meth:`MongoClient.disconnect` method is removed; it was a
           synonym for :meth:`~pymongo.MongoClient.close`.

           :class:`~pymongo.mongo_client.MongoClient` no longer returns an
           instance of :class:`~pymongo.database.Database` for attribute names
           with leading underscores. You must use dict-style lookups instead::

               client['__my_database__']

           Not::

               client.__my_database__
        """
        if host is None:
            host = self.HOST
        if isinstance(host, string_type):
            host = [host]
        if port is None:
            port = self.PORT
        if not isinstance(port, int):
            raise TypeError("port must be an instance of int")

        seeds = set()
        username = None
        password = None
        dbase = None
        opts = {}
        for entity in host:
            if "://" in entity:
                if entity.startswith("mongodb://"):
                    res = uri_parser.parse_uri(entity, port, warn=True)
                    seeds.update(res["nodelist"])
                    username = res["username"] or username
                    password = res["password"] or password
                    dbase = res["database"] or dbase
                    opts = res["options"]
                else:
                    idx = entity.find("://")
                    raise InvalidURI("Invalid URI scheme: "
                                     "%s" % (entity[:idx], ))
            else:
                seeds.update(uri_parser.split_hosts(entity, port))
        if not seeds:
            raise ConfigurationError("need to specify at least one host")

        # _pool_class, _monitor_class, and _condition_class are for deep
        # customization of PyMongo, e.g. Motor.
        pool_class = kwargs.pop('_pool_class', None)
        monitor_class = kwargs.pop('_monitor_class', None)
        condition_class = kwargs.pop('_condition_class', None)

        keyword_opts = kwargs
        keyword_opts['document_class'] = document_class
        keyword_opts['tz_aware'] = tz_aware
        keyword_opts['connect'] = connect
        # Validate all keyword options.
        keyword_opts = dict(
            common.validate(k, v) for k, v in keyword_opts.items())
        opts.update(keyword_opts)
        self.__options = options = ClientOptions(username, password, dbase,
                                                 opts)

        self.__default_database_name = dbase
        self.__lock = threading.Lock()
        self.__cursor_manager = CursorManager(self)
        self.__kill_cursors_queue = []

        # Cache of existing indexes used by ensure_index ops.
        self.__index_cache = {}

        super(MongoClient,
              self).__init__(options.codec_options, options.read_preference,
                             options.write_concern)

        self.__all_credentials = {}
        creds = options.credentials
        if creds:
            self._cache_credentials(creds.source, creds)

        self._topology_settings = TopologySettings(
            seeds=seeds,
            replica_set_name=options.replica_set_name,
            pool_class=pool_class,
            pool_options=options.pool_options,
            monitor_class=monitor_class,
            condition_class=condition_class,
            local_threshold_ms=options.local_threshold_ms,
            server_selection_timeout=options.server_selection_timeout)

        self._topology = Topology(self._topology_settings)
        if connect:
            self._topology.open()

        def target():
            client = self_ref()
            if client is None:
                return False  # Stop the executor.
            MongoClient._process_kill_cursors_queue(client)
            return True

        executor = periodic_executor.PeriodicExecutor(
            condition_class=self._topology_settings.condition_class,
            interval=common.KILL_CURSOR_FREQUENCY,
            min_interval=0,
            target=target,
            name="pymongo_kill_cursors_thread")

        # We strongly reference the executor and it weakly references us via
        # this closure. When the client is freed, stop the executor soon.
        self_ref = weakref.ref(self, executor.close)
        self._kill_cursors_executor = executor
        executor.open()
Example #14
    def run_scenario(self):

        # Initialize topologies.
        seeds, hosts = get_addresses(
            scenario_def['topology_description']['servers'])

        # "Eligible servers" is defined in the server selection spec as
        # the set of servers matching both the ReadPreference's mode
        # and tag sets.
        top_latency = Topology(
            TopologySettings(seeds=seeds,
                             monitor_class=MockMonitor,
                             pool_class=MockPool))
        # "In latency window" is defined in the server selection
        # spec as the subset of suitable_servers that falls within the
        # allowable latency window.
        top_suitable = Topology(
            TopologySettings(seeds=seeds,
                             local_threshold_ms=1000000,
                             monitor_class=MockMonitor,
                             pool_class=MockPool))

        # Update topologies with server descriptions.
        for server in scenario_def['topology_description']['servers']:
            server_description = make_server_description(server, hosts)
            top_suitable.on_change(server_description)
            top_latency.on_change(server_description)

        # Create server selector.
        if scenario_def["operation"] == "write":
            instance = writable_server_selector
        else:
            # Make first letter lowercase to match read_pref's modes.
            mode_string = scenario_def['read_preference']['mode']
            if mode_string:
                mode_string = mode_string[:1].lower() + mode_string[1:]

            mode = read_preferences.read_pref_mode_from_name(mode_string)
            tag_sets = None
            if scenario_def['read_preference']['tag_sets'][0]:
                tag_sets = scenario_def['read_preference']['tag_sets']
            instance = read_preferences.make_read_preference(mode, tag_sets)

        # Select servers.
        if not scenario_def['suitable_servers']:
            self.assertRaises(AutoReconnect,
                              top_suitable.select_server,
                              instance,
                              server_selection_timeout=0)
            return

        if not scenario_def['in_latency_window']:
            self.assertRaises(AutoReconnect,
                              top_latency.select_server,
                              instance,
                              server_selection_timeout=0)
            return

        actual_suitable_s = top_suitable.select_servers(
            instance, server_selection_timeout=0)
        actual_latency_s = top_latency.select_servers(
            instance, server_selection_timeout=0)

        expected_suitable_servers = {}
        for server in scenario_def['suitable_servers']:
            server_description = make_server_description(server, hosts)
            expected_suitable_servers[server['address']] = server_description

        actual_suitable_servers = {}
        for s in actual_suitable_s:
            actual_suitable_servers["%s:%d" %
                                    (s.description.address[0],
                                     s.description.address[1])] = s.description

        self.assertEqual(len(actual_suitable_servers),
                         len(expected_suitable_servers))
        for k, actual in actual_suitable_servers.items():
            expected = expected_suitable_servers[k]
            self.assertEqual(expected.address, actual.address)
            self.assertEqual(expected.server_type, actual.server_type)
            self.assertEqual(expected.round_trip_time, actual.round_trip_time)
            self.assertEqual(expected.tags, actual.tags)
            self.assertEqual(expected.all_hosts, actual.all_hosts)

        expected_latency_servers = {}
        for server in scenario_def['in_latency_window']:
            server_description = make_server_description(server, hosts)
            expected_latency_servers[server['address']] = server_description

        actual_latency_servers = {}
        for s in actual_latency_s:
            actual_latency_servers["%s:%d" %
                                   (s.description.address[0],
                                    s.description.address[1])] = s.description

        self.assertEqual(len(actual_latency_servers),
                         len(expected_latency_servers))
        for k, actual in actual_latency_servers.items():
            expected = expected_latency_servers[k]
            self.assertEqual(expected.address, actual.address)
            self.assertEqual(expected.server_type, actual.server_type)
            self.assertEqual(expected.round_trip_time, actual.round_trip_time)
            self.assertEqual(expected.tags, actual.tags)
            self.assertEqual(expected.all_hosts, actual.all_hosts)
Example #15
    def run_scenario(self):
        # Initialize topologies.
        if 'heartbeatFrequencyMS' in scenario_def:
            frequency = int(scenario_def['heartbeatFrequencyMS']) / 1000.0
        else:
            frequency = HEARTBEAT_FREQUENCY

        settings = dict(monitor_class=MockMonitor,
                        heartbeat_frequency=frequency,
                        pool_class=MockPool)

        settings['seeds'], hosts = get_addresses(
            scenario_def['topology_description']['servers'])

        # "Eligible servers" is defined in the server selection spec as
        # the set of servers matching both the ReadPreference's mode
        # and tag sets.
        top_latency = Topology(TopologySettings(**settings))
        top_latency.open()

        # "In latency window" is defined in the server selection
        # spec as the subset of suitable_servers that falls within the
        # allowable latency window.
        settings['local_threshold_ms'] = 1000000
        top_suitable = Topology(TopologySettings(**settings))
        top_suitable.open()

        # Update topologies with server descriptions.
        for server in scenario_def['topology_description']['servers']:
            server_description = make_server_description(server, hosts)
            top_suitable.on_change(server_description)
            top_latency.on_change(server_description)

        # Create server selector.
        if scenario_def.get("operation") == "write":
            pref = writable_server_selector
        else:
            # Make first letter lowercase to match read_pref's modes.
            pref_def = scenario_def['read_preference']
            mode_string = pref_def.get('mode', 'primary')
            mode_string = mode_string[:1].lower() + mode_string[1:]
            mode = read_preferences.read_pref_mode_from_name(mode_string)
            max_staleness = pref_def.get('maxStalenessSeconds', -1)
            tag_sets = pref_def.get('tag_sets')

            if scenario_def.get('error'):
                with self.assertRaises((ConfigurationError, ValueError)):
                    # Error can be raised when making Read Pref or selecting.
                    pref = read_preferences.make_read_preference(
                        mode, tag_sets=tag_sets, max_staleness=max_staleness)

                    top_latency.select_server(pref)
                return

            pref = read_preferences.make_read_preference(
                mode, tag_sets=tag_sets, max_staleness=max_staleness)

        # Select servers.
        if not scenario_def.get('suitable_servers'):
            with self.assertRaises(AutoReconnect):
                top_suitable.select_server(pref, server_selection_timeout=0)

            return

        if not scenario_def['in_latency_window']:
            with self.assertRaises(AutoReconnect):
                top_latency.select_server(pref, server_selection_timeout=0)

            return

        actual_suitable_s = top_suitable.select_servers(
            pref, server_selection_timeout=0)
        actual_latency_s = top_latency.select_servers(
            pref, server_selection_timeout=0)

        expected_suitable_servers = {}
        for server in scenario_def['suitable_servers']:
            server_description = make_server_description(server, hosts)
            expected_suitable_servers[server['address']] = server_description

        actual_suitable_servers = {}
        for s in actual_suitable_s:
            actual_suitable_servers["%s:%d" %
                                    (s.description.address[0],
                                     s.description.address[1])] = s.description

        self.assertEqual(len(actual_suitable_servers),
                         len(expected_suitable_servers))
        for k, actual in actual_suitable_servers.items():
            expected = expected_suitable_servers[k]
            self.assertEqual(expected.address, actual.address)
            self.assertEqual(expected.server_type, actual.server_type)
            self.assertEqual(expected.round_trip_time, actual.round_trip_time)
            self.assertEqual(expected.tags, actual.tags)
            self.assertEqual(expected.all_hosts, actual.all_hosts)

        expected_latency_servers = {}
        for server in scenario_def['in_latency_window']:
            server_description = make_server_description(server, hosts)
            expected_latency_servers[server['address']] = server_description

        actual_latency_servers = {}
        for s in actual_latency_s:
            actual_latency_servers["%s:%d" %
                                   (s.description.address[0],
                                    s.description.address[1])] = s.description

        self.assertEqual(len(actual_latency_servers),
                         len(expected_latency_servers))
        for k, actual in actual_latency_servers.items():
            expected = expected_latency_servers[k]
            self.assertEqual(expected.address, actual.address)
            self.assertEqual(expected.server_type, actual.server_type)
            self.assertEqual(expected.round_trip_time, actual.round_trip_time)
            self.assertEqual(expected.tags, actual.tags)
            self.assertEqual(expected.all_hosts, actual.all_hosts)