Example #1
    def on_change(self, server_description):
        """Process a new ServerDescription after an ismaster call completes."""
        # We do no I/O while holding the lock.
        with self._lock:
            # Any monitored server was definitely in the topology description
            # once. Check if it's still in the description or if some state-
            # change removed it. E.g., we got a host list from the primary
            # that didn't include this server.
            if self._description.has_server(server_description.address):
                td_old = self._description
                if self._publish_server:
                    old_server_description = td_old._server_descriptions[
                        server_description.address]
                    self._events.put((
                        self._listeners.publish_server_description_changed,
                        (old_server_description, server_description,
                         server_description.address, self._topology_id)))

                self._description = updated_topology_description(
                    self._description, server_description)

                self._update_servers()
                self._receive_cluster_time_no_lock(
                    server_description.cluster_time)

                if self._publish_tp:
                    self._events.put((
                        self._listeners.publish_topology_description_changed,
                        (td_old, self._description, self._topology_id)))

                # Wake waiters in select_servers().
                self._condition.notify_all()
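
The tuples queued on self._events are later handed to the listeners registered through PyMongo's monitoring API. A minimal sketch of a consumer, assuming the standard pymongo.monitoring listener classes (the ServerLogger name and the print statements are illustrative, not part of the code above):

from pymongo import MongoClient, monitoring

class ServerLogger(monitoring.ServerListener):
    """Log the server description changes published by on_change()."""

    def opened(self, event):
        print('Server %s added to topology %s' %
              (event.server_address, event.topology_id))

    def description_changed(self, event):
        # Delivered via publish_server_description_changed above.
        if (event.new_description.server_type !=
                event.previous_description.server_type):
            print('Server %s changed type from %s to %s' %
                  (event.server_address,
                   event.previous_description.server_type_name,
                   event.new_description.server_type_name))

    def closed(self, event):
        print('Server %s removed from topology %s' %
              (event.server_address, event.topology_id))

client = MongoClient(event_listeners=[ServerLogger()])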
Example #2
    def _process_change(self, server_description):
        """Process a new ServerDescription on an opened topology.

        Hold the lock when calling this.
        """
        td_old = self._description
        if self._publish_server:
            old_server_description = td_old._server_descriptions[
                server_description.address]
            self._events.put((
                self._listeners.publish_server_description_changed,
                (old_server_description, server_description,
                 server_description.address, self._topology_id)))

        self._description = updated_topology_description(
            self._description, server_description)

        self._update_servers()
        self._receive_cluster_time_no_lock(server_description.cluster_time)

        if self._publish_tp:
            self._events.put((
                self._listeners.publish_topology_description_changed,
                (td_old, self._description, self._topology_id)))

        # Wake waiters in select_servers().
        self._condition.notify_all()
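
Each item queued on self._events is a (publish_function, args) tuple; a background task drains the queue and invokes the listener callbacks outside the topology lock. A minimal sketch of such a drain loop (an assumption about the surrounding machinery; the real driver runs this on a periodic executor and holds the queue through a weak reference):

import queue

def process_events(event_queue):
    """Invoke queued listener callbacks outside the topology lock."""
    while True:
        try:
            publish_fn, args = event_queue.get_nowait()
        except queue.Empty:
            return
        try:
            publish_fn(*args)
        except Exception:
            # A misbehaving listener must not break event publishing.
            pass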
Example #3
    def on_change(self, server_description):
        """Process a new ServerDescription after an ismaster call completes."""
        # We do no I/O while holding the lock.
        with self._lock:
            # Any monitored server was definitely in the topology description
            # once. Check if it's still in the description or if some state-
            # change removed it. E.g., we got a host list from the primary
            # that didn't include this server.
            if self._description.has_server(server_description.address):
                self._description = updated_topology_description(
                    self._description, server_description)

                self._update_servers()

                # Wake waiters in select_servers().
                self._condition.notify_all()
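
The notify_all() call is what unblocks server selection: a waiter in select_servers() sleeps on the same condition variable until the description changes or its timeout expires. A simplified sketch of that loop, assuming self._condition wraps self._lock and that time and pymongo.errors.ServerSelectionTimeoutError are imported (the real selection path also tracks errors and staleness):

    def _select_servers_loop(self, selector, timeout):
        """Wait until the topology has servers matching the selector."""
        end_time = time.monotonic() + timeout
        with self._lock:
            while True:
                servers = self._description.apply_selector(
                    selector, address=None)
                if servers:
                    return servers
                remaining = end_time - time.monotonic()
                if remaining <= 0:
                    raise ServerSelectionTimeoutError(
                        'No servers matched selector %s' % (selector,))
                # wait() releases the lock; on_change() wakes us via
                # notify_all() when the description changes.
                self._condition.wait(remaining)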
Example #4
    def _process_change(self, server_description, reset_pool=False):
        """Process a new ServerDescription on an opened topology.

        Hold the lock when calling this.
        """
        td_old = self._description
        sd_old = td_old._server_descriptions[server_description.address]
        if _is_stale_server_description(sd_old, server_description):
            # This is a stale hello response. Ignore it.
            return

        suppress_event = ((self._publish_server or self._publish_tp)
                          and sd_old == server_description)
        if self._publish_server and not suppress_event:
            self._events.put(
                (self._listeners.publish_server_description_changed,
                 (sd_old, server_description, server_description.address,
                  self._topology_id)))

        self._description = updated_topology_description(
            self._description, server_description)

        self._update_servers()
        self._receive_cluster_time_no_lock(server_description.cluster_time)

        if self._publish_tp and not suppress_event:
            self._events.put(
                (self._listeners.publish_topology_description_changed,
                 (td_old, self._description, self._topology_id)))

        # Shutdown SRV polling for unsupported cluster types.
        # This is only applicable if the old topology was Unknown, and the
        # new one is something other than Unknown or Sharded.
        if self._srv_monitor and (td_old.topology_type == TOPOLOGY_TYPE.Unknown
                                  and self._description.topology_type
                                  not in SRV_POLLING_TOPOLOGIES):
            self._srv_monitor.close()

        # Clear the pool from a failed heartbeat.
        if reset_pool:
            server = self._servers.get(server_description.address)
            if server:
                server.pool.reset()

        # Wake waiters in select_servers().
        self._condition.notify_all()
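
The staleness guard protects against out-of-order hello responses, e.g. a response from a connection that has already been superseded. Per the SDAM specification this is decided by comparing the topologyVersion documents of the old and new descriptions; a sketch of what _is_stale_server_description could look like under that assumption:

def _is_stale_server_description(current_sd, new_sd):
    """Return True if the new description is older than the current one."""
    current_tv = current_sd.topology_version
    new_tv = new_sd.topology_version
    # Pre-4.4 servers send no topologyVersion, so staleness cannot be
    # detected; treat the new response as fresh.
    if current_tv is None or new_tv is None:
        return False
    # A different processId means the server restarted: not stale.
    if current_tv['processId'] != new_tv['processId']:
        return False
    return current_tv['counter'] > new_tv['counter']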
Example #5
    def _process_change(self, server_description):
        """Process a new ServerDescription on an opened topology.

        Hold the lock when calling this.
        """
        td_old = self._description
        old_server_description = td_old._server_descriptions[
            server_description.address]
        suppress_event = ((self._publish_server or self._publish_tp)
                          and old_server_description == server_description)
        if self._publish_server and not suppress_event:
            self._events.put(
                (self._listeners.publish_server_description_changed,
                 (old_server_description, server_description,
                  server_description.address, self._topology_id)))

        self._description = updated_topology_description(
            self._description, server_description)

        self._update_servers()
        self._receive_cluster_time_no_lock(server_description.cluster_time)

        if self._publish_tp and not suppress_event:
            self._events.put(
                (self._listeners.publish_topology_description_changed,
                 (td_old, self._description, self._topology_id)))

        # Shutdown SRV polling for unsupported cluster types.
        # This is only applicable if the old topology was Unknown, and the
        # new one is something other than Unknown or Sharded.
        if self._srv_monitor and (td_old.topology_type == TOPOLOGY_TYPE.Unknown
                                  and self._description.topology_type
                                  not in SRV_POLLING_TOPOLOGIES):
            self._srv_monitor.close()

        # Wake waiters in select_servers().
        self._condition.notify_all()
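
The SRV shutdown branch assumes SRV_POLLING_TOPOLOGIES names the topology types for which polling stays active. Per the Initial DNS Seedlist Discovery spec, polling is only useful before the topology type is known or for sharded clusters, so a plausible definition is:

from pymongo.topology_description import TOPOLOGY_TYPE

# SRV polling continues only while the topology type is still unknown or
# the cluster is sharded, where the set of mongos hosts can change.
SRV_POLLING_TOPOLOGIES = (TOPOLOGY_TYPE.Unknown, TOPOLOGY_TYPE.Sharded)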
Example #6
    def run_scenario(self, scenario_def, test):
        """Run a CMAP spec test."""
        self.logs = []
        self.assertEqual(scenario_def['version'], 1)
        self.assertIn(scenario_def['style'], ['unit', 'integration'])
        self.listener = CMAPListener()
        self._ops = []

        # Configure the fail point before creating the client.
        if 'failPoint' in test:
            fp = test['failPoint']
            self.set_fail_point(fp)
            self.addCleanup(self.set_fail_point, {
                'configureFailPoint': fp['configureFailPoint'],
                'mode': 'off'
            })

        opts = test['poolOptions'].copy()
        opts['event_listeners'] = [self.listener]
        opts['_monitor_class'] = DummyMonitor
        opts['connect'] = False
        with client_knobs(kill_cursor_frequency=0.05,
                          min_heartbeat_interval=0.05):
            client = single_client(**opts)
            # Update the SD to a known type because the DummyMonitor will not.
            # Note we cannot simply call topology.on_change because that would
            # internally call pool.ready() which introduces unexpected
            # PoolReadyEvents. Instead, update the initial state before
            # opening the Topology.
            td = client_context.client._topology.description
            sd = td.server_descriptions()[(client_context.host,
                                           client_context.port)]
            client._topology._description = updated_topology_description(
                client._topology._description, sd)
            client._get_topology()
        self.addCleanup(client.close)
        self.pool = list(client._topology._servers.values())[0].pool

        # Map of target names to Thread objects.
        self.targets = dict()
        # Map of label names to Connection objects.
        self.labels = dict()

        def cleanup():
            for t in self.targets.values():
                t.stop()
            for t in self.targets.values():
                t.join(5)
            for conn in self.labels.values():
                conn.close_socket(None)

        self.addCleanup(cleanup)

        try:
            if test['error']:
                with self.assertRaises(PyMongoError) as ctx:
                    self.run_operations(test['operations'])
                self.check_error(ctx.exception, test['error'])
            else:
                self.run_operations(test['operations'])

            self.check_events(test['events'], test['ignore'])
        except Exception:
            # Print the events after a test failure.
            print('\nFailed test: %r' % (test['description'], ))
            print('Operations:')
            for op in self._ops:
                print(op)
            print('Threads:')
            print(self.targets)
            print('Connections:')
            print(self.labels)
            print('Events:')
            for event in self.listener.events:
                print(event)
            print('Log:')
            for log in self.logs:
                print(log)
            raise
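
run_scenario assumes a CMAPListener test helper that records every connection-pool event in arrival order. A minimal sketch of such a helper built on pymongo.monitoring.ConnectionPoolListener (the real test utility may track more state):

from pymongo import monitoring

class CMAPListener(monitoring.ConnectionPoolListener):
    """Collect CMAP events in the order they are published."""

    def __init__(self):
        self.events = []

    def add_event(self, event):
        self.events.append(event)

    def event_count(self, event_type):
        return sum(1 for event in self.events
                   if isinstance(event, event_type))

    # Route every pool and connection callback to add_event.
    pool_created = add_event
    pool_ready = add_event  # Requires a driver with PoolReadyEvent support.
    pool_cleared = add_event
    pool_closed = add_event
    connection_created = add_event
    connection_ready = add_event
    connection_closed = add_event
    connection_check_out_started = add_event
    connection_check_out_failed = add_event
    connection_checked_out = add_event
    connection_checked_in = add_event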