Example #1
0
class TestSchedulerListener(unittest.TestCase):
    """Tests for SchedulerWatcher.

    The watcher is expected to mirror the state of a running APScheduler
    instance (scheduler info, jobstores, executors and jobs) and to keep
    tracking job/jobstore/executor events after it attaches itself as a
    scheduler listener.
    """

    def setUp(self):
        # A running scheduler with one extra jobstore and one extra executor,
        # so the watcher should always see two of each (the implicit 'default'
        # plus the aliases added here).
        self.scheduler = BackgroundScheduler()
        self.scheduler.add_jobstore(MemoryJobStore(), alias='in_memory')
        self.scheduler.add_executor(ThreadPoolExecutor(1), alias='secondary_executor')

        self.scheduler.start()

    def tearDown(self):
        self.scheduler.shutdown()

    def test_watcher_injection(self):
        """Creating a watcher registers it as a listener for all scheduler events."""
        watcher = SchedulerWatcher(self.scheduler)

        self.assertEqual(watcher.scheduler, self.scheduler, 'Watcher should keep a reference to the scheduler')
        self.assertEqual(1, len(self.scheduler._listeners), 'Watcher should inject itself as a scheduler listener')

        self.assertEqual(
            self.scheduler._listeners[0][1], EVENT_ALL, 'Watcher should register itself to watch all events'
        )

    def test_scheduler_inspection(self):
        """On creation the watcher inspects scheduler state, stores, executors and jobs."""
        self.scheduler.add_job(lambda: 0, jobstore='in_memory', trigger='interval', minutes=60, id='test_job')

        watcher = SchedulerWatcher(self.scheduler)

        self.assertEqual('running', watcher.scheduler_info['state'], 'Watcher should inspect scheduler status')
        self.assertEqual(
            str(self.scheduler.timezone),
            watcher.scheduler_info['timezone'],
            'Watcher should inspect scheduler timezone'
        )
        self.assertEqual(
            'BackgroundScheduler', watcher.scheduler_info['class'], 'Watcher should inspect scheduler class'
        )

        self.assertEqual(2, len(watcher.jobstores), 'Watcher should inspect all scheduler jobstores')
        self.assertIn('in_memory', watcher.jobstores, 'Watcher should have inspected the in_memory jobstore')

        self.assertEqual(2, len(watcher.executors), 'Watcher should inspect all scheduler executors')
        self.assertIn('secondary_executor', watcher.executors, 'Watcher should have inspected the secondary_executor')

        self.assertEqual(1, len(watcher.jobs), 'Watcher should inspect all jobs in scheduler on init')
        self.assertIn('test_job', watcher.jobs, 'Watcher should index jobs by id')

    def test_job_properties_on_add(self):
        """A job added after the watcher exists gets fully described in watcher.jobs."""
        watcher = SchedulerWatcher(self.scheduler)

        self.scheduler.add_job(
            lambda x, y: x + y,
            id='added_job',
            name='Added job',
            jobstore='in_memory',
            trigger='interval',
            minutes=60,
            args=(1,),
            kwargs={'y': 2}
        )

        self.assertIn('added_job', watcher.jobs)

        job_properties = watcher.jobs['added_job']['properties']

        self.assertEqual('added_job', job_properties['id'], 'Job properties should have the job id')
        self.assertEqual('Added job', job_properties['name'], 'Job properties should have the job name')
        self.assertIn('trigger', job_properties, 'Job properties should have a representation of the trigger')
        self.assertEqual('in_memory', job_properties['jobstore'], 'Job properties should have the jobstore name')
        self.assertEqual('default', job_properties['executor'], 'Job properties should have the executor name')
        self.assertIn('lambda', job_properties['func'], 'Job properties should have the function string repr')
        self.assertIn('func_ref', job_properties, 'Job properties should have the function reference')
        self.assertEqual('(1,)', job_properties['args'], 'Job properties should have the job arguments')
        self.assertEqual("{'y': 2}", job_properties['kwargs'], 'Job properties should have the job keyword arguments')
        self.assertIn('pending', job_properties, 'Job properties should have the job pending status')
        self.assertFalse(job_properties['pending'], 'Job status should not be pending')
        self.assertIn('coalesce', job_properties, 'Job properties should have the job coalesce configuration')
        self.assertIn('next_run_time', job_properties, 'Job properties should have the next run time calculated')
        self.assertIn('misfire_grace_time', job_properties, 'Job properties should have the misfire grace time')
        self.assertIn('max_instances', job_properties, 'Job properties should have the max instances configuration')

    def test_job_inspection_matches_job_added_event(self):
        """Jobs discovered on init and jobs seen via events yield identical properties."""
        # We're going to add two jobs that should have the exact same properties, except for the id, in two different
        # stages of the usage: before the watcher is created and after we start watching for events.
        def job_function(x, y):
            return x + y
        next_run_time = datetime.now() + timedelta(hours=1)

        # Job that is added before the user calls us.
        self.scheduler.add_job(
            job_function,
            id='job_1',
            name='Added job',
            jobstore='in_memory',
            trigger='interval',
            minutes=60,
            args=(1,),
            kwargs={'y': 2},
            next_run_time=next_run_time
        )

        watcher = SchedulerWatcher(self.scheduler)

        # Job that gets added after we start watching.
        self.scheduler.add_job(
            job_function,
            id='job_2',
            name='Added job',
            jobstore='in_memory',
            trigger='interval',
            minutes=60,
            args=(1,),
            kwargs={'y': 2},
            next_run_time=next_run_time
        )

        self.assertEqual(2, len(watcher.jobs))

        job_1 = watcher.jobs['job_1']
        job_2 = watcher.jobs['job_2']

        for property_name in job_1['properties'].keys():
            # All properties, except the id, should match.
            if property_name == 'id':
                continue
            self.assertEqual(job_1['properties'][property_name], job_2['properties'][property_name])

    def test_all_events_have_a_processing_method(self):
        """Every event name the watcher maps must exist as a method on the class."""
        for event_name in list(SchedulerWatcher.apscheduler_events.values()):
            self.assertIn(event_name, dir(SchedulerWatcher))

    def test_job_execution_monitoring(self):
        """Submission and execution events are appended to the job's event list."""
        watcher = SchedulerWatcher(self.scheduler)

        self.scheduler.add_job(
            lambda: time.sleep(0.02),
            id='waiting_job',
            name='Waiting job',
            jobstore='in_memory',
            trigger='interval',
            seconds=0.2,
            next_run_time=datetime.now()
        )

        job_events = watcher.jobs['waiting_job']['events']

        self.assertEqual(1, len(job_events))
        self.assertEqual('job_added', job_events[0]['event_name'])
        # NOTE(review): timing-based — the job sleeps 0.02s, so after 0.05s the
        # first run should have been submitted and executed.
        time.sleep(0.05)
        self.assertEqual(3, len(job_events), 'Job execution needs to be tracked in job events')
        self.assertEqual(
            'job_submitted',
            job_events[1]['event_name'],
            'Job submission needs to be tracked in job events'
        )
        self.assertEqual('job_executed', job_events[2]['event_name'], 'Job execution needs to be tracked in job events')

        time.sleep(0.2)

        self.assertEqual(5, len(job_events), 'Subsequent executions get tracked')

    def test_job_failure_monitoring(self):
        """A raising job produces a job_error event instead of job_executed."""
        watcher = SchedulerWatcher(self.scheduler)

        def fail():
            time.sleep(0.02)
            return 0 / 0

        self.scheduler.add_job(
            fail,
            id='failing_job',
            name='Failing job',
            jobstore='in_memory',
            trigger='interval',
            next_run_time=datetime.now(),
            minutes=60
        )

        failing_job_events = watcher.jobs['failing_job']['events']

        time.sleep(0.05)
        self.assertEqual(3, len(failing_job_events))
        self.assertEqual('job_error', failing_job_events[2]['event_name'])

    def test_scheduler_summary(self):
        """scheduler_summary() aggregates scheduler, jobstores, executors and jobs."""
        watcher = SchedulerWatcher(self.scheduler)

        summary = watcher.scheduler_summary()

        self.assertEqual(sorted(['scheduler', 'jobs', 'executors', 'jobstores']), sorted(summary.keys()))

        self.assertEqual('running', summary['scheduler']['state'], 'scheduler_summary should have the scheduler status')
        self.assertEqual(2, len(summary['executors']), 'scheduler_summary should have the two added executors')
        self.assertEqual(2, len(summary['jobstores']), 'scheduler_summary should have the two added jobstores')
        self.assertEqual(0, len(summary['jobs']), 'scheduler_summary should have no jobs')

        self.scheduler.add_job(lambda: 0, id='job_1')

        summary = watcher.scheduler_summary()

        self.assertIn('job_1', summary['jobs'], 'scheduler_summary should have the added jobs in it')

        self.scheduler.remove_job('job_1')

        summary = watcher.scheduler_summary()
        self.assertIn('job_1', summary['jobs'], 'scheduler_summary should have all jobs in it, even if job was removed')

    def test_removed_jobs_are_only_flagged_as_removed(self):
        """Removing a job keeps it in the watcher, only setting its removed_time."""
        self.scheduler.add_job(lambda: 0, id='a_job')

        watcher = SchedulerWatcher(self.scheduler)

        self.assertIn('a_job', watcher.jobs)
        self.assertIsNone(watcher.jobs['a_job']['removed_time'])

        self.scheduler.remove_job('a_job')

        self.assertIn('a_job', watcher.jobs, 'removed jobs should be still tracked in the scheduler watcher')
        self.assertIsNotNone(watcher.jobs['a_job']['removed_time'], 'removed_time should be set')

    def test_modified_job_properties_are_tracked(self):
        """modify_job updates the stored properties and bumps modified_time."""
        self.scheduler.add_job(
            lambda x, y: x + y,
            id='a_job',
            name='A job',
            jobstore='in_memory',
            trigger='interval',
            minutes=60,
            args=(1,),
            kwargs={'y': 2}
        )

        watcher = SchedulerWatcher(self.scheduler)

        self.assertEqual(watcher.jobs['a_job']['modified_time'], watcher.jobs['a_job']['added_time'])

        next_run_time = watcher.jobs['a_job']['properties']['next_run_time'][0]

        self.scheduler.modify_job('a_job', name='A modified job', next_run_time=datetime.now() + timedelta(days=1))

        self.assertGreater(watcher.jobs['a_job']['modified_time'], watcher.jobs['a_job']['added_time'])
        self.assertEqual('A modified job', watcher.jobs['a_job']['properties']['name'])
        self.assertGreater(watcher.jobs['a_job']['properties']['next_run_time'][0], next_run_time)

    @patch('apschedulerui.watcher.SchedulerWatcher.notify_jobstore_event')
    def test_removing_a_jobstore_removes_all_jobs(self, mock_notify_jobstore_event):
        """Removing a jobstore flags all its jobs as removed (they stay tracked)."""
        watcher = SchedulerWatcher(self.scheduler)

        self.scheduler.add_job(lambda: 0, id='job_1', jobstore='in_memory', trigger='interval', minutes=60)
        self.scheduler.add_job(lambda: 0, id='job_2', jobstore='in_memory', trigger='interval', minutes=60)

        self.assertEqual(2, len(watcher.jobs))
        self.assertIsNone(watcher.jobs['job_1']['removed_time'], 'job_1 removed time should be None')
        self.assertEqual('in_memory', watcher.jobs['job_1']['properties']['jobstore'])

        self.scheduler.remove_jobstore('in_memory')

        mock_notify_jobstore_event.assert_called()

        self.assertEqual(2, len(watcher.jobs), 'The amount of jobs after removing a jobstore should not change')
        self.assertIsNotNone(watcher.jobs['job_1']['removed_time'], 'job_1 removed time should be set')
        self.assertIsNotNone(watcher.jobs['job_2']['removed_time'], 'job_2 removed time should be set')

    @patch('apschedulerui.watcher.SchedulerWatcher._repr_job')
    @patch('apschedulerui.watcher.SchedulerWatcher.notify_job_event')
    @patch('apschedulerui.watcher.SchedulerWatcher.notify_jobstore_event')
    def test_adding_a_jobstore_adds_all_jobs_in_it(self, mock_notify_jobstore_event, mock_notify_job_event, _):
        """Attaching a pre-populated jobstore imports all its jobs and notifies once."""
        watcher = SchedulerWatcher(self.scheduler)

        jobstore = MemoryJobStore()

        jobstore.add_job(Job(scheduler=self.scheduler, id='job_1', next_run_time=datetime.now() + timedelta(days=1)))
        jobstore.add_job(Job(scheduler=self.scheduler, id='job_2', next_run_time=datetime.now() + timedelta(days=2)))

        self.assertEqual(0, len(watcher.jobs))

        self.scheduler.add_jobstore(jobstore, alias='in_memory_2')

        self.assertIn('in_memory_2', watcher.jobstores, 'Watcher should have the new jobstore tracked')
        self.assertEqual(2, len(watcher.jobs), 'Watcher should add all jobs in the newly added jobstore')
        self.assertTrue(all([job_id in watcher.jobs for job_id in ['job_1', 'job_2']]))
        self.assertEqual(2, mock_notify_job_event.call_count)
        mock_notify_jobstore_event.assert_called_once()

    @patch('apschedulerui.watcher.SchedulerWatcher.notify_job_event')
    def test_removing_all_jobs_flags_all_as_removed(self, mock_notify_job_event):
        """remove_all_jobs flags every job as removed and notifies once per job."""
        watcher = SchedulerWatcher(self.scheduler)

        self.scheduler.add_job(lambda: 0, id='job_1', jobstore='default', trigger='interval', minutes=60)
        self.scheduler.add_job(lambda: 0, id='job_2', jobstore='in_memory', trigger='interval', minutes=60)

        self.assertEqual(2, len(watcher.jobs))
        self.assertEqual(2, mock_notify_job_event.call_count)

        mock_notify_job_event.reset_mock()

        self.scheduler.remove_all_jobs()

        self.assertEqual(2, len(watcher.jobs), 'job count should not change after removing all jobs')
        self.assertEqual(2, mock_notify_job_event.call_count)

    @patch('apschedulerui.watcher.SchedulerWatcher.notify_executor_event')
    def test_adding_and_removing_executors(self, mock_notify_executor_event):
        """Executor addition/removal is mirrored in watcher.executors with notifications."""
        watcher = SchedulerWatcher(self.scheduler)

        self.scheduler.add_executor(ThreadPoolExecutor(), alias='new_executor')

        self.assertIn('new_executor', watcher.executors)
        mock_notify_executor_event.assert_called()

        mock_notify_executor_event.reset_mock()
        self.scheduler.remove_executor('new_executor')

        self.assertNotIn('new_executor', watcher.executors)
        mock_notify_executor_event.assert_called()

    def test_job_event_history_is_limited(self):
        """Per-job event history is capped at max_events_per_job."""
        watcher = SchedulerWatcher(self.scheduler, max_events_per_job=4)

        self.scheduler.add_job(lambda: 0, trigger='interval', seconds=0.01, id='recurrent_job')

        time.sleep(0.1)

        # recurrent_job should have been executed ~10 times now, generating ~20 events (submission + execution).
        self.assertEqual(
            watcher.max_events_per_job,
            len(watcher.jobs['recurrent_job']['events']),
            'job event history should be limited'
        )
Example #2
0
class TestWebServer(unittest.TestCase):
    """Tests for SchedulerUI, the Flask/Socket.IO front end over a scheduler.

    Covers endpoint registration per capability flag, command serialization
    behind the scheduler lock, and forwarding of scheduler/job/jobstore/executor
    events to connected Socket.IO clients.
    """

    def setUp(self):
        # One running scheduler with a single known job id for the UI to act on.
        self.scheduler = BackgroundScheduler()
        self.scheduler.add_job(lambda: 1,
                               id='a_job',
                               trigger='interval',
                               minutes=10)
        self.scheduler.start()

    def tearDown(self):
        self.scheduler.shutdown()

    def test_webserver_init(self):
        """The UI builds a watcher and validates its constructor arguments."""
        scheduler_server = SchedulerUI(self.scheduler)

        self.assertIsInstance(scheduler_server._scheduler_listener,
                              SchedulerWatcher)

        # operation_timeout must be a positive number.
        self.assertRaises(TypeError,
                          SchedulerUI,
                          self.scheduler,
                          operation_timeout=None)
        self.assertRaises(ValueError,
                          SchedulerUI,
                          self.scheduler,
                          operation_timeout=-1)

        # capabilities must be a mapping, not an arbitrary collection.
        self.assertRaises(TypeError,
                          SchedulerUI,
                          self.scheduler,
                          capabilities=set())

    @patch('flask.Flask.add_url_rule')
    def test_webserver_capabilities(self, mock_add_url_rule):
        """Each capability flag registers its expected number of extra endpoints."""
        SchedulerUI(self.scheduler)

        mock_add_url_rule.assert_called()
        base_call_count = mock_add_url_rule.call_count

        mock_add_url_rule.reset_mock()

        SchedulerUI(self.scheduler, capabilities={'pause_job': True})

        # pause_job registers both the pause and resume endpoints.
        self.assertEqual(2 + base_call_count, mock_add_url_rule.call_count)

        mock_add_url_rule.reset_mock()
        SchedulerUI(self.scheduler, capabilities={'run_job': True})
        self.assertEqual(1 + base_call_count, mock_add_url_rule.call_count)

        mock_add_url_rule.reset_mock()
        SchedulerUI(self.scheduler, capabilities={'pause_scheduler': True})

        self.assertEqual(
            2 + base_call_count, mock_add_url_rule.call_count,
            'Web server should register scheduler pause and resume endpoints')

        mock_add_url_rule.reset_mock()
        SchedulerUI(self.scheduler, capabilities={'stop_scheduler': True})

        self.assertEqual(
            2 + base_call_count, mock_add_url_rule.call_count,
            'Web server should register scheduler stop and start endpoints')

        mock_add_url_rule.reset_mock()
        SchedulerUI(self.scheduler, capabilities={'remove_job': True})

        self.assertEqual(
            1 + base_call_count, mock_add_url_rule.call_count,
            'Web server should register the endpoint to remove a job')

    @patch('flask.Flask.send_static_file')
    def test_index_retrieval(self, mock_send_static_file):
        """Any path falls through to the static index.html (SPA routing)."""
        SchedulerUI(self.scheduler)._index('/any_path')

        mock_send_static_file.assert_called_with('index.html')

    @patch('flask.abort')
    @patch('apscheduler.schedulers.background.BackgroundScheduler.pause')
    def test_scheduler_commands_are_serialized(self, mock_pause, mock_abort):
        """Commands time out and abort while the scheduler lock is held."""
        ui = SchedulerUI(self.scheduler, operation_timeout=0.01)

        with ui._scheduler_lock:
            # If we acquire the lock, every command we send to the web server should be aborted on lock acquire timeout.
            ui._pause_scheduler()

            mock_abort.assert_called()
            mock_pause.assert_not_called()

            ui._resume_scheduler()
            ui._stop_scheduler()
            ui._start_scheduler()
            ui._pause_job('a_job')
            ui._resume_job('a_job')
            ui._run_job('a_job')
            ui._remove_job('a_job')

            # One abort per attempted command (8 commands in total above).
            self.assertEqual(8, mock_abort.call_count)

    @patch('apscheduler.schedulers.background.BackgroundScheduler.pause')
    @patch('apscheduler.schedulers.background.BackgroundScheduler.resume')
    @patch('apscheduler.schedulers.background.BackgroundScheduler.shutdown')
    @patch('apscheduler.schedulers.background.BackgroundScheduler.start')
    def test_scheduler_requests(self, mock_start, mock_shutdown, mock_resume,
                                mock_pause):
        """Scheduler-level UI commands delegate to the scheduler methods."""
        ui = SchedulerUI(self.scheduler)

        ui._pause_scheduler()
        mock_pause.assert_called()

        ui._resume_scheduler()
        mock_resume.assert_called()

        ui._stop_scheduler()
        mock_shutdown.assert_called()

        ui._start_scheduler()
        mock_start.assert_called()

    @patch('apscheduler.schedulers.background.BackgroundScheduler.remove_job')
    @patch('apscheduler.schedulers.background.BackgroundScheduler.resume_job')
    @patch('apscheduler.schedulers.background.BackgroundScheduler.pause_job')
    def test_job_requests(self, mock_pause_job, mock_resume_job,
                          mock_remove_job):
        """Job-level UI commands delegate to the scheduler's job methods."""
        ui = SchedulerUI(self.scheduler)

        ui._pause_job('a_job')
        mock_pause_job.assert_called()

        ui._resume_job('a_job')
        mock_resume_job.assert_called()

        ui._remove_job('a_job')
        mock_remove_job.assert_called()

    @patch('flask.abort')
    def test_missing_jobs_requests_are_aborted(self, mock_abort):
        """Commands targeting unknown or missing job ids are aborted."""
        ui = SchedulerUI(self.scheduler)

        ui._pause_job('non_existing_job')
        ui._resume_job('non_existing_job')
        ui._run_job('non_existing_job')

        self.assertEqual(3, mock_abort.call_count)

        mock_abort.reset_mock()

        response = ui._run_job(job_id=None)
        self.assertEqual(response.status_code, 404,
                         'Requests with missing job_id should fail')

    @patch('flask_socketio.SocketIO.run')
    def test_webserver_start(self, mock_run):
        """start() subscribes the UI to the watcher and launches Socket.IO."""
        ui = SchedulerUI(self.scheduler)

        self.assertEqual(0, len(ui._scheduler_listener.listeners))

        ui.start()

        self.assertEqual(1, len(ui._scheduler_listener.listeners))
        self.assertEqual(ui, ui._scheduler_listener.listeners[0],
                         'Webserver should register itself as listener')

        # SocketIO.run should be called by the web server thread on start.
        mock_run.assert_called_with(ui._web_server, host='0.0.0.0', port=5000)

    @patch('flask_socketio.SocketIO.emit')
    @patch('flask_socketio.SocketIO.run')
    def test_scheduler_events_are_emitted_to_clients(self, mock_run,
                                                     mock_emit):
        """Pause/resume/shutdown/start scheduler events reach clients via emit."""
        ui = SchedulerUI(self.scheduler)
        ui.start()

        mock_run.assert_called()

        # Pause scheduler.
        self.scheduler.pause()
        mock_emit.assert_called_once()
        self.assertEqual('scheduler_paused',
                         mock_emit.call_args[0][1]['event_name'])

        mock_emit.reset_mock()

        # Resume it.
        self.scheduler.resume()
        self.assertEqual('scheduler_resumed',
                         mock_emit.call_args[0][1]['event_name'])

        # Stop it.
        mock_emit.reset_mock()
        self.scheduler.shutdown()
        self.assertEqual('scheduler_shutdown',
                         mock_emit.call_args[0][1]['event_name'])

        # Start it again.
        mock_emit.reset_mock()
        self.scheduler.start()
        self.assertEqual('scheduler_started',
                         mock_emit.call_args[0][1]['event_name'])

    @patch('flask_socketio.SocketIO.emit')
    @patch('flask_socketio.SocketIO.run')
    def test_job_events_are_emitted_to_clients(self, mock_run, mock_emit):
        """Job additions and submissions are emitted with timestamped payloads."""
        ui = SchedulerUI(self.scheduler)
        ui.start()

        self.scheduler.add_job(lambda: time.sleep(0.1),
                               id='waiting_job',
                               name='Waiting job',
                               trigger='interval',
                               seconds=0.2,
                               next_run_time=datetime.now() +
                               timedelta(milliseconds=50))

        mock_emit.assert_called_once()
        emitted_event = mock_emit.call_args[0][1]

        self.assertEqual('job_added', emitted_event['event_name'])
        self.assertIn('properties', emitted_event)
        # event_ts must be a parseable timestamp in the documented format.
        self.assertIsInstance(
            datetime.strptime(emitted_event['event_ts'],
                              '%Y-%m-%d %H:%M:%S.%f'), datetime)

        mock_emit.reset_mock()

        time.sleep(0.1)
        # Job submission event.
        mock_emit.assert_called_once()

    @patch('flask_socketio.SocketIO.emit')
    @patch('flask_socketio.SocketIO.run')
    def test_jobstore_events_are_emitted_to_clients(self, mock_run, mock_emit):
        """Jobstore add/remove events are emitted with name, event and timestamp."""
        ui = SchedulerUI(self.scheduler)
        ui.start()

        # Job store addition.
        self.scheduler.add_jobstore(MemoryJobStore(), alias='in_memory')

        mock_emit.assert_called_once()
        self.assertEqual('jobstore_event', mock_emit.call_args[0][0])

        emitted_event = mock_emit.call_args[0][1]
        self.assertEqual('in_memory', emitted_event['jobstore_name'])
        self.assertEqual('jobstore_added', emitted_event['event_name'])
        self.assertIsInstance(
            datetime.strptime(emitted_event['event_ts'],
                              '%Y-%m-%d %H:%M:%S.%f'), datetime)

        # Job store removal.
        mock_emit.reset_mock()
        self.scheduler.remove_jobstore('in_memory')

        mock_emit.assert_called_once()
        emitted_event = mock_emit.call_args[0][1]
        self.assertEqual('in_memory', emitted_event['jobstore_name'])
        self.assertEqual('jobstore_removed', emitted_event['event_name'])
        self.assertIsInstance(
            datetime.strptime(emitted_event['event_ts'],
                              '%Y-%m-%d %H:%M:%S.%f'), datetime)

    @patch('flask_socketio.SocketIO.emit')
    @patch('flask_socketio.SocketIO.run')
    def test_executors_events_are_emitted_to_clients(self, mock_run,
                                                     mock_emit):
        """Executor add/remove events are emitted with name, event and timestamp."""
        ui = SchedulerUI(self.scheduler)
        ui.start()

        # Executor addition.
        self.scheduler.add_executor(ThreadPoolExecutor(max_workers=1),
                                    alias='thread_pool')

        mock_emit.assert_called_once()
        self.assertEqual('executor_event', mock_emit.call_args[0][0])

        emitted_event = mock_emit.call_args[0][1]
        self.assertEqual('thread_pool', emitted_event['executor_name'])
        self.assertEqual('executor_added', emitted_event['event_name'])
        self.assertIsInstance(
            datetime.strptime(emitted_event['event_ts'],
                              '%Y-%m-%d %H:%M:%S.%f'), datetime)

        # Executor removal.
        mock_emit.reset_mock()
        self.scheduler.remove_executor('thread_pool')

        mock_emit.assert_called_once()
        emitted_event = mock_emit.call_args[0][1]
        self.assertEqual('thread_pool', emitted_event['executor_name'])
        self.assertEqual('executor_removed', emitted_event['event_name'])
        self.assertIsInstance(
            datetime.strptime(emitted_event['event_ts'],
                              '%Y-%m-%d %H:%M:%S.%f'), datetime)

    @patch('flask_socketio.emit')
    def test_connected_clients_get_initialized(self, mock_emit):
        """A freshly connected client receives init_jobs then init_capabilities."""
        ui = SchedulerUI(self.scheduler, capabilities={'run_job': True})
        ui.start(port=5001, host='localhost')

        time.sleep(0.1)

        import socketio

        socket_client = socketio.Client()
        socket_client.connect('ws://localhost:5001')
        socket_client.emit(
            'connected'
        )  # Notify server that we're now connected, as frontend would do.

        time.sleep(0.1)

        self.assertEqual(2, mock_emit.call_count,
                         'emit should be called twice when a client connects')

        first_call = mock_emit.call_args_list[0]
        second_call = mock_emit.call_args_list[1]

        self.assertEqual(
            'init_jobs', first_call[0][0],
            'First argument of the first emit should be event name')

        self.assertEqual(
            'init_capabilities', second_call[0][0],
            'First argument of the second emit should be the init_capabilities event name'
        )
        self.assertEqual(
            ui.capabilities, second_call[0][1],
            "Second argument of init_capabilities should equal the web server's capabilities"
        )

        socket_client.disconnect()