def test_generate_credentials_set_credentials(self):
    """generate_new_credentials() stores the returned credentials on an ok
    response and raises BadRequest when the response is not ok."""
    url = "http://www.google.com"
    # Only token_expires_in is asserted against below.
    payload = {'label': "test", 'scope': "read", 'token_expires_in': "200000"}
    test_vals = {'client_id': 'client-id', 'client_secret': 'client-secret'}
    response_vals = {'ok': True, 'json.return_value': test_vals}
    response = Mock()
    response.configure_mock(**response_vals)
    with patch.object(requests, 'post', return_value=response) as mocker:
        self.access.generate_new_credentials(url, self.label)
        # Credentials from the JSON body are stored on the access object.
        self.assertEqual(self.access.client_id, test_vals['client_id'])
        self.assertEqual(self.access.client_secret, test_vals['client_secret'])
        self.assertEqual(self.access.expiration, payload['token_expires_in'])
    # A non-ok response must raise instead of storing credentials.
    response_vals['ok'] = False
    response.reset_mock()
    response.configure_mock(**response_vals)
    with patch.object(requests, 'post', return_value=response) as mocker:
        with self.assertRaises(BadRequest):
            self.access.generate_new_credentials(url, self.label)
async def test_setup_entry_successful(hass):
    """Test setup entry is successful."""
    entry = Mock()
    entry.data = {'host': '1.2.3.4', 'port': 80, 'api_key': '1234567890ABCDEF'}
    with patch.object(hass, 'async_create_task') as mock_add_job, \
            patch.object(hass, 'config_entries') as mock_config_entries, \
            patch('pydeconz.DeconzSession.async_get_state',
                  return_value=mock_coro(CONFIG)), \
            patch('pydeconz.DeconzSession.start', return_value=True), \
            patch('homeassistant.helpers.device_registry.async_get_registry',
                  return_value=mock_coro(Mock())):
        assert await deconz.async_setup_entry(hass, entry) is True
    # Gateway state stored and one unsubscribe callback registered.
    assert hass.data[deconz.DOMAIN]
    assert hass.data[deconz.DATA_DECONZ_ID] == {}
    assert len(hass.data[deconz.DATA_DECONZ_UNSUB]) == 1
    # One forward-entry-setup task per supported platform, in order.
    assert len(mock_add_job.mock_calls) == 5
    assert len(mock_config_entries.async_forward_entry_setup.mock_calls) == 5
    assert mock_config_entries.async_forward_entry_setup.mock_calls[0][1] == \
        (entry, 'binary_sensor')
    assert mock_config_entries.async_forward_entry_setup.mock_calls[1][1] == \
        (entry, 'light')
    assert mock_config_entries.async_forward_entry_setup.mock_calls[2][1] == \
        (entry, 'scene')
    assert mock_config_entries.async_forward_entry_setup.mock_calls[3][1] == \
        (entry, 'sensor')
    assert mock_config_entries.async_forward_entry_setup.mock_calls[4][1] == \
        (entry, 'switch')
def test_accept_no_display_name(self, mocked_accept, mocked_critical_error): """ Test the accept() method with no display name and auto-combine """ # GIVEN: A form and no text in the display name edit mocked_accept.return_value = True mocked_critical_error.return_value = QtWidgets.QMessageBox.Yes with patch.object(self.form.first_name_edit, 'text') as mocked_first_name_edit_text, \ patch.object(self.form.last_name_edit, 'text') as mocked_last_name_edit_text, \ patch.object(self.form.display_edit, 'text') as mocked_display_edit_text, \ patch.object(self.form.display_edit, 'setText') as mocked_display_edit_setText: mocked_first_name_edit_text.return_value = 'John' mocked_last_name_edit_text.return_value = 'Newton' mocked_display_edit_text.return_value = '' # WHEN: accept() is called result = self.form.accept() # THEN: The result should be false and a critical error displayed assert result is True mocked_critical_error.assert_called_once_with( message='You have not set a display name for the author, combine the first and last names?', parent=self.form, question=True) assert mocked_first_name_edit_text.call_count == 2 assert mocked_last_name_edit_text.call_count == 2 mocked_display_edit_text.assert_called_once_with() mocked_display_edit_setText.assert_called_once_with('John Newton') mocked_accept.assert_called_once_with(self.form)
def test_data_change_shall_notify_all_observers_once(cls):
    """Setting the subject's data triggers exactly one update per observer."""
    dec_patch = patch.object(cls.dec_obs, 'update')
    hex_patch = patch.object(cls.hex_obs, 'update')
    with dec_patch as dec_update, hex_patch as hex_update:
        cls.sub.data = 10
        cls.assertEqual(dec_update.call_count, 1)
        cls.assertEqual(hex_update.call_count, 1)
def test_delegates_simple_and_complex_objects(self):
    """_reconstitute_all_game_objects() feeds the simple-object results into
    the complex-object pass and returns the rebuilt game."""
    fake_simple = [[self.direction], [self.gender], [self.item]]
    fake_game = 'game'
    simple_patch = patch.object(
        GameLoader,
        '_reconstitute_simple_objects',
        return_value=fake_simple,
    )
    complex_patch = patch.object(
        GameLoader,
        '_reconstitute_complex_objects',
        return_value=fake_game,
    )
    with simple_patch as mock_simple, complex_patch as mock_complex:
        rebuilt_game = self.loader._reconstitute_all_game_objects(
            self.serialized_objects
        )
        self.assertEqual(rebuilt_game, fake_game)
        serialized = self.serialized_objects
        mock_simple.assert_called_once_with(
            serialized['directions'],
            serialized['genders'],
            serialized['items']
        )
        mock_complex.assert_called_once_with(
            serialized['game'],
            serialized['player'],
            serialized['people'],
            serialized['locations'],
            serialized['exits'],
            *fake_simple
        )
async def test_import_with_existing_config(hass):
    """Test importing a host with an existing config file."""
    flow = config_flow.HueFlowHandler()
    flow.hass = hass
    # Bridge object the patched helpers will hand back to the import step.
    bridge = Mock()
    bridge.username = '******'
    bridge.config.bridgeid = 'bridge-id-1234'
    bridge.config.name = 'Mock Bridge'
    bridge.host = '0.0.0.0'
    with patch.object(config_flow, '_find_username_from_config',
                      return_value='mock-user'), \
            patch.object(config_flow, 'get_bridge',
                         return_value=mock_coro(bridge)):
        result = await flow.async_step_import({
            'host': '0.0.0.0',
            'path': 'bla.conf'
        })
    # A bridge with stored auth yields a config entry immediately.
    assert result['type'] == 'create_entry'
    assert result['title'] == 'Mock Bridge'
    assert result['data'] == {
        'host': '0.0.0.0',
        'bridge_id': 'bridge-id-1234',
        'username': '******'
    }
async def setup_gateway(hass, mock_gateway, mock_api, generate_psk=generate_psk,
                        known_hosts=None):
    """Load the Tradfri platform with a mock gateway."""
    def request_config(_, callback, description, submit_caption, fields):
        """Mock request_config."""
        # Answer the configurator immediately with a fake security code.
        hass.async_add_job(callback, {'security_code': 'mock'})

    if known_hosts is None:
        known_hosts = {}

    with patch('pytradfri.api.aiocoap_api.APIFactory.generate_psk',
               generate_psk), \
            patch('pytradfri.api.aiocoap_api.APIFactory.request', mock_api), \
            patch('pytradfri.Gateway', return_value=mock_gateway), \
            patch.object(tradfri, 'load_json', return_value=known_hosts), \
            patch.object(hass.components.configurator, 'request_config',
                         request_config):
        await async_setup_component(hass, tradfri.DOMAIN, {
            tradfri.DOMAIN: {
                'host': 'mock-host',
                'allow_tradfri_groups': True
            }
        })
        await hass.async_block_till_done()
async def test_setup_defined_hosts_no_known_auth(hass):
    """Test we initiate config entry if config bridge is not known."""
    with patch.object(hass, 'config_entries') as mock_config_entries, \
            patch.object(hue, 'configured_hosts', return_value=[]):
        mock_config_entries.flow.async_init.return_value = mock_coro()
        assert await async_setup_component(hass, hue.DOMAIN, {
            hue.DOMAIN: {
                hue.CONF_BRIDGES: {
                    hue.CONF_HOST: '0.0.0.0',
                    hue.CONF_FILENAME: 'bla.conf',
                    hue.CONF_ALLOW_HUE_GROUPS: False,
                    hue.CONF_ALLOW_UNREACHABLE: True
                }
            }
        }) is True

    # Flow started for discovered bridge
    assert len(mock_config_entries.flow.mock_calls) == 1
    assert mock_config_entries.flow.mock_calls[0][2]['data'] == {
        'host': '0.0.0.0',
        'path': 'bla.conf',
    }

    # Config stored for domain.
    assert hass.data[hue.DOMAIN] == {
        '0.0.0.0': {
            hue.CONF_HOST: '0.0.0.0',
            hue.CONF_FILENAME: 'bla.conf',
            hue.CONF_ALLOW_HUE_GROUPS: False,
            hue.CONF_ALLOW_UNREACHABLE: True
        }
    }
def test_infer(self, json_loads) -> None:
    """Infer passes the base client flags and adds -recursive-infer only
    when arguments.recursive is set."""
    arguments = mock_arguments()
    arguments.recursive = False
    arguments.strict = False
    configuration = mock_configuration()
    with patch.object(commands.Command, '_call_client') as call_client:
        Infer(arguments, configuration, source_directory='.').run()
        call_client.assert_called_once_with(
            command=commands.Check.NAME,
            flags=['-show-error-traces', '-project-root', '.', '-infer'])
    with patch.object(commands.Command, '_call_client') as call_client:
        # Recursive mode appends the extra flag.
        arguments.recursive = True
        Infer(arguments, configuration, source_directory='.').run()
        call_client.assert_called_once_with(
            command=commands.Check.NAME,
            flags=[
                '-show-error-traces',
                '-project-root',
                '.',
                '-infer',
                '-recursive-infer',
            ])
def test_get_index_no_platform_with_offline_cache(self):
    """get_index() must serve records consistent with the cache when offline,
    without touching the network."""
    import conda.core.subdir_data
    with env_var('CONDA_REPODATA_TIMEOUT_SECS', '0',
                 stack_callback=conda_tests_ctxt_mgmt_def_pol):
        with patch.object(conda.core.subdir_data,
                          'read_mod_and_etag') as read_mod_and_etag:
            read_mod_and_etag.return_value = {}
            channel_urls = ('https://repo.anaconda.com/pkgs/pro',)
            with env_var('CONDA_REPODATA_TIMEOUT_SECS', '0',
                         stack_callback=conda_tests_ctxt_mgmt_def_pol):
                this_platform = context.subdir
                index = get_index(channel_urls=channel_urls, prepend=False)
                # Every record must match the current platform.
                for dist, record in iteritems(index):
                    assert platform_in_record(this_platform, record), \
                        (this_platform, record.url)

    # When unknown=True (which is implicitly engaged when context.offline is
    # True), there may be additional items in the cache that are included in
    # the index. But where those items coincide with entries already in the
    # cache, they must not change the record in any way. TODO: add one or
    # more packages to the cache so these tests affirmatively exercise
    # supplement_index_from_cache on CI?
    for unknown in (None, False, True):
        with env_var('CONDA_OFFLINE', 'yes',
                     stack_callback=conda_tests_ctxt_mgmt_def_pol):
            with patch.object(conda.core.subdir_data,
                              'fetch_repodata_remote_request') as remote_request:
                index2 = get_index(channel_urls=channel_urls, prepend=False,
                                   unknown=unknown)
                assert all(index2.get(k) == rec for k, rec in iteritems(index))
                assert unknown is not False or len(index) == len(index2)
                # Offline mode must never hit the network.
                assert remote_request.call_count == 0

    for unknown in (False, True):
        with env_var('CONDA_REPODATA_TIMEOUT_SECS', '0',
                     stack_callback=conda_tests_ctxt_mgmt_def_pol):
            with patch.object(conda.core.subdir_data,
                              'fetch_repodata_remote_request') as remote_request:
                # A 304 means the cached repodata is still valid.
                remote_request.side_effect = Response304ContentUnchanged()
                index3 = get_index(channel_urls=channel_urls, prepend=False,
                                   unknown=unknown)
                assert all(index3.get(k) == rec for k, rec in iteritems(index))
                assert unknown or len(index) == len(index3)
def test_auto_indent(self):
    """on_auto_indent() splits the line at the cursor and re-applies the
    current line's leading indentation on the new line."""
    # NOTE(review): the whitespace inside the script literals and expected
    # values was destroyed when this file's formatting was mangled (the
    # collapsed ''' a ''' literal contains no newlines, contradicting the
    # expected values which do). Reconstructed here with a 4-space indent,
    # the only width consistent with the cursor positions — confirm against
    # the original file.
    script = '\na\n'
    wnd = self._getwnd(script)
    with patch.object(wnd.cursor, 'pos', new=2):
        wnd.document.mode.on_auto_indent(wnd)
        # Unindented line: a bare newline is inserted.
        assert wnd.document.gettext(0, wnd.document.endpos()) == '\na\n\n'
    script = '\n    a\n'
    wnd = self._getwnd(script)
    with patch.object(wnd.cursor, 'pos', new=6):
        wnd.document.mode.on_auto_indent(wnd)
        # Cursor at end of the indented line: the indent is carried over.
        assert wnd.document.gettext(0, wnd.document.endpos()
                                    ) == '\n    a\n    \n'
    script = '\n    a\n'
    wnd = self._getwnd(script)
    with patch.object(wnd.cursor, 'pos', new=5):
        wnd.document.mode.on_auto_indent(wnd)
        # Cursor before 'a': the text after the cursor moves to the new
        # indented line; the first line's trailing whitespace is stripped.
        assert wnd.document.gettext(0, wnd.document.endpos()
                                    ) == '\n\n    a\n'
def test_attribute_dirty(self):
    """dirty() invalidates cached attributes so the next get() pulls again;
    dirtying a subset re-pulls only that subset.

    (Removed the unused local ``second_items`` the original declared.)
    """
    first_items = [1, 2, 3]
    third_items = [1, 2, 3, 4]
    # Start from a cold cache for every item involved.
    for item in third_items:
        attribute_cache.delete('attribute:%s' % item)
    with patch.object(AttributeContainer, 'pull',
                      side_effect=_attribute_pull) as mock_pull:
        # first execution should run pull
        self.assertEqual(AttributeContainer(third_items).get(),
                         _attribute_pull(third_items))
        mock_pull.assert_called_once_with(third_items)
        # second execution should not run pull again
        self.assertEqual(AttributeContainer(third_items).get(),
                         _attribute_pull(third_items))
        mock_pull.assert_called_once_with(third_items)
    with patch.object(AttributeContainer, 'pull',
                      side_effect=_attribute_pull) as mock_pull:
        # dirtying the cache means we should call pull again
        AttributeContainer(third_items).dirty()
        self.assertEqual(AttributeContainer(third_items).get(),
                         _attribute_pull(third_items))
        mock_pull.assert_called_once_with(third_items)
        # second execution should not run pull again
        self.assertEqual(AttributeContainer(third_items).get(),
                         _attribute_pull(third_items))
        mock_pull.assert_called_once_with(third_items)
    with patch.object(AttributeContainer, 'pull',
                      side_effect=_attribute_pull) as mock_pull:
        # dirtying subset of items should call pull for dirtied items
        AttributeContainer(first_items).dirty()
        self.assertEqual(AttributeContainer(third_items).get(),
                         _attribute_pull(third_items))
        mock_pull.assert_called_once_with(first_items)
def test_call_decorated_kwargs_on_trait_change():
    """test calling @interact(foo=bar) decorated functions"""
    d = {}
    with patch.object(interaction, 'display', record_display):
        @interact(a='kwarg')
        def foo(a='default'):
            d['a'] = a
            return a
    # Decorating displays the widget once.
    nt.assert_equal(len(displayed), 1)
    w = displayed[0].children[0]
    check_widget(w,
                 cls=widgets.Text,
                 value='kwarg',
                 )
    # test calling the function directly
    a = foo('hello')
    nt.assert_equal(a, 'hello')
    nt.assert_equal(d['a'], 'hello')
    # test that setting trait values calls the function
    with patch.object(interaction, 'display', record_display):
        w.value = 'called'
    nt.assert_equal(d['a'], 'called')
    nt.assert_equal(len(displayed), 2)
    # the most recently displayed object equals the new widget value
    nt.assert_equal(w.value, displayed[-1])
def test_provider_shall_update_affected_subscribers_with_published_subscription(cls):
    """Only subscribers whose subscribed messages were published get run()."""
    pro = Provider()
    pub = Publisher(pro)
    sub1 = Subscriber('sub 1 name', pro)
    sub1.subscribe('sub 1 msg 1')
    sub1.subscribe('sub 1 msg 2')
    sub2 = Subscriber('sub 2 name', pro)
    sub2.subscribe('sub 2 msg 1')
    sub2.subscribe('sub 2 msg 2')
    with patch.object(sub1, 'run') as mock_subscriber1_run,\
            patch.object(sub2, 'run') as mock_subscriber2_run:
        # Nothing published yet: update() must not run any subscriber.
        pro.update()
        cls.assertEqual(mock_subscriber1_run.call_count, 0)
        cls.assertEqual(mock_subscriber2_run.call_count, 0)
    pub.publish('sub 1 msg 1')
    pub.publish('sub 1 msg 2')
    pub.publish('sub 2 msg 1')
    pub.publish('sub 2 msg 2')
    with patch.object(sub1, 'run') as mock_subscriber1_run,\
            patch.object(sub2, 'run') as mock_subscriber2_run:
        pro.update()
        # Each subscriber receives exactly its own published messages.
        expected_sub1_calls = [call('sub 1 msg 1'), call('sub 1 msg 2')]
        mock_subscriber1_run.assert_has_calls(expected_sub1_calls)
        expected_sub2_calls = [call('sub 2 msg 1'), call('sub 2 msg 2')]
        mock_subscriber2_run.assert_has_calls(expected_sub2_calls)
def test_attribute(self):
    """get() pulls only uncached items and skips pull entirely when every
    requested item is already cached."""
    first_items = [1, 2, 3]
    second_items = [1, 2, 4]
    third_items = [1, 2, 3, 4]
    # Start from a cold cache for every item involved.
    for item in third_items:
        attribute_cache.delete('attribute:%s' % item)
    with patch.object(AttributeContainer, 'pull',
                      side_effect=_attribute_pull) as mock_pull:
        # first execution should run pull
        self.assertEqual(AttributeContainer(first_items).get(),
                         _attribute_pull(first_items))
        mock_pull.assert_called_once_with(first_items)
        # second execution should not run pull again
        self.assertEqual(AttributeContainer(first_items).get(),
                         _attribute_pull(first_items))
        mock_pull.assert_called_once_with(first_items)
    with patch.object(AttributeContainer, 'pull',
                      side_effect=_attribute_pull) as mock_pull:
        # running with second items should only call pull on 4
        self.assertEqual(AttributeContainer(second_items).get(),
                         _attribute_pull(second_items))
        mock_pull.assert_called_once_with([4])
        # second execution should not run pull again
        self.assertEqual(AttributeContainer(second_items).get(),
                         _attribute_pull(second_items))
        mock_pull.assert_called_once_with([4])
    with patch.object(AttributeContainer, 'pull',
                      side_effect=_attribute_pull) as mock_pull:
        # running with third items should not call pull
        self.assertEqual(AttributeContainer(third_items).get(),
                         _attribute_pull(third_items))
        self.assertEqual(mock_pull.called, False)
def setUp(self):
    """Point config at a temp file and patch models/HTTP for every test;
    patch.stopall is registered as cleanup."""
    # Cleanup
    self.addCleanup(patch.stopall)
    # Data packages
    self.data_dir = os.path.join(
        os.path.dirname(__file__), '..', '..', 'examples')
    self.dp_valid = os.path.join(self.data_dir, 'dp-valid')
    self.dp_invalid = os.path.join(self.data_dir, 'dp-invalid')
    # Config path
    _, self.herepath = tempfile.mkstemp()
    patch.object(services.config, 'HEREPATH', self.herepath).start()
    # Patch validate model
    self.ValidateModel = patch.object(actions, 'ValidateModel').start()
    # Patch http requests
    self.requests = patch.object(upload, 'requests').start()
    # Canned server reply describing the upload slot for datapackage.json.
    self.requests.post.return_value.json.return_value = {
        'filedata': {
            'datapackage.json': {
                'name': 'datapackage.json',
                'length': 100,
                'md5': 'md5',
                'upload_url': 'url',
                'upload_query': {'key': 'value'},
            },
        },
    }
    self.FuturesSession = patch.object(upload, 'FuturesSession').start()
def test_tooltip(self):
    """help_event() shows a tooltip listing every selected data point."""
    data = self.iris
    self.send_signal(self.widget.Inputs.data, data)
    event = MagicMock()
    with patch.object(self.widget.imageplot.plot.vb, "mapSceneToView"), \
            patch.object(QToolTip, "showText") as show_text:
        sel = np.zeros(len(data), dtype="bool")
        sel[3] = 1  # a single instance
        with patch.object(self.widget.imageplot, "_points_at_pos",
                          return_value=(sel, 2)):
            self.assertTrue(self.widget.imageplot.help_event(event))
            (_, text), _ = show_text.call_args
            self.assertIn("iris = {}".format(data[3, "iris"]), text)
            self.assertIn("value = {}".format(data[3, 2]), text)
            # Exactly one instance is reported.
            self.assertEqual(1, text.count("iris ="))
        sel[51] = 1  # add a data point
        with patch.object(self.widget.imageplot, "_points_at_pos",
                          return_value=(sel, 2)):
            self.assertTrue(self.widget.imageplot.help_event(event))
            (_, text), _ = show_text.call_args
            self.assertIn("iris = {}".format(data[3, "iris"]), text)
            self.assertIn("iris = {}".format(data[51, "iris"]), text)
            self.assertIn("value = {}".format(data[3, 2]), text)
            self.assertIn("value = {}".format(data[51, 2]), text)
            # Both instances are reported.
            self.assertEqual(2, text.count("iris ="))
def test_prunePaths(self):
    """prunePaths(workdir) removes workdir-prefixed entries from the path
    variables and drops *_VERSION variables other than ALIBUILD_VERSION;
    entries under an unrelated prefix are left untouched."""
    fake_env = {
        "PATH": "/sw/bin:/usr/local/bin",
        "LD_LIBRARY_PATH": "/sw/lib",
        "DYLD_LIBRARY_PATH": "/sw/lib",
        "ALIBUILD_VERSION": "v1.0.0",
        "ROOT_VERSION": "v1.0.0"
    }
    # Pristine copy for the unrelated-prefix case; the original maintained
    # a second hand-written duplicate of the same literal.
    fake_env_copy = dict(fake_env)
    with patch.object(os, "environ", fake_env):
        prunePaths("/sw")
        # /sw entries removed, others kept; ROOT_VERSION dropped.
        self.assertTrue(not "ROOT_VERSION" in fake_env)
        self.assertTrue(fake_env["PATH"] == "/usr/local/bin")
        self.assertTrue(fake_env["LD_LIBRARY_PATH"] == "")
        self.assertTrue(fake_env["DYLD_LIBRARY_PATH"] == "")
        self.assertTrue(fake_env["ALIBUILD_VERSION"] == "v1.0.0")
    with patch.object(os, "environ", fake_env_copy):
        prunePaths("/foo")
        # Unrelated prefix: paths untouched, but ROOT_VERSION still dropped.
        self.assertTrue(not "ROOT_VERSION" in fake_env_copy)
        self.assertTrue(fake_env_copy["PATH"] == "/sw/bin:/usr/local/bin")
        self.assertTrue(fake_env_copy["LD_LIBRARY_PATH"] == "/sw/lib")
        self.assertTrue(fake_env_copy["DYLD_LIBRARY_PATH"] == "/sw/lib")
        self.assertTrue(fake_env_copy["ALIBUILD_VERSION"] == "v1.0.0")
def test_report(self):
    """send_report() reports items/data/table sections per widget state."""
    widget = self.widget
    widget.k = 4
    widget.optimize_k = False
    with patch.object(widget, "report_items") as report_items, \
            patch.object(widget, "report_data") as report_data, \
            patch.object(widget, "report_table") as report_table, \
            patch.object(widget, "selected_row", new=Mock(return_value=42)):
        widget.send_report()
        # No data: only the items section is reported.
        items = report_items.call_args[0][0]
        self.assertEqual(items[0], ("Number of clusters", 4))
        self.assertEqual(items[1][0], "Optimization")
        self.assertFalse(report_data.called)
        self.assertFalse(report_table.called)

        # With data (but no optimization) the data section is added.
        widget.data = data = Mock()
        widget.send_report()
        self.assertIs(report_data.call_args[0][1], data)
        self.assertFalse(report_table.called)

        report_data.reset_mock()
        report_items.reset_mock()
        widget.k_from, widget.k_to = 2, 3
        widget.optimize_k = True
        widget.send_report()
        items = report_items.call_args[0][0]
        # 44 presumably derives from mocked selected_row() (42) plus
        # k_from (2) — TODO confirm against the widget implementation.
        self.assertEqual(items[0], ("Number of clusters", 44))
        self.assertIs(report_data.call_args[0][1], data)
        self.assertIs(report_table.call_args[0][1], widget.table_view)
def test_non_existing_dep(self):
    """A failing dependency install destroys the venv and raises FadesError."""
    requested = {
        REPO_PYPI: [get_req('dep1 == 1000')]
    }
    interpreter = 'python3'
    is_current = True
    options = {'virtualenv_options': [], 'pyvenv_options': [], }
    pip_options = []
    with patch.object(envbuilder._FadesEnvBuilder, 'create_env') as mock_create:
        with patch.object(envbuilder, 'PipManager') as mock_mgr_c:
            mock_create.return_value = ('env_path', 'env_bin_path', 'pip_installed')
            # Manager whose install() always fails.
            mock_mgr_c.return_value = self.FailInstallManager()
            with patch.object(envbuilder, 'destroy_venv', spec=True) as mock_destroy:
                with self.assertRaises(FadesError) as cm:
                    envbuilder.create_venv(
                        requested, interpreter, is_current, options, pip_options)
                self.assertEqual(str(cm.exception), 'Dependency installation failed')
                # The half-built venv must be cleaned up.
                mock_destroy.assert_called_once_with('env_path')
    self.assertLoggedDebug("Installation Step failed, removing virtualenv")
def test_create_simple(self):
    """create_venv() returns the env data plus what actually got installed."""
    requested = {
        REPO_PYPI: [get_req('dep1 == v1'), get_req('dep2 == v2')]
    }
    interpreter = 'python3'
    is_current = True
    options = {"virtualenv_options": [], "pyvenv_options": [], }
    pip_options = []
    with patch.object(envbuilder._FadesEnvBuilder, 'create_env') as mock_create:
        with patch.object(envbuilder, 'PipManager') as mock_mgr_c:
            mock_create.return_value = ('env_path', 'env_bin_path', 'pip_installed')
            mock_mgr_c.return_value = fake_manager = self.FakeManager()
            # What the fake manager will report as installed.
            fake_manager.really_installed = {'dep1': 'v1', 'dep2': 'v2'}
            venv_data, installed = envbuilder.create_venv(requested, interpreter,
                                                          is_current, options,
                                                          pip_options)
    self.assertEqual(venv_data, {
        'env_bin_path': 'env_bin_path',
        'env_path': 'env_path',
        'pip_installed': 'pip_installed',
    })
    self.assertDictEqual(installed, {
        REPO_PYPI: {
            'dep1': 'v1',
            'dep2': 'v2',
        }
    })
async def test_cloudhook_app_created_then_show_wait_form( hass, app, app_oauth_client, smartthings_mock): """Test SmartApp is created with a cloudhoko and shows wait form.""" # Unload the endpoint so we can reload it under the cloud. await smartapp.unload_smartapp_endpoint(hass) mock_async_active_subscription = Mock(return_value=True) mock_create_cloudhook = Mock(return_value=mock_coro( return_value="http://cloud.test")) with patch.object(cloud, 'async_active_subscription', new=mock_async_active_subscription), \ patch.object(cloud, 'async_create_cloudhook', new=mock_create_cloudhook): await smartapp.setup_smartapp_endpoint(hass) flow = SmartThingsFlowHandler() flow.hass = hass smartthings = smartthings_mock.return_value smartthings.apps.return_value = mock_coro(return_value=[]) smartthings.create_app.return_value = \ mock_coro(return_value=(app, app_oauth_client)) smartthings.update_app_settings.return_value = mock_coro() smartthings.update_app_oauth.return_value = mock_coro() result = await flow.async_step_user({'access_token': str(uuid4())}) assert result['type'] == data_entry_flow.RESULT_TYPE_FORM assert result['step_id'] == 'wait_install' assert mock_create_cloudhook.call_count == 1
def test_delete_blocks(self):
    """Blocks are only removable from the chain head downward; removal
    updates max_depth/head, and re-adding the blocks restores the chain."""
    blocks = self.add_blocks(self.add_operations())
    bc = get_blockchain()
    with patch.object(BlockChain, '_get_new_blocks', return_value=blocks):
        bc.update_blocks()
    # A block another block builds on cannot be removed.
    with self.assertRaisesRegex(Block.ChainOperationBlockedError,
                                "can't remove: blocked by another block"):
        blocks[4].remove()
    self.assertEqual(bc.max_depth, 5)
    self.assertEqual(bc.head, blocks[5].id)
    # Remove blocks top-down, checking depth and the possible heads after
    # each removal.
    for block_to_remove, max_depth, heads in (
            (blocks[5], 5, [blocks[6].id]),
            (blocks[6], 4, [blocks[3].id, blocks[4].id]),
            (blocks[4], 4, [blocks[3].id]),
            (blocks[3], 3, [blocks[2].id]),
            (blocks[2], 2, [blocks[1].id]),
            (blocks[1], 1, [blocks[0].id]),
            (blocks[0], 0, [BlockRev().id])
    ):
        block_to_remove.remove()
        self.assertEqual(bc.max_depth, max_depth)
        self.assertIn(bc.head, heads)
    # Re-adding all blocks restores the original chain state.
    with patch.object(BlockChain, '_get_new_blocks', return_value=blocks):
        bc.update_blocks()
    self.assertEqual(bc.max_depth, 5)
    self.assertEqual(bc.head, blocks[5].id)
    self.assertCountEqual(
        [op.uuid for op in blocks[0].operations + blocks[2].operations[1:2]],
        Identifier.get_uuid_list())
async def test_setup_with_discovery_no_known_auth(hass, aioclient_mock):
    """Test discovering a bridge and not having known auth."""
    # NUPNP discovery returns a single bridge.
    aioclient_mock.get(hue.API_NUPNP, json=[
        {
            'internalipaddress': '0.0.0.0',
            'id': 'abcd1234'
        }
    ])
    with patch.object(hass, 'config_entries') as mock_config_entries, \
            patch.object(hue, 'configured_hosts', return_value=[]):
        mock_config_entries.flow.async_init.return_value = mock_coro()
        assert await async_setup_component(hass, hue.DOMAIN, {
            hue.DOMAIN: {}
        }) is True

    # Flow started for discovered bridge
    assert len(mock_config_entries.flow.mock_calls) == 1
    assert mock_config_entries.flow.mock_calls[0][2]['data'] == {
        'host': '0.0.0.0',
        'path': '.hue_abcd1234.conf',
    }

    # Config stored for domain.
    assert hass.data[hue.DOMAIN] == {
        '0.0.0.0': {
            hue.CONF_HOST: '0.0.0.0',
            hue.CONF_FILENAME: '.hue_abcd1234.conf',
            hue.CONF_ALLOW_HUE_GROUPS: hue.DEFAULT_ALLOW_HUE_GROUPS,
            hue.CONF_ALLOW_UNREACHABLE: hue.DEFAULT_ALLOW_UNREACHABLE,
        }
    }
def wrapper(*args, **kargs):
    """Run the wrapped coroutine with both fetch entry points stubbed to
    resolve immediately with the canned response."""
    resolved = Future()
    resolved.set_result(self._response)
    with patch.object(AsyncHTTPClient, "fetch", return_value=resolved), \
            patch.object(Client, "fetch", return_value=resolved):
        yield coroutine(*args, **kargs)
def test_lock_get_usercode_service(hass, mock_openzwave):
    """Test the zwave lock get_usercode service."""
    node = MockNode(node_id=12)
    value0 = MockValue(data=None, node=node, index=0)
    value1 = MockValue(data='1234', node=node, index=1)
    yield from zwave.async_setup_platform(
        hass, {}, MagicMock())
    node.get_values.return_value = {
        value0.value_id: value0,
        value1.value_id: value1,
    }
    with patch.object(zwave.zwave, 'NETWORK') as mock_network:
        with patch.object(zwave, '_LOGGER') as mock_logger:
            mock_network.nodes = {
                node.node_id: node
            }
            # Request the usercode stored in slot 1.
            yield from hass.services.async_call(
                zwave.DOMAIN, zwave.SERVICE_GET_USERCODE, {
                    const.ATTR_NODE_ID: node.node_id,
                    zwave.ATTR_CODE_SLOT: 1,
                })
            yield from hass.async_block_till_done()
            # This service only seems to write to the log
            assert mock_logger.info.called
            assert len(mock_logger.info.mock_calls) == 1
            assert mock_logger.info.mock_calls[0][1][2] == '1234'
def test_migrate_one(td):
    """migrate_one() routes files to migrate_file, directories to
    migrate_dir, and reports failure for a missing source."""
    src = pjoin(td, 'src')
    srcdir = pjoin(td, 'srcdir')
    dst = pjoin(td, 'dst')
    dstdir = pjoin(td, 'dstdir')
    touch(src, 'test file')
    touch(pjoin(srcdir, 'f'), 'test dir file')
    seen = {}

    def spy_file(src, dst):
        seen['migrate_file'] = True
        return migrate_file(src, dst)

    def spy_dir(src, dst):
        seen['migrate_dir'] = True
        return migrate_dir(src, dst)

    with patch.object(migrate_mod, 'migrate_file', spy_file), \
            patch.object(migrate_mod, 'migrate_dir', spy_dir):
        # A plain file goes through migrate_file only.
        assert migrate_one(src, dst)
        assert seen == {'migrate_file': True}
        seen.clear()
        # A directory goes through migrate_dir only.
        assert migrate_one(srcdir, dstdir)
        assert seen == {'migrate_dir': True}
        seen.clear()
        # A nonexistent source migrates nothing.
        assert not migrate_one(pjoin(td, 'dne'), dst)
        assert seen == {}
def test_request(self, client):
    """request() hits the endpoint with auth headers and maps the JSON body
    through the supplied ObjectMapper."""
    response = Mock(spec=Response)
    with patch.object(requests, "request", return_value=response):
        mapper = ObjectMapper(GitterClient.Room)
        with patch.object(response.content, "decode",
                          return_value=json.dumps(room_info)):
            ret = client.request("GET",
                                 "http://localhost/rooms",
                                 ObjectMapper(GitterClient.Room),
                                 {'param': "spam"},
                                 {'body': "ham"})
        method, endpoint = requests.request.call_args[0]
        kwargs = requests.request.call_args[1]
        # Each JSON object is mapped to a Room.
        assert_that(ret).is_equal_to([mapper.map(obj) for obj in room_info])
        assert_that(method).is_equal_to("GET")
        assert_that(endpoint).is_equal_to("http://localhost/rooms")
        assert_that(kwargs['params']).is_equal_to({'param': "spam"})
        assert_that(kwargs['json']).is_equal_to({'body': "ham"})
        # Auth and content-negotiation headers must always be present.
        assert_that(kwargs['headers']['Authorization']).is_not_empty()
        assert_that(kwargs['headers']['Accept']).is_not_empty()
        assert_that(kwargs['headers']['Content-Type']).is_not_empty()
def test_install_without_pip(self):
    """install() bootstraps pip first when the venv lacks it, then installs."""
    mgr = PipManager('/usr/bin', pip_installed=False)
    with patch.object(helpers, 'logged_exec') as mock_exec, \
            patch.object(mgr, '_brute_force_install_pip') as mock_bootstrap:
        mgr.install('foo')
        self.assertEqual(mock_bootstrap.call_count, 1)
        mock_exec.assert_called_with(['/usr/bin/pip', 'install', 'foo'])
def test_use_cache(self):
    """Re-applying with an overlapping k range only computes the missing k,
    and a second identical apply computes nothing."""
    widget = self.widget
    widget.k = 3
    widget.optimize_k = False
    self.send_signal(self.widget.Inputs.data, self.iris[:50])
    widget.unconditional_apply()
    widget.k_from = 2
    widget.k_to = 3
    widget.optimize_k = True
    with patch.object(widget, "_compute_clustering",
                      wraps=widget._compute_clustering) as compute, \
            patch.object(widget, "progressBar",
                         wraps=widget.progressBar) as progressBar:
        widget.unconditional_apply()
        # k=3 is cached from the first run; only k=2 gets computed.
        self.assertEqual(compute.call_count, 1)
        compute.assert_called_with(2)
        self.assertEqual(progressBar.call_count, 1)
        progressBar.assert_called_with(1)

        compute.reset_mock()
        progressBar.reset_mock()
        widget.unconditional_apply()
        # compute.assert_not_called unfortunately didn't exist before 3.5
        self.assertFalse(compute.called)
        self.assertFalse(progressBar.called)
def test_start_flags(self, get_directories_to_analyze):
    """_flags() reflects the watchman/terminal/filter/saved-state options."""
    # Check start with watchman.
    arguments = mock_arguments()
    configuration = mock_configuration(version_hash="hash")
    command = commands.Start(arguments, configuration, AnalysisDirectory("."))
    self.assertEqual(
        command._flags(),
        [
            "-project-root",
            ".",
            "-use-watchman",
            "-workers",
            "5",
            "-typeshed",
            "stub",
            "-expected-binary-version",
            "hash",
            "-search-path",
            "path1,path2",
        ],
    )

    # no_watchman drops -use-watchman; terminal adds -terminal.
    arguments = mock_arguments(no_watchman=True, terminal=True)
    configuration = mock_configuration(version_hash="hash")
    command = commands.Start(arguments, configuration, AnalysisDirectory("."))
    self.assertEqual(
        command._flags(),
        [
            "-project-root",
            ".",
            "-terminal",
            "-workers",
            "5",
            "-typeshed",
            "stub",
            "-expected-binary-version",
            "hash",
            "-search-path",
            "path1,path2",
        ],
    )

    # Check filter directories.
    arguments = mock_arguments(no_watchman=True)
    configuration = mock_configuration(version_hash="hash")
    command = commands.Start(arguments, configuration, AnalysisDirectory("."))
    with patch.object(command, "_get_directories_to_analyze") as get_directories:
        get_directories.return_value = {"a", "b"}
        self.assertEqual(
            command._flags(),
            [
                "-project-root",
                ".",
                "-filter-directories",
                "a;b",
                "-workers",
                "5",
                "-typeshed",
                "stub",
                "-expected-binary-version",
                "hash",
                "-search-path",
                "path1,path2",
            ],
        )

    # Check save-initial-state-to.
    arguments = mock_arguments(save_initial_state_to="/tmp")
    command = commands.Start(arguments, configuration, AnalysisDirectory("."))
    self.assertEqual(
        command._flags(),
        [
            "-project-root",
            ".",
            "-use-watchman",
            "-save-initial-state-to",
            "/tmp",
            "-workers",
            "5",
            "-typeshed",
            "stub",
            "-expected-binary-version",
            "hash",
            "-search-path",
            "path1,path2",
        ],
    )

    # Check load-initial-state-from.
    arguments = mock_arguments(
        load_initial_state_from="/tmp/pyre_shared_memory",
        changed_files_path="/tmp/changed_files",
    )
    command = commands.Start(arguments, configuration, AnalysisDirectory("."))
    self.assertEqual(
        command._flags(),
        [
            "-project-root",
            ".",
            "-use-watchman",
            "-load-state-from",
            "/tmp/pyre_shared_memory",
            "-changed-files-path",
            "/tmp/changed_files",
            "-workers",
            "5",
            "-typeshed",
            "stub",
            "-expected-binary-version",
            "hash",
            "-search-path",
            "path1,path2",
        ],
    )

    # Both changed-files-path and load-initial-state-from must be not-None.
    arguments = mock_arguments(changed_files_path="/tmp/changed_files")
    command = commands.Start(arguments, configuration, AnalysisDirectory("."))
    self.assertEqual(
        command._flags(),
        [
            "-project-root",
            ".",
            "-use-watchman",
            "-workers",
            "5",
            "-typeshed",
            "stub",
            "-expected-binary-version",
            "hash",
            "-search-path",
            "path1,path2",
        ],
    )

    # NOTE(review): this stanza repeats the previous one verbatim (its
    # original "# Check load-initial-state-from." label was wrong) — it
    # looks like a copy-paste leftover; confirm whether it can be dropped.
    arguments = mock_arguments(changed_files_path="/tmp/changed_files")
    command = commands.Start(arguments, configuration, AnalysisDirectory("."))
    self.assertEqual(
        command._flags(),
        [
            "-project-root",
            ".",
            "-use-watchman",
            "-workers",
            "5",
            "-typeshed",
            "stub",
            "-expected-binary-version",
            "hash",
            "-search-path",
            "path1,path2",
        ],
    )
def test_user_cookie(self):
    """get_current_user() returns the JSON-decoded secure cookie value."""
    with patch.object(BaseHandler, 'get_secure_cookie') as cookie_mock:
        cookie_mock.return_value = '"success"'
        request_handler = BaseHandler(self.application, self.request)
        self.assertEqual('success', request_handler.get_current_user())
async def test_bad_formatted_scene(hass, hass_client, setup_scene):
    """Test that we handle scene without ID."""
    with patch.object(config, "SECTIONS", ["scene"]):
        await async_setup_component(hass, "config", {})

    client = await hass_client()

    # Stored config: first scene is malformed (missing its ID).
    orig_data = [
        {
            # No ID
            "entities": {
                "light.bedroom": "on"
            }
        },
        {
            "id": "light_off"
        },
    ]

    def mock_read(path):
        """Mock reading data."""
        return orig_data

    written = []

    def mock_write(path, data):
        """Mock writing data."""
        written.append(data)

    with patch("homeassistant.components.config._read", mock_read), patch(
            "homeassistant.components.config._write", mock_write), patch(
            "homeassistant.config.async_hass_config_yaml", return_value={}):
        resp = await client.post(
            "/api/config/scene/config/light_off",
            data=json.dumps({
                "id": "light_off",
                "name": "Lights off",
                "entities": {
                    "light.bedroom": {
                        "state": "off"
                    }
                },
            }),
        )

    assert resp.status == HTTPStatus.OK
    result = await resp.json()
    assert result == {"result": "ok"}

    # Verify ID added to orig_data
    assert "id" in orig_data[0]

    # The targeted scene was updated in place.
    assert orig_data[1] == {
        "id": "light_off",
        "name": "Lights off",
        "entities": {
            "light.bedroom": {
                "state": "off"
            }
        },
    }
def test_returns_none_if_invalid_ref(self, web_track_mock):
    """No track is built when the ref helper rejects the web data."""
    with patch.object(translator, "web_to_track_ref", return_value=None):
        result = translator.web_to_track(web_track_mock)
    assert result is None
def test_returns_none_if_invalid_ref(self, web_artist_mock):
    """No artist is built when the artist-ref helper rejects the data.

    Bug fix: this previously called ``translator.to_playlist``, which does
    not consult ``web_to_artist_ref`` — the patch had no effect, so the
    test asserted nothing about the patched helper. Exercise
    ``web_to_artist`` instead, mirroring the sibling track/playlist tests.
    """
    with patch.object(translator, "web_to_artist_ref", return_value=None):
        assert translator.web_to_artist(web_artist_mock) is None
def test_returns_none_if_invalid(self, web_playlist_mock):
    """No playlist ref is built when the data fails validation."""
    with patch.object(translator, "valid_web_data", return_value=False):
        outcome = translator.to_playlist_ref(web_playlist_mock)
    assert outcome is None
def test_start(self, _daemonize, get_directories_to_analyze, lock_file) -> None:
    """Exercise `commands.Start`: flag construction, lock-file EAGAIN
    retries, propagation of other OS errors, and shared-directory prep."""
    arguments = mock_arguments()
    arguments.terminal = False
    configuration = mock_configuration()
    configuration.version_hash = "hash"
    configuration.number_of_workers = 5
    analysis_directory = AnalysisDirectory(".")
    # Check start without watchman.
    with patch("builtins.open", mock_open()), patch.object(
            commands.Command, "_call_client") as call_client:
        arguments.no_watchman = True
        command = commands.Start(arguments, configuration, analysis_directory)
        self.assertEqual(
            command._flags(),
            [
                "-project-root", ".",
                "-workers", "5",
                "-typeshed", "stub",
                "-expected-binary-version", "hash",
                "-search-path", "path1,path2",
            ],
        )
        command.run()
        call_client.assert_called_once_with(command=commands.Start.NAME)
    analysis_directory = AnalysisDirectory(".")

    # This magic is necessary to test, because the inner call to ping a server is
    # always non-blocking.
    def pass_when_blocking(file_descriptor, command):
        # Fail exactly once, and only for non-blocking (LOCK_NB) attempts.
        if not pass_when_blocking.failed and (command & fcntl.LOCK_NB):
            pass_when_blocking.failed = True
            raise OSError(errno.EAGAIN, "Only accepting blocking calls.")

    pass_when_blocking.failed = False
    lock_file.side_effect = pass_when_blocking
    # EAGAINs get caught.
    with patch("builtins.open", mock_open()), patch.object(
            commands.Command, "_call_client") as call_client:
        arguments.no_watchman = True
        command = commands.Start(arguments, configuration, analysis_directory)
        self.assertEqual(
            command._flags(),
            [
                "-project-root", ".",
                "-workers", "5",
                "-typeshed", "stub",
                "-expected-binary-version", "hash",
                "-search-path", "path1,path2",
            ],
        )
        command.run()
        call_client.assert_called_once_with(command=commands.Start.NAME)
    lock_file.side_effect = None

    def raise_mount_error(fileno, command):
        raise OSError(errno.ENOTCONN)

    lock_file.side_effect = raise_mount_error
    # Check that the command errors on OS errors other than EAGAIN.
    with patch("builtins.open", mock_open()), patch.object(
            commands.Command, "_call_client") as call_client:
        arguments.no_watchman = True
        command = commands.Start(arguments, configuration, analysis_directory)
        self.assertEqual(
            command._flags(),
            [
                "-project-root", ".",
                "-workers", "5",
                "-typeshed", "stub",
                "-expected-binary-version", "hash",
                "-search-path", "path1,path2",
            ],
        )
        with self.assertRaises(OSError):
            command.run()
        # The client must not be reached when locking fails hard.
        call_client.assert_not_called()
    lock_file.side_effect = None

    # Shared analysis directories are prepared when starting.
    shared_analysis_directory = MagicMock()
    shared_analysis_directory.get_root = lambda: "."
    with patch.object(commands.Command, "_call_client") as call_client, patch.object(
            shared_analysis_directory, "prepare") as prepare:
        arguments = mock_arguments(no_watchman=True)
        configuration = mock_configuration(version_hash="hash")
        command = commands.Start(arguments, configuration, shared_analysis_directory)
        self.assertEqual(
            command._flags(),
            [
                "-project-root", ".",
                "-workers", "5",
                "-typeshed", "stub",
                "-expected-binary-version", "hash",
                "-search-path", "path1,path2",
            ],
        )
        command.run()
        call_client.assert_called_once_with(command=commands.Start.NAME)
        prepare.assert_called_once_with()
def setUp(self):
    """Start a mock over AWS4Auth for the duration of each test."""
    self.auth_patch = patch.object(auth_lxdx, 'AWS4Auth')
    self.auth = self.auth_patch.start()
def test_on_user_delete_calls_pinpoint(user_manager, user):
    """Deleting a user removes their pinpoint endpoints."""
    with patch.object(user_manager, 'pinpoint_client') as pinpoint_mock:
        user_manager.on_user_delete(user.id, old_item=user.item)
    expected = [call.delete_user_endpoints(user.id)]
    assert pinpoint_mock.mock_calls == expected
def test_analyze(self, directories_to_analyze, realpath, check_output, find_global_and_local_root) -> None:
    """Verify the backend flags `commands.Analyze` builds for each option
    combination; each case also runs the command and checks that exactly
    one client call is made.

    NOTE: `configuration.taint_models_path` is mutated between cases and
    the later cases rely on the value left by the earlier ones — the
    cases are order-dependent.
    """
    # Identity realpath so paths appear unmodified in the expected flags.
    realpath.side_effect = lambda x: x
    arguments = mock_arguments()
    configuration = mock_configuration()
    configuration.taint_models_path = []
    original_directory = "/original/directory"
    result = MagicMock()
    result.output = ""
    # Case 1: no taint models configured -> no -taint-models flags.
    with patch.object(commands.Command, "_call_client",
                      return_value=result) as call_client, patch(
                          "json.loads", return_value=[]):
        command = commands.Analyze(
            arguments,
            original_directory,
            configuration=configuration,
            analysis_directory=AnalysisDirectory(
                configuration_module.SimpleSearchPathElement(".")),
            analysis="taint",
            taint_models_path=[],
            no_verify=False,
            save_results_to=None,
            dump_call_graph=True,
            repository_root=None,
            rules=None,
            use_cache=False,
            inline_decorators=False,
        )
        self.assertEqual(
            command._flags(),
            [
                "-logging-sections", "-progress",
                "-project-root", "/root",
                "-log-directory", ".pyre",
                "-python-major-version", "3",
                "-python-minor-version", "6",
                "-python-micro-version", "0",
                "-workers", "5",
                "-analysis", "taint",
                "-dump-call-graph",
            ],
        )
        command.run()
        call_client.assert_called_once_with(command=commands.Analyze.NAME)
    # Case 2: one configured taint models path.
    with patch.object(commands.Command, "_call_client",
                      return_value=result) as call_client, patch(
                          "json.loads", return_value=[]):
        configuration.taint_models_path = ["taint_models"]
        command = commands.Analyze(
            arguments,
            original_directory,
            configuration=configuration,
            analysis_directory=AnalysisDirectory(
                configuration_module.SimpleSearchPathElement(".")),
            analysis="taint",
            taint_models_path=[],
            no_verify=False,
            save_results_to=None,
            dump_call_graph=True,
            repository_root=None,
            rules=None,
            use_cache=False,
            inline_decorators=False,
        )
        self.assertEqual(
            command._flags(),
            [
                "-logging-sections", "-progress",
                "-project-root", "/root",
                "-log-directory", ".pyre",
                "-python-major-version", "3",
                "-python-minor-version", "6",
                "-python-micro-version", "0",
                "-workers", "5",
                "-analysis", "taint",
                "-taint-models", "taint_models",
                "-dump-call-graph",
            ],
        )
        command.run()
        call_client.assert_called_once_with(command=commands.Analyze.NAME)
    # Case 3: multiple configured paths -> one -taint-models flag each.
    with patch.object(commands.Command, "_call_client",
                      return_value=result) as call_client, patch(
                          "json.loads", return_value=[]):
        configuration.taint_models_path = [
            "taint_models_1", "taint_models_2"
        ]
        command = commands.Analyze(
            arguments,
            original_directory,
            configuration=configuration,
            analysis_directory=AnalysisDirectory(
                configuration_module.SimpleSearchPathElement(".")),
            analysis="taint",
            taint_models_path=[],
            no_verify=False,
            save_results_to=None,
            dump_call_graph=True,
            repository_root=None,
            rules=None,
            use_cache=False,
            inline_decorators=False,
        )
        self.assertEqual(
            command._flags(),
            [
                "-logging-sections", "-progress",
                "-project-root", "/root",
                "-log-directory", ".pyre",
                "-python-major-version", "3",
                "-python-minor-version", "6",
                "-python-micro-version", "0",
                "-workers", "5",
                "-analysis", "taint",
                "-taint-models", "taint_models_1",
                "-taint-models", "taint_models_2",
                "-dump-call-graph",
            ],
        )
        command.run()
        call_client.assert_called_once_with(command=commands.Analyze.NAME)
    # Case 4: command-line taint_models_path overrides the configuration.
    with patch.object(commands.Command, "_call_client",
                      return_value=result) as call_client, patch(
                          "json.loads", return_value=[]):
        configuration.taint_models_path = {"taint_models"}
        command = commands.Analyze(
            arguments,
            original_directory,
            configuration=configuration,
            analysis_directory=AnalysisDirectory(
                configuration_module.SimpleSearchPathElement(".")),
            analysis="taint",
            taint_models_path=["overriding_models"],
            no_verify=False,
            save_results_to=None,
            dump_call_graph=True,
            repository_root=None,
            rules=None,
            use_cache=False,
            inline_decorators=False,
        )
        self.assertEqual(
            command._flags(),
            [
                "-logging-sections", "-progress",
                "-project-root", "/root",
                "-log-directory", ".pyre",
                "-python-major-version", "3",
                "-python-minor-version", "6",
                "-python-micro-version", "0",
                "-workers", "5",
                "-analysis", "taint",
                "-taint-models", "overriding_models",
                "-dump-call-graph",
            ],
        )
        command.run()
        call_client.assert_called_once_with(command=commands.Analyze.NAME)
    # Case 5: no_verify adds -no-verify.
    arguments = mock_arguments()
    with patch.object(commands.Command, "_call_client",
                      return_value=result) as call_client, patch(
                          "json.loads", return_value=[]):
        configuration.taint_models_path = {"taint_models"}
        command = commands.Analyze(
            arguments,
            original_directory,
            configuration=configuration,
            analysis_directory=AnalysisDirectory(
                configuration_module.SimpleSearchPathElement(".")),
            analysis="taint",
            taint_models_path=["overriding_models"],
            no_verify=True,
            save_results_to=None,
            dump_call_graph=True,
            repository_root=None,
            rules=None,
            use_cache=False,
            inline_decorators=False,
        )
        self.assertEqual(
            command._flags(),
            [
                "-logging-sections", "-progress",
                "-project-root", "/root",
                "-log-directory", ".pyre",
                "-python-major-version", "3",
                "-python-minor-version", "6",
                "-python-micro-version", "0",
                "-workers", "5",
                "-analysis", "taint",
                "-taint-models", "overriding_models",
                "-dump-call-graph",
                "-no-verify",
            ],
        )
        command.run()
        call_client.assert_called_once_with(command=commands.Analyze.NAME)
    # Test "." is a valid directory
    arguments = mock_arguments()
    with patch.object(commands.Command, "_call_client",
                      return_value=result) as call_client, patch(
                          "json.loads", return_value=[]):
        command = commands.Analyze(
            arguments,
            original_directory,
            configuration=configuration,
            analysis_directory=AnalysisDirectory(
                configuration_module.SimpleSearchPathElement(".")),
            analysis="taint",
            taint_models_path=[],
            no_verify=False,
            save_results_to=".",
            dump_call_graph=True,
            repository_root=None,
            rules=None,
            use_cache=False,
            inline_decorators=False,
        )
        self.assertEqual(
            command._flags(),
            [
                "-logging-sections", "-progress",
                "-project-root", "/root",
                "-log-directory", ".pyre",
                "-python-major-version", "3",
                "-python-minor-version", "6",
                "-python-micro-version", "0",
                "-workers", "5",
                "-analysis", "taint",
                "-taint-models", "taint_models",
                "-save-results-to", ".",
                "-dump-call-graph",
            ],
        )
        command.run()
        call_client.assert_called_once_with(command=commands.Analyze.NAME)
    # Case 7: save results to an explicit file path.
    arguments = mock_arguments()
    with patch.object(commands.Command, "_call_client",
                      return_value=result) as call_client, patch(
                          "json.loads", return_value=[]):
        command = commands.Analyze(
            arguments,
            original_directory,
            configuration=configuration,
            analysis_directory=AnalysisDirectory(
                configuration_module.SimpleSearchPathElement(".")),
            analysis="taint",
            taint_models_path=[],
            no_verify=False,
            save_results_to="/tmp/results.json",
            dump_call_graph=True,
            repository_root=None,
            rules=None,
            use_cache=False,
            inline_decorators=False,
        )
        self.assertEqual(
            command._flags(),
            [
                "-logging-sections", "-progress",
                "-project-root", "/root",
                "-log-directory", ".pyre",
                "-python-major-version", "3",
                "-python-minor-version", "6",
                "-python-micro-version", "0",
                "-workers", "5",
                "-analysis", "taint",
                "-taint-models", "taint_models",
                "-save-results-to", "/tmp/results.json",
                "-dump-call-graph",
            ],
        )
        command.run()
        call_client.assert_called_once_with(command=commands.Analyze.NAME)
    # Case 8: repository_root adds -repository-root.
    arguments = mock_arguments()
    with patch.object(commands.Command, "_call_client",
                      return_value=result) as call_client, patch(
                          "json.loads", return_value=[]):
        command = commands.Analyze(
            arguments,
            original_directory,
            configuration=configuration,
            analysis_directory=AnalysisDirectory(
                configuration_module.SimpleSearchPathElement(".")),
            analysis="taint",
            taint_models_path=[],
            no_verify=False,
            save_results_to="/tmp/results.json",
            dump_call_graph=True,
            repository_root="/home/username/root",
            rules=None,
            use_cache=False,
            inline_decorators=False,
        )
        self.assertEqual(
            command._flags(),
            [
                "-logging-sections", "-progress",
                "-project-root", "/root",
                "-log-directory", ".pyre",
                "-python-major-version", "3",
                "-python-minor-version", "6",
                "-python-micro-version", "0",
                "-workers", "5",
                "-analysis", "taint",
                "-taint-models", "taint_models",
                "-save-results-to", "/tmp/results.json",
                "-dump-call-graph",
                "-repository-root", "/home/username/root",
            ],
        )
        command.run()
        call_client.assert_called_once_with(command=commands.Analyze.NAME)
    # Case 9: rules are joined into a single comma-separated -rules flag.
    arguments = mock_arguments()
    with patch.object(commands.Command, "_call_client",
                      return_value=result) as call_client, patch(
                          "json.loads", return_value=[]):
        command = commands.Analyze(
            arguments,
            original_directory,
            configuration=configuration,
            analysis_directory=AnalysisDirectory(
                configuration_module.SimpleSearchPathElement(".")),
            analysis="taint",
            taint_models_path=[],
            no_verify=False,
            save_results_to=None,
            dump_call_graph=True,
            repository_root=None,
            rules=[5021, 5022],
            use_cache=False,
            inline_decorators=False,
        )
        self.assertEqual(
            command._flags(),
            [
                "-logging-sections", "-progress",
                "-project-root", "/root",
                "-log-directory", ".pyre",
                "-python-major-version", "3",
                "-python-minor-version", "6",
                "-python-micro-version", "0",
                "-workers", "5",
                "-analysis", "taint",
                "-taint-models", "taint_models",
                "-dump-call-graph",
                "-rules", "5021,5022",
            ],
        )
        command.run()
        call_client.assert_called_once_with(command=commands.Analyze.NAME)
    # Case 10: a non-taint analysis name is passed straight through.
    arguments = mock_arguments()
    with patch.object(commands.Command, "_call_client",
                      return_value=result) as call_client, patch(
                          "json.loads", return_value=[]):
        command = commands.Analyze(
            arguments,
            original_directory,
            configuration=configuration,
            analysis_directory=AnalysisDirectory(
                configuration_module.SimpleSearchPathElement(".")),
            analysis="liveness",
            taint_models_path=[],
            no_verify=False,
            save_results_to=None,
            dump_call_graph=True,
            repository_root=None,
            rules=None,
            use_cache=False,
            inline_decorators=False,
        )
        self.assertEqual(
            command._flags(),
            [
                "-logging-sections", "-progress",
                "-project-root", "/root",
                "-log-directory", ".pyre",
                "-python-major-version", "3",
                "-python-minor-version", "6",
                "-python-micro-version", "0",
                "-workers", "5",
                "-analysis", "liveness",
                "-taint-models", "taint_models",
                "-dump-call-graph",
            ],
        )
        command.run()
        call_client.assert_called_once_with(command=commands.Analyze.NAME)
    # Case 11: use_cache adds -use-cache.
    arguments = mock_arguments()
    with patch.object(commands.Command, "_call_client",
                      return_value=result) as call_client, patch(
                          "json.loads", return_value=[]):
        command = commands.Analyze(
            arguments,
            original_directory,
            configuration=configuration,
            analysis_directory=AnalysisDirectory(
                configuration_module.SimpleSearchPathElement(".")),
            analysis="taint",
            taint_models_path=[],
            no_verify=False,
            save_results_to=None,
            dump_call_graph=True,
            repository_root=None,
            rules=None,
            use_cache=True,
            inline_decorators=False,
        )
        self.assertEqual(
            command._flags(),
            [
                "-logging-sections", "-progress",
                "-project-root", "/root",
                "-log-directory", ".pyre",
                "-python-major-version", "3",
                "-python-minor-version", "6",
                "-python-micro-version", "0",
                "-workers", "5",
                "-analysis", "taint",
                "-taint-models", "taint_models",
                "-dump-call-graph",
                "-use-cache",
            ],
        )
        command.run()
        call_client.assert_called_once_with(command=commands.Analyze.NAME)
    # Case 12: inline_decorators adds -inline-decorators.
    arguments = mock_arguments()
    with patch.object(commands.Command, "_call_client",
                      return_value=result) as call_client, patch(
                          "json.loads", return_value=[]):
        command = commands.Analyze(
            arguments,
            original_directory,
            configuration=configuration,
            analysis_directory=AnalysisDirectory(
                configuration_module.SimpleSearchPathElement(".")),
            analysis="taint",
            taint_models_path=[],
            no_verify=False,
            save_results_to=None,
            dump_call_graph=True,
            repository_root=None,
            rules=None,
            use_cache=True,
            inline_decorators=True,
        )
        self.assertEqual(
            command._flags(),
            [
                "-logging-sections", "-progress",
                "-project-root", "/root",
                "-log-directory", ".pyre",
                "-python-major-version", "3",
                "-python-minor-version", "6",
                "-python-micro-version", "0",
                "-workers", "5",
                "-analysis", "taint",
                "-taint-models", "taint_models",
                "-dump-call-graph",
                "-use-cache",
                "-inline-decorators",
            ],
        )
        command.run()
        call_client.assert_called_once_with(command=commands.Analyze.NAME)
def test_run_stress_tests_1(capsys) -> None:
    """Running the search tool over test.txt prints the expected match."""
    argv = ["my_tool.py", "-ftest.txt", "-sLine", "-m"]
    with patch.object(sys, 'argv', argv):
        my_tool.SearchInFile.run()
    captured = capsys.readouterr()
    assert captured.out == "test.txt:3:6:Dummy Line"
def test_on_user_delete_calls_dating_project(user_manager, user):
    """Deleting a user soft-removes them from the dating service."""
    with patch.object(user_manager, 'real_dating_client') as dating_mock:
        user_manager.on_user_delete(user.id, old_item=user.item)
    expected = [call.remove_user(user.id, fail_soft=True)]
    assert dating_mock.mock_calls == expected
def test_get_release_suggestion_patch(self):
    """Changes without breaks or features suggest a patch release."""
    with patch.object(ChangelogUtils, 'get_changes', return_value={'changes': ''}):
        utils = ChangelogUtils()
        self.assertEqual('patch', utils.get_release_suggestion())
def test_on_user_delete_calls_elasticsearch(user_manager, user):
    """Deleting a user removes their Elasticsearch document."""
    with patch.object(user_manager, 'elasticsearch_client') as es_mock:
        user_manager.on_user_delete(user.id, old_item=user.item)
    assert es_mock.mock_calls == [call.delete_user(user.id)]
def test_get_new_release_version_major(self):
    """A major bump from 1.1.1 yields 2.0.0."""
    with patch.object(ChangelogUtils, 'get_current_version', return_value='1.1.1'):
        utils = ChangelogUtils()
        self.assertEqual('2.0.0', utils.get_new_release_version('major'))
def test_get_release_suggestion_major(self):
    """Breaking changes suggest a major release."""
    with patch.object(ChangelogUtils, 'get_changes', return_value={'break': 'stuff'}):
        utils = ChangelogUtils()
        self.assertEqual('major', utils.get_release_suggestion())
def test_process_response_json_uses_required_getters():
    '''
    This test only checks for appropriate calls to getter functions
    '''
    response_json = {'test key': 'test value'}
    # Canned return values: one entry per row so each mocked getter is
    # consumed exactly once per row via side_effect.
    row_list = ['row0', 'row1']
    image_lists = [['image', 'list', 'zero'], ['image', 'list', 'one']]
    flu_list = ['flu0', 'flu1']
    title_list = ['title0', 'title1']
    creator_list = ['creator0', 'creator1']
    metadata_list = ['metadata0', 'metadata1']
    tags_list = ['tags0', 'tags1']
    source_list = ['source0', 'source1']
    # Build (but do not start) one patcher per module-level helper.
    get_row_list = patch.object(si, '_get_row_list', return_value=row_list)
    process_image_list = patch.object(
        si, '_process_image_list', return_value=2
    )
    get_image_list = patch.object(
        si, '_get_image_list', side_effect=image_lists
    )
    get_flu = patch.object(
        si, '_get_foreign_landing_url', side_effect=flu_list
    )
    get_title = patch.object(si, '_get_title', side_effect=title_list)
    get_creator = patch.object(si, '_get_creator', side_effect=creator_list)
    ext_meta_data = patch.object(
        si, '_extract_meta_data', side_effect=metadata_list
    )
    ext_tags = patch.object(si, '_extract_tags', side_effect=tags_list)
    ext_source = patch.object(
        si, '_extract_source', side_effect=source_list
    )
    with\
            get_row_list as mock_get_row_list,\
            get_image_list as mock_get_image_list,\
            get_flu as mock_get_foreign_landing_url,\
            get_title as mock_get_title,\
            get_creator as mock_get_creator,\
            ext_meta_data as mock_extract_meta_data, \
            ext_tags as mock_extract_tags, \
            ext_source as mock_extract_source, \
            process_image_list as mock_process_image_list:
        si._process_response_json(response_json)

    # Every per-row getter should be called once with each row.
    getter_calls_list = [call(r) for r in row_list]
    # _process_image_list receives the per-row outputs of all the getters.
    image_processing_call_list = [
        call(
            image_lists[0],
            flu_list[0],
            title_list[0],
            creator_list[0],
            metadata_list[0],
            tags_list[0],
            source_list[0]
        ),
        call(
            image_lists[1],
            flu_list[1],
            title_list[1],
            creator_list[1],
            metadata_list[1],
            tags_list[1],
            source_list[1]
        )
    ]
    mock_get_row_list.assert_called_once_with(response_json)
    assert mock_process_image_list.mock_calls == image_processing_call_list
    assert mock_get_image_list.mock_calls == getter_calls_list
    assert mock_get_foreign_landing_url.mock_calls == getter_calls_list
    assert mock_get_title.mock_calls == getter_calls_list
    assert mock_get_creator.mock_calls == getter_calls_list
    assert mock_extract_meta_data.mock_calls == getter_calls_list
    assert mock_extract_tags.mock_calls == getter_calls_list
    # _extract_source is fed each row's metadata, not the raw row.
    assert mock_extract_source.mock_calls == [call(m) for m in metadata_list]
def test_get_new_release_version_suggest(self):
    """'suggest' defers to get_release_suggestion (minor: 1.1.1 -> 1.2.0)."""
    with patch.object(ChangelogUtils, 'get_current_version', return_value='1.1.1'), \
            patch.object(ChangelogUtils, 'get_release_suggestion', return_value='minor'):
        utils = ChangelogUtils()
        self.assertEqual('1.2.0', utils.get_new_release_version('suggest'))
def test_tile(app):
    """Test GET /mosaicjson/tiles endpoint"""
    mosaicjson = read_json_fixture(MOSAICJSON_FILE)
    bounds = mosaicjson["bounds"]
    # A tile fully inside the mosaic, and one only partially covered.
    tile = mercantile.tile(*mosaicjson["center"])
    partial_tile = mercantile.tile(bounds[0], bounds[1], mosaicjson["minzoom"])
    with patch.object(FileBackend, "_read", mosaic_read_factory(MOSAICJSON_FILE)):
        # full tile
        response = app.get(
            f"/mosaicjson/tiles/{tile.z}/{tile.x}/{tile.y}",
            params={"url": MOSAICJSON_FILE},
        )
        assert response.status_code == 200
        assert response.headers["content-type"] == "image/png"
        meta = parse_img(response.content)
        assert meta["width"] == meta["height"] == 256
        # @2x scale factor doubles the output dimensions.
        response = app.get(
            f"/mosaicjson/tiles/{tile.z}/{tile.x}/{tile.y}@2x",
            params={"url": MOSAICJSON_FILE},
        )
        assert response.status_code == 200
        assert response.headers["content-type"] == "image/png"
        meta = parse_img(response.content)
        assert meta["width"] == meta["height"] == 512
        # .tif extension returns GeoTIFF in web-mercator (EPSG:3857).
        response = app.get(
            f"/mosaicjson/tiles/{tile.z}/{tile.x}/{tile.y}.tif",
            params={"url": MOSAICJSON_FILE},
        )
        assert response.status_code == 200
        assert response.headers["content-type"] == "image/tiff"
        meta = parse_img(response.content)
        assert meta["width"] == meta["height"] == 256
        assert meta["crs"] == 3857
        # Single band + nodata: expect data band + alpha/mask band.
        response = app.get(
            f"/mosaicjson/tiles/{tile.z}/{tile.x}/{tile.y}@2x.tif",
            params={"url": MOSAICJSON_FILE, "nodata": 0, "bidx": 1},
        )
        assert response.status_code == 200
        assert response.headers["content-type"] == "image/tiff"
        meta = parse_img(response.content)
        assert meta["dtype"] == "uint16"
        assert meta["count"] == 2
        assert meta["width"] == 512
        assert meta["height"] == 512
        # Rescale + colormap rendering to JPEG.
        response = app.get(
            f"/mosaicjson/tiles/{tile.z}/{tile.x}/{tile.y}@2x.jpg",
            params={
                "url": MOSAICJSON_FILE,
                "rescale": "0,1000",
                "color_map": "viridis",
                "bidx": 1,
            },
        )
        assert response.status_code == 200
        assert response.headers["content-type"] == "image/jpeg"
        # partial tile
        response = app.get(
            f"/mosaicjson/tiles/{partial_tile.z}/{partial_tile.x}/{partial_tile.y}",
            params={"url": MOSAICJSON_FILE},
        )
        assert response.status_code == 200
        assert response.headers["content-type"] == "image/png"
        response = app.get(
            f"/mosaicjson/tiles/{partial_tile.z}/{partial_tile.x}/{partial_tile.y}.tif",
            params={"url": MOSAICJSON_FILE, "resampling_method": "bilinear"},
        )
        assert response.status_code == 200
        assert response.headers["content-type"] == "image/tiff"
def test_get_current_version_default(self):
    """An empty changelog falls back to version 0.0.0."""
    sample_data = []
    with patch.object(ChangelogUtils, 'get_changelog_data', return_value=sample_data):
        utils = ChangelogUtils()
        self.assertEqual('0.0.0', utils.get_current_version())
def setUp(self):
    """Build an isolated JupyterLab app/config environment with mock
    extension packages, and patch the path modules to point at it."""
    # Any TemporaryDirectory objects appended to this list will be cleaned
    # up at the end of the test run.
    self.tempdirs = []
    self.devnull = open(os.devnull, "w")

    @self.addCleanup
    def cleanup_tempdirs():
        for d in self.tempdirs:
            d.cleanup()

    self.test_dir = self.tempdir()
    self.data_dir = pjoin(self.test_dir, "data")
    self.config_dir = pjoin(self.test_dir, "config")
    self.pkg_names = {}
    # Copy in the mock packages.
    for name in ["extension", "incompat", "package", "mimeextension"]:
        src = pjoin(here, "mock_packages", name)

        # NOTE(review): copytree `ignore` callbacks conventionally return
        # just the collection of names to skip; this one returns
        # (dname, files) — confirm it actually excludes node_modules.
        def ignore(dname, files):
            if "node_modules" in dname:
                files = []
            if "node_modules" in files:
                files.remove("node_modules")
            return dname, files

        dest = pjoin(self.test_dir, name)
        shutil.copytree(src, dest, ignore=ignore)
        # Make a node modules folder so npm install is not called.
        if not os.path.exists(pjoin(dest, "node_modules")):
            os.makedirs(pjoin(dest, "node_modules"))
        # Expose each copy as e.g. self.mock_extension.
        setattr(self, "mock_" + name, dest)
        with open(pjoin(dest, "package.json")) as fid:
            data = json.load(fid)
        self.pkg_names[name] = data["name"]
    self.patches = []
    # Redirect the Jupyter environment variables into the temp dirs.
    p = patch.dict(
        "os.environ",
        {
            "JUPYTER_CONFIG_DIR": self.config_dir,
            "JUPYTER_DATA_DIR": self.data_dir,
            "JUPYTERLAB_DIR": pjoin(self.data_dir, "lab"),
        },
    )
    self.patches.append(p)
    # Patch whichever path constants the module actually defines.
    for mod in [paths]:
        if hasattr(mod, "ENV_JUPYTER_PATH"):
            p = patch.object(mod, "ENV_JUPYTER_PATH", [self.data_dir])
            self.patches.append(p)
        if hasattr(mod, "ENV_CONFIG_PATH"):
            p = patch.object(mod, "ENV_CONFIG_PATH", [self.config_dir])
            self.patches.append(p)
        if hasattr(mod, "CONFIG_PATH"):
            p = patch.object(mod, "CONFIG_PATH", self.config_dir)
            self.patches.append(p)
        if hasattr(mod, "BUILD_PATH"):
            p = patch.object(mod, "BUILD_PATH", self.data_dir)
            self.patches.append(p)
    for p in self.patches:
        p.start()
        self.addCleanup(p.stop)
    # verify our patches
    self.assertEqual(paths.ENV_CONFIG_PATH, [self.config_dir])
    self.assertEqual(paths.ENV_JUPYTER_PATH, [self.data_dir])
    self.assertEqual(
        Path(commands.get_app_dir()).resolve(),
        (Path(self.data_dir) / "lab").resolve())
    self.app_dir = commands.get_app_dir()
    # Set pinned extension names
    self.pinned_packages = [
        "[email protected]",
        "[email protected]"
    ]
async def test_atomic_transfer_race_condition(self, wallet, wallet_factory):
    """Run concurrency - 1 transfer normally, and check for the raise transaction conflict error due to race condition."""
    concurrency = 5
    nonce = "test_atomic_transfer_idempotency"
    target_wallet = await wallet_factory()
    target_wallet2 = await wallet_factory()
    # Fund the source wallet with enough for all transfers.
    await wallet.atomic_deposit(concurrency, nonce=nonce)
    transfer_nonce = f"{nonce}_trasfer"
    # Alternate transfers between the two target wallets.
    coroutines = [
        wallet.atomic_transfer(1, nonce=f"{transfer_nonce}_{i}", target_wallet=target)
        for i, target in zip(
            range(concurrency),
            itertools.cycle((target_wallet, target_wallet2)))
    ]
    original = wallet.storage._client.transact_write_items
    call_count = 0
    # todo: integration test is needed
    # mock due to TransactionConflictExceptions are not thrown by
    # downloadable DynamoDB for transactional APIs.
    exc = wallet.storage._client.exceptions.TransactionConflictException(
        error_response={
            "Error": {
                "Code": "TransactionConflictException",
                "Message": "Conflict occured",
            }
        },
        operation_name="Put",
    )

    async def mocked(*args, **kwargs):
        # Delegate to the real client, but make the final call conflict.
        nonlocal call_count
        call_count += 1
        if call_count == concurrency:
            raise exc
        return await original(*args, **kwargs)

    with patch.object(
            wallet.storage._client,
            "transact_write_items",
            mock.AsyncMock(side_effect=mocked),
    ):
        result = await asyncio.gather(*coroutines, return_exceptions=True)
    # All but the conflicting transfer succeed; the last surfaces the
    # wrapped TransactionConflictError.
    assert result[:-1] == [None] * (concurrency - 1)
    assert isinstance(result[-1],
                      core.storage.exceptions.TransactionConflictError)
    # One unit stays behind because one transfer failed.
    assert await wallet.get_balance() == 1
    # check that we do not lose any penny
    target_balance = (await target_wallet.get_balance() +
                      await target_wallet2.get_balance())
    assert target_balance + 1 == concurrency
async def test_get_health(self, mocker, client):
    """GET /foglamp/service: empty registry first, then four services in
    running/down/failed states."""
    # empty service registry
    resp = await client.get('/foglamp/service')
    assert 200 == resp.status
    result = await resp.text()
    json_response = json.loads(result)
    assert {'services': []} == json_response
    # Neutralize InterestRegistry so registration has no side effects.
    mocker.patch.object(InterestRegistry, "__init__", return_value=None)
    mocker.patch.object(InterestRegistry, "get", return_value=list())
    with patch.object(ServiceRegistry._logger, 'info') as log_patch_info:
        # populated service registry
        s_id_1 = ServiceRegistry.register('name1', 'Storage', 'address1', 1, 1, 'protocol1')
        s_id_2 = ServiceRegistry.register('name2', 'Southbound', 'address2', 2, 2, 'protocol2')
        s_id_3 = ServiceRegistry.register('name3', 'Southbound', 'address3', 3, 3, 'protocol3')
        s_id_4 = ServiceRegistry.register('name4', 'Southbound', 'address4', 4, 4, 'protocol4')
        # Drive two services out of the 'running' state.
        ServiceRegistry.unregister(s_id_3)
        ServiceRegistry.mark_as_failed(s_id_4)
        resp = await client.get('/foglamp/service')
        assert 200 == resp.status
        result = await resp.text()
        json_response = json.loads(result)
        assert json_response == {
            'services': [{
                'type': 'Storage',
                'service_port': 1,
                'address': 'address1',
                'protocol': 'protocol1',
                'status': 'running',
                'name': 'name1',
                'management_port': 1
            }, {
                'type': 'Southbound',
                'service_port': 2,
                'address': 'address2',
                'protocol': 'protocol2',
                'status': 'running',
                'name': 'name2',
                'management_port': 2
            }, {
                'type': 'Southbound',
                'service_port': 3,
                'address': 'address3',
                'protocol': 'protocol3',
                'status': 'down',
                'name': 'name3',
                'management_port': 3
            }, {
                'type': 'Southbound',
                'service_port': 4,
                'address': 'address4',
                'protocol': 'protocol4',
                'status': 'failed',
                'name': 'name4',
                'management_port': 4
            }]
        }
        # 4 registrations + unregister + mark_as_failed each log once.
        assert 6 == log_patch_info.call_count
def patch_window_size(size):
    """Force AuditWindowQuery's default window size to *size*, verify it
    took effect, and hold the patch active while the caller runs."""
    with patch.object(AuditWindowQuery.__init__, "__defaults__", (size,)):
        qry = AuditWindowQuery("ignored")
        assert qry.window_size == size, f"patch failed ({qry.window_size})"
        yield
async def test_dupe_schedule_name_add_service(self, client):
    """Adding a service whose name collides with an existing schedule
    must be rejected with HTTP 400."""

    async def async_mock():
        return None

    def q_result(*arg):
        # Storage-query stub: no existing scheduled process, but a
        # schedule named 'furnace4' already exists.
        table = arg[0]
        payload = arg[1]
        if table == 'scheduled_processes':
            assert {
                'return': ['name'],
                'where': {
                    'column': 'name',
                    'condition': '=',
                    'value': 'furnace4'
                }
            } == json.loads(payload)
            return {'count': 0, 'rows': []}
        if table == 'schedules':
            assert {
                'return': ['schedule_name'],
                'where': {
                    'column': 'schedule_name',
                    'condition': '=',
                    'value': 'furnace4'
                }
            } == json.loads(payload)
            return {
                'count': 1,
                'rows': [{
                    'schedule_name': 'schedule_name'
                }]
            }

    data = {"name": "furnace4", "type": "north", "plugin": "dht11"}
    description = '{} service configuration'.format(data['name'])
    storage_client_mock = MagicMock(StorageClient)
    c_mgr = ConfigurationManager(storage_client_mock)
    val = {
        'plugin': {
            'default': data['plugin'],
            'description': 'Python module name of the plugin to load',
            'type': 'string'
        }
    }
    with patch.object(connect, 'get_storage', return_value=storage_client_mock):
        with patch.object(storage_client_mock, 'query_tbl_with_payload', side_effect=q_result):
            with patch.object(storage_client_mock, 'insert_into_tbl',
                              return_value={
                                  'rows_affected': 1,
                                  "response": "inserted"
                              }) as insert_table_patch:
                with patch.object(
                        c_mgr, 'create_category',
                        return_value=async_mock()) as patch_create_cat:
                    resp = await client.post('/foglamp/service', data=json.dumps(data))
                    # Duplicate schedule name -> rejected.
                    assert 400 == resp.status
                    assert 'A schedule with that name already exists' == resp.reason
                    # The category was still created before the clash check.
                    patch_create_cat.assert_called_once_with(
                        category_name=data['name'],
                        category_description=description,
                        category_value=val,
                        keep_original_items=False)
                    # The scheduled process row was inserted as well.
                    args, kwargs = insert_table_patch.call_args
                    assert 'scheduled_processes' == args[0]
                    p = json.loads(args[1])
                    assert {
                        'name': 'furnace4',
                        'script': '["services/north"]'
                    } == p
def test_valid_name_empty(self):
    """An empty profile name is invalid and triggers a single warning box."""
    names = ('a', 'b')
    dialog = qtimgren.profile.ProfileDialog(self.main, names=names)
    with patch.object(qtimgren.profile, 'QMessageBox') as msg_box:
        self.assertFalse(dialog.valid())
    self.assertEqual(1, msg_box.warning.call_count)
def testBatchLifecycle(self):
    """Walk a batch trial through its status lifecycle.

    Verifies that the experiment's trial-status index tracks every
    transition (CANDIDATE -> STAGED -> RUNNING -> COMPLETED), that the
    timing/status flags are set at each step, and that illegal
    transitions or post-run mutations raise ValueError.

    BUG FIX: the four "all other statuses are empty" checks previously
    read ``self.assertTrue(all(len(idcs) == 0) for status, idcs in ...)``
    — assertTrue received a bare generator object, which is always
    truthy, so those assertions never checked anything. The generator
    expression is now correctly placed inside ``all(...)``.
    """
    # Check that state of trial statuses mapping on experiment: there should only be
    # one index, 0, among the `CANDIDATE` trials.
    trial_idcs_by_status = iter(
        self.experiment.trial_indices_by_status.values())
    self.assertEqual(next(trial_idcs_by_status), {0})  # `CANDIDATE` trial indices
    # All other trial statuses should not yet have trials carry them.
    self.assertTrue(all(len(idcs) == 0 for idcs in trial_idcs_by_status))
    staging_mock = PropertyMock()
    with patch.object(SyntheticRunner, "staging_required", staging_mock):
        mock_runner = SyntheticRunner()
        staging_mock.return_value = True
        self.batch.runner = mock_runner
        self.batch.run()
        self.assertEqual(self.batch.status, TrialStatus.STAGED)
        # Check that the trial statuses mapping on experiment has been updated.
        self.assertEqual(
            self.experiment.trial_indices_by_status[TrialStatus.STAGED], {0})
        self.assertTrue(
            all(
                len(idcs) == 0
                for status, idcs in self.experiment.trial_indices_by_status.items()
                if status != TrialStatus.STAGED
            )
        )
        self.assertIsNotNone(self.batch.time_staged)
        self.assertTrue(self.batch.status.is_deployed)
        self.assertFalse(self.batch.status.expecting_data)

        # Cannot change arms or runner once run
        with self.assertRaises(ValueError):
            self.batch.add_arms_and_weights(arms=self.arms, weights=self.weights)
        with self.assertRaises(ValueError):
            self.batch.runner = None

        # Cannot run batch that was already run
        with self.assertRaises(ValueError):
            self.batch.run()

        self.batch.mark_running()
        self.assertEqual(self.batch.status, TrialStatus.RUNNING)
        # Check that the trial statuses mapping on experiment has been updated.
        self.assertEqual(
            self.experiment.trial_indices_by_status[TrialStatus.RUNNING], {0})
        self.assertTrue(
            all(
                len(idcs) == 0
                for status, idcs in self.experiment.trial_indices_by_status.items()
                if status != TrialStatus.RUNNING
            )
        )
        self.assertIsNotNone(self.batch.time_run_started)
        self.assertTrue(self.batch.status.expecting_data)

        self.batch.complete()
        # Cannot complete that which is already completed
        with self.assertRaises(ValueError):
            self.batch.complete()

        # Verify trial is completed
        self.assertEqual(self.batch.status, TrialStatus.COMPLETED)
        # Check that the trial statuses mapping on experiment has been updated.
        self.assertEqual(
            self.experiment.trial_indices_by_status[TrialStatus.COMPLETED], {0})
        self.assertTrue(
            all(
                len(idcs) == 0
                for status, idcs in self.experiment.trial_indices_by_status.items()
                if status != TrialStatus.COMPLETED
            )
        )
        self.assertIsNotNone(self.batch.time_completed)
        self.assertTrue(self.batch.status.is_terminal)

        # Cannot change status after BatchTrial is completed
        with self.assertRaises(ValueError):
            self.batch.mark_staged()
        with self.assertRaises(ValueError):
            self.batch.mark_completed()
        with self.assertRaises(ValueError):
            self.batch.mark_running()
        with self.assertRaises(ValueError):
            self.batch.mark_abandoned()
        with self.assertRaises(ValueError):
            self.batch.mark_failed()

        # Check that the trial statuses mapping on experiment is updated when
        # trial status is set hackily / directly, without using `mark_X`.
        self.batch._status = TrialStatus.CANDIDATE
        self.assertEqual(
            self.experiment.trial_indices_by_status[TrialStatus.CANDIDATE], {0})
        self.assertTrue(
            all(
                len(idcs) == 0
                for status, idcs in self.experiment.trial_indices_by_status.items()
                if status != TrialStatus.CANDIDATE
            )
        )
async def test_add_service(self, client):
    """POSTing a new south service registers its scheduled process,
    creates its configuration category and saves a startup schedule,
    returning the schedule id and service name.
    """

    async def async_mock(return_value):
        return return_value

    async def async_mock_get_schedule():
        schedule = StartUpSchedule()
        schedule.schedule_id = '2129cc95-c841-441a-ad39-6469a87dbc8b'
        return schedule

    def q_result(*arg):
        # Fake storage lookups: neither a duplicate scheduled process nor
        # a duplicate schedule name exists.
        table = arg[0]
        payload = arg[1]
        if table == 'scheduled_processes':
            assert {'return': ['name'],
                    'where': {'column': 'name', 'condition': '=',
                              'value': 'furnace4'}} == json.loads(payload)
            return {'count': 0, 'rows': []}
        if table == 'schedules':
            assert {'return': ['schedule_name'],
                    'where': {'column': 'schedule_name', 'condition': '=',
                              'value': 'furnace4'}} == json.loads(payload)
            return {'count': 0, 'rows': []}

    server.Server.scheduler = Scheduler(None, None)
    data = {"name": "furnace4", "type": "south", "plugin": "dht11"}
    description = '{} service configuration'.format(data['name'])
    storage_client_mock = MagicMock(StorageClient)
    c_mgr = ConfigurationManager(storage_client_mock)
    val = {'plugin': {'default': data['plugin'],
                      'description': 'Python module name of the plugin to load',
                      'type': 'string'}}
    with patch.object(connect, 'get_storage', return_value=storage_client_mock):
        with patch.object(storage_client_mock, 'query_tbl_with_payload',
                          side_effect=q_result):
            with patch.object(storage_client_mock, 'insert_into_tbl',
                              return_value={'rows_affected': 1,
                                            "response": "inserted"}) as insert_table_patch:
                with patch.object(c_mgr, 'create_category',
                                  return_value=async_mock(None)) as patch_create_cat:
                    with patch.object(server.Server.scheduler, 'save_schedule',
                                      return_value=async_mock("")) as patch_save_schedule:
                        with patch.object(server.Server.scheduler, 'get_schedule_by_name',
                                          return_value=async_mock_get_schedule()) as patch_get_schedule:
                            resp = await client.post('/foglamp/service',
                                                     data=json.dumps(data))
                            server.Server.scheduler = None
                            assert 200 == resp.status
                            result = await resp.text()
                            json_response = json.loads(result)
                            assert {'id': '2129cc95-c841-441a-ad39-6469a87dbc8b',
                                    'name': 'furnace4'} == json_response
                        patch_get_schedule.assert_called_once_with(data['name'])
                        # BUG FIX: previously `patch_save_schedule.called_once_with()`
                        # only auto-created and invoked a child Mock attribute, so it
                        # asserted nothing; verify the call count explicitly instead.
                        assert 1 == patch_save_schedule.call_count
                    patch_create_cat.assert_called_once_with(
                        category_name=data['name'],
                        category_description=description,
                        category_value=val,
                        keep_original_items=False)
                args, kwargs = insert_table_patch.call_args
                assert 'scheduled_processes' == args[0]
                p = json.loads(args[1])
                assert {'name': 'furnace4',
                        'script': '["services/south"]'} == p
def test_init(test_data):
    """End-to-end exercise of DataFrameBatch: construction/auto-batching,
    checkpoint dir layout, training-data creation, per-batch and all-batch
    training, validators, line generation with max_invalid handling, and
    synthetic DataFrame assembly.

    BUG FIXES:
    - header comparison previously used ``list(set(a)) == list(set(b))``,
      which depends on set iteration order (hash/insertion dependent) —
      compare the sets directly instead;
    - ``open(...).read()`` leaked a file handle — use ``Path.read_text()``.
    """
    with pytest.raises(ValueError):
        DataFrameBatch(df="nope", config=config_template)

    # should create the dir structure based on auto batch sizing
    batches = DataFrameBatch(df=test_data, config=config_template, batch_size=15)

    first_row = [
        "ID_code", "target", "var_0", "var_1", "var_2", "var_3", "var_4",
        "var_5", "var_6", "var_7", "var_8", "var_9", "var_10", "var_11",
        "var_12",
    ]
    assert batches.batches[0].headers == first_row
    assert len(batches.batches.keys()) == 14

    for i, batch in batches.batches.items():
        assert Path(batch.checkpoint_dir).is_dir()
        assert Path(batch.checkpoint_dir).name == f"batch_{i}"

    orig_headers = json.loads(
        (Path(config_template["checkpoint_dir"]) / ORIG_HEADERS).read_text()
    )
    assert set(orig_headers) == set(test_data.columns)

    batches.create_training_data()
    df = pd.read_csv(
        batches.batches[0].input_data_path, sep=config_template["field_delimiter"]
    )
    assert len(df.columns) == len(first_row)

    with pytest.raises(ValueError):
        batches.train_batch(99)

    with patch("gretel_synthetics.batch.train") as mock_train:
        batches.train_batch(5)
        arg = batches.batches[5].config
        mock_train.assert_called_with(arg, None)

    with patch("gretel_synthetics.batch.train") as mock_train:
        batches.train_all_batches()
        args = [b.config for b in batches.batches.values()]
        called_args = []
        for _, a, _ in mock_train.mock_calls:
            called_args.append(a[0])
        assert args == called_args

    with pytest.raises(ValueError):
        batches.set_batch_validator(5, "foo")

    with pytest.raises(ValueError):
        batches.set_batch_validator(99, simple_validator)

    batches.set_batch_validator(5, simple_validator)
    assert batches.batches[5].validator("1,2,3,4,5")
    # load validator back from disk
    batches.batches[5].load_validator_from_file()
    assert batches.batches[5].validator("1,2,3,4,5")

    # generate lines, simulating generation the max
    # valid line count
    def good():
        return GenText(
            text="1,2,3,4,5", valid=random.choice([None, True]), delimiter=","
        )

    def bad():
        return GenText(text="1,2,3", valid=False, delimiter=",")

    with patch("gretel_synthetics.batch.generate_text") as mock_gen:
        mock_gen.return_value = [good(), good(), good(), bad(), bad(), good(), good()]
        summary = batches.generate_batch_lines(5, max_invalid=1)
        assert summary.is_valid
        check_call = mock_gen.mock_calls[0]
        _, _, kwargs = check_call
        assert kwargs["max_invalid"] == 1

    with patch("gretel_synthetics.batch.generate_text") as mock_gen:
        mock_gen.return_value = [good(), good(), good(), bad(), bad(), good(), good()]
        summary = batches.generate_batch_lines(5)
        assert summary.is_valid

    with patch("gretel_synthetics.batch.generate_text") as mock_gen:
        mock_gen.return_value = [good(), good(), good(), bad(), bad(), good()]
        summary = batches.generate_batch_lines(5)
        assert not summary.is_valid

    with patch.object(batches, "generate_batch_lines") as mock_gen:
        batches.generate_all_batch_lines(max_invalid=15)
        assert mock_gen.call_count == len(batches.batches.keys())
        check_call = mock_gen.mock_calls[0]
        _, _, kwargs = check_call
        assert kwargs["max_invalid"] == 15

    # get synthetic df
    line = GenText(
        text="1,2,3,4,5,6,7,8,9,10,11,12,13,14,15", valid=True, delimiter=","
    )
    with patch("gretel_synthetics.batch.generate_text") as mock_gen:
        mock_gen.return_value = [line] * len(batches.batches[10].headers)
        batches.generate_batch_lines(10)
    assert len(batches.batches[10].synthetic_df) == len(batches.batches[10].headers)
def run_thread(self, client_id):
    """Run the basicswap entry point against one client's regtest datadir."""
    datadir = os.path.join(test_path, 'client{}'.format(client_id))
    argv = ['basicswap-run', '-datadir=' + datadir, '-regtest']
    # Substitute sys.argv so main() parses our arguments, then restore it.
    with patch.object(sys, 'argv', argv):
        runSystem.main()