def test_scaling(self):
    # Test integer scaling from float
    # Analyze headers cannot do float-integer scaling
    hdr = self.header_class()
    assert_true(hdr.default_x_flip)
    shape = (1, 2, 3)
    hdr.set_data_shape(shape)
    hdr.set_data_dtype(np.float32)
    data = np.ones(shape, dtype=np.float64)
    S = BytesIO()
    # Writing to float datatype doesn't need scaling
    hdr.data_to_fileobj(data, S)
    rdata = hdr.data_from_fileobj(S)
    assert_array_almost_equal(data, rdata)
    # Now test writing to integers
    hdr.set_data_dtype(np.int32)
    # Writing to int needs scaling, and raises an error if we can't scale
    if not hdr.has_data_slope:
        assert_raises(HeaderTypeError, hdr.data_to_fileobj, data, BytesIO())
    # But if we aren't scaling, convert the floats to integers and write
    with np.errstate(invalid='ignore'):
        hdr.data_to_fileobj(data, S, rescale=False)
    rdata = hdr.data_from_fileobj(S)
    assert_true(np.allclose(data, rdata))
    # This won't work for floats that aren't close to integers
    data_p5 = data + 0.5
    with np.errstate(invalid='ignore'):
        hdr.data_to_fileobj(data_p5, S, rescale=False)
    rdata = hdr.data_from_fileobj(S)
    assert_false(np.allclose(data_p5, rdata))
def test_as_float_array():
    # Test function for as_float_array
    X = np.ones((3, 10), dtype=np.int32)
    X = X + np.arange(10, dtype=np.int32)
    # Checks that the return type is ok
    X2 = as_float_array(X, copy=False)
    np.testing.assert_equal(X2.dtype, np.float32)
    # Another test
    X = X.astype(np.int64)
    X2 = as_float_array(X, copy=True)
    # Checking that the array wasn't overwritten
    assert_true(as_float_array(X, False) is not X)
    # Checking that the new type is ok
    np.testing.assert_equal(X2.dtype, np.float64)
    # Here, X is of the right type, it shouldn't be modified
    X = np.ones((3, 2), dtype=np.float32)
    assert_true(as_float_array(X, copy=False) is X)
    # Test that if X is fortran ordered it stays
    X = np.asfortranarray(X)
    assert_true(np.isfortran(as_float_array(X, copy=True)))
    # Test the copy parameter with some matrices
    matrices = [
        np.matrix(np.arange(5)),
        sp.csc_matrix(np.arange(5)).toarray(),
        sparse_random_matrix(10, 10, density=0.10).toarray(),
    ]
    for M in matrices:
        N = as_float_array(M, copy=True)
        N[0, 0] = np.nan
        assert_false(np.isnan(M).any())
def test_file_move(self):
    source_dir = Directory.objects.create(name='test_mv_file_src', owner=self.user, parent_directory=self.home_dir)
    target_dir = Directory.objects.create(name='test_mv_file_dst', owner=self.user, parent_directory=self.home_dir)
    doc = Document2.objects.create(name='query1.sql', type='query-hive', owner=self.user, data={}, parent_directory=source_dir)

    # Verify original paths before move operation
    response = self.client.get('/desktop/api2/doc/get', {'uuid': source_dir.uuid})
    data = json.loads(response.content)
    assert_equal('/test_mv_file_src', data['path'])

    response = self.client.get('/desktop/api2/doc/get', {'uuid': doc.uuid})
    data = json.loads(response.content)
    assert_equal('/test_mv_file_src/query1.sql', data['path'])

    response = self.client.post('/desktop/api2/doc/move', {
        'source_doc_uuid': json.dumps(doc.uuid),
        'destination_doc_uuid': json.dumps(target_dir.uuid)
    })
    data = json.loads(response.content)
    assert_equal(0, data['status'], data)

    # Verify that the paths are updated.  Use a distinct loop variable so it
    # doesn't shadow the `doc` document created above.
    response = self.client.get('/desktop/api2/docs', {'uuid': source_dir.uuid})
    data = json.loads(response.content)
    assert_false(any(child['uuid'] == doc.uuid for child in data['children']), data['children'])

    response = self.client.get('/desktop/api2/doc/get', {'uuid': doc.uuid})
    data = json.loads(response.content)
    assert_equal('/test_mv_file_dst/query1.sql', data['path'])
def test_fetch_stored_values_fixed(self):
    c = self.c
    c.one.fetch = mock.MagicMock()
    c.two.fetch = mock.MagicMock()
    c.fetch_stored_values(only_fixed=True)
    nt.assert_true(c.one.fetch.called)
    nt.assert_false(c.two.fetch.called)
def test_incremental(self):
    sp = self.sp
    sp.push('%%cellm line2\n')
    nt.assert_true(sp.push_accepts_more())  # 1
    sp.push('\n')
    # In this case, a blank line should end the cell magic
    nt.assert_false(sp.push_accepts_more())  # 2
def test_make_dig_points():
    """Test application of Polhemus HSP to info"""
    dig_points = _read_dig_points(hsp_fname)
    info = create_info(ch_names=['Test Ch'], sfreq=1000., ch_types=None)
    assert_false(info['dig'])

    info['dig'] = _make_dig_points(dig_points=dig_points)
    assert_true(info['dig'])
    assert_array_equal(info['dig'][0]['r'], [-106.93, 99.80, 68.81])

    dig_points = _read_dig_points(elp_fname)
    nasion, lpa, rpa = dig_points[:3]
    info = create_info(ch_names=['Test Ch'], sfreq=1000., ch_types=None)
    assert_false(info['dig'])

    info['dig'] = _make_dig_points(nasion, lpa, rpa, dig_points[3:], None)
    assert_true(info['dig'])
    idx = [d['ident'] for d in info['dig']].index(FIFF.FIFFV_POINT_NASION)
    assert_array_equal(info['dig'][idx]['r'],
                       np.array([1.3930, 13.1613, -4.6967]))
    assert_raises(ValueError, _make_dig_points, nasion[:2])
    assert_raises(ValueError, _make_dig_points, None, lpa[:2])
    assert_raises(ValueError, _make_dig_points, None, None, rpa[:2])
    assert_raises(ValueError, _make_dig_points, None, None, None,
                  dig_points[:, :2])
    assert_raises(ValueError, _make_dig_points, None, None, None, None,
                  dig_points[:, :2])
def test_get_courses_has_no_templates(self):
    courses = self.store.get_courses()
    for course in courses:
        assert_false(
            course.location.org == 'edx' and course.location.course == 'templates',
            '{0} is a template course'.format(course)
        )
def test_exception():
    """An exception inside the context manager still removes the directory"""
    from testre.temporary import temporary_directory

    class CustomException(Exception):
        """You know, for testing"""

    with assert_raises(CustomException):
        with temporary_directory() as tempdir:
            temp = tempdir
            assert_true(temp.exists())
            for filename in ('foo', 'bar', 'baz'):
                filepath = temp / filename
                with filepath.open('w') as f_out:
                    f_out.write(u'read me')
                assert_true(filepath.exists())
            raise CustomException
    assert_false(temp.exists())
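# For context, a minimal sketch of the contract the test above exercises: a
# `temporary_directory` context manager that yields a pathlib.Path and removes
# the tree on exit, even when the body raises. This is an assumed illustration,
# not testre's actual implementation.
import shutil
import tempfile
from contextlib import contextmanager
from pathlib import Path


@contextmanager
def sketch_temporary_directory():
    path = Path(tempfile.mkdtemp())
    try:
        yield path
    finally:
        # try/finally guarantees cleanup on both success and exception
        shutil.rmtree(str(path), ignore_errors=True)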
def test_data_scaling(self):
    hdr = self.header_class()
    hdr.set_data_shape((1, 2, 3))
    hdr.set_data_dtype(np.int16)
    S3 = BytesIO()
    data = np.arange(6, dtype=np.float64).reshape((1, 2, 3))
    # This uses scaling
    hdr.data_to_fileobj(data, S3)
    data_back = hdr.data_from_fileobj(S3)
    # almost equal
    assert_array_almost_equal(data, data_back, 4)
    # But not quite
    assert_false(np.all(data == data_back))
    # This is exactly the same call, just testing it works twice
    data_back2 = hdr.data_from_fileobj(S3)
    assert_array_equal(data_back, data_back2)
    # Rescaling is the default
    hdr.data_to_fileobj(data, S3, rescale=True)
    data_back = hdr.data_from_fileobj(S3)
    assert_array_almost_equal(data, data_back, 4)
    assert_false(np.all(data == data_back))
    # This doesn't use scaling, and so gets perfect precision
    hdr.data_to_fileobj(data, S3, rescale=False)
    data_back = hdr.data_from_fileobj(S3)
    assert_true(np.all(data == data_back))
def test_add_ipam_error_invalid_response(self):
    """
    Tests CNI add, IPAM response is not valid json.
    """
    # Configure.
    self.command = CNI_CMD_ADD
    ipam_stdout = "{some invalid json}"
    # Set the return code to 0, even though IPAM failed. This shouldn't
    # ever happen, but at least we know we're ready for it. We have other
    # test cases that handle rc != 0.
    self.set_ipam_result(0, ipam_stdout, "")

    # Create plugin.
    p = self.create_plugin()

    # Mock DatastoreClient such that no endpoints exist.
    self.client.get_endpoint.side_effect = KeyError

    # Execute.
    with assert_raises(SystemExit) as err:
        p.execute()
    e = err.exception

    # Assert failure.
    assert_equal(e.code, ERR_CODE_GENERIC)

    # Assert an endpoint was not created.
    assert_false(self.client.create_endpoint.called)

    # Assert a profile was not set.
    assert_false(self.client.append_profiles_to_endpoint.called)
def test_add_etcd_down(self):
    """
    Tests CNI add, etcd is not running when we attempt to get
    an Endpoint from etcd.
    """
    # Configure.
    self.command = CNI_CMD_ADD
    ip4 = "10.0.0.1/32"
    ip6 = "0:0:0:0:0:ffff:a00:1"
    ipam_stdout = json.dumps({"ip4": {"ip": ip4}, "ip6": {"ip": ip6}})
    self.set_ipam_result(0, ipam_stdout, "")

    # Mock out get_endpoint to raise DataStoreError.
    self.client.get_endpoint.side_effect = DataStoreError

    # Create plugin.
    p = self.create_plugin()

    # Execute.
    with assert_raises(SystemExit) as err:
        p.execute()
    e = err.exception

    # Assert failure.
    assert_equal(e.code, ERR_CODE_GENERIC)

    # Assert an endpoint was not created.
    assert_false(self.client.create_endpoint.called)

    # Assert a profile was not set.
    assert_false(self.client.append_profiles_to_endpoint.called)
def test_add_kubernetes_docker_host_networking(self):
    """
    Test CNI add in k8s docker when NetworkMode == host.
    """
    # Configure.
    self.cni_args = "K8S_POD_NAME=podname;K8S_POD_NAMESPACE=default"
    self.command = CNI_CMD_ADD
    ip4 = "10.0.0.1/32"
    ip6 = "0:0:0:0:0:ffff:a00:1"
    ipam_stdout = json.dumps({"ip4": {"ip": ip4}, "ip6": {"ip": ip6}})
    self.set_ipam_result(0, ipam_stdout, "")

    # Create plugin.
    p = self.create_plugin()

    # Mock NetworkMode == host.
    inspect_result = {"HostConfig": {"NetworkMode": "host"}}
    self.m_docker_client().inspect_container.return_value = inspect_result

    # Execute.
    with assert_raises(SystemExit) as err:
        p.execute()
    e = err.exception

    # Assert success.
    assert_equal(e.code, 0)

    # Assert an endpoint was not created.
    assert_false(self.client.create_endpoint.called)
def test_add_ipam_error(self):
    """
    Tests CNI add, IPAM plugin fails.

    The plugin should return an error code and print the IPAM result,
    but should not need to clean anything up since IPAM is the
    first step in CNI add.
    """
    # Configure.
    self.command = CNI_CMD_ADD
    ip4 = "10.0.0.1/32"
    ip6 = "0:0:0:0:0:ffff:a00:1"
    ipam_stdout = json.dumps({"code": 100, "msg": "Test IPAM error"})
    self.set_ipam_result(100, ipam_stdout, "")

    # Mock DatastoreClient such that no endpoints exist.
    self.client.get_endpoint.side_effect = KeyError

    # Create plugin.
    p = self.create_plugin()

    # Execute.
    with assert_raises(SystemExit) as err:
        p.execute()
    e = err.exception

    # Assert failure.
    assert_equal(e.code, ERR_CODE_GENERIC)

    # Assert an endpoint was not created.
    assert_false(self.client.create_endpoint.called)
def test_space_net_alpha_grid_pure_spatial():
    rng = check_random_state(42)
    X = rng.randn(10, 100)
    y = np.arange(X.shape[0])
    for is_classif in [True, False]:
        assert_false(np.any(np.isnan(_space_net_alpha_grid(
            X, y, l1_ratio=0., logistic=is_classif))))
def test_pickler_proxy():
    h = Hist(5, 0, 1, name='hist')
    f = tempfile.NamedTemporaryFile(suffix='.root')
    with root_open(f.name, 'recreate') as outfile:
        dump([h], outfile)

    class IsCalled(object):
        def __init__(self, func):
            self.func = func
            self.called = False

        def __call__(self, path):
            if path != '_pickle;1':
                self.called = True
            return self.func(path)

    with root_open(f.name) as infile:
        infile.Get = IsCalled(infile.Get)
        hlist = load(infile, use_proxy=False)
        assert_true(infile.Get.called)

    with root_open(f.name) as infile:
        infile.Get = IsCalled(infile.Get)
        hlist = load(infile, use_proxy=True)
        assert_false(infile.Get.called)
        assert_equal(hlist[0].name, 'hist')
        assert_true(infile.Get.called)

    f.close()
def test_graph_single_edge(self):
    G = nx.Graph()
    G.add_edge(0, 1)
    assert_true(nx.is_edge_cover(G, {(0, 0), (1, 1)}))
    assert_true(nx.is_edge_cover(G, {(0, 1), (1, 0)}))
    assert_true(nx.is_edge_cover(G, {(0, 1)}))
    assert_false(nx.is_edge_cover(G, {(0, 0)}))
def test_versions():
    fake_name = '_a_fake_package'
    fake_pkg = types.ModuleType(fake_name)
    assert_false(fake_name in sys.modules)  # Not inserted yet
    assert_bad(fake_name)
    try:
        sys.modules[fake_name] = fake_pkg
        # No __version__ yet
        assert_good(fake_name)  # With no version check
        assert_bad(fake_name, '1.0')
        # We can make an arbitrary callable to check version
        assert_good(fake_name, lambda pkg: True)
        # Now add a version
        fake_pkg.__version__ = '2.0'
        # We have fake_pkg > 1.0
        for min_ver in (None, '1.0', LooseVersion('1.0'), lambda pkg: True):
            assert_good(fake_name, min_ver)
        # We never have fake_pkg > 100.0
        for min_ver in ('100.0', LooseVersion('100.0'), lambda pkg: False):
            assert_bad(fake_name, min_ver)
        # Check error string for bad version
        pkg, _, _ = optional_package(fake_name, min_version='3.0')
        try:
            pkg.some_method
        except TripWireError as err:
            assert_equal(str(err),
                         'These functions need _a_fake_package version >= 3.0')
    finally:
        del sys.modules[fake_name]
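# A compact usage sketch of the API exercised above, assuming the
# nibabel-style helper: optional_package(name, min_version=...) returns a
# (module-or-tripwire, have_package, setup_module) triple, and any attribute
# access on the tripwire raises TripWireError.
from nibabel.optpkg import optional_package
from nibabel.tripwire import TripWireError

scipy, have_scipy, _ = optional_package('scipy', min_version='0.19')
if have_scipy:
    print(scipy.__version__)  # a real module was returned
else:
    try:
        scipy.sqrt  # any attribute access trips the wire
    except TripWireError as err:
        print(err)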
def test_has_format(self):
    ctrl = TestDataDirectoryController(self.data)
    assert_false(ctrl.has_format())
    ctrl.mark_dirty()
    assert_false(ctrl.has_format())
    ctrl.data.initfile = os.path.join('source', '__init__.html')
    assert_true(ctrl.has_format())
def check_inheritable_attribute(self, attribute, value):
    # `attribute` isn't a basic attribute of Sequence
    assert_false(hasattr(SequenceDescriptor, attribute))

    # `attribute` is added by InheritanceMixin
    assert_true(hasattr(InheritanceMixin, attribute))

    root = SequenceFactory.build(policy={attribute: str(value)})
    ProblemFactory.build(parent=root)

    # InheritanceMixin will be used when processing the XML
    assert_in(InheritanceMixin, root.xblock_mixins)

    seq = self.process_xml(root)

    assert_equals(seq.unmixed_class, SequenceDescriptor)
    assert_not_equals(type(seq), SequenceDescriptor)

    # `attribute` is added to the constructed sequence, because
    # it's in the InheritanceMixin
    assert_equals(value, getattr(seq, attribute))

    # `attribute` is a known attribute, so we shouldn't include it
    # in xml_attributes
    assert_not_in(attribute, seq.xml_attributes)
def test_ext_eq():
    ext = Nifti1Extension('comment', '123')
    assert_true(ext == ext)
    assert_false(ext != ext)
    ext2 = Nifti1Extension('comment', '124')
    assert_false(ext == ext2)
    assert_true(ext != ext2)
def test_job_permissions(self):
    # Login as ourself
    finish = SHARE_JOBS.set_for_testing(True)
    try:
        response = TestJobBrowserWithHadoop.client.get('/jobbrowser/jobs/?format=json&user=' + '******')
        response = TestJobBrowserWithHadoop.client.get('/jobbrowser/jobs/?format=json&user=' + '******')
        assert_true(TestJobBrowserWithHadoop.hadoop_job_id_short in response.content)
    finally:
        finish()

    # Login as someone else
    client_not_me = make_logged_in_client('not_me', is_superuser=False, groupname='test')
    grant_access("not_me", "test", "jobbrowser")

    finish = SHARE_JOBS.set_for_testing(True)
    try:
        response = client_not_me.get('/jobbrowser/jobs/?format=json&user=' + '******')
        response = client_not_me.get('/jobbrowser/jobs/?format=json&user=')
        assert_false(TestJobBrowserWithHadoop.hadoop_job_id_short in response.content)
    finally:
        finish()
def test_ensure_home_directory_sync_ldap_users_groups():
    URL = reverse(sync_ldap_users_groups)

    reset_all_users()
    reset_all_groups()

    # Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
    ldap_access.CACHED_LDAP_CONN = LdapTestConnection()

    cluster = pseudo_hdfs4.shared_cluster()
    c = make_logged_in_client(cluster.superuser, is_superuser=True)
    cluster.fs.setuser(cluster.superuser)

    reset = []
    # Set to nonsensical value just to force new config usage.
    # Should continue to use cached connection.
    reset.append(desktop.conf.LDAP.LDAP_SERVERS.set_for_testing(get_nonsense_config()))

    try:
        c.post(reverse(add_ldap_users),
               dict(server='nonsense', username_pattern='curly', password1='test', password2='test'))
        assert_false(cluster.fs.exists('/user/curly'))
        assert_true(c.post(URL, dict(server='nonsense', ensure_home_directory=True)))
        assert_true(cluster.fs.exists('/user/curly'))
    finally:
        for finish in reset:
            finish()
        if cluster.fs.exists('/user/curly'):
            cluster.fs.rmtree('/user/curly')
def test_check1(self):
    # Test everything is ok if folder and filename are correct.
    test, traceback = self.task.check()
    assert_true(test)
    assert_false(traceback)
    array = self.task.get_from_database('Test_array')
    assert_equal(array.dtype.names, ('Freq', 'Log'))
def test_collections_list(self):
    c = self.conn

    logger.info("Creating three collections sample[1..3]")
    c.collection.sample1.create()
    c.collection.sample2.create()
    c.collection.sample3.create()

    logger.info("Getting list of collections")
    names = c.collection()
    for n in ["sample1", "sample2", "sample3"]:
        assert_true(n in names)

    logger.info("Deleting two of three collections")
    c.collection.sample1.delete()
    c.collection.sample3.delete()

    names = c.collection()
    for n in ["sample1", "sample3"]:
        assert_false(n in names)
    assert_true("sample2" in names)

    logger.info("Removing last collection")
    c.collection.sample2.delete()
def test_useradmin_ldap_integration():
    reset_all_users()
    reset_all_groups()

    # Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
    ldap_access.CACHED_LDAP_CONN = LdapTestConnection()

    # Try importing a user
    import_ldap_user("larry", import_by_dn=False)
    larry = User.objects.get(username="******")
    assert_true(larry.first_name == "Larry")
    assert_true(larry.last_name == "Stooge")
    assert_true(larry.email == "*****@*****.**")
    assert_true(get_profile(larry).creation_method == str(UserProfile.CreationMethod.EXTERNAL))

    # Should be a noop
    sync_ldap_users()
    sync_ldap_groups()
    assert_equal(len(User.objects.all()), 1)
    assert_equal(len(Group.objects.all()), 0)

    # Should import a group, but will only sync already-imported members
    import_ldap_group("Test Administrators", import_members=False, import_by_dn=False)
    assert_equal(len(User.objects.all()), 1)
    assert_equal(len(Group.objects.all()), 1)
    test_admins = Group.objects.get(name="Test Administrators")
    assert_equal(len(test_admins.user_set.all()), 1)
    assert_equal(test_admins.user_set.all()[0].username, larry.username)

    # Import all members of TestUsers
    import_ldap_group("TestUsers", import_members=True, import_by_dn=False)
    test_users = Group.objects.get(name="TestUsers")
    assert_true(LdapGroup.objects.filter(group=test_users).exists())
    assert_equal(len(test_users.user_set.all()), 3)

    ldap_access.CACHED_LDAP_CONN.remove_user_group_for_test("moe", "TestUsers")
    import_ldap_group("TestUsers", import_members=False, import_by_dn=False)
    assert_equal(len(test_users.user_set.all()), 2)
    assert_equal(len(User.objects.get(username="******").groups.all()), 0)

    ldap_access.CACHED_LDAP_CONN.add_user_group_for_test("moe", "TestUsers")
    import_ldap_group("TestUsers", import_members=False, import_by_dn=False)
    assert_equal(len(test_users.user_set.all()), 3)
    assert_equal(len(User.objects.get(username="******").groups.all()), 1)

    # Make sure that if a Hue user already exists with a naming collision, we
    # won't overwrite any of that user's information.
    hue_user = User.objects.create(username="******", first_name="Different", last_name="Guy")
    import_ldap_user("otherguy", import_by_dn=False)
    hue_user = User.objects.get(username="******")
    assert_equal(get_profile(hue_user).creation_method, str(UserProfile.CreationMethod.HUE))
    assert_equal(hue_user.first_name, "Different")

    # Make sure Hue groups with naming collisions don't get marked as LDAP groups
    hue_group = Group.objects.create(name="OtherGroup")
    hue_group.user_set.add(hue_user)
    hue_group.save()
    import_ldap_group("OtherGroup", import_members=False, import_by_dn=False)
    assert_false(LdapGroup.objects.filter(group=hue_group).exists())
    assert_true(hue_group.user_set.filter(username=hue_user.username).exists())
def test_estimable():
    rng = np.random.RandomState(20120713)
    N, P = (40, 10)
    X = rng.normal(size=(N, P))
    C = rng.normal(size=(1, P))
    isestimable = tools.isestimable
    assert_true(isestimable(C, X))
    assert_true(isestimable(np.eye(P), X))
    for row in np.eye(P):
        assert_true(isestimable(row, X))
    X = np.ones((40, 2))
    assert_true(isestimable([1, 1], X))
    assert_false(isestimable([1, 0], X))
    assert_false(isestimable([0, 1], X))
    assert_false(isestimable(np.eye(2), X))
    halfX = rng.normal(size=(N, 5))
    X = np.hstack([halfX, halfX])
    assert_false(isestimable(np.hstack([np.eye(5), np.zeros((5, 5))]), X))
    assert_false(isestimable(np.hstack([np.zeros((5, 5)), np.eye(5)]), X))
    assert_true(isestimable(np.hstack([np.eye(5), np.eye(5)]), X))
    # Test array-like for design
    XL = X.tolist()
    assert_true(isestimable(np.hstack([np.eye(5), np.eye(5)]), XL))
    # Test ValueError for incorrect number of columns
    X = rng.normal(size=(N, 5))
    for n in range(1, 4):
        assert_raises(ValueError, isestimable, np.ones((n,)), X)
    assert_raises(ValueError, isestimable, np.eye(4), X)
def test_dataverse_root_not_published(self, mock_files, mock_connection, mock_text):
    mock_connection.return_value = create_mock_connection()
    mock_files.return_value = []
    mock_text.return_value = 'Do you want to publish?'

    self.project.set_privacy('public')
    self.project.save()

    alias = self.node_settings.dataverse_alias
    doi = self.node_settings.dataset_doi
    external_account = create_external_account()
    self.user.external_accounts.add(external_account)
    self.user.save()
    self.node_settings.set_auth(external_account, self.user)
    self.node_settings.dataverse_alias = alias
    self.node_settings.dataset_doi = doi
    self.node_settings.save()

    url = api_url_for('dataverse_root_folder', pid=self.project._primary_key)

    # Contributor gets draft, no options
    res = self.app.get(url, auth=self.user.auth)
    assert_true(res.json[0]['permissions']['edit'])
    assert_false(res.json[0]['hasPublishedFiles'])
    assert_equal(res.json[0]['version'], 'latest')

    # Non-contributor gets nothing
    user2 = AuthUserFactory()
    res = self.app.get(url, auth=user2.auth)
    assert_equal(res.json, [])
def test_check2(self):
    # Test handling wrong folder and filename.
    self.task.folder = '{rr}'
    self.task.filename = '{tt}'
    test, traceback = self.task.check()
    assert_false(test)
    assert_equal(len(traceback), 2)
def test_disable_pixel_switching_current_off(self):
    c = self.c
    c._axes_manager.indices = (1, 1)
    c.active = False
    c.active_is_multidimensional = True
    c.active_is_multidimensional = False
    nt.assert_false(c.active)
def test_finite_range_nan():
    # Test finite range method and has_nan property
    for in_arr, res in (
            ([[-1, 0, 1], [np.inf, np.nan, -np.inf]], (-1, 1)),
            (np.array([[-1, 0, 1], [np.inf, np.nan, -np.inf]]), (-1, 1)),
            ([[np.nan], [np.nan]], (np.inf, -np.inf)),  # all nans slices
            (np.zeros((3, 4, 5)) + np.nan, (np.inf, -np.inf)),
            ([[-np.inf], [np.inf]], (np.inf, -np.inf)),  # all infs slices
            (np.zeros((3, 4, 5)) + np.inf, (np.inf, -np.inf)),
            ([[np.nan, -1, 2], [-2, np.nan, 1]], (-2, 2)),
            ([[np.nan, -np.inf, 2], [-2, np.nan, np.inf]], (-2, 2)),
            ([[-np.inf, 2], [np.nan, 1]], (1, 2)),  # good max case
            ([[np.nan, -np.inf, 2], [-2, np.nan, np.inf]], (-2, 2)),
            ([np.nan], (np.inf, -np.inf)),
            ([np.inf], (np.inf, -np.inf)),
            ([-np.inf], (np.inf, -np.inf)),
            ([np.inf, 1], (1, 1)),  # only look at finite values
            ([-np.inf, 1], (1, 1)),
            ([[], []], (np.inf, -np.inf)),  # empty array
            (np.array([[-3, 0, 1], [2, -1, 4]], dtype=np.int), (-3, 4)),
            (np.array([[1, 0, 1], [2, 3, 4]], dtype=np.uint), (0, 4)),
            ([0., 1, 2, 3], (0, 3)),
            # Complex comparison works as if they are floats
            ([[np.nan, -1 - 100j, 2], [-2, np.nan, 1 + 100j]], (-2, 2)),
            ([[np.nan, -1, 2 - 100j], [-2 + 100j, np.nan, 1]],
             (-2 + 100j, 2 - 100j)),
    ):
        for awt, kwargs in ((ArrayWriter, dict(check_scaling=False)),
                            (SlopeArrayWriter, {}),
                            (SlopeArrayWriter, dict(calc_scale=False)),
                            (SlopeInterArrayWriter, {}),
                            (SlopeInterArrayWriter, dict(calc_scale=False))):
            for out_type in NUMERIC_TYPES:
                has_nan = np.any(np.isnan(in_arr))
                try:
                    aw = awt(in_arr, out_type, **kwargs)
                except WriterError:
                    continue
                # Should not matter about the order of finite range method
                # call and has_nan property - test this is true
                assert_equal(aw.has_nan, has_nan)
                assert_equal(aw.finite_range(), res)
                aw = awt(in_arr, out_type, **kwargs)
                assert_equal(aw.finite_range(), res)
                assert_equal(aw.has_nan, has_nan)
                # Check float types work as complex
                in_arr = np.array(in_arr)
                if in_arr.dtype.kind == 'f':
                    c_arr = in_arr.astype(np.complex)
                    try:
                        aw = awt(c_arr, out_type, **kwargs)
                    except WriterError:
                        continue
                    aw = awt(c_arr, out_type, **kwargs)
                    assert_equal(aw.has_nan, has_nan)
                    assert_equal(aw.finite_range(), res)
    # Structured type cannot be nan and we can test this
    a = np.array([[1., 0, 1], [2, 3, 4]]).view([('f1', 'f')])
    aw = awt(a, a.dtype, **kwargs)
    assert_raises(TypeError, aw.finite_range)
    assert_false(aw.has_nan)
def test_unmask():
    # A delta in 3D
    shape = (10, 20, 30, 40)
    generator = np.random.RandomState(42)
    data4D = generator.rand(*shape)
    data3D = data4D[..., 0]
    mask = generator.randint(2, size=shape[:3])
    mask_img = Nifti1Image(mask, np.eye(4))
    mask = mask.astype(bool)

    masked4D = data4D[mask, :].T
    unmasked4D = data4D.copy()
    unmasked4D[~mask, :] = 0  # ~mask, not the deprecated -mask, for boolean negation
    masked3D = data3D[mask]
    unmasked3D = data3D.copy()
    unmasked3D[~mask] = 0

    # 4D Test, test value ordering at the same time.
    t = unmask(masked4D, mask_img, order="C").get_data()
    assert_equal(t.ndim, 4)
    assert_true(t.flags["C_CONTIGUOUS"])
    assert_false(t.flags["F_CONTIGUOUS"])
    assert_array_equal(t, unmasked4D)
    t = unmask([masked4D], mask_img, order="F")
    t = [t_.get_data() for t_ in t]
    assert_true(isinstance(t, types.ListType))
    assert_equal(t[0].ndim, 4)
    assert_false(t[0].flags["C_CONTIGUOUS"])
    assert_true(t[0].flags["F_CONTIGUOUS"])
    assert_array_equal(t[0], unmasked4D)

    # 3D Test - check both with Nifti1Image and file
    for create_files in (False, True):
        with write_tmp_imgs(mask_img, create_files=create_files) as filename:
            t = unmask(masked3D, filename, order="C").get_data()
            assert_equal(t.ndim, 3)
            assert_true(t.flags["C_CONTIGUOUS"])
            assert_false(t.flags["F_CONTIGUOUS"])
            assert_array_equal(t, unmasked3D)
            t = unmask([masked3D], filename, order="F")
            t = [t_.get_data() for t_ in t]
            assert_true(isinstance(t, types.ListType))
            assert_equal(t[0].ndim, 3)
            assert_false(t[0].flags["C_CONTIGUOUS"])
            assert_true(t[0].flags["F_CONTIGUOUS"])
            assert_array_equal(t[0], unmasked3D)

    # 5D test
    shape5D = (10, 20, 30, 40, 41)
    data5D = generator.rand(*shape5D)
    mask = generator.randint(2, size=shape5D[:-1])
    mask_img = Nifti1Image(mask, np.eye(4))
    mask = mask.astype(bool)

    masked5D = data5D[mask, :].T
    unmasked5D = data5D.copy()
    unmasked5D[~mask, :] = 0

    t = unmask(masked5D, mask_img).get_data()
    assert_equal(t.ndim, len(shape5D))
    assert_array_equal(t, unmasked5D)
    t = unmask([masked5D], mask_img)
    t = [t_.get_data() for t_ in t]
    assert_true(isinstance(t, types.ListType))
    assert_equal(t[0].ndim, len(shape5D))
    assert_array_equal(t[0], unmasked5D)

    # Error test
    dummy = generator.rand(500)
    if np_version >= [1, 7, 1]:
        assert_raises(IndexError, unmask, dummy, mask_img)
        assert_raises(IndexError, unmask, [dummy], mask_img)
    else:
        assert_raises(ValueError, unmask, dummy, mask_img)
        assert_raises(ValueError, unmask, [dummy], mask_img)
def test_handle_gist_link(self):
    with patch('desktop.lib.botserver.views.slack_client.chat_unfurl') as chat_unfurl, \
         patch('desktop.lib.botserver.views._make_unfurl_payload') as mock_unfurl_payload, \
         patch('desktop.lib.botserver.views._get_gist_document') as _get_gist_document, \
         patch('desktop.lib.botserver.views.slack_client.users_info') as users_info, \
         patch('desktop.lib.botserver.views.send_result_file') as send_result_file:

        channel_id = "channel"
        message_ts = "12.1"
        user_id = "<@user_id>"
        doc_data = {"statement_raw": "SELECT 98765"}
        links = [{"url": "http://demo.gethue.com/hue/gist?uuid=some_uuid"}]

        _get_gist_document.return_value = Mock(data=json.dumps(doc_data), owner=self.user, extra='mysql')
        mock_unfurl_payload.return_value = {
            'payload': {},
            'file_status': False,
        }
        # Slack user who is Hue user sends link
        users_info.return_value = {
            "ok": True,
            "user": {
                "profile": {
                    "email": "*****@*****.**"
                }
            }
        }

        handle_on_link_shared(channel_id, message_ts, links, user_id)

        assert_true(chat_unfurl.called)
        assert_false(send_result_file.called)

        # Gist document does not exist
        _get_gist_document.side_effect = PopupException('Gist does not exist')
        gist_url = "https://demo.gethue.com/hue/gist?uuid=6d1c407b-d999-4dfd-ad23-d3a46c19a427"
        assert_raises(PopupException, handle_on_link_shared, "channel", "12.1", [{"url": gist_url}], "<@user_id>")

        # Cannot unfurl with invalid gist link
        inv_gist_url = "http://demo.gethue.com/hue/gist?uuids/=invalid_link"
        assert_raises(PopupException, handle_on_link_shared, "channel", "12.1", [{"url": inv_gist_url}], "<@user_id>")
def assert_not_in(x, container):
    assert_false(x in container, msg="%r in %r" % (x, container))
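# A symmetric counterpart, for completeness (an assumed companion mirroring
# the helper above, not taken from the original source):
def assert_in(x, container):
    assert_true(x in container, msg="%r not in %r" % (x, container))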
def test_error_code_return(self, log_mock, requests_mock):
    """SlackApp - Gather Logs - Bad Response"""
    requests_mock.return_value = Mock(status_code=404)
    assert_false(self._app._gather_logs())
    log_mock.assert_called_with('Received bad response from slack')
def test_single_end_tokens(self):
    """Test the tokens of an unpaired unindexed structure"""
    structure = '151T'
    read_structure = ReadStructure(structure)
    assert_false(read_structure.is_indexed)
    assert_false(read_structure.is_dual_indexed)
    assert_true(read_structure.is_single_end)
    assert_false(read_structure.is_paired_end)
    assert_false(read_structure.has_indexes)
    assert_false(read_structure.has_skips)
    assert_false(read_structure.has_umi)
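# Hedged sketch of the read-structure token parsing exercised above: a read
# structure is a series of <number><operator> tokens, e.g. '151T' (template),
# '8B' (sample barcode), '10M' (UMI), '5S' (skip). Assumed semantics and a
# hypothetical parser, not the library's implementation.
import re


def sketch_parse_read_structure(structure):
    tokens = re.findall(r'(\d+)([TBMS])', structure)
    return [(int(length), op) for length, op in tokens]

# sketch_parse_read_structure('151T') -> [(151, 'T')]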
def test_filter_is_img(self):
    assert_false(fs.filter_is_img('foo.bar'))
    assert_false(fs.filter_is_img('foo.png.bar'))
    assert_true(fs.filter_is_img('foo.png'))
    assert_true(fs.filter_is_img('foo.jpg'))
def test_nan():
    data = np.ones((9, 9, 9))
    data[0] = np.nan
    data[:, 0] = np.nan
    data[:, :, 0] = np.nan
    data[-1] = np.nan
    data[:, -1] = np.nan
    data[:, :, -1] = np.nan
    data[3:-3, 3:-3, 3:-3] = 10
    img = Nifti1Image(data, np.eye(4))
    masker = NiftiMasker(mask_args=dict(opening=0))
    masker.fit(img)
    mask = masker.mask_img_.get_data()
    assert_true(mask[1:-1, 1:-1, 1:-1].all())
    assert_false(mask[0].any())
    assert_false(mask[:, 0].any())
    assert_false(mask[:, :, 0].any())
    assert_false(mask[-1].any())
    assert_false(mask[:, -1].any())
    assert_false(mask[:, :, -1].any())
def test_collection_exists(self):
    assert_false(self.db.collection_exists('does_not_exist'))
def test_user_admin():
    FUNNY_NAME = '~`!@#$%^&*()_-+={}[]|\;"<>?/,.'
    FUNNY_NAME_QUOTED = urllib.quote(FUNNY_NAME)

    reset_all_users()
    reset_all_groups()
    useradmin.conf.DEFAULT_USER_GROUP.set_for_testing('test_default')
    useradmin.conf.PASSWORD_POLICY.IS_ENABLED.set_for_testing(False)
    reset_password_policy()

    c = make_logged_in_client('test', is_superuser=True)
    user = User.objects.get(username='******')

    # Test basic output.
    response = c.get('/useradmin/')
    assert_true(len(response.context["users"]) > 0)
    assert_true("Hue Users" in response.content)

    # Test editing a superuser
    # Just check that this comes back
    response = c.get('/useradmin/users/edit/test')
    # Edit it, to add a first and last name
    response = c.post('/useradmin/users/edit/test',
                      dict(username="******",
                           first_name=u"Inglés",
                           last_name=u"Español",
                           is_superuser="******",
                           is_active="True"),
                      follow=True)
    assert_true("User information updated" in response.content,
                "Notification should be displayed in: %s" % response.content)
    # Edit it, can't change username
    response = c.post('/useradmin/users/edit/test',
                      dict(username="******",
                           first_name=u"Inglés",
                           last_name=u"Español",
                           is_superuser="******",
                           is_active="True"),
                      follow=True)
    assert_true("You cannot change a username" in response.content)
    # Now make sure that those were materialized
    response = c.get('/useradmin/users/edit/test')
    assert_equal(smart_unicode("Inglés"), response.context["form"].instance.first_name)
    assert_true("Español" in response.content)
    # Shouldn't be able to demote to non-superuser
    response = c.post('/useradmin/users/edit/test',
                      dict(username="******",
                           first_name=u"Inglés", last_name=u"Español",
                           is_superuser=False, is_active=True))
    assert_true("You cannot remove" in response.content,
                "Shouldn't be able to remove the last superuser")
    # Shouldn't be able to delete oneself
    response = c.post('/useradmin/users/delete', {u'user_ids': [user.id]})
    assert_true("You cannot remove yourself" in response.content,
                "Shouldn't be able to delete the last superuser")

    # Let's try changing the password
    response = c.post('/useradmin/users/edit/test',
                      dict(username="******",
                           first_name="Tom", last_name="Tester",
                           is_superuser=True,
                           password1="foo", password2="foobar"))
    assert_equal(["Passwords do not match."],
                 response.context["form"]["password2"].errors,
                 "Should have complained about mismatched password")
    # Old password not confirmed
    response = c.post('/useradmin/users/edit/test',
                      dict(username="******",
                           first_name="Tom", last_name="Tester",
                           password1="foo", password2="foo",
                           is_active=True, is_superuser=True))
    assert_equal(["The old password does not match the current password."],
                 response.context["form"]["password_old"].errors,
                 "Should have complained about old password")
    # Good now
    response = c.post('/useradmin/users/edit/test',
                      dict(username="******",
                           first_name="Tom", last_name="Tester",
                           password1="foo", password2="foo", password_old="test",
                           is_active=True, is_superuser=True))
    assert_true(User.objects.get(username="******").is_superuser)
    assert_true(User.objects.get(username="******").check_password("foo"))
    # Change it back!
    response = c.post('/useradmin/users/edit/test',
                      dict(username="******",
                           first_name="Tom", last_name="Tester",
                           password1="test", password2="test", password_old="foo",
                           is_active="True", is_superuser="******"))
    assert_true(User.objects.get(username="******").check_password("test"))
    assert_true(make_logged_in_client(username="******", password="******"),
                "Check that we can still login.")

    # Check new user form for default group
    group = get_default_user_group()
    response = c.get('/useradmin/users/new')
    assert_true(response)
    assert_true(('<option value="%s" selected="selected">%s</option>' % (group.id, group.name)) in str(response))

    # Create a new regular user (duplicate name)
    response = c.post('/useradmin/users/new',
                      dict(username="******", password1="test", password2="test"))
    assert_equal({'username': ["User with this Username already exists."]},
                 response.context["form"].errors)

    # Create a new regular user (for real)
    response = c.post('/useradmin/users/new',
                      dict(username=FUNNY_NAME,
                           password1="test", password2="test",
                           is_active="True"))
    response = c.get('/useradmin/')
    assert_true(FUNNY_NAME_QUOTED in response.content)
    assert_true(len(response.context["users"]) > 1)
    assert_true("Hue Users" in response.content)
    # Validate profile is created.
    assert_true(UserProfile.objects.filter(user__username=FUNNY_NAME).exists())

    # Need to give access to the user for the rest of the test
    group = Group.objects.create(name="test-group")
    perm = HuePermission.objects.get(app='useradmin', action='access')
    GroupPermission.objects.create(group=group, hue_permission=perm)

    # Verify that we can modify user groups through the user admin pages
    response = c.post('/useradmin/users/new',
                      dict(username="******", password1="test", password2="test", groups=[group.pk]))
    User.objects.get(username='******')
    assert_true(User.objects.get(username='******').groups.filter(name='test-group').exists())
    response = c.post('/useradmin/users/edit/group_member',
                      dict(username="******", groups=[]))
    assert_false(User.objects.get(username='******').groups.filter(name='test-group').exists())

    # Check permissions by logging in as the new user
    c_reg = make_logged_in_client(username=FUNNY_NAME, password="******")
    test_user = User.objects.get(username=FUNNY_NAME)
    test_user.groups.add(Group.objects.get(name="test-group"))
    test_user.save()

    # Regular user should be able to modify oneself
    response = c_reg.post('/useradmin/users/edit/%s' % (FUNNY_NAME_QUOTED,),
                          dict(username=FUNNY_NAME,
                               first_name="Hello",
                               is_active=True,
                               groups=[group.id for group in test_user.groups.all()]),
                          follow=True)
    assert_equal(response.status_code, 200)
    response = c_reg.get('/useradmin/users/edit/%s' % (FUNNY_NAME_QUOTED,), follow=True)
    assert_equal(response.status_code, 200)
    assert_equal("Hello", response.context["form"].instance.first_name)
    funny_user = User.objects.get(username=FUNNY_NAME)
    # Can't edit other people.
    response = c_reg.post("/useradmin/users/delete", {u'user_ids': [funny_user.id]})
    assert_true("You must be a superuser" in response.content,
                "Regular user can't edit other people")

    # Revert to regular "test" user, that has superuser powers.
    c_su = make_logged_in_client()

    # Inactivate FUNNY_NAME
    c_su.post('/useradmin/users/edit/%s' % (FUNNY_NAME_QUOTED,),
              dict(username=FUNNY_NAME,
                   first_name="Hello",
                   is_active=False))

    # Now make sure FUNNY_NAME can't log back in
    response = c_reg.get('/useradmin/users/edit/%s' % (FUNNY_NAME_QUOTED,))
    assert_true(response.status_code == 302 and "login" in response["location"],
                "Inactivated user gets redirected to login page")

    # Delete that regular user
    funny_profile = get_profile(test_user)
    response = c_su.post('/useradmin/users/delete', {u'user_ids': [funny_user.id]})
    assert_equal(302, response.status_code)
    assert_false(User.objects.filter(username=FUNNY_NAME).exists())
    assert_false(UserProfile.objects.filter(id=funny_profile.id).exists())

    # Bulk delete users
    u1 = User.objects.create(username='******', password="******")
    u2 = User.objects.create(username='******', password="******")
    assert_equal(User.objects.filter(username__in=['u1', 'u2']).count(), 2)
    response = c_su.post('/useradmin/users/delete', {u'user_ids': [u1.id, u2.id]})
    assert_equal(User.objects.filter(username__in=['u1', 'u2']).count(), 0)

    # Make sure that user deletion works if the user has never performed a request.
    funny_user = User.objects.create(username=FUNNY_NAME, password='******')
    assert_true(User.objects.filter(username=FUNNY_NAME).exists())
    assert_false(UserProfile.objects.filter(user__username=FUNNY_NAME).exists())
    response = c_su.post('/useradmin/users/delete', {u'user_ids': [funny_user.id]})
    assert_equal(302, response.status_code)
    assert_false(User.objects.filter(username=FUNNY_NAME).exists())
    assert_false(UserProfile.objects.filter(user__username=FUNNY_NAME).exists())

    # You shouldn't be able to create a user without a password
    response = c_su.post('/useradmin/users/new', dict(username="******"))
    assert_true("You must specify a password when creating a new user." in response.content)
def test_no_creators(self):
    """Test a product with no creators"""
    product = self.amazon.lookup(ItemId="8420658537")
    assert_false(product.creators)
def test_import_submission(ctx):
    # type: (ScriptContext) -> None
    ingest_db = ctx.db.get_ingest_db()
    app_db = ctx.db.get_app_db()
    fingerprint_db = ctx.db.get_fingerprint_db()

    # first submission
    submission_id = insert_submission(ingest_db, {
        'fingerprint': TEST_1_FP_RAW,
        'length': TEST_1_LENGTH,
        'bitrate': 192,
        'account_id': 1,
        'application_id': 1,
        'format': 'FLAC',
        'mbid': '1f143d2b-db04-47cc-82a0-eee6efaa1142',
        'puid': '7c1c6753-c834-44b1-884a-a5166c093139',
    })
    query = tables.submission.select(tables.submission.c.id == submission_id)
    submission = ingest_db.execute(query).fetchone()
    assert_false(submission['handled'])

    fingerprint = import_submission(ingest_db, app_db, fingerprint_db, ctx.index, submission)
    assert fingerprint is not None
    assert_equals(1, fingerprint['id'])
    assert_equals(5, fingerprint['track_id'])

    query = tables.submission.select(tables.submission.c.id == submission_id)
    submission = ingest_db.execute(query).fetchone()
    assert_true(submission['handled'])

    query = tables.track_mbid.select(
        tables.track_mbid.c.track_id == fingerprint['track_id'])
    track_mbid = fingerprint_db.execute(query).fetchone()
    assert_equals(1, track_mbid['submission_count'])

    query = tables.track_puid.select(
        tables.track_puid.c.track_id == fingerprint['track_id'])
    track_puid = fingerprint_db.execute(query).fetchone()
    assert_equals(1, track_puid['submission_count'])

    query = tables.fingerprint.select(
        tables.fingerprint.c.id == fingerprint['id'])
    fingerprint = fingerprint_db.execute(query).fetchone()
    assert fingerprint is not None
    assert_equals(1, fingerprint['submission_count'])
    assert_equals(1, fingerprint['format_id'])

    # second submission
    submission_id = insert_submission(ingest_db, {
        'fingerprint': TEST_2_FP_RAW,
        'length': TEST_2_LENGTH,
        'bitrate': 192,
        'source_id': 1,
        'format_id': 1,
    })
    query = tables.submission.select(tables.submission.c.id == submission_id)
    submission = ingest_db.execute(query).fetchone()
    assert_false(submission['handled'])

    fingerprint = import_submission(ingest_db, app_db, fingerprint_db, ctx.index, submission)
    assert fingerprint is not None
    assert_equals(2, fingerprint['id'])
    assert_equals(6, fingerprint['track_id'])

    query = tables.submission.select(tables.submission.c.id == submission_id)
    submission = ingest_db.execute(query).fetchone()
    assert_true(submission['handled'])

    # third submission (same as the first one)
    submission_id = insert_submission(ingest_db, {
        'fingerprint': TEST_1_FP_RAW,
        'length': TEST_1_LENGTH,
        'bitrate': 192,
        'source_id': 1,
        'format_id': 1,
        'mbid': '1f143d2b-db04-47cc-82a0-eee6efaa1142',
        'puid': '7c1c6753-c834-44b1-884a-a5166c093139',
    })
    query = tables.submission.select(tables.submission.c.id == submission_id)
    submission = ingest_db.execute(query).fetchone()
    assert_false(submission['handled'])

    fingerprint = import_submission(ingest_db, app_db, fingerprint_db, ctx.index, submission)
    assert fingerprint is not None
    assert_equals(1, fingerprint['id'])
    assert_equals(5, fingerprint['track_id'])

    query = tables.submission.select(tables.submission.c.id == submission_id)
    submission = ingest_db.execute(query).fetchone()
    assert_true(submission['handled'])

    query = tables.track_mbid.select(
        tables.track_mbid.c.track_id == fingerprint['track_id'])
    track_mbid = fingerprint_db.execute(query).fetchone()
    assert_equals(2, track_mbid['submission_count'])

    query = tables.track_puid.select(
        tables.track_puid.c.track_id == fingerprint['track_id'])
    track_puid = fingerprint_db.execute(query).fetchone()
    assert_equals(2, track_puid['submission_count'])

    query = tables.fingerprint.select(
        tables.fingerprint.c.id == fingerprint['id'])
    fingerprint = fingerprint_db.execute(query).fetchone()
    assert fingerprint is not None
    assert_equals(2, fingerprint['submission_count'])
def test_process_subkeys_nested_records(self):
    """Rules Engine - Required Subkeys with Nested Records"""
    def cloudtrail_us_east_logs(rec):
        return (
            'us-east' in rec['awsRegion'] and
            'AWS' in rec['requestParameters']['program']
        )

    rule_attrs = RuleAttributes(
        rule_name='cloudtrail_us_east_logs',
        rule_function=cloudtrail_us_east_logs,
        matchers=[],
        datatypes=[],
        logs=['test_log_type_json_nested'],
        outputs=['s3:sample_bucket'],
        req_subkeys={'requestParameters': ['program']},
        context={}
    )

    data = json.dumps({
        'Records': [
            {
                'eventVersion': '1.05',
                'eventID': '2',
                'eventTime': '3',
                'requestParameters': {
                    'program': 'AWS CLI'
                },
                'eventType': 'CreateSomeResource',
                'responseElements': 'Response',
                'awsRegion': 'us-east-1',
                'eventName': 'CreateResource',
                'userIdentity': {
                    'name': 'john',
                    'key': 'AVC124313414'
                },
                'eventSource': 'Kinesis',
                'requestID': '12345',
                'userAgent': 'AWS CLI v1.3109',
                'sourceIPAddress': '127.0.0.1',
                'recipientAccountId': '123456123456'
            },
            {
                'eventVersion': '1.05',
                'eventID': '2',
                'eventTime': '3',
                'requestParameters': {
                    'program': 'AWS UI'
                },
                'eventType': 'CreateSomeOtherResource',
                'responseElements': 'Response',
                'awsRegion': 'us-east-2',
                'eventName': 'CreateResource',
                'userIdentity': {
                    'name': 'ann',
                    'key': 'AD114313414'
                },
                'eventSource': 'Lambda',
                'requestID': '12345',
                'userAgent': 'Google Chrome 42',
                'sourceIPAddress': '127.0.0.2',
                'recipientAccountId': '123456123456'
            },
            {
                'eventVersion': '1.05',
                'eventID': '2',
                'eventTime': '3',
                # Translates from null in JSON to None in Python
                'requestParameters': None,
                'eventType': 'CreateSomeResource',
                'responseElements': 'Response',
                'awsRegion': 'us-east-1',
                'eventName': 'CreateResource',
                'userIdentity': {
                    'name': 'john',
                    'key': 'AVC124313414'
                },
                'eventSource': 'Kinesis',
                'requestID': '12345',
                'userAgent': 'AWS CLI',
                'sourceIPAddress': '127.0.0.1',
                'recipientAccountId': '123456123456'
            }
        ]
    })

    schema = self.config['logs']['test_cloudtrail']['schema']
    options = self.config['logs']['test_cloudtrail']['configuration']

    parser_class = get_parser('json')
    parser = parser_class(options)
    parsed_result = parser.parse(schema, data)

    valid_record = [rec for rec in parsed_result if rec['requestParameters'] is not None][0]
    valid_subkey_check = StreamRules.process_subkeys(valid_record, 'json', rule_attrs)
    assert_true(valid_subkey_check)

    invalid_record = [rec for rec in parsed_result if rec['requestParameters'] is None][0]
    invalid_subkey_check = StreamRules.process_subkeys(invalid_record, 'json', rule_attrs)
    assert_false(invalid_subkey_check)
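# Hedged sketch of the subkey check the test above exercises: a record passes
# only if each required top-level key is present, non-null, and contains all
# of its required subkeys. Assumed logic for illustration, not StreamAlert's
# actual implementation.
def sketch_process_subkeys(record, req_subkeys):
    for key, subkeys in req_subkeys.items():
        value = record.get(key)
        if not isinstance(value, dict):
            # covers missing keys and JSON null (None), as in the third record
            return False
        if not all(subkey in value for subkey in subkeys):
            return False
    return True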
def test_generate_headers_empty_response(self, requests_mock):
    """OneLoginApp - Generate Headers, Empty Response"""
    requests_mock.return_value = Mock(status_code=200, json=Mock(return_value=None))
    assert_false(self._app._generate_headers())
def test_user_admin_password_policy():
    reset_all_users()
    reset_all_groups()

    # Set up password policy
    password_hint = password_error_msg = ("The password must be at least 8 characters long, "
                                          "and must contain both uppercase and lowercase letters, "
                                          "at least one number, and at least one special character.")
    password_rule = r"^(?=.*?[A-Z])(?=(.*[a-z]){1,})(?=(.*[\d]){1,})(?=(.*[\W_]){1,}).{8,}$"

    useradmin.conf.PASSWORD_POLICY.IS_ENABLED.set_for_testing(True)
    useradmin.conf.PASSWORD_POLICY.PWD_RULE.set_for_testing(password_rule)
    useradmin.conf.PASSWORD_POLICY.PWD_HINT.set_for_testing(password_hint)
    useradmin.conf.PASSWORD_POLICY.PWD_ERROR_MESSAGE.set_for_testing(password_error_msg)
    reset_password_policy()

    # Test first-ever login with password policy enabled
    c = Client()

    response = c.get('/accounts/login/')
    assert_equal(200, response.status_code)
    assert_true(response.context['first_login_ever'])

    response = c.post('/accounts/login/', dict(username="******", password="******"))
    assert_true(response.context['first_login_ever'])
    assert_equal([password_error_msg], response.context["form"]["password"].errors)

    response = c.post('/accounts/login/', dict(username="******", password="******"), follow=True)
    assert_equal(200, response.status_code)
    assert_true(User.objects.get(username="******").is_superuser)
    assert_true(User.objects.get(username="******").check_password("foobarTest1["))
    c.get('/accounts/logout')

    # Test changing a user's password
    c = make_logged_in_client('superuser', is_superuser=True)

    # Test password hint is displayed
    response = c.get('/useradmin/users/edit/superuser')
    assert_true(password_hint in response.content)

    # Password is less than 8 characters
    response = c.post('/useradmin/users/edit/superuser',
                      dict(username="******", is_superuser=True,
                           password1="foo", password2="foo"))
    assert_equal([password_error_msg], response.context["form"]["password1"].errors)

    # Password is more than 8 characters long but does not have a special character
    response = c.post('/useradmin/users/edit/superuser',
                      dict(username="******", is_superuser=True,
                           password1="foobarTest1", password2="foobarTest1"))
    assert_equal([password_error_msg], response.context["form"]["password1"].errors)

    # Password1 and Password2 are valid but they do not match
    response = c.post('/useradmin/users/edit/superuser',
                      dict(username="******", is_superuser=True,
                           password1="foobarTest1??", password2="foobarTest1?",
                           password_old="foobarTest1[", is_active=True))
    assert_equal(["Passwords do not match."], response.context["form"]["password2"].errors)

    # Password is valid now
    c.post('/useradmin/users/edit/superuser',
           dict(username="******", is_superuser=True,
                password1="foobarTest1[", password2="foobarTest1[",
                password_old="test", is_active=True))
    assert_true(User.objects.get(username="******").is_superuser)
    assert_true(User.objects.get(username="******").check_password("foobarTest1["))

    # Test creating a new user
    response = c.get('/useradmin/users/new')
    assert_true(password_hint in response.content)

    # Password is less than 8 characters
    response = c.post('/useradmin/users/new',
                      dict(username="******", is_superuser=False,
                           password1="foo", password2="foo"))
    assert_equal({'password1': [password_error_msg], 'password2': [password_error_msg]},
                 response.context["form"].errors)

    # Password is more than 8 characters long but does not have a special character
    response = c.post('/useradmin/users/new',
                      dict(username="******", is_superuser=False,
                           password1="foobarTest1", password2="foobarTest1"))
    assert_equal({'password1': [password_error_msg], 'password2': [password_error_msg]},
                 response.context["form"].errors)

    # Password1 and Password2 are valid but they do not match
    response = c.post('/useradmin/users/new',
                      dict(username="******", is_superuser=False,
                           password1="foobarTest1[", password2="foobarTest1?"))
    assert_equal({'password2': ["Passwords do not match."]}, response.context["form"].errors)

    # Password is valid now
    c.post('/useradmin/users/new',
           dict(username="******", is_superuser=False,
                password1="foobarTest1[", password2="foobarTest1[", is_active=True))
    assert_false(User.objects.get(username="******").is_superuser)
    assert_true(User.objects.get(username="******").check_password("foobarTest1["))
def test_get_onelogin_events_no_headers(self):
    """OneLoginApp - Get OneLogin Events, No Headers"""
    assert_false(self._app._get_onelogin_events())
def check_mmap(hdr, offset, proxy_class, has_scaling=False, unscaled_is_view=True):
    """ Assert that array proxies return memory maps as expected

    Parameters
    ----------
    hdr : object
        Image header instance
    offset : int
        Offset in bytes of image data in file (that we will write)
    proxy_class : class
        Class of image array proxy to test
    has_scaling : {False, True}
        True if the `hdr` says to apply scaling to the output data, False
        otherwise.
    unscaled_is_view : {True, False}
        True if getting the unscaled data returns a view of the array.  If
        False, then the type of the returned array will depend on whether
        numpy has the old, viral (< 1.12) memmap behavior (returns memmap) or
        the new behavior (returns ndarray).  See:
        https://github.com/numpy/numpy/pull/7406
    """
    shape = hdr.get_data_shape()
    arr = np.arange(np.prod(shape), dtype=hdr.get_data_dtype()).reshape(shape)
    fname = 'test.bin'
    # Whether unscaled array memory backed by memory map (regardless of what
    # numpy says).
    unscaled_really_mmap = unscaled_is_view
    # Whether scaled array memory backed by memory map (regardless of what
    # numpy says).
    scaled_really_mmap = unscaled_really_mmap and not has_scaling
    # Whether ufunc on memmap returns memmap
    viral_memmap = memmap_after_ufunc()
    with InTemporaryDirectory():
        with open(fname, 'wb') as fobj:
            fobj.write(b' ' * offset)
            fobj.write(arr.tostring(order='F'))
        for mmap, expected_mode in (
                # mmap value, expected memmap mode
                # mmap=None -> no mmap value
                # expected mode=None -> no memmap returned
                (None, 'c'),
                (True, 'c'),
                ('c', 'c'),
                ('r', 'r'),
                (False, None)):
            kwargs = {}
            if mmap is not None:
                kwargs['mmap'] = mmap
            prox = proxy_class(fname, hdr, **kwargs)
            unscaled = prox.get_unscaled()
            back_data = np.asanyarray(prox)
            unscaled_is_mmap = isinstance(unscaled, np.memmap)
            back_is_mmap = isinstance(back_data, np.memmap)
            if expected_mode is None:
                assert_false(unscaled_is_mmap)
                assert_false(back_is_mmap)
            else:
                assert_equal(unscaled_is_mmap,
                             viral_memmap or unscaled_really_mmap)
                assert_equal(back_is_mmap,
                             viral_memmap or scaled_really_mmap)
                if scaled_really_mmap:
                    assert_equal(back_data.mode, expected_mode)
            del prox, back_data
        # Check that mmap is keyword-only
        assert_raises(TypeError, proxy_class, fname, hdr, True)
        # Check invalid values raise error
        assert_raises(ValueError, proxy_class, fname, hdr, mmap='rw')
        assert_raises(ValueError, proxy_class, fname, hdr, mmap='r+')
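# The memmap_after_ufunc() helper referenced above probes numpy's behavior at
# runtime. A minimal sketch of one plausible implementation (assumed, not
# necessarily the library's own): map a small file and check whether a ufunc
# result is still a np.memmap (numpy < 1.12) or a plain ndarray (numpy >= 1.12).
import os
import tempfile

import numpy as np


def sketch_memmap_after_ufunc():
    with tempfile.TemporaryDirectory() as tmpdir:
        fname = os.path.join(tmpdir, 'probe.bin')
        np.arange(6, dtype=np.int16).tofile(fname)
        mm = np.memmap(fname, dtype=np.int16, mode='r')
        result = mm + 1  # a ufunc: memmap in old numpy, ndarray in new
        is_viral = isinstance(result, np.memmap)
        del mm, result  # release the map before the directory is removed
    return is_viral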
def test_with_multiedges_self_loops(self):
    G = cycle_graph(10)
    XG = nx.Graph()
    XG.add_nodes_from(G)
    XG.add_weighted_edges_from((u, v, u) for u, v in G.edges())
    XGM = nx.MultiGraph()
    XGM.add_nodes_from(G)
    XGM.add_weighted_edges_from((u, v, u) for u, v in G.edges())
    XGM.add_edge(0, 1, weight=2)  # multiedge
    XGS = nx.Graph()
    XGS.add_nodes_from(G)
    XGS.add_weighted_edges_from((u, v, u) for u, v in G.edges())
    XGS.add_edge(0, 0, weight=100)  # self loop

    # Dict of dicts
    # with self loops, OK
    dod = to_dict_of_dicts(XGS)
    GG = from_dict_of_dicts(dod, create_using=nx.Graph)
    assert_nodes_equal(XGS.nodes(), GG.nodes())
    assert_edges_equal(XGS.edges(), GG.edges())
    GW = to_networkx_graph(dod, create_using=nx.Graph)
    assert_nodes_equal(XGS.nodes(), GW.nodes())
    assert_edges_equal(XGS.edges(), GW.edges())
    GI = nx.Graph(dod)
    assert_nodes_equal(XGS.nodes(), GI.nodes())
    assert_edges_equal(XGS.edges(), GI.edges())

    # Dict of lists
    # with self loops, OK
    dol = to_dict_of_lists(XGS)
    GG = from_dict_of_lists(dol, create_using=nx.Graph)
    # dict of lists throws away edge data so set it to none
    enone = [(u, v, {}) for (u, v, d) in XGS.edges(data=True)]
    assert_nodes_equal(sorted(XGS.nodes()), sorted(GG.nodes()))
    assert_edges_equal(enone, sorted(GG.edges(data=True)))
    GW = to_networkx_graph(dol, create_using=nx.Graph)
    assert_nodes_equal(sorted(XGS.nodes()), sorted(GW.nodes()))
    assert_edges_equal(enone, sorted(GW.edges(data=True)))
    GI = nx.Graph(dol)
    assert_nodes_equal(sorted(XGS.nodes()), sorted(GI.nodes()))
    assert_edges_equal(enone, sorted(GI.edges(data=True)))

    # Dict of dicts
    # with multiedges, OK
    dod = to_dict_of_dicts(XGM)
    GG = from_dict_of_dicts(dod, create_using=nx.MultiGraph,
                            multigraph_input=True)
    assert_nodes_equal(sorted(XGM.nodes()), sorted(GG.nodes()))
    assert_edges_equal(sorted(XGM.edges()), sorted(GG.edges()))
    GW = to_networkx_graph(dod, create_using=nx.MultiGraph,
                           multigraph_input=True)
    assert_nodes_equal(sorted(XGM.nodes()), sorted(GW.nodes()))
    assert_edges_equal(sorted(XGM.edges()), sorted(GW.edges()))
    GI = nx.MultiGraph(dod)
    # convert can't tell whether to duplicate edges!
    assert_nodes_equal(sorted(XGM.nodes()), sorted(GI.nodes()))
    # assert_not_equal(sorted(XGM.edges()), sorted(GI.edges()))
    assert_false(sorted(XGM.edges()) == sorted(GI.edges()))
    GE = from_dict_of_dicts(dod, create_using=nx.MultiGraph,
                            multigraph_input=False)
    assert_nodes_equal(sorted(XGM.nodes()), sorted(GE.nodes()))
    assert_not_equal(sorted(XGM.edges()), sorted(GE.edges()))
    GI = nx.MultiGraph(XGM)
    assert_nodes_equal(sorted(XGM.nodes()), sorted(GI.nodes()))
    assert_edges_equal(sorted(XGM.edges()), sorted(GI.edges()))
    GM = nx.MultiGraph(G)
    assert_nodes_equal(sorted(GM.nodes()), sorted(G.nodes()))
    assert_edges_equal(sorted(GM.edges()), sorted(G.edges()))

    # Dict of lists
    # with multiedges, OK, but better write as DiGraph else you'll
    # get double edges
    dol = to_dict_of_lists(G)
    GG = from_dict_of_lists(dol, create_using=nx.MultiGraph)
    assert_nodes_equal(sorted(G.nodes()), sorted(GG.nodes()))
    assert_edges_equal(sorted(G.edges()), sorted(GG.edges()))
    GW = to_networkx_graph(dol, create_using=nx.MultiGraph)
    assert_nodes_equal(sorted(G.nodes()), sorted(GW.nodes()))
    assert_edges_equal(sorted(G.edges()), sorted(GW.edges()))
    GI = nx.MultiGraph(dol)
    assert_nodes_equal(sorted(G.nodes()), sorted(GI.nodes()))
    assert_edges_equal(sorted(G.edges()), sorted(GI.edges()))
def test_get_onelogin_events_empty_response(self, requests_mock):
    """OneLoginApp - Get OneLogin Events, Empty Response"""
    self._app._auth_headers = True
    requests_mock.return_value = Mock(status_code=200, json=Mock(return_value=None))
    assert_false(self._app._get_onelogin_events())
def test_scaling_needed():
    # Structured types return True if dtypes same, raise error otherwise
    dt_def = [('f', 'i4')]
    arr = np.ones(10, dt_def)
    for t in NUMERIC_TYPES:
        assert_raises(WriterError, ArrayWriter, arr, t)
        narr = np.ones(10, t)
        assert_raises(WriterError, ArrayWriter, narr, dt_def)
    assert_false(ArrayWriter(arr).scaling_needed())
    assert_false(ArrayWriter(arr, dt_def).scaling_needed())
    # Any numeric type that can cast, needs no scaling
    for in_t in NUMERIC_TYPES:
        for out_t in NUMERIC_TYPES:
            if np.can_cast(in_t, out_t):
                aw = ArrayWriter(np.ones(10, in_t), out_t)
                assert_false(aw.scaling_needed())
    for in_t in NUMERIC_TYPES:
        # Numeric types to complex never need scaling
        arr = np.ones(10, in_t)
        for out_t in COMPLEX_TYPES:
            assert_false(ArrayWriter(arr, out_t).scaling_needed())
    # Attempts to scale from complex to anything else fails
    for in_t in COMPLEX_TYPES:
        for out_t in FLOAT_TYPES + IUINT_TYPES:
            arr = np.ones(10, in_t)
            assert_raises(WriterError, ArrayWriter, arr, out_t)
    # Scaling from anything but complex to floats is OK
    for in_t in FLOAT_TYPES + IUINT_TYPES:
        arr = np.ones(10, in_t)
        for out_t in FLOAT_TYPES:
            assert_false(ArrayWriter(arr, out_t).scaling_needed())
    # For any other output type, arrays with no data don't need scaling
    for in_t in FLOAT_TYPES + IUINT_TYPES:
        arr_0 = np.zeros(10, in_t)
        arr_e = []
        for out_t in IUINT_TYPES:
            assert_false(ArrayWriter(arr_0, out_t).scaling_needed())
            assert_false(ArrayWriter(arr_e, out_t).scaling_needed())
    # Going to (u)ints, non-finite arrays don't need scaling for writers that
    # can do scaling, because these use finite_range to threshold the input
    # data; ArrayWriter does not do this, so its scaling_needed is True
    for in_t in FLOAT_TYPES:
        arr_nan = np.zeros(10, in_t) + np.nan
        arr_inf = np.zeros(10, in_t) + np.inf
        arr_minf = np.zeros(10, in_t) - np.inf
        arr_mix = np.array([np.nan, np.inf, -np.inf], dtype=in_t)
        for out_t in IUINT_TYPES:
            for arr in (arr_nan, arr_inf, arr_minf, arr_mix):
                assert_true(
                    ArrayWriter(arr, out_t, check_scaling=False).scaling_needed())
                assert_false(SlopeArrayWriter(arr, out_t).scaling_needed())
                assert_false(
                    SlopeInterArrayWriter(arr, out_t).scaling_needed())
    # Floats as input always need scaling
    for in_t in FLOAT_TYPES:
        arr = np.ones(10, in_t)
        for out_t in IUINT_TYPES:
            # We need an arraywriter that will tolerate construction when
            # scaling is needed
            assert_true(SlopeArrayWriter(arr, out_t).scaling_needed())
    # in-range (u)ints don't need scaling
    for in_t in IUINT_TYPES:
        in_info = np.iinfo(in_t)
        in_min, in_max = in_info.min, in_info.max
        for out_t in IUINT_TYPES:
            out_info = np.iinfo(out_t)
            out_min, out_max = out_info.min, out_info.max
            if in_min >= out_min and in_max <= out_max:
                arr = np.array([in_min, in_max], in_t)
                assert_true(np.can_cast(arr.dtype, out_t))
                # We've already tested this with can_cast above, but...
                assert_false(ArrayWriter(arr, out_t).scaling_needed())
                continue
            # The output data type does not include the input data range
            max_min = max(in_min, out_min)  # 0 for input or output uint
            min_max = min(in_max, out_max)
            arr = np.array([max_min, min_max], in_t)
            assert_false(ArrayWriter(arr, out_t).scaling_needed())
            assert_true(SlopeInterArrayWriter(arr + 1, out_t).scaling_needed())
            if in_t in INT_TYPES:
                assert_true(
                    SlopeInterArrayWriter(arr - 1, out_t).scaling_needed())
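# A compact demo of two rules asserted above (hedged: assumes the
# nibabel-style arraywriters module these tests appear to target):
import numpy as np
from nibabel.arraywriters import ArrayWriter, SlopeArrayWriter

print(SlopeArrayWriter(np.array([0.5, 1.5]), np.int16).scaling_needed())   # True: float -> int
print(ArrayWriter(np.array([1, 2], np.int16), np.int32).scaling_needed())  # False: safe cast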
def test_scatterplot_error_message():
    # Get standard brain mask
    mr_directory = get_data_directory()
    standard = "%s/MNI152_T1_8mm_brain_mask.nii.gz" % mr_directory
    standard = nibabel.load(standard)
    unzip = lambda l: tuple(zip(*l))

    # This is the error message we should see
    error = re.compile('Scatterplot Comparison Correlations Not Possible')

    # Case 1: provided pdmask masks all voxels (eg, no overlap in images)
    data1 = norm.rvs(size=500)
    data2 = norm.rvs(size=500)
    image1 = numpy.zeros(standard.shape)
    image2 = numpy.zeros(standard.shape)
    x, y, z = numpy.where(standard.get_data() == 1)
    idx = list(zip(x, y, z))
    image1_voxels = unzip(idx[0:500])
    image2_voxels = unzip(idx[1500:2000])
    image1[image1_voxels] = data1
    image2[image2_voxels] = data2
    image1 = nibabel.nifti1.Nifti1Image(image1, affine=standard.get_affine(),
                                        header=standard.get_header())
    image2 = nibabel.nifti1.Nifti1Image(image2, affine=standard.get_affine(),
                                        header=standard.get_header())
    html_snippet, data_table = scatterplot_compare(images=[image1, image2],
                                                   reference=standard,
                                                   image_names=["image 1", "image 2"],
                                                   corr_type="pearson")
    html_snippet = " ".join(html_snippet)
    assert_true(bool(error.search(html_snippet)))

    # Case 2: fewer than 3 voxels overlapping
    data1 = norm.rvs(size=2)
    data2 = norm.rvs(size=2)
    image1 = numpy.zeros(standard.shape)
    image2 = numpy.zeros(standard.shape)
    x, y, z = numpy.where(standard.get_data() == 1)
    idx = list(zip(x, y, z))
    idx = unzip(idx[10:12])
    image1[idx] = data1
    image2[idx] = data2
    image1 = nibabel.nifti1.Nifti1Image(image1, affine=standard.get_affine(),
                                        header=standard.get_header())
    image2 = nibabel.nifti1.Nifti1Image(image2, affine=standard.get_affine(),
                                        header=standard.get_header())
    html_snippet, data_table = scatterplot_compare(images=[image1, image2],
                                                   reference=standard,
                                                   image_names=["image 1", "image 2"],
                                                   corr_type="pearson")
    html_snippet = " ".join(html_snippet)
    assert_true(bool(error.search(html_snippet)))

    # Case 3: exactly 3 overlapping voxels should work
    data1 = norm.rvs(size=3)
    data2 = norm.rvs(size=3)
    image1 = numpy.zeros(standard.shape)
    image2 = numpy.zeros(standard.shape)
    x, y, z = numpy.where(standard.get_data() == 1)
    idx = list(zip(x, y, z))
    idx = unzip(idx[10:13])
    image1[idx] = data1
    image2[idx] = data2
    image1 = nibabel.nifti1.Nifti1Image(image1, affine=standard.get_affine(),
                                        header=standard.get_header())
    image2 = nibabel.nifti1.Nifti1Image(image2, affine=standard.get_affine(),
                                        header=standard.get_header())
    html_snippet, data_table = scatterplot_compare(images=[image1, image2],
                                                   reference=standard,
                                                   image_names=["image 1", "image 2"],
                                                   corr_type="pearson")
    html_snippet = " ".join(html_snippet)
    assert_false(bool(error.search(html_snippet)))
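# Hedged helper sketch for the setup repeated three times above: drop a vector
# of values onto masked voxel coordinates (starting at `start`) and wrap the
# result as a Nifti1Image. `make_masked_image` is illustrative, not part of
# the library under test.
import numpy
import nibabel

def make_masked_image(mask_img, values, start=0):
    data = numpy.zeros(mask_img.shape)
    coords = list(zip(*numpy.where(numpy.asarray(mask_img.dataobj) == 1)))
    sel = tuple(zip(*coords[start:start + len(values)]))  # transpose to (xs, ys, zs)
    data[sel] = values
    return nibabel.Nifti1Image(data, mask_img.affine, header=mask_img.header)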
def test_limited_to_blocks(self):
    tools.assert_false(self.subcls_limited.service.is_available(Anonymous()))
def test_finite_range_nan():
    # Test finite_range method and has_nan property
    for in_arr, res in (
            ([[-1, 0, 1], [np.inf, np.nan, -np.inf]], (-1, 1)),
            (np.array([[-1, 0, 1], [np.inf, np.nan, -np.inf]]), (-1, 1)),
            ([[np.nan], [np.nan]], (np.inf, -np.inf)),  # all-nan slices
            (np.zeros((3, 4, 5)) + np.nan, (np.inf, -np.inf)),
            ([[-np.inf], [np.inf]], (np.inf, -np.inf)),  # all-inf slices
            (np.zeros((3, 4, 5)) + np.inf, (np.inf, -np.inf)),
            ([[np.nan, -1, 2], [-2, np.nan, 1]], (-2, 2)),
            ([[np.nan, -np.inf, 2], [-2, np.nan, np.inf]], (-2, 2)),
            ([[-np.inf, 2], [np.nan, 1]], (1, 2)),  # good max case
            ([[np.nan, -np.inf, 2], [-2, np.nan, np.inf]], (-2, 2)),
            ([np.nan], (np.inf, -np.inf)),
            ([np.inf], (np.inf, -np.inf)),
            ([-np.inf], (np.inf, -np.inf)),
            ([np.inf, 1], (1, 1)),  # only look at finite values
            ([-np.inf, 1], (1, 1)),
            ([[], []], (np.inf, -np.inf)),  # empty array
            (np.array([[-3, 0, 1], [2, -1, 4]], dtype=np.int), (-3, 4)),
            (np.array([[1, 0, 1], [2, 3, 4]], dtype=np.uint), (0, 4)),
            ([0., 1, 2, 3], (0, 3)),
            # Complex comparison works as if they are floats
            ([[np.nan, -1 - 100j, 2], [-2, np.nan, 1 + 100j]], (-2, 2)),
            ([[np.nan, -1, 2 - 100j], [-2 + 100j, np.nan, 1]],
             (-2 + 100j, 2 - 100j)),
    ):
        for awt, kwargs in ((ArrayWriter, dict(check_scaling=False)),
                            (SlopeArrayWriter, {}),
                            (SlopeArrayWriter, dict(calc_scale=False)),
                            (SlopeInterArrayWriter, {}),
                            (SlopeInterArrayWriter, dict(calc_scale=False))):
            for out_type in NUMERIC_TYPES:
                has_nan = np.any(np.isnan(in_arr))
                try:
                    aw = awt(in_arr, out_type, **kwargs)
                except WriterError:
                    continue
                # The order of the finite_range() call and the has_nan
                # property access should not matter - check both orders
                assert_equal(aw.has_nan, has_nan)
                assert_equal(aw.finite_range(), res)
                aw = awt(in_arr, out_type, **kwargs)
                assert_equal(aw.finite_range(), res)
                assert_equal(aw.has_nan, has_nan)
                # Check float types work as complex
                in_arr = np.array(in_arr)
                if in_arr.dtype.kind == 'f':
                    c_arr = in_arr.astype(np.complex)
                    try:
                        aw = awt(c_arr, out_type, **kwargs)
                    except WriterError:
                        continue
                    aw = awt(c_arr, out_type, **kwargs)
                    assert_equal(aw.has_nan, has_nan)
                    assert_equal(aw.finite_range(), res)
    # A structured type cannot be nan, and we can test this
    a = np.array([[1., 0, 1], [2, 3, 4]]).view([('f1', 'f')])
    aw = awt(a, a.dtype, **kwargs)
    assert_raises(TypeError, aw.finite_range)
    assert_false(aw.has_nan)
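# Hedged, numpy-only sketch of the finite_range() contract checked above, for
# real-valued input: non-finite values are ignored, and an array with no
# finite values reports the empty range (inf, -inf). This mirrors the expected
# results in the table; it is not nibabel's own implementation.
import numpy as np

def finite_range_sketch(arr):
    arr = np.asarray(arr, dtype=float)
    finite = arr[np.isfinite(arr)]  # drop nan, inf, -inf
    if finite.size == 0:
        return (np.inf, -np.inf)
    return (finite.min(), finite.max())

assert finite_range_sketch([[np.nan, -1, 2], [-2, np.nan, 1]]) == (-2, 2)
assert finite_range_sketch([np.nan]) == (np.inf, -np.inf)
assert finite_range_sketch([[], []]) == (np.inf, -np.inf)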
def test_common_model_subclass_is_not_registered_after_declaration(self):
    tools.assert_false(ModelDummy in Service.registered_services)
def test_is_available_works_with_limited_to(self):
    c = Client()
    c.failme = True
    tools.assert_false(self.subcls_limited.service.is_available(c))
def test_non_blueprint_rest_error_routing(self):
    blueprint = Blueprint('test', __name__)
    api = flask_restbolt.Api(blueprint)
    api.add_resource(HelloWorld(), '/hi', endpoint="hello")
    api.add_resource(GoodbyeWorld(404), '/bye', endpoint="bye")
    app = Flask(__name__)
    app.register_blueprint(blueprint, url_prefix='/blueprint')
    api2 = flask_restbolt.Api(app)
    api2.add_resource(HelloWorld(), '/hi', endpoint="hello")
    api2.add_resource(GoodbyeWorld(404), '/bye', endpoint="bye")
    with app.test_request_context('/hi', method='POST'):
        assert_false(api._should_use_fr_error_handler())
        assert_true(api2._should_use_fr_error_handler())
        assert_false(api._has_fr_route())
        assert_true(api2._has_fr_route())
    with app.test_request_context('/blueprint/hi', method='POST'):
        assert_true(api._should_use_fr_error_handler())
        assert_false(api2._should_use_fr_error_handler())
        assert_true(api._has_fr_route())
        assert_false(api2._has_fr_route())
    api._should_use_fr_error_handler = Mock(return_value=False)
    api2._should_use_fr_error_handler = Mock(return_value=False)
    with app.test_request_context('/bye'):
        assert_false(api._has_fr_route())
        assert_true(api2._has_fr_route())
    with app.test_request_context('/blueprint/bye'):
        assert_true(api._has_fr_route())
        assert_false(api2._has_fr_route())
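# Hedged sketch of the routing split the test above depends on: the same rule
# registered on a blueprint is served under the blueprint's url_prefix, so
# '/hi' and '/blueprint/hi' resolve to different handlers. Plain Flask here,
# no flask_restbolt, purely to illustrate the prefixing.
from flask import Flask, Blueprint

bp = Blueprint('bp', __name__)

@bp.route('/hi')
def bp_hi():
    return 'blueprint hello'

demo_app = Flask(__name__)
demo_app.register_blueprint(bp, url_prefix='/blueprint')

@demo_app.route('/hi')
def app_hi():
    return 'app hello'

with demo_app.test_client() as client:
    assert client.get('/hi').data == b'app hello'
    assert client.get('/blueprint/hi').data == b'blueprint hello'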
def test_is_available_uses_defined_callback(self):
    tools.assert_true(self.subcls.service.is_available(1))
    tools.assert_false(self.subcls.service.is_available(2))
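# Hedged sketch of the availability contract the three service tests above
# rely on: a service that delegates is_available() to an optional callback.
# The class name and callback signature are illustrative assumptions, not the
# framework's own API.
class CallbackService(object):
    def __init__(self, availability_callback=None):
        self._callback = availability_callback

    def is_available(self, user):
        if self._callback is None:
            return True  # available to everyone unless a callback restricts it
        return self._callback(user)

svc = CallbackService(lambda user: user == 1)
assert svc.is_available(1)
assert not svc.is_available(2)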
def test_read_invalid_scope(self):
    for scope in (Scope.preferences, Scope.user_info, Scope.user_state):
        key = KeyValueStore.Key(scope, None, None, 'foo')
        with assert_raises(InvalidScopeError):
            self.kvs.get(key)
        assert_false(self.kvs.has(key))
def test_uniform():
    hist = Hist(10, 0, 1)
    assert_true(hist.uniform())
    hist = Hist2D(10, 0, 1, [1, 10, 100])
    assert_false(hist.uniform())
    assert_true(hist.uniform(axis=0))
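# Hedged sketch of a uniform-binning check like Hist.uniform() above: an axis
# is uniform when all bin widths agree to within tolerance. This mirrors the
# expected behaviour; it is not rootpy's implementation.
import numpy as np

def is_uniform(edges, rtol=1e-9):
    widths = np.diff(edges)
    return bool(np.allclose(widths, widths[0], rtol=rtol))

assert is_uniform(np.linspace(0, 1, 11))  # like the Hist(10, 0, 1) axis
assert not is_uniform([1, 10, 100])       # variable-width axis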
def test_arn_match_apigw_withaccount(self):
    assert_false(
        is_arn_match(
            "apigateway",
            "arn:*:apigateway:*::*",
            "arn:aws:apigateway:us-east-1:123412341234:/restapis/a123456789/*"))
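# Hedged sketch of why the pattern above should not match: read as a glob, the
# '::' in "arn:*:apigateway:*::*" demands an empty account field, while the
# candidate ARN carries account 123412341234. fnmatchcase is a simplification
# here, not the library's actual is_arn_match logic.
from fnmatch import fnmatchcase

pattern = "arn:*:apigateway:*::*"
arn = "arn:aws:apigateway:us-east-1:123412341234:/restapis/a123456789/*"
assert not fnmatchcase(arn, pattern)  # no '::' in the ARN -> no match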
def _check_delete_key_error(self, key):
    self.kvs.delete(key)
    with assert_raises(KeyError):
        self.kvs.get(key)
    assert_false(self.kvs.has(key))
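# Hedged sketch of the get/has/delete contract exercised by the two key-value
# store tests above: get() raises KeyError for a missing key, delete() removes
# it, and has() reports presence. DictKVS is an illustrative stand-in, not the
# runtime's own KeyValueStore implementation.
class DictKVS(object):
    def __init__(self):
        self._store = {}

    def get(self, key):
        return self._store[key]  # raises KeyError when the key is absent

    def set(self, key, value):
        self._store[key] = value

    def delete(self, key):
        del self._store[key]

    def has(self, key):
        return key in self._store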