def validate_prop_group(group: h5py.Group) -> None:
    """Validate the passed hdf5 **group**, ensuring it is compatible with
    :func:`create_prop_group` and :func:`create_prop_group`.

    This method is called automatically when an exception is raised by
    :func:`update_prop_dset`.

    Parameters
    ----------
    group : :class:`h5py.Group`
        The to-be validated hdf5 Group.

    Raises
    ------
    :exc:`AssertionError`
        Raised if the validation process fails.

    """  # noqa: E501
    assertion.isinstance(group, h5py.Group)

    # Resolve the dimension scale referenced by the group's 'index' attribute.
    idx = group.file[group.attrs['index']]

    for name, dset in group.items():
        # `*_names` datasets are metadata and are exempt from validation.
        if name.endswith('_names'):
            continue
        assertion.le(len(dset), len(idx), message=f'{name!r} invalid dataset length')
        assertion.contains(dset.dims[0].keys(), 'index', message=f'{name!r} missing dataset scale')
        assertion.eq(dset.dims[0]['index'], idx, message=f'{name!r} invalid dataset scale')
def test_create_database_4() -> None:
    """Test the backwards compatibility of pre-``0.4`` databases."""
    shutil.copytree(DB_PATH_OLD4, DB_PATH_NEW4)
    db = Database(DB_PATH_NEW4)

    with db.hdf5('r', libver='latest') as f:
        grp = f['ligand']
        scale = grp['index']
        pdb = PDBContainer.from_hdf5(grp)

        dset_keys = ('atoms', 'bonds', 'atom_count', 'bond_count')
        # Every per-molecule dataset must be attached to the 'index' scale ...
        for key in dset_keys:
            assertion.eq(grp[key].dims[0]['index'], scale)
        # ... and have its first dimension labelled accordingly.
        for key in dset_keys:
            assertion.eq(grp[key].dims[0].label, 'index')
        assertion.eq(grp['atoms'].dims[1].label, 'atoms')
        assertion.eq(grp['bonds'].dims[1].label, 'bonds')
        pdb.validate_hdf5(grp)

        assertion.contains(grp, 'properties')
        formula = grp['properties/formula']
        assertion.len_eq(formula, len(scale))
        assertion.eq(formula.dims[0]['index'], scale)

        # Pre-0.4 databases carry an empty (all-b'') index record array.
        ref = np.rec.array(None, dtype=LIG_IDX_DTYPE, shape=(3, ))
        ref[:] = b''
        assertion.eq(pdb.scale, ref, post_process=np.all)
def test_create_csv() -> None:
    """Test :func:`dataCAT.create_database.create_csv`."""
    lig_dtype = {'hdf5 index': int, 'formula': str, 'settings': str, 'opt': bool}
    qd_dtype = {'hdf5 index': int, 'settings': str, 'opt': bool}

    # Creating the .csv files should return their (canonical) paths.
    lig_file = create_csv(PATH, 'ligand')
    qd_file = create_csv(PATH, 'qd')
    assertion.eq(lig_file, LIGAND_PATH)
    assertion.eq(qd_file, QD_PATH)

    df1 = pd.read_csv(LIGAND_PATH, index_col=[0, 1], header=[0, 1], dtype=lig_dtype)
    df2 = pd.read_csv(QD_PATH, index_col=[0, 1, 2, 3], header=[0, 1], dtype=qd_dtype)

    # Validate the shape, index names and column names of both dataframes.
    assertion.eq(df1.shape, (1, 4))
    assertion.eq(df2.shape, (1, 5))
    assertion.eq(df1.index.names, ['smiles', 'anchor'])
    assertion.eq(df2.index.names, ['core', 'core anchor', 'ligand smiles', 'ligand anchor'])
    assertion.eq(df1.columns.names, ['index', 'sub index'])
    assertion.eq(df2.columns.names, ['index', 'sub index'])
    assertion.contains(df1.index, ('-', '-'))
    assertion.contains(df2.index, ('-', '-', '-', '-'))

    # Both files start out with a single placeholder row.
    np.testing.assert_array_equal(
        df1.values, np.array([[-1, 'str', False, 'str']], dtype=object))
    np.testing.assert_array_equal(
        df2.values, np.array([[-1, -1, False, 'str', 'str']], dtype=object))

    # An unknown database name must raise a ValueError.
    assertion.assert_(create_csv, PATH, 'bob', exception=ValueError)
def test_dissociate_surface() -> None:
    """Tests for :func:`dissociate_surface`."""
    idx_tup = (
        319,
        [319],
        [320, 319],
        [[320, 319], [158, 57], [156, 155]],
    )
    at_idx_iter = iter([319, 319, 320, 319, 320, 319, 158, 57, 156, 155])

    # Each dissociated product must no longer contain the removed atom.
    mol_iter = chain.from_iterable(dissociate_surface(MOL, i) for i in idx_tup)
    for i, mol in zip(at_idx_iter, mol_iter):
        assertion.contains(np.asarray(mol), XYZ[i], invert=True)

    # NOTE: `i` deliberately reuses the final value left over from the loop above.
    error_cases = (
        ({'k': 0}, ValueError),
        ({'k': 999}, ValueError),
        ({'lig_count': 999}, ValueError),
        ({'lig_count': -1}, ValueError),
        ({'symbol': 'bob'}, PTError),
        ({'symbol': 999}, PTError),
        ({'symbol': 9.5}, TypeError),
    )
    for kwargs, exc_type in error_cases:
        assertion.assert_(next, dissociate_surface(MOL, i, **kwargs), exception=exc_type)
def test_setattr() -> None:
    """Test :class:`~nanoutils.SetAttr`."""
    assertion.is_(OBJ.obj, _Test)
    assertion.eq(OBJ.name, 'a')
    assertion.is_(OBJ.value, False)
    assertion.is_(OBJ.attr, _Test.a)

    # Assigning `.attr` must write through to the wrapped object; restore afterwards.
    try:
        OBJ.attr = False
        assertion.is_(OBJ.attr, False)
    finally:
        OBJ.attr = True

    # The repr should mention the class, the wrapped object, the value and the name.
    for substring in (SetAttr.__name__, object.__repr__(OBJ.obj),
                      reprlib.repr(OBJ.value), 'a'):
        assertion.contains(repr(OBJ), substring)

    # Equality is based on (obj, name, value).
    obj2 = SetAttr(_Test, 'a', False)
    obj3 = SetAttr(_Test, 'a', True)
    assertion.eq(OBJ, obj2)
    assertion.ne(OBJ, obj3)
    assertion.ne(OBJ, 0)

    # SetAttr instances are not picklable but are copy-transparent.
    assertion.assert_(OBJ.__reduce__, exception=TypeError)
    assertion.is_(copy.copy(OBJ), OBJ)
    assertion.is_(copy.deepcopy(OBJ), OBJ)
    assertion.truth(hash(OBJ))
    assertion.eq(hash(OBJ), OBJ._hash)

    # Inside the context manager the attribute takes the temporary value.
    with OBJ:
        assertion.is_(_Test.a, False)
    assertion.is_(_Test.a, True)
def test_read_basis():
    """Test that the basis are read correctly."""
    basis_file = PATH / "BASIS_MOLOPT"
    keys, values = readCp2KBasis(basis_file)

    for key, data in zip(keys, values):
        # The formats contains a list
        assertion.len(key.basisFormat)
        # Atoms are either 1 or two characters
        assertion.le(len(key.atom), 2)
        # All basis are MOLOPT
        assertion.contains(key.basis, "MOLOPT")
        # There is a list of exponents and coefficinets
        assertion.len(data.exponents)
        assertion.len(data.coefficients[0])
def test_typing() -> None:
    """Tests for :mod:`nanoutils.typing_utils`."""
    # Each backported name must be the very same object as its `typing` counterpart.
    pairs = (
        (Literal, t.Literal),
        (Final, t.Final),
        (final, t.final),
        (Protocol, t.Protocol),
        (TypedDict, t.TypedDict),
        (runtime_checkable, t.runtime_checkable),
    )
    for obj, ref in pairs:
        assertion.is_(obj, ref)

    if sys.version_info >= (3, 8):
        assertion.is_(SupportsIndex, t.SupportsIndex)
    else:
        # Pre-3.8 the backport supplies its own Protocol subclass.
        assertion.contains(SupportsIndex.__bases__, Protocol)
        assertion.hasattr(SupportsIndex, '__index__')
def test_create_hdf5() -> None:
    """Test :func:`dataCAT.create_database.create_hdf5`."""
    ref_keys1 = ('qd', 'qd_no_opt', 'core', 'core_no_opt', 'ligand', 'ligand_no_opt')
    ref_keys2 = ('job_settings_BDE', 'job_settings_qd_opt', 'job_settings_crs')

    filename = create_hdf5(PATH)
    assertion.eq(filename, HDF5_PATH)

    with h5py.File(HDF5_PATH, 'r', libver='latest') as f:
        # All structure groups must be present.
        for key in ref_keys1:
            assertion.contains(f.keys(), key)
        # Job-settings datasets must be present and 3-dimensional.
        for key in ref_keys2:
            assertion.contains(f.keys(), key)
            assertion.eq(f[key].ndim, 3)
def test_filemanagerabc() -> None:
    """Test :class:`dataCAT.context_managers.FileManagerABC`."""
    file1 = join(PATH, 'qd.csv')
    file2 = join(PATH, 'qd.csv')
    file3 = join(PATH, 'ligand.csv')
    obj1, obj2, obj3 = (OpenQD(name, write=False) for name in (file1, file2, file3))

    # Equality and hashing are filename-based.
    assertion.eq(obj1, obj2)
    assertion.eq(hash(obj1), hash(obj2))
    assertion.ne(obj1, obj3)
    assertion.ne(obj1, 1)

    obj1_str = repr(obj1)
    assertion.contains(obj1_str, obj1.__class__.__name__)
    assertion.contains(obj1_str, str(obj1.write))
    if sys.platform != 'win32':
        # Path repr differs on Windows; only check it elsewhere.
        assertion.contains(obj1_str, str(obj1.filename))

    # The manager is copy-transparent and pickle round-trips to an equal object.
    assertion.is_(copy.copy(obj1), obj1)
    assertion.is_(copy.deepcopy(obj1), obj1)
    round_trip = pickle.loads(pickle.dumps(obj1))
    assertion.eq(round_trip, obj1)
def test_get_exc_message() -> None:
    """Test :meth:`AssertionManager._get_exc_message`."""
    ex = TypeError("object of type 'int' has no len()")
    func1 = len
    args = (1,)

    str1: str = assertion._get_exc_message(ex, func1, *args, invert=False, output=None)  # type: ignore[attr-defined]  # noqa: E501
    str2: str = assertion._get_exc_message(ex, func1, *args, invert=True, output=None)  # type: ignore[attr-defined]  # noqa: E501

    # NOTE(review): the reference strings were reconstructed from a
    # whitespace-mangled source; the blank-line layout follows assertionlib's
    # exception-message format — confirm against the upstream test suite.
    ref1 = """output = len(obj); assert output

exception: TypeError = "object of type 'int' has no len()"

output: NoneType = None
obj: int = 1"""
    ref2 = """output = not len(obj); assert output

exception: TypeError = "object of type 'int' has no len()"

output: NoneType = None
obj: int = 1"""
    assertion.eq(str1, ref1)
    assertion.eq(str2, ref2)

    # A non-callable second argument must raise a TypeError.
    func2: Callable = assertion._get_exc_message  # type: ignore[attr-defined]
    assertion.assert_(func2, ex, 1, exception=TypeError)

    # functools.partial objects are rendered via their repr.
    func3 = partial(len)
    str3 = assertion._get_exc_message(ex, func3)  # type: ignore[attr-defined]
    assertion.contains(str3, 'functools.partial(<built-in function len>)')

    # Arbitrary callables without a __name__ fall back to the generic 'func'.
    class Func:
        def __call__(self):
            pass

    str4: str = assertion._get_exc_message(ex, Func())  # type: ignore[attr-defined]
    assertion.contains(str4, 'output = func(); assert output')
def test_assert_() -> None:
    """Test :meth:`AssertionManager.assert_`.

    Covers the basic call forms, the ``invert``/``exception``/``message``
    keywords, and the error paths for invalid ``exception`` values.
    """
    assertion.assert_(len, [1])
    assertion.assert_(len, [], invert=True)
    assertion.assert_(len, 1, exception=TypeError)
    assertion.assert_(len, [1], [1], exception=TypeError)
    assertion.assert_(len, [1], bob=1, exception=TypeError)

    # `exception` must be an exception type; `bool` is rejected with a TypeError.
    try:
        assertion.assert_(len, [1], exception=bool)
    except TypeError:
        pass
    else:
        raise AssertionError('Failed to raise a TypeError')

    # AssertionError itself is not allowed as the expected exception.
    try:
        assertion.assert_(len, [1], exception=AssertionError)
    except ValueError:
        pass
    else:
        raise AssertionError('Failed to raise a ValueError')

    # A custom `message` must end up in the raised AssertionError.
    try:
        assertion.eq(1, 1, exception=TypeError, message='<MARKER>')
    except AssertionError as ex:
        assertion.contains(str(ex), '<MARKER>')
    else:
        raise AssertionError('Failed to raise an AssertionError')

    # The original exception is chained via __cause__.
    try:
        assertion.contains(1, 1, exception=ValueError)
    except AssertionError as ex:
        assertion.isinstance(ex.__cause__, TypeError)
    else:
        raise AssertionError('Failed to raise an AssertionError')

    # `post_process` is applied to the assertion's output before truth-testing.
    func = operator.__invert__
    assertion(False, post_process=func)
    assertion(True, invert=True, post_process=func)

    try:
        assertion.truth(False, message='Funcy custom message')
    except AssertionError as ex:
        assertion.contains(str(ex), 'Funcy custom message')
    else:
        # Fixed typo in the failure message: "am" -> "an".
        raise AssertionError('Failed to raise an AssertionError')
def test_validate_prop_group() -> None:
    """Test :func:`dataCAT.validate_prop_group`."""
    with h5py.File(HDF5_TMP, 'a') as f:
        scale1 = f.create_dataset('index1', shape=(100, ), dtype=int)
        scale1.make_scale('index')
        group = create_prop_group(f, scale1)

        def assert_invalid(substring: str) -> None:
            # Validation must fail with an AssertionError mentioning `substring`.
            try:
                validate_prop_group(group)
            except AssertionError as ex:
                assertion.contains(str(ex), substring)
            else:
                raise AssertionError("Failed to raise an AssertionError")

        # Case 1: dataset without an attached dimension scale.
        group.create_dataset('test1', shape=(100, ), dtype=int)
        assert_invalid('missing dataset scale')
        del group['test1']

        # Case 2: dataset longer than the index scale.
        dset1 = group.create_dataset('test2', shape=(200, ), dtype=int)
        dset1.dims[0].label = 'index'
        dset1.dims[0].attach_scale(scale1)
        assert_invalid('invalid dataset length')
        del group['test2']

        # Case 3: dataset attached to the wrong scale.
        scale2 = f.create_dataset('index2', shape=(100, ), dtype=int)
        scale2.make_scale('index')
        dset2 = group.create_dataset('test3', shape=(100, ), dtype=int)
        dset2.dims[0].label = 'index'
        dset2.dims[0].attach_scale(scale2)
        assert_invalid('invalid dataset scale')
        del group['test3']

        # A properly constructed property dataset passes validation.
        create_prop_dset(group, 'test4')
        validate_prop_group(group)
def test_repr(self, seq: Sequence[int], view: SequenceView[int]) -> None:
    """The view's repr must mention its class name and the wrapped sequence."""
    view_repr = repr(view)
    assertion.contains(view_repr, SequenceView.__name__)
    assertion.contains(view_repr, repr(seq))
def test_contains(self, seq: Sequence[int], view: SequenceView[int], i: int) -> None:
    """Membership tests on the view must mirror the wrapped sequence."""
    assertion.contains(view, i)
    in_view = i in view
    in_seq = i in seq
    assertion.eq(in_view, in_seq)
def test_contains() -> None:
    """Test :meth:`AssertionManager.contains`."""
    # Positive membership checks for list, set and dict (keys).
    for container in ([1], {1}, {1: None}):
        assertion.contains(container, 1)

    assertion.contains([1], 2, invert=True)
    # Too many positional arguments and non-container inputs raise TypeErrors.
    assertion.contains([1], 5, 6, 7, exception=TypeError)
    assertion.contains(5, 5, exception=TypeError)