class TestFitExponential(object):
    def setUp(self):
        self.x = np.linspace(0, 250, 251)
        self.a_ref = 20.0
        self.y = np.exp(-self.x / self.a_ref)

    def tearDown(self):
        del self.x
        del self.a_ref
        del self.y

    @dec.skipif(module_not_found('scipy'),
                "Test skipped because scipy is not available.")
    def test_fit_simple(self):
        a = polymer.fit_exponential_decay(self.x, self.y)
        assert_(a == self.a_ref)

    @dec.skipif(module_not_found('scipy'),
                "Test skipped because scipy is not available.")
    def test_fit_noisy(self):
        noise = np.sin(self.x) * 0.01
        y2 = noise + self.y
        a = polymer.fit_exponential_decay(self.x, y2)
        assert_almost_equal(a, self.a_ref, decimal=3)
class TestPersistenceLength(object):
    def setUp(self):
        self.u = MDAnalysis.Universe(Plength)

    def tearDown(self):
        del self.u

    def test_ag_VE(self):
        ags = [self.u.atoms[:10], self.u.atoms[10:110]]
        assert_raises(ValueError, polymer.PersistenceLength, ags)

    def _make_p(self):
        ags = [r.atoms.select_atoms('name C* N*')
               for r in self.u.residues]
        p = polymer.PersistenceLength(ags)
        return p

    def test_run(self):
        p = self._make_p()
        p.run()
        assert_(len(p.results) == 280)
        assert_almost_equal(p.lb, 1.485, 3)

    @dec.skipif(module_not_found('scipy'),
                "Test skipped because scipy is not available.")
    def test_fit(self):
        p = self._make_p()
        p.run()
        p.perform_fit()
        assert_almost_equal(p.lp, 6.504, 3)
        assert_(len(p.fit) == len(p.results))

    @dec.skipif(module_not_found('matplotlib'),
                "Test skipped because matplotlib is not available.")
    @dec.skipif(module_not_found('scipy'),
                "Test skipped because scipy is not available.")
    def test_plot_ax_return(self):
        '''Ensure that a matplotlib axis object is
        returned when plot() is called.'''
        import matplotlib
        p = self._make_p()
        p.run()
        p.perform_fit()
        actual = p.plot()
        expected = matplotlib.axes.Axes
        assert_(isinstance(actual, expected))

    def test_raise_NoDataError(self):
        '''Ensure that a NoDataError is raised if perform_fit()
        is called before the run() method of AnalysisBase.'''
        p = self._make_p()
        assert_raises(NoDataError, p.perform_fit)
class TestPersistenceLength(object):
    def setUp(self):
        self.u = MDAnalysis.Universe(Plength)

    def tearDown(self):
        del self.u

    def test_ag_VE(self):
        ags = [self.u.atoms[:10], self.u.atoms[10:110]]
        assert_raises(ValueError, polymer.PersistenceLength, ags)

    def _make_p(self):
        ags = [r.select_atoms('type C or type N')
               for r in self.u.residues]
        p = polymer.PersistenceLength(ags)
        return p

    def test_run(self):
        p = self._make_p()
        p.run()
        assert_(len(p.results) == 280)
        assert_almost_equal(p.lb, 1.485, 3)

    @dec.skipif(module_not_found('scipy'),
                "Test skipped because scipy is not available.")
    def test_fit(self):
        p = self._make_p()
        p.run()
        p.perform_fit()
        assert_almost_equal(p.lp, 6.504, 3)
        assert_(len(p.fit) == len(p.results))
class TestNCDFReader2(TestCase):
    """NCDF Trajectory with positions and forces.

    Contributed by Albert Solernou
    """
    @dec.skipif(module_not_found("netCDF4"),
                "Test skipped because netCDF is not available.")
    def setUp(self):
        self.u = mda.Universe(PFncdf_Top, PFncdf_Trj)
        self.prec = 3

    def tearDown(self):
        self.u.trajectory.close()
        del self.u

    def test_positions_1(self):
        """Check positions on first frame"""
        self.u.trajectory[0]
        ref_1 = np.array([[-0.11980818, 18.70524979, 11.6477766],
                          [-0.44717646, 18.61727142, 12.59919548],
                          [-0.60952115, 19.47885513, 11.22137547]],
                         dtype=np.float32)
        assert_array_almost_equal(ref_1, self.u.atoms.positions[:3],
                                  self.prec)

    def test_positions_2(self):
        """Check positions on second frame"""
        self.u.trajectory[1]
        ref_2 = np.array([[-0.13042036, 18.6671524, 11.69647026],
                          [-0.46643803, 18.60186768, 12.646698],
                          [-0.46567637, 19.49173927, 11.21922874]],
                         dtype=np.float32)
        assert_array_almost_equal(ref_2, self.u.atoms.positions[:3],
                                  self.prec)

    def test_forces_1(self):
        """Check forces on first frame"""
        self.u.trajectory[0]
        ref_1 = np.array([[49.23017883, -97.05565643, -86.09863281],
                          [2.97547197, 29.84169388, 11.12069607],
                          [-15.93093777, 14.43616867, 30.25889015]],
                         dtype=np.float32)
        assert_array_almost_equal(ref_1, self.u.atoms.forces[:3],
                                  self.prec)

    def test_forces_2(self):
        """Check forces on second frame"""
        self.u.trajectory[1]
        ref_2 = np.array([[116.39096832, -145.44448853, -151.3155365],
                          [-18.90058327, 27.20145798, 1.95245135],
                          [-31.08556366, 14.95863628, 41.10367966]],
                         dtype=np.float32)
        assert_array_almost_equal(ref_2, self.u.atoms.forces[:3],
                                  self.prec)

    def test_time_1(self):
        """Check time on first frame"""
        ref = 35.02
        assert_almost_equal(ref, self.u.trajectory[0].time, self.prec)

    def test_time_2(self):
        """Check time on second frame"""
        ref = 35.04
        assert_almost_equal(ref, self.u.trajectory[1].time, self.prec)
class Test_density_from_Universe(TestCase):
    topology = TPR
    trajectory = XTC
    delta = 2.0
    selections = {'static': "name OW",
                  'dynamic': "name OW and around 4 (protein and resnum 1-10)",
                  }
    references = {'static': {'meandensity': 0.016764271713091212, },
                  'static_sliced': {'meandensity': 0.0067057088794023143, },
                  'dynamic': {'meandensity': 0.00062423404854011104, },
                  }
    precision = 5

    @dec.skipif(module_not_found('scipy'),
                "Test skipped because scipy is not available.")
    def setUp(self):
        self.outfile = 'density.dx'
        self.universe = mda.Universe(self.topology, self.trajectory)

    def tearDown(self):
        del self.universe

    def check_density_from_Universe(self, atomselection, ref_meandensity,
                                    **kwargs):
        import MDAnalysis.analysis.density

        with tempdir.in_tempdir():
            D = MDAnalysis.analysis.density.density_from_Universe(
                self.universe, atomselection=atomselection,
                delta=self.delta, **kwargs)
            assert_almost_equal(D.grid.mean(), ref_meandensity,
                                err_msg="mean density does not match")

            D.export(self.outfile)

            D2 = MDAnalysis.analysis.density.Density(self.outfile)
            assert_almost_equal(D.grid, D2.grid, decimal=self.precision,
                                err_msg="DX export failed: different grid sizes")

    def test_density_from_Universe(self):
        self.check_density_from_Universe(
            self.selections['static'],
            self.references['static']['meandensity'])

    def test_density_from_Universe_sliced(self):
        self.check_density_from_Universe(
            self.selections['static'],
            self.references['static_sliced']['meandensity'],
            start=1, stop=-1, step=2,
        )

    def test_density_from_Universe_update_selection(self):
        self.check_density_from_Universe(
            self.selections['dynamic'],
            self.references['dynamic']['meandensity'],
            update_selections=True)
class TestNCDFWriterErrors(object):
    @dec.skipif(module_not_found("netCDF4"),
                "Test skipped because netCDF is not available.")
    def setUp(self):
        self.tmpdir = tempdir.TempDir()
        self.outfile = os.path.join(self.tmpdir.name, 'out.ncdf')

    def tearDown(self):
        try:
            os.unlink(self.outfile)
        except OSError:
            pass
        del self.tmpdir
        del self.outfile

    def test_zero_atoms_VE(self):
        from MDAnalysis.coordinates.TRJ import NCDFWriter
        assert_raises(ValueError, NCDFWriter, self.outfile, 0)

    def test_wrong_n_atoms(self):
        from MDAnalysis.coordinates.TRJ import NCDFWriter
        with NCDFWriter(self.outfile, 100) as w:
            u = make_Universe(trajectory=True)
            assert_raises(IOError, w.write, u.trajectory.ts)

    def test_no_ts(self):
        # no ts supplied at any point
        from MDAnalysis.coordinates.TRJ import NCDFWriter
        with NCDFWriter(self.outfile, 100) as w:
            assert_raises(IOError, w.write_next_timestep)
class TestDist(TestCase):
    '''Tests for MDAnalysis.analysis.distances.dist().

    Imports do not happen at the top level of the module
    because of the scipy dependency.'''

    @dec.skipif(module_not_found('scipy'),
                "Test skipped because scipy is not available.")
    def setUp(self):
        import MDAnalysis.analysis.distances
        import scipy
        import scipy.spatial
        self.u = MDAnalysis.Universe(GRO)
        self.ag = self.u.atoms[:20]
        self.u2 = MDAnalysis.Universe(GRO)
        self.ag2 = self.u2.atoms[:20]
        # shuffle a copy of the positions and assign it back;
        # np.random.shuffle() works in place and returns None, so its
        # return value must not be assigned to .positions
        shuffled = self.ag2.positions.copy()
        np.random.shuffle(shuffled)
        self.ag2.positions = shuffled
        self.expected = np.diag(scipy.spatial.distance.cdist(
            self.ag.positions, self.ag2.positions))

    def tearDown(self):
        del self.u
        del self.ag
        del self.u2
        del self.ag2
        del self.expected

    def test_pairwise_dist(self):
        '''Ensure that pairwise distances between atoms are correctly
        calculated.'''
        actual = MDAnalysis.analysis.distances.dist(self.ag, self.ag2)[2]
        assert_equal(actual, self.expected)

    def test_pairwise_dist_offset_effect(self):
        '''Test that feeding in offsets to dist() doesn't alter
        pairwise distance matrix.'''
        actual = MDAnalysis.analysis.distances.dist(self.ag, self.ag2,
                                                    offset=229)[2]
        assert_equal(actual, self.expected)

    def test_offset_calculation(self):
        '''Test that offsets fed to dist() are correctly calculated.'''
        actual = MDAnalysis.analysis.distances.dist(self.ag, self.ag2,
                                                    offset=33)[:2]
        assert_equal(actual, np.array([self.ag.atoms.resids + 33,
                                       self.ag2.atoms.resids + 33]))

    def test_mismatch_exception(self):
        '''A ValueError should be raised if the two atomgroups
        don't have the same number of atoms.'''
        with self.assertRaises(ValueError):
            MDAnalysis.analysis.distances.dist(self.ag[:19], self.ag2)
class TestContactMatrix(TestCase):
    @dec.skipif(module_not_found('scipy'),
                "Test skipped because scipy is not available.")
    def setUp(self):
        import MDAnalysis.analysis.distances
        self.coord = np.array([[1, 1, 1],
                               [5, 5, 5],
                               [1.1, 1.1, 1.1],
                               [11, 11, 11],  # neighboring image with pbc
                               [21, 21, 21]],  # non neighboring image with pbc
                              dtype=np.float32)
        self.box = np.array([10, 10, 10], dtype=np.float32)
        self.shape = (5, 5)
        self.res_no_pbc = np.array([[1, 0, 1, 0, 0],
                                    [0, 1, 0, 0, 0],
                                    [1, 0, 1, 0, 0],
                                    [0, 0, 0, 1, 0],
                                    [0, 0, 0, 0, 1]], dtype=np.bool)
        self.res_pbc = np.array([[1, 0, 1, 1, 1],
                                 [0, 1, 0, 0, 0],
                                 [1, 0, 1, 1, 1],
                                 [1, 0, 1, 1, 1],
                                 [1, 0, 1, 1, 1]], dtype=np.bool)

    def test_np(self):
        contacts = MDAnalysis.analysis.distances.contact_matrix(
            self.coord, cutoff=1, returntype="numpy")
        assert_equal(contacts.shape, self.shape,
                     "wrong shape (should be {0})".format(self.shape))
        assert_equal(contacts, self.res_no_pbc)

    def test_sparse(self):
        contacts = MDAnalysis.analysis.distances.contact_matrix(
            self.coord, cutoff=1.5, returntype="sparse")
        assert_equal(contacts.shape, self.shape,
                     "wrong shape (should be {0})".format(self.shape))
        assert_equal(contacts.toarray(), self.res_no_pbc)

    def test_box_numpy(self):
        contacts = MDAnalysis.analysis.distances.contact_matrix(
            self.coord, box=self.box, cutoff=1)
        assert_equal(contacts.shape, self.shape,
                     "wrong shape (should be {0})".format(self.shape))
        assert_equal(contacts, self.res_pbc)

    def test_box_sparse(self):
        contacts = MDAnalysis.analysis.distances.contact_matrix(
            self.coord, box=self.box, cutoff=1, returntype='sparse')
        assert_equal(contacts.shape, self.shape,
                     "wrong shape (should be {0})".format(self.shape))
        assert_equal(contacts.toarray(), self.res_pbc)
class TestFitExponential(object):
    def setUp(self):
        self.x = np.linspace(0, 250, 251)
        self.a_ref = 20.0
        self.y = np.exp(-self.x / self.a_ref)

    def tearDown(self):
        del self.x
        del self.a_ref
        del self.y

    @dec.skipif(module_not_found('scipy'),
                "Test skipped because scipy is not available.")
    def test_fit_simple(self):
        a = polymer.fit_exponential_decay(self.x, self.y)
        assert_(a == self.a_ref)

    @dec.skipif(module_not_found('scipy'),
                "Test skipped because scipy is not available.")
    def test_fit_noisy(self):
        y2 = self.y + (np.random.random(len(self.y)) - 0.5) * 0.05
        a = polymer.fit_exponential_decay(self.x, y2)
        assert_(np.rint(a) == self.a_ref)
class TestBetween(TestCase):
    '''Tests for MDAnalysis.analysis.distances.between().

    Imports do not happen at the top level of the module
    because of the scipy dependency.'''

    @dec.skipif(module_not_found('scipy'),
                "Test skipped because scipy is not available.")
    def setUp(self):
        import MDAnalysis.analysis.distances
        import scipy
        import scipy.spatial
        self.u = MDAnalysis.Universe(GRO)
        self.ag = self.u.atoms[:10]
        self.ag2 = self.u.atoms[12:33]
        self.group = self.u.atoms[40:]
        self.distance = 5.9
        self.distance_matrix_1 = scipy.spatial.distance.cdist(
            self.group.positions, self.ag.positions)
        self.mask_1 = np.unique(
            np.where(self.distance_matrix_1 <= self.distance)[0])
        self.group_filtered = self.group[self.mask_1]
        self.distance_matrix_2 = scipy.spatial.distance.cdist(
            self.group_filtered.positions, self.ag2.positions)
        self.mask_2 = np.unique(
            np.where(self.distance_matrix_2 <= self.distance)[0])
        self.expected = self.group_filtered[self.mask_2].indices

    def tearDown(self):
        del self.u
        del self.ag
        del self.ag2
        del self.group
        del self.distance
        del self.distance_matrix_1
        del self.distance_matrix_2
        del self.mask_1
        del self.mask_2
        del self.group_filtered
        del self.expected

    def test_between_simple_case_indices_only(self):
        '''Test MDAnalysis.analysis.distances.between() for a simple input
        case. Checks the sorted atom indices of returned AtomGroup against
        sorted expected index values.'''
        actual = sorted(MDAnalysis.analysis.distances.between(
            self.group, self.ag, self.ag2, self.distance).indices)
        assert_equal(actual, self.expected)
class TestDensity(TestCase):
    nbins = 3, 4, 5
    counts = 100
    Lmax = 10.

    @dec.skipif(module_not_found('scipy'),
                "Test skipped because scipy is not available.")
    def setUp(self):
        import MDAnalysis.analysis.density

        self.bins = [np.linspace(0, self.Lmax, n + 1) for n in self.nbins]
        h, edges = np.histogramdd(
            self.Lmax * np.random.random((self.counts, 3)), bins=self.bins)
        self.D = MDAnalysis.analysis.density.Density(
            h, edges, parameters={'isDensity': False}, units={'length': 'A'})
        self.D.make_density()

    def test_shape(self):
        assert_equal(self.D.grid.shape, self.nbins)

    def test_edges(self):
        for dim, (edges, fixture) in enumerate(zip(self.D.edges, self.bins)):
            assert_almost_equal(edges, fixture,
                                err_msg="edges[{0}] mismatch".format(dim))

    def test_midpoints(self):
        midpoints = [0.5 * (b[:-1] + b[1:]) for b in self.bins]
        for dim, (mp, fixture) in enumerate(zip(self.D.midpoints, midpoints)):
            assert_almost_equal(mp, fixture,
                                err_msg="midpoints[{0}] mismatch".format(dim))

    def test_delta(self):
        deltas = np.array([self.Lmax]) / np.array(self.nbins)
        assert_almost_equal(self.D.delta, deltas)

    def test_grid(self):
        dV = self.D.delta.prod()  # orthorhombic grids only!
        # counts = (rho[0] * dV[0] + rho[1] * dV[1] ...) = sum_i rho[i] * dV
        assert_almost_equal(self.D.grid.sum() * dV, self.counts)

    def test_origin(self):
        midpoints = [0.5 * (b[:-1] + b[1:]) for b in self.bins]
        origin = [m[0] for m in midpoints]
        assert_almost_equal(self.D.origin, origin)
class _BaseHausdorffDistance(TestCase):
    '''Base Class setup and unit tests for various Hausdorff
    distance calculation properties.'''

    @dec.skipif(module_not_found('scipy'), 'scipy not available')
    def setUp(self):
        self.random_angles = np.random.random((100,)) * np.pi * 2
        self.random_columns = np.column_stack((self.random_angles,
                                               self.random_angles,
                                               np.zeros((100,))))
        self.random_columns[..., 0] = np.cos(self.random_columns[..., 0])
        self.random_columns[..., 1] = np.sin(self.random_columns[..., 1])
        self.random_columns_2 = np.column_stack((self.random_angles,
                                                 self.random_angles,
                                                 np.zeros((100,))))
        self.random_columns_2[1:, 0] = np.cos(self.random_columns_2[1:, 0]) * 2.0
        self.random_columns_2[1:, 1] = np.sin(self.random_columns_2[1:, 1]) * 2.0
        # move one point farther out so we don't have two perfect circles
        self.random_columns_2[0, 0] = np.cos(self.random_columns_2[0, 0]) * 3.3
        self.random_columns_2[0, 1] = np.sin(self.random_columns_2[0, 1]) * 3.3
        self.path_1 = self.random_columns
        self.path_2 = self.random_columns_2

    def tearDown(self):
        del self.random_angles
        del self.random_columns
        del self.random_columns_2
        del self.path_1
        del self.path_2

    def test_symmetry(self):
        '''Ensure that the undirected (symmetric) Hausdorff distance is
        actually symmetric for a given Hausdorff metric, h.'''
        forward = self.h(self.path_1, self.path_2)
        reverse = self.h(self.path_2, self.path_1)
        self.assertEqual(forward, reverse)

    def test_hausdorff_value(self):
        '''Test that the undirected Hausdorff distance matches expected
        value for the simple case here.'''
        actual = self.h(self.path_1, self.path_2)
        # unless I pin down the random generator
        # seems unstable to use decimal > 2
        assert_almost_equal(actual, self.expected, decimal=2)
class _NCDFReaderTest(_TRJReaderTest):
    @dec.skipif(module_not_found("netCDF4"),
                "Test skipped because netCDF is not available.")
    def setUp(self):
        self.universe = mda.Universe(self.topology, self.filename)
        self.prec = 3

    def test_slice_iteration(self):
        frames = [ts.frame for ts in self.universe.trajectory[4:-2:4]]
        assert_equal(frames,
                     np.arange(self.universe.trajectory.n_frames)[4:-2:4],
                     err_msg="slicing did not produce the expected frames")

    def test_metadata(self):
        data = self.universe.trajectory.trjfile
        assert_equal(data.Conventions, 'AMBER')
        assert_equal(data.ConventionVersion, '1.0')

    def test_dt(self):
        ref = 0.0
        assert_almost_equal(ref, self.universe.trajectory.dt, self.prec)
        assert_almost_equal(ref, self.universe.trajectory.ts.dt, self.prec)

    def test_get_writer(self):
        with self.universe.trajectory.Writer('out.ncdf') as w:
            assert_(w.n_atoms == len(self.universe.atoms))
            assert_(w.remarks.startswith('AMBER NetCDF format'))

    def test_get_writer_custom_n_atoms(self):
        with self.universe.trajectory.Writer('out.ncdf', n_atoms=42,
                                             remarks='Hi!') as w:
            assert_(w.n_atoms == 42)
            assert_(w.remarks == 'Hi!')

    def test_wrong_natoms(self):
        assert_raises(ValueError, mda.coordinates.TRJ.NCDFReader,
                      self.filename, n_atoms=2)

    def test_read_on_closed(self):
        self.universe.trajectory.close()
        assert_raises(IOError, self.universe.trajectory.__getitem__, 2)
class TestNCDF2DCD(TestCase):
    @dec.skipif(module_not_found("netCDF4"),
                "Test skipped because netCDF is not available.")
    def setUp(self):
        self.u = mda.Universe(PRMncdf, NCDF)
        # create the DCD
        self.tmpdir = tempdir.TempDir()
        self.dcd = self.tmpdir.name + '/ncdf-2-dcd.dcd'
        DCD = mda.Writer(self.dcd, n_atoms=self.u.atoms.n_atoms)
        for ts in self.u.trajectory:
            DCD.write(ts)
        DCD.close()
        self.w = mda.Universe(PRMncdf, self.dcd)

    def tearDown(self):
        try:
            os.unlink(self.dcd)
        except (AttributeError, OSError):
            pass
        del self.u
        del self.w
        del self.tmpdir

    @attr('issue')
    def test_unitcell(self):
        """NCDFReader: Test that DCDWriter correctly writes the CHARMM
        unit cell"""
        for ts_orig, ts_copy in zip(self.u.trajectory, self.w.trajectory):
            assert_almost_equal(
                ts_orig.dimensions, ts_copy.dimensions, 3,
                err_msg="NCDF->DCD: unit cell dimensions wrong at frame "
                "{0:d}".format(ts_orig.frame))

    def test_coordinates(self):
        for ts_orig, ts_copy in zip(self.u.trajectory, self.w.trajectory):
            assert_almost_equal(
                self.u.atoms.positions, self.w.atoms.positions, 3,
                err_msg="NCDF->DCD: coordinates wrong at frame "
                "{0:d}".format(ts_orig.frame))
class _GromacsWriterIssue117(TestCase):
    """Issue 117: Cannot write XTC or TRR from AMBER NCDF"""
    ext = None
    prec = 5

    @dec.skipif(module_not_found("netCDF4"),
                "Test skipped because netCDF is not available.")
    def setUp(self):
        self.universe = mda.Universe(PRMncdf, NCDF)
        self.tmpdir = tempdir.TempDir()
        self.outfile = self.tmpdir.name + '/xdr-writer-issue117' + self.ext
        self.Writer = mda.Writer(self.outfile,
                                 n_atoms=self.universe.atoms.n_atoms)

    def tearDown(self):
        try:
            os.unlink(self.outfile)
        except:
            pass
        del self.universe
        del self.Writer

    @attr('issue')
    def test_write_trajectory(self):
        """Test writing Gromacs trajectories from AMBER NCDF (Issue 117)"""
        self.universe.trajectory
        for ts in self.universe.trajectory:
            self.Writer.write_next_timestep(ts)
        self.Writer.close()

        uw = mda.Universe(PRMncdf, self.outfile)

        # check that the coordinates are identical for each time step
        for orig_ts, written_ts in zip(self.universe.trajectory,
                                       uw.trajectory):
            assert_array_almost_equal(written_ts._pos, orig_ts._pos, self.prec,
                                      err_msg="coordinate mismatch "
                                      "between original and written "
                                      "trajectory at frame %d (orig) vs %d "
                                      "(written)" % (orig_ts.frame,
                                                     written_ts.frame))
class TestLeafletFinder(TestCase):
    @dec.skipif(module_not_found('scipy'),
                "Test skipped because scipy is not available.")
    def setUp(self):
        self.universe = MDAnalysis.Universe(Martini_membrane_gro,
                                            Martini_membrane_gro)
        self.lipid_heads = self.universe.select_atoms("name PO4")
        self.lipid_head_string = "name PO4"

    def tearDown(self):
        del self.universe
        del self.lipid_heads
        del self.lipid_head_string

    def test_leaflet_finder(self):
        from MDAnalysis.analysis.leaflet import LeafletFinder
        lfls = LeafletFinder(self.universe, self.lipid_heads, pbc=True)
        top_heads, bottom_heads = lfls.groups()
        # Make top be... on top.
        if top_heads.center_of_geometry()[2] < bottom_heads.center_of_geometry()[2]:
            top_heads, bottom_heads = (bottom_heads, top_heads)
        assert_equal(top_heads.indices, np.arange(1, 2150, 12),
                     err_msg="Found wrong leaflet lipids")
        assert_equal(bottom_heads.indices, np.arange(2521, 4670, 12),
                     err_msg="Found wrong leaflet lipids")

    def test_string_vs_atomgroup_proper(self):
        from MDAnalysis.analysis.leaflet import LeafletFinder
        lfls_ag = LeafletFinder(self.universe, self.lipid_heads, pbc=True)
        lfls_string = LeafletFinder(self.universe, self.lipid_head_string,
                                    pbc=True)
        groups_ag = lfls_ag.groups()
        groups_string = lfls_string.groups()
        assert_equal(groups_string[0].indices, groups_ag[0].indices)
        assert_equal(groups_string[1].indices, groups_ag[1].indices)

    def test_optimize_cutoff(self):
        from MDAnalysis.analysis.leaflet import optimize_cutoff
        cutoff, N = optimize_cutoff(self.universe, self.lipid_heads, pbc=True)
        assert_equal(N, 2)
        assert_almost_equal(cutoff, 10.5, decimal=4)
class _NCDFWriterTest(TestCase):
    @dec.skipif(module_not_found("netCDF4"),
                "Test skipped because netCDF is not available.")
    def setUp(self):
        self.universe = mda.Universe(self.topology, self.filename)
        self.prec = 5
        ext = ".ncdf"
        self.tmpdir = tempdir.TempDir()
        self.outfile = os.path.join(self.tmpdir.name, 'ncdf-writer-1' + ext)
        self.outtop = os.path.join(self.tmpdir.name, 'ncdf-writer-top.pdb')
        self.Writer = mda.coordinates.TRJ.NCDFWriter

    def tearDown(self):
        for f in self.outfile, self.outtop:
            try:
                os.unlink(f)
            except OSError:
                pass
        del self.universe
        del self.Writer
        del self.tmpdir

    def test_write_trajectory(self):
        t = self.universe.trajectory
        with self.Writer(self.outfile, t.n_atoms, dt=t.dt) as W:
            self._copy_traj(W)
        self._check_new_traj()

        import netCDF4
        # for issue #518 -- preserve float32 data in ncdf output
        dataset = netCDF4.Dataset(self.outfile, 'r', format='NETCDF3')
        coords = dataset.variables['coordinates']
        time = dataset.variables['time']
        assert_equal(coords.dtype, np.float32,
                     err_msg='ncdf coord output not float32')
        assert_equal(time.dtype, np.float32,
                     err_msg='ncdf time output not float32')

    def test_OtherWriter(self):
        t = self.universe.trajectory
        with t.OtherWriter(self.outfile) as W:
            self._copy_traj(W)
        self._check_new_traj()

    def _copy_traj(self, writer):
        for ts in self.universe.trajectory:
            writer.write_next_timestep(ts)

    def _check_new_traj(self):
        uw = mda.Universe(self.topology, self.outfile)

        # check that the trajectories are identical for each time step
        for orig_ts, written_ts in zip(self.universe.trajectory,
                                       uw.trajectory):
            assert_array_almost_equal(written_ts._pos, orig_ts._pos, self.prec,
                                      err_msg="coordinate mismatch between "
                                      "original and written trajectory at "
                                      "frame %d (orig) vs %d (written)" %
                                      (orig_ts.frame, written_ts.frame))
            # not a good test because in the example trajectory all times are 0
            assert_almost_equal(orig_ts.time, written_ts.time, self.prec,
                                err_msg="Time for step {0} are not the "
                                "same.".format(orig_ts.frame))
            assert_array_almost_equal(written_ts.dimensions,
                                      orig_ts.dimensions, self.prec,
                                      err_msg="unitcells are not identical")

        # check that the NCDF data structures are the same
        nc_orig = self.universe.trajectory.trjfile
        nc_copy = uw.trajectory.trjfile

        for k, dim in nc_orig.dimensions.items():
            try:
                dim_new = nc_copy.dimensions[k]
            except KeyError:
                raise AssertionError("NCDFWriter did not write "
                                     "dimension '{}'".format(k))
            else:
                assert_equal(len(dim), len(dim_new),
                             err_msg="Dimension '{0}' size mismatch".format(k))

        for k, v in nc_orig.variables.items():
            try:
                v_new = nc_copy.variables[k]
            except KeyError:
                raise AssertionError("NCDFWriter did not write "
                                     "variable '{}'".format(k))
            else:
                try:
                    assert_array_almost_equal(v[:], v_new[:], self.prec,
                                              err_msg="Variable '{}' not "
                                              "written correctly".format(k))
                except TypeError:
                    assert_array_equal(v[:], v_new[:],
                                       err_msg="Variable {} not written "
                                       "correctly".format(k))

    @attr('slow')
    def test_TRR2NCDF(self):
        trr = mda.Universe(GRO, TRR)
        with self.Writer(self.outfile, trr.trajectory.n_atoms,
                         velocities=True) as W:
            for ts in trr.trajectory:
                W.write_next_timestep(ts)

        uw = mda.Universe(GRO, self.outfile)

        for orig_ts, written_ts in zip(trr.trajectory, uw.trajectory):
            assert_array_almost_equal(written_ts._pos, orig_ts._pos, self.prec,
                                      err_msg="coordinate mismatch between "
                                      "original and written trajectory at "
                                      "frame %d (orig) vs %d (written)" %
                                      (orig_ts.frame, written_ts.frame))
            assert_array_almost_equal(written_ts._velocities,
                                      orig_ts._velocities, self.prec,
                                      err_msg="velocity mismatch between "
                                      "original and written trajectory at "
                                      "frame %d (orig) vs %d (written)" %
                                      (orig_ts.frame, written_ts.frame))
            assert_almost_equal(orig_ts.time, written_ts.time, self.prec,
                                err_msg="Time for step {0} are not the "
                                "same.".format(orig_ts.frame))
            assert_array_almost_equal(written_ts.dimensions,
                                      orig_ts.dimensions, self.prec,
                                      err_msg="unitcells are not identical")
        del trr

    @attr('issue')
    def test_write_AtomGroup(self):
        """test to write NCDF from AtomGroup (Issue 116)"""
        p = self.universe.select_atoms("not resname WAT")
        p.write(self.outtop)
        with self.Writer(self.outfile, n_atoms=p.n_atoms) as W:
            for ts in self.universe.trajectory:
                W.write(p)

        uw = mda.Universe(self.outtop, self.outfile)
        pw = uw.atoms

        for orig_ts, written_ts in zip(self.universe.trajectory,
                                       uw.trajectory):
            assert_array_almost_equal(p.positions, pw.positions, self.prec,
                                      err_msg="coordinate mismatch between "
                                      "original and written trajectory at "
                                      "frame %d (orig) vs %d (written)" %
                                      (orig_ts.frame, written_ts.frame))
            assert_almost_equal(orig_ts.time, written_ts.time, self.prec,
                                err_msg="Time for step {0} are not the "
                                "same.".format(orig_ts.frame))
            assert_array_almost_equal(written_ts.dimensions,
                                      orig_ts.dimensions, self.prec,
                                      err_msg="unitcells are not identical")
class TestPCA(object):
    """ Test the PCA class """
    def setUp(self):
        self.u = MDAnalysis.Universe(PSF, DCD)
        self.u.transfer_to_memory()
        self.pca = pca.PCA(self.u, select='backbone and name CA',
                           align=False)
        self.pca.run()
        self.n_atoms = self.u.select_atoms('backbone and name CA').n_atoms

    def test_cov(self):
        atoms = self.u.select_atoms('backbone and name CA')
        xyz = np.zeros((self.pca.n_frames, self.pca._n_atoms * 3))
        for i, ts in enumerate(self.u.trajectory):
            xyz[i] = atoms.positions.ravel()
        cov = np.cov(xyz, rowvar=0)
        assert_array_almost_equal(self.pca.cov, cov, 4)

    def test_cum_var(self):
        assert_almost_equal(self.pca.cumulated_variance[-1], 1)
        l = self.pca.cumulated_variance
        l = np.sort(l)
        assert_almost_equal(self.pca.cumulated_variance, l, 5)

    def test_pcs(self):
        assert_equal(self.pca.p_components.shape,
                     (self.n_atoms * 3, self.n_atoms * 3))

    def test_different_steps(self):
        dot = self.pca.transform(self.u.select_atoms('backbone and name CA'),
                                 start=5, stop=7, step=1)
        assert_equal(dot.shape, (2, self.n_atoms * 3))

    def test_transform(self):
        ag = self.u.select_atoms('backbone and name CA')
        pca_space = self.pca.transform(ag, n_components=1)
        assert_equal(pca_space.shape, (self.u.trajectory.n_frames, 1))

    # Accepts universe as input, but shapes are not aligned due to n_atoms
    @raises(ValueError)
    def test_transform_mismatch(self):
        pca_space = self.pca.transform(self.u, n_components=1)
        assert_equal(pca_space.shape, (self.u.trajectory.n_frames, 1))

    @staticmethod
    def test_transform_universe():
        u1 = MDAnalysis.Universe(waterPSF, waterDCD)
        u2 = MDAnalysis.Universe(waterPSF, waterDCD)
        pca_test = pca.PCA(u1).run()
        pca_test.transform(u2)

    @staticmethod
    @dec.skipif(module_not_found('scipy'),
                "Test skipped because scipy is not available.")
    def test_cosine_content():
        rand = MDAnalysis.Universe(RANDOM_WALK_TOPO, RANDOM_WALK)
        pca_random = pca.PCA(rand).run()
        dot = pca_random.transform(rand.atoms)
        content = pca.cosine_content(dot, 0)
        assert_almost_equal(content, .99, 1)
class TestEncoreDimensionalityReduction(TestCase):
    @dec.skipif(parser_not_found('DCD'),
                'DCD parser not available. Are you using python 3?')
    def setUp(self):
        # Create universe from templates defined in setUpClass
        self.ens1 = mda.Universe(
            self.ens1_template.filename,
            self.ens1_template.trajectory.timeseries(format='fac'),
            format=mda.coordinates.memory.MemoryReader)
        self.ens2 = mda.Universe(
            self.ens2_template.filename,
            self.ens2_template.trajectory.timeseries(format='fac'),
            format=mda.coordinates.memory.MemoryReader)

    def tearDown(self):
        # per-test cleanup; the shared templates are removed in tearDownClass
        del self.ens1
        del self.ens2

    @classmethod
    @dec.skipif(parser_not_found('DCD'),
                'DCD parser not available. Are you using python 3?')
    def setUpClass(cls):
        # To speed up tests, we read in trajectories from file only once,
        # and then recreate them from their coordinate array for each test
        super(TestEncoreDimensionalityReduction, cls).setUpClass()
        cls.ens1_template = mda.Universe(PSF, DCD)
        cls.ens2_template = mda.Universe(PSF, DCD2)
        cls.ens1_template.transfer_to_memory()
        cls.ens2_template.transfer_to_memory()
        # Filter ensembles to only include every 5th frame
        cls.ens1_template = mda.Universe(
            cls.ens1_template.filename,
            np.copy(cls.ens1_template.trajectory.timeseries(
                format='fac')[::5, :, :]),
            format=mda.coordinates.memory.MemoryReader)
        cls.ens2_template = mda.Universe(
            cls.ens2_template.filename,
            np.copy(cls.ens2_template.trajectory.timeseries(
                format='fac')[::5, :, :]),
            format=mda.coordinates.memory.MemoryReader)

    @classmethod
    def tearDownClass(cls):
        del cls.ens1_template
        del cls.ens2_template

    @dec.slow
    def test_dimensionality_reduction_one_ensemble(self):
        dimension = 2
        coordinates, details = encore.reduce_dimensionality(self.ens1)
        print(coordinates)
        assert_equal(coordinates.shape[0], dimension,
                     err_msg="Unexpected result in dimensionality "
                     "reduction: {0}".format(coordinates))

    @dec.slow
    def test_dimensionality_reduction_two_ensembles(self):
        dimension = 2
        coordinates, details = \
            encore.reduce_dimensionality([self.ens1, self.ens2])
        assert_equal(coordinates.shape[0], dimension,
                     err_msg="Unexpected result in dimensionality "
                     "reduction: {0}".format(coordinates))

    @dec.slow
    def test_dimensionality_reduction_three_ensembles_two_identical(self):
        coordinates, details = \
            encore.reduce_dimensionality([self.ens1, self.ens2, self.ens1])
        coordinates_ens1 = coordinates[:, np.where(
            details["ensemble_membership"] == 1)]
        coordinates_ens3 = coordinates[:, np.where(
            details["ensemble_membership"] == 3)]
        assert_almost_equal(coordinates_ens1, coordinates_ens3, decimal=0,
                            err_msg="Unexpected result in dimensionality "
                            "reduction: {0}".format(coordinates))

    @dec.slow
    def test_dimensionality_reduction_specified_dimension(self):
        dimension = 3
        coordinates, details = encore.reduce_dimensionality(
            [self.ens1, self.ens2],
            method=encore.StochasticProximityEmbeddingNative(
                dimension=dimension))
        assert_equal(coordinates.shape[0], dimension,
                     err_msg="Unexpected result in dimensionality "
                     "reduction: {0}".format(coordinates))

    @dec.slow
    def test_dimensionality_reduction_SPENative_direct(self):
        dimension = 2
        method = encore.StochasticProximityEmbeddingNative(dimension=dimension)
        distance_matrix = encore.get_distance_matrix(self.ens1)
        coordinates, details = method(distance_matrix)
        assert_equal(coordinates.shape[0], dimension,
                     err_msg="Unexpected result in dimensionality "
                     "reduction: {0}".format(coordinates))

    @dec.slow
    @dec.skipif(module_not_found('sklearn'),
                "Test skipped because sklearn is not available.")
    def test_dimensionality_reduction_PCA_direct(self):
        dimension = 2
        method = encore.PrincipalComponentAnalysis(dimension=dimension)
        coordinates = self.ens1.trajectory.timeseries(format='fac')
        coordinates = np.reshape(coordinates, (coordinates.shape[0], -1))
        coordinates, details = method(coordinates)
        assert_equal(coordinates.shape[0], dimension,
                     err_msg="Unexpected result in dimensionality "
                     "reduction: {0}".format(coordinates))

    @dec.slow
    @dec.skipif(module_not_found('sklearn'),
                "Test skipped because sklearn is not available.")
    def test_dimensionality_reduction_different_method(self):
        dimension = 3
        coordinates, details = \
            encore.reduce_dimensionality(
                [self.ens1, self.ens2],
                method=encore.PrincipalComponentAnalysis(dimension=dimension))
        assert_equal(coordinates.shape[0], dimension,
                     err_msg="Unexpected result in dimensionality "
                     "reduction: {0}".format(coordinates))

    @dec.slow
    def test_dimensionality_reduction_two_methods(self):
        dims = [2, 3]
        coordinates, details = \
            encore.reduce_dimensionality(
                [self.ens1, self.ens2],
                method=[encore.StochasticProximityEmbeddingNative(dims[0]),
                        encore.StochasticProximityEmbeddingNative(dims[1])])
        assert_equal(coordinates[1].shape[0], dims[1])

    @dec.slow
    @dec.skipif(module_not_found('sklearn'),
                "Test skipped because sklearn is not available.")
    def test_dimensionality_reduction_two_different_methods(self):
        dims = [2, 3]
        coordinates, details = \
            encore.reduce_dimensionality(
                [self.ens1, self.ens2],
                method=[encore.StochasticProximityEmbeddingNative(dims[0]),
                        encore.PrincipalComponentAnalysis(dims[1])])
        assert_equal(coordinates[1].shape[0], dims[1])
class TestEncoreClustering(TestCase):
    @dec.skipif(parser_not_found('DCD'),
                'DCD parser not available. Are you using python 3?')
    def setUp(self):
        # Create universe from templates defined in setUpClass
        self.ens1 = mda.Universe(
            self.ens1_template.filename,
            self.ens1_template.trajectory.timeseries(format='fac'),
            format=mda.coordinates.memory.MemoryReader)
        self.ens2 = mda.Universe(
            self.ens2_template.filename,
            self.ens2_template.trajectory.timeseries(format='fac'),
            format=mda.coordinates.memory.MemoryReader)

    def tearDown(self):
        # per-test cleanup; the shared templates are removed in tearDownClass
        del self.ens1
        del self.ens2

    @classmethod
    @dec.skipif(parser_not_found('DCD'),
                'DCD parser not available. Are you using python 3?')
    def setUpClass(cls):
        # To speed up tests, we read in trajectories from file only once,
        # and then recreate them from their coordinate array for each test
        super(TestEncoreClustering, cls).setUpClass()
        cls.ens1_template = mda.Universe(PSF, DCD)
        cls.ens2_template = mda.Universe(PSF, DCD2)
        cls.ens1_template.transfer_to_memory()
        cls.ens2_template.transfer_to_memory()
        # Filter ensembles to only include every 5th frame
        cls.ens1_template = mda.Universe(
            cls.ens1_template.filename,
            np.copy(cls.ens1_template.trajectory.timeseries(
                format='fac')[::5, :, :]),
            format=mda.coordinates.memory.MemoryReader)
        cls.ens2_template = mda.Universe(
            cls.ens2_template.filename,
            np.copy(cls.ens2_template.trajectory.timeseries(
                format='fac')[::5, :, :]),
            format=mda.coordinates.memory.MemoryReader)

    @classmethod
    def tearDownClass(cls):
        del cls.ens1_template
        del cls.ens2_template

    @dec.slow
    def test_clustering_one_ensemble(self):
        cluster_collection = encore.cluster(self.ens1)
        expected_value = 7
        assert_equal(len(cluster_collection), expected_value,
                     err_msg="Unexpected results: {0}".format(
                         cluster_collection))

    @dec.slow
    def test_clustering_two_ensembles(self):
        cluster_collection = encore.cluster([self.ens1, self.ens2])
        expected_value = 14
        assert_equal(len(cluster_collection), expected_value,
                     err_msg="Unexpected results: {0}".format(
                         cluster_collection))

    @dec.slow
    def test_clustering_three_ensembles_two_identical(self):
        cluster_collection = encore.cluster([self.ens1, self.ens2, self.ens1])
        expected_value = 40
        assert_equal(len(cluster_collection), expected_value,
                     err_msg="Unexpected result: {0}".format(
                         cluster_collection))

    @dec.slow
    def test_clustering_two_methods(self):
        cluster_collection = encore.cluster(
            [self.ens1],
            method=[encore.AffinityPropagationNative(),
                    encore.AffinityPropagationNative()])
        assert_equal(len(cluster_collection[0]), len(cluster_collection[1]),
                     err_msg="Unexpected result: {0}".format(
                         cluster_collection))

    @dec.slow
    def test_clustering_AffinityPropagationNative_direct(self):
        method = encore.AffinityPropagationNative()
        distance_matrix = encore.get_distance_matrix(self.ens1)
        cluster_assignment, details = method(distance_matrix)
        expected_value = 7
        assert_equal(len(set(cluster_assignment)), expected_value,
                     err_msg="Unexpected result: {0}".format(
                         cluster_assignment))

    @dec.slow
    @dec.skipif(module_not_found('sklearn'),
                "Test skipped because sklearn is not available.")
    def test_clustering_AffinityPropagation_direct(self):
        method = encore.AffinityPropagation()
        distance_matrix = encore.get_distance_matrix(self.ens1)
        cluster_assignment, details = method(distance_matrix)
        expected_value = 7
        assert_equal(len(set(cluster_assignment)), expected_value,
                     err_msg="Unexpected result: {0}".format(
                         cluster_assignment))

    @dec.slow
    @dec.skipif(module_not_found('sklearn'),
                "Test skipped because sklearn is not available.")
    def test_clustering_KMeans_direct(self):
        clusters = 10
        method = encore.KMeans(clusters)
        coordinates = self.ens1.trajectory.timeseries(format='fac')
        coordinates = np.reshape(coordinates, (coordinates.shape[0], -1))
        cluster_assignment, details = method(coordinates)
        assert_equal(len(set(cluster_assignment)), clusters,
                     err_msg="Unexpected result: {0}".format(
                         cluster_assignment))

    @dec.slow
    @dec.skipif(module_not_found('sklearn'),
                "Test skipped because sklearn is not available.")
    def test_clustering_DBSCAN_direct(self):
        method = encore.DBSCAN(eps=0.5, min_samples=2)
        distance_matrix = encore.get_distance_matrix(self.ens1)
        cluster_assignment, details = method(distance_matrix)
        expected_value = 2
        assert_equal(len(set(cluster_assignment)), expected_value,
                     err_msg="Unexpected result: {0}".format(
                         cluster_assignment))

    @dec.slow
    @dec.skipif(module_not_found('sklearn'),
                "Test skipped because sklearn is not available.")
    def test_clustering_two_different_methods(self):
        cluster_collection = encore.cluster(
            [self.ens1],
            method=[encore.AffinityPropagation(preference=-7.5),
                    encore.DBSCAN(min_samples=2)])
        print(cluster_collection)
        assert_equal(len(cluster_collection[0]), len(cluster_collection[1]),
                     err_msg="Unexpected result: {0}".format(
                         cluster_collection))

    @dec.slow
    @dec.skipif(module_not_found('sklearn'),
                "Test skipped because sklearn is not available.")
    def test_clustering_method_w_no_distance_matrix(self):
        cluster_collection = encore.cluster([self.ens1],
                                            method=encore.KMeans(10))
        print(cluster_collection)
        assert_equal(len(cluster_collection), 10,
                     err_msg="Unexpected result: {0}".format(
                         cluster_collection))

    @dec.slow
    @dec.skipif(module_not_found('sklearn'),
                "Test skipped because sklearn is not available.")
    def test_clustering_two_methods_one_w_no_distance_matrix(self):
        cluster_collection = encore.cluster(
            [self.ens1],
            method=[encore.KMeans(17),
                    encore.AffinityPropagationNative()])
        print(cluster_collection)
        assert_equal(len(cluster_collection[0]), len(cluster_collection[0]),
                     err_msg="Unexpected result: {0}".format(
                         cluster_collection))

    @dec.slow
    @dec.skipif(module_not_found('sklearn'),
                "Test skipped because sklearn is not available.")
    def test_sklearn_affinity_propagation(self):
        cc1 = encore.cluster([self.ens1])
        cc2 = encore.cluster([self.ens1],
                             method=encore.AffinityPropagation())
        assert_equal(len(cc1), len(cc2),
                     err_msg="Native and sklearn implementations of affinity "
                     "propagation don't agree: mismatch in number of "
                     "clusters: {0} {1}".format(len(cc1), len(cc2)))
class TestHydrogenBondAutocorrel(object):
    def setUp(self):
        u = self.u = mda.Universe(TRZ_psf, TRZ)
        self.H = u.atoms.select_atoms('name Hn')
        self.N = u.atoms.select_atoms('name N')
        self.O = u.atoms.select_atoms('name O')
        self.excl_list = (np.array(range(len(self.H))),
                          np.array(range(len(self.O))))

    def tearDown(self):
        del self.H
        del self.N
        del self.O
        del self.u
        del self.excl_list

    # regression tests for different conditions
    def test_continuous(self):
        hbond = HBAC(self.u, hydrogens=self.H, acceptors=self.O,
                     donors=self.N, bond_type='continuous',
                     sample_time=0.06)
        hbond.run()
        assert_array_almost_equal(
            hbond.solution['results'],
            np.array([1., 0.92668623, 0.83137828,
                      0.74486804, 0.67741936, 0.60263932],
                     dtype=np.float32))

    def test_continuous_excl(self):
        hbond = HBAC(self.u, hydrogens=self.H, acceptors=self.O,
                     donors=self.N, bond_type='continuous',
                     exclusions=self.excl_list, sample_time=0.06)
        hbond.run()
        assert_array_almost_equal(
            hbond.solution['results'],
            np.array([1., 0.92668623, 0.83137828,
                      0.74486804, 0.67741936, 0.60263932],
                     dtype=np.float32))

    def test_intermittent(self):
        hbond = HBAC(self.u, hydrogens=self.H, acceptors=self.O,
                     donors=self.N, bond_type='intermittent',
                     sample_time=0.06)
        hbond.run()
        assert_array_almost_equal(
            hbond.solution['results'],
            np.array([1., 0.92668623, 0.84310848,
                      0.79325515, 0.76392961, 0.72287393],
                     dtype=np.float32))

    def test_intermittent_timecut(self):
        hbond = HBAC(self.u, hydrogens=self.H, acceptors=self.O,
                     donors=self.N, bond_type='intermittent',
                     time_cut=0.01,  # time cut at traj.dt == continuous
                     sample_time=0.06)
        hbond.run()
        assert_array_almost_equal(
            hbond.solution['results'],
            np.array([1., 0.92668623, 0.83137828,
                      0.74486804, 0.67741936, 0.60263932],
                     dtype=np.float32))

    def test_intermittent_excl(self):
        hbond = HBAC(self.u, hydrogens=self.H, acceptors=self.O,
                     donors=self.N, bond_type='intermittent',
                     exclusions=self.excl_list, sample_time=0.06)
        hbond.run()
        assert_array_almost_equal(
            hbond.solution['results'],
            np.array([1., 0.92668623, 0.84310848,
                      0.79325515, 0.76392961, 0.72287393],
                     dtype=np.float32))

    # For `solve` the test trajectories aren't long enough
    # So spoof the results and check that solver finds solution
    @dec.skipif(module_not_found('scipy'))
    def test_solve_continuous(self):
        hbond = HBAC(self.u, hydrogens=self.H, acceptors=self.O,
                     donors=self.N, bond_type='continuous',
                     sample_time=0.06)

        def actual_function_cont(t):
            A1 = 0.75
            A2 = 0.25
            tau1 = 0.5
            tau2 = 0.1
            return A1 * np.exp(-t / tau1) + A2 * np.exp(-t / tau2)

        hbond.solution['time'] = time = np.arange(0, 0.06, 0.001)
        hbond.solution['results'] = actual_function_cont(time)

        hbond.solve()

        assert_array_almost_equal(hbond.solution['fit'],
                                  np.array([0.75, 0.5, 0.1]))

    @dec.skipif(module_not_found('scipy'))
    def test_solve_intermittent(self):
        hbond = HBAC(self.u, hydrogens=self.H, acceptors=self.O,
                     donors=self.N, bond_type='intermittent',
                     sample_time=0.06)

        def actual_function_int(t):
            A1 = 0.33
            A2 = 0.33
            A3 = 0.34
            tau1 = 5
            tau2 = 1
            tau3 = 0.1
            return (A1 * np.exp(-t / tau1) + A2 * np.exp(-t / tau2) +
                    A3 * np.exp(-t / tau3))

        hbond.solution['time'] = time = np.arange(0, 6.0, 0.01)
        hbond.solution['results'] = actual_function_int(time)

        hbond.solve()

        assert_array_almost_equal(hbond.solution['fit'],
                                  np.array([0.33, 0.33, 5, 1, 0.1]))

    def test_save(self):
        hbond = HBAC(self.u, hydrogens=self.H, acceptors=self.O,
                     donors=self.N, bond_type='continuous',
                     sample_time=0.06)
        hbond.run()
        with tempdir.in_tempdir():
            hbond.save_results('hbondout.npz')
            loaded = np.load('hbondout.npz')
            assert_('time' in loaded)
            assert_('results' in loaded)

    # setup errors
    def test_wronglength_DA(self):
        assert_raises(ValueError, HBAC, self.u,
                      hydrogens=self.H[:-1], acceptors=self.O,
                      donors=self.N, bond_type='intermittent',
                      exclusions=self.excl_list, sample_time=0.06)

    def test_exclusions(self):
        excl_list2 = self.excl_list[0], self.excl_list[1][:-1]
        assert_raises(ValueError, HBAC, self.u,
                      hydrogens=self.H, acceptors=self.O,
                      donors=self.N, bond_type='intermittent',
                      exclusions=excl_list2, sample_time=0.06)

    def test_bond_type_VE(self):
        assert_raises(ValueError, HBAC, self.u,
                      hydrogens=self.H, acceptors=self.O,
                      donors=self.N, bond_type='marzipan',
                      exclusions=self.excl_list, sample_time=0.06)

    @dec.skipif(module_not_found('scipy'))
    def test_solve_before_run_VE(self):
        hbond = HBAC(self.u, hydrogens=self.H, acceptors=self.O,
                     donors=self.N, bond_type='continuous',
                     sample_time=0.06)
        assert_raises(ValueError, hbond.solve)

    @mock.patch('MDAnalysis.coordinates.TRZ.TRZReader._read_frame')
    def test_unslicable_traj_VE(self, mock_read):
        mock_read.side_effect = TypeError
        assert_raises(ValueError, HBAC, self.u,
                      hydrogens=self.H, acceptors=self.O,
                      donors=self.N, bond_type='continuous',
                      sample_time=0.06)

    def test_save_without_run_VE(self):
        hbond = HBAC(self.u, hydrogens=self.H, acceptors=self.O,
                     donors=self.N, bond_type='continuous',
                     sample_time=0.06)
        assert_raises(ValueError, hbond.save_results)

    def test_repr(self):
        hbond = HBAC(self.u, hydrogens=self.H, acceptors=self.O,
                     donors=self.N, bond_type='continuous',
                     sample_time=0.06)
        assert_(isinstance(repr(hbond), six.string_types))
class TestPSAnalysis(TestCase):
    @dec.skipif(parser_not_found('DCD'),
                'DCD parser not available. Are you using python 3?')
    @dec.skipif(module_not_found('matplotlib'),
                "Test skipped because matplotlib is not available.")
    @dec.skipif(module_not_found('scipy'),
                "Test skipped because scipy is not available.")
    def setUp(self):
        self.tmpdir = tempdir.TempDir()
        self.iu1 = np.triu_indices(3, k=1)
        self.universe1 = mda.Universe(PSF, DCD)
        self.universe2 = mda.Universe(PSF, DCD2)
        self.universe_rev = mda.Universe(PSF, DCD)
        self.universes = [self.universe1, self.universe2, self.universe_rev]
        self.psa = PSA.PSAnalysis(self.universes,
                                  path_select='name CA',
                                  targetdir=self.tmpdir.name)
        self.psa.generate_paths(align=True)
        self.psa.paths[-1] = self.psa.paths[-1][::-1, :, :]  # reverse third path
        self._run()
        self._plot()

    def _run(self):
        self.psa.run(metric='hausdorff')
        self.hausd_matrix = self.psa.get_pairwise_distances()
        self.psa.run(metric='discrete_frechet')
        self.frech_matrix = self.psa.get_pairwise_distances()
        self.hausd_dists = self.hausd_matrix[self.iu1]
        self.frech_dists = self.frech_matrix[self.iu1]

    def _plot(self):
        self.plot_data = self.psa.plot()

    def tearDown(self):
        del self.universe1
        del self.universe2
        del self.universe_rev
        del self.psa
        del self.tmpdir

    def test_hausdorff_bound(self):
        err_msg = ("Some Frechet distances are smaller than corresponding "
                   "Hausdorff distances")
        assert_array_less(self.hausd_dists, self.frech_dists, err_msg)

    def test_reversal_hausdorff(self):
        err_msg = "Hausdorff distances changed after path reversal"
        assert_array_almost_equal(self.hausd_matrix[1, 2],
                                  self.hausd_matrix[0, 1],
                                  decimal=3, err_msg=err_msg)

    def test_reversal_frechet(self):
        err_msg = "Frechet distances did not increase after path reversal"
        assert_(self.frech_matrix[1, 2] >= self.frech_matrix[0, 1], err_msg)

    def test_dendrogram_produced(self):
        err_msg = "Dendrogram dictionary object was not produced"
        assert_(type(self.plot_data[1]) is dict, err_msg)

    def test_dist_mat_to_vec_i_less_j(self):
        """Test the index of corresponding distance vector is correct
        if i < j"""
        err_msg = "dist_mat_to_vec function returning wrong values"
        assert_equal(PSA.dist_mat_to_vec(5, 3, 4), 9, err_msg)

    def test_dist_mat_to_vec_i_greater_j(self):
        """Test the index of corresponding distance vector is correct
        if i > j"""
        err_msg = "dist_mat_to_vec function returning wrong values"
        assert_equal(PSA.dist_mat_to_vec(5, 4, 3), 9, err_msg)

    def test_dist_mat_to_vec_input_numpy_integer_32(self):
        """Test whether inputs are supported as numpy integers rather than
        normal Integers"""
        err_msg = "dist_mat_to_vec function returning wrong values"
        assert_equal(PSA.dist_mat_to_vec(np.int32(5), np.int32(3),
                                         np.int32(4)), np.int32(9), err_msg)

    def test_dist_mat_to_vec_input_numpy_integer_16(self):
        """Test whether inputs are supported as numpy integers rather than
        normal Integers"""
        err_msg = "dist_mat_to_vec function returning wrong values"
        assert_equal(PSA.dist_mat_to_vec(np.int16(5), np.int16(3),
                                         np.int16(4)), np.int16(9), err_msg)
class TestNCDFWriterVelsForces(TestCase):
    """Test writing NCDF trajectories with a mixture of options"""

    @dec.skipif(module_not_found("netCDF4"),
                "Test skipped because netCDF is not available.")
    def setUp(self):
        self.tmpdir = tempdir.TempDir()
        self.outfile = self.tmpdir.name + '/ncdf-write-vels-force.ncdf'
        self.prec = 3
        self.top = XYZ_mini
        self.n_atoms = 3

        self.ts1 = mda.coordinates.TRJ.Timestep(self.n_atoms,
                                                velocities=True,
                                                forces=True)
        self.ts1._pos[:] = np.arange(self.n_atoms * 3).reshape(self.n_atoms, 3)
        self.ts1._velocities[:] = np.arange(self.n_atoms * 3).reshape(
            self.n_atoms, 3) + 100
        self.ts1._forces[:] = np.arange(self.n_atoms * 3).reshape(
            self.n_atoms, 3) + 200

        self.ts2 = mda.coordinates.TRJ.Timestep(self.n_atoms,
                                                velocities=True,
                                                forces=True)
        self.ts2._pos[:] = np.arange(self.n_atoms * 3).reshape(
            self.n_atoms, 3) + 300
        self.ts2._velocities[:] = np.arange(self.n_atoms * 3).reshape(
            self.n_atoms, 3) + 400
        self.ts2._forces[:] = np.arange(self.n_atoms * 3).reshape(
            self.n_atoms, 3) + 500

    def tearDown(self):
        try:
            os.unlink(self.outfile)
        except:
            pass
        del self.ts1
        del self.ts2
        del self.tmpdir

    def _write_ts(self, pos, vel, force):
        """Write the two reference timesteps, then open them up and check
        values.

        pos, vel and force are bools which define whether these properties
        should be in the Timestep.
        """
        with mda.Writer(self.outfile, n_atoms=self.n_atoms,
                        velocities=vel, forces=force) as w:
            w.write(self.ts1)
            w.write(self.ts2)

        u = mda.Universe(self.top, self.outfile)
        for ts, ref_ts in zip(u.trajectory, [self.ts1, self.ts2]):
            if pos:
                assert_almost_equal(ts._pos, ref_ts._pos, self.prec)
            else:
                assert_raises(mda.NoDataError, getattr, ts, 'positions')

            if vel:
                assert_almost_equal(ts._velocities, ref_ts._velocities,
                                    self.prec)
            else:
                assert_raises(mda.NoDataError, getattr, ts, 'velocities')

            if force:
                assert_almost_equal(ts._forces, ref_ts._forces, self.prec)
            else:
                assert_raises(mda.NoDataError, getattr, ts, 'forces')
        u.trajectory.close()

    def test_pos(self):
        self._write_ts(True, False, False)

    def test_pos_vel(self):
        self._write_ts(True, True, False)

    def test_pos_force(self):
        self._write_ts(True, False, True)

    def test_pos_vel_force(self):
        self._write_ts(True, True, True)
class TestEncore(TestCase):
    @dec.skipif(parser_not_found('DCD'),
                'DCD parser not available. Are you using Python 3?')
    def setUp(self):
        # Create universes from the templates defined in setUpClass
        self.ens1 = mda.Universe(
            self.ens1_template.filename,
            self.ens1_template.trajectory.timeseries(format='fac'),
            format=mda.coordinates.memory.MemoryReader)
        self.ens2 = mda.Universe(
            self.ens2_template.filename,
            self.ens2_template.trajectory.timeseries(format='fac'),
            format=mda.coordinates.memory.MemoryReader)

    def tearDown(self):
        del self.ens1
        del self.ens2

    @classmethod
    @dec.skipif(parser_not_found('DCD'),
                'DCD parser not available. Are you using Python 3?')
    def setUpClass(cls):
        # To speed up the tests, we read the trajectories from file only once
        # and then recreate them from their coordinate arrays for each test
        super(TestEncore, cls).setUpClass()
        cls.ens1_template = mda.Universe(PSF, DCD)
        cls.ens2_template = mda.Universe(PSF, DCD2)
        cls.ens1_template.transfer_to_memory()
        cls.ens2_template.transfer_to_memory()
        # Filter the ensembles to include only every 5th frame
        cls.ens1_template = mda.Universe(
            cls.ens1_template.filename,
            np.copy(cls.ens1_template.trajectory.timeseries(
                format='fac')[::5, :, :]),
            format=mda.coordinates.memory.MemoryReader)
        cls.ens2_template = mda.Universe(
            cls.ens2_template.filename,
            np.copy(cls.ens2_template.trajectory.timeseries(
                format='fac')[::5, :, :]),
            format=mda.coordinates.memory.MemoryReader)

    @classmethod
    def tearDownClass(cls):
        del cls.ens1_template
        del cls.ens2_template

    @staticmethod
    def test_triangular_matrix():
        scalar = 2
        size = 3
        expected_value = 1.984
        filename = tempfile.mktemp() + ".npz"

        triangular_matrix = encore.utils.TriangularMatrix(size=size)
        triangular_matrix[0, 1] = expected_value
        assert_equal(
            triangular_matrix[0, 1], expected_value,
            err_msg="Data error in TriangularMatrix: read/write are not "
                    "consistent")
        assert_equal(
            triangular_matrix[0, 1], triangular_matrix[1, 0],
            err_msg="Data error in TriangularMatrix: matrix is not symmetric")

        triangular_matrix.savez(filename)
        triangular_matrix_2 = encore.utils.TriangularMatrix(size=size,
                                                            loadfile=filename)
        assert_equal(
            triangular_matrix_2[0, 1], expected_value,
            err_msg="Data error in TriangularMatrix: loaded matrix is not "
                    "consistent with the saved matrix")

        triangular_matrix_3 = encore.utils.TriangularMatrix(size=size)
        triangular_matrix_3.loadz(filename)
        assert_equal(
            triangular_matrix_3[0, 1], expected_value,
            err_msg="Data error in TriangularMatrix: loaded matrix is not "
                    "consistent with the saved matrix")

        incremented_triangular_matrix = triangular_matrix + scalar
        assert_equal(
            incremented_triangular_matrix[0, 1], expected_value + scalar,
            err_msg="Error in TriangularMatrix: addition of scalar gave "
                    "inconsistent results")

        triangular_matrix += scalar
        assert_equal(
            triangular_matrix[0, 1], expected_value + scalar,
            err_msg="Error in TriangularMatrix: addition of scalar gave "
                    "inconsistent results")

        multiplied_triangular_matrix_2 = triangular_matrix_2 * scalar
        assert_equal(
            multiplied_triangular_matrix_2[0, 1], expected_value * scalar,
            err_msg="Error in TriangularMatrix: multiplication by scalar gave "
                    "inconsistent results")

        triangular_matrix_2 *= scalar
        assert_equal(
            triangular_matrix_2[0, 1], expected_value * scalar,
            err_msg="Error in TriangularMatrix: multiplication by scalar gave "
                    "inconsistent results")

    @staticmethod
    def test_parallel_calculation():
        def function(x):
            return x**2

        arguments = [tuple([i]) for i in np.arange(0, 100)]
        parallel_calculation = encore.utils.ParallelCalculation(
            function=function, n_jobs=4, args=arguments)
        results = parallel_calculation.run()
        for i, r in enumerate(results):
            assert_equal(
                r[1], arguments[i][0]**2,
                err_msg="Unexpected results from ParallelCalculation")

    def test_rmsd_matrix_with_superimposition(self):
        conf_dist_matrix = encore.confdistmatrix.conformational_distance_matrix(
            self.ens1,
            encore.confdistmatrix.set_rmsd_matrix_elements,
            selection="name CA",
            pairwise_align=True,
            weights='mass',
            n_jobs=1)

        reference = rms.RMSD(self.ens1, select="name CA")
        reference.run()

        for i, rmsd in enumerate(reference.rmsd):
            assert_almost_equal(
                conf_dist_matrix[0, i], rmsd[2], decimal=3,
                err_msg="calculated RMSD values differ from the reference "
                        "implementation")

    def test_rmsd_matrix_with_superimposition_custom_weights(self):
        conf_dist_matrix = encore.confdistmatrix.conformational_distance_matrix(
            self.ens1,
            encore.confdistmatrix.set_rmsd_matrix_elements,
            selection="name CA",
            pairwise_align=True,
            weights='mass',
            n_jobs=1)

        conf_dist_matrix_custom = encore.confdistmatrix.conformational_distance_matrix(
            self.ens1,
            encore.confdistmatrix.set_rmsd_matrix_elements,
            selection="name CA",
            pairwise_align=True,
            weights=(self.ens1.atoms.CA.masses, self.ens1.atoms.CA.masses),
            n_jobs=1)

        for i in range(conf_dist_matrix_custom.size):
            assert_almost_equal(conf_dist_matrix_custom[0, i],
                                conf_dist_matrix[0, i])

    def test_rmsd_matrix_without_superimposition(self):
        selection_string = "name CA"
        selection = self.ens1.select_atoms(selection_string)
        reference_rmsd = []
        coordinates = self.ens1.trajectory.timeseries(selection, format='fac')
        for coord in coordinates:
            reference_rmsd.append(
                rms.rmsd(coordinates[0], coord, superposition=False))

        confdist_matrix = encore.confdistmatrix.conformational_distance_matrix(
            self.ens1,
            encore.confdistmatrix.set_rmsd_matrix_elements,
            selection=selection_string,
            pairwise_align=False,
            weights='mass',
            n_jobs=1)

        print(repr(confdist_matrix.as_array()[0, :]))
        assert_almost_equal(
            confdist_matrix.as_array()[0, :], reference_rmsd, decimal=3,
            err_msg="calculated RMSD values differ from reference")

    @staticmethod
    def test_ensemble_superimposition():
        aligned_ensemble1 = mda.Universe(PSF, DCD)
        align.AlignTraj(aligned_ensemble1, aligned_ensemble1,
                        select="name CA", in_memory=True).run()
        aligned_ensemble2 = mda.Universe(PSF, DCD)
        align.AlignTraj(aligned_ensemble2, aligned_ensemble2,
                        select="name *", in_memory=True).run()

        rmsfs1 = rms.RMSF(aligned_ensemble1.select_atoms('name *'))
        rmsfs1.run()
        rmsfs2 = rms.RMSF(aligned_ensemble2.select_atoms('name *'))
        rmsfs2.run()

        assert_equal(
            sum(rmsfs1.rmsf) > sum(rmsfs2.rmsf), True,
            err_msg="Ensemble aligned on all atoms should have lower "
                    "full-atom RMSF than ensemble aligned on only CAs.")

    @staticmethod
    def test_ensemble_superimposition_to_reference_non_weighted():
        aligned_ensemble1 = mda.Universe(PSF, DCD)
        align.AlignTraj(aligned_ensemble1, aligned_ensemble1,
                        select="name CA", in_memory=True).run()
        aligned_ensemble2 = mda.Universe(PSF, DCD)
        align.AlignTraj(aligned_ensemble2, aligned_ensemble2,
                        select="name *", in_memory=True).run()

        rmsfs1 = rms.RMSF(aligned_ensemble1.select_atoms('name *'))
        rmsfs1.run()
        rmsfs2 = rms.RMSF(aligned_ensemble2.select_atoms('name *'))
        rmsfs2.run()

        assert_equal(
            sum(rmsfs1.rmsf) > sum(rmsfs2.rmsf), True,
            err_msg="Ensemble aligned on all atoms should have lower "
                    "full-atom RMSF than ensemble aligned on only CAs.")

    def test_hes_to_self(self):
        results, details = encore.hes([self.ens1, self.ens1])
        result_value = results[0, 1]
        expected_value = 0.
        assert_almost_equal(
            result_value, expected_value,
            err_msg="Harmonic Ensemble Similarity to itself not zero: "
                    "{0:f}".format(result_value))

    def test_hes(self):
        results, details = encore.hes([self.ens1, self.ens2], weights='mass')
        result_value = results[0, 1]
        min_bound = 1E5
        self.assertGreater(
            result_value, min_bound,
            msg="Unexpected value for Harmonic Ensemble Similarity: {0:f}. "
                "Expected at least {1:f}.".format(result_value, min_bound))

    def test_hes_custom_weights(self):
        results, details = encore.hes([self.ens1, self.ens2], weights='mass')
        results_custom, details_custom = encore.hes(
            [self.ens1, self.ens2],
            weights=(self.ens1.atoms.CA.masses, self.ens2.atoms.CA.masses))
        result_value = results[0, 1]
        result_value_custom = results_custom[0, 1]
        assert_almost_equal(result_value, result_value_custom)

    def test_hes_align(self):
        # This test is massively sensitive!
        # Got 5260 when masses were float32?
        results, details = encore.hes([self.ens1, self.ens2], align=True)
        result_value = results[0, 1]
        expected_value = 2047.05
        assert_almost_equal(
            result_value, expected_value, decimal=-3,
            err_msg="Unexpected value for Harmonic Ensemble Similarity: "
                    "{0:f}. Expected {1:f}.".format(result_value,
                                                    expected_value))

    def test_ces_to_self(self):
        results, details = encore.ces(
            [self.ens1, self.ens1],
            clustering_method=encore.AffinityPropagationNative(preference=-3.0))
        result_value = results[0, 1]
        expected_value = 0.
        assert_almost_equal(
            result_value, expected_value,
            err_msg="Clustering Ensemble Similarity to itself not zero: "
                    "{0:f}".format(result_value))

    def test_ces(self):
        results, details = encore.ces([self.ens1, self.ens2])
        result_value = results[0, 1]
        expected_value = 0.51
        assert_almost_equal(
            result_value, expected_value, decimal=2,
            err_msg="Unexpected value for Cluster Ensemble Similarity: "
                    "{0:f}. Expected {1:f}.".format(result_value,
                                                    expected_value))

    @dec.skipif(module_not_found('scipy'),
                "Test skipped because scipy is not available.")
    def test_dres_to_self(self):
        results, details = encore.dres([self.ens1, self.ens1])
        result_value = results[0, 1]
        expected_value = 0.
        assert_almost_equal(
            result_value, expected_value, decimal=2,
            err_msg="Dim. Reduction Ensemble Similarity to itself not zero: "
                    "{0:f}".format(result_value))

    @dec.skipif(module_not_found('scipy'),
                "Test skipped because scipy is not available.")
    def test_dres(self):
        results, details = encore.dres([self.ens1, self.ens2],
                                       selection="name CA and resnum 1-10")
        result_value = results[0, 1]
        upper_bound = 0.6
        self.assertLess(
            result_value, upper_bound,
            msg="Unexpected value for Dim. reduction Ensemble Similarity: "
                "{0:f}. Expected at most {1:f}.".format(result_value,
                                                        upper_bound))

    @dec.skipif(module_not_found('scipy'),
                "Test skipped because scipy is not available.")
    def test_dres_without_superimposition(self):
        distance_matrix = encore.get_distance_matrix(
            encore.merge_universes([self.ens1, self.ens2]),
            superimpose=False)
        results, details = encore.dres([self.ens1, self.ens2],
                                       distance_matrix=distance_matrix)
        result_value = results[0, 1]
        expected_value = 0.68
        assert_almost_equal(
            result_value, expected_value, decimal=1,
            err_msg="Unexpected value for Dim. reduction Ensemble Similarity: "
                    "{0:f}. Expected {1:f}.".format(result_value,
                                                    expected_value))

    def test_ces_convergence(self):
        expected_values = [0.3443593, 0.1941854, 0.06857104, 0.]
        results = encore.ces_convergence(self.ens1, 5)
        print(results)
        for i, ev in enumerate(expected_values):
            assert_almost_equal(
                ev, results[i], decimal=2,
                err_msg="Unexpected value for Clustering Ensemble similarity "
                        "in convergence estimation")

    @dec.skipif(module_not_found('scipy'),
                "Test skipped because scipy is not available.")
    def test_dres_convergence(self):
        expected_values = [0.3, 0.]
        results = encore.dres_convergence(self.ens1, 10)
        assert_almost_equal(
            results[:, 0], expected_values, decimal=1,
            err_msg="Unexpected value for Dim. reduction Ensemble similarity "
                    "in convergence estimation")

    @dec.slow
    def test_hes_error_estimation(self):
        expected_average = 10
        expected_stdev = 12
        averages, stdevs = encore.hes([self.ens1, self.ens1],
                                      estimate_error=True,
                                      bootstrapping_samples=10,
                                      selection="name CA and resnum 1-10")
        average = averages[0, 1]
        stdev = stdevs[0, 1]
        assert_almost_equal(
            average, expected_average, decimal=-2,
            err_msg="Unexpected average value for bootstrapped samples in "
                    "Harmonic Ensemble Similarity")
        assert_almost_equal(
            stdev, expected_stdev, decimal=-2,
            err_msg="Unexpected standard deviation for bootstrapped samples "
                    "in Harmonic Ensemble Similarity")

    @dec.slow
    def test_ces_error_estimation(self):
        expected_average = 0.03
        expected_stdev = 0.31
        averages, stdevs = encore.ces(
            [self.ens1, self.ens1],
            estimate_error=True,
            bootstrapping_samples=10,
            clustering_method=encore.AffinityPropagationNative(preference=-2.0),
            selection="name CA and resnum 1-10")
        average = averages[0, 1]
        stdev = stdevs[0, 1]
        assert_almost_equal(
            average, expected_average, decimal=1,
            err_msg="Unexpected average value for bootstrapped samples in "
                    "Clustering Ensemble similarity")
        assert_almost_equal(
            stdev, expected_stdev, decimal=0,
            err_msg="Unexpected standard deviation for bootstrapped samples "
                    "in Clustering Ensemble similarity")

    @dec.skipif(module_not_found('sklearn'),
                "Test skipped because sklearn is not available.")
    @dec.slow
    def test_ces_error_estimation_ensemble_bootstrap(self):
        # Error estimation using a method that does not take a distance
        # matrix as input, and therefore relies on bootstrapping the
        # ensembles instead
        expected_average = 0.03
        expected_stdev = 0.02
        averages, stdevs = encore.ces(
            [self.ens1, self.ens1],
            estimate_error=True,
            bootstrapping_samples=10,
            clustering_method=encore.KMeans(n_clusters=2),
            selection="name CA and resnum 1-10")
        average = averages[0, 1]
        stdev = stdevs[0, 1]
        assert_almost_equal(
            average, expected_average, decimal=1,
            err_msg="Unexpected average value for bootstrapped samples in "
                    "Clustering Ensemble similarity")
        assert_almost_equal(
            stdev, expected_stdev, decimal=1,
            err_msg="Unexpected standard deviation for bootstrapped samples "
                    "in Clustering Ensemble similarity")

    @dec.slow
    @dec.skipif(module_not_found('scipy'),
                "Test skipped because scipy is not available.")
    def test_dres_error_estimation(self):
        average_upper_bound = 0.3
        stdev_upper_bound = 0.2
        averages, stdevs = encore.dres([self.ens1, self.ens1],
                                       estimate_error=True,
                                       bootstrapping_samples=10,
                                       selection="name CA and resnum 1-10")
        average = averages[0, 1]
        stdev = stdevs[0, 1]
        self.assertLess(
            average, average_upper_bound,
            msg="Unexpected average value for bootstrapped samples in "
                "Dim. reduction Ensemble similarity")
        self.assertLess(
            stdev, stdev_upper_bound,
            msg="Unexpected standard deviation for bootstrapped samples in "
                "Dim. reduction Ensemble similarity")
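# Illustrative sketch (not part of the test suite): a minimal, hedged example
# of calling the encore similarity functions exercised by TestEncore directly.
# The call signatures mirror the ones used in the tests above; PSF, DCD and
# DCD2 are assumed to be the topology/trajectory fixtures from
# MDAnalysisTests.datafiles, and the helper name below is hypothetical.
def _example_encore_similarity():
    import MDAnalysis as mda
    from MDAnalysis.analysis import encore
    from MDAnalysisTests.datafiles import PSF, DCD, DCD2

    ens1 = mda.Universe(PSF, DCD)
    ens2 = mda.Universe(PSF, DCD2)

    # Each method returns a matrix of pairwise similarities (here 2x2, with
    # the off-diagonal element comparing ens1 with ens2) plus a details object
    # describing intermediate results.
    hes_matrix, hes_details = encore.hes([ens1, ens2])
    ces_matrix, ces_details = encore.ces([ens1, ens2])
    dres_matrix, dres_details = encore.dres([ens1, ens2])

    return hes_matrix[0, 1], ces_matrix[0, 1], dres_matrix[0, 1]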
class TestNCDF(BaseTimestepInterfaceTest):
    @dec.skipif(module_not_found("netCDF4"),
                "Test skipped because netCDF is not available.")
    def setUp(self):
        u = self.u = mda.Universe(PRMncdf, NCDF)
        self.ts = u.trajectory.ts
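# Illustrative sketch (not part of the test suite): how the Timestep object
# checked by TestNCDF might be inspected for an Amber NetCDF trajectory.
# frame, time and positions are standard Timestep attributes; PRMncdf and
# NCDF are assumed to be the fixtures from MDAnalysisTests.datafiles, the
# netCDF4 package is assumed to be installed (as in the skipif decorator
# above), and the helper name is hypothetical.
def _example_ncdf_timestep():
    import MDAnalysis as mda
    from MDAnalysisTests.datafiles import PRMncdf, NCDF

    u = mda.Universe(PRMncdf, NCDF)
    ts = u.trajectory.ts  # Timestep of the currently loaded frame
    return ts.frame, ts.time, ts.positions[:3]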
class TestHOLEtraj(TestCase):
    filename = MULTIPDB_HOLE
    start = 5
    stop = 7

    # HOLE is so slow that we run it only once and keep the result on the
    # class; note that you may not change universe.trajectory (e.g. by
    # iterating over it) because this is not safe in parallel
    @classmethod
    def setUpClass(cls):
        cls.universe = MDAnalysis.Universe(cls.filename)
        if not executable_not_found("hole"):
            with tempdir.in_tempdir():
                H = HOLEtraj(cls.universe, start=cls.start,
                             stop=cls.stop, raseed=31415)
                H.run()
                cls.H = H
        else:
            cls.H = None

        cls.frames = [ts.frame
                      for ts in cls.universe.trajectory[cls.start:cls.stop]]

    @classmethod
    def tearDownClass(cls):
        del cls.H
        del cls.universe

    # Running HOLE on all 11 frames is VERY slow, so we only analyse two
    @attr('slow')
    @dec.skipif(executable_not_found("hole"),
                msg="Test skipped because HOLE not found")
    def test_HOLEtraj(self):
        assert_array_equal(
            sorted(self.H.profiles.keys()), self.frames,
            err_msg="H.profiles.keys() should contain the frame numbers")
        data = np.transpose([(len(p), p.rxncoord.mean(), p.radius.min())
                             for p in self.H.profiles.values()])
        assert_array_equal(data[0], [401, 399],
                           err_msg="incorrect profile lengths")
        assert_array_almost_equal(data[1], [1.98767, 0.0878],
                                  err_msg="wrong mean HOLE rxncoord")
        assert_array_almost_equal(data[2], [1.19819, 1.29628],
                                  err_msg="wrong minimum radius")

    @attr('slow')
    @dec.skipif(executable_not_found("hole"),
                msg="Test skipped because HOLE not found")
    def test_min_radius(self):
        assert_array_almost_equal(self.H.min_radius(),
                                  np.array([[5., 1.19819],
                                            [6., 1.29628]]),
                                  err_msg="min_radius() array not correct")

    @attr('slow')
    @dec.skipif(executable_not_found("hole"),
                msg="Test skipped because HOLE not found")
    @dec.skipif(module_not_found("matplotlib"))
    def test_plot(self):
        import matplotlib.axes
        ax = self.H.plot(label=True)
        assert_(isinstance(ax, matplotlib.axes.Axes),
                msg="H.plot() did not produce an Axes instance")

    @attr('slow')
    @dec.skipif(executable_not_found("hole"),
                msg="Test skipped because HOLE not found")
    @dec.skipif(module_not_found("matplotlib"))
    def test_plot3D(self):
        import mpl_toolkits.mplot3d
        ax = self.H.plot3D()
        assert_(isinstance(ax, mpl_toolkits.mplot3d.Axes3D),
                msg="H.plot3D() did not produce an Axes3D instance")

    @attr('slow')
    @dec.skipif(executable_not_found("hole"),
                msg="Test skipped because HOLE not found")
    @dec.skipif(module_not_found("matplotlib"))
    def test_plot3D_rmax(self):
        import mpl_toolkits.mplot3d
        ax = self.H.plot3D(rmax=2.5)
        assert_(isinstance(ax, mpl_toolkits.mplot3d.Axes3D),
                msg="H.plot3D(rmax=float) did not produce an Axes3D instance")
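# Illustrative sketch (not part of the test suite): a hedged example of
# driving HOLEtraj the way TestHOLEtraj does.  It assumes the external "hole"
# binary is on the PATH, that HOLEtraj is importable from
# MDAnalysis.analysis.hole, and that MULTIPDB_HOLE is the multi-model PDB
# fixture from MDAnalysisTests.datafiles; the helper name is hypothetical.
# Note that HOLE writes its working files into the current directory, which
# is why the test class runs it inside a temporary directory.
def _example_holetraj_profiles():
    import MDAnalysis
    from MDAnalysis.analysis.hole import HOLEtraj
    from MDAnalysisTests.datafiles import MULTIPDB_HOLE

    universe = MDAnalysis.Universe(MULTIPDB_HOLE)
    # Restrict the (slow) HOLE run to two frames, as the test does.
    H = HOLEtraj(universe, start=5, stop=7, raseed=31415)
    H.run()

    # H.profiles maps frame numbers to per-frame radius profiles; the minimum
    # pore radius per frame is also available as a (frame, radius) array.
    for frame, profile in sorted(H.profiles.items()):
        print(frame, len(profile), profile.radius.min())
    return H.min_radius()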