def test_discretizer(self):
    """Smoke-test api.discretizer with every transform/cluster combination.

    Each configuration only needs to build its pipeline and produce output
    without raising (exception safety check).
    """
    reader = DataInMemory(data=self.generated_data)
    # Factories keep construction lazy so each transform/cluster object is
    # created right before its discretizer call, as in a sequential test.
    configurations = (
        lambda: {},
        lambda: {'transform': api.tica()},
        lambda: {'cluster': api.cluster_uniform_time()},
        lambda: {'transform': api.pca(), 'cluster': api.cluster_regspace(dmin=10)},
    )
    for make_kwargs in configurations:
        api.discretizer(reader, **make_kwargs())._chain[-1].get_output()
def test_no_transform(self):
    """Pipelines of reader + clustering (no transform stage) must run cleanly.

    Exercises each clustering algorithm directly downstream of the trajectory
    reader and pulls the final output to force full evaluation.
    """
    reader = api.source(self.traj_files, top=self.pdb_file)
    # Lazy factories so each clustering object is built just before use.
    cluster_factories = (
        lambda: api.cluster_kmeans(k=10),
        lambda: api.cluster_regspace(dmin=10),
        lambda: api.cluster_uniform_time(),
    )
    for make_cluster in cluster_factories:
        api.pipeline([reader, make_cluster()])._chain[-1].get_output()
def test_regspace_nthreads(self):
    """Serial and multi-job regspace clustering must yield identical centers."""
    for dist_metric in ('euclidean', 'minRMSD'):
        # Reference: estimate on the fixture clustering with a single job ...
        self.clustering.estimate(self.src, n_jobs=1, dmin=self.dmin,
                                 metric=dist_metric)
        # ... and compare against a fresh estimate using two parallel jobs.
        parallel = cluster_regspace(self.src, n_jobs=2, dmin=self.dmin,
                                    metric=dist_metric)
        np.testing.assert_equal(self.clustering.clustercenters,
                                parallel.clustercenters)
def test_is_parametrized(self):
    """_is_parametrized() flips from False to True once parametrize() runs."""
    # Build a pipeline containing every available transformer/clustering stage,
    # but defer parametrization with run=False.
    stages = [
        api.source(self.traj_files, top=self.pdb_file),
        api.tica(),
        api.pca(),
        api.cluster_kmeans(k=50),
        api.cluster_regspace(dmin=50),
        api.cluster_uniform_time(k=20),
    ]
    pipe = api.pipeline(stages, run=False)
    self.assertFalse(pipe._is_parametrized(),
                     "If run=false, the pipeline should not be parametrized.")
    pipe.parametrize()
    self.assertTrue(pipe._is_parametrized(),
                    "If parametrized was called, the pipeline should be parametrized.")
def test1d_data(self):
    """Regular-space clustering must accept plain 1-dimensional input."""
    samples = np.random.random(100)
    # Only checks that no exception is raised for 1-D data.
    cluster_regspace(samples, dmin=0.3)