def test_bad_metric(self):
    """SAP should be near zero when the representation is constant.

    An all-zeros code carries no information about the factors, so the
    attribute-predictability gap must be small.
    """
    data = dummy_data.IdentityObservationsData()

    def representation_function(observations):
        # Constant representation: every observation maps to zeros.
        return np.zeros_like(observations, dtype=np.float64)

    random_state = np.random.RandomState(0)
    scores = sap_score.compute_sap(
        data,
        representation_function,
        random_state,
        None,
        3000,
        3000,
        continuous_factors=True)
    self.assertBetween(scores["SAP_score"], 0.0, 0.2)
def test_metric(self):
    """DCI disentanglement/completeness should be near one for the identity code."""
    data = dummy_data.IdentityObservationsData()

    def representation_function(observations):
        # Identity representation: codes equal the ground-truth factors.
        return np.array(observations, dtype=np.float64)

    random_state = np.random.RandomState(0)
    scores = dci.compute_dci(
        data, representation_function, random_state, None, 1000, 1000)
    self.assertBetween(scores["disentanglement"], 0.9, 1.0)
    self.assertBetween(scores["completeness"], 0.9, 1.0)
def test_bad_metric(self):
    """Discrete MIG should be near zero for a constant representation."""
    gin.bind_parameter("discretizer.discretizer_fn", _identity_discretizer)
    gin.bind_parameter("discretizer.num_bins", 10)
    data = dummy_data.IdentityObservationsData()
    # All-zeros codes share no mutual information with the factors.
    representation_function = np.zeros_like
    random_state = np.random.RandomState(0)
    scores = mig.compute_mig(
        data, representation_function, random_state, None, 3000)
    self.assertBetween(scores["discrete_mig"], 0.0, 0.2)
def test_bad_metric(self):
    """IRS should be near zero when the representation is constant."""
    gin.bind_parameter("discretizer.discretizer_fn", _identity_discretizer)
    gin.bind_parameter("discretizer.num_bins", 10)
    data = dummy_data.IdentityObservationsData()

    def representation_function(observations):
        # Constant representation: every observation maps to zeros.
        return np.zeros_like(observations, dtype=np.float64)

    random_state = np.random.RandomState(0)
    scores = irs.compute_irs(
        data, representation_function, random_state, None, 0.99, 3000, 3000)
    self.assertBetween(scores["IRS"], 0.0, 0.1)
def test_duplicated_latent_space(self):
    """SAP should stay low when every factor appears twice in the code.

    With duplicated dimensions, the two best predictors of each factor are
    equally good, so the gap between them is small.
    """
    data = dummy_data.IdentityObservationsData()

    def representation_function(observations):
        codes = np.array(observations, dtype=np.float64)
        # Stack the factors twice so each one is encoded in two dimensions.
        return np.hstack([codes, codes])

    random_state = np.random.RandomState(0)
    scores = sap_score.compute_sap(
        data,
        representation_function,
        random_state,
        None,
        3000,
        3000,
        continuous_factors=True)
    self.assertBetween(scores["SAP_score"], 0.0, 0.2)
def test_bad_metric(self):
    """FactorVAE classifier accuracy should be near chance for constant codes."""
    data = dummy_data.IdentityObservationsData()
    # All-zeros codes make the fixed-factor index unpredictable.
    representation_function = np.zeros_like
    random_state = np.random.RandomState(0)
    scores = factor_vae.compute_factor_vae(
        data, representation_function, random_state, None, 5, 3000, 2000, 2500)
    self.assertBetween(scores["train_accuracy"], 0.0, 0.2)
    self.assertBetween(scores["eval_accuracy"], 0.0, 0.2)
def test_metric(self):
    """Modularity should be near one for the identity representation."""
    gin.bind_parameter("discretizer.discretizer_fn", _identity_discretizer)
    gin.bind_parameter("discretizer.num_bins", 10)
    data = dummy_data.IdentityObservationsData()

    def representation_function(observations):
        # Identity representation: codes equal the ground-truth factors.
        return np.array(observations, dtype=np.float64)

    random_state = np.random.RandomState(0)
    scores = modularity_explicitness.compute_modularity_explicitness(
        data, representation_function, random_state, None, 3000, 3000)
    self.assertBetween(scores["modularity_score"], 0.9, 1.0)
def test_metric(self):
    """BetaVAE classifier accuracy should be near one for the identity code."""
    data = dummy_data.IdentityObservationsData()

    def representation_function(observations):
        # Pass observations through unchanged.
        return observations

    random_state = np.random.RandomState(0)
    scores = beta_vae.compute_beta_vae_sklearn(
        data, representation_function, random_state, None, 5, 2000, 2000)
    self.assertBetween(scores["train_accuracy"], 0.9, 1.0)
    self.assertBetween(scores["eval_accuracy"], 0.9, 1.0)
def test_duplicated_latent_space(self):
    """DCI on a duplicated code: disentanglement stays high, completeness drops.

    Each code dimension still predicts a single factor (high disentanglement),
    but each factor is captured by two dimensions, which reduces completeness
    to roughly 1 - log(2)/log(10) for 10-valued factors.
    """
    data = dummy_data.IdentityObservationsData()

    def representation_function(observations):
        codes = np.array(observations, dtype=np.float64)
        # Stack the factors twice so each one is encoded in two dimensions.
        return np.hstack([codes, codes])

    random_state = np.random.RandomState(0)
    scores = dci.compute_dci(
        data, representation_function, random_state, None, 1000, 1000)
    self.assertBetween(scores["disentanglement"], 0.9, 1.0)
    # Expected completeness when importance is split evenly across two dims.
    target = 1. - np.log(2) / np.log(10)
    self.assertBetween(scores["completeness"], target - .1, target + .1)
def test_duplicated_latent_space(self):
    """Discrete MIG should be near zero when every factor appears twice.

    The top two code dimensions for each factor carry identical information,
    so the gap between them vanishes.
    """
    gin.bind_parameter("discretizer.discretizer_fn", _identity_discretizer)
    gin.bind_parameter("discretizer.num_bins", 10)
    data = dummy_data.IdentityObservationsData()

    def representation_function(observations):
        codes = np.array(observations, dtype=np.float64)
        # Stack the factors twice so each one is encoded in two dimensions.
        return np.hstack([codes, codes])

    random_state = np.random.RandomState(0)
    scores = mig.compute_mig(
        data, representation_function, random_state, None, 3000)
    self.assertBetween(scores["discrete_mig"], 0.0, 0.1)
def test_bad_metric(self):
    """DCI should be near zero for a randomly permuted representation.

    Permuting the factors within each sample spreads every dimension's
    feature importance evenly across all factors, so both disentanglement
    and completeness should be low.  (The original comment mentioned
    "modularity score"; this test asserts DCI scores.)
    """
    data = dummy_data.IdentityObservationsData()
    random_state_rep = np.random.RandomState(0)

    def representation_function(observations):
        code = np.array(observations, dtype=np.float64)
        # Shuffle the factor values independently per sample.
        for row in range(code.shape[0]):
            code[row, :] = random_state_rep.permutation(code[row, :])
        return code

    random_state = np.random.RandomState(0)
    scores = dci.compute_dci(
        data, representation_function, random_state, None, 1000, 1000)
    self.assertBetween(scores["disentanglement"], 0.0, 0.2)
    self.assertBetween(scores["completeness"], 0.0, 0.2)
def test_bad_metric(self):
    """Modularity should be near zero for a randomly permuted representation.

    Permuting the factors within each sample gives every code dimension
    equal non-zero mutual information with all factors, which yields a low
    modularity score.
    """
    gin.bind_parameter("discretizer.discretizer_fn", _identity_discretizer)
    gin.bind_parameter("discretizer.num_bins", 10)
    data = dummy_data.IdentityObservationsData()
    random_state_rep = np.random.RandomState(0)

    def representation_function(observations):
        code = np.array(observations, dtype=np.float64)
        # Shuffle the factor values independently per sample.
        for row in range(code.shape[0]):
            code[row, :] = random_state_rep.permutation(code[row, :])
        return code

    random_state = np.random.RandomState(0)
    scores = modularity_explicitness.compute_modularity_explicitness(
        data, representation_function, random_state, None, 20000, 20000)
    self.assertBetween(scores["modularity_score"], 0.0, 0.2)