def test_diagonal_empty_codes(self):
  importance_matrix = np.array([[1., 0.], [0., 1.], [0., 0.]])
  result = modularity_explicitness.modularity(importance_matrix)
  np.testing.assert_allclose(result, 2. / 3.)
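These tests exercise modularity_explicitness.modularity, which scores a [num_codes, num_factors] importance matrix. For reference, a minimal sketch of that score, assuming it follows the Ridgeway & Mozer style modularity definition (an illustrative reimplementation, not the library source):

import numpy as np

def modularity_sketch(importance_matrix):
  """Illustrative modularity score of a [num_codes, num_factors] matrix."""
  squared = np.square(importance_matrix)
  max_squared = np.max(squared, axis=1)
  # Deviation of each code from the ideal template where it responds to a
  # single factor only.
  numerator = np.sum(squared, axis=1) - max_squared
  denominator = max_squared * (squared.shape[1] - 1.)
  safe_denominator = np.where(denominator == 0., 1., denominator)
  per_code = 1. - numerator / safe_denominator
  # Codes with no importance at all contribute zero modularity.
  per_code = np.where(max_squared == 0., 0., per_code)
  return np.mean(per_code)

On the matrix in the test above this gives 2/3: two perfectly modular codes and one dead code averaged together.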
def aggregation_modularity(matrix, ys):
  """Aggregation function of the modularity score."""
  del ys
  score = {}
  score["modularity"] = modularity_explicitness.modularity(matrix)
  modularity_per_code = compute_modularity_per_code(matrix)
  assert len(modularity_per_code) == matrix.shape[0], "Wrong length."
  for i in range(len(modularity_per_code)):
    score["modularity.code_{}".format(i)] = modularity_per_code[i]
  return score
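aggregation_modularity relies on compute_modularity_per_code, which is not shown in this snippet. A plausible sketch, assuming it returns the per-code terms that the aggregate score averages (hypothetical helper, same formula as modularity_sketch above):

import numpy as np

def compute_modularity_per_code(importance_matrix):
  """Hypothetical per-code modularity terms, shape [num_codes]."""
  squared = np.square(importance_matrix)
  max_squared = np.max(squared, axis=1)
  numerator = np.sum(squared, axis=1) - max_squared
  denominator = max_squared * (squared.shape[1] - 1.)
  safe_denominator = np.where(denominator == 0., 1., denominator)
  return np.where(max_squared == 0., 0., 1. - numerator / safe_denominator)

With this, score["modularity"] is the mean of the per-code entries, and the length assert holds because one term is produced per row of the matrix.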
Example #3
def compute_local_modularity(ground_truth_data,
                             representation_function,
                             random_state,
                             artifact_dir=None,
                             num_train=gin.REQUIRED,
                             num_local_clusters=gin.REQUIRED,
                             batch_size=16):
  """Computes the local modularity metric according to Sec. 3.

  Args:
    ground_truth_data: GroundTruthData to be sampled from.
    representation_function: Function that takes observations as input and
      outputs a dim_representation sized representation for each observation.
    random_state: Numpy random state used for randomness.
    artifact_dir: Optional path to directory where artifacts can be saved.
    num_train: Number of points used for training.
    num_local_clusters: Number of times to run the local modularity
      calculation.
    batch_size: Batch size for sampling.

  Returns:
    Dictionary with the average modularity score and the list of per-run
      local modularity scores.
  """
  del artifact_dir
  mod_results = []
  for _ in range(num_local_clusters):
    # Sample a local training set and compute its discretized representation.
    mus_train, ys_train = utils.generate_local_batch_factor_code(
        ground_truth_data, representation_function, num_train,
        random_state, batch_size)
    discretized_mus = utils.make_discretizer(mus_train)
    mutual_information = utils.discrete_mutual_info(discretized_mus, ys_train)
    # Mutual information should have shape [num_codes, num_factors].
    assert mutual_information.shape[0] == mus_train.shape[0]
    assert mutual_information.shape[1] == ys_train.shape[0]
    mod_results.append(modularity_explicitness.modularity(mutual_information))
  mod_results = np.array(mod_results)
  scores = {}
  scores["modularity_score"] = np.mean(mod_results)
  scores["local_modularity_scores_samples"] = mod_results.tolist()
  return scores
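The shape asserts in the loop expect utils.discrete_mutual_info to return a [num_codes, num_factors] table. A minimal sketch of such a table, assuming pairwise discrete mutual information computed with sklearn (the library's own helper may differ in details):

import numpy as np
from sklearn.metrics import mutual_info_score

def discrete_mutual_info_sketch(codes, factors):
  """Pairwise MI table of shape [num_codes, num_factors].

  codes: discretized codes, shape [num_codes, num_points].
  factors: discrete factors, shape [num_factors, num_points].
  """
  mi = np.zeros([codes.shape[0], factors.shape[0]])
  for i in range(codes.shape[0]):
    for j in range(factors.shape[0]):
      mi[i, j] = mutual_info_score(factors[j, :], codes[i, :])
  return mi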
def test_one_code_two_factors(self):
  importance_matrix = np.diag(5. * np.ones(5))
  importance_matrix = np.hstack([importance_matrix, importance_matrix])
  result = modularity_explicitness.modularity(importance_matrix)
  np.testing.assert_allclose(result, 1. - 1. / 9)

def test_missed_factors(self):
  importance_matrix = np.diag(5. * np.ones(5))
  result = modularity_explicitness.modularity(importance_matrix[:2, :])
  np.testing.assert_allclose(result, 1.0)

def test_redundant_codes(self):
  importance_matrix = np.diag(5. * np.ones(5))
  importance_matrix = np.vstack([importance_matrix, importance_matrix])
  result = modularity_explicitness.modularity(importance_matrix)
  np.testing.assert_allclose(result, 1.)

def test_zero(self):
  importance_matrix = np.zeros(shape=[10, 10], dtype=np.float64)
  result = modularity_explicitness.modularity(importance_matrix)
  np.testing.assert_allclose(result, .0)

def test_diagonal(self):
  importance_matrix = np.diag(5. * np.ones(5))
  result = modularity_explicitness.modularity(importance_matrix)
  np.testing.assert_allclose(result, 1.0)
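The test methods above are listed without their enclosing class. A minimal harness, assuming absltest is available and the metric module is importable under the disentanglement_lib path, might look like this (class name is illustrative):

import numpy as np
from absl.testing import absltest
from disentanglement_lib.evaluation.metrics import modularity_explicitness


class ModularityTest(absltest.TestCase):
  # Paste the test_* methods from above into this class body, e.g.:

  def test_zero(self):
    importance_matrix = np.zeros(shape=[10, 10], dtype=np.float64)
    result = modularity_explicitness.modularity(importance_matrix)
    np.testing.assert_allclose(result, 0.)


if __name__ == "__main__":
  absltest.main()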