Example 1
  def test_r2_score(self):
    """Test that R^2 metric passes basic sanity tests."""
    verbosity = "high"
    np.random.seed(123)
    n_samples = 10
    y_true = np.random.rand(n_samples,)
    y_pred = np.random.rand(n_samples,)
    regression_metric = Metric(metrics.r2_score, verbosity=verbosity)
    assert np.isclose(metrics.r2_score(y_true, y_pred),
                      regression_metric.compute_metric(y_true, y_pred))
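
This test is excerpted from a larger unittest class, so its imports and TestCase scaffolding are not shown. Below is a minimal, self-contained sketch of the same sanity check; the import paths (sklearn's metrics module and deepchem.metrics.Metric) and the TestMetrics class are assumptions, and the verbosity keyword, which appears to belong to an older DeepChem Metric API, is omitted.

# Minimal standalone sketch of the R^2 sanity check (assumes a recent
# DeepChem where Metric takes just the metric callable).
import unittest

import numpy as np
from sklearn import metrics

from deepchem.metrics import Metric


class TestMetrics(unittest.TestCase):

  def test_r2_score(self):
    """R^2 computed through Metric matches sklearn's r2_score directly."""
    np.random.seed(123)
    n_samples = 10
    y_true = np.random.rand(n_samples,)
    y_pred = np.random.rand(n_samples,)
    regression_metric = Metric(metrics.r2_score)
    assert np.isclose(
        metrics.r2_score(y_true, y_pred),
        regression_metric.compute_metric(y_true, y_pred))


if __name__ == "__main__":
  unittest.main()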
Example 2
  # Presumably the tail of the call that builds support_generator; the
  # opening of this call falls outside the excerpted snippet.
      test_dataset, range(len(test_dataset.get_task_names())), n_pos, n_neg,
      n_trials, replace)

  # Compute accuracies
  task_scores = {task: [] for task in range(len(test_dataset.get_task_names()))}
  for (task, support) in support_generator:
    # Train model on support
    sklearn_model = RandomForestClassifier(
        class_weight="balanced", n_estimators=50)
    model = SklearnModel(sklearn_model, model_dir)
    model.fit(support)

    # Test model
    task_dataset = get_task_dataset_minus_support(test_dataset, support, task)
    y_pred = model.predict_proba(task_dataset)
    score = metric.compute_metric(
        task_dataset.y, y_pred, task_dataset.w)
    #print("Score on task %s is %s" % (str(task), str(score)))
    task_scores[task].append(score)

  # Join information for all tasks.
  mean_task_scores = {}
  for task in range(len(test_dataset.get_task_names())):
    mean_task_scores[task] = np.mean(np.array(task_scores[task]))
  print("Fold %s" % str(fold))
  print(mean_task_scores)

  # Record each task's mean score under its identifier in the overall results.
  for (fold_task, task) in zip(fold_tasks,
                               range(len(test_dataset.get_task_names()))):
    all_scores[fold_task] = mean_task_scores[task]

print("All scores")
print(all_scores)
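
The loop above depends on DeepChem-specific pieces constructed outside the excerpt (the support generator, the SklearnModel wrapper, get_task_dataset_minus_support, the metric object, and the fold bookkeeping). The sketch below reproduces only the scoring bookkeeping, collecting scores per task across trials and averaging them, using plain scikit-learn on synthetic data; the ROC AUC metric, the random data, and the per-trial train/test split are illustrative assumptions, not part of the original support-set protocol.

# Sketch: the per-task score bookkeeping from above, on synthetic data.
# A balanced random forest is trained once per (trial, task) pair, scored,
# and the per-task scores are averaged at the end.
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split

rng = np.random.RandomState(0)
n_tasks, n_samples, n_features, n_trials = 3, 200, 16, 5
X = rng.rand(n_samples, n_features)
# One binary label column per task.
y = (rng.rand(n_samples, n_tasks) > 0.7).astype(int)

task_scores = {task: [] for task in range(n_tasks)}
for trial in range(n_trials):
  for task in range(n_tasks):
    X_train, X_test, y_train, y_test = train_test_split(
        X, y[:, task], test_size=0.5, stratify=y[:, task], random_state=trial)
    model = RandomForestClassifier(class_weight="balanced", n_estimators=50)
    model.fit(X_train, y_train)
    y_pred = model.predict_proba(X_test)[:, 1]
    task_scores[task].append(roc_auc_score(y_test, y_pred))

# Join information for all tasks.
mean_task_scores = {
    task: np.mean(task_scores[task]) for task in range(n_tasks)
}
print(mean_task_scores)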