def foo(self, actual, prediction):
        """Score `prediction` against the ground truth `actual` at 1..10 repetitions."""
        _, _, gt_data_for_sum = create_target_table(self.sub_gcd_res, actual)
        _, _, actual_data_for_sum = create_target_table(self.sub_gcd_res, prediction[:, 1])

        all_accuracies = {rep: accuracy_by_repetition(actual_data_for_sum,
                                                      gt_data_for_sum,
                                                      number_of_repetition=rep)
                          for rep in range(1, 11)}

        print ", ".join([
                            "acc {}:{}".format(k, v)
                            for k, v in all_accuracies.iteritems()])
        return all_accuracies
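
# `create_target_table` and `accuracy_by_repetition` are project-local helpers
# that are not shown on this page. A rough sketch of the assumed semantics of
# `accuracy_by_repetition` (illustrative only, not the project's actual code):
# per block, sum each stimulus's scores over the first N repetitions, take the
# argmax stimulus, and compare it with the ground-truth argmax.
import numpy as np

def accuracy_by_repetition_sketch(actual_data_for_sum, gt_data_for_sum,
                                  number_of_repetition):
    # Inputs are assumed to be arrays shaped (blocks, repetitions, stimuli).
    actual = np.asarray(actual_data_for_sum)[:, :number_of_repetition, :].sum(axis=1)
    gt = np.asarray(gt_data_for_sum)[:, :number_of_repetition, :].sum(axis=1)
    return float(np.mean(actual.argmax(axis=1) == gt.argmax(axis=1)))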
Example #2
        # plt.show()
        auc_score = roc_auc_score(test_target_gcd, test_prediction[:, 1])
        print "auc_score:{0}".format(auc_score)
        sub_gcd_res = create_data_for_compare_by_repetition(file_name)
        # sub_gcd_res = dict(train_trial=gcd_res['train_trial'][gcd_res['train_mode'] != 1],
        # train_block=gcd_res['train_block'][gcd_res['train_mode'] != 1],
        # stimulus=gcd_res['stimulus'][gcd_res['train_mode'] != 1])

        _, _, gt_data_for_sum = create_target_table(sub_gcd_res,
                                                    test_target_gcd)
        _, _, actual_data_for_sum = create_target_table(
            sub_gcd_res, test_prediction[:, 1])

        print "accuracy_by_repetition {0}".format(
            accuracy_by_repetition(actual_data_for_sum,
                                   gt_data_for_sum,
                                   number_of_repetition=10))

    results.append(
        dict(subject_name=subject_name,
             test_prediction=test_prediction,
             auc_score=auc_score))
    break  # process only the first subject in this example run
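
# `results` collects one summary dict per subject; because of the `break`
# above, only the first subject is kept here. A later aggregation could look
# like this (illustrative, not from the original notebook):
#     mean_auc = sum(r["auc_score"] for r in results) / len(results)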

# In[4]:

import keras
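
# The snippets above read `test_prediction[:, 1]` as a positive-class
# probability. Below is a minimal, self-contained sketch of a Keras model that
# yields predictions of that shape; the architecture, shapes, and
# hyperparameters are illustrative assumptions, not the notebook's real model.
import numpy as np
from keras.models import Sequential
from keras.layers import Dense

rng = np.random.RandomState(0)
X_demo = rng.randn(100, 8).astype("float32")          # 100 samples, 8 features
y_demo = keras.utils.to_categorical(rng.randint(0, 2, 100), 2)

demo_model = Sequential([
    Dense(16, activation="relu", input_shape=(8,)),
    Dense(2, activation="softmax"),                   # columns: P(cls 0), P(cls 1)
])
demo_model.compile(loss="categorical_crossentropy", optimizer="adam")
demo_model.fit(X_demo, y_demo, epochs=2, batch_size=16, verbose=0)
demo_prediction = demo_model.predict(X_demo)          # shape (100, 2)
# demo_prediction[:, 1] plays the role of test_prediction[:, 1] above.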
