Code Example #1
 def run(self):
     """
     Runs entity resolution at every threshold in self.thresholds.
     :return labels_list: List of predicted-label dicts.
                          labels_list[threshold_index] = dict mapping record identifier to cluster label
     :return metrics_list: List of Metrics objects.
                           metrics_list[threshold_index] = Metrics object
     :return new_metrics_list: List of NewMetrics objects.
                               new_metrics_list[threshold_index] = NewMetrics object
     """
     # er = EntityResolution()  # only needed for the commented-out er.run(...) alternative below
     #weak_match_function = LogisticMatchFunction(self._database_train, self._labels_train, self._train_pair_seed, 0.5)
     weak_match_function = ForestMatchFunction(self._database_train, self._labels_train, self._train_pair_seed, 0.5)
     print 'Testing pairwise match function on validation database'
     ROC = weak_match_function.test(self._database_validation, self._labels_validation, self._validation_seed)
     #ROC.make_plot()
     metrics_list = list()
     labels_list = list()
     new_metrics_list = list()
     class_balance_test = count_pairwise_class_balance(self._labels_test)
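     # single_block=True disables blocking: all test records go into one candidate block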
     blocks = BlockingScheme(self._database_test, single_block=True)
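     # Sweep the decision threshold; higher thresholds link fewer record pairs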
     for threshold in self.thresholds:
         print 'Running entity resolution at threshold =', threshold
         weak_match_function.set_decision_threshold(threshold)
         labels_pred = weak_connected_components(self._database_test, weak_match_function, blocks)
         #labels_pred = er.run(self._database_test, weak_match_function, single_block=True, max_block_size=np.Inf,
         #                     cores=1)
         metrics_list.append(Metrics(self._labels_test, labels_pred))
         new_metrics_list.append(NewMetrics(self._database_test, labels_pred, weak_match_function, class_balance_test))
         labels_list.append(labels_pred)
     return labels_list, metrics_list, new_metrics_list
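
A minimal driver sketch for run(): the experiment class name ThresholdExperiment and its constructor arguments below are hypothetical stand-ins, only the three-value return contract and Metrics.display() come from the examples themselves.

# Hypothetical driver; adjust the class name and constructor to the actual experiment class.
experiment = ThresholdExperiment(database_train, labels_train,
                                 database_validation, labels_validation,
                                 database_test, labels_test,
                                 thresholds=[0.5, 0.6, 0.7, 0.8, 0.9])
labels_list, metrics_list, new_metrics_list = experiment.run()
for threshold, metrics in zip(experiment.thresholds, metrics_list):
    print 'Results at threshold =', threshold
    metrics.display()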
Code Example #2
import numpy as np  # np.random.normal builds the corruption arrays below
from copy import deepcopy  # used to copy the test database before merging

# Project-specific imports (Database, SyntheticDatabase, ForestMatchFunction, BlockingScheme,
# fast_strong_cluster, generate_pair_seed, weak_connected_components, Metrics, ...) come from
# the surrounding repository and are omitted here.

def main():
    """
    Runs a single entity resolution experiment on real or synthetic data, using a pairwise match
    function (logistic regression, decision tree, or random forest)
    """
    data_type = 'real'
    decision_threshold = 0.7
    train_class_balance = 0.5
    max_block_size = 1000
    cores = 2
    if data_type == 'synthetic':
        database_train = SyntheticDatabase(100, 10, 10)
        corruption = 0.1
        corruption_array = corruption*np.random.normal(loc=0.0, scale=1.0, size=[1000,
                                                       database_train.database.feature_descriptor.number])
        database_train.corrupt(corruption_array)

        database_validation = SyntheticDatabase(100, 10, 10)
        corruption_array = corruption*np.random.normal(loc=0.0, scale=1.0, size=[1000,
                                                       database_validation.database.feature_descriptor.number])
        database_validation.corrupt(corruption_array)

        database_test = SyntheticDatabase(10, 10, 10)
        # one corruption row per record: 10 entities x 10 records = 100
        corruption_array = corruption*np.random.normal(loc=0.0, scale=1.0, size=[100,
                                                       database_test.database.feature_descriptor.number])
        database_test.corrupt(corruption_array)
        labels_train = database_train.labels
        labels_validation = database_validation.labels
        labels_test = database_test.labels
        database_train = database_train.database
        database_validation = database_validation.database
        database_test = database_test.database
        single_block = True
    elif data_type == 'real':
        # Uncomment to use all features (annotations and LM)
        #database_train = Database('../data/trafficking/cluster_subsample0_10000.csv', header_path='../data/trafficking/cluster_subsample_header_all.csv')
        #database_validation = Database('../data/trafficking/cluster_subsample1_10000.csv', header_path='../data/trafficking/cluster_subsample_header_all.csv')
        #database_test = Database('../data/trafficking/cluster_subsample2_10000.csv', header_path='../data/trafficking/cluster_subsample_header_all.csv')

        # Uncomment to only use annotation features
        #database_train = Database('../data/trafficking/cluster_subsample0_10000.csv', header_path='../data/trafficking/cluster_subsample_header_annotations.csv')
        #database_validation = Database('../data/trafficking/cluster_subsample1_10000.csv', header_path='../data/trafficking/cluster_subsample_header_annotations.csv')
        #database_test = Database('../data/trafficking/cluster_subsample2_10000.csv', header_path='../data/trafficking/cluster_subsample_header_annotations.csv')

        # Active configuration: only the LM features
        database_train = Database('../data/trafficking/cluster_subsample0_10000.csv', header_path='../data/trafficking/cluster_subsample_header_LM.csv')
        database_validation = Database('../data/trafficking/cluster_subsample1_10000.csv', header_path='../data/trafficking/cluster_subsample_header_LM.csv')
        database_test = Database('../data/trafficking/cluster_subsample2_10000.csv', header_path='../data/trafficking/cluster_subsample_header_LM.csv')

        labels_train = fast_strong_cluster(database_train)
        labels_validation = fast_strong_cluster(database_validation)
        labels_test = fast_strong_cluster(database_test)
        single_block = False
    else:
        raise ValueError('Invalid data type: ' + data_type)

    entities = deepcopy(database_test)  # merge clusters into a copy so database_test itself stays unmodified
    blocking_scheme = BlockingScheme(entities, max_block_size, single_block=single_block)

    # Sample labeled record pairs at the requested class balance; minority class capped at max_minor_class
    train_seed = generate_pair_seed(database_train, labels_train, train_class_balance, require_direct_match=True, max_minor_class=5000)
    validation_seed = generate_pair_seed(database_validation, labels_validation, 0.5, require_direct_match=True, max_minor_class=5000)
    # forest_all = ForestMatchFunction(database_all_train, labels_train, train_seed, decision_threshold)
    # forest_all.test(database_all_validation, labels_validation, validation_seed)
    # tree_all = TreeMatchFunction(database_all_train, labels_train, train_seed, decision_threshold)
    # tree_all.test(database_all_validation, labels_validation, validation_seed)
    # logistic_all = LogisticMatchFunction(database_all_train, labels_train, train_seed, decision_threshold)
    # logistic_all.test(database_all_validation, labels_validation, validation_seed)

    forest_annotations = ForestMatchFunction(database_train, labels_train, train_seed, decision_threshold)
    roc = forest_annotations.test(database_validation, labels_validation, validation_seed)
    #roc.make_plot()
    #plt.show()

    # tree_annotations = TreeMatchFunction(database_annotations_train, labels_train, train_seed, decision_threshold)
    # tree_annotations.test(database_annotations_validation, labels_validation, validation_seed)
    # logistic_annotations = LogisticMatchFunction(database_annotations_train, labels_train, train_seed, decision_threshold)
    # logistic_annotations.test(database_annotations_validation, labels_validation, validation_seed)

    # forest_LM = ForestMatchFunction(database_LM_train, labels_train, train_seed, decision_threshold)
    # forest_LM.test(database_LM_validation, labels_validation, validation_seed)
    # tree_LM = TreeMatchFunction(database_LM_train, labels_train, train_seed, decision_threshold)
    # tree_LM.test(database_LM_validation, labels_validation, validation_seed)
    # logistic_LM = LogisticMatchFunction(database_LM_train, labels_train, train_seed, decision_threshold)
    # logistic_LM.test(database_LM_validation, labels_validation, validation_seed)

    # forest_all.roc.write_rates('match_forest_all.csv')
    # tree_all.roc.write_rates('match_tree_all.csv')
    # logistic_all.roc.write_rates('match_logistic_all.csv')
    #
    # forest_annotations.roc.write_rates('match_forest_annotations.csv')
    # tree_annotations.roc.write_rates('match_tree_annotations.csv')
    # logistic_annotations.roc.write_rates('match_logistic_annotations.csv')
    #
    # forest_LM.roc.write_rates('match_forest_LM.csv')
    # tree_LM.roc.write_rates('match_tree_LM.csv')
    # logistic_LM.roc.write_rates('match_logistic_LM.csv')
    # ax = forest_all.roc.make_plot()
    # _ = tree_all.roc.make_plot(ax=ax)
    # _ = logistic_all.roc.make_plot(ax=ax)
    # plt.show()
    #forest_annotations.roc.make_plot()
    #plt.show()

    #entities.merge(strong_labels)

    #er = EntityResolution()
    #weak_labels = er.run(entities, match_function, blocking_scheme, cores=cores)
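    # Link record pairs scoring above the decision threshold and take connected components as entities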
    weak_labels = weak_connected_components(database_test, forest_annotations, blocking_scheme)
    entities.merge(weak_labels)
    #strong_labels = fast_strong_cluster(entities)
    #entities.merge(strong_labels)

    # out = open('ER.csv', 'w')
    # out.write('phone,cluster_id\n')
    # for cluster_counter, (entity_id, entity) in enumerate(entities.records.iteritems()):
    #     phone_index = 21
    #     for phone in entity.features[phone_index]:
    #         out.write(str(phone)+','+str(cluster_counter)+'\n')
    # out.close()

    print 'Metrics computed against strong-feature clusters used as surrogate labels; entity resolution was run with the weak match function'
    metrics = Metrics(labels_test, weak_labels)
    # estimated_test_class_balance = count_pairwise_class_balance(labels_test)
    # new_metrics = NewMetrics(database_all_test, weak_labels, forest_all, estimated_test_class_balance)
    metrics.display()
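
main() takes no arguments, so the standard script guard (not present in the original snippet) is all that is needed to run it:

if __name__ == '__main__':
    main()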