Example #1
    def _learnBlocking(self, eta, epsilon):

        # Semi-supervised "confident non-duplicate" pairs are added to the
        # distinct (index 0) training examples.
        confident_nonduplicates = training.semiSupervisedNonDuplicates(self.data_sample, self.data_model)

        self.training_pairs[0].extend(confident_nonduplicates)

        predicate_functions = (
            predicates.wholeFieldPredicate,
            predicates.tokenFieldPredicate,
            predicates.commonIntegerPredicate,
            predicates.sameThreeCharStartPredicate,
            predicates.sameFiveCharStartPredicate,
            predicates.sameSevenCharStartPredicate,
            predicates.nearIntegersPredicate,
            predicates.commonFourGram,
            predicates.commonSixGram,
        )

        tfidf_thresholds = [0.2, 0.4, 0.6, 0.8]
        full_string_records = {}
        fields = self.data_model["fields"].keys()

        # Concatenate all modeled fields into a single string per record,
        # using the first 2000 sampled pairs.
        for pair in self.data_sample[0:2000]:
            for k, v in pair:
                full_string_records[k] = " ".join(v[field] for field in fields)

        # Per-token document frequencies over those full-string records,
        # used alongside tfidf_thresholds when learning blocking predicates.
        df_index = tfidf.documentFrequency(full_string_records)

        learned_predicates = blocking.blockTraining(
            self.training_pairs, predicate_functions, self.data_model, tfidf_thresholds, df_index, eta, epsilon
        )

        return learned_predicates
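
For context, tfidf.documentFrequency presumably builds a per-token document frequency index over the concatenated record strings. The sketch below shows that general idea in a self-contained way; document_frequency and its Counter return value are illustrative assumptions, not dedupe's actual API.

    from collections import Counter

    def document_frequency(records):
        # Illustrative only: count how many records contain each token.
        # The return shape of dedupe's tfidf.documentFrequency may differ.
        df = Counter()
        for full_string in records.values():
            for token in set(full_string.lower().split()):
                df[token] += 1
        return df

    records = {1: "john smith 123 main st", 2: "jon smith 123 main street"}
    print(document_frequency(records)["smith"])  # -> 2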
Example #2
File: dedupe.py  Project: derwiki/dedupe
    def _learnBlocking(self, data_d, eta, epsilon):
        confident_nonduplicates = blocking.semiSupervisedNonDuplicates(self.data_d,
                                                                       self.data_model)

        self.training_pairs[0].extend(confident_nonduplicates)

        predicate_functions = (predicates.wholeFieldPredicate,
                               predicates.tokenFieldPredicate,
                               predicates.commonIntegerPredicate,
                               predicates.sameThreeCharStartPredicate,
                               predicates.sameFiveCharStartPredicate,
                               predicates.sameSevenCharStartPredicate,
                               predicates.nearIntegersPredicate,
                               predicates.commonFourGram,
                               predicates.commonSixGram,
                               )

        tfidf_thresholds = [0.2, 0.4, 0.6, 0.8]
        full_string_records = {}
        # Concatenate all modeled fields into a single string per record.
        for k, v in data_d.iteritems():
            document = ''
            for field in self.data_model['fields'].keys():
                document += v[field]
                document += ' '
            full_string_records[k] = document

        # Per-token document frequencies over those full-string records.
        self.df_index = tfidf.documentFrequency(full_string_records)

        blocker = blocking.Blocking(self.training_pairs,
                                    predicate_functions,
                                    self.data_model,
                                    tfidf_thresholds,
                                    self.df_index,
                                    eta,
                                    epsilon
                                    )

        learned_predicates = blocker.trainBlocking()

        return learned_predicates
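
Each predicate function passed to the blocker maps a field value to one or more blocking keys, and records that share a key become candidate pairs. The snippets below are simplified sketches of that contract under that assumption, not the actual implementations in dedupe.predicates.

    def whole_field_predicate(field):
        # one key: the entire field value
        return (field,)

    def token_field_predicate(field):
        # one key per whitespace-separated token
        return tuple(field.split())

    def same_three_char_start_predicate(field):
        # one key: the first three characters of the field
        return (field[:3],)

    print(token_field_predicate("123 Main St"))     # ('123', 'Main', 'St')
    print(same_three_char_start_predicate("Main"))  # ('Mai',)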