def __init__(self, tokenizer, overlap_size=1):
    # check if the input tokenizer is valid
    validate_tokenizer(tokenizer)

    # check if the overlap size is valid
    validate_threshold(overlap_size, 'OVERLAP')

    self.tokenizer = tokenizer
    self.overlap_size = overlap_size
    super(self.__class__, self).__init__()
def __init__(self, tokenizer, sim_measure_type, threshold):
    # check if the input tokenizer is valid
    validate_tokenizer(tokenizer)

    # check if the sim_measure_type is valid
    validate_sim_measure_type(sim_measure_type)

    # check if the threshold is valid
    validate_threshold(threshold, sim_measure_type)

    self.tokenizer = tokenizer
    self.sim_measure_type = sim_measure_type
    self.threshold = threshold
    super(self.__class__, self).__init__()
def __init__(self, tokenizer, overlap_size=1, comp_op='>=',
             allow_missing=False):
    # check if the input tokenizer is valid
    validate_tokenizer(tokenizer)

    # check if the overlap size is valid
    validate_threshold(overlap_size, 'OVERLAP')

    # check if the comparison operator is valid
    validate_comp_op_for_sim_measure(comp_op, 'OVERLAP')

    self.tokenizer = tokenizer
    self.overlap_size = overlap_size
    self.comp_op = comp_op
    super(self.__class__, self).__init__(allow_missing)
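
# Example (not part of the library source): constructing the filter whose
# __init__ appears above. A minimal sketch assuming this constructor belongs
# to py_stringsimjoin's OverlapFilter and that QgramTokenizer is exposed at
# py_stringmatching's top level; both import paths are assumptions.
from py_stringmatching import QgramTokenizer
from py_stringsimjoin import OverlapFilter

qg3 = QgramTokenizer(qval=3, return_set=True)

# keep string pairs that share at least two 3-gram tokens
overlap_filter = OverlapFilter(qg3, overlap_size=2, comp_op='>=')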
def dice_join(ltable, rtable,
              l_key_attr, r_key_attr,
              l_join_attr, r_join_attr,
              tokenizer, threshold, comp_op='>=',
              allow_empty=True, allow_missing=False,
              l_out_attrs=None, r_out_attrs=None,
              l_out_prefix='l_', r_out_prefix='r_',
              out_sim_score=True, n_jobs=1, show_progress=True):
    """Join two tables using Dice similarity measure.

    For two sets X and Y, the Dice similarity score between them is given by:

        :math:`dice(X, Y) = \\frac{2 * |X \\cap Y|}{|X| + |Y|}`

    In the case where both X and Y are empty sets, we define their Dice
    score to be 1.

    Finds tuple pairs from left table and right table such that the Dice
    similarity between the join attributes satisfies the condition on input
    threshold. For example, if the comparison operator is '>=', finds tuple
    pairs whose Dice similarity between the strings that are the values of
    the join attributes is greater than or equal to the input threshold, as
    specified in "threshold".

    Args:
        ltable (DataFrame): left input table.

        rtable (DataFrame): right input table.

        l_key_attr (string): key attribute in left table.

        r_key_attr (string): key attribute in right table.

        l_join_attr (string): join attribute in left table.

        r_join_attr (string): join attribute in right table.

        tokenizer (Tokenizer): tokenizer to be used to tokenize join
            attributes.

        threshold (float): Dice similarity threshold to be satisfied.

        comp_op (string): comparison operator. Supported values are '>=',
            '>' and '=' (defaults to '>=').

        allow_empty (boolean): flag to indicate whether tuple pairs with
            empty set of tokens in both the join attributes should be
            included in the output (defaults to True).

        allow_missing (boolean): flag to indicate whether tuple pairs with
            missing value in at least one of the join attributes should be
            included in the output (defaults to False). If this flag is set
            to True, a tuple in ltable with missing value in the join
            attribute will be matched with every tuple in rtable and vice
            versa.

        l_out_attrs (list): list of attribute names from the left table to
            be included in the output table (defaults to None).

        r_out_attrs (list): list of attribute names from the right table to
            be included in the output table (defaults to None).

        l_out_prefix (string): prefix to be used for the attribute names
            coming from the left table, in the output table
            (defaults to 'l\_').

        r_out_prefix (string): prefix to be used for the attribute names
            coming from the right table, in the output table
            (defaults to 'r\_').

        out_sim_score (boolean): flag to indicate whether similarity score
            should be included in the output table (defaults to True).
            Setting this flag to True will add a column named '_sim_score'
            in the output table. This column will contain the similarity
            scores for the tuple pairs in the output.

        n_jobs (int): number of parallel jobs to use for the computation
            (defaults to 1). If -1 is given, all CPUs are used. If 1 is
            given, no parallel computing code is used at all, which is
            useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs)
            are used (where n_cpus is the total number of CPUs in the
            machine). Thus for n_jobs = -2, all CPUs but one are used. If
            (n_cpus + 1 + n_jobs) becomes less than 1, then no parallel
            computing code will be used (i.e., equivalent to the default).

        show_progress (boolean): flag to indicate whether task progress
            should be displayed to the user (defaults to True).

    Returns:
        An output table containing tuple pairs that satisfy the join
        condition (DataFrame).
""" # check if the input tables are dataframes validate_input_table(ltable, 'left table') validate_input_table(rtable, 'right table') # check if the key attributes and join attributes exist validate_attr(l_key_attr, ltable.columns, 'key attribute', 'left table') validate_attr(r_key_attr, rtable.columns, 'key attribute', 'right table') validate_attr(l_join_attr, ltable.columns, 'join attribute', 'left table') validate_attr(r_join_attr, rtable.columns, 'join attribute', 'right table') # check if the join attributes are not of numeric type validate_attr_type(l_join_attr, ltable[l_join_attr].dtype, 'join attribute', 'left table') validate_attr_type(r_join_attr, rtable[r_join_attr].dtype, 'join attribute', 'right table') # check if the input tokenizer is valid validate_tokenizer(tokenizer) # check if the input threshold is valid validate_threshold(threshold, 'DICE') # check if the comparison operator is valid validate_comp_op_for_sim_measure(comp_op, 'DICE') # check if the output attributes exist validate_output_attrs(l_out_attrs, ltable.columns, r_out_attrs, rtable.columns) # check if the key attributes are unique and do not contain missing values validate_key_attr(l_key_attr, ltable, 'left table') validate_key_attr(r_key_attr, rtable, 'right table') # set return_set flag of tokenizer to be True, in case it is set to False revert_tokenizer_return_set_flag = False if not tokenizer.get_return_set(): tokenizer.set_return_set(True) revert_tokenizer_return_set_flag = True # remove redundant attrs from output attrs. l_out_attrs = remove_redundant_attrs(l_out_attrs, l_key_attr) r_out_attrs = remove_redundant_attrs(r_out_attrs, r_key_attr) # get attributes to project. l_proj_attrs = get_attrs_to_project(l_out_attrs, l_key_attr, l_join_attr) r_proj_attrs = get_attrs_to_project(r_out_attrs, r_key_attr, r_join_attr) # Do a projection on the input dataframes to keep only the required # attributes. Then, remove rows with missing value in join attribute from # the input dataframes. Then, convert the resulting dataframes into ndarray. ltable_array = convert_dataframe_to_array(ltable, l_proj_attrs, l_join_attr) rtable_array = convert_dataframe_to_array(rtable, r_proj_attrs, r_join_attr) # computes the actual number of jobs to launch. n_jobs = min(get_num_processes_to_launch(n_jobs), len(rtable_array)) if n_jobs <= 1: # if n_jobs is 1, do not use any parallel code. output_table = set_sim_join(ltable_array, rtable_array, l_proj_attrs, r_proj_attrs, l_key_attr, r_key_attr, l_join_attr, r_join_attr, tokenizer, 'DICE', threshold, comp_op, allow_empty, l_out_attrs, r_out_attrs, l_out_prefix, r_out_prefix, out_sim_score, show_progress) else: # if n_jobs is above 1, split the right table into n_jobs splits and # join each right table split with the whole of left table in a separate # process. r_splits = split_table(rtable_array, n_jobs) results = Parallel(n_jobs=n_jobs)(delayed(set_sim_join)( ltable_array, r_splits[job_index], l_proj_attrs, r_proj_attrs, l_key_attr, r_key_attr, l_join_attr, r_join_attr, tokenizer, 'DICE', threshold, comp_op, allow_empty, l_out_attrs, r_out_attrs, l_out_prefix, r_out_prefix, out_sim_score, ( show_progress and (job_index == n_jobs - 1))) for job_index in range(n_jobs)) output_table = pd.concat(results) # If allow_missing flag is set, then compute all pairs with missing value in # at least one of the join attributes and then add it to the output # obtained from the join. 
    if allow_missing:
        missing_pairs = get_pairs_with_missing_value(
            ltable, rtable,
            l_key_attr, r_key_attr,
            l_join_attr, r_join_attr,
            l_out_attrs, r_out_attrs,
            l_out_prefix, r_out_prefix,
            out_sim_score, show_progress)
        output_table = pd.concat([output_table, missing_pairs])

    # add an id column named '_id' to the output table.
    output_table.insert(0, '_id', range(0, len(output_table)))

    # revert the return_set flag of tokenizer, in case it was modified.
    if revert_tokenizer_return_set_flag:
        tokenizer.set_return_set(False)

    return output_table
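
# Example (not part of the library source): a minimal dice_join call,
# assuming dice_join is importable from py_stringsimjoin's top level; the
# tables and column names below are made up for illustration.
import pandas as pd
from py_stringmatching import WhitespaceTokenizer
from py_stringsimjoin import dice_join

ltable = pd.DataFrame({'id': [1, 2],
                       'name': ['data science', 'string matching']})
rtable = pd.DataFrame({'id': [1, 2],
                       'name': ['data sciences', 'pattern matching']})
ws = WhitespaceTokenizer(return_set=True)

# keep pairs whose whitespace-token Dice score is at least 0.5; the output
# contains the matched names plus a '_sim_score' column.
pairs = dice_join(ltable, rtable, 'id', 'id', 'name', 'name', ws, 0.5,
                  l_out_attrs=['name'], r_out_attrs=['name'])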
def apply_matcher(candset,
                  candset_l_key_attr, candset_r_key_attr,
                  ltable, rtable,
                  l_key_attr, r_key_attr,
                  l_match_attr, r_match_attr,
                  tokenizer, sim_function,
                  threshold, comp_op='>=',
                  allow_missing=False,
                  l_out_attrs=None, r_out_attrs=None,
                  l_out_prefix='l_', r_out_prefix='r_',
                  out_sim_score=True, n_jobs=1, show_progress=True):
    """Find matching string pairs from the candidate set (typically
    produced by applying a filter to two tables) by applying a matcher of
    form (sim_function comp_op threshold).

    Specifically, this method computes the input similarity function on
    string pairs in the candidate set and checks if the resulting score
    satisfies the input threshold (depending on the comparison operator).

    Args:
        candset (DataFrame): input candidate set.

        candset_l_key_attr (string): attribute in candidate set which is a
            key in left table.

        candset_r_key_attr (string): attribute in candidate set which is a
            key in right table.

        ltable (DataFrame): left input table.

        rtable (DataFrame): right input table.

        l_key_attr (string): key attribute in left table.

        r_key_attr (string): key attribute in right table.

        l_match_attr (string): attribute in left table on which the matcher
            should be applied.

        r_match_attr (string): attribute in right table on which the
            matcher should be applied.

        tokenizer (Tokenizer): tokenizer to be used to tokenize the match
            attributes. If set to None, the matcher is applied directly on
            the match attributes.

        sim_function (function): matcher function to be applied.

        threshold (float): threshold to be satisfied.

        comp_op (string): comparison operator. Supported values are '>=',
            '>', '<=', '<', '=' and '!=' (defaults to '>=').

        allow_missing (boolean): flag to indicate whether tuple pairs with
            missing value in at least one of the match attributes should be
            included in the output (defaults to False).

        l_out_attrs (list): list of attribute names from the left table to
            be included in the output table (defaults to None).

        r_out_attrs (list): list of attribute names from the right table to
            be included in the output table (defaults to None).

        l_out_prefix (string): prefix to be used for the attribute names
            coming from the left table, in the output table
            (defaults to 'l\_').

        r_out_prefix (string): prefix to be used for the attribute names
            coming from the right table, in the output table
            (defaults to 'r\_').

        out_sim_score (boolean): flag to indicate whether similarity score
            should be included in the output table (defaults to True).
            Setting this flag to True will add a column named '_sim_score'
            in the output table. This column will contain the similarity
            scores for the tuple pairs in the output.

        n_jobs (int): number of parallel jobs to use for the computation
            (defaults to 1). If -1 is given, all CPUs are used. If 1 is
            given, no parallel computing code is used at all, which is
            useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs)
            are used (where n_cpus is the total number of CPUs in the
            machine). Thus for n_jobs = -2, all CPUs but one are used. If
            (n_cpus + 1 + n_jobs) becomes less than 1, then no parallel
            computing code will be used (i.e., equivalent to the default).

        show_progress (boolean): flag to indicate whether task progress
            should be displayed to the user (defaults to True).

    Returns:
        An output table containing tuple pairs from the candidate set that
        survive the matcher (DataFrame).
""" # check if the input candset is a dataframe validate_input_table(candset, 'candset') # check if the candset key attributes exist validate_attr(candset_l_key_attr, candset.columns, 'left key attribute', 'candset') validate_attr(candset_r_key_attr, candset.columns, 'right key attribute', 'candset') # check if the input tables are dataframes validate_input_table(ltable, 'left table') validate_input_table(rtable, 'right table') # check if the key attributes and join attributes exist validate_attr(l_key_attr, ltable.columns, 'key attribute', 'left table') validate_attr(r_key_attr, rtable.columns, 'key attribute', 'right table') validate_attr(l_match_attr, ltable.columns, 'match attribute', 'left table') validate_attr(r_match_attr, rtable.columns, 'match attribute', 'right table') # check if the output attributes exist validate_output_attrs(l_out_attrs, ltable.columns, r_out_attrs, rtable.columns) # check if the input tokenizer is valid, if it is not None if tokenizer is not None: validate_tokenizer(tokenizer) # check if the comparison operator is valid validate_comp_op(comp_op) # check if the key attributes are unique and do not contain missing values validate_key_attr(l_key_attr, ltable, 'left table') validate_key_attr(r_key_attr, rtable, 'right table') # check for empty candset if candset.empty: return candset # remove redundant attrs from output attrs. l_out_attrs = remove_redundant_attrs(l_out_attrs, l_key_attr) r_out_attrs = remove_redundant_attrs(r_out_attrs, r_key_attr) # get attributes to project. l_proj_attrs = get_attrs_to_project(l_out_attrs, l_key_attr, l_match_attr) r_proj_attrs = get_attrs_to_project(r_out_attrs, r_key_attr, r_match_attr) # do a projection on the input dataframes. Note that this doesn't create a # copy of the dataframes. It only creates a view on original dataframes. ltable_projected = ltable[l_proj_attrs] rtable_projected = rtable[r_proj_attrs] # computes the actual number of jobs to launch. n_jobs = min(get_num_processes_to_launch(n_jobs), len(candset)) # If a tokenizer is provided, we can optimize by tokenizing each value # only once by caching the tokens of l_match_attr and r_match_attr. But, # this can be a bad strategy in case the candset has very few records # compared to the original tables. Hence, we check if the sum of tuples in # ltable and rtable is less than twice the number of tuples in the candset. # If yes, we decide to cache the token values. Else, we do not cache the # tokens as the candset is small. l_tokens = None r_tokens = None if tokenizer is not None and (len(ltable) + len(rtable) < len(candset) * 2): l_tokens = generate_tokens(ltable_projected, l_key_attr, l_match_attr, tokenizer) r_tokens = generate_tokens(rtable_projected, r_key_attr, r_match_attr, tokenizer) if n_jobs <= 1: # if n_jobs is 1, do not use any parallel code. output_table = _apply_matcher_split( candset, candset_l_key_attr, candset_r_key_attr, ltable_projected, rtable_projected, l_key_attr, r_key_attr, l_match_attr, r_match_attr, tokenizer, sim_function, threshold, comp_op, allow_missing, l_out_attrs, r_out_attrs, l_out_prefix, r_out_prefix, out_sim_score, show_progress, l_tokens, r_tokens) else: # if n_jobs is above 1, split the candset into n_jobs splits and apply # the matcher on each candset split in a separate process. 
        candset_splits = split_table(candset, n_jobs)
        results = Parallel(n_jobs=n_jobs)(delayed(_apply_matcher_split)(
            candset_splits[job_index],
            candset_l_key_attr, candset_r_key_attr,
            ltable_projected, rtable_projected,
            l_key_attr, r_key_attr,
            l_match_attr, r_match_attr,
            tokenizer, sim_function, threshold, comp_op, allow_missing,
            l_out_attrs, r_out_attrs,
            l_out_prefix, r_out_prefix, out_sim_score,
            (show_progress and (job_index == n_jobs - 1)),
            l_tokens, r_tokens)
            for job_index in range(n_jobs))
        output_table = pd.concat(results)

    return output_table
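
# Example (not part of the library source): applying an edit-distance
# matcher to a candidate set produced by a filter. A sketch that reuses the
# illustrative ltable/rtable and overlap_filter objects from the earlier
# sketches; the Levenshtein and apply_matcher import paths are assumptions.
from py_stringmatching import Levenshtein
from py_stringsimjoin import apply_matcher

# candidate pairs sharing at least two 3-gram tokens; with the default
# prefixes, the candidate-set key columns come out as 'l_id' and 'r_id'.
candset = overlap_filter.filter_tables(ltable, rtable, 'id', 'id',
                                       'name', 'name')

lev = Levenshtein()

# keep candidate pairs whose normalized edit similarity is at least 0.8;
# tokenizer=None applies the similarity function directly on the strings.
matches = apply_matcher(candset, 'l_id', 'r_id', ltable, rtable,
                        'id', 'id', 'name', 'name',
                        tokenizer=None, sim_function=lev.get_sim_score,
                        threshold=0.8, comp_op='>=')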
def dice_join_py(ltable, rtable,
                 l_key_attr, r_key_attr,
                 l_join_attr, r_join_attr,
                 tokenizer, threshold, comp_op='>=',
                 allow_empty=True, allow_missing=False,
                 l_out_attrs=None, r_out_attrs=None,
                 l_out_prefix='l_', r_out_prefix='r_',
                 out_sim_score=True, n_jobs=1, show_progress=True):
    """Join two tables using Dice similarity measure.

    For two sets X and Y, the Dice similarity score between them is given by:

        :math:`dice(X, Y) = \\frac{2 * |X \\cap Y|}{|X| + |Y|}`

    In the case where both X and Y are empty sets, we define their Dice
    score to be 1.

    Finds tuple pairs from left table and right table such that the Dice
    similarity between the join attributes satisfies the condition on input
    threshold. For example, if the comparison operator is '>=', finds tuple
    pairs whose Dice similarity between the strings that are the values of
    the join attributes is greater than or equal to the input threshold, as
    specified in "threshold".

    Args:
        ltable (DataFrame): left input table.

        rtable (DataFrame): right input table.

        l_key_attr (string): key attribute in left table.

        r_key_attr (string): key attribute in right table.

        l_join_attr (string): join attribute in left table.

        r_join_attr (string): join attribute in right table.

        tokenizer (Tokenizer): tokenizer to be used to tokenize join
            attributes.

        threshold (float): Dice similarity threshold to be satisfied.

        comp_op (string): comparison operator. Supported values are '>=',
            '>' and '=' (defaults to '>=').

        allow_empty (boolean): flag to indicate whether tuple pairs with
            empty set of tokens in both the join attributes should be
            included in the output (defaults to True).

        allow_missing (boolean): flag to indicate whether tuple pairs with
            missing value in at least one of the join attributes should be
            included in the output (defaults to False). If this flag is set
            to True, a tuple in ltable with missing value in the join
            attribute will be matched with every tuple in rtable and vice
            versa.

        l_out_attrs (list): list of attribute names from the left table to
            be included in the output table (defaults to None).

        r_out_attrs (list): list of attribute names from the right table to
            be included in the output table (defaults to None).

        l_out_prefix (string): prefix to be used for the attribute names
            coming from the left table, in the output table
            (defaults to 'l\_').

        r_out_prefix (string): prefix to be used for the attribute names
            coming from the right table, in the output table
            (defaults to 'r\_').

        out_sim_score (boolean): flag to indicate whether similarity score
            should be included in the output table (defaults to True).
            Setting this flag to True will add a column named '_sim_score'
            in the output table. This column will contain the similarity
            scores for the tuple pairs in the output.

        n_jobs (int): number of parallel jobs to use for the computation
            (defaults to 1). If -1 is given, all CPUs are used. If 1 is
            given, no parallel computing code is used at all, which is
            useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs)
            are used (where n_cpus is the total number of CPUs in the
            machine). Thus for n_jobs = -2, all CPUs but one are used. If
            (n_cpus + 1 + n_jobs) becomes less than 1, then no parallel
            computing code will be used (i.e., equivalent to the default).

        show_progress (boolean): flag to indicate whether task progress
            should be displayed to the user (defaults to True).

    Returns:
        An output table containing tuple pairs that satisfy the join
        condition (DataFrame).
""" # check if the input tables are dataframes validate_input_table(ltable, 'left table') validate_input_table(rtable, 'right table') # check if the key attributes and join attributes exist validate_attr(l_key_attr, ltable.columns, 'key attribute', 'left table') validate_attr(r_key_attr, rtable.columns, 'key attribute', 'right table') validate_attr(l_join_attr, ltable.columns, 'join attribute', 'left table') validate_attr(r_join_attr, rtable.columns, 'join attribute', 'right table') # check if the join attributes are not of numeric type validate_attr_type(l_join_attr, ltable[l_join_attr].dtype, 'join attribute', 'left table') validate_attr_type(r_join_attr, rtable[r_join_attr].dtype, 'join attribute', 'right table') # check if the input tokenizer is valid validate_tokenizer(tokenizer) # check if the input threshold is valid validate_threshold(threshold, 'DICE') # check if the comparison operator is valid validate_comp_op_for_sim_measure(comp_op, 'DICE') # check if the output attributes exist validate_output_attrs(l_out_attrs, ltable.columns, r_out_attrs, rtable.columns) # check if the key attributes are unique and do not contain missing values validate_key_attr(l_key_attr, ltable, 'left table') validate_key_attr(r_key_attr, rtable, 'right table') # set return_set flag of tokenizer to be True, in case it is set to False revert_tokenizer_return_set_flag = False if not tokenizer.get_return_set(): tokenizer.set_return_set(True) revert_tokenizer_return_set_flag = True # remove redundant attrs from output attrs. l_out_attrs = remove_redundant_attrs(l_out_attrs, l_key_attr) r_out_attrs = remove_redundant_attrs(r_out_attrs, r_key_attr) # get attributes to project. l_proj_attrs = get_attrs_to_project(l_out_attrs, l_key_attr, l_join_attr) r_proj_attrs = get_attrs_to_project(r_out_attrs, r_key_attr, r_join_attr) # Do a projection on the input dataframes to keep only the required # attributes. Then, remove rows with missing value in join attribute from # the input dataframes. Then, convert the resulting dataframes into ndarray. ltable_array = convert_dataframe_to_array(ltable, l_proj_attrs, l_join_attr) rtable_array = convert_dataframe_to_array(rtable, r_proj_attrs, r_join_attr) # computes the actual number of jobs to launch. n_jobs = min(get_num_processes_to_launch(n_jobs), len(rtable_array)) if n_jobs <= 1: # if n_jobs is 1, do not use any parallel code. output_table = set_sim_join(ltable_array, rtable_array, l_proj_attrs, r_proj_attrs, l_key_attr, r_key_attr, l_join_attr, r_join_attr, tokenizer, 'DICE', threshold, comp_op, allow_empty, l_out_attrs, r_out_attrs, l_out_prefix, r_out_prefix, out_sim_score, show_progress) else: # if n_jobs is above 1, split the right table into n_jobs splits and # join each right table split with the whole of left table in a separate # process. r_splits = split_table(rtable_array, n_jobs) results = Parallel(n_jobs=n_jobs)(delayed(set_sim_join)( ltable_array, r_splits[job_index], l_proj_attrs, r_proj_attrs, l_key_attr, r_key_attr, l_join_attr, r_join_attr, tokenizer, 'DICE', threshold, comp_op, allow_empty, l_out_attrs, r_out_attrs, l_out_prefix, r_out_prefix, out_sim_score, (show_progress and (job_index==n_jobs-1))) for job_index in range(n_jobs)) output_table = pd.concat(results) # If allow_missing flag is set, then compute all pairs with missing value in # at least one of the join attributes and then add it to the output # obtained from the join. 
    if allow_missing:
        missing_pairs = get_pairs_with_missing_value(
            ltable, rtable,
            l_key_attr, r_key_attr,
            l_join_attr, r_join_attr,
            l_out_attrs, r_out_attrs,
            l_out_prefix, r_out_prefix,
            out_sim_score, show_progress)
        output_table = pd.concat([output_table, missing_pairs])

    # add an id column named '_id' to the output table.
    output_table.insert(0, '_id', range(0, len(output_table)))

    # revert the return_set flag of tokenizer, in case it was modified.
    if revert_tokenizer_return_set_flag:
        tokenizer.set_return_set(False)

    return output_table
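
# Example (not part of the library source): the Dice formula from the
# docstring above, computed by hand on two small token sets.
X = {'data', 'science'}
Y = {'data', 'sciences'}
dice = 2.0 * len(X & Y) / (len(X) + len(Y))   # 2 * 1 / (2 + 2) = 0.5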
def edit_dist_join(ltable, rtable,
                   l_key_attr, r_key_attr,
                   l_join_attr, r_join_attr,
                   threshold,
                   l_out_attrs=None, r_out_attrs=None,
                   l_out_prefix='l_', r_out_prefix='r_',
                   out_sim_score=True,
                   n_jobs=1,
                   tokenizer=create_qgram_tokenizer(2)):
    """Join two tables using edit distance measure.

    Finds tuple pairs from ltable and rtable such that
    EditDistance(ltable.l_join_attr, rtable.r_join_attr) <= threshold

    Args:
        ltable, rtable : Pandas data frame
        l_key_attr, r_key_attr : String, key attribute from ltable and
            rtable
        l_join_attr, r_join_attr : String, join attribute from ltable and
            rtable
        threshold : int, edit distance threshold to be satisfied
        l_out_attrs, r_out_attrs : list of attributes to be included in
            the output table from ltable and rtable
        l_out_prefix, r_out_prefix : String, prefix to be used in the
            attribute names of the output table
        out_sim_score : boolean, indicates if edit distance needs to be
            included in the output table
        n_jobs : int, number of parallel jobs to be used for computation
        tokenizer : Tokenizer object, tokenizer to be used to tokenize
            join attributes (defaults to a 2-gram tokenizer)

    Returns:
        result : Pandas data frame
    """
    # check if the input tables are dataframes
    validate_input_table(ltable, 'left table')
    validate_input_table(rtable, 'right table')

    # check if the key attributes and join attributes exist
    validate_attr(l_key_attr, ltable.columns,
                  'key attribute', 'left table')
    validate_attr(r_key_attr, rtable.columns,
                  'key attribute', 'right table')
    validate_attr(l_join_attr, ltable.columns,
                  'join attribute', 'left table')
    validate_attr(r_join_attr, rtable.columns,
                  'join attribute', 'right table')

    # check if the input tokenizer is valid
    validate_tokenizer(tokenizer)

    # check if the input threshold is valid
    validate_threshold(threshold, 'EDIT_DISTANCE')

    # check if the output attributes exist
    validate_output_attrs(l_out_attrs, ltable.columns,
                          r_out_attrs, rtable.columns)

    # check if the key attributes are unique and do not contain
    # missing values
    validate_key_attr(l_key_attr, ltable, 'left table')
    validate_key_attr(r_key_attr, rtable, 'right table')

    # convert threshold to integer (in case it is a float)
    threshold = int(floor(threshold))

    if n_jobs == 1:
        output_table = _edit_dist_join_split(ltable, rtable,
                                             l_key_attr, r_key_attr,
                                             l_join_attr, r_join_attr,
                                             tokenizer, threshold,
                                             l_out_attrs, r_out_attrs,
                                             l_out_prefix, r_out_prefix,
                                             out_sim_score)
        output_table.insert(0, '_id', range(0, len(output_table)))
        return output_table
    else:
        r_splits = split_table(rtable, n_jobs)
        results = Parallel(n_jobs=n_jobs)(delayed(_edit_dist_join_split)(
            ltable, s,
            l_key_attr, r_key_attr,
            l_join_attr, r_join_attr,
            tokenizer, threshold,
            l_out_attrs, r_out_attrs,
            l_out_prefix, r_out_prefix,
            out_sim_score) for s in r_splits)
        output_table = pd.concat(results)
        output_table.insert(0, '_id', range(0, len(output_table)))
        return output_table
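
# Example (not part of the library source): a minimal edit_dist_join call,
# reusing the illustrative ltable/rtable from the dice_join sketch; the
# import path is an assumption. The default 2-gram tokenizer built by
# create_qgram_tokenizer(2) drives the internal filtering; 'data science'
# and 'data sciences' survive since their edit distance is 1.
from py_stringsimjoin import edit_dist_join

ed_pairs = edit_dist_join(ltable, rtable, 'id', 'id', 'name', 'name',
                          threshold=1,
                          l_out_attrs=['name'], r_out_attrs=['name'])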
def jaccard_join(ltable, rtable,
                 l_key_attr, r_key_attr,
                 l_join_attr, r_join_attr,
                 tokenizer, threshold,
                 l_out_attrs=None, r_out_attrs=None,
                 l_out_prefix='l_', r_out_prefix='r_',
                 out_sim_score=True, n_jobs=1):
    """Join two tables using Jaccard similarity measure.

    Finds tuple pairs from ltable and rtable such that
    Jaccard(ltable.l_join_attr, rtable.r_join_attr) >= threshold

    Args:
        ltable, rtable : Pandas data frame
        l_key_attr, r_key_attr : String, key attribute from ltable and
            rtable
        l_join_attr, r_join_attr : String, join attribute from ltable and
            rtable
        tokenizer : Tokenizer object, tokenizer to be used to tokenize
            join attributes
        threshold : float, Jaccard threshold to be satisfied
        l_out_attrs, r_out_attrs : list of attributes to be included in
            the output table from ltable and rtable
        l_out_prefix, r_out_prefix : String, prefix to be used in the
            attribute names of the output table
        out_sim_score : boolean, indicates if similarity score needs to be
            included in the output table
        n_jobs : int, number of parallel jobs to be used for computation

    Returns:
        result : Pandas data frame
    """
    # check if the input tables are dataframes
    validate_input_table(ltable, 'left table')
    validate_input_table(rtable, 'right table')

    # check if the key attributes and join attributes exist
    validate_attr(l_key_attr, ltable.columns,
                  'key attribute', 'left table')
    validate_attr(r_key_attr, rtable.columns,
                  'key attribute', 'right table')
    validate_attr(l_join_attr, ltable.columns,
                  'join attribute', 'left table')
    validate_attr(r_join_attr, rtable.columns,
                  'join attribute', 'right table')

    # check if the input tokenizer is valid
    validate_tokenizer(tokenizer)

    # check if the input threshold is valid
    validate_threshold(threshold, 'JACCARD')

    # check if the output attributes exist
    validate_output_attrs(l_out_attrs, ltable.columns,
                          r_out_attrs, rtable.columns)

    # check if the key attributes are unique and do not contain
    # missing values
    validate_key_attr(l_key_attr, ltable, 'left table')
    validate_key_attr(r_key_attr, rtable, 'right table')

    if n_jobs == 1:
        output_table = _set_sim_join_split(ltable, rtable,
                                           l_key_attr, r_key_attr,
                                           l_join_attr, r_join_attr,
                                           tokenizer, 'JACCARD', threshold,
                                           l_out_attrs, r_out_attrs,
                                           l_out_prefix, r_out_prefix,
                                           out_sim_score)
        output_table.insert(0, '_id', range(0, len(output_table)))
        return output_table
    else:
        r_splits = split_table(rtable, n_jobs)
        results = Parallel(n_jobs=n_jobs)(delayed(_set_sim_join_split)(
            ltable, r_split,
            l_key_attr, r_key_attr,
            l_join_attr, r_join_attr,
            tokenizer, 'JACCARD', threshold,
            l_out_attrs, r_out_attrs,
            l_out_prefix, r_out_prefix,
            out_sim_score) for r_split in r_splits)
        output_table = pd.concat(results)
        output_table.insert(0, '_id', range(0, len(output_table)))
        return output_table
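
# Example (not part of the library source): a minimal jaccard_join call,
# reusing the whitespace tokenizer and tables from the sketches above; the
# import path is an assumption. Jaccard of {'data', 'science'} and
# {'data', 'sciences'} is 1/3, so a threshold of 0.3 keeps that pair.
from py_stringsimjoin import jaccard_join

jac_pairs = jaccard_join(ltable, rtable, 'id', 'id', 'name', 'name',
                         ws, 0.3)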
def overlap_join(ltable, rtable,
                 l_key_attr, r_key_attr,
                 l_join_attr, r_join_attr,
                 tokenizer, threshold, comp_op='>=',
                 allow_missing=False,
                 l_out_attrs=None, r_out_attrs=None,
                 l_out_prefix='l_', r_out_prefix='r_',
                 out_sim_score=True, n_jobs=1, show_progress=True):
    """Join two tables using overlap measure.

    For two sets X and Y, the overlap between them is given by:

        :math:`overlap(X, Y) = |X \\cap Y|`

    Finds tuple pairs from left table and right table such that the overlap
    between the join attributes satisfies the condition on input threshold.
    For example, if the comparison operator is '>=', finds tuple pairs
    whose overlap between the strings that are the values of the join
    attributes is greater than or equal to the input threshold, as
    specified in "threshold".

    Args:
        ltable (DataFrame): left input table.

        rtable (DataFrame): right input table.

        l_key_attr (string): key attribute in left table.

        r_key_attr (string): key attribute in right table.

        l_join_attr (string): join attribute in left table.

        r_join_attr (string): join attribute in right table.

        tokenizer (Tokenizer): tokenizer to be used to tokenize join
            attributes.

        threshold (float): overlap threshold to be satisfied.

        comp_op (string): comparison operator. Supported values are '>=',
            '>' and '=' (defaults to '>=').

        allow_missing (boolean): flag to indicate whether tuple pairs with
            missing value in at least one of the join attributes should be
            included in the output (defaults to False). If this flag is set
            to True, a tuple in ltable with missing value in the join
            attribute will be matched with every tuple in rtable and vice
            versa.

        l_out_attrs (list): list of attribute names from the left table to
            be included in the output table (defaults to None).

        r_out_attrs (list): list of attribute names from the right table to
            be included in the output table (defaults to None).

        l_out_prefix (string): prefix to be used for the attribute names
            coming from the left table, in the output table
            (defaults to 'l\_').

        r_out_prefix (string): prefix to be used for the attribute names
            coming from the right table, in the output table
            (defaults to 'r\_').

        out_sim_score (boolean): flag to indicate whether similarity score
            should be included in the output table (defaults to True).
            Setting this flag to True will add a column named '_sim_score'
            in the output table. This column will contain the similarity
            scores for the tuple pairs in the output.

        n_jobs (int): number of parallel jobs to use for the computation
            (defaults to 1). If -1 is given, all CPUs are used. If 1 is
            given, no parallel computing code is used at all, which is
            useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs)
            are used (where n_cpus is the total number of CPUs in the
            machine). Thus for n_jobs = -2, all CPUs but one are used. If
            (n_cpus + 1 + n_jobs) becomes less than 1, then no parallel
            computing code will be used (i.e., equivalent to the default).

        show_progress (boolean): flag to indicate whether task progress
            should be displayed to the user (defaults to True).

    Returns:
        An output table containing tuple pairs that satisfy the join
        condition (DataFrame).
    """
    # check if the input tokenizer is valid
    validate_tokenizer(tokenizer)

    # set return_set flag of tokenizer to be True, in case it is set
    # to False
    revert_tokenizer_return_set_flag = False
    if not tokenizer.get_return_set():
        tokenizer.set_return_set(True)
        revert_tokenizer_return_set_flag = True

    # use overlap filter to perform the join.
    overlap_filter = OverlapFilter(tokenizer, threshold, comp_op,
                                   allow_missing)
    output_table = overlap_filter.filter_tables(ltable, rtable,
                                                l_key_attr, r_key_attr,
                                                l_join_attr, r_join_attr,
                                                l_out_attrs, r_out_attrs,
                                                l_out_prefix, r_out_prefix,
                                                out_sim_score, n_jobs,
                                                show_progress)

    # revert the return_set flag of tokenizer, in case it was modified.
    if revert_tokenizer_return_set_flag:
        tokenizer.set_return_set(False)

    return output_table
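
# Example (not part of the library source): a minimal overlap_join call,
# reusing the whitespace tokenizer and tables from the sketches above; the
# import path is an assumption. A threshold of 1 keeps every pair that
# shares at least one token.
from py_stringsimjoin import overlap_join

ov_pairs = overlap_join(ltable, rtable, 'id', 'id', 'name', 'name',
                        ws, 1)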
def overlap_join_py(ltable, rtable,
                    l_key_attr, r_key_attr,
                    l_join_attr, r_join_attr,
                    tokenizer, threshold, comp_op='>=',
                    allow_missing=False,
                    l_out_attrs=None, r_out_attrs=None,
                    l_out_prefix='l_', r_out_prefix='r_',
                    out_sim_score=True, n_jobs=1, show_progress=True):
    """Join two tables using overlap measure.

    For two sets X and Y, the overlap between them is given by:

        :math:`overlap(X, Y) = |X \\cap Y|`

    Finds tuple pairs from left table and right table such that the overlap
    between the join attributes satisfies the condition on input threshold.
    For example, if the comparison operator is '>=', finds tuple pairs
    whose overlap between the strings that are the values of the join
    attributes is greater than or equal to the input threshold, as
    specified in "threshold".

    Args:
        ltable (DataFrame): left input table.

        rtable (DataFrame): right input table.

        l_key_attr (string): key attribute in left table.

        r_key_attr (string): key attribute in right table.

        l_join_attr (string): join attribute in left table.

        r_join_attr (string): join attribute in right table.

        tokenizer (Tokenizer): tokenizer to be used to tokenize join
            attributes.

        threshold (float): overlap threshold to be satisfied.

        comp_op (string): comparison operator. Supported values are '>=',
            '>' and '=' (defaults to '>=').

        allow_missing (boolean): flag to indicate whether tuple pairs with
            missing value in at least one of the join attributes should be
            included in the output (defaults to False). If this flag is set
            to True, a tuple in ltable with missing value in the join
            attribute will be matched with every tuple in rtable and vice
            versa.

        l_out_attrs (list): list of attribute names from the left table to
            be included in the output table (defaults to None).

        r_out_attrs (list): list of attribute names from the right table to
            be included in the output table (defaults to None).

        l_out_prefix (string): prefix to be used for the attribute names
            coming from the left table, in the output table
            (defaults to 'l\_').

        r_out_prefix (string): prefix to be used for the attribute names
            coming from the right table, in the output table
            (defaults to 'r\_').

        out_sim_score (boolean): flag to indicate whether similarity score
            should be included in the output table (defaults to True).
            Setting this flag to True will add a column named '_sim_score'
            in the output table. This column will contain the similarity
            scores for the tuple pairs in the output.

        n_jobs (int): number of parallel jobs to use for the computation
            (defaults to 1). If -1 is given, all CPUs are used. If 1 is
            given, no parallel computing code is used at all, which is
            useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs)
            are used (where n_cpus is the total number of CPUs in the
            machine). Thus for n_jobs = -2, all CPUs but one are used. If
            (n_cpus + 1 + n_jobs) becomes less than 1, then no parallel
            computing code will be used (i.e., equivalent to the default).

        show_progress (boolean): flag to indicate whether task progress
            should be displayed to the user (defaults to True).

    Returns:
        An output table containing tuple pairs that satisfy the join
        condition (DataFrame).
    """
    # check if the input tokenizer is valid
    validate_tokenizer(tokenizer)

    # set return_set flag of tokenizer to be True, in case it is set
    # to False
    revert_tokenizer_return_set_flag = False
    if not tokenizer.get_return_set():
        tokenizer.set_return_set(True)
        revert_tokenizer_return_set_flag = True

    # use overlap filter to perform the join.
    overlap_filter = OverlapFilter(tokenizer, threshold, comp_op,
                                   allow_missing)
    output_table = overlap_filter.filter_tables(ltable, rtable,
                                                l_key_attr, r_key_attr,
                                                l_join_attr, r_join_attr,
                                                l_out_attrs, r_out_attrs,
                                                l_out_prefix, r_out_prefix,
                                                out_sim_score, n_jobs,
                                                show_progress)

    # revert the return_set flag of tokenizer, in case it was modified.
    if revert_tokenizer_return_set_flag:
        tokenizer.set_return_set(False)

    return output_table