def drop_cols(df, col_list):
    if not isinstance(col_list, list):
        col_list = [col_list]
    if cm.is_dfinfo_present(df):
        if _is_table_or_candset(df):
            if not _is_table(df):
                key, fk_ltable, fk_rtable, ltable, rtable, l_key, r_key\
                    = cm.get_metadata_for_candset(df, logger, False)
                col_list = gh.list_diff(col_list, [key, fk_ltable, fk_rtable])
                col_list = gh.list_drop_duplicates(col_list)
            else:
                key = cm.get_key(df)
                col_list = gh.list_diff(col_list, [key])
                col_list = gh.list_drop_duplicates(col_list)
        new_df = df.drop(col_list, axis=1)
        cm.init_properties(new_df)
        cm.copy_properties(df, new_df)
    else:
        # No catalog metadata to preserve: just drop the columns.
        new_df = df.drop(col_list, axis=1)

    return new_df
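
A minimal usage sketch for drop_cols. Hedged: the file 'A.csv', its 'ID' key
column, and the dropped column names below are illustrative assumptions, not
part of the code above.

import py_entitymatching as em

A = em.read_csv_metadata('A.csv', key='ID')  # hypothetical input table

# 'ID' is the catalog key, so it is filtered out of the drop list; the
# remaining columns are dropped and the catalog metadata is copied onto
# the returned DataFrame.
A_small = drop_cols(A, ['address', 'ID'])
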
def project_cols(df, col_list):
    if not isinstance(col_list, list):
        col_list = [col_list]
    if cm.is_dfinfo_present(df):
        if _is_table_or_candset(df):
            if not _is_table(df):
                key, fk_ltable, fk_rtable, ltable, rtable, l_key, r_key\
                    = cm.get_metadata_for_candset(df, logger, False)
                updated_col_list = [key, fk_ltable, fk_rtable]
                updated_col_list.extend(col_list)
                col_list = gh.list_drop_duplicates(updated_col_list)
            else:
                key = cm.get_key(df)
                updated_col_list = [key]
                updated_col_list.extend(col_list)
                col_list = gh.list_drop_duplicates(updated_col_list)
        new_df = df[col_list]
        cm.init_properties(new_df)
        cm.copy_properties(df, new_df)
    else:
        new_df = df[col_list]

    return new_df
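
A corresponding sketch for project_cols under the same assumptions as above
(A registered in the catalog with key 'ID'): the catalog key (and, for
candidate sets, the foreign keys) is prepended to the projection list so the
returned table keeps valid metadata.

# Only 'name' is requested, but 'ID' is added back automatically so the
# projected table still carries a usable key.
A_names = project_cols(A, ['name'])
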
Example #5
    def _fit_ex_attrs(self, table, exclude_attrs, target_attr):
        """
        This function supports the fit method, where the DataFrame can be
        given as input along with what attributes must be excluded and the
        target attribute.
        """
        # Validate the input parameters.
        # # We expect the input table to be of type pandas DataFrame.
        if not isinstance(table, pd.DataFrame):
            logger.error('Input table is not of type DataFrame')
            raise AssertionError('Input table is not of type DataFrame')

        # Convert the exclude attributes into a list (if the input is not a list)
        if not isinstance(exclude_attrs, list):
            exclude_attrs = [exclude_attrs]

        # Check if the exclude attributes are present in the input table. If
        # not, raise an error.
        if not ch.check_attrs_present(table, exclude_attrs):
            logger.error(
                'The attributes mentioned in exclude_attrs are not present '
                'in the input table')
            raise AssertionError(
                'The attributes mentioned in exclude_attrs are not present '
                'in the input table')

        # Check if the target attribute is present in the input table. If
        # not, raise an error.
        if not ch.check_attrs_present(table, target_attr):
            logger.error('The target_attr is not present in the input table')
            raise AssertionError(
                'The target_attr is not present in the input table')

        # We now remove duplicate attributes from the exclude_attrs
        exclude_attrs = gh.list_drop_duplicates(exclude_attrs)

        # We explicitly append target attribute to exclude attributes
        if target_attr not in exclude_attrs:
            exclude_attrs.append(target_attr)

        # Now, we get the attributes to project
        attributes_to_project = gh.list_diff(list(table.columns),
                                             exclude_attrs)

        # Get the predictors and the target attribute from the input table
        # based on the exclude attrs and the target attribute.
        x = table[attributes_to_project]
        y = table[target_attr]

        self._fit_sklearn(x, y, check_rem=False)
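
For context, a hedged sketch of the public call path that ends in
_fit_ex_attrs (the labeled feature-vector table G and its column names are
assumptions):

import py_entitymatching as em

dt = em.DTMatcher()
# fit() dispatches here: the id columns are excluded, 'label' is appended to
# the exclusion list, and the remaining columns become the predictors.
dt.fit(table=G, exclude_attrs=['_id', 'ltable_ID', 'rtable_ID'],
       target_attr='label')
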
def _get_xy_data_ex(table, exclude_attrs, target_attr):
    # Validate the input parameters
    # # We expect the input table to be of type pandas DataFrame
    if not isinstance(table, pd.DataFrame):
        logger.error('Input table is not of type DataFrame')
        raise AssertionError('Input table is not of type DataFrame')

    # We expect exclude attributes to be of type list. If not convert it into
    #  a list.
    if not isinstance(exclude_attrs, list):
        exclude_attrs = [exclude_attrs]

    # Check if the exclude attributes are present in the input table
    if not check_attrs_present(table, exclude_attrs):
        logger.error('The attributes mentioned in exclude_attrs '
                     'are not present '
                     'in the input table')
        raise AssertionError('The attributes mentioned in exclude_attrs '
                             'are not present '
                             'in the input table')
    # Check if the target attribute is present in the input table
    if not check_attrs_present(table, target_attr):
        logger.error('The target_attr is not present in the input table')
        raise AssertionError(
            'The target_attr is not present in the input table')

    # Drop the duplicates from the exclude attributes
    exclude_attrs = list_drop_duplicates(exclude_attrs)

    # Explicitly add the target attribute to exclude attribute (if it is not
    # already present)
    if target_attr not in exclude_attrs:
        exclude_attrs.append(target_attr)

    # Project the list of attributes that should be used for scikit-learn's
    # functions.
    attrs_to_project = list_diff(list(table.columns), exclude_attrs)

    # Get the values for x
    x = table[attrs_to_project].values
    # Get the values for y
    y = table[target_attr].values
    y = y.ravel()  # to mute warnings from svm and cross validation
    # Return x and y
    return x, y
def _get_xy_data_ex(table, exclude_attrs, target_attr):
    # Validate the input parameters
    # # We expect the input table to be of type pandas DataFrame
    validate_object_type(table, pd.DataFrame)
    # We expect exclude attributes to be of type list. If not convert it into
    #  a list.
    if not isinstance(exclude_attrs, list):
        exclude_attrs = [exclude_attrs]

    # Check if the exclude attributes are present in the input table
    if not check_attrs_present(table, exclude_attrs):
        logger.error('The attributes mentioned in exclude_attrs '
                     'are not present '
                     'in the input table')
        raise AssertionError(
            'The attributes mentioned in exclude_attrs '
            'are not present '
            'in the input table')
    # Check if the target attribute is present in the input table
    if not check_attrs_present(table, target_attr):
        logger.error('The target_attr is not present in the input table')
        raise AssertionError(
            'The target_attr is not present in the input table')

    # Drop the duplicates from the exclude attributes
    exclude_attrs = list_drop_duplicates(exclude_attrs)

    # Explicitly add the target attribute to exclude attribute (if it is not
    # already present)
    if target_attr not in exclude_attrs:
        exclude_attrs.append(target_attr)

    # Project the list of attributes that should be used for scikit-learn's
    # functions.
    attrs_to_project = list_diff(list(table.columns), exclude_attrs)

    # Get the values for x
    x = table[attrs_to_project].values
    # Get the values for y
    y = table[target_attr].values
    y = y.ravel()  # to mute warnings from svm and cross validation
    # Return x and y
    return x, y
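
A hedged illustration of what this helper returns (the feature-vector table H
and its column names are assumptions):

# Everything except the id columns and the target becomes the predictor
# matrix x; the target column becomes the 1-D label array y.
x, y = _get_xy_data_ex(H, exclude_attrs=['_id', 'ltable_ID', 'rtable_ID'],
                       target_attr='label')
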
def _vis_debug_rf(matcher, train, test, exclude_attrs, target_attr,
                  show_window=True):
    """
    Wrapper function for debugging the Random Forest matcher visually.
    """
    try:
        from PyQt5 import QtWidgets
        from py_entitymatching.gui.debug_gui_base import MainWindowManager
    except ImportError:
        raise ImportError('PyQt5 is not installed. Please install PyQt5 to use '
                          'GUI related functions in py_entitymatching.')


    # Validate the input parameters
    # # We expect the matcher to be of type RFMatcher
    if not isinstance(matcher, RFMatcher):
        logger.error('Input matcher is not of type '
                     'Random Forest matcher')
        raise AssertionError('Input matcher is not of type '
                             'Random Forest matcher')

    # # We expect the target attribute to be of type string.
    validate_object_type(target_attr, six.string_types, error_prefix='Target attribute')

    # # Check whether the exclude attributes are indeed present in the train
    #  DataFrame.
    if not check_attrs_present(train, exclude_attrs):
        logger.error('The exclude attrs are not in train table columns')
        raise AssertionError('The exclude attrs are not in the train table columns')

    # # Check whether the target attribute is indeed present in the train
    #  DataFrame.
    if not check_attrs_present(train, target_attr):
        logger.error('The target attr is not in train table columns')
        raise AssertionError('The target attr is not in the train table columns')

    # # Check whether the exclude attributes are indeed present in the test
    #  DataFrame.
    if not check_attrs_present(test, exclude_attrs):
        logger.error('The exclude attrs are not in test table columns')
        raise AssertionError('The exclude attrs are not in the test table columns')


    # The exclude attributes are expected to be of type list; if not,
    # explicitly convert them into a list.
    if not isinstance(exclude_attrs, list):
        exclude_attrs = [exclude_attrs]

    # Drop the duplicates from the exclude attributes
    exclude_attrs = list_drop_duplicates(exclude_attrs)

    # If the target attribute is not present in the exclude attributes,
    # then explicitly add it to the exclude attributes.
    if target_attr not in exclude_attrs:
        exclude_attrs.append(target_attr)

    # Now, fit using training data
    matcher.fit(table=train, exclude_attrs=exclude_attrs,
                target_attr=target_attr)

    # Get a column name to store the predictions.
    predict_attr_name = get_name_for_predict_column(test.columns)

    # Predict using the test data
    predicted = matcher.predict(table=test, exclude_attrs=exclude_attrs,
                                target_attr=predict_attr_name, append=True,
                                inplace=False)

    # Get the evaluation summary.
    eval_summary = em.eval_matches(predicted, target_attr, predict_attr_name)
    em._viewapp = QtWidgets.QApplication.instance()
    if em._viewapp is None:
        em._viewapp = QtWidgets.QApplication([])

    # Get metric in a form that can be displayed from the evaluation summary
    metric = _get_metric(eval_summary)

    # Get false negatives and false positives as a DataFrame
    fp_dataframe = _get_dataframe(predicted, eval_summary['false_pos_ls'])
    fn_dataframe = _get_dataframe(predicted, eval_summary['false_neg_ls'])

    # Get the main window application

    app = em._viewapp

    m = MainWindowManager(matcher, "rf", exclude_attrs, metric, predicted, fp_dataframe,
                          fn_dataframe)

    # If the show window is true, then display the window.
    if show_window:
        m.show()
        app.exec_()
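
This wrapper sits behind the public debugger entry point. A hedged usage
sketch (the labeled splits IJ['train'] and IJ['test'] and the column names
are assumptions):

import py_entitymatching as em

rf = em.RFMatcher()
# Opens a PyQt5 window with the evaluation metrics plus the false-positive
# and false-negative pairs for the trained random forest.
em.vis_debug_rf(rf, IJ['train'], IJ['test'],
                exclude_attrs=['_id', 'ltable_ID', 'rtable_ID'],
                target_attr='label')
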
def impute_table(table,
                 exclude_attrs=None,
                 missing_val='NaN',
                 strategy='mean',
                 axis=0,
                 val_all_nans=0,
                 verbose=True):
    """
    Impute table containing missing values.

    Args:
        table (DataFrame): DataFrame whose values should be imputed.
        exclude_attrs (List): list of attribute names to be excluded from
            imputing (defaults to None).
        missing_val (string or int): The placeholder for the missing values.
            All occurrences of `missing_val` will be imputed.
            For missing values encoded as np.nan, use the string value 'NaN'
            (defaults to 'NaN').
        strategy (string): String that specifies how to impute values. Valid
            strings: 'mean', 'median', 'most_frequent' (defaults to 'mean').
        axis (int): axis=1 along rows, and axis=0 along columns (defaults
            to 0).
        val_all_nans (float): Value to fill in if all the values in a column
            are NaN (defaults to 0).
        verbose (boolean): A flag to indicate whether more detailed information
            about the execution steps should be printed out (defaults to True).

    Returns:
        Imputed DataFrame.


    Raises:
        AssertionError: If `table` is not of type pandas DataFrame.

    Examples:
        >>> import py_entitymatching as em
        >>> # H is the feature vector which should be imputed. Specifically, impute the missing values
        >>> # in each column, with the mean of that column
        >>> H = em.impute_table(H, exclude_attrs=['_id', 'ltable_id', 'rtable_id'], strategy='mean')


    """
    # Validate input parameters
    # # We expect the input table to be of type pandas DataFrame
    if not isinstance(table, pd.DataFrame):
        logger.error('Input table is not of type DataFrame')
        raise AssertionError('Input table is not of type DataFrame')

    ch.log_info(
        logger, 'Required metadata: cand.set key, fk ltable, '
        'fk rtable, '
        'ltable, rtable, ltable key, rtable key', verbose)

    # # Get metadata
    key, fk_ltable, fk_rtable, ltable, rtable, l_key, r_key = \
        cm.get_metadata_for_candset(
            table,
            logger, verbose)

    # # Validate metadata
    cm._validate_metadata_for_candset(table, key, fk_ltable, fk_rtable, ltable,
                                      rtable, l_key, r_key, logger, verbose)

    fv_columns = table.columns

    if exclude_attrs is None:
        feature_names = fv_columns

    else:

        # Check if the exclude attributes are present in the input table
        if not ch.check_attrs_present(table, exclude_attrs):
            logger.error('The attributes mentioned in exclude_attrs '
                         'are not present '
                         'in the input table')
            raise AssertionError('The attributes mentioned in exclude_attrs '
                                 'are not present '
                                 'in the input table')
        # We expect exclude attributes to be of type list. If not convert it into
        #  a list.
        if not isinstance(exclude_attrs, list):
            exclude_attrs = [exclude_attrs]

        # Drop the duplicates from the exclude attributes
        exclude_attrs = gh.list_drop_duplicates(exclude_attrs)

        cols = [c not in exclude_attrs for c in fv_columns]
        feature_names = fv_columns[cols]
    # print feature_names
    table_copy = table.copy()
    projected_table = table_copy[feature_names]

    projected_table_values = projected_table.values

    imp = Imputer(missing_values=missing_val, strategy=strategy, axis=axis)
    imp.fit(projected_table_values)
    imp.statistics_[pd.np.isnan(imp.statistics_)] = val_all_nans
    projected_table_values = imp.transform(projected_table_values)
    table_copy[feature_names] = projected_table_values
    # Update catalog
    cm.init_properties(table_copy)
    cm.copy_properties(table, table_copy)

    return table_copy
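
Note that sklearn.preprocessing.Imputer and the pd.np alias used above have
been removed in newer releases (scikit-learn 0.22+, pandas 1.0+). A hedged
sketch of the equivalent column-wise imputation step with the current API,
slotted into the same place in the function (an adaptation, not the library's
code; SimpleImputer has no axis argument):

import numpy as np
from sklearn.impute import SimpleImputer

imp = SimpleImputer(missing_values=np.nan, strategy='mean')
imp.fit(projected_table_values)
# Mirror the original trick: columns that are entirely NaN get their statistic
# overwritten so they are filled with val_all_nans instead of being dropped.
imp.statistics_[np.isnan(imp.statistics_)] = val_all_nans
projected_table_values = imp.transform(projected_table_values)
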
def _vis_debug_dt(matcher,
                  train,
                  test,
                  exclude_attrs,
                  target_attr,
                  show_window=True):
    """
    Wrapper function for debugging the Decision Tree matcher visually.
    """

    try:
        from PyQt5 import QtWidgets
        from py_entitymatching.gui.debug_gui_base import MainWindowManager
    except ImportError:
        raise ImportError(
            'PyQt5 is not installed. Please install PyQt5 to use '
            'GUI related functions in py_entitymatching.')

    # Validate the input parameters
    # # We expect the matcher to be of type DTMatcher
    if not isinstance(matcher, DTMatcher):
        logger.error('Input matcher is not of type Decision Tree matcher')
        raise AssertionError('Input matcher is not of type '
                             'Decision Tree matcher')

    # # We expect the target attribute to be of type string.
    validate_object_type(target_attr,
                         six.string_types,
                         error_prefix='Target attribute')

    # # Check whether the exclude attributes are indeed present in the train
    #  DataFrame.
    if not ch.check_attrs_present(train, exclude_attrs):
        logger.error('The exclude attrs are not in train table columns')
        raise AssertionError('The exclude attrs are not in the '
                             'train table columns')

    # # Check whether the target attribute is indeed present in the train
    #  DataFrame.
    if not ch.check_attrs_present(train, target_attr):
        logger.error('The target attr is not in train table columns')
        raise AssertionError('The target attr is not in the '
                             'train table columns')

    # # Check whether the exclude attributes are indeed present in the test
    #  DataFrame.
    if not ch.check_attrs_present(test, exclude_attrs):
        logger.error('The exclude attrs are not in test table columns')
        raise AssertionError('The exclude attrs are not in the '
                             'test table columns')

    # The exclude attributes are expected to be of type list; if not,
    # explicitly convert them into a list.
    if not isinstance(exclude_attrs, list):
        exclude_attrs = [exclude_attrs]

    # Drop the duplicates from the exclude attributes
    exclude_attrs = gh.list_drop_duplicates(exclude_attrs)

    # If the target attribute is not present in the exclude attributes,
    # then explicitly add it to the exclude attributes.
    if target_attr not in exclude_attrs:
        exclude_attrs.append(target_attr)

    # Now, fit using training data
    matcher.fit(table=train,
                exclude_attrs=exclude_attrs,
                target_attr=target_attr)

    # Get a column name to store the predictions.
    predict_attr_name = get_name_for_predict_column(test.columns)

    # Predict using the test data
    predicted = matcher.predict(table=test,
                                exclude_attrs=exclude_attrs,
                                target_attr=predict_attr_name,
                                append=True,
                                inplace=False)

    # Get the evaluation summary.
    eval_summary = eval_matches(predicted, target_attr, predict_attr_name)

    # Get metric in a form that can be displayed from the evaluation summary
    metric = _get_metric(eval_summary)

    # Get false negatives and false positives as a DataFrame
    fp_dataframe = _get_dataframe(predicted, eval_summary['false_pos_ls'])
    fn_dataframe = _get_dataframe(predicted, eval_summary['false_neg_ls'])

    em._viewapp = QtWidgets.QApplication.instance()

    if em._viewapp is None:
        em._viewapp = QtWidgets.QApplication([])

    # Get the main window application
    app = em._viewapp
    m = MainWindowManager(matcher, "dt", exclude_attrs, metric, predicted,
                          fp_dataframe, fn_dataframe)
    # If the show window is true, then display the window.
    if show_window:
        m.show()
        app.exec_()
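
The decision-tree debugger is invoked the same way through its public entry
point (same assumptions as the random-forest sketch above):

import py_entitymatching as em

dt = em.DTMatcher()
em.vis_debug_dt(dt, IJ['train'], IJ['test'],
                exclude_attrs=['_id', 'ltable_ID', 'rtable_ID'],
                target_attr='label')
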
Example #12
def combine_blocker_outputs_via_union(blocker_output_list,
                                      l_prefix='ltable_',
                                      r_prefix='rtable_',
                                      verbose=False):
    """
    Combines multiple blocker outputs by doing a union of their tuple pair
    ids (foreign key ltable, foreign key rtable).

    Specifically, this function takes in a list of DataFrames (candidate
    sets, typically the
    output from blockers) and returns a consolidated DataFrame. The output
    DataFrame contains the union of tuple pair ids (foreign key ltable,
    foreign key rtable) and other attributes from the input list of DataFrames.

    This function makes some assumptions about the input DataFrames. First,
    each DataFrame is expected to contain the following metadata in the
    catalog: key, fk_ltable, fk_rtable, ltable, and rtable. Second,
    all the DataFrames must be a result of blocking from the same underlying
    tables. Concretely the ltable and rtable properties must refer to the
    same DataFrame across all the input tables. Third, all the input
    DataFrames must have the same fk_ltable and fk_rtable properties.
    Finally, in each input DataFrame, for the attributes included from the
    ltable or rtable, the attribute names must be prefixed with the given
    l_prefix and r_prefix in the function.

    The input DataFrames may contain different attribute lists, which raises
    the question of how to combine them. Currently py_entitymatching takes a
    union of the attribute names that have the prefix l_prefix or r_prefix
    across the input tables. After taking the union, for each tuple id pair
    included in the output, the attribute values (for the union-ed attribute
    names) are probed from ltable/rtable and included in the output.

    A subtle point to note here is that if an input DataFrame has a column
    added by the user (say, a label), then that column will not be present
    in the output. The reason is that the same column may not be present in
    the other candidate sets, so it is not clear how to combine them. One
    possibility is to include the label in the output for all tuple id
    pairs, setting it to NaN for the values that are not present. Currently
    py_entitymatching does not include such columns; addressing this will be
    part of future work.

    Args:
        blocker_output_list (list of DataFrames): The list of DataFrames that
            should be combined.
        l_prefix (string): The prefix given to the attributes from the ltable.
        r_prefix (string): The prefix given to the attributes from the rtable.
        verbose (boolean): A flag to indicate whether more detailed information
            about the execution steps should be printed out (default value is
            False).

    Returns:
        A new DataFrame with the combined tuple pairs and other attributes from
        all the blocker lists.

    Raises:
        AssertionError: If `l_prefix` is not of type string.
        AssertionError: If `r_prefix` is not of type string.
        AssertionError: If the length of the input DataFrame list is 0.
        AssertionError: If `blocker_output_list` is not a list of
            DataFrames.
        AssertionError: If the ltables are different across the input list of
            DataFrames.
        AssertionError: If the rtables are different across the input list of
            DataFrames.
        AssertionError: If the `fk_ltable` values are different across the
            input list of DataFrames.
        AssertionError: If the `fk_rtable` values are different across the
            input list of DataFrames.
    """

    # validate input parameters

    # The l_prefix is expected to be of type string
    if not isinstance(l_prefix, six.string_types):
        logger.error('l_prefix is not of type string')
        raise AssertionError('l_prefix is not of type string')

    # The r_prefix is expected to be of type string
    if not isinstance(r_prefix, six.string_types):
        logger.error('r_prefix is not of type string')
        raise AssertionError('r_prefix is not of type string')

    # We cannot combine empty DataFrame list
    if not len(blocker_output_list) > 0:
        logger.error('There are no DataFrames to combine')
        raise AssertionError('There are no DataFrames to combine')

    # Validate the assumptions about the input tables.
    # # 1) All the input objects must be DataFrames
    # # 2) All the input DataFrames must have the metadata as that of a
    # candidate set
    # # 3) All the input DataFrames must have the same fk_ltable and fk_rtable
    _validate_lr_tables(blocker_output_list)

    # # Get the ltable and rtable. We take it from the first DataFrame as all
    #  the DataFrames contain the same ltables and rtables
    ltable = cm.get_ltable(blocker_output_list[0])
    rtable = cm.get_rtable(blocker_output_list[0])

    # # Get the fk_ltable and fk_rtable. We take it from the first DataFrame as
    #  all the DataFrames contain the same ltables and rtables
    fk_ltable = cm.get_fk_ltable(blocker_output_list[0])
    fk_rtable = cm.get_fk_rtable(blocker_output_list[0])

    # Retrieve the keys for the ltable and rtables.
    l_key = cm.get_key(ltable)
    r_key = cm.get_key(rtable)

    # Check if the fk_ltable starts with the given prefix; if not, it is
    # not an error. Just raise a warning.
    if fk_ltable.startswith(l_prefix) is False:
        logger.warning(
            'Foreign key for ltable is not starting with the given prefix ('
            '%s)', l_prefix)

    # Check if the fk_rtable starts with the given prefix; if not, it is
    # not an error. Just raise a warning.
    if fk_rtable.startswith(r_prefix) is False:
        logger.warning(
            'Foreign key for rtable is not starting with the given prefix ('
            '%s)', r_prefix)

    # Initialize lists
    # # keep track of projected tuple pair ids
    tuple_pair_ids = []
    # # keep track of output attributes from the left table
    l_output_attrs = []
    # # keep track of output attributes from the right table
    r_output_attrs = []

    # for each DataFrame in the given list, project out tuple pair ids, get the
    #  attributes from the ltable and rtable
    for data_frame in blocker_output_list:
        # Project out the tuple pair ids. A tuple pair id is a fk_ltable,
        # fk_rtable pair
        projected_tuple_pair_ids = data_frame[[fk_ltable, fk_rtable]]
        # Update the list that tracks tuple pair ids
        tuple_pair_ids.append(projected_tuple_pair_ids)

        # Get the columns, which should be segregated into the attributes
        # from the ltable and rtable
        col_set = (gh.list_diff(list(data_frame.columns),
                                [fk_ltable, fk_rtable,
                                 cm.get_key(data_frame)]))

        # Segregate the columns as attributes from the ltable and rtable
        l_attrs, r_attrs = _lr_cols(col_set, l_prefix, r_prefix)

        # Update the l_output_attrs, r_output_attrs
        l_output_attrs.extend(l_attrs)
        # we use extend because l_attrs is a list
        r_output_attrs.extend(r_attrs)

    ch.log_info(
        logger, 'Concatenating the tuple pair ids across given '
        'blockers ...', verbose)

    # concatenate the tuple pair ids from the list of input DataFrames
    concatenated_tuple_pair_ids = pd.concat(tuple_pair_ids)

    ch.log_info(logger, 'Concatenating the tuple pair ids ... DONE', verbose)
    ch.log_info(logger, 'Deduplicating the tuple pair ids ...', verbose)

    # Deduplicate the DataFrame. Now the returned DataFrame will contain
    # unique tuple pair ids.

    # noinspection PyUnresolvedReferences
    deduplicated_tuple_pair_ids = concatenated_tuple_pair_ids.drop_duplicates()

    ch.log_info(logger, 'Deduplicating the tuple pair ids ... DONE', verbose)

    # Construct output table
    # # Get unique list of attributes across different tables
    l_output_attrs = gh.list_drop_duplicates(l_output_attrs)
    r_output_attrs = gh.list_drop_duplicates(r_output_attrs)

    # Reset the index that might have lingered from concatenation.
    deduplicated_tuple_pair_ids.reset_index(inplace=True, drop=True)

    # Add the output attributes from the ltable and rtable.
    # NOTE: This approach may be inefficient as it probes the ltable, rtable
    # to get the attribute values. A better way would be to fill in the
    # attribute values from the input list of DataFrames. These attribute
    # values could be harvested (at the expense of some space) while we
    # iterate the input blocker output list for the first time.

    # noinspection PyProtectedMember
    consolidated_data_frame = gh._add_output_attributes(
        deduplicated_tuple_pair_ids,
        fk_ltable,
        fk_rtable,
        ltable,
        rtable,
        l_key,
        r_key,
        l_output_attrs,
        r_output_attrs,
        l_prefix,
        r_prefix,
        validate=False)
    # Sort the DataFrame ordered by fk_ltable and fk_rtable.
    # The function "sort" will be depreciated in the newer versions of
    # pandas DataFrame, and it will replaced by 'sort_values' function. So we
    # will first try to use sort_values if this fails we will use sort.
    try:
        consolidated_data_frame.sort_values([fk_ltable, fk_rtable],
                                            inplace=True)
    except AttributeError:
        consolidated_data_frame.sort([fk_ltable, fk_rtable], inplace=True)

    # update the catalog for the consolidated DataFrame
    # First get a column name for the key
    key = ch.get_name_for_key(consolidated_data_frame.columns)
    # Second, add the column name as the key
    consolidated_data_frame = ch.add_key_column(consolidated_data_frame, key)
    # Third, reset the index to remove any out of order index  values from
    # the sort.
    consolidated_data_frame.reset_index(inplace=True, drop=True)
    # Finally, set the properties for the consolidated DataFrame in the catalog
    cm.set_candset_properties(consolidated_data_frame, key, fk_ltable,
                              fk_rtable, ltable, rtable)

    # Return the consolidated DataFrame
    return consolidated_data_frame
def combine_blocker_outputs_via_union(
        blocker_output_list,
        l_prefix='ltable_',
        r_prefix='rtable_',
        verbose=False):
    """
    Combines multiple blocker outputs by doing a union of their tuple pair
    ids (foreign key ltable, foreign key rtable).

    Specifically, this function takes in a list of DataFrames (candidate
    sets, typically the
    output from blockers) and returns a consolidated DataFrame. The output
    DataFrame contains the union of tuple pair ids (foreign key ltable,
    foreign key rtable) and other attributes from the input list of DataFrames.

    This function makes some assumptions about the input DataFrames. First,
    each DataFrame is expected to contain the following metadata in the
    catalog: key, fk_ltable, fk_rtable, ltable, and rtable. Second,
    all the DataFrames must be a result of blocking from the same underlying
    tables. Concretely the ltable and rtable properties must refer to the
    same DataFrame across all the input tables. Third, all the input
    DataFrames must have the same fk_ltable and fk_rtable properties.
    Finally, in each input DataFrame, for the attributes included from the
    ltable or rtable, the attribute names must be prefixed with the given
    l_prefix and r_prefix in the function.

    The input DataFrames may contain different attribute lists, which raises
    the question of how to combine them. Currently py_entitymatching takes a
    union of the attribute names that have the prefix l_prefix or r_prefix
    across the input tables. After taking the union, for each tuple id pair
    included in the output, the attribute values (for the union-ed attribute
    names) are probed from ltable/rtable and included in the output.

    A subtle point to note here is that if an input DataFrame has a column
    added by the user (say, a label), then that column will not be present
    in the output. The reason is that the same column may not be present in
    the other candidate sets, so it is not clear how to combine them. One
    possibility is to include the label in the output for all tuple id
    pairs, setting it to NaN for the values that are not present. Currently
    py_entitymatching does not include such columns; addressing this will be
    part of future work.

    Args:
        blocker_output_list (list of DataFrames): The list of DataFrames that
            should be combined.
        l_prefix (string): The prefix given to the attributes from the ltable.
        r_prefix (string): The prefix given to the attributes from the rtable.
        verbose (boolean): A flag to indicate whether more detailed information
            about the execution steps should be printed out (default value is
            False).

    Returns:
        A new DataFrame with the combined tuple pairs and other attributes from
        all the blocker lists.

    Raises:
        AssertionError: If `l_prefix` is not of type string.
        AssertionError: If `r_prefix` is not of type string.
        AssertionError: If the length of the input DataFrame list is 0.
        AssertionError: If `blocker_output_list` is not a list of
            DataFrames.
        AssertionError: If the ltables are different across the input list of
            DataFrames.
        AssertionError: If the rtables are different across the input list of
            DataFrames.
        AssertionError: If the `fk_ltable` values are different across the
            input list of DataFrames.
        AssertionError: If the `fk_rtable` values are different across the
            input list of DataFrames.

    Examples:

        >>> import py_entitymatching as em
        >>> ab = em.AttrEquivalenceBlocker()
        >>> C = ab.block_tables(A, B, 'zipcode', 'zipcode')
        >>> ob = em.OverlapBlocker()
        >>> D = ob.block_candset(C, 'address', 'address')
        >>> block_f = em.get_features_for_blocking(A, B)
        >>> rb = em.RuleBasedBlocker()
        >>> rule = ['address_address_lev(ltuple, rtuple) > 6']
        >>> rb.add_rule(rule, block_f)
        >>> E = rb.block_tables(A, B)
        >>> F = em.combine_blocker_outputs_via_union([C, E])


    """

    # validate input parameters

    # The l_prefix is expected to be of type string
    py_entitymatching.utils.validation_helper.validate_object_type(l_prefix, six.string_types, 'l_prefix')

    # The r_prefix is expected to be of type string
    py_entitymatching.utils.validation_helper.validate_object_type(r_prefix, six.string_types, 'r_prefix')

    # We cannot combine empty DataFrame list
    if not len(blocker_output_list) > 0:
        logger.error('There are no DataFrames to combine')
        raise AssertionError('There are no DataFrames to combine')

    # Validate the assumptions about the input tables.
    # # 1) All the input objects must be DataFrames
    # # 2) All the input DataFrames must have the metadata as that of a
    # candidate set
    # # 3) All the input DataFrames must have the same fk_ltable and fk_rtable
    _validate_lr_tables(blocker_output_list)

    # # Get the ltable and rtable. We take it from the first DataFrame as all
    #  the DataFrames contain the same ltables and rtables
    ltable = cm.get_ltable(blocker_output_list[0])
    rtable = cm.get_rtable(blocker_output_list[0])

    # # Get the fk_ltable and fk_rtable. We take it from the first DataFrame as
    #  all the DataFrames contain the same ltables and rtables
    fk_ltable = cm.get_fk_ltable(blocker_output_list[0])
    fk_rtable = cm.get_fk_rtable(blocker_output_list[0])

    # Retrieve the keys for the ltable and rtables.
    l_key = cm.get_key(ltable)
    r_key = cm.get_key(rtable)

    # Check if the fk_ltable starts with the given prefix; if not, it is
    # not an error. Just raise a warning.
    if fk_ltable.startswith(l_prefix) is False:
        logger.warning(
            'Foreign key for ltable is not starting with the given prefix ('
            '%s)', l_prefix)

    # Check if the fk_rtable starts with the given prefix; if not, it is
    # not an error. Just raise a warning.
    if fk_rtable.startswith(r_prefix) is False:
        logger.warning(
            'Foreign key for rtable is not starting with the given prefix ('
            '%s)', r_prefix)

    # Initialize lists
    # # keep track of projected tuple pair ids
    tuple_pair_ids = []
    # # keep track of output attributes from the left table
    l_output_attrs = []
    # # keep track of output attributes from the right table
    r_output_attrs = []

    # for each DataFrame in the given list, project out tuple pair ids, get the
    #  attributes from the ltable and rtable
    for data_frame in blocker_output_list:
        # Project out the tuple pair ids. A tuple pair id is a fk_ltable,
        # fk_rtable pair
        projected_tuple_pair_ids = data_frame[[fk_ltable, fk_rtable]]
        # Update the list that tracks tuple pair ids
        tuple_pair_ids.append(projected_tuple_pair_ids)

        # Get the columns, which should be segregated into the attributes
        # from the ltable and rtable
        col_set = (
            gh.list_diff(list(data_frame.columns),
                         [fk_ltable, fk_rtable, cm.get_key(data_frame)]))

        # Segregate the columns as attributes from the ltable and rtable
        l_attrs, r_attrs = _lr_cols(col_set, l_prefix, r_prefix)

        # Update the l_output_attrs, r_output_attrs
        l_output_attrs.extend(l_attrs)
        # we use extend because l_attrs is a list
        r_output_attrs.extend(r_attrs)

    ch.log_info(logger, 'Concatenating the tuple pair ids across given '
                        'blockers ...', verbose)

    # concatenate the tuple pair ids from the list of input DataFrames
    concatenated_tuple_pair_ids = pd.concat(tuple_pair_ids)

    ch.log_info(logger, 'Concatenating the tuple pair ids ... DONE', verbose)
    ch.log_info(logger, 'Deduplicating the tuple pair ids ...', verbose)

    # Deduplicate the DataFrame. Now the returned DataFrame will contain
    # unique tuple pair ids.

    # noinspection PyUnresolvedReferences
    deduplicated_tuple_pair_ids = concatenated_tuple_pair_ids.drop_duplicates()

    ch.log_info(logger, 'Deduplicating the tuple pair ids ... DONE', verbose)

    # Construct output table
    # # Get unique list of attributes across different tables
    l_output_attrs = gh.list_drop_duplicates(l_output_attrs)
    r_output_attrs = gh.list_drop_duplicates(r_output_attrs)

    # Reset the index that might have lingered from concatenation.
    deduplicated_tuple_pair_ids.reset_index(inplace=True, drop=True)

    # Add the output attributes from the ltable and rtable.
    # NOTE: This approach may be inefficient as it probes the ltable, rtable
    # to get the attribute values. A better way would be to fill in the
    # attribute values from the input list of DataFrames. These attribute
    # values could be harvested (at the expense of some space) while we
    # iterate the input blocker output list for the first time.

    # noinspection PyProtectedMember
    consolidated_data_frame = gh._add_output_attributes(
        deduplicated_tuple_pair_ids, fk_ltable,
        fk_rtable,
        ltable, rtable, l_key, r_key,
        l_output_attrs, r_output_attrs,
        l_prefix,
        r_prefix,
        validate=False)
    # Sort the DataFrame ordered by fk_ltable and fk_rtable.
    # The function "sort" will be depreciated in the newer versions of
    # pandas DataFrame, and it will replaced by 'sort_values' function. So we
    # will first try to use sort_values if this fails we will use sort.
    try:
        consolidated_data_frame.sort_values([fk_ltable, fk_rtable],
                                            inplace=True)
    except AttributeError:
        consolidated_data_frame.sort([fk_ltable, fk_rtable], inplace=True)

    # update the catalog for the consolidated DataFrame
    # First get a column name for the key
    key = ch.get_name_for_key(consolidated_data_frame.columns)
    # Second, add the column name as the key
    consolidated_data_frame = ch.add_key_column(consolidated_data_frame, key)
    # Third, reset the index to remove any out of order index  values from
    # the sort.
    consolidated_data_frame.reset_index(inplace=True, drop=True)
    # Finally, set the properties for the consolidated DataFrame in the catalog
    cm.set_candset_properties(consolidated_data_frame, key, fk_ltable,
                              fk_rtable, ltable,
                              rtable)

    # Return the consolidated DataFrame
    return consolidated_data_frame
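
A hedged follow-up to the docstring example above (with A, B, C, and E built
as shown there): the consolidated table gets a fresh key column and full
candidate-set metadata, which can be inspected through the catalog accessors.

import py_entitymatching as em

F = em.combine_blocker_outputs_via_union([C, E])
# Key added by add_key_column, plus the shared foreign keys of the inputs.
print(em.get_key(F), em.get_fk_ltable(F), em.get_fk_rtable(F))
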