def generate_matched_ids(
    distances:pd.DataFrame,
    DF:pd.DataFrame,
    clustering_params:dict,
    base_data_directory:str,
    match_job_id: str,
    block_name='',
) -> pd.DataFrame:
    
    logger.info('Beginning clustering & id generation.')
    distances = square_distance_matrix(distances)
    ioutils.write_dataframe(distances.reset_index(), filepath=f'{base_data_directory}/match_cache/square_distances/{match_job_id}/{block_name}')

    ids = cluster(
        distances, **clustering_params
    )
    ioutils.write_dataframe(ids.reset_index(), filepath=f'{base_data_directory}/match_cache/raw_cluster_ids/{match_job_id}/{block_name}')
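    # DBSCAN labels points that fall in no cluster as -1. Give each of those rows its own
    # fresh id (counting up from the largest real cluster id) so every record receives a
    # distinct matched_id instead of sharing the noise label.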
    max_cluster_id = ids.max()
    replacement_ids = pd.Series(range(max_cluster_id + 1, max_cluster_id + len(ids[ids == -1]) + 1), index=ids[ids==-1].index)
    ids[ids == -1] = replacement_ids
    logger.debug(f'IDs: {ids}')
    logger.debug(f'Replaced noisy singleton ids with \n{replacement_ids}')
    
    logger.debug(f'Adding the block name ({block_name}) to the matched_ids.')
    ids = block_name + ids.astype(str)
    logger.debug(f'New IDs: \n{ids}')
    
    df = DF.copy()
    
    df['matched_id'] = ids
    logger.info('Matched ids generated')

    return df
def cluster(
    distances:pd.DataFrame,
    eps:float=0.5,
    min_samples:int=1,
    algorithm:str='auto',
    leaf_size:int=30,
    n_jobs:int=1
) -> pd.Series:
    """ Cluster the scored entities into individuals. Return the cluster ids
    indexed with the source row_id.
    """

    logger.info('Beginning clustering.')

    clusterer = DBSCAN(
        eps=eps,
        min_samples=min_samples,
        metric='precomputed',
        metric_params=None,
        algorithm=algorithm,
        leaf_size=leaf_size,
        p=None,
        n_jobs=n_jobs
    )

    clusterer.fit(X=distances)
    logger.info('Clustering done! Assigning matched ids.')

    return pd.Series(
        index=distances.index,
        data=clusterer.labels_
    )
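# Illustrative usage (a sketch, not part of the module): `cluster` expects a square,
# symmetric distance matrix indexed by record id and returns one DBSCAN label per record.
#
#   toy = pd.DataFrame(
#       [[0.0, 0.1, 0.9],
#        [0.1, 0.0, 0.9],
#        [0.9, 0.9, 0.0]],
#       index=['a', 'b', 'c'],
#       columns=['a', 'b', 'c'],
#   )
#   cluster(toy, eps=0.2, min_samples=1)
#   # -> a and b share cluster 0; c gets cluster 1 (with min_samples=1, no -1 noise labels)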
Example #3
    def block_and_match(self, df):
        # We will split-apply-combine
        logger.debug(f'df sent to block-and-match has the following columns: {df.dtypes}')
        logger.info(f'Blocking by {self.blocking_rules}')
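        # Each blocking rule (a column name and a position) is unpacked into a grouping key;
        # rows that agree on every key fall into the same block and are only compared to each other.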
        grouped = df.groupby([utils.unpack_blocking_rule(df, column_name, position) for column_name, position in self.blocking_rules.items()])
        logger.info(f'Applying matcher to {len(grouped)} blocks.')
        all_block_metadata = {}

        matches = {}

        for key, group in grouped:
            logger.debug(f"Matching group {key} of size {len(group)}")
            
            if len(group) > 1:
                matches[key], block_metadata = self.match(group, key)
            else:
                block_metadata = {
                    'size': 1,
                    'n_pairs': 0,
                    'contrasts': None,
                    'scores': None
                }
                logger.debug(f"Group {key} only has one record, making a singleton id")
                matches[key] = cluster.generate_singleton_id(group, str(key))

            logger.debug('Wrapping up block')
            all_block_metadata[key] = block_metadata

        logger.debug('All blocks done! Yeehaw!')
        self.metadata['blocks'] = all_block_metadata
        return pd.concat(matches.values())
def read_merged_data(base_data_directory: str, event_type: str,
                     keys: list) -> pd.DataFrame:
    # Read the data in and select the necessary columns
    merged_filepath = f'{base_data_directory}/{event_type}/merged'
    logger.info(f"Reading data from {merged_filepath}")
    df = pd.read_csv(merged_filepath, sep='|')

    df['person_index'] = utils.concatenate_person_index(df, keys)
    df.set_index('person_index', drop=True, inplace=True)

    return df
def join_matched_and_merged_data(right_df: pd.DataFrame,
                                 base_data_directory: str, event_type: str,
                                 person_keys: list,
                                 primary_keys: list) -> pd.DataFrame:
    left_df = read_merged_data(base_data_directory, event_type,
                               person_keys)[primary_keys]

    df = left_df.merge(right=right_df['matched_id'].to_frame(),
                       left_index=True,
                       right_index=True,
                       copy=False,
                       validate='many_to_one')
    logger.info(f'Joined match ids to merged data for {event_type}')

    return df
def write_one_event_type(df: pd.DataFrame, base_data_directory: str,
                         event_type: str, person_keys: list,
                         primary_keys: list, match_job_id: str) -> str:
    # Join the matched ids to the source data
    logger.info(f'Joining matches to merged data for {event_type}')
    df = join_matched_and_merged_data(df, base_data_directory, event_type,
                                      person_keys, primary_keys)

    # Cache the current match to S3
    logger.info(f'Writing data for {base_data_directory} {event_type} to S3.')
    write_dataframe(
        df=df,
        filepath=f'{base_data_directory}/{event_type}/matches/{match_job_id}')
    write_dataframe(df=df,
                    filepath=f'{base_data_directory}/{event_type}/matched')

    return f'{base_data_directory}/{event_type}/matched'
def write_matched_data(matches: pd.DataFrame, base_data_directory: str,
                       person_keys: list, schema_pk_lookup: dict,
                       match_job_id: str) -> dict:
    write_dataframe(
        df=matches.reset_index(),
        filepath=
        f'{base_data_directory}/match_cache/matcher_results/{match_job_id}')
    matched_results_paths = {}
    logger.debug(schema_pk_lookup)
    for event_type, primary_keys in schema_pk_lookup.items():
        logger.info(
            f'Writing matched data for {base_data_directory} {event_type}')
        matched_results_paths[event_type] = write_one_event_type(
            df=matches,
            base_data_directory=base_data_directory,
            event_type=event_type,
            person_keys=person_keys,
            primary_keys=primary_keys,
            match_job_id=match_job_id)

    return matched_results_paths
def load_one_event_type(base_data_directory: str, event_type: str, keys: list,
                        match_job_id: str) -> pd.DataFrame:
    logger.info(
        f'Loading {event_type} data for matching from {base_data_directory}.')

    try:
        df = read_merged_data(base_data_directory, event_type, keys)

        # Dropping columns that we don't need for matching
        df = df[keys]

        # Keeping track of the event_type
        df['event_type'] = event_type

        logger.info(f'{event_type} data loaded from S3.')

        return df

    except FileNotFoundError:
        logger.info(
            f'No merged file found for {event_type} in {base_data_directory}. Skipping.'
        )
        return None
def preprocess(df:pd.DataFrame, match_job_id:str, base_data_directory:str) -> pd.DataFrame:
    # full_name
    # full name is only given if name parts are not. maybe we should do some preprocessing on full names to create
    # name parts and use only the name parts, especially since it is possible for the jail and HMIS systems to
    # differ on what they use
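    # A possible sketch (not applied here; column names are assumptions): derive crude name
    # parts when only a full name is available.
    # if 'full_name' in df.columns and 'first_name' not in df.columns:
    #     name_parts = df['full_name'].str.split()
    #     df['first_name'] = name_parts.str[0]
    #     df['last_name'] = name_parts.str[-1]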

    # prefix
    # we should preprocess prefixes to remove punctuation and possibly spaces
    if 'prefix' in df.columns:
        logger.debug('Removing punctuation from prefixes')
        df['prefix'] = df['prefix'].str.replace(r'[^\w\s]', '', regex=True)

    # first_name
    # potential preprocessing steps:
    # - remove punctuation
    # - create: full_first_name, first_word_first_name
    # - try using second+ word of first name as middle name if no middle name 

    # middle_name
    # potential preprocessing steps:
    # - remove punctuation
    # - create: full_middle_name, first_word_middle_name, second_word_middle_name

    # last_name
    # potential preprocessing steps:
    # - remove punctuation
    # - create: full_last_name, first_word_last_name, second_word_last_name

    # suffix
    if 'suffix' in df.columns:
        logger.debug('Removing punctuation from suffixes')
        df['suffix'] = df['suffix'].str.replace(r'[^\w\s]', '', regex=True)

    # dob
    # MUST BE CAST TO DATETIME DURING PREPROCESSING
    if 'dob' in df.columns:
        logger.debug('Converting date of birth to datetime')
        df['dob'] = pd.to_datetime(df['dob'])

    # ssn
    # THIS SHOULD BE CONVERTED TO STRING. An SSN consists of three parts, and numerical distances
    # between SSNs are only VAGUELY meaningful (e.g., the first 3 digits increase roughly east to
    # west but not in a rigorous way, and the middle 2 digits are assigned in a fixed but
    # non-monotonic order)
    # the first three digits are the "area code" of where the person was registered.
    # most people living in an area will have one of a few local area codes; therefore, the distinctiveness
    # of the area code may be useful for matching. we may want to preprocess ssn to extract the area code
    # to make this comparison.
    if 'ssn' in df.columns:
        logger.debug('Converting social security number to str')
        df['ssn'] = df['ssn'].astype(str)
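    # A possible sketch (not applied here; 'ssn_area_code' is an assumed column name): pull out
    # the three-digit area code for a separate comparison, assuming zero-padded 9-digit SSNs.
    # df['ssn_area_code'] = df['ssn'].str.zfill(9).str[:3]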

    # dmv_number
    # THIS SHOULD BE CAST TO STRING. In some jurisdictions, they are strings and in others ints. To ensure
    # that we can generalize here, we need to convert to string for all of them.
    if 'dmv_number' in df.columns:
        logger.debug('Converting dmv number to str')
        df['dmv_number'] = df['dmv_number'].astype(str)

    # race
    # make race into a list
    # eventually, we will want to combine secondary race and race into a single field
    if 'race' in df.columns:
        logger.debug('Converting race to list')
        df['race'] = df['race'].fillna('').str.split(',')
        logger.debug(f"Races observed in preprocessed df: {df['race']}")

    # ethnicity
    # ethnicity encodes only Hispanic/Not Hispanic. for some databases, Hispanic is actually included
    # in the race categories instead of in a separate field. we may want to do some pre-processing to
    # to add H to the race list where the ethnicity field contains 'Hispanic'
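    # A possible sketch (not applied here; the 'ethnicity' handling is an assumption): append
    # 'H' to the race list when the ethnicity field indicates Hispanic.
    # if 'ethnicity' in df.columns and 'race' in df.columns:
    #     is_hispanic = df['ethnicity'].str.contains('Hispanic', na=False)
    #     df.loc[is_hispanic, 'race'] = df.loc[is_hispanic, 'race'].apply(lambda r: r + ['H'])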

    logger.info('Preprocessing done!')
    logger.debug(f"The preprocessed dataframe has the following columns: {df.columns}")
    logger.debug(f"The preprocessed dimensions of the dataframe is: {df.shape}")
    ioutils.write_dataframe(df.reset_index(), filepath=f'{base_data_directory}/match_cache/preprocessed_data/{match_job_id}')
    return df
Example #10
def do_match(
    base_data_directory:str,
    schema_pk_lookup:dict,
    upload_id:str=None,
    notify_webapp:bool=True,
    config_path:str='matcher_config.yaml'
):
    with open(config_path) as f:
        config = yaml.safe_load(f)
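    # The config is expected to provide at least the keys used below: 'keys' (the person
    # columns used for matching), 'blocking_rules', 'contrasts', and a 'clusterer' entry
    # whose 'args' are passed to the clustering step.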

    # Initializing: let's get started by collecting and logging some job metadata
    metadata = {
        'match_job_start_time': datetime.datetime.now(),
        'match_job_id': utils.unique_match_job_id(),
        'base_data_directory': base_data_directory,
        'config': config
    }
    logger.info("Matching process started with the following configuration:")
    for key, value in config.items():
         logger.info(f"Matcher config {key}: {value}")

    try:
        # Loading: collect matching data (keys) for all available event types & record which event types were found
        logger.info('Loading data for matching.')
        df, event_types_read = ioutils.load_data_for_matching(
            base_data_directory,
            list(schema_pk_lookup.keys()),
            config['keys'],
            metadata['match_job_id']
        )
        metadata['event_types_read'] = list(event_types_read)
        metadata['loaded_data_columns'] = list(df.columns.values)
        metadata['loaded_data_shape'] = list(df.shape)
        metadata['data_loaded_time'] = datetime.datetime.now()

        # Preprocessing: enforce data types and split/combine columns for features
        logger.info('Doing some preprocessing on the columns')
        df = preprocess.preprocess(df, metadata['match_job_id'], base_data_directory)
        metadata['preprocessed_data_columns'] = list(df.columns.values)
        metadata['preprocessed_data_shape'] = list(df.shape)
        metadata['data_preprocessed_time'] = datetime.datetime.now()

        # Matching: block the data, generate pairs and features, and cluster entities
        logger.info(f"Running matcher")
        match_object = matcher.Matcher(
            base_data_directory=base_data_directory,
            match_job_id=metadata['match_job_id'],
            clustering_rules=config['clusterer']['args'],
            contrast_rules=config['contrasts'],
            blocking_rules=config['blocking_rules']
        )
        matches = match_object.block_and_match(df=df)
        metadata['data_matched_time'] = datetime.datetime.now()
        metadata.update(match_object.metadata)
        logger.debug('Matching done!')

        logger.debug(f"Number of matched pairs: {len(matches)}")

        # Writing: Join the matched ids to the source data for each event & write to S3 and postgres
        logger.info('Writing matched results!')
        matched_results_paths = ioutils.write_matched_data(
            matches=matches,
            base_data_directory=base_data_directory,
            person_keys=config['keys'],
            schema_pk_lookup={event_type:schema_pk_lookup[event_type] for event_type in event_types_read},
            match_job_id=metadata['match_job_id']
        )
        metadata['data_written_time'] = datetime.datetime.now()
        ioutils.write_dict_to_yaml(metadata, f"{base_data_directory}/match_cache/metadata/{metadata['match_job_id']}")

        logger.info('Finished')
        match_end_time = datetime.datetime.now()
        match_runtime = match_end_time - metadata['match_job_start_time']

        match_successful = True
        status_message = 'new matches are available. Yippee!'

    except Exception as e:
        match_end_time = datetime.datetime.now()
        match_runtime = match_end_time - metadata['match_job_start_time']
        match_successful = False
        status_message = 'matching failed. SAD!'

        # The matcher may have failed before any results were written
        try:
            matched_results_paths
        except NameError:
            matched_results_paths = None

        logger.error(f'Matcher failed with message "{str(e)}"')

    finally:
        if notify_webapp:
            job = q.enqueue_call(
                func='backend.match_finished',
                args=(
                    matched_results_paths,
                    metadata['match_job_id'],
                    metadata['match_job_start_time'],
                    match_end_time,
                    match_successful,
                    match_runtime,
                    upload_id
                ),
                result_ttl=5000,
                timeout=3600
            )
            logger.info(f'Notified the webapp that {status_message}')
        logger.info('Matcher done!!')
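# Illustrative invocation (a sketch; the path and primary keys are assumptions):
#
#   do_match(
#       base_data_directory='s3://some-bucket/some-county',
#       schema_pk_lookup={'hmis_service_stays': ['internal_person_id', 'internal_event_id']},
#       notify_webapp=False,
#   )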
Example #11
def write_dict_to_yaml(dict_to_write: dict, filepath: str):
    logger.debug(f'Writing some dictionary data to {filepath}! Oooooo!')
    with smart_open.smart_open(filepath, 'wb') as fout:
        fout.write(yaml.dump(dict_to_write).encode())
    logger.info(f'Wrote metadata to {filepath}')
Example #12
def write_dataframe(df: pd.DataFrame, filepath: str) -> None:
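    # smart_open writes transparently to local paths or object stores such as S3,
    # which is why the same helper serves both the cache and the final outputs.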
    with smart_open.smart_open(filepath, 'wb') as fout:
        fout.write(df.to_csv(sep='|', index=False).encode())

    logger.info(f'Wrote data to {filepath}')
Example #13
def generate_singleton_id(df:pd.DataFrame, block_name:str) -> pd.DataFrame:
    df['matched_id'] = block_name + '0'
    logger.info(f'Singleton has id {df.matched_id.values[0]}')
    return df