def testDateStandardiser(self):
    """Test date standardiser routines"""
    # NOTE(review): the early return below disables this whole test --
    # everything after it is dead code.  Confirm this is intentional.
    return
    date_std = standardisation.DateStandardiser(
        descript="Test date standardiser",
        parse_form=self.date_parse_formats,
        input_fields=["in_date"],
        output_fiel=["day", "month", "year"],
    )
    rec_std = standardisation.RecordStandardiser(
        descr="Test record standardiser",
        input_dataset=self.in_ds,
        output_dataset=self.out_ds,
        comp_stand_list=[date_std],
        pass_fiel=[("pass1", "out_pass1"), ("pass2", "out_pass2")],
    )

    # Standardise each test date directly and compare against the expected
    # [day, month, year] triplet.
    for (raw_date, expected_dmy) in self.dates:
        cleaned = date_std.clean_component(raw_date)
        result = date_std.standardise(raw_date, cleaned)
        assert expected_dmy == result, (
            "Wrong date standardisation: %s, should be: %s"
            % (str(result), str(expected_dmy)))

    rec_std.standardise()  # Use record standardiser and write output file

    # Re-read the written output data set and verify the date columns.
    test_ds = dataset.DataSetCSV(
        description="Test standardised data set",
        access_mode="read",
        rec_ident="rec_id",
        field_list=[],
        header_line=True,
        write_header=True,
        file_name="test-standardised-dataset.csv",
    )
    for (row_idx, (rec_id, rec_list)) in enumerate(test_ds.readall()):
        expected_dmy = self.dates[row_idx][1]
        details = (row_idx, rec_list[0:3], expected_dmy)
        assert rec_list[0] == expected_dmy[0], details  # day
        assert rec_list[1] == expected_dmy[1], details  # month
        assert rec_list[2] == expected_dmy[2], details  # year
def testDateStandardiser(self):
    """Test date standardiser routines"""
    # NOTE(review): this early return disables the test; the remainder of
    # the method is dead code -- confirm that is intentional.
    return
    ds = standardisation.DateStandardiser(descript='Test date standardiser',
                                          parse_form=self.date_parse_formats,
                                          input_fields=['in_date'],
                                          output_fiel=['day', 'month',
                                                       'year'])
    rs = standardisation.RecordStandardiser(descr='Test record standardiser',
                                            input_dataset=self.in_ds,
                                            output_dataset=self.out_ds,
                                            comp_stand_list=[ds])

    # Check direct component standardisation for every test date.
    for (date_str, date_res) in self.dates:
        test_date_res = ds.standardise(date_str, ds.clean_component(date_str))
        if date_res != test_date_res:
            raise AssertionError(
                'Wrong date standardisation: %s, should be: %s'
                % (str(test_date_res), str(date_res)))

    rs.standardise()  # Use record standardiser and write output file

    # Test the content of the output data set.
    test_ds = dataset.DataSetCSV(description='Test standardised data set',
                                 access_mode='read',
                                 rec_ident='rec_id',
                                 field_list=[],
                                 header_line=True,
                                 write_header=True,
                                 file_name='test-standardised-dataset.csv')
    i = 0
    for (rec_id, rec_list) in test_ds.readall():
        truth = self.dates[i][1]
        assert rec_list[0] == truth[0], (i, rec_list[0:3], truth)
        assert rec_list[1] == truth[1], (i, rec_list[0:3], truth)
        assert rec_list[2] == truth[2], (i, rec_list[0:3], truth)
        i += 1
# definitions # ----------------------------------------------------------------------------- # The publicly available Census data set with synthetic personal names and # addresses (taken from SecondString data repository). # # - The 'entity_id' attribute (2nd attribute) contains entity numbers. # - No record identifier is available. # census_ds_A = dataset.DataSetCSV(description='Census data set A', access_mode='read', delimiter='\t', rec_ident='rec_id', header_line=False, field_list=[('relation',0), ('entity_id',1), ('surname',2), ('given_name',3), ('middle_initial',4), ('zipcode',5), ('suburb',6)], file_name = './data/secondstring/censusTextSegmentedA.tab') census_ds_B = dataset.DataSetCSV(description='Census data set B', access_mode='read', delimiter='\t', rec_ident='rec_id', header_line=False, field_list=[('relation',0), ('entity_id',1), ('surname',2),
def setUp(self):
    """Initialise the test fixtures.

    Creates the input and output CSV data sets used by the standardiser
    tests, plus tables of test cases: dates, phone numbers, and personal
    names (given-name-first and surname-first variants), each paired with
    its expected standardisation result.  Also loads the name tag lookup
    table and name correction list from the shared lookup data directory.
    """
    # Input data set: field list is empty, so fields are taken from the
    # header line of the CSV file.
    self.in_ds = dataset.DataSetCSV(descri='A standardisation test data set',
                                    access_mode='read',
                                    rec_ident='rec_id',
                                    field_list=[],
                                    header_line=True,
                                    write_header=True,
                                    file_name='test-standard-dataset.csv')

    # Output data set: one column per standardised output field.
    self.out_ds = dataset.DataSetCSV(descrip='The standardised test data set',
                                     access_mode='write',
                                     rec_ident='rec_id',
                                     field_list=[('day', 0), ('month', 1),
                                                 ('year', 2),
                                                 ('country_code', 3),
                                                 ('country_name', 4),
                                                 ('area_code', 5),
                                                 ('number', 6),
                                                 ('extension', 7),
                                                 ('title', 8),
                                                 ('gender_guess', 9),
                                                 ('given_name', 10),
                                                 ('alt_given_name', 11),
                                                 ('surname', 12),
                                                 ('alt_surname', 13),
                                                 ('out_pass1', 14),
                                                 ('out_pass2', 15)],
                                     header_line=True,
                                     write_header=True,
                                     file_name='test-standardised-dataset.csv')

    # Date test cases: (input string, expected [day, month, year]).
    self.dates = [['Sep 1, 68', ['1', '9', '1968']],
                  ['18 Jan 2002', ['18', '1', '2002']],
                  ['17:2:2002', ['17', '2', '2002']],
                  ['2002-02-25', ['25', '2', '2002']],
                  ['18,03,2001', ['18', '3', '2001']],
                  ['21.12.1999', ['21', '12', '1999']],
                  ['February 18,19', ['18', '2', '1919']],
                  ['23\\July\\1968', ['23', '7', '1968']],
                  ['18-02-2002', ['18', '2', '2002']],
                  ['5/03/01', ['5', '3', '2001']],
                  ['19680429', ['29', '4', '1968']],
                  ['600810', ['10', '8', '1960']],
                  ['3:05:2000', ['3', '5', '2000']],
                  ['30.11.1989', ['30', '11', '1989']],
                  ["1. January '70", ['1', '1', '1970']],
                  ['01011970', ['1', '1', '1970']],
                  ['10011970', ['10', '1', '1970']],
                  ['31 dec 1969', ['31', '12', '1969']],
                  ['30 december 69', ['30', '12', '1969']],
                  ['01011970', ['1', '1', '1970']],
                  ['13 Feb 1945', ['13', '2', '1945']],
                  ['Feb 13, \'45', ['13', '2', '1945']],
                  ['April 29 1968', ['29', '4', '1968']],
                  ['29-4=68', ['29', '4', '1968']],
                  ['11-01-1972', ['11', '1', '1972']],
                  ['January 10. 1972', ['10', '1', '1972']],
                  ['29 Feb 1932', ['29', '2', '1932']],
                  ['29 Feb 32', ['29', '2', '1932']],
                  ['11 Jun 1902', ['11', '6', '1902']],
                  ['11 Jul 1989', ['11', '7', '1989']],
                  ['12111968', ['12', '11', '1968']],
                  [' 21111969 ', ['21', '11', '1969']]]

    # Date parsing format strings (strptime-style directives).
    self.date_parse_formats = ['%d %m %Y',  # 24 04 2002 or 24 4 2002
                               '%d %B %Y',  # 24 April 2002
                               '%d %b %Y',  # 24 Apr 2002
                               '%m %d %Y',  # 04 24 2002 or 4 24 2002
                               '%B %d %Y',  # April 24 2002
                               '%b %d %Y',  # Apr 24 2002
                               '%Y %m %d',  # 2002 04 24 or 2002 4 24
                               '%Y %B %d',  # 2002 April 24
                               '%Y %b %d',  # 2002 Apr 24
                               '%d %m %y',  # 24 04 02 or 24 4 02
                               '%d %B %y',  # 24 April 02
                               '%d %b %y',  # 24 Apr 02
                               '%y %m %d',  # 02 04 24 or 02 4 24
                               '%y %B %d',  # 02 April 24
                               '%y %b %d',  # 02 Apr 24
                               '%m %d %y',  # 04 24 02 or 4 24 02
                               '%B %d %y',  # April 24 02
                               '%b %d %y']  # Apr 24 02

    # Phone number test cases: (input string, expected [country code,
    # country name, area code, number, extension]).
    self.phonenums = \
        [('++61 2 6125 5690', ['61', 'Australia', '02', '6125-5690', '']),
         ('0061 02 6125 5690', ['61', 'Australia', '02', '6125-5690', '']),
         ('0061 02 6125-5690', ['61', 'Australia', '02', '6125-5690', '']),
         ('41 312 17 84', ['41', 'Switzerland', '', '312 17 84', '']),
         ('6125 0010', ['61', 'Australia', '', '6125-0010', '']),
         ('1-800-764-0432', ['1', 'USA/Canada', '800', '764-0432', '']),
         ('02 6125 0010', ['61', 'Australia', '02', '6125-0010', '']),
         ('00 1 317-923 4523', ['1', 'USA/Canada', '317', '923-4523', '']),
         ('1 317-923 4523', ['1', 'USA/Canada', '317', '923-4523', '']),
         ('00111 41 312 17 84', ['41', 'Switzerland', '', '312 17 84', '']),
         ('00001 41 312 17 84', ['41', 'Switzerland', '', '312 17 84', '']),
         ('01 41 312 17 84', ['41', 'Switzerland', '', '312 17 84', '']),
         ('1-541-754-3010', ['1', 'USA/Canada', '541', '754-3010', '']),
         ('754-3010', ['1', 'USA/Canada', '', '754-3010', '']),
         ('754-3010ext 42', ['1', 'USA/Canada', '', '754-3010', '42']),
         ('754-3010x 42', ['1', 'USA/Canada', '', '754-3010', '42']),
         ('754-3010 ext 42', ['1', 'USA/Canada', '', '754-3010', '42']),
         ('754-3010 ext. 42', ['1', 'USA/Canada', '', '754-3010', '42']),
         ('754-3010 x. 42', ['1', 'USA/Canada', '', '754-3010', '42']),
         ('754-3010 x42', ['1', 'USA/Canada', '', '754-3010', '42']),
         ('(541) 754-3010', ['1', 'USA/Canada', '541', '754-3010', '']),
         ('+1-541-754-3010', ['1', 'USA/Canada', '541', '754-3010', '']),
         ('191 541 754 3010', ['', '', '', '915417543010', '']),
         ('001-541-754-3010', ['1', 'USA/Canada', '541', '754-3010', '']),
         ('636-48018', ['61', 'Australia', '', '6364-8018', '']),
         ('(089) / 636-48018', ['1', 'USA/Canada', '896', '364-8018', '']),
         ('+49-89-636-48018', ['49', 'Germany', '', '89 636 48018', '']),
         ('19-49-89-636-48018', ['', '', '', '9498963648018', '']),
         ('+61 (02) 6125 0101', ['61', 'Australia', '02', '6125-0101', '']),
         ('++61 (02) 6125 0101', ['61', 'Australia', '02', '6125-0101', '']),
         ('++61 (2) 6125 0101', ['61', 'Australia', '02', '6125-0101', '']),
         ('11 +61 (2) 6125 0101', ['', '', '', '161261250101', '']),
         ('0011 ++61 (2) 6125 0101', ['61', 'Australia', '02', '6125-0101', '']),
         ('0111 ++61 (2) 6125 0101', ['61', 'Australia', '02', '6125-0101', '']),
         ('0111 61 02 6125 0101', ['61', 'Australia', '02', '6125-0101', '']),
         ('61 (2) 6125 0101', ['61', 'Australia', '02', '6125-0101', ''])]

    # Names with given names first: (input string, expected [gender guess,
    # title, given name, alternative given name, surname, alt. surname]).
    self.names_gnames = \
        [('', ['', '', '', '', '', '']),
         ('Peter Christen', ['male', '', 'peter', '', 'christen', '']),
         ('"DR" Peter Christen', ['male', 'dr', 'peter', '', 'christen', '']),
         ('<mr> Peter Christen', ['male', 'mr', 'peter', '', 'christen', '']),
         ('{ Dr > Peter Christen', ['male', 'dr', 'peter', '', 'christen', '']),
         (' " Dr Peter Christen', ['male', 'dr', 'peter', '', 'christen', '']),
         ('Peter () Christen', ['male', 'dr', 'peter', '', 'christen', '']),
         ('Peter Christen(DR]]', ['male', 'dr', 'peter', '', 'christen', '']),
         ('Peter Christen (mister', ['male', 'mr', 'peter', '', 'christen', '']),
         ('Peter Christen " mr', ['male', 'mr', 'peter', '', 'christen', '']),
         ('Peter Christen {mr } ', ['male', 'mr', 'peter', '', 'christen', '']),
         ('Peter Christen "dr"', ['male', 'dr', 'peter', '', 'christen', '']),
         (' ( ) Peter Christen', ['male', '', 'peter', '', 'christen', '']),
         ('Peter " " Christen', ['male', '', 'peter', '', 'christen', '']),
         ('Peter (> Christen', ['male', '', 'peter', '', 'christen', '']),
         (',Peter Christen--', ['male', '', 'peter', '', 'christen', '']),
         ('-,- Peter Christen-,-', ['male', '', 'peter', '', 'christen', '']),
         (' // Peter Christen//', ['male', '', 'peter', '', 'christen', '']),
         ('(Peter,Christen ) ', ['male', '', 'peter', '', 'christen', '']),
         ('[Peter Christen]', ['male', '', 'peter', '', 'christen', '']),
         ('<<Peter , Christen>>', ['male', '', 'peter', '', 'christen', '']),
         ('{ Peter Christen }', ['male', '', 'peter', '', 'christen', '']),
         ('"Peter Christen"', ['male', '', 'peter', '', 'christen', '']),
         ("''Peter ; Christen''", ['male', '', 'peter', '', 'christen', '']),
         ("'|Peter ?: Christen'|", ['male', '', 'peter', '', 'christen', '']),
         ('Mr peter Christen', ['male', 'mr', 'peter', '', 'christen', '']),
         ('Mister Peter CHRISTEN', ['male', 'mr', 'peter', '', 'christen', '']),
         ('Petra~ Christen', ['female', '', 'petra', '', 'christen', '']),
         ('Ms petra Christen', ['female', 'ms', 'petra', '', 'christen', '']),
         ('Misses Petra CHRISTEN', ['female', 'ms', 'petra', '', 'christen', '']),
         ('Peter Marco Jones', ['male', '', 'peter', 'mark', 'jones', '']),
         ('peter almond', ['male', '', 'peter', '', 'almond', '']),
         ('almond peter', ['male', '', 'peter', '', 'almond', '']),
         ('Peter', ['male', '', 'peter', '', '', '']),
         ('alison de francesco', ['', '', '', '', '', '']),
         ('alison de-francesco', ['', '', '', '', '', '']),
         ('peter de la placa', ['', '', '', '', '', '']),
         ('peter marco de la placa', ['', '', '', '', '', '']),
         ('maria petra de la placa-miller', ['', '', '', '', '', '']),
         ('maria petra vonder felde', ['', '', '', '', '', '']),
         ('Christen', ['', '', '', '', 'christen', '']),
         ('Jane', ['female', '', 'jane', '', '', '']),
         ('miss anita', ['female', 'ms', 'anita', '', '', '']),
         ('mr p. christen', ['male', 'mr', 'p', '', 'christen', '']),
         ('Peter mary jones', ['', '', 'peter', 'mary', 'jones', '']),
         ('mr Peter mary jones', ['male', 'mr', 'peter', 'mary', 'jones', '']),
         ('mister Paul PETER jones-miller',
          ['male', 'mr', 'paul', 'peter', 'jones', 'miller']),
         ('peter known as pete', ['male', '', 'peter', 'peter', '', '']),
         ('nee miller', ['', '', 'nee', '', 'miller', '']),
         ('peter de nee', ['', '', 'peter', '', 'de nee', '']),
         ('paul saint nee', ['', '', 'paul', '', 'saint nee', '']),
         ('saint paul nee', ['', '', 'saint paul', '', 'nee', '']),
         ('paula miller (nee jones)', ['', '', '', '', '', '']),
         ('peter, son of nee miller', ['', '', '', '', '', '']),
         ('peter (known as pete) christen', ['', '', '', '', '', '']),
         ('peter (known as "pete") christen', ['', '', '', '', '', '']),
         ('peter christen miller', ['', '', '', '', '', '']),
         ('peter christen-miller', ['', '', '', '', '', '']),
         ('peter joe christen-miller', ['', '', '', '', '', '']),
         ("peter 'joe' christen-miller", ['', '', '', '', '', '']),
         ('"sharky" peter miller', ['', '', '', '', '', '']),
         ("'barbie' sue smith-jones", ['', '', '', '', '', '']),
         ('sue "barbie" smith meyer', ['', '', '', '', '', '']),
         ('sue known as "barbie" smith meyer', ['', '', '', '', '', '']),
         ("sue 'barbie' smith-jones", ['', '', '', '', '', '']),
         ("sue 'barbie' smith jones", ['', '', '', '', '', '']),
         ('sue baby of maria jones', ['', '', '', '', '', '']),
         ('jane co lo-schiavo', ['', '', '', '', '', '']),
         ('martina louis barber', ['', '', '', '', '', '']),
         ('lisa-anne hennessy', ['', '', '', '', '', '']),
         ('michelle southam-byrnes', ['', '', '', '', '', '']),
         ('nicole win jordan', ['', '', '', '', '', '']),
         ('caroline and clarke', ['', '', '', '', '', '']),
         ('jocelyn or buskens', ['', '', '', '', '', '']),
         ('yee fung nee cheng', ['', '', '', '', '', '']),
         ('jenny khaw nee yii', ['', '', '', '', '', '']),
         ('roslyn kay sta maria', ['', '', '', '', '', '']),
         ('shelley lee di stefano', ['', '', '', '', '', '']),
         ('li qing van huisstede', ['', '', '', '', '', '']),
         ('patricia ann van den hurk', ['', '', '', '', '', '']),
         ('kim maree nguyen su', ['', '', '', '', '', '']),
         ('adriana haile de lange', ['', '', '', '', '', '']),
         ("jodene akke op't land", ['', '', '', '', '', '']),
         ('cleo ann di blasio', ['', '', '', '', '', '']),
         ('debbie saphire st quintin', ['', '', '', '', '', '']),
         ('nehmat e el chaar', ['', '', '', '', '', '']),
         ('yan chen ping yang', ['', '', '', '', '', '']),
         ('sharon leoni van ant werpen', ['', '', '', '', '', '']),
         ('nicole maria de oliveira', ['', '', '', '', '', '']),
         ('sonia denni de arman', ['', '', '', '', '', '']),
         ('nicole dan de arman', ['', '', '', '', '', '']),
         ('johdy louise dal santo', ['', '', '', '', '', '']),
         ('tamara lou st. john-morton', ['', '', '', '', '', '']),
         ('mercy jacq john peter', ['', '', '', '', '', '']),
         ('carly evelyn de st germain', ['', '', '', '', '', '']),
         ('rachael jane van buuren', ['', '', '', '', '', '']),
         ('joanna lilli van ryswyk', ['', '', '', '', '', '']),
         ('melissa ma romijn-van stey', ['', '', '', '', '', '']),
         ('wong jing ling huang', ['', '', '', '', '', '']),
         ('julie maree mackenzie - hun', ['', '', '', '', '', '']),
         ('joanne agnes righettli (dr)', ['', 'dr', '', '', '', '']),
         ('siu har ng (hung)', ['', '', '', '', '', '']),
         ('anne-maree lawrence-franks', ['', '', '', '', '', '']),
         ('mao-yao rong-fong', ['', '', '', '', '', '']),
         ('wai-fun wheeler-smith', ['', '', '', '', '', '']),
         ('lee-anne westerbrook-sim', ['', '', '', '', '', '']),
         ('kasey-lee so-chan', ['', '', '', '', '', '']),
         ('sherri-anne hilder-penningt', ['', '', '', '', '', '']),
         ('yoon-sun ahn-wu', ['', '', '', '', '', '']),
         ('ying-xia yu-guo', ['', '', '', '', '', '']),
         ('hee-jing hyde-page', ['', '', '', '', '', '']),
         ('mary-anne chung-kwon', ['', '', '', '', '', '']),
         ('marie-reine attallah-boulos', ['', '', '', '', '', '']),
         ('tracy-lea zanco-hinds', ['', '', '', '', '', '']),
         ('tracy-maria beardow-brooks', ['', '', '', '', '', '']),
         ('el-masri sheehan-hill', ['', '', '', '', '', '']),
         ('vicki-maree cheryle-anne', ['', '', '', '', '', '']),
         ('vicki-mare sheehan-anna', ['', '', '', '', '', '']),
         ('cindy-lou mckie-bailey', ['', '', '', '', '', '']),
         ('jo-ann bakoss-parson', ['', '', '', '', '', '']),
         ('wan-ching tsui-chan', ['', '', '', '', '', '']),
         ('sue-ellen bruechert-reich', ['', '', '', '', '', '']),
         ('anna-marie vearing-brown', ['', '', '', '', '', '']),
         ("lisa-jane o'connor", ['', '', '', '', '', '']),
         ("julie-anne o'malley", ['', '', '', '', '', '']),
         ("mary-jane o'doherty", ['', '', '', '', '', '']),
         ("jose-carol o'leary", ['', '', '', '', '', '']),
         ("rose-merrie o'kane", ['', '', '', '', '', '']),
         ("ymeka-emily o'neill", ['', '', '', '', '', '']),
         ]

    ## check field spill - have 2 input fields

    # Names with surnames first
    #
    self.names_snames = [('Christen Peter', ['male', '', 'peter', '', 'christen', '']),
                         ('Christen, Peter', ['male', '', 'peter', '', 'christen', '']),
                         ('Mr Christen Peter', ['male', 'mr', 'peter', '', 'christen', '']),
                         ('Mister CHRISTEN, Peter', ['male', 'mr', 'peter', '', 'christen', '']),
                         ('Christen Petra', ['female', '', 'petra', '', 'christen', '']),
                         ('Ms Christen, petra', ['female', 'ms', 'petra', '', 'christen', '']),
                         ('Misses CHRISTEN, PETRA', ['female', 'ms', 'petra', '', 'christen', '']),
                         ('peter almond', ['male', '', 'peter', '', 'almond', '']),
                         ('almond peter', ['male', '', 'peter', '', 'almond', '']),
                         ('', ['', '', '', '', '', '']),
                         ('Peter', ['male', '', 'peter', '', '', '']),
                         ('Christen', ['', '', '', '', 'christen', '']),
                         ('Jane', ['female', '', 'jane', '', '', '']),
                         ('miss anita', ['female', 'ms', 'anita', '', '', '']),
                         ('mr p. christen', ['male', 'mr', 'p', '', 'christen', '']),
                         ('jones, Peter mary', ['', '', 'peter', 'mary', 'jones', '']),
                         ('mr jones Peter mary', ['male', 'mr', 'peter', 'mary', 'jones', '']),
                         ('mister jones-miller, Paul PETER',
                          ['male', 'mr', 'paul', 'peter', 'jones', 'miller']),
                         ]

    # Title word lists used for gender guessing.
    self.name_male_titles = ['mr']
    self.name_female_titles = ['ms']

    # Load the name tag lookup table and correction list from the shared
    # lookup data directory.
    src_data_dir = '..'+os.sep+'data'+os.sep+'lookup'+os.sep

    self.name_tag_table = lookup.TagLookupTable(descr='Name tag test table')
    self.name_tag_table.load([src_data_dir+'givenname_f.tbl',
                              src_data_dir+'givenname_m.tbl',
                              src_data_dir+'name_misc.tbl',
                              src_data_dir+'name_prefix.tbl',
                              src_data_dir+'name_title.tbl',
                              src_data_dir+'saints.tbl',
                              src_data_dir+'surname.tbl'])

    self.name_corr_list = lookup.CorrectionList(descr = 'Name corr test list')
    self.name_corr_list.load(src_data_dir+'name_corr.lst')
def testPhoneNumStandardiserNone(self):
    """Test phone number standardiser routines"""
    # NOTE(review): the early return below disables this test entirely --
    # everything after it is dead code; confirm this is intentional.
    return
    phone_std = standardisation.PhoneNumStandardiser(
        descript='Test phone number standardiser',
        input_fields=['in_phonenum'],
        output_fiel=['country_code', None, 'area_code', 'number', None])
    rec_std = standardisation.RecordStandardiser(
        descr='Test record standardiser',
        input_dataset=self.in_ds,
        output_dataset=self.out_ds,
        comp_stand_list=[phone_std])

    # Direct component standardisation checks.
    for (raw_num, expected) in self.phonenums:
        cleaned = phone_std.clean_component(raw_num)
        got = phone_std.standardise(raw_num, cleaned)
        assert expected == got, (
            'Wrong phone number standardisation: %s, should be: %s'
            % (str(got), str(expected)))

    rec_std.standardise()  # Use record standardiser and write output file

    # Re-read the written output data set and verify the phone columns.
    # The country name and extension columns are asserted empty -- their
    # output fields were set to None above, which apparently suppresses
    # them (confirm against the standardiser implementation).
    test_ds = dataset.DataSetCSV(description='Test standardised data set',
                                 access_mode='read',
                                 rec_ident='rec_id',
                                 field_list=[],
                                 header_line=True,
                                 write_header=True,
                                 file_name='test-standardised-dataset.csv')
    for (row_idx, (rec_id, rec_list)) in enumerate(test_ds.readall()):
        expected = self.phonenums[row_idx][1]
        details = (row_idx, rec_list[3:8], expected)
        assert rec_list[3] == expected[0], details  # country code
        assert rec_list[4] == '', details           # country name suppressed
        assert rec_list[5] == expected[2], details  # area code
        assert rec_list[6] == expected[3], details  # number
        assert rec_list[7] == '', details           # extension suppressed
# ----------------------------------------------------------------------------- # The publicly available Census data set with synthetic personal names and # addresses (taken from SecondString data repository). # # - The 'entity_id' attribute (2nd attribute) contains entity numbers. # - No record identifier is available. # census_ds_A = dataset.DataSetCSV( description="Census data set A", access_mode="read", delimiter="\t", rec_ident="rec_id", header_line=False, field_list=[ ("relation", 0), ("entity_id", 1), ("surname", 2), ("given_name", 3), ("middle_initial", 4), ("zipcode", 5), ("suburb", 6), ], file_name="./data/secondstring/censusTextSegmentedA.tab", ) census_ds_B = dataset.DataSetCSV( description="Census data set B", access_mode="read", delimiter="\t", rec_ident="rec_id", header_line=False,
# ----------------------------------------------------------------------------- # Febrl project type: Deduplicate # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # Define input data set A: # data_set_a = dataset.DataSetCSV(description="Data set generated by Febrl GUI", access_mode="read", strip_fields=True, miss_val=[''], rec_ident="ID", file_name="/home/jclark/projects/dpla_appfest/test_sample.csv", header_line=True, delimiter=",", field_list = [("ID",0), ("Title",1), ("Creator",2), ("Subject",3), ("Publisher",4), ("Description",5), ("Type",6)]) # ----------------------------------------------------------------------------- # Define field comparison functions # fc_funct_1 = comparison.FieldComparatorWinkler(agree_weight = 1.0, description = "Winkler-Subject-Subject",
my_logger.setLevel(log_level) # ----------------------------------------------------------------------------- # Febrl project type: Link # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # Define input data set A: # data_set_a = dataset.DataSetCSV( description="Data set generated by Febrl GUI", access_mode="read", strip_fields=True, miss_val=[''], rec_ident="PIS", file_name="/home/rodrigo/Projetos/reclink/teste/sdf1103.csv", header_line=True, delimiter=",", field_list=[("PIS", 0), ("RGP", 1), ("NOME", 2), ("UF", 3), ("MUNICIPIO", 4)]) # Define input data set B: # data_set_b = dataset.DataSetCSV( description="Data set generated by Febrl GUI", access_mode="read", strip_fields=True, miss_val=[''], rec_ident="PIS", file_name="/home/rodrigo/Projetos/reclink/teste/sdf1703.csv",
# ----------------------------------------------------------------------------- # Febrl project type: Link # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # Define input data set A: # data_set_a = dataset.DataSetCSV(description="Data set generated by Febrl GUI (note TAB data set is implemented as CSV data set with \t delimiter)", access_mode="read", strip_fields=False, miss_val=[''], rec_ident="field-0", file_name="C:\Users\Abdullah\Dropbox\rla\TestData\DataIn5k1.txt", header_line=False, delimiter="\t", field_list = [("field-0",0), ("field-1",1), ("field-2",2), ("field-3",3), ("field-4",4), ("field-5",5)]) # Define input data set B: # data_set_b = dataset.DataSetCSV(description="Data set generated by Febrl GUI (note TAB data set is implemented as CSV data set with \t delimiter)", access_mode="read", strip_fields=False, miss_val=[''], rec_ident="field-0", file_name="C:\Users\Abdullah\Dropbox\rla\TestData\DataIn5k2.txt",
ds_size, corruption) print 'size = %d, corruption = %d:\n' % (ds_size, corruption) all_result_file.write('size = %d, corruption = %d:\n' % (ds_size, corruption)) # Define input data set A: # data_set_a = dataset.DataSetCSV( description="Data set generated by Febrl GUI", access_mode="read", strip_fields=True, miss_val=[''], rec_ident='r', file_name=file_name_with_suffix, header_line=False, delimiter=",", field_list=[("field-0", 0), ("field-1", 1), ("field-2", 2), ("field-3", 3), ("field-4", 4), ("field-5", 5), ("field-6", 6), ("field-7", 7), ("field-8", 8), ("field-9", 9), ("field-10", 10), ("field-11", 11), ("field-12", 12), ("field-13", 13), ("field-14", 14), ("field-15", 15), ("field-16", 16), ("field-17", 17)]) # ----------------------------------------------------------------------------- # Define field comparison functions # fc_funct_1 = comparison.FieldComparatorExactString( agree_weight=1.0, description="Str-Exact-field-17-field-17",
def SaveMatchDataSet(match_set, dataset1, id_field1, new_dataset_name1,
                     dataset2=None, id_field2=None, new_dataset_name2=None):
    """Save the original data set(s) with an additional field (attribute)
    that contains match identifiers.

    This function creates unique match identifiers (one for each matched
    pair of record identifiers in the given match set) and inserts them
    into a new attribute (field) of the data set(s) to be written.  If the
    record identifier field is not one of the fields in the input data set,
    such a field is additionally added to the output data set (named after
    the record identifier of the input data set).

    The output data set(s) are always written as CSV type data sets.

    Match identifiers are of the form 'mid00001', 'mid00002', etc., with
    the number of digits depending upon the total number of matches in the
    match set.  If a record is involved in several matches its match
    identifiers are separated by a semi-colon (;).

    Only one new data set is created for a deduplication (dataset2 must be
    None), and two new data sets for a linkage.
    """

    auxiliary.check_is_set('match_set', match_set)
    auxiliary.check_is_not_none('dataset1', dataset1)
    auxiliary.check_is_string('id_field1', id_field1)
    auxiliary.check_is_string('new_dataset_name1', new_dataset_name1)

    if dataset2 is not None:  # A linkage, check second set of parameters
        auxiliary.check_is_not_none('dataset2', dataset2)
        auxiliary.check_is_string('id_field2', id_field2)
        auxiliary.check_is_string('new_dataset_name2', new_dataset_name2)
        do_link = True
    else:
        do_link = False

    match_rec_id_list = list(match_set)  # Make a list so it can be sorted
    match_rec_id_list.sort()

    # Number of digits needed so all match identifiers have equal width.
    if len(match_set) > 0:
        num_digit = max(1, int(math.ceil(math.log(len(match_set), 10))))
    else:
        num_digit = 1

    mid_count = 1  # Counter for match identifiers

    # Generate dictionaries with record identifiers as keys and lists of
    # match identifiers as values.
    #
    match_id_dict1 = {}  # For first data set
    match_id_dict2 = {}  # For second data set, not needed for deduplication

    for (rec_id1, rec_id2) in match_rec_id_list:
        mid_count_str = '%s' % (mid_count)
        this_mid = 'mid%s' % (mid_count_str.zfill(num_digit))

        rec_id1_mid_list = match_id_dict1.get(rec_id1, [])
        rec_id1_mid_list.append(this_mid)
        match_id_dict1[rec_id1] = rec_id1_mid_list

        if do_link == True:  # Do the same for second data set
            rec_id2_mid_list = match_id_dict2.get(rec_id2, [])
            rec_id2_mid_list.append(this_mid)
            match_id_dict2[rec_id2] = rec_id2_mid_list
        else:  # Same dictionary for deduplication
            rec_id2_mid_list = match_id_dict1.get(rec_id2, [])
            rec_id2_mid_list.append(this_mid)
            match_id_dict1[rec_id2] = rec_id2_mid_list

        mid_count += 1

    def _write_with_match_ids(in_ds, match_id_dict, id_field, new_name):
        """Write 'in_ds' as a CSV data set 'new_name' with the match
        identifiers from 'match_id_dict' appended as a final 'id_field'
        column.  (Shared helper: this logic was previously duplicated for
        the first and second data set.)"""

        # Generate the output field list from the input data set.
        #
        if in_ds.dataset_type == 'CSV':
            out_field_list = in_ds.field_list[:]  # Make a copy of the list
            last_col_index = out_field_list[-1][1] + 1
        elif in_ds.dataset_type == 'COL':
            out_field_list = []
            col_index = 0
            for (field, col_width) in in_ds.field_list:
                out_field_list.append((field, col_index))
                col_index += 1
            last_col_index = col_index

        # Check if the record identifier is not a normal input field (in
        # which case it has to be written into the output data set as well).
        #
        rec_ident_name = in_ds.rec_ident
        add_rec_ident = True
        for (field_name, field_data) in in_ds.field_list:
            if field_name == rec_ident_name:
                add_rec_ident = False
                break

        if add_rec_ident == True:
            out_field_list.append((rec_ident_name, last_col_index))
            last_col_index += 1

        out_field_list.append((id_field, last_col_index))  # Match id field

        # BUG FIX: the second data set was previously written without a
        # 'delimiter=' argument, silently falling back to the default
        # delimiter instead of keeping the input data set's one.  Both
        # outputs now preserve it.  (NOTE(review): assumes COL data sets
        # also carry a 'delimiter' attribute, as the original first-data-set
        # path already did -- confirm.)
        out_ds = dataset.DataSetCSV(
            description=in_ds.description + ' with match identifiers',
            access_mode='write',
            rec_ident=in_ds.rec_ident,
            header_line=True,
            write_header=True,
            strip_fields=in_ds.strip_fields,
            miss_val=in_ds.miss_val,
            field_list=out_field_list,
            delimiter=in_ds.delimiter,
            file_name=new_name)

        # Read all records, add match identifiers, write into new data set.
        #
        for (rec_id, rec_list) in in_ds.readall():
            if add_rec_ident == True:  # Add record identifier
                rec_list.append(rec_id)
            mid_list = match_id_dict.get(rec_id, [])
            rec_list.append(';'.join(mid_list))
            out_ds.write({rec_id: rec_list})

        out_ds.finalise()

    _write_with_match_ids(dataset1, match_id_dict1, id_field1,
                          new_dataset_name1)

    if do_link == True:  # Second data set for linkage only
        _write_with_match_ids(dataset2, match_id_dict2, id_field2,
                              new_dataset_name2)
# -----------------------------------------------------------------------------
# Define input data set A:
#
data_set_a = dataset.DataSetCSV(
    description="Data set generated by Febrl GUI",
    access_mode="read",
    strip_fields=True,
    miss_val=[''],
    rec_ident="REC_ID",
    file_name="../datasets/cora.csv",
    header_line=False,
    delimiter=",",
    field_list=[("field-0", 0),
                ("ID", 1),
                ("AUTHORS", 2),
                ("field-3", 3),
                ("TITLE", 4),
                ("field-5", 5),
                ("VENUE", 6),      # 10.5% missing
                ("ADDRESS", 7),    # 77.5% missing
                ("PUBLISHER", 8),  # 69.9% missing
                ("YEAR", 9),       # 12.3% missing
                ("PAGES", 10),     # 32.9% missing
                ("field-11", 11),
                ("field-12", 12),
                ("field-13", 13)])

# -----------------------------------------------------------------------------
# Define field comparison functions
#
def setUp(self):
    """Build the fixtures shared by the standardisation tests.

    Creates an input and an output CSV data set, then fills in literal
    test tables: each table is a list of (raw_input_string, expected
    standardised output) pairs consumed by the individual test methods.

    NOTE(review): the keyword names passed to the project constructors are
    abbreviated (``descri``, ``descrip``, ``descr``) — presumably the base
    class accepts truncated keyword prefixes; verify against the dataset /
    lookup module APIs before renaming them.
    """
    # Input data set: field list is discovered from the CSV header line.
    self.in_ds = dataset.DataSetCSV(
        descri="A standardisation test data set",
        access_mode="read",
        rec_ident="rec_id",
        field_list=[],
        header_line=True,
        write_header=True,
        file_name="test-standard-dataset.csv",
    )
    # Output data set: one column per standardised output component, plus
    # the two pass-through fields written by the record standardiser.
    self.out_ds = dataset.DataSetCSV(
        descrip="The standardised test data set",
        access_mode="write",
        rec_ident="rec_id",
        field_list=[
            ("day", 0),
            ("month", 1),
            ("year", 2),
            ("country_code", 3),
            ("country_name", 4),
            ("area_code", 5),
            ("number", 6),
            ("extension", 7),
            ("title", 8),
            ("gender_guess", 9),
            ("given_name", 10),
            ("alt_given_name", 11),
            ("surname", 12),
            ("alt_surname", 13),
            ("out_pass1", 14),
            ("out_pass2", 15),
        ],
        header_line=True,
        write_header=True,
        file_name="test-standardised-dataset.csv",
    )
    # Date test pairs: raw date string -> expected [day, month, year]
    # (all components as strings, with two-digit years expanded).
    self.dates = [
        ["Sep 1, 68", ["1", "9", "1968"]],
        ["18 Jan 2002", ["18", "1", "2002"]],
        ["17:2:2002", ["17", "2", "2002"]],
        ["2002-02-25", ["25", "2", "2002"]],
        ["18,03,2001", ["18", "3", "2001"]],
        ["21.12.1999", ["21", "12", "1999"]],
        ["February 18,19", ["18", "2", "1919"]],
        ["23\\July\\1968", ["23", "7", "1968"]],
        ["18-02-2002", ["18", "2", "2002"]],
        ["5/03/01", ["5", "3", "2001"]],
        ["19680429", ["29", "4", "1968"]],
        ["600810", ["10", "8", "1960"]],
        ["3:05:2000", ["3", "5", "2000"]],
        ["30.11.1989", ["30", "11", "1989"]],
        ["1. January '70", ["1", "1", "1970"]],
        ["01011970", ["1", "1", "1970"]],
        ["10011970", ["10", "1", "1970"]],
        ["31 dec 1969", ["31", "12", "1969"]],
        ["30 december 69", ["30", "12", "1969"]],
        ["01011970", ["1", "1", "1970"]],
        ["13 Feb 1945", ["13", "2", "1945"]],
        ["Feb 13, '45", ["13", "2", "1945"]],
        ["April 29 1968", ["29", "4", "1968"]],
        ["29-4=68", ["29", "4", "1968"]],
        ["11-01-1972", ["11", "1", "1972"]],
        ["January 10. 1972", ["10", "1", "1972"]],
        ["29 Feb 1932", ["29", "2", "1932"]],
        ["29 Feb 32", ["29", "2", "1932"]],
        ["11 Jun 1902", ["11", "6", "1902"]],
        ["11 Jul 1989", ["11", "7", "1989"]],
        ["12111968", ["12", "11", "1968"]],
        [" 21111969 ", ["21", "11", "1969"]],
    ]
    # strptime-style parse formats tried by the date standardiser, in order.
    self.date_parse_formats = [
        "%d %m %Y",  # 24 04 2002 or 24 4 2002
        "%d %B %Y",  # 24 April 2002
        "%d %b %Y",  # 24 Apr 2002
        "%m %d %Y",  # 04 24 2002 or 4 24 2002
        "%B %d %Y",  # April 24 2002
        "%b %d %Y",  # Apr 24 2002
        "%Y %m %d",  # 2002 04 24 or 2002 4 24
        "%Y %B %d",  # 2002 April 24
        "%Y %b %d",  # 2002 Apr 24
        "%d %m %y",  # 24 04 02 or 24 4 02
        "%d %B %y",  # 24 April 02
        "%d %b %y",  # 24 Apr 02
        "%y %m %d",  # 02 04 24 or 02 4 24
        "%y %B %d",  # 02 April 24
        "%y %b %d",  # 02 Apr 24
        "%m %d %y",  # 04 24 02 or 4 24 02
        "%B %d %y",  # April 24 02
        "%b %d %y",  # Apr 24 02
    ]
    # Phone number pairs: raw number -> expected
    # [country_code, country_name, area_code, number, extension].
    self.phonenums = [
        ("++61 2 6125 5690", ["61", "Australia", "02", "6125-5690", ""]),
        ("0061 02 6125 5690", ["61", "Australia", "02", "6125-5690", ""]),
        ("0061 02 6125-5690", ["61", "Australia", "02", "6125-5690", ""]),
        ("41 312 17 84", ["41", "Switzerland", "", "312 17 84", ""]),
        ("6125 0010", ["61", "Australia", "", "6125-0010", ""]),
        ("1-800-764-0432", ["1", "USA/Canada", "800", "764-0432", ""]),
        ("02 6125 0010", ["61", "Australia", "02", "6125-0010", ""]),
        ("00 1 317-923 4523", ["1", "USA/Canada", "317", "923-4523", ""]),
        ("1 317-923 4523", ["1", "USA/Canada", "317", "923-4523", ""]),
        ("00111 41 312 17 84", ["41", "Switzerland", "", "312 17 84", ""]),
        ("00001 41 312 17 84", ["41", "Switzerland", "", "312 17 84", ""]),
        ("01 41 312 17 84", ["41", "Switzerland", "", "312 17 84", ""]),
        ("1-541-754-3010", ["1", "USA/Canada", "541", "754-3010", ""]),
        ("754-3010", ["1", "USA/Canada", "", "754-3010", ""]),
        ("754-3010ext 42", ["1", "USA/Canada", "", "754-3010", "42"]),
        ("754-3010x 42", ["1", "USA/Canada", "", "754-3010", "42"]),
        ("754-3010 ext 42", ["1", "USA/Canada", "", "754-3010", "42"]),
        ("754-3010 ext. 42", ["1", "USA/Canada", "", "754-3010", "42"]),
        ("754-3010 x. 42", ["1", "USA/Canada", "", "754-3010", "42"]),
        ("754-3010 x42", ["1", "USA/Canada", "", "754-3010", "42"]),
        ("(541) 754-3010", ["1", "USA/Canada", "541", "754-3010", ""]),
        ("+1-541-754-3010", ["1", "USA/Canada", "541", "754-3010", ""]),
        # Unparseable numbers yield empty country/area fields and the raw digits.
        ("191 541 754 3010", ["", "", "", "915417543010", ""]),
        ("001-541-754-3010", ["1", "USA/Canada", "541", "754-3010", ""]),
        ("636-48018", ["61", "Australia", "", "6364-8018", ""]),
        ("(089) / 636-48018", ["1", "USA/Canada", "896", "364-8018", ""]),
        ("+49-89-636-48018", ["49", "Germany", "", "89 636 48018", ""]),
        ("19-49-89-636-48018", ["", "", "", "9498963648018", ""]),
        ("+61 (02) 6125 0101", ["61", "Australia", "02", "6125-0101", ""]),
        ("++61 (02) 6125 0101", ["61", "Australia", "02", "6125-0101", ""]),
        ("++61 (2) 6125 0101", ["61", "Australia", "02", "6125-0101", ""]),
        ("11 +61 (2) 6125 0101", ["", "", "", "161261250101", ""]),
        ("0011 ++61 (2) 6125 0101", ["61", "Australia", "02", "6125-0101", ""]),
        ("0111 ++61 (2) 6125 0101", ["61", "Australia", "02", "6125-0101", ""]),
        ("0111 61 02 6125 0101", ["61", "Australia", "02", "6125-0101", ""]),
        ("61 (2) 6125 0101", ["61", "Australia", "02", "6125-0101", ""]),
    ]
    # Name pairs (given names first): raw name -> expected
    # [gender_guess, title, given_name, alt_given_name, surname, alt_surname].
    # Entries whose expected output is all-empty are presumably cases the
    # standardiser cannot (or should not) resolve — TODO confirm intent.
    self.names_gnames = [
        ("", ["", "", "", "", "", ""]),
        ("Peter Christen", ["male", "", "peter", "", "christen", ""]),
        ('"DR" Peter Christen', ["male", "dr", "peter", "", "christen", ""]),
        ("<mr> Peter Christen", ["male", "mr", "peter", "", "christen", ""]),
        ("{ Dr > Peter Christen", ["male", "dr", "peter", "", "christen", ""]),
        (' " Dr Peter Christen', ["male", "dr", "peter", "", "christen", ""]),
        ("Peter () Christen", ["male", "dr", "peter", "", "christen", ""]),
        ("Peter Christen(DR]]", ["male", "dr", "peter", "", "christen", ""]),
        ("Peter Christen (mister", ["male", "mr", "peter", "", "christen", ""]),
        ('Peter Christen " mr', ["male", "mr", "peter", "", "christen", ""]),
        ("Peter Christen {mr } ", ["male", "mr", "peter", "", "christen", ""]),
        ('Peter Christen "dr"', ["male", "dr", "peter", "", "christen", ""]),
        (" ( ) Peter Christen", ["male", "", "peter", "", "christen", ""]),
        ('Peter " " Christen', ["male", "", "peter", "", "christen", ""]),
        ("Peter (> Christen", ["male", "", "peter", "", "christen", ""]),
        (",Peter Christen--", ["male", "", "peter", "", "christen", ""]),
        ("-,- Peter Christen-,-", ["male", "", "peter", "", "christen", ""]),
        (" // Peter Christen//", ["male", "", "peter", "", "christen", ""]),
        ("(Peter,Christen ) ", ["male", "", "peter", "", "christen", ""]),
        ("[Peter Christen]", ["male", "", "peter", "", "christen", ""]),
        ("<<Peter , Christen>>", ["male", "", "peter", "", "christen", ""]),
        ("{ Peter Christen }", ["male", "", "peter", "", "christen", ""]),
        ('"Peter Christen"', ["male", "", "peter", "", "christen", ""]),
        ("''Peter ; Christen''", ["male", "", "peter", "", "christen", ""]),
        ("'|Peter ?: Christen'|", ["male", "", "peter", "", "christen", ""]),
        ("Mr peter Christen", ["male", "mr", "peter", "", "christen", ""]),
        ("Mister Peter CHRISTEN", ["male", "mr", "peter", "", "christen", ""]),
        ("Petra~ Christen", ["female", "", "petra", "", "christen", ""]),
        ("Ms petra Christen", ["female", "ms", "petra", "", "christen", ""]),
        ("Misses Petra CHRISTEN", ["female", "ms", "petra", "", "christen", ""]),
        ("Peter Marco Jones", ["male", "", "peter", "mark", "jones", ""]),
        ("peter almond", ["male", "", "peter", "", "almond", ""]),
        ("almond peter", ["male", "", "peter", "", "almond", ""]),
        ("Peter", ["male", "", "peter", "", "", ""]),
        ("alison de francesco", ["", "", "", "", "", ""]),
        ("alison de-francesco", ["", "", "", "", "", ""]),
        ("peter de la placa", ["", "", "", "", "", ""]),
        ("peter marco de la placa", ["", "", "", "", "", ""]),
        ("maria petra de la placa-miller", ["", "", "", "", "", ""]),
        ("maria petra vonder felde", ["", "", "", "", "", ""]),
        ("Christen", ["", "", "", "", "christen", ""]),
        ("Jane", ["female", "", "jane", "", "", ""]),
        ("miss anita", ["female", "ms", "anita", "", "", ""]),
        ("mr p. christen", ["male", "mr", "p", "", "christen", ""]),
        ("Peter mary jones", ["", "", "peter", "mary", "jones", ""]),
        ("mr Peter mary jones", ["male", "mr", "peter", "mary", "jones", ""]),
        (
            "mister Paul PETER jones-miller",
            ["male", "mr", "paul", "peter", "jones", "miller"],
        ),
        ("peter known as pete", ["male", "", "peter", "peter", "", ""]),
        ("nee miller", ["", "", "nee", "", "miller", ""]),
        ("peter de nee", ["", "", "peter", "", "de nee", ""]),
        ("paul saint nee", ["", "", "paul", "", "saint nee", ""]),
        ("saint paul nee", ["", "", "saint paul", "", "nee", ""]),
        ("paula miller (nee jones)", ["", "", "", "", "", ""]),
        ("peter, son of nee miller", ["", "", "", "", "", ""]),
        ("peter (known as pete) christen", ["", "", "", "", "", ""]),
        ('peter (known as "pete") christen', ["", "", "", "", "", ""]),
        ("peter christen miller", ["", "", "", "", "", ""]),
        ("peter christen-miller", ["", "", "", "", "", ""]),
        ("peter joe christen-miller", ["", "", "", "", "", ""]),
        ("peter 'joe' christen-miller", ["", "", "", "", "", ""]),
        ('"sharky" peter miller', ["", "", "", "", "", ""]),
        ("'barbie' sue smith-jones", ["", "", "", "", "", ""]),
        ('sue "barbie" smith meyer', ["", "", "", "", "", ""]),
        ('sue known as "barbie" smith meyer', ["", "", "", "", "", ""]),
        ("sue 'barbie' smith-jones", ["", "", "", "", "", ""]),
        ("sue 'barbie' smith jones", ["", "", "", "", "", ""]),
        ("sue baby of maria jones", ["", "", "", "", "", ""]),
        ("jane co lo-schiavo", ["", "", "", "", "", ""]),
        ("martina louis barber", ["", "", "", "", "", ""]),
        ("lisa-anne hennessy", ["", "", "", "", "", ""]),
        ("michelle southam-byrnes", ["", "", "", "", "", ""]),
        ("nicole win jordan", ["", "", "", "", "", ""]),
        ("caroline and clarke", ["", "", "", "", "", ""]),
        ("jocelyn or buskens", ["", "", "", "", "", ""]),
        ("yee fung nee cheng", ["", "", "", "", "", ""]),
        ("jenny khaw nee yii", ["", "", "", "", "", ""]),
        ("roslyn kay sta maria", ["", "", "", "", "", ""]),
        ("shelley lee di stefano", ["", "", "", "", "", ""]),
        ("li qing van huisstede", ["", "", "", "", "", ""]),
        ("patricia ann van den hurk", ["", "", "", "", "", ""]),
        ("kim maree nguyen su", ["", "", "", "", "", ""]),
        ("adriana haile de lange", ["", "", "", "", "", ""]),
        ("jodene akke op't land", ["", "", "", "", "", ""]),
        ("cleo ann di blasio", ["", "", "", "", "", ""]),
        ("debbie saphire st quintin", ["", "", "", "", "", ""]),
        ("nehmat e el chaar", ["", "", "", "", "", ""]),
        ("yan chen ping yang", ["", "", "", "", "", ""]),
        ("sharon leoni van ant werpen", ["", "", "", "", "", ""]),
        ("nicole maria de oliveira", ["", "", "", "", "", ""]),
        ("sonia denni de arman", ["", "", "", "", "", ""]),
        ("nicole dan de arman", ["", "", "", "", "", ""]),
        ("johdy louise dal santo", ["", "", "", "", "", ""]),
        ("tamara lou st. john-morton", ["", "", "", "", "", ""]),
        ("mercy jacq john peter", ["", "", "", "", "", ""]),
        ("carly evelyn de st germain", ["", "", "", "", "", ""]),
        ("rachael jane van buuren", ["", "", "", "", "", ""]),
        ("joanna lilli van ryswyk", ["", "", "", "", "", ""]),
        ("melissa ma romijn-van stey", ["", "", "", "", "", ""]),
        ("wong jing ling huang", ["", "", "", "", "", ""]),
        ("julie maree mackenzie - hun", ["", "", "", "", "", ""]),
        ("joanne agnes righettli (dr)", ["", "dr", "", "", "", ""]),
        ("siu har ng (hung)", ["", "", "", "", "", ""]),
        ("anne-maree lawrence-franks", ["", "", "", "", "", ""]),
        ("mao-yao rong-fong", ["", "", "", "", "", ""]),
        ("wai-fun wheeler-smith", ["", "", "", "", "", ""]),
        ("lee-anne westerbrook-sim", ["", "", "", "", "", ""]),
        ("kasey-lee so-chan", ["", "", "", "", "", ""]),
        ("sherri-anne hilder-penningt", ["", "", "", "", "", ""]),
        ("yoon-sun ahn-wu", ["", "", "", "", "", ""]),
        ("ying-xia yu-guo", ["", "", "", "", "", ""]),
        ("hee-jing hyde-page", ["", "", "", "", "", ""]),
        ("mary-anne chung-kwon", ["", "", "", "", "", ""]),
        ("marie-reine attallah-boulos", ["", "", "", "", "", ""]),
        ("tracy-lea zanco-hinds", ["", "", "", "", "", ""]),
        ("tracy-maria beardow-brooks", ["", "", "", "", "", ""]),
        ("el-masri sheehan-hill", ["", "", "", "", "", ""]),
        ("vicki-maree cheryle-anne", ["", "", "", "", "", ""]),
        ("vicki-mare sheehan-anna", ["", "", "", "", "", ""]),
        ("cindy-lou mckie-bailey", ["", "", "", "", "", ""]),
        ("jo-ann bakoss-parson", ["", "", "", "", "", ""]),
        ("wan-ching tsui-chan", ["", "", "", "", "", ""]),
        ("sue-ellen bruechert-reich", ["", "", "", "", "", ""]),
        ("anna-marie vearing-brown", ["", "", "", "", "", ""]),
        ("lisa-jane o'connor", ["", "", "", "", "", ""]),
        ("julie-anne o'malley", ["", "", "", "", "", ""]),
        ("mary-jane o'doherty", ["", "", "", "", "", ""]),
        ("jose-carol o'leary", ["", "", "", "", "", ""]),
        ("rose-merrie o'kane", ["", "", "", "", "", ""]),
        ("ymeka-emily o'neill", ["", "", "", "", "", ""]),
    ]
    ## check field spill - have 2 input fields
    # Name pairs with surnames first; same expected-output layout as above.
    self.names_snames = [
        ("Christen Peter", ["male", "", "peter", "", "christen", ""]),
        ("Christen, Peter", ["male", "", "peter", "", "christen", ""]),
        ("Mr Christen Peter", ["male", "mr", "peter", "", "christen", ""]),
        ("Mister CHRISTEN, Peter", ["male", "mr", "peter", "", "christen", ""]),
        ("Christen Petra", ["female", "", "petra", "", "christen", ""]),
        ("Ms Christen, petra", ["female", "ms", "petra", "", "christen", ""]),
        ("Misses CHRISTEN, PETRA", ["female", "ms", "petra", "", "christen", ""]),
        ("peter almond", ["male", "", "peter", "", "almond", ""]),
        ("almond peter", ["male", "", "peter", "", "almond", ""]),
        ("", ["", "", "", "", "", ""]),
        ("Peter", ["male", "", "peter", "", "", ""]),
        ("Christen", ["", "", "", "", "christen", ""]),
        ("Jane", ["female", "", "jane", "", "", ""]),
        ("miss anita", ["female", "ms", "anita", "", "", ""]),
        ("mr p. christen", ["male", "mr", "p", "", "christen", ""]),
        ("jones, Peter mary", ["", "", "peter", "mary", "jones", ""]),
        ("mr jones Peter mary", ["male", "mr", "peter", "mary", "jones", ""]),
        (
            "mister jones-miller, Paul PETER",
            ["male", "mr", "paul", "peter", "jones", "miller"],
        ),
    ]
    # Title words used for gender guessing.
    self.name_male_titles = ["mr"]
    self.name_female_titles = ["ms"]
    # Lookup tables for name tagging and correction, loaded from the
    # project's shared data directory (relative to the test directory).
    src_data_dir = ".." + os.sep + "data" + os.sep + "lookup" + os.sep
    self.name_tag_table = lookup.TagLookupTable(
        descr="Name tag test table")
    self.name_tag_table.load([
        src_data_dir + "givenname_f.tbl",
        src_data_dir + "givenname_m.tbl",
        src_data_dir + "name_misc.tbl",
        src_data_dir + "name_prefix.tbl",
        src_data_dir + "name_title.tbl",
        src_data_dir + "saints.tbl",
        src_data_dir + "surname.tbl",
    ])
    self.name_corr_list = lookup.CorrectionList(
        descr="Name corr test list")
    self.name_corr_list.load(src_data_dir + "name_corr.lst")
def testPhoneNumStandardiserNone(self):
    """Test phone number standardiser routines.

    NOTE(review): the bare ``return`` below disables this entire test —
    everything after it is dead code.  Presumably this was a deliberate
    temporary disable (the date test in this file has the same pattern);
    confirm and either remove the ``return`` or mark the test as skipped.
    """
    return
    # Standardiser under test: country_name and extension outputs are
    # suppressed by passing None in the output field list.
    ps = standardisation.PhoneNumStandardiser(
        descript="Test phone number standardiser",
        input_fields=["in_phonenum"],
        output_fiel=["country_code", None, "area_code", "number", None],
    )
    rs = standardisation.RecordStandardiser(
        descr="Test record standardiser",
        input_dataset=self.in_ds,
        output_dataset=self.out_ds,
        comp_stand_list=[ps],
    )
    # Check each raw phone number against its expected standardisation.
    for (phonenum_str, phonenum_res) in self.phonenums:
        clean_phonenum_str = ps.clean_component(phonenum_str)
        test_phonenum_res = ps.standardise(phonenum_str, clean_phonenum_str)
        assert phonenum_res == test_phonenum_res, (
            "Wrong phone number standardisation: %s, should be: %s"
            % (str(test_phonenum_res), str(phonenum_res)))
    rs.standardise()  # Use record standardiser and write output file
    # Test the content of the output data set
    #
    test_ds = dataset.DataSetCSV(
        description="Test standardised data set",
        access_mode="read",
        rec_ident="rec_id",
        field_list=[],
        header_line=True,
        write_header=True,
        file_name="test-standardised-dataset.csv",
    )
    # Columns 3..7 of the output data set hold the phone components;
    # country_name and extension were suppressed so they must be empty.
    i = 0
    for (rec_id, rec_list) in test_ds.readall():
        test_country_code = rec_list[3]
        test_country_name = rec_list[4]
        test_area_code = rec_list[5]
        test_number = rec_list[6]
        test_extension = rec_list[7]
        true_country_code = self.phonenums[i][1][0]
        true_area_code = self.phonenums[i][1][2]
        true_number = self.phonenums[i][1][3]
        assert test_country_code == true_country_code, (
            i,
            rec_list[3:8],
            self.phonenums[i][1],
        )
        assert test_country_name == "", (i, rec_list[3:8], self.phonenums[i][1])
        assert test_area_code == true_area_code, (
            i,
            rec_list[3:8],
            self.phonenums[i][1],
        )
        assert test_number == true_number, (i, rec_list[3:8], self.phonenums[i][1])
        assert test_extension == "", (i, rec_list[3:8], self.phonenums[i][1])
        i += 1
def run():
    """Run the blocking/classification experiment grid.

    For each data set ('fl', 'nc'), corruption/missing percentage,
    blocking-key set, indexing method, tightness flag and parameter set:
    build an index, compare record pairs, classify them with a
    Fellegi-Sunter classifier, and append one result line per parameter
    set to the method's result file.

    Fix over original: the result file was opened with ``open(...)`` and
    never closed, leaking one file handle per (method, keys, is_tight)
    combination; it is now managed with a ``with`` block.

    Relies on module-level configuration (``fl_ds_dir``, ``nc_ds_dir``,
    ``*_keys_list``, ``*_field_list``, ``methods``, ``ds_size``,
    ``duplicate_percentage``, ``corruption_percentage_list``,
    ``missing_percentage_list``) and on helpers defined elsewhere in this
    file (``get_params_list``, ``is_params_not_allowed``,
    ``get_index_def``).
    """
    for ds in ['fl', 'nc']:
        # Select per-data-set configuration.
        if (ds == 'fl'):
            ds_dir = fl_ds_dir
            ds_keys_list = fl_keys_list
            ds_field_list = fl_field_list
        elif (ds == 'nc'):
            ds_dir = nc_ds_dir
            ds_keys_list = nc_keys_list
            ds_field_list = nc_field_list
        for corruption_percentage in corruption_percentage_list:
            for missing_percentage in missing_percentage_list:
                # This combination is excluded from the experiment grid.
                if (corruption_percentage == 5 and missing_percentage == 20):
                    continue
                ds_path = os.path.join(
                    ds_dir, '{}_missing_{}_corruption_{}.txt'.format(
                        ds_size, missing_percentage, corruption_percentage))

                # Input data set A (deduplication: compared against itself).
                data_set_a = dataset.DataSetCSV(description="Data set",
                                                access_mode="read",
                                                strip_fields=True,
                                                miss_val=[''],
                                                rec_ident='r',
                                                file_name=ds_path,
                                                header_line=False,
                                                delimiter=",",
                                                field_list=ds_field_list)

                # Field comparison: exact string match on the true identifier,
                # used to measure linkage quality.
                fc_funct_1 = comparison.FieldComparatorExactString(
                    agree_weight=1.0,
                    description="Str-Exact-field-17-field-17",
                    disagree_weight=0.0,
                    missing_weight=0.0)
                field_comp_list = [(fc_funct_1, "identifier", "identifier")]
                rec_comp = comparison.RecordComparator(data_set_a, data_set_a,
                                                       field_comp_list)

                for keys_idx, keys in enumerate(ds_keys_list):
                    for method_idx, method in enumerate(methods):
                        for is_tight in [True, False]:
                            result_file_path = myutil.get_result_path(
                                ds_path, method, keys_idx, is_tight)
                            # 'with' guarantees the append-mode result file is
                            # closed (the original leaked the handle).
                            with open(result_file_path, 'a') as result_file:
                                for params_idx, params in enumerate(
                                        get_params_list(method_idx, keys_idx,
                                                        is_tight)):
                                    # Skip disallowed or already-computed runs.
                                    if (is_params_not_allowed(
                                            keys_idx, method_idx, is_tight,
                                            params_idx) or
                                            myutil.is_result_already_stored(
                                                result_file_path, params_idx)):
                                        continue
                                    print(params)
                                    index_def = get_index_def(
                                        method_idx, keys, params, data_set_a,
                                        rec_comp)

                                    # Reset logging so each run gets its own
                                    # fresh log file.
                                    for handler in logging.root.handlers[:]:
                                        logging.root.removeHandler(handler)
                                    log_file_path = myutil.get_log_path(
                                        ds_path, method, keys_idx, is_tight,
                                        params_idx)
                                    logging.basicConfig(filename=log_file_path,
                                                        filemode='w',
                                                        level=logging.INFO)
                                    logging.getLogger()

                                    # Blocking phase: build/compact the index,
                                    # then compare the candidate record pairs.
                                    blocking_start_time = time.time()
                                    index_def.build()
                                    index_def.compact()
                                    [field_names_list,
                                     w_vec_dict] = index_def.run()
                                    blocking_end_time = time.time()
                                    blocking_time = (blocking_end_time -
                                                     blocking_start_time)

                                    # Classification phase: unsupervised
                                    # Fellegi-Sunter at a single threshold.
                                    comparison_start_time = time.time()
                                    classifier = classification.FellegiSunter(
                                        lower_threshold=0.99,
                                        upper_threshold=0.99)
                                    class_w_vec_dict = w_vec_dict  # Use original weight vector dictionary
                                    classifier.train(class_w_vec_dict, set(),
                                                     set())
                                    [m_set, nm_set, pm_set
                                     ] = classifier.classify(class_w_vec_dict)
                                    comparison_end_time = time.time()
                                    comparison_time = (comparison_end_time -
                                                       comparison_start_time)

                                    # Derive quality metrics from the weight
                                    # histogram and save the match status.
                                    histo_str_list = output.GenerateHistogram(
                                        class_w_vec_dict, 1.0)
                                    print(histo_str_list)
                                    (match_count, recall, reduction_ratio,
                                     total_comparisons) = myutil.get_metrics(
                                        ds_size, duplicate_percentage,
                                        histo_str_list)
                                    match_file_path = myutil.get_matches_path(
                                        ds_path, method, keys_idx, is_tight,
                                        params_idx)
                                    output.SaveMatchStatusFile(
                                        class_w_vec_dict, m_set,
                                        match_file_path)

                                    print(
                                        '{} {} {}|{}, {:.2f}, {:.2f}, {}, {:.2f}, {:.2f}\n'
                                        .format(method, keys_idx, params_idx,
                                                match_count, recall,
                                                reduction_ratio,
                                                total_comparisons,
                                                blocking_time,
                                                comparison_time))
                                    result_file.write(
                                        '{}|{}, {:.2f}, {:.2f}, {}, {:.2f}, {:.2f}\n'
                                        .format(params_idx, match_count,
                                                recall, reduction_ratio,
                                                total_comparisons,
                                                blocking_time,
                                                comparison_time))
                                    # Flush per run so partial results survive
                                    # a crash mid-grid.
                                    result_file.flush()