def setUp(self, timeout_seconds=5):
    self.contexts = []
    self.tempdir = TemporaryDirectory()
    self.contexts.append(self.tempdir)
    self.port = unused_tcp_port()

    # pick the stdlib HTTP server module appropriate for the interpreter
    if sys.version_info[0] == 2:
        module = "SimpleHTTPServer"
    elif sys.version_info[0] == 3:
        module = "http.server"
    else:
        raise Exception("unknown python version")

    self.contexts.append(ContextualChildProcess(
        [
            "python",
            "-m",
            module,
            str(self.port),
        ],
        cwd=self.tempdir.name,
    ).__enter__())

    # poll until the server accepts connections, or give up after the timeout
    end = time.time() + timeout_seconds
    while True:
        try:
            requests.get("http://0.0.0.0:{port}".format(port=self.port))
            break
        except requests.ConnectionError:
            if time.time() > end:
                raise
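# setUp registers each context it enters in self.contexts, but no matching
# tearDown appears in this section; a minimal sketch of one, assuming every
# entry supports the context-manager protocol (this method is an assumption,
# not part of the original suite):
def tearDown(self):
    # unwind contexts in reverse order of creation, so the child process
    # exits before the temporary directory it runs in is removed
    for context in reversed(self.contexts):
        context.__exit__(None, None, None)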
def test_write_to_csv():
    """ Test the write_to_csv function by checking whether the csv
    contains the correct number of lines.
    """
    with testing.postgresql.Postgresql() as postgresql:
        # create an engine and generate a table with fake feature data
        engine = create_engine(postgresql.url())
        create_schemas(engine=engine, features_tables=features_tables,
                       labels=labels, states=states)

        with TemporaryDirectory() as temp_dir:
            planner = Planner(
                feature_start_time=datetime.datetime(2010, 1, 1, 0, 0),
                label_names=['booking'],
                label_types=['binary'],
                states=['state_one AND state_two'],
                db_config=db_config,
                matrix_directory=temp_dir,
                user_metadata={},
                engine=engine,
                builder_class=builders.HighMemoryCSVBuilder,
            )

            # for each table, check that the corresponding csv has the
            # correct number of rows
            for i, table in enumerate(features_tables):
                planner.builder.write_to_csv(
                    '''
                        select *
                        from features.features{}
                    '''.format(i),
                    'test_csv.csv',
                )
                reader = csv.reader(
                    planner.builder.open_fh_for_reading('test_csv.csv'))
                assert len([row for row in reader]) == len(table) + 1
def test_make_entity_date_table():
    """ Test that the make_entity_date_table function contains the
    correct values.
    """
    dates = [
        datetime.datetime(2016, 1, 1, 0, 0),
        datetime.datetime(2016, 2, 1, 0, 0),
        datetime.datetime(2016, 3, 1, 0, 0)
    ]

    # make a dataframe of entity ids and dates to test against
    ids_dates = create_entity_date_df(
        labels=labels,
        states=states,
        as_of_dates=dates,
        state_one=True,
        state_two=True,
        label_name='booking',
        label_type='binary',
        label_timespan='1 month',
    )

    with testing.postgresql.Postgresql() as postgresql:
        # create an engine and generate a table with fake feature data
        engine = create_engine(postgresql.url())
        create_schemas(engine=engine, features_tables=features_tables,
                       labels=labels, states=states)

        with TemporaryDirectory() as temp_dir:
            planner = Planner(
                feature_start_time=datetime.datetime(2010, 1, 1, 0, 0),
                label_names=['booking'],
                label_types=['binary'],
                states=['state_one AND state_two'],
                db_config=db_config,
                matrix_directory=temp_dir,
                user_metadata={},
                engine=engine,
            )
            engine.execute(
                'CREATE TABLE features.tmp_entity_date (a int, b date);')

            # call the function to test the creation of the table
            entity_date_table_name = planner.builder.make_entity_date_table(
                as_of_times=dates,
                label_type='binary',
                label_name='booking',
                state='state_one AND state_two',
                matrix_uuid='my_uuid',
                matrix_type='train',
                label_timespan='1 month',
            )

            # read in the table
            result = pd.read_sql(
                'select * from features.{} order by entity_id, as_of_date'
                .format(entity_date_table_name),
                engine,
            )
            labels_df = pd.read_sql('select * from labels.labels', engine)

            # compare the table to the test dataframe
            test = (result == ids_dates)
            assert test.all().all()
def test_test_matrix(self):
    with testing.postgresql.Postgresql() as postgresql:
        # create an engine and generate a table with fake feature data
        engine = create_engine(postgresql.url())
        create_schemas(engine=engine, features_tables=features_tables,
                       labels=labels, states=states)

        dates = [
            datetime.datetime(2016, 1, 1, 0, 0),
            datetime.datetime(2016, 2, 1, 0, 0),
            datetime.datetime(2016, 3, 1, 0, 0)
        ]

        with TemporaryDirectory() as temp_dir:
            planner = Planner(
                feature_start_time=datetime.datetime(2010, 1, 1, 0, 0),
                label_names=['booking'],
                label_types=['binary'],
                states=['state_one AND state_two'],
                db_config=db_config,
                matrix_directory=temp_dir,
                user_metadata={},
                engine=engine,
            )

            matrix_dates = {
                'first_as_of_time': datetime.datetime(2016, 1, 1, 0, 0),
                'matrix_info_end_time': datetime.datetime(2016, 3, 1, 0, 0),
                'as_of_times': dates,
            }
            feature_dictionary = {
                'features0': ['f1', 'f2'],
                'features1': ['f3', 'f4'],
            }
            matrix_metadata = {
                'matrix_id': 'hi',
                'state': 'state_one AND state_two',
                'label_name': 'booking',
                'end_time': datetime.datetime(2016, 3, 1, 0, 0),
                'feature_start_time': datetime.datetime(2016, 1, 1, 0, 0),
                'label_timespan': '1 month',
            }
            uuid = metta.generate_uuid(matrix_metadata)
            planner.build_matrix(
                as_of_times=dates,
                label_name='booking',
                label_type='binary',
                feature_dictionary=feature_dictionary,
                matrix_directory=temp_dir,
                matrix_metadata=matrix_metadata,
                matrix_uuid=uuid,
                matrix_type='test',
            )

            matrix_filename = os.path.join(temp_dir, '{}.csv'.format(uuid))
            with open(matrix_filename, 'r') as f:
                reader = csv.reader(f)
                assert len([row for row in reader]) == 6
def test_badinput(self):
    with TemporaryDirectory() as temp_dir:
        planner = Planner(
            feature_start_time=datetime.datetime(2010, 1, 1, 0, 0),
            label_names=['booking'],
            label_types=['binary'],
            states=['state_one AND state_two'],
            db_config=db_config,
            matrix_directory=temp_dir,
            user_metadata={},
            engine=None,
            builder_class=builders.LowMemoryCSVBuilder,
        )
        # the first file's (entity_id, date) keys are misaligned with the
        # other two, so merging the files should fail
        rowlists = [
            [
                ('entity_id', 'date', 'f1'),
                (1, 3, 3),
                (4, 5, 6),
                (7, 8, 9),
            ],
            [
                ('entity_id', 'date', 'f2'),
                (1, 2, 3),
                (4, 5, 9),
                (7, 8, 15),
            ],
            [
                ('entity_id', 'date', 'f3'),
                (1, 2, 2),
                (4, 5, 20),
                (7, 8, 56),
            ],
        ]

        sourcefiles = []
        for rows in rowlists:
            f = NamedTempFile()
            sourcefiles.append(f)
            writer = csv.writer(f)
            for row in rows:
                writer.writerow(row)
            f.seek(0)

        try:
            with self.assertRaises(ValueError):
                planner.builder.merge_feature_csvs(
                    [f.name for f in sourcefiles],
                    matrix_directory=temp_dir,
                    matrix_uuid='1234',
                )
        finally:
            for sourcefile in sourcefiles:
                sourcefile.close()
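# The merge tests above and below write through a NamedTempFile helper that
# is not defined in this section; a minimal sketch, assuming it only needs
# to wrap tempfile.NamedTemporaryFile in a text mode that csv.writer can use
# (the name and signature are assumptions, and `import tempfile` is assumed):
def NamedTempFile():
    # 'w+' so csv.writer can write strings and the test can seek(0) before
    # handing the file's name to merge_feature_csvs
    return tempfile.NamedTemporaryFile(mode='w+', suffix='.csv')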
def test_write_collection(self):
    image = slicedimage.TileSet(
        ["x", "y", "ch", "hyb"],
        {'ch': 2, 'hyb': 2},
        (100, 100),
    )
    for hyb in range(2):
        for ch in range(2):
            tile = slicedimage.Tile(
                {
                    'x': (0.0, 0.01),
                    'y': (0.0, 0.01),
                },
                {
                    'hyb': hyb,
                    'ch': ch,
                },
            )
            tile.numpy_array = numpy.zeros((100, 100))
            tile.numpy_array[hyb, ch] = 1
            image.add_tile(tile)
    collection = slicedimage.Collection()
    collection.add_partition("fov002", image)

    with TemporaryDirectory() as tempdir, \
            tempfile.NamedTemporaryFile(suffix=".json", dir=tempdir) as partition_file:
        partition_doc = slicedimage.v0_0_0.Writer().generate_partition_document(
            collection, partition_file.name)
        writer = codecs.getwriter("utf-8")
        json.dump(partition_doc, writer(partition_file))
        partition_file.flush()

        basename = os.path.basename(partition_file.name)
        baseurl = "file://{}".format(os.path.dirname(partition_file.name))

        loaded = slicedimage.Reader.parse_doc(basename, baseurl)

        for hyb in range(2):
            for ch in range(2):
                tiles = [
                    _tile
                    for _tile in loaded.tiles(
                        lambda tile: (tile.indices['hyb'] == hyb and
                                      tile.indices['ch'] == ch))
                ]
                self.assertEqual(len(tiles), 1)

                expected = numpy.zeros((100, 100))
                expected[hyb, ch] = 1

                # compare the arrays element-wise; the original compared
                # .all() to .all(), which passes for any contents
                self.assertTrue(numpy.array_equal(tiles[0].numpy_array, expected))
                self.assertIsNotNone(tiles[0].sha256)
def test_badinput(self):
    """We assert column names, so replacing 'date' with 'as_of_date'
    should result in an error"""
    with TemporaryDirectory() as temp_dir:
        planner = Planner(
            feature_start_time=datetime.datetime(2010, 1, 1, 0, 0),
            label_names=['booking'],
            label_types=['binary'],
            states=['state_one AND state_two'],
            db_config=db_config,
            matrix_directory=temp_dir,
            user_metadata={},
            engine=None,
        )
        rowlists = [
            [
                ('entity_id', 'date', 'f1'),
                (1, 3, 3),
                (4, 5, 6),
                (7, 8, 9),
            ],
            [
                ('entity_id', 'date', 'f2'),
                (1, 2, 3),
                (4, 5, 9),
                (7, 8, 15),
            ],
            [
                ('entity_id', 'date', 'f3'),
                (1, 2, 2),
                (4, 5, 20),
                (7, 8, 56),
            ],
        ]

        filekeys = []
        for rows in rowlists:
            filekey = uuid.uuid4()
            planner.builder.open_fh_for_writing(filekey)
            filekeys.append(filekey)
            writer = csv.writer(planner.builder.filehandles[filekey])
            for row in rows:
                writer.writerow(row)

        with self.assertRaises(KeyError):
            planner.builder.merge_feature_csvs(
                filekeys,
                matrix_directory=temp_dir,
                matrix_uuid='1234',
            )
def test_merge_feature_csvs_lowmem(self):
    with TemporaryDirectory() as temp_dir:
        planner = Planner(
            feature_start_time=datetime.datetime(2010, 1, 1, 0, 0),
            label_names=['booking'],
            label_types=['binary'],
            states=['state_one AND state_two'],
            db_config=db_config,
            matrix_directory=temp_dir,
            user_metadata={},
            engine=None,
            builder_class=builders.LowMemoryCSVBuilder,
        )
        rowlists = [
            [
                ('entity_id', 'date', 'label'),
                (1, 2, True),
                (4, 5, False),
                (7, 8, True),
            ],
            [
                ('entity_id', 'date', 'f1'),
                (1, 2, 3),
                (4, 5, 6),
                (7, 8, 9),
            ],
            [
                ('entity_id', 'date', 'f2'),
                (1, 2, 3),
                (4, 5, 9),
                (7, 8, 15),
            ],
            [
                ('entity_id', 'date', 'f3'),
                (1, 2, 2),
                (4, 5, 20),
                (7, 8, 56),
            ],
        ]

        sourcefiles = []
        for rows in rowlists:
            f = NamedTempFile()
            sourcefiles.append(f)
            writer = csv.writer(f)
            for row in rows:
                writer.writerow(row)
            f.seek(0)

        try:
            outfilename = planner.builder.merge_feature_csvs(
                [f.name for f in sourcefiles],
                matrix_directory=temp_dir,
                matrix_uuid='1234',
            )
            with open(outfilename) as outfile:
                reader = csv.reader(outfile)
                result = [row for row in reader]
                self.assertEqual(result, [
                    ['entity_id', 'date', 'f1', 'f2', 'f3', 'label'],
                    ['1', '2', '3', '3', '2', 'True'],
                    ['4', '5', '6', '9', '20', 'False'],
                    ['7', '8', '9', '15', '56', 'True'],
                ])
        finally:
            for sourcefile in sourcefiles:
                sourcefile.close()
def test_write_labels_data():
    """ Test the write_labels_data function by checking whether the
    query produces the correct labels
    """
    # set up labeling config variables
    dates = [datetime.datetime(2016, 1, 1, 0, 0),
             datetime.datetime(2016, 2, 1, 0, 0)]

    # make a dataframe of labels to test against
    labels_df = pd.DataFrame(
        labels,
        columns=[
            'entity_id',
            'as_of_date',
            'label_timespan',
            'label_name',
            'label_type',
            'label',
        ],
    )
    labels_df['as_of_date'] = convert_string_column_to_date(labels_df['as_of_date'])
    labels_df = labels_df.set_index(['entity_id', 'as_of_date'])

    # create an engine and generate a table with fake feature data
    with testing.postgresql.Postgresql() as postgresql:
        engine = create_engine(postgresql.url())
        create_schemas(engine, features_tables, labels, states)

        with TemporaryDirectory() as temp_dir:
            planner = Planner(
                feature_start_time=datetime.datetime(2010, 1, 1, 0, 0),
                label_names=['booking'],
                label_types=['binary'],
                states=['state_one AND state_two'],
                db_config=db_config,
                matrix_directory=temp_dir,
                user_metadata={},
                engine=engine,
                builder_class=builders.LowMemoryCSVBuilder,
            )

            # make the entity-date table
            entity_date_table_name = planner.builder.make_entity_date_table(
                as_of_times=dates,
                label_type='binary',
                label_name='booking',
                state='state_one AND state_two',
                matrix_type='train',
                matrix_uuid='my_uuid',
                label_timespan='1 month',
            )

            csv_filename = planner.builder.write_labels_data(
                label_name='booking',
                label_type='binary',
                label_timespan='1 month',
                matrix_uuid='my_uuid',
                entity_date_table_name=entity_date_table_name,
            )
            df = pd.DataFrame.from_dict({
                'entity_id': [2, 3, 4, 4],
                'as_of_date': ['2016-02-01', '2016-02-01', '2016-01-01', '2016-02-01'],
                'booking': [0, 0, 1, 0],
            }).set_index(['entity_id', 'as_of_date'])

            result = pd.read_csv(csv_filename).set_index(['entity_id', 'as_of_date'])
            test = (result == df)
            assert test.all().all()
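# The label and feature tests call a convert_string_column_to_date helper
# that is not shown in this section; a minimal sketch, assuming it parses a
# pandas column of 'YYYY-MM-DD' strings into datetime.date values suitable
# for comparison against query results (the exact behavior is an assumption):
def convert_string_column_to_date(column):
    return pd.to_datetime(column).dt.date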
def test_write_features_data():
    dates = [datetime.datetime(2016, 1, 1, 0, 0),
             datetime.datetime(2016, 2, 1, 0, 0)]

    # make dataframe for entity ids and dates
    ids_dates = create_entity_date_df(
        labels=labels,
        states=states,
        as_of_dates=dates,
        state_one=True,
        state_two=True,
        label_name='booking',
        label_type='binary',
        label_timespan='1 month',
    )

    features = [['f1', 'f2'], ['f3', 'f4']]

    # make dataframes of features to test against
    features_dfs = []
    for i, table in enumerate(features_tables):
        cols = ['entity_id', 'as_of_date'] + features[i]
        temp_df = pd.DataFrame(table, columns=cols)
        temp_df['as_of_date'] = convert_string_column_to_date(temp_df['as_of_date'])
        features_dfs.append(
            ids_dates.merge(
                right=temp_df,
                how='left',
                on=['entity_id', 'as_of_date'],
            )
        )

    # create an engine and generate a table with fake feature data
    with testing.postgresql.Postgresql() as postgresql:
        engine = create_engine(postgresql.url())
        create_schemas(
            engine=engine,
            features_tables=features_tables,
            labels=labels,
            states=states,
        )

        with TemporaryDirectory() as temp_dir:
            planner = Planner(
                feature_start_time=datetime.datetime(2010, 1, 1, 0, 0),
                label_names=['booking'],
                label_types=['binary'],
                states=['state_one AND state_two'],
                db_config=db_config,
                matrix_directory=temp_dir,
                user_metadata={},
                engine=engine,
                builder_class=builders.LowMemoryCSVBuilder,
            )

            # make the entity-date table
            entity_date_table_name = planner.builder.make_entity_date_table(
                as_of_times=dates,
                label_type='binary',
                label_name='booking',
                state='state_one AND state_two',
                matrix_type='train',
                matrix_uuid='my_uuid',
                label_timespan='1 month',
            )

            feature_dictionary = dict(
                ('features{}'.format(i), feature_list)
                for i, feature_list in enumerate(features)
            )
            features_csv_names = planner.builder.write_features_data(
                as_of_times=dates,
                feature_dictionary=feature_dictionary,
                entity_date_table_name=entity_date_table_name,
                matrix_uuid='my_uuid',
            )

            # get the queries and test them
            for feature_csv_name, df in zip(sorted(features_csv_names), features_dfs):
                df = df.fillna(0)
                df = df.reset_index()
                result = pd.read_csv(feature_csv_name).reset_index()
                result['as_of_date'] = convert_string_column_to_date(result['as_of_date'])
                test = (result == df)
                assert test.all().all()
def test_nullcheck(self):
    f0_dict = {(r[0], r[1]): r for r in features0_pre}
    f1_dict = {(r[0], r[1]): r for r in features1_pre}

    features0 = sorted(f0_dict.values(), key=lambda x: (x[1], x[0]))
    features1 = sorted(f1_dict.values(), key=lambda x: (x[1], x[0]))

    features_tables = [features0, features1]

    with testing.postgresql.Postgresql() as postgresql:
        # create an engine and generate a table with fake feature data
        engine = create_engine(postgresql.url())
        create_schemas(engine=engine, features_tables=features_tables,
                       labels=labels, states=states)

        dates = [
            datetime.datetime(2016, 1, 1, 0, 0),
            datetime.datetime(2016, 2, 1, 0, 0),
            datetime.datetime(2016, 3, 1, 0, 0)
        ]

        with TemporaryDirectory() as temp_dir:
            planner = Planner(
                feature_start_time=datetime.datetime(2010, 1, 1, 0, 0),
                label_names=['booking'],
                label_types=['binary'],
                states=['state_one AND state_two'],
                db_config=db_config,
                matrix_directory=temp_dir,
                user_metadata={},
                engine=engine,
            )

            matrix_dates = {
                'first_as_of_time': datetime.datetime(2016, 1, 1, 0, 0),
                'matrix_info_end_time': datetime.datetime(2016, 3, 1, 0, 0),
                'as_of_times': dates,
            }
            feature_dictionary = {
                'features0': ['f1', 'f2'],
                'features1': ['f3', 'f4'],
            }
            matrix_metadata = {
                'matrix_id': 'hi',
                'state': 'state_one AND state_two',
                'label_name': 'booking',
                'end_time': datetime.datetime(2016, 3, 1, 0, 0),
                'feature_start_time': datetime.datetime(2016, 1, 1, 0, 0),
                'label_timespan': '1 month',
            }
            uuid = metta.generate_uuid(matrix_metadata)
            with self.assertRaises(ValueError):
                planner.build_matrix(
                    as_of_times=dates,
                    label_name='booking',
                    label_type='binary',
                    feature_dictionary=feature_dictionary,
                    matrix_directory=temp_dir,
                    matrix_metadata=matrix_metadata,
                    matrix_uuid=uuid,
                    matrix_type='test',
                )