def test_grouped_filter(self):
    """A sub-filter attached via group() must render inside parentheses."""
    sub_filter = Filter().gt('Coolant', 212).or_().gt('RPM', 9000)
    query_filter = Filter().lt('LapCount', 1).group(sub_filter)
    rendered = str(query_filter).strip()
    self.assertSequenceEqual(
        rendered,
        'datapoint.LapCount < 1 AND (datapoint.Coolant > 212 OR datapoint.RPM > 9000)')
def test_chained_filter(self):
    """Chained comparisons join with AND by default; or_() switches the next join to OR."""
    query_filter = Filter().lt('LapCount', 1).gt('Coolant', 212).or_().eq('RPM', 9001)
    rendered = str(query_filter).strip()
    self.assertSequenceEqual(
        rendered,
        'datapoint.LapCount < 1 AND datapoint.Coolant > 212 OR datapoint.RPM = 9001')
def test_not_equal_filter(self):
    """neq() must render as the != operator."""
    rendered = str(Filter().neq('LapCount', 1)).strip()
    self.assertSequenceEqual(rendered, 'datapoint.LapCount != 1')
def test_basic_filter(self):
    """A single lt() comparison must render as the < operator."""
    rendered = str(Filter().lt('LapCount', 1)).strip()
    self.assertSequenceEqual(rendered, 'datapoint.LapCount < 1')
def test_dataset_record_oriented(self):
    """fetch_records(n) must return exactly n records."""
    dataset = self.ds.query(channels=['Coolant', 'RPM', 'MAP'],
                            data_filter=Filter().lt('LapCount', 1))
    self.assertEqual(len(dataset.fetch_records(100)), 100)
def test_dataset_val_count(self):
    """Every column returned by fetch_columns(n) must hold exactly n samples."""
    f = Filter().lt('LapCount', 1)
    dataset = self.ds.query(channels=['Coolant', 'RPM', 'MAP'], data_filter=f)
    samples = dataset.fetch_columns(100)
    # Iterate the values directly instead of looping over keys() and
    # re-indexing the dict on every iteration.
    for column in samples.values():
        self.assertEqual(len(column), 100)
def test_dataset_columns(self):
    """The dataset must expose exactly the channels requested in the query."""
    f = Filter().lt('LapCount', 1)
    dataset = self.ds.query(channels=['Coolant', 'RPM', 'MAP'], data_filter=f)
    # assertCountEqual compares order-insensitively without sorting either
    # list in place -- the previous dset_channels.sort() mutated the list
    # returned by dataset.channels, which the dataset may still reference.
    self.assertCountEqual(dataset.channels, ['Coolant', 'RPM', 'MAP'])
def test_get_all_laptimes(self):
    """Distinct records for session 1 must yield the known lap time per lap."""
    f = Filter().gt('LapCount', 0)
    dataset = self.ds.query(sessions=[1],
                            channels=['LapCount', 'LapTime'],
                            data_filter=f,
                            distinct_records=True)
    # Record layout: r[1] is LapCount, r[2] is LapTime.
    laptimes = {int(r[1]): r[2] for r in dataset.fetch_records()}
    expected = {
        1: 3.437, 2: 2.257, 3: 2.227, 4: 2.313, 5: 2.227,
        6: 2.227, 7: 2.423, 8: 2.31, 9: 2.223, 10: 2.233,
        11: 2.247, 12: 2.24, 13: 2.25, 14: 2.237, 15: 2.243,
        16: 2.29, 17: 2.387, 18: 2.297, 19: 2.383, 20: 2.177,
        21: 2.207, 22: 2.18, 23: 2.17, 24: 2.22, 25: 2.217,
        26: 2.223, 27: 2.173, 28: 2.19, 29: 2.33, 30: 2.227,
        31: 2.257, 32: 2.183, 33: 2.163, 34: 2.23, 35: 2.23,
        36: 2.54, 37: 3.383,
    }
    # One dict comparison replaces 37 separate assertEqual calls: a failure
    # reports every mismatched lap at once, and (unlike the old per-key
    # asserts) unexpected extra laps now also fail the test.
    self.assertEqual(laptimes, expected)
def test_distinct_queries(self):
    """Sanity check that the distinct keyword is always obeyed.

    With distinct_records=True duplicate rows are collapsed, so the
    result set should be much smaller than the raw query's.
    """
    lap_filter = Filter().gt('LapCount', 0)
    common_args = dict(sessions=[1],
                       channels=['LapCount', 'LapTime'],
                       data_filter=lap_filter)
    raw = self.ds.query(**common_args)
    self.assertEqual(len(raw.fetch_records()), 24667)
    deduped = self.ds.query(distinct_records=True, **common_args)
    self.assertEqual(len(deduped.fetch_records()), 37)