def test_read_errors(self): formatter = GNUPlotFormat() # non-comment line at the beginning location = self.locations[0] data = DataSet(location=location) os.makedirs(location, exist_ok=True) with open(location + '/x_set.dat', 'w') as f: f.write('1\t2\n' + file_1d()) with LogCapture() as logs: formatter.read(data) self.assertTrue('ValueError' in logs.value, logs.value) # same data array in 2 files location = self.locations[1] data = DataSet(location=location) os.makedirs(location, exist_ok=True) with open(location + '/x_set.dat', 'w') as f: f.write('\n'.join( ['# x_set\ty', '# "X"\t"Y"', '# 2', '1\t2', '3\t4'])) with open(location + '/q.dat', 'w') as f: f.write('\n'.join(['# q\ty', '# "Q"\t"Y"', '# 2', '1\t2', '3\t4'])) with LogCapture() as logs: formatter.read(data) self.assertTrue('ValueError' in logs.value, logs.value)
def test_full_write(self):
    """Round-trip a 1D DataSet through GNUPlotFormat write and read.

    Verifies the on-disk format matches file_1d(), that extra comment
    lines and un-quoted labels are tolerated on read, that
    last_saved_index survives the round trip, and two error/refresh
    behaviors of read().
    """
    formatter = GNUPlotFormat()
    location = self.locations[0]
    data = DataSet1D(name="test_full_write", location=location)

    formatter.write(data, data.io, data.location)

    with open(location + '/x_set.dat') as f:
        self.assertEqual(f.read(), file_1d())

    # check that we can add comment lines randomly into the file
    # as long as it's after the first three lines, which are comments
    # with well-defined meaning,
    # and that we can un-quote the labels
    lines = file_1d().split('\n')
    lines[1] = lines[1].replace('"', '')
    lines[3:3] = ['# this data is awesome!']
    lines[6:6] = ['# the next point is my favorite.']
    with open(location + '/x_set.dat', 'w') as f:
        f.write('\n'.join(lines))

    # normally this would be just done by data2 = load_data(location)
    # but we want to work directly with the Formatter interface here
    data2 = DataSet(location=location)
    formatter.read(data2)
    self.checkArraysEqual(data2.x_set, data.x_set)
    self.checkArraysEqual(data2.y, data.y)
    # data has been saved
    self.assertEqual(data.y.last_saved_index, 4)
    # data2 has been read back in, should show the same
    # last_saved_index
    self.assertEqual(data2.y.last_saved_index, 4)

    # while we're here, check some errors on bad reads
    # first: trying to read into a dataset that already has the
    # wrong size
    x = DataArray(name='x_set', label='X', preset_data=(1., 2.))
    y = DataArray(name='y', label='Y', preset_data=(3., 4.),
                  set_arrays=(x, ))
    data3 = new_data(arrays=(x, y), location=location + 'XX')
    # initially give it a different location so we can make it without
    # error, then change back to the location we want.
    data3.location = location
    with LogCapture() as logs:
        formatter.read(data3)

    # size mismatch should be logged as a ValueError, not raised
    self.assertTrue('ValueError' in logs.value, logs.value)

    # no problem reading again if only data has changed, it gets
    # overwritten with the disk copy
    data2.x_set[2] = 42
    data2.y[2] = 99
    formatter.read(data2)
    self.assertEqual(data2.x_set[2], 3)
    self.assertEqual(data2.y[2], 5)
def test_complete(self):
    """DataSet.complete should run background functions, log progress,
    and drop a background function after it fails twice in a row.

    sync is mocked (self.mock_sync) so completion is driven by this
    test; the expected log lines must appear in order, exactly once
    each, and together account for the entire captured log.
    """
    array = DataArray(name='y', shape=(5,))
    array.init_data()
    data = new_data(arrays=(array,), location=False)
    self.syncing_array = array
    self.sync_index = 0
    data.sync = self.mock_sync
    bf = DataSet.background_functions
    # 'fail' raises every call; 'log' records the current sync index
    bf['fail'] = self.failing_func
    bf['log'] = self.logging_func

    with LogCapture() as logs:
        # grab info and warnings but not debug messages
        logging.getLogger().setLevel(logging.INFO)
        data.complete(delay=0.001)

    logs = logs.value

    expected_logs = [
        'waiting for DataSet <False> to complete',
        'DataSet: 0% complete',
        'RuntimeError: it is called failing_func for a reason!',
        'background at index 1',
        'DataSet: 20% complete',
        'RuntimeError: it is called failing_func for a reason!',
        'background function fail failed twice in a row, removing it',
        'background at index 2',
        'DataSet: 40% complete',
        'background at index 3',
        'DataSet: 60% complete',
        'background at index 4',
        'DataSet: 80% complete',
        'background at index 5',
        'DataSet <False> is complete'
    ]

    log_index = 0
    for line in expected_logs:
        self.assertIn(line, logs, logs)
        try:
            # search only after the previous match to enforce ordering
            log_index_new = logs.index(line, log_index)
        except ValueError:
            raise ValueError('line {} not found after {} in: \n {}'.format(
                line, log_index, logs))
        self.assertTrue(log_index_new >= log_index, logs)
        log_index = log_index_new + len(line) + 1  # +1 for \n

    # no unexpected log output beyond the final expected line
    self.assertEqual(log_index, len(logs), logs)
def test_init_and_bad_read(self):
    """A failing read_one_file should be logged, and the target arrays
    should be initialized to NaN rather than left as None."""
    location = self.locations[0]
    path = f'./{location}/bad.dat'

    class MyFormatter(Formatter):
        # always raises after confirming it was handed the right file
        def read_one_file(self, data_set, f, ids_read):
            s = f.read()
            if 'garbage' not in s:
                raise Exception('reading the wrong file?')

            # mark this file as read, before generating an error
            if not hasattr(data_set, 'files_read'):
                data_set.files_read = []
            data_set.files_read.append(f.name)
            raise ValueError('garbage in, garbage out')

        def read_metadata(self, data_set):
            pass

    formatter = MyFormatter()
    data = DataSet1D(location=location, name="test_init_and_bad_read")
    # clear the preset arrays so read() must (re)initialize them
    data.x_set.ndarray = None
    data.y.ndarray = None

    os.makedirs(os.path.dirname(path), exist_ok=True)
    with open(path, 'w') as f:
        f.write('garbage')

    with LogCapture() as logs:
        formatter.read(data)

    # we tried to read this file but it generated an error
    self.assertEqual(logs.value.count('error reading file'), 1, logs.value)
    self.assertEqual(data.files_read, [os.path.abspath(path)])

    # arrays were initialized but never filled, so all NaN
    expected_array_repr = repr([float('nan')] * 5)
    self.assertEqual(repr(data.x_set.tolist()), expected_array_repr)
    self.assertEqual(repr(data.y.tolist()), expected_array_repr)
def test_warning(self):
    """A finish time already in the past yields zero wait and one warning."""
    with LogCapture() as captured:
        remaining = wait_secs(time.perf_counter() - 1)
    self.assertEqual(remaining, 0)
    num_warnings = captured.value.count('negative delay')
    self.assertEqual(num_warnings, 1, captured.value)
def test_warning():
    """A finish time already in the past yields zero wait and one warning."""
    with LogCapture() as captured:
        remaining = wait_secs(time.perf_counter() - 1)
    assert remaining == 0
    occurrences = captured.value.count('negative delay')
    assert occurrences == 1, captured.value
def test_zero_delay(self):
    """Running a loop with no delay must not warn about negative delays."""
    with LogCapture() as captured:
        loop = Loop(self.p1[1:3:1]).each(self.p1)
        loop.run_temp()
    num_warnings = captured.value.count('negative delay')
    self.assertEqual(num_warnings, 0, captured.value)
def test_very_short_delay(self):
    """An unachievably small (1 ns) delay should warn once per iteration."""
    with LogCapture() as captured:
        loop = Loop(self.p1[1:3:1], 1e-9).each(self.p1)
        loop.run_temp()
    num_warnings = captured.value.count('negative delay')
    self.assertEqual(num_warnings, 2, captured.value)