def test_read_errors(self):
    formatter = GNUPlotFormat()

    # non-comment line at the beginning
    location = self.locations[0]
    data = DataSet(location=location)
    os.makedirs(location, exist_ok=True)
    with open(location + '/x_set.dat', 'w') as f:
        f.write('1\t2\n' + file_1d())
    with LogCapture() as logs:
        formatter.read(data)

    self.assertTrue('ValueError' in logs.value, logs.value)

    # same data array in 2 files
    location = self.locations[1]
    data = DataSet(location=location)
    os.makedirs(location, exist_ok=True)
    with open(location + '/x_set.dat', 'w') as f:
        f.write('\n'.join(['# x_set\ty', '# "X"\t"Y"', '# 2',
                           '1\t2', '3\t4']))
    with open(location + '/q.dat', 'w') as f:
        f.write('\n'.join(['# q\ty', '# "Q"\t"Y"', '# 2',
                           '1\t2', '3\t4']))
    with LogCapture() as logs:
        formatter.read(data)

    self.assertTrue('ValueError' in logs.value, logs.value)
def test_units(self):
    # `units` is deprecated in favor of `unit`: passing or reading `units`
    # should warn but still populate / return `unit`
    with LogCapture() as logs:
        p = SimpleArrayParam([6, 7], 'p', (2, ), units='V')
    self.assertIn('deprecated', logs.value)
    self.assertEqual(p.unit, 'V')

    with LogCapture() as logs:
        self.assertEqual(p.units, 'V')
    self.assertIn('deprecated', logs.value)

    # if both are given, `unit` wins
    with LogCapture() as logs:
        p = SimpleArrayParam([6, 7], 'p', (2, ),
                             unit='Tesla', units='Gauss')
    self.assertIn('deprecated', logs.value)
    self.assertEqual(p.unit, 'Tesla')

    with LogCapture() as logs:
        self.assertEqual(p.units, 'Tesla')
    self.assertIn('deprecated', logs.value)
def test_slow_set(self):
    # at least for now, need a local instrument to test logging
    gatesLocal = MockGates(model=self.model, server_name=None,
                           name='gateslocal')
    for param, logcount in (('chan0slow', 2),
                            ('chan0slow2', 2),
                            ('chan0slow3', 0),
                            ('chan0slow4', 1),
                            ('chan0slow5', 0)):
        gatesLocal.chan0.set(-0.5)

        with LogCapture() as logs:
            if param in ('chan0slow', 'chan0slow2', 'chan0slow3'):
                # these are the stepped parameters
                gatesLocal.set(param, 0.5)
            else:
                # these are the non-stepped parameters that
                # still have delays
                gatesLocal.set(param, -1)
                gatesLocal.set(param, 1)

        loglines = logs.value.split('\n')[:-1]
        # TODO: occasional extra negative delays here
        self.assertEqual(len(loglines), logcount, (param, logs.value))
        for line in loglines:
            self.assertTrue(line.startswith('negative delay'), line)
def test_full_write(self):
    formatter = GNUPlotFormat()
    location = self.locations[0]
    data = DataSet1D(location)

    formatter.write(data, data.io, data.location)

    with open(location + '/x_set.dat', 'r') as f:
        self.assertEqual(f.read(), file_1d())

    # check that we can add comment lines randomly into the file
    # as long as it's after the first three lines, which are comments
    # with well-defined meaning,
    # and that we can un-quote the labels
    lines = file_1d().split('\n')
    lines[1] = lines[1].replace('"', '')
    lines[3:3] = ['# this data is awesome!']
    lines[6:6] = ['# the next point is my favorite.']
    with open(location + '/x_set.dat', 'w') as f:
        f.write('\n'.join(lines))

    # normally this would be just done by data2 = load_data(location)
    # but we want to work directly with the Formatter interface here
    data2 = DataSet(location=location)
    formatter.read(data2)

    self.checkArraysEqual(data2.x_set, data.x_set)
    self.checkArraysEqual(data2.y, data.y)

    # data has been saved
    self.assertEqual(data.y.last_saved_index, 4)
    # data2 has been read back in, should show the same last_saved_index
    self.assertEqual(data2.y.last_saved_index, 4)

    # while we're here, check some errors on bad reads
    # first: trying to read into a dataset that already has the wrong size
    x = DataArray(name='x_set', label='X', preset_data=(1., 2.))
    y = DataArray(name='y', label='Y', preset_data=(3., 4.),
                  set_arrays=(x, ))
    data3 = new_data(arrays=(x, y), location=location + 'XX')
    # initially give it a different location so we can make it without
    # error, then change back to the location we want.
    data3.location = location
    with LogCapture() as logs:
        formatter.read(data3)

    self.assertTrue('ValueError' in logs.value, logs.value)

    # no problem reading again if only data has changed, it gets
    # overwritten with the disk copy
    data2.x_set[2] = 42
    data2.y[2] = 99
    formatter.read(data2)
    self.assertEqual(data2.x_set[2], 3)
    self.assertEqual(data2.y[2], 5)
def check_set_amplitude2(self, val, log_count, history_count):
    # set amplitude2 on the local source and verify the expected number
    # of 'cannot sweep' warnings and recorded history entries
    source = self.sourceLocal
    with LogCapture() as logs:
        source.amplitude2.set(val)

    loglines = logs.value.split('\n')[:-1]

    self.assertEqual(len(loglines), log_count, logs.value)
    for line in loglines:
        self.assertIn('cannot sweep', line.lower())

    hist = source.getattr('history')
    self.assertEqual(len(hist), history_count)
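# Illustrative only (not part of the suite): a test method would drive this
# helper with values matched to the mock source's sweep settings, e.g. a
# hypothetical:
#
#     def test_sweep_warnings(self):
#         self.check_set_amplitude2(0.8, log_count=1, history_count=2)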
def test_units(self):
    # `units` is deprecated in favor of `unit`: passing or reading `units`
    # should warn but still populate / return `unit`
    with LogCapture() as logs:
        p = GettableParam('p', units='V')
    self.assertIn('deprecated', logs.value)
    self.assertEqual(p.unit, 'V')

    with LogCapture() as logs:
        self.assertEqual(p.units, 'V')
    self.assertIn('deprecated', logs.value)

    # if both are given, `unit` wins
    with LogCapture() as logs:
        p = GettableParam('p', unit='Tesla', units='Gauss')
    self.assertIn('deprecated', logs.value)
    self.assertEqual(p.unit, 'Tesla')

    with LogCapture() as logs:
        self.assertEqual(p.units, 'Tesla')
    self.assertIn('deprecated', logs.value)
def test_complete(self):
    array = DataArray(name='y', shape=(5, ))
    array.init_data()
    data = new_data(arrays=(array, ), location=False)
    self.syncing_array = array
    self.sync_index = 0
    data.sync = self.mock_sync
    bf = DataSet.background_functions
    bf['fail'] = self.failing_func
    bf['log'] = self.logging_func

    with LogCapture() as logs:
        # grab info and warnings but not debug messages
        logging.getLogger().setLevel(logging.INFO)
        data.complete(delay=0.001)

    logs = logs.value

    expected_logs = [
        'waiting for DataSet <False> to complete',
        'DataSet: 0% complete',
        'RuntimeError: it is called failing_func for a reason!',
        'background at index 1',
        'DataSet: 20% complete',
        'RuntimeError: it is called failing_func for a reason!',
        'background function fail failed twice in a row, removing it',
        'background at index 2',
        'DataSet: 40% complete',
        'background at index 3',
        'DataSet: 60% complete',
        'background at index 4',
        'DataSet: 80% complete',
        'background at index 5',
        'DataSet <False> is complete'
    ]

    log_index = 0
    for line in expected_logs:
        self.assertIn(line, logs, logs)
        try:
            log_index_new = logs.index(line, log_index)
        except ValueError:
            raise ValueError('line {} not found after {} in: \n {}'.format(
                line, log_index, logs))
        self.assertTrue(log_index_new >= log_index, logs)
        log_index = log_index_new + len(line) + 1  # +1 for \n

    self.assertEqual(log_index, len(logs), logs)
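# For context, a minimal sketch of the pattern exercised above, assuming only
# the DataSet.background_functions / complete() API shown in this test
# ('my_monitor' is an illustrative name, not part of the suite):
#
#     def my_monitor():
#         print('still measuring...')
#
#     DataSet.background_functions['monitor'] = my_monitor
#     data.complete(delay=0.001)  # runs each background function per sync
#     # a function that fails twice in a row is removed, as asserted above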
def test_init_and_bad_read(self):
    location = self.locations[0]
    path = './{}/bad.dat'.format(location)

    class MyFormatter(Formatter):
        def read_one_file(self, data_set, f, ids_read):
            s = f.read()
            if 'garbage' not in s:
                raise Exception('reading the wrong file?')

            # mark this file as read, before generating an error
            if not hasattr(data_set, 'files_read'):
                data_set.files_read = []
            data_set.files_read.append(f.name)
            raise ValueError('garbage in, garbage out')

        def read_metadata(self, data_set):
            pass

    formatter = MyFormatter()
    data = DataSet1D(location)
    data.x_set.ndarray = None
    data.y.ndarray = None

    os.makedirs(os.path.dirname(path), exist_ok=True)
    with open(path, 'w') as f:
        f.write('garbage')

    with LogCapture() as logs:
        formatter.read(data)

    # we tried to read this file but it generated an error
    self.assertEqual(logs.value.count('error reading file'), 1, logs.value)
    self.assertEqual(data.files_read, [os.path.abspath(path)])

    expected_array_repr = repr([float('nan')] * 5)
    self.assertEqual(repr(data.x_set.tolist()), expected_array_repr)
    self.assertEqual(repr(data.y.tolist()), expected_array_repr)
def test_warning(self):
    with LogCapture() as logs:
        secs_out = wait_secs(time.perf_counter() - 1)
    self.assertEqual(secs_out, 0)

    self.assertEqual(logs.value.count('negative delay'), 1, logs.value)
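# Usage sketch for wait_secs, as exercised above: it takes a target time in
# time.perf_counter() units and returns the seconds remaining, clipping
# past-due targets to 0 and logging a 'negative delay' warning:
#
#     finish_at = time.perf_counter() + 0.1
#     # ... do some work ...
#     time.sleep(wait_secs(finish_at))  # sleep out the remainder, if any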
def test_zero_delay(self):
    with LogCapture() as logs:
        Loop(self.p1[1:3:1]).each(self.p1).run_temp()

    self.assertEqual(logs.value.count('negative delay'), 0, logs.value)
def test_very_short_delay(self):
    with LogCapture() as logs:
        Loop(self.p1[1:3:1], 1e-9).each(self.p1).run_temp()

    self.assertEqual(logs.value.count('negative delay'), 2, logs.value)
def time_acquisition(ctl, nsamples, nrecords, nbuffers,
                     alloc_buffers=10, SR=2e8, t_total=None):
    """
    Time a single ctl.acquisition() call and print some statistics.

    Parameters
    ----------
    ctl : qcodes instrument instance
        acquisition controller to test
    nsamples : int
        samples per record
    nrecords : int
        records per buffer
    nbuffers : int
        buffers in the total acquisition
    alloc_buffers : int (default: 10)
        number of allocated buffers to use by the card
    SR : float (default: 2e8)
        acquisition sampling rate
    t_total : float (default: None)
        total time the sequence takes for this measurement;
        allows computing the overhead.

    Returns
    -------
    acquisition time
        the time that the ctl.acquisition call took, in seconds.
    data
        the data returned by the ctl.acquisition call.
    """
    ats_logger = qc.instrument_drivers.AlazarTech.ATS.logger
    ats_logger.setLevel(logging.DEBUG)

    t1 = time.perf_counter()
    ctl.setup_acquisition(samples=nsamples, records=nrecords,
                          buffers=nbuffers, acq_time=None,
                          allocated_buffers=alloc_buffers, SR=int(SR))
    t2 = time.perf_counter()
    print("done:", t2 - t1, 's')

    with LogCapture(logger=ats_logger) as logs:
        t0 = time.perf_counter()
        data = ctl.acquisition()
        t1 = time.perf_counter()
    log_str = logs.value

    t_acq_total = t1 - t0
    print(f'Acquisition: {t_acq_total:.6f} sec.')
    if t_total is not None:
        overhead = t_acq_total / t_total
        print(f'Net time: {t_total:.6f} sec.')
        print(f'Overhead: {overhead:.2f} X')
    print(f'Mean buffer handling time: {ctl.handling_times.mean():.1f} ms')
    if hasattr(ctl, 'post_acquire_time'):
        print(f'post_acquire time: {ctl.post_acquire_time:.2f} s')
    print('Data shape:', data.shape)
    print('\n', log_str)

    return t_acq_total, data
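# Example invocation -- a sketch only: 'ctl' must be an already-configured
# Alazar acquisition controller instance, and the numbers are placeholders:
#
#     t_acq, data = time_acquisition(ctl, nsamples=1024, nrecords=100,
#                                    nbuffers=50, t_total=0.5)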
def test_mechanics(self):
    extras = 'super secret don\'t tell anyone'

    sm = ServerManagerTest(name='test', server_class=EmptyServer,
                           shared_attrs=extras)
    sm._run_server()

    self.assertEqual(sm._query_queue.get(timeout=1), 'why?')
    self.assertEqual(sm._response_queue.get(timeout=1), extras)

    # builtin errors from the server propagate to the caller
    builtin_error_str = ('traceback\n lines\n and then\n'
                         ' OSError: your hard disk went floppy.')
    sm._response_queue.put((RESPONSE_ERROR, builtin_error_str))
    with self.assertRaises(OSError):
        sm.ask('which way does the wind blow?')

    # non-built-in errors we fall back on RuntimeError
    custom_error_str = ('traceback\nlines\nand then\n'
                        'CustomError: the Balrog is loose!')
    extra_resp1 = 'should get tossed by the error checker'
    extra_resp2 = 'so should this.'
    sm._response_queue.put((RESPONSE_OK, extra_resp1))
    sm._response_queue.put((RESPONSE_OK, extra_resp2))
    sm._response_queue.put((RESPONSE_ERROR, custom_error_str))
    # TODO: we have an intermittent failure below, but only when running
    # the full test suite (including pyqt and matplotlib?), not if we
    # run just this module, or at least not nearly as frequently.
    time.sleep(0.2)
    with LogCapture() as logs:
        with self.assertRaises(RuntimeError):
            sm.ask('something benign')
    self.assertTrue(sm._response_queue.empty())
    self.assertIn(extra_resp1, logs.value)
    self.assertIn(extra_resp2, logs.value)

    # extra responses to a query, only the last should be taken
    extra_resp1 = 'boo!'
    extra_resp2 = 'a barrel of monkeys!'
    sm._response_queue.put((RESPONSE_OK, extra_resp1))
    sm._response_queue.put((RESPONSE_OK, extra_resp2))
    time.sleep(0.05)
    p = mp.Process(target=delayed_put,
                   args=(sm._response_queue, (RESPONSE_OK, 42), 0.05))
    p.start()
    with LogCapture() as logs:
        self.assertEqual(sm.ask('what is the answer'), 42)
    self.assertIn(extra_resp1, logs.value)
    self.assertIn(extra_resp2, logs.value)

    # no response to a query
    with self.assertRaises(Empty):
        sm.ask('A sphincter says what?', timeout=0.05)

    # test halting an unresponsive server
    sm._server = mp.Process(target=time.sleep, args=(1000,))
    sm._server.start()
    self.assertIn(sm._server, mp.active_children())
    with LogCapture() as logs:
        sm.halt(0.01)
    self.assertIn('ServerManager did not respond '
                  'to halt signal, terminated', logs.value)
    self.assertNotIn(sm._server, mp.active_children())
def test_warning(self):
    with LogCapture() as logs:
        secs_out = wait_secs(time.perf_counter() - 1)
    self.assertEqual(secs_out, 0)
def test_normal(self):
    # we really only need to test local here - as a server it's already
    # used in other tests, but only implicitly (and not covered as it's
    # in a subprocess)
    queries = (
        # add an "instrument" to the server
        (0.5, (QUERY_ASK, 'new_id', )),
        (0.01, (QUERY_ASK, 'new', (Holder, 0))),

        # some sets and gets that work
        (0.01, (QUERY_WRITE, 'cmd',
                (0, 'set', 'happiness', 'a warm gun'), {})),
        (0.01, (QUERY_WRITE, 'cmd', (0, 'set'),
                {'val': 42, 'key': 'the answer'})),
        (0.01, (QUERY_ASK, 'cmd', (0, 'get'), {'key': 'happiness'})),
        (0.01, (QUERY_ASK, 'cmd', (0, 'get', 'the answer', ), {})),

        # then some that make errors
        # KeyError
        (0.01, (QUERY_ASK, 'cmd', (0, 'get', 'Carmen Sandiego', ), {})),
        # TypeError (too many args) shows up in logs
        (0.01, (QUERY_WRITE, 'cmd', (0, 'set', 1, 2, 3), {})),
        # TypeError (unexpected kwarg) shows up in logs
        (0.01, (QUERY_WRITE, 'cmd', (0, 'set', 'do'), {'c': 'middle'})),

        # and another good one, just so we know it still works
        (0.01, (QUERY_ASK, 'cmd', (0, 'get_extras'), {})),

        # delete the instrument and stop the server
        # (no need to explicitly halt)
        (0.01, (QUERY_ASK, 'delete', (0, )))
    )
    extras = {'where': 'infinity and beyond'}

    run_schedule(queries, self.query_queue)

    try:
        with LogCapture() as logs:
            TimedInstrumentServer(self.query_queue, self.response_queue,
                                  extras)
    except TypeError:
        from traceback import format_exc
        print(format_exc())

    self.assertEqual(logs.value.count('TypeError'), 2)
    for item in ('1, 2, 3', 'middle'):
        self.assertIn(item, logs.value)

    responses = get_results(self.response_queue)

    expected_responses = [
        (RESPONSE_OK, 0),
        (RESPONSE_OK, {
            'functions': {},
            'id': 0,
            'name': 'J Edgar',
            '_methods': {},
            'parameters': {}
        }),
        (RESPONSE_OK, 'a warm gun'),
        (RESPONSE_OK, 42),
        (RESPONSE_ERROR, ('KeyError', 'Carmen Sandiego')),
        (RESPONSE_OK, extras)
    ]
    for response, expected in zip(responses, expected_responses):
        if expected[0] == RESPONSE_OK:
            self.assertEqual(response, expected)
        else:
            self.assertEqual(response[0], expected[0])
            for item in expected[1]:
                self.assertIn(item, response[1])