def classpath(v):
    """Return the full dotted class path (eg, prefix.module.Classname).

    :param v: a class or an instance of one
    :returns: string, the full classpath of v
    """
    target = v if isinstance(v, type) else v.__class__
    return strclass(target)
def id(self):
    """Overridden version of TestCase.id().

    Internal TestCase identifier used in certain places instead of
    __str__(); behaves like __str__() except the class name is
    displayed differently.
    """
    class_name = strclass(self.__class__)
    method_name = self._testMethodName
    if self.parameters is None:
        return "{}.{} [<unparameterized>]".format(class_name, method_name)
    return "{}.{} [{}]".format(class_name, method_name, self.parameters)
def _createTempDirEx(cls, *names):
    """Create a temp dir under cls._tempBaseDir, prefix encoding class and names.

    The created path is recorded in cls._tempDirs and returned.
    """
    parts = ['toil', 'test', strclass(cls)]
    parts.extend(n for n in names if n)
    parts.append('')  # trailing '-' separates prefix from the random suffix
    temp_dir_path = tempfile.mkdtemp(dir=cls._tempBaseDir, prefix='-'.join(parts))
    cls._tempDirs.append(temp_dir_path)
    return temp_dir_path
def _handleClassSetUp(self, test, result):
    # Run setUpClass for the incoming test's class when we cross a class
    # boundary.  Generator variant: if setUpClass returns an iterable it is
    # drained via `yield from` (cooperative/async-style suites).
    previousClass = getattr(result, '_previousTestClass', None)
    currentClass = test.__class__
    if currentClass == previousClass:
        return  # still in the same class; setUpClass already ran
    if result._moduleSetUpFailed:
        return
    if getattr(currentClass, "__unittest_skip__", False):
        return
    try:
        currentClass._classSetupFailed = False
    except TypeError:
        # test may actually be a function
        # so its class will be a builtin-type
        pass
    setUpClass = getattr(currentClass, 'setUpClass', None)
    if setUpClass is not None:
        _call_if_exists(result, '_setupStdout')
        try:
            deferred = setUpClass()
            if isiterable(deferred):
                yield from deferred
        except Exception as e:
            # In debug mode propagate immediately; otherwise record the
            # failure as a class-level error on the result.
            if isinstance(result, _DebugResult):
                raise
            currentClass._classSetupFailed = True
            className = util.strclass(currentClass)
            errorName = 'setUpClass (%s)' % className
            self._addClassOrModuleLevelException(result, e, errorName)
        finally:
            _call_if_exists(result, '_restoreStdout')
def getClassDescription(self, test):
    """Return the first docstring line of test's class, else its dotted name."""
    test_class = test.__class__
    description = test.__doc__
    if self.descriptions and description:
        first_line = description.split('\n')[0]
        return first_line.strip()
    return strclass(test_class)
def _tearDownPreviousClass(self, test, result):
    # Run tearDownClass for the class we are leaving, once the incoming
    # test belongs to a different class.  Generator variant: iterable
    # results from tearDownClass are drained via `yield from`.
    previousClass = getattr(result, '_previousTestClass', None)
    currentClass = test.__class__
    if currentClass == previousClass:
        return  # not leaving the class yet
    if getattr(previousClass, '_classSetupFailed', False):
        return  # setUpClass failed, so tearDownClass must not run
    if getattr(result, '_moduleSetUpFailed', False):
        return
    if getattr(previousClass, "__unittest_skip__", False):
        return
    tearDownClass = getattr(previousClass, 'tearDownClass', None)
    if tearDownClass is not None:
        _call_if_exists(result, '_setupStdout')
        try:
            deferred = tearDownClass()
            if isiterable(deferred):
                yield from deferred
        except Exception as e:
            # Debug runs re-raise; normal runs record a class-level error.
            if isinstance(result, _DebugResult):
                raise
            className = util.strclass(previousClass)
            errorName = 'tearDownClass (%s)' % className
            self._addClassOrModuleLevelException(result, e, errorName)
        finally:
            _call_if_exists(result, '_restoreStdout')
def startTest(self, test):
    """Record the start time; in verbose mode emit a marker on class change."""
    super(BlackMambaTestResult, self).startTest(test)
    self.startTime = time.time()
    if not self.showAll:
        return
    current = test.__class__
    if self._prev_class != current:
        self._prev_class = current
        self._results += '${}+'.format(strclass(current))
def print_test_times_by_class(self, suite, percent=.5):
    """Aggregate per-test durations by test class and print the slowest.

    :param suite: TimingTestSuite whose .test_times yields (test, timedelta)
    :param percent: fraction passed through to _print_test_times
    """
    times_by_class = defaultdict(datetime.timedelta)
    for test, test_time in suite.test_times:
        key = strclass(TimingTestSuite.get_test_class(test))
        times_by_class[key] += test_time
    ranked = sorted(times_by_class.items(), key=lambda item: item[1], reverse=True)
    self._print_test_times(ranked, percent)
def __str__(self):
    """One line per entry: nested suites recurse, cases render as class.method."""
    def render(test):
        if isinstance(test, type(self)):
            return str(test)
        return "{}.{}".format(strclass(test.__class__), test._testMethodName)
    return "\n".join(render(t) for t in self._tests)
def __repr__(self):
    """Overridden TestCase.__repr__() that also displays the parameters."""
    cls_path = strclass(self.__class__)
    return "<{} testMethod={} parameters={!r}>".format(
        cls_path, self._testMethodName, self.parameters)
def __str__(self):
    """Verbose-mode test name; appends the parameter descriptor when present
    to distinguish the same test run with different parameters."""
    if not hasattr(self, 'parameter_desc'):
        return unittest.TestCase.__str__(self)
    return "%s %s (%s)" % (self._testMethodName, self.parameter_desc,
                           strclass(self.__class__))
def startTest(self, test):
    """ran once before each TestCase"""
    self._pyt_start = time.time()
    banner = "{}/{} - Starting {}.{}".format(
        self.testsRun + 1,
        self.total_tests,
        strclass(test.__class__),
        test._testMethodName,
    )
    echo.debug(banner)
    super(TestResult, self).startTest(test)
def loadTestsFromName(self, name, *args, **kwargs):
    # Resolve `name` into a suite by probing each candidate interpretation
    # (method, class, module) produced by TestInfo, most specific first.
    ts = self.suiteClass()
    ti = TestInfo(name, self.basedir, self.testMethodPrefix)
    found = False
    for i, tc in enumerate(ti.possible, 1):
        echo.debug("{}. Searching for tests matching:", i)
        echo.debug(" {}", tc)
        if tc.has_method():
            # Candidate names a specific test method.
            for c, mn in tc.method_names():
                #echo.debug('adding test method to suite: {}', mn)
                #echo.out('Found method test: {}.{}.{}', c.__module__, c.__name__, mn)
                echo.debug('Found method test: {}.{}', strclass(c), mn)
                found = True
                ts.addTest(c(mn))
                self.environ.counter["methods"] += 1
        elif tc.has_class():
            # Candidate names a TestCase class.
            for c in tc.classes():
                #echo.debug('adding testcase to suite: {}', c.__name__)
                #echo.out('Found class test: {}.{}', c.__module__, c.__name__)
                echo.debug('Found class test: {}', strclass(c))
                found = True
                ts.addTest(self.loadTestsFromTestCase(c))
                self.environ.counter["classes"] += 1
        else:
            # Fall back to whole modules.
            for m in tc.modules():
                #echo.debug('adding module to suite: {}', m.__name__)
                echo.debug('Found module test: {}', m.__name__)
                found = True
                ts.addTest(self.loadTestsFromModule(m))
                self.environ.counter["modules"] += 1
        # if we found a module that matched then don't try for method
        if found:
            break
    if not found:
        ti.raise_any_error()
    echo.debug("Found {} total tests".format(ts.countTestCases()))
    return ts
def write_results(result, filename='tmp/tests.json'):
    """Write the pass/fail status of every test in `result` to a JSON file.

    :param result: test result with `.passes` and `.failures`, each a list of
        (test, ...) tuples
    :param filename: path of the JSON file to write; parent dirs are created
    """
    data = {}
    # NOTE: the original loop variable was also named `result`, shadowing the
    # parameter; renamed to `outcome` (behavior unchanged).
    outcomes = ((result.passes, 'pass'), (result.failures, 'fail'))
    for test_list, outcome in outcomes:
        for test, _ in test_list:
            name = "%s.%s" % (strclass(test.__class__), test._testMethodName)
            data[name] = outcome
    d = os.path.dirname(filename)
    # Guard against a bare filename: os.makedirs('') raises FileNotFoundError.
    if d and not os.path.exists(d):
        os.makedirs(d)
    with open(filename, 'w') as f:
        f.write(json.dumps(data))
def stopTest(self, test):
    """ran once after each TestCase"""
    super(TestResult, self).stopTest(test)
    started = self._pyt_start
    del self._pyt_start
    elapsed = round(time.time() - started, 2)
    echo.debug("Stopping {}.{} after {}s".format(
        strclass(test.__class__), test._testMethodName, elapsed))
def _tearDownPreviousClass(self, test, result):
    """Run tearDownClass on the previous test class when entering a new class.

    Errors from tearDownClass are recorded as class-level errors on the
    result unless running under a debug result, in which case they propagate.
    """
    previousClass = getattr(result, '_previousTestClass', None)
    currentClass = test.__class__
    if currentClass == previousClass:
        return
    if getattr(previousClass, '_classSetupFailed', False):
        return
    if getattr(result, '_moduleSetUpFailed', False):
        return
    if getattr(previousClass, "__unittest_skip__", False):
        return
    tearDownClass = getattr(previousClass, 'tearDownClass', None)
    if tearDownClass is not None:
        try:
            tearDownClass()
        # BUGFIX: was Python-2-only `except Exception, e` — a SyntaxError on
        # Python 3 (this file uses `yield from` elsewhere, so it targets Py3).
        except Exception as e:
            if isinstance(result, _DebugResult):
                raise
            className = util.strclass(previousClass)
            errorName = 'tearDownClass (%s)' % className
            self._addClassOrModuleLevelException(result, e, errorName)
        finally:
            # BUGFIX: the original `finally:` clause had no body (SyntaxError).
            # Unlike the sibling implementation, this variant never called
            # _setupStdout, so there is nothing to restore here.
            pass
def _handleClassSetUp(self, test, result):
    # Run setUpClass when crossing into a new test class; additionally
    # converts a SkipTest raised by setUpClass into class-level skip flags
    # (the stdlib behavior this variant patches in for Green).
    previousClass = getattr(result, '_previousTestClass', None)
    currentClass = test.__class__
    if currentClass == previousClass:
        return
    if result._moduleSetUpFailed:
        return
    if getattr(currentClass, "__unittest_skip__", False):  # pragma: no cover
        return
    try:
        currentClass._classSetupFailed = False
    except TypeError:  # pragma: no cover
        # test may actually be a function
        # so its class will be a builtin-type
        pass
    setUpClass = getattr(currentClass, 'setUpClass', None)
    if setUpClass is not None:
        _call_if_exists(result, '_setupStdout')
        try:
            setUpClass()
        # THIS is the part Python forgot to implement -- so Green will
        except unittest.case.SkipTest as e:
            currentClass.__unittest_skip__ = True
            currentClass.__unittest_skip_why__ = str(e)
        # -- END of fix
        except Exception as e:  # pragma: no cover
            if isinstance(result, _DebugResult):
                raise
            currentClass._classSetupFailed = True
            className = util.strclass(currentClass)
            errorName = 'setUpClass (%s)' % className
            self._addClassOrModuleLevelException(result, e, errorName)
        finally:
            _call_if_exists(result, '_restoreStdout')
def __repr__(cls):
    """Repr of the class itself: its fully-qualified dotted name."""
    dotted = strclass(cls)
    return dotted
def __unicode__(cls):
    """Unicode form of the class: its fully-qualified dotted name."""
    dotted = strclass(cls)
    return dotted
def __str__(cls):
    """String form of the class: its fully-qualified dotted name."""
    dotted = strclass(cls)
    return dotted
# load all the test suites from the provided input needsPrefix = False # if we need to prepend the source name to the final label ; this is needed for dir path inputs if testSource.endswith(".py"): testSuite = loader.loadTestsFromName(_convert_name(testSource)) else: needsPrefix = True testSuite = loader.discover(testSource) # recursive generator to get all the TestCase entries from all the TestSuites; TestSuites can contain arbitrary levels of nested TestSuites's that contain TestCases def get_all_tests(testSuite): for item in testSuite: if not isinstance(item, TestSuite): yield item else: yield from get_all_tests(item) testsList = [i for i in get_all_tests(testSuite)] # make text labels for each test case that we can use to run the test case from the CLI testLabels = [] for t in testsList: label = strclass(t.__class__) + '.' + t._testMethodName if needsPrefix: label = _convert_name(testSource).rstrip("/") + '.' + label testLabels.append(label) for label in testLabels: print(label)
def getDescription(self, test):
    """Subtests render via str(test); others pad shortDescription to 80 cols."""
    is_subtest = 'SubTest' in strclass(test.__class__)
    if is_subtest:
        return str(test)
    return '%-80s' % test.shortDescription()
def __repr__(self):
    """Summary repr showing run/error/failure counts."""
    cls_path = util.strclass(self.__class__)
    return ("<%s run=%i errors=%i failures=%i>" %
            (cls_path, self.testsRun, len(self.errors), len(self.failures)))
def __repr__(self):
    """Repr listing the contained tests."""
    cls_path = util.strclass(self.__class__)
    contained = list(self)
    return "<%s tests=%s>" % (cls_path, contained)
def __str__(self):
    """Render as classname.method, stripping the tests-package prefix."""
    full_name = strclass(self.__class__)
    short_name = re.sub(r"tests\d*.test_", "", full_name)
    return "{classname}.{method}".format(classname=short_name,
                                         method=self._testMethodName)
def __repr__(self):
    """Repr showing the wrapped test function."""
    cls_path = util.strclass(self.__class__)
    return "<%s testFunc=%s>" % (cls_path, self._testFunc)
def __str__(self):
    """Test method name followed by the real class's dotted path."""
    owner = strclass(self._realclass)
    return "%s (%s)" % (self._testMethodName, owner)
def __repr__(self):
    """Repr showing the bound test method name."""
    cls_path = util.strclass(self.__class__)
    return "<%s testMethod=%s>" % (cls_path, self._testMethodName)
def id(self):
    """Unique id: dotted class path plus the test method name."""
    cls_path = util.strclass(self.__class__)
    return "%s.%s" % (cls_path, self._testMethodName)
def __str__(self):
    """method[index](params) (classpath) for parameterized cases."""
    fields = (self._testMethodName, self.getTestCaseIndex(),
              self.getParameters(), strclass(self.__class__))
    return "%s[%d](%s) (%s)" % fields
def __str__(self):
    """[method] (classpath)"""
    cls_path = strclass(self.__class__)
    return "[{0}] ({1})".format(self._testMethodName, cls_path)
def test_channelisation(self):
    """TP.C.1.19 CBF Channelisation Wideband Coarse L-band"""
    test_chan = 1500
    expected_fc = self.corr_freqs.chan_freqs[test_chan]
    init_dsim_sources(self.dhost)
    self.dhost.sine_sources.sin_0.set(frequency=expected_fc, scale=0.25)
    # The signal source is going to quantise the requested freqency, so see
    # what we actually got
    source_fc = self.dhost.sine_sources.sin_0.frequency
    # Get baseline 0 data, i.e. auto-corr of m000h
    test_baseline = 0
    test_data = self.receiver.get_clean_dump(DUMP_TIMEOUT)['xeng_raw']
    b_mag = normalised_magnitude(test_data[:, test_baseline, :])
    # find channel with max power
    max_chan = np.argmax(b_mag)
    # self.assertEqual(max_chan, test_chan,
    #                  'Channel with max power is not the test channel')
    requested_test_freqs = self.corr_freqs.calc_freq_samples(
        test_chan, samples_per_chan=101, chans_around=5)
    # Placeholder of actual frequencies that the signal generator produces
    actual_test_freqs = []
    # Channel magnitude responses for each frequency
    chan_responses = []
    last_source_freq = None
    for i, freq in enumerate(requested_test_freqs):
        # LOGGER.info('Getting channel response for freq {}/{}: {} MHz.'.format(
        #     i+1, len(requested_test_freqs), freq/1e6))
        print('Getting channel response for freq {}/{}: {} MHz.'.format(
            i + 1, len(requested_test_freqs), freq / 1e6))
        if freq == expected_fc:
            # We've already done this one!
            this_source_freq = source_fc
            # BUGFIX: was assigned to `this_freq_result`, a name never read;
            # the append below uses `this_freq_response`, which would have
            # been stale or undefined on this iteration.
            this_freq_response = b_mag
        else:
            self.dhost.sine_sources.sin_0.set(frequency=freq, scale=0.125)
            this_source_freq = self.dhost.sine_sources.sin_0.frequency
            if this_source_freq == last_source_freq:
                LOGGER.info('Skipping channel response for freq {}/{}: {} MHz.\n'
                            'Digitiser frequency is same as previous.'.format(
                                i + 1, len(requested_test_freqs), freq / 1e6))
                continue  # Already calculated this one
            else:
                last_source_freq = this_source_freq
            this_freq_data = self.receiver.get_clean_dump(DUMP_TIMEOUT)['xeng_raw']
            this_freq_response = normalised_magnitude(
                this_freq_data[:, test_baseline, :])
        actual_test_freqs.append(this_source_freq)
        chan_responses.append(this_freq_response)
    self.corr_fix.stop_x_data()
    # Convert the lists to numpy arrays for easier working
    actual_test_freqs = np.array(actual_test_freqs)
    chan_responses = np.array(chan_responses)

    def plot_and_save(freqs, data, plot_filename):
        # Plot the channel response with channel-boundary markers and save it.
        df = self.corr_freqs.delta_f
        fig = plt.plot(freqs, data)[0]
        axes = fig.get_axes()
        ybound = axes.get_ybound()
        yb_diff = abs(ybound[1] - ybound[0])
        new_ybound = [ybound[0] - yb_diff * 1.1, ybound[1] + yb_diff * 1.1]
        plt.vlines(expected_fc, *new_ybound, colors='r', label='chan fc')
        plt.vlines(expected_fc - df / 2, *new_ybound, label='chan min/max')
        plt.vlines(expected_fc - 0.8 * df / 2, *new_ybound,
                   label='chan +-40%', linestyles='dashed')
        plt.vlines(expected_fc + df / 2, *new_ybound, label='_chan max')
        plt.vlines(expected_fc + 0.8 * df / 2, *new_ybound,
                   label='_chan +40%', linestyles='dashed')
        plt.legend()
        plt.title('Channel {} ({} MHz) response'.format(
            test_chan, expected_fc / 1e6))
        axes.set_ybound(*new_ybound)
        plt.grid(True)
        plt.ylabel('dB relative to VACC max')
        # TODO Normalise plot to frequency bins
        plt.xlabel('Frequency (Hz)')
        plt.savefig(plot_filename)
        plt.close()

    graph_name = '{}.{}.channel_response.svg'.format(
        strclass(self.__class__), self._testMethodName)
    plot_data_all = loggerise(chan_responses[:, test_chan], dynamic_range=90)
    plot_and_save(actual_test_freqs, plot_data_all, graph_name)
    # Get responses for central 80% of channel
    df = self.corr_freqs.delta_f
    central_indices = (
        (actual_test_freqs <= expected_fc + 0.8 * df) &
        (actual_test_freqs >= expected_fc - 0.8 * df))
    central_chan_responses = chan_responses[central_indices]
    central_chan_test_freqs = actual_test_freqs[central_indices]
    # Test responses in central 80% of channel
    for i, freq in enumerate(central_chan_test_freqs):
        max_chan = np.argmax(np.abs(central_chan_responses[i]))
        self.assertEqual(max_chan, test_chan,
                         'Source freq {} peak not in channel '
                         '{} as expected but in {}.'
                         .format(freq, test_chan, max_chan))
    # TODO Graph the central 80% too.
    self.assertLess(
        np.max(np.abs(central_chan_responses[:, test_chan])), 0.99,
        'VACC output at > 99% of maximum value, indicates that '
        'something, somewhere, is probably overranging.')
    max_central_chan_response = np.max(
        10 * np.log10(central_chan_responses[:, test_chan]))
    min_central_chan_response = np.min(
        10 * np.log10(central_chan_responses[:, test_chan]))
    # BUGFIX: was `max_chan_response - min_chan_response`, names that were
    # never defined anywhere in this method (guaranteed NameError).
    chan_ripple = max_central_chan_response - min_central_chan_response
    acceptable_ripple_lt = 0.3
    self.assertLess(chan_ripple, acceptable_ripple_lt,
                    'ripple {} dB within 80% of channel fc is >= {} dB'
                    .format(chan_ripple, acceptable_ripple_lt))
def __str__(self):
    """format <basename> (<classpath>)"""
    base = os.path.basename(self.filename)
    return "format %s (%s)" % (base, strclass(self.__class__))
def __str__(self):
    """method (classpath)"""
    owner = util.strclass(self.__class__)
    return "%s (%s)" % (self._testMethodName, owner)
def __str__(self):
    """<fmt> of <basename> (<classpath>)"""
    base = os.path.basename(self.filename)
    owner = strclass(self.__class__)
    return "%s of %s (%s)" % (self.fmt, base, owner)
def __str__(self):
    """classpath (funcname)"""
    owner = util.strclass(self.__class__)
    return "%s (%s)" % (owner, self._testFunc.__name__)
def __repr__(self):
    """Repr with class path, case index, parameters and the method name."""
    fields = (strclass(self.__class__), self.getTestCaseIndex(),
              self.getParameters(), self._testMethodName)
    return "<%s[%d](%s) testMethod=%s>" % fields
def almostEqual(self, first: Any, second: Any, places: int = None,
                msg: Any = None, delta: float = None):
    """Return True if first ~=~ second, else an error message"""
    # Fast path: exact equality covers empty containers too.
    if first == second:
        return True
    if msg is None:
        msg = '%s~=~%s' % (first, second)
    # Scalars: delegate to the float comparison; append its detail on failure.
    if isinstance(first, (int, float)) and isinstance(second, (int, float)):
        almost = self.almostEqualFloat(first, second, places, delta)
        return almost if almost is True else (msg + ': ' + almost)
    if type(first) != type(second):
        return msg + ': mismatched types: %s vs %s' % (type(first),
                                                       type(second))

    def msg_idx(i):
        # Append an element index to the running message.
        if msg.endswith('] '):
            return msg[:-1] + '[%d] ' % i
        else:
            return msg + '@ [%d] ' % i

    # Any matching extensions?
    for matcher in self.matchers:
        if isinstance(first, matcher.kind):
            return matcher.almost_equal(first, second, places=places,
                                        delta=delta)
    if isinstance(first, (list, tuple)) and type(first) is type(second):
        if len(first) != len(second):
            msg += ': mismatched lengths: first=%d second=%d' % (
                len(first), len(second))
            return msg
        elif isinstance(
                first[0],
                self.matchable_types):  # List of tuples or matchable types
            # Note: we know first is non-empty because:
            # 1. first == second test would catch empty list/tuple
            # 2. len(first) != len(second) would catch []==[...]
            if not isinstance(first[0], type(second[0])):
                msg += ': mismatched element types: %s vs %s' % (
                    util.strclass(type(
                        first[0])), util.strclass(type(second[0])))
                return msg
            # Element-wise recursive comparison via assertAlmostEqual.
            for idx, (f, s) in enumerate(zip(first, second)):
                try:
                    self.assertAlmostEqual(f, s, places=places, delta=delta,
                                           msg=msg_idx(idx))
                except self.failureException as err:
                    msg = str(
                        err
                    )  # Use this try/except/raise to simplify traceback stack
                    return msg
            return True
        elif isinstance(first[0], Number):
            # Element-wise numeric comparison.
            for idx, (f, s) in enumerate(zip(first, second)):
                almost = self.almostEqualFloat(f, s, places=places,
                                               delta=delta)
                if almost is not True:
                    msg = msg_idx(idx)[:-1] + ': ' + almost
                    return msg
            return True
        else:
            return msg + ": almostEqual: don't understand type: %s of %s" % (
                type(first).__name__, type(first[0]).__name__)
    return msg + ": almostEqual: don't understand type: %s" % type(
        first).__name__
def __str__(self):
    """method (classpath)"""
    cls_path = util.strclass(self.__class__)
    return "%s (%s)" % (self._testMethodName, cls_path)
def _getClassName(self, test):
    """Dotted class path for the given test's class."""
    test_class = test.__class__
    return strclass(test_class)
def __str__(self):
    """classpath (funcname)"""
    cls_path = util.strclass(self.__class__)
    func_name = self._testFunc.__name__
    return "%s (%s)" % (cls_path, func_name)
def run(self, result):
    """Log the test start, then delegate to the normal TestCase.run."""
    banner = "STARTING TEST %s (%s)" % (strclass(self.__class__),
                                        self._testMethodName)
    logging.info(banner)
    super(AppTestCase, self).run(result)
def shortDescription(self):
    """classpath.method"""
    parts = (strclass(self.__class__), self._testMethodName)
    return ".".join(parts)
def __str__(self):
    """method (classpath.method)"""
    cls_path = strclass(self.__class__)
    return "%s (%s.%s)" % (self._testMethodName, cls_path,
                           self._testMethodName)
def getClassDescription(self, test):
    """First docstring line of the test's class, or its dotted class name."""
    test_class = test.__class__
    doc = test_class.__doc__
    if not (self.descriptions and doc):
        return strclass(test_class)
    return doc.split('\n')[0].strip()
def __repr__(self):
    """Summary repr showing run/error/failure counts."""
    cls_path = util.strclass(self.__class__)
    return ("<%s run=%i errors=%i failures=%i>" %
            (cls_path, self.testsRun, len(self.errors), len(self.failures)))
def __str__(self):
    """<basename> - GUI (<classpath>)"""
    base = os.path.basename(self.filename)
    return "%s - GUI (%s)" % (base, strclass(self.__class__))
def getClassDescription(self, test):
    """First line of the class docstring (stripped), else the dotted name."""
    test_class = test.__class__
    doc = test_class.__doc__
    if self.descriptions and doc:
        first = doc.strip().split('\n')[0]
        return first.strip()
    return strclass(test_class)
def __str__(self):
    """classname.method with the tests-package prefix removed."""
    shortened = re.sub(r"tests\d*.test_", "", strclass(self.__class__))
    return "{classname}.{method}".format(method=self._testMethodName,
                                         classname=shortened)
def __str__(self):
    """<fmt> of <basename> (<classpath>)"""
    base = os.path.basename(self.filename)
    cls_path = strclass(self.__class__)
    return "%s of %s (%s)" % (self.fmt, base, cls_path)
def __repr__(self):
    """Repr showing the wrapped test function."""
    cls_path = util.strclass(self.__class__)
    func = self._testFunc
    return "<%s testFunc=%s>" % (cls_path, func)
def __repr__(self):
    """Repr showing the associated domain."""
    cls_path = strclass(self.__class__)
    return "<%s tec=%s>" % (cls_path, self._domain)