def compare_datasets(ref_dataset, test_dataset, rtol=1e-5, atol=1e-8):
    '''
    Compare two datasets for numerical differences.

    Loads both datasets and checks every dependent variable in the reference
    against the same-named trace in the test dataset with numpy.allclose.

    :param ref_dataset: path to the reference dataset file
    :param test_dataset: path to the test dataset file
    :param rtol: relative tolerance (default 1e-5, numpy.allclose default)
    :param atol: absolute tolerance (default 1e-8, numpy.allclose default)
    :return failed: list of trace names that failed the numerical check
    '''
    # Imported locally to avoid a hard dependency at module import time.
    from qucstest.colors import pb, pr, pg

    # Bail out early if either input file is missing.
    if not os.path.isfile(ref_dataset):
        sys.exit('No reference dataset: %s' % ref_dataset)
    if not os.path.isfile(test_dataset):
        sys.exit('No test dataset: %s' % test_dataset)

    # TODO failed also catches if the solver didn't run: output_dataset will
    # be empty and every comparison will fail.

    # list of failed variable comparisons
    failed = []

    logger.info(pb('load data %s' % (ref_dataset)))
    ref = QucsData(ref_dataset)
    logger.info(pb('load data %s' % (test_dataset)))
    test = QucsData(test_dataset)

    logger.info(
        pb('Comparing dependent variables [rtol=%s, atol=%s]' % (rtol, atol)))
    for name in ref.dependent.keys():
        ref_trace = ref.data[name]
        test_trace = test.data[name]
        # check: abs(test - ref) <= (atol + rtol * abs(ref) )
        if not np.allclose(test_trace, ref_trace, rtol=rtol, atol=atol):
            logger.warning(pr(' Failed %s' % (name)))
            failed.append(name)
        else:
            logger.info(pg(' Passed %s' % (name)))
    return failed
def compare_datasets(ref_dataset, test_dataset, rtol=1e-5, atol=1e-8):
    '''
    Compare two datasets for numerical differences.

    Loads both datasets and checks every dependent variable in the reference
    against the same-named trace in the test dataset with numpy.allclose.

    :param ref_dataset: path to the reference dataset file
    :param test_dataset: path to the test dataset file
    :param rtol: relative tolerance (default 1e-5, numpy.allclose default)
    :param atol: absolute tolerance (default 1e-8, numpy.allclose default)
    :return failed: list of trace names that failed the numerical check
    '''
    # Imported locally to avoid a hard dependency at module import time.
    from qucstest.colors import pb, pr, pg

    # Bail out early if either input file is missing.
    if not os.path.isfile(ref_dataset):
        sys.exit('No reference dataset: %s' % ref_dataset)
    if not os.path.isfile(test_dataset):
        # BUG FIX: was 'rest_dataset', an undefined name that raised a
        # NameError instead of printing the intended error message.
        sys.exit('No test dataset: %s' % test_dataset)

    # TODO failed also catches if the solver didn't run: output_dataset will
    # be empty and every comparison will fail.

    # list of failed variable comparisons
    failed = []

    logger.info(pb('load data %s' % (ref_dataset)))
    ref = QucsData(ref_dataset)
    logger.info(pb('load data %s' % (test_dataset)))
    test = QucsData(test_dataset)

    logger.info(
        pb('Comparing dependent variables [rtol=%s, atol=%s]' % (rtol, atol)))
    for name in ref.dependent.keys():
        ref_trace = ref.data[name]
        test_trace = test.data[name]
        # check: abs(test - ref) <= (atol + rtol * ref)
        if not np.allclose(test_trace, ref_trace, rtol=rtol, atol=atol):
            logger.warning(pr(' Failed %s' % (name)))
            failed.append(name)
        else:
            logger.info(pg(' Passed %s' % (name)))
    return failed
skip = args.exclude with open(skip) as fp: for line in fp: skip_proj = line.split(',')[0] if skip_proj in testsuite: print py('Skipping %s' %skip_proj) testsuite.remove(skip_proj) if args.include: add = args.include include = [] with open(add) as fp: for line in fp: proj = line.split(',')[0] if proj in testsuite: print pg('Including %s' %proj) include.append(proj) if include: testsuite = include # Toggle if any test fail returnStatus = 0 if args.qucs or args.qucsator or args.project: print '\n' print pb('******************************************') print pb('** Test suite - Selected Test Projects **') print pb('******************************************') # Print list of selected tests
skip = args.exclude with open(skip) as fp: for line in fp: skip_proj = line.split(',')[0] if skip_proj in testsuite: print py('Skipping %s' % skip_proj) testsuite.remove(skip_proj) if args.include: add = args.include include = [] with open(add) as fp: for line in fp: proj = line.split(',')[0] if proj in testsuite: print pg('Including %s' % proj) include.append(proj) if include: testsuite = include # Toggle if any test fail returnStatus = 0 if args.qucs or args.qucsator or args.project: print '\n' print pb('******************************************') print pb('** Test suite - Selected Test Projects **') print pb('******************************************') # Print list of selected tests pprint.pprint(testsuite)