def test_gen_bit_vector_getrandbits():
     print("	Testing gen_bit_vector_getrandbits().")
     prompt = "	... Test random bit vector generation: default 16 bits.	{}"
     # Number of discrete values representing a random signal/"process".
     k = 16
     statistical_analysis.increment_number_test_cases_used()
     temp_rand_signal = rand_signal_generator.gen_bit_vector_getrandbits()
     #print("!!!	temp_rand_signal is:",temp_rand_signal,".")
     if (k == len(temp_rand_signal)) and (
         (0 in temp_rand_signal) or
         (1 in temp_rand_signal)) and (not (-1 in temp_rand_signal)) and (
             not (3.14 in temp_rand_signal)):
         print(prompt.format("OK"))
         statistical_analysis.increment_number_test_cases_passed()
     else:
         print(prompt.format("FAIL!!!"))
     prompt = "	... Test random bit vector generation: 8 bits.		{}"
     k = 8
     statistical_analysis.increment_number_test_cases_used()
     temp_rand_signal = rand_signal_generator.gen_bit_vector_getrandbits(k)
     #print("!!!	temp_rand_signal is:",temp_rand_signal,".")
     if (k == len(temp_rand_signal)) and (
         (0 in temp_rand_signal) or
         (1 in temp_rand_signal)) and (not (-1 in temp_rand_signal)) and (
             not (3.14 in temp_rand_signal)):
         print(prompt.format("OK"))
         statistical_analysis.increment_number_test_cases_passed()
     else:
         print(prompt.format("FAIL!!!"))
 def test_raising_graph_error():
     print("")
     print("==	Testing the graph_error class/module.")
     try:
         prompt = "	... Test: raise graph_error exception, 2 arguments	{}"
         statistical_analysis.increment_number_test_cases_used()
         #raise graph_error("Can graph_error be caught")
         #raise utilities.custom_exceptions.graph_error("Can graph_error be caught")
         raise graph_error("Can graph_error be caught?", "gyou")
     #except utilities.custom_exceptions.graph_error:
     except graph_error:
         print(prompt.format("OK"))
         statistical_analysis.increment_number_test_cases_passed()
     else:
         print(prompt.format("FAIL!!!"))
     try:
         prompt = "	... Test: raise graph_error exception, 1 argument	{}"
         statistical_analysis.increment_number_test_cases_used()
         #raise graph_error("Can graph_error be caught")
         #raise utilities.custom_exceptions.graph_error("Can graph_error be caught")
         raise graph_error("Can graph_error be caught?")
     #except utilities.custom_exceptions.graph_error:
     except graph_error:
         print(prompt.format("OK"))
         statistical_analysis.increment_number_test_cases_passed()
     else:
         print(prompt.format("FAIL!!!"))
    def test_get_absolute_path_to_store_results():
        print("=	Testing get_absolute_path_to_store_results() method.")
        prompt = "	... Test if static variable = return value		{}"
        statistical_analysis.increment_number_test_cases_used()
        if misc.absolute_path_to_store_results == misc.get_absolute_path_to_store_results(
        ):
            print(prompt.format("OK"))
            statistical_analysis.increment_number_test_cases_passed()
        else:
            print(prompt.format("FAIL!!!"))
        """
			Temporarily reassign the path to test if invalid
				paths are processed correctly.
		"""
        temp_path = misc.get_absolute_path_to_store_results()
        misc.absolute_path_to_store_results = "/not/a/real/path"
        prompt = "	... Test if return value = 'None' for invalid path	{}"
        statistical_analysis.increment_number_test_cases_used()
        if misc.get_absolute_path_to_store_results() is None:
            print(prompt.format("OK"))
            statistical_analysis.increment_number_test_cases_passed()
        else:
            print(prompt.format("FAIL!!!"))
        # Reassign correct path back to (static) variable.
        misc.absolute_path_to_store_results = temp_path
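# The test above expects get_absolute_path_to_store_results() to echo the
# module-level path variable when it is valid and to return None once the
# variable is pointed at "/not/a/real/path". A hedged sketch of that
# behaviour, assuming "valid" just means the directory exists; the real
# misc module may apply stricter checks.
import os

absolute_path_to_store_results_sketch = os.path.expanduser("~")

def get_absolute_path_to_store_results_sketch():
    path = absolute_path_to_store_results_sketch
    return path if os.path.isdir(path) else None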
 def test_file_io_operations_open_file_object_write_results():
     prompt = "	Test: file_io_BLAH.open_file_object_write_results()	{}"
     statistical_analysis.increment_number_test_cases_used()
     results_f_obj = file_io_operations.open_file_object_write_results()
     if results_f_obj:
         print(prompt.format("OK"))
         statistical_analysis.increment_number_test_cases_passed()
     else:
         print(prompt.format("FAIL!!!"))
     file_io_operations.close_file_object(results_f_obj)
 def test_add_commit_push_updates_to_git_repository():
     print("=	Testing add_commit_push_updates_to_git_repository() method.")
     prompt = "	... Test carrying out Git add/commit/push operations.	{}"
     statistical_analysis.increment_number_test_cases_used()
     if misc.add_commit_push_updates_to_git_repository(
             "Update build via Python script"):
         print(prompt.format("OK"))
         statistical_analysis.increment_number_test_cases_passed()
     else:
         print(prompt.format("FAIL!!!"))
 def test_constructors_and_equality_comparison():
     a = graph()
     prompt = "	... Test default constructor of graph	 		{}."
     statistical_analysis.increment_number_test_cases_used()
     if (a is not None) and (not a.is_self_loop_pseudograph()) and (
             not a.is_multigraph()) and (not a.is_hypergraph()) and (
                 not a.is_cyclic()):
         print(prompt.format("OK"))
         statistical_analysis.increment_number_test_cases_passed()
     else:
         print(prompt.format("FAIL!!!"))
    def test_check_absolute_path_to_store_results():
        print("=	Testing check_absolute_path_to_store_results() method.")
        prompt = "	... Test: invalid path=None, invalid filename		{}"
        statistical_analysis.increment_number_test_cases_used()
        if not misc.check_absolute_path_to_store_results(None, "a_filename"):
            print(prompt.format("OK"))
            statistical_analysis.increment_number_test_cases_passed()
        else:
            print(prompt.format("FAIL!!!"))
        prompt = "	... Test: invalid path, invalid filename=None		{}"
        statistical_analysis.increment_number_test_cases_used()
        if not misc.check_absolute_path_to_store_results(
                "/invalid/path/to/file", None):
            print(prompt.format("OK"))
            statistical_analysis.increment_number_test_cases_passed()
        else:
            print(prompt.format("FAIL!!!"))
        prompt = "	... Test: invalid path, invalid filename		{}"
        statistical_analysis.increment_number_test_cases_used()
        #print("misc.check_absolute_path_to_store_results() is:",misc.check_absolute_path_to_store_results("/invalid/path/to/file","a_filename"),"=")
        if not misc.check_absolute_path_to_store_results(
                "/invalid/path/to/file", "a_filename"):
            print(prompt.format("OK"))
            statistical_analysis.increment_number_test_cases_passed()
        else:
            print(prompt.format("FAIL!!!"))
        valid_path = os.path.expanduser("./")
        #print("valid_path is:",valid_path,"=")
        prompt = "	... Test: valid path, invalid filename			{}"
        statistical_analysis.increment_number_test_cases_used()
        #print("misc.check_absolute_path_to_store_results() is:",misc.check_absolute_path_to_store_results("/invalid/path/to/file","a_filename"),"=")
        if not misc.check_absolute_path_to_store_results(
                valid_path, "a_filename"):
            print(prompt.format("OK"))
            statistical_analysis.increment_number_test_cases_passed()
        else:
            print(prompt.format("FAIL!!!"))
        """
			Alternatively, instead of the Makefile, use:
			+ README.md
			+ doxygen.config
		"""
        valid_filename = "makefile"
        prompt = "	... Test: valid path, valid filename			{}"
        statistical_analysis.increment_number_test_cases_used()
        #print("misc.check_absolute_path_to_store_results() is:",misc.check_absolute_path_to_store_results("/invalid/path/to/file","a_filename"),"=")
        if misc.check_absolute_path_to_store_results(valid_path,
                                                     valid_filename):
            print(prompt.format("OK"))
            statistical_analysis.increment_number_test_cases_passed()
        else:
            print(prompt.format("FAIL!!!"))
	def test_convert_time_in_seconds_to_DD_HH_MM_SS():
		print("	Testing convert_time_in_seconds_to_DD_HH_MM_SS() method.")
		prompt = "	... Test: convert_time_in_seconds_...() = 0s		{}."
		statistical_analysis.increment_number_test_cases_used()
		if "0:00:00" == str(execution_time_measurement_no_ns.convert_time_in_seconds_to_DD_HH_MM_SS()):
			print(prompt .format("OK"))
			statistical_analysis.increment_number_test_cases_passed()
		else:
			print(prompt .format("FAIL!!!"))
		prompt = "	... Test: convert_time_in_seconds_...(32:17) = 32:17	{}."
		statistical_analysis.increment_number_test_cases_used()
		if "0:32:17" == str(execution_time_measurement_no_ns.convert_time_in_seconds_to_DD_HH_MM_SS(32*60+17)):
			print(prompt .format("OK"))
			statistical_analysis.increment_number_test_cases_passed()
		else:
			print(prompt .format("FAIL!!!"))
		prompt = "	... Test: convert_time_in_seconds_...(15:51:09) is ok	{}."
		statistical_analysis.increment_number_test_cases_used()
		if "15:51:09" == str(execution_time_measurement_no_ns.convert_time_in_seconds_to_DD_HH_MM_SS(15*60*60+51*60+9)):
			print(prompt .format("OK"))
			statistical_analysis.increment_number_test_cases_passed()
		else:
			print(prompt .format("FAIL!!!"))
		prompt = "	... Test: convert_time_...(73 days, 22:04:58) is ok	{}."
		statistical_analysis.increment_number_test_cases_used()
		if "73 days, 22:04:58" == str(execution_time_measurement_no_ns.convert_time_in_seconds_to_DD_HH_MM_SS(73*24*60*60+22*60*60+4*60+58)):
			print(prompt .format("OK"))
			statistical_analysis.increment_number_test_cases_passed()
		else:
			print(prompt .format("FAIL!!!"))
 def test_is_valid_month():
     print("	Testing date_time_operations.is_valid_month() method.")
     prompt = "	... Test: month = 0					{}"
     statistical_analysis.increment_number_test_cases_used()
     if not date_time_operations.is_valid_month(0):
         print(prompt.format("OK"))
         statistical_analysis.increment_number_test_cases_passed()
     else:
         print(prompt.format("FAIL!!!"))
     prompt = "	... Test: month = -7					{}"
     statistical_analysis.increment_number_test_cases_used()
     if not date_time_operations.is_valid_month(-7):
         print(prompt.format("OK"))
         statistical_analysis.increment_number_test_cases_passed()
     else:
         print(prompt.format("FAIL!!!"))
     prompt = "	... Test: month > 12					{}"
     statistical_analysis.increment_number_test_cases_used()
     if not date_time_operations.is_valid_month(15):
         print(prompt.format("OK"))
         statistical_analysis.increment_number_test_cases_passed()
     else:
         print(prompt.format("FAIL!!!"))
     prompt = "	... Test: month = 10					{}"
     statistical_analysis.increment_number_test_cases_used()
     if date_time_operations.is_valid_month(10):
         print(prompt.format("OK"))
         statistical_analysis.increment_number_test_cases_passed()
     else:
         print(prompt.format("FAIL!!!"))
 def test_is_valid_year():
     print("	Testing date_time_operations.is_valid_year() method.")
     prompt = "	... Test: year < 2014					{}"
     statistical_analysis.increment_number_test_cases_used()
     if not date_time_operations.is_valid_year(2010):
         print(prompt.format("OK"))
         statistical_analysis.increment_number_test_cases_passed()
     else:
         print(prompt.format("FAIL!!!"))
     prompt = "	... Test: year = -467					{}"
     statistical_analysis.increment_number_test_cases_used()
     if not date_time_operations.is_valid_year(-467):
         print(prompt.format("OK"))
         statistical_analysis.increment_number_test_cases_passed()
     else:
         print(prompt.format("FAIL!!!"))
     prompt = "	... Test: year > 3645					{}"
     statistical_analysis.increment_number_test_cases_used()
     if not date_time_operations.is_valid_year(3645):
         print(prompt.format("OK"))
         statistical_analysis.increment_number_test_cases_passed()
     else:
         print(prompt.format("FAIL!!!"))
     prompt = "	... Test: year = 2019					{}"
     statistical_analysis.increment_number_test_cases_used()
     if date_time_operations.is_valid_year(2019):
         print(prompt.format("OK"))
         statistical_analysis.increment_number_test_cases_passed()
     else:
         print(prompt.format("FAIL!!!"))
 def test_file_io_operations_check_file_extension():
     print("	Testing file_io_operations.check_file_extension() method.")
     prompt = "	... Test: same file extension				{}"
     statistical_analysis.increment_number_test_cases_used()
     if file_io_operations.check_file_extension("something.text", ".text"):
         print(prompt.format("OK"))
         statistical_analysis.increment_number_test_cases_passed()
     else:
         print(prompt.format("FAIL!!!"))
     prompt = "	... Test: different file extensions			{}"
     statistical_analysis.increment_number_test_cases_used()
     if not file_io_operations.check_file_extension("something.tar.gz",
                                                    ".rtsdtfyg"):
         print(prompt.format("OK"))
         statistical_analysis.increment_number_test_cases_passed()
     else:
         print(prompt.format("FAIL!!!"))
 def test_json_config_file_parser():
     # -------------------------------------------------------
     #print("	... Test parsing of JSON configuration file	{}.")
     prompt = "	... Test parsing of JSON configuration file		{}."
     statistical_analysis.increment_number_test_cases_used()
     if config_parser.parse_configuration_file():
         #prompt = "	Test: Congleton2017_json				{}"
         print(prompt.format("OK"))
         statistical_analysis.increment_number_test_cases_passed()
     else:
         print(prompt.format("FAIL!!!"))
     prompt = "	... Test writing results to storage location		{}."
     statistical_analysis.increment_number_test_cases_used()
     if config_parser_tester.test_writing_to_results_file():
         print(prompt.format("OK"))
         statistical_analysis.increment_number_test_cases_passed()
     else:
         print(prompt.format("FAIL!!!"))
 def test_file_io_operations_with_invalid_file():
     print("	... Testing file operations with invalid file.")
     filename = "nonsense"
     prompt = "	Test: file_io_operations.is_path_valid(...)	{}"
     statistical_analysis.increment_number_test_cases_used()
     if not file_io_operations.is_path_valid(filename):
         print(prompt.format("	OK"))
         statistical_analysis.increment_number_test_cases_passed()
     else:
         print(prompt.format("	FAIL!!!"))
     prompt = "	Test: file_io_operations.open_file_object_read(...)	{}"
     statistical_analysis.increment_number_test_cases_used()
     try:
         f_obj = file_io_operations.open_file_object_read(filename)
         print(prompt.format("FAIL!!!"))
     except Exception:
         print(prompt.format("OK"))
         statistical_analysis.increment_number_test_cases_passed()
     prompt = "	Test: file_io_operations.open_file_object_write(...)	{}"
     statistical_analysis.increment_number_test_cases_used()
     try:
         f_obj = file_io_operations.open_file_object_write(filename)
         print(prompt.format("OK"))
         statistical_analysis.increment_number_test_cases_passed()
     except Exception:
         print(prompt.format("FAIL!!!"))
     prompt = "	Test: file_io_ops[BLAH].open_file_object_write_new(...)	{}"
     statistical_analysis.increment_number_test_cases_used()
     try:
         f_obj = file_io_operations.open_file_object_write_new(filename)
         print(prompt.format("OK"))
         statistical_analysis.increment_number_test_cases_passed()
     except Exception:
         print(prompt.format("FAIL!!!"))
     try:
         #	Close the file object, and delete the file.
         statistical_analysis.increment_number_test_cases_used()
         file_io_operations.close_file_object(f_obj)
         os.remove(filename)
         print("	Test: file_io_operations.close_file_object(...)		OK")
         statistical_analysis.increment_number_test_cases_passed()
     except Exception:
         print(prompt.format("FAIL!!!"))
 def test_statistical_analysis():
     print("")
     print("==	Testing class: statistical_analysis")
     print("1) Number of test cases passed:		{}".format(
         statistical_analysis.number_test_cases_passed))
     print("2) Number of test cases used:		{}".format(
         statistical_analysis.number_test_cases_used))
     print("Proportion of test cases passed:	{}".format(
         statistical_analysis.get_test_cases_passed_average()))
     for x in range(1, 7):
         statistical_analysis.increment_number_test_cases_used()
         statistical_analysis.increment_number_test_cases_passed()
         print("Value of x is: {}.".format(x))
         print("Number of test cases passed:	{}".format(
             statistical_analysis.number_test_cases_passed))
         print("Number of test cases used:	{}".format(
             statistical_analysis.number_test_cases_used))
         print("Proportion of test cases passed:	{}".format(
             statistical_analysis.get_test_cases_passed_average()))
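# The counters used throughout these tests behave like class-level state
# with increment helpers and a pass-rate accessor. A minimal sketch of such
# a statistical_analysis class, assuming class attributes and classmethods;
# the real class may add reporting or reset methods.
class statistical_analysis_sketch:
    number_test_cases_passed = 0
    number_test_cases_used = 0

    @classmethod
    def increment_number_test_cases_used(cls):
        cls.number_test_cases_used += 1

    @classmethod
    def increment_number_test_cases_passed(cls):
        cls.number_test_cases_passed += 1

    @classmethod
    def get_test_cases_passed_average(cls):
        # Avoid division by zero before any test case has been recorded.
        if 0 == cls.number_test_cases_used:
            return 0.0
        return cls.number_test_cases_passed / cls.number_test_cases_used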
 def test_file_io_operations_get_filename_suffixes():
     print("	... Testing method to access filename suffixes.")
     set_of_suffixes = file_io_operations.get_filename_suffixes()
     suffix_1 = file_io_operations.results_suffix
     suffix_2 = file_io_operations.regression_testing_results_suffix
     prompt = "	Test: is suffix_1 & suffix_1 in the set 	{}"
     statistical_analysis.increment_number_test_cases_used()
     if (suffix_1 in set_of_suffixes) and (suffix_2 in set_of_suffixes):
         print(prompt.format("	OK"))
         statistical_analysis.increment_number_test_cases_passed()
     else:
         print(prompt.format("	FAIL!!!"))
     suffix_ne = "non-existent filename suffix"
     prompt = "	Test: is non-existent fname suffix in set	{}"
     statistical_analysis.increment_number_test_cases_used()
     if not (suffix_ne in set_of_suffixes):
         print(prompt.format("	OK"))
         statistical_analysis.increment_number_test_cases_passed()
     else:
         print(prompt.format("	FAIL!!!"))
 def test_statistical_analysis(op_f_obj):
     op_f_obj.write("")
     op_f_obj.write("==	Testing class: statistical_analysis")
     op_f_obj.write("	Number of initial test cases passed: {}			{}".format(
         statistical_analysis.number_test_cases_passed, "OK"))
     op_f_obj.write("	Number of initial test cases used: {}			{}".format(
         statistical_analysis.number_test_cases_used, "OK"))
     op_f_obj.write(
         "	Proportion of initial test cases passed: {}		{}".format(
             statistical_analysis.get_test_cases_passed_average(), "OK"))
     for x in range(1, 7):
         statistical_analysis.increment_number_test_cases_used()
         statistical_analysis.increment_number_test_cases_passed()
         op_f_obj.write("	Test case number is: {}					{}.".format(x, "OK"))
         op_f_obj.write("	Number of test cases passed: {}				{}".format(
             statistical_analysis.number_test_cases_passed, "OK"))
         op_f_obj.write("	Number of test cases used: {}				{}".format(
             statistical_analysis.number_test_cases_used, "OK"))
         op_f_obj.write("	Proportion of test cases passed: {}			{}".format(
             statistical_analysis.get_test_cases_passed_average(), "OK"))
 def test_file_io_operations_on_files_with_same_content():
     print("	Testing file operations on files with the same content.")
     prompt = "	... Test: file_io_operations.file_comparison(...)	{}"
     statistical_analysis.increment_number_test_cases_used()
     if file_io_operations.file_comparison(
             "notes/mit-license-exact-copy.text",
             "notes/mit-license-same-copy.text"):
         print(prompt.format("OK"))
         statistical_analysis.increment_number_test_cases_passed()
     else:
         print(prompt.format("FAIL!!!"))
     print("	Testing file operations on files with the different content.")
     prompt = "	... Test: file_io_operations.file_comparison(...)	{}"
     statistical_analysis.increment_number_test_cases_used()
     if file_io_operations.file_comparison(
             "notes/mit-license-exact-copy.text",
             "notes/mit-license-different-copy.text"):
         print(prompt.format("FAIL!!!"))
     else:
         print(prompt.format("OK"))
         statistical_analysis.increment_number_test_cases_passed()
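# file_comparison() above must report True for files with identical content
# and False otherwise. A hedged sketch using the standard filecmp module
# with shallow=False so contents (not just os.stat metadata) are compared;
# the real file_io_operations routine may compare the files line by line.
import filecmp

def file_comparison_sketch(first_filename, second_filename):
    return filecmp.cmp(first_filename, second_filename, shallow=False)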
 def test_pairs_of_accessor_mutator_methods():
     a = vertex_ug()
     #print("a.get_id():::",a.get_id(),"=")
     prompt = "	... Test: get_id(), vertex_ug()			 	{}."
     statistical_analysis.increment_number_test_cases_used()
     #if (a.get_id()) == sys.maxsize:
     #if sys.maxsize == a.get_id():
     if sys.maxsize == a.get_id():
         print(prompt.format("OK"))
         statistical_analysis.increment_number_test_cases_passed()
     else:
         print(prompt.format("FAIL!!!"))
     b = 72342787
     a = vertex_ug(b)
     prompt = "	... Test: get_id(), vertex_ug(72342787) 		{}."
     statistical_analysis.increment_number_test_cases_used()
     #if (a.get_id()) == sys.maxsize:
     #if sys.maxsize == a.get_id():
     if b == a.get_id():
         print(prompt.format("OK"))
         statistical_analysis.increment_number_test_cases_passed()
     else:
         print(prompt.format("FAIL!!!"))
     b = 234
     a.set_id(b)
     prompt = "	... Test: set_id(234)		 			{}."
     statistical_analysis.increment_number_test_cases_used()
     #if (a.get_id()) == sys.maxsize:
     #if sys.maxsize == a.get_id():
     if b == a.get_id():
         print(prompt.format("OK"))
         statistical_analysis.increment_number_test_cases_passed()
     else:
         print(prompt.format("FAIL!!!"))
 def test_file_io_operations_get_file_extension():
     print("	Testing file_io_operations.get_file_extension() method.")
     prompt = "	... Test: one file extension				{}"
     statistical_analysis.increment_number_test_cases_used()
     if file_io_operations.get_file_extension("something.text") == ".text":
         print(prompt.format("OK"))
         statistical_analysis.increment_number_test_cases_passed()
     else:
         print(prompt.format("FAIL!!!"))
     prompt = "	... Test: double/dual file extensions			{}"
     statistical_analysis.increment_number_test_cases_used()
     if file_io_operations.get_file_extension(
             "something.tar.gz") == ".tar.gz":
         print(prompt.format("OK"))
         statistical_analysis.increment_number_test_cases_passed()
     else:
         print(prompt.format("FAIL!!!"))
         #print(file_io_operations.get_file_extension("something.tar.gz"))
     prompt = "	... Test: multiple file extensions			{}"
     statistical_analysis.increment_number_test_cases_used()
     if file_io_operations.get_file_extension(
             "something.pdf.tar.gz") == ".pdf.tar.gz":
         print(prompt.format("OK"))
         statistical_analysis.increment_number_test_cases_passed()
     else:
         print(prompt.format("FAIL!!!"))
    def test_psl_uniform():
        print("	Testing psl_uniform().")
        prompt = "	... Test: incorrect type of signal, 'whatever'.		{}"
        statistical_analysis.increment_number_test_cases_used()
        temp_rand_number = prng.psl_uniform("whatever")
        """
			Currently supported pseudorandom number generation
				are for binary numbers or values of RTW signals.
			Since unsupported pseudorandom number generation
				defaults to the mode for binary numbers, I would
				compare the generated number to 0 and 1.
		"""
        if (1 == temp_rand_number) or (0 == temp_rand_number):
            print(prompt.format("OK"))
            statistical_analysis.increment_number_test_cases_passed()
        else:
            print(prompt.format("FAIL!!!"))
        prompt = "	... Test: for bit vector signal, 'bv'.			{}"
        statistical_analysis.increment_number_test_cases_used()
        temp_rand_number = prng.psl_uniform(prng.bv_signal)
        if (1 == temp_rand_number) or (0 == temp_rand_number):
            print(prompt.format("OK"))
            statistical_analysis.increment_number_test_cases_passed()
        else:
            print(prompt.format("FAIL!!!"))
        prompt = "	... Test: for RTW signal, 'rtw'.		{}"
        statistical_analysis.increment_number_test_cases_used()
        temp_rand_number = prng.psl_uniform(prng.rtw_signal)
        if (1 == temp_rand_number) or (-1 == temp_rand_number):
            print(prompt.format("	OK"))
            statistical_analysis.increment_number_test_cases_passed()
        else:
            print(prompt.format("FAIL!!!"))
	def test_check_elapsed_time():
		print("	Testing check_elapsed_time() method.")
		prompt = "	... Test: check_elapsed_time() works			{}."
		statistical_analysis.increment_number_test_cases_used()
		with warnings.catch_warnings(record=True) as w:
			# Check if default elapsed time = 0, and triggers warning.
			execution_time_measurement_no_ns.check_elapsed_time()
			print(prompt .format("OK"))
			statistical_analysis.increment_number_test_cases_passed()
			prompt = "	... Test: check_elapsed_time(-235435) works		{}."
			statistical_analysis.increment_number_test_cases_used()
			with warnings.catch_warnings(record=True) as w:
				# Check if default elapsed time < 0, and triggers warning.
				execution_time_measurement_no_ns.check_elapsed_time(-235435)
				print(prompt .format("OK"))
				statistical_analysis.increment_number_test_cases_passed()
		prompt = "	... Test: check_elapsed_time(5678) works		{}."
		statistical_analysis.increment_number_test_cases_used()
		"""
			Check if default elapsed time > 0, and does not trigger
				any warning.
		"""
		execution_time_measurement_no_ns.check_elapsed_time(5678)
		print(prompt .format("OK"))
		statistical_analysis.increment_number_test_cases_passed()
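# check_elapsed_time() above is wrapped in warnings.catch_warnings because
# non-positive elapsed times are expected to emit a warning rather than
# raise. A sketch of that behaviour; the default argument of 0 and the
# warning text are assumptions about the real
# execution_time_measurement_no_ns module.
import warnings

def check_elapsed_time_sketch(elapsed_time=0):
    if elapsed_time <= 0:
        warnings.warn("Elapsed time should be a positive number of seconds.")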
 def test_date_time_tokenization():
     print("	Testing date & time tokenization method.")
     # Number of tokens in the DD-MM-YY-HR-MN-SS-US format.
     number_of_tokens = 7
     prompt = "	... Test: invalid DD-MM-YY-HR-MN-SS-US format		{}"
     statistical_analysis.increment_number_test_cases_used()
     tokens = date_time_operations.get_date_time_tokens_of_filename(
         "12-2018-14-11-50-912982.invalid")
     if (tokens is not None) and (number_of_tokens == len(tokens)):
         print(prompt.format("FAIL!!!"))
     else:
         print(prompt.format("OK"))
         statistical_analysis.increment_number_test_cases_passed()
     prompt = "	... Test: with None object				{}"
     statistical_analysis.increment_number_test_cases_used()
     tokens = None
     if (tokens is not None) and (number_of_tokens == len(tokens)):
         print(prompt.format("FAIL!!!"))
     else:
         print(prompt.format("OK"))
         statistical_analysis.increment_number_test_cases_passed()
     prompt = "	... Test: valid DD-MM-YY-HR-MN-SS-US format		{}"
     statistical_analysis.increment_number_test_cases_used()
     tokens = date_time_operations.get_date_time_tokens_of_filename(
         generate_filename.create_filename())
     if (tokens is not None) and (number_of_tokens == len(tokens)):
         print(prompt.format("OK"))
         statistical_analysis.increment_number_test_cases_passed()
     else:
         print(prompt.format("FAIL!!!"))
 def test_is_valid_30_day_month():
     print("	Testing date_time_operations.is_valid_30_day_month() method.")
     prompt = "	... Test: not 30-day month, mm = 2			{}"
     statistical_analysis.increment_number_test_cases_used()
     if date_time_operations.is_valid_30_day_month(17, 2):
         print(prompt.format("OK"))
         statistical_analysis.increment_number_test_cases_passed()
     else:
         print(prompt.format("FAIL!!!"))
     prompt = "	... Test: not 30-day month, mm = 12			{}"
     statistical_analysis.increment_number_test_cases_used()
     if date_time_operations.is_valid_30_day_month(17, 12):
         print(prompt.format("OK"))
         statistical_analysis.increment_number_test_cases_passed()
     else:
         print(prompt.format("FAIL!!!"))
     prompt = "	... Test: 30-day month, dd < 0				{}"
     statistical_analysis.increment_number_test_cases_used()
     if not date_time_operations.is_valid_30_day_month(-8, 6):
         print(prompt.format("OK"))
         statistical_analysis.increment_number_test_cases_passed()
     else:
         print(prompt.format("FAIL!!!"))
     prompt = "	... Test: 30-day month, dd = 0				{}"
     statistical_analysis.increment_number_test_cases_used()
     if not date_time_operations.is_valid_30_day_month(0, 4):
         print(prompt.format("OK"))
         statistical_analysis.increment_number_test_cases_passed()
     else:
         print(prompt.format("FAIL!!!"))
     prompt = "	... Test: 30-day month, dd = 27				{}"
     statistical_analysis.increment_number_test_cases_used()
     if date_time_operations.is_valid_30_day_month(27, 11):
         print(prompt.format("OK"))
         statistical_analysis.increment_number_test_cases_passed()
     else:
         print(prompt.format("FAIL!!!"))
	def test_get_elapsed_time():
		print("	Testing get_elapsed_time() method.")
		# Set initial time stamp.
		execution_time_measurement_no_ns.set_initial_timestamp("perf_counter")
		prompt = "	... Test: for perf_counter				{}."
		statistical_analysis.increment_number_test_cases_used()
		elapsed_t = execution_time_measurement_no_ns.get_elapsed_time()
		execution_time_measurement_no_ns.check_elapsed_time(elapsed_t)
		print(prompt .format("OK"))
		statistical_analysis.increment_number_test_cases_passed()
		# Reset and set initial time stamp.
		execution_time_measurement_no_ns.reset_initial_timestamp()
		if execution_time_measurement_no_ns.invalid_timestamp != execution_time_measurement_no_ns.get_initial_timestamp():
			warnings.warn("Initial time stamp not reset between test cases.")
		execution_time_measurement_no_ns.set_initial_timestamp("process_time")
		prompt = "	... Test: for process_time				{}."
		statistical_analysis.increment_number_test_cases_used()
		elapsed_t = execution_time_measurement_no_ns.get_elapsed_time()
		execution_time_measurement_no_ns.check_elapsed_time(elapsed_t)
		print(prompt .format("OK"))
		statistical_analysis.increment_number_test_cases_passed()
		# Reset and set initial time stamp.
		execution_time_measurement_no_ns.reset_initial_timestamp()
		if execution_time_measurement_no_ns.invalid_timestamp != execution_time_measurement_no_ns.get_initial_timestamp():
			warnings.warn("Initial time stamp not reset between test cases.")
		execution_time_measurement_no_ns.set_initial_timestamp("time")
		prompt = "	... Test: for time					{}."
		statistical_analysis.increment_number_test_cases_used()
		elapsed_t = execution_time_measurement_no_ns.get_elapsed_time()
		execution_time_measurement_no_ns.check_elapsed_time(elapsed_t)
		print(prompt .format("OK"))
		statistical_analysis.increment_number_test_cases_passed()
		# Reset and set initial time stamp.
		execution_time_measurement_no_ns.reset_initial_timestamp()
		if execution_time_measurement_no_ns.invalid_timestamp != execution_time_measurement_no_ns.get_initial_timestamp():
			warnings.warn("Initial time stamp not reset between test cases.")
		execution_time_measurement_no_ns.set_initial_timestamp("monotonic")
		prompt = "	... Test: for monotonic					{}."
		statistical_analysis.increment_number_test_cases_used()
		elapsed_t = execution_time_measurement_no_ns.get_elapsed_time()
		execution_time_measurement_no_ns.check_elapsed_time(elapsed_t)
		print(prompt .format("OK"))
		statistical_analysis.increment_number_test_cases_passed()
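# test_get_elapsed_time() cycles through the perf_counter, process_time,
# time, and monotonic clocks: set an initial timestamp, read an elapsed
# time, then reset. A minimal sketch of such a module-level mechanism,
# assuming invalid_timestamp is a sentinel; the real
# execution_time_measurement_no_ns module may differ in detail.
import time

invalid_timestamp_sketch = None
_clocks_sketch = {
    "perf_counter": time.perf_counter,
    "process_time": time.process_time,
    "time": time.time,
    "monotonic": time.monotonic,
}
_initial_timestamp_sketch = invalid_timestamp_sketch
_clock_in_use_sketch = time.perf_counter

def set_initial_timestamp_sketch(clock_name="perf_counter"):
    global _initial_timestamp_sketch, _clock_in_use_sketch
    # Unknown clock names fall back to perf_counter.
    _clock_in_use_sketch = _clocks_sketch.get(clock_name, time.perf_counter)
    _initial_timestamp_sketch = _clock_in_use_sketch()

def reset_initial_timestamp_sketch():
    global _initial_timestamp_sketch
    _initial_timestamp_sketch = invalid_timestamp_sketch

def get_initial_timestamp_sketch():
    return _initial_timestamp_sketch

def get_elapsed_time_sketch():
    # set_initial_timestamp_sketch() must have been called first.
    return _clock_in_use_sketch() - _initial_timestamp_sketch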
    def test_file_io_operations_with_valid_file():
        print("	... Testing file operations with valid file.")
        filename = "notes/mit-license-original-copy.text"
        prompt = "	Test: file_io_operations.is_path_valid(...)	{}"
        statistical_analysis.increment_number_test_cases_used()
        if file_io_operations.is_path_valid(filename):
            print(prompt.format("	OK"))
            statistical_analysis.increment_number_test_cases_passed()
        else:
            print(prompt.format("	FAIL!!!"))
        prompt = "	Test: file_io_operations.open_file_object_read(...)	{}"
        statistical_analysis.increment_number_test_cases_used()
        try:
            f_obj = file_io_operations.open_file_object_read(filename)
            print(prompt.format("OK"))
            statistical_analysis.increment_number_test_cases_passed()
        except Exception:
            print(prompt.format("FAIL!!!"))
        prompt = "	Test: file_io_operations.open_file_object_write(...)	{}"
        statistical_analysis.increment_number_test_cases_used()
        try:
            f_obj = file_io_operations.open_file_object_write(
                file_io_operations.result_repository,
                file_io_operations.results_suffix)
            print(prompt.format("FAIL!!!"))
        except Exception:
            print(prompt.format("OK"))
            statistical_analysis.increment_number_test_cases_passed()
        prompt = "	Test: file_io_ops[BLAH].open_file_object_write_new(...)	{}"
        statistical_analysis.increment_number_test_cases_used()
        try:
            f_obj = file_io_operations.open_file_object_write_new(
                "mit-license-write-new-copy.text")
            print(prompt.format("OK"))
            statistical_analysis.increment_number_test_cases_passed()
        except Exception:
            print(prompt.format("FAIL!!!"))
        """
			Close the file object to preserve data in the test data file.
			filename = "notes/mit-license.text"
		"""
        file_io_operations.close_file_object(f_obj)
    def test_gen_rand_signal_uniform_distributn():
        # Number of discrete values representing a random signal/"process".
        k = 16
        print("	Testing gen_rand_signal_uniform_distributn().")
        prompt = "	... Test: wrong type of signal.				{}"
        statistical_analysis.increment_number_test_cases_used()
        temp_rand_signal = rand_signal_generator.gen_rand_signal_uniform_distributn(
            "whatever", k)
        if (k == len(temp_rand_signal)):
            print(prompt.format("OK"))
            statistical_analysis.increment_number_test_cases_passed()
        else:
            print(prompt.format("FAIL!!!"))
        prompt = "	... Test: type of signal = RTW.				{}"
        statistical_analysis.increment_number_test_cases_used()
        temp_rand_signal = rand_signal_generator.gen_rand_signal_uniform_distributn(
            rand_signal_generator.rtw_signal, k)
        """
			Generated RTW signal of length k must have k
				values/digits.
			Each value/digit in the RTW signal should also be
				either -1 or 1 exclusively, and not any other
				value (e.g., 0, 1234, or 3.14).
		"""
        if (k == len(temp_rand_signal)) and (
            (-1 in temp_rand_signal) or
            (1 in temp_rand_signal)) and (not (0 in temp_rand_signal)) and (
                not (3.14 in temp_rand_signal)):
            print(prompt.format("OK"))
            statistical_analysis.increment_number_test_cases_passed()
        else:
            print(prompt.format("FAIL!!!"))
        """
			Generated bit vector signal of length k must have k
				values/digits.
			Each value/digit in the bit vector should also be
				either 0 or 1 exclusively, and not any other
				value (e.g., -1, 1234, or 3.14).
		"""
        prompt = "	... Test: type of signal = bit vector.			{}"
        statistical_analysis.increment_number_test_cases_used()
        temp_rand_signal = rand_signal_generator.gen_rand_signal_uniform_distributn(
            rand_signal_generator.bv_signal, k)
        if (k == len(temp_rand_signal)) and (
            (0 in temp_rand_signal) or
            (1 in temp_rand_signal)) and (not (-1 in temp_rand_signal)) and (
                not (3.14 in temp_rand_signal)):
            print(prompt.format("OK"))
            statistical_analysis.increment_number_test_cases_passed()
        else:
            print(prompt.format("FAIL!!!"))
    def test_store_results():
        print("=	Testing store_results() method.")
        """
		f_obj = open("/Users/zhiyang/Documents/ricerca/risultati_sperimentali/std-cell-library-characterization/2020/random.rnd", 'w+')
		if f_obj is not None:
			print("f_obj is not None object.")
		else:
			print("f_obj is None object.")
		"""
        prompt = "	... Test with valid path to file, non-existent file.	{}"
        statistical_analysis.increment_number_test_cases_used()
        valid_path_non_existent_file = "/Users/zhiyang/Documents/ricerca/risultati_sperimentali/std-cell-library-characterization/2020/random.rnd"
        """
			If a file for this path exists, delete the file to ensure
				reproducibility of regression test results.
		"""
        if os.path.exists(valid_path_non_existent_file):
            os.remove(valid_path_non_existent_file)
        f_obj = misc.store_results(valid_path_non_existent_file)
        if f_obj is not None:
            print(prompt.format("OK"))
            statistical_analysis.increment_number_test_cases_passed()
        else:
            print(prompt.format("FAIL!!!"))
        prompt = "	... Test with valid path to file, existent file.	{}"
        statistical_analysis.increment_number_test_cases_used()
        #if misc.store_results("/Users/zhiyang/Documents/ricerca/risultati_sperimentali/std-cell-library-characterization/2020/february/7-2-2020-9-56-54-334414.txt") is None:
        f_obj = misc.store_results(
            "/Users/zhiyang/Documents/ricerca/risultati_sperimentali/std-cell-library-characterization/2020/february/7-2-2020-9-56-54-334414.txt"
        )
        if f_obj is not None:
            print(prompt.format("OK"))
            statistical_analysis.increment_number_test_cases_passed()
        else:
            print(prompt.format("FAIL!!!"))
        prompt = "	... Test with invalid path to file, non-existent file.	{}"
        statistical_analysis.increment_number_test_cases_used()
        if misc.store_results(
                "/invalid/path/to/file/february/7-2-2020-9-56-54-334414.txt"
        ) is None:
            print(prompt.format("OK"))
            statistical_analysis.increment_number_test_cases_passed()
        else:
            print(prompt.format("FAIL!!!"))
    def test_find_desired_location_for_results():
        #incorrect_format_result = "'filename' needs to have the format: DD-MM-YY-HH-MM-SS-uS.txt."
        print("==	Test: test_find_desired_location_for_results().")
        test_filename = "25-3-2010-5-8-51-9994073289.dwq"
        results_location = misc.find_desired_location_for_results(
            test_filename)
        prompt = "	... Test: filename is 25-3-2010-5-8-51-9994073289.dwq.	{}"
        statistical_analysis.increment_number_test_cases_used()
        if misc.find_desired_location_for_results(test_filename) is None:
            print(prompt.format("OK"))
            statistical_analysis.increment_number_test_cases_passed()
        else:
            print(prompt.format("FAIL!!!"))
        test_filename = "25-3-2010-5-8-51-9407.txt"
        results_location = misc.find_desired_location_for_results(
            test_filename)
        prompt = "	... Test: filename 25-3-2010-5-8-51-9407.txt.		{}"
        statistical_analysis.increment_number_test_cases_used()
        if misc.check_absolute_path_to_store_results(results_location,
                                                     test_filename):
            print(prompt.format("OK"))
            statistical_analysis.increment_number_test_cases_passed()
        else:
            print(prompt.format("FAIL!!!"))
        prompt = "	... Test: 25-3-2010-5-8-51-9407.txt, correct base path?	{}"
        statistical_analysis.increment_number_test_cases_used()
        if results_location.startswith(
                misc.get_absolute_path_to_store_results()):
            print(prompt.format("OK"))
            statistical_analysis.increment_number_test_cases_passed()
        else:
            print(prompt.format("FAIL!!!"))
            """
			print("results_location:",results_location,"=")
			print("misc.get_absolute_path_to_store_results():",misc.get_absolute_path_to_store_results(),"=")
			print(results_location.find(misc.get_absolute_path_to_store_results()))
			"""
        f_obj = misc.store_results(results_location)
        if f_obj is not None:
            f_obj.write(
                "Storage of experimental, simulation, verification, and testing results work."
            )
            file_io_operations.close_file_object(f_obj)
 def test_is_valid_date_in_Feb():
     print("	Testing date_time_operations.is_valid_date_in_Feb() method.")
     prompt = "	... Test: not February, mm = 12				{}"
     statistical_analysis.increment_number_test_cases_used()
     if date_time_operations.is_valid_date_in_Feb(17, 12, 2016):
         print(prompt.format("OK"))
         statistical_analysis.increment_number_test_cases_passed()
     else:
         print(prompt.format("FAIL!!!"))
     prompt = "	... Test: not February, mm = -2				{}"
     statistical_analysis.increment_number_test_cases_used()
     if not date_time_operations.is_valid_date_in_Feb(17, -2, 2016):
         print(prompt.format("OK"))
         statistical_analysis.increment_number_test_cases_passed()
     else:
         print(prompt.format("FAIL!!!"))
     prompt = "	... Test: not February, mm = 14				{}"
     statistical_analysis.increment_number_test_cases_used()
     if not date_time_operations.is_valid_date_in_Feb(17, 14, 2016):
         print(prompt.format("OK"))
         statistical_analysis.increment_number_test_cases_passed()
     else:
         print(prompt.format("FAIL!!!"))
	def test_set_reset_and_get_initial_timestamp():
		print("	Testing set, reset, get initial timestamp methods.")
		prompt = "	... Test: get_initial_timestamp() is invalid		{}."
		statistical_analysis.increment_number_test_cases_used()
		if execution_time_measurement_no_ns.invalid_timestamp == execution_time_measurement_no_ns.get_initial_timestamp():
			print(prompt .format("OK"))
			statistical_analysis.increment_number_test_cases_passed()
		else:
			print(prompt .format("FAIL!!!"))
		prompt = "	... Test: set_initial_timestamp() not invalid		{}."
		statistical_analysis.increment_number_test_cases_used()
		execution_time_measurement_no_ns.set_initial_timestamp()
		if execution_time_measurement_no_ns.invalid_timestamp != execution_time_measurement_no_ns.get_initial_timestamp():
			print(prompt .format("OK"))
			statistical_analysis.increment_number_test_cases_passed()
		else:
			print(prompt .format("FAIL!!!"))
		prompt = "	... Test: reset_initial_timestamp() is invalid		{}."
		statistical_analysis.increment_number_test_cases_used()
		execution_time_measurement_no_ns.reset_initial_timestamp()
		if execution_time_measurement_no_ns.invalid_timestamp == execution_time_measurement_no_ns.get_initial_timestamp():
			print(prompt .format("OK"))
			statistical_analysis.increment_number_test_cases_passed()
		else:
			print(prompt .format("FAIL!!!"))
		prompt = "	... Test: set_initial_timestamp("") not invalid		{}."
		statistical_analysis.increment_number_test_cases_used()
		execution_time_measurement_no_ns.set_initial_timestamp("")
		if execution_time_measurement_no_ns.invalid_timestamp != execution_time_measurement_no_ns.get_initial_timestamp():
			print(prompt .format("OK"))
			statistical_analysis.increment_number_test_cases_passed()
			"""
				Reset initial time stamp to test for other input
					parameters.
			"""
			execution_time_measurement_no_ns.reset_initial_timestamp()
		else:
			print(prompt .format("FAIL!!!"))
		if execution_time_measurement_no_ns.invalid_timestamp != execution_time_measurement_no_ns.get_initial_timestamp():
			warnings.warn("Initial time stamp not reset between test cases.")
		prompt = "	... Test: set_initial_timestamp('perf_counter')		{}."
		statistical_analysis.increment_number_test_cases_used()
		execution_time_measurement_no_ns.set_initial_timestamp("perf_counter")
		if execution_time_measurement_no_ns.invalid_timestamp != execution_time_measurement_no_ns.get_initial_timestamp():
			print(prompt .format("OK"))
			statistical_analysis.increment_number_test_cases_passed()
			"""
				Reset initial time stamp to test for other input
					parameters.
			"""
			execution_time_measurement_no_ns.reset_initial_timestamp()
		else:
			print(prompt .format("FAIL!!!"))
		if execution_time_measurement_no_ns.invalid_timestamp != execution_time_measurement_no_ns.get_initial_timestamp():
			warnings.warn("Initial time stamp not reset between test cases.")
		prompt = "	... Test: set_initial_timestamp('process_time')		{}."
		statistical_analysis.increment_number_test_cases_used()
		execution_time_measurement_no_ns.set_initial_timestamp("process_time")
		if execution_time_measurement_no_ns.invalid_timestamp != execution_time_measurement_no_ns.get_initial_timestamp():
			print(prompt .format("OK"))
			statistical_analysis.increment_number_test_cases_passed()
			"""
				Reset initial time stamp to test for other input
					parameters.
			"""
			execution_time_measurement_no_ns.reset_initial_timestamp()
		else:
			print(prompt .format("FAIL!!!"))
		if execution_time_measurement_no_ns.invalid_timestamp != execution_time_measurement_no_ns.get_initial_timestamp():
			warnings.warn("Initial time stamp not reset between test cases.")
		prompt = "	... Test: set_initial_timestamp('time')			{}."
		statistical_analysis.increment_number_test_cases_used()
		execution_time_measurement_no_ns.set_initial_timestamp("time")
		if execution_time_measurement_no_ns.invalid_timestamp != execution_time_measurement_no_ns.get_initial_timestamp():
			print(prompt .format("OK"))
			statistical_analysis.increment_number_test_cases_passed()
			"""
				Reset initial time stamp to test for other input
					parameters.
			"""
			execution_time_measurement_no_ns.reset_initial_timestamp()
		else:
			print(prompt .format("FAIL!!!"))
		if execution_time_measurement_no_ns.invalid_timestamp != execution_time_measurement_no_ns.get_initial_timestamp():
			warnings.warn("Initial time stamp not reset between test cases.")
		prompt = "	... Test: set_initial_timestamp('monotonic')		{}."
		statistical_analysis.increment_number_test_cases_used()
		execution_time_measurement_no_ns.set_initial_timestamp("monotonic")
		if execution_time_measurement_no_ns.invalid_timestamp != execution_time_measurement_no_ns.get_initial_timestamp():
			print(prompt .format("OK"))
			statistical_analysis.increment_number_test_cases_passed()
			"""
				Reset initial time stamp to test for other input
					parameters.
			"""
			execution_time_measurement_no_ns.reset_initial_timestamp()
		else:
			print(prompt .format("FAIL!!!"))