def run_config(config, basedir, output_dir, random_seed, report_all, args):
    """Run one SQL-coverage configuration: generate SQL statements from the
    configured schema/template, execute them against the JNI (VoltDB) and
    HSQLDB backends, write an XML report, and build the HTML reports.

    Returns whatever generate_html_reports() returns; exits the process with
    status -1 if either backend run fails.
    """
    # Resolve every relative config path against the base directory.
    for key in config.iterkeys():
        if not os.path.isabs(config[key]):
            config[key] = os.path.abspath(os.path.join(basedir, config[key]))

    report_filename = os.path.abspath(os.path.join(output_dir, "report.xml"))

    # Publish the normalizer through the module-level 'normalize' global
    # (consumed elsewhere in this module); default is a pass-through.
    global normalize
    if "normalizer" in config:
        normalize = imp.load_source("normalizer", config["normalizer"]).normalize
    else:
        normalize = lambda x, y: x

    command = " ".join(args[2:]) + " schema=" + os.path.basename(config['ddl'])

    # One {"id", "SQL"} record per generated statement, ids counting from 0.
    generator = SQLGenerator(config["schema"], config["template"])
    statements = [{"id": n, "SQL": sql}
                  for n, sql in enumerate(generator.generate())]

    # Run the same statements through both backends; abort on any failure.
    for backend in ("jni", "hsqldb"):
        if run_once(backend, command, statements) != 0:
            exit(-1)

    # Persist the XML report (creating the output directory if needed).
    report = XMLGenerator({"Statements": statements})
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    with open(report_filename, "w") as fd:
        fd.write(report.toXML())

    return generate_html_reports({"Statements": statements}, random_seed,
                                 report_all, output_dir)
def run_config(suite_name, config, basedir, output_dir, random_seed,
               report_invalid, report_all, reproducer, generate_only,
               subversion_generation, submit_verbosely, ascii_only,
               args, testConfigKit):
    """Run one SQL-coverage suite end-to-end: generate SQL statements from
    the suite's template, execute them against the VoltDB (JNI) backend and
    the configured comparison database, compare the results, and fold this
    suite's statistics into the module-level running totals.

    Returns a dict with "keyStats" (an HTML table-row fragment of per-suite
    statistics) and "mis" (mismatch count, or -1 when the comparison itself
    crashed); in generate-only mode returns {"keyStats": None, "mis": 0}.
    """
    # Store the current, initial system time (in seconds since January 1, 1970)
    time0 = time.time()
    precision = 0
    within_minutes = 0
    # "precision" and "within-minutes" are numeric options; every other
    # config entry is treated as a path and made absolute.
    for key in config.iterkeys():
        if key == "precision":
            precision = int(config["precision"])
        elif key == "within-minutes":
            within_minutes = int(config["within-minutes"])
        elif not os.path.isabs(config[key]):
            config[key] = get_config_path(basedir, key, config[key])
        print "in run_config key = '%s', config[key] = '%s'" % (
            key, str(config[key]))
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # Data files: the pickled statements plus one result file per backend.
    global comparison_database
    comparison_database_lower = comparison_database.lower()
    statements_path = os.path.abspath(
        os.path.join(output_dir, "statements.data"))
    cmpdb_path = os.path.abspath(
        os.path.join(output_dir, comparison_database_lower + ".data"))
    jni_path = os.path.abspath(os.path.join(output_dir, "jni.data"))

    # Optionally ask the backend (via a system property) to log the SQL as
    # transformed for PostgreSQL/PostGIS, for debugging.
    modified_sql_path = None
    debug_transform_sql_arg = ''
    global debug_transform_sql
    if debug_transform_sql:
        if comparison_database == 'PostgreSQL' or comparison_database == 'PostGIS':
            modified_sql_path = os.path.abspath(
                os.path.join(output_dir, 'postgresql_transform.out'))
            debug_transform_sql_arg = ' -Dsqlcoverage.transform.sql.file=' + modified_sql_path

    template = config["template"]

    # Publish the normalizer through the module-level 'normalize' global
    # (consumed elsewhere in this module); default is a pass-through.
    global normalize
    if "normalizer" in config:
        normalize = imp.load_source("normalizer", config["normalizer"]).normalize
        # print "DEBUG: using normalizer ", config["normalizer"], " for ", template

        # Self-check the normalizer's safecmp() ordering (datetime vs. None
        # comparisons) before trusting it for result comparison.
        self_check_safecmp = imp.load_source("normalizer", config["normalizer"]).safecmp
        theNow = datetime.datetime.now()
        if self_check_safecmp([theNow], [theNow]) != 0:
            print >> sys.stderr, "safe_cmp fails [datetime] selfcheck"
            exit(2)
        if self_check_safecmp([None], [None]) != 0:
            print >> sys.stderr, "safe_cmp fails [None] selfcheck"
            exit(2)
        if self_check_safecmp([theNow], [None]) <= 0:
            print >> sys.stderr, "safe_cmp fails [datetime], [None] selfcheck"
            exit(2)
        theLater = datetime.datetime.now()
        if self_check_safecmp([None, theNow], [None, theLater]) >= 0:
            print >> sys.stderr, "safe_cmp fails [None, datetime] selfcheck"
            exit(2)
    else:
        normalize = lambda x, y: x
        # print "DEBUG: using no normalizer for ", template

    command = " ".join(args[2:])
    command += " schema=" + os.path.basename(config['ddl'])
    if debug_transform_sql:
        command = command.replace(" -server ", debug_transform_sql_arg + " -server ")

    # Save the PRNG state before generation — presumably so a later pass can
    # reproduce the identical statement stream.
    random_state = random.getstate()
    if "template-jni" in config:
        template = config["template-jni"]
    generator = SQLGenerator(config["schema"], template, subversion_generation, ascii_only)
    counter = 0

    # Pickle one {"id", "SQL"} record per generated statement.
    statements_file = open(statements_path, "wb")
    for i in generator.generate(submit_verbosely):
        cPickle.dump({"id": counter, "SQL": i}, statements_file)
        counter += 1
    statements_file.close()

    min_statements_per_pattern = generator.min_statements_per_pattern()
    max_statements_per_pattern = generator.max_statements_per_pattern()
    num_inserts = generator.num_insert_statements()
    num_patterns = generator.num_patterns()
    num_unresolved = generator.num_unresolved_statements()

    if generate_only or submit_verbosely:
        print "Generated %d statements." % counter
    if generate_only:
        # Claim success without running servers.
        return {"keyStats": None, "mis": 0}

    # Print the elapsed time, with a message
    global total_gensql_time
    gensql_time = print_elapsed_seconds(
        "for generating statements (" + suite_name + ")", time0)
    total_gensql_time += gensql_time

    # VoltDB (JNI) pass: a nonzero status OR an exception counts as a crash,
    # but the suite keeps going so the comparison DB and report still run.
    volt_crashes = 0
    failed = False
    try:
        if run_once("jni", command, statements_path, jni_path,
                    submit_verbosely, testConfigKit, precision) != 0:
            print >> sys.stderr, "Test with the JNI (VoltDB) backend had errors (crash?)."
            failed = True
    except:
        print >> sys.stderr, "JNI (VoltDB) backend crashed!!!"
        traceback.print_exc()
        failed = True
    if (failed):
        print >> sys.stderr, " jni_path: %s" % (jni_path)
        sys.stderr.flush()
        volt_crashes += 1

    # Print the elapsed time, with a message
    global total_voltdb_time
    voltdb_time = print_elapsed_seconds(
        "for running VoltDB (JNI) statements (" + suite_name + ")")
    total_voltdb_time += voltdb_time

    # Restore the PRNG state captured before generation, for the second pass.
    random.seed(random_seed)
    random.setstate(random_state)

    # Comparison-database pass (e.g. HSQLDB/PostgreSQL/PostGIS), with the
    # same crash accounting as the JNI pass above.
    cmp_crashes = 0
    diff_crashes = 0
    failed = False
    try:
        if run_once(comparison_database_lower, command, statements_path, cmpdb_path,
                    submit_verbosely, testConfigKit, precision) != 0:
            print >> sys.stderr, "Test with the " + comparison_database + " backend had errors (crash? Connection refused?)."
            failed = True
    except:
        print >> sys.stderr, comparison_database + " backend crashed!!"
        traceback.print_exc()
        failed = True
    if (failed):
        print >> sys.stderr, " cmpdb_path: %s" % (cmpdb_path)
        sys.stderr.flush()
        cmp_crashes += 1

    # Print the elapsed time, with a message
    global total_cmpdb_time
    cmpdb_time = print_elapsed_seconds("for running " + comparison_database +
                                       " statements (" + suite_name + ")")
    total_cmpdb_time += cmpdb_time

    # HTML <td> cells shared by the success and failure report rows.
    someStats = (
        get_numerical_html_table_element(min_statements_per_pattern, strong_warn_below=1) +
        get_numerical_html_table_element(
            max_statements_per_pattern, strong_warn_below=1, warn_above=100000) +
        get_numerical_html_table_element(
            num_inserts, warn_below=4, strong_warn_below=1, warn_above=1000) +
        get_numerical_html_table_element(
            num_patterns, warn_below=4, strong_warn_below=1, warn_above=10000) +
        get_numerical_html_table_element(num_unresolved, error_above=0) +
        get_time_html_table_element(gensql_time) +
        get_time_html_table_element(voltdb_time) +
        get_time_html_table_element(cmpdb_time))
    extraStats = (
        get_numerical_html_table_element(volt_crashes, error_above=0) +
        get_numerical_html_table_element(cmp_crashes, error_above=0) +
        get_numerical_html_table_element(diff_crashes, error_above=0) +
        someStats)
    max_mismatches = get_max_mismatches(comparison_database, suite_name)

    # Diff the two backends' result files via the normalizer's comparator.
    global compare_results
    try:
        compare_results = imp.load_source("normalizer", config["normalizer"]).compare_results
        # NOTE(review): 'config_name' is not defined in this function or its
        # parameter list — presumably a module-level global; confirm (or
        # should this be 'suite_name'? a NameError here would be swallowed
        # by the bare except below and counted as a diff crash).
        success = compare_results(
            suite_name, random_seed, statements_path, cmpdb_path, jni_path,
            output_dir, report_invalid, report_all, extraStats,
            comparison_database, modified_sql_path, max_mismatches,
            within_minutes,
            ignore_known_mismatches(comparison_database, config_name, reproducer),
            config.get("ddl"))
    except:
        print >> sys.stderr, "Compare (VoltDB & " + comparison_database + ") results crashed!"
        traceback.print_exc()
        print >> sys.stderr, " jni_path: %s" % (jni_path)
        print >> sys.stderr, " cmpdb_path: %s" % (cmpdb_path)
        sys.stderr.flush()
        diff_crashes += 1
        # Placeholder row: gray zeros for the ten statement-count columns
        # that compare_results would normally have produced.
        gray_zero_html_table_element = get_numerical_html_table_element(
            0, use_gray=True)
        errorStats = (
            gray_zero_html_table_element + gray_zero_html_table_element +
            gray_zero_html_table_element + gray_zero_html_table_element +
            gray_zero_html_table_element + gray_zero_html_table_element +
            gray_zero_html_table_element + gray_zero_html_table_element +
            gray_zero_html_table_element + gray_zero_html_table_element +
            get_numerical_html_table_element(volt_crashes, error_above=0) +
            get_numerical_html_table_element(cmp_crashes, error_above=0) +
            get_numerical_html_table_element(diff_crashes, error_above=0) +
            someStats + '</tr>')
        success = {"keyStats": errorStats, "mis": -1}

    # Print & save the elapsed time and total time, with a message
    global total_compar_time
    compar_time = print_elapsed_seconds("for comparing DB results (" + suite_name + ")")
    total_compar_time += compar_time
    suite_secs = print_elapsed_seconds(
        "for run_config of '" + suite_name + "'", time0, "Sub-tot time: ")
    sys.stdout.flush()

    # Accumulate the total number of Valid, Invalid, Mismatched & Total statements
    global total_statements

    def next_keyStats_column_value():
        """Return the text of the next <td ...>...</td> cell in
        success["keyStats"], scanning from the module-level cursor
        keyStats_start_index (advanced past the cell on success); returns
        "0" (and dumps diagnostics) if the cell cannot be parsed."""
        prefix = "<td"
        suffix = "</td>"
        global keyStats_start_index
        start_index = 0
        end_index = 0
        next_col_val = "0"
        try:
            start_index = success["keyStats"].index(
                prefix, keyStats_start_index) + len(prefix)
            start_index = success["keyStats"].index('>', start_index) + 1
            end_index = success["keyStats"].index(suffix, start_index)
            next_col_val = success["keyStats"][start_index:end_index]
            keyStats_start_index = end_index + len(suffix)
        except:
            print "Caught exception:\n", sys.exc_info()[0]
            print "success[keyStats]:\n", success["keyStats"]
            print "keyStats_start_index:", keyStats_start_index
            print "start_index :", start_index
            print "end_index :", end_index
            print "next_col_val:", next_col_val
        return next_col_val

    global valid_statements
    global invalid_statements
    global mismatched_statements
    global keyStats_start_index
    global total_volt_fatal_excep
    global total_volt_nonfatal_excep
    global total_cmp_excep
    global total_volt_crashes
    global total_cmp_crashes
    global total_diff_crashes
    global total_num_inserts
    global total_num_patterns
    global total_num_unresolved
    global min_all_statements_per_pattern
    global max_all_statements_per_pattern
    # Walk the keyStats row cell by cell; the column order here must match
    # the order in which the cells were emitted above / by compare_results.
    keyStats_start_index = 0
    valid_statements += int(next_keyStats_column_value())
    next_keyStats_column_value()  # ignore Valid %
    invalid_statements += int(next_keyStats_column_value())
    next_keyStats_column_value()  # ignore Invalid %
    total_statements += int(next_keyStats_column_value())
    mismatched_statements += int(next_keyStats_column_value())
    next_keyStats_column_value()  # ignore Mismatched %
    total_volt_fatal_excep += int(next_keyStats_column_value())
    total_volt_nonfatal_excep += int(next_keyStats_column_value())
    total_cmp_excep += int(next_keyStats_column_value())
    total_volt_crashes += volt_crashes
    total_cmp_crashes += cmp_crashes
    total_diff_crashes += diff_crashes
    total_num_inserts += num_inserts
    total_num_patterns += num_patterns
    total_num_unresolved += num_unresolved
    min_all_statements_per_pattern = min(min_all_statements_per_pattern,
                                         min_statements_per_pattern)
    max_all_statements_per_pattern = max(max_all_statements_per_pattern,
                                         max_statements_per_pattern)

    # Append the timing cells to the row before handing it back to the caller.
    finalStats = (get_time_html_table_element(compar_time) +
                  get_time_html_table_element(suite_secs))
    success["keyStats"] = success["keyStats"].replace('</tr>', finalStats + '</tr>')
    return success
def run_config(suite_name, config, basedir, output_dir, random_seed,
               report_all, generate_only, args, testConfigKit):
    """Run one SQL-coverage suite: generate statements, execute them on the
    JNI (VoltDB) and HSQLDB backends, and compare the two result files.

    Returns compare_results()'s result, or [0,0] in generate-only mode.
    Exits the process (exit(1)) if either backend run fails.
    """
    # Make every config path absolute, relative to the suite's base directory.
    for key in config.iterkeys():
        print "in run_config key = '%s', config[key] = '%s'" % (key, config[key])
        if not os.path.isabs(config[key]):
            config[key] = os.path.abspath(os.path.join(basedir, config[key]))
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # Data files: the pickled statements plus one result file per backend.
    statements_path = os.path.abspath(os.path.join(output_dir, "statements.data"))
    hsql_path = os.path.abspath(os.path.join(output_dir, "hsql.data"))
    jni_path = os.path.abspath(os.path.join(output_dir, "jni.data"))
    template = config["template"]

    # Publish the normalizer through the module-level 'normalize' global
    # (consumed elsewhere in this module); default is a pass-through.
    global normalize
    if "normalizer" in config:
        normalize = imp.load_source("normalizer", config["normalizer"]).normalize
        # print "DEBUG: using normalizer ", config["normalizer"], " for ", template
    else:
        normalize = lambda x, y: x
        # print "DEBUG: using no normalizer for ", template

    command = " ".join(args[2:])
    command += " schema=" + os.path.basename(config['ddl'])

    # Save the PRNG state so the HSQLDB regeneration pass (below) can
    # reproduce the exact same random statement stream.
    random_state = random.getstate()
    if "template-jni" in config:
        template = config["template-jni"]
    generator = SQLGenerator(config["schema"], template, True)
    counter = 0
    # Pickle one {"id", "SQL"} record per generated statement.
    statements_file = open(statements_path, "wb")
    for i in generator.generate():
        cPickle.dump({"id": counter, "SQL": i}, statements_file)
        counter += 1
    statements_file.close()

    if (generate_only):
        # Claim success without running servers.
        return [0,0]

    if run_once("jni", command, statements_path, jni_path, testConfigKit) != 0:
        print >> sys.stderr, "Test with the JNI backend had errors."
        print >> sys.stderr, " jni_path: %s" % (jni_path)
        sys.stderr.flush()
        exit(1)

    random.seed(random_seed)
    random.setstate(random_state)
    # To get around the timestamp issue. Volt and HSQLDB use different units
    # for timestamp (microsec vs. millisec), so we have to use different
    # template file for regression test, since all the statements are not
    # generated in this case.
    if "template-hsqldb" in config:
        template = config["template-hsqldb"]
        generator = SQLGenerator(config["schema"], template, False)
        counter = 0
        # Regenerate with the restored PRNG state, overwriting the
        # statements file with the HSQLDB-template equivalents.
        statements_file = open(statements_path, "wb")
        for i in generator.generate():
            cPickle.dump({"id": counter, "SQL": i}, statements_file)
            counter += 1
        statements_file.close()

    if run_once("hsqldb", command, statements_path, hsql_path, testConfigKit) != 0:
        print >> sys.stderr, "Test with the HSQLDB backend had errors."
        exit(1)

    # Compare the two backends' result files via the normalizer's comparator.
    global compare_results
    compare_results = imp.load_source("normalizer", config["normalizer"]).compare_results
    success = compare_results(suite_name, random_seed, statements_path,
                              hsql_path, jni_path, output_dir, report_all)
    return success
def run_config(suite_name, config, basedir, output_dir, random_seed,
               report_all, generate_only, subversion_generation,
               submit_verbosely, args, testConfigKit):
    """Run one SQL-coverage suite against VoltDB (JNI) and HSQLDB, compare
    the results, and fold this suite's statistics into the module-level
    running totals.

    Returns a dict with "keyStats" (an HTML table-row fragment) and "mis"
    (mismatch count, or -1 when the comparison itself crashed); in
    generate-only mode returns {"keyStats" : None, "mis" : 0}.
    """
    # Store the current, initial system time (in seconds since January 1, 1970)
    time0 = time.time()
    # Make every config path absolute, relative to the suite's base directory.
    for key in config.iterkeys():
        print "in run_config key = '%s', config[key] = '%s'" % (key, config[key])
        if not os.path.isabs(config[key]):
            config[key] = os.path.abspath(os.path.join(basedir, config[key]))
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # Data files: the pickled statements plus one result file per backend.
    statements_path = os.path.abspath(os.path.join(output_dir, "statements.data"))
    hsql_path = os.path.abspath(os.path.join(output_dir, "hsql.data"))
    jni_path = os.path.abspath(os.path.join(output_dir, "jni.data"))
    template = config["template"]

    # Publish the normalizer through the module-level 'normalize' global
    # (consumed elsewhere in this module); default is a pass-through.
    global normalize
    if "normalizer" in config:
        normalize = imp.load_source("normalizer", config["normalizer"]).normalize
        # print "DEBUG: using normalizer ", config["normalizer"], " for ", template

        # Self-check the normalizer's safecmp() ordering (datetime vs. None
        # comparisons) before trusting it for result comparison.
        self_check_safecmp = imp.load_source("normalizer", config["normalizer"]).safecmp
        theNow = datetime.datetime.now()
        if self_check_safecmp([theNow], [theNow]) != 0:
            print >> sys.stderr, "safe_cmp fails [datetime] selfcheck"
            exit(2)
        if self_check_safecmp([None], [None]) != 0:
            print >> sys.stderr, "safe_cmp fails [None] selfcheck"
            exit(2)
        if self_check_safecmp([theNow], [None]) <= 0:
            print >> sys.stderr, "safe_cmp fails [datetime], [None] selfcheck"
            exit(2)
        theLater = datetime.datetime.now()
        if self_check_safecmp([None, theNow], [None, theLater]) >= 0:
            print >> sys.stderr, "safe_cmp fails [None, datetime] selfcheck"
            exit(2)
    else:
        normalize = lambda x, y: x
        # print "DEBUG: using no normalizer for ", template

    command = " ".join(args[2:])
    command += " schema=" + os.path.basename(config['ddl'])

    # Save the PRNG state before generation — presumably so a later pass can
    # reproduce the identical statement stream.
    random_state = random.getstate()
    if "template-jni" in config:
        template = config["template-jni"]
    generator = SQLGenerator(config["schema"], template, subversion_generation)
    counter = 0
    # Pickle one {"id", "SQL"} record per generated statement.
    statements_file = open(statements_path, "wb")
    for i in generator.generate():
        cPickle.dump({"id": counter, "SQL": i}, statements_file)
        counter += 1
    statements_file.close()

    min_statements_per_pattern = generator.min_statements_per_pattern()
    max_statements_per_pattern = generator.max_statements_per_pattern()
    num_inserts = generator.num_insert_statements()
    num_patterns = generator.num_patterns()

    if generate_only or submit_verbosely:
        print "Generated %d statements." % counter
    if generate_only:
        # Claim success without running servers.
        return {"keyStats" : None, "mis" : 0}

    # Print the elapsed time, with a message
    global total_gensql_time
    gensql_time = print_elapsed_seconds("for generating statements (" +
                                        suite_name + ")", time0)
    total_gensql_time += gensql_time

    # VoltDB (JNI) pass: a nonzero status OR an exception counts as a crash,
    # but the suite keeps going so HSQLDB and the report still run.
    num_crashes = 0
    failed = False
    try:
        if run_once("jni", command, statements_path, jni_path,
                    submit_verbosely, testConfigKit) != 0:
            print >> sys.stderr, "Test with the JNI (VoltDB) backend had errors."
            failed = True
    except:
        print >> sys.stderr, "JNI (VoltDB) backend crashed!!!"
        traceback.print_exc()
        failed = True
    if (failed):
        print >> sys.stderr, " jni_path: %s" % (jni_path)
        sys.stderr.flush()
        num_crashes += 1
        #exit(1)

    # Print the elapsed time, with a message
    global total_voltdb_time
    voltdb_time = print_elapsed_seconds("for running VoltDB (JNI) statements (" +
                                        suite_name + ")")
    total_voltdb_time += voltdb_time

    # Restore the PRNG state captured before generation, for the HSQLDB pass.
    random.seed(random_seed)
    random.setstate(random_state)

    # HSQLDB pass, with the same crash accounting as the JNI pass above.
    failed = False
    try:
        if run_once("hsqldb", command, statements_path, hsql_path,
                    submit_verbosely, testConfigKit) != 0:
            print >> sys.stderr, "Test with the HSqlDB backend had errors."
            failed = True
    except:
        print >> sys.stderr, "HSqlDB backend crashed!!"
        traceback.print_exc()
        failed = True
    if (failed):
        print >> sys.stderr, " hsql_path: %s" % (hsql_path)
        sys.stderr.flush()
        num_crashes += 1
        #exit(1)

    # Print the elapsed time, with a message
    global total_hsqldb_time
    hsqldb_time = print_elapsed_seconds("for running HSqlDB statements (" +
                                        suite_name + ")")
    total_hsqldb_time += hsqldb_time

    # HTML <td> cells shared by the success and failure report rows.
    someStats = (get_numerical_html_table_element(min_statements_per_pattern, strong_warn_below=1) +
                 get_numerical_html_table_element(max_statements_per_pattern, strong_warn_below=1, warn_above=100000) +
                 get_numerical_html_table_element(num_inserts, warn_below=4, strong_warn_below=1, warn_above=1000) +
                 get_numerical_html_table_element(num_patterns, warn_below=4, strong_warn_below=1, warn_above=10000) +
                 get_time_html_table_element(gensql_time) +
                 get_time_html_table_element(voltdb_time) +
                 get_time_html_table_element(hsqldb_time) )
    extraStats = get_numerical_html_table_element(num_crashes, error_above=0) + someStats

    # Compare the two backends' result files via the normalizer's comparator.
    global compare_results
    try:
        compare_results = imp.load_source("normalizer", config["normalizer"]).compare_results
        success = compare_results(suite_name, random_seed, statements_path,
                                  hsql_path, jni_path, output_dir, report_all,
                                  extraStats)
    except:
        print >> sys.stderr, "Compare (VoltDB & HSqlDB) results crashed!"
        traceback.print_exc()
        print >> sys.stderr, " jni_path: %s" % (jni_path)
        print >> sys.stderr, " hsql_path: %s" % (hsql_path)
        sys.stderr.flush()
        num_crashes += 1
        # Placeholder row: gray zeros for the eight statement-count columns
        # that compare_results would normally have produced.
        gray_zero_html_table_element = get_numerical_html_table_element(0, use_gray=True)
        errorStats = (gray_zero_html_table_element +
                      gray_zero_html_table_element +
                      gray_zero_html_table_element +
                      gray_zero_html_table_element +
                      gray_zero_html_table_element +
                      gray_zero_html_table_element +
                      gray_zero_html_table_element +
                      gray_zero_html_table_element +
                      get_numerical_html_table_element(num_crashes, error_above=0) +
                      someStats + '</tr>' )
        success = {"keyStats": errorStats, "mis": -1}

    # Print & save the elapsed time and total time, with a message
    global total_compar_time
    compar_time = print_elapsed_seconds("for comparing DB results (" + suite_name + ")")
    total_compar_time += compar_time
    suite_secs = print_elapsed_seconds("for run_config of '" + suite_name + "'",
                                       time0, "Sub-tot time: ")
    sys.stdout.flush()

    # Accumulate the total number of Valid, Invalid, Mismatched & Total statements
    global total_statements

    def next_keyStats_column_value():
        """Return the text of the next <td ...>...</td> cell in
        success["keyStats"], scanning from the module-level cursor
        keyStats_start_index (advanced past the cell on success); returns
        "0" (and dumps diagnostics) if the cell cannot be parsed."""
        prefix = "<td"
        suffix = "</td>"
        global keyStats_start_index
        start_index = 0
        end_index = 0
        next_col_val = "0"
        try:
            start_index = success["keyStats"].index(prefix, keyStats_start_index) + len(prefix)
            start_index = success["keyStats"].index('>', start_index) + 1
            end_index = success["keyStats"].index(suffix, start_index)
            next_col_val = success["keyStats"][start_index: end_index]
            keyStats_start_index = end_index + len(suffix)
        except:
            print "Caught exception:\n", sys.exc_info()[0]
            print "success[keyStats]:\n", success["keyStats"]
            print "keyStats_start_index:", keyStats_start_index
            print "start_index :", start_index
            print "end_index :", end_index
            print "next_col_val:", next_col_val
        return next_col_val

    global valid_statements
    global invalid_statements
    global mismatched_statements
    global keyStats_start_index
    global total_num_npes
    global total_num_crashes
    global total_num_inserts
    global total_num_patterns
    global min_all_statements_per_pattern
    global max_all_statements_per_pattern
    # Walk the keyStats row cell by cell; the column order here must match
    # the order in which compare_results (or the error path above) emits them.
    keyStats_start_index = 0
    valid_statements += int(next_keyStats_column_value())
    next_keyStats_column_value()  # ignore Valid %
    invalid_statements += int(next_keyStats_column_value())
    next_keyStats_column_value()  # ignore Invalid %
    total_statements += int(next_keyStats_column_value())
    mismatched_statements += int(next_keyStats_column_value())
    next_keyStats_column_value()  # ignore Mismatched %
    total_num_npes += int(next_keyStats_column_value())
    total_num_crashes += num_crashes
    total_num_inserts += num_inserts
    total_num_patterns += num_patterns
    min_all_statements_per_pattern = min(min_all_statements_per_pattern,
                                         min_statements_per_pattern)
    max_all_statements_per_pattern = max(max_all_statements_per_pattern,
                                         max_statements_per_pattern)

    # Append the timing cells to the row before handing it back to the caller.
    finalStats = (get_time_html_table_element(compar_time) +
                  get_time_html_table_element(suite_secs) )
    success["keyStats"] = success["keyStats"].replace('</tr>', finalStats + '</tr>')
    return success
def run_config(config, basedir, output_dir, random_seed, report_all, args):
    """Generate SQL from the configured template, run it against the JNI
    (VoltDB) and HSQLDB backends, write the XML report, and build the HTML
    comparison reports.  Returns generate_html_reports()'s result; exits
    with status 1 if either backend run fails."""
    # Resolve every relative config path against the base directory.
    for key in config.iterkeys():
        if not os.path.isabs(config[key]):
            config[key] = os.path.abspath(os.path.join(basedir, config[key]))

    report_filename = os.path.abspath(os.path.join(output_dir, "report.xml"))

    template = config["template"]

    # Publish the normalizer through the module-level 'normalize' global
    # (consumed elsewhere in this module); default is a pass-through.
    global normalize
    if "normalizer" in config:
        normalize = imp.load_source("normalizer", config["normalizer"]).normalize
    else:
        normalize = lambda x, y: x

    command = " ".join(args[2:]) + " schema=" + os.path.basename(config["ddl"])

    saved_rng_state = random.getstate()
    if "template-jni" in config:
        template = config["template-jni"]
    generator = SQLGenerator(config["schema"], template, True)
    statements = [{"id": n, "SQL": sql}
                  for n, sql in enumerate(generator.generate())]

    if run_once("jni", command, statements) != 0:
        print >> sys.stderr, "Test with the JNI backend had errors."
        exit(1)

    # Rewind the PRNG so a regeneration pass reproduces the same choices.
    random.seed(random_seed)
    random.setstate(saved_rng_state)

    # To get around the timestamp issue. Volt and HSQLDB use different units
    # for timestamp (microsec vs. millisec), so we have to use different
    # template file for regression test, since all the statements are not
    # generated in this case.
    if "template-hsqldb" in config:
        template = config["template-hsqldb"]
        generator = SQLGenerator(config["schema"], template, False)
        for n, sql in enumerate(generator.generate()):
            statements[n]["SQL"] = sql

    if run_once("hsqldb", command, statements) != 0:
        print >> sys.stderr, "Test with the HSQLDB backend had errors."
        exit(1)

    # Persist the XML report, then render the HTML summary from the same data.
    report_dict = {"Seed": random_seed, "Statements": statements}
    report = XMLGenerator(report_dict)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    with open(report_filename, "w") as fd:
        fd.write(report.toXML())
    return generate_html_reports(report_dict, output_dir, report_all)
def run_config(suite_name, config, basedir, output_dir, random_seed,
               report_all, generate_only, subversion_generation,
               submit_verbosely, args, testConfigKit):
    """Run one SQL-coverage suite: generate statements, run them on the JNI
    (VoltDB) and HSQLDB backends, and compare the two result files, adding
    this suite's timings to the module-level gensql/voltdb/hsqldb/compar
    accumulators.

    Returns compare_results()'s result, or {"keyStats": None, "mis": 0} in
    generate-only mode.  Exits the process (exit(1)) on a backend failure.
    """
    # Store the current, initial system time (in seconds since January 1, 1970)
    time0 = time.time()
    # Make every config path absolute, relative to the suite's base directory.
    for key in config.iterkeys():
        print "in run_config key = '%s', config[key] = '%s'" % (key, config[key])
        if not os.path.isabs(config[key]):
            config[key] = os.path.abspath(os.path.join(basedir, config[key]))
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # Data files: the pickled statements plus one result file per backend.
    statements_path = os.path.abspath(
        os.path.join(output_dir, "statements.data"))
    hsql_path = os.path.abspath(os.path.join(output_dir, "hsql.data"))
    jni_path = os.path.abspath(os.path.join(output_dir, "jni.data"))
    template = config["template"]

    # Publish the normalizer through the module-level 'normalize' global
    # (consumed elsewhere in this module); default is a pass-through.
    global normalize
    if "normalizer" in config:
        normalize = imp.load_source("normalizer", config["normalizer"]).normalize
        # print "DEBUG: using normalizer ", config["normalizer"], " for ", template
    else:
        normalize = lambda x, y: x
        # print "DEBUG: using no normalizer for ", template

    command = " ".join(args[2:])
    command += " schema=" + os.path.basename(config['ddl'])

    # Save the PRNG state before generation — presumably so later passes can
    # reproduce the identical statement stream.
    random_state = random.getstate()
    if "template-jni" in config:
        template = config["template-jni"]
    generator = SQLGenerator(config["schema"], template, subversion_generation)
    counter = 0
    # Pickle one {"id", "SQL"} record per generated statement.
    statements_file = open(statements_path, "wb")
    for i in generator.generate():
        cPickle.dump({"id": counter, "SQL": i}, statements_file)
        counter += 1
    statements_file.close()

    if generate_only or submit_verbosely:
        print "Generated %d statements." % counter
    if generate_only:
        # Claim success without running servers.
        return {"keyStats": None, "mis": 0}

    # Print the elapsed time, with a message
    global gensql_time
    gensql_time += print_elapsed_seconds(
        "for generating statements (" + suite_name + ")", time0)

    if run_once("jni", command, statements_path, jni_path,
                submit_verbosely, testConfigKit) != 0:
        print >> sys.stderr, "Test with the JNI backend had errors."
        print >> sys.stderr, " jni_path: %s" % (jni_path)
        sys.stderr.flush()
        exit(1)

    # Print the elapsed time, with a message
    global voltdb_time
    voltdb_time += print_elapsed_seconds(
        "for running VoltDB (JNI) statements (" + suite_name + ")")

    # Restore the PRNG state captured before generation, for the HSQLDB pass.
    random.seed(random_seed)
    random.setstate(random_state)

    if run_once("hsqldb", command, statements_path, hsql_path,
                submit_verbosely, testConfigKit) != 0:
        print >> sys.stderr, "Test with the HSQLDB backend had errors."
        exit(1)

    # Print the elapsed time, with a message
    global hsqldb_time
    hsqldb_time += print_elapsed_seconds("for running HSqlDB statements (" +
                                         suite_name + ")")

    # Compare the two backends' result files via the normalizer's comparator.
    global compare_results
    compare_results = imp.load_source("normalizer", config["normalizer"]).compare_results
    success = compare_results(suite_name, random_seed, statements_path,
                              hsql_path, jni_path, output_dir, report_all)

    # Print the elapsed time and total time, with a message
    global compar_time
    compar_time += print_elapsed_seconds("for comparing DB results (" + suite_name + ")")
    print_elapsed_seconds("for run_config of '" + suite_name + "'", time0, "Sub-tot time: ")
    return success
def run_config(suite_name, config, basedir, output_dir, random_seed, report_all, generate_only, subversion_generation, submit_verbosely, args, testConfigKit): # Store the current, initial system time (in seconds since January 1, 1970) time0 = time.time() for key in config.iterkeys(): print "in run_config key = '%s', config[key] = '%s'" % (key, config[key]) if not os.path.isabs(config[key]): config[key] = os.path.abspath(os.path.join(basedir, config[key])) if not os.path.exists(output_dir): os.makedirs(output_dir) statements_path = os.path.abspath(os.path.join(output_dir, "statements.data")) hsql_path = os.path.abspath(os.path.join(output_dir, "hsql.data")) jni_path = os.path.abspath(os.path.join(output_dir, "jni.data")) template = config["template"] global normalize if "normalizer" in config: normalize = imp.load_source("normalizer", config["normalizer"]).normalize # print "DEBUG: using normalizer ", config["normalizer"], " for ", template else: normalize = lambda x, y: x # print "DEBUG: using no normalizer for ", template command = " ".join(args[2:]) command += " schema=" + os.path.basename(config['ddl']) random_state = random.getstate() if "template-jni" in config: template = config["template-jni"] generator = SQLGenerator(config["schema"], template, subversion_generation) counter = 0 statements_file = open(statements_path, "wb") for i in generator.generate(): cPickle.dump({"id": counter, "SQL": i}, statements_file) counter += 1 statements_file.close() if generate_only or submit_verbosely: print "Generated %d statements." % counter if generate_only: # Claim success without running servers. return {"keyStats" : None, "mis" : 0} # Print the elapsed time, with a message global gensql_time gensql_time += print_elapsed_seconds("for generating statements (" + suite_name + ")", time0) if run_once("jni", command, statements_path, jni_path, submit_verbosely, testConfigKit) != 0: print >> sys.stderr, "Test with the JNI backend had errors." 
print >> sys.stderr, " jni_path: %s" % (jni_path) sys.stderr.flush() exit(1) # Print the elapsed time, with a message global voltdb_time voltdb_time += print_elapsed_seconds("for running VoltDB (JNI) statements (" + suite_name + ")") random.seed(random_seed) random.setstate(random_state) if run_once("hsqldb", command, statements_path, hsql_path, submit_verbosely, testConfigKit) != 0: print >> sys.stderr, "Test with the HSQLDB backend had errors." exit(1) # Print the elapsed time, with a message global hsqldb_time hsqldb_time += print_elapsed_seconds("for running HSqlDB statements (" + suite_name + ")") global compare_results compare_results = imp.load_source("normalizer", config["normalizer"]).compare_results success = compare_results(suite_name, random_seed, statements_path, hsql_path, jni_path, output_dir, report_all) # Print the elapsed time and total time, with a message global compar_time compar_time += print_elapsed_seconds("for comparing DB results (" + suite_name + ")") print_elapsed_seconds("for run_config of '" + suite_name + "'", time0, "Sub-tot time: ") return success
def run_config(suite_name, config, basedir, output_dir, random_seed, report_all, generate_only, subversion_generation, submit_verbosely, args, testConfigKit): # Store the current, initial system time (in seconds since January 1, 1970) time0 = time.time() for key in config.iterkeys(): print "in run_config key = '%s', config[key] = '%s'" % (key, config[key]) if not os.path.isabs(config[key]): config[key] = os.path.abspath(os.path.join(basedir, config[key])) if not os.path.exists(output_dir): os.makedirs(output_dir) statements_path = os.path.abspath(os.path.join(output_dir, "statements.data")) hsql_path = os.path.abspath(os.path.join(output_dir, "hsql.data")) jni_path = os.path.abspath(os.path.join(output_dir, "jni.data")) template = config["template"] global normalize if "normalizer" in config: normalize = imp.load_source("normalizer", config["normalizer"]).normalize # print "DEBUG: using normalizer ", config["normalizer"], " for ", template self_check_safecmp = imp.load_source("normalizer", config["normalizer"]).safecmp theNow = datetime.datetime.now() if self_check_safecmp([theNow], [theNow]) != 0: print >> sys.stderr, "safe_cmp fails [datetime] selfcheck" exit(2) if self_check_safecmp([None], [None]) != 0: print >> sys.stderr, "safe_cmp fails [None] selfcheck" exit(2) if self_check_safecmp([theNow], [None]) <= 0: print >> sys.stderr, "safe_cmp fails [datetime], [None] selfcheck" exit(2) theLater = datetime.datetime.now() if self_check_safecmp([None, theNow], [None, theLater]) >= 0: print >> sys.stderr, "safe_cmp fails [None, datetime] selfcheck" exit(2) else: normalize = lambda x, y: x # print "DEBUG: using no normalizer for ", template command = " ".join(args[2:]) command += " schema=" + os.path.basename(config['ddl']) random_state = random.getstate() if "template-jni" in config: template = config["template-jni"] generator = SQLGenerator(config["schema"], template, subversion_generation) counter = 0 statements_file = open(statements_path, "wb") for i in 
generator.generate(): cPickle.dump({"id": counter, "SQL": i}, statements_file) counter += 1 statements_file.close() if generate_only or submit_verbosely: print "Generated %d statements." % counter if generate_only: # Claim success without running servers. return {"keyStats" : None, "mis" : 0} # Print the elapsed time, with a message global gensql_time gensql_time += print_elapsed_seconds("for generating statements (" + suite_name + ")", time0) if run_once("jni", command, statements_path, jni_path, submit_verbosely, testConfigKit) != 0: print >> sys.stderr, "Test with the JNI backend had errors." print >> sys.stderr, " jni_path: %s" % (jni_path) sys.stderr.flush() exit(1) # Print the elapsed time, with a message global voltdb_time voltdb_time += print_elapsed_seconds("for running VoltDB (JNI) statements (" + suite_name + ")") random.seed(random_seed) random.setstate(random_state) if run_once("hsqldb", command, statements_path, hsql_path, submit_verbosely, testConfigKit) != 0: print >> sys.stderr, "Test with the HSQLDB backend had errors." 
exit(1) # Print the elapsed time, with a message global hsqldb_time hsqldb_time += print_elapsed_seconds("for running HSqlDB statements (" + suite_name + ")") global compare_results compare_results = imp.load_source("normalizer", config["normalizer"]).compare_results success = compare_results(suite_name, random_seed, statements_path, hsql_path, jni_path, output_dir, report_all) # Print & save the elapsed time and total time, with a message global compar_time compar_time += print_elapsed_seconds("for comparing DB results (" + suite_name + ")") suite_secs = print_elapsed_seconds("for run_config of '" + suite_name + "'", time0, "Sub-tot time: ") # Accumulate the total number of Valid, Invalid, Mismatched & Total statements global total_statements def next_keyStats_column_value(): prefix = "<td" suffix = "</td>" global keyStats_start_index start_index = 0 end_index = 0 next_col_val = "0" try: start_index = success["keyStats"].index(prefix, keyStats_start_index) + len(prefix) start_index = success["keyStats"].index('>', start_index) + 1 end_index = success["keyStats"].index(suffix, start_index) next_col_val = success["keyStats"][start_index: end_index] keyStats_start_index = end_index + len(suffix) except: print "Caught exception:\n", sys.exc_info()[0] print "success[keyStats]:\n" + success["keyStats"] print "keyStats_start_index:", keyStats_start_index print "start_index :", start_index print "end_index :", end_index print "next_col_val:", next_col_val return next_col_val global valid_statements global invalid_statements global mismatched_statements global keyStats_start_index keyStats_start_index = 0 valid_statements += int(next_keyStats_column_value()) next_keyStats_column_value() # ignore Valid % invalid_statements += int(next_keyStats_column_value()) next_keyStats_column_value() # ignore Invalid % total_statements += int(next_keyStats_column_value()) mismatched_statements += int(next_keyStats_column_value()) # Save the total time for this test suite 
success["keyStats"] = success["keyStats"].replace('</tr>', '\n<td align=right>%s</td></tr>' % minutes_colon_seconds(suite_secs)) return success
def run_config(suite_name, config, basedir, output_dir, random_seed, report_all, generate_only, subversion_generation, submit_verbosely, args, testConfigKit): for key in config.iterkeys(): print "in run_config key = '%s', config[key] = '%s'" % (key, config[key]) if not os.path.isabs(config[key]): config[key] = os.path.abspath(os.path.join(basedir, config[key])) if not os.path.exists(output_dir): os.makedirs(output_dir) statements_path = os.path.abspath( os.path.join(output_dir, "statements.data")) hsql_path = os.path.abspath(os.path.join(output_dir, "hsql.data")) jni_path = os.path.abspath(os.path.join(output_dir, "jni.data")) template = config["template"] global normalize if "normalizer" in config: normalize = imp.load_source("normalizer", config["normalizer"]).normalize # print "DEBUG: using normalizer ", config["normalizer"], " for ", template else: normalize = lambda x, y: x # print "DEBUG: using no normalizer for ", template command = " ".join(args[2:]) command += " schema=" + os.path.basename(config['ddl']) random_state = random.getstate() if "template-jni" in config: template = config["template-jni"] generator = SQLGenerator(config["schema"], template, subversion_generation) counter = 0 statements_file = open(statements_path, "wb") for i in generator.generate(): cPickle.dump({"id": counter, "SQL": i}, statements_file) counter += 1 statements_file.close() if generate_only or submit_verbosely: print "Generated %d statements." % counter if generate_only: # Claim success without running servers. return {"keyStats": None, "mis": 0} if run_once("jni", command, statements_path, jni_path, submit_verbosely, testConfigKit) != 0: print >> sys.stderr, "Test with the JNI backend had errors." 
print >> sys.stderr, " jni_path: %s" % (jni_path) sys.stderr.flush() exit(1) random.seed(random_seed) random.setstate(random_state) if run_once("hsqldb", command, statements_path, hsql_path, submit_verbosely, testConfigKit) != 0: print >> sys.stderr, "Test with the HSQLDB backend had errors." exit(1) global compare_results compare_results = imp.load_source("normalizer", config["normalizer"]).compare_results success = compare_results(suite_name, random_seed, statements_path, hsql_path, jni_path, output_dir, report_all) return success
def run_config(config, basedir, output_dir, random_seed, report_all, args):
    """Run one SQL-coverage configuration (early variant, no suite name).

    Generates statements from the suite template, runs them against the
    JNI backend and then HSQLDB (regenerating with the hsqldb-specific
    template when one is configured), and renders the HTML report.

    config      -- dict of suite settings; relative paths are resolved
                   against basedir in place (the dict is mutated)
    output_dir  -- directory for statements.data / jni.data / hsql.data
    random_seed -- seed forwarded to generate_html_reports
    report_all  -- report-scope flag forwarded to generate_html_reports
    args        -- command-line tail; args[2:] becomes the backend command

    Returns whatever generate_html_reports returns; exits the process
    (exit(1)) if either backend run reports errors.
    """
    # Resolve every relative path value in config against basedir (in place).
    for key in config.iterkeys():
        if not os.path.isabs(config[key]):
            config[key] = os.path.abspath(os.path.join(basedir, config[key]))
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    statements_path = os.path.abspath(os.path.join(output_dir, "statements.data"))
    hsql_path = os.path.abspath(os.path.join(output_dir, "hsql.data"))
    jni_path = os.path.abspath(os.path.join(output_dir, "jni.data"))
    template = config["template"]
    # Install the per-suite normalizer; identity function when none given.
    global normalize
    if "normalizer" in config:
        normalize = imp.load_source("normalizer", config["normalizer"]).normalize
    else:
        normalize = lambda x, y: x
    command = " ".join(args[2:])
    command += " schema=" + os.path.basename(config['ddl'])
    # Snapshot the RNG state so the HSQLDB pass replays the same sequence.
    random_state = random.getstate()
    if "template-jni" in config:
        template = config["template-jni"]
    # Third SQLGenerator argument is hard-coded True here — presumably the
    # subversion-generation flag used by later variants; TODO confirm.
    generator = SQLGenerator(config["schema"], template, True)
    counter = 0
    # Pickle each generated statement as a {"id": n, "SQL": text} record.
    statements_file = open(statements_path, "wb")
    for i in generator.generate():
        cPickle.dump({"id": counter, "SQL": i}, statements_file)
        counter += 1
    statements_file.close()
    if run_once("jni", command, statements_path, jni_path) != 0:
        print >> sys.stderr, "Test with the JNI backend had errors."
        print >> sys.stderr, " jni_path: %s" % (jni_path)
        sys.stderr.flush()
        exit(1)
    # NOTE(review): seed() is immediately overridden by setstate(); the seed
    # call looks like dead code — confirm before removing.
    random.seed(random_seed)
    random.setstate(random_state)
    # To get around the timestamp issue. Volt and HSQLDB use different units
    # for timestamp (microsec vs. millisec), so we have to use different
    # template file for regression test, since all the statements are not
    # generated in this case.
    if "template-hsqldb" in config:
        template = config["template-hsqldb"]
    # Regenerate (overwriting statements.data) for the HSQLDB pass, with the
    # third flag False this time.
    generator = SQLGenerator(config["schema"], template, False)
    counter = 0
    statements_file = open(statements_path, "wb")
    for i in generator.generate():
        cPickle.dump({"id": counter, "SQL": i}, statements_file)
        counter += 1
    statements_file.close()
    if run_once("hsqldb", command, statements_path, hsql_path) != 0:
        print >> sys.stderr, "Test with the HSQLDB backend had errors."
        exit(1)
    success = generate_html_reports(random_seed, statements_path, hsql_path, jni_path, output_dir, report_all)
    return success
def run_config(suite_name, config, basedir, output_dir, random_seed, report_all, generate_only, subversion_generation, submit_verbosely, args, testConfigKit):
    """Run one SQL-coverage suite and accumulate per-suite statistics.

    Generates statements, self-checks the normalizer's safecmp ordering,
    runs the statements against the VoltDB (JNI) and HSQLDB backends,
    compares results, and folds Valid/Invalid/Mismatched/Total counts
    parsed from the keyStats HTML fragment into module-level accumulators.

    Returns the compare_results dict; exits with 1 on backend errors and
    2 on failed safecmp self-checks.  Mutates config in place and updates
    the normalize/compare_results globals plus the timing accumulators.
    """
    # Store the current, initial system time (in seconds since January 1, 1970)
    time0 = time.time()
    # Resolve every relative path value in config against basedir (in place).
    for key in config.iterkeys():
        print "in run_config key = '%s', config[key] = '%s'" % (key, config[key])
        if not os.path.isabs(config[key]):
            config[key] = os.path.abspath(os.path.join(basedir, config[key]))
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    statements_path = os.path.abspath(
        os.path.join(output_dir, "statements.data"))
    hsql_path = os.path.abspath(os.path.join(output_dir, "hsql.data"))
    jni_path = os.path.abspath(os.path.join(output_dir, "jni.data"))
    template = config["template"]
    global normalize
    if "normalizer" in config:
        normalize = imp.load_source("normalizer", config["normalizer"]).normalize
        # print "DEBUG: using normalizer ", config["normalizer"], " for ", template
        # Sanity-check safecmp's ordering of datetimes and None before
        # trusting it to compare backend result rows.  Note the normalizer
        # source is loaded a second time here.
        self_check_safecmp = imp.load_source("normalizer", config["normalizer"]).safecmp
        theNow = datetime.datetime.now()
        if self_check_safecmp([theNow], [theNow]) != 0:
            print >> sys.stderr, "safe_cmp fails [datetime] selfcheck"
            exit(2)
        if self_check_safecmp([None], [None]) != 0:
            print >> sys.stderr, "safe_cmp fails [None] selfcheck"
            exit(2)
        if self_check_safecmp([theNow], [None]) <= 0:
            print >> sys.stderr, "safe_cmp fails [datetime], [None] selfcheck"
            exit(2)
        theLater = datetime.datetime.now()
        if self_check_safecmp([None, theNow], [None, theLater]) >= 0:
            print >> sys.stderr, "safe_cmp fails [None, datetime] selfcheck"
            exit(2)
    else:
        normalize = lambda x, y: x
        # print "DEBUG: using no normalizer for ", template
    command = " ".join(args[2:])
    command += " schema=" + os.path.basename(config['ddl'])
    # Snapshot the RNG state so the HSQLDB pass replays the same sequence.
    random_state = random.getstate()
    if "template-jni" in config:
        template = config["template-jni"]
    generator = SQLGenerator(config["schema"], template, subversion_generation)
    counter = 0
    # Pickle each generated statement as a {"id": n, "SQL": text} record.
    statements_file = open(statements_path, "wb")
    for i in generator.generate():
        cPickle.dump({"id": counter, "SQL": i}, statements_file)
        counter += 1
    statements_file.close()
    if generate_only or submit_verbosely:
        print "Generated %d statements." % counter
    if generate_only:
        # Claim success without running servers.
        return {"keyStats": None, "mis": 0}
    # Print the elapsed time, with a message
    global gensql_time
    gensql_time += print_elapsed_seconds(
        "for generating statements (" + suite_name + ")", time0)
    if run_once("jni", command, statements_path, jni_path,
                submit_verbosely, testConfigKit) != 0:
        print >> sys.stderr, "Test with the JNI backend had errors."
        print >> sys.stderr, " jni_path: %s" % (jni_path)
        sys.stderr.flush()
        exit(1)
    # Print the elapsed time, with a message
    global voltdb_time
    voltdb_time += print_elapsed_seconds(
        "for running VoltDB (JNI) statements (" + suite_name + ")")
    # NOTE(review): seed() is immediately overridden by setstate(); the seed
    # call looks like dead code — confirm before removing.
    random.seed(random_seed)
    random.setstate(random_state)
    if run_once("hsqldb", command, statements_path, hsql_path,
                submit_verbosely, testConfigKit) != 0:
        print >> sys.stderr, "Test with the HSQLDB backend had errors."
        exit(1)
    # Print the elapsed time, with a message
    global hsqldb_time
    hsqldb_time += print_elapsed_seconds("for running HSqlDB statements (" + suite_name + ")")
    # NOTE(review): this unconditionally reads config["normalizer"]; it will
    # raise KeyError for suites that took the no-normalizer branch above.
    global compare_results
    compare_results = imp.load_source("normalizer", config["normalizer"]).compare_results
    success = compare_results(suite_name, random_seed, statements_path, hsql_path, jni_path, output_dir, report_all)
    # Print & save the elapsed time and total time, with a message
    global compar_time
    compar_time += print_elapsed_seconds("for comparing DB results (" + suite_name + ")")
    suite_secs = print_elapsed_seconds(
        "for run_config of '" + suite_name + "'", time0, "Sub-tot time: ")
    # Accumulate the total number of Valid, Invalid, Mismatched & Total statements
    global total_statements

    def next_keyStats_column_value():
        # Pull the text of the next <td ...>...</td> cell out of the
        # keyStats HTML fragment, advancing the module-level cursor.
        # Returns "0" when parsing fails.
        # NOTE(review): the bare `except:` below also swallows
        # SystemExit/KeyboardInterrupt — consider `except Exception:`.
        prefix = "<td"
        suffix = "</td>"
        global keyStats_start_index
        start_index = 0
        end_index = 0
        next_col_val = "0"
        try:
            start_index = success["keyStats"].index(
                prefix, keyStats_start_index) + len(prefix)
            start_index = success["keyStats"].index('>', start_index) + 1
            end_index = success["keyStats"].index(suffix, start_index)
            next_col_val = success["keyStats"][start_index:end_index]
            keyStats_start_index = end_index + len(suffix)
        except:
            print "Caught exception:\n", sys.exc_info()[0]
            print "success[keyStats]:\n" + success["keyStats"]
            print "keyStats_start_index:", keyStats_start_index
            print "start_index :", start_index
            print "end_index :", end_index
            print "next_col_val:", next_col_val
        return next_col_val

    global valid_statements
    global invalid_statements
    global mismatched_statements
    global keyStats_start_index
    keyStats_start_index = 0
    # Column order: Valid, Valid %, Invalid, Invalid %, Total, Mismatched.
    valid_statements += int(next_keyStats_column_value())
    next_keyStats_column_value()  # ignore Valid %
    invalid_statements += int(next_keyStats_column_value())
    next_keyStats_column_value()  # ignore Invalid %
    total_statements += int(next_keyStats_column_value())
    mismatched_statements += int(next_keyStats_column_value())
    # Save the total time for this test suite
    success["keyStats"] = success["keyStats"].replace(
        '</tr>',
        '\n<td align=right>%s</td></tr>' % minutes_colon_seconds(suite_secs))
    return success
def run_config(suite_name, config, basedir, output_dir, random_seed, report_all, generate_only, subversion_generation, submit_verbosely, args, testConfigKit):
    """Generate statements for one suite, run them on the JNI and HSQLDB
    backends, and return the comparison-results dict.

    Mutates config in place (relative paths become absolute), sets the
    module globals `normalize` and `compare_results`, and exits the process
    (exit(1)) if either backend run reports errors.  With generate_only
    set, stops after writing statements.data and returns a trivial
    success dict.
    """
    # Resolve every relative path value in config against basedir (in place).
    for key in config.iterkeys():
        print "in run_config key = '%s', config[key] = '%s'" % (key, config[key])
        if not os.path.isabs(config[key]):
            config[key] = os.path.abspath(os.path.join(basedir, config[key]))
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    statements_path = os.path.abspath(os.path.join(output_dir, "statements.data"))
    hsql_path = os.path.abspath(os.path.join(output_dir, "hsql.data"))
    jni_path = os.path.abspath(os.path.join(output_dir, "jni.data"))
    template = config["template"]
    # Install the per-suite normalizer; identity function when none given.
    global normalize
    if "normalizer" in config:
        normalize = imp.load_source("normalizer", config["normalizer"]).normalize
        # print "DEBUG: using normalizer ", config["normalizer"], " for ", template
    else:
        normalize = lambda x, y: x
        # print "DEBUG: using no normalizer for ", template
    command = " ".join(args[2:])
    command += " schema=" + os.path.basename(config['ddl'])
    # Snapshot the RNG state so the HSQLDB pass replays the same sequence.
    random_state = random.getstate()
    if "template-jni" in config:
        template = config["template-jni"]
    generator = SQLGenerator(config["schema"], template, subversion_generation)
    counter = 0
    # Pickle each generated statement as a {"id": n, "SQL": text} record.
    statements_file = open(statements_path, "wb")
    for i in generator.generate():
        cPickle.dump({"id": counter, "SQL": i}, statements_file)
        counter += 1
    statements_file.close()
    if generate_only or submit_verbosely:
        print "Generated %d statements." % counter
    if generate_only:
        # Claim success without running servers.
        return {"keyStats" : None, "mis" : 0}
    if run_once("jni", command, statements_path, jni_path, submit_verbosely, testConfigKit) != 0:
        print >> sys.stderr, "Test with the JNI backend had errors."
        print >> sys.stderr, " jni_path: %s" % (jni_path)
        sys.stderr.flush()
        exit(1)
    # NOTE(review): seed() is immediately overridden by setstate(); the seed
    # call looks like dead code — confirm before removing.
    random.seed(random_seed)
    random.setstate(random_state)
    if run_once("hsqldb", command, statements_path, hsql_path, submit_verbosely, testConfigKit) != 0:
        print >> sys.stderr, "Test with the HSQLDB backend had errors."
        exit(1)
    # Compare the two backends' outputs with the suite's comparison routine.
    # NOTE(review): unconditionally reads config["normalizer"]; will raise
    # KeyError for suites that took the no-normalizer branch above.
    global compare_results
    compare_results = imp.load_source("normalizer", config["normalizer"]).compare_results
    success = compare_results(suite_name, random_seed, statements_path, hsql_path, jni_path, output_dir, report_all)
    return success
def run_config(config, basedir, output_dir, random_seed, report_all, args): for key in config.iterkeys(): if not os.path.isabs(config[key]): config[key] = os.path.abspath(os.path.join(basedir, config[key])) report_filename = "report.xml" report_filename = os.path.abspath(os.path.join(output_dir, report_filename)) template = config["template"] global normalize if "normalizer" in config: normalize = imp.load_source("normalizer", config["normalizer"]).normalize else: normalize = lambda x, y: x command = " ".join(args[2:]) command += " schema=" + os.path.basename(config['ddl']) random_state = random.getstate() if "template-jni" in config: template = config["template-jni"] generator = SQLGenerator(config["schema"], template, True) statements = [] counter = 0 for i in generator.generate(): statements.append({"id": counter, "SQL": i}) counter += 1 if run_once("jni", command, statements) != 0: print >> sys.stderr, "Test with the JNI backend had errors." exit(1) random.seed(random_seed) random.setstate(random_state) # To get around the timestamp issue. Volt and HSQLDB use different units # for timestamp (microsec vs. millisec), so we have to use different # template file for regression test, since all the statements are not # generated in this case. if "template-hsqldb" in config: template = config["template-hsqldb"] generator = SQLGenerator(config["schema"], template, False) counter = 0 for i in generator.generate(): statements[counter]["SQL"] = i counter += 1 if run_once("hsqldb", command, statements) != 0: print >> sys.stderr, "Test with the HSQLDB backend had errors." exit(1) report_dict = {"Seed": random_seed, "Statements": statements} report = XMLGenerator(report_dict) if not os.path.exists(output_dir): os.makedirs(output_dir) fd = open(report_filename, "w") fd.write(report.toXML()) fd.close() success = generate_html_reports(report_dict, output_dir, report_all) return success
def run_config(config, basedir, output_dir, random_seed, report_all, args):
    """Run one SQL-coverage configuration (pickled-statements variant).

    Generates statements into statements.data, runs them against the JNI
    backend, regenerates (swapping templates when "template-hsqldb" is
    configured) for the HSQLDB pass, and renders the HTML reports.

    Returns whatever generate_html_reports returns; exits the process
    (exit(1)) if either backend run reports errors.  Mutates config in
    place and sets the module-global `normalize`.
    """
    # Resolve every relative path value in config against basedir (in place).
    for key in config.iterkeys():
        if not os.path.isabs(config[key]):
            config[key] = os.path.abspath(os.path.join(basedir, config[key]))
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    statements_path = os.path.abspath(os.path.join(output_dir, "statements.data"))
    hsql_path = os.path.abspath(os.path.join(output_dir, "hsql.data"))
    jni_path = os.path.abspath(os.path.join(output_dir, "jni.data"))
    template = config["template"]
    # Install the per-suite normalizer; identity function when none given.
    global normalize
    if "normalizer" in config:
        normalize = imp.load_source("normalizer", config["normalizer"]).normalize
    else:
        normalize = lambda x, y: x
    command = " ".join(args[2:])
    command += " schema=" + os.path.basename(config['ddl'])
    # Snapshot the RNG state so the HSQLDB pass replays the same sequence.
    random_state = random.getstate()
    if "template-jni" in config:
        template = config["template-jni"]
    # Third SQLGenerator argument is hard-coded True here — presumably the
    # subversion-generation flag used by later variants; TODO confirm.
    generator = SQLGenerator(config["schema"], template, True)
    counter = 0
    # Pickle each generated statement as a {"id": n, "SQL": text} record.
    statements_file = open(statements_path, "wb")
    for i in generator.generate():
        cPickle.dump({"id": counter, "SQL": i}, statements_file)
        counter += 1
    statements_file.close()
    if run_once("jni", command, statements_path, jni_path) != 0:
        print >> sys.stderr, "Test with the JNI backend had errors."
        exit(1)
    # NOTE(review): seed() is immediately overridden by setstate(); the seed
    # call looks like dead code — confirm before removing.
    random.seed(random_seed)
    random.setstate(random_state)
    # To get around the timestamp issue. Volt and HSQLDB use different units
    # for timestamp (microsec vs. millisec), so we have to use different
    # template file for regression test, since all the statements are not
    # generated in this case.
    if "template-hsqldb" in config:
        template = config["template-hsqldb"]
    # Regenerate (overwriting statements.data) for the HSQLDB pass, with
    # the third flag False this time.
    generator = SQLGenerator(config["schema"], template, False)
    counter = 0
    statements_file = open(statements_path, "wb")
    for i in generator.generate():
        cPickle.dump({"id": counter, "SQL": i}, statements_file)
        counter += 1
    statements_file.close()
    if run_once("hsqldb", command, statements_path, hsql_path) != 0:
        print >> sys.stderr, "Test with the HSQLDB backend had errors."
        exit(1)
    success = generate_html_reports(random_seed, statements_path, hsql_path, jni_path, output_dir, report_all)
    return success