def load(self):
    """Load ``config.json``, validate it against the schema, and build a
    ``Configuration`` object.

    The schema file and the tools file are decoded first; the config file is
    then parsed with a duplicate-key-detecting hook and checked against the
    schema.

    Returns:
        Configuration: the populated configuration object.

    Raises:
        IOError: if the schema, tools, or config file cannot be read or
            decoded.
        ValidationError: if the config JSON is malformed, contains duplicate
            keys, or fails schema validation.
    """
    util.set_working_directory()  # paths are relative to this script's location
    schema_json = util.json_decode(self.schema_path)
    if schema_json is None:
        msg = "Problem has occurred during the decoding procedure" + \
              " with the following file: " + self.schema_path + "."
        logging.error(msg)
        raise IOError(msg)
    # tools_json is decoded only to verify the tools file is readable and
    # well-formed; its content is not used further in this method.
    tools_json = util.json_decode(self.tools_path)
    if tools_json is None:
        msg = "Problem has occurred during the decoding procedure" + \
              " with the following file: " + self.tools_path + "."
        logging.error(msg)
        raise IOError(msg)
    try:
        with open(self.config_path, mode="r") as file:
            config_string = file.read()
        # object_pairs_hook=checking_hook raises KeyError on duplicate keys.
        decoder = json.JSONDecoder(object_pairs_hook=checking_hook)
        config_json = decoder.decode(config_string)
    except IOError:
        # FIX: added the missing space before the file name in the message.
        msg = "The file does not exist or cannot be read: " + \
              (os.path.split(self.config_path))[1]
        logging.error(msg)
        raise IOError(msg)
    except ValueError as value_error:
        msg = (os.path.split(self.config_path))[1] + " file is not valid"
        logging.error(msg)
        # FIX: was print(value_error); use logging for consistency with the
        # rest of the error handling.
        logging.error(value_error)
        raise ValidationError(msg)
    except KeyError as k_error:
        msg = "Duplicate key specified."
        logging.error(msg)
        # FIX: was print(...); use logging for consistency.
        logging.error(k_error)
        logging.error("Modify: " + (os.path.split(self.config_path))[1])
        raise ValidationError(msg)
    if not validation.is_valid_json(config_json, schema_json):
        msg = "Validation failed for " + \
              (os.path.split(self.config_path))[1] + "."
        logging.error(msg)
        raise ValidationError(msg)
    config = Configuration()
    config.iterations = config_json["IterationCount"]
    config.runs = config_json["Runs"]
    config.queries = config_json["Queries"]
    config.scenarios = config_json["Scenarios"]
    # Sizes are expanded to every power of two between MinSize and MaxSize.
    config.sizes = util.get_power_of_two(config_json["MinSize"],
                                         config_json["MaxSize"])
    config.tools = config_json["Tools"]
    config.optional_arguments = config_json["OptionalArguments"]
    return config
def load(self):
    """Load ``config.json``, validate it, and build a ``Configuration``.

    The config file is parsed with a duplicate-key-detecting hook, then
    checked by :func:`util.check_validation` before the ``Configuration``
    fields (including JVM settings) are populated.

    Returns:
        Configuration: the populated configuration object.

    Raises:
        IOError: if the config file cannot be read, is not valid JSON, or
            contains duplicate keys.
    """
    util.set_working_directory()  # paths are relative to this script's location
    try:
        with open(self.config_path, mode="r") as file:
            config_string = file.read()
        # object_pairs_hook=checking_hook raises KeyError on duplicate keys.
        decoder = json.JSONDecoder(object_pairs_hook=checking_hook)
        config_json = decoder.decode(config_string)
    except IOError:
        # FIX: message was "cannot read:" with no space before the file
        # name; corrected to grammatical, readable form.
        msg = "The file does not exist or cannot be read: " + \
              (os.path.split(self.config_path))[1]
        logging.error(msg)
        raise IOError(msg)
    except ValueError as value_error:
        msg = (os.path.split(self.config_path))[1] + " file is not valid"
        logging.error(msg)
        logging.error(value_error)
        raise IOError(msg)
    except KeyError as k_error:
        msg = "Duplicate key specified."
        logging.error(msg)
        logging.error(k_error)
        logging.error("Modify: " + (os.path.split(self.config_path))[1])
        raise IOError(msg)
    util.check_validation(config_json)
    config = Configuration()
    # Sizes are expanded to every power of two between MinSize and MaxSize.
    sizes = util.get_power_of_two(config_json["MinSize"],
                                  config_json["MaxSize"])
    config.tools = config_json["Tools"]
    config.queries = config_json["Queries"]
    config.change_sets = config_json["ChangeSets"]
    config.sizes = sizes
    config.iterations = config_json["IterationCount"]
    config.runs = config_json["Runs"]
    config.vmargs = config_json["JVM"]["vmargs"]
    config.xmx = config_json["JVM"]["Xmx"]
    config.timeout = config_json["Timeout"]
    config.optional_arguments = config_json["OptionalArguments"]
    return config
def load(self):
    """Load ``config.json``, validate it, and build a ``Configuration``.

    Variant of the loader that reads JVM ``vmargs`` but no ``Xmx`` setting.
    The config file is parsed with a duplicate-key-detecting hook, then
    checked by :func:`util.check_validation`.

    Returns:
        Configuration: the populated configuration object.

    Raises:
        IOError: if the config file cannot be read, is not valid JSON, or
            contains duplicate keys.
    """
    util.set_working_directory()  # paths are relative to this script's location
    try:
        with open(self.config_path, mode="r") as file:
            config_string = file.read()
        # object_pairs_hook=checking_hook raises KeyError on duplicate keys.
        decoder = json.JSONDecoder(object_pairs_hook=checking_hook)
        config_json = decoder.decode(config_string)
    except IOError:
        # FIX: message was "cannot read:" with no space before the file
        # name; corrected to grammatical, readable form.
        msg = "The file does not exist or cannot be read: " + \
              (os.path.split(self.config_path))[1]
        logging.error(msg)
        raise IOError(msg)
    except ValueError as value_error:
        msg = (os.path.split(self.config_path))[1] + " file is not valid"
        logging.error(msg)
        logging.error(value_error)
        raise IOError(msg)
    except KeyError as k_error:
        msg = "Duplicate key specified."
        logging.error(msg)
        logging.error(k_error)
        logging.error("Modify: " + (os.path.split(self.config_path))[1])
        raise IOError(msg)
    util.check_validation(config_json)
    config = Configuration()
    # Sizes are expanded to every power of two between MinSize and MaxSize.
    sizes = util.get_power_of_two(config_json["MinSize"],
                                  config_json["MaxSize"])
    config.tools = config_json["Tools"]
    config.queries = config_json["Queries"]
    config.change_sets = config_json["ChangeSets"]
    config.sizes = sizes
    config.iterations = config_json["IterationCount"]
    config.runs = config_json["Runs"]
    config.vmargs = config_json["JVM"]["vmargs"]
    config.timeout = config_json["Timeout"]
    config.optional_arguments = config_json["OptionalArguments"]
    return config
help="generate models", action="store_true") parser.add_argument("-m", "--measure", help="run the benchmark", action="store_true") parser.add_argument("-s", "--skip-tests", help="skip JUnit tests", action="store_true") args = parser.parse_args() # set working directory to this file's path util.set_working_directory() with open("config/config.yml", 'r') as stream: config = yaml.load(stream) config["sizes"] = util.get_power_of_two(config["min_size"], config["max_size"]) with open("config/formats.yml", 'r') as stream: tool_formats = yaml.load(stream) formats = set() for tool in config["tools"]: formats.add(tool_formats[tool]) # if there are no args, execute a full sequence # with the test and the visualization/reporting no_args = all(val is False for val in vars(args).values()) if no_args: args.build = True args.generate = True args.measure = True