def test_generates_template_for_success(self, mock_template_dir, mock_log):
    """Verify generates_templates() output matches the reference fixtures.

    Template generation is run twice — first with the preconfigured
    template name, then with an absolute path to a generated template —
    and after each run every file under tests/data/test_templates/ must
    be line-for-line identical to its generated counterpart.

    :param mock_template_dir: mock for the template-directory lookup,
        redirected to the generated-templates fixture directory
    :param mock_log: mock for the module logger (unused, patched to
        silence output)
    """
    generated_templates_dir = os.path.join(
        APEX_LAKE_ROOT, 'tests/data/generated_templates/')
    mock_template_dir.return_value = generated_templates_dir
    test_templates = os.path.join(
        APEX_LAKE_ROOT, 'tests/data/test_templates/')

    def assert_generated_matches_expected():
        # Deduplicated comparison: each reference template must equal
        # the freshly generated file of the same name.
        for dirname, dirnames, filenames in os.walk(test_templates):
            for filename in filenames:
                with open(test_templates + filename) as test:
                    with open(generated_templates_dir + filename) \
                            as generated:
                        self.assertListEqual(test.readlines(),
                                             generated.readlines())

    heat_gen.generates_templates(self.template_name,
                                 self.deployment_configuration)
    assert_generated_matches_expected()

    # Re-run with an absolute path to a generated template and re-check.
    self.template_name = os.path.join(
        APEX_LAKE_ROOT,
        'tests/data/generated_templates/VTC_base_single_vm_wait.tmp')
    heat_gen.generates_templates(self.template_name,
                                 self.deployment_configuration)
    assert_generated_matches_expected()
def execute_framework(test_cases, iterations, base_heat_template,
                      heat_template_parameters, deployment_configuration,
                      openstack_credentials):
    """Runs the framework.

    :param test_cases: Test cases to be ran on the workload
        (dict() of dict()). Each string represents a test case and it is
        one of the strings provided by the "get_available_test_cases()"
        function output.
    :param iterations: Number of iterations to be executed (int)
    :param base_heat_template: File name of the base heat template of the
        workload to be deployed (string)
    :param heat_template_parameters: Dictionary of parameters to be given
        as input to the heat template (dict()). See
        http://docs.openstack.org/developer/heat/template_guide/hot_guide.html
        Section "Template input parameters"
    :param deployment_configuration: Dictionary of parameters representing
        the deployment configuration of the workload. The key is a string
        representing the name of the parameter, the value is a list of
        strings representing the value to be assumed by a specific param.
        The format is: ( dict[string] = list(strings) ) ). The parameters
        are user defined: they have to correspond to the place holders
        provided in the heat template. (Use "#" in the syntax, e.g.
        - heat template "#param", - config_var "param")
    :return: the name of the csv file where the results have been stored
    """
    # TODO: replace with user credentials
    credentials = common.get_credentials()

    # TODO: improve Input validation
    if not isinstance(base_heat_template, str):
        raise ValueError('The provided base_heat_template variable must '
                         'be a string')
    if not isinstance(iterations, int):
        raise ValueError('The provided iterations variable must be an '
                         'integer value')
    if not isinstance(credentials, dict):
        raise ValueError('The provided openstack_credentials variable '
                         'must be a dictionary')
    # NOTE(review): removed dead code that built a placeholder
    # credential_keys list of empty strings and computed an unused
    # "missing" list — it validated nothing.  Reinstate with the real
    # required key names if credential-key validation is wanted.
    if not isinstance(heat_template_parameters, dict):
        raise ValueError('The provided heat_template_parameters variable '
                         'must be a dictionary')
    if not isinstance(test_cases, list):
        # Fixed: the original message wrongly said "dictionary" for a
        # list-type check.
        raise ValueError('The provided test_cases variable must be a list')

    # Heat template generation
    # (base_heat_template, deployment_configuration)
    common.LOG.info("Generation of all the heat templates required by the "
                    "experiment")
    heat_template_generation.generates_templates(base_heat_template,
                                                 deployment_configuration)

    # Benchmarking Unit (test_cases, iterations, heat_template_parameters)
    benchmarking_unit = bench.BenchmarkingUnit(
        base_heat_template, common.get_credentials(),
        heat_template_parameters, iterations, test_cases)
    try:
        common.LOG.info("Benchmarking Unit initialization")
        benchmarking_unit.initialize()
        # Fixed typo in the original log message ("Becnhmarking").
        common.LOG.info("Benchmarking Unit Running")
        benchmarking_unit.run_benchmarks()
    finally:
        common.LOG.info("Benchmarking Unit Finalization")
        benchmarking_unit.finalize()
    # NOTE(review): the docstring promises the csv file name but nothing
    # is returned here — TODO confirm the intended return value.
def test_generates_template_for_success(self, mock_template_dir, mock_log):
    """Check that template generation reproduces the expected fixtures.

    Generation is executed twice — first with the preconfigured template
    name, then with an absolute path built from the current working
    directory — and after each run every file under
    tests/data/test_templates/ must match its generated counterpart.
    """
    generated_templates_dir = 'tests/data/generated_templates/'
    mock_template_dir.return_value = generated_templates_dir
    test_templates = 'tests/data/test_templates/'

    heat_gen.generates_templates(self.template_name,
                                 self.deployment_configuration)
    for _root, _dirs, names in os.walk(test_templates):
        for name in names:
            with open(test_templates + name) as expected, \
                    open(generated_templates_dir + name) as produced:
                self.assertListEqual(expected.readlines(),
                                     produced.readlines())

    # Second pass: absolute path to an already-generated template.
    t_name = '/tests/data/generated_templates/VTC_base_single_vm_wait.tmp'
    self.template_name = "{}{}".format(os.getcwd(), t_name)
    heat_gen.generates_templates(self.template_name,
                                 self.deployment_configuration)
    for _root, _dirs, names in os.walk(test_templates):
        for name in names:
            with open(test_templates + name) as expected, \
                    open(generated_templates_dir + name) as produced:
                self.assertListEqual(expected.readlines(),
                                     produced.readlines())
def test_generates_template_for_success(self, mock_template_dir, mock_log):
    """Assert generated heat templates equal the stored fixtures.

    Runs generation with the configured template name and again with an
    absolute generated-template path; after each run, every fixture in
    tests/data/test_templates/ is compared line-by-line against the
    corresponding generated file.
    """
    generated_templates_dir = 'tests/data/generated_templates/'
    mock_template_dir.return_value = generated_templates_dir
    test_templates = 'tests/data/test_templates/'

    def compare_all():
        # Every fixture file must equal its freshly generated twin.
        for _dirpath, _subdirs, filenames in os.walk(test_templates):
            for fname in filenames:
                with open(test_templates + fname) as reference:
                    with open(generated_templates_dir + fname) as created:
                        self.assertListEqual(reference.readlines(),
                                             created.readlines())

    heat_gen.generates_templates(self.template_name,
                                 self.deployment_configuration)
    compare_all()

    t_name = '/tests/data/generated_templates/VTC_base_single_vm_wait.tmp'
    self.template_name = "{}{}".format(os.getcwd(), t_name)
    heat_gen.generates_templates(self.template_name,
                                 self.deployment_configuration)
    compare_all()
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

__author__ = "vmriccox"

from experimental_framework import heat_template_generation, common
from experimental_framework import benchmarking_unit as bench_unit

# Initialization of the utilities tools
common.init()

# Generate every heat template variant required by the experiment.
common.LOG.info("Generation of all the heat templates required by the "
                "experiment ...")
heat_template_generation.generates_templates(
    common.TEMPLATE_NAME,
    common.get_deployment_configuration_variables_from_conf_file())

common.LOG.info("Running Benchmarks ...")
required_benchmarks = common.get_benchmarks_from_conf_file()
test_case_params = common.get_testcase_params()

# Build one benchmark descriptor per configured benchmark, each carrying
# its own copy of the test-case parameters.
benchmarks = [
    {
        'name': benchmark_name,
        'params': {key: test_case_params[key] for key in test_case_params},
    }
    for benchmark_name in required_benchmarks
]

b_unit = bench_unit.BenchmarkingUnit(
    common.TEMPLATE_NAME, common.get_credentials(),
    common.get_heat_template_params(), common.ITERATIONS, benchmarks)
def execute_framework(test_cases, iterations, heat_template,
                      heat_template_parameters, deployment_configuration,
                      openstack_credentials):
    """Executes the framework according the inputs.

    :param test_cases: Test cases to be ran on the workload
        (dict() of dict()). Example:
            test_case = dict()
            test_case['name'] = 'module.Class'
            test_case['params'] = dict()
            test_case['params']['throughput'] = '1'
            test_case['params']['vlan_sender'] = '1007'
            test_case['params']['vlan_receiver'] = '1006'
            test_cases = [test_case]
    :param iterations: Number of cycles to be executed (int)
    :param heat_template: (string) File name of the heat template of the
        workload to be deployed. It contains the parameters to be
        evaluated in the form of #parameter_name. (See
        heat_templates/vTC.yaml as example).
    :param heat_template_parameters: (dict) Parameters to be provided as
        input to the heat template. See
        http://docs.openstack.org/developer/heat/template_guide/hot_guide.html
        - section "Template input parameters" for further info.
    :param deployment_configuration: ( dict[string] = list(strings) ) )
        Dictionary of parameters representing the deployment
        configuration of the workload. The key is a string corresponding
        to the name of the parameter, the value is a list of strings
        representing the value to be assumed by a specific param. The
        parameters are user defined: they have to correspond to the place
        holders (#parameter_name) specified in the heat template.
    :return: dict() Containing results
    """
    common.init(api=True)

    # Input Validation
    common.InputValidation.validate_os_credentials(openstack_credentials)
    credentials = openstack_credentials

    # NOTE(review): the heat_template argument is overridden here — the
    # framework always deploys the bundled vTC template for the detected
    # release. Confirm this is intentional.
    if common.RELEASE == 'liberty':
        heat_template = 'vTC_liberty.yaml'
    else:
        heat_template = 'vTC.yaml'
    template_path = "{}{}".format(common.get_template_dir(), heat_template)
    common.InputValidation.validate_file_exist(
        template_path, 'The provided heat_template does not exist')

    common.InputValidation.validate_integer(
        iterations, 'The provided iterations variable must be an '
        'integer value')
    common.InputValidation.validate_dictionary(
        heat_template_parameters,
        'The provided heat_template_parameters variable must be a '
        'dictionary')

    common.LOG.info("Generation of all the heat templates "
                    "required by the experiment")
    heat_template_generation.generates_templates(heat_template,
                                                 deployment_configuration)

    benchmarking_unit = b_unit.BenchmarkingUnit(
        heat_template, credentials, heat_template_parameters, iterations,
        test_cases)
    try:
        common.LOG.info("Benchmarking Unit initialization")
        benchmarking_unit.initialize()
        common.LOG.info("Benchmarking Unit Running")
        results = benchmarking_unit.run_benchmarks()
    finally:
        # Tear down deployed resources even if a benchmark run fails.
        common.LOG.info("Benchmarking Unit Finalization")
        benchmarking_unit.finalize()
    return results
def execute_framework(
        test_cases,
        iterations,
        heat_template,
        heat_template_parameters,
        deployment_configuration,
        openstack_credentials):
    """Executes the framework according the inputs.

    :param test_cases: Test cases to be ran on the workload
        (dict() of dict()). Example:
            test_case = dict()
            test_case['name'] = 'module.Class'
            test_case['params'] = dict()
            test_case['params']['throughput'] = '1'
            test_case['params']['vlan_sender'] = '1007'
            test_case['params']['vlan_receiver'] = '1006'
            test_cases = [test_case]
    :param iterations: Number of cycles to be executed (int)
    :param heat_template: (string) File name of the heat template of the
        workload to be deployed. It contains the parameters to be
        evaluated in the form of #parameter_name. (See
        heat_templates/vTC.yaml as example).
    :param heat_template_parameters: (dict) Parameters to be provided as
        input to the heat template. See
        http://docs.openstack.org/developer/heat/template_guide/hot_guide.html
        - section "Template input parameters" for further info.
    :param deployment_configuration: ( dict[string] = list(strings) ) )
        Dictionary of parameters representing the deployment
        configuration of the workload. The key is a string corresponding
        to the name of the parameter, the value is a list of strings
        representing the value to be assumed by a specific param. The
        parameters are user defined: they have to correspond to the place
        holders (#parameter_name) specified in the heat template.
    :return: dict() Containing results
    """
    common.init(api=True)

    # Input Validation
    common.InputValidation.validate_os_credentials(openstack_credentials)
    credentials = openstack_credentials

    # The caller-supplied template name is replaced by the packaged vTC
    # template that matches the OpenStack release in use — presumably
    # intentional; TODO confirm.
    heat_template = ('vTC_liberty.yaml' if common.RELEASE == 'liberty'
                     else 'vTC.yaml')
    template = "{}{}".format(common.get_template_dir(), heat_template)
    common.InputValidation.validate_file_exist(
        template, 'The provided heat_template does not exist')

    common.InputValidation.validate_integer(
        iterations,
        'The provided iterations variable must be an integer value')
    common.InputValidation.validate_dictionary(
        heat_template_parameters,
        'The provided heat_template_parameters variable must be a '
        'dictionary')

    common.LOG.info("Generation of all the heat templates "
                    "required by the experiment")
    heat_template_generation.generates_templates(heat_template,
                                                 deployment_configuration)

    benchmarking_unit = b_unit.BenchmarkingUnit(
        heat_template,
        credentials,
        heat_template_parameters,
        iterations,
        test_cases)
    try:
        common.LOG.info("Benchmarking Unit initialization")
        benchmarking_unit.initialize()
        common.LOG.info("Benchmarking Unit Running")
        results = benchmarking_unit.run_benchmarks()
    finally:
        # Always release the deployed stacks, even on failure.
        common.LOG.info("Benchmarking Unit Finalization")
        benchmarking_unit.finalize()
    return results