def parse_arguments():
    parser = ArgParser(prog='python %s' % __file__)
    parser.add_argument('-nh', dest='no_health_check', action='store_true',
                        default=False,
                        help='Don\'t run health check after deployment')
    parser.add_argument('-dt', dest='deploy_timeout', action='store',
                        default=240,
                        help='Deployment timeout (in minutes) [default: 240]')
    parser.add_argument('-nde', dest='no_deploy_environment',
                        action='store_true', default=False,
                        help='Do not launch environment deployment')
    parser.add_argument('dea_file', action='store',
                        help='Deployment Environment Adapter: dea.yaml')
    args = parser.parse_args()
    check_file_exists(args.dea_file)
    kwargs = {'dea_file': args.dea_file,
              'no_health_check': args.no_health_check,
              'deploy_timeout': args.deploy_timeout,
              'no_deploy_environment': args.no_deploy_environment}
    return kwargs
def modify_node_interface(self, node_id, roles_blade):
    log('Modify interface config for node %s' % node_id)
    interface_yaml = ('%s/node_%s/interfaces.yaml'
                      % (self.yaml_config_dir, node_id))
    check_file_exists(interface_yaml)
    backup('%s/node_%s' % (self.yaml_config_dir, node_id))
    with io.open(interface_yaml) as stream:
        interfaces = yaml.load(stream)
    net_name_id = {}
    for interface in interfaces:
        for network in interface['assigned_networks']:
            net_name_id[network['name']] = network['id']
    interface_type = self.dea.get_node_property(roles_blade[1], 'interfaces')
    interface_config = self.dea.get_property(interface_type)
    for interface in interfaces:
        interface['assigned_networks'] = []
        if interface['name'] in interface_config:
            for net_name in interface_config[interface['name']]:
                net = {'id': net_name_id[net_name], 'name': net_name}
                interface['assigned_networks'].append(net)
    with io.open(interface_yaml, 'w') as stream:
        yaml.dump(interfaces, stream, default_flow_style=False)
def parse_arguments():
    if len(sys.argv) != 2:
        usage()
        sys.exit(1)
    dea_file = sys.argv[-1]
    check_file_exists(dea_file)
    return dea_file
def modify_network_config(self):
    log('Modify network config for environment %s' % self.env_id)
    network_yaml = ('%s/network_%s.yaml'
                    % (self.yaml_config_dir, self.env_id))
    check_file_exists(network_yaml)
    backup(network_yaml)
    network_config = self.dea.get_property('network')
    with io.open(network_yaml) as stream:
        network = yaml.load(stream)
    net_names = self.dea.get_network_names()
    net_id = {}
    for net in network['networks']:
        if net['name'] in net_names:
            net_id[net['name']] = {'id': net['id'],
                                   'group_id': net['group_id']}
    for network in network_config['networks']:
        network.update(net_id[network['name']])
    with io.open(network_yaml, 'w') as stream:
        yaml.dump(network_config, stream, default_flow_style=False)
def modify_settings(self):
    log('Modify settings for environment %s' % self.env_id)
    settings_yaml = ('%s/settings_%s.yaml'
                     % (self.yaml_config_dir, self.env_id))
    check_file_exists(settings_yaml)
    with io.open(settings_yaml, 'r') as stream:
        orig_dea = yaml.load(stream)
    backup(settings_yaml)
    settings = self.dea.get_property('settings')
    # Copy Fuel-defined plugin_id's to the user-defined settings.
    # From Fuel 8.0 on, chosen_id was added because it is now possible
    # to install many versions of the same plugin, but we will install
    # only one version.
    for plugin in orig_dea['editable']:
        if 'metadata' not in orig_dea['editable'][plugin]:
            continue
        orig_meta = orig_dea['editable'][plugin]['metadata']
        if 'plugin_id' in orig_meta:
            if plugin not in settings['editable']:
                settings['editable'][plugin] = orig_dea['editable'][plugin]
            else:
                settings['editable'][plugin]['metadata']['plugin_id'] = \
                    orig_meta['plugin_id']
        elif 'chosen_id' in orig_meta:
            if plugin not in settings['editable']:
                settings['editable'][plugin] = orig_dea['editable'][plugin]
            else:
                new_meta = settings['editable'][plugin]['metadata']
                new_meta['chosen_id'] = orig_meta['chosen_id']
                new_meta['versions'][0]['metadata']['plugin_id'] = \
                    orig_meta['versions'][0]['metadata']['plugin_id']
    with io.open(settings_yaml, 'w') as stream:
        yaml.dump(settings, stream, default_flow_style=False)
def main():
    dea_file = parse_arguments()
    check_file_exists(ASTUTE_YAML)
    dea = DeploymentEnvironmentAdapter(dea_file)
    with io.open(ASTUTE_YAML) as stream:
        astute = yaml.load(stream)
    transplant(dea, astute)
    with io.open(ASTUTE_YAML, 'w') as stream:
        yaml.dump(astute, stream, default_flow_style=False)
def modify_settings(self):
    log('Modify settings for environment %s' % self.env_id)
    settings_yaml = ('%s/settings_%s.yaml'
                     % (self.yaml_config_dir, self.env_id))
    check_file_exists(settings_yaml)
    backup(settings_yaml)
    settings = self.dea.get_property('settings')
    with io.open(settings_yaml, 'w') as stream:
        yaml.dump(settings, stream, default_flow_style=False)
def parse_arguments():
    parser = ArgParser(prog='python %s' % __file__)
    parser.add_argument('-nh', dest='no_health_check', action='store_true',
                        default=False,
                        help='Don\'t run health check after deployment')
    parser.add_argument('dea_file', action='store',
                        help='Deployment Environment Adapter: dea.yaml')
    args = parser.parse_args()
    check_file_exists(args.dea_file)
    kwargs = {'dea_file': args.dea_file,
              'no_health_check': args.no_health_check}
    return kwargs
def main():
    dea_file = parse_arguments()
    check_file_exists(ASTUTE_YAML)
    # Temporarily disabled for Fuel 10.
    # check_file_exists(FUEL_BOOTSTRAP_CLI_YAML)
    dea = DeploymentEnvironmentAdapter(dea_file)
    log('Reading astute file %s' % ASTUTE_YAML)
    with io.open(ASTUTE_YAML) as stream:
        astute = yaml.load(stream)
    log('Initiating transplant')
    transplant(dea, astute)
    with io.open(ASTUTE_YAML, 'w') as stream:
        yaml.dump(astute, stream, default_flow_style=False)
    log('Transplant done')
def __init__(self, storage_dir, pxe_bridge, dha_file, root_dir):
    super(VirtualFuel, self).__init__(storage_dir, dha_file, root_dir)
    self.pxe_bridge = pxe_bridge
    self.temp_dir = tempfile.mkdtemp()
    self.vm_name = self.dha.get_node_property(self.fuel_node_id,
                                              'libvirtName')
    self.vm_template = '%s/%s' % (
        self.root_dir,
        self.dha.get_node_property(self.fuel_node_id, 'libvirtTemplate'))
    check_file_exists(self.vm_template)
    with open(self.vm_template) as f:
        self.vm_xml = etree.parse(f)
    self.temp_vm_file = '%s/%s' % (self.temp_dir, self.vm_name)
    self.update_vm_template_file()
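# update_vm_template_file() is called above but not defined in these
# snippets. A plausible sketch only, assuming it rewrites the <interface>
# source bridge in the parsed libvirt domain XML (an lxml ElementTree) to
# the configured PXE bridge and writes the result to the temp file; the
# real method may adjust more of the template:
def update_vm_template_file(self):
    for source in self.vm_xml.xpath('/domain/devices/interface/source'):
        source.set('bridge', self.pxe_bridge)
    self.vm_xml.write(self.temp_vm_file, pretty_print=True)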
def create_vms(self):
    temp_dir = tempfile.mkdtemp()
    disk_sizes = self.dha.get_disks()
    for node_id in self.node_ids:
        vm_name = self.dha.get_node_property(node_id, 'libvirtName')
        vm_template = '%s/%s' % (self.root_dir,
                                 self.dha.get_node_property(
                                     node_id, 'libvirtTemplate'))
        check_file_exists(vm_template)
        disk_path = '%s/%s.raw' % (self.storage_dir, vm_name)
        self.create_storage(node_id, disk_path, disk_sizes)
        temp_vm_file = '%s/%s' % (temp_dir, vm_name)
        exec_cmd('cp %s %s' % (vm_template, temp_vm_file))
        self.define_vm(vm_name, temp_vm_file, disk_path)
    delete(temp_dir)
def create_vm(self):
    temp_dir = tempfile.mkdtemp()
    vm_name = self.dha.get_node_property(self.fuel_node_id, 'libvirtName')
    vm_template = '%s/%s' % (self.root_dir,
                             self.dha.get_node_property(
                                 self.fuel_node_id, 'libvirtTemplate'))
    check_file_exists(vm_template)
    disk_path = '%s/%s.raw' % (self.storage_dir, vm_name)
    disk_sizes = self.dha.get_disks()
    disk_size = disk_sizes['fuel']
    exec_cmd('fallocate -l %s %s' % (disk_size, disk_path))
    temp_vm_file = '%s/%s' % (temp_dir, vm_name)
    exec_cmd('cp %s %s' % (vm_template, temp_vm_file))
    self.set_vm_nic(temp_vm_file)
    self.define_vm(vm_name, temp_vm_file, disk_path)
    delete(temp_dir)
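# The VM-creation snippets above shell out through an exec_cmd() helper
# whose definition is not shown. A minimal sketch, assuming it runs the
# command through a shell, raises on a non-zero exit status, and returns
# stripped stdout:
def exec_cmd(cmd):
    import subprocess
    # Run through a shell so commands like 'cp src dst' work as-is.
    proc = subprocess.Popen(cmd, shell=True,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out, err = proc.communicate()
    if proc.returncode != 0:
        raise Exception('Command failed: %s\n%s' % (cmd, err.decode()))
    return out.decode().strip()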
def analyze_experiment(info, experiments_dir, tmp_data_dir,
                       date_str, exp_name):
    exp_dir = os.path.join(experiments_dir, exp_name)
    exp_data_dir = os.path.join(tmp_data_dir, exp_name)
    tmp_analysis_dir = os.path.join(exp_data_dir, 'analysis')
    idemp_mkdir(tmp_analysis_dir)

    analyzed_data_dir = info.exp_data_dir(exp_name)
    if not os.path.exists(analyzed_data_dir):
        idemp_mkdir(analyzed_data_dir)

    subprocess.call([os.path.join(exp_dir, 'analyze.sh'),
                     info.exp_config_dir(exp_name), exp_data_dir,
                     tmp_analysis_dir],
                    cwd=exp_dir)

    status = validate_status(tmp_analysis_dir)

    # read the analyzed data, append a timestamp field, and copy over
    # to the permanent data dir
    if status['success']:
        data_exists = check_file_exists(tmp_analysis_dir, 'data.json')
        if not data_exists:
            status = {'success': False,
                      'message': 'No data.json file produced by {}'.format(
                          exp_name)}
        else:
            # collect data to dump to data_*.json
            dump_data = {'timestamp': date_str}
            dump_data.update(read_json(tmp_analysis_dir, 'data.json'))
            # fetch time spent on the experiment
            dump_data.update(get_timing_info(info, exp_name))
            write_json(analyzed_data_dir,
                       'data_{}.json'.format(date_str), dump_data)

    info.report_exp_status(exp_name, 'analysis', status)
    return status['success']
def modify_node_attributes(self, node_id, roles_blade):
    log('Modify attributes for node {0}'.format(node_id))
    dea_key = self.dea.get_node_property(roles_blade[1], 'attributes')
    if not dea_key:
        # Node attributes are not overridden. Nothing to do.
        return
    new_attributes = self.dea.get_property(dea_key)
    attributes_yaml = ('%s/node_%s/attributes.yaml'
                       % (self.yaml_config_dir, node_id))
    check_file_exists(attributes_yaml)
    backup('%s/node_%s' % (self.yaml_config_dir, node_id))
    with open(attributes_yaml) as stream:
        attributes = yaml.load(stream)
    result_attributes = self._merge_dicts(attributes, new_attributes)
    with open(attributes_yaml, 'w') as stream:
        yaml.dump(result_attributes, stream, default_flow_style=False)
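# modify_node_attributes() and modify_node_interface() both rely on a
# self._merge_dicts() helper whose definition is not included here. A
# minimal sketch, assuming a recursive merge where values from the
# override dict win and nested dicts are merged key by key:
def _merge_dicts(self, base, override):
    result = dict(base)
    for key, value in override.items():
        if (key in result and isinstance(result[key], dict)
                and isinstance(value, dict)):
            # Both sides are dicts: recurse instead of replacing wholesale.
            result[key] = self._merge_dicts(result[key], value)
        else:
            result[key] = value
    return result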
def create_vms(self):
    temp_dir = tempfile.mkdtemp()
    disk_sizes = self.dha.get_disks()
    for node_id in self.node_ids:
        vm_name = self.dha.get_node_property(node_id, 'libvirtName')
        vm_template = '%s/%s' % (self.root_dir,
                                 self.dha.get_node_property(
                                     node_id, 'libvirtTemplate'))
        check_file_exists(vm_template)
        disk_path = '%s/%s.raw' % (self.storage_dir, vm_name)
        self.create_storage(node_id, disk_path, disk_sizes)
        temp_vm_file = '%s/%s' % (temp_dir, vm_name)
        exec_cmd('cp %s %s' % (vm_template, temp_vm_file))
        vm_definition_overwrite = self.dha.get_vm_definition(
            self.dea.get_node_main_role(node_id, self.fuel_node_id))
        self.define_vm(vm_name, temp_vm_file, disk_path,
                       vm_definition_overwrite)
    delete(temp_dir)
def main(home_dir, experiments_dir, subsystem_dir, telemetry_script_dir):
    """
    Home directory: Where config info for experiments, etc., is
    Experiments directory: Where experiment implementations are
    Both should be given as absolute directories
    """
    time_str = get_timestamp()

    if not check_file_exists(home_dir, 'config.json'):
        print('Dashboard config (config.json) is missing in {}'.format(
            home_dir))
        return 1
    dash_config = read_json(home_dir, 'config.json')

    # must expand all tildes in the config to avoid future errors
    for path_field in ['tmp_data_dir', 'setup_dir', 'backup_dir']:
        dash_config[path_field] = os.path.expanduser(dash_config[path_field])

    tmp_data_dir = os.path.join(dash_config['tmp_data_dir'],
                                'benchmarks_' + time_str)
    data_archive = os.path.join(dash_config['tmp_data_dir'],
                                'benchmarks_' + time_str + '_data.tar.gz')
    setup_dir = dash_config['setup_dir']
    backup_archive = os.path.join(dash_config['backup_dir'],
                                  'dashboard_' + time_str + '.tar.gz')
    idemp_mkdir(tmp_data_dir)
    idemp_mkdir(os.path.dirname(backup_archive))
    idemp_mkdir(setup_dir)

    info = DashboardInfo(home_dir)

    # make a backup of the previous dashboard files if they exist
    if os.path.exists(home_dir):
        subprocess.call(['tar', '-zcf', backup_archive, home_dir])

    # directories whose contents should not change between runs
    # of the dashboard
    persistent_dirs = {info.exp_data, info.exp_configs,
                       info.subsys_configs, info.subsys_output}
    all_dashboard_dirs = (info.all_experiment_dirs()
                          + info.all_subsystem_dirs())

    # instantiate necessary dashboard dirs and clean any that should be empty
    for dashboard_dir in all_dashboard_dirs:
        if dashboard_dir not in persistent_dirs:
            subprocess.call(['rm', '-rf', dashboard_dir])
        idemp_mkdir(dashboard_dir)

    randomize_exps = True
    if 'randomize' in dash_config:
        randomize_exps = dash_config['randomize']

    telemetry_rate = dash_config.get('telemetry_rate', 15)
    run_cpu_telemetry = dash_config.get('run_cpu_telemetry', False)
    run_gpu_telemetry = dash_config.get('run_gpu_telemetry', False)

    run_all_experiments(info, experiments_dir, setup_dir, tmp_data_dir,
                        data_archive, time_str, telemetry_script_dir,
                        run_cpu_telemetry=run_cpu_telemetry,
                        run_gpu_telemetry=run_gpu_telemetry,
                        telemetry_interval=telemetry_rate,
                        randomize=randomize_exps)

    run_all_subsystems(info, subsystem_dir, time_str)
def __init__(self, filename):
    """Initialization.

    Parameters
    ----------
    filename : str
        GFF file to parse.
    """
    check_file_exists(filename)

    self.genes = {}
    self.last_coding_base = {}

    self.__parseGFF(filename)

    self.coding_base_masks = {}
    for seq_id in self.genes:
        self.coding_base_masks[seq_id] = self.__build_coding_base_mask(
            seq_id)
def main():
    dea_file = parse_arguments()
    check_file_exists(ASTUTE_YAML)
    check_file_exists(FUEL_BOOTSTRAP_CLI_YAML)
    dea = DeploymentEnvironmentAdapter(dea_file)
    log('Reading astute file %s' % ASTUTE_YAML)
    with io.open(ASTUTE_YAML) as stream:
        astute = yaml.load(stream)
    log('Initiating transplant')
    transplant(dea, astute)
    with io.open(ASTUTE_YAML, 'w') as stream:
        yaml.dump(astute, stream, default_flow_style=False)
    log('Transplant done')
    # Update bootstrap config yaml with info from DEA/astute.yaml
    with io.open(FUEL_BOOTSTRAP_CLI_YAML) as stream:
        fuel_bootstrap_cli = yaml.load(stream)
    transplant_bootstrap(astute, fuel_bootstrap_cli)
    with io.open(FUEL_BOOTSTRAP_CLI_YAML, 'w') as stream:
        yaml.dump(fuel_bootstrap_cli, stream, default_flow_style=False)
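# transplant() and transplant_bootstrap() are defined elsewhere in these
# deploy scripts. A minimal sketch of the general idea only, assuming the
# DEA's 'fuel' section simply overrides the matching top-level keys in the
# loaded astute.yaml dict (the real implementation applies per-key handling):
def transplant(dea, astute):
    fuel_conf = dea.get_property('fuel')
    for key in fuel_conf:
        astute[key] = fuel_conf[key]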
def summary_valid(exp_summary_dir):
    """
    Checks that the experiment summary directory contains a summary.json
    file and that the summary.json file contains the required fields,
    title and value.
    """
    exists = check_file_exists(exp_summary_dir, 'summary.json')
    if not exists:
        return False
    summary = read_json(exp_summary_dir, 'summary.json')
    return 'title' in summary and 'value' in summary
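# The dashboard snippets in this collection share a small set of file
# utilities (check_file_exists, read_json, write_json, idemp_mkdir) whose
# definitions are not included. A minimal sketch of plausible
# implementations, assuming the two-argument (directory, filename) form
# used by the dashboard code; note the Fuel deploy snippets elsewhere use
# a one-argument check_file_exists(path) instead:
import json
import os


def check_file_exists(dirname, filename):
    # True iff dirname/filename exists and is a regular file.
    return os.path.isfile(os.path.join(dirname, filename))


def read_json(dirname, filename):
    with open(os.path.join(dirname, filename)) as f:
        return json.load(f)


def write_json(dirname, filename, obj):
    with open(os.path.join(dirname, filename), 'w') as f:
        json.dump(obj, f)


def idemp_mkdir(dirname):
    # Idempotent mkdir -p: create the directory if not already present.
    os.makedirs(dirname, exist_ok=True)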
def modify_node_interface(self, node_id, roles_blade):
    log('Modify interface config for node %s' % node_id)
    interface_yaml = ('%s/node_%s/interfaces.yaml'
                      % (self.yaml_config_dir, node_id))
    check_file_exists(interface_yaml)
    backup('%s/node_%s' % (self.yaml_config_dir, node_id))
    with io.open(interface_yaml) as stream:
        interfaces = yaml.load(stream)
    net_name_id = {}
    for interface in interfaces:
        for network in interface['assigned_networks']:
            net_name_id[network['name']] = network['id']
    interface_type = self.dea.get_node_property(roles_blade[1], 'interfaces')
    interface_config = self.dea.get_property(interface_type)
    for interface in interfaces:
        interface['assigned_networks'] = []
        if interface['name'] in interface_config:
            for prop in interface_config[interface['name']]:
                # net name
                if isinstance(prop, six.string_types):
                    net = {'id': net_name_id[prop], 'name': prop}
                    interface['assigned_networks'].append(net)
                # network properties
                elif isinstance(prop, dict):
                    if 'interface_properties' not in prop:
                        log('Interface configuration contains '
                            'unknown dict: %s' % prop)
                        continue
                    interface['attributes'] = self._merge_dicts(
                        interface.get('attributes', {}),
                        prop.get('interface_properties', {}))
    with io.open(interface_yaml, 'w') as stream:
        yaml.dump(interfaces, stream, default_flow_style=False)
def modify_settings(self):
    log('Modify settings for environment %s' % self.env_id)
    settings_yaml = ('%s/settings_%s.yaml'
                     % (self.yaml_config_dir, self.env_id))
    check_file_exists(settings_yaml)
    with io.open(settings_yaml, 'r') as stream:
        orig_dea = yaml.load(stream)
    backup(settings_yaml)
    settings = self.dea.get_property('settings')
    # Copy Fuel-defined plugin_id's to the user-defined settings.
    # From Fuel 8.0 on, chosen_id was added because it is now possible
    # to install many versions of the same plugin, but we will install
    # only one version.
    for plugin in orig_dea['editable']:
        if 'metadata' not in orig_dea['editable'][plugin]:
            continue
        orig_meta = orig_dea['editable'][plugin]['metadata']
        if 'plugin_id' in orig_meta:
            if plugin not in settings['editable']:
                settings['editable'][plugin] = orig_dea['editable'][plugin]
            else:
                settings['editable'][plugin]['metadata']['plugin_id'] = \
                    orig_meta['plugin_id']
        elif 'chosen_id' in orig_meta:
            if plugin not in settings['editable']:
                settings['editable'][plugin] = orig_dea['editable'][plugin]
            else:
                new_meta = settings['editable'][plugin]['metadata']
                new_meta['chosen_id'] = orig_meta['chosen_id']
                new_meta['versions'][0]['metadata']['plugin_id'] = \
                    orig_meta['versions'][0]['metadata']['plugin_id']
    with io.open(settings_yaml, 'w') as stream:
        yaml.dump(settings, stream, default_flow_style=False)
def attempt_parse_config(config_dir, target):
    """
    Returns the parsed config for the target (experiment or subsystem)
    if it exists. Returns None if the config is missing or could not
    be parsed.
    """
    conf_subdir = os.path.join(config_dir, target)
    if not check_file_exists(conf_subdir, 'config.json'):
        return None
    try:
        return read_json(conf_subdir, 'config.json')
    except Exception:
        return None
def parse_arguments():
    parser = ArgParser(prog='python %s' % __file__)
    parser.add_argument('-nh', dest='no_health_check', action='store_true',
                        default=False,
                        help='Don\'t run health check after deployment')
    parser.add_argument('-dt', dest='deploy_timeout', action='store',
                        default=240,
                        help='Deployment timeout (in minutes) [default: 240]')
    parser.add_argument('-nde', dest='no_deploy_environment',
                        action='store_true', default=False,
                        help='Do not launch environment deployment')
    parser.add_argument('dea_file', action='store',
                        help='Deployment Environment Adapter: dea.yaml')
    args = parser.parse_args()
    check_file_exists(args.dea_file)
    kwargs = {'dea_file': args.dea_file,
              'no_health_check': args.no_health_check,
              'deploy_timeout': args.deploy_timeout,
              'no_deploy_environment': args.no_deploy_environment}
    return kwargs
def _check_stage_status(target_status_dir, stage_name):
    filename = '{}.json'.format(stage_name)
    if not check_file_exists(target_status_dir, filename):
        return {'success': False,
                'message': '{} stage status missing'.format(stage_name)}
    try:
        return read_json(target_status_dir, filename)
    except Exception:
        return {'success': False,
                'message': 'Failed to parse {} stage status'.format(
                    stage_name)}
def check_error(experiment_name, model_name, specific_params, path_prefix):
    if not check_file_exists(path_prefix, 'errors.json'):
        return False
    logged_errors = read_json(path_prefix, 'errors.json')
    if experiment_name not in logged_errors:
        return False
    if model_name not in logged_errors[experiment_name]:
        return False
    errors = logged_errors[experiment_name][model_name]
    # A logged error matches if every (key, value) pair of specific_params
    # agrees with the corresponding entry in the error record.
    check_func = lambda err: lambda kv: err.get(kv[0]) == kv[1]
    if specific_params.get('kind') == 'ratio':
        # For 'ratio' runs, ignore memory_budget when matching, since it
        # varies between runs.
        check_func = lambda err: lambda kv: (err.get(kv[0]) == kv[1]
                                             if kv[0] != 'memory_budget'
                                             else True)
    return any(map(lambda err: all(map(check_func(err),
                                       specific_params.items())),
                   errors))
def log_error(experiment_name, model_name, specific_params, inp, err_msg,
              path_prefix):
    err_info = {'input': inp, 'msg': err_msg}
    logged_errors = {}
    if check_file_exists(path_prefix, 'errors.json'):
        logged_errors = read_json(path_prefix, 'errors.json')
    if experiment_name not in logged_errors:
        logged_errors[experiment_name] = {}
    if model_name not in logged_errors[experiment_name]:
        logged_errors[experiment_name][model_name] = []
    logged_errors[experiment_name][model_name].append(
        {'err_info': err_info, **specific_params})
    write_json(path_prefix, 'errors.json', logged_errors)
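# How log_error() and check_error() pair up, with illustrative values
# (the experiment name, model name, and path below are hypothetical):
#
#   log_error('pareto_curve', 'resnet32',
#             {'batch_size': 32, 'kind': 'fixed'},
#             inp=0, err_msg='CUDA out of memory',
#             path_prefix='/tmp/results')
#   check_error('pareto_curve', 'resnet32',
#               {'batch_size': 32, 'kind': 'fixed'},
#               '/tmp/results')   # -> True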
def exp_stage_statuses(self, exp_name):
    ret = {'precheck': self.exp_stage_status(exp_name, 'precheck')}
    if not ret['precheck']['success'] or not self.exp_active(exp_name):
        return ret
    # setup is the only optional stage
    if check_file_exists(self.exp_status_dir(exp_name), 'setup.json'):
        ret['setup'] = self.exp_stage_status(exp_name, 'setup')
        if not ret['setup']['success']:
            return ret
    for stage in ['run', 'analysis', 'summary', 'visualization']:
        ret[stage] = self.exp_stage_status(exp_name, stage)
        if not ret[stage]['success']:
            break
    return ret
def collect_raw_measurements(experiment_name, model, specific_params,
                             path_prefix, cmd_id):
    """
    Reads the raw data for the given experiment name and params and
    returns a tuple (metrics dictionary, memory budget if applicable,
    number of retries, error message). The first three fields will be
    None if there is no data file.
    """
    filename = '{}-{}.csv'.format(
        get_report_prefix(experiment_name, specific_params, cmd_id), model)
    if not check_file_exists(path_prefix, filename):
        return (None, None, None,
                'Data file {} does not exist at {}'.format(filename,
                                                           path_prefix))

    full_path = os.path.join(path_prefix, filename)

    metrics = {}
    memory_budget = None
    num_retries = None
    with open(full_path, 'r', newline='') as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            # In case there are commands for the same model
            # that have the same values for all configurations
            idx = int(row['input'])
            measured = {key: float(row[key]) for key in MEASURED_KEYS}

            if (memory_budget is None
                    and specific_params.get('kind') == 'ratio'):
                memory_budget = float(row['memory_budget'])
            if num_retries is None:
                num_retries = int(row['num_retries'])

            if idx not in metrics:
                metrics[idx] = {key: [] for key in MEASURED_KEYS}
            for key in MEASURED_KEYS:
                metrics[idx][key].append(measured[key])

    return (metrics, memory_budget, num_retries, 'success')
def run_subsystem(info, subsystem_dir, subsys_name):
    subsys_dir = os.path.join(subsystem_dir, subsys_name)

    subsys_output_dir = info.subsys_output_dir(subsys_name)
    idemp_mkdir(subsys_output_dir)

    # remove the old status if one is hanging around
    # (subsystem output dirs remain around between runs)
    if check_file_exists(subsys_output_dir, 'status.json'):
        subprocess.call(['rm', '-f',
                         os.path.join(subsys_output_dir, 'status.json')])

    # run the run.sh file on the configs directory and the output directory
    subprocess.call([os.path.join(subsys_dir, 'run.sh'),
                     info.subsys_config_dir(subsys_name), info.home_dir,
                     subsys_output_dir],
                    cwd=subsys_dir)

    # collect the status file from the destination directory, copy to
    # the status dir
    status = validate_status(subsys_output_dir)
    # not literally copying because validate may have produced a status
    # that generated an error
    info.report_subsys_status(subsys_name, 'run', status)
    return status['success']
def extend_simrd_config(dest_dir, sim_conf_filename, model_name,
                        specific_params, log_name):
    if not check_file_exists(dest_dir, sim_conf_filename):
        prepare_out_file(dest_dir, sim_conf_filename)
        write_json(dest_dir, sim_conf_filename, dict())
    conf = read_json(dest_dir, sim_conf_filename)
    if model_name not in conf:
        conf[model_name] = []
    conf[model_name].append({
        'name': model_util.get_model_family(model_name),
        'batch_size': str(specific_params['batch_size']),
        'layers': specific_params.get(
            'layers', model_util.get_model_layers(model_name)),
        'type': model_util.get_model_type(model_name),
        'log': log_name,
        'has_start': True
    })
    write_json(dest_dir, sim_conf_filename, conf)
def extend_simrd_config(dest_dir, sim_conf_filename, model_name,
                        specific_params, log_name):
    import model_util
    if not check_file_exists(dest_dir, sim_conf_filename):
        prepare_out_file(dest_dir, sim_conf_filename)
        write_json(dest_dir, sim_conf_filename, dict())
    conf = read_json(dest_dir, sim_conf_filename)
    if model_name not in conf:
        conf[model_name] = []
    name = model_util.format_model_name(model_name, specific_params)
    conf[model_name].append({
        'name': name,
        'title': name,
        'desc': model_util.format_input_description(model_name,
                                                    specific_params),
        'log': log_name,
        'has_start': True
    })
    write_json(dest_dir, sim_conf_filename, conf)
def parse_arguments():
    parser = ArgParser(prog='python %s' % __file__)
    parser.add_argument('-nf', dest='no_fuel', action='store_true',
                        default=False,
                        help='Do not install Fuel Master (and Node VMs when '
                             'using libvirt)')
    parser.add_argument('-nh', dest='no_health_check', action='store_true',
                        default=False,
                        help='Don\'t run health check after deployment')
    parser.add_argument('-fo', dest='fuel_only', action='store_true',
                        default=False,
                        help='Install Fuel Master only (and Node VMs when '
                             'using libvirt)')
    parser.add_argument('-co', dest='cleanup_only', action='store_true',
                        default=False,
                        help='Cleanup VMs and Virtual Networks according to '
                             'what is defined in DHA')
    parser.add_argument('-c', dest='cleanup', action='store_true',
                        default=False, help='Cleanup after deploy')
    if {'-iso', '-dea', '-dha', '-h'}.intersection(sys.argv):
        parser.add_argument('-iso', dest='iso_file', action='store',
                            nargs='?', default='%s/OPNFV.iso' % CWD,
                            help='ISO File [default: OPNFV.iso]')
        parser.add_argument('-dea', dest='dea_file', action='store',
                            nargs='?', default='%s/dea.yaml' % CWD,
                            help='Deployment Environment Adapter: dea.yaml')
        parser.add_argument('-dha', dest='dha_file', action='store',
                            nargs='?', default='%s/dha.yaml' % CWD,
                            help='Deployment Hardware Adapter: dha.yaml')
    else:
        parser.add_argument('iso_file', action='store', nargs='?',
                            default='%s/OPNFV.iso' % CWD,
                            help='ISO File [default: OPNFV.iso]')
        parser.add_argument('dea_file', action='store', nargs='?',
                            default='%s/dea.yaml' % CWD,
                            help='Deployment Environment Adapter: dea.yaml')
        parser.add_argument('dha_file', action='store', nargs='?',
                            default='%s/dha.yaml' % CWD,
                            help='Deployment Hardware Adapter: dha.yaml')
    parser.add_argument('-s', dest='storage_dir', action='store',
                        default='%s/images' % CWD,
                        help='Storage Directory [default: images]')
    parser.add_argument('-b', dest='pxe_bridge', action='store',
                        default='pxebr',
                        help='Linux Bridge for booting up the Fuel Master VM '
                             '[default: pxebr]')
    parser.add_argument('-p', dest='fuel_plugins_dir', action='store',
                        help='Fuel Plugins directory')
    parser.add_argument('-pc', dest='fuel_plugins_conf_dir', action='store',
                        help='Fuel Plugins Configuration directory')
    parser.add_argument('-np', dest='no_plugins', action='store_true',
                        default=False, help='Do not install Fuel Plugins')
    parser.add_argument('-dt', dest='deploy_timeout', action='store',
                        default=240,
                        help='Deployment timeout (in minutes) [default: 240]')
    parser.add_argument('-nde', dest='no_deploy_environment',
                        action='store_true', default=False,
                        help='Do not launch environment deployment')

    args = parser.parse_args()
    log(args)

    check_file_exists(args.dha_file)

    if not args.cleanup_only:
        check_file_exists(args.dea_file)
        check_fuel_plugins_dir(args.fuel_plugins_dir)

    iso_abs_path = os.path.abspath(args.iso_file)
    if not args.no_fuel and not args.cleanup_only:
        log('Using OPNFV ISO file: %s' % iso_abs_path)
        check_file_exists(iso_abs_path)
        log('Using image directory: %s' % args.storage_dir)
        create_dir_if_not_exists(args.storage_dir)
        check_bridge(args.pxe_bridge, args.dha_file)

    kwargs = {'no_fuel': args.no_fuel, 'fuel_only': args.fuel_only,
              'no_health_check': args.no_health_check,
              'cleanup_only': args.cleanup_only, 'cleanup': args.cleanup,
              'storage_dir': args.storage_dir, 'pxe_bridge': args.pxe_bridge,
              'iso_file': iso_abs_path, 'dea_file': args.dea_file,
              'dha_file': args.dha_file,
              'fuel_plugins_dir': args.fuel_plugins_dir,
              'fuel_plugins_conf_dir': args.fuel_plugins_conf_dir,
              'no_plugins': args.no_plugins,
              'deploy_timeout': args.deploy_timeout,
              'no_deploy_environment': args.no_deploy_environment}
    return kwargs
def main(config_dir, home_dir, output_dir):
    config = read_config(config_dir)
    if 'channel_id' not in config:
        write_status(output_dir, False, 'No channel token given')
        return 1
    channel = config['channel_id']
    success, msg, client = new_client(config)
    if not success:
        write_status(output_dir, False, msg)
        return 1

    info = DashboardInfo(home_dir)

    failed_subsys = []
    reports = []
    failed_reports = []

    for subsys in info.all_present_subsystems():
        # ignore self
        if subsys == 'subsys_reporter':
            continue
        if not info.subsys_active(subsys):
            continue

        status = info.subsys_stage_status(subsys, 'run')
        if not status['success']:
            failed_subsys.append(failed_subsys_field(subsys, status))
            continue

        report_present = check_file_exists(info.subsys_output_dir(subsys),
                                           'report.json')
        if not report_present:
            continue
        try:
            report = read_json(info.subsys_output_dir(subsys), 'report.json')
            reports.append(build_field(title=report['title'],
                                       value=report['value']))
        except Exception:
            failed_reports.append(subsys)

    attachments = []
    if reports:
        attachments.append(build_attachment(title='Reports', fields=reports))
    if failed_reports or failed_subsys:
        failure_text = ''
        if failed_reports:
            failure_text = 'Failed to parse reports: {}'.format(
                ', '.join(failed_reports))
        attachments.append(build_attachment(title='Errors',
                                            text=failure_text,
                                            color='#fa0000',
                                            fields=failed_subsys))

    if not attachments:
        write_status(output_dir, True, 'Nothing to report')
        return 0

    success, _, msg = post_message(
        client, channel,
        build_message(text='Subsystem Results', attachments=attachments))
    write_status(output_dir, success, msg)
def parse_data_file(experiment_name, model, config, specific_params,
                    path_prefix, cmd_id=0):
    """
    Given an experiment name, model name, directory, and number of inputs,
    parses the corresponding data file if it exists and computes summary
    statistics for the (wall-clock) time, GPU time, and memory used in
    that data file for the chosen specific settings.

    Returns None and an error message if it fails.
    """
    try:
        filename = '{}-{}.csv'.format(
            get_report_prefix(experiment_name, specific_params, cmd_id),
            model)
        if not check_file_exists(path_prefix, filename):
            return (None, 'Data file {} does not exist at {}'.format(
                filename, path_prefix))

        full_path = os.path.join(path_prefix, filename)
        report_errors = config['report_errors']

        metrics = {}
        memory_budget = None
        with open(full_path, 'r', newline='') as csvfile:
            reader = csv.DictReader(csvfile)
            for row in reader:
                # In case there are commands for the same model
                # that have the same values for all configurations
                idx = int(row['input'])
                measured = {key: float(row[key]) for key in MEASURED_KEYS}

                if (memory_budget is None
                        and specific_params.get('kind') == 'ratio'):
                    memory_budget = float(row['memory_budget'])
                    specific_params['memory_budget'] = memory_budget

                if idx not in metrics:
                    metrics[idx] = {key: [] for key in MEASURED_KEYS}
                for key in MEASURED_KEYS:
                    metrics[idx][key].append(measured[key])

        summary = {'specific_params': specific_params}
        # in case everything errored out, this ensures that we will have
        # a record of the error
        if report_errors:
            if check_error(experiment_name, model, specific_params,
                           path_prefix):
                summary['summary'] = 'error'
                return summary, 'success'

        summary_stats = []
        for (_, stat) in metrics.items():
            summary_stats.append({key: compute_summary_stats(stat[key])
                                  for key in MEASURED_KEYS})

        summary['summary'] = summary_stats
        return (summary, 'success')
    except Exception as e:
        return (None, 'Encountered exception on ({}, {}): '.format(
            experiment_name, model) + render_exception(e))
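# compute_summary_stats() is assumed by parse_data_file() but not defined
# in these snippets. A minimal sketch, assuming it reduces a list of
# repeated measurements to mean/median/standard deviation:
import statistics


def compute_summary_stats(values):
    return {
        'mean': statistics.mean(values),
        'median': statistics.median(values),
        # Population stdev: 0 for a single value, whereas statistics.stdev
        # would raise on fewer than two data points.
        'std': statistics.pstdev(values),
    }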