def f(args):
    (index, nid, (config, )) = args
    logger.getlogger().info('+ {0}'.format(nid))

    g = activity_[config['parameters']['activity']]
    df = g(rt.mkargs(nid, config))
    df.rename(nid, inplace=True)

    return (nid, df)
def acc(args):
    (path, freq) = args
    (observation, prediction) = [ int(x) for x in path.parts[-2:] ]
    logger.getlogger().info('o: {0} p: {1}'.format(observation, prediction))

    data = [ pd.read_pickle(str(x)) for x in path.glob('*.pkl') ]
    df = pd.concat(data, axis=1)
    df = df.resample(freq).sum().mean()

    return (observation, prediction, df)
def _run_playbook(playbook, config_path, extra_vars=None, display=True,
                  load_config_vars=False):
    log = logger.getlogger()
    config_pointer_file = gen.get_python_path() + '/config_pointer_file'
    with open(config_pointer_file, 'w') as f:
        f.write(config_path)
    ansible_playbook = gen.get_ansible_playbook_path()
    inventory = ' -i ' + gen.get_python_path() + '/inventory.py'
    playbook = ' ' + playbook
    cmd = ansible_playbook + inventory + playbook
    if load_config_vars:
        cmd += f" --extra-vars '@{config_path}'"
    if extra_vars is not None:
        cmd += f" --extra-vars '{' '.join(extra_vars)}'"
    command = ['bash', '-c', cmd]
    log.debug('Run subprocess: %s' % ' '.join(command))
    if display:
        process = Popen(command, cwd=gen.get_playbooks_path())
        process.wait()
        stdout = ''
    else:
        process = Popen(command, stdout=PIPE, stderr=PIPE,
                        cwd=gen.get_playbooks_path())
        stdout, stderr = process.communicate()
        try:
            stdout = stdout.decode('utf-8')
        except AttributeError:
            pass
    return (process.returncode, stdout)
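
# Usage sketch (illustrative only, not from the original source; the playbook
# name and config path are hypothetical): run a playbook without live output
# and inspect the result.
def _example_run_playbook():
    rc, stdout = _run_playbook('site.yml', '/home/user/config.yml',
                               extra_vars=['log_level=debug'],
                               display=False)
    if rc != 0:
        print('playbook failed:\n' + stdout)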
def cobbler_add_profile(distro, name):
    log = logger.getlogger()
    cobbler_server = xmlrpclib.Server("http://127.0.0.1/cobbler_api")
    token = cobbler_server.login(COBBLER_USER, COBBLER_PASS)

    distro_list = cobbler_server.get_distros()
    existing_distro_list = []
    for existing_distro in distro_list:
        existing_distro_list.append(existing_distro['name'])
    if distro not in existing_distro_list:
        log.warning("Cobbler Skipping Profile - Distro Unavailable: "
                    "name=%s, distro=%s" % (name, distro))
        return

    new_profile_create = cobbler_server.new_profile(token)
    cobbler_server.modify_profile(new_profile_create, "name", name, token)
    cobbler_server.modify_profile(new_profile_create, "distro", distro, token)
    cobbler_server.modify_profile(new_profile_create, "enable_menu", "True",
                                  token)
    cobbler_server.modify_profile(new_profile_create, "kickstart",
                                  "/var/lib/cobbler/kickstarts/%s.seed" % name,
                                  token)
    cobbler_server.save_profile(new_profile_create, token)
    log.info("Cobbler Add Profile: name=%s, distro=%s" % (name, distro))

    cobbler_server.sync(token)
    log.info("Running Cobbler sync")
def _get_mtu_list():
    """Aggregate MTU port data.

    Returns:
        Dictionary of {switch: {mtu value: [ports]}, ...}
    """
    log = logger.getlogger()
    mtu_list = Tree()
    for ntmpl_ind in CFG.yield_ntmpl_ind():
        for phyintf_idx in CFG.yield_ntmpl_phyintf_data_ind(ntmpl_ind):
            mtu = ''
            phy_ifc = CFG.get_ntmpl_phyintf_data_ifc(ntmpl_ind, phyintf_idx)
            ifc = CFG.get_interface(phy_ifc)
            if 'mtu' in ifc:
                mtu = ifc['mtu']
            elif 'MTU' in ifc:
                mtu = ifc['MTU']
            if mtu:
                switch = CFG.get_ntmpl_phyintf_data_switch(
                    ntmpl_ind, phyintf_idx)
                ports = CFG.get_ntmpl_phyintf_data_ports(
                    ntmpl_ind, phyintf_idx)
                if switch in mtu_list and mtu in mtu_list[switch]:
                    mtu_list[switch][mtu] += ports
                else:
                    mtu_list[switch][mtu] = ports

    pretty_str = PP.pformat(mtu_list)
    log.debug('mtu_list')
    log.debug('\n' + pretty_str)
    return mtu_list
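
# Illustrative shape of the returned Tree (switch name, MTU values, and port
# numbers below are hypothetical): each switch maps an MTU value to the list
# of ports that should carry it.
#
#     {'mgmt_switch_1': {1500: [1, 2, 3], 9000: [4, 5]}}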
def __init__(self, dhcp_leases_file, port_type, config_path=None):
    self.log = logger.getlogger()
    self.cfg = Config(config_path)
    self.dhcp_leases_file = dhcp_leases_file
    self.port_type = port_type
    self.inv = Inventory(cfg_file=config_path)
    self.log.debug('Add ports, port type: {}'.format(self.port_type))
def ip_route_get_to(host):
    """Get interface IP that routes to hostname or IP address

    Args:
        host (str): Hostname or IP address

    Returns:
        str: Interface IP with route to host
    """
    log = logger.getlogger()

    # Check if host is given as IP address
    if netaddr.valid_ipv4(host, flags=0):
        host_ip = host
    else:
        try:
            host_ip = socket.gethostbyname(host)
        except socket.gaierror as exc:
            log.warning(
                "Unable to resolve host to IP: '{}' exception: '{}'".format(
                    host, exc))
            raise  # host_ip would be unbound below; surface the error
    with IPRoute() as ipr:
        route = ipr.route('get', dst=host_ip)[0]['attrs'][3][1]

    return route
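
# Usage sketch (illustrative; the address is hypothetical): find which local
# interface IP the kernel would use to reach a target node.
def _example_ip_route_get_to():
    local_ip = ip_route_get_to('192.168.32.21')
    print(local_ip)  # e.g. '192.168.32.1'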
def __init__(self, host, ssh_log=False, username=None, password=None,
             look_for_keys=True, key_filename=None):
    paramiko.SSHClient.__init__(self)
    self.host = host
    self.log = logger.getlogger()
    self.ssh_log = SSH_LOG
    if ssh_log and logger.is_log_level_file_debug():
        paramiko.util.log_to_file(self.ssh_log)

    if key_filename is None:
        self.load_system_host_keys()
    self.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    try:
        self.connect(
            host,
            username=username,
            password=password,
            look_for_keys=look_for_keys,
            key_filename=key_filename)
    except (paramiko.BadHostKeyException, paramiko.AuthenticationException,
            paramiko.SSHException, socket.error, BaseException) as exc:
        self.log.error('%s: %s' % (host, str(exc)))
        raise SSH_Exception('Connection Failure - {}'.format(exc))
def __init__(self, cfg_file):
    self.log = logger.getlogger()
    self.cfg_file = cfg_file
    self.cfg = None
    self.inv = None
def _inv_add_ports_ipmi(self):
    log = logger.getlogger()
    from lib.inventory import Inventory
    inv = Inventory(cfg_file=self.config_file_path)
    if (inv.check_all_nodes_ipmi_macs() and
            inv.check_all_nodes_ipmi_ipaddrs()):
        log.info("IPMI ports MAC and IP addresses already in inventory")
        return

    dhcp_lease_file = '/var/lib/misc/dnsmasq.leases'

    from lib.container import Container
    cont = Container(self.config_file_path)
    cmd = []
    cmd.append(gen.get_container_venv_python_exe())
    cmd.append(os.path.join(
        gen.get_container_python_path(), 'inv_add_ports.py'))
    cmd.append(dhcp_lease_file)
    cmd.append('ipmi')
    cmd.append(self.cont_config_file_path)
    try:
        cont.run_command(cmd)
    except UserException as exc:
        print('Fail:', exc.message, file=sys.stderr)
        sys.exit(1)

    print('IPMI ports added to inventory')
def bmcReset(host, op, bmc):
    """Controls resetting the BMC. A warm reset reboots the BMC; a cold
    reset removes the configuration and reboots.

    Args:
        host: string, the hostname or IP address of the BMC
        op: the reset operation to perform ('cold')
        bmc: the active BMC connection to use
    Returns:
        True if the reset was accepted, else False
    """
    log = logger.getlogger()
    op = op.lower()

    class BmcOp(Enum):
        # warm = 'warm'  # implementation in openbmctool is same as 'warm'
        cold = 'cold'

    try:
        BmcOp[op]
    except KeyError as exc:
        log.error(f'Invalid bmc operation: {op} Key error {exc}')
        raise

    res = False  # ensure a defined result even if no op matches
    if BmcOp[op].value == "cold":
        try:
            res = bmc.reset_bmc()
        except pyghmi_exception.IpmiException:
            log.error(f'Failed cold reboot of BMC {host}')
            res = False
        else:
            res = True
    return res
def _set_pup_reboot(software_hosts_file_path, hosts_list):
    """Set 'pup_reboot' variable for all hosts.

    If the installer is included as a host (for self-install) it should set
    'pup_reboot=False'. A global 'pup_reboot=True' is set for other hosts.
    Users could manually set 'pup_reboot=False' on any host to prevent
    automatic reboots.

    Args:
        software_hosts_file_path (str): Path to software inventory file
        hosts_list (list): List of hostnames or IP addresses
    """
    log = logger.getlogger()
    log.debug("Adding global software_hosts var 'pup_reboot=True'")
    add_software_hosts_global_var(software_hosts_file_path, "pup_reboot=True",
                                  clear_ssh_key=False)
    installer_fqdn = getfqdn()
    if installer_fqdn in hosts_list:
        log.debug(f"Self-install detected - '{installer_fqdn}' in software "
                  "hosts. Setting host var 'pup_reboot=False'")
        line_in_file(software_hosts_file_path, f'^{installer_fqdn}.*',
                     installer_fqdn + " pup_reboot=False")
def create_ssh_key_pair(name):
    """Create an SSH private/public key pair in ~/.ssh/

    If an SSH key pair exists with "name" then the private key path is
    returned *without* creating anything new.

    Args:
        name (str): Filename of private key file

    Returns:
        str: Private ssh key path

    Raises:
        UserException: If ssh-keygen command fails
    """
    log = logger.getlogger()
    ssh_dir = os.path.join(Path.home(), ".ssh")
    private_key_path = os.path.join(ssh_dir, name)
    if not os.path.isdir(ssh_dir):
        os.mkdir(ssh_dir, mode=0o700)
    if os.path.isfile(private_key_path):
        log.info(f'SSH key \'{private_key_path}\' already exists, continuing')
    else:
        print(bold(f'Creating SSH key \'{private_key_path}\''))
        cmd = ('ssh-keygen -t rsa -b 4096 '
               '-C "Generated by Power-Up Software Installer" '
               f'-f {private_key_path} -N ""')
        resp, err, rc = sub_proc_exec(cmd, shell=True)
        if str(rc) != "0":
            msg = 'ssh-keygen failed:\n{}'.format(resp)
            log.debug(msg)
            raise UserException(msg)
    return private_key_path
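
# Usage sketch (illustrative; the key name is hypothetical): create the key
# pair once and reuse it on subsequent calls.
def _example_create_ssh_key_pair():
    key_path = create_ssh_key_pair('powerup')
    # key_path -> '~/.ssh/powerup'; the public key lands at
    # '~/.ssh/powerup.pub'. A second call returns the same path untouched.
    print(key_path)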
def _create_inventory(self):
    # from lib.inventory import Inventory
    # log = logger.getlogger()
    # inv = Inventory(cfg_file=self.config_file_path)
    # node_count = len(inv.inv['nodes'])
    # if node_count > 0:
    #     log.info("Inventory already exists!")
    #     print("\nInventory already exists with {} nodes defined."
    #           "".format(node_count))
    #     print("Press enter to continue using the existing inventory.")
    #     print("Type 'C' to continue creating a new inventory. "
    #           "WARNING: Contents of current file will be overwritten!")
    #     resp = raw_input("Type 'T' to terminate Cluster Genesis ")
    #     if resp == 'T':
    #         sys.exit('POWER-Up stopped at user request')
    #     elif resp == 'C':
    #         log.info("'{}' entered. Creating new inventory file."
    #                  "".format(resp))
    #     else:
    #         log.info("Continuing with existing inventory.")
    #         return
    from lib.container import Container
    log = logger.getlogger()
    cont = Container(self.config_file_path, self.args.create_inventory)
    cmd = []
    cmd.append(gen.get_container_venv_python_exe())
    cmd.append(os.path.join(
        gen.get_container_python_path(), 'inv_create.py'))
    cmd.append(self.cont_config_file_path)
    try:
        cont.run_command(cmd)
    except UserException as exc:
        print('Fail:', exc.message, file=sys.stderr)
        sys.exit(1)

    deployer_inv_file = gen.get_symlink_realpath(self.config_file_path)

    # If inventory file symlink is broken link remove it
    symlink_path = gen.get_symlink_path(self.config_file_path)
    if os.path.islink(symlink_path):
        if not os.path.exists(os.readlink(symlink_path)):
            os.unlink(symlink_path)

    # If inventory is an empty file delete it
    if (os.path.isfile(deployer_inv_file) and
            os.stat(deployer_inv_file).st_size == 0):
        os.remove(deployer_inv_file)

    # Create a sym link on deployer to inventory inside container
    if not os.path.isfile(deployer_inv_file):
        cont_inv_file = os.path.join(gen.LXC_DIR, cont.name, 'rootfs',
                                     gen.CONTAINER_PACKAGE_PATH[1:],
                                     gen.INV_FILE_NAME)
        log.debug("Creating symlink on deployer to container inventory: "
                  "{} -> {}".format(deployer_inv_file, cont_inv_file))
        os.symlink(cont_inv_file, deployer_inv_file)
    print('Success: Created inventory file')
def __init__(self, config_path=None, name=None):
    self.log = logger.getlogger()
    self.cfg = Config(config_path)

    self.cont_package_path = gen.get_container_package_path()
    self.cont_id_file = gen.get_container_id_file()
    self.cont_venv_path = gen.get_container_venv_path()
    self.cont_scripts_path = gen.get_container_scripts_path()
    self.cont_python_path = gen.get_container_python_path()
    self.cont_os_images_path = gen.get_container_os_images_path()
    self.cont_playbooks_path = gen.get_container_playbooks_path()
    self.depl_package_path = gen.get_package_path()
    self.depl_python_path = gen.get_python_path()
    self.depl_playbooks_path = gen.get_playbooks_path()

    if name is True or name is None:
        for vlan in self.cfg.yield_depl_netw_client_vlan('pxe'):
            break
        self.name = '{}-pxe{}'.format(self.DEFAULT_CONTAINER_NAME, vlan)
    else:
        self.name = name
    self.client = docker.from_env()

    try:
        self.image = self.client.images.get('power-up')
    except docker.errors.ImageNotFound:
        self.image = None

    try:
        self.cont = self.client.containers.get(self.name)
    except docker.errors.NotFound:
        self.cont = None
def checkFWactivation(host, session):
    """Checks the software inventory for an image that is being activated.

    @return: True if an image is being activated, False if no activations
             are happening
    """
    log = logger.getlogger()
    url = f"https://{host}/xyz/openbmc_project/software/enumerate"
    httpHeader = {'Content-Type': 'application/json'}
    try:
        resp = session.get(url, headers=httpHeader, verify=False, timeout=5)
    except requests.exceptions.Timeout as exc:
        log.error(f'BMC request timeout error. {exc}')
        return True
    except requests.exceptions.ConnectionError as exc:
        log.error(f'BMC connection error. {exc}')
        return True
    fwInfo = json.loads(resp.text)['data']
    for key in fwInfo:
        if 'Activation' in fwInfo[key]:
            if 'Activating' in (fwInfo[key]['Activation'],
                                fwInfo[key]['RequestedActivation']):
                return True
    return False
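
# Usage sketch (illustrative): gate firmware operations on no activation
# being in flight. 'session' is assumed to be an authenticated
# requests.Session against the BMC, as elsewhere in this module.
def _example_checkFWactivation(host, session):
    if checkFWactivation(host, session):
        print('An image is activating (or the BMC is unreachable); retry later')
    else:
        pass  # safe to proceed with the firmware operation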
def get_system_inventory_in_background(host, user, pw):
    """Launches a background subprocess (using Popen) to gather fru
    information from a target node. The reference to the subprocess class is
    returned. The background subprocess can be polled for completion using
    process.poll. Fru information can be read using process.communicate.

    example:
        p = get_system_inventory_in_background('192.168.36.21', 'ADMIN',
                                               'admin')
        ready = False
        while not ready:
            if p.poll() is not None:  # poll() returns None while running
                ready = True
        sys_inv = p.communicate()
        sys_info = extract_system_info_from_inventory(sys_inv)  # returns dict
        sn, pn = extract_system_sn_pn_from_inventory(sys_inv)
    """
    log = logger.getlogger()
    cmd = f'ipmitool -I lanplus -H {host} -U {user} -P {pw} fru'
    process = None  # returned as None if the launch fails
    try:
        process = u.sub_proc_launch(cmd)
    except OSError:
        log.error('An OS error occurred while attempting to run ipmitool fru '
                  'cmd')
    except ValueError:
        log.error('An incorrect argument was passed to the subprocess running '
                  'ipmitool')
    return process
def logout(host, username, pw, session, timeout=10):
    """Logs out of the BMC and terminates the session

    @param host: string, the hostname or IP address of the bmc to log out of
    @param username: The user name for the bmc to log out of
    @param pw: The password for the BMC to log out of
    @param session: the active session to use
    """
    log = logger.getlogger()
    httpHeader = {'Content-Type': 'application/json'}
    try:
        r = session.post(f'https://{host}/logout', headers=httpHeader,
                         json={"data": [username, pw]}, verify=False,
                         timeout=timeout)
    except requests.exceptions.Timeout as err:
        log.debug(f'BMC session request timeout error {err}')
    except requests.exceptions.ConnectionError as err:
        log.debug(f'BMC logout session request connect error {err}')
    else:
        if '"message": "200 OK"' in r.text:
            log.debug(f'Host {host}, user {username} has been logged out')
            return True
def get_system_sn_pn(host, session, timeout=5):
    log = logger.getlogger()
    url = f"https://{host}/xyz/openbmc_project/inventory/system"
    httpHeader = {'Content-Type': 'application/json'}
    try:
        res = session.get(url, headers=httpHeader, verify=False,
                          timeout=timeout)
    except requests.exceptions.Timeout as exc:
        log.debug(f'BMC request timeout error. Host: {host}')
        log.debug(exc)
        res = None
    except requests.exceptions.ConnectionError as exc:
        log.debug(f'BMC request connection error. Host: {host}')
        log.debug(exc)
        res = None
    else:
        try:
            res = json.loads(res.text)
            res = (res['data']['SerialNumber'], res['data']['Model'])
        except (json.JSONDecodeError, AttributeError) as exc:
            log.error(f'Error in JSON response from BMC {host}')
            log.debug(exc)
            res = None
    log.debug(f'BMC SN and PN: {res}')
    return res
def bmcPowerState(host, session, timeout):
    log = logger.getlogger()
    url = f"https://{host}/xyz/openbmc_project/state/bmc0/attr/CurrentBMCState"
    httpHeader = {'Content-Type': 'application/json'}
    try:
        res = session.get(url, headers=httpHeader, verify=False,
                          timeout=timeout)
    except requests.exceptions.Timeout as exc:
        log.debug(f'BMC request timeout error. Host: {host}')
        log.debug(exc)
        res = None
    except requests.exceptions.ConnectionError as exc:
        log.debug(f'BMC request connection error. Host: {host}')
        log.debug(exc)
        res = None
    else:
        try:
            res = json.loads(res.text)['data'].split('.')[-1].lower()
        except (json.JSONDecodeError, AttributeError) as exc:
            log.error(f'Error in JSON response from BMC {host}')
            log.debug(exc)
            res = None
    log.debug(f'BMC Power state: {res}')
    return res
def _inv_add_ports_pxe(self):
    log = logger.getlogger()
    from lib.inventory import Inventory
    inv = Inventory(cfg_file=self.config_file_path)
    if (inv.check_all_nodes_pxe_macs() and
            inv.check_all_nodes_pxe_ipaddrs()):
        log.info("PXE ports MAC and IP addresses already in inventory")
        return

    power_wait = gen.get_power_wait()
    ipmi_set_power('off', self.config_file_path, wait=power_wait)
    # Set boot dev to BIOS setup, to avoid situations where some node types
    # can skip past PXE boot or attempt to boot from disk if PXE does not
    # respond in time
    ipmi_set_bootdev('setup', False, self.config_file_path)
    ipmi_set_power('on', self.config_file_path, wait=power_wait)

    dhcp_lease_file = '/var/lib/misc/dnsmasq.leases'

    from lib.container import Container
    cont = Container(self.config_file_path, self.args.inv_add_ports_pxe)
    cmd = []
    cmd.append(gen.get_container_venv_python_exe())
    cmd.append(os.path.join(
        gen.get_container_python_path(), 'inv_add_ports.py'))
    cmd.append(dhcp_lease_file)
    cmd.append('pxe')
    cmd.append(self.cont_config_file_path)
    try:
        cont.run_command(cmd)
    except UserException as exc:
        print('Fail:', exc.message, file=sys.stderr)
        sys.exit(1)

    print('PXE ports added to inventory')
def download_os_images(config_path=None):
    """Download OS installation images"""
    log = logger.getlogger()
    cfg = Config(config_path)
    os_images_path = get_os_images_path() + "/"
    os_image_urls = get_os_image_urls()

    for os_profile in cfg.yield_ntmpl_os_profile():
        for os_image_url in os_image_urls:
            if check_os_profile(os_profile) in os_image_url['name']:
                for image in os_image_url['images']:
                    dest = os_images_path
                    if 'filename' in image:
                        dest += image['filename']
                    else:
                        dest += image['url'].split("/")[-1]
                    if not os.path.isfile(dest):
                        log.info(f"Downloading OS image: {image['url']}")
                        wget.download(image['url'], out=dest)
                        print('')
                        sys.stdout.flush()
                    log.info('Verifying OS image sha1sum: %s' % dest)
                    if image['sha1sum'] != sha1sum(dest):
                        msg = ('OS image sha1sum verification failed: %s' %
                               dest)
                        log.error(msg)
                        raise UserException(msg)
def create_pup_repo_mirror(repo_base_dir):
    """Download POWER-Up public repository in full

    Args:
        repo_base_dir (str): Base directory path
    """
    log = logger.getlogger()

    if not repo_base_dir.endswith('.git'):
        repo_base_dir = path.join(repo_base_dir, 'power-up.git')

    makedirs(repo_base_dir, exist_ok=True)

    if len(listdir(repo_base_dir)) != 0:
        log.info(f"The directory '{repo_base_dir}' already exists and is not"
                 " empty.")
        if get_yesno("Permanently remove existing contents and re-clone? "):
            rmtree(repo_base_dir)
            makedirs(repo_base_dir, exist_ok=True)
        else:
            log.debug("User selected to continue without re-cloning")
            return

    url_path = rlinput("POWER-Up Repository path/URL: ", get_github_url())
    resp, err, rc = sub_proc_exec(f'git clone --mirror {url_path} '
                                  f'{repo_base_dir}')
    if rc != 0:
        log.error('An error occurred while cloning mirror of power-up repo: '
                  f'{err}')
def bmcReset(host, op, session):
    """Controls resetting the BMC. A warm reset reboots the BMC; a cold
    reset removes the configuration and reboots.

    @param host: string, the hostname or IP address of the bmc
    @param op: the reset operation to perform ('cold')
    @param session: the active session to use
    @return: response status from the BMC if the reset was accepted, else
             None
    """
    log = logger.getlogger()
    op = op.lower()

    class BmcOp(Enum):
        # warm = 'warm'  # implementation in openbmctool is same as 'warm'
        cold = 'cold'

    try:
        BmcOp[op]
    except KeyError as exc:
        log.error(f'Invalid bmc operation: {op} Key error {exc}')
        raise

    if checkFWactivation(host, session):
        log.error("BMC reset control disabled during firmware activation")
        return None

    res = None
    if BmcOp[op].value == "cold":
        url = (f"https://{host}/xyz/openbmc_project/state/bmc0/attr/"
               "RequestedBMCTransition")
        httpHeader = {'Content-Type': 'application/json'}
        data = '{"data":"xyz.openbmc_project.State.BMC.Transition.Reboot"}'
        try:
            res = session.put(url, headers=httpHeader, data=data,
                              verify=False, timeout=5)
        except requests.exceptions.Timeout as exc:
            log.error(f'BMC request timeout error. Host: {host}')
            log.debug(exc)
            res = None
        except requests.exceptions.ConnectionError as exc:
            log.error(f'BMC request connection error. Host: {host}')
            log.debug(exc)
            res = None
        else:
            try:
                res = json.loads(res.text)['status'].lower()
            except json.JSONDecodeError as exc:
                log.error(f'Error decoding JSON response from BMC {host}')
                log.error(exc)
                res = None
            except KeyError as exc:
                log.error(f'Error in response from BMC {host}. '
                          f'Status key not found {exc}')
                res = None
    return res
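
# Usage sketch (illustrative; assumes an authenticated requests.Session as
# used elsewhere in this module, and that the OpenBMC REST API reports a
# lowercase 'ok' status on success):
def _example_bmcReset(host, session):
    if bmcReset(host, 'cold', session) == 'ok':
        print(f'Cold reset of BMC {host} requested')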
def hostBootMode(host, mode, session, timeout=5):
    """Gets or sets the host boot mode.

    @param host: string, the hostname or IP address of the bmc
    @param mode: (str) The mode to boot. If empty, returns the current boot
                 mode.
    """
    log = logger.getlogger()
    mode = mode.title()

    class BootMode(Enum):
        Regular = 'Regular'
        Setup = 'Setup'
        Bios = 'Setup'
        Safe = 'Safe'

    if mode:
        try:
            BootMode[mode]
        except KeyError as exc:
            log.error(f'Invalid Boot mode: {mode} Key error {exc}')
            raise
        url = (f"https://{host}/xyz/openbmc_project/control/host0/boot/"
               "one_time/attr/BootMode")
        httpHeader = {'Content-Type': 'application/json'}
        data = ('xyz.openbmc_project.Control.Boot.Mode.Modes.'
                f'{BootMode[mode].value}')
        data = '{"data":"' + data + '"}'
        try:
            res = session.put(url, headers=httpHeader, data=data,
                              verify=False, timeout=timeout)
        except requests.exceptions.Timeout as exc:
            log.error('BMC request timeout error.')
            log.debug(exc)
        else:
            if res.status_code == 200:
                return BootMode[mode].value.lower()
            else:
                log.error(f'Error setting boot mode. rc: {res.status_code} '
                          f'reason: {res.reason}')
                return
    else:
        url = (f"https://{host}/xyz/openbmc_project/control/host0/boot/"
               "one_time/attr/BootMode")
        httpHeader = {'Content-Type': 'application/json'}
        try:
            res = session.get(url, headers=httpHeader, verify=False,
                              timeout=timeout)
        except requests.exceptions.Timeout as exc:
            log.error(f'BMC request timeout error. {exc}')
        else:
            bootMode = json.loads(res.text)['data'].split('.')[-1]
            return bootMode.lower()
def load_yamlfile(yamlfile):
    log = logger.getlogger()
    try:
        with open(yamlfile, 'r') as f:
            yaml_file = yaml.full_load(f)
    except yaml.YAMLError as e:
        log.error("unable to load file: {0}\n error: {1}".format(yamlfile, e))
        raise e
    return yaml_file
def __init__(self, host=None, userid=None, password=None, mode=None,
             outfile=None):
    self.log = logger.getlogger()
def get_system_inventory(host, user, pw):
    log = logger.getlogger()
    cmd = f'ipmitool -I lanplus -H {host} -U {user} -P {pw} fru'
    res, err, rc = u.sub_proc_exec(cmd)
    if rc == 0:
        return res
    else:
        log.debug(f'Unable to read system information from {host}, rc: {rc}')
def _bundle(self, root_dir):
    log = logger.getlogger()
    print('Bundling {0} directory'.format(root_dir))
    try:
        bundle.bundle_this(root_dir, self.args.bundle_to[0])
        print('Bundled {0} directory'.format(root_dir))
    except KeyboardInterrupt as e:
        log.error("User exit ... {0}".format(e))
def create_base_dir(base_dir):
    log = logger.getlogger()
    print('\nMove or Copy the existing software server directories?')
    ch, action = get_selection('move\ncopy', ('m', 'c'))
    if action == 'copy':
        statvfs = os.statvfs(base_dir)
        freespace = statvfs.f_frsize * statvfs.f_bavail
        if freespace < 18000000000:
            sys.exit('Insufficient space on disk')
    arch = ''
    exists = glob('/srv/repos/dependencies/rhel7/*')
    if not exists:
        log.error('\nNo dependencies folder found. Unable to perform move.\n')
        sys.exit()
    for path in exists:
        if 'p8' in path or 'p9' in path:
            arch = 'ppc64le'
            break
        elif 'x86_64' in path:
            arch = 'x86_64'
            break
    if not arch:
        log.error('\nUnable to determine architecture. Unable to perform '
                  'move.\n')
        sys.exit()

    if os.path.exists(f'{base_dir}/wmla120-{arch}'):
        print(f'Destination path {base_dir}/wmla120-{arch} already exists.')
        if action == 'copy':
            if not get_yesno('Okay to proceed with force copy? '):
                sys.exit('Exit at user request')
    else:
        os.mkdir(f'{base_dir}/wmla120-{arch}/')

    for _dir in ('repos', 'anaconda', 'spectrum-conductor', 'spectrum-dli',
                 'wmla-license'):
        path = os.path.join('/srv/', _dir, '')
        if os.path.isdir(path):
            print(f'Found dir: {path}')
            if action == 'move':
                try:
                    _dir = f'{base_dir}/wmla120-{arch}/'
                    move(path, _dir)
                    cmd = f'sudo chcon -Rv --type=httpd_sys_content_t {_dir}'
                    _, err, rc = sub_proc_exec(cmd)
                    if rc != 0:
                        log.error(f'chtype of directory {_dir} failed {err}')
                except shutil_Error as exc:
                    print(exc)
            elif action == 'copy':
                cmd = f'cp -rf {path} {base_dir}/wmla120-{arch}/'
                try:
                    _, err, rc = sub_proc_exec(cmd)
                except Exception as exc:
                    err, rc = str(exc), 1  # a failed launch counts as an error
                if rc != 0:
                    log.error(f'Copy error {err}')
        else:
            log.error(f'Path {path} missing')
    print('Done')
def hostBootSource(host, source, session, timeout=5):
    """Gets or sets the host boot source.

    @param host: string, the hostname or IP address of the bmc
    @param source: (str) The source to boot from. If empty, returns the
                   current boot source.
    """
    log = logger.getlogger()
    source = source.title()

    class BootSource(Enum):
        Default = 'Default'
        Network = 'Network'
        Pxe = 'Network'
        Disk = 'Disk'

    if source:
        try:
            BootSource[source]
        except KeyError as exc:
            log.error(f'Invalid Boot source: {source} Key error {exc}')
            raise
        url = (f"https://{host}/xyz/openbmc_project/control/host0/boot/"
               "one_time/attr/BootSource")
        httpHeader = {'Content-Type': 'application/json'}
        data = ('xyz.openbmc_project.Control.Boot.Source.Sources.'
                f'{BootSource[source].value}')
        data = '{"data":"' + data + '"}'
        try:
            res = session.put(url, headers=httpHeader, data=data,
                              verify=False, timeout=timeout)
        except requests.exceptions.Timeout as exc:
            log.error(f'BMC request timeout error. {exc}')
        else:
            if res.status_code == 200:
                return BootSource[source].value.lower()
            else:
                log.error(f'Error setting boot source. rc: {res.status_code} '
                          f'reason: {res.reason}')
                return
    else:
        url = (f"https://{host}/xyz/openbmc_project/control/host0/boot/"
               "one_time/attr/BootSource")
        httpHeader = {'Content-Type': 'application/json'}
        try:
            res = session.get(url, headers=httpHeader, verify=False,
                              timeout=timeout)
        except requests.exceptions.Timeout as exc:
            log.error(f'BMC request timeout error. {exc}')
        else:
            bootSource = json.loads(res.text)['data'].split('.')[-1]
            return bootSource.lower()
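
# Usage sketch (illustrative): the same entry point sets or gets the one-time
# boot source depending on whether 'source' is empty.
def _example_hostBootSource(host, session):
    hostBootSource(host, 'pxe', session)          # set; returns 'network'
    current = hostBootSource(host, '', session)   # get
    print(current)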
def __init__(self, cfg_file=None, inv_file=None):
    self.log = logger.getlogger()

    self.dbase = DatabaseInventory(inv_file=inv_file, cfg_file=cfg_file)

    self.inv = AttrDict()
    inv = self.dbase.load_inventory()
    if inv is not None:
        self.inv = inv

    self.switch = None
    self.switch_type = None

    if self.InvKey.CONFIG_FILE not in self.inv:
        self.inv.config_file = cfg_file
    if self.InvKey.NODES not in self.inv:
        self.inv.nodes = []
    if self.InvKey.SWITCHES not in self.inv:
        self.inv.switches = []

    # Order is only kept in Python 3.6 and above
    # self.nodes = AttrDict({
    #     self.InvKey.LABEL: 'a',
    #     self.InvKey.HOSTNAME: 'b',
    #     self.InvKey.PORT: 'c'})

    self.nodes = AttrDict()
    self.nodes[self.InvKey.LABEL] = []
    self.nodes[self.InvKey.HOSTNAME] = []
    self.nodes[self.InvKey.RACK_ID] = []
    self.nodes[self.InvKey.BMC_TYPE] = []
    self.nodes[self.InvKey.IPMI] = AttrDict()
    self.nodes[self.InvKey.PXE] = AttrDict()
    self.nodes[self.InvKey.DATA] = AttrDict()
    self.nodes[self.InvKey.OS] = []
    self.nodes[self.InvKey.ROLES] = []
    self.nodes[self.InvKey.INTERFACES] = []

    self.nodes[self.InvKey.IPMI][self.InvKey.SWITCHES] = []
    self.nodes[self.InvKey.IPMI][self.InvKey.PORTS] = []
    self.nodes[self.InvKey.IPMI][self.InvKey.MACS] = []
    self.nodes[self.InvKey.IPMI][self.InvKey.IPADDRS] = []
    self.nodes[self.InvKey.IPMI][self.InvKey.USERID] = []
    self.nodes[self.InvKey.IPMI][self.InvKey.PASSWORD] = []

    self.nodes[self.InvKey.PXE][self.InvKey.PORTS] = []
    self.nodes[self.InvKey.PXE][self.InvKey.MACS] = []
    self.nodes[self.InvKey.PXE][self.InvKey.IPADDRS] = []
    self.nodes[self.InvKey.PXE][self.InvKey.DEVICES] = []
    self.nodes[self.InvKey.PXE][self.InvKey.SWITCHES] = []
    self.nodes[self.InvKey.PXE][self.InvKey.RENAME] = []

    self.nodes[self.InvKey.DATA][self.InvKey.SWITCHES] = []
    self.nodes[self.InvKey.DATA][self.InvKey.PORTS] = []
    self.nodes[self.InvKey.DATA][self.InvKey.MACS] = []
    self.nodes[self.InvKey.DATA][self.InvKey.IPADDRS] = []
    self.nodes[self.InvKey.DATA][self.InvKey.DEVICES] = []
    self.nodes[self.InvKey.DATA][self.InvKey.RENAME] = []
def duration(args):
    df = pd.Series()
    intensity = Intensity(args.node.readings.speed, args.classifier)
    for (left, right) in args.node.range(args.window):
        if left[0].is_month_start and not left[0].hour and not left[0].minute:
            log = logger.getlogger()
            log.info('{0}: {1} {2}'.format(repr(args.node), left[0], right[0]))
        # Series.set_value is deprecated in newer pandas; .at would be the
        # modern equivalent
        df.set_value(right.min(), intensity.duration(left, right))

    return df
def f(args):
    (index, nid, (config, )) = args
    logger.getlogger().info(nid)

    n = nd.Node(nid)
    grouped = n.readings.speed.groupby(lambda x: x.hour)
    product = [ pd.Series(x.values) for (_, x) in grouped ]
    df = pd.concat(product, axis=1, ignore_index=True)

    ax = df.boxplot(grid=False, sym=',', return_type='axes')
    ax.grid(b=True, axis='y')
    ax.set_xlabel('Hour of the day')
    ax.set_ylabel('Speed (mph)')
    plt.title(str(n))

    fname = '{0:03d}'.format(nid)
    path = Path(config['output']['destination'], fname).with_suffix('.png')
    plt.savefig(str(path))
    plt.close()

    return (nid, df)
def func(args):
    log = logger.getlogger()
    log.info(args)
    try:
        n = Node(args, freq=None)
        path = Path("/", "Volumes", "Untitled", "{0:03d}".format(args))
        path = path.with_suffix(".pkl")
        n.readings.to_pickle(str(path))
    except AttributeError:
        pass

    return args
def __build(self, level, cluster, db_conn, seen):
    if level > 0:
        try:
            cl = cluster(self.node.nid, db_conn)
        except AttributeError as err:
            log = logger.getlogger()
            log.error(err)
            return

        neighbors = cl.neighbors.difference(seen)
        seen.update(neighbors)

        for i in neighbors:
            try:
                lag = cl.lag(i) + self.lag
            except ValueError as err:
                log = logger.getlogger()
                log.error(err)
                continue
            child = Network(i, level - 1, cluster, lag, db_conn, seen)
            self.children.add(child)
def __get_data(self, url, retries, timeout):
    elist = []
    for _ in range(retries):
        try:
            r = requests.get(url)
            r.raise_for_status()
        except (RequestException, ConnectionError) as err:
            elist.append(type(err).__name__)
            time.sleep(timeout)
            continue
        return r.text

    log = logger.getlogger()
    log.error(elist)
    raise AttributeError('Retries exceeded')
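
# Standalone sketch of the same retry pattern (an assumption-laden rewrite
# for use outside this class, not part of the original source; the function
# name and defaults are hypothetical):
import time

import requests

def fetch_with_retries(url, retries=3, timeout=2):
    errors = []
    for _ in range(retries):
        try:
            r = requests.get(url)
            r.raise_for_status()
            return r.text  # success: return the body immediately
        except requests.RequestException as err:
            errors.append(type(err).__name__)
            time.sleep(timeout)  # back off before the next attempt
    raise AttributeError('Retries exceeded: {}'.format(errors))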
def run(args):
    (index, node, (config,)) = args
    log = logger.getlogger()
    log.info('node: {0}'.format(node))

    opts = config['machine']
    machine = machine_[opts['model']]
    aggregator = aggregator_[opts['feature-transform']]
    model = machine(node, config, aggregator)

    keys = model.header()
    values = []
    try:
        values = model.predict(model.classify())
    except ValueError as v:
        log.error(v)

    return Results(keys, values)
def plot(args):
    (source, target, frequencies, title) = args
    log = logger.getlogger()
    log.info('o: {0} p: {1}'.format(*map(int, source.parts[-2:])))

    data = [ pd.read_pickle(str(x)) for x in source.glob('*.pkl') ]
    df = pd.concat(data, axis=1)

    columns = [ 'Hour', 'Segment', 'Average number of jams' ]
    kwargs = { 'x': columns[0], 'y': columns[-1] }

    for freq in map(lambda x: str(x) + 'H', frequencies):
        log.info(freq)
        df_ = df.resample(freq).sum()
        grouped = df_.groupby(lambda x: x.hour).mean()
        grouped = grouped.stack(level=0).reset_index()
        grouped.columns = columns
        # grouped.replace(to_replace={grouped.columns[0]: {
        #     0: '00:00-03:59',
        #     4: '04:00-07:59',
        #     8: '08:00-11:59',
        #     12: '12:00-15:59',
        #     16: '16:00-19:59',
        #     20: '20:00-23:59',
        # }}, inplace=True)
        kwargs['data'] = grouped

        for f in [ boxplot, factorplot ]:
            f(**kwargs)
            sns.despine(trim=True)
            fname = f.__name__ + '-' + freq
            dest = target.joinpath(fname).with_suffix('.png')
            plt.title(title)
            plt.gcf().savefig(str(dest))
            plt.close()
args = cli.CommandLine(cli.optsfile('characterisation-plot')).args
source = Path(args.source)
target = Path(args.target)

logger.getlogger(True)

with Pool(cpu_count() // 2, maxtasksperchild=1) as pool:
    for _ in pool.imap_unordered(plot, mkargs(source, target, args.freqs)):
        pass
args = cli.CommandLine(cli.optsfile('characterisation-plot')).args
top_level = Path(args.source)
target = Path(args.target)
target.mkdir(parents=True, exist_ok=True)
freqs = args.freqs if args.freqs else [ 'D' ]  # XXX defaults?
names = [ 'observation', 'prediction' ]

log = logger.getlogger(True)

for fq in freqs:
    log.info('collect {0}'.format(fq))
    with Pool(cpu_count() // 2, maxtasksperchild=1) as pool:
        f = pool.imap_unordered
        d = { tuple(i): j.values for (*i, j) in f(acc, mkargs(top_level, fq)) }
    index = pd.MultiIndex.from_tuples(d.keys(), names=names)
    df = pd.DataFrame(list(d.values()), index=index).sort_index()
    fname = target.joinpath(fq).with_suffix('.pkl')
    df.to_pickle(str(fname))
def __init__(self, url, retries, timeout):
    super().__init__(url, retries, timeout, None, None)

processors = {
    'nyc': NYC,
    'mass': Massachusetts,
    'ie': Ireland,
}

cargs = cli.CommandLine(cli.optsfile('storage'))
args = cargs.args

handler = processors[args.source]
try:
    data = handler(args.url, args.retries, args.timeout)
    data.parse(args.table, args.root)
    data.to_file(args.output)
    # data.check(args.output)
except AttributeError as err:
    log = logger.getlogger()
    log.critical(err)
except AssertionError:
    (*_, tb) = sys.exc_info()
    (*_, tb_info) = map(list, traceback.extract_tb(tb))
    if data.doc:
        fname = handle_error(data.doc)
        tb_info.append(fname)
    log = logger.getlogger()
    log.critical(' '.join(map(str, tb_info)))