def node_set_boot_order(self, node_id, boot_order_list):
    """Rewrite a libvirt domain's <os> boot entries to the given device order.

    Dumps the domain XML, strips any existing <boot>/<bootmenu> elements,
    appends one <boot dev=.../> per entry of boot_order_list (after
    self.translate()), disables the boot menu, then re-defines the domain
    from the edited XML written to a temp dir.
    """
    boot_order_list = self.translate(boot_order_list)
    vm_name = self.get_node_property(node_id, 'libvirtName')
    temp_dir = tempfile.mkdtemp()
    log('Set boot order %s on Node %s' % (boot_order_list, vm_name))
    resp = exec_cmd('virsh dumpxml %s' % vm_name)
    xml_dump = etree.fromstring(resp, self.parser)
    # NOTE: local 'os' shadows the os module inside this function.
    os = xml_dump.xpath('/domain/os')
    for o in os:
        # Remove all pre-existing boot configuration first.
        for bootelem in ['boot', 'bootmenu']:
            boot = o.xpath(bootelem)
            for b in boot:
                o.remove(b)
        for dev in boot_order_list:
            b = etree.Element('boot')
            b.set('dev', dev)
            o.append(b)
        # Disable the interactive boot menu so boot order is honored.
        bmenu = etree.Element('bootmenu')
        bmenu.set('enable', 'no')
        o.append(bmenu)
    tree = etree.ElementTree(xml_dump)
    xml_file = temp_dir + '/%s.xml' % vm_name
    with open(xml_file, 'w') as f:
        tree.write(f, pretty_print=True, xml_declaration=True)
    exec_cmd('virsh define %s' % xml_file)
    delete(temp_dir)
def node_power_off(self, node_id):
    """Power off an AMT-managed node if it is currently active (S0)."""
    log('Power OFF Node %s' % node_id)
    prefix = self.amt_cmd(node_id)
    info, _ = exec_cmd('{0} info'.format(prefix), check=False)
    if "Powerstate: S0" not in info:
        # Not active - nothing to power down.
        return
    output, _ = exec_cmd('{0} powerdown'.format(prefix), check=False)
    if 'pt_status: success' not in output:
        err('Could Not Power OFF Node %s' % node_id)
def node_power_on(self, node_id):
    """Power on an AMT-managed node unless it is already active (S0)."""
    log('Power ON Node %s' % node_id)
    prefix = self.amt_cmd(node_id)
    info, _ = exec_cmd('{0} info'.format(prefix), check=False)
    if 'Powerstate: S0' in info:
        # Already powered on.
        return
    boot_dev = self.node_get_boot_dev(node_id)
    output, _ = exec_cmd('{0} powerup {1}'.format(prefix, boot_dev),
                         check=False)
    if 'pt_status: success' not in output:
        err('Could Not Power ON Node %s' % node_id)
def repo_setup():
    """Create the /opt/x86vm/repo-vc yum repo layout, copy RPMs, and index it."""
    _log("In Function {0}".format(inspect.stack()[0][3]))
    repo_p = os.path.join(os.path.join("/opt", "x86vm"), "repo-vc")
    main_p = os.path.join(repo_p, "main")
    # Create repo root, then its 'main' and 'updates' subdirectories.
    for directory in (repo_p, main_p, os.path.join(repo_p, "updates")):
        os.mkdir(directory)
    for rpm in glob.glob("x86/*.rpm"):
        shutil.copy(rpm, main_p)
    common.exec_cmd(["createrepo", "-v", main_p])
def collect_error_logs(self):
    """Grep each node's log for SEARCH_TEXT and print the hits with context.

    For every node in node_id_roles_dict, each matching line is expanded
    with leading and trailing context, trimmed at the nearest log-entry
    boundaries (self.pattern matches the start of a log entry).
    """
    for node_id, roles_blade in self.node_id_roles_dict.iteritems():
        log_list = []
        cmd = ('ssh -q node-%s grep \'"%s"\' %s'
               % (node_id, SEARCH_TEXT, LOG_FILE))
        results, _ = exec_cmd(cmd, False)
        for result in results.splitlines():
            log_msg = ''
            # Re-grep for this exact hit, escaping shell-special characters.
            sub_cmd = '"%s" %s' % (result, LOG_FILE)
            for c in LIST_OF_CHAR_TO_BE_ESCAPED:
                sub_cmd = sub_cmd.replace(c, '\%s' % c)
            grep_cmd = ('grep -B%s %s'
                        % (GREP_LINES_OF_LEADING_CONTEXT, sub_cmd))
            cmd = ('ssh -q node-%s "%s"' % (node_id, grep_cmd))
            details, _ = exec_cmd(cmd, False)
            details_list = details.splitlines()
            # Walk backwards to the start of the previous log entry so the
            # leading context begins on an entry boundary.
            found_prev_log = False
            for i in range(len(details_list) - 2, -1, -1):
                if self.pattern.match(details_list[i]):
                    found_prev_log = True
                    break
            if found_prev_log:
                log_msg += '\n'.join(details_list[i:-1]) + '\n'
            grep_cmd = ('grep -A%s %s'
                        % (GREP_LINES_OF_TRAILING_CONTEXT, sub_cmd))
            cmd = ('ssh -q node-%s "%s"' % (node_id, grep_cmd))
            details, _ = exec_cmd(cmd, False)
            details_list = details.splitlines()
            # Walk forwards to the start of the next log entry to bound the
            # trailing context; otherwise keep the whole grep output.
            found_next_log = False
            for i in range(1, len(details_list)):
                if self.pattern.match(details_list[i]):
                    found_next_log = True
                    break
            if found_next_log:
                log_msg += '\n'.join(details_list[:i])
            else:
                log_msg += details
            if log_msg:
                log_list.append(log_msg)
        if log_list:
            role = ('controller' if 'controller' in roles_blade[0]
                    else 'compute host')
            log('_' * 40 + 'Errors in node-%s %s' % (node_id, role)
                + '_' * 40)
            for log_msg in log_list:
                print(log_msg + '\n')
def patch_iso(self, new_iso):
    """Build a patched ISO; on any failure unmount, clean up and abort."""
    orig_dir = '%s/origiso' % self.tmp_dir
    new_dir = '%s/newiso' % self.tmp_dir
    try:
        self.copy(orig_dir, new_dir)
        self.patch(new_dir, new_iso)
    except Exception as e:
        # Best-effort unmount of the fuseiso mount before bailing out.
        exec_cmd('fusermount -u %s' % orig_dir, False)
        os.environ.pop(MOUNT_STATE_VAR, None)
        delete(self.tmp_dir)
        err(e)
def node_reset(self, node_id):
    """Reset an AMT-managed node; the node must be active (S0)."""
    log('RESET Node %s' % node_id)
    prefix = self.amt_cmd(node_id)
    boot_dev = self.node_get_boot_dev(node_id)
    info, _ = exec_cmd('{0} info'.format(prefix), check=False)
    if 'Powerstate: S0' not in info:
        err('Cannot RESET Node %s because it\'s not Active, state: %s'
            % (node_id, info))
    else:
        output, _ = exec_cmd('{0} reset {1}'.format(prefix, boot_dev),
                             check=False)
        if 'pt_status: success' not in output:
            err('Could Not RESET Node %s' % node_id)
def health_check(self):
    """Run Fuel sanity/smoke health checks and abort on any failure."""
    log('Now running sanity and smoke health checks')
    report = exec_cmd('fuel health --env %s --check sanity,smoke --force'
                      % self.env_id)
    log(report)
    if 'failure' in report:
        err('Healthcheck failed!')
def node_zero_mbr(self, node_id):
    """Wipe a node's disks by recreating each backing file at its original size."""
    vm_name = self.get_node_property(node_id, 'libvirtName')
    domain = etree.fromstring(exec_cmd('virsh dumpxml %s' % vm_name))
    for disk in domain.xpath('/domain/devices/disk'):
        if disk.get('device') != 'disk':
            continue
        for source in disk.xpath('source'):
            disk_file = source.get('file')
            # Third whitespace-separated token of the "virtual size:" line,
            # e.g. "virtual size: 50G (...)" -> "50G".
            disk_size = exec_cmd('qemu-img info '
                                 '%s |grep \"virtual size:\"'
                                 % disk_file).split()[2]
            delete(disk_file)
            exec_cmd('qemu-img create -f qcow2 %s %s'
                     % (disk_file, disk_size))
def env_exists(self, env_name):
    """Return True (caching env_id) if a 'new' env with this name exists."""
    for env in parse(exec_cmd('fuel env --list')):
        if env[E['name']] == env_name and env[E['status']] == 'new':
            self.env_id = env[E['id']]
            return True
    return False
def configure_environment(self):
    """Create the Fuel environment and push settings/network/node config."""
    log('Configure environment')
    delete(self.yaml_config_dir)
    create_dir_if_not_exists(self.yaml_config_dir)
    name = self.dea.get_env_name()
    seg_type = self.dea.get_env_net_segment_type()
    log('Creating environment %s release %s net-segment-type %s'
        % (name, self.release_id, seg_type))
    exec_cmd('fuel env create --name "%s" --release %s --net-segment-type %s'
             % (name, self.release_id, seg_type))
    if not self.env_exists(name):
        err('Failed to create environment %s' % name)
    self.config_settings()
    self.config_network()
    self.config_nodes()
def get_interface(self, real_node_id):
    """Download a node's interface config; return ({iface: [nets]}, pxe_mac)."""
    exec_cmd('fuel node --node-id %s --network --download --dir %s'
             % (real_node_id, self.temp_dir))
    interface_file = ('%s/node_%s/interfaces.yaml'
                      % (self.temp_dir, real_node_id))
    pxe_mac = None
    interface_config = {}
    for interface in self.read_yaml(interface_file):
        networks = [net['name'] for net in interface['assigned_networks']]
        # The interface carrying fuelweb_admin is the PXE interface.
        if 'fuelweb_admin' in networks:
            pxe_mac = interface['mac']
        if networks:
            interface_config[interface['name']] = networks
    return interface_config, pxe_mac
def handle_signals(signal_num, frame):
    """SIGINT/SIGTERM handler: unmount any fuseiso mount, then exit(1)."""
    # Ignore further signals while cleaning up.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    signal.signal(signal.SIGTERM, signal.SIG_IGN)
    log('Caught signal %s, cleaning up and exiting.' % signal_num)
    mount_point = os.environ.get(MOUNT_STATE_VAR)
    if mount_point:
        log('Unmounting ISO from "%s"' % mount_point)
        # Prevent 'Device or resource busy' errors when unmounting
        os.chdir('/')
        exec_cmd('fusermount -u %s' % mount_point, True)
        # Be nice and remove our environment variable, even though the OS
        # would clean it up anyway
        os.environ.pop(MOUNT_STATE_VAR)
    sys.exit(1)
def run(self):
    """Worker loop: drain the queue, run each awk command, collect results."""
    while not queue.empty():
        idx, instance, search_text_list = queue.get()
        file_name = self.file_path + "%s%s.txt" % (instance.table_name, idx)
        awk_cmd = self.cmd_obj.create_cmd(search_text_list, file_name)
        raw_output = exec_cmd(awk_cmd)
        self.resultset.extend(Commands.process_data(instance, raw_output))
        queue.task_done()
def create_vms(self):
    """Define one libvirt VM per node id, each with its own storage disk."""
    temp_dir = tempfile.mkdtemp()
    disk_sizes = self.dha.get_disks()
    for node_id in self.node_ids:
        vm_name = self.dha.get_node_property(node_id, 'libvirtName')
        vm_template = '%s/%s' % (self.root_dir,
                                 self.dha.get_node_property(
                                     node_id, 'libvirtTemplate'))
        check_file_exists(vm_template)
        disk_path = '%s/%s.raw' % (self.storage_dir, vm_name)
        self.create_storage(node_id, disk_path, disk_sizes)
        # Work on a private copy of the template.
        temp_vm_file = '%s/%s' % (temp_dir, vm_name)
        exec_cmd('cp %s %s' % (vm_template, temp_vm_file))
        overwrite = self.dha.get_vm_definition(
            self.dea.get_node_main_role(node_id, self.fuel_node_id))
        self.define_vm(vm_name, temp_vm_file, disk_path, overwrite)
    delete(temp_dir)
def file_search(self):
    """Worker: run prepared search commands against index files from the queue.

    Each result is appended to the module-level CMD_OUTPUT list.
    """
    global CMD_OUTPUT
    while not queue.empty():
        # NOTE(review): 'file_prefix' appears twice in this unpacking; the
        # second (last) value wins. Looks suspicious - confirm the intended
        # tuple layout against the producer that fills the queue.
        file_prefix, table_name, bucket_no, search_text_list, file_prefix = queue.get()
        file_name = "%sindex_%s_%s.txt" % (file_prefix, table_name, bucket_no)
        cmd_list = self.cmd_obj.create_cmd(search_text_list, file_name)
        result = exec_cmd(cmd_list)
        CMD_OUTPUT.append(result)
        queue.task_done()
def node_power_off(self, node_id):
    """Power off a node via IPMI, polling until the chassis reports off."""
    WAIT_LOOP = 200
    SLEEP_TIME = 3
    log('Power OFF Node %s' % node_id)
    cmd_prefix = self.ipmi_cmd(node_id)
    state = exec_cmd('%s chassis power status' % cmd_prefix)
    if state != 'Chassis Power is on':
        # Already off (or unknown) - nothing to do.
        return
    exec_cmd('%s chassis power off' % cmd_prefix)
    for _attempt in range(WAIT_LOOP):
        state, _ = exec_cmd('%s chassis power status' % cmd_prefix, False)
        if state == 'Chassis Power is off':
            return
        time.sleep(SLEEP_TIME)
    err('Could Not Power OFF Node %s' % node_id)
def configure_environment(self):
    """Create and configure a Fuel environment for the wanted release.

    Fix: the original for-loop fell through silently when the wanted
    release was not in Fuel's release list, using the LAST release instead
    (and raising NameError on an empty list). Now aborts via err() with a
    clear message when the release cannot be found.
    """
    release_list = parse(exec_cmd('fuel release -l'))
    release = None
    for candidate in release_list:
        if candidate[R['name']] == self.wanted_release:
            release = candidate
            break
    if release is None:
        err('Release %s not found' % self.wanted_release)
    config_env = ConfigureEnvironment(self.dea, YAML_CONF_DIR,
                                      release[R['id']],
                                      self.node_roles_dict)
    config_env.configure_environment()
    self.env_id = config_env.env_id
def copy(self, tmp_orig_dir, tmp_new_dir):
    """Mount the ISO with fuseiso and copy its contents to a writable tree."""
    log('Copying...')
    for directory in (tmp_orig_dir, tmp_new_dir):
        os.makedirs(directory)
    exec_cmd('fuseiso %s %s' % (self.iso_file, tmp_orig_dir))
    # Remember the mount point so the signal handler can unmount on abort.
    os.environ[MOUNT_STATE_VAR] = tmp_orig_dir
    with cd(tmp_orig_dir):
        exec_cmd('find . | cpio -pd %s' % tmp_new_dir)
    exec_cmd('fusermount -u %s' % tmp_orig_dir)
    os.environ.pop(MOUNT_STATE_VAR, None)
    delete(tmp_orig_dir)
    # The copied tree is read-only from the ISO; make it writable.
    exec_cmd('chmod -R 755 %s' % tmp_new_dir)
def check_bridge(pxe_bridge, dha_path):
    """Verify the PXE Linux bridge is up when using a non-libvirt adapter.

    Reads the DHA file to find the adapter type; for anything other than
    'libvirt' the named bridge must exist and be UP, otherwise abort with
    a hint on how to bring it up.

    Fix: use yaml.safe_load - the DHA file is plain data, and yaml.load
    without an explicit Loader is deprecated and can construct arbitrary
    Python objects from untrusted input.
    """
    with io.open(dha_path) as yaml_file:
        dha_struct = yaml.safe_load(yaml_file)
    if dha_struct['adapter'] != 'libvirt':
        log('Using Linux Bridge %s for booting up the Fuel Master VM'
            % pxe_bridge)
        r = exec_cmd('ip link show %s' % pxe_bridge)
        if pxe_bridge in r and 'state DOWN' in r:
            err('Linux Bridge {0} is not Active, bring'
                ' it UP first: [ip link set dev {0} up]'.format(pxe_bridge))
def get_node_pxe_mac(self, node_id):
    """Return the lowercase MAC addresses of all interfaces of a node's VM."""
    vm_name = self.get_node_property(node_id, 'libvirtName')
    domain = etree.fromstring(exec_cmd('virsh dumpxml %s' % vm_name))
    return [mac.get('address').lower()
            for interface in domain.xpath('/domain/devices/interface')
            for mac in interface.xpath('mac')]
def get_name_of_device(self, vm_name, device_type):
    """Return the target dev name of the first matching disk, or None."""
    domain = etree.fromstring(exec_cmd('virsh dumpxml %s' % vm_name))
    for disk in domain.xpath('/domain/devices/disk'):
        if disk.get('device') != device_type:
            continue
        for target in disk.xpath('target'):
            device = target.get('dev')
            if device:
                return device
def verify_node_status(self):
    """Abort with a summary if any clustered node is not in 'ready' state."""
    node_list = parse(exec_cmd('fuel node list'))
    failed_nodes = [(node[N['id']], node[N['status']])
                    for node in node_list
                    if node[N['status']] != 'ready'
                    and node[N['cluster']] != 'None']
    if failed_nodes:
        summary = ''.join('[node %s, status %s]\n' % (node_id, status)
                          for node_id, status in failed_nodes)
        err('Deployment failed: %s' % summary)
def verify_node_status(self):
    """Abort with a summary if any node is not in 'ready' state."""
    node_list = parse(exec_cmd('fuel node list'))
    failed_nodes = [(node[N['id']], node[N['status']])
                    for node in node_list
                    if node[N['status']] != 'ready']
    if failed_nodes:
        summary = ''.join('[node %s, status %s]\n' % (node_id, status)
                          for node_id, status in failed_nodes)
        err('Deployment failed: %s' % summary)
def transplant(dea, astute):
    """Merge Fuel config from the DEA into the astute settings dict.

    ADMIN_NETWORK keys are merged one level deep; IFCFG_* keys are written
    out as interface config files (and removed from astute); anything else
    overwrites astute wholesale. Restarts networking if any interface was
    reconfigured. Returns the updated astute dict.

    Fix: dict.has_key() is deprecated (removed in Python 3); use 'in'.
    """
    fuel_conf = dea.get_fuel_config()
    require_network_restart = False
    for key in fuel_conf.iterkeys():
        if key == 'ADMIN_NETWORK':
            for skey in fuel_conf[key].iterkeys():
                astute[key][skey] = fuel_conf[key][skey]
        elif re.match('^IFCFG', key):
            log('Adding interface configuration for: %s' % key.lower())
            require_network_restart = True
            write_ifcfg_file(key, fuel_conf)
            if key in astute:
                astute.pop(key, None)
        else:
            astute[key] = fuel_conf[key]
    if require_network_restart:
        admin_ifcfg = '/etc/sysconfig/network-scripts/ifcfg-eth0'
        exec_cmd('echo "DEFROUTE=no" >> %s' % admin_ifcfg)
        log('At least one interface was reconfigured, restart network manager')
        exec_cmd('systemctl restart network')
    return astute
def create_networks(self):
    """Define, autostart and start all libvirt networks from network_dir."""
    for net_file in glob.glob('{0}/*'.format(self.network_dir)):
        exec_cmd('virsh net-define {0}'.format(net_file))
    for net in self.net_names:
        log('Creating network {0}'.format(net))
        for action in ('net-autostart', 'net-start'):
            exec_cmd('virsh {0} {1}'.format(action, net))
def node_set_boot_order(self, node_id, boot_order_list):
    """Program the IPMI boot device order (list is applied in reverse)."""
    log('Set boot order %s on Node %s' % (boot_order_list, node_id))
    boot_order_list.reverse()
    cmd_prefix = self.ipmi_cmd(node_id)
    # ipmitool subcommand per logical boot device.
    bootdev_args = {
        'pxe': 'chassis bootdev pxe options=persistent',
        'iso': 'chassis bootdev cdrom',
        'disk': 'chassis bootdev disk options=persistent',
    }
    for dev in boot_order_list:
        if dev in bootdev_args:
            exec_cmd('%s %s' % (cmd_prefix, bootdev_args[dev]),
                     attempts=self.attempts, delay=self.delay,
                     verbose=True, mask_args=[8, 10])
def patch(self, tmp_new_dir, new_iso):
    """Apply kickstart/isolinux patches and master a new bootable ISO.

    Works inside tmp_new_dir (the unpacked ISO tree), writes the repacked
    image to new_iso, then removes the working tree.
    """
    log('Patching...')
    patch_dir = '%s/%s' % (CWD, PATCH_DIR)
    ks_path = '%s/ks.cfg.patch' % patch_dir
    with cd(tmp_new_dir):
        exec_cmd('cat %s | patch -p0' % ks_path)
        # Rock Ridge relocation directory left over from the ISO copy.
        delete('.rr_moved')
        isolinux = 'isolinux/isolinux.cfg'
        log('isolinux.cfg before: %s'
            % exec_cmd('grep ip= %s' % isolinux))
        self.update_fuel_isolinux(isolinux)
        log('isolinux.cfg after: %s'
            % exec_cmd('grep ip= %s' % isolinux))
        iso_label = self.parse_iso_volume_label(self.iso_file)
        log('Volume label: %s' % iso_label)
        iso_linux_bin = 'isolinux/isolinux.bin'
        # Re-master the ISO with the same El Torito boot image and label.
        exec_cmd('mkisofs -quiet -r -J -R -b %s '
                 '-no-emul-boot -boot-load-size 4 '
                 '-boot-info-table -hide-rr-moved '
                 '-joliet-long '
                 '-x "lost+found:" -V %s -o %s .'
                 % (iso_linux_bin, iso_label, new_iso))
    delete(tmp_new_dir)
def config_nodes(self): log('Configure nodes') # Super dirty fix since Fuel 7 requires user defined roles to be # assigned before anything else (BUG fixed in Fuel 8)! for node_id, roles_blade in self.node_id_roles_dict.iteritems(): if "opendaylight" in roles_blade[0] or "onos" in roles_blade[0] or "contrail" in roles_blade[0]: exec_cmd('fuel node set --node-id %s --role %s --env %s' % (node_id, roles_blade[0], self.env_id)) for node_id, roles_blade in self.node_id_roles_dict.iteritems(): if "opendaylight" not in roles_blade[0] and "onos" not in roles_blade[0] and "contrail" not in roles_blade[0]: exec_cmd('fuel node set --node-id %s --role %s --env %s' % (node_id, roles_blade[0], self.env_id)) self.download_deployment_config() for node_id, roles_blade in self.node_id_roles_dict.iteritems(): self.download_interface_config(node_id) self.modify_node_interface(node_id, roles_blade) self.modify_node_network_schemes(node_id, roles_blade) self.upload_interface_config(node_id) self.upload_deployment_config()
def define_vm(self, vm_name, temp_vm_file, disk_path):
    """Customize the VM template XML (name, uuid, disk source) and define it."""
    log('Creating VM %s with disks %s' % (vm_name, disk_path))
    with open(temp_vm_file) as f:
        vm_xml = etree.parse(f)
    for name in vm_xml.xpath('/domain/name'):
        name.text = vm_name
    # Drop any uuid element so libvirt generates a fresh one.
    for uuid in vm_xml.xpath('/domain/uuid'):
        uuid.getparent().remove(uuid)
    for disk in vm_xml.xpath('/domain/devices/disk'):
        if disk.get('type') == 'file' and disk.get('device') == 'disk':
            # Replace the template's disk source with our storage file.
            for old_source in disk.xpath('source'):
                disk.remove(old_source)
            new_source = etree.Element('source')
            new_source.set('file', disk_path)
            disk.append(new_source)
    with open(temp_vm_file, 'w') as f:
        vm_xml.write(f, pretty_print=True, xml_declaration=True)
    exec_cmd('virsh define %s' % temp_vm_file)
def reap_fuel_settings(self):
    """Harvest Fuel master settings from astute.yaml plus local interface
    config files, and write them to the DEA file under the 'fuel' key."""
    data = self.read_yaml('/etc/fuel/astute.yaml')
    fuel = {}
    # mac/interface are host-specific and must not be carried over.
    del data['ADMIN_NETWORK']['mac']
    del data['ADMIN_NETWORK']['interface']
    for key in [ 'ADMIN_NETWORK', 'HOSTNAME', 'DNS_DOMAIN', 'DNS_SEARCH',
                 'DNS_UPSTREAM', 'NTP1', 'NTP2', 'NTP3', 'FUEL_ACCESS' ]:
        fuel[key] = data[key]
    for key in fuel['ADMIN_NETWORK'].keys():
        if key not in [ 'ipaddress', 'netmask',
                        'dhcp_pool_start', 'dhcp_pool_end', 'ssh_network' ]:
            del fuel['ADMIN_NETWORK'][key]
    ## FIXME(armband): Factor in support for adding public/other interfaces.
    ## TODO: Following block expects interface name(s) to be lowercase only
    interfaces_list = exec_cmd('ip -o -4 a | grep -e "e[nt][hopsx].*"')
    for interface in re.split('\n', interfaces_list):
        # Sample output line from above cmd:
        # 3: eth1 inet 10.0.2.10/24 scope global eth1 valid_lft forever ...
        ifcfg = re.split(r'\s+', interface)
        ifcfg_name = ifcfg[1]
        ifcfg_ipaddr = ifcfg[3]
        # Filter out admin interface (device name is not known, match IP)
        current_network = netaddr.IPNetwork(ifcfg_ipaddr)
        if str(current_network.ip) == fuel['ADMIN_NETWORK']['ipaddress']:
            continue
        # Read ifcfg-* network interface config file, write IFCFG_<IFNAME>
        ifcfg_sec = 'IFCFG_%s' % ifcfg_name.upper()
        fuel[ifcfg_sec] = {}
        ifcfg_data = {}
        ifcfg_f = ('/etc/sysconfig/network-scripts/ifcfg-%s' % ifcfg_name)
        with open(ifcfg_f) as f:
            for line in f:
                if line.startswith('#'):
                    continue
                # NOTE(review): assumes every non-comment line is KEY=VALUE
                # with a single '=' - confirm against real ifcfg files.
                (key, val) = line.split('=')
                ifcfg_data[key.lower()] = val.rstrip()
        # Keep only needed info (e.g. filter-out type=Ethernet).
        fuel[ifcfg_sec]['ipaddress'] = ifcfg_data['ipaddr']
        fuel[ifcfg_sec]['device'] = ifcfg_data['device']
        fuel[ifcfg_sec]['netmask'] = str(current_network.netmask)
        fuel[ifcfg_sec]['gateway'] = ifcfg_data['gateway']
    self.write_yaml(self.dea_file, {'fuel': fuel})
def collect_logs(self):
    """Collect a Fuel snapshot and deploy logs into /root/deploy-<ts>.log.tar.gz.

    Fix: replaced the archaic '<>' inequality operator (removed in
    Python 3) with the equivalent '!='.
    """
    log('Cleaning out any previous deployment logs')
    exec_cmd('rm -f /var/log/remote/fuel-snapshot-*', False)
    exec_cmd('rm -f /root/deploy-*', False)
    log('Generating Fuel deploy snap-shot')
    if exec_cmd('fuel snapshot < /dev/null &> snapshot.log', False)[1] != 0:
        log('Could not create a Fuel snapshot')
    else:
        exec_cmd('mv /root/fuel-snapshot* /var/log/remote/', False)
    log('Collecting all Fuel Snapshot & deploy log files')
    r, _ = exec_cmd(
        'tar -czhf /root/deploy-%s.log.tar.gz /var/log/remote'
        % time.strftime("%Y%m%d-%H%M%S"), False)
    log(r)
def node_reset(self, node_id):
    """Power-cycle a node via IPMI; success means it went off and on again."""
    WAIT_LOOP = 600
    log('RESET Node %s' % node_id)
    cmd_prefix = self.ipmi_cmd(node_id)
    state = exec_cmd('%s chassis power status' % cmd_prefix)
    if state != 'Chassis Power is on':
        err('Cannot RESET Node %s because it\'s not Active, state: %s'
            % (node_id, state))
        return
    exec_cmd('%s chassis power reset' % cmd_prefix)
    was_shut_off = False
    for _attempt in range(WAIT_LOOP):
        state, _ = exec_cmd('%s chassis power status' % cmd_prefix, False)
        if state == 'Chassis Power is off':
            was_shut_off = True
        elif state == 'Chassis Power is on' and was_shut_off:
            # Observed both off and on again: the reset completed.
            return
        time.sleep(1)
    err('Could Not RESET Node %s' % node_id)
def sniff(self, interface, capfilter="", timeout="", count=""):
    """Capture packets with tshark on an interface and return parsed rows."""
    cmd = ("tshark -i %s -T fields -e frame.time_epoch -e ip.src -e ip.dst"
           " -e tcp.srcport -e tcp.dstport -e udp.srcport -e udp.dstport"
           " -e frame.protocols -l" % (interface))
    if capfilter:
        cmd += ' -f "%s"' % (capfilter)
    if timeout:
        cmd += " -a duration:%s" % (timeout)
    if count:
        cmd += " -c %s" % (count)
    if not timeout and not count:
        # Without a duration or count the capture would never terminate.
        cmd += " -c 1"
    return self.parse_result(exec_cmd(cmd).split("\n"))
def run_deploy(self):
    """Start 'fuel deploy-changes' and poll the env status until done/failed.

    Tails the deployment log while waiting; on failure collects error logs
    and aborts via err().

    Fix: compare returncode to None with 'is' instead of '==' (PEP 8;
    identity is the correct check for the None singleton).
    """
    SLEEP_TIME = 60
    LOG_FILE = 'cloud.log'
    log('Starting deployment of environment %s' % self.env_id)
    p = run_proc('fuel --env %s deploy-changes | strings > %s'
                 % (self.env_id, LOG_FILE))
    ready = False
    for _i in range(int(self.deploy_timeout)):
        env = parse(exec_cmd('fuel env --env %s' % self.env_id))
        log('Environment status: %s' % env[0][E['status']])
        r, _ = exec_cmd('tail -2 %s | head -1' % LOG_FILE, False)
        if r:
            log(r)
        status = env[0][E['status']]
        if status == 'operational':
            ready = True
            break
        elif status == 'error' or status == 'stopped':
            break
        else:
            time.sleep(SLEEP_TIME)
    p.poll()
    if p.returncode is None:
        log('The process deploying the changes has not yet finished.')
        log('''The file %s won't be deleted''' % LOG_FILE)
    else:
        delete(LOG_FILE)
    if ready:
        log('Environment %s successfully deployed' % self.env_id)
    else:
        self.collect_error_logs()
        err('Deployment failed, environment %s is not operational'
            % self.env_id)
def _task_fields(self, id):
    """Fetch a fuel2 task as a {field: value} dict (handles both formats)."""
    try:
        out, _ = exec_cmd('fuel2 task show {} -f json'.format(id), False)
        task_info = json.loads(out)
        # for 9.0 this can be list of dicts or dict
        # see https://bugs.launchpad.net/fuel/+bug/1625518
        if not isinstance(task_info, list):
            return task_info
        properties = {}
        for entry in task_info:
            properties[entry['Field']] = entry['Value']
        return properties
    except ValueError as e:
        err('Unable to fetch task info: {}'.format(e))
def upload_iso(self, iso_file):
    """Create a raw libvirt volume sized to the ISO, upload the ISO into it.

    Returns the volume path reported by 'virsh vol-path'.
    """
    size = os.path.getsize(iso_file)
    vol_name = os.path.basename(iso_file)
    vol_xml = VOL_XML_TEMPLATE.format(name=vol_name, unit='bytes', size=size,
                                      format_type='raw')
    # NOTE(review): os.write() requires bytes on Python 3 - this relies on
    # Python 2 str semantics; confirm if the file is ever ported.
    fd, fname = tempfile.mkstemp(text=True, suffix='deploy')
    os.write(fd, vol_xml)
    os.close(fd)
    log(vol_xml)
    pool = DEFAULT_POOL  # FIXME
    exec_cmd('virsh vol-create --pool %s %s' % (pool, fname))
    vol_path = exec_cmd('virsh vol-path --pool %s %s' % (pool, vol_name))
    # Upload is retried (up to 5 attempts, 10s apart) via exec_cmd args.
    exec_cmd('virsh vol-upload %s %s' % (vol_path, iso_file), attempts=5,
             delay=10, verbose=True)
    delete(fname)
    return vol_path
def check_bridge(pxe_bridge, dha_path):
    """Verify the PXE Linux bridge is up when using a non-libvirt adapter.

    Skipped entirely when LIBVIRT_DEFAULT_URI points at a remote host.
    Reads the DHA file to find the adapter type; for anything other than
    'libvirt' the named bridge must exist and be UP, otherwise abort with
    a hint on how to bring it up.

    Fix: use yaml.safe_load - the DHA file is plain data, and yaml.load
    without an explicit Loader is deprecated and can construct arbitrary
    Python objects from untrusted input.
    """
    # Assume that bridges on remote nodes exists, we could ssh but
    # the remote user might not have a login shell.
    if os.environ.get('LIBVIRT_DEFAULT_URI'):
        return
    with io.open(dha_path) as yaml_file:
        dha_struct = yaml.safe_load(yaml_file)
    if dha_struct['adapter'] != 'libvirt':
        log('Using Linux Bridge %s for booting up the Fuel Master VM'
            % pxe_bridge)
        r = exec_cmd('ip link show %s' % pxe_bridge)
        if pxe_bridge in r and 'state DOWN' in r:
            err('Linux Bridge {0} is not Active, bring'
                ' it UP first: [ip link set dev {0} up]'.format(pxe_bridge))
def reap_environment_info(self):
    """Write environment name/segmentation type and wanted release to the DEA."""
    network_file = ('%s/network_%s.yaml' % (self.temp_dir, self.env_id))
    network = self.read_yaml(network_file)
    seg_type = network['networking_parameters']['segmentation_type']
    env = {'environment': {'name': self.env[E['name']],
                           'net_segment_type': seg_type}}
    self.write_yaml(self.dea_file, env)
    # The release whose id matches the environment's release_id (last
    # match wins, as in the original).
    wanted_release = None
    for rel in parse(exec_cmd('fuel release')):
        if rel[R['id']] == self.env[E['release_id']]:
            wanted_release = rel[R['name']]
    self.write_yaml(self.dea_file, {'wanted_release': wanted_release})
def reap_nodes_interfaces_transformations(self):
    """Reap node roles, interfaces and transformations from a live Fuel
    deployment, renumbering node ids from 1 and writing the DEA/DHA files."""
    node_list = parse(exec_cmd('fuel node'))
    real_node_ids = [node[N['id']] for node in node_list]
    real_node_ids.sort()
    # Renumber so the lowest real Fuel node id becomes local node id 1.
    min_node = real_node_ids[0]
    interfaces = {}
    transformations = {}
    dea_nodes = []
    dha_nodes = []
    for real_node_id in real_node_ids:
        node_id = int(real_node_id) - int(min_node) + 1
        self.last_node = node_id
        node = self.get_node_by_id(node_list, real_node_id)
        roles = commafy(node[N['roles']])
        if not roles:
            err('Fuel Node %s has no role' % real_node_id)
        dea_node = {'id': node_id, 'role': roles}
        dha_node = {'id': node_id}
        if_name, mac = self.reap_interface(real_node_id, interfaces)
        log('reap transformation for node %s' % real_node_id)
        tr_name = self.reap_transformation(real_node_id, roles,
                                           transformations)
        dea_node.update({
            'interfaces': if_name,
            'transformations': tr_name
        })
        # Hardware access fields are unknown when reaping; left as None
        # for the user to fill in.
        dha_node.update({
            'pxeMac': mac if mac else None,
            'ipmiIp': None,
            'ipmiUser': None,
            'ipmiPass': None,
            'libvirtName': None,
            'libvirtTemplate': None
        })
        dea_nodes.append(dea_node)
        dha_nodes.append(dha_node)
    self.write_yaml(self.dha_file, {'nodes': dha_nodes}, False)
    self.write_yaml(self.dea_file, {'nodes': dea_nodes})
    self.write_yaml(self.dea_file, interfaces)
    self.write_yaml(self.dea_file, transformations)
    self.reap_fuel_node_info()
    self.write_yaml(self.dha_file, {'disks': DISKS})
def reap_nodes_interfaces_transformations(self):
    """Reap node roles, interfaces and transformations from a live Fuel
    deployment, renumbering node ids from 1 and writing the DEA/DHA files."""
    node_list = parse(exec_cmd('fuel node'))
    real_node_ids = [node[N['id']] for node in node_list]
    real_node_ids.sort()
    # Renumber so the lowest real Fuel node id becomes local node id 1.
    min_node = real_node_ids[0]
    interfaces = {}
    transformations = {}
    dea_nodes = []
    dha_nodes = []
    for real_node_id in real_node_ids:
        node_id = int(real_node_id) - int(min_node) + 1
        self.last_node = node_id
        node = self.get_node_by_id(node_list, real_node_id)
        roles = commafy(node[N['roles']])
        if not roles:
            err('Fuel Node %s has no role' % real_node_id)
        dea_node = {'id': node_id, 'role': roles}
        dha_node = {'id': node_id}
        if_name, mac = self.reap_interface(real_node_id, interfaces)
        log('reap transformation for node %s' % real_node_id)
        tr_name = self.reap_transformation(real_node_id, roles,
                                           transformations)
        dea_node.update(
            {'interfaces': if_name,
             'transformations': tr_name})
        # Hardware access fields are unknown when reaping; left as None
        # for the user to fill in.
        dha_node.update(
            {'pxeMac': mac if mac else None,
             'ipmiIp': None,
             'ipmiUser': None,
             'ipmiPass': None,
             'libvirtName': None,
             'libvirtTemplate': None})
        dea_nodes.append(dea_node)
        dha_nodes.append(dha_node)
    self.write_yaml(self.dha_file, {'nodes': dha_nodes}, False)
    self.write_yaml(self.dea_file, {'nodes': dea_nodes})
    self.write_yaml(self.dea_file, interfaces)
    self.write_yaml(self.dea_file, transformations)
    self.reap_fuel_node_info()
    self.write_yaml(self.dha_file, {'disks': DISKS})
def node_set_boot_order(self, node_id, boot_order_list):
    """Program the IPMI boot device order (list is applied in reverse)."""
    log('Set boot order %s on Node %s' % (boot_order_list, node_id))
    boot_order_list.reverse()
    cmd_prefix = self.ipmi_cmd(node_id)
    # ipmitool subcommand per logical boot device.
    bootdev_args = {
        'pxe': 'chassis bootdev pxe options=persistent',
        'iso': 'chassis bootdev cdrom',
        'disk': 'chassis bootdev disk options=persistent',
    }
    for dev in boot_order_list:
        if dev in bootdev_args:
            exec_cmd('%s %s' % (cmd_prefix, bootdev_args[dev]))
def intro(self):
    """Initialize DEA/DHA output files and download all environment config."""
    delete(self.dea_file)
    delete(self.dha_file)
    self.temp_dir = tempfile.mkdtemp()
    date = time.strftime('%c')
    self.write(self.dea_file,
               DEA_1.format(date=date, comment=self.comment), False)
    self.write(self.dha_file,
               DHA_1.format(date=date, comment=self.comment))
    self.get_env()
    # Need to download deployment with explicit node ids
    node_list = parse(exec_cmd('fuel node'))
    real_node_ids = sorted(node[N['id']] for node in node_list)
    self.download_node_config(','.join(real_node_ids))
    self.download_config('settings')
    self.download_config('network')
def reap_environment_info(self):
    """Record environment name, segmentation type and wanted release in the DEA."""
    network = self.read_yaml('%s/network_%s.yaml'
                             % (self.temp_dir, self.env_id))
    self.write_yaml(self.dea_file, {
        'environment': {
            'name': self.env[E['name']],
            'net_segment_type':
                network['networking_parameters']['segmentation_type'],
        },
    })
    # The release whose id matches the environment's release_id (last
    # match wins, as in the original).
    wanted_release = None
    rel_list = parse(exec_cmd('fuel release'))
    for rel in rel_list:
        if rel[R['id']] == self.env[E['release_id']]:
            wanted_release = rel[R['name']]
    self.write_yaml(self.dea_file, {'wanted_release': wanted_release})
def download_config(self, config_type):
    """Fetch the given config type for the current environment into temp_dir."""
    log('Download {0} config for environment {1}'.format(
        config_type, self.env_id))
    exec_cmd('fuel {0} --env {1} --download --dir {2}'.format(
        config_type, self.env_id, self.temp_dir))
def delete_networks(self):
    """Tear down all managed libvirt networks (failures are ignored)."""
    for net in self.net_names:
        log('Deleting network %s' % net)
        for action in ('net-destroy', 'net-undefine'):
            exec_cmd('virsh %s %s' % (action, net), False)
def download_node_config(self, nodeid):
    """Download deployment config for the given node ids into temp_dir."""
    log('Download node {0} config for environment {1} to {2}'.format(
        nodeid, self.env_id, self.temp_dir))
    exec_cmd('fuel deployment --node-id {0} --env {1} --default --dir {2}'
             .format(nodeid, self.env_id, self.temp_dir))
def start_docker_machine(name):
    """Start the named docker-machine VM and return the command result."""
    return common.exec_cmd("docker-machine start " + name)
def delete_vm(self, node_id):
    """Undefine a node's VM and delete its disk, if the VM exists."""
    vm_name = self.dha.get_node_property(node_id, 'libvirtName')
    dump, rc = exec_cmd('virsh dumpxml %s' % vm_name, False)
    if rc:
        # dumpxml failed, i.e. no such domain - nothing to clean up.
        return
    self.undefine_vm_delete_disk(dump, vm_name)
print('Number of subjects:', len(dxBySubject)) print('Subject DX distribution :', cntSubjectClass) print('NIfTI file DX distribution :', cntRecordClass) ##### Main Start if not os.path.exists(inputFile): print('ERROR: inputFile not exists,',inputFile) sys.exit(-1) ##### Select columns of interest cmd = "awk -F ',' '{OFS=\",\"} {print $2,$3,$9,$10,$19,$20,$22,$52,$43,$5,$6,$15,$16,$7}' " + inputFile + " > " + stage1ofile;exec_cmd(cmd) cmd = "sed -i 's/\"//g' " + stage1ofile; exec_cmd(cmd) cmd = "sed -i 's/Dementia/AD/g' " + stage1ofile; exec_cmd(cmd) cmd = "sed -i 's/CN/NL/g' " + stage1ofile; exec_cmd(cmd) fillDXbySubject(stage1ofile) print('Number of subjects:', len(dxBySubject)) for k in dxBySubject: v = dxBySubject[k] for item in v: cntClass[item] += 1 ##### Get rid of subjects with all empty DX, and subjects with more than one valid DX ## Keep subjects with only one valid DX and with or without empty DX vSet = set(v) if (len(vSet) == 1) and (not '' in vSet):
def remove_docker_machine(name):
    """Force-remove the named docker-machine VM; return the command result."""
    return common.exec_cmd("docker-machine rm -y " + name)
def is_dm_running(name):
    """Probe the named docker-machine via 'ip'; return the command result."""
    return common.exec_cmd("docker-machine ip " + name)
def tcdel(self, device):
    """Remove all tcconfig traffic-control rules from the device."""
    return exec_cmd("tcdel --device %s --all" % (device))
def create_storage(self, node_id, disk_path, disk_sizes):
    """Create a raw qemu disk image sized for the node's main role."""
    role = self.dea.get_node_main_role(node_id, self.fuel_node_id)
    size = disk_sizes[role]
    exec_cmd('qemu-img create -f raw %s %s' % (disk_path, size))
def tcshow(self, device):
    """Show current tcconfig traffic-control rules for the device."""
    return exec_cmd("tcshow --device %s" % (device))
def check_docker_machine(name):
    """Inspect the named docker-machine VM; return the command result."""
    return common.exec_cmd("docker-machine inspect " + name)