def rsync_plugin_modules_on_node(host, plugin_name, fuel_node_ip):
    LOG.debug('Sync plugin modules on node %s.' % host)
    plugin_version = get_plugin_version(plugin_name)
    modules_path = ('rsync://%s:/plugins/%s-%s/deployment_scripts/puppet/'
                    % (fuel_node_ip, plugin_name, plugin_version))
    plugin_dir = '/etc/fuel/plugins/%s-%s/puppet' % (plugin_name,
                                                     plugin_version)
    (out, err) = ssh_connect(host, 'test -d %s || mkdir -p %s'
                             % (plugin_dir, plugin_dir))
    if err != '':
        log_split_output(err, 'error')
    (out, err) = ssh_connect(host, 'rsync -vzrtopg %s %s'
                             % (modules_path, plugin_dir))
    if err != '':
        log_split_output(err, 'error')

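# Illustrative sketch (not part of the original module): the rsync helper
# above is typically run against every node in a role. This assumes
# get_node_list() returns hosts reachable by ssh_connect(), as it is used
# elsewhere in this module; check_node_online() is defined below.
def sync_plugin_to_all_controllers(plugin_name, fuel_node_ip):
    for host in get_node_list('controller'):
        if check_node_online(host):
            rsync_plugin_modules_on_node(host, plugin_name, fuel_node_ip)
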
def delete_backend_volume_eqlx(volume_id):
    # get provider_location
    sql_get_provider_location = ('SELECT provider_location FROM volumes '
                                 'WHERE id =\'%s\';' % volume_id)
    # the iscsi target iqn is the second whitespace-separated token
    provider_location = str(db_connect(sql_get_provider_location)).split()[1]
    # ssh to controller and compute nodes to delete the iscsi connection
    LOG.info(' Deleting iscsi connection ...')
    controller_list = get_node_list('controller')
    compute_list = get_node_list('compute')
    node_list = controller_list + compute_list
    for node in node_list:
        cmd_show = 'iscsiadm -m session -o show'
        (out, err) = ssh_connect(node, cmd_show)
        # str.find() returns -1 when not found, so test for >= 0
        if out.find(provider_location) >= 0:
            cmd_delete = 'iscsiadm -m node -u -T %s' % provider_location
            (o, e) = ssh_connect(node, cmd_delete)
            if o.find('successful') < 0:
                LOG.error(' Can not delete the iscsi connection "%s" at '
                          'host %s.' % (provider_location, node))
    # ssh to eqlx to delete the volume
    LOG.info(' Deleting backend(eqlx) volume ...')
    # set eqlx volume status to offline
    cmd_set_eqlx_volume_offline = ('volume select volume-%s offline'
                                   % volume_id)
    out = eqlx_ssh_execute(cmd_set_eqlx_volume_offline)
    # an error response from the eqlx CLI carries the message in out[1]
    if len(out) == 3:
        LOG.error(' ' + out[1])
        return False
    # delete the eqlx volume
    cmd_delete_eqlx_volume = 'volume delete volume-%s' % volume_id
    result = eqlx_ssh_execute(cmd_delete_eqlx_volume)
    if not result or result[1] != 'Volume deletion succeeded.':
        LOG.error(' Delete backend volume failed!')
        return False
    else:
        return True

def install_packages_on_influxdbnode():
    LOG.info('Install rpm packages "%s" on node %s.'
             % (INSTALL_PACKAGES, INFLUXDB_HOST))
    (out, err) = ssh_connect(INFLUXDB_HOST, 'yum -d 0 -e 0 -y install %s'
                             % INSTALL_PACKAGES)
    # yum runs quietly (-d 0 -e 0), so any stdout is worth flagging
    if out != '':
        log_split_output(out, 'warn')

def detach_disk_on_compute_node(servers, volume_id):
    for server_id in servers:
        LOG.info(' Detaching disk "%s" from instance "%s".'
                 % (volume_id, server_id))
        logging.disable(logging.INFO)
        server = pc.nova_server(server_id)
        server_status = server._info['status']
        server_host = server._info['OS-EXT-SRV-ATTR:host']
        server_instance_name = server._info['OS-EXT-SRV-ATTR:instance_name']
        server_device = os.path.basename(
            pc.nova_volume(server_id, volume_id)._info['device'])
        logging.disable(logging.NOTSET)
        if disk_attached(server_host, server_instance_name, server_device):
            if server_status == 'ACTIVE':
                detach_disk_cmd = ('virsh detach-disk %s %s --persistent'
                                   % (server_instance_name, server_device))
                # check stdout rather than the (out, err) tuple, so a
                # substring match works
                (out, err) = ssh_connect(server_host, detach_disk_cmd)
                if 'Disk detached successfully' in out:
                    LOG.info(' Detach disk %s on instance %s successfully.'
                             % (server_device, server_instance_name))
                    return True
                else:
                    LOG.error(' Detach disk %s on instance %s failed.'
                              % (server_device, server_instance_name))
                    return False
        else:
            LOG.info(' Disk %s already detached from instance %s.'
                     % (server_device, server_instance_name))
            return True

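# Illustrative sketch: a full volume teardown first detaches the disk from
# any attached instance via detach_disk_on_compute_node(), then removes the
# iscsi sessions and the backend volume via delete_backend_volume_eqlx().
# The caller is assumed to supply the attached server ids (e.g. from the
# volume's attachment info); this wrapper is not part of the original module.
def force_delete_volume_eqlx(volume_id, servers):
    if detach_disk_on_compute_node(servers, volume_id):
        return delete_backend_volume_eqlx(volume_id)
    return False
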
def vrouter_get_gw_remote(l3_host, rid):
    cmd = ("ip netns exec qrouter-%s ip route show | "
           "grep 'default' | awk '{print $3}'" % rid)
    out, err = ssh_connect(l3_host, cmd)
    if not err:
        return out.strip('\n')
    else:
        return None

def check_node_online(host):
    online = False
    (out, err) = ssh_connect(host, 'echo "online test"')
    if out.split('\n')[0] == 'online test':
        LOG.debug('Node %s is online.' % host)
        online = True
    return online

def disk_attached(server_host, instance_name, server_device):
    check_cmd = ('virsh domblklist %s | grep -q %s ; echo $?'
                 % (instance_name, server_device))
    # the exit status of grep is echoed on stdout, so check it there
    # instead of against the whole (out, err) tuple
    (out, err) = ssh_connect(server_host, check_cmd)
    return out.strip() == '0'

def get_idrac_addr(node_ip):
    cmd_get_idrac_addr = ('ipmitool lan print | '
                          'grep -v "IP Address Source" | grep "IP Address"')
    (out, err) = ssh_connect(node_ip, cmd_get_idrac_addr)
    if out:
        # return the idrac address, e.g. 'IP Address : 10.0.0.1'
        return out.split(":")[1].strip()
    else:
        return ''

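# Illustrative sketch: collecting the iDRAC/IPMI address of every node in a
# role; assumes get_node_list() accepts a role name, as it does elsewhere in
# this module.
def get_all_idrac_addrs(role='controller'):
    return dict((ip, get_idrac_addr(ip)) for ip in get_node_list(role))
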
def create_symbolic_links_on_openstack_node(nodes_info):
    LOG.info('Create symbolic links on openstack node.')
    for node in nodes_info:
        host = str(node['fuelweb_admin'].split('/')[0])
        src_file = '/etc/astute.yaml'
        dst_file = '/etc/hiera/astute.yaml'
        cmd = 'test -h %s || ln -s %s %s' % (dst_file, src_file, dst_file)
        (out, err) = ssh_connect(host, cmd)
        if err != '':
            LOG.error('Can not run command: %s on node %s.' % (cmd, host))
        else:
            LOG.debug('Create symbolic links on node %s.' % host)

def port_check_one(pid, l3_host=None):
    def port_log(device_owner, s):
        if device_owner == 'network:router_gateway':
            LOG.warn(s)
        else:
            LOG.error(s)

    fmt = 'json'
    cmd = ('neutron port-show %s -f %s -F status -F admin_state_up '
           '-F device_owner -F device_id' % (pid, fmt))
    out = run_command(cmd)
    if out:
        detail = port_result_to_json(out, fmt)
        device_owner = detail['device_owner']
        rid = detail['device_id']
        if l3_host is None:
            # if l3_host is None, it is certain that the function is called
            # via command line, rather than vrouter_check_one, so it's ok
            # to call vrouter_get_l3_host here.
            l3_host = vrouter_get_l3_host(rid)
        # 1) check status of gateway port and interface port
        if detail['status'] != 'ACTIVE':
            port_log(device_owner,
                     'status of port %s[%s] on %s is down'
                     % (device_owner, pid, l3_host))
        if not detail['admin_state_up']:
            port_log(device_owner,
                     'admin_status of port %s[%s] on %s is down'
                     % (device_owner, pid, l3_host))
        # 2) ping external gateway to check network status
        LOG.debug('check gateway for port on %s' % l3_host)
        if device_owner == 'network:router_gateway':
            LOG.debug('this port is an external port, check external gateway')
            gw = vrouter_get_gw_remote(l3_host, rid)
            if gw:
                LOG.debug('check external gateway %s on %s' % (gw, l3_host))
                cmd = 'ip netns exec qrouter-%s ping -c 1 %s' % (rid, gw)
                out, err = ssh_connect(l3_host, cmd)
                if not err:
                    LOG.debug('external gateway is ok')
                else:
                    LOG.error('failed to connect external gateway on %s'
                              % l3_host)
            else:
                LOG.error('failed to get external gateway on %s' % l3_host)
        else:
            LOG.debug('this port is a normal port, no need to check gateway')

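# Illustrative sketch: port_check_one() can be driven for all ports of a
# router. The 'neutron port-list --device-id ... -f json' invocation and the
# lowercase 'id' key are assumptions based on the port-show usage above.
import json


def router_port_check(rid):
    out = run_command('neutron port-list --device-id %s -f json -F id' % rid)
    if out:
        for entry in json.loads(out):
            port_check_one(entry['id'])
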
def _puppet_apply(host, module_path, manifest):
    success = False
    log_file = '/var/log/deployment_influxdb.log'
    # puppet logs to log_file; any stdout is the echoed exit code of a
    # failed run
    cmd = ('puppet apply --modulepath=%s -l %s --debug %s || echo $?'
           % (module_path, log_file, manifest))
    LOG.info('Apply manifest %s on node %s.'
             % (os.path.basename(manifest), host))
    (out, err) = ssh_connect(host, cmd)
    if out != '':
        LOG.error('Apply manifest %s on node %s failed. '
                  'Please check %s on node %s.'
                  % (os.path.basename(manifest), host, log_file, host))
    else:
        success = True
        LOG.debug('Apply manifest %s on node %s successfully.'
                  % (os.path.basename(manifest), host))
    return success

def get_instance_power_state(instance_id):
    compute_node = get_hypervisor_hostname(instance_id)
    instance_name = get_instance_name(instance_id)
    cmd = 'virsh domstate %s' % instance_name
    # take the first line of stdout, e.g. 'running'
    instance_state = ssh_connect(compute_node, cmd)[0].split('\n')[0]
    return instance_state

def ssh_cmd(hostname, cmd):
    # return the first line of stdout from the remote command
    reval = ssh_connect(hostname, cmd)[0].split('\n')[0]
    return reval

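# ssh_connect() itself is defined elsewhere in this project; for reference,
# a minimal paramiko-based sketch matching its observed contract (returning
# an (out, err) tuple of decoded stdout/stderr) might look like the
# following. Key-based auth as root and the 30s timeout are assumptions,
# not the real implementation.
import paramiko


def ssh_connect_sketch(hostname, cmd, username='root', timeout=30):
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(hostname, username=username, timeout=timeout)
    try:
        stdin, stdout, stderr = client.exec_command(cmd)
        return (stdout.read().decode(), stderr.read().decode())
    finally:
        client.close()
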