def install_packages_on_influxdbnode():
    LOG.info('Install rpm packages "%s" on node %s .'
                % (INSTALL_PACKAGES, INFLUXDB_HOST))
    (out, err) = ssh_connect(INFLUXDB_HOST, 'yum -d 0 -e 0 -y install %s'
                                % INSTALL_PACKAGES)
    if out != '':
        log_split_output(out, 'warn')
Example #2
def update_volume_quota(volume_id):
    LOG.info('   [%s]Updating volume quota ...' % volume_id)
    # get volume size & project id
    sql_get_size_project_id = 'SELECT size,project_id FROM volumes WHERE id=\'%s\';' % volume_id
    get_size_project_id = db_connect(sql_get_size_project_id)
    size = get_size_project_id[0]
    project_id = get_size_project_id[1]
    # get backend type
    backend_type = get_backend_type(volume_id)
    sql_update_gigabytes = 'UPDATE quota_usages SET in_use=in_use-%s where project_id=\'%s\' and resource=\'gigabytes\';' % (
        size, project_id)
    sql_update_volumes = 'UPDATE quota_usages SET in_use=in_use-1 where project_id=\'%s\' and resource=\'volumes\';' % project_id
    db_connect(sql_update_gigabytes)
    db_connect(sql_update_volumes)
    if backend_type == 'eqlx':
        sql_update_gigabytes_eqlx = 'UPDATE quota_usages SET in_use=in_use-%s where project_id=\'%s\' and resource=\'gigabytes_eqlx\';' % (
            size, project_id)
        sql_update_volumes_eqlx = 'UPDATE quota_usages SET in_use=in_use-1 where project_id=\'%s\' and resource=\'volumes_eqlx\';' % project_id
        db_connect(sql_update_gigabytes_eqlx)
        db_connect(sql_update_volumes_eqlx)
    elif backend_type == 'rbd':
        sql_update_gigabytes_rbd = 'UPDATE quota_usages SET in_use=in_use-%s where project_id=\'%s\' and resource=\'gigabytes_rbd\';' % (
            size, project_id)
        sql_update_volumes_rbd = 'UPDATE quota_usages SET in_use=in_use-1 where project_id=\'%s\' and resource=\'volumes_rbd\';' % project_id
        db_connect(sql_update_gigabytes_rbd)
        db_connect(sql_update_volumes_rbd)
Example #3
def setup(parser):
    """Set things up for the upgrade operation."""
    if NODE_ROLE.is_fuel():
        setup_rsyncd_config()
        setup_nodes(parser.MYIP)
    else:
        LOG.error('This command can only be run on the fuel node.')
Example #4
def check_ntp():
    check_service('ntpd')
    ntpserver = _get_ntpserver()
    if not ntpserver:
        LOG.error('Can not get ntp server, please check it.')
    else:
        LOG.debug('ntpserver is %s' % ntpserver)
Example #5
def delete_backend_snapshots_rbd(snapshots_id, volume_id):
    success = True
    rbd_pool = get_backend_pool(volume_id)
    LOG.info('   Deleting backend(rbd) snapshots ...')
    for snapshot_id in snapshots_id:
        LOG.info('   [%s]Deleting backend snapshot ...' % snapshot_id)
        (s, o) = commands.getstatusoutput(
            'rbd -p %s snap unprotect --image volume-%s --snap snapshot-%s' %
            (rbd_pool, volume_id, snapshot_id))
        if s == 0:
            (ss, oo) = commands.getstatusoutput(
                'rbd -p %s snap rm --image volume-%s --snap snapshot-%s' %
                (rbd_pool, volume_id, snapshot_id))
            if ss != 0:
                LOG.error('Can not delete backend snapshot "snapshot-%s" !' %
                          snapshot_id)
                success = False
        elif 'No such file or directory' in o:
            LOG.error('   This snapshot does not exist !')
            success = False
        elif 'Device or resource busy' in o:
            LOG.error(
                '   Unprotecting snapshot failed. Device or resource busy !')
            success = False
        else:
            success = False
    return success
Example #6
def write_db(backup_id, backup_file):
    # append
    try:
        with open('/tmp/tools.db', 'a') as db:
            db.write('%s %s\n' % (backup_id, backup_file))
    except Exception:
        LOG.error('Write to db error!')
Example #7
def delete_backend_snapshots_rbd(snapshots_id, volume_id):
    success = True
    rbd_pool = get_backend_pool(volume_id)
    LOG.info('   Deleting backend(rbd) snapshots ...')
    for snapshot_id in snapshots_id:
        LOG.info('   [%s]Deleting backend snapshot ...' % snapshot_id)
        (s, o) = commands.getstatusoutput(
                 'rbd -p %s snap unprotect --image volume-%s --snap snapshot-%s'
                 % (rbd_pool, volume_id, snapshot_id))
        if s == 0:
            (ss, oo) = commands.getstatusoutput(
                       'rbd -p %s snap rm --image volume-%s --snap snapshot-%s'
                       % (rbd_pool, volume_id, snapshot_id))
            if ss != 0:
                LOG.error('Can not delete backend snapshot "snapshot-%s" !' % snapshot_id)
                success = False
        elif 'No such file or directory' in o:
            LOG.error('   This snapshot does not exist !')
            success = False
        elif 'Device or resource busy' in o:
            LOG.error('   Unprotecting snapshot failed. Device or resource busy !')
            success = False
        else:
            success = False
    return success
Example #8
def ami(parser):
    if not NODE_ROLE.is_controller():
        LOG.warn('This command can only run on controller node !')
    else:
        # "if controller leave to last"
        if not parser.KERNEL_FILE and not parser.INITRD_FILE and not parser.IMAGE_FILE:
            LOG.error('Lack of arguments, you can use --help to get help information\n')
        elif not parser.KERNEL_FILE:
            LOG.error('Please specify the kernel file\n')
        elif not parser.INITRD_FILE:
            LOG.error('Please specify the initrd file\n')
        elif not parser.IMAGE_FILE:
            LOG.error('Please specify the image file\n')
        else:
            if parser.NAME:
                # split the path and filename
                kernel_file_name = os.path.basename(r'%s' % parser.KERNEL_FILE)
                initrd_file_name = os.path.basename(r'%s' % parser.INITRD_FILE)
                ami_image_upload(parser.KERNEL_FILE, kernel_file_name,
                                 parser.INITRD_FILE, initrd_file_name,
                                 parser.IMAGE_FILE, parser.NAME)
            else:
                # if no image name is specified, use IMAGE_FILE as the AMI name
                # split the path and filename
                kernel_file_name = os.path.basename(r'%s' % parser.KERNEL_FILE)
                initrd_file_name = os.path.basename(r'%s' % parser.INITRD_FILE)
                ami_image_name = os.path.basename(r'%s' % parser.IMAGE_FILE)
                ami_image_upload(parser.KERNEL_FILE, kernel_file_name,
                                 parser.INITRD_FILE, initrd_file_name,
                                 parser.IMAGE_FILE, ami_image_name)
Example #9
def ssh_connect(hostname, commands,
                key_file=os.environ['HOME'] + '/.ssh/id_rsa',
                ssh_port=22, username='******', timeout=2):
    # Temporarily disable INFO level logging
    logging.disable(logging.INFO)
    # need use rsa key, if use dsa key replace 'RSA' to 'DSS'
    key = paramiko.RSAKey.from_private_key_file(key_file)
    s = paramiko.SSHClient()
    s.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    try:
        s.connect(hostname, ssh_port, username=username,
                  pkey=key, timeout=timeout)
        stdin, stdout, stderr = s.exec_command(commands)
        result_out = stdout.read()
        result_err = stderr.read()
    except paramiko.ssh_exception.AuthenticationException:
        result_out = result_err = ''
        LOG.error('Can not connect to %s, Authentication (publickey) '
                  'failed !' % (hostname))
    except socket.timeout:
        result_out = result_err = ''
        LOG.error('Can not connect to %s, Connect time out !' % (hostname))
    finally:
        s.close()
        logging.disable(logging.NOTSET)
    return result_out, result_err
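A minimal usage sketch for the ssh_connect helper above, assuming the key-based login it expects is already configured; the hostname and command below are purely illustrative:
# Hypothetical call site: run a quick remote command and log the result.
(out, err) = ssh_connect('node-1', 'uptime')
if err:
    LOG.error(err)
elif out:
    LOG.debug('uptime on node-1: %s' % out.strip())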
Example #10
def _network_remote_network_inf(cfg):
    nodes = cfg['nodes']
    all_node_inf = []
    for n in nodes:
        try:
            node_inf = {}
            if n['role'].endswith('controller'):
                # don't treat primary-controller specially; consider
                # it a normal controller
                node_inf['role'] = 'controller'
            else:
                node_inf['role'] = n['role']
            if n['role'].endswith('controller'):
                node_inf['public_address'] = n['public_address']
            node_inf['internal_address'] = n['internal_address']
            node_inf['host'] = n['fqdn']
            if not n['role'].endswith('mongo'):
                node_inf['storage_address'] = n['storage_address']
            if n['role'].endswith('ceph-osd'):
                node_inf['ceph_cluster_address'] = n['ceph_cluster_address']
            all_node_inf.append(node_inf)
        except Exception:
            LOG.error("failed to parse node: %s" % n['fqdn'])
            continue
    return all_node_inf
Example #11
def push_conf_file_to_influxdbnode():
    LOG.info('Push conf file to influxdb node.')
    push_repo_file_to_node(INFLUXDB_HOST, 'influxdb_grafana',
                    INFLUXDB_REPO_CONF_FILEPATH, backup=True)
    push_repo_file_to_node(INFLUXDB_HOST, 'nailgun',
                    NAILGUN_REPO_CONF_FILEPATH)
    push_yaml_to_node(INFLUXDB_HOST, ASTUTE_CONF_FILEPATH, 'astute.yaml')
Example #12
def setup(parser):
    """Set things up for the upgrade operation."""
    if NODE_ROLE.is_fuel():
        setup_rsyncd_config()
        setup_nodes(parser.MYIP)
    else:
        LOG.error('This command can only be run on the fuel node.')
Example #13
def update():
    '''update eayunstack-tools on all nodes'''
    node_list = get_node_list('all')
    update_cmd = 'yum clean all && yum -y -d 0 update eayunstack-tools'
    results = run_cmd_on_nodes(node_list, update_cmd)
    get_current_version = \
        'rpm --queryformat "%{VERSION} %{RELEASE}" -q eayunstack-tools'
    current_version = run_cmd_on_nodes(node_list, get_current_version)
    correct_version = run_command(
        'rpm --queryformat "%{VERSION} %{RELEASE}" -q eayunstack-tools')

    for node in node_list:
        out = results[node][0]
        err = results[node][1]
        current_ver = current_version[node][0].split(' ')[0] + \
            '-' + current_version[node][0].split(' ')[1].split('.')[0]
        correct_ver = correct_version[0].split(' ')[0] + \
            '-' + correct_version[0].split(' ')[1].split('.')[0]
        if err or current_ver != correct_ver:
            LOG.error('Update on %s failed !' % node)
            LOG.error('Current version: %s' % current_ver)
            for l in err.split('\n'):
                LOG.error(l)
            print
        else:
            LOG.info('Update on %s successfully.' % node)
            LOG.info('Current version: %s' % current_ver)
            print
Example #14
def delete_snapshots(snapshots_id):
    LOG.info('Deleting snapshot %s ...' % snapshots_id)
    if delete_backend_snapshots(snapshots_id):
        update_snapshots_db(snapshots_id)
        return True
    else:
        return False
Example #15
def _check_managed_status(resource):
    if resource['@managed'] == 'true':
        LOG.info('Resource %s was managed on node %s' \
                 % (resource['@id'], resource['node']['@name']))
    else:
        LOG.error('Resource %s was unmanaged on node %s' \
                  % (resource['@id'], resource['node']['@name']))
Example #16
def delete_volume(volume_id, volume_status):
    LOG.info('Deleting volume %s ...' % volume_id)
    if volume_status == 'creating':
        update_db(volume_id)
    else:
        if delete_backend_volume(volume_id):
            update_db(volume_id)
Example #17
def orphan(parser):
    logging.disable(logging.INFO)
    if not NODE_ROLE.is_controller():
        LOG.warn('This command can only run on controller node !')
        return

    # run delete orphan
    # run delete servers thread first
    nova_thread = RunNovaThread()
    nova_thread.start()
    nova_thread.join()

    # run other thread parallel
    threads = [
        RunCinderThread(),
        RunGlanceThread(),
        RunNetBaseThread(),
        RunFirewallThread(),
        RunSecgroupThread(),
        RunVPNThread(),
        RunLBThread(),
        RunQoSThread()
    ]

    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()

    logging.disable(logging.NOTSET)
Example #18
def check_node_online(host):
    online = False
    (out, err) = ssh_connect(host, 'echo "online test"')
    if out.split('\n')[0] == 'online test':
        LOG.debug('Node %s is online .' % host)
        online = True
    return online
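A short sketch of how check_node_online could be combined with get_node_list (used in other examples here) to keep only the reachable controller nodes; the result variable name is illustrative:
# Hypothetical helper: filter the controller list down to reachable nodes.
online_controllers = [node for node in get_node_list('controller')
                      if check_node_online(node)]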
Example #19
def check_node_online(host):
    online = False
    (out, err) = ssh_connect(host, 'echo "online test"')
    if out.split('\n')[0] == 'online test':
        LOG.debug('Node %s is online .' % host)
        online = True
    return online
Example #20
def check_ntp():
    check_service("ntpd")
    ntpserver = _get_ntpserver()
    if not ntpserver:
        LOG.error("Can not get ntp server, please check it.")
    else:
        LOG.debug("ntpserver is %s" % ntpserver)
Example #21
def push_yaml_to_node(host, src_path, dst_file_name):
    (out, err) = ssh_connect2(host, 'test -d /etc/hiera || mkdir /etc/hiera')
    if err == '':
        LOG.debug('Push %s to node %s .' % (src_path, host))
        scp_connect(host, src_path, '/etc/hiera/%s' % dst_file_name)
    else:
        LOG.error('Can not create "/etc/hiera/" on node %s .' % host)
Example #22
def check_all_openstack_node_online(nodes_info):
    LOG.info('Checking that all openstack nodes are online ...')
    for node in nodes_info:
        host = str(node['fuelweb_admin'].split('/')[0])
        if not check_node_online(host):
            return False
    return True
Example #23
def ami(parser):
    if not NODE_ROLE.is_controller():
        LOG.warn('This command can only run on controller node !')
    else:
        # "if controller leave to last"
        if not parser.KERNEL_FILE and not parser.INITRD_FILE and not parser.IMAGE_FILE:
            LOG.error(
                'Lack of arguments, you can use --help to get help information\n'
            )
        elif not parser.KERNEL_FILE:
            LOG.error('Please specify the kernel file\n')
        elif not parser.INITRD_FILE:
            LOG.error('Please specify the initrd file\n')
        elif not parser.IMAGE_FILE:
            LOG.error('Please specify the image file\n')
        else:
            if parser.NAME:
                # split the path and filename
                kernel_file_name = os.path.basename(r'%s' % parser.KERNEL_FILE)
                initrd_file_name = os.path.basename(r'%s' % parser.INITRD_FILE)
                ami_image_upload(parser.KERNEL_FILE, kernel_file_name,
                                 parser.INITRD_FILE, initrd_file_name,
                                 parser.IMAGE_FILE, parser.NAME)
            else:
                # if no image name is specified, use IMAGE_FILE as the AMI name
                # split the path and filename
                kernel_file_name = os.path.basename(r'%s' % parser.KERNEL_FILE)
                initrd_file_name = os.path.basename(r'%s' % parser.INITRD_FILE)
                ami_image_name = os.path.basename(r'%s' % parser.IMAGE_FILE)
                ami_image_upload(parser.KERNEL_FILE, kernel_file_name,
                                 parser.INITRD_FILE, initrd_file_name,
                                 parser.IMAGE_FILE, ami_image_name)
Example #24
def get_value_common(key, filepath):
    (s, o) = commands.getstatusoutput('grep "^%s" %s | cut -d "=" -f 2' %
                                      (key, filepath))
    if s != 0 or o is None:
        LOG.error('Can not get %s\'s value ! Please check file: %s.' %
                  (key, filepath))
    return o.strip()
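A small sketch of calling get_value_common; the key name and file path below are placeholder values, not ones taken from these examples:
# Hypothetical lookup: read a shell-style KEY=VALUE entry from a config file.
admin_token = get_value_common('ADMIN_TOKEN', '/etc/example/settings.conf')
if admin_token:
    LOG.debug('ADMIN_TOKEN is %s' % admin_token)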
Example #25
def rsync_plugin_modules_on_node(host, plugin_name, fuel_node_ip):
    LOG.debug('Sync plugin modules on node %s .' % host)
    plugin_version = get_plugin_version(plugin_name)
    modules_path = 'rsync://' \
                   + fuel_node_ip \
                   + ':/plugins/' \
                   + plugin_name \
                   + '-' \
                   + plugin_version \
                   + '/deployment_scripts/puppet/'
    plugin_dir = '/etc/fuel/plugins/' \
                + plugin_name \
                + '-' \
                + plugin_version \
                + '/puppet'
    (out, err) = ssh_connect(
        host, 'test -d %s || mkdir -p %s' % (plugin_dir, plugin_dir))
    if err != '':
        log_split_output(err, 'error')
    (out, err) = ssh_connect(
        host, 'rsync -vzrtopg %s %s' % (modules_path, plugin_dir))
    if err != '':
        log_split_output(err, 'error')
Example #26
def delete_backend_volume_eqlx(volume_id):
    # get provider_location
    sql_get_provider_location = 'SELECT provider_location FROM volumes WHERE id =\'%s\';' % volume_id
    provider_location = str(db_connect(sql_get_provider_location)).split()[1]
    # ssh to controller and compute node to delete the iscsi connection
    LOG.info('   Deleting iscsi connection ...')
    controller_list = get_node_list('controller')
    compute_list = get_node_list('compute')
    node_list = controller_list + compute_list
    for node in node_list:
        cmd_show = 'iscsiadm -m session -o show'
        (out, err) = ssh_connect(node, cmd_show)
        if provider_location in out:
            cmd_delete = 'iscsiadm -m node -u -T %s' % provider_location
            (o, e) = ssh_connect(node, cmd_delete)
            if 'successful' not in o:
                LOG.error('   Can not delete the iscsi connection "%s" at host %s.' % (provider_location, node))
    # ssh to eqlx to delete the volume
    LOG.info('   Deleting backend(eqlx) volume ...')
    ## set eqlx volume status to offline
    cmd_set_eqlx_volume_offline = 'volume select volume-%s offline' % volume_id
    out = eqlx_ssh_execute(cmd_set_eqlx_volume_offline)
    if len(out) == 3:
        LOG.error('   ' + out[1])
        return False
    ## delete the eqlx volume
    cmd_delete_eqlx_volume = 'volume delete volume-%s' % volume_id
    result = eqlx_ssh_execute(cmd_delete_eqlx_volume)
    if not result or result[1] != 'Volume deletion succeeded.':
        LOG.error('   Delete backend volume failed !')
        return False
    else:
        return True
Example #27
def update_nova_db(instance_id): 
    LOG.info('Update nova database.')
    tenant_id = get_tenant_id(instance_id)
    flavor_id = get_flavor(instance_id)['id']
    ram_usage = get_flavor_resource(flavor_id, 'ram')
    vcpu_usage = get_flavor_resource(flavor_id, 'vcpus')
    # update instances table vm_state power_state
    nova_db.connect('UPDATE instances SET vm_state="deleted",power_state=0,deleted=id WHERE uuid="%s";' % instance_id)
    # update quota_usages table instances
    nova_db.connect('UPDATE quota_usages SET in_use=in_use-1 WHERE project_id="%s" and resource="instances";' % tenant_id)
    # update quota_usages table
    nova_db.connect('UPDATE quota_usages SET in_use=in_use-%s WHERE project_id="%s" and resource="ram";' % (ram_usage, tenant_id))
    # update quota_usages table
    nova_db.connect('UPDATE quota_usages SET in_use=in_use-%s WHERE project_id="%s" and resource="cores";' % (vcpu_usage, tenant_id))
    # update instance_faults table
    nova_db.connect('UPDATE instance_faults SET deleted=id WHERE instance_uuid="%s";' % instance_id)
    # update instance_info_caches table
    nova_db.connect('UPDATE instance_info_caches SET deleted=id WHERE instance_uuid="%s";' % instance_id)
    # update security_group_instance_association table
    nova_db.connect('UPDATE security_group_instance_association SET deleted=id WHERE instance_uuid="%s";' % instance_id)
    # update block_device_mapping table
    nova_db.connect('UPDATE block_device_mapping SET deleted=id WHERE instance_uuid="%s";' % instance_id)
    # update fixed_ips table
    nova_db.connect('UPDATE fixed_ips SET deleted=id WHERE instance_uuid="%s";' % instance_id)
    # update virtual_interfaces table
    nova_db.connect('UPDATE virtual_interfaces SET deleted=id WHERE instance_uuid="%s";' % instance_id)
Example #28
def delete_volume(volume_id, volume_status):
    LOG.info('Deleting volume %s ...' % volume_id)
    if volume_status == 'creating':
        update_db(volume_id)
    else:
        if delete_backend_volume(volume_id):
            update_db(volume_id)
Example #29
def check_all_nodes(check_obj):
    if check_obj == 'all':
        if LOG.enable_debug:
            check_cmd = 'sudo eayunstack --debug doctor cls --all'
        else:
            check_cmd = 'sudo eayunstack doctor cls --all'
    else:
        if LOG.enable_debug:
            check_cmd = 'sudo eayunstack --debug doctor cls -n %s' % check_obj
        else:
            check_cmd = 'sudo eayunstack doctor cls -n %s' % check_obj
    # get controller node list
    node_list = get_node_list('controller')
    # ssh to all controller node to check obj
    if len(node_list) == 0:
        LOG.warn('Node list is null !')
        return
    else:
        if check_obj == 'ceph':
            # only need to check one node for ceph cluster
            ceph_node = node_list[0]
            run_doctor_cmd_on_node('controller', ceph_node, check_cmd)
        else:
            (proc_list, pipe) = run_doctor_on_nodes('controller', node_list,
                                                    check_cmd)
            for proc in proc_list:
                proc.join()
            LOG.info(pipe.recv(), remote=True)
Example #30
def update():
    '''update eayunstack-tools on all nodes'''
    node_list = get_node_list('all')
    update_cmd = 'yum clean all && yum -y -d 0 update eayunstack-tools'
    results = run_cmd_on_nodes(node_list, update_cmd)
    get_current_version = \
        'rpm --queryformat "%{VERSION} %{RELEASE}" -q eayunstack-tools'
    current_version = run_cmd_on_nodes(node_list, get_current_version)
    correct_version = run_command(
        'rpm --queryformat "%{VERSION} %{RELEASE}" -q eayunstack-tools')

    for node in node_list:
        out = results[node][0]
        err = results[node][1]
        current_ver = current_version[node][0].split(' ')[0] + \
            '-' + current_version[node][0].split(' ')[1].split('.')[0]
        correct_ver = correct_version[0].split(' ')[0] + \
            '-' + correct_version[0].split(' ')[1].split('.')[0]
        if err or current_ver != correct_ver:
            LOG.error('Update on %s failed !' % node)
            LOG.error('Current version: %s' % current_ver)
            for l in err.split('\n'):
                LOG.error(l)
            print
        else:
            LOG.info('Update on %s successfully.' % node)
            LOG.info('Current version: %s' % current_ver)
            print
Example #31
def _network_remote_network_inf(cfg):
    nodes = cfg['nodes']
    all_node_inf = []
    for n in nodes:
        try:
            node_inf = {}
            if n['role'].endswith('controller'):
                # don't treat primary-controller specially; consider
                # it a normal controller
                node_inf['role'] = 'controller'
            else:
                node_inf['role'] = n['role']
            if n['role'].endswith('controller'):
                node_inf['public_address'] = n['public_address']
            node_inf['internal_address'] = n['internal_address']
            node_inf['host'] = n['fqdn']
            if not n['role'].endswith('mongo'):
                node_inf['storage_address'] = n['storage_address']
            if n['role'].endswith('ceph-osd'):
                node_inf['ceph_cluster_address'] = n['ceph_cluster_address']
            all_node_inf.append(node_inf)
        except Exception:
            LOG.error("failed to parse node: %s" % n['fqdn'])
            continue
    return all_node_inf
Example #32
def install_packages_on_influxdbnode():
    LOG.info('Install rpm packages "%s" on node %s .' %
             (INSTALL_PACKAGES, INFLUXDB_HOST))
    (out, err) = ssh_connect(INFLUXDB_HOST,
                             'yum -d 0 -e 0 -y install %s' % INSTALL_PACKAGES)
    if out != '':
        log_split_output(out, 'warn')
Example #33
def check_ntp():
    check_service('ntpd')
    ntpserver = _get_ntpserver()
    if not ntpserver:
        LOG.error('Can not get ntp server, please check it.')
    else:
        LOG.debug('ntpserver is %s' % ntpserver)
Example #34
def check_all_nodes(check_obj):
    if check_obj == 'all':
        if LOG.enable_debug:
            check_cmd = 'sudo eayunstack --debug doctor cls --all'
        else:
            check_cmd = 'sudo eayunstack doctor cls --all'
    else:
        if LOG.enable_debug:
            check_cmd = 'sudo eayunstack --debug doctor cls -n %s' % check_obj
        else:
            check_cmd = 'sudo eayunstack doctor cls -n %s' % check_obj
    # get controller node list
    node_list = get_node_list('controller')
    # ssh to all controller node to check obj
    if len(node_list) == 0:
        LOG.warn('Node list is null !')
        return
    else:
        if check_obj == 'ceph':
            # only need to check one node for ceph cluster
            ceph_node = node_list[0]
            run_doctor_cmd_on_node('controller', ceph_node, check_cmd)
        else:
            proc_list = run_doctor_on_nodes('controller', node_list, check_cmd)
            for proc in proc_list:
                proc.join()
Example #35
def check_all_nodes(check_obj):
    if check_obj == 'all':
        if LOG.enable_debug:
            check_cmd = 'sudo eayunstack --debug doctor cls --all'
        else:
            check_cmd = 'sudo eayunstack doctor cls --all'
    else:
        if LOG.enable_debug:
            check_cmd = 'sudo eayunstack --debug doctor cls -n %s' % check_obj
        else:
            check_cmd = 'sudo eayunstack doctor cls -n %s' % check_obj
    # get controller node list
    node_list = get_node_list('controller')
    # ssh to all controller node to check obj
    if len(node_list) == 0:
        LOG.warn('Node list is null !')
        return
    else:
        if check_obj == 'ceph':
            # only need to check one node for ceph cluster
            ceph_node = node_list[0]
            run_doctor_cmd_on_node('controller', ceph_node, check_cmd)
        else:
            nodes = []
            for node in node_list:
                node_info = {}
                node_info['role'] = 'controller'
                node_info['name'] = node
                nodes.append(node_info)
            result = run_doctor_on_nodes(nodes, check_cmd)
            for res in result:
                LOG.info(res, remote=True)
Example #36
def write_db(backup_id, backup_file):
    # append
    try:
        with open('/tmp/tools.db', 'a') as db:
            db.write('%s %s\n' % (backup_id, backup_file))
    except Exception:
        LOG.error('Write to db error!')
Example #37
def check_all_nodes(check_obj):
    if check_obj == 'all':
        if LOG.enable_debug:
            check_cmd = 'sudo eayunstack --debug doctor cls --all'
        else:
            check_cmd = 'sudo eayunstack doctor cls --all'
    else:
        if LOG.enable_debug:
            check_cmd = 'sudo eayunstack --debug doctor cls -n %s' % check_obj
        else:
            check_cmd = 'sudo eayunstack doctor cls -n %s' % check_obj
    # get controller node list
    node_list = get_node_list('controller')
    # ssh to all controller node to check obj
    if len(node_list) == 0:
        LOG.warn('Node list is null !')
        return
    else:
        if check_obj == 'ceph':
            # only need to check one node for ceph cluster
            ceph_node = node_list[0]
            run_doctor_cmd_on_node('controller', ceph_node, check_cmd)
        else:
            nodes = []
            for node in node_list:
                node_info = {}
                node_info['role'] = 'controller'
                node_info['name'] = node
                nodes.append(node_info)
            result = run_doctor_on_nodes(nodes, check_cmd)
            for res in result:
                LOG.info(res, remote=True)
Example #38
def delete_backend_volume_eqlx(volume_id):
    # get provider_location
    sql_get_provider_location = 'SELECT provider_location FROM volumes WHERE id =\'%s\';' % volume_id
    provider_location = str(db_connect(sql_get_provider_location)).split()[1]
    # ssh to controller and compute node to delete the iscsi connection
    LOG.info('   Deleting iscsi connection ...')
    controller_list = get_node_list('controller')
    compute_list = get_node_list('compute')
    node_list = controller_list + compute_list
    for node in node_list:
        cmd_show = 'iscsiadm -m session -o show'
        (out, err) = ssh_connect(node, cmd_show)
        if provider_location in out:
            cmd_delete = 'iscsiadm -m node -u -T %s' % provider_location
            (o, e) = ssh_connect(node, cmd_delete)
            if 'successful' not in o:
                LOG.error(
                    '   Can not delete the iscsi connection "%s" at host %s.' %
                    (provider_location, node))
    # ssh to eqlx to delete the volume
    LOG.info('   Deleting backend(eqlx) volume ...')
    ## set eqlx volume status to offline
    cmd_set_eqlx_volume_offline = 'volume select volume-%s offline' % volume_id
    out = eqlx_ssh_execute(cmd_set_eqlx_volume_offline)
    if len(out) == 3:
        LOG.error('   ' + out[1])
        return False
    ## delete the eqlx volume
    cmd_delete_eqlx_volume = 'volume delete volume-%s' % volume_id
    result = eqlx_ssh_execute(cmd_delete_eqlx_volume)
    if not result or result[1] != 'Volume deletion succeeded.':
        LOG.error('   Delete backend volume failed !')
        return False
    else:
        return True
Example #39
def check_all_openstack_node_online(nodes_info):
    LOG.info('Checking that all openstack nodes are online ...')
    for node in nodes_info:
        host = str(node['fuelweb_admin'].split('/')[0])
        if not check_node_online(host):
            return False
    return True
Example #40
def stack(parser):
    # if node role is "unknown", go back
    if NODE_ROLE.is_unknown():
        LOG.error('Can not confirm the node role!')
        return
    if not NODE_ROLE.is_fuel():
        if parser.CONTROLLER:
            if not NODE_ROLE.is_controller():
                cmd_warn('controller')
                return
        if parser.COMPUTE:
            if not NODE_ROLE.is_compute():
                cmd_warn('compute')
                return
        if parser.MONGO:
            if not NODE_ROLE.is_mongo():
                cmd_warn('mongo')
                return
    if parser.CONTROLLER or parser.COMPUTE or parser.MONGO:
        if parser.PROFILE and not parser.SERVICE and not parser.CHECK_ALL:
            if parser.CONTROLLER:
                check('controller', 'profile')
            if parser.COMPUTE:
                check('compute', 'profile')
            if parser.MONGO:
                check('mongo', 'profile')
        if parser.SERVICE and not parser.PROFILE and not parser.CHECK_ALL:
            if parser.CONTROLLER:
                check('controller', 'service')
            if parser.COMPUTE:
                check('compute', 'service')
            if parser.MONGO:
                check('mongo', 'service')
        if (parser.SERVICE and parser.PROFILE) or parser.CHECK_ALL \
                or (not parser.PROFILE and not parser.SERVICE):
            if parser.CONTROLLER:
                check('controller', 'all')
            if parser.COMPUTE:
                check('compute', 'all')
            if parser.MONGO:
                check('mongo', 'all')
        return
    # check all
    if parser.CHECK_ALL and parser.PROFILE and parser.SERVICE:
        check_all()
        return
    elif parser.CHECK_ALL and parser.PROFILE:
        check_all_profile()
        return
    elif parser.CHECK_ALL and parser.SERVICE:
        check_all_service()
        return
    elif parser.CHECK_ALL:
        check_all()
        return
    # check profile or service
    if parser.PROFILE:
        check_all_profile()
    if parser.SERVICE:
        check_all_service()
Example #41
def stack(parser):
    # if node role is "unknown", go back
    if NODE_ROLE.is_unknown():
        LOG.error('Can not confirm the node role!')
        return
    if not NODE_ROLE.is_fuel():
        if parser.CONTROLLER:
            if not NODE_ROLE.is_controller():
                cmd_warn('controller')
                return
        if parser.COMPUTE:
            if not NODE_ROLE.is_compute():
                cmd_warn('compute')
                return
        if parser.MONGO:
            if not NODE_ROLE.is_mongo():
                cmd_warn('mongo')
                return
    if parser.CONTROLLER or parser.COMPUTE or parser.MONGO:
        if parser.PROFILE and not parser.SERVICE and not parser.CHECK_ALL:
            if parser.CONTROLLER:
                check('controller', 'profile')
            if parser.COMPUTE:
                check('compute', 'profile')
            if parser.MONGO:
                check('mongo', 'profile')
        if parser.SERVICE and not parser.PROFILE and not parser.CHECK_ALL:
            if parser.CONTROLLER:
                check('controller', 'service')
            if parser.COMPUTE:
                check('compute', 'service')
            if parser.MONGO:
                check('mongo', 'service')
        if (parser.SERVICE and parser.PROFILE) or parser.CHECK_ALL \
                or (not parser.PROFILE and not parser.SERVICE):
            if parser.CONTROLLER:
                check('controller', 'all')
            if parser.COMPUTE:
                check('compute', 'all')
            if parser.MONGO:
                check('mongo', 'all')
        return
    # check all
    if parser.CHECK_ALL and parser.PROFILE and parser.SERVICE:
        check_all()
        return
    elif parser.CHECK_ALL and parser.PROFILE:
        check_all_profile()
        return
    elif parser.CHECK_ALL and parser.SERVICE:
        check_all_service()
        return
    elif parser.CHECK_ALL:
        check_all()
        return
    # check profile or service
    if parser.PROFILE:
        check_all_profile()
    if parser.SERVICE:
        check_all_service()
Example #42
def ping(peer, hostname, network_role):
    (status, out) = commands.getstatusoutput('ping -c 1 %s' % peer)
    if status == 0:
        LOG.debug('ping %s(%s) reached --- %s network'
                  % (peer, hostname, network_role))
    else:
        LOG.error('ping %s(%s) can not be reached --- %s network!'
                  % (peer, hostname, network_role))
Example #43
def check_services(services_list):
    for service in services_list:
        if service['status'] != 'enabled':
            LOG.warn('Service %s on %s status is %s' %
                     (service['binary'], service['host'], service['status']))
        if service['state'] != 'up':
            LOG.error('Service %s on %s state is %s' %
                      (service['binary'], service['host'], service['state']))
Example #44
def check_node_profiles(role):
    component_list = eval('get_%s_component' % role)()
    for c in component_list:
        LOG.info('Checking "%s" Component' % c.capitalize())
        profile_list = eval('get_%s_profiles' % c)()
        for p in profile_list:
            LOG.debug('Profile: ' + p)
            check_profile(p, role)
Example #45
def check_file_resolvability(filepath):
    tp = ConfigParser.ConfigParser()
    try:
        tp.read(filepath)
    except ConfigParser.ParsingError, msg:
        LOG.error(msg)
        LOG.error('Abort this check!')
        return False
Example #46
def deployment_monitor_plugins(parser):
    if not NODE_ROLE.is_fuel():
        LOG.warn('This command can only run on fuel node !')
        return
    if parser.INFLUXDB:
        deployment_influxdb_grafana(parser.ENV)
    if parser.LMA_COLLECTOR:
        deployment_lma_collector(parser.ENV)
Example #47
def push_conf_file_to_openstack_node(nodes_info):
    LOG.info('Push conf file to openstack node.')
    for node in nodes_info:
        host = str(node['fuelweb_admin'].split('/')[0])
        push_repo_file_to_node(host, 'lma_collector', LMA_REPO_CONF_FILEPATH)
        src_path = CONF_TMP_DIR + 'globals.yaml' + '-' + host
        dst_file_name = 'globals.yaml'
        push_yaml_to_node(host, src_path, dst_file_name)
Example #48
def update_volume_table(volume_id):
    LOG.info('   [%s]Updating volumes table ...' % volume_id)
    sql_update = 'UPDATE volumes SET deleted=1,status=\'deleted\' WHERE id=\'%s\';' % volume_id
    db_connect(sql_update)
    sql_select = 'SELECT deleted,status FROM volumes WHERE id =\'%s\';' % volume_id
    rest = db_connect(sql_select)
    if rest[0] != 1 or rest[1] != 'deleted':
        LOG.error('   Database update failed !')
Example #49
def init(parser):
    if NODE_ROLE.is_unknown():
        LOG.error('Can not confirm the node role!')
    if not NODE_ROLE.is_fuel():
        LOG.warn('This command can only run on fuel node !')
        return
    init_node_list_file()
    init_node_role_file()
Example #50
def ssh_connect2(hostname, commands):
    """exec ssh command and print the result """
    out, err = ssh_connect(hostname, commands)
    if out:
        LOG.info(out, remote=True)
    elif err:
        LOG.info(err, remote=True)
    return out, err
Example #51
def check_file_resolvability(filepath):
    tp = ConfigParser.ConfigParser()
    try:
        tp.read(filepath)
    except ConfigParser.ParsingError, msg:
        LOG.error(msg)
        LOG.error('Abort this check!')
        return False
Example #52
def check_node_profiles(role):
    component_list = eval('get_%s_component' % role)()
    for c in component_list:
        LOG.info('Checking "%s" Component' % c.capitalize())
        profile_list = eval('get_%s_profiles' % c)()
        for p in profile_list:
            LOG.debug('Profile: ' + p)
            check_profile(p, role)
Example #53
def run_command(cmd):
    reval = None
    (status, out) = commands.getstatusoutput(cmd)
    if status != 0:
        LOG.error("run %s error: %s" % (cmd, out))
    else:
        reval = out
    return reval
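A hedged sketch of how this run_command variant tends to be used, mirroring the rpm version query in the update() example; run_command returns None on failure, so the caller should check for that:
# Hypothetical call site: query the installed eayunstack-tools version.
version = run_command('rpm --queryformat "%{VERSION}" -q eayunstack-tools')
if version is None:
    LOG.error('Can not get eayunstack-tools version !')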
Example #54
def deployment_monitor_plugins(parser):
    if not NODE_ROLE.is_fuel():
        LOG.warn('This command can only run on fuel node !')
        return
    if parser.INFLUXDB:
        deployment_influxdb_grafana(parser.ENV)
    if parser.LMA_COLLECTOR:
        deployment_lma_collector(parser.ENV)
Example #55
def init(parser):
    if NODE_ROLE.is_unknown():
        LOG.error("Can not confirm the node role!")
    if not NODE_ROLE.is_fuel():
        LOG.warn("This command can only run on fuel node !")
        return
    init_node_list_file()
    init_node_role_file()
Example #56
def update_volume_table(volume_id):
    LOG.info('   [%s]Updating volumes table ...' % volume_id)
    sql_update = 'UPDATE volumes SET deleted=1,status=\'deleted\' WHERE id=\'%s\';' % volume_id
    db_connect(sql_update)
    sql_select = 'SELECT deleted,status FROM volumes WHERE id =\'%s\';' % volume_id
    rest = db_connect(sql_select)
    if rest[0] != 1 or rest[1] != 'deleted':
        LOG.error('   Database update failed !')
Example #57
def get_config(section, key):
    profile = '/etc/cinder/cinder.conf'
    try:
        cp = ConfigParser.ConfigParser()
        cp.read(profile)
        value = cp.get(section, key)
        return value
    except Exception:
        LOG.error('   Can not get %s\'s value !' % key)
Example #58
def delete_snapshots(snapshots_id, volume_id):
    LOG.info('Deleting snapshot %s ...' % snapshots_id)
    if delete_backend_snapshots(snapshots_id, volume_id):
        try:
            delete_image(snapshots_id)
        except Exception, ex:
            LOG.error('   Delete image failed!\n %s' % ex)
        update_snapshots_db(snapshots_id, volume_id)
        return True
Example #59
def run_command(cmd):
    reval = None
    run_cmd = 'source /root/openrc;' + cmd
    (status, out) = commands.getstatusoutput(run_cmd)
    if status != 0:
        LOG.error("run %s error: %s" % (run_cmd, out))
    else:
        reval = out
    return reval
Example #60
def delete_snapshots(snapshots_id, volume_id):
    LOG.info('Deleting snapshot %s ...' % snapshots_id)
    if delete_backend_snapshots(snapshots_id, volume_id):
        try:
            delete_image(snapshots_id)
        except Exception, ex:
            LOG.error('   Delete image failed!\n %s' % ex)
        update_snapshots_db(snapshots_id, volume_id)
        return True