Example #1
def reset_device_n3000(pci_addr):
    # Reset the N3000 FPGA at the specified PCI address.
    try:
        # Build up the command to perform the reset.
        # Note the hack to work around OPAE tool locale issues
        cmd = ("docker run -t --privileged -e LC_ALL=en_US.UTF-8 "
               "-e LANG=en_US.UTF-8 " + OPAE_IMG +
               " rsu bmcimg " + pci_addr)

        # Issue the command to perform the firmware update.
        subprocess.check_output(shlex.split(cmd),
                                stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as exc:
        # "docker run" return code will be:
        #    125 if the error is with Docker daemon itself
        #    126 if the contained command cannot be invoked
        #    127 if the contained command cannot be found
        #    Exit code of contained command otherwise
        msg = ("Failed to reset device %s, "
               "return code is %d, command output: %s." %
               (pci_addr, exc.returncode,
                exc.output.decode('utf-8')))
        LOG.error(msg)
        LOG.error("Check for intel-max10 kernel logs.")
        raise exception.SysinvException(msg)
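A minimal usage sketch, assuming the module-level OPAE_IMG constant names the OPAE tooling container image (the tag and PCI address below are hypothetical):

# Hypothetical values; the real OPAE_IMG and PCI address come from the
# surrounding module and from hardware inventory.
OPAE_IMG = "docker.io/starlingx/n3000-opae:stx.4.0-v1.0.1"

reset_device_n3000("0000:b3:00.0")  # runs "rsu bmcimg 0000:b3:00.0" in the container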
Example #2
def write_device_image_n3000(filename, pci_addr):
    # Write the firmware image to the FPGA at the specified PCI address.
    # We're assuming that the image update tools will catch the scenario
    # where the image is not compatible with the device.
    try:
        # Build up the command to perform the firmware update.
        # Note the hack to work around OPAE tool locale issues
        cmd = ("docker run -t --privileged -e LC_ALL=en_US.UTF-8 "
               "-e LANG=en_US.UTF-8 -v " + DEVICE_IMAGE_CACHE_DIR +
               ":" + "/mnt/images " + OPAE_IMG +
               " fpgasupdate -y --log-level debug /mnt/images/" +
               filename + " " + pci_addr)

        # Issue the command to perform the firmware update.
        subprocess.check_output(shlex.split(cmd),
                                stderr=subprocess.STDOUT)
        # TODO: switch to subprocess.Popen, parse the output and send
        #       progress updates.
    except subprocess.CalledProcessError as exc:
        # Check the return code, send completion info to sysinv-conductor.
        # "docker run" return code will be:
        #    125 if the error is with Docker daemon itself
        #    126 if the contained command cannot be invoked
        #    127 if the contained command cannot be found
        #    Exit code of contained command otherwise
        msg = ("Failed to update device image %s for device %s, "
               "return code is %d, command output: %s." %
               (filename, pci_addr, exc.returncode,
                exc.output.decode('utf-8')))
        LOG.error(msg)
        LOG.error("Check for intel-max10 kernel logs.")
        raise exception.SysinvException(msg)
Example #3
    def _check_platform_backup_partition(self):
        """Check that the platform-backup partition is the correct size/type"""

        args = ['/usr/bin/validate-platform-backup.sh']
        try:
            subprocess.check_output(args, stderr=subprocess.STDOUT)  # pylint: disable=not-callable
        except subprocess.CalledProcessError as exc:
            LOG.error("Call to %s returned %s and %s" % (args, exc.returncode, exc.output))
            return False

        return True
Example #4
def watchdog_action(action):
    if action not in ["stop", "start"]:
        LOG.warn("watchdog_action called with invalid action: %s", action)
        return
    try:
        # Build up the command to perform the action.
        cmd = ["systemctl", action, "hostw"]

        # Issue the command to stop/start the watchdog
        subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as exc:
        msg = ("Failed to %s hostw service, "
                 "return code is %d, command output: %s." %
                 (action, exc.returncode, exc.output))
        LOG.warn(msg)
Example #5
    def get_pci_sriov_vf_driver_name(self, pciaddr, sriov_vfs_pci_address):
        vf_driver = None
        for addr in sriov_vfs_pci_address:

            try:
                with open(os.devnull, "w") as fnull:
                    output = subprocess.check_output(['lspci', '-vmmks', addr],
                                                     stderr=fnull)
            except Exception as e:
                LOG.error(
                    "Error getting PCI data for SR-IOV "
                    "VF address %s: %s", addr, e)
                continue

            for line in output.split('\n'):
                pci_attr = shlex.split(line.strip())
                if (pci_attr and len(pci_attr) == 2
                        and 'Module' in pci_attr[0]):
                    vf_driver = pci_attr[1]
                    break

            # All VFs have the same driver per device.
            if vf_driver:
                break

        return vf_driver
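The loop keys off the "Module:" attribute that lspci -vmmks emits. A self-contained sketch of that parsing step, using abridged, hypothetical output:

import shlex

# Abridged, hypothetical "lspci -vmmks <addr>" output for one VF.
sample = "Slot:\t0000:18:02.0\nClass:\tEthernet controller\nModule:\tiavf\n"

vf_driver = None
for line in sample.split('\n'):
    pci_attr = shlex.split(line.strip())
    if pci_attr and len(pci_attr) == 2 and 'Module' in pci_attr[0]:
        vf_driver = pci_attr[1]
        break
print(vf_driver)  # -> iavf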
Example #6
def create_certificate(keyout=None, certout=None):
    if not keyout:
        keyout, certout = get_certificate_paths()
    if not keyout:
        raise Exception('Unable to locate TLS certificate path automatically')
    shortname = socket.gethostname().split('.')[0]
    longname = socket.getfqdn()
    subprocess.check_call([
        'openssl', 'ecparam', '-name', 'secp384r1', '-genkey', '-out', keyout
    ])
    san = ['IP:{0}'.format(x) for x in get_ip_addresses()]
    # It is incorrect to put IP addresses as DNS type.  However, some
    # non-compliant clients fail when the addresses appear only as IP entries
    san.extend(['DNS:{0}'.format(x) for x in get_ip_addresses()])
    san.append('DNS:{0}'.format(shortname))
    san.append('DNS:{0}'.format(longname))
    san = ','.join(san)
    sslcfg = get_openssl_conf_location()
    tmpconfig = tempfile.mktemp()
    shutil.copy2(sslcfg, tmpconfig)
    try:
        with open(tmpconfig, 'a') as cfgfile:
            cfgfile.write(
                '\n[SAN]\nbasicConstraints = CA:true\nsubjectAltName={0}'.
                format(san))
        subprocess.check_call([
            'openssl', 'req', '-new', '-x509', '-key', keyout, '-days', '7300',
            '-out', certout, '-subj', '/CN={0}'.format(longname),
            '-extensions', 'SAN', '-config', tmpconfig
        ])
    finally:
        os.remove(tmpconfig)
    # Could restart the webserver now?
    fname = '/var/lib/confluent/public/site/tls/{0}.pem'.format(
        collective.get_myname())
    try:
        os.makedirs(os.path.dirname(fname))
    except OSError as e:
        if e.errno != 17:
            raise
    shutil.copy2(certout, fname)
    hv = subprocess.check_output(
        ['openssl', 'x509', '-in', certout, '-hash', '-noout'])
    if not isinstance(hv, str):
        hv = hv.decode('utf8')
    hv = hv.strip()
    hashname = '/var/lib/confluent/public/site/tls/{0}.0'.format(hv)
    certname = '{0}.pem'.format(collective.get_myname())
    for currname in os.listdir('/var/lib/confluent/public/site/tls/'):
        currname = os.path.join('/var/lib/confluent/public/site/tls/',
                                currname)
        if currname.endswith('.0'):
            try:
                realname = os.readlink(currname)
                if realname == certname:
                    os.unlink(currname)
            except OSError:
                pass
    os.symlink(certname, hashname)
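For reference, the SAN string assembled above lists every address twice, once as an IP entry and once as a DNS entry; with one hypothetical address and hostname it comes out as follows:

addrs = ['10.0.0.5']                    # hypothetical get_ip_addresses() result
shortname, longname = 'host', 'host.example.com'
san = ','.join(['IP:{0}'.format(x) for x in addrs] +
               ['DNS:{0}'.format(x) for x in addrs] +
               ['DNS:{0}'.format(shortname), 'DNS:{0}'.format(longname)])
print(san)  # -> IP:10.0.0.5,DNS:10.0.0.5,DNS:host,DNS:host.example.com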
Example #7
    def _get_kubernetes_join_cmd(self, host):
        # The token expires after 24 hours and is needed for a reinstall.
        # The puppet manifest handles the case where the node already exists.
        try:
            join_cmd_additions = ''
            if host.personality == constants.CONTROLLER:
                # Upload the certificates used during kubeadm join
                # The cert key will be printed in the last line of the output

                # We will create a temp file with the kubeadm config
                # We need this because the kubeadm config could have changed
                # since bootstrap. Reading the kubeadm config each time
                # it is needed ensures we are not using stale data

                fd, temp_kubeadm_config_view = tempfile.mkstemp(
                    dir='/tmp', suffix='.yaml')
                with os.fdopen(fd, 'w') as f:
                    cmd = ['kubeadm', KUBECONFIG, 'config', 'view']
                    subprocess.check_call(cmd, stdout=f)  # pylint: disable=not-callable

                # We will use a custom key to encrypt kubeadm certificates
                # to make sure all hosts decrypt using the same key

                key = str(keyring.get_password(CERTIFICATE_KEY_SERVICE,
                        CERTIFICATE_KEY_USER))
                with open(temp_kubeadm_config_view, "a") as f:
                    f.write("---\r\napiVersion: kubeadm.k8s.io/v1beta2\r\n"
                            "kind: InitConfiguration\r\ncertificateKey: "
                            "{}".format(key))

                cmd = ['kubeadm', 'init', 'phase', 'upload-certs',
                       '--upload-certs', '--config',
                       temp_kubeadm_config_view]

                subprocess.check_call(cmd)  # pylint: disable=not-callable
                join_cmd_additions = \
                    " --control-plane --certificate-key %s" % key
                os.unlink(temp_kubeadm_config_view)

                # Configure the IP address of the API Server for the controller host.
                # If not set, the default network interface will be used, which does
                # not ensure it will be the Cluster IP address of this host.
                host_cluster_ip = self._get_host_cluster_address(host)
                join_cmd_additions += \
                    " --apiserver-advertise-address %s" % host_cluster_ip

            cmd = ['kubeadm', KUBECONFIG, 'token', 'create', '--print-join-command',
                   '--description', 'Bootstrap token for %s' % host.hostname]
            join_cmd = subprocess.check_output(cmd)  # pylint: disable=not-callable
            join_cmd_additions += \
                " --cri-socket /var/run/containerd/containerd.sock"
            join_cmd = join_cmd.strip() + join_cmd_additions
            LOG.info('get_kubernetes_join_cmd join_cmd=%s' % join_cmd)
        except Exception:
            LOG.exception("Exception generating bootstrap token")
            raise exception.SysinvException(
                'Failed to generate bootstrap token')

        return join_cmd
Example #8
def get_openstack_pending_install_charts():
    try:
        return subprocess.check_output(
            ['helm', '--kubeconfig', '/etc/kubernetes/admin.conf',
             'list', '--namespace', 'openstack', '--pending'])
    except Exception as e:
        raise exception.HelmTillerFailure(
            reason="Failed to obtain pending charts list: %s" % e)
Example #9
def fixed_dylib(dylibs, environ):
    for _dylib in dylibs:
        _dylib = _dylib.format(**environ)
        logger.debug("fixed dylib `%s'" % _dylib)
        cmd = ['otool', '-L', _dylib]
        for line in subprocess.check_output(cmd).replace('\t', '').split(os.linesep):
            if line.startswith('@rpath'):
                logger.debug('dylib info: %s' % line)
                old_rpath = line.split()[0]
                filename = old_rpath.split(os.path.sep)[-1]
                new_rpath = os.path.join(environ['DIR_BUNDLES'], 'lib', filename)
                logger.debug('old rpath: %s' % old_rpath)
                logger.debug('new rpath: %s' % new_rpath)
                logger.debug('verify new rpath: %s' % os.path.isfile(new_rpath))
                cmd = ['install_name_tool', '-change', old_rpath, new_rpath, _dylib]
                logger.debug('run command: %s' % ' '.join(cmd))
                logger.debug(subprocess.check_output(cmd))
Example #10
def assure_tls_ca():
    keyout, certout = ('/etc/confluent/tls/cakey.pem',
                       '/etc/confluent/tls/cacert.pem')
    if os.path.exists(certout):
        return
    try:
        os.makedirs('/etc/confluent/tls')
    except OSError as e:
        if e.errno != 17:
            raise
    sslcfg = get_openssl_conf_location()
    tmpconfig = tempfile.mktemp()
    shutil.copy2(sslcfg, tmpconfig)
    subprocess.check_call([
        'openssl', 'ecparam', '-name', 'secp384r1', '-genkey', '-out', keyout
    ])
    try:
        with open(tmpconfig, 'a') as cfgfile:
            cfgfile.write('\n[CACert]\nbasicConstraints = CA:true\n')
        subprocess.check_call([
            'openssl', 'req', '-new', '-x509', '-key', keyout, '-days',
            '27300', '-out', certout, '-subj',
            '/CN=Confluent TLS Certificate authority ({0})'.format(
                socket.gethostname()), '-extensions', 'CACert', '-config',
            tmpconfig
        ])
    finally:
        os.remove(tmpconfig)
    # Could restart the webserver now?
    fname = '/var/lib/confluent/public/site/tls/{0}.pem'.format(
        collective.get_myname())
    try:
        os.makedirs(os.path.dirname(fname))
    except OSError as e:
        if e.errno != 17:
            raise
    shutil.copy2('/etc/confluent/tls/cacert.pem', fname)
    hv = subprocess.check_output([
        'openssl', 'x509', '-in', '/etc/confluent/tls/cacert.pem', '-hash',
        '-noout'
    ])
    if not isinstance(hv, str):
        hv = hv.decode('utf8')
    hv = hv.strip()
    hashname = '/var/lib/confluent/public/site/tls/{0}.0'.format(hv)
    certname = '{0}.pem'.format(collective.get_myname())
    for currname in os.listdir('/var/lib/confluent/public/site/tls/'):
        currname = os.path.join('/var/lib/confluent/public/site/tls/',
                                currname)
        if currname.endswith('.0'):
            try:
                realname = os.readlink(currname)
                if realname == certname:
                    os.unlink(currname)
            except OSError:
                pass
    os.symlink(certname, hashname)
Example #11
@contextlib.contextmanager  # requires "import contextlib"; restores with-statement use
def mounted(remote_dir, local_dir):
    local_dir = os.path.abspath(local_dir)
    try:
        subprocess.check_output(["/bin/nfs-mount", remote_dir, local_dir],
                                stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        raise OSError(("mount operation failed: "
                       "command={}, retcode={}, output='{}'").format(
                           e.cmd, e.returncode, e.output))
    try:
        yield
    finally:
        try:
            subprocess.check_output(["/bin/umount", local_dir],
                                    stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            raise OSError(("umount operation failed: "
                           "command={}, retcode={}, output='{}'").format(
                               e.cmd, e.returncode, e.output))
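With the contextlib.contextmanager decorator in place, the helper drives a with-statement; a usage sketch with hypothetical paths:

# The share stays mounted only for the duration of the block and is
# unmounted even if the body raises.
with mounted("fileserver:/export/data", "/mnt/data"):
    with open("/mnt/data/report.txt") as f:  # hypothetical file on the share
        print(f.read())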
Example #12
def _iptables(*args, **kwargs):
    # NOTE(dtantsur): -w flag makes it wait for xtables lock
    cmd = BASE_COMMAND + args
    ignore = kwargs.pop('ignore', False)
    LOG.debug('Running iptables %s', args)
    kwargs['stderr'] = subprocess.STDOUT
    try:
        subprocess.check_output(cmd, **kwargs)
    except subprocess.CalledProcessError as exc:
        output = exc.output.replace('\n', '. ')
        if ignore:
            LOG.debug('Ignoring failed iptables %(args)s: %(output)s', {
                'args': args,
                'output': output
            })
        else:
            LOG.error('iptables %(iptables)s failed: %(exc)s', {
                'iptables': args,
                'exc': output
            })
            raise
Example #13
def n3000_img_accessible():
    cmd = 'docker image list "%s"  --format "{{.Repository}}:{{.Tag}}"' % \
            constants.OPAE_IMG
    items = subprocess.check_output(
        shlex.split(cmd),  # pylint: disable=not-callable
        stderr=subprocess.STDOUT)
    for line in items.splitlines():
        if line == constants.OPAE_IMG:
            LOG.info('%s image found' % constants.OPAE_IMG)
            return True

    LOG.info("%s image not found." % constants.OPAE_IMG)
    return False
Example #14
def process_generation(ifname, ofname):
    global have_printed_header
    gen_num = int(re.findall(r'gen(\d+)', ifname)[0])
    gen_num2 = int(re.findall(r'gen(\d+)', ofname)[0])
    assert gen_num == gen_num2
    args = [MonKeyTestPath,'-1',ifname,'-2',ofname]
    mkout = subprocess.check_output(args)
    mklines = mkout.splitlines()
    if not have_printed_header:
        sys.stdout.write("generation\t"+mklines[0]+'\n')
        have_printed_header = True
    for line in mklines[1:]:
        if line == '':
            continue
        sys.stdout.write("%i\t%s\n" % (gen_num, line))
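The function assumes both filenames embed the same generation number in a "gen<N>" token; a minimal illustration of the extraction, with hypothetical names:

import re

# Both names carry "gen42", so the assertion in process_generation holds.
print(int(re.findall(r'gen(\d+)', 'run_gen42_in.txt')[0]))   # -> 42
print(int(re.findall(r'gen(\d+)', 'run_gen42_out.txt')[0]))  # -> 42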
Example #15
def get_cgts_vg_free_space():
    """Determine free space in cgts-vg"""

    try:
        # Determine space in cgts-vg in GiB
        vg_free_str = subprocess.check_output(
            ['vgdisplay', '-C', '--noheadings', '--nosuffix',
             '-o', 'vg_free', '--units', 'g', 'cgts-vg'],
            close_fds=True).rstrip()
        cgts_vg_free = int(float(vg_free_str))
    except subprocess.CalledProcessError:
        LOG.error("Command vgdisplay failed")
        raise Exception("Command vgdisplay failed")

    return cgts_vg_free
Example #16
    def _get_host_join_command(self, host):
        config = {}
        if not utils.is_initial_config_complete():
            return config

        # The token expires after 24 hours and is needed for a reinstall.
        # The puppet manifest handles the case where the node already exists.
        try:
            join_cmd_additions = ''
            if host.personality == constants.CONTROLLER:
                # Upload the certificates used during kubeadm join
                # The cert key will be printed in the last line of the output
                cmd = [
                    'kubeadm', 'init', 'phase', 'upload-certs',
                    '--upload-certs', '--config',
                    '/etc/kubernetes/kubeadm.yaml'
                ]
                cmd_output = subprocess.check_output(cmd)
                cert_key = cmd_output.strip().split('\n')[-1]
                join_cmd_additions = " --control-plane --certificate-key %s" % cert_key

            cmd = [
                'kubeadm', 'token', 'create', '--print-join-command',
                '--description',
                'Bootstrap token for %s' % host.hostname
            ]
            join_cmd = subprocess.check_output(cmd)
            join_cmd_additions += " --cri-socket /var/run/containerd/containerd.sock"
            join_cmd = join_cmd.strip() + join_cmd_additions
        except subprocess.CalledProcessError:
            raise exception.SysinvException(
                'Failed to generate bootstrap token')

        config.update({'platform::kubernetes::params::join_cmd': join_cmd})

        return config
Example #17
    def _get_keyring_password(self, service, user, pw_format=None):
        password = keyring.get_password(service, user)
        if not password:
            if pw_format == common.PASSWORD_FORMAT_CEPH:
                try:
                    cmd = ['ceph-authtool', '--gen-print-key']
                    password = subprocess.check_output(cmd).strip()
                except subprocess.CalledProcessError:
                    raise exception.SysinvException(
                        'Failed to generate ceph key')
            else:
                password = self._generate_random_password()
            keyring.set_password(service, user, password)
        # get_password() returns in unicode format, which leads to YAML
        # that Armada doesn't like.  Converting to UTF-8 is safe because
        # we generated the password originally.
        return password.encode('utf8', 'strict')
Example #18
def get_ip_addresses():
    lines = subprocess.check_output('ip addr'.split(' '))
    if not isinstance(lines, str):
        lines = lines.decode('utf8')
    for line in lines.split('\n'):
        if line.startswith('    inet6 '):
            line = line.replace('    inet6 ', '').split('/')[0]
            if line == '::1':
                continue
        elif line.startswith('    inet '):
            line = line.replace('    inet ', '').split('/')[0]
            if line == '127.0.0.1':
                continue
            if line.startswith('169.254.'):
                continue
        else:
            continue
        yield line
Example #19
def fetch_and_store_one((index, row)):
    try:
        sha256 = subprocess.check_output([
            '/bin/sh', '-c',
            'source /etc/profile; nix-prefetch-url {}'.format(row['url']),
        ], stderr=open('/dev/null', 'w')).strip()  # open for writing so stderr can be discarded

        meta = dep_meta.SDist(
            pypi_name=row['pypi_name'],
            version=row['version'],
            url=row['url'],
        )

        with open(os.path.join(row['store_root'], sha256), 'w') as f:
            f.write(meta.SerializeToString())

    except subprocess.CalledProcessError as e:
        STATS['errors'] += 1
        print "error fetching", e, row
Example #20
def get_n3000_devices():
    # First get the PCI addresses of each supported FPGA device
    cmd = ["lspci", "-Dm", "-d " + constants.N3000_VENDOR + ":" +
           constants.N3000_DEVICE]

    try:
        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as exc:
        msg = ("Failed to get pci devices with vendor %s and device %s, "
               "return code is %d, command output: %s." %
               (constants.N3000_VENDOR, constants.N3000_DEVICE, exc.returncode, exc.output))
        LOG.warn(msg)
        raise exception.SysinvException(msg)

    # Parse the output of the lspci command and grab the PCI address
    fpga_addrs = []
    for line in output.splitlines():
        line = shlex.split(line.strip())
        fpga_addrs.append(line[0])
    return fpga_addrs
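lspci -Dm prints one record per device with the full domain:bus:device.function address as the first field; a sketch of the extraction with a hypothetical record:

import shlex

# Hypothetical "lspci -Dm" record for an N3000 FPGA.
line = '0000:b3:00.0 "Processing accelerators" "Intel Corporation" "Device 0b30"'
print(shlex.split(line.strip())[0])  # -> 0000:b3:00.0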
Example #21
def assure_agent():
    if sshver() <= 7.6:
        return False
    global agent_pid
    if agent_pid is None:
        sai = subprocess.check_output(['ssh-agent'])
        for line in sai.split(b'\n'):
            if b';' not in line:
                continue
            line, _ = line.split(b';', 1)
            if b'=' not in line:
                continue
            k, v = line.split(b'=', 1)
            if not isinstance(k, str):
                k = k.decode('utf8')
                v = v.decode('utf8')
            if k == 'SSH_AGENT_PID':
                agent_pid = v
            os.environ[k] = v
    return True
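The loop parses the Bourne-shell environment lines that ssh-agent prints on stdout; a self-contained sketch with hypothetical values:

# Hypothetical ssh-agent output; each relevant line is "KEY=value; ...".
sai = (b'SSH_AUTH_SOCK=/tmp/ssh-abc123/agent.41; export SSH_AUTH_SOCK;\n'
       b'SSH_AGENT_PID=42; export SSH_AGENT_PID;\n'
       b'echo Agent pid 42;\n')
for line in sai.split(b'\n'):
    if b';' not in line:
        continue
    line, _ = line.split(b';', 1)
    if b'=' in line:
        print(line.split(b'=', 1))
# -> [b'SSH_AUTH_SOCK', b'/tmp/ssh-abc123/agent.41']
# -> [b'SSH_AGENT_PID', b'42']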
Example #22
    def thinpools_in_vg(self, vg, cinder_device=None):
        """Return number of thinpools in the specified vg. """
        try:
            command = ['vgs', '--noheadings', '-o', 'lv_name', vg]
            if cinder_device:
                if vg == constants.LVG_CINDER_VOLUMES:
                    global_filter = 'devices/global_filter=["a|' + \
                                    cinder_device + '|","r|.*|"]'
                    command = command + ['--config', global_filter]
            output = subprocess.check_output(command)
        except Exception as e:
            self.handle_exception("Could not retrieve vgdisplay "
                                  "information: %s" % e)
            output = ""
        thinpools = 0
        for line in output.splitlines():
            # This makes some assumptions; the suffix is defined in nova.
            if constants.LVM_POOL_SUFFIX in line:
                thinpools += 1

        return thinpools
Example #23
    def execute_command(name, command, cwd=None, shell=False):
        Logger().write(LogVerbosity.Info, "Starting " + name)
        start = current_time()
        try:
            if shell and sys.platform in ("linux", "linux2"):
                command = " ".join(command)

            result = subprocess.check_output(command,
                                             universal_newlines=True,
                                             cwd=cwd,
                                             shell=shell)
            Logger().write(LogVerbosity.Debug, name + " output: " + result)
            Logger().write(
                LogVerbosity.Info,
                name + " successful in " + str(current_time() - start) + "ms")
            return True
        except subprocess.CalledProcessError as e:
            Logger().write(
                LogVerbosity.Info, name + " failed in " +
                str(current_time() - start) + "ms: " + e.output)
            return False
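A usage sketch (the name and command are illustrative, and the helper is assumed to be reachable as a plain function): it returns True when the command exits 0 and False on a non-zero exit, logging the output either way.

ok = execute_command("list tmp", ["ls", "-l", "/tmp"], shell=False)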
Example #24
def sync_list_to_node(sl, node, suffixes):
    targdir = tempfile.mkdtemp('.syncto{}'.format(node))
    output = ''
    try:
        for ent in sl.replacemap:
            stage_ent(sl.replacemap, ent, targdir)
        if 'append' in suffixes:
            while suffixes['append'] and suffixes['append'][0] == '/':
                suffixes['append'] = suffixes['append'][1:]
            for ent in sl.appendmap:
                stage_ent(sl.appendmap, ent,
                          os.path.join(targdir, suffixes['append']))
        if 'merge' in suffixes:
            while suffixes['merge'] and suffixes['merge'][0] == '/':
                suffixes['merge'] = suffixes['merge'][1:]
            for ent in sl.mergemap:
                stage_ent(sl.mergemap, ent,
                          os.path.join(targdir, suffixes['merge']))
        sshutil.prep_ssh_key('/etc/confluent/ssh/automation')
        output = subprocess.check_output(
            ['rsync', '-rvL', targdir + '/', 'root@{}:/'.format(node)])
    finally:
        shutil.rmtree(targdir)
    return output
Example #25
def fix_crushmap(dbapi=None):
    """ Set Ceph's CRUSH Map based on storage model """
    def _create_crushmap_flag_file():
        try:
            open(crushmap_flag_file, "w").close()
        except IOError as e:
            LOG.warn(('Failed to create flag file: {}. '
                      'Reason: {}').format(crushmap_flag_file, e))

    if not dbapi:
        dbapi = pecan.request.dbapi
    crushmap_flag_file = os.path.join(constants.SYSINV_CONFIG_PATH,
                                      constants.CEPH_CRUSH_MAP_APPLIED)

    if not os.path.isfile(crushmap_flag_file):
        _operator = CephApiOperator()
        if not cutils.is_aio_system(dbapi):
            # At least two monitors have to be running on a standard deployment,
            # otherwise don't even try to load the crushmap.
            active_mons, required_mons, __ = _operator.get_monitors_status(
                dbapi)
            if required_mons > active_mons:
                LOG.info("Not enough monitors yet available to fix crushmap.")
                return False

        # For AIO system, crushmap should be already loaded through puppet.
        # If it was loaded, set the crushmap flag to avoid loading it twice.
        default_ceph_tier_name = constants.SB_TIER_DEFAULT_NAMES[
            constants.SB_TIER_TYPE_CEPH] + constants.CEPH_CRUSH_TIER_SUFFIX
        rule_is_present, __, __ = _operator._crush_rule_status(
            default_ceph_tier_name)
        if rule_is_present:
            _create_crushmap_flag_file()
            return False

        try:
            # For AIO system, crushmap should already be loaded through
            # puppet. If for any reason it is not, as a precaution we set
            # the crushmap here.

            # Check if a backup crushmap exists. If it does, that means
            # it is during restore. We need to restore the backup crushmap
            # instead of generating it. For non-AIO system, it is stored in
            # /opt/platform/sysinv which is a drbd fs. For AIO systems because
            # when unlocking controller-0 for the first time, the crushmap is
            # set thru ceph puppet when /opt/platform is not mounted yet, we
            # store the crushmap in /etc/sysinv.

            if cutils.is_aio_system(dbapi):
                backup = os.path.join(
                    constants.CEPH_CRUSH_MAP_BACKUP_DIR_FOR_AIO,
                    constants.CEPH_CRUSH_MAP_BACKUP)
            else:
                backup = os.path.join(constants.SYSINV_CONFIG_PATH,
                                      constants.CEPH_CRUSH_MAP_BACKUP)
            crushmap_bin = "/etc/sysinv/crushmap.bin"
            if os.path.exists(backup):
                shutil.copyfile(backup, crushmap_bin)
            else:
                stor_model = get_ceph_storage_model(dbapi)
                if stor_model == constants.CEPH_AIO_SX_MODEL:
                    crushmap_txt = "/etc/sysinv/crushmap-aio-sx.txt"
                elif stor_model == constants.CEPH_CONTROLLER_MODEL:
                    crushmap_txt = "/etc/sysinv/crushmap-controller-model.txt"
                elif stor_model == constants.CEPH_STORAGE_MODEL:
                    crushmap_txt = "/etc/sysinv/crushmap-storage-model.txt"
                else:
                    reason = "Error: Undefined ceph storage model %s" % stor_model
                    raise exception.CephCrushMapNotApplied(reason=reason)
                LOG.info("Updating crushmap with: %s" % crushmap_txt)

                # Compile crushmap
                subprocess.check_output("crushtool -c %s "
                                        "-o %s" % (crushmap_txt, crushmap_bin),
                                        stderr=subprocess.STDOUT,
                                        shell=True)
            # Set crushmap
            subprocess.check_output("ceph osd setcrushmap -i %s" %
                                    crushmap_bin,
                                    stderr=subprocess.STDOUT,
                                    shell=True)

            if os.path.exists(backup):
                os.remove(backup)
        except (IOError, subprocess.CalledProcessError) as e:
            # May not be critical, depends on where this is called.
            reason = "Error: %s Output: %s" % (str(e), e.output)
            raise exception.CephCrushMapNotApplied(reason=reason)

        _create_crushmap_flag_file()

        return True
    return False
Example #26
def is_drbd_fs_syncing():
    output = subprocess.check_output("drbd-overview", stderr=subprocess.STDOUT)  # pylint: disable=not-callable
    LOG.info("is_drbd_fs_syncing returned '%s'" % output)
    if "sync\'ed" in output:
        return True
    return False
Example #27
def handle_request(env, start_response):
    global currtz
    global keymap
    global currlocale
    global currtzvintage
    configmanager.check_quorum()
    nodename = env.get('HTTP_CONFLUENT_NODENAME', None)
    apikey = env.get('HTTP_CONFLUENT_APIKEY', None)
    if not (nodename and apikey):
        start_response('401 Unauthorized', [])
        yield 'Unauthorized'
        return
    cfg = configmanager.ConfigManager(None)
    eak = cfg.get_node_attributes(nodename, 'crypted.selfapikey').get(
        nodename, {}).get('crypted.selfapikey', {}).get('hashvalue', None)
    if not eak:
        start_response('401 Unauthorized', [])
        yield 'Unauthorized'
        return
    salt = '$'.join(eak.split('$', 3)[:-1]) + '$'
    if crypt.crypt(apikey, salt) != eak:
        start_response('401 Unauthorized', [])
        yield 'Unauthorized'
        return
    retype = env.get('HTTP_ACCEPT', 'application/yaml')
    isgeneric = False
    if retype == '*/*':
        isgeneric = True
        retype = 'application/yaml'
    if retype == 'application/yaml':
        dumper = yamldump
    elif retype == 'application/json':
        dumper = json.dumps
    else:
        start_response('406 Not supported', [])
        yield 'Unsupported content type in ACCEPT: ' + retype
        return
    if env['REQUEST_METHOD'] not in (
            'HEAD', 'GET') and 'CONTENT_LENGTH' in env and int(
                env['CONTENT_LENGTH']) > 0:
        reqbody = env['wsgi.input'].read(int(env['CONTENT_LENGTH']))
    if env['PATH_INFO'] == '/self/deploycfg':
        if 'HTTP_CONFLUENT_MGTIFACE' in env:
            ncfg = netutil.get_nic_config(cfg,
                                          nodename,
                                          ifidx=env['HTTP_CONFLUENT_MGTIFACE'])
        else:
            myip = env.get('HTTP_X_FORWARDED_HOST', None)
            if ']' in myip:
                myip = myip.split(']', 1)[0]
            else:
                myip = myip.split(':', 1)[0]
            myip = myip.replace('[', '').replace(']', '')
            ncfg = netutil.get_nic_config(cfg, nodename, serverip=myip)
        if ncfg['prefix']:
            ncfg['ipv4_netmask'] = netutil.cidr_to_mask(ncfg['prefix'])
        deployinfo = cfg.get_node_attributes(
            nodename, ('deployment.*', 'console.method', 'crypted.*', 'dns.*'))
        deployinfo = deployinfo.get(nodename, {})
        profile = deployinfo.get('deployment.pendingprofile',
                                 {}).get('value', '')
        ncfg['encryptboot'] = deployinfo.get('deployment.encryptboot',
                                             {}).get('value', None)
        if ncfg['encryptboot'] in ('', 'none'):
            ncfg['encryptboot'] = None
        ncfg['profile'] = profile
        protocol = deployinfo.get('deployment.useinsecureprotocols',
                                  {}).get('value', 'never')
        ncfg['textconsole'] = bool(
            deployinfo.get('console.method', {}).get('value', None))
        if protocol == 'always':
            ncfg['protocol'] = 'http'
        else:
            ncfg['protocol'] = 'https'
        ncfg['rootpassword'] = deployinfo.get('crypted.rootpassword',
                                              {}).get('hashvalue', None)
        ncfg['grubpassword'] = deployinfo.get('crypted.grubpassword',
                                              {}).get('grubhashvalue', None)
        if currtzvintage and currtzvintage > (time.time() - 30.0):
            ncfg['timezone'] = currtz
        else:
            langinfo = subprocess.check_output(['localectl',
                                                'status']).split(b'\n')
            for line in langinfo:
                line = line.strip()
                if line.startswith(b'System Locale:'):
                    ccurrlocale = line.split(b'=')[-1]
                    if not ccurrlocale:
                        continue
                    if not isinstance(ccurrlocale, str):
                        ccurrlocale = ccurrlocale.decode('utf8')
                    if ccurrlocale == 'n/a':
                        continue
                    currlocale = ccurrlocale
                elif line.startswith(b'VC Keymap:'):
                    ckeymap = line.split(b':')[-1]
                    ckeymap = ckeymap.strip()
                    if not ckeymap:
                        continue
                    if not isinstance(ckeymap, str):
                        ckeymap = ckeymap.decode('utf8')
                    if ckeymap == 'n/a':
                        continue
                    keymap = ckeymap
            tdc = subprocess.check_output(['timedatectl']).split(b'\n')
            for ent in tdc:
                ent = ent.strip()
                if ent.startswith(b'Time zone:'):
                    currtz = ent.split(b': ', 1)[1].split(b'(', 1)[0].strip()
                    if not isinstance(currtz, str):
                        currtz = currtz.decode('utf8')
                    currtzvintage = time.time()
                    ncfg['timezone'] = currtz
                    break
        ncfg['locale'] = currlocale
        ncfg['keymap'] = keymap
        ncfg['nameservers'] = []
        for dns in deployinfo.get('dns.servers', {}).get('value',
                                                         '').split(','):
            ncfg['nameservers'].append(dns)
        dnsdomain = deployinfo.get('dns.domain', {}).get('value', None)
        ncfg['dnsdomain'] = dnsdomain
        start_response('200 OK', (('Content-Type', retype), ))
        yield dumper(ncfg)
    elif env['PATH_INFO'] == '/self/sshcert':
        if not sshutil.ca_exists():
            start_response('500 Unconfigured', ())
            yield 'CA is not configured on this system (run ...)'
            return
        dnsinfo = cfg.get_node_attributes(nodename, ('dns.*'))
        dnsinfo = dnsinfo.get(nodename, {}).get('dns.domain',
                                                {}).get('value', None)
        if dnsinfo in nodename:
            dnsinfo = ''
        cert = sshutil.sign_host_key(reqbody, nodename, [dnsinfo])
        start_response('200 OK', (('Content-Type', 'text/plain'), ))
        yield cert
    elif env['PATH_INFO'] == '/self/nodelist':
        nodes = set(cfg.list_nodes())
        domaininfo = cfg.get_node_attributes(nodes, 'dns.domain')
        for node in list(util.natural_sort(nodes)):
            domain = domaininfo.get(node, {}).get('dns.domain',
                                                  {}).get('value', None)
            if domain and domain not in node:
                nodes.add('{0}.{1}'.format(node, domain))
        for mgr in configmanager.list_collective():
            nodes.add(mgr)
            if domain and domain not in mgr:
                nodes.add('{0}.{1}'.format(mgr, domain))
        myname = collective.get_myname()
        nodes.add(myname)
        if domain and domain not in myname:
            nodes.add('{0}.{1}'.format(myname, domain))
        if isgeneric:
            start_response('200 OK', (('Content-Type', 'text/plain'), ))
            for node in util.natural_sort(nodes):
                yield node + '\n'
        else:
            start_response('200 OK', (('Content-Type', retype), ))
            yield dumper(sorted(nodes))
    elif env['PATH_INFO'] == '/self/updatestatus':
        update = yaml.safe_load(reqbody)
        if update['status'] == 'staged':
            targattr = 'deployment.stagedprofile'
        elif update['status'] == 'complete':
            targattr = 'deployment.profile'
        else:
            raise Exception('Unknown update status request')
        currattr = cfg.get_node_attributes(nodename,
                                           'deployment.*').get(nodename, {})
        pending = None
        if targattr == 'deployment.profile':
            pending = currattr.get('deployment.stagedprofile',
                                   {}).get('value', '')
        if not pending:
            pending = currattr.get('deployment.pendingprofile',
                                   {}).get('value', '')
        updates = {}
        if pending:
            updates['deployment.pendingprofile'] = {'value': ''}
            if targattr == 'deployment.profile':
                updates['deployment.stagedprofile'] = {'value': ''}
            currprof = currattr.get(targattr, {}).get('value', '')
            if currprof != pending:
                updates[targattr] = {'value': pending}
            cfg.set_node_attributes({nodename: updates})
            start_response('200 OK', (('Content-Type', 'text/plain'), ))
            yield 'OK'
        else:
            start_response('500 Error', (('Content-Type', 'text/plain'), ))
            yield 'No pending profile detected, unable to accept status update'
    else:
        start_response('404 Not Found', ())
        yield 'Not found'
Example #28
    def lldp_has_neighbour(self, name):
        p = subprocess.check_output([
            "lldpcli", "-f", "keyvalue", "show", "neighbors", "summary",
            "ports", name
        ])
        return len(p) > 0
Example #29
    def get_lspci_output_by_addr(self, pciaddr):
        with open(os.devnull, "w") as fnull:
            output = subprocess.check_output(  # pylint: disable=not-callable
                ['lspci', '-vmmks', pciaddr],
                stderr=fnull)
        return output
Example #30
def handle_request(env, start_response):
    global currtz
    global keymap
    global currlocale
    global currtzvintage
    configmanager.check_quorum()
    nodename = env.get('HTTP_CONFLUENT_NODENAME', None)
    apikey = env.get('HTTP_CONFLUENT_APIKEY', None)
    if not (nodename and apikey):
        start_response('401 Unauthorized', [])
        yield 'Unauthorized'
        return
    cfg = configmanager.ConfigManager(None)
    ea = cfg.get_node_attributes(nodename,
                                 ['crypted.selfapikey', 'deployment.apiarmed'])
    eak = ea.get(nodename, {}).get('crypted.selfapikey',
                                   {}).get('hashvalue', None)
    if not eak:
        start_response('401 Unauthorized', [])
        yield 'Unauthorized'
        return
    salt = '$'.join(eak.split('$', 3)[:-1]) + '$'
    if crypt.crypt(apikey, salt) != eak:
        start_response('401 Unauthorized', [])
        yield 'Unauthorized'
        return
    if ea.get(nodename, {}).get('deployment.apiarmed', {}).get('value',
                                                               None) == 'once':
        cfg.set_node_attributes({nodename: {'deployment.apiarmed': ''}})
    retype = env.get('HTTP_ACCEPT', 'application/yaml')
    isgeneric = False
    if retype == '*/*':
        isgeneric = True
        retype = 'application/yaml'
    if retype == 'application/yaml':
        dumper = yamldump
    elif retype == 'application/json':
        dumper = json.dumps
    else:
        start_response('406 Not supported', [])
        yield 'Unsupported content type in ACCEPT: ' + retype
        return
    operation = env['REQUEST_METHOD']
    if operation not in ('HEAD', 'GET') and 'CONTENT_LENGTH' in env and int(
            env['CONTENT_LENGTH']) > 0:
        reqbody = env['wsgi.input'].read(int(env['CONTENT_LENGTH']))
    if env['PATH_INFO'] == '/self/bmcconfig':
        hmattr = cfg.get_node_attributes(nodename, 'hardwaremanagement.*')
        hmattr = hmattr.get(nodename, {})
        res = {}
        port = hmattr.get('hardwaremanagement.port', {}).get('value', None)
        if port is not None:
            res['bmcport'] = port
        vlan = hmattr.get('hardwaremanagement.vlan', {}).get('value', None)
        if vlan is not None:
            res['bmcvlan'] = vlan
        bmcaddr = hmattr.get('hardwaremanagement.manager',
                             {}).get('value', None)
        bmcaddr = socket.getaddrinfo(bmcaddr, 0)[0]
        bmcaddr = bmcaddr[-1][0]
        if '.' in bmcaddr:  # ipv4 is allowed
            netconfig = netutil.get_nic_config(cfg, nodename, ip=bmcaddr)
            res['bmcipv4'] = bmcaddr
            res['prefixv4'] = netconfig['prefix']
            res['bmcgw'] = netconfig.get('ipv4_gateway', None)
        # credential security results in user/password having to be deferred
        start_response('200 OK', (('Content-Type', retype), ))
        yield dumper(res)
    elif env['PATH_INFO'] == '/self/deploycfg':
        if 'HTTP_CONFLUENT_MGTIFACE' in env:
            ncfg = netutil.get_nic_config(cfg,
                                          nodename,
                                          ifidx=env['HTTP_CONFLUENT_MGTIFACE'])
        else:
            myip = env.get('HTTP_X_FORWARDED_HOST', None)
            if ']' in myip:
                myip = myip.split(']', 1)[0]
            else:
                myip = myip.split(':', 1)[0]
            myip = myip.replace('[', '').replace(']', '')
            ncfg = netutil.get_nic_config(cfg, nodename, serverip=myip)
        if ncfg['prefix']:
            ncfg['ipv4_netmask'] = netutil.cidr_to_mask(ncfg['prefix'])
        if ncfg['ipv4_method'] == 'firmwaredhcp':
            ncfg['ipv4_method'] = 'static'
        deployinfo = cfg.get_node_attributes(
            nodename,
            ('deployment.*', 'console.method', 'crypted.*', 'dns.*', 'ntp.*'))
        deployinfo = deployinfo.get(nodename, {})
        profile = deployinfo.get('deployment.pendingprofile',
                                 {}).get('value', '')
        ncfg['encryptboot'] = deployinfo.get('deployment.encryptboot',
                                             {}).get('value', None)
        if ncfg['encryptboot'] in ('', 'none'):
            ncfg['encryptboot'] = None
        ncfg['profile'] = profile
        protocol = deployinfo.get('deployment.useinsecureprotocols',
                                  {}).get('value', 'never')
        ncfg['textconsole'] = bool(
            deployinfo.get('console.method', {}).get('value', None))
        if protocol == 'always':
            ncfg['protocol'] = 'http'
        else:
            ncfg['protocol'] = 'https'
        ncfg['rootpassword'] = deployinfo.get('crypted.rootpassword',
                                              {}).get('hashvalue', None)
        ncfg['grubpassword'] = deployinfo.get('crypted.grubpassword',
                                              {}).get('grubhashvalue', None)
        if currtzvintage and currtzvintage > (time.time() - 30.0):
            ncfg['timezone'] = currtz
        else:
            langinfo = subprocess.check_output(['localectl',
                                                'status']).split(b'\n')
            for line in langinfo:
                line = line.strip()
                if line.startswith(b'System Locale:'):
                    ccurrlocale = line.split(b'=')[-1]
                    if not ccurrlocale:
                        continue
                    if not isinstance(ccurrlocale, str):
                        ccurrlocale = ccurrlocale.decode('utf8')
                    if ccurrlocale == 'n/a':
                        continue
                    currlocale = ccurrlocale
                elif line.startswith(b'VC Keymap:'):
                    ckeymap = line.split(b':')[-1]
                    ckeymap = ckeymap.strip()
                    if not ckeymap:
                        continue
                    if not isinstance(ckeymap, str):
                        ckeymap = ckeymap.decode('utf8')
                    if ckeymap == 'n/a':
                        continue
                    keymap = ckeymap
            tdc = subprocess.check_output(['timedatectl']).split(b'\n')
            for ent in tdc:
                ent = ent.strip()
                if ent.startswith(b'Time zone:'):
                    currtz = ent.split(b': ', 1)[1].split(b'(', 1)[0].strip()
                    if not isinstance(currtz, str):
                        currtz = currtz.decode('utf8')
                    currtzvintage = time.time()
                    ncfg['timezone'] = currtz
                    break
        ncfg['locale'] = currlocale
        ncfg['keymap'] = keymap
        ncfg['nameservers'] = []
        for dns in deployinfo.get('dns.servers', {}).get('value',
                                                         '').split(','):
            ncfg['nameservers'].append(dns)
        ntpsrvs = deployinfo.get('ntp.servers', {}).get('value', '')
        if ntpsrvs:
            ntpsrvs = ntpsrvs.split(',')
        if ntpsrvs:
            ncfg['ntpservers'] = []
            for ntpsrv in ntpsrvs:
                ncfg['ntpservers'].append(ntpsrv)
        dnsdomain = deployinfo.get('dns.domain', {}).get('value', None)
        ncfg['dnsdomain'] = dnsdomain
        start_response('200 OK', (('Content-Type', retype), ))
        yield dumper(ncfg)
    elif env['PATH_INFO'] == '/self/sshcert' and reqbody:
        if not sshutil.ca_exists():
            start_response('500 Unconfigured', ())
            yield 'CA is not configured on this system (run ...)'
            return
        pals = get_extra_names(nodename, cfg)
        cert = sshutil.sign_host_key(reqbody, nodename, pals)
        start_response('200 OK', (('Content-Type', 'text/plain'), ))
        yield cert
    elif env['PATH_INFO'] == '/self/nodelist':
        nodes, _ = get_cluster_list(nodename, cfg)
        if isgeneric:
            start_response('200 OK', (('Content-Type', 'text/plain'), ))
            for node in util.natural_sort(nodes):
                yield node + '\n'
        else:
            start_response('200 OK', (('Content-Type', retype), ))
            yield dumper(sorted(nodes))
    elif env['PATH_INFO'] == '/self/remoteconfigbmc' and reqbody:
        try:
            reqbody = yaml.safe_load(reqbody)
        except Exception:
            reqbody = None
        cfgmod = reqbody.get('configmod', 'unspecified')
        if cfgmod == 'xcc':
            xcc.remote_nodecfg(nodename, cfg)
        elif cfgmod == 'tsm':
            tsm.remote_nodecfg(nodename, cfg)
        else:
            start_response('500 unsupported configmod', ())
            yield 'Unsupported configmod "{}"'.format(cfgmod)
            return
        start_response('200 Ok', ())
        yield 'complete'
    elif env['PATH_INFO'] == '/self/updatestatus' and reqbody:
        update = yaml.safe_load(reqbody)
        if update['status'] == 'staged':
            targattr = 'deployment.stagedprofile'
        elif update['status'] == 'complete':
            targattr = 'deployment.profile'
        else:
            raise Exception('Unknown update status request')
        currattr = cfg.get_node_attributes(nodename,
                                           'deployment.*').get(nodename, {})
        pending = None
        if targattr == 'deployment.profile':
            pending = currattr.get('deployment.stagedprofile',
                                   {}).get('value', '')
        if not pending:
            pending = currattr.get('deployment.pendingprofile',
                                   {}).get('value', '')
        updates = {}
        if pending:
            updates['deployment.pendingprofile'] = {'value': ''}
            if targattr == 'deployment.profile':
                updates['deployment.stagedprofile'] = {'value': ''}
            currprof = currattr.get(targattr, {}).get('value', '')
            if currprof != pending:
                updates[targattr] = {'value': pending}
            cfg.set_node_attributes({nodename: updates})
            start_response('200 OK', (('Content-Type', 'text/plain'), ))
            yield 'OK'
        else:
            start_response('500 Error', (('Content-Type', 'text/plain'), ))
            yield 'No pending profile detected, unable to accept status update'
    elif env['PATH_INFO'] == '/self/saveapikey' and reqbody:
        if not isinstance(reqbody, str):
            reqbody = reqbody.decode('utf8')
        cfg.set_node_attributes(
            {nodename: {
                'deployment.sealedapikey': {
                    'value': reqbody
                }
            }})
        start_response('200 OK', ())
        yield ''
    elif env['PATH_INFO'].startswith(
            '/self/remoteconfig/') and 'POST' == operation:
        scriptcat = env['PATH_INFO'].replace('/self/remoteconfig/', '')
        slist, profile = get_scriptlist(
            scriptcat, cfg, nodename,
            '/var/lib/confluent/public/os/{0}/ansible/{1}')
        playlist = []
        dirname = '/var/lib/confluent/public/os/{0}/ansible/{1}/'.format(
            profile, scriptcat)
        if not os.path.isdir(dirname):
            dirname = '/var/lib/confluent/public/os/{0}/ansible/{1}.d/'.format(
                profile, scriptcat)
        for filename in slist:
            if filename.endswith('.yaml') or filename.endswith('.yml'):
                playlist.append(os.path.join(dirname, filename))
        if playlist:
            runansible.run_playbooks(playlist, [nodename])
            start_response('202 Queued', ())
            yield ''
        else:
            start_response('200 OK', ())
            yield ''
            return
    elif env['PATH_INFO'].startswith('/self/remotesyncfiles'):
        if 'POST' == operation:
            result = syncfiles.start_syncfiles(nodename, cfg,
                                               json.loads(reqbody))
            start_response(result, ())
            yield ''
            return
        if 'GET' == operation:
            status, output = syncfiles.get_syncresult(nodename)
            start_response(status, ())
            yield output
            return
    elif env['PATH_INFO'].startswith('/self/remoteconfig/status'):
        rst = runansible.running_status.get(nodename, None)
        if not rst:
            start_response('204 Not Running', (('Content-Length', '0'), ))
            yield ''
            return
        start_response('200 OK', ())
        if rst.complete:
            del runansible.running_status[nodename]
        yield rst.dump_text()
        return
    elif env['PATH_INFO'].startswith('/self/scriptlist/'):
        scriptcat = env['PATH_INFO'].replace('/self/scriptlist/', '')
        slist, _ = get_scriptlist(
            scriptcat, cfg, nodename,
            '/var/lib/confluent/public/os/{0}/scripts/{1}')
        if slist:
            start_response('200 OK', (('Content-Type', 'application/yaml'), ))
            yield yaml.safe_dump(util.natural_sort(slist),
                                 default_flow_style=False)
        else:
            start_response('200 OK', ())
            yield ''
    else:
        start_response('404 Not Found', ())
        yield 'Not found'
Example #31
    def merge_overrides(self, file_overrides=None, set_overrides=None):
        """ Merge helm overrides together.

        :param values: A dict of different types of user override values,
                       'files' (which generally specify many overrides) and
                       'set' (which generally specify one override).
        """

        if file_overrides is None:
            file_overrides = []
        if set_overrides is None:
            set_overrides = []
        # At this point we have potentially two separate types of overrides
        # specified by system or user, values from files and values passed in
        # via --set .  We need to ensure that we call helm using the same
        # mechanisms to ensure the same behaviour.
        cmd = ['helm', 'install', '--dry-run', '--debug']

        # Process the newly-passed-in override values
        tmpfiles = []

        for value_file in file_overrides:
            # For values passed in from files, write them back out to
            # temporary files.
            tmpfile = tempfile.NamedTemporaryFile(delete=False)
            tmpfile.write(value_file)
            tmpfile.close()
            tmpfiles.append(tmpfile.name)
            cmd.extend(['--values', tmpfile.name])

        for value_set in set_overrides:
            keypair = list(value_set.split("="))

            # The user inputs either "--set key=value" or "--set key=";
            # in the second case the value is assumed to be "".
            # Skip malformed settings like "--set =value" or "--set xxxx".
            if len(keypair) == 2 and keypair[0]:
                if keypair[1] and keypair[1].isdigit():
                    cmd.extend(['--set-string', value_set])
                else:
                    cmd.extend(['--set', value_set])

        env = os.environ.copy()
        env['KUBECONFIG'] = '/etc/kubernetes/admin.conf'

        # Make a temporary directory with a fake chart in it
        try:
            tmpdir = tempfile.mkdtemp()
            chartfile = tmpdir + '/Chart.yaml'
            with open(chartfile, 'w') as tmpchart:
                tmpchart.write('name: mychart\napiVersion: v1\n'
                               'version: 0.1.0\n')
            cmd.append(tmpdir)

            # Apply changes by calling out to helm to do values merge
            # using a dummy chart.
            output = subprocess.check_output(cmd, env=env)

            # Check output for failure

            # Extract the info we want.
            values = output.split('USER-SUPPLIED VALUES:\n')[1].split(
                '\nCOMPUTED VALUES:')[0]
        except Exception:
            raise
        finally:
            os.remove(chartfile)
            os.rmdir(tmpdir)

        for tmpfile in tmpfiles:
            os.remove(tmpfile)

        return values
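A usage sketch, assuming "operator" is an instance of the surrounding helm class; the file override is written verbatim to a temporary values file, so under Python 3 it must be bytes:

merged = operator.merge_overrides(
    file_overrides=[b'replicas: 2\nimage:\n  tag: stable\n'],
    set_overrides=['image.tag=latest', 'replicas=3'])
# "merged" holds the USER-SUPPLIED VALUES section of helm's dry-run output.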