Example #1
def get_from_path(items, path):
    """Returns a list of items matching the specified path.

    Takes an XPath-like expression e.g. prop1/prop2/prop3, and for each item
    in items, looks up items[prop1][prop2][prop3].  Like XPath, if any of the
    intermediate results are lists it will treat each list item individually.
    A 'None' in items or any child expressions will be ignored; this function
    will not throw because of None (anywhere) in items.  The returned list
    will contain no None values.

    """
    if path is None:
        raise exception.Error('Invalid mini_xpath')

    (first_token, sep, remainder) = path.partition('/')

    if first_token == '':
        raise exception.Error('Invalid mini_xpath')

    results = []

    if items is None:
        return results

    if not isinstance(items, list):
        # Wrap single objects in a list
        items = [items]

    for item in items:
        if item is None:
            continue
        get_method = getattr(item, 'get', None)
        if get_method is None:
            continue
        child = get_method(first_token)
        if child is None:
            continue
        if isinstance(child, list):
            # Flatten intermediate lists
            for x in child:
                results.append(x)
        else:
            results.append(child)

    if not sep:
        # No more tokens
        return results
    else:
        return get_from_path(results, remainder)
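
A minimal usage sketch (the nested data below is hypothetical, not from the source):

items = [{'server': {'addresses': [{'ip': '10.0.0.1'}, {'ip': '10.0.0.2'}]}},
         {'server': None}]
print(get_from_path(items, 'server/addresses/ip'))
# ['10.0.0.1', '10.0.0.2'] -- the intermediate list is flattened and the
# item whose 'server' value is None is skipped silently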
Example #2
def get_my_linklocal(interface):
    try:
        if_str = execute('ip', '-f', 'inet6', '-o', 'addr', 'show', interface)
        condition = r'\s+inet6\s+([0-9a-f:]+)/\d+\s+scope\s+link'
        links = [re.search(condition, x) for x in if_str[0].split('\n')]
        address = [w.group(1) for w in links if w is not None]
        if address[0] is not None:
            return address[0]
        else:
            raise exception.Error(
                _('Link Local address is not found.:%s') % if_str)
    except Exception as ex:
        raise exception.Error(
            _("Couldn't get Link Local IP of %(interface)s"
              " :%(ex)s") % locals())
Example #3
    def _run_ssh(self, command, check_exit_code=True):
        chan = self.ssh.open_session()
        chan.invoke_shell()

        if FLAGS.eqlx_verbose_ssh:
            LOG.debug(_("Reading CLI MOTD"))
        motd = self._get_output(chan)

        cmd = "%s %s %s" % ('stty', 'columns', '255')
        if FLAGS.eqlx_verbose_ssh:
            LOG.debug(_("Setting CLI terminal width: '%s'"), cmd)
        chan.send(cmd + '\r')
        out = self._get_output(chan)

        if FLAGS.eqlx_verbose_ssh:
            LOG.debug(_("Sending CLI command: '%s'"), command)
        chan.send(command + '\r')
        out = self._get_output(chan)

        chan.close()

        if any(line.startswith(('% Error', 'Error:')) for line in out):
            msg = _("Error executing EQL command: %(cmd)s") % locals()
            for line in out:
                LOG.error(line)
            raise exception.Error(msg, out)
        return out
Example #4
    def _check_connection(self):
        for try_no in range(FLAGS.eqlx_cli_max_retries):
            if hasattr(self, 'ssh'):
                try:
                    self._run_ssh('cli-settings',
                                  'show',
                                  timeout=FLAGS.eqlx_cli_timeout)
                except Exception as error:
                    LOG.debug(error)
                    LOG.info(_("Connection to SAN has been lost"))
                    delattr(self, 'ssh')
                else:
                    LOG.debug(_("SAN connection is up"))
                    return
            if try_no:
                time.sleep(FLAGS.eqlx_cli_retries_timeout)
            try:
                LOG.debug(_("Connecting to the SAN (%s@%s:%d)"),
                          FLAGS.san_login, FLAGS.san_ip, FLAGS.san_ssh_port)
                self.ssh = self._connect_to_ssh()
                LOG.info(_("Connected to the SAN after %d retries"), try_no)
            except Exception as error:
                LOG.debug(error)
                LOG.error(_("Failed to connect to the SAN"))
            else:
                return

        msg = _(
            "unable to connect to the EQL appliance after %(try_no)d retries"
        ) % locals()
        raise exception.Error(msg)
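
The retry structure boils down to: sleep before every attempt except the first,
return as soon as a connection succeeds, and give up after the configured number
of tries. A stripped-down sketch of the same loop shape (the helper name and
arguments are illustrative, not part of the driver):

import time

def connect_with_retries(connect, max_retries, retry_sleep):
    for try_no in range(max_retries):
        if try_no:
            time.sleep(retry_sleep)
        try:
            return connect()
        except Exception as error:
            print('attempt %d failed: %s' % (try_no, error))
    raise RuntimeError('unable to connect after %d retries' % max_retries)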
Example #5
def get_blkdev_major_minor(path, lookup_for_file=True):
    """Get 'major:minor' number of block device.

    Get the device's 'major:minor' number of a block device to control
    I/O ratelimit of the specified path.
    If lookup_for_file is True and the path is a regular file, lookup a disk
    device which the file lies on and returns the result for the device.
    """
    st = os.stat(path)
    if stat.S_ISBLK(st.st_mode):
        path, st = _get_disk_of_partition(path, st)
        return '%d:%d' % (os.major(st.st_rdev), os.minor(st.st_rdev))
    elif stat.S_ISCHR(st.st_mode):
        # No I/O ratelimit control is provided for character devices
        return None
    elif lookup_for_file:
        # lookup the mounted disk which the file lies on
        out, _err = execute('df', path)
        devpath = out.split("\n")[1].split()[0]
        if devpath[0] != '/':
            # the file is on a network file system
            return None
        return get_blkdev_major_minor(devpath, False)
    else:
        msg = _("Unable to get a block device for file \'%s\'") % path
        raise exception.Error(msg)
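
A small sketch of the major:minor split the block-device branch relies on
(the device numbers are made up; os.makedev just builds a raw device number):

import os

dev = os.makedev(8, 1)                           # 8:1 is typically /dev/sda1
print('%d:%d' % (os.major(dev), os.minor(dev)))  # prints '8:1'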
Example #6
    def update_free_index(self, index=None):
        """
        Update the free index, monotonically increasing, and
        looping back to 1 after the max lun ID value is hit.

        Arguments:
            index -- assume that all values below this number may be already
                     allocated, so start searching at that value if it is
                     higher than the free_index
        """
        i = 0
        count = 0
        max_size = len(self.lun_id_list)
        if index and index > self.free_index:
            i = index + 1
        else:
            i = self.free_index
        # avoid possibility of indexError
        if i >= max_size:
            i = 1
        while self.lun_id_list[i] == 1 and count < max_size:
            count += 1
            i += 1
            if i >= max_size:
                i = 1
        self.free_index = i
        if count == max_size:
            raise exception.Error("Cannot find free lun_id, giving up!")
Example #7
    def _execute(self, *args, **kwargs):
        command = ' '.join(args)
        try:
            self._check_connection()
            LOG.info(_('executing "%s"') % command)
            return self._run_ssh(command, timeout=FLAGS.eqlx_cli_timeout)
        except Timeout:
            msg = _("Timeout while executing EQL command: %(command)s") % \
                  locals()
            raise exception.Error(msg)
Example #8
    def register(self, ext):
        # Do nothing if the extension doesn't check out
        if not self._check_extension(ext):
            return

        alias = ext.alias
        LOG.audit(_('Loaded extension: %s'), alias)

        if alias in self.extensions:
            raise exception.Error("Found duplicate extension: %s" % alias)
        self.extensions[alias] = ext
Example #9
def db_sync(version=None):
    if version is not None:
        try:
            version = int(version)
        except ValueError:
            raise exception.Error(_("version should be an integer"))

    current_version = db_version()
    repository = _find_migrate_repo()
    if version is None or version > current_version:
        return versioning_api.upgrade(get_engine(), repository, version)
    else:
        return versioning_api.downgrade(get_engine(), repository, version)
Example #10
def ssh_execute(ssh,
                cmd,
                process_input=None,
                addl_env=None,
                check_exit_code=True):
    LOG.debug(_('Running cmd (SSH): %s'), cmd)
    if addl_env:
        raise exception.Error(_('Environment not supported over SSH'))

    if process_input:
        # This is (probably) fixable if we need it...
        raise exception.Error(_('process_input not supported over SSH'))

    stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
    channel = stdout_stream.channel

    #stdin.write('process_input would go here')
    #stdin.flush()

    # NOTE(justinsb): This seems suspicious...
    # ...other SSH clients have buffering issues with this approach
    stdout = stdout_stream.read()
    stderr = stderr_stream.read()
    stdin_stream.close()
    stdout_stream.close()
    stderr_stream.close()

    exit_status = channel.recv_exit_status()

    # exit_status == -1 if no exit code was returned
    if exit_status != -1:
        LOG.debug(_('Result was %s') % exit_status)
        if check_exit_code and exit_status != 0:
            raise exception.ProcessExecutionError(exit_code=exit_status,
                                                  stdout=stdout,
                                                  stderr=stderr,
                                                  cmd=cmd)
    channel.close()
    return (stdout, stderr)
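
A usage sketch, assuming a paramiko SSHClient that is already connected
(host and credentials below are placeholders):

import paramiko

ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect('192.0.2.10', username='admin', password='secret')

stdout, stderr = ssh_execute(ssh, 'uname -a')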
Example #11
    def _export_lun(self, volume, connector=None, igroup=None):
        """
        Generates the export configuration for the given volume

        The equivalent CLI command is "lun export container
        <container_name> name <lun_name>"

        Arguments:
            volume -- volume object provided by the Manager
            connector -- connector object provided by the Manager
            igroup -- name of igroup to use for exporting

        Returns:
            lun_id -- the LUN ID assigned by the backend
        """
        lun_id = -1
        export_to = ''
        v = self.vmem_vip

        if igroup:
            export_to = igroup
        elif connector:
            export_to = connector['initiator']
        else:
            raise exception.Error(_("No initiators found, cannot proceed"))

        target_name = self._get_short_name(volume['name'])

        # TODO(rlucio): new encryption code in afterglow requires that
        # lun state nodes for encryption exist before running the
        # export or else the export will fail on the backend (via
        # /vshare/state/local/container/%s/lun/%s/encrypted)
        #

        LOG.info(_("Exporting lun %s"), volume['name'])

        try:
            self._send_cmd(v.lun.export_lun, '', self.container,
                           volume['name'], target_name, export_to, 'auto')

        except Exception:
            LOG.exception(_("LUN export failed!"))
            raise

        else:
            self._wait_for_exportstate(volume['name'], True)
            lun_id = self._get_lun_id(volume['name'])

        return lun_id
Example #12
def trycmd(*args, **kwargs):
    """Convenience wrapper around oslo's trycmd() method."""
    if 'run_as_root' in kwargs and 'root_helper' not in kwargs:
        kwargs['root_helper'] = get_root_helper()
    try:
        (stdout, stderr) = processutils.trycmd(*args, **kwargs)
    except processutils.ProcessExecutionError as ex:
        raise exception.ProcessExecutionError(exit_code=ex.exit_code,
                                              stderr=ex.stderr,
                                              stdout=ex.stdout,
                                              cmd=ex.cmd,
                                              description=ex.description)
    except processutils.UnknownArgumentError as ex:
        raise exception.Error(ex.message)
    return (stdout, stderr)
Example #13
    def _export_snapshot(self, snapshot, connector=None, igroup=None):
        """Generates the export configuration for the given snapshot.

        The equivalent CLI command is "snapshot export container
        PROD08 lun <snapshot_name> name <volume_name>"

        Arguments:
            snapshot -- snapshot object provided by the Manager
            connector -- connector object provided by the Manager
            igroup -- name of igroup to use for exporting

        Returns:
            lun_id -- the LUN ID assigned by the backend
        """
        lun_id = -1
        export_to = ''
        v = self.common.vip

        target_name = self.TARGET_GROUP_NAME

        LOG.debug("Exporting snapshot %s.", snapshot['id'])

        if igroup:
            export_to = igroup
        elif connector:
            export_to = connector['initiator']
        else:
            raise exception.Error(_("No initiators found, cannot proceed"))

        try:
            self.common._send_cmd(v.snapshot.export_lun_snapshot, '',
                                  self.common.container, snapshot['volume_id'],
                                  snapshot['id'], export_to, target_name,
                                  'auto')

        except Exception:
            LOG.exception(_LE("Snapshot export for %s failed!"),
                          snapshot['id'])
            raise

        else:
            self.common._wait_for_export_state(snapshot['volume_id'],
                                               snapshot['id'],
                                               state=True)
            lun_id = self.common._get_snapshot_id(snapshot['volume_id'],
                                                  snapshot['id'])

        return lun_id
Example #14
def execute(*cmd, **kwargs):
    """Convenience wrapper around oslo's execute() method."""
    if 'run_as_root' in kwargs and 'root_helper' not in kwargs:
        kwargs['root_helper'] = \
            'sudo cinder-rootwrap %s' % CONF.rootwrap_config
    try:
        (stdout, stderr) = processutils.execute(*cmd, **kwargs)
    except processutils.ProcessExecutionError as ex:
        raise exception.ProcessExecutionError(exit_code=ex.exit_code,
                                              stderr=ex.stderr,
                                              stdout=ex.stdout,
                                              cmd=ex.cmd,
                                              description=ex.description)
    except processutils.UnknownArgumentError as ex:
        raise exception.Error(ex.message)
    return (stdout, stderr)
Example #15
    def _export_lun(self, volume, connector=None, igroup=None):
        """
        Generates the export configuration for the given volume

        The equivalent CLI command is "lun export container
        <container_name> name <lun_name>"

        Arguments:
            volume -- volume object provided by the Manager
            connector -- connector object provided by the Manager
            igroup -- name of igroup to use for exporting

        Returns:
            lun_id -- the LUN ID assigned by the backend
        """
        lun_id = ''
        export_to = ''
        v = self.vmem_vip

        lun_id = self.lun_tracker.get_lun_id_for_volume(volume)

        if igroup:
            export_to = igroup
        elif connector:
            export_to = self._convert_wwns_openstack_to_vmem(
                connector['wwpns'])
        else:
            raise exception.Error(_("No initiators found, cannot proceed"))

        LOG.info(
            _("Exporting lun %(vol_id)s on lun_id %(lun_id)s") % {
                'vol_id': volume['id'],
                'lun_id': lun_id
            })

        try:
            self._send_cmd_and_verify(
                v.lun.export_lun, self._wait_for_exportstate, '',
                [self.container, volume['id'], 'all', export_to, lun_id],
                [volume['id'], True])

        except Exception:
            LOG.exception(_("LUN export failed!"))
            raise

        return lun_id
Example #16
def db_version():
    repository = _find_migrate_repo()
    try:
        return versioning_api.db_version(get_engine(), repository)
    except versioning_exceptions.DatabaseNotControlledError:
        # If we aren't version controlled we may already have the database
        # in the state from before we started version control, check for that
        # and set up version_control appropriately
        meta = sqlalchemy.MetaData()
        engine = get_engine()
        meta.reflect(bind=engine)
        tables = meta.tables
        if len(tables) == 0:
            db_version_control(migration.INIT_VERSION)
            return versioning_api.db_version(get_engine(), repository)
        else:
            raise exception.Error(_("Upgrade DB using Essex release first."))
Example #17
    def __get_backend(self):
        if not self.__backend:
            backend_name = CONF[self.__pivot]
            if backend_name not in self.__backends:
                raise exception.Error(_('Invalid backend: %s') % backend_name)

            backend = self.__backends[backend_name]
            if isinstance(backend, tuple):
                name = backend[0]
                fromlist = backend[1]
            else:
                name = backend
                fromlist = backend

            self.__backend = __import__(name, None, None, fromlist)
            LOG.debug(_('backend %s'), self.__backend)
        return self.__backend
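
The lazy import hinges on __import__'s fromlist argument: with a non-empty
fromlist the named submodule itself is returned rather than the top-level
package. A quick illustration using only the standard library:

pkg = __import__('os.path', None, None, [])        # empty fromlist
print(pkg.__name__)   # 'os' -- the top-level package

mod = __import__('os.path', None, None, ['join'])  # non-empty fromlist
print(mod.__name__)   # 'posixpath' (or 'ntpath' on Windows)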
Example #18
    def _add_igroup_member(self, connector, igroup):
        """Add an initiator to an igroup so it can see exports.

        The equivalent CLI command is "igroup addto name <igroup_name>
        initiators <initiator_name>"

        Arguments:
            connector -- connector object provided by the Manager
        """
        v = self.common.vip

        LOG.debug("Adding initiator %s to igroup.", connector['initiator'])

        resp = v.igroup.add_initiators(igroup, connector['initiator'])

        if resp['code'] != 0:
            raise exception.Error(
                _('Failed to add igroup member: %(code)d, %(message)s') % resp)
Example #19
    def _export_snapshot(self, snapshot, connector=None, igroup=None):
        """
        Generates the export configuration for the given snapshot.

        The equivalent CLI command is "snapshot export container
        PROD08 lun <snapshot_name> name <volume_name>"

        Arguments:
            snapshot -- snapshot object provided by the Manager
            connector -- connector object provided by the Manager
            igroup -- name of igroup to use for exporting

        Returns:
            lun_id -- the LUN ID assigned by the backend
        """
        lun_id = ''
        export_to = ''
        v = self.vmem_vip

        lun_id = self.lun_tracker.get_lun_id_for_snapshot(snapshot)

        if igroup:
            export_to = igroup
        elif connector:
            export_to = self._convert_wwns_openstack_to_vmem(
                connector['wwpns'])
        else:
            raise exception.Error(_("No initiators found, cannot proceed"))

        LOG.info(_("Exporting snapshot %s"), snapshot['id'])

        try:
            self._send_cmd(v.snapshot.export_lun_snapshot, '', self.container,
                           snapshot['volume_id'], snapshot['id'], export_to,
                           'all', lun_id)

        except Exception:
            LOG.exception(_("Snapshot export failed!"))
            raise

        else:
            self._wait_for_exportstate(snapshot['id'], True)

        return lun_id
Example #20
    def _export_lun(self, volume, connector=None, igroup=None):
        """Generates the export configuration for the given volume.

        The equivalent CLI command is "lun export container
        <container_name> name <lun_name>"

        Arguments:
            volume -- volume object provided by the Manager
            connector -- connector object provided by the Manager
            igroup -- name of igroup to use for exporting

        Returns:
            lun_id -- the LUN ID assigned by the backend
        """
        lun_id = -1
        export_to = ''
        v = self.common.vip

        if igroup:
            export_to = igroup
        elif connector:
            export_to = connector['initiator']
        else:
            raise exception.Error(_("No initiators found, cannot proceed"))

        target_name = self.TARGET_GROUP_NAME

        LOG.debug("Exporting lun %s.", volume['id'])

        try:
            self.common._send_cmd_and_verify(
                v.lun.export_lun, self.common._wait_for_export_state, '', [
                    self.common.container, volume['id'], target_name,
                    export_to, 'auto'
                ], [volume['id'], None, True])

        except Exception:
            LOG.exception(_LE("LUN export for %s failed!"), volume['id'])
            raise

        lun_id = self.common._get_lun_id(volume['id'])

        return lun_id
Example #21
    def _add_igroup_member(self, connector, igroup):
        """Add an initiator to the openstack igroup so it can see exports.

        The equivalent CLI command is "igroup addto name <igroup_name>
        initiators <initiator_name>"

        Arguments:
            connector -- connector object provided by the Manager
        """
        v = self.common.vip
        wwpns = self._convert_wwns_openstack_to_vmem(connector['wwpns'])

        LOG.debug("Adding initiators %(wwpns)s to igroup %(igroup)s." %
                  {'wwpns': wwpns, 'igroup': igroup})

        resp = v.igroup.add_initiators(igroup, wwpns)

        if resp['code'] != 0:
            raise exception.Error(
                _('Failed to add igroup member: %(code)d, %(message)s') % resp)
Example #22
def bad_function_error():
    raise exception.Error()
Example #23
def execute(*cmd, **kwargs):
    """Helper method to execute command with optional retry.

    If you add a run_as_root=True command, don't forget to add the
    corresponding filter to etc/cinder/rootwrap.d !

    :param cmd:                Passed to subprocess.Popen.
    :param process_input:      Send to opened process.
    :param check_exit_code:    Single bool, int, or list of allowed exit
                               codes.  Defaults to [0].  Raise
                               exception.ProcessExecutionError unless
                               program exits with one of these codes.
    :param delay_on_retry:     True | False. Defaults to True. If set to
                               True, wait a short amount of time
                               before retrying.
    :param attempts:           How many times to retry cmd.
    :param run_as_root:        True | False. Defaults to False. If set to True,
                               the command is prefixed by the command specified
                               in the root_helper FLAG.

    :raises exception.Error: on receiving unknown arguments
    :raises exception.ProcessExecutionError:

    :returns: a tuple, (stdout, stderr) from the spawned process, or None if
             the command fails.
    """

    process_input = kwargs.pop('process_input', None)
    check_exit_code = kwargs.pop('check_exit_code', [0])
    ignore_exit_code = False
    if isinstance(check_exit_code, bool):
        ignore_exit_code = not check_exit_code
        check_exit_code = [0]
    elif isinstance(check_exit_code, int):
        check_exit_code = [check_exit_code]
    delay_on_retry = kwargs.pop('delay_on_retry', True)
    attempts = kwargs.pop('attempts', 1)
    run_as_root = kwargs.pop('run_as_root', False)
    shell = kwargs.pop('shell', False)

    if len(kwargs):
        raise exception.Error(
            _('Got unknown keyword args '
              'to utils.execute: %r') % kwargs)

    if run_as_root:

        if FLAGS.rootwrap_config is None or FLAGS.root_helper != 'sudo':
            deprecated.warn(
                _('The root_helper option (which lets you specify '
                  'a root wrapper different from cinder-rootwrap, '
                  'and defaults to using sudo) is now deprecated. '
                  'You should use the rootwrap_config option '
                  'instead.'))

        if (FLAGS.rootwrap_config is not None):
            cmd = ['sudo', 'cinder-rootwrap', FLAGS.rootwrap_config
                   ] + list(cmd)
        else:
            cmd = shlex.split(FLAGS.root_helper) + list(cmd)
    cmd = [str(c) for c in cmd]

    while attempts > 0:
        attempts -= 1
        try:
            LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd))
            _PIPE = subprocess.PIPE  # pylint: disable=E1101
            obj = subprocess.Popen(cmd,
                                   stdin=_PIPE,
                                   stdout=_PIPE,
                                   stderr=_PIPE,
                                   close_fds=True,
                                   preexec_fn=_subprocess_setup,
                                   shell=shell)
            result = None
            if process_input is not None:
                result = obj.communicate(process_input)
            else:
                result = obj.communicate()
            obj.stdin.close()  # pylint: disable=E1101
            _returncode = obj.returncode  # pylint: disable=E1101
            if _returncode:
                LOG.debug(_('Result was %s') % _returncode)
                if not ignore_exit_code and _returncode not in check_exit_code:
                    (stdout, stderr) = result
                    raise exception.ProcessExecutionError(
                        exit_code=_returncode,
                        stdout=stdout,
                        stderr=stderr,
                        cmd=' '.join(cmd))
            return result
        except exception.ProcessExecutionError:
            if not attempts:
                raise
            else:
                LOG.debug(_('%r failed. Retrying.'), cmd)
                if delay_on_retry:
                    greenthread.sleep(random.randint(20, 200) / 100.0)
        finally:
            # NOTE(termie): this appears to be necessary to let the subprocess
            #               call clean something up in between calls, without
            #               it two execute calls in a row hangs the second one
            greenthread.sleep(0)
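
A rough usage sketch of the keyword arguments documented above (the commands
are illustrative placeholders):

# Default behaviour: raise ProcessExecutionError on a non-zero exit code
out, err = execute('ls', '-l', '/tmp')

# Accept any exit code instead of raising
out, err = execute('grep', 'pattern', '/etc/hosts', check_exit_code=False)

# Retry a flaky command a few times, sleeping briefly between attempts
out, err = execute('some-flaky-tool', attempts=3, delay_on_retry=True)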