Example #1
    def _cliq_get_cluster_vip(self, cluster_name):
        """Gets the IP on which a cluster shares iSCSI volumes"""
        cluster_xml = self._cliq_get_cluster_info(cluster_name)

        vips = []
        for vip in cluster_xml.findall("response/cluster/vip"):
            vips.append(vip.attrib.get('ipAddress'))

        if len(vips) == 1:
            return vips[0]

        _xml = ElementTree.tostring(cluster_xml)
        msg = (_("Unexpected number of virtual ips for cluster "
                 " %(cluster_name)s. Result=%(_xml)s") % locals())
        raise exception.Error(msg)
Example #2
def setup_container(image, container_dir=None, use_cow=False):
    """Setup the LXC container.

    It will mount the loopback image to the container directory in order
    to create the root filesystem for the container.

    LXC does not support qcow2 images yet.
    """
    try:
        img = _DiskImage(image=image, use_cow=use_cow, mount_dir=container_dir)
        if img.mount():
            return img
        else:
            raise exception.Error(img.errors)
    except Exception, exn:
        LOG.exception(_('Failed to mount filesystem: %s'), exn)
Example #3
def start_transfer(read_file_handle, data_size, write_file_handle=None,
                   tank_client=None, image_id=None, image_meta=None):
    """Start the data transfer from the reader to the writer.
    Reader writes to the pipe and the writer reads from the pipe. This means
    that the total transfer time boils down to the slower of the read/write
    and not the addition of the two times."""

    if not image_meta:
        image_meta = {}

    # The pipe that acts as an intermediate store of data for reader to write
    # to and writer to grab from.
    thread_safe_pipe = io_util.ThreadSafePipe(QUEUE_BUFFER_SIZE, data_size)
    # The read thread. In the tank case it is an instance of the
    # TankFileRead class: the tank client read returns an iterator,
    # and that class wraps the iterator to provide data chunks on each
    # call to read.
    read_thread = io_util.IOThread(read_file_handle, thread_safe_pipe)

    # For a Tank -> VMware transfer, we just need a handle to the HTTP
    # connection that will send the transfer data to the VMware datastore.
    if write_file_handle:
        write_thread = io_util.IOThread(thread_safe_pipe, write_file_handle)
    # For a VMware -> Tank transfer, we hand the VMware HTTP file read
    # handle over to the Tank client instance, but to confirm the transfer
    # we need the image status on glance to change to active.
    # The TankWriteThread handles that for us.
    elif tank_client and image_id:
        write_thread = io_util.TankWriteThread(thread_safe_pipe,
                                         tank_client, image_id, image_meta)
    # Start the read and write threads.
    read_event = read_thread.start()
    write_event = write_thread.start()
    try:
        # Wait on the read and write events to signal their end
        read_event.wait()
        write_event.wait()
    except Exception, exc:
        # If either the read or the write raises an exception, stop both
        # threads so that we don't unnecessarily keep the other one
        # waiting.
        read_thread.stop()
        write_thread.stop()

        # Log and raise the exception.
        LOG.exception(exc)
        raise exception.Error(exc)
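The docstring above hinges on one reader thread and one writer thread sharing a bounded pipe, so the total time is governed by the slower side. Below is a minimal sketch of that pattern using only the standard library; pipe_transfer, read_chunk and write_chunk are illustrative names, not part of the tank/VMware code.

import threading
try:
    import queue            # Python 3
except ImportError:
    import Queue as queue   # Python 2

def pipe_transfer(read_chunk, write_chunk, queue_size=10):
    """Overlap reads and writes via a bounded queue; an empty chunk ends the stream."""
    pipe = queue.Queue(maxsize=queue_size)

    def producer():
        while True:
            chunk = read_chunk()
            pipe.put(chunk)          # blocks when the queue is full
            if not chunk:
                break

    def consumer():
        while True:
            chunk = pipe.get()       # blocks when the queue is empty
            if not chunk:
                break
            write_chunk(chunk)

    threads = [threading.Thread(target=producer),
               threading.Thread(target=consumer)]
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()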
Example #4
    def __get_backend(self):
        if not self.__backend:
            backend_name = self.__pivot.value
            if backend_name not in self.__backends:
                raise exception.Error(_('Invalid backend: %s') % backend_name)

            backend = self.__backends[backend_name]
            if isinstance(backend, tuple):
                name = backend[0]
                fromlist = backend[1]
            else:
                name = backend
                fromlist = backend

            self.__backend = __import__(name, None, None, fromlist)
            LOG.debug(_('backend %s'), self.__backend)
        return self.__backend
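The tuple handling above exists because of how __import__ treats its fromlist argument; here is a short illustration with a standard-library package, unrelated to the backend code itself.

# Without a fromlist, __import__ returns the top-level package:
pkg = __import__('xml.etree', None, None, [])
print(pkg.__name__)   # 'xml'

# With a non-empty fromlist it returns the named (sub)module itself, which is
# why the code above falls back to using the backend name as its own fromlist:
mod = __import__('xml.etree', None, None, ['ElementTree'])
print(mod.__name__)   # 'xml.etree'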
Example #5
def get_vlanid_and_vswitch_for_portgroup(session, pg_name):
    """Get the vlan id and vswicth associated with the port group."""
    host_mor = session._call_method(vim_util, "get_objects",
                                    "HostSystem")[0].obj
    port_grps_on_host_ret = session._call_method(vim_util,
                                                 "get_dynamic_property",
                                                 host_mor, "HostSystem",
                                                 "config.network.portgroup")
    if not port_grps_on_host_ret:
        excep = ("ESX SOAP server returned an empty port group "
                 "for the host system in its response")
        LOG.exception(excep)
        raise exception.Error(_(excep))
    port_grps_on_host = port_grps_on_host_ret.HostPortGroup
    for p_gp in port_grps_on_host:
        if p_gp.spec.name == pg_name:
            p_grp_vswitch_name = p_gp.vswitch.split("-")[-1]
            return p_gp.spec.vlanId, p_grp_vswitch_name
Example #6
    def _do_export(self, context, volume, force_create):
        """Supports ensure_export and create_export"""
        volume_info = self._cliq_get_volume_info(volume['name'])

        is_shared = 'permission.authGroup' in volume_info

        model_update = {}

        should_export = False

        if force_create or not is_shared:
            should_export = True
            # Check that we have a project_id
            project_id = volume['project_id']
            if not project_id:
                project_id = context.project_id

            if project_id:
                #TODO(justinsb): Use a real per-project password here
                chap_username = '******' + project_id
                # HP/Lefthand requires that the password be >= 12 characters
                chap_password = '******' + project_id
            else:
                msg = (_("Could not determine project for volume %s, "
                         "can't export") % (volume['name']))
                if force_create:
                    raise exception.Error(msg)
                else:
                    LOG.warn(msg)
                    should_export = False

        if should_export:
            cliq_args = {}
            cliq_args['volumeName'] = volume['name']
            cliq_args['chapName'] = chap_username
            cliq_args['targetSecret'] = chap_password

            self._cliq_run_xml("assignVolumeChap", cliq_args)

            model_update['provider_auth'] = ("CHAP %s %s" %
                                             (chap_username, chap_password))

        return model_update
Example #7
 def _connect_to_ssh(self):
     ssh = paramiko.SSHClient()
     #TODO(justinsb): We need a better SSH key policy
     ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
     if FLAGS.san_password:
         ssh.connect(FLAGS.san_ip,
                     port=FLAGS.san_ssh_port,
                     username=FLAGS.san_login,
                     password=FLAGS.san_password)
     elif FLAGS.san_privatekey:
         privatekeyfile = os.path.expanduser(FLAGS.san_privatekey)
         # It sucks that paramiko doesn't support DSA keys
         privatekey = paramiko.RSAKey.from_private_key_file(privatekeyfile)
         ssh.connect(FLAGS.san_ip,
                     port=FLAGS.san_ssh_port,
                     username=FLAGS.san_login,
                     pkey=privatekey)
     else:
         raise exception.Error(_("Specify san_password or san_privatekey"))
     return ssh
Example #8
    def has_role(self, user, role, project=None):
        """Checks existence of role for user

        If project is not specified, checks for a global role. If project
        is specified, checks for the union of the global role and the
        project role.

        Role 'projectmanager' only works for projects and simply checks to
        see if the user is the project_manager of the specified project. It
        is the same as calling is_project_manager(user, project).

        @type user: User or uid
        @param user: User to check.

        @type role: str
        @param role: Role to check.

        @type project: Project or project_id
        @param project: Project in which to look for local role.

        @rtype: bool
        @return: True if the user has the role.
        """
        if role == 'projectmanager':
            if not project:
                raise exception.Error(_("Must specify project"))
            return self.is_project_manager(user, project)

        global_role = self._has_role(User.safe_id(user),
                                     role,
                                     None)

        if not global_role:
            return global_role

        if not project or role in FLAGS.global_roles:
            return global_role

        return self._has_role(User.safe_id(user),
                              role,
                              Project.safe_id(project))
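A self-contained sketch of the rule the docstring describes, where a project role only counts when the corresponding global role is also held; the data structures and names below are illustrative, not the driver's API.

def has_role_sketch(global_roles, project_roles, role, project=None):
    """Project-level checks require the matching global role as well."""
    if role not in global_roles:
        return False
    if project is None:
        return True
    return role in project_roles.get(project, set())

# 'netadmin' is granted globally and on 'proj-1' only:
grants = {'proj-1': {'netadmin'}}
print(has_role_sketch({'netadmin'}, grants, 'netadmin', 'proj-1'))  # True
print(has_role_sketch({'netadmin'}, grants, 'netadmin', 'proj-2'))  # False
print(has_role_sketch({'netadmin'}, grants, 'netadmin'))            # True (global only)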
Example #9
        def _get_datastore_ref():
            """Get the datastore list and choose the first local storage."""
            data_stores = self._session._call_method(
                vim_util, "get_objects", "Datastore",
                ["summary.type", "summary.name"])
            for elem in data_stores:
                ds_name = None
                ds_type = None
                for prop in elem.propSet:
                    if prop.name == "summary.type":
                        ds_type = prop.val
                    elif prop.name == "summary.name":
                        ds_name = prop.val
                # Local storage identifier
                if ds_type == "VMFS":
                    data_store_name = ds_name
                    return data_store_name

            # No local (VMFS) datastore was found.
            msg = _("Couldn't get a local Datastore reference")
            LOG.exception(msg)
            raise exception.Error(msg)
Example #10
def execute(*cmd, **kwargs):
    """
    Helper method to execute command with optional retry.

    :cmd                Passed to subprocess.Popen.
    :process_input      Send to opened process.
    :check_exit_code    Single bool, int, or list of allowed exit codes.
                        Defaults to [0].  Raises exception.ProcessExecutionError
                        unless the program exits with one of these codes.
    :delay_on_retry     True | False. Defaults to True. If set to True, wait a
                        short amount of time before retrying.
    :attempts           How many times to retry cmd.
    :run_as_root        True | False. Defaults to False. If set to True,
                        the command is prefixed by the command specified
                        in the root_helper FLAG.

    :raises exception.Error on receiving unknown arguments
    :raises exception.ProcessExecutionError

    :returns a tuple, (stdout, stderr) from the spawned process, or None if
             the command fails.
    """

    process_input = kwargs.pop('process_input', None)
    check_exit_code = kwargs.pop('check_exit_code', [0])
    ignore_exit_code = False
    if isinstance(check_exit_code, bool):
        ignore_exit_code = not check_exit_code
        check_exit_code = [0]
    elif isinstance(check_exit_code, int):
        check_exit_code = [check_exit_code]
    delay_on_retry = kwargs.pop('delay_on_retry', True)
    attempts = kwargs.pop('attempts', 1)
    run_as_root = kwargs.pop('run_as_root', False)
    shell = kwargs.pop('shell', False)

    if len(kwargs):
        raise exception.Error(
            _('Got unknown keyword args '
              'to utils.execute: %r') % kwargs)

    if run_as_root:
        cmd = shlex.split(FLAGS.root_helper) + list(cmd)
    cmd = map(str, cmd)

    while attempts > 0:
        attempts -= 1
        try:
            LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd))
            _PIPE = subprocess.PIPE  # pylint: disable=E1101
            obj = subprocess.Popen(cmd,
                                   stdin=_PIPE,
                                   stdout=_PIPE,
                                   stderr=_PIPE,
                                   close_fds=True,
                                   shell=shell)
            result = None
            if process_input is not None:
                result = obj.communicate(process_input)
            else:
                result = obj.communicate()
            obj.stdin.close()  # pylint: disable=E1101
            _returncode = obj.returncode  # pylint: disable=E1101
            if _returncode:
                LOG.debug(_('Result was %s') % _returncode)
                if not ignore_exit_code \
                    and _returncode not in check_exit_code:
                    (stdout, stderr) = result
                    raise exception.ProcessExecutionError(
                        exit_code=_returncode,
                        stdout=stdout,
                        stderr=stderr,
                        cmd=' '.join(cmd))
            return result
        except exception.ProcessExecutionError:
            if not attempts:
                raise
            else:
                LOG.debug(_('%r failed. Retrying.'), cmd)
                if delay_on_retry:
                    greenthread.sleep(random.randint(20, 200) / 100.0)
        finally:
            # NOTE(termie): this appears to be necessary to let the subprocess
            #               call clean something up in between calls, without
            #               it two execute calls in a row hangs the second one
            greenthread.sleep(0)
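A short usage sketch for the helper documented above; the command, its arguments and the accepted exit codes are placeholders, and exception, LOG and _ come from the surrounding module.

try:
    stdout, stderr = execute('vgdisplay', 'nova-volumes',
                             run_as_root=True,
                             check_exit_code=[0, 5],   # treat exit code 5 as acceptable
                             attempts=3)
except exception.ProcessExecutionError as err:
    LOG.error(_("Command failed after retries: %s"), err)
    raise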
Example #11
    def connect_volume(self, connection_info, mount_device):
        """Attach the volume to instance_name"""
        iscsi_properties = connection_info['data']
        try:
            # NOTE(vish): if we are on the same host as engine volume, the
            #             discovery makes the target so we don't need to
            #             run --op new
            self._run_iscsiadm(iscsi_properties, ())
        except exception.ProcessExecutionError:
            self._run_iscsiadm(iscsi_properties, ('--op', 'new'))

        if iscsi_properties.get('auth_method'):
            self._iscsiadm_update(iscsi_properties,
                                  "node.session.auth.authmethod",
                                  iscsi_properties['auth_method'])
            self._iscsiadm_update(iscsi_properties,
                                  "node.session.auth.username",
                                  iscsi_properties['auth_username'])
            self._iscsiadm_update(iscsi_properties,
                                  "node.session.auth.password",
                                  iscsi_properties['auth_password'])

        self._run_iscsiadm(iscsi_properties, ("--login", ))

        self._iscsiadm_update(iscsi_properties, "node.startup", "automatic")

        if FLAGS.iscsi_helper == 'tgtadm':
            host_device = ("/dev/disk/by-path/ip-%s-iscsi-%s-lun-1" %
                           (iscsi_properties['target_portal'],
                            iscsi_properties['target_iqn']))
        else:
            host_device = ("/dev/disk/by-path/ip-%s-iscsi-%s-lun-0" %
                           (iscsi_properties['target_portal'],
                            iscsi_properties['target_iqn']))

        # The /dev/disk/by-path/... node is not always present immediately
        # TODO(justinsb): This retry-with-delay is a pattern, move to utils?
        tries = 0
        while not os.path.exists(host_device):
            if tries >= FLAGS.num_iscsi_scan_tries:
                raise exception.Error(
                    _("iSCSI device not found at %s") % (host_device))

            LOG.warn(
                _("ISCSI volume not yet found at: %(mount_device)s. "
                  "Will rescan & retry.  Try number: %(tries)s") % locals())

            # The rescan isn't documented as being necessary(?), but it helps
            self._run_iscsiadm(iscsi_properties, ("--rescan", ))

            tries = tries + 1
            if not os.path.exists(host_device):
                time.sleep(tries**2)

        if tries != 0:
            LOG.debug(
                _("Found iSCSI node %(mount_device)s "
                  "(after %(tries)s rescans)") % locals())

        connection_info['data']['device_path'] = host_device
        sup = super(LibvirtISCSIVolumeDriver, self)
        return sup.connect_volume(connection_info, mount_device)
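The TODO above notes that the retry-with-delay loop is a reusable pattern; here is a hedged sketch of what such a utility could look like. wait_for_path is a hypothetical name, not an existing helper, and exception and _ come from the surrounding module.

import os
import time

def wait_for_path(path, max_tries, rescan=None):
    """Poll for a device path, optionally rescanning, with growing delays."""
    for tries in range(max_tries):
        if os.path.exists(path):
            return tries
        if rescan:
            rescan()
        time.sleep((tries + 1) ** 2)
    raise exception.Error(_("iSCSI device not found at %s") % path)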
Example #12
    def ensure_bridge(_self, bridge, interface, net_attrs=None, gateway=True):
        """Create a bridge unless it already exists.

        :param interface: the interface to create the bridge on.
        :param net_attrs: dictionary with attributes used to create the bridge.

        If net_attrs is set, it will add the net_attrs['gateway'] to the bridge
        using net_attrs['broadcast'] and net_attrs['cidr'].  It will also add
        the ip_v6 address specified in net_attrs['cidr_v6'] if use_ipv6 is set.

        The code will attempt to move any ips that already exist on the
        interface onto the bridge and reset the default gateway if necessary.

        """
        if not _device_exists(bridge):
            LOG.debug(_('Starting Bridge interface for %s'), interface)
            _execute('brctl', 'addbr', bridge, run_as_root=True)
            _execute('brctl', 'setfd', bridge, 0, run_as_root=True)
            # _execute('brctl setageing %s 10' % bridge, run_as_root=True)
            _execute('brctl', 'stp', bridge, 'off', run_as_root=True)
            # (danwent) bridge device MAC address can't be set directly.
            # instead it inherits the MAC address of the first device on the
            # bridge, which will either be the vlan interface, or a
            # physical NIC.
            _execute('ip', 'link', 'set', bridge, 'up', run_as_root=True)

        if interface:
            out, err = _execute('brctl', 'addif', bridge, interface,
                            check_exit_code=False, run_as_root=True)

            # NOTE(vish): This will break if there is already an ip on the
            #             interface, so we move any ips to the bridge
            old_gateway = None
            out, err = _execute('route', '-n', run_as_root=True)
            for line in out.split('\n'):
                fields = line.split()
                if fields and fields[0] == '0.0.0.0' and \
                                fields[-1] == interface:
                    old_gateway = fields[1]
                    _execute('route', 'del', 'default', 'gw', old_gateway,
                             'dev', interface, check_exit_code=False,
                             run_as_root=True)
            out, err = _execute('ip', 'addr', 'show', 'dev', interface,
                                'scope', 'global', run_as_root=True)
            for line in out.split('\n'):
                fields = line.split()
                if fields and fields[0] == 'inet':
                    params = fields[1:-1]
                    _execute(*_ip_bridge_cmd('del', params, fields[-1]),
                                run_as_root=True)
                    _execute(*_ip_bridge_cmd('add', params, bridge),
                                run_as_root=True)
            if old_gateway:
                _execute('route', 'add', 'default', 'gw', old_gateway,
                            run_as_root=True)

            if (err and err != "device %s is already a member of a bridge; "
                     "can't enslave it to bridge %s.\n" % (interface, bridge)):
                raise exception.Error('Failed to add interface: %s' % err)

        # Don't forward traffic unless we were told to be a gateway
        if gateway:
            iptables_manager.ipv4['filter'].add_rule('FORWARD',
                                             '--in-interface %s -j ACCEPT' % \
                                             bridge)
            iptables_manager.ipv4['filter'].add_rule('FORWARD',
                                             '--out-interface %s -j ACCEPT' % \
                                             bridge)
        else:
            iptables_manager.ipv4['filter'].add_rule('FORWARD',
                                             '--in-interface %s -j DROP' % \
                                             bridge)
            iptables_manager.ipv4['filter'].add_rule('FORWARD',
                                             '--out-interface %s -j DROP' % \
                                             bridge)
Example #13
 def _handler_class(mode):
     """Look up the appropriate class to use based on MODE."""
     for cls in (loop.Mount, nbd.Mount, guestfs.Mount):
         if cls.mode == mode:
             return cls
     raise exception.Error(_("unknown disk image handler: %s" % mode))
Example #14
 def local_path(self, volume):
     # TODO(justinsb): Is this needed here?
     raise exception.Error(_("local_path not supported"))
Example #15
 def wrapped(*args, **kwargs):
     if not FLAGS.build_plan_encryption_key:
         raise exception.Error(_("--build_plan_encryption_key not set"))
     return func(*args, **kwargs)
Example #16
def bad_function_error():
    raise exception.Error()
Example #17
 def check_for_setup_error(self):
     """Returns an error if prerequisites aren't met"""
     (stdout, stderr) = self._execute('rados', 'lspools')
     pools = stdout.split("\n")
     if FLAGS.rbd_pool not in pools:
         raise exception.Error(_("rbd has no pool %s") % FLAGS.rbd_pool)
Example #18
def check_for_session_leaks():
    if len(_db_content['session']) > 0:
        raise exception.Error('Sessions have leaked: %s' %
                              _db_content['session'])