Example #1
0
def check_string_length(value, name, min_length=0, max_length=None):
    """Check the length of specified string
    :param value: the value of the string
    :param name: the name of the string
    :param min_length: the min_length of the string
    :param max_length: the max_length of the string
    """
    if not isinstance(value, six.string_types):
        msg = _("%s is not a string or unicode") % name
        raise exception.InvalidInput(message=msg)

    if len(value) < min_length:
        msg = _("%(name)s has a minimum character requirement of "
                "%(min_length)s.") % {
                    'name': name,
                    'min_length': min_length
                }
        raise exception.InvalidInput(message=msg)

    if max_length and len(value) > max_length:
        msg = _("%(name)s has more than %(max_length)s "
                "characters.") % {
                    'name': name,
                    'max_length': max_length
                }
        raise exception.InvalidInput(message=msg)
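
A minimal usage sketch for the helper above, assuming a caller that validates a user-supplied display name; the variable display_name and the 255-character limit are hypothetical, not taken from the source.

# Hypothetical call site: reject a non-string, empty, or over-long name.
# Raises exception.InvalidInput with a descriptive message on failure.
check_string_length(display_name, 'display_name',
                    min_length=1, max_length=255)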
Example #2
0
    def __init__(self, name, loader=None):
        """Initialize, but do not start the WSGI server.

        :param name: The name of the WSGI server given to the loader.
        :param loader: Loads the WSGI application using the given name.
        :returns: None

        """
        self.name = name
        self.manager = self._get_manager()
        self.loader = loader or wsgi.Loader()
        self.app = self.loader.load_app(name)
        self.host = getattr(CONF, '%s_listen' % name, "0.0.0.0")
        self.port = getattr(CONF, '%s_listen_port' % name, 0)
        LOG.error("test host: %(host)s, port: %(port)s", {
            'host': self.host,
            'port': self.port
        })
        self.workers = (getattr(CONF, '%s_workers' % name, None)
                        or processutils.get_worker_count())
        if self.workers and self.workers < 1:
            worker_name = '%s_workers' % name
            msg = (_("%(worker_name)s value of %(workers)d is invalid, "
                     "must be greater than 0.") % {
                         'worker_name': worker_name,
                         'workers': self.workers
                     })
            raise exception.InvalidInput(msg)

        self.server = wsgi.Server(name,
                                  self.app,
                                  host=self.host,
                                  port=self.port)
        self.periodic_fuzzy_delay = None
        self.periodic_interval = 10
Example #3
0
def check_exclusive_options(**kwargs):
    """Checks that only one of the provided options is actually not-none.

    Iterates over all the kwargs passed in and checks that only one of them
    is not None; if more than one is not None, an exception is raised naming
    the conflicting arguments.
    """

    if not kwargs:
        return

    pretty_keys = kwargs.pop("pretty_keys", True)
    exclusive_options = {}
    for (k, v) in kwargs.iteritems():
        if v is not None:
            exclusive_options[k] = True

    if len(exclusive_options) > 1:
        # Change the format of the names from pythonic to
        # something that is more readable.
        #
        # Ex: 'the_key' -> 'the key'
        if pretty_keys:
            names = [k.replace('_', ' ') for k in kwargs.keys()]
        else:
            names = kwargs.keys()
        names = ", ".join(sorted(names))
        msg = (_("May specify only one of %s") % (names))
        raise exception.InvalidInput(reason=msg)
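
A minimal usage sketch for the check above, assuming a volume-create API that accepts several mutually exclusive source arguments; the option names snapshot_id, source_volid, and image_id are hypothetical.

# Hypothetical caller: at most one volume source may be supplied.
# If two or more are not None, InvalidInput is raised with a message such as
# "May specify only one of image id, snapshot id, source volid".
check_exclusive_options(snapshot_id=snapshot_id,
                        source_volid=source_volid,
                        image_id=image_id)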
Example #4
0
    def extract_loadbalanceMembers(self, member_ids, pool_name):

        if not member_ids or not pool_name:
            LOG.error('Create LB member resource error: %s', pool_name)
            _msg = 'Create LB member resource error: member or pool is null.'
            raise exception.InvalidInput(reason=_msg)
        try:
            for member_id in member_ids:
                self.extract_loadbalanceMember(member_id, pool_name)
        except Exception as e:
            _msg = 'Create LB member resource error: %s' % e
            LOG.error(_msg)
            raise exception.ResourceExtractFailed(reason=_msg)
Example #5
0
    def extract_loadbalanceListeners(self, listener_ids, vip_id, vip_name):
        if not listener_ids:
            _msg = 'Create LB listener resource error: id is null.'
            LOG.error(_msg)
            raise exception.InvalidInput(reason=_msg)

        try:
            for listener_id in listener_ids:
                self.extract_loadbalanceListener(listener_id, vip_id, vip_name)
        except exception.ResourceExtractFailed:
            raise
        except Exception as e:
            _msg = 'Create LB listener resource error: %s' % e
            LOG.error(_msg)
            raise exception.ResourceExtractFailed(reason=_msg)
Example #6
0
    def extract_loadbalanceVips(self, vip_ids):
        if not vip_ids:
            _msg = 'Create LB vip resource error: id is null.'
            LOG.error(_msg)
            raise exception.InvalidInput(reason=_msg)

        try:
            for vip_id in vip_ids:
                self.extract_loadbalanceVip(vip_id)
        except exception.ResourceExtractFailed:
            raise
        except Exception as e:
            _msg = 'Create LB vip resource error: %s' % e
            LOG.error(_msg)
            raise exception.ResourceExtractFailed(reason=_msg)
Example #7
0
    def extract_loadbalancePools(self, pool_ids):

        if not pool_ids:
            _msg = 'Create LB pool resource error: id is null.'
            LOG.error(_msg)
            raise exception.InvalidInput(reason=_msg)

        try:
            for pool_id in pool_ids:
                self.extract_loadbalancePool(pool_id)
        except exception.ResourceExtractFailed:
            raise
        except Exception as e:
            _msg = 'Create LB pool resource error: %s' % e
            LOG.error(_msg)
            raise exception.ResourceExtractFailed(reason=_msg)
Example #8
0
def is_all_tenants(search_opts):
    """Checks to see if the all_tenants flag is in search_opts

    :param dict search_opts: The search options for a request
    :returns: boolean indicating if all_tenants are being requested or not
    """
    all_tenants = search_opts.get('all_tenants')
    if all_tenants:
        try:
            all_tenants = strutils.bool_from_string(all_tenants, True)
        except ValueError as err:
            raise exception.InvalidInput(six.text_type(err))
    else:
        # The empty string is considered enabling all_tenants
        all_tenants = 'all_tenants' in search_opts
    return all_tenants
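
A minimal usage sketch for the helper above, assuming a webob-style request object req inside an API controller; the surrounding names are hypothetical.

# Hypothetical call site: decide whether a listing should span all tenants
# based on the request's query string.
search_opts = dict(req.GET)
all_tenants = is_all_tenants(search_opts)
# '?all_tenants', '?all_tenants=1' and '?all_tenants=true' all yield True;
# an unparseable value such as '?all_tenants=maybe' raises InvalidInput.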
Example #9
0
    def __init__(self, ip, port, conn_timeout, login, password=None,
                 privatekey=None, *args, **kwargs):
        self.ip = ip
        self.port = port
        self.login = login
        self.password = password
        self.conn_timeout = conn_timeout if conn_timeout else None
        self.privatekey = privatekey
        self.hosts_key_file = None

        # Validate good config setting here.
        # Paramiko handles the case where the file is inaccessible.
        if not CONF.ssh_hosts_key_file:
            raise exception.ParameterNotFound(param='ssh_hosts_key_file')
        elif not os.path.isfile(CONF.ssh_hosts_key_file):
            # If using the default path, just create the file.
            if CONF.state_path in CONF.ssh_hosts_key_file:
                open(CONF.ssh_hosts_key_file, 'a').close()
            else:
                msg = (_("Unable to find ssh_hosts_key_file: %s") %
                       CONF.ssh_hosts_key_file)
                raise exception.InvalidInput(reason=msg)

        if 'hosts_key_file' in kwargs:
            self.hosts_key_file = kwargs.pop('hosts_key_file')
            LOG.info(_LI("Secondary ssh hosts key file %(kwargs)s will be "
                         "loaded along with %(conf)s "
                         "from /etc/conveyor.conf."),
                     {'kwargs': self.hosts_key_file,
                      'conf': CONF.ssh_hosts_key_file})

        LOG.debug("Setting strict_ssh_host_key_policy to '%(policy)s' "
                  "using ssh_hosts_key_file '%(key_file)s'.",
                  {'policy': CONF.strict_ssh_host_key_policy,
                   'key_file': CONF.ssh_hosts_key_file})

        self.strict_ssh_host_key_policy = CONF.strict_ssh_host_key_policy

        if not self.hosts_key_file:
            self.hosts_key_file = CONF.ssh_hosts_key_file
        else:
            self.hosts_key_file += ',' + CONF.ssh_hosts_key_file

        super(SSHPool, self).__init__(*args, **kwargs)
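
A minimal construction sketch for the pool above, assuming CONF.ssh_hosts_key_file is already configured; the address, credentials, and secondary hosts-key path are hypothetical.

# Hypothetical usage: build an SSH connection pool to a storage backend,
# loading an extra known-hosts file alongside CONF.ssh_hosts_key_file.
pool = SSHPool('192.0.2.10', 22, 30, 'admin',
               password='secret',
               hosts_key_file='/etc/conveyor/ssh_known_hosts')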
Example #10
0
    def extract_consistency_groups(self,
                                   cg_ids,
                                   parent_name=None,
                                   parent_resources=None):
        if not cg_ids:
            _msg = 'Create consistency groups resource error: id is null.'
            LOG.error(_msg)
            raise exception.InvalidInput(reason=_msg)

        try:
            for cg_id in cg_ids:
                self.extract_consistency_group(cg_id, parent_name,
                                               parent_resources)
        except exception.ResourceExtractFailed:
            raise
        except Exception as e:
            _msg = 'Create consistency groups resource error: %s' % e
            LOG.error(_msg)
            raise exception.ResourceExtractFailed(reason=_msg)
Example #11
0
    def extract_loadbalanceHealthmonitors(self, healthmonitor_ids):

        if not healthmonitor_ids:
            _msg = 'Create LB health monitor resource error: id is null.'
            LOG.error(_msg)
            raise exception.InvalidInput(reason=_msg)

        res_list = []
        try:
            for healthmonitor_id in healthmonitor_ids:
                res = self.extract_loadbalanceHealthmonitor(healthmonitor_id)
                res_list.append(res)
        except exception.ResourceExtractFailed:
            raise
        except Exception as e:
            _msg = 'Create LB health monitor resource error: %s' % e
            LOG.error(_msg)
            raise exception.ResourceExtractFailed(reason=_msg)

        return res_list
Example #12
0
    def create_volume(self,
                      context,
                      size,
                      name,
                      snapshot_id=None,
                      description=None,
                      volume_type=None,
                      user_id=None,
                      project_id=None,
                      availability_zone=None,
                      metadata=None,
                      imageRef=None,
                      scheduler_hints=None):

        # name/description are added below using the field names expected by
        # the detected cinderclient version.
        kwargs = dict(snapshot_id=snapshot_id,
                      volume_type=volume_type,
                      user_id=context.user_id,
                      project_id=context.project_id,
                      availability_zone=availability_zone,
                      metadata=metadata,
                      imageRef=imageRef,
                      scheduler_hints=scheduler_hints)

        version = get_cinder_client_version(context)
        if version == '1':
            kwargs['display_name'] = name
            kwargs['display_description'] = description
        elif version == '2':
            kwargs['name'] = name
            kwargs['description'] = description
        try:
            volume = cinderclient(context).volumes.create(size, **kwargs)

            return _untranslate_volume_summary_view(context, volume)
        except cinder_exception.OverLimit:
            raise exception.QuotaError(overs='volumes')
        except cinder_exception.BadRequest as e:
            raise exception.InvalidInput(reason=unicode(e))
Example #13
0
    def extract_volume_types(self, volume_type_ids, parent_name=None,
                             parent_resources=None):
        if not volume_type_ids:
            _msg = 'Create volume type resource error: id is null.'
            LOG.error(_msg)
            raise exception.InvalidInput(reason=_msg)
        volume_type_res = []
        try:
            for volume_type_id in volume_type_ids:
                type_res = self.extract_volume_type(volume_type_id,
                                                    parent_name,
                                                    parent_resources)
                volume_type_res.append(type_res)
        except (exception.ResourceExtractFailed,
                exception.ResourceNotFound):
            raise
        except Exception as e:
            _msg = 'Create volume type resource error: %s' % e
            LOG.error(_msg)
            raise exception.ResourceExtractFailed(reason=_msg)

        return volume_type_res
Example #14
0
 def wrapper(self, ctx, volume_id, *args, **kwargs):
     try:
         res = method(self, ctx, volume_id, *args, **kwargs)
     except (cinder_exception.ClientException,
             keystone_exception.ClientException):
         exc_type, exc_value, exc_trace = sys.exc_info()
         if isinstance(exc_value, (keystone_exception.NotFound,
                                   cinder_exception.NotFound)):
             exc_value = exception.VolumeNotFound(volume_id=volume_id)
         elif isinstance(exc_value, (keystone_exception.BadRequest,
                                     cinder_exception.BadRequest)):
             exc_value = exception.InvalidInput(
                 reason=six.text_type(exc_value))
         raise exc_value, None, exc_trace
     except (cinder_exception.ConnectionError,
             keystone_exception.ConnectionError):
         exc_type, exc_value, exc_trace = sys.exc_info()
         exc_value = exception.CinderConnectionFailed(
             reason=six.text_type(exc_value))
         raise exc_value, None, exc_trace
     return res
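
The function above is the inner wrapper of an exception-translation decorator: it converts cinder/keystone client errors into the project's own exception types while re-raising with the original traceback (Python 2 three-argument raise). A minimal sketch of how such a wrapper is typically packaged is shown below; the decorator name translate_volume_exception and the delegating body are assumptions for illustration only.

import functools


def translate_volume_exception(method):
    # Hypothetical outer decorator: the real wrapper body is the one shown
    # above; this sketch only delegates, to illustrate the overall shape.
    @functools.wraps(method)
    def wrapper(self, ctx, volume_id, *args, **kwargs):
        return method(self, ctx, volume_id, *args, **kwargs)
    return wrapper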
Example #15
0
    def _copy_volume_data(self, context, resource_name,
                          des_gw_ip, vgw_id, template, dev_name):

        LOG.debug('Clone volume driver copy data start for %s', resource_name)
        resources = template.get('resources')
        volume_res = resources.get(resource_name)
        volume_id = volume_res.get('id')
        volume_ext_properties = volume_res.get('extra_properties')
        # 1. get gateway vm conveyor agent service ip and port
        des_gw_port = str(CONF.v2vgateway_api_listen_port)
        des_gw_url = des_gw_ip + ':' + des_gw_port
        # data transformer protocol (ftp/fillp)
        data_trans_protocol = CONF.data_transformer_procotol
        # data_trans_ports = CONF.trans_ports
        # trans_port = data_trans_ports[0]
        trans_port = utils.get_next_port_for_vgw(vgw_id)
        # 2. get source cloud gateway vm conveyor agent service ip and port
        src_gw_url = volume_ext_properties.get('gw_url')

        src_urls = src_gw_url.split(':')

        if len(src_urls) != 2:
            LOG.error("Input source gw url error: %s", src_gw_url)
            msg = "Input source gw url error: " + src_gw_url
            raise exception.InvalidInput(reason=msg)

        src_gw_ip = src_urls[0]
        src_gw_port = src_urls[1]

        # 3. get volume mount point and disk format info

        boot_index = 1
        src_mount_point = "/opt/" + volume_id
        # Default these so they are defined even if extra_properties is
        # missing; the references further down would otherwise raise
        # NameError.
        src_dev_format = None
        src_vol_sys_dev = None
        des_dev_name = dev_name

        if volume_ext_properties:
            src_dev_format = volume_ext_properties.get('guest_format')
            # volume dev name in system
            src_vol_sys_dev = volume_ext_properties.get('sys_dev_name')
            boot_index = volume_ext_properties.get('boot_index', 1)

            if dev_name:
                des_dev_name = dev_name
            else:
                des_dev_name = src_vol_sys_dev

        if not src_dev_format:
            client = birdiegatewayclient.get_birdiegateway_client(src_gw_ip,
                                                                  src_gw_port)
            d_format = client.vservices.get_disk_format(src_vol_sys_dev)
            src_dev_format = d_format.get('disk_format')

        # if the disk is not formatted, there is no data to copy
        if not src_dev_format and data_trans_protocol == 'ftp':
            rsp = {'volume_id': volume_id,
                   'des_ip': None,
                   'des_port': None,
                   'copy_tasks': None}
            return rsp

        mount_point = []
        task_ids = []

        if boot_index not in [0, '0'] and data_trans_protocol == 'ftp':
            mount_point.append(src_mount_point)

        # 4. copy data
        client = birdiegatewayclient.get_birdiegateway_client(des_gw_ip,
                                                              des_gw_port)
        clone_rsp = client.vservices.clone_volume(
                                        src_vol_sys_dev,
                                        des_dev_name,
                                        src_dev_format,
                                        mount_point,
                                        src_gw_url,
                                        des_gw_url,
                                        trans_protocol=data_trans_protocol,
                                        trans_port=trans_port)
        task_id = clone_rsp.get('body').get('task_id')
        task_ids.append(task_id)

        rsp = {'volume_id': volume_id,
               'des_ip': des_gw_ip,
               'des_port': des_gw_port,
               'copy_tasks': task_ids}

        LOG.debug('Clone volume driver copy data end for %s', resource_name)
        return rsp
Example #16
0
    def __init__(self,
                 name,
                 app,
                 host=None,
                 port=None,
                 pool_size=None,
                 protocol=eventlet.wsgi.HttpProtocol,
                 backlog=128):
        """Initialize, but do not start, a WSGI server.

        :param name: Pretty name for logging.
        :param app: The WSGI application to serve.
        :param host: IP address to serve the application.
        :param port: Port number on which to serve the application.
        :param pool_size: Maximum number of eventlets to spawn concurrently.
        :returns: None

        """
        # Allow operators to customize http requests max header line size.
        eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line
        self.client_socket_timeout = CONF.client_socket_timeout or None
        self.name = name
        self.app = app
        self._host = host or "0.0.0.0"
        self._port = port or 0
        self._server = None
        self._socket = None
        self._protocol = protocol
        self.pool_size = pool_size or self.default_pool_size
        self._pool = eventlet.GreenPool(self.pool_size)
        self._logger = logging.getLogger("eventlet.wsgi.server")

        if backlog < 1:
            raise exception.InvalidInput(
                reason='The backlog must be at least 1')

        bind_addr = (host, port)
        # TODO(dims): eventlet's green dns/socket module does not actually
        # support IPv6 in getaddrinfo(). We need to get around this in the
        # future or monitor upstream for a fix
        try:
            info = socket.getaddrinfo(bind_addr[0], bind_addr[1],
                                      socket.AF_UNSPEC, socket.SOCK_STREAM)[0]
            family = info[0]
            bind_addr = info[-1]
        except Exception:
            family = socket.AF_INET

        cert_file = CONF.ssl_cert_file
        key_file = CONF.ssl_key_file
        ca_file = CONF.ssl_ca_file
        self._use_ssl = cert_file or key_file

        if cert_file and not os.path.exists(cert_file):
            raise RuntimeError(_("Unable to find cert_file : %s") % cert_file)

        if ca_file and not os.path.exists(ca_file):
            raise RuntimeError(_("Unable to find ca_file : %s") % ca_file)

        if key_file and not os.path.exists(key_file):
            raise RuntimeError(_("Unable to find key_file : %s") % key_file)

        if self._use_ssl and (not cert_file or not key_file):
            raise RuntimeError(
                _("When running server in SSL mode, you "
                  "must specify both a cert_file and "
                  "key_file option value in your "
                  "configuration file."))

        retry_until = time.time() + 30
        while not self._socket and time.time() < retry_until:
            try:
                self._socket = eventlet.listen(bind_addr,
                                               backlog=backlog,
                                               family=family)
            except socket.error as err:
                if err.args[0] != errno.EADDRINUSE:
                    raise
                eventlet.sleep(0.1)

        if not self._socket:
            raise RuntimeError(
                _("Could not bind to %(host)s:%(port)s "
                  "after trying for 30 seconds") % {
                      'host': host,
                      'port': port
                  })

        (self._host, self._port) = self._socket.getsockname()[0:2]
        LOG.info(_LI("%(name)s listening on %(_host)s:%(_port)s"), {
            'name': self.name,
            '_host': self._host,
            '_port': self._port
        })
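
A minimal construction sketch for the server above, assuming this is the wsgi Server class used in Example #2; my_wsgi_app is a placeholder WSGI callable and 'osapi_conveyor' is a made-up service name.

# Hypothetical usage: bind the application on all interfaces with an
# ephemeral port; the constructor resolves the actual host/port after
# binding and logs the listening address.
server = Server('osapi_conveyor', my_wsgi_app, host='0.0.0.0', port=0)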
Example #17
0
 def set_volume_shareable(self, context, volume_id, flag):
     try:
         item = cinderclient(context).volumes.get(volume_id)
         cinderclient(context).volumes.set_shareable(item, flag)
     except cinder_exception.BadRequest as e:
         raise exception.InvalidInput(reason=unicode(e))
Example #18
0
 def reset_state(self, context, volume_id, state):
     try:
         item = cinderclient(context).volumes.get(volume_id)
         cinderclient(context).volumes.reset_state(item, state)
     except cinder_exception.BadRequest as e:
         raise exception.InvalidInput(reason=unicode(e))
Example #19
0
    def _copy_volume_data(self, context, resource_name, template,
                          trans_data_wait_fun=None, port_wait_fun=None):
        '''Copy data for the volumes attached to the given server resource.'''
        resources = template.get('resources')
        instance = resources.get(resource_name)
        # 2. get server info
        server_id = instance.get('id')
        stack_id = template.get('stack_id')

        try:
            server = self.nova_api.get_server(context, server_id)
        except Exception as e:
            LOG.error("Query server %(server_id)s error: %(error)s",
                      {'server_id': server_id, 'error': e})
            raise exception.ServerNotFound(server_id=server_id)

        # 3. get volumes attached to this server
        properties = instance.get('properties')
        ext_properties = instance.get('extra_properties')
        volumes = properties.get('block_device_mapping_v2')
        if not volumes:
            LOG.warn("Clone instance warning: instance does not have volume.")
            rsp = {'server_id': server_id,
                   'port_id': None,
                   'des_ip': None,
                   'des_port': None,
                   'copy_tasks': []}
            return rsp
        bdms = []

        for v_volume in volumes:
            # If the volume id is a plain string or a get_param reference,
            # the volume already exists, so its data is not copied.
            vol_res_id = v_volume.get('volume_id')
            if isinstance(vol_res_id, six.string_types) or \
                    vol_res_id.get('get_param'):
                _msg = ("Instance clone warning: skipping data copy for "
                        "volume: %s" % vol_res_id)
                LOG.debug(_msg)
                continue
            vol_res_name = v_volume.get('volume_id').get('get_resource')
            sys_clone = ext_properties.get('sys_clone')
            boot_index = v_volume.get('boot_index')
            # 3.1 if the system volume is not being cloned,
            # don't add it to bdms
            if not sys_clone and boot_index in [0, '0']:
                continue
            volume_ext_properties = \
                resources.get(vol_res_name).get('extra_properties')
            if not volume_ext_properties.get('copy_data'):
                continue
            # 3.2 get volume id
            volume_id = self._get_resource_id(context, vol_res_name, stack_id)
            v_volume['id'] = volume_id
            if volume_ext_properties:
                v_volume['guest_format'] = \
                    volume_ext_properties.get('guest_format')
                v_volume['mount_point'] = \
                    volume_ext_properties.get('mount_point')
                # volume dev name in system
                vol_sys_dev = volume_ext_properties.get('sys_dev_name')
                # if not None, use it, otherwise use the default name
                if vol_sys_dev:
                    v_volume['device_name'] = vol_sys_dev
            bdms.append(v_volume)

        if not bdms:
            return {}
        # 4. create transform data port to new instances
        server_az = server.get('OS-EXT-AZ:availability_zone', None)
        id = server.get('id', None)
        if not server_az:
            LOG.error('Can not get the availability_zone of server %s', id)
            raise exception.AvailabilityZoneNotFound(server_uuid=id)

        migrate_net_map = CONF.migrate_net_map
        migrate_net_id = migrate_net_map.get(server_az, None)

        if migrate_net_id:
            # 4.1 call neutron api create port
            LOG.debug("Instance template driver attach port to instance start")
            net_info = self.nova_api.interface_attach(context, id,
                                                      migrate_net_id,
                                                      port_id=None,
                                                      fixed_ip=None)

            interface_attachment = net_info._info
            if interface_attachment:
                LOG.debug('The interface attachment info is %s ' %
                          str(interface_attachment))
                des_gw_ip = \
                    interface_attachment.get('fixed_ips')[0].get('ip_address')
                port_id = interface_attachment.get('port_id')
            else:
                LOG.error("Instance template driver attach port failed")
                raise exception.NoMigrateNetProvided(server_uuid=id)
        else:
            retrying = 1
            while retrying < 300:
                des_gw_ip = self._get_server_ip(context, server_id)
                if des_gw_ip:
                    break
                retrying += 1
                time.sleep(2)
            port_id = None

        LOG.debug("Instance template driver attach port end: %s", des_gw_ip)
        if not des_gw_ip:
            _msg = "New clone or migrate VM data transformer IP is None"
            raise exception.V2vException(message=_msg)
        des_port = str(CONF.v2vgateway_api_listen_port)
        des_gw_url = des_gw_ip + ":" + des_port

        # data transformer protocol (ftp/fillp)
        data_trans_protocol = CONF.data_transformer_procotol
        data_trans_ports = CONF.trans_ports
        trans_port = data_trans_ports[0]
        src_gw_url = ext_properties.get('gw_url')

        src_urls = src_gw_url.split(':')

        if len(src_urls) != 2:
            LOG.error("Input source gw url error: %s", src_gw_url)
            msg = "Input source gw url error: " + src_gw_url
            raise exception.InvalidInput(reason=msg)
        # 5. request birdiegateway service to clone each volume data
        # record all volume data copy task id
        task_ids = []
        for bdm in bdms:
            # 5.1 query the volume device name on the cloned VM
            # src_dev_name = "/dev/sdc"
            src_dev_name = bdm.get('device_name')
            client = birdiegatewayclient.get_birdiegateway_client(des_gw_ip,
                                                                  des_port)
            des_dev_name = \
                client.vservices.get_disk_name(bdm.get('id')).get('dev_name')
            if not des_dev_name:
                des_dev_name = src_dev_name

            src_dev_format = bdm.get('guest_format')
            # if the template does not have disk format and mount point info,
            # query them from the conveyor-agent
            if not src_dev_format:
                client = \
                    birdiegatewayclient.get_birdiegateway_client(src_urls[0],
                                                                 src_urls[1])
                d_format = client.vservices.get_disk_format(src_dev_name)
                src_dev_format = d_format.get('disk_format')
            # if the volume is not formatted, there is no data to transfer
            if not src_dev_format and CONF.data_transformer_procotol == 'ftp':
                continue

            src_mount_point = bdm.get('mount_point')

            if not src_mount_point:
                client = \
                    birdiegatewayclient.get_birdiegateway_client(src_urls[0],
                                                                 src_urls[1])
                m_info = client.vservices.get_disk_mount_point(src_dev_name)
                src_mount_point = m_info.get('mount_point')

            if not src_mount_point and CONF.data_transformer_procotol == 'ftp':
                continue

            mount_point = []
            mount_point.append(src_mount_point)
            LOG.debug('Volume %(dev_name)s disk format is %(disk_format)s'
                      ' and mount point is %(point)s',
                      {'dev_name': src_dev_name,
                       'disk_format': src_dev_format,
                       'point': src_mount_point})

            # get conveyor gateway client to call birdiegateway api
            LOG.debug("Instance template driver transform data start")
            client = birdiegatewayclient.get_birdiegateway_client(des_gw_ip,
                                                                  des_port)
            clone_rsp = client.vservices.clone_volume(
                            src_dev_name,
                            des_dev_name,
                            src_dev_format,
                            mount_point,
                            src_gw_url,
                            des_gw_url,
                            trans_protocol=data_trans_protocol,
                            trans_port=trans_port)
            task_id = clone_rsp.get('body').get('task_id')
            if not task_id:
                LOG.warn("Clone volume %(dev_name)s response is %(rsp)s",
                         {'dev_name': des_dev_name, 'rsp': clone_rsp})
                continue
            task_ids.append(task_id)

        rsp = {'server_id': server_id,
               'port_id': port_id,
               'des_ip': des_gw_ip,
               'des_port': des_port,
               'copy_tasks': task_ids}
        LOG.debug("Instance template driver transform data end")
        return rsp
Example #20
0
def paginate_query(query,
                   model,
                   limit,
                   sort_keys,
                   marker=None,
                   sort_dir=None,
                   sort_dirs=None,
                   offset=None):
    """Returns a query with sorting / pagination criteria added.

    Pagination works by requiring a unique sort_key, specified by sort_keys.
    (If sort_keys is not unique, then we risk looping through values.)
    We use the last row in the previous page as the 'marker' for pagination.
    So we must return values that follow the passed marker in the order.
    With a single-valued sort_key, this would be easy: sort_key > X.
    With a compound-values sort_key, (k1, k2, k3) we must do this to repeat
    the lexicographical ordering:
    (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)

    We also have to cope with different sort_directions.

    Typically, the id of the last row is used as the client-facing pagination
    marker, then the actual marker object must be fetched from the db and
    passed in to us as marker.

    :param query: the query object to which we should add paging/sorting
    :param model: the ORM model class
    :param limit: maximum number of items to return
    :param sort_keys: array of attributes by which results should be sorted
    :param marker: the last item of the previous page; we return the next
                    results after this value.
    :param sort_dir: direction in which results should be sorted (asc, desc)
    :param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys
    :param offset: the number of items to skip from the marker or from the
                    first element.

    :rtype: sqlalchemy.orm.query.Query
    :return: The query with sorting/pagination added.
    """

    if 'id' not in sort_keys:
        # TODO(justinsb): If this ever gives a false-positive, check
        # the actual primary key, rather than assuming its id
        LOG.warning('Id not in sort_keys; is sort_keys unique?')

    assert (not (sort_dir and sort_dirs))

    # Default the sort direction to ascending
    if sort_dirs is None and sort_dir is None:
        sort_dir = 'asc'

    # Ensure a per-column sort direction
    if sort_dirs is None:
        sort_dirs = [sort_dir for _sort_key in sort_keys]

    assert (len(sort_dirs) == len(sort_keys))

    # Add sorting
    for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):
        sort_dir_func = {
            'asc': sqlalchemy.asc,
            'desc': sqlalchemy.desc,
        }[current_sort_dir]

        try:
            sort_key_attr = getattr(model, current_sort_key)
        except AttributeError:
            raise exception.InvalidInput(reason='Invalid sort key')
#         if not api.is_orm_value(sort_key_attr):
#             raise exception.InvalidInput(reason='Invalid sort key')
        query = query.order_by(sort_dir_func(sort_key_attr))

    # Add pagination
    if marker is not None:
        marker_values = []
        for sort_key in sort_keys:
            v = getattr(marker, sort_key)
            if v is None:
                v = _get_default_column_value(model, sort_key)
            marker_values.append(v)

        # Build up an array of sort criteria as in the docstring
        criteria_list = []
        for i in range(0, len(sort_keys)):
            crit_attrs = []
            for j in range(0, i):
                model_attr = getattr(model, sort_keys[j])
                default = _get_default_column_value(model, sort_keys[j])
                attr = sa_sql.expression.case(
                    [(model_attr.isnot(None), model_attr)],
                    else_=default)
                crit_attrs.append((attr == marker_values[j]))

            model_attr = getattr(model, sort_keys[i])
            default = _get_default_column_value(model, sort_keys[i])
            attr = sa_sql.expression.case(
                [(model_attr.isnot(None), model_attr)],
                else_=default)
            if sort_dirs[i] == 'desc':
                crit_attrs.append((attr < marker_values[i]))
            elif sort_dirs[i] == 'asc':
                crit_attrs.append((attr > marker_values[i]))
            else:
                raise ValueError(
                    _("Unknown sort direction, "
                      "must be 'desc' or 'asc'"))

            criteria = sqlalchemy.sql.and_(*crit_attrs)
            criteria_list.append(criteria)

        f = sqlalchemy.sql.or_(*criteria_list)
        query = query.filter(f)

    if limit is not None:
        query = query.limit(limit)

    if offset:
        query = query.offset(offset)

    return query
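
A minimal usage sketch for the pagination helper above, assuming a SQLAlchemy session and a Volume model; the model, column names, and marker lookup are hypothetical.

# Hypothetical call site: page through volumes 50 at a time, newest first,
# breaking ties on id so the ordering (and therefore the paging) is stable.
marker_row = session.query(Volume).get(marker_id) if marker_id else None
query = paginate_query(session.query(Volume),
                       Volume,
                       limit=50,
                       sort_keys=['created_at', 'id'],
                       marker=marker_row,
                       sort_dirs=['desc', 'asc'])
volumes = query.all()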