def upgrade(migrate_engine):
    """Make volume_type columns non-nullable.

    Alters ``volumes.volume_type_id``, ``snapshots.volume_type_id`` and
    ``encryption.volume_type_id`` to ``NOT NULL``.  The first two can fail
    if untyped rows remain (they must first be moved to the ``__DEFAULT__``
    volume type by the online data migrations), in which case a
    ValidationError telling the operator what to run is raised instead.

    :param migrate_engine: SQLAlchemy engine bound to the cinder database.
    :raises exception.ValidationError: if any volume or snapshot row still
        has a NULL volume_type_id.
    """
    meta = sa.MetaData(bind=migrate_engine)

    # Update volume_type columns in tables to not allow null value
    volumes = sa.Table('volumes', meta, autoload=True)

    try:
        volumes.c.volume_type_id.alter(nullable=False)
    except Exception:
        msg = (_('Migration cannot continue until all volumes have '
                 'been migrated to the `__DEFAULT__` volume type. Please '
                 'run `cinder-manage db online_data_migrations`. '
                 'There are still untyped volumes unmigrated.'))
        raise exception.ValidationError(msg)

    snapshots = sa.Table('snapshots', meta, autoload=True)

    try:
        snapshots.c.volume_type_id.alter(nullable=False)
    except Exception:
        # FIX: the original message embedded an unformatted '%(count)i'
        # placeholder (no %-mapping is ever applied to this string) and was
        # missing the space after 'online_data_migrations`.', so the raised
        # error contained the literal text '%(count)i'.  Mirror the volumes
        # message above instead.
        msg = (_('Migration cannot continue until all snapshots have '
                 'been migrated to the `__DEFAULT__` volume type. Please '
                 'run `cinder-manage db online_data_migrations`. '
                 'There are still untyped snapshots unmigrated.'))
        raise exception.ValidationError(msg)

    encryption = sa.Table('encryption', meta, autoload=True)
    # since volume_type is a mandatory arg when creating encryption
    # volume_type_id column won't contain any null values so we can directly
    # alter it
    encryption.c.volume_type_id.alter(nullable=False)
def upgrade(migrate_engine):
    """Refuse to upgrade while deprecated data is still present.

    Two preflight checks, each raising ValidationError with WARNING_MSG
    when live (non-deleted) rows are found:

    1. CGs to Generic Volume Groups transition: no undeleted rows may
       remain in ``consistencygroups`` or ``cgsnapshots``.
    2. VOLUME_ prefix addition in message IDs: every undeleted row in
       ``messages`` must already carry a ``VOLUME_``-prefixed event_id.

    :param migrate_engine: SQLAlchemy engine bound to the cinder database.
    :raises exception.ValidationError: if either check finds leftover rows.
    """
    meta = MetaData(migrate_engine)

    # Check 1: leftover consistency-group rows block the upgrade.
    for table_name in ('consistencygroups', 'cgsnapshots'):
        table = Table(table_name, meta, autoload=True)
        count = select([func.count()]).select_from(table).where(
            table.c.deleted == False).execute().scalar()  # NOQA
        if count:
            raise exception.ValidationError(
                detail=WARNING_MSG % {'count': count, 'table': table.name})

    # Check 2: live messages whose event_id still lacks the VOLUME_ prefix.
    messages = Table('messages', meta, autoload=True)
    count = select([func.count()]).select_from(messages).where(
        (messages.c.deleted == False) &
        (~messages.c.event_id.like('VOLUME_%'))).execute().scalar()  # NOQA
    if count:
        raise exception.ValidationError(
            detail=WARNING_MSG % {'count': count, 'table': 'messages'})
def _get_providing_pool(self, volume): len_cs = len(volume.capacity_sources) if len_cs != 1: raise exception.ValidationError( detail=(_("Volume %(vol)s has %(len_cs)d capacity_sources!") % { 'vol': volume.path, 'len_cs': len_cs })) len_pp = len(volume.capacity_sources[0].providing_pools) if len_pp != 1: raise exception.ValidationError( detail=(_("Volume %(vol)s has %(len_pp)d providing_pools!") % { 'vol': volume.path, 'len_pp': len_pp })) return volume.capacity_sources[0].providing_pools[0]
def validate(self, *args, **kwargs):
    """Run the wrapped jsonschema validator, translating failures.

    Delegates to ``self.validator.validate`` and converts any
    ``jsonschema.ValidationError`` or ``TypeError`` it raises into
    ``exception.ValidationError`` with a human-readable detail string.

    :raises exception.ValidationError: when the input fails validation.
    """
    try:
        self.validator.validate(*args, **kwargs)
    except jsonschema.ValidationError as ex:
        if isinstance(ex.cause, exception.InvalidName):
            # Name-format failures carry their own curated message.
            detail = ex.cause.msg
        elif ex.path:
            # A non-empty path pinpoints the offending field/attribute.
            detail = _("Invalid input for field/attribute %(path)s."
                       " Value: %(value)s. %(message)s") % {
                'path': ex.path.pop(),
                'value': ex.instance,
                'message': ex.message
            }
        else:
            detail = ex.message
        raise exception.ValidationError(detail=detail)
    except TypeError as ex:
        # NOTE: If passing non string value to patternProperties parameter,
        # TypeError happens. Here is for catching the TypeError.
        raise exception.ValidationError(detail=six.text_type(ex))
def attach_volume_to_node(self, volume_url, node_url):
    """Attach the RSD volume at *volume_url* to the node at *node_url*.

    Phase 1 attaches the volume endpoint to the node; phase 2 re-reads
    both resources and extracts the NVMe-oF connection details.  If
    phase 2 fails, the attachment is rolled back (detach) before
    re-raising.

    :param volume_url: Redfish URL of the volume.
    :param node_url: Redfish URL of the node.
    :returns: tuple (target_ip, target_port, target_nqn, host_nqn).
    :raises exception.ValidationError: volume already attached.
    :raises RSDRetryableException: attach not currently allowed, or a
        validation failure after attach (safe to retry once rolled back).
    :raises exception.VolumeBackendAPIException: any other failure,
        including a failed rollback.
    """
    LOG.info('Trying attach from node %s to volume %s',
             node_url, volume_url)
    # ---- Phase 1: perform the attachment -------------------------------
    try:
        volume = self._get_volume(volume_url)
        node = self._get_node(node_url)
        # Refuse if the volume already has any endpoint attached.
        if len(volume.links.endpoints) > 0:
            raise exception.ValidationError(
                detail=(_("Volume %s already attached") % volume_url))
        node.attach_endpoint(volume.path)
    except sushy_exceptions.InvalidParameterValueError:
        # The service rejected the attach as not allowable right now;
        # surface as retryable so the caller may try again.
        LOG.exception("Attach volume failed (not allowable)")
        raise RSDRetryableException(
            reason=(_("Not allowed to attach from "
                      "%(node)s to %(volume)s.") % {
                'node': node_url,
                'volume': volume_url
            }))
    except Exception:
        LOG.exception("Attach volume failed (attach phase)")
        raise exception.VolumeBackendAPIException(
            data=(_("Attach failed from %(node)s to %(volume)s.") % {
                'node': node_url,
                'volume': volume_url
            }))
    # ---- Phase 2: read back the connection details ---------------------
    try:
        # Refresh both resources so the new endpoint linkage is visible.
        volume.refresh()
        node.refresh()
        v_endpoints = volume.links.endpoints
        v_endpoints = self._get_nqn_endpoints(v_endpoints)
        # Exactly one target NQN endpoint is expected on the volume.
        if len(v_endpoints) != 1:
            raise exception.ValidationError(
                detail=(_("Attach volume error: %d target nqns") %
                        len(v_endpoints)))
        target_nqn, v_endpoint = v_endpoints[0]
        ip_transports = v_endpoint["IPTransportDetails"]
        # Exactly one IP transport is expected for that endpoint.
        if len(ip_transports) != 1:
            raise exception.ValidationError(
                detail=(_("Attach volume error: %d target ips") %
                        len(ip_transports)))
        ip_transport = ip_transports[0]
        target_ip = ip_transport["IPv4Address"]["Address"]
        target_port = ip_transport["Port"]
        # Resolve the node's computer system to find the host-side NQN.
        node_system = self.rsdlib.get_system(node.links.computer_system)
        n_endpoints = tuple(
            val["@odata.id"]
            for val in node_system.json["Links"]["Endpoints"])
        n_endpoints = self._get_nqn_endpoints(n_endpoints)
        # At least one host NQN must exist; the first one is used.
        if len(n_endpoints) == 0:
            raise exception.ValidationError(
                detail=(_("Attach volume error: %d host nqns") %
                        len(n_endpoints)))
        host_nqn, v_endpoint = n_endpoints[0]
        LOG.info(
            'Attachment successful: Retrieved target IP %s, '
            'target Port %s, target NQN %s and initiator NQN %s',
            target_ip, target_port, target_nqn, host_nqn)
        return (target_ip, target_port, target_nqn,
                host_nqn)
    except Exception as e:
        LOG.exception("Attach volume failed (post-attach)")
        # Roll back the phase-1 attachment before re-raising.
        try:
            node.refresh()
            node.detach_endpoint(volume.path)
            LOG.info('Detached from node %s to volume %s',
                     node_url, volume_url)
        except Exception:
            # Rollback itself failed; report the undo failure instead.
            LOG.exception("Attach volume failed (undo attach)")
            raise exception.VolumeBackendAPIException(data=(
                _("Undo-attach failed from %(node)s to %(volume)s.") % {
                    'node': node_url,
                    'volume': volume_url
                }))
        if isinstance(e, exception.ValidationError):
            # Rolled back cleanly after a validation problem: retryable.
            raise RSDRetryableException(
                reason=(_("Validation error during post-attach from "
                          "%(node)s to %(volume)s.") % {
                    'node': node_url,
                    'volume': volume_url
                }))
        else:
            raise exception.VolumeBackendAPIException(data=(
                _("Post-attach failed from %(node)s to %(volume)s.") % {
                    'node': node_url,
                    'volume': volume_url
                }))