def upgrade(migrate_engine):
    """Add the instance_info_caches table.

    :param migrate_engine: SQLAlchemy engine the migration runs against.
    """
    meta = MetaData()
    meta.bind = migrate_engine

    # load tables for fk
    instances = Table('instances', meta, autoload=True)

    #
    # New Tables
    #
    instance_info_caches = Table('instance_info_caches', meta,
            # NOTE: pass the callable itself (utils.utcnow), not its result.
            # Calling it here would freeze a single timestamp, computed when
            # upgrade() runs, as the default for every future row.
            Column('created_at', DateTime(timezone=False),
                   default=utils.utcnow),
            Column('updated_at', DateTime(timezone=False),
                   onupdate=utils.utcnow),
            Column('deleted_at', DateTime(timezone=False)),
            Column('deleted', Boolean(create_constraint=True, name=None)),
            Column('id', Integer(), primary_key=True),
            Column('network_info', Text()),
            Column('instance_id', String(36),
                   ForeignKey('instances.uuid'),
                   nullable=False,
                   unique=True),
            mysql_engine='InnoDB')

    # create instance_info_caches table
    try:
        instance_info_caches.create()
    except Exception:
        LOG.error(_("Table |%s| not created!"), repr(instance_info_caches))
        raise
def volume_type_destroy(context, name):
    """Soft-delete a volume type (looked up by name) and its extra specs."""
    session = get_session()
    with session.begin():
        type_ref = volume_type_get_by_name(context, name, session=session)
        type_id = type_ref['id']
        # 'updated_at': literal_column('updated_at') writes the column back
        # to itself, leaving its value unchanged by this update.
        (session.query(models.VolumeTypes).
         filter_by(id=type_id).
         update({'deleted': True,
                 'deleted_at': utils.utcnow(),
                 'updated_at': literal_column('updated_at')}))
        (session.query(models.VolumeTypeExtraSpecs).
         filter_by(volume_type_id=type_id).
         update({'deleted': True,
                 'deleted_at': utils.utcnow(),
                 'updated_at': literal_column('updated_at')}))
def volume_destroy(context, volume_id):
    """Soft-delete a volume, unlink its iSCSI target, and soft-delete
    the volume's metadata rows, all in one transaction.
    """
    session = get_session()
    with session.begin():
        (session.query(models.Volume).
         filter_by(id=volume_id).
         update({'deleted': True,
                 'deleted_at': utils.utcnow(),
                 'updated_at': literal_column('updated_at')}))
        # The iSCSI target row is kept but detached from the volume.
        (session.query(models.IscsiTarget).
         filter_by(volume_id=volume_id).
         update({'volume_id': None}))
        (session.query(models.VolumeMetadata).
         filter_by(volume_id=volume_id).
         update({'deleted': True,
                 'deleted_at': utils.utcnow(),
                 'updated_at': literal_column('updated_at')}))
def snapshot_destroy(context, snapshot_id):
    """Soft-delete a snapshot row.

    Writing updated_at back to itself via literal_column keeps that
    column's existing value untouched by this update.
    """
    session = get_session()
    with session.begin():
        (session.query(models.Snapshot).
         filter_by(id=snapshot_id).
         update({'deleted': True,
                 'deleted_at': utils.utcnow(),
                 'updated_at': literal_column('updated_at')}))
def test_cast_to_volume_host_update_db_with_volume_id(self):
    """cast_to_volume_host() updates the volume row when update_db=True
    and a volume_id is present in kwargs.
    """
    host = "fake_host1"
    method = "fake_method"
    fake_kwargs = {"volume_id": 31337, "extra_arg": "meow"}
    queue = "fake_queue"

    self.mox.StubOutWithMock(utils, "utcnow")
    self.mox.StubOutWithMock(db, "volume_update")
    # NOTE(review): queue_get_for is called on the db module by
    # cast_to_volume_host; stubbing rpc.queue_get_for leaves the real db
    # call unmocked and the rpc expectation unsatisfied.
    self.mox.StubOutWithMock(db, "queue_get_for")
    self.mox.StubOutWithMock(rpc, "cast")

    utils.utcnow().AndReturn("fake-now")
    db.volume_update(self.context, 31337,
                     {"host": host, "scheduled_at": "fake-now"})
    db.queue_get_for(self.context, FLAGS.volume_topic,
                     host).AndReturn(queue)
    rpc.cast(self.context, queue,
             {"method": method, "args": fake_kwargs})
    self.mox.ReplayAll()

    driver.cast_to_volume_host(self.context, host, method,
                               update_db=True, **fake_kwargs)
def cast_to_volume_host(context, host, method, update_db=True, **kwargs):
    """Cast a method invocation to the volume service queue on *host*.

    When update_db is True and kwargs carries a volume_id, the volume row
    is stamped with the target host and the scheduling time first.
    """
    if update_db:
        volume_id = kwargs.get("volume_id")
        if volume_id is not None:
            scheduled_at = utils.utcnow()
            db.volume_update(context, volume_id,
                             {"host": host, "scheduled_at": scheduled_at})
    queue = db.queue_get_for(context, FLAGS.volume_topic, host)
    rpc.cast(context, queue, {"method": method, "args": kwargs})
    LOG.debug(_("Casted '%(method)s' to host '%(host)s'") % locals())
def aggregate_host_delete(context, aggregate_id, host):
    """Soft-delete one host membership row of an aggregate.

    :raises exception.AggregateHostNotFound: if no matching row exists.
    """
    query = _aggregate_get_query(context,
                                 models.AggregateHost,
                                 models.AggregateHost.aggregate_id,
                                 aggregate_id).filter_by(host=host)
    # Guard clause: fail fast when the membership row is absent.
    if not query.first():
        raise exception.AggregateHostNotFound(aggregate_id=aggregate_id,
                                              host=host)
    query.update({'deleted': True,
                  'deleted_at': utils.utcnow(),
                  'updated_at': literal_column('updated_at')})
def aggregate_delete(context, aggregate_id):
    """Soft-delete an aggregate, moving it to the DISMISSED state.

    :raises exception.AggregateNotFound: if the aggregate does not exist.
    """
    query = _aggregate_get_query(context,
                                 models.Aggregate,
                                 models.Aggregate.id,
                                 aggregate_id)
    # Guard clause: nothing to delete, surface the lookup failure.
    if not query.first():
        raise exception.AggregateNotFound(aggregate_id=aggregate_id)
    query.update({'deleted': True,
                  'deleted_at': utils.utcnow(),
                  'operational_state': aggregate_states.DISMISSED,
                  'updated_at': literal_column('updated_at')})
def test_service_is_up(self):
    """service_is_up() is true up to service_down_time seconds of silence."""
    fts_func = datetime.datetime.fromtimestamp
    fake_now = 1000
    down_time = 5

    self.flags(service_down_time=down_time)
    self.mox.StubOutWithMock(utils, 'utcnow')

    # (heartbeat age in seconds, expected liveness)
    cases = [(down_time, True),       # exactly at the limit -> up
             (down_time - 1, True),   # within the limit -> up
             (down_time + 1, False)]  # past the limit -> down
    for age, expected in cases:
        utils.utcnow().AndReturn(fts_func(fake_now))
        stamp = fts_func(fake_now - age)
        service = {'updated_at': stamp, 'created_at': stamp}
        self.mox.ReplayAll()
        result = utils.service_is_up(service)
        self.assertEqual(expected, result)
        self.mox.ResetAll()
def notify(publisher_id, event_type, priority, payload): """Sends a notification using the specified driver :param publisher_id: the source worker_type.host of the message :param event_type: the literal type of event (ex. Instance Creation) :param priority: patterned after the enumeration of Python logging levels in the set (DEBUG, WARN, INFO, ERROR, CRITICAL) :param payload: A python dictionary of attributes Outgoing message format includes the above parameters, and appends the following: message_id a UUID representing the id for this notification timestamp the GMT timestamp the notification was sent at The composite message will be constructed as a dictionary of the above attributes, which will then be sent via the transport mechanism defined by the driver. Message example:: {'message_id': str(uuid.uuid4()), 'publisher_id': 'compute.host1', 'timestamp': utils.utcnow(), 'priority': 'WARN', 'event_type': 'compute.create_instance', 'payload': {'instance_id': 12, ... }} """ if priority not in log_levels: raise BadPriorityException( _('%s not in valid priorities') % priority) # Ensure everything is JSON serializable. payload = utils.to_primitive(payload, convert_instances=True) driver = importutils.import_module(FLAGS.notification_driver) msg = dict(message_id=str(uuid.uuid4()), publisher_id=publisher_id, event_type=event_type, priority=priority, payload=payload, timestamp=str(utils.utcnow())) try: driver.notify(msg) except Exception, e: LOG.exception(_("Problem '%(e)s' attempting to " "send to notification system. Payload=%(payload)s") % locals())
def aggregate_metadata_delete(context, aggregate_id, key):
    """Soft-delete one metadata entry of an aggregate.

    :raises exception.AggregateMetadataNotFound: if *key* has no row.
    """
    query = _aggregate_get_query(context,
                                 models.AggregateMetadata,
                                 models.AggregateMetadata.aggregate_id,
                                 aggregate_id).filter_by(key=key)
    # Guard clause: unknown key for this aggregate.
    if not query.first():
        raise exception.AggregateMetadataNotFound(aggregate_id=aggregate_id,
                                                  metadata_key=key)
    query.update({'deleted': True,
                  'deleted_at': utils.utcnow(),
                  'updated_at': literal_column('updated_at')})
def test_cast_to_volume_host_update_db_with_volume_id(self):
    """The volume row gets host/scheduled_at written before the cast."""
    host = 'fake_host1'
    method = 'fake_method'
    fake_kwargs = {'volume_id': 31337, 'extra_arg': 'meow'}
    queue = 'fake_queue'

    for module, attr in ((utils, 'utcnow'),
                         (db, 'volume_update'),
                         (db, 'queue_get_for'),
                         (rpc, 'cast')):
        self.mox.StubOutWithMock(module, attr)

    utils.utcnow().AndReturn('fake-now')
    db.volume_update(self.context, 31337,
                     {'host': host, 'scheduled_at': 'fake-now'})
    db.queue_get_for(self.context, 'volume', host).AndReturn(queue)
    rpc.cast(self.context, queue,
             {'method': method, 'args': fake_kwargs})
    self.mox.ReplayAll()

    driver.cast_to_volume_host(self.context, host, method,
                               update_db=True, **fake_kwargs)
def setUp(self):
    """Run before each test method to initialize test environment."""
    super(TestCase, self).setUp()
    # NOTE(vish): We need a better method for creating fixtures for tests
    # now that we have some required db setup for the system
    # to work properly.
    # Record the test start time and reset the database to a known state.
    self.start = utils.utcnow()
    tests.reset_db()
    # emulate some of the mox stuff, we can't use the metaclass
    # because it screws with our generators
    self.mox = mox.Mox()
    self.stubs = stubout.StubOutForTesting()
    # Per-test bookkeeping; presumably cleaned up in tearDown (not visible
    # here) -- verify.
    self.injected = []
    self._services = []
    self._overridden_opts = []
def create_volume(self, context, volume_id, snapshot_id=None):
    """Creates and exports the volume.

    When snapshot_id is given the volume is built from that snapshot,
    otherwise a blank volume is created. On any driver failure the volume
    status is set to 'error' and the exception is re-raised; on success
    the status becomes 'available' with launched_at stamped.
    """
    context = context.elevated()
    volume_ref = self.db.volume_get(context, volume_id)
    LOG.info(_("volume %s: creating"), volume_ref['name'])

    # Claim the volume for this host before doing any driver work.
    self.db.volume_update(context,
                          volume_id,
                          {'host': self.host})
    # NOTE(vish): so we don't have to get volume from db again
    # before passing it to the driver.
    volume_ref['host'] = self.host

    try:
        vol_name = volume_ref['name']
        vol_size = volume_ref['size']
        LOG.debug(_("volume %(vol_name)s: creating lv of"
                    " size %(vol_size)sG") % locals())
        if snapshot_id is None:
            model_update = self.driver.create_volume(volume_ref)
        else:
            snapshot_ref = self.db.snapshot_get(context, snapshot_id)
            model_update = self.driver.create_volume_from_snapshot(
                volume_ref,
                snapshot_ref)
        # Drivers may hand back a dict of volume-row updates to persist.
        if model_update:
            self.db.volume_update(context, volume_ref['id'], model_update)

        LOG.debug(_("volume %s: creating export"), volume_ref['name'])
        model_update = self.driver.create_export(context, volume_ref)
        if model_update:
            self.db.volume_update(context, volume_ref['id'], model_update)
    except Exception:
        # Flag the volume as errored, then re-raise the original exception.
        with utils.save_and_reraise_exception():
            self.db.volume_update(context,
                                  volume_ref['id'], {'status': 'error'})

    now = utils.utcnow()
    self.db.volume_update(context,
                          volume_ref['id'], {'status': 'available',
                                             'launched_at': now})
    LOG.debug(_("volume %s: created successfully"), volume_ref['name'])
    self._reset_stats()
    return volume_id
def notify_usage_exists(context, volume_ref, current_period=False):
    """Emit an 'exists' notification for a volume, for usage auditing.

    By default the audit window is the last completed audit period; with
    current_period=True the window runs from the end of that period to now.
    """
    begin, end = utils.last_completed_audit_period()
    if current_period:
        audit_start, audit_end = end, utils.utcnow()
    else:
        audit_start, audit_end = begin, end
    extra_usage_info = {'audit_period_beginning': str(audit_start),
                        'audit_period_ending': str(audit_end)}
    notify_about_volume_usage(context, volume_ref, 'exists',
                              extra_usage_info=extra_usage_info)
def __init__(self, user_id, project_id, is_admin=None, read_deleted="no",
             roles=None, remote_address=None, timestamp=None,
             request_id=None, auth_token=None, overwrite=True,
             quota_class=None, **kwargs):
    """Build a request context.

    :param read_deleted: 'no' indicates deleted records are hidden, 'yes'
        indicates deleted records are visible, 'only' indicates that
        *only* deleted records are visible.
    :param overwrite: Set to False to ensure that the greenthread local
        copy of the index is not overwritten.
    :param kwargs: Extra arguments that might be present, but we ignore
        because they possibly came in from older rpc messages.
    """
    if kwargs:
        LOG.warn(_('Arguments dropped when creating context: %s')
                 % str(kwargs))

    self.user_id = user_id
    self.project_id = project_id
    self.roles = roles or []

    # Derive admin-ness from roles when not given; conversely make sure an
    # explicitly-admin context carries the 'admin' role.
    self.is_admin = is_admin
    if self.is_admin is None:
        self.is_admin = 'admin' in [role.lower() for role in self.roles]
    elif self.is_admin and 'admin' not in self.roles:
        self.roles.append('admin')

    self.read_deleted = read_deleted
    self.remote_address = remote_address

    # Accept a datetime or its string form; default to "now".
    timestamp = timestamp or utils.utcnow()
    if isinstance(timestamp, basestring):
        timestamp = utils.parse_strtime(timestamp)
    self.timestamp = timestamp

    self.request_id = request_id or generate_request_id()
    self.auth_token = auth_token
    self.quota_class = quota_class

    if overwrite or not hasattr(local.store, 'context'):
        self.update_store()
def delete(self, context, volume):
    """Request deletion of a volume.

    Unscheduled volumes are destroyed directly; otherwise the volume is
    marked 'deleting' and the delete is cast to its host.

    :raises exception.InvalidVolume: if the volume is in use or still has
        dependent snapshots.
    """
    volume_id = volume['id']
    if not volume['host']:
        # NOTE(vish): scheduling failed, so delete it
        self.db.volume_destroy(context, volume_id)
        return

    if volume['status'] not in ("available", "error"):
        msg = _("Volume status must be available or error")
        raise exception.InvalidVolume(reason=msg)

    snapshots = self.db.snapshot_get_all_for_volume(context, volume_id)
    if snapshots:
        msg = _("Volume still has %d dependent snapshots") % len(snapshots)
        raise exception.InvalidVolume(reason=msg)

    self.db.volume_update(context, volume_id,
                          {'status': 'deleting',
                           'terminated_at': utils.utcnow()})

    queue = self.db.queue_get_for(context, FLAGS.volume_topic,
                                  volume['host'])
    rpc.cast(context, queue,
             {"method": "delete_volume",
              "args": {"volume_id": volume_id}})
def volume_metadata_delete(context, volume_id, key):
    """Soft-delete a single metadata item on a volume."""
    query = _volume_metadata_get_query(context, volume_id).filter_by(key=key)
    query.update({'deleted': True,
                  'deleted_at': utils.utcnow(),
                  'updated_at': literal_column('updated_at')})
def upgrade(migrate_engine):
    """Move instances.mac_address into the new virtual_interfaces table.

    Creates virtual_interfaces, adds networks.bridge_interface and
    fixed_ips.virtual_interface_id, migrates existing data, then drops
    instances.mac_address.

    :param migrate_engine: SQLAlchemy engine the migration runs against.
    """
    meta = MetaData()
    meta.bind = migrate_engine

    # grab tables and (column for dropping later)
    instances = Table('instances', meta, autoload=True)
    networks = Table('networks', meta, autoload=True)
    fixed_ips = Table('fixed_ips', meta, autoload=True)
    c = instances.columns['mac_address']

    interface = Column('bridge_interface',
                       String(length=255, convert_unicode=False,
                              assert_unicode=None, unicode_error=None,
                              _warn_on_bytestring=False))

    virtual_interface_id = Column('virtual_interface_id', Integer())

    # add interface column to networks table
    # values will have to be set manually before running cinder
    try:
        networks.create_column(interface)
    except Exception:
        LOG.error(_("interface column not added to networks table"))
        raise

    #
    # New Tables
    #
    virtual_interfaces = Table('virtual_interfaces', meta,
            # NOTE: pass the callable itself so every insert/update gets a
            # fresh timestamp; calling utils.utcnow() here would freeze the
            # default at migration time.
            Column('created_at', DateTime(timezone=False),
                   default=utils.utcnow),
            Column('updated_at', DateTime(timezone=False),
                   onupdate=utils.utcnow),
            Column('deleted_at', DateTime(timezone=False)),
            Column('deleted', Boolean(create_constraint=True, name=None)),
            Column('id', Integer(), primary_key=True, nullable=False),
            Column('address',
                   String(length=255, convert_unicode=False,
                          assert_unicode=None, unicode_error=None,
                          _warn_on_bytestring=False),
                   unique=True),
            Column('network_id', Integer(), ForeignKey('networks.id')),
            Column('instance_id', Integer(), ForeignKey('instances.id'),
                   nullable=False),
            mysql_engine='InnoDB')

    # create virtual_interfaces table
    try:
        virtual_interfaces.create()
    except Exception:
        LOG.error(_("Table |%s| not created!"), repr(virtual_interfaces))
        raise

    # add virtual_interface_id column to fixed_ips table
    try:
        fixed_ips.create_column(virtual_interface_id)
    except Exception:
        LOG.error(_("VIF column not added to fixed_ips table"))
        raise

    # populate the virtual_interfaces table
    # extract data from existing instance and fixed_ip tables
    s = select([instances.c.id, instances.c.mac_address,
                fixed_ips.c.network_id],
               fixed_ips.c.instance_id == instances.c.id)
    keys = ('instance_id', 'address', 'network_id')
    join_list = [dict(zip(keys, row)) for row in s.execute()]
    LOG.debug(_("join list for moving mac_addresses |%s|"), join_list)

    # insert data into the table
    if join_list:
        i = virtual_interfaces.insert()
        i.execute(join_list)

    # populate the fixed_ips virtual_interface_id column
    # NOTE: the comparison must be the SQLAlchemy expression `!= None`,
    # which renders as IS NOT NULL. The Python identity test
    # `fixed_ips.c.instance_id is not None` always evaluates to True and
    # would not filter out unassigned fixed IPs.
    s = select([fixed_ips.c.id, fixed_ips.c.instance_id],
               fixed_ips.c.instance_id != None)
    for row in s.execute():
        m = select([virtual_interfaces.c.id]).\
            where(virtual_interfaces.c.instance_id ==
                  row['instance_id']).as_scalar()
        u = fixed_ips.update().values(virtual_interface_id=m).\
            where(fixed_ips.c.id == row['id'])
        u.execute()

    # drop the mac_address column from instances
    c.drop()
def delete(self, session=None):
    """Soft-delete this record and persist the change."""
    self.deleted, self.deleted_at = True, utils.utcnow()
    self.save(session=session)
def volume_type_extra_specs_delete(context, volume_type_id, key):
    """Soft-delete one extra-spec entry of a volume type."""
    query = _volume_type_extra_specs_query(context, volume_type_id).\
        filter_by(key=key)
    query.update({'deleted': True,
                  'deleted_at': utils.utcnow(),
                  'updated_at': literal_column('updated_at')})
def auth_token_create(context, token):
    """Store a fake token, indexed by both its hash and its id."""
    fake_token = FakeToken(created_at=utils.utcnow(), **token)
    data = FakeAuthDatabase.data
    data[fake_token.token_hash] = fake_token
    data['id_%i' % fake_token.id] = fake_token
    return fake_token