def __init__(self, tenant_id, resource, hard_limit,
             id=None, created=None, update=None):
    """Initialize a quota record.

    :param tenant_id: tenant that owns the quota.
    :param resource: name of the limited resource.
    :param hard_limit: maximum allowed usage for the resource.
    :param id: record id; a fresh UUID is generated when omitted.
    :param created: creation timestamp; defaults to now (UTC).
    :param update: last-update timestamp; defaults to now (UTC).
    """
    # NOTE: the previous defaults (id=utils.generate_uuid(),
    # created=timeutils.utcnow(), update=timeutils.utcnow()) were
    # evaluated once at import time, so every instance created without
    # explicit values shared the same UUID and timestamps.  Evaluate
    # them per call instead.
    self.tenant_id = tenant_id
    self.resource = resource
    self.hard_limit = hard_limit
    self.id = id if id is not None else utils.generate_uuid()
    self.created = created if created is not None else timeutils.utcnow()
    self.update = update if update is not None else timeutils.utcnow()
def delete(self):
    """Delete this record, soft-deleting when preservation is enabled."""
    self['updated'] = timeutils.utcnow()
    LOG.debug("Deleting %(name)s: %(dict)s",
              {'name': self.__class__.__name__,
               'dict': strutils.mask_dict_password(self.__dict__)})
    if not self.preserve_on_delete:
        # Hard delete: remove the row entirely.
        return self.db_api.delete(self)
    # Soft delete: flag the row and keep it around.
    self['deleted_at'] = timeutils.utcnow()
    self['deleted'] = True
    return self.db_api.save(self)
def _get_audit_period():
    """Return the (start, end) of the exists-audit window as ISO strings.

    The window ends now (UTC) and spans the configured
    ``exists_notification_interval`` in seconds.
    """
    end = timeutils.utcnow()
    window = datetime.timedelta(
        seconds=CONF.exists_notification_interval)
    start = end - window
    return timeutils.isotime(start), timeutils.isotime(end)
def _datastore_version_metadata_add(cls, datastore_name, datastore_version_name, datastore_version_id, key, value, exception_class): """ Create a record of the specified key and value in the metadata table. """ # if an association does not exist, create a new one. # if a deleted association exists, undelete it. # if an un-deleted association exists, raise an exception. try: db_record = DBDatastoreVersionMetadata.find_by( datastore_version_id=datastore_version_id, key=key, value=value) if db_record.deleted == 1: db_record.deleted = 0 db_record.updated_at = timeutils.utcnow() db_record.save() return else: raise exception_class(datastore=datastore_name, datastore_version=datastore_version_name, id=value) except exception.NotFound: pass # the record in the database only contains the datastore_verion_id DBDatastoreVersionMetadata.create( datastore_version_id=datastore_version_id, key=key, value=value)
def test_show_without_restart_required(self, load_server_mock,
                                       load_addr_mock, load_guest_mock,
                                       load_server_grp_mock):
    """show() reports operating_status ERROR when the heartbeat is stale.

    The instance's server status stays ACTIVE, but the service status
    row is aged past the agent heartbeat expiry, so the API must flag
    the operating status as ERROR.
    """
    # Create an instance in db.
    instance = ins_models.DBInstance.create(
        name=self.random_name('instance'),
        flavor_id=self.random_uuid(),
        tenant_id=self.random_uuid(),
        volume_size=1,
        datastore_version_id=self.ds_version_imageid.id,
        task_status=ins_models.InstanceTasks.NONE,
        compute_instance_id=self.random_uuid(),
        server_status='ACTIVE')
    ins_models.InstanceServiceStatus.create(
        instance_id=instance.id,
        status=srvstatus.ServiceStatuses.HEALTHY,
    )
    # workaround to reset updated_at field: create() stamps it, so age
    # the row past the heartbeat expiry by saving through the db api.
    service_status = ins_models.InstanceServiceStatus.find_by(
        instance_id=instance.id)
    service_status.updated_at = timeutils.utcnow() - timedelta(
        seconds=(CONF.agent_heartbeat_expiry + 60))
    ins_models.get_db_api().save(service_status)
    ret = self.controller.show(mock.MagicMock(), mock.ANY, instance.id)
    self.assertEqual(200, ret.status)
    ret_instance = ret.data(None)['instance']
    self.assertEqual('ACTIVE', ret_instance.get('status'))
    self.assertEqual('ERROR', ret_instance.get('operating_status'))
def setUp(self):
    """Seed four backups whose 'updated' stamps are out of order."""
    super(OrderingTests, self).setUp()
    util.init_db()
    now = timeutils.utcnow()
    self.context, self.instance_id = _prep_conf(now)
    info = {
        'tenant_id': self.context.project_id,
        'state': BACKUP_STATE,
        'instance_id': self.instance_id,
        'size': 2.0,
        'deleted': False
    }
    # Create backups out of order; save/create would overwrite the
    # 'updated' field, so go through the db_api directly.
    for name, age_days in (('four', 4), ('one', 1),
                           ('three', 3), ('two', 2)):
        stamp = now - datetime.timedelta(days=age_days)
        models.DBBackup().db_api.save(
            models.DBBackup(name=name, updated=stamp,
                            id=utils.generate_uuid(), **info))
def save(self):
    """Persist this model with a fresh 'updated_at' stamp.

    :raises exception.InvalidModelError: when validation fails.
    """
    if self.is_valid():
        self['updated_at'] = timeutils.utcnow()
        LOG.debug("Saving %(name)s: %(dict)s",
                  {'name': self.__class__.__name__,
                   'dict': self.__dict__})
        return get_db_api().save(self)
    raise exception.InvalidModelError(errors=self.errors)
def delete(context, module):
    """Soft-delete a module after permission and live-update checks."""
    Module.validate_action(
        context, 'delete', module.tenant_id, module.auto_apply,
        module.visible, module.priority_apply, None)
    Module.enforce_live_update(module.id, module.live_update, module.md5)
    # Mark the row deleted rather than removing it.
    module.deleted = True
    module.deleted_at = timeutils.utcnow()
    module.save()
def save(self):
    """Persist the model with a fresh 'updated' stamp.

    :raises exception.InvalidModelError: when validation fails.
    """
    if self.is_valid():
        self['updated'] = timeutils.utcnow()
        # Mask any password fields before they hit the debug log.
        safe_dict = strutils.mask_dict_password(self.__dict__)
        LOG.debug("Saving %(name)s: %(dict)s",
                  {'name': self.__class__.__name__, 'dict': safe_dict})
        return self.db_api.save(self)
    raise exception.InvalidModelError(errors=self.errors)
def test_isotime_unaware_subsecond(self):
    """isotime(subsecond=True) renders microseconds with a 'Z' suffix."""
    dt = timeutils.utcnow()
    fields = (dt.year, dt.month, dt.day,
              dt.hour, dt.minute, dt.second, dt.microsecond)
    expected = "%04d-%02d-%02dT%02d:%02d:%02d.%06dZ" % fields
    self.assertEqual(expected, timeutils.isotime(dt, subsecond=True))
def setUp(self):
    """Create one backup row for the ORM tests to work against."""
    super(BackupORMTest, self).setUp()
    util.init_db()
    self.context, self.instance_id = _prep_conf(timeutils.utcnow())
    backup_args = {
        'tenant_id': self.context.project_id,
        'name': BACKUP_NAME,
        'state': BACKUP_STATE,
        'instance_id': self.instance_id,
        'deleted': False,
        'size': 2.0,
        'location': BACKUP_LOCATION,
    }
    self.backup = models.DBBackup.create(**backup_args)
    self.deleted = False
def create(cls, **values):
    """Build, validate and persist a new model instance.

    Seeds a generated id, a creation timestamp and -- when the model
    supports it -- a cleared 'deleted' flag, then lets caller-supplied
    values override the defaults.

    :raises exception.InvalidModelError: when validation fails.
    """
    defaults = {
        'id': utils.generate_uuid(),
        'created': timeutils.utcnow(),
    }
    if hasattr(cls, 'deleted'):
        defaults['deleted'] = False
    defaults.update(values)
    instance = cls(**defaults)
    if not instance.is_valid():
        raise exception.InvalidModelError(errors=instance.errors)
    return instance.save()
def setUp(self):
    """Seed fifty backups for the pagination tests to page through."""
    super(PaginationTests, self).setUp()
    util.init_db()
    self.context, self.instance_id = _prep_conf(timeutils.utcnow())
    # Create a bunch of backups
    common = {
        'tenant_id': self.context.project_id,
        'state': BACKUP_STATE,
        'instance_id': self.instance_id,
        'size': 2.0,
        'deleted': False
    }
    for idx in range(50):
        models.DBBackup.create(name='Backup-%s' % idx, **common)
def __init__(self, sleep_time=10, timeout=1800):
    """Initialize integration-test instance info from CONFIG.

    :param sleep_time: default poll interval (seconds) for waits.
    :param timeout: default overall wait timeout (seconds).
    """
    self.def_sleep_time = sleep_time
    self.def_timeout = timeout
    # Timestamped name keeps runs distinguishable in the deployment.
    self.instance_info.name = "TEST_" + datetime.datetime.strftime(
        timeutils.utcnow(), '%Y_%m_%d__%H_%M_%S')
    self.instance_info.dbaas_datastore = CONFIG.dbaas_datastore
    self.instance_info.dbaas_datastore_version = (
        CONFIG.dbaas_datastore_version)
    self.instance_info.user = CONFIG.users.find_user_by_name("alt_demo")
    self.instance_info.admin_user = CONFIG.users.find_user(
        Requirements(is_admin=True))
    # Volume settings apply only when the datastore supports volumes.
    if self.VOLUME_SUPPORT:
        self.instance_info.volume_size = CONFIG.get('trove_volume_size',
                                                    1)
        self.instance_info.volume = {
            'size': self.instance_info.volume_size
        }
    else:
        self.instance_info.volume_size = None
        self.instance_info.volume = None
    # Attach to a shared network when one is configured.
    self.instance_info.nics = None
    shared_network = CONFIG.get('shared_network', None)
    if shared_network:
        self.instance_info.nics = [{'net-id': shared_network}]
    # Lazily-created API clients and helpers (built on first access).
    self._auth_client = None
    self._unauth_client = None
    self._admin_client = None
    self._swift_client = None
    self._nova_client = None
    self._neutron_client = None
    self._test_helper = None
    self._servers = {}
    # Attempt to register the main instance. If it doesn't
    # exist, this will still set the 'report' and 'client' objects
    # correctly in LogOnFail
    inst_ids = []
    if hasattr(self.instance_info, 'id') and self.instance_info.id:
        inst_ids = [self.instance_info.id]
    self.register_debug_inst_ids(inst_ids)
    self.instance_info.flavors = self.nova_client.flavors.list()
def update(self, req, body, tenant_id, id):
    """Update a configuration group's name, description and items.

    :param req: WSGI request carrying the context.
    :param body: request body with the 'configuration' payload.
    :param tenant_id: tenant owning the group.
    :param id: configuration group id.
    :returns: wsgi.Result with status 202 and no body.
    """
    msg = ("Updating configuration group %(cfg_id)s for tenant "
           "id %(tenant_id)s")
    LOG.info(msg, {"tenant_id": tenant_id, "cfg_id": id})
    context = req.environ[wsgi.CONTEXT_KEY]
    group = models.Configuration.load(context, id)
    # Note that changing the configuration group will also
    # indirectly affect all the instances which attach it.
    #
    # The Trove instance itself won't be changed (the same group is still
    # attached) but the configuration values will.
    #
    # The operator needs to keep this in mind when defining the related
    # policies.
    self.authorize_config_action(context, 'update', group)
    # if name/description are provided in the request body, update the
    # model with these values as well.
    if 'name' in body['configuration']:
        group.name = body['configuration']['name']
    if 'description' in body['configuration']:
        group.description = body['configuration']['description']
    context.notification = notification.DBaaSConfigurationUpdate(
        context, request=req)
    with StartNotification(context, configuration_id=id, name=group.name,
                           description=group.description):
        items = self._configuration_items_list(group,
                                               body['configuration'])
        # Replace the item set wholesale: soft-delete everything with a
        # single timestamp, then save the new items.
        deleted_at = timeutils.utcnow()
        models.Configuration.remove_all_items(context, group.id,
                                              deleted_at)
        models.Configuration.save(group, items)
        # Push the new values out to every attached instance/cluster.
        self._refresh_on_all_instances(context, id)
        self._refresh_on_all_clusters(context, id)
    return wsgi.Result(None, 202)
def update(context, module, original_module, full_access): Module.enforce_live_update( original_module.id, original_module.live_update, original_module.md5) # we don't allow any changes to 'is_admin' modules by non-admin if original_module.is_admin and not context.is_admin: raise exception.ModuleAccessForbidden( action='update', options='(Module is an admin module)') # we don't allow any changes to admin-only attributes by non-admin admin_options = Module.validate_action( context, 'update', module.tenant_id, module.auto_apply, module.visible, module.priority_apply, full_access) # make sure we set the is_admin flag, but only if it was # originally is_admin or we changed an admin option module.is_admin = original_module.is_admin or ( 1 if admin_options else 0) # but we turn it on/off if full_access is specified if full_access is not None: module.is_admin = 0 if full_access else 1 ds_id, ds_ver_id = datastore_models.get_datastore_or_version( module.datastore_id, module.datastore_version_id) if module.contents != original_module.contents: md5, processed_contents = Module.process_contents(module.contents) module.md5 = md5 module.contents = processed_contents elif hasattr(original_module, 'encrypted_contents'): # on load the contents may have been decrypted, so # we need to put the encrypted contents back before we update module.contents = original_module.encrypted_contents if module.datastore_id: module.datastore_id = ds_id if module.datastore_version_id: module.datastore_version_id = ds_ver_id module.updated = timeutils.utcnow() DBModule.save(module)
def setUp(self):
    """Initialize the db and request context for backup-create tests."""
    super(BackupCreateTest, self).setUp()
    util.init_db()
    self.context, self.instance_id = _prep_conf(timeutils.utcnow())
    # Tracks whether a backup was created so tearDown can clean it up.
    self.created = False
def setUp(self):
    """Wire up a BuiltInstanceTasks instance backed entirely by mocks.

    Builds a DBInstance fixture, patches datastore loaders, RPC,
    nova/cinder clients and Backup.running, and prepares stub servers
    for resize scenarios (ACTIVE -> VERIFY_RESIZE).
    """
    super(BuiltInstanceTasksTest, self).setUp()
    self.new_flavor = {'id': 8, 'ram': 768, 'name': 'bigger_flavor'}
    stub_nova_server = MagicMock()
    # Keep RPC inert for the duration of each test.
    self.rpc_patches = patch.multiple(rpc,
                                      get_notifier=MagicMock(),
                                      get_client=MagicMock())
    self.rpc_mocks = self.rpc_patches.start()
    self.addCleanup(self.rpc_patches.stop)
    db_instance = DBInstance(InstanceTasks.NONE,
                             id=INST_ID,
                             name='resize-inst-name',
                             datastore_version_id='1',
                             datastore_id='id-1',
                             flavor_id='6',
                             manager='mysql',
                             created=timeutils.utcnow(),
                             updated=timeutils.utcnow(),
                             compute_instance_id='computeinst-id-1',
                             tenant_id='testresize-tenant-id',
                             volume_size='1',
                             volume_id=VOLUME_ID)
    # this is used during the final check of whether the resize successful
    db_instance.server_status = 'ACTIVE'
    self.db_instance = db_instance
    # Patch the datastore/version loaders to return fixtures built from
    # the same db_instance record.
    self.dm_dv_load_by_uuid_patch = patch.object(
        datastore_models.DatastoreVersion, 'load_by_uuid', MagicMock(
            return_value=datastore_models.DatastoreVersion(db_instance)))
    self.dm_dv_load_by_uuid_mock = self.dm_dv_load_by_uuid_patch.start()
    self.addCleanup(self.dm_dv_load_by_uuid_patch.stop)
    self.dm_ds_load_patch = patch.object(
        datastore_models.Datastore, 'load',
        MagicMock(return_value=datastore_models.Datastore(db_instance)))
    self.dm_ds_load_mock = self.dm_ds_load_patch.start()
    self.addCleanup(self.dm_ds_load_patch.stop)
    # The object under test, with guest and nova clients mocked out.
    self.instance_task = taskmanager_models.BuiltInstanceTasks(
        trove.common.context.TroveContext(),
        db_instance,
        stub_nova_server,
        InstanceServiceStatus(ServiceStatuses.RUNNING,
                              id='inst-stat-id-0'))
    self.instance_task._guest = MagicMock(spec=trove.guestagent.api.API)
    self.instance_task._nova_client = MagicMock(spec=novaclient.client)
    self.stub_server_mgr = MagicMock(
        spec=novaclient.v2.servers.ServerManager)
    # Two server stubs: one running (old flavor), one mid-resize
    # (new flavor).
    self.stub_running_server = MagicMock(spec=novaclient.v2.servers.Server)
    self.stub_running_server.status = 'ACTIVE'
    self.stub_running_server.flavor = {'id': 6, 'ram': 512}
    self.stub_verifying_server = MagicMock(
        spec=novaclient.v2.servers.Server)
    self.stub_verifying_server.status = 'VERIFY_RESIZE'
    self.stub_verifying_server.flavor = {'id': 8, 'ram': 768}
    self.stub_server_mgr.get = MagicMock(
        return_value=self.stub_verifying_server)
    self.instance_task._nova_client.servers = self.stub_server_mgr
    stub_flavor_manager = MagicMock(
        spec=novaclient.v2.flavors.FlavorManager)
    self.instance_task._nova_client.flavors = stub_flavor_manager
    nova_flavor = novaclient.v2.flavors.Flavor(stub_flavor_manager,
                                               self.new_flavor, True)
    stub_flavor_manager.get = MagicMock(return_value=nova_flavor)
    self.instance_task._volume_client = MagicMock(spec=cinderclient)
    self.instance_task._volume_client.volumes = Mock(
        spec=cinderclient_volumes.VolumeManager)
    # Scripted service-status sequence consumed by find_by(instance_id=).
    answers = (
        status for status in self.get_inst_service_status('inst_stat-id', [
            ServiceStatuses.SHUTDOWN, ServiceStatuses.RUNNING,
            ServiceStatuses.RUNNING, ServiceStatuses.RUNNING
        ]))

    def side_effect_func(*args, **kwargs):
        # Dispatch find_by lookups: service statuses by instance_id,
        # the db_instance fixture by (id, deleted=False), else a mock.
        if 'instance_id' in kwargs:
            return next(answers)
        elif ('id' in kwargs and 'deleted' in kwargs
                and not kwargs['deleted']):
            return db_instance
        else:
            return MagicMock()

    self.dbm_dbmb_patch = patch.object(
        trove.db.models.DatabaseModelBase, 'find_by',
        MagicMock(side_effect=side_effect_func))
    self.dbm_dbmb_mock = self.dbm_dbmb_patch.start()
    self.addCleanup(self.dbm_dbmb_patch.stop)
    self.template_patch = patch.object(
        template, 'SingleInstanceConfigTemplate',
        MagicMock(spec=template.SingleInstanceConfigTemplate))
    self.template_mock = self.template_patch.start()
    self.addCleanup(self.template_patch.stop)
    db_instance.save = MagicMock(return_value=None)
    self.tbmb_running_patch = patch.object(trove.backup.models.Backup,
                                           'running',
                                           MagicMock(return_value=None))
    self.tbmb_running_mock = self.tbmb_running_patch.start()
    self.addCleanup(self.tbmb_running_patch.stop)
    # Volume tests get an extra cinder stub.
    if 'volume' in self._testMethodName:
        self._stub_volume_client()
def update(context, instance_module):
    """Stamp the instance-module record's 'updated' field and persist it."""
    instance_module.updated = timeutils.utcnow()
    DBInstanceModule.save(instance_module)
def delete(context, instance_module):
    """Soft-delete an instance-module association."""
    # Flag the row deleted rather than removing it.
    instance_module.deleted_at = timeutils.utcnow()
    instance_module.deleted = True
    instance_module.save()
def test_utcnow_tz(self):
    """utcnow() must return a naive (tzinfo-less) datetime."""
    now = timeutils.utcnow()
    self.assertIsNone(now.tzinfo)
def _object_name(self):
    """Build a log object name from the current UTC time.

    The space between date and time is replaced with 'T'.
    """
    stamp = str(timeutils.utcnow()).replace(' ', 'T')
    return 'log-%s' % stamp
def delete(context, group):
    """Soft-delete a configuration group and all of its items."""
    # Share one timestamp between the items and the group itself.
    when = timeutils.utcnow()
    Configuration.remove_all_items(context, group.id, when)
    group.deleted = True
    group.deleted_at = when
    group.save()
def delete(version_id, config_param_name):
    """Soft-delete a datastore configuration parameter by name."""
    param = DatastoreConfigurationParameters.load_parameter_by_name(
        version_id, config_param_name)
    param.deleted = True
    param.deleted_at = timeutils.utcnow()
    param.save()
def __init__(self, instance_id, user):
    """Record the instance id, the requesting user and creation time."""
    self.created = timeutils.utcnow()
    self.user = user
    self.id = instance_id
def setUp(self):
    """Initialize the database and a request context for the tests."""
    super(TestBackupStrategy, self).setUp()
    util.init_db()
    self.context, self.instance_id = _prep_conf(timeutils.utcnow())
def update(self, **values):
    """Apply matching attributes from values, stamp 'updated', and save.

    Keys that do not correspond to existing attributes are ignored.
    """
    for name, value in values.items():
        if hasattr(self, name):
            setattr(self, name, value)
    self['updated'] = timeutils.utcnow()
    return self.db_api.save(self)