def storage_pool_create(context, values, update=False):
    """Create storage pool records, updating any that already exist.

    For each pool dict in ``values``: if matching pools are found, their
    ``services`` entries are merged with the incoming one and the rows are
    updated in place; otherwise a new row is inserted with a generated
    UUID.  All work happens inside a single DB transaction.

    :param context: request context used for DB access
    :param values: iterable of pool dicts
    :param update: unused here — presumably kept for interface
                   compatibility with callers; TODO confirm
    :returns: list of the created/updated pool dicts
    :raises db_exc.DBError: wrapping any unexpected DB failure
    """
    session = get_session()
    pool_results = []
    with session.begin():
        try:
            for pool in values:
                try:
                    # EAFP: look up existing pools; StoragePoolNotFound
                    # falls through to the insert path below.
                    pool_list = storage_pool_get(context, pool, session)
                    for pool_info in pool_list:
                        pool_info['services'] = merge_services(
                            pool_info.get('services'), pool.get('services'))
                        upd_rec = dict(pool_info)
                        upd_rec.pop('id')  # no need to update id
                        upd_rec['updated_at'] = timeutils.utcnow()
                        model_query(context, models.StoragePools,
                                    session=session, read_deleted="no").\
                            filter_by(id=pool_info['id']).\
                            update(upd_rec)
                        pool_results.append(pool_info)
                except (exception.StoragePoolNotFound):
                    # No existing pool matched: insert a fresh record.
                    pool_info = dict(pool)
                    pool_info['id'] = str(uuid.uuid4())
                    pool_ref = models.StoragePools()
                    pool_ref.update(dict(dict(deleted=False), **pool_info))
                    pool_ref.save(session=session)
                    pool_results.append(pool_info)
        except Exception as e:
            raise db_exc.DBError(e)
    return pool_results
def _storage_tier_destroy_in_session(context, filters, session):
    """Soft-delete the storage tiers matching ``filters``, along with
    their capability specs, inside the caller's session.
    """
    # Remove the tiers' capability specs first.
    storage_tier_capability_specs_destroy(context, filters, session)
    tier_query = model_query(context, models.StorageBackendTiers,
                             session=session)
    tier_query.filter_by(**filters).update(
        {'deleted': True,
         'deleted_at': timeutils.utcnow(),
         'updated_at': literal_column('updated_at')})
def storage_pool_delete(context, values):
    """Delete storage pools, or detach specific services from them.

    For each pool dict in ``values``: when the dict names ``services``,
    those services are removed from each matching stored record; a record
    is soft-deleted only when no services remain on it, otherwise just its
    ``services`` column is rewritten.  Runs in one DB transaction.

    :param context: request context used for DB access
    :param values: iterable of pool dicts identifying pools (and
                   optionally services) to remove
    :raises db_exc.DBError: wrapping any unexpected DB failure
    """
    session = get_session()
    with session.begin():
        try:
            for pool in values:
                pool_list = storage_pool_get(context, pool, session)
                for pool_info in pool_list:
                    if pool.get('services'):
                        # Only drop the named services from this record.
                        pool_info['services'] = delete_services(
                            pool_info.get('services'), pool.get('services'))
                    else:
                        # No services given: the whole record goes away.
                        pool_info['services'] = None
                    # Build the row filter from whichever identifying
                    # attributes the caller supplied.
                    filters = {}
                    for attr in ['id', 'pool', 'backend_name',
                                 'storage_backend_id', 'storage_tier_id']:
                        if pool.get(attr):
                            filters[attr] = pool.get(attr)
                    # delete record if it doesn't have any entries in
                    # services, otherwise just change services
                    if pool_info.get('services') and pool_info['services'] != "":
                        model_query(context, models.StoragePools,
                                    session=session).\
                            filter_by(**filters). \
                            update({'services': pool_info['services']})
                    else:
                        model_query(context, models.StoragePools,
                                    session=session).\
                            filter_by(**filters). \
                            update({'deleted': True,
                                    'deleted_at': timeutils.utcnow(),
                                    'updated_at':
                                        literal_column('updated_at')})
        except Exception as e:
            raise db_exc.DBError(e)
def storage_backend_capability_specs_destroy(context, values, session=None,
                                             inactive=False):
    """Soft-delete capability-spec rows of a storage backend.

    Looks up the backend described by ``values``; a backend without a
    ``capability_specs_id`` makes this a no-op.  Optional ``spec_id``,
    ``skey`` and ``svalue`` entries in ``values`` narrow which spec rows
    are deleted.

    :param context: request context used for DB access
    :param values: dict identifying the backend (and optionally specs)
    :param session: existing DB session to join, or None
    :param inactive: when True, also operate on already-deleted rows
    """
    read_deleted = "yes" if inactive else "no"
    _storage_backend = _find_storage_backend(context, values, True, session,
                                             inactive=inactive)
    if not _storage_backend.get('capability_specs_id'):
        return
    filter_dict = dict(storage_id=_storage_backend['capability_specs_id'])
    if values.get('spec_id'):
        filter_dict['id'] = values.get('spec_id')
    if values.get('skey'):
        filter_dict['skey'] = values.get('skey')
    if values.get('svalue'):
        filter_dict['svalue'] = values.get('svalue')
    # Bug fix: read_deleted was computed above but never passed to
    # model_query, so inactive=True could never reach soft-deleted rows.
    model_query(context, models.StorageExtraSpecs, session=session,
                read_deleted=read_deleted).\
        filter_by(**filter_dict).\
        update({'deleted': True,
                'deleted_at': timeutils.utcnow(),
                'updated_at': literal_column('updated_at')})
def _storage_tier_capability_specs_update_or_create(context, values, session):
    """Upsert capability specs for a storage tier.

    Assigns a fresh ``capability_specs_id`` to the tier if it does not
    have one yet, then updates-or-creates the individual spec rows.

    :param values: dict identifying the tier; must contain
                   ``capability_specs``
    :returns: the ``capability_specs`` dict that was applied
    """
    _storage_tier = _find_storage_tier(context, values, True, session)
    if not _storage_tier.get('capability_specs_id'):
        _storage_tier['capability_specs_id'] = str(uuid.uuid4())
        # Bug fix: pass the caller's session so this update joins the
        # surrounding transaction, matching the sibling helper
        # _storage_backend_config_specs_update_or_create.
        model_query(context, models.StorageBackendTiers, session=session).\
            filter_by(id=_storage_tier['id']).\
            update({'capability_specs_id':
                        _storage_tier['capability_specs_id'],
                    'updated_at': timeutils.utcnow()})
    return _storage_specs_update_or_create(
        context, _storage_tier['capability_specs_id'],
        values['capability_specs'], session)
def _storage_backend_config_specs_update_or_create(context, values, session):
    """Upsert config specs for a storage backend, lazily creating the
    backend's ``config_specs_id`` the first time specs are attached.

    :returns: the ``config_specs`` dict that was applied
    """
    backend = _find_storage_backend(context, values, True, session)
    specs_id = backend.get('config_specs_id')
    if not specs_id:
        # First specs for this backend: mint an id and record it.
        specs_id = str(uuid.uuid4())
        backend['config_specs_id'] = specs_id
        (model_query(context, models.StorageBackends, session=session)
            .filter_by(id=backend['id'])
            .update({'config_specs_id': specs_id,
                     'updated_at': timeutils.utcnow()}))
    return _storage_specs_update_or_create(context, specs_id,
                                           values['config_specs'], session)
def _storage_backend_destroy(context, filters):
    """Soft-delete one storage backend along with its tiers and specs,
    all inside a single transaction.
    """
    session = get_session()
    with session.begin():
        backend = _find_storage_backend(context, filters, True, session)
        # The backend may legitimately have no tiers; ignore that case.
        try:
            _storage_tier_destroy(context,
                                  dict(storage_backend_id=backend['id']),
                                  session)
        except exception.StorageTierNotFound:
            pass
        storage_backend_capability_specs_destroy(context, filters, session)
        storage_backend_config_specs_destroy(context, filters, session)
        (model_query(context, models.StorageBackends, session=session)
            .filter_by(**filters)
            .update({'deleted': True,
                     'deleted_at': timeutils.utcnow(),
                     'updated_at': literal_column('updated_at')}))
def index(self, req):
    """Return a list of all running services.

    Filter by host & service name.
    """
    context = req.environ['sds.context']
    detailed = self.ext_mgr.is_loaded('os-extended-services')
    now = timeutils.utcnow()
    services = db.service_get_all(context)

    host = req.GET.get('host', '')
    service = req.GET.get('service', '')
    if 'service' in req.GET:
        LOG.deprecated(_("Query by service parameter is deprecated. "
                         "Please use binary parameter instead."))
    binary = req.GET.get('binary', '')

    if host:
        services = [s for s in services if s['host'] == host]
    # NOTE(uni): deprecating service request key, binary takes precedence
    binary_key = binary or service
    if binary_key:
        services = [s for s in services if s['binary'] == binary_key]

    svcs = []
    for svc in services:
        # A service is "up" when its last heartbeat is recent enough.
        delta = now - (svc['updated_at'] or svc['created_at'])
        alive = abs(utils.total_seconds(delta)) <= CONF.service_down_time
        entry = {'binary': svc['binary'],
                 'host': svc['host'],
                 'zone': svc['availability_zone'],
                 'status': 'disabled' if svc['disabled'] else 'enabled',
                 'state': 'up' if alive else 'down',
                 'updated_at': svc['updated_at']}
        if detailed:
            entry['disabled_reason'] = svc['disabled_reason']
        svcs.append(entry)
    return {'services': svcs}
def _storage_specs_update_or_create(context, storage_id, specs, session):
    """Upsert extra-spec key/value rows for ``storage_id``.

    Each pair in ``specs`` updates the existing row when one exists,
    otherwise a new row is inserted.  Values are stringified before
    storage.

    :param specs: dict of spec key -> value
    :returns: the ``specs`` dict that was applied
    """
    # .items() replaces Py2-only .iteritems(): identical behavior here
    # and forward-compatible.  The dead `spec_ref = None` initializer
    # was dropped — spec_ref is always bound before use.
    for skey, svalue in specs.items():
        try:
            # EAFP: update the existing row if the key is present.
            spec_ref = _storage_extra_specs_get_item(context, storage_id,
                                                     skey, session)
            model_query(context, models.StorageExtraSpecs, session=session,
                        read_deleted="no").\
                filter_by(id=spec_ref['id']).\
                update({"skey": skey, "svalue": str(svalue),
                        'updated_at': timeutils.utcnow()})
        except exception.StorageExtraSpecsNotFound:
            # Key not stored yet: insert a fresh row.
            spec_ref = models.StorageExtraSpecs()
            spec_ref.update({"skey": skey,
                             "svalue": str(svalue),
                             "storage_id": storage_id,
                             "deleted": False})
            spec_ref.save(session=session)
    return specs
def delete(self, session):
    """Soft-delete this object: flag it deleted, stamp the time, persist."""
    now = timeutils.utcnow()
    self.deleted = True
    self.deleted_at = now
    self.save(session=session)
def _storage_backend_update(context, values, session):
    """Update the driver of the storage backend identified by
    ``values['id']``, refreshing its ``updated_at`` stamp.
    """
    backend_query = model_query(context, models.StorageBackends,
                                session=session)
    backend_query.filter_by(id=values['id']).update(
        {'driver': values['driver'],
         'updated_at': timeutils.utcnow()})
def __init__(self, user_id, project_id, is_admin=None,
             read_deleted="no", roles=None, project_name=None,
             remote_address=None, timestamp=None, request_id=None,
             auth_token=None, overwrite=True, quota_class=None,
             service_catalog=None, domain=None, user_domain=None,
             project_domain=None, **kwargs):
    """Initialize RequestContext.

    :param read_deleted: 'no' indicates deleted records are hidden,
        'yes' indicates deleted records are visible, 'only' indicates
        that *only* deleted records are visible.

    :param overwrite: Set to False to ensure that the greenthread local
        copy of the index is not overwritten.

    :param kwargs: Extra arguments that might be present, but we ignore
        because they possibly came in from older rpc messages.
    """
    self.user_id = user_id
    self.project_id = project_id
    self.domain = domain
    self.user_domain = user_domain
    self.project_domain = project_domain
    self.roles = roles or []
    self.project_name = project_name
    self.is_admin = is_admin
    if self.is_admin is None:
        # Derive admin status from the roles via policy when not given.
        self.is_admin = policy.check_is_admin(self.roles)
    elif self.is_admin and 'admin' not in self.roles:
        # Explicitly-admin contexts always carry the 'admin' role.
        self.roles.append('admin')
    self.read_deleted = read_deleted
    self.remote_address = remote_address
    if not timestamp:
        timestamp = timeutils.utcnow()
    if isinstance(timestamp, basestring):
        # Timestamps arriving over RPC are serialized strings.
        timestamp = timeutils.parse_strtime(timestamp)
    self.timestamp = timestamp
    if not request_id:
        request_id = generate_request_id()
    self.request_id = request_id
    self.auth_token = auth_token
    self.quota_class = quota_class
    # Store in the greenthread-local unless a context is already there
    # and the caller asked not to clobber it.
    if overwrite or not hasattr(local.store, 'context'):
        self.update_store()
    if service_catalog:
        # Only include required parts of service_catalog
        self.service_catalog = [
            s for s in service_catalog
            if s.get('type') in ('compute', 'object-store')
        ]
    else:
        # if list is empty or none
        self.service_catalog = []
def service_is_up(service):
    """Check whether a service is up based on last heartbeat."""
    heartbeat = service['updated_at'] or service['created_at']
    # Timestamps in DB are UTC.
    age = abs(total_seconds(timeutils.utcnow() - heartbeat))
    return age <= CONF.service_down_time
def last_completed_audit_period(unit=None):
    """This method gives you the most recently *completed* audit period.

    arguments:
            units: string, one of 'hour', 'day', 'month', 'year'
                    Periods normally begin at the beginning (UTC) of the
                    period unit (So a 'day' period begins at midnight UTC,
                    a 'month' unit on the 1st, a 'year' on Jan, 1)
                    unit string may be appended with an optional offset
                    like so:  'day@18'  This will begin the period at 18:00
                    UTC.  'month@15' starts a monthly period on the 15th,
                    and year@3 begins a yearly one on March 1st.

    returns:  2 tuple of datetimes (begin, end)
              The begin timestamp of this audit period is the same as the
              end of the previous.
    """
    if not unit:
        unit = CONF.volume_usage_audit_period

    # Parse an optional '@offset' suffix, e.g. 'day@18'.
    offset = 0
    if '@' in unit:
        unit, offset = unit.split("@", 1)
        offset = int(offset)

    rightnow = timeutils.utcnow()
    if unit not in ('month', 'day', 'year', 'hour'):
        raise ValueError('Time period must be hour, day, month or year')
    if unit == 'month':
        # Offset is the day-of-month the period starts on (default 1st).
        if offset == 0:
            offset = 1
        end = datetime.datetime(day=offset,
                                month=rightnow.month,
                                year=rightnow.year)
        if end >= rightnow:
            # Current month's period hasn't closed yet; end at last
            # month's boundary instead (wrapping the year if needed).
            year = rightnow.year
            if 1 >= rightnow.month:
                year -= 1
                month = 12 + (rightnow.month - 1)
            else:
                month = rightnow.month - 1
            end = datetime.datetime(day=offset,
                                    month=month,
                                    year=year)
        # Begin is one month before end (again wrapping the year).
        year = end.year
        if 1 >= end.month:
            year -= 1
            month = 12 + (end.month - 1)
        else:
            month = end.month - 1
        begin = datetime.datetime(day=offset, month=month, year=year)

    elif unit == 'year':
        # Offset is the month-of-year the period starts on (default Jan).
        if offset == 0:
            offset = 1
        end = datetime.datetime(day=1, month=offset, year=rightnow.year)
        if end >= rightnow:
            # This year's period hasn't closed yet; step back one year.
            end = datetime.datetime(day=1,
                                    month=offset,
                                    year=rightnow.year - 1)
            begin = datetime.datetime(day=1,
                                      month=offset,
                                      year=rightnow.year - 2)
        else:
            begin = datetime.datetime(day=1,
                                      month=offset,
                                      year=rightnow.year - 1)

    elif unit == 'day':
        # Offset is the hour-of-day the period starts at (default 0:00).
        end = datetime.datetime(hour=offset,
                                day=rightnow.day,
                                month=rightnow.month,
                                year=rightnow.year)
        if end >= rightnow:
            end = end - datetime.timedelta(days=1)
        begin = end - datetime.timedelta(days=1)

    elif unit == 'hour':
        # Offset is the minute-of-hour the period starts at (default :00).
        end = rightnow.replace(minute=offset, second=0, microsecond=0)
        if end >= rightnow:
            end = end - datetime.timedelta(hours=1)
        begin = end - datetime.timedelta(hours=1)

    return (begin, end)