def _parse_check(rule):
    """Parse a single base check rule into an appropriate Check object.

    The special rules '!' and '@' map to FalseCheck and TrueCheck;
    any other rule must look like "kind:match".  Unparseable rules
    fail closed (FalseCheck).
    """
    # Handle the special checks up front.
    if rule == '!':
        return FalseCheck()
    if rule == '@':
        return TrueCheck()

    try:
        kind, match = rule.split(':', 1)
    except Exception:
        LOG.exception(_("Failed to understand rule %(rule)s") % locals())
        # If the rule is invalid, we'll fail closed
        return FalseCheck()

    # Dispatch to the registered handler for this kind; fall back to
    # the generic (None) handler, then fail closed.
    if kind in _checks:
        handler = _checks[kind]
    elif None in _checks:
        handler = _checks[None]
    else:
        LOG.error(_("No handler for matches of kind %s") % kind)
        return FalseCheck()
    return handler(kind, match)
def check_content(self):
    """Validate that the image content carries a positive size."""
    size = self.size
    if size is None:
        raise exception.ImageContentInvalid(
            _("Image size not specified in the content."))
    if size < 1:
        raise exception.ImageSizeInvalid(
            _("Image size must be positive integer."))
def inner(*args, **kwargs):
    """Run ``f`` while holding the named lock, logging acquire/release.

    Bug fix: the original emitted the "released" debug message after
    the ``return`` statement inside the ``with`` block, so it could
    never execute.  A try/finally guarantees it runs once the lock
    context has been exited.
    """
    try:
        with lock(name, lock_file_prefix, external, lock_path):
            LOG.debug(_('Got semaphore / lock "%(function)s"'),
                      {'function': f.__name__})
            return f(*args, **kwargs)
    finally:
        LOG.debug(_('Semaphore / lock released "%(function)s"'),
                  {'function': f.__name__})
def check_content(self):
    """Validate that the volume snapshot content carries a positive size."""
    size = self.size
    if size is None:
        raise exception.VolumeSnapshotContentInvalid(
            _("Volume snapshot size not specified in the content."))
    if size < 1:
        raise exception.VolumeSnapshotSizeInvalid(
            _("Volume snapshot size must be positive integer."))
def check_content(self):
    """Validate the instance content.

    Each of vcpu/ram/disk must be present in ``self.content`` and be a
    positive integer; otherwise InstanceContentInvalid is raised.
    """
    for key in ('vcpu', 'ram', 'disk'):
        value = self.content.get(key, None)
        if value is None:
            msg = _("%s not specified in the content.") % key.title()
            raise exception.InstanceContentInvalid(msg)
        if value < 1:
            # Bug fix: the original message lacked the %s placeholder,
            # so "%" formatting raised TypeError instead of reporting
            # the offending field.
            msg = _("%s must be positive integer.") % key.title()
            raise exception.InstanceContentInvalid(msg)
def add_command_parsers(subparsers):
    """Register the management subcommands on *subparsers*.

    Adds 'db_version' (no arguments), 'db_sync' (optional version and
    current_version), and 'purge_deleted' (optional age plus a
    granularity option).
    """
    subparsers.add_parser('db_version')

    db_sync = subparsers.add_parser('db_sync')
    db_sync.add_argument('version', nargs='?')
    db_sync.add_argument('current_version', nargs='?')

    purge = subparsers.add_parser('purge_deleted')
    purge.add_argument('age', nargs='?', default='90',
                       help=_('How long to preserve deleted data.'))
    purge.add_argument(
        '-g', '--granularity', default='days',
        choices=['days', 'hours', 'minutes', 'seconds'],
        help=_('Granularity to use for age argument, defaults to days.'))
def deprecated(self, msg, *args, **kwargs):
    """Log *msg* as deprecated; escalate to fatal when configured.

    With CONF.fatal_deprecations set, the message is logged critical
    and DeprecatedConfig is raised; otherwise it is only a warning.
    """
    stdmsg = _("Deprecated: %s") % msg
    if not CONF.fatal_deprecations:
        self.warn(stdmsg, *args, **kwargs)
        return
    self.critical(stdmsg, *args, **kwargs)
    raise DeprecatedConfig(msg=stdmsg)
def check_event_data(self, data):
    """Validate raw event *data* and return it as a dict.

    Checks that every required event property is present and that the
    resource type is a known one, then parses 'event_time' into a
    datetime.  Raises exception.Invalid or
    exception.ResourceTypeInvalid on failure.
    """
    value = data.as_dict()
    for key in REQUIRED_EVENT_PROPERTIES:
        if key not in value:
            msg = _("Property %s is required by the event.") % key
            raise exception.Invalid(msg)
    if value['resource_type'] not in RESOURCE_AFFINITY_TYPES:
        # Bug fix: report the set that is actually checked; the
        # original message mistakenly printed
        # REQUIRED_EVENT_PROPERTIES.
        msg = _("Resource type must be in %s") % str(
            RESOURCE_AFFINITY_TYPES)
        raise exception.ResourceTypeInvalid(msg)
    value['event_time'] = timeutils.parse_strtime(value['event_time'])
    return value
def inner_func(*args, **kwargs):
    # Retry *infunc* forever, logging uncaught exceptions at most once
    # a minute (sooner when the message changes).
    last_log_time = 0
    last_exc_message = None
    exc_count = 0
    while True:
        try:
            return infunc(*args, **kwargs)
        except Exception as exc:
            # NOTE(review): exc.message is Python 2 only.
            if exc.message == last_exc_message:
                exc_count += 1
            else:
                exc_count = 1
            # Do not log any more frequently than once a minute unless
            # the exception message changes
            cur_time = int(time.time())
            if (cur_time - last_log_time > 60 or
                    exc.message != last_exc_message):
                logging.exception(
                    _('Unexpected exception occurred %d time(s)... '
                      'retrying.') % exc_count)
                last_log_time = cur_time
                last_exc_message = exc.message
                # Reset so the next log line counts occurrences since
                # this one.
                exc_count = 0
            # This should be a very rare event. In case it isn't, do
            # a sleep.
            time.sleep(1)
def check_content(self):
    # For router, we only care about the usage time and unit_price,
    # so the content should be an empty dict.
    if not self.content:
        return
    raise exception.RouterContentInvalid(
        _("Router content should be empty."))
def __exit__(self, exc_type, exc_val, exc_tb):
    # Release the lock and close its backing file.  An IOError here is
    # logged rather than propagated so that leaving the context never
    # masks an exception raised by the body.
    try:
        self.unlock()
        self.lockfile.close()
    except IOError:
        LOG.exception(_("Could not release the acquired lock `%s`"),
                      self.fname)
def __init__(self, message=None, **kwargs):
    # Keep the formatting kwargs; default 'code' from the class
    # attribute when the subclass defines one.
    self.kwargs = kwargs
    if 'code' not in self.kwargs:
        try:
            self.kwargs['code'] = self.code
        except AttributeError:
            pass
    if not message:
        try:
            message = self.message % kwargs
        except Exception as e:
            # kwargs doesn't match a variable in the message
            # log the issue and the kwargs
            LOG.exception(_('Exception in string format operation'))
            # NOTE(review): iteritems() is Python 2 only.
            for name, value in kwargs.iteritems():
                LOG.error("%s: %s" % (name, value))
            if CONF.fatal_exception_format_errors:
                raise e
            else:
                # at least get the core message out if something happened
                message = self.message
    super(KoalaException, self).__init__(message)
def __init__(self, message=None, **kwargs):
    # Keep the formatting kwargs; default "code" from the class
    # attribute when the subclass defines one.
    self.kwargs = kwargs
    if "code" not in self.kwargs:
        try:
            self.kwargs["code"] = self.code
        except AttributeError:
            pass
    if not message:
        try:
            message = self.message % kwargs
        except Exception as e:
            # kwargs doesn't match a variable in the message
            # log the issue and the kwargs
            LOG.exception(_("Exception in string format operation"))
            # NOTE(review): iteritems() is Python 2 only.
            for name, value in kwargs.iteritems():
                LOG.error("%s: %s" % (name, value))
            if CONF.fatal_exception_format_errors:
                raise e
            else:
                # at least get the core message out if something happened
                message = self.message
    super(KoalaException, self).__init__(message)
def __exit__(self, exc_type, exc_val, exc_tb):
    # A new exception was raised inside the managed block: log the
    # exception we were holding and let the new one propagate.
    if exc_type is not None:
        logging.error(
            _("Original exception being dropped: %s"),
            traceback.format_exception(self.type_, self.value, self.tb)
        )
        return False
    if self.reraise:
        # Python 2 three-expression raise: re-raise the saved
        # exception with its original traceback.
        raise self.type_, self.value, self.tb
def check_event_type(self):
    """Check the event type."""
    if self.event_type in self.EVENT_TYPES:
        return
    detail = {'res_type': self.resource_type,
              'format': str(self.EVENT_TYPES)}
    raise exception.EventTypeInvalid(
        _("%(res_type)s event type must be in %(format)s.") % detail)
def put(self, data):
    """Modify the price.

    The payload must carry an 'id' referencing an existing price;
    otherwise Invalid / PriceNotFound is raised.
    """
    value = data.as_dict()
    price_id = value.get('id', None)
    if not price_id:
        raise exception.Invalid(_("Property id is required by the price."))
    if not pecan.request.dbapi.price_get_by_id(price_id):
        raise exception.PriceNotFound(
            _("Price %s not found.") % str(price_id))
    return pecan.request.dbapi.price_update_by_id(price_id, value)
def get_one(self, resource_id):
    """In fact, we will list all the records by resource_id."""
    records = pecan.request.dbapi.records_get_by_resource_id(resource_id)
    if records:
        return records
    raise exception.RecordNotFound(
        _("Records of resource %s not found.") % resource_id)
def delete(self, id):
    """Delete the price by id."""
    # Look the price up first so a missing id yields a clear 404-style
    # error instead of a silent no-op delete.
    if not pecan.request.dbapi.price_get_by_id(id):
        raise exception.PriceNotFound(_("Price %s not found.") % str(id))
    pecan.request.dbapi.price_delete_by_id(id)
def get_one(self, resource_id):
    """Get the resource information by id."""
    resources = pecan.request.dbapi.resource_get_by_id(resource_id)
    if resources:
        # The dbapi returns a list; only the first entry is exposed.
        return resources[0]
    raise exception.ResourceNotFound(
        _("Resource %s not found.") % resource_id)
def get_one(self, id):
    """Get the price by id."""
    price = pecan.request.dbapi.price_get_by_id(id)
    if price:
        return price
    raise exception.PriceNotFound(_("Price %s not found.") % str(id))
def __exit__(self, exc_type, exc_val, exc_tb):
    # A new exception was raised inside the managed block: log the
    # exception we were holding and let the new one propagate.
    if exc_type is not None:
        logging.error(_('Original exception being dropped: %s'),
                      traceback.format_exception(self.type_,
                                                 self.value,
                                                 self.tb))
        return False
    if self.reraise:
        # Python 2 three-expression raise: re-raise the saved
        # exception with its original traceback.
        raise self.type_, self.value, self.tb
def check_event_type(self):
    """Check the event type."""
    if self.event_type not in self.EVENT_TYPES:
        msg = _("%(res_type)s event type must be in %(format)s.") % {
            'res_type': self.resource_type,
            'format': str(self.EVENT_TYPES)}
        raise exception.EventTypeInvalid(msg)
    return
def post(self, data):
    """Create a new price.

    Validates the required price properties, rejects an explicit id
    that already exists, then stores and returns the new price.
    """
    value = data.as_dict()
    for key in REQUIRED_PRICE_PROPERTIES:
        if key not in value:
            msg = _("Property %s is required by the price.") % key
            raise exception.Invalid(msg)
    price_id = value.get('id', None)
    if price_id:
        if pecan.request.dbapi.price_get_by_id(price_id):
            msg = _("Price %s already exists.") % str(price_id)
            raise exception.PriceIdConflict(msg)
    # Create the new price.  (The original used a bare string literal
    # here — a no-op statement — where a comment was intended.)
    price = pecan.request.dbapi.price_create(value)
    return price
class LogConfigError(Exception):
    """Raised when a logging configuration file cannot be loaded."""

    message = _('Error loading logging config %(log_config)s: %(err_msg)s')

    def __init__(self, log_config, err_msg):
        self.log_config = log_config
        self.err_msg = err_msg

    def __str__(self):
        return self.message % {'log_config': self.log_config,
                               'err_msg': self.err_msg}
def get_resource(self):
    """Get resource from database.

    Returns None when no row matches; raises ResourceDeleted when the
    matching resource is flagged as deleted.
    """
    resources = self.db_api.resource_get_by_id(self.resource_id)
    if not resources:
        return None
    resource = resources[0]
    if resource.deleted:
        raise exception.ResourceDeleted(
            _("Resource %s has been deleted.") % self.resource_id)
    return resource
def _ping_listener(dbapi_conn, connection_rec, connection_proxy):
    """Ensures that MySQL connections checked out of the pool are alive.

    Borrowed from:
    http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f
    """
    try:
        dbapi_conn.cursor().execute('select 1')
    except dbapi_conn.OperationalError as ex:
        # MySQL client error codes for "server has gone away" style
        # disconnects; DisconnectionError tells SQLAlchemy to drop the
        # connection and retry with a fresh one.  Anything else is a
        # real error and is re-raised.
        if ex.args[0] in (2006, 2013, 2014, 2045, 2055):
            LOG.warn(_('Got mysql server has gone away: %s'), ex)
            raise sqla_exc.DisconnectionError("Database server went away")
        else:
            raise
def price_update_by_id(self, id, value):
    """Update the price by id."""
    session = get_session()
    with session.begin():
        query = model_query(models.Price, session=session)
        query = add_identity_filter(query, id)
        # 'fetch' keeps in-session objects consistent with the UPDATE.
        count = query.update(value, synchronize_session='fetch')
        if count < 1:
            msg = _("Price %s not found.") % str(id)
            raise exception.PriceNotFound(msg)
        # Re-read the updated row to return it to the caller.
        price = query.one()
    return price
def get_price(self, resource_type=None):
    """Get the resource type by resource type and region."""
    # Fall back to the instance's own resource type when none given.
    res_type = resource_type or self.resource_type
    price = self.db_api.price_get_by_resource(res_type, self.region)
    if price:
        return price.unit_price
    raise exception.PriceNotFound(
        _("Price of %(res_type)s in region %(region)s could not "
          "be found.") % {'res_type': res_type, 'region': self.region})
def get_price(self, resource_type=None):
    """Get the resource type by resource type and region."""
    if not resource_type:
        # Default to the instance's own resource type.
        resource_type = self.resource_type
    price = self.db_api.price_get_by_resource(resource_type, self.region)
    if not price:
        detail = {'res_type': resource_type, 'region': self.region}
        raise exception.PriceNotFound(
            _("Price of %(res_type)s in region %(region)s could not "
              "be found.") % detail)
    return price.unit_price
def create_record(self, value):
    """Create a new record of the resource.

    Fills resource_id / end_at from the current event when absent,
    then requires the caller to supply the remaining fields before
    persisting.
    """
    if 'resource_id' not in value:
        value['resource_id'] = self.resource_id
    if 'end_at' not in value:
        value['end_at'] = self.event_time
    # Unit_price, consumption and description are not easy to get from
    # db, so each resource must calculate them carefully.
    for key in ('unit_price', 'consumption', 'description', 'start_at'):
        if key not in value:
            # Bug fix: the original never interpolated the offending
            # key, leaving a literal "%s" in the message.
            msg = _("Property %s is needed to generate record.") % key
            raise exception.RecordValueInvalid(msg)
    self.db_api.record_create(value)
class KoalaException(Exception):
    """Base Koala Exception

    To correctly use this class, inherit from it and define
    a 'message' property. That message will get printf'd
    with the keyword arguments provided to the constructor.
    """
    # Defaults used when a subclass does not override them.
    message = _("An unknown exception occurred.")
    code = 500
    headers = {}
    # NOTE(review): presumably marks messages safe to expose to API
    # users — confirm against the API error handler.
    safe = False

    def __init__(self, message=None, **kwargs):
        # Keep the formatting kwargs; default 'code' from the class
        # attribute when the subclass defines one.
        self.kwargs = kwargs
        if 'code' not in self.kwargs:
            try:
                self.kwargs['code'] = self.code
            except AttributeError:
                pass
        if not message:
            try:
                message = self.message % kwargs
            except Exception as e:
                # kwargs doesn't match a variable in the message
                # log the issue and the kwargs
                LOG.exception(_('Exception in string format operation'))
                # NOTE(review): iteritems() is Python 2 only.
                for name, value in kwargs.iteritems():
                    LOG.error("%s: %s" % (name, value))
                if CONF.fatal_exception_format_errors:
                    raise e
                else:
                    # at least get the core message out if something
                    # happened
                    message = self.message
        super(KoalaException, self).__init__(message)

    def format_message(self):
        # Remote exception proxies ('*_Remote') already carry the
        # formatted message in args[0].  unicode() is Python 2 only.
        if self.__class__.__name__.endswith('_Remote'):
            return self.args[0]
        else:
            return unicode(self)
def get_start_at(self):
    """Get the start billing time.

    Uses the end of the last billing record when one exists; otherwise
    falls back to the resource's creation time.  Raises
    EventTimeInvalid when the event time is not after the computed
    start time.
    """
    last_record = self.get_last_record()
    # If the record is None, it means this is the second event for the
    # resource, so we need to get the start time from the resource.
    if not last_record:
        resource = self.get_resource()
        start_at = resource.created_at
    else:
        start_at = last_record.end_at
    if start_at >= self.event_time:
        # Bug fix: corrected the typo "privious" in the user-visible
        # error message.
        msg = _("Event time means that it's a previous event.")
        raise exception.EventTimeInvalid(msg)
    return start_at
def resource_update_by_id(self, resource_id, value):
    """Update the resource by id."""
    session = get_session()
    with session.begin():
        query = model_query(models.Resource, session=session).filter(
            models.Resource.resource_id == resource_id)
        # 'fetch' keeps in-session objects consistent with the UPDATE.
        count = query.update(value, synchronize_session='fetch')
        if count < 1:
            raise exception.ResourceNotFound(
                _("Resource %s not found.") % resource_id)
        # NOTE(fandeliang) count > 1 means duplicate resource ids;
        # should be Log.warning(resource id duplicate.)
        resource = query.first()
    return resource
def billing_resource(self):
    """Billing the resource and generate billing records.

    This is the main entry point for billing a resource.  When a new
    event arrives we check whether the resource already exists.  For a
    new resource we create the corresponding resource row; otherwise
    we calculate the consumption and update the billing records.
    """
    self.exist_resource = self.get_resource()

    if not self.exist_resource:
        if self.event_type in ('create', 'upload'):
            self.create_resource()
        elif self.event_type == 'delete':
            # If we receive a delete event with no resource records,
            # just ignore it.
            # TBD(fandeliang) Log.warning(_("Messaging missing."))
            pass
        else:
            # Any other event for an unknown resource: create the new
            # resource and treat this event as the creation time.
            # TBD(fandeliang) Log.warning(_("Messaging missing"))
            self.create_resource()
        return

    # The resource already exists.
    if self.event_type in ('create', 'upload'):
        raise exception.EventDuplicate(_("Duplicate event."))

    audits = {
        'exists': self.audit_exists,
        'resize': self.audit_resize,
        'delete': self.audit_delete,
        # Note(fandeliang) If the previous status is shutoff,
        # Log.warning()
        'power_off': self.audit_power_off,
        # Note(fandeliang) If the previous status is not shutoff,
        # Log.warning()
        'power_on': self.audit_power_on,
    }
    handler = audits.get(self.event_type)
    if handler is not None:
        handler()
def _find_facility_from_conf():
    """Resolve CONF.syslog_log_facility to a SysLogHandler facility.

    Accepts either a SysLogHandler attribute name or a key of
    facility_names; raises TypeError listing the valid choices
    otherwise.
    """
    handler_cls = logging.handlers.SysLogHandler
    facility_names = handler_cls.facility_names
    requested = CONF.syslog_log_facility

    facility = getattr(handler_cls, requested, None)
    if facility is None and requested in facility_names:
        facility = facility_names.get(requested)
    if facility is not None:
        return facility

    valid_facilities = facility_names.keys()
    consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
              'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
              'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
              'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
              'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
    valid_facilities.extend(consts)
    raise TypeError(_('syslog facility must be one of: %s') %
                    ', '.join("'%s'" % fac for fac in valid_facilities))
def read_cached_file(filename, force_reload=False):
    """Read from a file if it has been modified.

    :param force_reload: Whether to reload the file.
    :returns: A tuple with a boolean specifying if the data is fresh
              or not.
    """
    global _FILE_CACHE

    if force_reload and filename in _FILE_CACHE:
        del _FILE_CACHE[filename]

    reloaded = False
    mtime = os.path.getmtime(filename)
    cache_info = _FILE_CACHE.setdefault(filename, {})
    # Re-read only when we have no cached copy or the file is newer.
    stale = not cache_info or mtime > cache_info.get('mtime', 0)
    if stale:
        LOG.debug(_("Reloading cached file %s") % filename)
        with open(filename) as fap:
            cache_info['data'] = fap.read()
        cache_info['mtime'] = mtime
        reloaded = True
    return reloaded, cache_info['data']
def inner_func(*args, **kwargs):
    # Retry *infunc* forever, logging uncaught exceptions at most once
    # a minute (sooner when the message changes).
    last_log_time = 0
    last_exc_message = None
    exc_count = 0
    while True:
        try:
            return infunc(*args, **kwargs)
        except Exception as exc:
            # NOTE(review): exc.message is Python 2 only.
            if exc.message == last_exc_message:
                exc_count += 1
            else:
                exc_count = 1
            # Do not log any more frequently than once a minute unless
            # the exception message changes
            cur_time = int(time.time())
            if cur_time - last_log_time > 60 or exc.message != last_exc_message:
                logging.exception(_("Unexpected exception occurred %d time(s)... "
                                    "retrying.") % exc_count)
                last_log_time = cur_time
                last_exc_message = exc.message
                # Reset so the next log line counts occurrences since
                # this one.
                exc_count = 0
            # This should be a very rare event. In case it isn't, do
            # a sleep.
            time.sleep(1)
def read_cached_file(filename, force_reload=False):
    """Read from a file if it has been modified.

    :param force_reload: Whether to reload the file.
    :returns: A tuple with a boolean specifying if the data is fresh
              or not.
    """
    global _FILE_CACHE

    if force_reload and filename in _FILE_CACHE:
        del _FILE_CACHE[filename]

    mtime = os.path.getmtime(filename)
    cache_info = _FILE_CACHE.setdefault(filename, {})
    # A hit requires a populated entry whose mtime is current.
    if cache_info and mtime <= cache_info.get("mtime", 0):
        return (False, cache_info["data"])

    LOG.debug(_("Reloading cached file %s") % filename)
    with open(filename) as fap:
        cache_info["data"] = fap.read()
    cache_info["mtime"] = mtime
    return (True, cache_info["data"])