def open_r(self) -> None:
    super().open_r()

    re_match = re.match('^([^/]+)/([^@]+)@?(.+)?$', self._path)
    if not re_match:
        raise UsageError('URL {} is invalid. Need {}://<pool>/<imagename> or {}://<pool>/<imagename>@<snapshotname>.'.format(
            self.url, self.name, self.name))
    self._pool_name, self._image_name, self._snapshot_name = re_match.groups()

    # Try opening it and quit if that's not possible.
    try:
        ioctx = self._cluster.open_ioctx(self._pool_name)
    except rados.ObjectNotFound:
        raise FileNotFoundError('Ceph pool {} not found.'.format(self._pool_name)) from None

    try:
        rbd.Image(ioctx, self._image_name, self._snapshot_name, read_only=True)
    except rbd.ImageNotFound:
        raise FileNotFoundError('RBD image or snapshot {} not found.'.format(self.url)) from None
def _parse_rules(cls, rules_spec: str) -> OrderedDict:
    tokens = rules_spec.split(',')
    rules_dict: Dict[str, int] = {}
    for token in tokens:
        if not token:
            raise ValueError('Empty retention policy element.')
        # fullmatch ensures the whole token is consumed, so trailing garbage is rejected.
        match = re.fullmatch(r'([a-z]+)([0-9]+)', token)
        if not match:
            raise ValueError('Invalid retention policy element {}.'.format(token))
        category = match.group(1)
        timecount = int(match.group(2))
        if category not in cls._valid_categories:
            raise ValueError('Time category {} in retention policy is invalid.'.format(category))
        if category in rules_dict:
            raise ValueError('Time category {} listed more than once in retention policy.'.format(category))
        if timecount <= 0:
            raise UsageError('Count of time category {} must be a positive integer.'.format(category))
        rules_dict[category] = timecount

    # Emit the rules in the canonical category order, independent of input order.
    rules: OrderedDict[str, int] = OrderedDict()
    for category in cls._valid_categories:
        if category in rules_dict:
            rules[category] = rules_dict[category]
    return rules
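# A minimal standalone sketch of the parsing above, to show how a retention
# specification is split, validated, and reordered. The category names
# ('latest', 'hours', 'days', ...) and the function name are assumptions for
# illustration; only the splitting and canonical-ordering logic mirrors
# _parse_rules.
import re
from collections import OrderedDict

VALID_CATEGORIES = ('latest', 'hours', 'days', 'weeks', 'months', 'years')  # assumed

def parse_rules_sketch(rules_spec):
    rules_dict = {}
    for token in rules_spec.split(','):
        match = re.fullmatch(r'([a-z]+)([0-9]+)', token)
        if not match or match.group(1) not in VALID_CATEGORIES:
            raise ValueError('Invalid retention policy element {}.'.format(token))
        rules_dict[match.group(1)] = int(match.group(2))
    # Reorder into the canonical category order, independent of input order.
    return OrderedDict((c, rules_dict[c]) for c in VALID_CATEGORIES if c in rules_dict)

assert parse_rules_sketch('days30,latest3,hours24') == OrderedDict(
    [('latest', 3), ('hours', 24), ('days', 30)])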
def __init__(self, *, config: Config, name: str, module_configuration: ConfigDict, url: str, block_size: int) -> None:
    super().__init__(config=config,
                     name=name,
                     module_configuration=module_configuration,
                     url=url,
                     block_size=block_size)

    if self.parsed_url.username or self.parsed_url.password or self.parsed_url.hostname or self.parsed_url.port \
            or self.parsed_url.params or self.parsed_url.fragment or self.parsed_url.query:
        raise UsageError('The supplied URL {} is invalid.'.format(self.url))

    ceph_config_file = config.get_from_dict(module_configuration, 'cephConfigFile', types=str)
    client_identifier = config.get_from_dict(module_configuration, 'clientIdentifier', types=str)
    self._cluster = rados.Rados(conffile=ceph_config_file, rados_id=client_identifier)
    self._cluster.connect()

    # Create a bitwise or'd list of the configured features.
    self._new_image_features = 0
    for feature in config.get_from_dict(module_configuration, 'newImageFeatures', types=list):
        try:
            self._new_image_features = self._new_image_features | getattr(rbd, feature)
        except AttributeError:
            raise ConfigurationError('{}: Unknown image feature {}.'.format(module_configuration.full_name, feature))

    self._pool_name = None
    self._image_name = None
    self._snapshot_name = None

    self._simultaneous_reads = config.get_from_dict(module_configuration, 'simultaneousReads', types=int)
    self._simultaneous_writes = config.get_from_dict(module_configuration, 'simultaneousWrites', types=int)
    self._read_executor: Optional[JobExecutor] = None
    self._write_executor: Optional[JobExecutor] = None
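# An illustration of the bitwise-OR feature accumulation above, using a
# stand-in namespace instead of the real rbd module. The flag names and values
# here are made up for the example; rbd defines its own RBD_FEATURE_*
# constants, and an unknown name raises AttributeError just as in the code.
class FakeRBD:
    RBD_FEATURE_LAYERING = 1 << 0       # illustrative value
    RBD_FEATURE_EXCLUSIVE_LOCK = 1 << 2  # illustrative value
    RBD_FEATURE_OBJECT_MAP = 1 << 3      # illustrative value

features = 0
for name in ['RBD_FEATURE_LAYERING', 'RBD_FEATURE_EXCLUSIVE_LOCK']:
    features |= getattr(FakeRBD, name)

assert features == 0b101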
def open_w(self, io_name, size=None, force=False):
    # io_name has the form rbd://pool/imagename@snapshotname or rbd://pool/imagename
    self.io_name = io_name
    img_name = re.match('^rbd://([^/]+)/([^@]+)$', io_name)
    if not img_name:
        raise UsageError('Not a valid io name: {}. Need pool/imagename.'.format(io_name))
    self.pool_name, self.image_name = img_name.groups()

    # Try opening it and quit if that's not possible.
    try:
        ioctx = self._cluster.open_ioctx(self.pool_name)
    except rados.ObjectNotFound:
        raise FileNotFoundError('Pool not found: {}'.format(self.pool_name)) from None

    try:
        rbd.Image(ioctx, self.image_name)
    except rbd.ImageNotFound:
        # The image does not exist yet, so create it; size must be given here.
        rbd.RBD().create(ioctx, self.image_name, size, old_format=False, features=self._new_image_features)
    else:
        if not force:
            raise FileExistsError('Restore target {} already exists. Force the restore if you want to overwrite it.'.format(self.io_name))
        else:
            if size > self.size():
                raise IOError('Restore target {} is too small. Its size is {} bytes, but we need {} bytes for the restore.'.format(
                    self.io_name, self.size(), size))
def open_r(self) -> None:
    re_match = re.match('^([^/]+)/(?:([^/]*)/)?([^@]+)(?:@(.+))?$', self.parsed_url.path)
    if not re_match:
        raise UsageError('URL {} is invalid. Need {}:<pool>[/<namespace>]/<imagename>[@<snapshotname>].'.format(
            self.url, self.name))
    self._pool_name, self._namespace_name, self._image_name, self._snapshot_name = re_match.groups()

    # Try opening it and quit if that's not possible.
    try:
        ioctx = self._cluster.open_ioctx(self._pool_name)
        if self._namespace_name is not None and len(self._namespace_name) > 0:
            logger.debug(f'Configuring io context to use namespace {self._namespace_name}.')
            ioctx.set_namespace(self._namespace_name)
    except rados.ObjectNotFound:
        raise FileNotFoundError('Ceph pool {} not found.'.format(self._pool_name)) from None

    try:
        self._rbd_image = rbd.Image(ioctx, self._image_name, self._snapshot_name, read_only=True)
    except rbd.ImageNotFound:
        raise FileNotFoundError('RBD image or snapshot {} not found.'.format(self.url)) from None
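# A quick demonstration of the URL-path pattern above against the three
# supported forms. The pool, namespace, image, and snapshot names are made up:
import re

_PATTERN = '^([^/]+)/(?:([^/]*)/)?([^@]+)(?:@(.+))?$'
for path in ['rbd/vm-disk', 'rbd/tenant-a/vm-disk', 'rbd/tenant-a/vm-disk@backup-2021']:
    print(re.match(_PATTERN, path).groups())
# ('rbd', None, 'vm-disk', None)
# ('rbd', 'tenant-a', 'vm-disk', None)
# ('rbd', 'tenant-a', 'vm-disk', 'backup-2021')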
def open_r(self, io_name):
    # io_name has the form rbd://pool/imagename@snapshotname or rbd://pool/imagename
    super().open_r(io_name)
    self.io_name = io_name
    img_name = re.match('^rbd://([^/]+)/([^@]+)@?(.+)?$', io_name)
    if not img_name:
        raise UsageError('Not a valid io name: {}. Need pool/imagename or pool/imagename@snapshotname.'.format(io_name))
    self.pool_name, self.image_name, self.snapshot_name = img_name.groups()

    # Try opening it and quit if that's not possible.
    try:
        ioctx = self._cluster.open_ioctx(self.pool_name)
    except rados.ObjectNotFound:
        raise FileNotFoundError('Pool not found: {}'.format(self.pool_name)) from None

    try:
        rbd.Image(ioctx, self.image_name, self.snapshot_name, read_only=True)
    except rbd.ImageNotFound:
        raise FileNotFoundError('Image or snapshot not found: {}'.format(self.io_name)) from None
def open_r(self, io_name):
    super().open_r(io_name)

    _s = re.match('^file://(.+)$', io_name)
    if not _s:
        raise UsageError('Not a valid io name: {}. Need a file path, e.g. file:///somepath/file.'.format(io_name))
    self.io_name = _s.groups()[0]
def open_r(self) -> None:
    self._read_executor = JobExecutor(name='IO-Read', workers=self._simultaneous_reads, blocking_submit=False)

    re_match = re.match('^([^/]+)/([^@]+)(?:@(.+))?$', self.parsed_url.path)
    if not re_match:
        raise UsageError('URL {} is invalid. Need {}:<pool>/<imagename> or {}:<pool>/<imagename>@<snapshotname>.'.format(
            self.url, self.name, self.name))
    self._pool_name, self._image_name, self._snapshot_name = re_match.groups()

    # Try opening it and quit if that's not possible.
    try:
        ioctx = self._cluster.open_ioctx(self._pool_name)
    except rados.ObjectNotFound:
        raise FileNotFoundError('Ceph pool {} not found.'.format(self._pool_name)) from None

    try:
        rbd.Image(ioctx, self._image_name, self._snapshot_name, read_only=True)
    except rbd.ImageNotFound:
        raise FileNotFoundError('RBD image or snapshot {} not found.'.format(self.url)) from None
def open_w(self, size: int, force: bool = False, sparse: bool = False) -> None:
    self._write_executor = JobExecutor(name='IO-Write', workers=self._simultaneous_writes, blocking_submit=True)

    re_match = re.match('^([^/]+)/([^@]+)$', self.parsed_url.path)
    if not re_match:
        raise UsageError('URL {} is invalid. Need {}:<pool>/<imagename>.'.format(self.url, self.name))
    self._pool_name, self._image_name = re_match.groups()

    # Try opening it and quit if that's not possible.
    try:
        ioctx = self._cluster.open_ioctx(self._pool_name)
    except rados.ObjectNotFound:
        raise FileNotFoundError('Ceph pool {} not found.'.format(self._pool_name)) from None

    try:
        image = rbd.Image(ioctx, self._image_name)
    except rbd.ImageNotFound:
        rbd.RBD().create(ioctx, self._image_name, size, old_format=False, features=self._new_image_features)
        # Open the freshly created image once to verify that the create succeeded.
        rbd.Image(ioctx, self._image_name)
    else:
        try:
            if not force:
                raise FileExistsError('RBD image {} already exists. Force the restore if you want to overwrite it.'.format(self.url))
            else:
                image_size = image.size()
                if size > image_size:
                    raise IOError('RBD image {} is too small. Its size is {} bytes, but we need {} bytes for the restore.'.format(
                        self.url, image_size, size))

                # If this is an existing image and sparse is true discard all objects from this image.
                # RBD discard only supports a maximum region length of 0x7fffffff.
                if sparse:
                    logger.debug('Discarding all objects of RBD image {}.'.format(self.url))
                    region_start = 0
                    bytes_to_end = image_size
                    while bytes_to_end > 0:
                        region_length = min(0x7fffffff, bytes_to_end)
                        image.discard(region_start, region_length)
                        region_start += region_length
                        bytes_to_end -= region_length
        finally:
            image.close()
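# The discard loop above splits the image into regions of at most 0x7fffffff
# bytes because a single RBD discard is capped at that length. A pure-Python
# sketch of the same arithmetic, with a tiny cap so the output stays readable:
def discard_regions(image_size, max_region=0x7fffffff):
    start, remaining = 0, image_size
    while remaining > 0:
        length = min(max_region, remaining)
        yield (start, length)
        start += length
        remaining -= length

assert list(discard_regions(10, max_region=4)) == [(0, 4), (4, 4), (8, 2)]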
def __init__(self, *, config: Config, name: str, module_configuration: ConfigDict, url: str, block_size: int) -> None:
    super().__init__(config=config,
                     name=name,
                     module_configuration=module_configuration,
                     url=url,
                     block_size=block_size)

    if self.parsed_url.username or self.parsed_url.password or self.parsed_url.hostname or self.parsed_url.port \
            or self.parsed_url.params or self.parsed_url.fragment or self.parsed_url.query:
        raise UsageError('The supplied URL {} is invalid.'.format(self.url))

    ceph_config_file = config.get_from_dict(module_configuration, 'cephConfigFile', types=str)
    client_identifier = config.get_from_dict(module_configuration, 'clientIdentifier', types=str)
    self._cluster = rados.Rados(conffile=ceph_config_file, rados_id=client_identifier)
    self._cluster.connect()

    # Create a bitwise or'd list of the configured features.
    self._new_image_features = 0
    for feature in config.get_from_dict(module_configuration, 'newImageFeatures', types=list):
        try:
            self._new_image_features = self._new_image_features | getattr(rbd, feature)
        except AttributeError:
            raise ConfigurationError('{}: Unknown image feature {}.'.format(module_configuration.full_name, feature))

    self._pool_name = None
    self._image_name = None
    self._snapshot_name = None
    self._rbd_image = None

    self._simultaneous_reads = config.get_from_dict(module_configuration, 'simultaneousReads', types=int)
    self._simultaneous_writes = config.get_from_dict(module_configuration, 'simultaneousWrites', types=int)

    self._read_queue: Deque[DereferencedBlock] = deque()
    self._write_queue: Deque[Tuple[DereferencedBlock, bytes]] = deque()
    self._outstanding_aio_reads = 0
    self._outstanding_aio_writes = 0
    self._submitted_aio_writes = threading.BoundedSemaphore(self._simultaneous_writes)
    self._read_completion_queue: queue.Queue[Tuple[rbd.Completion, float, float, DereferencedBlock, bytes]] = queue.Queue()
    self._write_completion_queue: queue.Queue[Tuple[rbd.Completion, float, float, DereferencedBlock]] = queue.Queue()
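# The BoundedSemaphore above throttles how many aio writes may be in flight at
# once. This is a generic sketch of that pattern, not Benji's code: acquire a
# slot before submitting, release it from the completion path. The op names
# are made up.
import threading

sem = threading.BoundedSemaphore(2)  # at most two outstanding operations
in_flight = []

def submit(op):
    sem.acquire()          # blocks while two operations are outstanding
    in_flight.append(op)

def on_complete(op):
    in_flight.remove(op)
    sem.release()          # frees a slot for the next submission

submit('write-1')
submit('write-2')
on_complete('write-1')
submit('write-3')          # only possible because write-1 completed
assert in_flight == ['write-2', 'write-3']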
def parse_and_validate_labels(labels: List[str]) -> Tuple[List[Tuple[str, str]], List[str]]:
    add_list: List[Tuple[str, str]] = []
    remove_list: List[str] = []
    for label in labels:
        if len(label) == 0:
            raise UsageError('A zero-length label is invalid.')

        if label.endswith('-'):
            # A trailing dash removes the label.
            name = label[:-1]
            if not InputValidation.is_label_name(name):
                raise UsageError('Label name {} is invalid.'.format(name))
            remove_list.append(name)
        elif '=' in label:
            # Split on the first equals sign only, so values may contain one themselves.
            name, value = label.split('=', 1)
            if len(name) == 0:
                raise UsageError('Missing label key in label {}.'.format(label))
            if not InputValidation.is_label_name(name):
                raise UsageError('Label name {} is invalid.'.format(name))
            if not InputValidation.is_label_value(value):
                raise UsageError('Label value {} is invalid.'.format(value))
            add_list.append((name, value))
        else:
            # A bare name adds the label with an empty value.
            name = label
            if not InputValidation.is_label_name(name):
                raise UsageError('Label name {} is invalid.'.format(name))
            add_list.append((name, ''))

    return add_list, remove_list
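# Example outcomes of the label syntax handled above: 'name=value' adds a
# label, a trailing '-' removes one, and a bare name adds it with an empty
# value. This standalone sketch skips the InputValidation checks, which are
# Benji-internal; the label names are made up.
def parse_labels_sketch(labels):
    add_list, remove_list = [], []
    for label in labels:
        if label.endswith('-'):
            remove_list.append(label[:-1])
        elif '=' in label:
            name, value = label.split('=', 1)
            add_list.append((name, value))
        else:
            add_list.append((label, ''))
    return add_list, remove_list

assert parse_labels_sketch(['env=prod', 'legacy-', 'pinned']) == \
    ([('env', 'prod'), ('pinned', '')], ['legacy'])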
def __init__(self, *, config: Config, name: str, module_configuration: ConfigDict, url: str, block_size: int) -> None:
    super().__init__(config=config,
                     name=name,
                     module_configuration=module_configuration,
                     url=url,
                     block_size=block_size)

    if self.parsed_url.params or self.parsed_url.fragment:
        raise UsageError('The supplied URL {} is invalid.'.format(self.url))

    self._read_queue: List[DereferencedBlock] = []
    self._outstanding_write: Optional[Tuple[DereferencedBlock, bytes]] = None

    self._username = config.get_from_dict(module_configuration, 'username', None, types=str)
    self._password = config.get_from_dict(module_configuration, 'password', None, types=str)
    self._target_username = config.get_from_dict(module_configuration, 'targetUsername', None, types=str)
    self._target_password = config.get_from_dict(module_configuration, 'targetPassword', None, types=str)

    header_digest = config.get_from_dict(module_configuration, 'headerDigest', types=str)
    header_digest_attr_name = 'ISCSI_HEADER_DIGEST_{}'.format(header_digest)
    if hasattr(libiscsi, header_digest_attr_name):
        self._header_digest = getattr(libiscsi, header_digest_attr_name)
    else:
        raise ConfigurationError('Unknown header digest setting {}.'.format(header_digest))

    self._initiator_name = config.get_from_dict(module_configuration, 'initiatorName', types=str)
    self._timeout = config.get_from_dict(module_configuration, 'timeout', None, types=int)

    self._iscsi_context: Any = None
def __init__(self, *, config: Config, name: str, module_configuration: ConfigDict, url: str, block_size: int) -> None:
    super().__init__(config=config,
                     name=name,
                     module_configuration=module_configuration,
                     url=url,
                     block_size=block_size)

    if self.parsed_url.username or self.parsed_url.password or self.parsed_url.hostname or self.parsed_url.port \
            or self.parsed_url.params or self.parsed_url.fragment or self.parsed_url.query:
        raise UsageError('The supplied URL {} is invalid.'.format(self.url))
def get(cls, url: str, block_size: int) -> IOBase:
    parsed_url = parse.urlparse(url)
    name = parsed_url.scheme
    if not name:
        raise UsageError('The supplied URL {} is invalid. You must provide a scheme.'.format(url))
    if name not in cls._modules:
        raise ConfigurationError('IO scheme {} is undefined.'.format(name))

    module = cls._modules[name].module
    module_arguments = cls._modules[name].arguments.copy()
    module_arguments['url'] = url
    module_arguments['block_size'] = block_size
    return module.IO(**module_arguments)
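# The module lookup above keys off the URL scheme as reported by urlparse.
# The example URLs are made up:
from urllib import parse

assert parse.urlparse('file:///var/tmp/restore.img').scheme == 'file'
assert parse.urlparse('rbd:poolname/imagename@snapshotname').scheme == 'rbd'
assert parse.urlparse('/no/scheme/here').scheme == ''  # would raise UsageError above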
def __init__(self, *, config: Config, name: str, module_configuration: ConfigDict, url: str, block_size: int) -> None:
    super().__init__(config=config,
                     name=name,
                     module_configuration=module_configuration,
                     url=url,
                     block_size=block_size)

    if self.parsed_url.username or self.parsed_url.password or self.parsed_url.hostname or self.parsed_url.port \
            or self.parsed_url.params or self.parsed_url.fragment or self.parsed_url.query:
        raise UsageError('The supplied URL {} is invalid.'.format(self.url))

    self._simultaneous_reads = config.get_from_dict(module_configuration, 'simultaneousReads', types=int)
    self._simultaneous_writes = config.get_from_dict(module_configuration, 'simultaneousWrites', types=int)
    self._read_executor: Optional[JobExecutor] = None
    self._write_executor: Optional[JobExecutor] = None
def open_w(self, io_name, size=None, force=False):
    _s = re.match('^file://(.+)$', io_name)
    if not _s:
        raise UsageError('Not a valid io name: {}. Need a file path, e.g. file:///somepath/file.'.format(io_name))
    self.io_name = _s.groups()[0]

    if os.path.exists(self.io_name):
        if not force:
            raise FileExistsError('Restore target {} already exists. Force the restore if you want to overwrite it.'.format(self.io_name))
        else:
            if size > self.size():
                raise IOError('Restore target {} is too small. Its size is {} bytes, but we need {} bytes for the restore.'.format(
                    self.io_name, self.size(), size))
    else:
        # Create the file sparsely by seeking to the last byte and writing it.
        with open(self.io_name, 'wb') as f:
            f.seek(size - 1)
            f.write(b'\0')
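# Creating a sparse file by seeking past the end and writing a single byte,
# as the restore-target creation above does. This sketch uses a temporary
# file so it can run anywhere:
import os
import tempfile

size = 1024 * 1024
with tempfile.NamedTemporaryFile(delete=False) as f:
    f.seek(size - 1)
    f.write(b'\0')
# The apparent size matches; on most filesystems the blocks are allocated lazily.
assert os.path.getsize(f.name) == size
os.unlink(f.name)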
def init_logging(*, logfile: Optional[str] = None, console_level: str = 'INFO', console_formatter: str = 'json',
                 logfile_formatter: str = 'legacy') -> None:
    logging_config: Dict = {
        "version": 1,
        "disable_existing_loggers": False,
        "formatters": {
            "console-plain": {
                "()": structlog.stdlib.ProcessorFormatter,
                "processor": FormatRenderer(colors=False, fmt='{log_color}{level_uc:>8s}: {event:s}'),
                "foreign_pre_chain": _sl_foreign_pre_chain,
            },
            "console-colored": {
                "()": structlog.stdlib.ProcessorFormatter,
                "processor": FormatRenderer(colors=True, fmt='{log_color}{level_uc:>8s}: {event:s}'),
                "foreign_pre_chain": _sl_foreign_pre_chain,
            },
            "legacy": {
                "()": structlog.stdlib.ProcessorFormatter,
                "processor": FormatRenderer(
                    colors=False,
                    fmt='{timestamp_local_ctime} {process:d}/{thread_name:s} {file:s}:{line:d} {level_uc:s} {event:s}'),
                "foreign_pre_chain": _sl_foreign_pre_chain,
            },
            "json": {
                "()": structlog.stdlib.ProcessorFormatter,
                "processor": structlog.processors.JSONRenderer(),
                "foreign_pre_chain": _sl_foreign_pre_chain,
            },
        },
        "handlers": {
            "console": {
                "level": None,  # Filled in
                "class": "logging.StreamHandler",
                "formatter": None,  # Filled in
                "stream": "ext://sys.stderr",
            },
            "file": {
                "level": None,  # Filled in
                "class": "logging.handlers.WatchedFileHandler",
                "filename": None,  # Filled in
                "formatter": None,  # Filled in
            },
        },
        "loggers": {
            "": {
                "handlers": None,  # Filled in
                "level": "DEBUG",
                "propagate": True,
            },
        }
    }

    if console_formatter not in logging_config['formatters'].keys():
        raise UsageError('Event formatter {} is unknown.'.format(console_formatter))
    if logfile_formatter not in logging_config['formatters'].keys():
        raise UsageError('Event formatter {} is unknown.'.format(logfile_formatter))

    logging_config['handlers']['console']['formatter'] = console_formatter
    logging_config['handlers']['console']['level'] = console_level

    if logfile is not None:
        logging_config['handlers']['file']['filename'] = logfile
        # The logfile is at least as verbose as the console, but never less verbose than INFO.
        logging_config['handlers']['file']['level'] = min(logging.getLevelName(console_level), logging.INFO)
        logging_config['handlers']['file']['formatter'] = logfile_formatter
    else:
        del logging_config['handlers']['file']

    logging_config['loggers']['']['handlers'] = logging_config['handlers'].keys()

    logging.config.dictConfig(logging_config)
def __init__(self, *, config: Config, name: str, module_configuration: ConfigDict, url: str, block_size: int) -> None:
    super().__init__(config=config,
                     name=name,
                     module_configuration=module_configuration,
                     url=url,
                     block_size=block_size)

    if self.parsed_url.username or self.parsed_url.password or self.parsed_url.hostname or self.parsed_url.port \
            or self.parsed_url.params or self.parsed_url.fragment:
        raise UsageError('The supplied URL {} is invalid.'.format(self.url))
    if self.parsed_url.query:
        try:
            extra_ceph_conf = parse_qs(self.parsed_url.query, keep_blank_values=True, strict_parsing=True, errors='strict')
        except (ValueError, UnicodeError) as exception:
            raise UsageError('The supplied URL {} is invalid.'.format(self.url)) from exception

        # parse_qs returns the values as lists, only consider the first appearance of each key in the query string.
        extra_ceph_conf = {key: value[0] for key, value in extra_ceph_conf.items()}
    else:
        extra_ceph_conf = {}

    ceph_config_file = config.get_from_dict(module_configuration, 'cephConfigFile', types=str)
    if 'client_identifier' in extra_ceph_conf:
        client_identifier = extra_ceph_conf['client_identifier']
        del extra_ceph_conf['client_identifier']
    else:
        client_identifier = config.get_from_dict(module_configuration, 'clientIdentifier', types=str)
    self._cluster = rados.Rados(conffile=ceph_config_file, rados_id=client_identifier, conf=extra_ceph_conf)
    self._cluster.connect()

    # Create a bitwise or'd list of the configured features.
    self._new_image_features = 0
    for feature in config.get_from_dict(module_configuration, 'newImageFeatures', types=list):
        try:
            self._new_image_features = self._new_image_features | getattr(rbd, feature)
        except AttributeError:
            raise ConfigurationError('{}: Unknown image feature {}.'.format(module_configuration.full_name, feature))

    self._pool_name = None
    self._image_name = None
    self._snapshot_name = None
    self._rbd_image = None

    self._simultaneous_reads = config.get_from_dict(module_configuration, 'simultaneousReads', types=int)
    self._simultaneous_writes = config.get_from_dict(module_configuration, 'simultaneousWrites', types=int)

    self._read_queue: Deque[DereferencedBlock] = deque()
    self._write_queue: Deque[Tuple[DereferencedBlock, bytes]] = deque()
    self._outstanding_aio_reads = 0
    self._outstanding_aio_writes = 0
    self._aio_write_complete = threading.Event()
    # Set the queue limit to two times the number of simultaneous writes plus one to ensure that there are always
    # enough writes available even when all outstanding aio writes finish at the same time.
    self._max_write_queue_len = 2 * self._simultaneous_writes + 1
    self._read_completion_queue: queue.Queue[Tuple[rbd.Completion, float, float, DereferencedBlock, bytes]] = queue.Queue()
    self._write_completion_queue: queue.Queue[Tuple[rbd.Completion, float, float, DereferencedBlock]] = queue.Queue()
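# How the query-string handling above turns extra URL parameters into a flat
# Ceph configuration dict: parse_qs returns lists, so only the first value per
# key is kept. The option names in this query string are made up:
from urllib.parse import parse_qs

query = 'client_identifier=backup&rbd_cache=false'
conf = {key: value[0]
        for key, value in parse_qs(query, keep_blank_values=True, strict_parsing=True).items()}
assert conf == {'client_identifier': 'backup', 'rbd_cache': 'false'}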
def open_w(self, size: int, force: bool = False, sparse: bool = False) -> None:
    re_match = re.match('^([^/]+)/(?:([^/]*)/)?([^@]+)$', self.parsed_url.path)
    if not re_match:
        raise UsageError('URL {} is invalid. Need {}:<pool>[/<namespace>]/<imagename>.'.format(self.url, self.name))
    self._pool_name, self._namespace_name, self._image_name = re_match.groups()

    # Try opening it and quit if that's not possible.
    try:
        ioctx = self._cluster.open_ioctx(self._pool_name)
        if self._namespace_name is not None and len(self._namespace_name) > 0:
            logger.debug(f'Configuring io context to use namespace {self._namespace_name}.')
            ioctx.set_namespace(self._namespace_name)
    except rados.ObjectNotFound:
        raise FileNotFoundError('Ceph pool {} not found.'.format(self._pool_name)) from None

    try:
        self._rbd_image = rbd.Image(ioctx, self._image_name)
    except rbd.ImageNotFound:
        rbd.RBD().create(ioctx, self._image_name, size, old_format=False, features=self._new_image_features)
        self._rbd_image = rbd.Image(ioctx, self._image_name)
    else:
        assert self._rbd_image is not None
        if not force:
            raise FileExistsError('RBD image {} already exists. Force the restore if you want to overwrite it.'.format(self.url))
        else:
            image_size = self._rbd_image.size()
            if size > image_size:
                raise IOError('RBD image {} is too small. Its size is {} bytes, but we need {} bytes for the restore.'.format(
                    self.url, image_size, size))

            # If this is an existing image and sparse is true discard all objects from this image.
            # RBD discard only supports a maximum region length of 0x7fffffff.
            if sparse:
                logger.debug('Discarding all objects of RBD image {}.'.format(self.url))
                region_start = 0
                bytes_to_end = image_size
                while bytes_to_end > 0:
                    region_length = min(0x7fffffff, bytes_to_end)
                    self._rbd_image.discard(region_start, region_length)
                    region_start += region_length
                    bytes_to_end -= region_length
def __init__(self, *, config: Config, name: str, module_configuration: ConfigDict, url: str, block_size: int) -> None:
    super().__init__(config=config,
                     name=name,
                     module_configuration=module_configuration,
                     url=url,
                     block_size=block_size)

    if self.parsed_url.username or self.parsed_url.password or self.parsed_url.hostname or self.parsed_url.port \
            or self.parsed_url.params or self.parsed_url.fragment:
        raise UsageError('The supplied URL {} is invalid.'.format(self.url))
    if self.parsed_url.query:
        try:
            extra_ceph_conf = parse_qs(self.parsed_url.query, keep_blank_values=True, strict_parsing=True, errors='strict')
        except (ValueError, UnicodeError) as exception:
            raise UsageError('The supplied URL {} is invalid.'.format(self.url)) from exception

        # parse_qs returns the values as lists, only consider the first appearance of each key in the query string.
        extra_ceph_conf = {key: value[0] for key, value in extra_ceph_conf.items()}
    else:
        extra_ceph_conf = {}

    ceph_config_file = config.get_from_dict(module_configuration, 'cephConfigFile', types=str)
    if 'client_identifier' in extra_ceph_conf:
        client_identifier = extra_ceph_conf['client_identifier']
        del extra_ceph_conf['client_identifier']
    else:
        client_identifier = config.get_from_dict(module_configuration, 'clientIdentifier', types=str)
    self._cluster = rados.Rados(conffile=ceph_config_file, rados_id=client_identifier, conf=extra_ceph_conf)
    self._cluster.connect()

    # Create a bitwise or'd list of the configured features.
    self._new_image_features = 0
    for feature in config.get_from_dict(module_configuration, 'newImageFeatures', types=list):
        try:
            self._new_image_features = self._new_image_features | getattr(rbd, feature)
        except AttributeError:
            raise ConfigurationError('{}: Unknown image feature {}.'.format(module_configuration.full_name, feature))

    self._pool_name = None
    self._image_name = None
    self._snapshot_name = None

    self._simultaneous_reads = config.get_from_dict(module_configuration, 'simultaneousReads', types=int)
    self._simultaneous_writes = config.get_from_dict(module_configuration, 'simultaneousWrites', types=int)
    self._read_executor: Optional[JobExecutor] = None
    self._write_executor: Optional[JobExecutor] = None
def init_logging(logfile: Optional[str], console_level: str, console_formatter: str = 'console-plain',
                 logfile_formatter: str = 'legacy') -> None:
    logging_config: Dict = {
        "version": 1,
        "disable_existing_loggers": False,
        "formatters": {
            "console-plain": {
                "()": structlog.stdlib.ProcessorFormatter,
                "processor": FormatRenderer(colors=False, fmt='{log_color}{level_uc:>8s}: {event:s}'),
                "foreign_pre_chain": _sl_foreign_pre_chain,
            },
            "console-colored": {
                "()": structlog.stdlib.ProcessorFormatter,
                "processor": FormatRenderer(colors=True, fmt='{log_color}{level_uc:>8s}: {event:s}'),
                "foreign_pre_chain": _sl_foreign_pre_chain,
            },
            "legacy": {
                "()": structlog.stdlib.ProcessorFormatter,
                "processor": FormatRenderer(
                    colors=False,
                    fmt='{timestamp_local_ctime} {process:d}/{thread_name:s} {file:s}:{line:d} {level_uc:s} {event:s}'),
                "foreign_pre_chain": _sl_foreign_pre_chain,
            },
            "json": {
                "()": structlog.stdlib.ProcessorFormatter,
                "processor": structlog.processors.JSONRenderer(),
                "foreign_pre_chain": _sl_foreign_pre_chain,
            },
        },
        "handlers": {
            "console": {
                "level": None,  # Filled in
                "class": "logging.StreamHandler",
                "formatter": None,  # Filled in
                "stream": "ext://sys.stderr",
            },
            "file": {
                "level": None,  # Filled in
                "class": "logging.handlers.WatchedFileHandler",
                "filename": None,  # Filled in
                "formatter": None,  # Filled in
            },
        },
        "loggers": {
            "": {
                "handlers": None,  # Filled in
                "level": "DEBUG",
                "propagate": True,
            },
        }
    }

    if console_formatter not in logging_config['formatters'].keys():
        raise UsageError('Event formatter {} is unknown.'.format(console_formatter))
    if logfile_formatter not in logging_config['formatters'].keys():
        raise UsageError('Event formatter {} is unknown.'.format(logfile_formatter))

    logging_config['handlers']['console']['formatter'] = console_formatter
    logging_config['handlers']['console']['level'] = console_level

    if logfile is not None:
        logging_config['handlers']['file']['filename'] = logfile
        # The logfile is at least as verbose as the console, but never less verbose than INFO.
        logging_config['handlers']['file']['level'] = min(logging.getLevelName(console_level), logging.INFO)
        logging_config['handlers']['file']['formatter'] = logfile_formatter
    else:
        del logging_config['handlers']['file']

    logging_config['loggers']['']['handlers'] = logging_config['handlers'].keys()

    logging.config.dictConfig(logging_config)

    # Silence alembic.
    logging.getLogger('alembic').setLevel(logging.WARN)
    # Silence boto3, see https://github.com/boto/boto3/issues/521.
    logging.getLogger('boto3').setLevel(logging.WARN)
    logging.getLogger('botocore').setLevel(logging.WARN)
    logging.getLogger('nose').setLevel(logging.WARN)
    # This disables ResourceWarnings from boto3 which are normal.
    # See: https://github.com/boto/boto3/issues/454
    warnings.filterwarnings("ignore", category=ResourceWarning, message=r'unclosed.*<(?:ssl.SSLSocket|socket\.socket).*>')
    # Silence b2.
    logging.getLogger('b2').setLevel(logging.WARN)

    if os.getenv('BENJI_DEBUG_SQL') == '1':
        logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)

    logger.info('$ ' + ' '.join(sys.argv))
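# A minimal stdlib-only sketch of the dictConfig shape used above, without the
# structlog ProcessorFormatter entries (FormatRenderer and
# _sl_foreign_pre_chain are internal to this module). The formatter string is
# an assumption chosen to resemble the plain console output:
import logging
import logging.config

logging.config.dictConfig({
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'plain': {'format': '%(levelname)8s: %(message)s'},
    },
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
            'level': 'INFO',
            'formatter': 'plain',
            'stream': 'ext://sys.stderr',
        },
    },
    'loggers': {
        '': {'handlers': ['console'], 'level': 'DEBUG', 'propagate': True},
    },
})
logging.getLogger(__name__).info('logging initialized')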