def __init__(self, shard):
    Service.__init__(self, shard)

    # Determine location of log file, and make directories.
    log_dir = os.path.join(config.log_dir, "cms")
    if not mkdir(config.log_dir) or \
            not mkdir(log_dir):
        logger.error("Cannot create necessary directories.")
        self.exit()
        return
    log_filename = "%d.log" % int(time.time())

    # Install a global file handler.
    self.file_handler = FileHandler(os.path.join(log_dir, log_filename),
                                    mode='w', encoding='utf-8')
    self.file_handler.setLevel(logging.DEBUG)
    self.file_handler.setFormatter(DetailedFormatter(False))
    root_logger.addHandler(self.file_handler)

    # Provide a symlink to the latest log file.
    try:
        os.remove(os.path.join(log_dir, "last.log"))
    except OSError:
        pass
    os.symlink(log_filename, os.path.join(log_dir, "last.log"))

    self._last_messages = deque(maxlen=self.LAST_MESSAGES_COUNT)

def __init__(self, service=None, path=None):
    """Initialization.

    service (Service): the service we are running in. If None, we
        simply avoid caching and do not allow the service to step in
        once in a while.
    path (string): if specified, back the FileCacher with a
        file system-based storage instead of the default
        database-based one. The specified directory will be used as
        root for the storage and it will be created if it doesn't
        exist.

    """
    self.service = service

    if path is None:
        self.backend = DBBackend(self.service)
    else:
        self.backend = FSBackend(path, self.service)

    if self.service is None:
        self.base_dir = tempfile.mkdtemp(dir=config.temp_dir)
    else:
        self.base_dir = os.path.join(
            config.cache_dir,
            "fs-cache-%s-%d" % (service._my_coord.name,
                                service._my_coord.shard))
    self.tmp_dir = os.path.join(self.base_dir, "tmp")
    self.obj_dir = os.path.join(self.base_dir, "objects")

    if not mkdir(config.cache_dir) or \
            not mkdir(self.base_dir) or \
            not mkdir(self.tmp_dir) or \
            not mkdir(self.obj_dir):
        logger.error("Cannot create necessary directories.")

def start_backdoor(self, backlog=50):
    """Start a backdoor server on a local UNIX domain socket.

    """
    backdoor_path = self.get_backdoor_path()
    try:
        os.remove(backdoor_path)
    except OSError as error:
        if error.errno != errno.ENOENT:
            raise
    else:
        logger.warning("A backdoor socket has been found and deleted.")
    mkdir(os.path.dirname(backdoor_path))
    backdoor_sock = gevent.socket.socket(socket.AF_UNIX,
                                         socket.SOCK_STREAM)
    backdoor_sock.setblocking(0)
    backdoor_sock.bind(backdoor_path)
    user = pwd.getpwnam(config.cmsuser)
    # We would like to also set the user to "cmsuser" but only root
    # can do that. Therefore we limit ourselves to the group.
    os.chown(backdoor_path, os.getuid(), user.pw_gid)
    os.chmod(backdoor_path, 0o770)
    backdoor_sock.listen(backlog)
    self.backdoor = BackdoorServer(backdoor_sock,
                                   locals={'service': self})
    self.backdoor.start()

def purge_cache(self):
    """Empty the local cache.

    """
    self.destroy_cache()
    if not mkdir(config.cache_dir) or not mkdir(self.file_dir):
        logger.error("Cannot create necessary directories.")
        raise RuntimeError("Cannot create necessary directories.")

def purge_cache(self):
    """Delete all the content of the cache.

    """
    shutil.rmtree(self.base_dir)
    if not mkdir(config.cache_dir) or \
            not mkdir(self.base_dir) or \
            not mkdir(self.tmp_dir) or \
            not mkdir(self.obj_dir):
        logger.error("Cannot create necessary directories.")

def purge_cache(self):
    """Empty the local cache.

    This function must not be called if the cache directory is shared.

    """
    self.destroy_cache()
    if not mkdir(config.cache_dir) or not mkdir(self.file_dir):
        logger.error("Cannot create necessary directories.")
        raise RuntimeError("Cannot create necessary directories.")

def __init__(self, service=None, path=None, null=False):
    """Initialize.

    By default the database-powered backend will be used, but this
    can be changed using the parameters.

    service (Service|None): the service we are running for. Only
        used if present to determine the location of the
        file-system cache (and to provide the shard number to the
        Sandbox... sigh!).
    path (string|None): if specified, back the FileCacher with a
        file system-based storage instead of the default
        database-based one. The specified directory will be used as
        root for the storage and it will be created if it doesn't
        exist.
    null (bool): if True, back the FileCacher with a NullBackend,
        that just discards every file it receives. This setting
        takes priority over path.

    """
    self.service = service

    if null:
        self.backend = NullBackend()
    elif path is None:
        if config.s3_backend_enabled:
            self.backend = S3Backend(
                region=config.s3_backend_region,
                bucket=config.s3_backend_bucket,
                prefix=config.s3_backend_prefix,
                s3_proxy=config.s3_backend_proxy,
                base_url_for_fetch=config.s3_backend_fetch_base_url,
            )
        else:
            self.backend = DBBackend()
    else:
        self.backend = FSBackend(path)

    if service is None:
        self.file_dir = tempfile.mkdtemp(dir=config.temp_dir)
        # Delete this directory on exit since it has a random name and
        # won't be used again.
        atexit.register(lambda: rmtree(self.file_dir))
    else:
        self.file_dir = os.path.join(
            config.cache_dir,
            "fs-cache-%s-%d" % (service.name, service.shard))
    self.temp_dir = os.path.join(self.file_dir, "_temp")

    if not mkdir(config.cache_dir) or not mkdir(config.temp_dir) \
            or not mkdir(self.file_dir) or not mkdir(self.temp_dir):
        logger.error("Cannot create necessary directories.")
        raise RuntimeError("Cannot create necessary directories.")
    atexit.register(lambda: rmtree(self.temp_dir))

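# A minimal usage sketch for the constructor above, assuming the usual
# CMS-style FileCacher methods put_file_content()/get_file_content();
# those method names, and running without a service (service=None), are
# assumptions made for illustration, not part of the snippet itself.
def _filecacher_usage_sketch():
    # Standalone mode: no service, so files live in a throw-away
    # temporary directory and the default (database) backend is used.
    cacher = FileCacher()

    # Store some bytes and get back the digest identifying them, then
    # read the same content back through the cache.
    digest = cacher.put_file_content(b"hello world", "example file")
    assert cacher.get_file_content(digest) == b"hello world"

    # A null-backed cacher simply discards everything it receives.
    null_cacher = FileCacher(null=True)
    return digest, null_cacher
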
def initialize_logging(self):
    """Set up additional logging handlers.

    What we do, in detail, is to add a logger to file (whose
    filename depends on the coords) and a remote logger to a
    LogService. We also attach the service coords to all log
    messages.

    """
    filter_ = ServiceFilter(self.name, self.shard)

    # Update shell handler to attach service coords.
    shell_handler.addFilter(filter_)

    # Determine location of log file, and make directories.
    log_dir = os.path.join(config.log_dir,
                           "%s-%d" % (self.name, self.shard))
    mkdir(config.log_dir)
    mkdir(log_dir)
    log_filename = time.strftime("%Y-%m-%d-%H-%M-%S.log")

    # Install a file handler.
    file_handler = FileHandler(os.path.join(log_dir, log_filename),
                               mode='w', encoding='utf-8')
    if config.file_log_debug:
        file_log_level = logging.DEBUG
    else:
        file_log_level = logging.INFO
    file_handler.setLevel(file_log_level)
    file_handler.setFormatter(DetailedFormatter(False))
    file_handler.addFilter(filter_)
    root_logger.addHandler(file_handler)

    # Provide a symlink to the latest log file.
    try:
        os.remove(os.path.join(log_dir, "last.log"))
    except OSError:
        pass
    os.symlink(log_filename, os.path.join(log_dir, "last.log"))

    # Setup a remote LogService handler (except when we already are
    # LogService, to avoid circular logging).
    if self.name != "LogService":
        log_service = self.connect_to(ServiceCoord("LogService", 0))
        remote_handler = LogServiceHandler(log_service)
        remote_handler.setLevel(logging.INFO)
        remote_handler.addFilter(filter_)
        root_logger.addHandler(remote_handler)

def __init__(self, service=None, path=None, null=False, enabled=None):
    """Initialize.

    By default the database-powered backend will be used, but this
    can be changed using the parameters.

    service (Service|None): the service we are running for. Only
        used if present to determine the location of the
        file-system cache (and to provide the shard number to the
        Sandbox... sigh!).
    path (string|None): if specified, back the FileCacher with a
        file system-based storage instead of the default
        database-based one. The specified directory will be used as
        root for the storage and it will be created if it doesn't
        exist.
    null (bool): if True, back the FileCacher with a NullBackend,
        that just discards every file it receives. This setting
        takes priority over path.
    enabled (bool|None): overrides config.use_cache. If False, the
        files are always retrieved from the backend; otherwise the
        FileCacher behaves normally.

    """
    self.service = service

    if null:
        self.backend = NullBackend()
    elif path is None:
        self.backend = DBBackend()
    else:
        self.backend = FSBackend(path)

    if enabled is None:
        self.enabled = config.use_cache
    else:
        self.enabled = enabled

    if service is None:
        self.file_dir = tempfile.mkdtemp(dir=config.temp_dir)
    else:
        self.file_dir = os.path.join(
            config.cache_dir,
            "fs-cache-%s-%d" % (service.name, service.shard))
    self.temp_dir = os.path.join(self.file_dir, "_temp")

    if not mkdir(config.cache_dir) or not mkdir(self.file_dir) \
            or not mkdir(self.temp_dir):
        logger.error("Cannot create necessary directories.")
        raise RuntimeError("Cannot create necessary directories.")

def initialize_logging(service_name, service_shard):
    """Set up additional logging handlers.

    Some of the logging handlers are only activated when running for
    a service (by choice or because of technical issues). We
    therefore provide this method for services to call as soon as
    possible, with their coords as parameters, to complete logger
    initialization.

    What we do, in detail, is to add a logger to file (whose
    filename depends on the coords) and a remote logger to a
    LogService. We also attach the service coords to all log
    messages.

    """
    filter_ = ServiceFilter(service_name, service_shard)

    # Update shell handler to attach service coords.
    shell_handler.addFilter(filter_)

    # Determine location of log file, and make directories.
    log_dir = os.path.join(config.log_dir,
                           "%s-%d" % (service_name, service_shard))
    mkdir(config.log_dir)
    mkdir(log_dir)
    log_filename = "%d.log" % int(time.time())

    # Install a file handler.
    file_handler = FileHandler(os.path.join(log_dir, log_filename),
                               mode='w', encoding='utf-8')
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(CustomFormatter(False))
    file_handler.addFilter(filter_)
    root_logger.addHandler(file_handler)

    # Provide a symlink to the latest log file.
    try:
        os.remove(os.path.join(log_dir, "last.log"))
    except OSError:
        pass
    os.symlink(log_filename, os.path.join(log_dir, "last.log"))

    # Setup a remote LogService handler (except when we already are
    # LogService, to avoid circular logging).
    if service_name != "LogService":
        remote_handler = LogServiceHandler()
        remote_handler.setLevel(logging.INFO)
        remote_handler.addFilter(filter_)
        root_logger.addHandler(remote_handler)

def initialize_logging(service_name, service_shard):
    """Set up additional logging handlers.

    Some of the logging handlers are only activated when running for
    a service (by choice or because of technical issues). We
    therefore provide this method for services to call as soon as
    possible, with their coords as parameters, to complete logger
    initialization.

    What we do, in detail, is to add a logger to file (whose
    filename depends on the coords) and a remote logger to a
    LogService. We also attach the service coords to all log
    messages.

    """
    filter_ = ServiceFilter(service_name, service_shard)

    # Update shell handler to attach service coords.
    shell_handler.addFilter(filter_)

    # Determine location of log file, and make directories.
    log_dir = os.path.join(config.log_dir,
                           "%s-%d" % (service_name, service_shard))
    mkdir(config.log_dir)
    mkdir(log_dir)
    log_filename = "%d.log" % int(time.time())

    # Install a file handler.
    file_handler = FileHandler(os.path.join(log_dir, log_filename),
                               mode='w', encoding='utf-8')
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(CustomFormatter(False))
    file_handler.addFilter(filter_)
    root_logger.addHandler(file_handler)

    # Provide a symlink to the latest log file.
    try:
        os.remove(os.path.join(log_dir, "last.log"))
    except OSError:
        pass
    os.symlink(log_filename, os.path.join(log_dir, "last.log"))

    # Setup a remote LogService handler (except when we already are
    # LogService, to avoid circular logging).
    if service_name != "LogService":
        remote_handler = LogServiceHandler()
        remote_handler.setLevel(logging.INFO)
        remote_handler.addFilter(filter_)
        root_logger.addHandler(remote_handler)

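# Hedged sketch of how a service entry point might call
# initialize_logging() above; the "Worker" name and shard 0 are made-up
# placeholders, and `logger` is assumed to be the module-level logger
# used by the snippets in this file.
def _logging_setup_sketch():
    service_name = "Worker"  # hypothetical service name
    service_shard = 0        # hypothetical shard

    # Complete logger setup as early as possible so that the file and
    # remote handlers see every subsequent message.
    initialize_logging(service_name, service_shard)
    logger.info("Logging initialized for %s,%d.",
                service_name, service_shard)
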
def __init__(self, shard):
    logger.initialize(ServiceCoord("LogService", shard))
    Service.__init__(self, shard, custom_logger=logger)

    log_dir = os.path.join(config.log_dir, "cms")
    if not mkdir(config.log_dir) or \
            not mkdir(log_dir):
        logger.error("Cannot create necessary directories.")
        self.exit()
        return

    log_filename = "%d.log" % int(time.time())
    self._log_file = codecs.open(os.path.join(log_dir, log_filename),
                                 "w", "utf-8")
    try:
        os.remove(os.path.join(log_dir, "last.log"))
    except OSError:
        pass
    os.symlink(log_filename, os.path.join(log_dir, "last.log"))

    self._last_messages = []

def __init__(self, service=None):
    """Initialization.

    service (Service): the service we are running in. If None, we
        simply avoid caching and do not allow the service to step in
        once in a while.

    """
    self.service = service

    if self.service is None:
        self.base_dir = tempfile.mkdtemp(dir=config.temp_dir)
    else:
        self.base_dir = os.path.join(
            config.cache_dir,
            "fs-cache-%s-%d" % (service._my_coord.name,
                                service._my_coord.shard))
    self.tmp_dir = os.path.join(self.base_dir, "tmp")
    self.obj_dir = os.path.join(self.base_dir, "objects")

    if not mkdir(config.cache_dir) or \
            not mkdir(self.base_dir) or \
            not mkdir(self.tmp_dir) or \
            not mkdir(self.obj_dir):
        logger.error("Cannot create necessary directories.")

def start_backdoor(self, backlog=50):
    """Start a backdoor server on a local UNIX domain socket.

    """
    backdoor_path = self.get_backdoor_path()
    try:
        os.remove(backdoor_path)
    except OSError as error:
        if error.errno != errno.ENOENT:
            raise
    else:
        logger.warning("A backdoor socket has been found and deleted.")
    mkdir(os.path.dirname(backdoor_path))
    backdoor_sock = _socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    backdoor_sock.setblocking(0)
    backdoor_sock.bind(backdoor_path)
    user = pwd.getpwnam("cmsuser")
    # We would like to also set the user to "cmsuser" but only root
    # can do that. Therefore we limit ourselves to the group.
    os.chown(backdoor_path, os.getuid(), user.pw_gid)
    os.chmod(backdoor_path, 0o770)
    backdoor_sock.listen(backlog)
    self.backdoor = BackdoorServer(backdoor_sock,
                                   locals={'service': self})
    self.backdoor.start()

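# Rough client-side sketch for the backdoor above: connect to the UNIX
# domain socket and send one statement to the embedded REPL. The path
# argument is whatever get_backdoor_path() returned on the service, and
# the names available in the REPL (e.g. "service") depend on the locals
# dict passed to BackdoorServer. This is an illustration, not part of
# the service code.
import socket as _plain_socket

def _poke_backdoor_sketch(backdoor_path, command=b"print(service)\n"):
    sock = _plain_socket.socket(_plain_socket.AF_UNIX,
                                _plain_socket.SOCK_STREAM)
    sock.connect(backdoor_path)
    sock.sendall(command)
    # Read back whatever the REPL prints (banner, prompt, output).
    reply = sock.recv(4096)
    sock.close()
    return reply
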
def _create_directory_or_die(self, dir):
    """Create dir and ensure it exists, or raise a RuntimeError."""
    if not mkdir(dir):
        msg = "Cannot create required directory '%s'." % dir
        logger.error(msg)
        raise RuntimeError(msg)

def _create_directory_or_die(directory):
    """Create directory and ensure it exists, or raise a RuntimeError."""
    if not mkdir(directory):
        msg = "Cannot create required directory '%s'." % directory
        logger.error(msg)
        raise RuntimeError(msg)

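# Small sketch of how the helper above might be used when preparing a
# per-service working area; the directory layout is hypothetical, and
# os/mkdir/logger are assumed to be the module-level names used by the
# other snippets.
def _prepare_work_area_sketch(base_dir):
    _create_directory_or_die(base_dir)
    _create_directory_or_die(os.path.join(base_dir, "tmp"))
    _create_directory_or_die(os.path.join(base_dir, "objects"))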