def test_pid_custom_name():
    """A pidfile with a user-supplied name can be acquired and released."""
    named = pid.PidFile(pidname="testpidfile")
    with named:
        pass
def _warn_if_backup_runs_too_long(config, pid_file):
    """Log a critical warning when an already-running backup has held the
    pid lock longer than ``config.pid_runtime_warning`` seconds.

    Shared by the AlreadyLocked/AlreadyRunning pidfile handlers, which
    previously duplicated this code verbatim.
    """
    if hasattr(config, 'pid_runtime_warning'):
        if time.time() - os.stat(
                pid_file.filename).st_ctime > config.pid_runtime_warning:
            # Read the owning PID straight out of the lock file.
            # Fix: the open handle lives on the PidFile instance
            # ('pid_file.fh'); the original referenced 'pid.fh', a
            # nonexistent attribute of the pid module.
            pid_file.fh.seek(0)
            pid_str = pid_file.fh.read(16).split("\n", 1)[0].strip()
            # Fix: message said "running for logger than".
            logger.critical(
                "Backup (pid: " + pid_str +
                ") has been running for longer than: " +
                str(humanfriendly.format_timespan(config.pid_runtime_warning)))


def all_procedure(ctx, prepare, backup, partial, tag, show_tags, verbose,
                  log_file, log, defaults_file, dry_run, test_mode,
                  log_file_max_bytes, log_file_backup_count, keyring_vault):
    """Top-level driver: configure logging, acquire a pidfile lock, then
    dispatch to show-tags / test-mode / prepare / backup / partial-recovery
    based on the CLI flags.

    Returns True on completion. Exits the process if the log file cannot
    be opened due to a PermissionError.
    """
    config = GeneralClass(defaults_file)
    formatter = logging.Formatter(
        fmt='%(asctime)s %(levelname)s [%(module)s:%(lineno)d] %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S')

    if verbose:
        ch = logging.StreamHandler()
        # control console output log level
        ch.setLevel(logging.INFO)
        ch.setFormatter(formatter)
        logger.addHandler(ch)

    if log_file:
        try:
            # Rotation limits from the config file win over CLI defaults.
            if config.log_file_max_bytes and config.log_file_backup_count:
                file_handler = RotatingFileHandler(
                    log_file, mode='a',
                    maxBytes=int(config.log_file_max_bytes),
                    backupCount=int(config.log_file_backup_count))
            else:
                file_handler = RotatingFileHandler(
                    log_file, mode='a',
                    maxBytes=log_file_max_bytes,
                    backupCount=log_file_backup_count)
            file_handler.setFormatter(formatter)
            logger.addHandler(file_handler)
        except PermissionError as err:
            exit("{} Please consider to run as root or sudo".format(err))

    # set log level in order: 1. user argument 2. config file 3. @click default
    if log is not None:
        logger.setLevel(log)
    elif 'log_level' in config.__dict__:
        logger.setLevel(config.log_level)
    else:
        # this is the fallback default log-level.
        logger.setLevel('INFO')

    validate_file(defaults_file)
    pid_file = pid.PidFile(piddir=config.pid_dir)

    try:
        with pid_file:  # Use PidFile for locking to a single instance
            no_action_requested = (prepare is False and backup is False and
                                   partial is False and verbose is False and
                                   dry_run is False and test_mode is False and
                                   show_tags is False)
            if no_action_requested:
                print_help(ctx, None, value=True)
            elif show_tags and defaults_file:
                b = Backup(config=defaults_file)
                b.show_tags(backup_dir=b.backupdir)
            elif test_mode and defaults_file:
                logger.warning("Enabled Test Mode!!!")
                logger.info("Starting Test Mode")
                test_obj = RunnerTestMode(config=defaults_file)
                # (basedir version substring, config-file token, vault support)
                # — replaces the six near-identical elif branches; order
                # preserved so first match wins exactly as before.
                version_matrix = (
                    ('5.7', '2_4_ps_5_7', True),
                    ('8.0', '8_0_ps_8_0', True),
                    ('5.6', '2_4_ps_5_6', False),
                    ('5.6', '2_3_ps_5_6', False),
                    ('5.5', '2_3_ps_5_5', False),
                    ('5.5', '2_4_ps_5_5', False),
                )
                for basedir in test_obj.basedirs:
                    for version, cfg_token, supports_vault in version_matrix:
                        if (version in basedir) and (cfg_token in defaults_file):
                            # keyring_vault is only honored for 5.7/8.0 setups.
                            if supports_vault and keyring_vault == 1:
                                test_obj.wipe_backup_prepare_copyback(
                                    basedir=basedir, keyring_vault=1)
                            else:
                                test_obj.wipe_backup_prepare_copyback(
                                    basedir=basedir)
                            break
                    else:
                        logger.error(
                            "Please pass proper already generated config file!")
                        logger.error(
                            "Please check also if you have run prepare_env.bats file")
            elif prepare and not test_mode:
                prepare_kwargs = {'config': defaults_file}
                if dry_run:
                    logger.warning("Dry run enabled!")
                    logger.warning("Do not recover/copy-back in this mode!")
                    prepare_kwargs['dry_run'] = 1
                if tag:
                    prepare_kwargs['tag'] = tag
                a = Prepare(**prepare_kwargs)
                a.prepare_backup_and_copy_back()
            elif backup and not test_mode:
                backup_kwargs = {'config': defaults_file}
                if dry_run:
                    logger.warning("Dry run enabled!")
                    backup_kwargs['dry_run'] = 1
                if tag:
                    backup_kwargs['tag'] = tag
                b = Backup(**backup_kwargs)
                b.all_backup()
            elif partial:
                if not dry_run:
                    c = PartialRecovery(config=defaults_file)
                    c.final_actions()
                else:
                    logger.critical(
                        "Dry run is not implemented for partial recovery!")
    except (pid.PidFileAlreadyLockedError, pid.PidFileAlreadyRunningError):
        # Another instance holds/owns the lock; warn if it looks stuck.
        _warn_if_backup_runs_too_long(config, pid_file)
    except pid.PidFileUnreadableError as error:
        logger.warning("Pid file can not be read: " + str(error))
    except pid.PidFileError as error:
        logger.warning("Generic error with pid file: " + str(error))

    logger.info("Xtrabackup command history:")
    for i in ProcessRunner.xtrabackup_history_log:
        logger.info(str(i))
    logger.info("Autoxtrabackup completed successfully!")
    return True
def main(self):
    """
    Initial AppDaemon entry point.

    Parse command line arguments, load configuration (resolving !secret
    directives from an optional secrets file), set up logging, then run
    AppDaemon either under a pidfile lock or directly.
    """
    self.init_signals()

    # Get command line args
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--config", help="full path to config directory", type=str, default=None)
    parser.add_argument("-p", "--pidfile", help="full path to PID File", default=None)
    parser.add_argument(
        "-t", "--timewarp",
        help="speed that the scheduler will work at for time travel",
        default=1, type=float)
    parser.add_argument(
        "-s", "--starttime",
        help="start time for scheduler <YYYY-MM-DD HH:MM:SS>", type=str)
    parser.add_argument(
        "-e", "--endtime",
        help="end time for scheduler <YYYY-MM-DD HH:MM:SS>",
        type=str, default=None)
    parser.add_argument(
        "-D", "--debug", help="global debug level", default="INFO",
        choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"])
    # -m takes pairs (module, level); hidden from --help output.
    parser.add_argument('-m', '--moduledebug', nargs=2, action='append', help=argparse.SUPPRESS)
    parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + utils.__version__)
    parser.add_argument('--profiledash', help=argparse.SUPPRESS, action='store_true')

    args = parser.parse_args()

    config_dir = args.config
    pidfile = args.pidfile

    # Locate appdaemon.yaml: explicit directory beats the search path.
    if config_dir is None:
        config_file_yaml = utils.find_path("appdaemon.yaml")
    else:
        config_file_yaml = os.path.join(config_dir, "appdaemon.yaml")

    if config_file_yaml is None:
        print(
            "FATAL: no configuration directory defined and defaults not present\n"
        )
        parser.print_help()
        sys.exit(1)

    # Map of module name -> debug level from repeated -m flags.
    module_debug = {}
    if args.moduledebug is not None:
        for arg in args.moduledebug:
            module_debug[arg[0]] = arg[1]

    #
    # First locate secrets file
    #
    try:
        #
        # Initially load file to see if secret directive is present
        #
        # First pass uses a dummy !secret constructor so the file parses
        # before the real secrets are available.
        yaml.add_constructor('!secret', utils._dummy_secret)
        with open(config_file_yaml, 'r') as yamlfd:
            config_file_contents = yamlfd.read()

        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input and deprecated in PyYAML 5.x — config files are
        # presumably trusted here, but consider yaml.SafeLoader.
        config = yaml.load(config_file_contents)

        if "secrets" in config:
            secrets_file = config["secrets"]
        else:
            # Default: secrets.yaml next to appdaemon.yaml.
            secrets_file = os.path.join(os.path.dirname(config_file_yaml), "secrets.yaml")

        #
        # Read Secrets
        #
        if os.path.isfile(secrets_file):
            with open(secrets_file, 'r') as yamlfd:
                secrets_file_contents = yamlfd.read()

            utils.secrets = yaml.load(secrets_file_contents)
        else:
            # Only an error if a secrets file was explicitly configured;
            # a missing default secrets.yaml is fine.
            if "secrets" in config:
                print(
                    "ERROR", "Error loading secrets file: {}".format(
                        config["secrets"]))
                sys.exit()

        #
        # Read config file again, this time with secrets
        #
        yaml.add_constructor('!secret', utils._secret_yaml)
        with open(config_file_yaml, 'r') as yamlfd:
            config_file_contents = yamlfd.read()

        config = yaml.load(config_file_contents)

    except yaml.YAMLError as exc:
        print("ERROR", "Error loading configuration")
        if hasattr(exc, 'problem_mark'):
            if exc.context is not None:
                print("ERROR", "parser says")
                print("ERROR", str(exc.problem_mark))
                print("ERROR", str(exc.problem) + " " + str(exc.context))
            else:
                print("ERROR", "parser says")
                print("ERROR", str(exc.problem_mark))
                print("ERROR", str(exc.problem))
        sys.exit()

    if "appdaemon" not in config:
        print("ERROR", "no 'appdaemon' section in {}".format(config_file_yaml))
        sys.exit()

    # Augment the appdaemon section with derived/CLI values before run().
    appdaemon = config["appdaemon"]
    if "disable_apps" not in appdaemon:
        appdaemon["disable_apps"] = False
    appdaemon["config_dir"] = config_dir
    appdaemon["config_file"] = config_file_yaml
    appdaemon["app_config_file"] = os.path.join(
        os.path.dirname(config_file_yaml), "apps.yaml")
    appdaemon["module_debug"] = module_debug

    if args.starttime is not None:
        appdaemon["starttime"] = args.starttime
    if args.endtime is not None:
        appdaemon["endtime"] = args.endtime
    # Config-file timewarp wins over the CLI value.
    if "timewarp" not in appdaemon:
        appdaemon["timewarp"] = args.timewarp
    appdaemon["loglevel"] = args.debug
    # NOTE(review): overwrites the config_dir set above with the yaml
    # file's directory — presumably intentional, but verify.
    appdaemon["config_dir"] = os.path.dirname(config_file_yaml)
    appdaemon["stop_function"] = self.stop

    hadashboard = None
    if "hadashboard" in config:
        if config["hadashboard"] is None:
            hadashboard = {}
        else:
            hadashboard = config["hadashboard"]
        hadashboard["profile_dashboard"] = args.profiledash
        hadashboard["config_dir"] = config_dir
        hadashboard["config_file"] = config_file_yaml
        # Same config_dir overwrite pattern as the appdaemon section.
        hadashboard["config_dir"] = os.path.dirname(config_file_yaml)
        if args.profiledash:
            hadashboard["profile_dashboard"] = True

        if "dashboard" not in hadashboard:
            hadashboard["dashboard"] = True

    admin = None
    if "admin" in config:
        if config["admin"] is None:
            admin = {}
        else:
            admin = config["admin"]

    api = None
    if "api" in config:
        if config["api"] is None:
            api = {}
        else:
            api = config["api"]

    http = None
    if "http" in config:
        http = config["http"]

    # Setup _logging
    # The legacy single 'log' directive is rejected outright.
    if "log" in config:
        print(
            "ERROR",
            "'log' directive deprecated, please convert to new 'logs' syntax"
        )
        sys.exit(1)
    if "logs" in config:
        logs = config["logs"]
    else:
        logs = {}
    # NOTE: 'logging' here appears to be AppDaemon's own logging module
    # (it provides a Logging class), not the stdlib logging module.
    self.logging = logging.Logging(logs, args.debug)
    self.logger = self.logging.get_logger()

    if "time_zone" in config["appdaemon"]:
        self.logging.set_tz(pytz.timezone(
            config["appdaemon"]["time_zone"]))

    # Startup message
    self.logger.info("AppDaemon Version %s starting", utils.__version__)
    self.logger.info("Configuration read from: %s", config_file_yaml)
    self.logging.dump_log_config()
    self.logger.debug("AppDaemon Section: %s", config.get("appdaemon"))
    self.logger.debug("HADashboard Section: %s", config.get("hadashboard"))

    utils.check_path("config_file", self.logger, config_file_yaml, pathtype="file")

    if pidfile is not None:
        self.logger.info("Using pidfile: %s", pidfile)
        # NOTE(review): 'dir' shadows the builtin; also "aquire" in the
        # error message below is a typo ("acquire") — left unchanged here.
        dir = os.path.dirname(pidfile)
        name = os.path.basename(pidfile)
        try:
            with pid.PidFile(name, dir) as p:
                self.run(appdaemon, hadashboard, admin, api, http)
        except pid.PidFileError:
            self.logger.error("Unable to aquire pidfile - terminating")
    else:
        self.run(appdaemon, hadashboard, admin, api, http)
def test_register_atexit_true(mock_atexit_register):
    """With register_atexit=True, PidFile registers its close() with atexit."""
    with pid.PidFile(register_atexit=True) as pf:
        mock_atexit_register.assert_called_once_with(pf.close)
def test_pid_context_manager():
    """The context manager removes the pidfile from disk on exit."""
    ctx = pid.PidFile()
    with ctx:
        pass
    assert os.path.exists(ctx.filename) is False
def check_const_samepid():
    """check() reports PID_CHECK_SAMEPID while held with allow_samepid."""
    samepid = pid.PidFile(allow_samepid=True)
    with samepid:
        status = samepid.check()
        assert status == pid.PID_CHECK_SAMEPID
    assert os.path.exists(samepid.filename) is False
def check_samepid_with_blocks_separate_objects():
    """Two distinct PidFile objects may nest when allow_samepid is enabled."""
    outer = pid.PidFile(allow_samepid=True)
    with outer:
        inner = pid.PidFile(allow_samepid=True)
        with inner:
            pass
def test_pid_already_locked_custom_name():
    """A second pidfile with the same custom name raises AlreadyLocked."""
    holder = pid.PidFile(pidname="testpidfile")
    with holder:
        with raising(pid.PidFileAlreadyLockedError):
            contender = pid.PidFile(pidname="testpidfile")
            with contender:
                pass
def test_pid_custom_name():
    """A named pidfile is cleaned up from disk after the context exits."""
    named = pid.PidFile(pidname="testpidfile")
    with named:
        pass
    assert os.path.exists(named.filename) is False
def test_pid_chmod():
    """A pidfile can be created with explicit 0o600 permissions."""
    restricted = pid.PidFile(chmod=0o600)
    with restricted:
        pass
def test_pid_already_locked():
    """Acquiring a second default pidfile while one is held must raise."""
    holder = pid.PidFile()
    with holder:
        with raising(pid.PidFileAlreadyLockedError):
            contender = pid.PidFile()
            with contender:
                pass
def test_pid_no_term_signal():
    """PidFile works without installing a SIGTERM handler."""
    no_handler = pid.PidFile(register_term_signal_handler=False)
    with no_handler:
        pass
def test_pid_custom_dir():
    """A pidfile can live in a user-chosen directory."""
    custom_dir = pid.PidFile(piddir="/tmp/testpidfile.dir/")
    with custom_dir:
        pass
def test_pid_force_tmpdir():
    """force_tmpdir=True places the pidfile in the temp directory."""
    tmp_located = pid.PidFile(force_tmpdir=True)
    with tmp_located:
        pass
def test_pid_gid_win32():
    """Setting a gid is unsupported on win32 and must raise a config error."""
    gid = 123
    with raising(pid.PidFileConfigurationError):
        misconfigured = pid.PidFile(gid=gid)
        with misconfigured:
            pass
def test_pid_enforce_dotpid_postfix():
    """With enforce_dotpid_postfix=False the '.pid' suffix is not appended."""
    bare = pid.PidFile(pidname="testpidfile", enforce_dotpid_postfix=False)
    with bare:
        assert bare.filename.endswith(".pid") is False
    assert os.path.exists(bare.filename) is False
def test_pid_check_const_nofile():
    """check() reports PID_CHECK_NOFILE before any pidfile is created."""
    fresh = pid.PidFile()
    status = fresh.check()
    assert status == pid.PID_CHECK_NOFILE
def test_pid_force_tmpdir():
    """A tmpdir-forced pidfile is removed from disk after the context exits."""
    tmp_located = pid.PidFile(force_tmpdir=True)
    with tmp_located:
        pass
    assert os.path.exists(tmp_located.filename) is False
def test_pid_check_already_running():
    """check() on a second PidFile raises while the first holds the lock."""
    with pid.PidFile() as holder:
        contender = pid.PidFile()
        with raising(pid.PidFileAlreadyRunningError):
            contender.check()
    assert os.path.exists(holder.filename) is False
def test_pid_custom_dir():
    """A pidfile in a subdirectory of the default pid dir is cleaned up."""
    subdir = os.path.join(pid.DEFAULT_PID_DIR, "testpidfile.dir")
    located = pid.PidFile(piddir=subdir)
    with located:
        pass
    assert os.path.exists(located.filename) is False
def test_register_atexit_false(mock_atexit_register):
    """With register_atexit=False, nothing is registered with atexit."""
    unregistered = pid.PidFile(register_atexit=False)
    with unregistered:
        mock_atexit_register.assert_not_called()
def test_pid_piddir_exists_as_file():
    """Pointing piddir at an existing regular file must raise IOError."""
    with tempfile.NamedTemporaryFile() as occupied:
        with raising(IOError):
            clashing = pid.PidFile(piddir=occupied.name)
            with clashing:
                pass
def test_pid_class():
    """create()/close() used directly (no context manager) clean up the file."""
    manual = pid.PidFile()
    manual.create()
    manual.close()
    assert os.path.exists(manual.filename) is False
def test_pid_chmod():
    """A 0o600 pidfile is removed from disk after the context exits."""
    restricted = pid.PidFile(chmod=0o600)
    with restricted:
        pass
    assert os.path.exists(restricted.filename) is False
def test_pid_pid():
    """The pidfile on disk contains this process's PID; the file is removed
    on exit.

    Fix: the original leaked a file handle via a bare open(); the handle is
    now closed deterministically with a context manager.
    """
    with pid.PidFile() as pidfile:
        with open(pidfile.filename, "r") as fh:
            pidnr = int(fh.readline().strip())
        assert pidnr == os.getpid(), "%s != %s" % (pidnr, os.getpid())
    assert not os.path.exists(pidfile.filename)
def test_pid_chmod_win32():
    """chmod is unsupported on win32 and must raise a configuration error."""
    with raising(pid.PidFileConfigurationError):
        restricted = pid.PidFile(chmod=0o600)
        with restricted:
            pass
async def new(cls,
              data_root: str,
              pidfile: str,
              host: str = None,
              port: str = None,
              preload: bool = False,
              close_timeout: int = 10,
              strategy: str = model.Strategy.No.value,
              logger: logging.Logger = internal_logger):
    """Create new instance of the server.

    Builds the model/experiment storages, wires the aiohttp application
    with its routes and lifecycle callbacks, and returns the configured
    instance.

    NOTE(review): host, port and close_timeout are not used in this body —
    presumably consumed by the caller or a serve() method; verify.
    """
    self = cls()

    # The pid lock is not created here; it is deferred to app startup via
    # the on_startup callback registered below.
    pidfile = pathlib.Path(pidfile)
    self.pid = pid.PidFile(piddir=pidfile.parent, pidname=pidfile.name)

    # Create a data root directory where all server data is persisted.
    data_root = pathlib.Path(data_root)
    data_root.mkdir(parents=True, exist_ok=True)

    # TODO: use different execution strategies for models and
    # fallback to the server-default execution strategy.
    loader = model.Loader(strategy=strategy, logger=logger)
    storage = saving.FsModelsStorage.new(path=data_root, loader=loader)
    models = await model.Cache.new(storage=storage, preload=preload)

    # Experiments storage based on regular file system.
    experiments = saving.FsExperimentsStorage.new(path=data_root)

    # NOTE(review): 1024**10 bytes is an astronomically large request-size
    # limit (effectively unlimited) — confirm this is intentional and not
    # a typo for e.g. 1024**2 * 10.
    self.app = aiohttp.web.Application(client_max_size=1024**10)
    self.app.on_startup.append(cls.app_callback(self.pid.create))
    self.app.on_response_prepare.append(self._prepare_response)
    # Shutdown order: storages first, pid lock released last.
    self.app.on_shutdown.append(cls.app_callback(storage.close))
    self.app.on_shutdown.append(cls.app_callback(experiments.close))
    self.app.on_shutdown.append(cls.app_callback(self.pid.close))

    # All handlers are routed through the API-version wrapper.
    route = partial(route_to, api_version=tensorcraft.__apiversion__)

    models_view = httpapi.ModelView(models)
    server_view = httpapi.ServerView(models)
    experiments_view = httpapi.ExperimentView(experiments)

    self.app.add_routes([
        # Model-related endpoints.
        aiohttp.web.get(models_view.list.url, route(models_view.list)),
        aiohttp.web.put(models_view.save.url, route(models_view.save)),
        aiohttp.web.get(models_view.export.url, route(models_view.export)),
        aiohttp.web.delete(models_view.delete.url, route(models_view.delete)),
        aiohttp.web.post(models_view.predict.url, route(models_view.predict)),
        # Experiment-related endpoints.
        aiohttp.web.post(experiments_view.create.url, route(experiments_view.create)),
        aiohttp.web.post(experiments_view.create_epoch.url, route(experiments_view.create_epoch)),
        aiohttp.web.get(experiments_view.get.url, route(experiments_view.get)),
        aiohttp.web.get(experiments_view.list.url, route(experiments_view.list)),
        # Server-related endpoints.
        aiohttp.web.get(server_view.status.url, route(server_view.status)),
        # aiohttp.web.static("/ui", "static"),
    ])

    setup(self.app)
    logger.info("Server initialization completed")
    return self
def test_pid_gid():
    """A pidfile created with the current gid is removed on exit."""
    current_gid = os.getgid()
    owned = pid.PidFile(gid=current_gid)
    with owned:
        pass
    assert os.path.exists(owned.filename) is False
# reset python's default SIGINT handler signal.signal(signal.SIGINT, signal.SIG_IGN) signal.signal(signal.SIGTERM, lambda num, frame: sys.exit(1)) # synchronously-delivered signals such as SIGSEGV and SIGILL cannot be # handled properly from python, so install signal handlers from the C # function in isys. isys.installSyncSignalHandlers() setup_environment() # make sure we have /var/log soon, some programs fail to start without it util.mkdirChain("/var/log") # Create a PID file. The exit handler, installed later, will clean it up. pidfile = pid.PidFile(pidname='anaconda', register_term_signal_handler=False) try: pidfile.create() except pid.PidFileError as e: log.error("Unable to create %s, exiting", pidfile.filename) # If we had a $DISPLAY at start and zenity is available, we may be # running in a live environment and we can display an error dialog. # Otherwise just print an error. if flags.preexisting_x11 and os.access("/usr/bin/zenity", os.X_OK): # The module-level _() calls are ok here because the language may # be set from the live environment in this case, and anaconda's # language setup hasn't happened yet. # FIXME: change the line below back to found-_-in-module-class once it works in pylint # pylint: disable=W9902
def test_pid_context_manager():
    """A default PidFile acquires and releases without error."""
    default_pidfile = pid.PidFile()
    with default_pidfile:
        pass