def setUp(self):
    """Prepare terminal logging, a scratch package cache and forced entropy."""
    # Route log records to the terminal at DEBUG verbosity.
    coloredlogs.install()
    coloredlogs.set_level(logging.DEBUG)
    # Work against a throwaway database directory.
    self.db_directory = tempfile.mkdtemp()
    self.load_package_cache()
    # Force entropy last, after the cache has been loaded.
    os.environ.update(DPT_FORCE_ENTROPY='yes')
def setUp(self):
    """Silence logging and collect the sample input files for the CLI runner."""
    coloredlogs.set_level(40)  # 40 is the numeric value of logging.ERROR
    self.data_dir = pathlib.Path(__file__).parent / "data"
    # Keep only regular files from the data directory, as strings.
    self.inputs = []
    for candidate in self.data_dir.glob("*.*"):
        if candidate.is_file():
            self.inputs.append(str(candidate))
    self.runner = CliRunner()
def redraw_loop(screen, manager):
    """Continuously redraw performance metrics on a curses screen.

    :param screen: The curses window to draw on.
    :param manager: Object whose metrics are rendered via ``report_metrics()``
                    and refreshed with ``refresh()``.

    Runs until the user presses 'q'.
    """
    # Imported here to avoid a circular import with perf_moon.cli.
    from perf_moon.cli import report_metrics, line_is_heading
    # Hide warnings so they don't corrupt the curses layout.
    coloredlogs.set_level(logging.ERROR)
    # Hide the text cursor, remembering the previous mode to restore later.
    cursor_mode = curses.curs_set(0)
    # Leave raw mode so Control-C keeps its normal behavior.
    curses.noraw()
    # Make getch() non-blocking.
    screen.nodelay(True)
    try:
        while True:
            lnum = 0
            for line in report_metrics(manager):
                attributes = 0
                if line_is_heading(line):
                    # Render heading lines in bold.
                    attributes |= curses.A_BOLD
                screen.addstr(lnum, 0, line, attributes)
                lnum += 1
            screen.refresh()
            # Wait ~1 second in ten slices so 'q' quits promptly.
            for i in range(10):
                if screen.getch() == ord('q'):
                    return
                time.sleep(0.1)
            # Pull fresh metrics and clear for the next iteration.
            manager.refresh()
            screen.erase()
    finally:
        # Restore the cursor mode and leave a clean screen.
        curses.curs_set(cursor_mode)
        screen.erase()
def init_common_options(args, validate_executors):
    """
    Does common initialization dependent on command line options.
    This includes any initialization that uses the logger

    :param args: From ArgumentParser.parse_args()
    :param validate_executors: Whether to validate if executors is valid as well
    """
    # Init logger based on verbosity
    colorama.init()
    coloredlogs.install(fmt=LOG_FORMAT, datefmt=TIME_FORMAT)
    if args.verbose:
        coloredlogs.set_level(logging.DEBUG)

    # Init pause_when_done (comment previously said "pause_on_exist")
    global pause_when_done
    pause_when_done = args.pause_when_done

    # Validate config and executors. Exit with a non-zero status on failure so
    # calling scripts can detect the error -- the previous bare exit() reported
    # success (status 0) after logging an error.
    msg = data.validate_config_object(data.get_config())
    if msg:
        logging.error('Error while parsing config: %s', msg)
        exit(1)
    if validate_executors:
        msg = data.validate_executors_object(data.get_executors())
        if msg:
            logging.error('Error while parsing executors: %s', msg)
            exit(1)
def setUp(self):
    """Restore DEBUG verbosity ahead of every test."""
    coloredlogs.set_level('DEBUG')
    # The superclass and/or py.test print the test method's name without a
    # trailing newline; emit one so the first log line starts on its own line.
    sys.stderr.write("\n")
def setUp(self):
    """Enable DEBUG logging, with colors when the optional dependency exists."""
    try:
        # coloredlogs is an optional external dependency.
        import coloredlogs
    except ImportError:
        # Fall back to the plain stdlib configuration.
        logging.basicConfig()
    else:
        coloredlogs.install()
        coloredlogs.set_level(logging.DEBUG)
def set_loglevel(config):
    """Apply the configured log level, falling back to ``LOG_LEVEL``.

    Arguments:
        config (Config): Dictionary of the config.
    """
    coloredlogs.set_level(config.get("loglevel", LOG_LEVEL))
def __init__(self, *args, **kw):
    """Initialize the test suite and enable terminal logging."""
    # Let the superclass perform its own initialization first.
    super(VcsRepoMgrTestCase, self).__init__(*args, **kw)
    # Then send DEBUG and higher log records to the terminal.
    coloredlogs.install()
    coloredlogs.set_level(logging.DEBUG)
def setUp(self):
    """Put logging into a known state before each test runs."""
    # Global state: reinstall coloredlogs at INFO verbosity.
    coloredlogs.install()
    coloredlogs.set_level(logging.INFO)
    # Local state: capture output in memory through a non-tty handler.
    self.stream = StringIO()
    self.handler = coloredlogs.ColoredStreamHandler(stream=self.stream,
                                                    isatty=False)
    # A random logger name keeps each test isolated from the others.
    letters = [random.choice(string.ascii_letters) for i in range(25)]
    self.logger_name = ''.join(letters)
    self.logger = verboselogs.VerboseLogger(self.logger_name)
    self.logger.addHandler(self.handler)
def parse_args(self, *args, **kwargs):
    """Parse command line arguments, then validate input/output file options.

    Adds the automatic arguments and a help flag before delegating to the
    parent parser, then performs existence/overwrite checks on the Serafin
    input/output paths, exiting with status 3 on any problem.
    """
    if self.group_general is None:
        self.add_group_general()  # add only help message
    self._add_auto_arguments()
    self.group_general.add_argument('-h', '--help', action='help',
                                    default=argparse.SUPPRESS,
                                    help='show this help message and exit')
    new_args = super().parse_args(*args, **kwargs)
    if 'verbose' in new_args:
        # Change verbosity globally
        if new_args.verbose:
            if settings.COLOR_LOGS:
                coloredlogs.set_level(logging.DEBUG)
            else:
                logging.basicConfig(level=logging.DEBUG)
    # Input Serafin file
    if 'in_slf' in new_args:
        # Probe that the input file can be opened at all.
        try:
            with open(new_args.in_slf):
                pass
        except FileNotFoundError:
            logger.critical('No such file or directory: %s' % new_args.in_slf)
            sys.exit(3)
        if 'out_slf' in new_args:
            # avoid input file overwriting
            if os.path.isfile(new_args.out_slf):
                if os.path.samefile(new_args.in_slf, new_args.out_slf):
                    logger.critical('Cannot overwrite to the input file.')
                    sys.exit(3)
    # Output files
    if 'force' in self.args_known_ids:
        if not new_args.force:
            # Without --force, refuse to clobber any existing output file.
            for out_arg in ('out_csv', 'out_slf'):
                if out_arg in new_args:
                    out_path = getattr(new_args, out_arg)
                    if os.path.isfile(out_path):
                        logger.critical('Output file already exists: %s'
                                        % out_path)
                        sys.exit(3)
    if any(arg in new_args for arg in ('in_slf', 'out_slf')):
        if 'slf_lang' not in new_args:
            # NOTE(review): this sets `in_lang` although the guard checks
            # `slf_lang` -- looks like a possible attribute-name mismatch;
            # confirm against the callers before changing.
            new_args.in_lang = settings.LANG
    return new_args
def setup_logging(level=logging.DEBUG):
    """Configure root logging, preferring colored output when available.

    :param level: Default log level; overridden by the environment variable
                  named in ``_KYTHON_LOGLEVEL_VAR`` (a level name such as
                  "INFO") when it is set.
    """
    if _KYTHON_LOGLEVEL_VAR in os.environ:
        # TODO ugh a bit ugly, but whatever..
        level = getattr(logging, os.environ[_KYTHON_LOGLEVEL_VAR])
    logging.basicConfig(level=level)
    try:
        import coloredlogs  # type: ignore
        coloredlogs.install(fmt=COLOREDLOGGER_FORMAT)
        coloredlogs.set_level(level)
    except ImportError as e:
        if e.name == 'coloredlogs':
            # The optional dependency itself is missing: log and carry on.
            logging.exception(e)
            logging.warning("Install coloredlogs for fancy colored logs!")
        else:
            # Something else failed to import: re-raise with a bare `raise`
            # so the original traceback is preserved (was `raise e`).
            raise
    # requests is chatty at INFO; only surface critical problems from it.
    logging.getLogger('requests').setLevel(logging.CRITICAL)
def test_decrease_verbosity(self):
    """Make sure decrease_verbosity() respects default and custom levels."""
    # Start from a known state.
    set_level(logging.INFO)
    assert get_level() == logging.INFO
    # Each call steps the level up one notch; CRITICAL is the ceiling.
    expected_sequence = (logging.WARNING, logging.ERROR,
                         logging.CRITICAL, logging.CRITICAL)
    for expected_level in expected_sequence:
        decrease_verbosity()
        assert get_level() == expected_level
def test_increase_verbosity(self):
    """Make sure increase_verbosity() respects default and custom levels."""
    # Start from a known state.
    set_level(logging.INFO)
    assert get_level() == logging.INFO
    # Each call steps the level down one notch; NOTSET is the floor.
    expected_sequence = (logging.VERBOSE, logging.DEBUG,
                         logging.NOTSET, logging.NOTSET)
    for expected_level in expected_sequence:
        increase_verbosity()
        assert get_level() == expected_level
def set_loglevel(cls, level):
    """ Set output logging level to given level. """
    # Verbosity is expressed on an integer scale of -2 .. 2; normalise any
    # other input into that range before looking up the logging level.
    if not isinstance(level, int):
        cls.important("Verbosity level must be integer between "
                      f"-2 and 2, not '{level}'")
        cls.important("Setting default verbosity level INFO")
        level = 0
    elif not -2 <= level <= 2:
        level = max(-2, min(level, 2))
        cls.important(f"Truncating verbosity level to {level}")
    level_value = cls.verbosity_to_level[level]
    coloredlogs.set_level(level_value)
    cls.info("Setting output logging level to '{}'"
             .format(logging.getLevelName(level_value)))
def redraw_loop(screen, manager): """The main loop that continuously redraws Apache web server metrics.""" # Ugly workaround to avoid circular import errors due to interdependencies # between the apache_manager.cli and apache_manager.interactive modules. from apache_manager.cli import report_metrics, line_is_heading # Hide warnings (they'll mess up the curses layout). coloredlogs.set_level(logging.ERROR) # Hide the text cursor. cursor_mode = curses.curs_set(0) # Make Control-C behave normally. curses.noraw() # Enable non-blocking getch(). screen.nodelay(True) try: # Repeat until the user aborts. while True: lnum = 0 for line in report_metrics(manager): attributes = 0 if line_is_heading(line): attributes |= curses.A_BOLD screen.addstr(lnum, 0, line, attributes) lnum += 1 # Redraw screen. screen.refresh() # Wait a while before refreshing the screen, but enable the user to # quit in the mean time. for i in range(10): if screen.getch() == ord('q'): return # Don't burn through CPU like crazy :-). time.sleep(0.1) # Update metrics in next iteration. manager.refresh() # Clear screen for next iteration. screen.erase() finally: # Restore cursor mode. curses.curs_set(cursor_mode) # Clean up the screen after we're done. screen.erase()
def main(debug: bool, namespace: str) -> None:
    """
    Checks a kubernetes cluster to see if any running pods, cron jobs or
    deployments have updated image tags or image digests on their
    repositories.

    Can be run either external to a cluster (requires `~/.kube/config` to be
    setup correctly) or within a cluster as a pod. In the latter case,
    notifications about available updates will be posted to a webhook.

    This script can be configured using annotations on the pods themselves.
    Pods can be ignored with:
    `growse.com/version-checker-ignore: "true"`
    And can have their eligible tags scoped with a regular expression:
    `growse.com/version-checker-tag-regex: "^v.+?-amd64$"`
    """
    # Verbose logging on request.
    if debug:
        coloredlogs.set_level("DEBUG")
    # Prefer in-cluster service-account config; fall back to ~/.kube/config.
    try:
        config.load_incluster_config()
    except config.config_exception.ConfigException:
        config.load_kube_config()
    images = get_top_level_resources(namespace)
    notifications = []
    for resource, containers in sorted(images.items()):
        logger.info(
            "Considering {kind}: {name} ({container_count} running containers)"
            .format(kind=resource.kind, name=resource.name,
                    container_count=len(containers)))
        # Collect notifications for both newer tags and digest drift.
        notifications.extend(check_resouce_for_new_image_tags(resource))
        notifications.extend(
            check_resource_containers_for_updated_image_digests(
                resource, containers))
    log_notifications(notifications)
def test_is_verbose(self):
    """Make sure is_verbose() does what it should :-)."""
    # INFO is not considered verbose; DEBUG and VERBOSE are.
    for level, expect_verbose in ((logging.INFO, False),
                                  (logging.DEBUG, True),
                                  (logging.VERBOSE, True)):
        set_level(level)
        assert bool(is_verbose()) == expect_verbose
def cli(verbose, name, version, *args, **kwargs):
    """Command line entry point: configure logging, load the projects and run
    the requested experiments, printing the MRR results at the end.

    :param verbose: Verbosity count (0 -> ERROR, 1 -> INFO, >1 -> DEBUG).
    :param name: Optional project name filter (matched case-insensitively).
    :param version: Optional project version filter (matched
                    case-insensitively).
    """
    # Seed numpy's RNG so experiment runs are reproducible.
    random_seed_value = kwargs["random_seed_value"]
    numpy.random.seed(random_seed_value)
    coloredlogs.install()
    if verbose > 1:
        coloredlogs.set_level(level=logging.DEBUG)
    elif verbose == 1:
        coloredlogs.set_level(level=logging.INFO)
    elif verbose == 0:
        coloredlogs.set_level(level=logging.ERROR)
    # Inject the default model and changeset configurations into kwargs so
    # downstream helpers see them.
    model_config, model_config_string = get_default_model_config(kwargs)
    changeset_config, changeset_config_string = get_default_changeset_config()
    kwargs.update({
        'changeset_config': changeset_config,
        'changeset_config_string': changeset_config_string
    })
    kwargs.update({
        'model_config': model_config,
        'model_config_string': model_config_string
    })
    # load project info
    projects = common.load_projects(kwargs)
    # Optionally narrow the project list by name and version.
    if name:
        name = name.lower()
        projects = [x for x in projects if x.name == name]
    if version:
        version = version.lower()
        projects = [x for x in projects if x.version == version]
    mrr = dict()
    firstrels = dict()
    for project in projects:
        # Each project either builds goldsets, optimizes, or runs experiments.
        if project.goldset:
            goldsets.build_goldset(project)
        elif project.optimize_model:
            optimize_model(project)
        elif project.optimize_corpus:
            optimize_corpus(project)
        else:
            pn = project.printable_name
            firstrels[pn] = run_experiments(project)
            if pn not in mrr:
                mrr[pn] = dict()
            for source in project.source:
                # Mean reciprocal rank over the first-relevant ranks.
                mrr[pn][source] = utils.calculate_mrr(
                    num for num, _, _ in firstrels[pn][source])
    pprint(mrr)
def _validate_params(self):
    """Validate every configuration parameter in ``self._params``, replacing
    invalid values with the corresponding defaults via the ``is_*`` helpers.

    Each helper takes (name, current value, default) and returns the value to
    keep; the log level is handled first so subsequent validation is logged at
    the configured verbosity.
    """
    # Set log level first.
    self._params["default"]["log_level"] = is_log_level(
        self._params["default"]["log_level"], DEFAULT_LOG_LEVEL)
    coloredlogs.set_level(self._params["default"]["log_level"])
    # Default.
    self._params["default"]["browser_type"] = is_browser_type(
        "default.browser_type", self._params["default"]["browser_type"],
        DEFAULT_BROWSER_TYPE)
    self._params["default"]["browser_argument"] = is_list(
        "default.browser_argument",
        self._params["default"]["browser_argument"],
        DEFAULT_BROWSER_ARGUMENT)
    self._params["default"]["browser_extension"] = is_list(
        "default.browser_extension",
        self._params["default"]["browser_extension"],
        DEFAULT_BROWSER_EXTENSION)
    # Geometry expands into the combined value plus its x and y components.
    self._params["default"]["browser_geometry"], \
        self._params["default"]["browser_geometry_x"], \
        self._params["default"]["browser_geometry_y"] = is_browser_geometry(
            "default.browser_geometry",
            self._params["default"]["browser_geometry"],
            DEFAULT_BROWSER_GEOMETRY)
    self._params["default"]["browser_instance"] = is_int(
        "default.browser_instance",
        self._params["default"]["browser_instance"],
        DEFAULT_BROWSER_INSTANCE)
    self._params["default"]["browser_instance_tab"] = is_int(
        "default.browser_instance_tab",
        self._params["default"]["browser_instance_tab"],
        DEFAULT_BROWSER_INSTANCE_TAB)
    self._params["default"]["browser_page_size"] = is_bytes(
        "default.browser_page_size",
        self._params["default"]["browser_page_size"],
        DEFAULT_BROWSER_PAGE_SIZE)
    self._params["default"]["browser_page_timeout"] = is_int(
        "default.browser_page_timeout",
        self._params["default"]["browser_page_timeout"],
        DEFAULT_BROWSER_PAGE_TIMEOUT)
    self._params["default"]["browser_script_timeout"] = is_int(
        "default.browser_script_timeout",
        self._params["default"]["browser_script_timeout"],
        DEFAULT_BROWSER_SCRIPT_TIMEOUT)
    self._params["default"]["browser_proxy"] = is_string(
        "default.browser_proxy", self._params["default"]["browser_proxy"],
        DEFAULT_BROWSER_PROXY)
    self._params["default"]["browser_retry_codes"] = is_list(
        "default.browser_retry_codes",
        self._params["default"]["browser_retry_codes"],
        DEFAULT_BROWSER_RETRY_CODES)
    self._params["default"]["browser_retry_codes_tries"] = is_int(
        "default.browser_retry_codes_tries",
        self._params["default"]["browser_retry_codes_tries"],
        DEFAULT_BROWSER_RETRY_CODES_TRIES)
    self._params["default"]["chrome_driver_path"] = is_file(
        "default.chrome_driver_path",
        self._params["default"]["chrome_driver_path"], CHROME_DRIVER_PATH)
    self._params["default"]["chrome_extensions_dir"] = is_dir(
        "default.chrome_extensions_dir",
        self._params["default"]["chrome_extensions_dir"],
        CHROME_EXTENSIONS_DIR)
    self._params["default"]["chrome_path"] = is_file(
        "default.chrome_path", self._params["default"]["chrome_path"],
        CHROME_PATH)
    self._params["default"]["chrome_profiles_dir"] = is_dir(
        "default.chrome_profiles_dir",
        self._params["default"]["chrome_profiles_dir"], CHROME_PROFILES_DIR)
    self._params["default"]["chunk_size"] = is_bytes(
        "default.chunk_size", self._params["default"]["chunk_size"],
        DEFAULT_CHUNK_SIZE)
    self._params["default"]["cpu_load"] = is_int(
        "default.cpu_load", self._params["default"]["cpu_load"],
        DEFAULT_CPU_LOAD)
    self._params["default"]["firefox_driver_path"] = is_file(
        "default.firefox_driver_path",
        self._params["default"]["firefox_driver_path"], FIREFOX_DRIVER_PATH)
    self._params["default"]["firefox_extensions_dir"] = is_dir(
        "default.firefox_extensions_dir",
        self._params["default"]["firefox_extensions_dir"],
        FIREFOX_EXTENSIONS_DIR)
    self._params["default"]["firefox_path"] = is_file(
        "default.firefox_path", self._params["default"]["firefox_path"],
        FIREFOX_PATH)
    self._params["default"]["firefox_profiles_dir"] = is_dir(
        "default.firefox_profiles_dir",
        self._params["default"]["firefox_profiles_dir"],
        FIREFOX_PROFILES_DIR)
    self._params["default"]["keep_temp"] = is_bool(
        "default.keep_temp", self._params["default"]["keep_temp"],
        DEFAULT_KEEP_TEMP)
    self._params["default"]["mem_free"] = is_bytes(
        "default.mem_free", self._params["default"]["mem_free"],
        DEFAULT_MEM_FREE)
    self._params["default"]["task_timeout"] = is_int(
        "default.task_timeout", self._params["default"]["task_timeout"],
        DEFAULT_TASK_TIMEOUT)
    # Client.
    self._params["client"]["batch_size"] = is_int(
        "client.batch_size", self._params["client"]["batch_size"],
        DEFAULT_CLIENT_BATCH_SIZE)
    self._params["client"]["client_id"] = is_string(
        "client.client_id", self._params["client"]["client_id"],
        DEFAULT_CLIENT_ID)
    self._params["client"]["output_dir"] = is_dir(
        "client.output_dir", self._params["client"]["output_dir"],
        DEFAULT_CLIENT_OUTPUT_DIR)
    self._params["client"]["server"] = is_string(
        "client.server", self._params["client"]["server"],
        DEFAULT_CLIENT_SERVER)
    self._params["client"]["scripts"] = is_list(
        "client.scripts", self._params["client"]["scripts"],
        DEFAULT_CLIENT_SCRIPTS)
    # Server.
    self._params["server"]["listen"] = is_string(
        "server.listen", self._params["server"]["listen"],
        DEFAULT_SERVER_LISTEN)
    self._params["server"]["workers"] = is_string(
        "server.workers", self._params["server"]["workers"],
        DEFAULT_SERVER_WORKERS)
def main():
    """Command line entry point: resolve the input/output paths and prepare a
    model conversion (Python 2 syntax: ``except X, e`` and ``print >>``).
    """
    global basedir
    try:
        options = parser.parse_args()
    except ArgumentError, e:
        # NOTE(review): the trailing `e` is passed as a lazy logging argument
        # but the format string has no placeholder, so the exception is never
        # rendered in the message.
        logging.error('OptionError: ', e)
        # NOTE(review): parser.print_help() prints to stdout and returns None,
        # so this statement additionally writes "None" to stderr.
        print >> sys.stderr, parser.print_help()
        return 1
    if options.verbose:
        logging.info('enable verbose output')
        # NOTE(review): this assigns a `level` attribute on the logging module;
        # it does NOT change the effective log level. The intent was probably
        # logging.getLogger().setLevel(logging.DEBUG) -- confirm before fixing.
        logging.level = logging.DEBUG
        if 'coloredlogs' in globals():
            coloredlogs.set_level(logging.DEBUG)
    # Both endpoints of the conversion are required.
    if options.tofile is None or options.fromfile is None:
        print >> sys.stderr, parser.print_help()
        return 1
    logging.info("simtrans (version %s)" % __version__)
    # Resolve both paths to absolute locations before conversion.
    options.tofile = os.path.abspath(utils.resolveFile(options.tofile))
    options.fromfile = os.path.abspath(utils.resolveFile(options.fromfile))
    logging.info("converting from: %s" % options.fromfile)
    logging.info("             to: %s" % options.tofile)
    # Relative resources are resolved against the output file's directory.
    basedir = os.path.dirname(os.path.abspath(options.tofile))
    writer = None
    meshwriter = None
def main():
    """Entry point for the shell manager CLI.

    Builds the argument parser with one subcommand per operation (install,
    uninstall, deploy, undeploy, bundles, status, clean, publish, config),
    parses the arguments, configures colored logging and dispatches to the
    selected subcommand's handler. Exits with status 1 on FatalException.
    """
    parser = ArgumentParser(description="Shell Manager")
    parser.add_argument("-d", "--debug", action="store_true", default=False,
                        help="show debug information")
    parser.add_argument("--colorize", default="auto",
                        choices=["auto", "never"],
                        help="support colored output")
    subparsers = parser.add_subparsers()
    # Problem installation / removal.
    install_parser = subparsers.add_parser("install",
                                           help="problem installation")
    install_parser.add_argument("problem_paths", nargs="*", type=str,
                                help="paths to problem source directories")
    install_parser.add_argument(
        "--reinstall", action="store_true", default=None,
        help="reinstall over an existing version of this problem")
    install_parser.set_defaults(func=install_problems)
    uninstall_parser = subparsers.add_parser(
        "uninstall", help="problem removal - undeploy instances first")
    uninstall_parser.add_argument("problem_names", nargs="*", type=str,
                                  help="installed problem names")
    uninstall_parser.set_defaults(func=uninstall_problems)
    # Instance deployment / undeployment.
    deploy_parser = subparsers.add_parser("deploy",
                                          help="problem instance deployment")
    deploy_parser.add_argument(
        "-n", "--num-instances", type=int, default=1,
        help="number of instances to deploy (numbers 0 through n-1).")
    deploy_parser.add_argument("-i", "--instances", action="append", type=int,
                               help="particular instance(s) to deploy.")
    deploy_parser.add_argument("-d", "--dry", action="store_true",
                               help="don't make persistent changes.")
    deploy_parser.add_argument(
        "-r", "--redeploy", action="store_true",
        help="redeploy instances that have already been deployed")
    deploy_parser.add_argument(
        "-nr", "--no-restart", action="store_true",
        help="do not restart xinetd after deployment.")
    deploy_parser.add_argument("problem_names", nargs="*", type=str,
                               help="installed problem names")
    deploy_parser.set_defaults(func=deploy_problems)
    undeploy_parser = subparsers.add_parser(
        "undeploy", help="problem instance undeployment")
    undeploy_parser.add_argument(
        "-n", "--num-instances", type=int, default=1,
        help="number of instances to undeploy (numbers 0 through n-1).")
    undeploy_parser.add_argument("-i", "--instances", action="append",
                                 type=int,
                                 help="particular instance(s) to undeploy.")
    undeploy_parser.add_argument("problem_names", nargs="*", type=str,
                                 help="deployed problem names")
    undeploy_parser.set_defaults(func=undeploy_problems)
    # Bundle management.
    install_bundle_parser = subparsers.add_parser("install-bundle",
                                                  help="bundle installation")
    install_bundle_parser.add_argument("bundle_path", type=str,
                                       help="path to bundle file")
    install_bundle_parser.set_defaults(func=install_bundle)
    uninstall_bundle_parser = subparsers.add_parser("uninstall-bundle",
                                                    help="bundle removal")
    uninstall_bundle_parser.add_argument("bundle_name", type=str,
                                         help="name of installed bundle")
    uninstall_bundle_parser.set_defaults(func=uninstall_bundle)
    # Introspection and maintenance.
    status_parser = subparsers.add_parser(
        "status", help="list installed problems and bundles")
    status_parser.add_argument(
        "-a", "--all", action="store_true",
        help="Show information about all problem instanes.")
    status_parser.add_argument(
        "-p", "--problem", type=str, default=None,
        help="Display status information for a given problem.")
    status_parser.add_argument(
        "-b", "--bundle", type=str, default=None,
        help="Display status information for a given bundle.")
    status_parser.add_argument(
        "-j", "--json", action="store_true", default=None,
        help="Display status information in json format")
    status_parser.add_argument(
        "-e", "--errors-only", action="store_true",
        help="Only print problems with failing service status.")
    status_parser.set_defaults(func=status)
    clean_parser = subparsers.add_parser(
        "clean", help="clean up problem staging data")
    clean_parser.set_defaults(func=clean)
    publish_parser = subparsers.add_parser(
        "publish", help="export this shell server's state")
    publish_parser.set_defaults(func=publish)
    # Configuration viewing and editing.
    config_parser = subparsers.add_parser(
        "config", help="view or modify configuration options")
    config_parser.add_argument(
        "-j", "--json", action="store_true", default=False,
        help=
        "Whether to display the configuration options in JSON form or pretty printed. Defaults to False."
    )
    config_parser.add_argument(
        'config_type', choices=['shared', 'local'],
        help='Which configuration settings to access: shared (across all ' +
        'shell servers), or local (to this shell server)')
    config_parser.set_defaults(func=print_configuration)
    config_subparsers = config_parser.add_subparsers()
    config_set_parser = config_subparsers.add_parser(
        "set", help="Set configuration options")
    config_set_parser.add_argument("-f", "--field", type=str, required=True,
                                   help="which field to set")
    config_set_parser.add_argument("-v", "--value", type=str, required=True,
                                   help="option's new value")
    config_set_parser.add_argument("-j", "--json", action="store_true",
                                   default=False,
                                   help="interpret the given value as JSON")
    config_set_parser.add_argument(
        "--allow-type-change", action="store_true", default=False,
        help="allow the supplied field to change types if already specified")
    config_set_parser.set_defaults(func=set_configuration_option)
    args = parser.parse_args()
    # Disable colored output on request by clearing the style tables.
    if args.colorize == "never":
        coloredlogs.DEFAULT_LEVEL_STYLES = {}
        coloredlogs.DEFAULT_FIELD_STYLES = {}
    coloredlogs.install()
    if args.debug:
        coloredlogs.set_level(logging.DEBUG)
    try:
        # Dispatch to the selected subcommand; show help when none was given.
        if "func" in args:
            args.func(args)
        else:
            parser.print_help()
    except FatalException:
        exit(1)
def test_is_verbose(self):
    """Make sure is_verbose() does what it should :-)."""
    # The default handler level is INFO, which doesn't count as verbose.
    initial_level = coloredlogs.root_handler.level
    assert initial_level == logging.INFO
    assert not coloredlogs.is_verbose()
    # Dropping below INFO (to VERBOSE) does.
    coloredlogs.set_level(logging.VERBOSE)
    assert coloredlogs.is_verbose()
def __enter__(self):
    """Customize the logging verbosity when entering the :keyword:`with` block."""
    # Only capture the pre-existing level once, so re-entering doesn't
    # clobber the value that will be restored by __exit__().
    if self.original_level is None:
        self.original_level = get_level()
        set_level(self.level)
def __exit__(self, exc_type=None, exc_value=None, traceback=None):
    """Restore the original logging verbosity when leaving the :keyword:`with` block."""
    # Only restore if __enter__() actually captured a level; resetting the
    # marker to None re-arms the context manager for reuse.
    if self.original_level is not None:
        set_level(self.original_level)
        self.original_level = None
def setUp(self):
    """Reset logging verbosity before each test."""
    # Tests may change the level; always start from INFO.
    coloredlogs.set_level(logging.INFO)
action="store_true", dest="export", help="Export data from the server rather than import") parser.add_argument("-t", "--testing", default=False, action="store_true", dest="test", help="Enables unit testing mode") #parser.add_argument("-n", "--network", default=False, action="store_true", dest="network_mode", help="Controls whether the program will use network mode to import and tag whole networks") args = parser.parse_args() if args.test == True: import Tests # Suppress logging coloredlogs.set_level('CRITICAL') logger = logging.getLogger('__main__') logger.propagate = False Tests.tests(args.address, args.username, args.password, args.configuration) exit(0) app = App(filename=args.filename, verbose=args.verbose, user_input=args.user_input, address=args.address, username=args.username, password=args.password, configuration=args.configuration, upload=args.export) app.start(args.export)
def setUp(self):
    """Reset the logging level before every test runs."""
    # Tests may change the level; always start from DEBUG.
    coloredlogs.set_level(logging.DEBUG)
def set_level(level):
    """Update the coloredlogs verbosity and report the change."""
    coloredlogs.set_level(level)
    log = getLogger(__name__)
    log.info('Nivel de logs actualizado: ' + str(level))
def set_logging_level(self, level):
    """Set the handler level from a (case-insensitive) level name and log it."""
    normalized = level.upper()
    coloredlogs.set_level(normalized)
    self.logger.info('Log level: {}'.format(level))
def setUp(self):
    """Enable DEBUG-level colored logging to the terminal before each test."""
    coloredlogs.install()
    coloredlogs.set_level(logging.DEBUG)
def set_loglevel(level):
    """Apply *level* to both the root logger and the coloredlogs handler."""
    root_logger = logging.getLogger()
    root_logger.setLevel(level)
    coloredlogs.set_level(level)
def cli(args=None):
    """Command line entry point: parse the arguments and run the command.

    :param args: Optional argument list (defaults to sys.argv behavior).
    :returns: Whatever the executed command returns.
    """
    # Default to WARNING so only problems are reported.
    set_level(WARNING)
    leftover = build_and_parse_args(args)
    return build_and_run_command(leftover)
def main(args):  # pylint: disable=too-many-branches
    """Handle arguments.

    Dispatches on ``args.cmd`` (add/remove/query/get/reset) and, for ``add``,
    on ``args.subcmd`` (file/match/series/db/archive). Optionally shows a
    tqdm progress bar, in which case logging is silenced so the bar renders
    cleanly.
    """
    add_callback = None
    if args.progress:
        progress = tqdm.tqdm(unit='mgz')
        # Silence logging so it doesn't interleave with the progress bar.
        coloredlogs.set_level('CRITICAL')
        add_callback = progress.update
    db_api = API(args.database, args.store_path, voobly_key=args.voobly_key,
                 voobly_username=args.voobly_username,
                 voobly_password=args.voobly_password,
                 consecutive=args.consecutive, callback=add_callback)
    # Add
    if args.cmd == CMD_ADD:
        db_api.start()
        # File
        if args.subcmd == SUBCMD_FILE:
            for rec in args.rec_path:
                db_api.add_file(rec, args.source, None)
            if args.progress:
                progress.total = db_api.total
        # Match
        elif args.subcmd == SUBCMD_MATCH:
            for url in args.url:
                db_api.add_match(args.platform, url)
            if args.progress:
                progress.total = db_api.total
        # Series
        elif args.subcmd == SUBCMD_SERIES:
            for path in args.zip_path:
                series, series_id = parse_series_path(path)
                db_api.add_series(path, series, series_id)
            if args.progress:
                progress.total = db_api.total
        # Database
        elif args.subcmd == SUBCMD_DB:
            remote_api = API(args.remote_db_url, args.remote_store_path)
            db_api.add_db(remote_api)
            if args.progress:
                progress.total = db_api.total
        # Archive
        elif args.subcmd == SUBCMD_ARCHIVE:
            db_api.add_archive(args.archive_path)
            if args.progress:
                progress.total = db_api.total
        db_api.finished()
        if args.progress:
            progress.close()
    # Remove
    elif args.cmd == CMD_REMOVE:
        db_api.remove(file_id=args.file, match_id=args.match)
    # Query
    elif args.cmd == CMD_QUERY:
        print(json.dumps(db_api.query(args.subcmd, **vars(args)), indent=2))
    # Get
    elif args.cmd == CMD_GET:
        filename, data = db_api.get(args.file)
        output_filename = args.output_path or filename
        # Never overwrite an existing file on disk.
        if os.path.exists(output_filename):
            print('file already exists:', output_filename)
            return
        with open(output_filename, 'wb') as handle:
            handle.write(data)
        print(output_filename)
    # Reset
    elif args.cmd == CMD_RESET:
        # Destructive: require interactive confirmation.
        if input('reset database completely? [y/N] ') == 'y':
            db_api.reset()
def setUp(self):
    """Enable terminal logging and prepare a temporary package cache."""
    # DEBUG-level colored logging to the terminal.
    coloredlogs.install()
    coloredlogs.set_level(logging.DEBUG)
    # Fresh scratch directory for the package database.
    self.db_directory = tempfile.mkdtemp()
    self.load_package_cache()
    # Force entropy so key generation in tests doesn't block.
    os.environ['DPT_FORCE_ENTROPY'] = 'yes'
def main():
    """Entry point for the shell manager CLI.

    Builds the argument parser with one subcommand per operation (package,
    publish_repo, bundle, deploy, undeploy, clean, status, publish, config),
    parses the arguments, configures colored logging, loads the hacksports
    configuration and dispatches to the selected subcommand's handler.
    Exits with status 1 on FatalException.
    """
    parser = ArgumentParser(description="Shell Manager")
    parser.add_argument(
        "-d", "--debug", action="store_true", default=False,
        help="show debug information")
    parser.add_argument(
        "--colorize", default="auto", choices=["auto", "never"],
        help="support colored output")
    subparsers = parser.add_subparsers()
    # Problem packaging and repository publication.
    problem_package_parser = subparsers.add_parser(
        "package", help="problem package management")
    problem_package_parser.add_argument(
        "-s", "--staging-dir",
        help="use an explicit directory for problem staging.")
    problem_package_parser.add_argument(
        "-o", "--out", help="folder to store problem package.")
    problem_package_parser.add_argument(
        "-i", "--ignore", dest="ignore", default=[], action="append",
        help="list of files to ignore adding to the deb")
    problem_package_parser.add_argument(
        "problem_paths", nargs="*", type=str, help="paths to problems.")
    problem_package_parser.set_defaults(func=problem_builder)
    publish_repo_parser = subparsers.add_parser(
        "publish_repo", help="publish packaged problems")
    publish_repo_parser.add_argument(
        "-r", "--repository", default="/usr/local/ctf-packages",
        help="Location of problem repository.")
    publish_repo_parser.add_argument("repo_type", choices=["local", "remote"])
    publish_repo_parser.add_argument(
        "package_paths", nargs="+", type=str,
        help="problem packages to publish.")
    publish_repo_parser.set_defaults(func=update_repo)
    # Bundle creation.
    bundle_parser = subparsers.add_parser(
        "bundle", help="create a bundle of problems")
    bundle_parser.add_argument(
        "bundle_path", type=str, help="the name of the bundle.")
    bundle_parser.add_argument(
        "-s", "--staging-dir",
        help="use an explicit directory for problem staging.")
    bundle_parser.add_argument(
        "-o", "--out", type=str, help="folder to store the bundle.")
    bundle_parser.set_defaults(func=bundle_problems)
    # Deployment / undeployment.
    deploy_parser = subparsers.add_parser("deploy", help="problem deployment")
    deploy_parser.add_argument(
        "-n", "--num-instances", type=int, default=1,
        help="number of instances to generate (numbers 0 through n-1).")
    deploy_parser.add_argument(
        "-i", "--instances", action="append", type=int,
        help="particular instance(s) to generate.")
    deploy_parser.add_argument(
        "-d", "--dry", action="store_true",
        help="don't make persistent changes.")
    deploy_parser.add_argument(
        "-r", "--redeploy", action="store_true",
        help="redeploy instances that have already been deployed")
    deploy_parser.add_argument(
        "-s", "--secret", action="store", type=str,
        help="use a different deployment secret for this invocation.")
    deploy_parser.add_argument(
        "-D", "--deployment-directory", type=str, default=None,
        help="the directory to deploy to")
    deploy_parser.add_argument(
        "-b", "--bundle", action="store_true",
        help="specify a bundle of problems to deploy.")
    deploy_parser.add_argument(
        "-nr", "--no-restart", action="store_true",
        help="do not restart xinetd after deployment.")
    deploy_parser.add_argument(
        "problem_paths", nargs="*", type=str, help="paths to problems.")
    deploy_parser.set_defaults(func=deploy_problems)
    undeploy_parser = subparsers.add_parser(
        "undeploy",
        help=
        "problem undeployment. cannot guarantee full removal of problem files")
    undeploy_parser.add_argument(
        "-n", "--num-instances", type=int, default=1,
        help="number of instances to undeploy (numbers 0 through n-1).")
    undeploy_parser.add_argument(
        "-i", "--instances", action="append", type=int,
        help="particular instance(s) to generate.")
    undeploy_parser.add_argument(
        "-b", "--bundle", action="store_true",
        help="specify a bundle of problems to undeploy.")
    undeploy_parser.add_argument(
        "problem_paths", nargs="*", type=str, help="paths to problems.")
    undeploy_parser.set_defaults(func=undeploy_problems)
    # Maintenance and introspection.
    clean_parser = subparsers.add_parser(
        "clean",
        help="Clean up the intermediate staging data stored during deployments")
    clean_parser.set_defaults(func=clean)
    status_parser = subparsers.add_parser(
        "status",
        help=
        "List the installed problems and bundles and any instances associated with them."
    )
    status_parser.add_argument(
        "-a", "--all", action="store_true",
        help="Show information about all problem instanes.")
    status_parser.add_argument(
        "-p", "--problem", type=str, default=None,
        help="Display status information for a given problem.")
    status_parser.add_argument(
        "-b", "--bundle", type=str, default=None,
        help="Display status information for a given bundle.")
    status_parser.add_argument(
        "-j", "--json", action="store_true", default=None,
        help="Display status information in json format")
    status_parser.add_argument(
        "-e", "--errors-only", action="store_true",
        help="Only print problems with failing service status.")
    status_parser.set_defaults(func=status)
    publish_parser = subparsers.add_parser(
        "publish",
        help=
        "Generate the information needed by the web server for this deployment."
    )
    publish_parser.set_defaults(func=publish)
    # Configuration viewing and editing.
    config_parser = subparsers.add_parser(
        "config", help="View or modify configuration options")
    config_parser.add_argument(
        "-f", "--file", type=str, default=None,
        help=
        "Which configuration file to access. If none is provided, the system wide configuration file will be used."
    )
    config_parser.add_argument(
        "-j", "--json", action="store_true", default=False,
        help=
        "Whether to display the configuration options in JSON form or pretty printed. Defaults to False."
    )
    config_parser.set_defaults(func=print_configuration)
    config_subparsers = config_parser.add_subparsers()
    config_set_parser = config_subparsers.add_parser(
        "set", help="Set configuration options")
    config_set_parser.add_argument(
        "-f", "--field", type=str, required=True, help="which field to set")
    config_set_parser.add_argument(
        "-v", "--value", type=str, required=True, help="options's new value")
    config_set_parser.add_argument(
        "-j", "--json", action="store_true", default=False,
        help="interpret the given value as JSON")
    config_set_parser.add_argument(
        "--allow-type-change", action="store_true", default=False,
        help="Allow the supplied field to change types if already specified")
    config_set_parser.set_defaults(func=set_configuration_option)
    config_new_parser = config_subparsers.add_parser(
        "new", help="Make a new configuration files with defaults")
    config_new_parser.add_argument(
        "files", nargs="+", help="Configuration files to make")
    config_new_parser.add_argument(
        "--overwrite", action="store_true", default=False,
        help="whether to overwrite files that already exist")
    config_new_parser.set_defaults(func=new_configuration_file)
    args = parser.parse_args()
    # Disable colored output on request by clearing the style tables.
    if args.colorize == "never":
        coloredlogs.DEFAULT_LEVEL_STYLES = {}
        coloredlogs.DEFAULT_FIELD_STYLES = {}
    coloredlogs.install()
    if args.debug:
        coloredlogs.set_level(logging.DEBUG)
    try:
        # Load the configuration, creating a default one if none exists yet.
        try:
            config = get_hacksports_config()
        except PermissionError:
            logger.error("You must run shell_manager with sudo.")
            raise FatalException
        except FileNotFoundError:
            place_default_config()
            logger.info(
                "There was no default configuration. One has been created for you. Please edit it accordingly using the 'shell_manager config' subcommand before deploying any instances."
            )
            raise FatalException
        # Call the default function
        if "func" in args:
            args.func(args, config)
        else:
            parser.print_help()
    except FatalException:
        exit(1)
def main():
    """Entry point for the Shell Manager command line interface.

    Builds the argparse command tree, configures colored logging, loads the
    hacksports configuration, and dispatches to the handler registered (via
    ``set_defaults(func=...)``) for the chosen subcommand.

    Exits with status 1 on ``FatalException`` (insufficient privileges, or a
    default configuration that was just created and still needs editing).
    """
    parser = ArgumentParser(description="Shell Manager")
    parser.add_argument(
        "-d", "--debug", action="store_true", default=False,
        help="show debug information")
    parser.add_argument(
        "--colorize", default="auto", choices=["auto", "never"],
        help="support colored output")

    subparsers = parser.add_subparsers()
    _add_packaging_parsers(subparsers)
    _add_deployment_parsers(subparsers)
    _add_status_parsers(subparsers)
    _add_config_parsers(subparsers)

    args = parser.parse_args()

    if args.colorize == "never":
        # Empty style maps disable all coloring in coloredlogs output.
        coloredlogs.DEFAULT_LEVEL_STYLES = {}
        coloredlogs.DEFAULT_FIELD_STYLES = {}

    coloredlogs.install()
    if args.debug:
        coloredlogs.set_level(logging.DEBUG)

    try:
        try:
            config = get_hacksports_config()
        except PermissionError:
            logger.error("You must run shell_manager with sudo.")
            raise FatalException
        except FileNotFoundError:
            # First run: write a default config, then bail so the operator
            # can edit it before deploying anything.
            place_default_config()
            logger.info(
                "There was no default configuration. One has been created for you. Please edit it accordingly using the 'shell_manager config' subcommand before deploying any instances."
            )
            raise FatalException

        # Dispatch to the subcommand handler if one was selected;
        # otherwise just print the usage summary.
        if "func" in args:
            args.func(args, config)
        else:
            parser.print_help()
    except FatalException:
        exit(1)


def _add_packaging_parsers(subparsers):
    """Register the 'package', 'publish_repo' and 'bundle' subcommands."""
    problem_package_parser = subparsers.add_parser(
        "package", help="problem package management")
    problem_package_parser.add_argument(
        "-s", "--staging-dir",
        help="use an explicit directory for problem staging.")
    problem_package_parser.add_argument(
        "-o", "--out", help="folder to store problem package.")
    problem_package_parser.add_argument(
        "-i", "--ignore", dest="ignore", default=[], action="append",
        help="list of files to ignore adding to the deb")
    problem_package_parser.add_argument(
        "problem_paths", nargs="*", type=str, help="paths to problems.")
    problem_package_parser.set_defaults(func=problem_builder)

    publish_repo_parser = subparsers.add_parser(
        "publish_repo", help="publish packaged problems")
    publish_repo_parser.add_argument(
        "-r", "--repository", default="/usr/local/ctf-packages",
        help="Location of problem repository.")
    publish_repo_parser.add_argument("repo_type", choices=["local", "remote"])
    publish_repo_parser.add_argument(
        "package_paths", nargs="+", type=str,
        help="problem packages to publish.")
    publish_repo_parser.set_defaults(func=update_repo)

    bundle_parser = subparsers.add_parser(
        "bundle", help="create a bundle of problems")
    bundle_parser.add_argument(
        "bundle_path", type=str, help="the name of the bundle.")
    bundle_parser.add_argument(
        "-s", "--staging-dir",
        help="use an explicit directory for problem staging.")
    bundle_parser.add_argument(
        "-o", "--out", type=str, help="folder to store the bundle.")
    bundle_parser.set_defaults(func=bundle_problems)


def _add_deployment_parsers(subparsers):
    """Register the 'deploy', 'undeploy' and 'clean' subcommands."""
    deploy_parser = subparsers.add_parser("deploy", help="problem deployment")
    deploy_parser.add_argument(
        "-n", "--num-instances", type=int, default=1,
        help="number of instances to generate (numbers 0 through n-1).")
    deploy_parser.add_argument(
        "-i", "--instances", action="append", type=int,
        help="particular instance(s) to generate.")
    deploy_parser.add_argument(
        "-d", "--dry", action="store_true",
        help="don't make persistent changes.")
    deploy_parser.add_argument(
        "-r", "--redeploy", action="store_true",
        help="redeploy instances that have already been deployed")
    deploy_parser.add_argument(
        "-s", "--secret", action="store", type=str,
        help="use a different deployment secret for this invocation.")
    deploy_parser.add_argument(
        "-D", "--deployment-directory", type=str, default=None,
        help="the directory to deploy to")
    deploy_parser.add_argument(
        "-b", "--bundle", action="store_true",
        help="specify a bundle of problems to deploy.")
    deploy_parser.add_argument(
        "problem_paths", nargs="*", type=str, help="paths to problems.")
    deploy_parser.set_defaults(func=deploy_problems)

    undeploy_parser = subparsers.add_parser(
        "undeploy",
        help="problem undeployment. cannot guarantee full removal of problem files")
    undeploy_parser.add_argument(
        "-n", "--num-instances", type=int, default=1,
        help="number of instances to undeploy (numbers 0 through n-1).")
    undeploy_parser.add_argument(
        "-i", "--instances", action="append", type=int,
        help="particular instance(s) to generate.")
    undeploy_parser.add_argument(
        "-b", "--bundle", action="store_true",
        help="specify a bundle of problems to undeploy.")
    undeploy_parser.add_argument(
        "problem_paths", nargs="*", type=str, help="paths to problems.")
    undeploy_parser.set_defaults(func=undeploy_problems)

    clean_parser = subparsers.add_parser(
        "clean",
        help="Clean up the intermediate staging data stored during deployments")
    clean_parser.set_defaults(func=clean)


def _add_status_parsers(subparsers):
    """Register the 'status' and 'publish' subcommands."""
    status_parser = subparsers.add_parser(
        "status",
        help="List the installed problems and bundles and any instances associated with them.")
    status_parser.add_argument(
        "-a", "--all", action="store_true",
        help="Show information about all problem instances.")
    status_parser.add_argument(
        "-p", "--problem", type=str, default=None,
        help="Display status information for a given problem.")
    status_parser.add_argument(
        "-b", "--bundle", type=str, default=None,
        help="Display status information for a given bundle.")
    status_parser.add_argument(
        "-j", "--json", action="store_true", default=None,
        help="Display status information in json format")
    status_parser.add_argument(
        "-e", "--errors-only", action="store_true",
        help="Only print problems with failing service status.")
    status_parser.set_defaults(func=status)

    publish_parser = subparsers.add_parser(
        "publish",
        help="Generate the information needed by the web server for this deployment.")
    publish_parser.set_defaults(func=publish)


def _add_config_parsers(subparsers):
    """Register the 'config' subcommand and its 'set'/'new' children."""
    config_parser = subparsers.add_parser(
        "config", help="View or modify configuration options")
    config_parser.add_argument(
        "-f", "--file", type=str, default=None,
        help="Which configuration file to access. If none is provided, the system wide configuration file will be used.")
    config_parser.add_argument(
        "-j", "--json", action="store_true", default=False,
        help="Whether to display the configuration options in JSON form or pretty printed. Defaults to False.")
    config_parser.set_defaults(func=print_configuration)

    config_subparsers = config_parser.add_subparsers()
    config_set_parser = config_subparsers.add_parser(
        "set", help="Set configuration options")
    config_set_parser.add_argument(
        "-f", "--field", type=str, required=True, help="which field to set")
    config_set_parser.add_argument(
        "-v", "--value", type=str, required=True, help="option's new value")
    config_set_parser.add_argument(
        "-j", "--json", action="store_true", default=False,
        help="interpret the given value as JSON")
    config_set_parser.add_argument(
        "--allow-type-change", action="store_true", default=False,
        help="Allow the supplied field to change types if already specified")
    config_set_parser.set_defaults(func=set_configuration_option)

    config_new_parser = config_subparsers.add_parser(
        "new", help="Make new configuration files with defaults")
    config_new_parser.add_argument(
        "files", nargs="+", help="Configuration files to make")
    config_new_parser.add_argument(
        "--overwrite", action="store_true", default=False,
        help="whether to overwrite files that already exist")
    config_new_parser.set_defaults(func=new_configuration_file)
ret.append(f) return ret if __name__ == "__main__": argument_parser = argparse.ArgumentParser() argument_parser.add_argument("--log-level", help="Logging level to use.", choices=_debug_levels.keys(), default='info') argument_parser.add_argument("--force", "-f", help="Force fresh download of all emotes.", action="store_true") argument_parser.add_argument("--skip-already-downloaded", help="Skip emotes that already exist on disk.\ (This is useful if you accidentally deleted an emote or two)", action="store_true") args = argument_parser.parse_args() coloredlogs.set_level(_debug_levels[args.log_level]) l.info("Desktop PonyMotes {} running!".format(__version__)) if not os.path.isdir(config_dir_path): l.warning("Config directory missing, performing first time setup...") first_run_setup() print("Just performed some first time setup!") print("Look in {} and edit the config to your heart's desire...".format( os.path.abspath(config_dir_path))) print("Then run this again!") sys.exit(0) os.chdir(config_dir_path) global config config = configparser.ConfigParser(allow_no_value=True, empty_lines_in_values=False) if not config.read(config_file): # Returns [] on failure l.warning("Config file not found! Remaking from default...") make_default_config()