def __init__(self, pack, file_path, parameters=None, parent_args=None):
    """
    :param pack: Name of the pack this action belongs to.
    :type pack: ``str``

    :param file_path: Path to the action module.
    :type file_path: ``str``

    :param parameters: action parameters.
    :type parameters: ``dict`` or ``None``

    :param parent_args: Command line arguments passed to the parent process.
    :type parent_args: ``list``
    """
    self._pack = pack
    self._file_path = file_path
    self._parameters = parameters or {}
    self._parent_args = parent_args or []
    self._class_name = None
    self._logger = logging.getLogger('PythonActionWrapper')

    try:
        config.parse_args(args=self._parent_args)
    except Exception:
        # Config parsing is best-effort here: the wrapper may run in an
        # environment where the parent args are not valid oslo.config input.
        pass
    else:
        # Only connect to the DB when config parsing succeeded.
        db_setup()
def main():
    """CLI entry point: diff pack content on disk against the database.

    Registers tool-specific CLI options, connects to the DB, runs the
    requested diffs (sensors/actions/rules or all), then disconnects.
    """
    monkey_patch()

    cli_opts = [
        cfg.BoolOpt("sensors", default=False, help="diff sensor alone."),
        cfg.BoolOpt("actions", default=False, help="diff actions alone."),
        cfg.BoolOpt("rules", default=False, help="diff rules alone."),
        cfg.BoolOpt("all", default=False, help="diff sensors, actions and rules."),
        cfg.BoolOpt("verbose", default=False),
        cfg.BoolOpt(
            "simple",
            default=False,
            # NOTE: a space is required between the sentences; the previous
            # concatenation rendered as "missing.It doesn't".
            help="In simple mode, tool only tells you if content is missing. "
            "It doesn't show you content diff between disk and db.",
        ),
        cfg.StrOpt("pack-dir", default=None, help="Path to specific pack to diff."),
    ]
    do_register_cli_opts(cli_opts)
    config.parse_args()

    # Connect to db.
    db_setup()

    # Diff content
    pack_dir = cfg.CONF.pack_dir or None
    content_diff = not cfg.CONF.simple

    if cfg.CONF.all:
        _diff_sensors(pack_dir=pack_dir, verbose=cfg.CONF.verbose, content_diff=content_diff)
        _diff_actions(pack_dir=pack_dir, verbose=cfg.CONF.verbose, content_diff=content_diff)
        _diff_rules(pack_dir=pack_dir, verbose=cfg.CONF.verbose, content_diff=content_diff)
        return

    if cfg.CONF.sensors:
        _diff_sensors(pack_dir=pack_dir, verbose=cfg.CONF.verbose, content_diff=content_diff)

    if cfg.CONF.actions:
        _diff_actions(pack_dir=pack_dir, verbose=cfg.CONF.verbose, content_diff=content_diff)

    if cfg.CONF.rules:
        _diff_rules(pack_dir=pack_dir, verbose=cfg.CONF.verbose, content_diff=content_diff)

    # Disconnect from db.
    db_teardown()
def test_read_large_string_value(benchmark, fixture_file: str, approach: str) -> None:
    """Benchmark reading back a model whose value field holds a large payload."""
    fixture_path = os.path.join(FIXTURES_DIR, fixture_file)
    with open(fixture_path, "rb") as fp:
        payload = fp.read()

    db_setup()

    if approach == "string_field":
        model_cls = LiveActionDB_StringField
        payload = payload.decode("utf-8")
    elif approach == "binary_field":
        model_cls = LiveActionDB_BinaryField
    else:
        raise ValueError("Unsupported approach")

    # 1. Insert the model
    doc = model_cls()
    doc.status = "succeeded"
    doc.action = "core.local"
    doc.value = payload
    saved_doc = LiveAction.add_or_update(doc)

    def run_benchmark():
        return LiveAction.get_by_id(saved_doc.id)

    fetched_doc = benchmark.pedantic(run_benchmark, iterations=10, rounds=10)

    assert fetched_doc == saved_doc
    assert fetched_doc.value == payload
def test_save_large_string_value(benchmark, fixture_file: str, approach: str) -> None:
    # Here we time how long it takes to save a long string value on StringField and BinaryField
    fixture_path = os.path.join(FIXTURES_DIR, fixture_file)
    with open(fixture_path, "rb") as fp:
        payload = fp.read()

    db_setup()

    if approach == "string_field":
        model_cls = LiveActionDB_StringField
        payload = payload.decode("utf-8")
    elif approach == "binary_field":
        model_cls = LiveActionDB_BinaryField
    else:
        raise ValueError("Unsupported approach")

    def run_benchmark():
        # Build and persist a fresh model on every iteration so the save
        # path is what gets measured.
        doc = model_cls()
        doc.status = "succeeded"
        doc.action = "core.local"
        doc.value = payload
        return LiveAction.add_or_update(doc)

    saved_doc = benchmark.pedantic(run_benchmark, iterations=10, rounds=10)
    assert bool(saved_doc.value)
def test_read_large_execution(benchmark, fixture_file: str, approach: str) -> None:
    """Benchmark retrieving an execution with a very large result document."""
    fixture_path = os.path.join(FIXTURES_DIR, fixture_file)
    with open(fixture_path, "r") as fp:
        raw = fp.read()
    fixture_data = json.loads(raw)

    db_setup()

    # 1. Insert the large execution
    model_cls = get_model_class_for_approach(approach=approach)

    doc = model_cls()
    doc.status = "succeeded"
    doc.action = "core.local"
    doc.result = fixture_data
    saved_doc = LiveAction.add_or_update(doc)

    def run_benchmark():
        return LiveAction.get_by_id(saved_doc.id)

    fetched_doc = benchmark.pedantic(run_benchmark, iterations=3, rounds=3)

    # Assert that result is correctly converted back to dict on retrieval
    assert fetched_doc == saved_doc
    assert fetched_doc.result == fixture_data
def test_save_multiple_fields(benchmark, fixture_file: str, approach: str) -> None:
    # Here we benchmark a scenario where a single model contains multiple fields with a new
    # field type.
    # NOTE: We don't test with 8 MB fixture since this would cause BSON 16 MB document limit to
    # be reached with EscapedDynamicField type
    fixture_path = os.path.join(FIXTURES_DIR, fixture_file)
    with open(fixture_path, "r") as fp:
        fixture_data = json.loads(fp.read())

    db_setup()

    model_cls = get_model_class_for_approach(approach=approach)

    def run_benchmark():
        # Populate all three fields with the same payload and persist.
        doc = model_cls()
        doc.status = "succeeded"
        doc.action = "core.local"
        doc.field1 = fixture_data
        doc.field2 = fixture_data
        doc.field3 = fixture_data
        return LiveAction.add_or_update(doc)

    saved_doc = benchmark.pedantic(run_benchmark, iterations=3, rounds=3)

    fetched_doc = LiveAction.get_by_id(saved_doc.id)

    # Assert that result is correctly converted back to dict on retrieval
    assert saved_doc.field1 == fixture_data
    assert saved_doc.field2 == fixture_data
    assert saved_doc.field3 == fixture_data
    assert saved_doc == fetched_doc
def main():
    """CLI entry point: diff pack content on disk against the database.

    Registers tool-specific CLI options, connects to the DB, runs the
    requested diffs (sensors/actions/rules or all), then disconnects.
    """
    monkey_patch()

    cli_opts = [
        cfg.BoolOpt('sensors', default=False, help='diff sensor alone.'),
        cfg.BoolOpt('actions', default=False, help='diff actions alone.'),
        cfg.BoolOpt('rules', default=False, help='diff rules alone.'),
        cfg.BoolOpt('all', default=False, help='diff sensors, actions and rules.'),
        cfg.BoolOpt('verbose', default=False),
        cfg.BoolOpt(
            'simple',
            default=False,
            # NOTE: a space is required between the sentences; the previous
            # concatenation rendered as "missing.It doesn't".
            help='In simple mode, tool only tells you if content is missing. '
                 'It doesn\'t show you content diff between disk and db.'),
        cfg.StrOpt('pack-dir', default=None, help='Path to specific pack to diff.')
    ]
    do_register_cli_opts(cli_opts)
    config.parse_args()

    # Connect to db.
    db_setup()

    # Diff content
    pack_dir = cfg.CONF.pack_dir or None
    content_diff = not cfg.CONF.simple

    if cfg.CONF.all:
        _diff_sensors(pack_dir=pack_dir, verbose=cfg.CONF.verbose, content_diff=content_diff)
        _diff_actions(pack_dir=pack_dir, verbose=cfg.CONF.verbose, content_diff=content_diff)
        _diff_rules(pack_dir=pack_dir, verbose=cfg.CONF.verbose, content_diff=content_diff)
        return

    if cfg.CONF.sensors:
        _diff_sensors(pack_dir=pack_dir, verbose=cfg.CONF.verbose, content_diff=content_diff)

    if cfg.CONF.actions:
        _diff_actions(pack_dir=pack_dir, verbose=cfg.CONF.verbose, content_diff=content_diff)

    if cfg.CONF.rules:
        _diff_rules(pack_dir=pack_dir, verbose=cfg.CONF.verbose, content_diff=content_diff)

    # Disconnect from db.
    db_teardown()
def main():
    """Entry point: parse config, run the rule migration, then disconnect."""
    config.parse_args()

    # Establish the database connection.
    db_setup()

    # Run the actual rule migration.
    migrate_rules()

    # Close the database connection.
    db_teardown()
def main():
    """CLI entry point: analyze rule links starting from a root action and
    render the resulting pipeline graph.
    """
    _monkey_patch()

    cli_opts = [
        cfg.StrOpt('action_ref', default=None,
                   help='Root action to begin analysis.'),
        # NOTE: help text previously duplicated the action_ref description
        # ("Root action to begin analysis.") due to a copy-paste error.
        cfg.StrOpt('link_trigger_ref', default='core.st2.generic.actiontrigger',
                   help='Trigger reference used to link actions via rules.'),
        cfg.StrOpt('out_file', default='pipeline')
    ]
    do_register_cli_opts(cli_opts)
    config.parse_args()
    db_setup()

    rule_links = LinksAnalyzer().analyze(cfg.CONF.action_ref, cfg.CONF.link_trigger_ref)
    Grapher().generate_graph(rule_links, cfg.CONF.out_file)
def main():
    """CLI entry point: analyze rule links starting from a root action and
    render the resulting pipeline graph.
    """
    monkey_patch()

    cli_opts = [
        cfg.StrOpt('action_ref', default=None,
                   help='Root action to begin analysis.'),
        # NOTE: help text previously duplicated the action_ref description
        # ("Root action to begin analysis.") due to a copy-paste error.
        cfg.StrOpt('link_trigger_ref', default='core.st2.generic.actiontrigger',
                   help='Trigger reference used to link actions via rules.'),
        cfg.StrOpt('out_file', default='pipeline')
    ]
    do_register_cli_opts(cli_opts)
    config.parse_args()
    db_setup()

    rule_links = LinksAnalyzer().analyze(cfg.CONF.action_ref, cfg.CONF.link_trigger_ref)
    Grapher().generate_graph(rule_links, cfg.CONF.out_file)
def test_read_execution(benchmark, fixture_file: str, compression):
    """Benchmark reading an execution under different wire-compression settings."""
    fixture_path = os.path.join(FIXTURES_DIR, fixture_file)
    with open(fixture_path, "rb") as fp:
        payload = fp.read()

    cfg.CONF.set_override(name="compressors", group="database", override=compression)

    # NOTE: It's important we correctly reestablish connection before each setting change
    disconnect()
    connection = db_setup()

    connection_repr = str(connection)
    if compression is None:
        assert "compressors" not in connection_repr
    elif compression == "zstd":
        assert "compressors=['zstd']" in connection_repr

    doc = LiveActionDB()
    doc.status = "succeeded"
    doc.action = "core.local"
    doc.result = payload
    saved_doc = LiveAction.add_or_update(doc)

    def run_benchmark():
        return LiveAction.get_by_id(saved_doc.id)

    fetched_doc = benchmark(run_benchmark)

    # Assert that result is correctly converted back to dict on retrieval
    assert fetched_doc == saved_doc
def _setup(argv):
    """Parse config, configure logging, connect to the DB and register exchanges.

    NOTE(review): ``argv`` is currently unused — ``config.parse_args()`` falls
    back to ``sys.argv``; confirm whether callers expect ``argv`` to be honored.
    """
    config.parse_args()

    log_level = logging.DEBUG
    logging.basicConfig(
        format='%(asctime)s %(levelname)s [-] %(message)s',
        level=log_level)

    if not cfg.CONF.verbose:
        # Note: We still want to print things at the following log levels: INFO, ERROR, CRITICAL
        exclude_log_levels = [logging.AUDIT, logging.DEBUG]
        root_handlers = logging.getLoggerClass().manager.root.handlers
        for handler in root_handlers:
            handler.addFilter(LogLevelFilter(log_levels=exclude_log_levels))

    db_setup()
    register_exchanges()
def main():
    """CLI entry point: diff pack content on disk against the database.

    Registers tool-specific CLI options, connects to the DB, runs the
    requested diffs (sensors/actions/rules or all), then disconnects.
    """
    monkey_patch()

    cli_opts = [
        cfg.BoolOpt('sensors', default=False, help='diff sensor alone.'),
        cfg.BoolOpt('actions', default=False, help='diff actions alone.'),
        cfg.BoolOpt('rules', default=False, help='diff rules alone.'),
        cfg.BoolOpt('all', default=False, help='diff sensors, actions and rules.'),
        cfg.BoolOpt('verbose', default=False),
        # NOTE: a space is required between the sentences; the previous
        # concatenation rendered as "missing.It doesn't".
        cfg.BoolOpt('simple', default=False,
                    help='In simple mode, tool only tells you if content is missing. '
                         'It doesn\'t show you content diff between disk and db.'),
        cfg.StrOpt('pack-dir', default=None, help='Path to specific pack to diff.')
    ]
    do_register_cli_opts(cli_opts)
    config.parse_args()

    # Connect to db.
    db_setup()

    # Diff content
    pack_dir = cfg.CONF.pack_dir or None
    content_diff = not cfg.CONF.simple

    if cfg.CONF.all:
        _diff_sensors(pack_dir=pack_dir, verbose=cfg.CONF.verbose, content_diff=content_diff)
        _diff_actions(pack_dir=pack_dir, verbose=cfg.CONF.verbose, content_diff=content_diff)
        _diff_rules(pack_dir=pack_dir, verbose=cfg.CONF.verbose, content_diff=content_diff)
        return

    if cfg.CONF.sensors:
        _diff_sensors(pack_dir=pack_dir, verbose=cfg.CONF.verbose, content_diff=content_diff)

    if cfg.CONF.actions:
        _diff_actions(pack_dir=pack_dir, verbose=cfg.CONF.verbose, content_diff=content_diff)

    if cfg.CONF.rules:
        _diff_rules(pack_dir=pack_dir, verbose=cfg.CONF.verbose, content_diff=content_diff)

    # Disconnect from db.
    db_teardown()
def main():
    """Entry point: migrate datastore items and exit 0 on success, 1 on failure."""
    config.parse_args()

    # Connect to db.
    db_setup()

    # Migrate datastore items.
    try:
        migrate_datastore()
        print('SUCCESS: Datastore items migrated successfully.')
        exit_code = 0
    except Exception:
        # NOTE: a bare "except:" here would also swallow SystemExit and
        # KeyboardInterrupt; catch Exception so those still propagate.
        print('ABORTED: Datastore migration aborted on first failure.')
        exit_code = 1

    # Disconnect from db.
    db_teardown()
    sys.exit(exit_code)
def __init__(self, pack, file_path, parameters=None, user=None, parent_args=None):
    """
    :param pack: Name of the pack this action belongs to.
    :type pack: ``str``

    :param file_path: Path to the action module.
    :type file_path: ``str``

    :param parameters: action parameters.
    :type parameters: ``dict`` or ``None``

    :param user: Name of the user who triggered this action execution.
    :type user: ``str``

    :param parent_args: Command line arguments passed to the parent process.
    :type parent_args: ``list``
    """
    self._pack = pack
    self._file_path = file_path
    self._parameters = parameters or {}
    self._user = user
    self._parent_args = parent_args or []
    self._class_name = None
    self._logger = logging.getLogger('PythonActionWrapper')

    try:
        config.parse_args(args=self._parent_args)
    except Exception:
        # Best-effort config parsing; db_setup below still runs either way.
        pass

    db_setup()

    # Note: We can only set a default user value if one is not provided after parsing the
    # config
    if not self._user:
        self._user = cfg.CONF.system_user.user
def main():
    """CLI entry point: analyze rule links starting from a root action and
    render the resulting pipeline graph.
    """
    monkey_patch()

    cli_opts = [
        cfg.StrOpt("action_ref", default=None, help="Root action to begin analysis."),
        # NOTE: help text previously duplicated the action_ref description
        # ("Root action to begin analysis.") due to a copy-paste error.
        cfg.StrOpt(
            "link_trigger_ref",
            default="core.st2.generic.actiontrigger",
            help="Trigger reference used to link actions via rules.",
        ),
        cfg.StrOpt("out_file", default="pipeline"),
    ]
    do_register_cli_opts(cli_opts)
    config.parse_args()
    db_setup()

    rule_links = LinksAnalyzer().analyze(cfg.CONF.action_ref, cfg.CONF.link_trigger_ref)
    Grapher().generate_graph(rule_links, cfg.CONF.out_file)
def main():
    """Entry point: clean up executions stuck in the deprecated policy-delayed
    status; exit 0 on success, 1 on failure.
    """
    config.parse_args()

    # Connect to db.
    db_setup()

    try:
        handler = scheduler_handler.get_handler()
        handler._cleanup_policy_delayed()
        LOG.info('SUCCESS: Completed clean up of executions with deprecated policy-delayed status.')
        exit_code = 0
    except Exception as e:
        # NOTE: Python 3 exceptions have no ".message" attribute; using it here
        # would raise AttributeError inside the error handler itself.
        LOG.error(
            'ABORTED: Clean up of executions with deprecated policy-delayed status aborted on '
            'first failure. %s', e
        )
        exit_code = 1

    # Disconnect from db.
    db_teardown()
    sys.exit(exit_code)
def setup(config, setup_db=True, register_mq_exchanges=True):
    """
    Common setup function.

    Currently it performs the following operations:

    1. Parses config and CLI arguments
    2. Establishes DB connection
    3. Suppress DEBUG log level if --verbose flag is not used
    4. Registers RabbitMQ exchanges

    :param config: Config object to use to parse args.
    """
    # Register common CLI options
    register_common_cli_options()

    # Parse args to setup config
    config.parse_args()

    # Set up logging
    log_level = stdlib_logging.DEBUG
    stdlib_logging.basicConfig(
        format='%(asctime)s %(levelname)s [-] %(message)s',
        level=log_level)

    if not cfg.CONF.verbose:
        # Note: We still want to print things at the following log levels: INFO, ERROR, CRITICAL
        exclude_log_levels = [stdlib_logging.AUDIT, stdlib_logging.DEBUG]
        root_handlers = stdlib_logging.getLoggerClass().manager.root.handlers
        for handler in root_handlers:
            handler.addFilter(LogLevelFilter(log_levels=exclude_log_levels))

    # All other setup code which requires config to be parsed and logging to be correctly setup
    if setup_db:
        db_setup()

    if register_mq_exchanges:
        register_exchanges_with_retry()
def setup(config, setup_db=True, register_mq_exchanges=True):
    """
    Common setup function.

    Currently it performs the following operations:

    1. Parses config and CLI arguments
    2. Establishes DB connection
    3. Suppress DEBUG log level if --verbose flag is not used
    4. Registers RabbitMQ exchanges

    :param config: Config object to use to parse args.
    """
    # Register common CLI options
    register_common_cli_options()

    # Parse args to setup config
    config.parse_args()

    # Set up logging
    log_level = stdlib_logging.DEBUG
    stdlib_logging.basicConfig(
        format='%(asctime)s %(levelname)s [-] %(message)s',
        level=log_level)

    if not cfg.CONF.verbose:
        # Note: We still want to print things at the following log levels: INFO, ERROR, CRITICAL
        exclude_log_levels = [stdlib_logging.AUDIT, stdlib_logging.DEBUG]
        root_handlers = stdlib_logging.getLoggerClass().manager.root.handlers
        for handler in root_handlers:
            handler.addFilter(LogLevelFilter(log_levels=exclude_log_levels))

    # All other setup code which requires config to be parsed and logging to be correctly setup
    if setup_db:
        db_setup()

    if register_mq_exchanges:
        register_exchanges()
def _setup_db():
    # Thin wrapper: establish the database connection via the shared
    # db_setup() helper (imported elsewhere in this file).
    db_setup()