def setup_app(config={}):
    LOG.info('Creating st2api: %s as OpenAPI app.', VERSION_STRING)

    is_gunicorn = config.get('is_gunicorn', False)
    if is_gunicorn:
        # Note: We need to perform monkey patching in the worker. If we do it in
        # the master process (gunicorn_config.py), it breaks tons of things
        # including shutdown
        monkey_patch()

        st2api_config.register_opts()
        capabilities = {
            'name': 'api',
            'listen_host': cfg.CONF.api.host,
            'listen_port': cfg.CONF.api.port,
            'type': 'active'
        }

        # This should be called in gunicorn case because we only want
        # workers to connect to db, rabbitmq etc. In standalone HTTP
        # server case, this setup would have already occurred.
        common_setup(service='api', config=st2api_config, setup_db=True,
                     register_mq_exchanges=True,
                     register_signal_handlers=True,
                     register_internal_trigger_types=True,
                     run_migrations=True,
                     service_registry=True,
                     capabilities=capabilities,
                     config_args=config.get('config_args', None))

    # Additional pre-run time checks
    validate_rbac_is_correctly_configured()

    router = Router(debug=cfg.CONF.api.debug, auth=cfg.CONF.auth.enable,
                    is_gunicorn=is_gunicorn)

    spec = spec_loader.load_spec('st2common', 'openapi.yaml.j2')
    transforms = {
        '^/api/v1/$': ['/v1'],
        '^/api/v1/': ['/', '/v1/'],
        '^/api/v1/executions': ['/actionexecutions', '/v1/actionexecutions'],
        '^/api/exp/': ['/exp/']
    }
    router.add_spec(spec, transforms=transforms)

    app = router.as_wsgi

    # Order is important. Check middleware for detailed explanation.
    app = StreamingMiddleware(app, path_whitelist=['/v1/executions/*/output*'])
    app = ErrorHandlingMiddleware(app)
    app = CorsMiddleware(app)
    app = LoggingMiddleware(app, router)
    app = ResponseInstrumentationMiddleware(app, router, service_name='api')
    app = RequestIDMiddleware(app)
    app = RequestInstrumentationMiddleware(app, router, service_name='api')

    return app
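A minimal sketch of how the WSGI app returned above might be served standalone (outside gunicorn), assuming an eventlet-based HTTP server; the host/port values and the run_standalone helper are illustrative, not taken from the source:

import eventlet
from eventlet import wsgi


def run_standalone(host='127.0.0.1', port=9101):
    # Hypothetical standalone entry point: bind a socket and hand the WSGI app
    # to eventlet's wsgi server. In the standalone case common_setup() is
    # assumed to have run already, matching the comment in setup_app(). In the
    # gunicorn case, gunicorn would call setup_app({'is_gunicorn': True}) and
    # manage the workers itself.
    app = setup_app()
    sock = eventlet.listen((host, port))
    wsgi.server(sock, app)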
def main():
    monkey_patch()

    cli_opts = [
        cfg.BoolOpt("sensors", default=False, help="diff sensor alone."),
        cfg.BoolOpt("actions", default=False, help="diff actions alone."),
        cfg.BoolOpt("rules", default=False, help="diff rules alone."),
        cfg.BoolOpt("all", default=False, help="diff sensors, actions and rules."),
        cfg.BoolOpt("verbose", default=False),
        cfg.BoolOpt(
            "simple",
            default=False,
            help="In simple mode, tool only tells you if content is missing."
            + " It doesn't show you content diff between disk and db.",
        ),
        cfg.StrOpt("pack-dir", default=None, help="Path to specific pack to diff."),
    ]
    do_register_cli_opts(cli_opts)
    config.parse_args()

    # Connect to db.
    db_setup()

    # Diff content
    pack_dir = cfg.CONF.pack_dir or None
    content_diff = not cfg.CONF.simple

    if cfg.CONF.all:
        _diff_sensors(pack_dir=pack_dir, verbose=cfg.CONF.verbose, content_diff=content_diff)
        _diff_actions(pack_dir=pack_dir, verbose=cfg.CONF.verbose, content_diff=content_diff)
        _diff_rules(pack_dir=pack_dir, verbose=cfg.CONF.verbose, content_diff=content_diff)
        return

    if cfg.CONF.sensors:
        _diff_sensors(pack_dir=pack_dir, verbose=cfg.CONF.verbose, content_diff=content_diff)

    if cfg.CONF.actions:
        _diff_actions(pack_dir=pack_dir, verbose=cfg.CONF.verbose, content_diff=content_diff)

    if cfg.CONF.rules:
        _diff_rules(pack_dir=pack_dir, verbose=cfg.CONF.verbose, content_diff=content_diff)

    # Disconnect from db.
    db_teardown()
def main():
    monkey_patch()

    cli_opts = [
        cfg.BoolOpt('sensors', default=False, help='diff sensor alone.'),
        cfg.BoolOpt('actions', default=False, help='diff actions alone.'),
        cfg.BoolOpt('rules', default=False, help='diff rules alone.'),
        cfg.BoolOpt('all', default=False, help='diff sensors, actions and rules.'),
        cfg.BoolOpt('verbose', default=False),
        cfg.BoolOpt(
            'simple', default=False,
            help='In simple mode, tool only tells you if content is missing.' +
                 ' It doesn\'t show you content diff between disk and db.'),
        cfg.StrOpt('pack-dir', default=None, help='Path to specific pack to diff.')
    ]
    do_register_cli_opts(cli_opts)
    config.parse_args()

    # Connect to db.
    db_setup()

    # Diff content
    pack_dir = cfg.CONF.pack_dir or None
    content_diff = not cfg.CONF.simple

    if cfg.CONF.all:
        _diff_sensors(pack_dir=pack_dir, verbose=cfg.CONF.verbose, content_diff=content_diff)
        _diff_actions(pack_dir=pack_dir, verbose=cfg.CONF.verbose, content_diff=content_diff)
        _diff_rules(pack_dir=pack_dir, verbose=cfg.CONF.verbose, content_diff=content_diff)
        return

    if cfg.CONF.sensors:
        _diff_sensors(pack_dir=pack_dir, verbose=cfg.CONF.verbose, content_diff=content_diff)

    if cfg.CONF.actions:
        _diff_actions(pack_dir=pack_dir, verbose=cfg.CONF.verbose, content_diff=content_diff)

    if cfg.CONF.rules:
        _diff_rules(pack_dir=pack_dir, verbose=cfg.CONF.verbose, content_diff=content_diff)

    # Disconnect from db.
    db_teardown()
def main():
    monkey_patch()

    cli_opts = [
        cfg.IntOpt(
            'rate', default=100,
            help='Rate of trigger injection measured in instances per sec.' +
                 ' Assumes a default exponential distribution in time so arrival is poisson.'),
        cfg.ListOpt(
            'triggers', required=False,
            help='List of triggers for which instances should be fired.' +
                 ' Uniform distribution will be followed if there is more than one trigger.'),
        cfg.StrOpt('schema_file', default=None,
                   help='Path to schema file defining trigger and payload.'),
        cfg.IntOpt('duration', default=1,
                   help='Duration of stress test in minutes.')
    ]
    do_register_cli_opts(cli_opts)
    config.parse_args()

    # Get config values
    triggers = cfg.CONF.triggers
    trigger_payload_schema = {}

    if not triggers:
        if (cfg.CONF.schema_file is None or cfg.CONF.schema_file == '' or
                not os.path.exists(cfg.CONF.schema_file)):
            print('Either "triggers" need to be provided or a schema file containing' +
                  ' triggers should be provided.')
            return
        with open(cfg.CONF.schema_file) as fd:
            trigger_payload_schema = yaml.safe_load(fd)
            triggers = list(trigger_payload_schema.keys())
            print('Triggers=%s' % triggers)

    rate = cfg.CONF.rate
    rate_per_trigger = int(rate / len(triggers))
    duration = cfg.CONF.duration

    dispatcher_pool = eventlet.GreenPool(len(triggers))

    for trigger in triggers:
        payload = trigger_payload_schema.get(trigger, {})
        dispatcher_pool.spawn(_inject_instances, trigger, rate_per_trigger, duration,
                              payload=payload)
        eventlet.sleep(random.uniform(0, 1))
    dispatcher_pool.waitall()
def main():
    monkey_patch()

    cli_opts = [
        cfg.IntOpt('rate', default=100,
                   help='Rate of trigger injection measured in instances per sec.' +
                        ' Assumes a default exponential distribution in time so arrival is poisson.'),
        cfg.ListOpt('triggers', required=False,
                    help='List of triggers for which instances should be fired.' +
                         ' Uniform distribution will be followed if there is more than one trigger.'),
        cfg.StrOpt('schema_file', default=None,
                   help='Path to schema file defining trigger and payload.'),
        cfg.IntOpt('duration', default=60,
                   help='Duration of stress test in seconds.'),
        cfg.BoolOpt('max-throughput', default=False,
                    help='If True, "rate" argument will be ignored and this script will try to '
                         'saturate the CPU and achieve max utilization.')
    ]
    do_register_cli_opts(cli_opts)
    config.parse_args()

    # Get config values
    triggers = cfg.CONF.triggers
    trigger_payload_schema = {}

    if not triggers:
        if (cfg.CONF.schema_file is None or cfg.CONF.schema_file == '' or
                not os.path.exists(cfg.CONF.schema_file)):
            print('Either "triggers" need to be provided or a schema file containing' +
                  ' triggers should be provided.')
            return
        with open(cfg.CONF.schema_file) as fd:
            trigger_payload_schema = yaml.safe_load(fd)
            triggers = list(trigger_payload_schema.keys())
            print('Triggers=%s' % triggers)

    rate = cfg.CONF.rate
    rate_per_trigger = int(rate / len(triggers))
    duration = cfg.CONF.duration
    max_throughput = cfg.CONF.max_throughput

    if max_throughput:
        rate = 0
        rate_per_trigger = 0

    dispatcher_pool = eventlet.GreenPool(len(triggers))

    for trigger in triggers:
        payload = trigger_payload_schema.get(trigger, {})
        dispatcher_pool.spawn(_inject_instances, trigger, rate_per_trigger, duration,
                              payload=payload, max_throughput=max_throughput)
        eventlet.sleep(random.uniform(0, 1))
    dispatcher_pool.waitall()
def setup_app(config=None):
    LOG.info('Creating st2api: %s as Pecan app.', VERSION_STRING)

    is_gunicorn = getattr(config, 'is_gunicorn', False)
    if is_gunicorn:
        # Note: We need to perform monkey patching in the worker. If we do it in
        # the master process (gunicorn_config.py), it breaks tons of things
        # including shutdown
        monkey_patch()

        st2api_config.register_opts()
        # This should be called in gunicorn case because we only want
        # workers to connect to db, rabbitmq etc. In standalone HTTP
        # server case, this setup would have already occurred.
        common_setup(service='api', config=st2api_config, setup_db=True,
                     register_mq_exchanges=True,
                     register_signal_handlers=True,
                     register_internal_trigger_types=True,
                     run_migrations=True,
                     config_args=config.config_args)

    if not config:
        # standalone HTTP server case
        config = _get_pecan_config()
    else:
        # gunicorn case
        if is_gunicorn:
            config.app = _get_pecan_config().app

    app_conf = dict(config.app)

    active_hooks = [
        hooks.RequestIDHook(),
        hooks.JSONErrorResponseHook(),
        hooks.LoggingHook()
    ]

    active_hooks.append(hooks.AuthHook())
    active_hooks.append(hooks.CorsHook())

    app = pecan.make_app(app_conf.pop('root'),
                         logging=getattr(config, 'logging', {}),
                         hooks=active_hooks,
                         **app_conf)

    # Static middleware which serves common static assets such as logos
    static_root = os.path.join(BASE_DIR, 'public')
    app = StaticFileMiddleware(app=app, directory=static_root)

    LOG.info('%s app created.' % __name__)

    return app
def setup_app(config={}): LOG.info("Creating st2stream: %s as OpenAPI app.", VERSION_STRING) is_gunicorn = config.get("is_gunicorn", False) if is_gunicorn: # Note: We need to perform monkey patching in the worker. If we do it in # the master process (gunicorn_config.py), it breaks tons of things # including shutdown monkey_patch() st2stream_config.register_opts() capabilities = { "name": "stream", "listen_host": cfg.CONF.stream.host, "listen_port": cfg.CONF.stream.port, "type": "active", } # This should be called in gunicorn case because we only want # workers to connect to db, rabbbitmq etc. In standalone HTTP # server case, this setup would have already occurred. common_setup( service="stream", config=st2stream_config, setup_db=True, register_mq_exchanges=True, register_signal_handlers=True, register_internal_trigger_types=False, run_migrations=False, service_registry=True, capabilities=capabilities, config_args=config.get("config_args", None), ) router = Router(debug=cfg.CONF.stream.debug, auth=cfg.CONF.auth.enable, is_gunicorn=is_gunicorn) spec = spec_loader.load_spec("st2common", "openapi.yaml.j2") transforms = {"^/stream/v1/": ["/", "/v1/"]} router.add_spec(spec, transforms=transforms) app = router.as_wsgi # Order is important. Check middleware for detailed explanation. app = StreamingMiddleware(app) app = ErrorHandlingMiddleware(app) app = CorsMiddleware(app) app = LoggingMiddleware(app, router) app = ResponseInstrumentationMiddleware(app, router, service_name="stream") app = RequestIDMiddleware(app) app = RequestInstrumentationMiddleware(app, router, service_name="stream") return app
def setup_app(config=None):
    LOG.info('Creating st2api: %s as Pecan app.', VERSION_STRING)

    is_gunicorn = getattr(config, 'is_gunicorn', False)
    if is_gunicorn:
        # Note: We need to perform monkey patching in the worker. If we do it in
        # the master process (gunicorn_config.py), it breaks tons of things
        # including shutdown
        monkey_patch()

        st2api_config.register_opts()
        # This should be called in gunicorn case because we only want
        # workers to connect to db, rabbitmq etc. In standalone HTTP
        # server case, this setup would have already occurred.
        common_setup(service='api', config=st2api_config, setup_db=True,
                     register_mq_exchanges=True,
                     register_signal_handlers=True,
                     register_internal_trigger_types=True,
                     run_migrations=True,
                     config_args=config.config_args)

    if not config:
        # standalone HTTP server case
        config = _get_pecan_config()
    else:
        # gunicorn case
        if is_gunicorn:
            config.app = _get_pecan_config().app

    app_conf = dict(config.app)

    active_hooks = [hooks.RequestIDHook(), hooks.JSONErrorResponseHook(),
                    hooks.LoggingHook()]

    if cfg.CONF.auth.enable:
        active_hooks.append(hooks.AuthHook())
    active_hooks.append(hooks.CorsHook())

    app = pecan.make_app(app_conf.pop('root'),
                         logging=getattr(config, 'logging', {}),
                         hooks=active_hooks,
                         **app_conf
                         )

    # Static middleware which serves common static assets such as logos
    static_root = os.path.join(BASE_DIR, 'public')
    app = StaticFileMiddleware(app=app, directory=static_root)

    LOG.info('%s app created.' % __name__)

    return app
def main():
    monkey_patch()

    cli_opts = [
        cfg.BoolOpt('sensors', default=False, help='diff sensor alone.'),
        cfg.BoolOpt('actions', default=False, help='diff actions alone.'),
        cfg.BoolOpt('rules', default=False, help='diff rules alone.'),
        cfg.BoolOpt('all', default=False, help='diff sensors, actions and rules.'),
        cfg.BoolOpt('verbose', default=False),
        cfg.BoolOpt('simple', default=False,
                    help='In simple mode, tool only tells you if content is missing.' +
                         ' It doesn\'t show you content diff between disk and db.'),
        cfg.StrOpt('pack-dir', default=None, help='Path to specific pack to diff.')
    ]
    do_register_cli_opts(cli_opts)
    config.parse_args()

    username = cfg.CONF.database.username if hasattr(cfg.CONF.database, 'username') else None
    password = cfg.CONF.database.password if hasattr(cfg.CONF.database, 'password') else None

    # Connect to db.
    db_setup(cfg.CONF.database.db_name, cfg.CONF.database.host, cfg.CONF.database.port,
             username=username, password=password)

    # Diff content
    pack_dir = cfg.CONF.pack_dir or None
    content_diff = not cfg.CONF.simple

    if cfg.CONF.all:
        _diff_sensors(pack_dir=pack_dir, verbose=cfg.CONF.verbose, content_diff=content_diff)
        _diff_actions(pack_dir=pack_dir, verbose=cfg.CONF.verbose, content_diff=content_diff)
        _diff_rules(pack_dir=pack_dir, verbose=cfg.CONF.verbose, content_diff=content_diff)
        return

    if cfg.CONF.sensors:
        _diff_sensors(pack_dir=pack_dir, verbose=cfg.CONF.verbose, content_diff=content_diff)

    if cfg.CONF.actions:
        _diff_actions(pack_dir=pack_dir, verbose=cfg.CONF.verbose, content_diff=content_diff)

    if cfg.CONF.rules:
        _diff_rules(pack_dir=pack_dir, verbose=cfg.CONF.verbose, content_diff=content_diff)

    # Disconnect from db.
    db_teardown()
def main():
    monkey_patch()

    cli_opts = [
        cfg.StrOpt('action_ref', default=None,
                   help='Root action to begin analysis.'),
        cfg.StrOpt('link_trigger_ref', default='core.st2.generic.actiontrigger',
                   help='Root action to begin analysis.'),
        cfg.StrOpt('out_file', default='pipeline')
    ]
    do_register_cli_opts(cli_opts)
    config.parse_args()
    db_setup()
    rule_links = LinksAnalyzer().analyze(cfg.CONF.action_ref, cfg.CONF.link_trigger_ref)
    Grapher().generate_graph(rule_links, cfg.CONF.out_file)
def setup_app(config={}):
    LOG.info('Creating st2api: %s as OpenAPI app.', VERSION_STRING)

    is_gunicorn = config.get('is_gunicorn', False)
    if is_gunicorn:
        # Note: We need to perform monkey patching in the worker. If we do it in
        # the master process (gunicorn_config.py), it breaks tons of things
        # including shutdown
        monkey_patch()

        st2api_config.register_opts()
        # This should be called in gunicorn case because we only want
        # workers to connect to db, rabbitmq etc. In standalone HTTP
        # server case, this setup would have already occurred.
        common_setup(service='api', config=st2api_config, setup_db=True,
                     register_mq_exchanges=True,
                     register_signal_handlers=True,
                     register_internal_trigger_types=True,
                     run_migrations=True,
                     config_args=config.get('config_args', None))

    # Additional pre-run time checks
    validate_rbac_is_correctly_configured()

    router = Router(debug=cfg.CONF.api.debug, auth=cfg.CONF.auth.enable)

    spec = spec_loader.load_spec('st2common', 'openapi.yaml')
    transforms = {
        '^/api/v1/': ['/', '/v1/'],
        '^/api/v1/executions': ['/actionexecutions', '/v1/actionexecutions'],
        '^/api/exp/': ['/exp/']
    }
    router.add_spec(spec, transforms=transforms)

    app = router.as_wsgi

    app = CorsMiddleware(app)
    app = LoggingMiddleware(app, router)
    app = ErrorHandlingMiddleware(app)
    app = RequestIDMiddleware(app)

    return app
def setup_app(config=None):
    LOG.info('Creating st2auth: %s as Pecan app.', VERSION_STRING)

    is_gunicorn = getattr(config, 'is_gunicorn', False)
    if is_gunicorn:
        # Note: We need to perform monkey patching in the worker. If we do it in
        # the master process (gunicorn_config.py), it breaks tons of things
        # including shutdown
        monkey_patch()

        # This should be called in gunicorn case because we only want
        # workers to connect to db, rabbitmq etc. In standalone HTTP
        # server case, this setup would have already occurred.
        st2auth_config.register_opts()
        common_setup(service='auth', config=st2auth_config, setup_db=True,
                     register_mq_exchanges=False,
                     register_signal_handlers=True,
                     register_internal_trigger_types=False,
                     run_migrations=False,
                     config_args=config.config_args)

    if not config:
        # standalone HTTP server case
        config = _get_pecan_config()
    else:
        # gunicorn case
        if is_gunicorn:
            config.app = _get_pecan_config().app

    app_conf = dict(config.app)

    app = pecan.make_app(app_conf.pop('root'),
                         logging=getattr(config, 'logging', {}),
                         hooks=[
                             hooks.JSONErrorResponseHook(),
                             hooks.CorsHook(),
                             hooks.AuthHook()
                         ],
                         **app_conf)

    LOG.info('%s app created.' % __name__)

    return app
def main():
    monkey_patch()

    cli_opts = [
        cfg.StrOpt("action_ref", default=None, help="Root action to begin analysis."),
        cfg.StrOpt(
            "link_trigger_ref",
            default="core.st2.generic.actiontrigger",
            help="Root action to begin analysis.",
        ),
        cfg.StrOpt("out_file", default="pipeline"),
    ]
    do_register_cli_opts(cli_opts)
    config.parse_args()
    db_setup()
    rule_links = LinksAnalyzer().analyze(cfg.CONF.action_ref, cfg.CONF.link_trigger_ref)
    Grapher().generate_graph(rule_links, cfg.CONF.out_file)
def setup_app(config={}):
    LOG.info('Creating st2stream: %s as OpenAPI app.', VERSION_STRING)

    is_gunicorn = config.get('is_gunicorn', False)
    if is_gunicorn:
        # Note: We need to perform monkey patching in the worker. If we do it in
        # the master process (gunicorn_config.py), it breaks tons of things
        # including shutdown
        monkey_patch()

        st2stream_config.register_opts()
        # This should be called in gunicorn case because we only want
        # workers to connect to db, rabbitmq etc. In standalone HTTP
        # server case, this setup would have already occurred.
        common_setup(service='stream', config=st2stream_config, setup_db=True,
                     register_mq_exchanges=True,
                     register_signal_handlers=True,
                     register_internal_trigger_types=False,
                     run_migrations=False,
                     config_args=config.get('config_args', None))

    router = Router(debug=cfg.CONF.stream.debug, auth=cfg.CONF.auth.enable,
                    is_gunicorn=is_gunicorn)

    spec = spec_loader.load_spec('st2common', 'openapi.yaml.j2')
    transforms = {'^/stream/v1/': ['/', '/v1/']}
    router.add_spec(spec, transforms=transforms)

    app = router.as_wsgi

    # Order is important. Check middleware for detailed explanation.
    app = StreamingMiddleware(app)
    app = ErrorHandlingMiddleware(app)
    app = CorsMiddleware(app)
    app = LoggingMiddleware(app, router)
    app = RequestIDMiddleware(app)

    return app
def setup_app(config={}):
    LOG.info('Creating st2auth: %s as OpenAPI app.', VERSION_STRING)

    is_gunicorn = config.get('is_gunicorn', False)
    if is_gunicorn:
        # Note: We need to perform monkey patching in the worker. If we do it in
        # the master process (gunicorn_config.py), it breaks tons of things
        # including shutdown
        monkey_patch()

        # This should be called in gunicorn case because we only want
        # workers to connect to db, rabbitmq etc. In standalone HTTP
        # server case, this setup would have already occurred.
        st2auth_config.register_opts()
        common_setup(service='auth', config=st2auth_config, setup_db=True,
                     register_mq_exchanges=False,
                     register_signal_handlers=True,
                     register_internal_trigger_types=False,
                     run_migrations=False,
                     config_args=config.get('config_args', None))

    # Additional pre-run time checks
    validate_auth_backend_is_correctly_configured()

    router = Router(debug=cfg.CONF.auth.debug)

    spec = spec_loader.load_spec('st2common', 'openapi.yaml.j2')
    transforms = {
        '^/auth/v1/': ['/', '/v1/']
    }
    router.add_spec(spec, transforms=transforms)

    app = router.as_wsgi

    # Order is important. Check middleware for detailed explanation.
    app = ErrorHandlingMiddleware(app)
    app = CorsMiddleware(app)
    app = LoggingMiddleware(app, router)
    app = RequestIDMiddleware(app)

    return app
def setup_app(config=None):
    LOG.info('Creating st2auth: %s as Pecan app.', VERSION_STRING)

    is_gunicorn = getattr(config, 'is_gunicorn', False)
    if is_gunicorn:
        # Note: We need to perform monkey patching in the worker. If we do it in
        # the master process (gunicorn_config.py), it breaks tons of things
        # including shutdown
        monkey_patch()

        # This should be called in gunicorn case because we only want
        # workers to connect to db, rabbitmq etc. In standalone HTTP
        # server case, this setup would have already occurred.
        st2auth_config.register_opts()
        common_setup(service='auth', config=st2auth_config, setup_db=True,
                     register_mq_exchanges=False,
                     register_signal_handlers=True,
                     register_internal_trigger_types=False,
                     run_migrations=False,
                     config_args=config.config_args)

    if not config:
        # standalone HTTP server case
        config = _get_pecan_config()
    else:
        # gunicorn case
        if is_gunicorn:
            config.app = _get_pecan_config().app

    app_conf = dict(config.app)

    app = pecan.make_app(
        app_conf.pop('root'),
        logging=getattr(config, 'logging', {}),
        hooks=[hooks.JSONErrorResponseHook(), hooks.CorsHook()],
        **app_conf
    )

    LOG.info('%s app created.' % __name__)

    return app
from oslo_config import cfg
from eventlet import wsgi

from st2common import log as logging
from st2common.service_setup import setup as common_setup
from st2common.service_setup import teardown as common_teardown
from st2common.util.monkey_patch import monkey_patch
from st2common.constants.auth import VALID_MODES
from st2auth import config
config.register_opts()
from st2auth import app

__all__ = [
    'main'
]

monkey_patch()

LOG = logging.getLogger(__name__)


def _setup():
    common_setup(service='auth', config=config, setup_db=True,
                 register_mq_exchanges=False,
                 register_signal_handlers=True,
                 register_internal_trigger_types=False,
                 run_migrations=False)

    if cfg.CONF.auth.mode not in VALID_MODES:
        raise ValueError('Valid modes are: %s' % (','.join(VALID_MODES)))


def _run_server():
    host = cfg.CONF.auth.host
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import print_function

# NOTE: We need to perform monkeypatch before importing ssl module otherwise tests will fail.
# See https://github.com/StackStorm/st2/pull/4834 for details
from st2common.util.monkey_patch import monkey_patch

monkey_patch()

try:
    import simplejson as json
except ImportError:
    import json

import os
import os.path
import sys
import shutil
import logging

import six
import eventlet
import psutil
def main():
    monkey_patch()

    cli_opts = [
        cfg.IntOpt(
            "rate",
            default=100,
            help="Rate of trigger injection measured in instances per sec."
            + " Assumes a default exponential distribution in time so arrival is poisson.",
        ),
        cfg.ListOpt(
            "triggers",
            required=False,
            help="List of triggers for which instances should be fired."
            + " Uniform distribution will be followed if there is more than one trigger.",
        ),
        cfg.StrOpt(
            "schema_file",
            default=None,
            help="Path to schema file defining trigger and payload.",
        ),
        cfg.IntOpt("duration", default=60, help="Duration of stress test in seconds."),
        cfg.BoolOpt(
            "max-throughput",
            default=False,
            help='If True, "rate" argument will be ignored and this script will try to '
            "saturate the CPU and achieve max utilization.",
        ),
    ]
    do_register_cli_opts(cli_opts)
    config.parse_args()

    # Get config values
    triggers = cfg.CONF.triggers
    trigger_payload_schema = {}

    if not triggers:
        if (
            cfg.CONF.schema_file is None
            or cfg.CONF.schema_file == ""
            or not os.path.exists(cfg.CONF.schema_file)
        ):
            print(
                'Either "triggers" need to be provided or a schema file containing'
                + " triggers should be provided."
            )
            return
        with open(cfg.CONF.schema_file) as fd:
            trigger_payload_schema = yaml.safe_load(fd)
            triggers = list(trigger_payload_schema.keys())
            print("Triggers=%s" % triggers)

    rate = cfg.CONF.rate
    rate_per_trigger = int(rate / len(triggers))
    duration = cfg.CONF.duration
    max_throughput = cfg.CONF.max_throughput

    if max_throughput:
        rate = 0
        rate_per_trigger = 0

    dispatcher_pool = eventlet.GreenPool(len(triggers))

    for trigger in triggers:
        payload = trigger_payload_schema.get(trigger, {})
        dispatcher_pool.spawn(
            _inject_instances,
            trigger,
            rate_per_trigger,
            duration,
            payload=payload,
            max_throughput=max_throughput,
        )
        eventlet.sleep(random.uniform(0, 1))
    dispatcher_pool.waitall()
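The "rate" help text above states that trigger arrival follows a Poisson process, which means exponentially distributed gaps between dispatches. A minimal sketch of how a worker spawned by the pool could pace its dispatches that way is shown below; the poisson_paced_dispatch helper and its arguments are hypothetical, and the real _inject_instances implementation may differ:

import time
import random
import eventlet


def poisson_paced_dispatch(dispatch_fn, rate_per_sec, duration_sec):
    # Call dispatch_fn so that arrivals form a Poisson process: each gap is
    # drawn from an exponential distribution with mean 1 / rate_per_sec.
    if rate_per_sec <= 0:
        # A rate of 0 corresponds to the max-throughput mode, which would
        # dispatch in a tight loop instead of sleeping between calls.
        return
    end = time.time() + duration_sec
    while time.time() < end:
        dispatch_fn()
        eventlet.sleep(random.expovariate(rate_per_sec))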