Example No. 1
def export_stubs():
    parser = ArgumentParser(description="Export scenario stubs")
    parser.add_argument('-s',
                        '--scenario',
                        dest='scenario',
                        help="scenario name")
    parser.add_argument('-p',
                        '--static-path-dir',
                        dest='static_path',
                        default=None,
                        help="Path to static dir to export files. If not "
                        "specified the output will placed in a tmp dir.")

    args = parser.parse_args()
    scenario = args.scenario
    if args.static_path:
        request = DummyRequestHandler(static_path=args.static_path)
    else:
        request = DummyRequestHandler()
    init_mongo()
    slave, master = start_redis({})
    response = api.export_stubs(request, scenario)
    print json.dumps(response,
                     sort_keys=True,
                     indent=4,
                     separators=(',', ': '))
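Since export_stubs() takes no parameters and reads its options with argparse, one way to drive it outside the installed console script is to patch sys.argv before calling it. A minimal sketch for illustration only; the scenario name and export directory below are placeholders, not values from the project:

import sys

# Hypothetical invocation: export a scenario's stubs to a fixed directory
# instead of a temp dir. Both argument values are placeholders.
sys.argv = ['export_stubs', '--scenario', 'my_scenario',
            '--static-path-dir', '/tmp/exported_stubs']
export_stubs()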
Example No. 2
def list_stubs_core(host, scenario):
    init_mongo()
    response = api.list_stubs(DummyRequestHandler(), scenario, host)
    if response:
        result = json.dumps(response, sort_keys=True, indent=4,
                            separators=(',', ': '))
    else:
        result = 'no stubs found'
    return result
Example No. 3
def list_stubs_core(host, scenario):
    init_mongo()
    response = api.list_stubs(DummyRequestHandler(), scenario, host)
    if response:
        result = json.dumps(response,
                            sort_keys=True,
                            indent=4,
                            separators=(',', ': '))
    else:
        result = 'no stubs found'
    return result
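Unlike the argparse-based console commands above, list_stubs_core() is a plain function, so it can be called directly once Mongo is reachable. A minimal usage sketch; the host and scenario name are placeholders:

# Prints the stubs as pretty-printed JSON, or the string 'no stubs found'.
print list_stubs_core('localhost', 'first')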
Example No. 4
def stub_count():
    parser = ArgumentParser(description="Scenario stub count")
    parser.add_argument('--host', dest='host', default='localhost',
                        help="hostname")
    parser.add_argument('-s', '--scenario', dest='scenario',
                        help="scenario name")
    args = parser.parse_args()
    init_mongo()
    print api.stub_count(args.host, args.scenario)
Example No. 5
def stub_count():
    parser = ArgumentParser(description="Scenario stub count")
    parser.add_argument('--host',
                        dest='host',
                        default='localhost',
                        help="hostname")
    parser.add_argument('-s',
                        '--scenario',
                        dest='scenario',
                        help="scenario name")
    args = parser.parse_args()
    init_mongo()
    print api.stub_count(args.host, args.scenario)
Example No. 6
    def get_app(self):
        from tornado.ioloop import IOLoop
        from stubo.service.run_stubo import TornadoManager
        from stubo.utils import init_mongo, start_redis, init_ext_cache
        self.testdb = testdb_name()

        self.cfg.update({
            'redis.host': '127.0.0.1',
            'redis.port': 6379,
            'redis.db': 9,
            'redis_master.host': '127.0.0.1',
            'redis_master.port': 6379,
            'redis_master.db': 9,
            'request_cache_limit': 10,
        })

        self.db = init_mongo({'tz_aware': True, 'db': self.testdb})
        args = {'capped': True, 'size': 100000}
        self.db.create_collection("tracker", **args)
        self.db.tracker.create_index('start_time', -1)

        # install() asserts that it has not already been initialised, so set it directly
        #self.io_loop.install()
        IOLoop._instance = self.io_loop
        tm = TornadoManager(os.environ.get('STUBO_CONFIG_FILE_PATH'))
        self.redis_server, _ = start_redis(self.cfg)
        tm.cfg['ext_cache'] = init_ext_cache(self.cfg)
        tm.cfg['mongo.db'] = self.testdb
        tm.cfg.update(self.cfg)
        app = tm.get_app()
        self.app = app
        from concurrent.futures import ProcessPoolExecutor
        self.app.settings['process_executor'] = ProcessPoolExecutor()
        return app
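This get_app() (and the variant in Example No. 12 below) appears to follow the hook that tornado.testing.AsyncHTTPTestCase defines: the test framework calls get_app() once to build the application under test and supplies self.io_loop for it. A hedged sketch of how such a fixture is usually declared; the class name is hypothetical:

import tornado.testing

class StuboIntegrationTest(tornado.testing.AsyncHTTPTestCase):

    def get_app(self):
        # AsyncHTTPTestCase calls this once to build the app under test and
        # serves it, so self.fetch('/path') in a test method hits this app.
        pass  # body as in the example above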
Example No. 7
def create_tracker_collection():
    parser = ArgumentParser(description="Create tracker collection")
    parser.add_argument('-s',
                        '--size',
                        default=1000000000,
                        dest='size',
                        help="size of the collection in bytes, default is 1GB")
    parser.add_argument(
        '-c',
        '--config',
        dest='config',
        help='Path to configuration file (defaults to $CWD/etc/dev.ini)',
        metavar='FILE')

    args = parser.parse_args()
    size = int(args.size)
    config = args.config or get_default_config()
    logging.config.fileConfig(config)
    db = init_mongo()
    log.info('creating tracker collection: size={0}b in db={1}'.format(
        size, db.name))
    args = {'capped': True, 'size': size}
    try:
        db.create_collection("tracker", **args)
    except CollectionInvalid as e:
        log.fatal(e)
        sys.exit(-1)
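pymongo raises CollectionInvalid when create_collection() is asked to create a collection that already exists, which is why the script logs the error and exits instead of silently reusing an old, possibly uncapped, tracker collection. A small sketch of that behaviour, assuming a local mongod and a placeholder database name:

from pymongo import MongoClient
from pymongo.errors import CollectionInvalid

db = MongoClient()['scratch_db']    # placeholder database
db.create_collection('tracker', capped=True, size=100000)
try:
    db.create_collection('tracker', capped=True, size=100000)
except CollectionInvalid as e:
    print 'second attempt fails: {0}'.format(e)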
Example No. 8
    def start_server(self):
        """Make Tornado app, start server and Tornado ioloop.
        """
        dbenv = default_env.copy()
        dbenv.update((k[6:], coerce_mongo_param(k[6:], v)) for k, v in self.cfg.iteritems() if k.startswith("mongo."))
        log.debug("mongo params: {0}".format(dbenv))
        retry_count = int(self.cfg.get("retry_count", 10))
        retry_interval = int(self.cfg.get("retry_interval", 10))
        # getting database
        mongo_client = None
        for i in range(retry_count):
            try:
                mongo_client = init_mongo(dbenv)
                break
            except Exception as ex:
                log.warn("mongo not available, try again in {0} secs. Error: {1}".format(retry_interval, ex))
                time.sleep(retry_interval)
        if not mongo_client:
            log.critical("Unable to connect to mongo, exiting ...")
            sys.exit(1)
        log.info("mongo server_info: {0}".format(mongo_client.connection.server_info()))

        # ensure tracker and scenario_stub indexing
        create_tracker_collection(mongo_client)
        ensure_scenario_stub_indexes(mongo_client)

        slave, master = start_redis(self.cfg)
        self.cfg["is_cluster"] = False
        if slave != master:
            log.info("redis master is not the same as the slave")
            self.cfg["is_cluster"] = True
        self.cfg["ext_cache"] = init_ext_cache(self.cfg)
        tornado_app = self.get_app()
        log.info('Started with "{0}" config'.format(tornado_app.settings))

        server = tornado.httpserver.HTTPServer(tornado_app)
        server.conn_params.decompress = self.cfg["decompress_request"]
        tornado_port = self.cfg["tornado.port"]
        try:
            server.bind(tornado_port)
        except Exception:
            # see http://stackoverflow.com/questions/16153804/tornado-socket-error-on-arm
            server.bind(tornado_port, "0.0.0.0")
        server.start(self.cfg["num_processes"])

        max_process_workers = self.cfg.get("max_process_workers")
        if max_process_workers:
            max_process_workers = int(max_process_workers)
        tornado_app.settings["process_executor"] = ProcessPoolExecutor(max_process_workers)
        log.info("started with {0} worker processes".format(tornado_app.settings["process_executor"]._max_workers))

        cmd_queue = InternalCommandQueue()
        cmd_queue_poll_interval = self.cfg.get("cmd_queue_poll_interval", 60 * 1000)
        tornado.ioloop.PeriodicCallback(cmd_queue.process, cmd_queue_poll_interval).start()
        tornado.ioloop.IOLoop.instance().start()
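Everything start_server() needs comes out of self.cfg: the mongo.*-prefixed keys are stripped of their prefix and passed to init_mongo(), and the remaining keys drive the retry loop, the HTTP server and the worker pool. A hedged sketch of such a cfg with placeholder values; the real values come from the project's .ini configuration:

cfg = {
    'mongo.host': 'localhost',             # forwarded to init_mongo() as 'host'
    'mongo.port': 27017,
    'retry_count': 10,                     # mongo connection attempts
    'retry_interval': 10,                  # seconds between attempts
    'decompress_request': True,
    'tornado.port': 8001,
    'num_processes': 1,
    'max_process_workers': 4,
    'cmd_queue_poll_interval': 60 * 1000,  # milliseconds
}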
Example No. 9
    def start_server(self):
        """Make Tornado app, start server and Tornado ioloop.
        """
        dbenv = default_env.copy()
        dbenv.update((k[6:], coerce_mongo_param(k[6:], v)) for k, v in
                     self.cfg.iteritems() if k.startswith('mongo.'))
        log.debug('mongo params: {0}'.format(dbenv))
        retry_count = int(self.cfg.get('retry_count', 10))
        retry_interval = int(self.cfg.get('retry_interval', 10))
        # getting database
        mongo_client = None
        for i in range(retry_count):
            try:
                mongo_client = init_mongo(dbenv)
                break
            except Exception as ex:
                log.warn('mongo not available, try again in {0} secs. Error: {1}'.format(retry_interval, ex))
                time.sleep(retry_interval)
        if not mongo_client:
            log.critical('Unable to connect to mongo, exiting ...')
            sys.exit(1)
        log.info('mongo server_info: {0}'.format(
            mongo_client.connection.server_info()))

        slave, master = start_redis(self.cfg)
        self.cfg['is_cluster'] = False
        if slave != master:
            log.info('redis master is not the same as the slave')
            self.cfg['is_cluster'] = True
        self.cfg['ext_cache'] = init_ext_cache(self.cfg)
        tornado_app = self.get_app()
        log.info('Started with "{0}" config'.format(tornado_app.settings))

        server = tornado.httpserver.HTTPServer(tornado_app)
        server.conn_params.decompress = self.cfg['decompress_request']
        tornado_port = self.cfg['tornado.port']
        try:
            server.bind(tornado_port)
        except Exception:
            # see http://stackoverflow.com/questions/16153804/tornado-socket-error-on-arm    
            server.bind(tornado_port, '0.0.0.0')
        server.start(self.cfg['num_processes'])

        max_process_workers = self.cfg.get('max_process_workers')
        if max_process_workers:
            max_process_workers = int(max_process_workers)
        tornado_app.settings['process_executor'] = ProcessPoolExecutor(max_process_workers)
        log.info('started with {0} worker processes'.format(tornado_app.settings['process_executor']._max_workers))

        cmd_queue = InternalCommandQueue()
        cmd_queue_poll_interval = self.cfg.get('cmd_queue_poll_interval',
                                               60 * 1000)
        tornado.ioloop.PeriodicCallback(cmd_queue.process,
                                        cmd_queue_poll_interval).start()
        tornado.ioloop.IOLoop.instance().start()
Example No. 10
    def start_server(self):
        """Make Tornado app, start server and Tornado ioloop.
        """
        dbenv = default_env.copy()
        dbenv.update((k[6:], coerce_mongo_param(k[6:], v)) for k, v in
                     self.cfg.iteritems() if k.startswith('mongo.'))
        log.debug('mongo params: {0}'.format(dbenv))
        retry_count = int(self.cfg.get('retry_count', 10))
        retry_interval = int(self.cfg.get('retry_interval', 10))
        mongo_client = None
        for i in range(retry_count):
            try:
                mongo_client = init_mongo(dbenv)
                break
            except Exception:
                log.warn('mongo not available, try again in {0} '
                         'secs'.format(retry_interval))
                time.sleep(retry_interval)
        if not mongo_client:
            log.critical('Unable to connect to mongo, exiting ...')
            sys.exit(1)
        log.info('mongo server_info: {0}'.format(
            mongo_client.connection.server_info()))
        slave, master = start_redis(self.cfg)
        self.cfg['is_cluster'] = False
        if slave != master:
            log.info('redis master is not the same as the slave')
            self.cfg['is_cluster'] = True
        self.cfg['ext_cache'] = init_ext_cache(self.cfg)
        tornado_app = self.get_app()
        log.info('Started with "{0}" config'.format(tornado_app.settings))

        server = tornado.httpserver.HTTPServer(tornado_app)
        server.conn_params.decompress = self.cfg['decompress_request']
        tornado_port = self.cfg['tornado.port']
        try:
            server.bind(tornado_port)
        except Exception:
            # see http://stackoverflow.com/questions/16153804/tornado-socket-error-on-arm
            server.bind(tornado_port, '0.0.0.0')
        server.start(self.cfg['num_processes'])

        max_process_workers = self.cfg.get('max_process_workers')
        if max_process_workers:
            max_process_workers = int(max_process_workers)
        tornado_app.settings['process_executor'] = ProcessPoolExecutor(max_process_workers)
        log.info('started with {0} worker processes'.format(tornado_app.settings['process_executor']._max_workers))

        cmd_queue = InternalCommandQueue()
        cmd_queue_poll_interval = self.cfg.get('cmd_queue_poll_interval',
                                               60 * 1000)
        tornado.ioloop.PeriodicCallback(cmd_queue.process,
                                        cmd_queue_poll_interval).start()
        tornado.ioloop.IOLoop.instance().start()
Example No. 11
def export_stubs():
    parser = ArgumentParser(
        description="Export scenario stubs"
    )
    parser.add_argument('-s', '--scenario', dest='scenario',
                        help="scenario name")
    parser.add_argument('-p', '--static-path-dir', dest='static_path',
                        default=None, help="Path to static dir to export files. If not "
                                           "specified, the output will be placed in a tmp dir.")

    args = parser.parse_args()
    scenario = args.scenario
    if args.static_path:
        request = DummyRequestHandler(static_path=args.static_path)
    else:
        request = DummyRequestHandler()
    init_mongo()
    slave, master = start_redis({})
    response = api.export_stubs(request, scenario)
    print json.dumps(response, sort_keys=True, indent=4, separators=(',', ': '))
Example No. 12
    def get_app(self):
        from tornado.ioloop import IOLoop
        from stubo.service.run_stubo import TornadoManager
        from stubo.utils import init_mongo, start_redis, init_ext_cache
        import motor

        self.testdb = testdb_name()

        self.cfg.update({
            'redis.host': '127.0.0.1',
            'redis.port': 6379,
            'redis.db': 9,
            'redis_master.host': '127.0.0.1',
            'redis_master.port': 6379,
            'redis_master.db': 9,
            'request_cache_limit': 10,
        })

        self.db = init_mongo({
            'tz_aware': True,
            'db': self.testdb
        })
        args = {'capped': True, 'size': 100000}
        self.db.create_collection("tracker", **args)
        self.db.tracker.create_index('start_time', -1)

        # add motor driver
        client = motor.MotorClient()
        self.mdb = client[self.testdb]
        self.cfg.update({'mdb': self.mdb})

        # install() asserts that it has not already been initialised, so set it directly
        # self.io_loop.install()
        IOLoop._instance = self.io_loop
        tm = TornadoManager(os.environ.get('STUBO_CONFIG_FILE_PATH'))
        self.redis_server, _ = start_redis(self.cfg)
        tm.cfg['ext_cache'] = init_ext_cache(self.cfg)
        tm.cfg['mongo.db'] = self.testdb
        tm.cfg.update(self.cfg)
        app = tm.get_app()
        self.app = app
        from concurrent.futures import ProcessPoolExecutor

        self.app.settings['process_executor'] = ProcessPoolExecutor()
        return app
Example No. 13
def delete_test_dbs():
    parser = ArgumentParser(
        description="Delete test databases"
    )
    parser.add_argument('-l', '--list', action='store_const', const=True,
                        dest='list_only', help="Just list the test databases.")
    args = parser.parse_args()
    list_only = args.list_only
    db_conn = init_mongo().connection
    test_dbs = [x for x in db_conn.database_names() if x.startswith('test_')]
    if list_only:
        print test_dbs
    else:
        if test_dbs:
            print 'deleting databases: {0}'.format(", ".join(test_dbs))
            for dbname in test_dbs:
                db_conn.drop_database(dbname)
            print 'deleted databases'
        else:
            print 'no test databases to delete'
Example No. 14
def create_tracker_collection():
    parser = ArgumentParser(
        description="Create tracker collection"
    )
    parser.add_argument('-s', '--size', default=1000000000,
                        dest='size', help="size of the collection in bytes, default is 1GB")
    parser.add_argument('-c', '--config', dest='config',
                        help='Path to configuration file (defaults to $CWD/etc/dev.ini)',
                        metavar='FILE')

    args = parser.parse_args()
    size = int(args.size)
    config = args.config or get_default_config()
    logging.config.fileConfig(config)
    db = init_mongo()
    log.info('creating tracker collection: size={0}b in db={1}'.format(size,
                                                                       db.name))
    args = {'capped': True, 'size': size}
    try:
        db.create_collection("tracker", **args)
    except CollectionInvalid as e:
        log.fatal(e)
        sys.exit(-1)
Example No. 15
def purge_stubs():
    # importing helper handler from testing deps
    from stubo.testing import DummyRequestHandler
    parser = ArgumentParser(
        description="Purge stubs older than given expiry date."
    )
    parser.add_argument('-l', '--list', action='store_const', const=True,
                        dest='list_only', help="Just list the stubs to delete.")
    parser.add_argument('-e', '--expiry', default=14, dest='expiry',
                        help="expiry is number of days from now (default is 14).")
    parser.add_argument('--host', default='all', dest='host',
                        help="specify the host uri to use (defaults to all)")
    parser.add_argument('-c', '--config', dest='config',
                        help='Path to configuration file (defaults to $CWD/etc/dev.ini)',
                        metavar='FILE')

    args = parser.parse_args()
    list_only = args.list_only or False
    expiry_days = args.expiry
    expiry = datetime.today().date() - timedelta(int(expiry_days))
    host = args.host
    config = args.config or get_default_config()
    logging.config.fileConfig(config)

    settings = read_config(config)
    dbenv = default_env.copy()
    dbenv.update((k[6:], coerce_mongo_param(k[6:], v)) for k, v in
                 settings.iteritems() if k.startswith('mongo.'))
    log.debug('mongo params: {0}'.format(dbenv))

    log.info('purge stubs whereby all sessions in the scenario were last used before {0}'.format(expiry))

    db_conn = init_mongo(dbenv).connection
    slave, master = start_redis(settings)
    response = list_scenarios(host)
    if 'error' in response:
        print response['error']
        sys.exit(-1)

    handler = DummyRequestHandler()
    session_handler = DummyRequestHandler()

    for scenario_key in response['data']['scenarios']:
        log.debug("*** scenario '{0}' ***".format(scenario_key))
        hostname, scenario = scenario_key.split(':')
        if host != 'all' and host != hostname:
            continue
        handler.host = hostname
        handler.request.host = '{0}:8001'.format(hostname)
        session_handler.host = hostname
        session_handler.request.host = '{0}:8001'.format(hostname)
        handler.request.arguments['scenario'] = [scenario]
        status = get_status(handler)
        if 'error' in status:
            log.warn('get_status error: {0}'.format(status['error']))
        else:
            scenario_last_used = []
            sessions = status['data']['sessions']
            for session in zip(*sessions)[0]:
                log.debug("*** -> session '{0}' ***".format(session))
                session_handler.request.arguments['session'] = [session]
                session_status = get_status(session_handler)
                if 'error' in session_status:
                    log.warn('get_status error: {0}'.format(session_status['error']))
                else:
                    last_used = session_status['data']['session'].get('last_used', '-')
                    if last_used != '-':
                        scenario_last_used.append(as_date(last_used[0:10]))

            if scenario_last_used and (max(scenario_last_used) < expiry):
                log.info("sessions in scenario '{0}' were last used '{1}' which"
                         " is before expiry date '{2}'".format(scenario_key,
                                                               max(scenario_last_used), expiry))
                if not list_only:
                    response = delete_stubs(handler, scenario_name=scenario,
                                            force=True)
                    if 'error' in response:
                        log.error('delete stubs error: {0}'.format(response['error']))
                    else:
                        log.info('deleted stubs: {0}'.format(response['data']))
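The expiry cutoff is a date, not a datetime, so the comparison with each scenario's last_used values is day-granular. A worked example with the default of 14 days:

from datetime import datetime, timedelta

# With --expiry left at its default of 14, any scenario whose sessions were all
# last used before this date becomes eligible for purging.
expiry = datetime.today().date() - timedelta(14)
print expiry    # e.g. 2015-06-01 when run on 2015-06-15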