def _prepareDB(self):
    # since the DBMgr instance will be replicated across objects,
    # we just set it as None for this one.
    # first, store the server address - this wouldn't normally be needed,
    # but the tests won't work otherwise (as the DB is _not_ the default one)
    hostname, port = DBMgr._instance._db.storage._addr
    DBMgr.setInstance(DBMgr(hostname, port))
    self._dbi = DBMgr.getInstance()
def _start(args):
    _setup(args)

    running = _check_running()

    if not args.force and running:
        raise Exception("The daemon seems to be already running (consider -f?)")

    if hasattr(args, 'standalone') and args.standalone:
        SchedulerApp(args).run()
    else:
        pid = os.fork()
        if pid:
            # parent process: report the child PID and return
            print pid
            return
        else:
            # child process: drop the inherited DB connection and run the scheduler
            DBMgr.setInstance(None)
            SchedulerApp(args).run()

    return 0
def main():
    global scenarios, results

    #logger = logging.getLogger('')
    #handler = logging.StreamHandler()
    #logger.addHandler(handler)
    #logger.setLevel(logging.DEBUG)

    # spin up a temporary ZEO server backed by a scratch directory
    dirpath = tempfile.mkdtemp()
    server = TestZEOServer(12355, os.path.join(dirpath, 'data.fs'), 'localhost')
    server.start()

    DBMgr.setInstance(DBMgr(hostname='localhost', port=12355))

    # benchmark scenarios: number of workers x submission batch size
    scenarios = list((nworkers, sbatch) for nworkers in range(1, 10)
                     for sbatch in range(100, 1100, 100))

    dbi = DBMgr.getInstance()
    dbi.startRequest()

    runTests('pcuds86.cern.ch', 80, scenarios)

    dbi.abort()
    server.shutdown()
    DBMgr.setInstance(None)

    # dump one (scenario parameters, result) row per line to a CSV file
    with open('/tmp/buploader.csv', 'w') as f:
        csvfile = csv.writer(f)
        for params, result in results.iteritems():
            csvfile.writerow(list(params) + [result])
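
# A minimal entry-point guard, assumed here so the benchmark can be invoked
# directly as a script; it is not part of the original listing.
if __name__ == '__main__':
    main()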