def setUp(self):
    if Config.USE_REDIS:
        __db = redis.StrictRedis(Config.REDIS_HOSTNAME, Config.REDIS_PORT, db=0)
        __db.flushall()

    self.job1 = TangoJob(
        name="sample_job_1",
        vm="ilter.img",
        outputFile="sample_job_1_output",
        input=[],
        timeout=30,
        notifyURL="notifyMeUrl",
        maxOutputFileSize=4096,
    )
    self.job2 = TangoJob(
        name="sample_job_2",
        vm="ilter.img",
        outputFile="sample_job_2_output",
        input=[],
        timeout=30,
        notifyURL="notifyMeUrl",
        maxOutputFileSize=4096,
    )
    self.jobQueue = JobQueue(None)
    self.jobQueue.reset()
    self.jobId1 = self.jobQueue.add(self.job1)
    self.jobId2 = self.jobQueue.add(self.job2)
def __init__(self):
    self.daemon = True

    vmms = None
    if Config.VMMS_NAME == "tashiSSH":
        from vmms.tashiSSH import TashiSSH
        vmms = TashiSSH()
    elif Config.VMMS_NAME == "ec2SSH":
        from vmms.ec2SSH import Ec2SSH
        vmms = Ec2SSH()
    elif Config.VMMS_NAME == "localDocker":
        from vmms.localDocker import LocalDocker
        vmms = LocalDocker()
    elif Config.VMMS_NAME == "distDocker":
        from vmms.distDocker import DistDocker
        vmms = DistDocker()

    self.preallocator = Preallocator({Config.VMMS_NAME: vmms})
    self.jobQueue = JobQueue(self.preallocator)
    if not Config.USE_REDIS:
        # Creates a local JobManager if there is no persistent
        # memory between processes. Otherwise, JobManager will
        # be initiated separately.
        JobManager(self.jobQueue).start()

    logging.basicConfig(
        filename=Config.LOGFILE,
        format="%(levelname)s|%(asctime)s|%(name)s|%(message)s",
        level=Config.LOGLEVEL,
    )
    self.start_time = time.time()
    self.log = logging.getLogger("TangoServer")
    self.log.info("Starting Tango server")
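# A minimal sketch of the Config attributes read by the constructors and the
# stand-alone JobManager script in this section. The attribute names are the
# ones referenced in the code above and below; the concrete values here are
# illustrative assumptions, not taken from the project.
import logging

class Config:
    VMMS_NAME = "localDocker"   # one of: localSSH, tashiSSH, ec2SSH, localDocker, distDocker
    USE_REDIS = False           # when True, JobManager is started as a separate process
    REDIS_HOSTNAME = "localhost"
    REDIS_PORT = 6379
    LOGFILE = "tango.log"       # hypothetical log path
    LOGLEVEL = logging.DEBUG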
def setUp(self):
    self.app = www.app.test_client()
    try:
        os.remove(quePath)
    except OSError:
        pass
    self.jobQueue = JobQueue(quePath)
def setUp(self):
    try:
        os.remove(quePath)
    except OSError:
        pass
    self.que = JobQueue(quePath)
    self.jobProcess = JobProcess(quePath)
def setUp(s):
    try:
        # os.remove(quePath)
        os.remove(serverQueuePath)
    except OSError:
        pass
    www.app.config['UNIT_TEST'] = True
    s.app = www.app.test_client()
    s.que = JobQueue(quePath)
def getStatus(id, queuePath):
    # Retrieve the status and result of the given job ID.
    # @param id: the job ID
    # @param queuePath: the job queue path
    # @returns: a dict of the form: {'status': <status>, 'result': <dict>}
    #     where result is None if the job is not found;
    #     only Success and Error may have an optional result; if
    #     there is no result, no result property is returned
    statusResult = JobQueue(queuePath).getStatus(id)
    if statusResult is None:
        raise ErrorResp('unknown job ID of: ' + str(id))
    return statusResult
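# Hedged usage sketch for getStatus() above. The job ID and queue path are
# hypothetical; the shape of the returned dict follows the comment on
# getStatus(): {'status': <status>, 'result': <dict>}, with 'result' present
# only for Success and Error.
statusResult = getStatus(42, '/tmp/jobQueue.db')   # hypothetical id and path
print(statusResult['status'])                      # e.g. 'Success' or 'Error'
print(statusResult.get('result'))                  # None when no result was stored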
def __init__(self):
    vmms = None
    if Config.VMMS_NAME == "localSSH":
        from vmms.localSSH import LocalSSH
        vmms = LocalSSH()
    elif Config.VMMS_NAME == "tashiSSH":
        from vmms.tashiSSH import TashiSSH
        vmms = TashiSSH()
    elif Config.VMMS_NAME == "ec2SSH":
        from vmms.ec2SSH import Ec2SSH
        vmms = Ec2SSH()
    elif Config.VMMS_NAME == "localDocker":
        from vmms.localDocker import LocalDocker
        vmms = LocalDocker()

    self.vmms = {Config.VMMS_NAME: vmms}
    self.preallocator = Preallocator(self.vmms)
    self.queue = JobQueue(self.preallocator)
    if not Config.USE_REDIS:
        # Creates a local JobManager if there is no persistent
        # memory between processes. Otherwise, JobManager will
        # be initiated separately.
        JobManager(self.queue, self.vmms, self.preallocator)
    self.tango = TangoServer(self.queue, self.preallocator, self.vmms)

    logging.basicConfig(
        filename=self.LOGFILE,
        format="%(levelname)s|%(asctime)s|%(name)s|%(message)s",
        level=Config.LOGLEVEL,
    )
    logging.getLogger('boto').setLevel(logging.INFO)
    self.log = logging.getLogger("TangoREST")
    self.log.info("Starting RESTful Tango server")
    self.status = Status()
def add(email, operation, parms, ctx):
    # Add a job to the tail end of the job queue.
    # @param email: email/username requesting the job
    # @param operation: job operation to run; the python module that
    #     contains the calcMain() function should be in the
    #     file, <operation>_www.py
    # @param parms: parameters as a python dict to be passed to
    #     <operation>_www.py.calcMain()
    # @param ctx: the job context holding information for the postCalc
    # @returns: a dict with the job status, job ID, and job status URL

    # Extract any doNotEmail flag.
    doNotEmail = parms.pop('doNotEmail', None)

    if 'map' in parms:
        ctx.map = parms['map']
    elif 'mapId' in parms:
        ctx.map = parms['mapId']
    if email is not None:
        ctx.email = email

    packedTask = _packTask(operation, parms, ctx)
    queuePath = ctx.app.jobQueuePath
    jobId = JobQueue(queuePath).add(id, packedTask, email, doNotEmail)

    # Get the status of the job just added to the queue.
    result = getStatus(jobId, queuePath)

    # Run the job now.
    if not ctx.app.unitTest:
        _runNow(jobId, ctx.app.jobProcessPath, queuePath)

    # Return the id and status.
    return {
        'status': 'InJobQueue',
        'jobId': jobId,
        'jobStatusUrl': ctx.app.jobStatusUrl + str(jobId),
    }
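# Hedged usage sketch for add() above. The ctx fields mirror the attributes
# add() actually reads (ctx.app.jobQueuePath, ctx.app.jobProcessPath,
# ctx.app.jobStatusUrl, ctx.app.unitTest), but the concrete values and the
# 'overlayNodes' operation name are hypothetical.
from types import SimpleNamespace

ctx = SimpleNamespace(app=SimpleNamespace(
    jobQueuePath='/tmp/jobQueue.db',    # hypothetical queue file
    jobProcessPath='jobProcess.py',     # hypothetical worker script
    jobStatusUrl='/jobStatus/',         # hypothetical status URL prefix
    unitTest=True,                      # skip _runNow() in this sketch
))
response = add('user@example.com', 'overlayNodes', {'map': 'someMap'}, ctx)
print(response['jobId'], response['status'])    # e.g. 1 InJobQueue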
def main(args):
    queuePath = args[0]
    id = int(args[1])

    # TODO these should be wrapped with a try-except because any errors here
    # will not be reported in the server log.
    jobProcess = JobProcess(queuePath)
    operation, parms, ctx = jobProcess.unpackTask(jobProcess.queue.getTask(id))

    try:
        status, result = jobProcess.run(id, operation, parms, ctx)
    except Exception as e:
        status = 'Error'
        result = _formatError(str(e), traceback.format_exc(100), operation, parms)
    except:
        status = 'Error'
        result = _formatError(None, traceback.format_exc(100), operation, parms)

    # Set the completion status.
    JobQueue(queuePath).setResult(id, status, result, ctx, operation)
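# Hedged invocation sketch for main() above: args[0] is the queue path and
# args[1] is the job ID, so the job-process script is presumably launched as
# a separate process with those two positional arguments. The script name and
# values below are hypothetical.
import sys

if __name__ == '__main__':
    # e.g. python jobProcess.py /tmp/jobQueue.db 42
    main(sys.argv[1:])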
def __init__(s, queuePath):
    s.queuePath = queuePath
    s._connection_cache = {}
    s.queue = JobQueue(queuePath)
if __name__ == "__main__":
    if not Config.USE_REDIS:
        print(
            "You need to have Redis running to be able to initiate a "
            "stand-alone JobManager"
        )
    else:
        vmms = None
        if Config.VMMS_NAME == "localSSH":
            from vmms.localSSH import LocalSSH
            vmms = LocalSSH()
        elif Config.VMMS_NAME == "tashiSSH":
            from vmms.tashiSSH import TashiSSH
            vmms = TashiSSH()
        elif Config.VMMS_NAME == "ec2SSH":
            from vmms.ec2SSH import Ec2SSH
            vmms = Ec2SSH()
        elif Config.VMMS_NAME == "localDocker":
            from vmms.localDocker import LocalDocker
            vmms = LocalDocker()

        vmms = {Config.VMMS_NAME: vmms}
        preallocator = Preallocator(vmms)
        queue = JobQueue(preallocator)
        JobManager(queue, vmms, preallocator)
        print("Starting the stand-alone Tango JobManager")
def getAll(queuePath):
    # Dump all jobs in the queue.
    # @param queuePath: the job queue path
    # @returns: an array of jobs in an object
    return {'jobs': JobQueue(queuePath).getAll()}
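# Hedged usage sketch for getAll() above; the queue path is hypothetical and
# the 'jobs' key comes from the return statement in getAll().
allJobs = getAll('/tmp/jobQueue.db')
for job in allJobs['jobs']:
    print(job)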