def createJob(handler, job, rename=None):
    """Persist a job description dict as a StochKitJobWrapper datastore entity.

    handler -- request handler; supplies the user id when the job dict does
               not carry a 'user_id' key
    job     -- dict describing the job; requires 'name', 'modelName',
               'output_location', 'stdout', 'stderr'; optional simulation
               parameters are serialized into indata. Mutated in place
               ('name' on rename, 'seed' default).
    rename  -- when truthy and the name collides with an existing job of the
               same user, pick a fresh '<name>_<i>' and write it back into job

    Returns the datastore id of the new entity, or None when the name
    collides and rename is falsy.

    NOTE(review): a second, later definition of createJob in this file
    shadows this one -- confirm which version is intended to survive.
    """
    tryName = job["name"]
    userID = job['user_id'] if 'user_id' in job else handler.user.user_id()

    # Job names must be unique per user; probe 'name_1', 'name_2', ... until
    # an unused one is found.
    jobNames = [x.name for x in db.Query(StochKitJobWrapper).filter('user_id =', userID).run()]
    if job["name"] in jobNames:
        if not rename:
            return None
        i = 1
        tryName = '{0}_{1}'.format(job["name"], i)
        while tryName in jobNames:
            i += 1
            tryName = '{0}_{1}'.format(job["name"], i)

    if rename:
        job["name"] = tryName

    jobWrap = StochKitJobWrapper()
    jobWrap.user_id = userID
    jobWrap.name = job['name']

    if 'seed' not in job:
        job['seed'] = -1  # negative seed means "no fixed seed" downstream

    jobWrap.modelName = job['modelName']
    # This is probably not a good idea... (serializing raw client parameters)
    jobWrap.indata = json.dumps({k: job[k]
                                 for k in ['type', 'final_time', 'increment',
                                           'realizations', 'exec_type', 'units',
                                           'epsilon', 'threshold', 'seed']
                                 if k in job})

    if 'startTime' in job:
        jobWrap.startTime = job['startTime']

    jobWrap.resource = job['resource'] if 'resource' in job else 'Local'
    jobWrap.outData = job['output_location']

    if 'output_url' in job:
        jobWrap.outputURL = job['output_url']

    jobWrap.stdout = job['stdout']
    jobWrap.stderr = job['stderr']

    if 'output_stored' in job:
        jobWrap.output_stored = job['output_stored']

    jobWrap.put()
    return jobWrap.key().id()
def createJob(handler, job, rename=None):
    """Persist a job description dict as a StochKitJobWrapper datastore entity.

    This definition shadows an earlier createJob in the same file; it differs
    by also serializing the ODE solver tolerances (rTol, aTol, mxSteps) and
    by normalizing the resource name to lowercase.

    handler -- request handler; supplies the user id when the job dict does
               not carry a 'user_id' key
    job     -- dict describing the job; requires 'name', 'modelName',
               'output_location', 'stdout', 'stderr'; optional simulation
               parameters are serialized into indata. Mutated in place
               ('name' on rename, 'seed' default).
    rename  -- when truthy and the name collides with an existing job of the
               same user, pick a fresh '<name>_<i>' and write it back into job

    Returns the datastore id of the new entity, or None when the name
    collides and rename is falsy.
    """
    tryName = job["name"]
    userID = job['user_id'] if 'user_id' in job else handler.user.user_id()

    # Job names must be unique per user; probe 'name_1', 'name_2', ... until
    # an unused one is found.
    jobNames = [x.name for x in db.Query(StochKitJobWrapper).filter('user_id =', userID).run()]
    if job["name"] in jobNames:
        if not rename:
            return None
        i = 1
        tryName = '{0}_{1}'.format(job["name"], i)
        while tryName in jobNames:
            i += 1
            tryName = '{0}_{1}'.format(job["name"], i)

    if rename:
        job["name"] = tryName

    jobWrap = StochKitJobWrapper()
    jobWrap.user_id = userID
    jobWrap.name = job['name']

    if 'seed' not in job:
        job['seed'] = -1  # negative seed means "no fixed seed" downstream

    jobWrap.modelName = job['modelName']
    # This is probably not a good idea... (serializing raw client parameters)
    jobWrap.indata = json.dumps({k: job[k]
                                 for k in ['type', 'final_time', 'increment',
                                           'realizations', 'exec_type', 'units',
                                           'epsilon', 'rTol', 'aTol', 'mxSteps',
                                           'threshold', 'seed']
                                 if k in job})

    if 'startTime' in job:
        jobWrap.startTime = job['startTime']

    # Resource names are stored lowercased in this version.
    jobWrap.resource = job['resource'].lower() if 'resource' in job else 'local'
    jobWrap.outData = job['output_location']

    if 'output_url' in job:
        jobWrap.outputURL = job['output_url']

    jobWrap.stdout = job['stdout']
    jobWrap.stderr = job['stderr']

    if 'output_stored' in job:
        jobWrap.output_stored = job['output_stored']

    jobWrap.put()
    return jobWrap.key().id()
def runCloud(self, params):
    """Submit a StochKit simulation of the given model to the cloud backend.

    params -- dict of client simulation settings ('id', 'execType', 'jobName',
              'time', 'realizations', 'increment', 'seed', 'epsilon',
              'threshold', 'algorithm', ...). Mutated in place with the
              assembled backend arguments before submission.

    Returns the saved StochKitJobWrapper tracking the running cloud job.
    Raises Exception when the model is missing, the settings are
    inconsistent, or the backend rejects the task.
    """
    model = StochKitModelWrapper.get_by_id(params["id"]).createStochKitModel()
    if not model:
        raise Exception('Failed to retrieve the model \'{0}\' to simulate'.format(params["id"]))

    # Execute as deterministic (ODE) or stochastic (SSA/tau-leaping)?
    exec_type = params['execType'].lower()
    # BUG FIX: the old message claimed "concentration or population" although
    # the check is for deterministic/stochastic.
    if exec_type not in ["deterministic", "stochastic"]:
        raise Exception('exec_type must be deterministic or stochastic. Found \'{0}\''.format(exec_type))

    if model.units.lower() == 'concentration' and exec_type == 'stochastic':
        raise Exception('Concentration models cannot be executed Stochastically')

    executable = exec_type
    document = model.serialize()

    # Wow, what a hack: for deterministic runs of population models, halve the
    # mass-action rate of dimerization reactions (a single reactant with
    # stoichiometry 2) to match the ODE convention.
    if executable == 'deterministic' and model.units.lower() == 'population':
        model = StochMLDocument.fromString(document).toModel(model.name)
        for reactionN in model.getAllReactions():
            reaction = model.getAllReactions()[reactionN]
            if reaction.massaction:
                if len(reaction.reactants) == 1 and reaction.reactants.values()[0] == 2:
                    reaction.marate.setExpression(reaction.marate.expression + ' / 2')

    document = model.serialize()
    params['document'] = str(document)
    params['file'] = ""

    stime = params['time']
    realizations = params['realizations']
    increment = params['increment']

    # A negative seed means "pick one for me".
    if int(params['seed']) < 0:
        random.seed()
        params['seed'] = random.randint(0, 2147483647)
    seed = params['seed']

    # Assemble the argument list: end time and number of output points.
    args = ' -t ' + str(stime)
    num_output_points = str(int(float(stime) / float(increment)))
    args += ' -i ' + num_output_points

    # Algorithm, SSA or Tau-leaping?
    if executable != 'deterministic':
        params['job_type'] = 'stochkit'
        executable = params['algorithm']
        args += ' --realizations ' + str(realizations)
        # We keep all the trajectories by default. The user can select to only
        # store means and variance through the advanced options.
        if "only-moments" not in params:
            args += ' --keep-trajectories'
        if "keep-histograms" in params:
            args += ' --keep-histograms'
        args += ' --seed ' + str(seed)
    else:
        params['job_type'] = 'stochkit_ode'
        executable = "stochkit_ode.py"

    # Columns need to be labeled for the visualization page to work.
    args += ' --label'

    params['paramstring'] = executable + ' ' + args
    params['bucketname'] = self.user_data.getBucketName()
    params['user_id'] = self.user.user_id()

    # Call backendservices and execute StochKit remotely.
    service = backendservices(self.user_data)
    cloud_result = service.submit_cloud_task(params)
    if not cloud_result["success"]:
        raise Exception('Cloud execution failed: {0}'.format(cloud_result["exception"]))

    celery_task_id = cloud_result["celery_pid"]
    taskid = cloud_result["db_id"]

    # Create a StochKitJobWrapper instance tracking the remote job.
    job = StochKitJobWrapper()
    job.resource = cloud_result['resource']
    job.user_id = self.user.user_id()
    job.startTime = time.strftime("%Y-%m-%d-%H-%M-%S")
    job.name = params['jobName']
    job.modelName = model.name
    job.celeryPID = celery_task_id
    job.cloudDatabaseID = taskid
    job.indata = json.dumps({
        "type": 'StochKit2 Ensemble',
        "final_time": params['time'],
        "realizations": params['realizations'],
        "increment": params['increment'],
        "seed": params['seed'],
        "exec_type": params['execType'],
        "units": model.units.lower(),
        "epsilon": params['epsilon'],
        "threshold": params['threshold']
    })
    job.output_stored = 'True'
    job.outData = None
    job.status = 'Running'
    job.put()

    return job
def runCloud(self, params):
    """Submit a StochKit simulation of the given model to the cloud backend.

    This definition shadows an earlier runCloud in the same file; it differs
    by also recording the ODE solver tolerances (rTol, aTol, mxSteps) in the
    job's indata.

    params -- dict of client simulation settings ('id', 'execType', 'jobName',
              'time', 'realizations', 'increment', 'seed', 'epsilon',
              'rTol', 'aTol', 'mxSteps', 'threshold', 'algorithm', ...).
              Mutated in place with the assembled backend arguments before
              submission.

    Returns the saved StochKitJobWrapper tracking the running cloud job.
    Raises Exception when the model is missing, the settings are
    inconsistent, or the backend rejects the task.
    """
    model = StochKitModelWrapper.get_by_id(params["id"]).createStochKitModel()
    if not model:
        raise Exception('Failed to retrieve the model \'{0}\' to simulate'.format(params["id"]))

    # Execute as deterministic (ODE) or stochastic (SSA/tau-leaping)?
    exec_type = params['execType'].lower()
    # BUG FIX: the old message claimed "concentration or population" although
    # the check is for deterministic/stochastic.
    if exec_type not in ["deterministic", "stochastic"]:
        raise Exception('exec_type must be deterministic or stochastic. Found \'{0}\''.format(exec_type))

    if model.units.lower() == 'concentration' and exec_type == 'stochastic':
        raise Exception('Concentration models cannot be executed Stochastically')

    executable = exec_type
    document = model.serialize()

    # Wow, what a hack: for deterministic runs of population models, halve the
    # mass-action rate of dimerization reactions (a single reactant with
    # stoichiometry 2) to match the ODE convention.
    if executable == 'deterministic' and model.units.lower() == 'population':
        model = StochMLDocument.fromString(document).toModel(model.name)
        for reactionN in model.getAllReactions():
            reaction = model.getAllReactions()[reactionN]
            if reaction.massaction:
                if len(reaction.reactants) == 1 and reaction.reactants.values()[0] == 2:
                    reaction.marate.setExpression(reaction.marate.expression + ' / 2')

    document = model.serialize()
    params['document'] = str(document)
    params['file'] = ""

    stime = params['time']
    realizations = params['realizations']
    increment = params['increment']

    # A negative seed means "pick one for me".
    if int(params['seed']) < 0:
        random.seed()
        params['seed'] = random.randint(0, 2147483647)
    seed = params['seed']

    # Assemble the argument list: end time and number of output points.
    args = ' -t ' + str(stime)
    num_output_points = str(int(float(stime) / float(increment)))
    args += ' -i ' + num_output_points

    # Algorithm, SSA or Tau-leaping?
    if executable != 'deterministic':
        params['job_type'] = 'stochkit'
        executable = params['algorithm']
        args += ' --realizations ' + str(realizations)
        # We keep all the trajectories by default. The user can select to only
        # store means and variance through the advanced options.
        if "only-moments" not in params:
            args += ' --keep-trajectories'
        if "keep-histograms" in params:
            args += ' --keep-histograms'
        args += ' --seed ' + str(seed)
    else:
        params['job_type'] = 'stochkit_ode'
        executable = "stochkit_ode.py"

    # Columns need to be labeled for the visualization page to work.
    args += ' --label'

    params['paramstring'] = executable + ' ' + args
    params['bucketname'] = self.user_data.getBucketName()
    params['user_id'] = self.user.user_id()

    # Call backendservices and execute StochKit remotely.
    service = backendservices(self.user_data)
    cloud_result = service.submit_cloud_task(params)
    if not cloud_result["success"]:
        raise Exception('Cloud execution failed: {0}'.format(cloud_result["exception"]))

    celery_task_id = cloud_result["celery_pid"]
    taskid = cloud_result["db_id"]

    # Create a StochKitJobWrapper instance tracking the remote job.
    job = StochKitJobWrapper()
    job.resource = cloud_result['resource']
    job.user_id = self.user.user_id()
    job.startTime = time.strftime("%Y-%m-%d-%H-%M-%S")
    job.name = params['jobName']
    job.modelName = model.name
    job.celeryPID = celery_task_id
    job.cloudDatabaseID = taskid
    job.indata = json.dumps({
        "type": 'StochKit2 Ensemble',
        "final_time": params['time'],
        "realizations": params['realizations'],
        "increment": params['increment'],
        "seed": params['seed'],
        "exec_type": params['execType'],
        "units": model.units.lower(),
        "epsilon": params['epsilon'],
        "rTol": params['rTol'],
        "aTol": params['aTol'],
        "mxSteps": params['mxSteps'],
        "threshold": params['threshold']
    })
    job.output_stored = 'True'
    job.outData = None
    job.status = 'Running'
    job.put()

    return job
def runQsub(self, data, cluster_info): from db_models.parameter_sweep_job import ParameterSweepJobWrapper from modeleditor import StochKitModelWrapper import parametersweep_qsub logging.error("*" * 80) logging.error("simulate.runQsub() modelType={0}".format(data['execType'])) logging.error("*" * 80) modelDb = StochKitModelWrapper.get_by_id(int(data["id"])) # TODO: Ben needs to fix the following code to work directly with StochKitModelWrappers # model = StochKitModelWrapper.get_by_id(params["id"]).createStochKitModel() # # if not model: # raise Exception('Failed to retrive the model \'{0}\' to simulate'.format(params["id"])) # # # Execute as concentration or population? # exec_type = params['execType'].lower() # # if exec_type not in ["deterministic", "stochastic"]: # raise Exception('exec_type must be concentration or population. Found \'{0}\''.format(exec_type)) # # if model.units.lower() == 'concentration' and exec_type.lower() == 'stochastic': # raise Exception('Concentration models cannot be executed Stochastically' ) # # document = model.serialize() # # # Wow, what a hack # # if executable == 'deterministic' and model.units.lower() == 'population': # model = StochMLDocument.fromString(document).toModel(model.name) # # for reactionN in model.getAllReactions(): # reaction = model.getAllReactions()[reactionN] # if reaction.massaction: # if len(reaction.reactants) == 1 and reaction.reactants.values()[0] == 2: # reaction.marate.setExpression(reaction.marate.expression + ' / 2') path = os.path.abspath(os.path.dirname(__file__)) basedir = path + '/../' dataDir = tempfile.mkdtemp(dir=basedir + 'output') job = StochKitJobWrapper() job.user_id = self.user.user_id() job.startTime = time.strftime("%Y-%m-%d-%H-%M-%S") job.name = data["jobName"] #job.inData = json.dumps(data) job.indata = json.dumps({ "type" : 'StochKit2 Ensemble', "final_time" : data['time'], "realizations" : data['realizations'], "increment" : data['increment'], "seed" : data['seed'], "exec_type" : 
data['execType'], "units" : modelDb.units.lower(), "epsilon" : data['epsilon'], "rTol" : data['rTol'], "aTol" : data['aTol'], "mxSteps" : data['mxSteps'], "threshold" : data['threshold'] }) job.modelName = modelDb.name job.outData = dataDir job.status = "Pending" job.output_stored = "False" job.is_simulation = True job.resource = "qsub" try: templateData = { "name": modelDb.name, "modelType": modelDb.type, "species": modelDb.species, "parameters": modelDb.parameters, "reactions": modelDb.reactions, # "speciesSelect": data['speciesSelect'], "speciesSelect": data['selections'], # "maxTime": data['maxTime'], "maxTime": data['time'], "increment": data['increment'], # "trajectories": data['trajectories'], "trajectories": data['realizations'], "seed": data['seed'], "isSpatial": modelDb.isSpatial, "isLocal": True } if modelDb.isSpatial: try: meshWrapperDb = mesheditor.MeshWrapper.get_by_id(modelDb.spatial["mesh_wrapper_id"]) except Exception as e: logging.exception(e) logging.error("No Mesh file set. Choose one in the Mesh tab of the Model Editor") raise Exception("No Mesh file set. Choose one in the Mesh tab of the Model Editor") try: meshFileObj = fileserver.FileManager.getFile(self, meshWrapperDb.meshFileId, noFile=False) templateData["mesh"] = meshFileObj["data"] except IOError as e: logging.exception(e) logging.error("Mesh file inaccessible. Try another mesh") raise Exception("Mesh file inaccessible. 
Try another mesh") templateData['reaction_subdomain_assignments'] = modelDb.spatial["reactions_subdomain_assignments"] templateData['species_subdomain_assignments'] = modelDb.spatial["species_subdomain_assignments"] templateData['species_diffusion_coefficients'] = modelDb.spatial["species_diffusion_coefficients"] templateData['initial_conditions'] = modelDb.spatial["initial_conditions"] templateData['subdomains'] = meshWrapperDb.subdomains if data['execType'] == "stochastic": job.qsubHandle = pickle.dumps(parametersweep_qsub.stochastic(templateData, cluster_info, not_full_parameter_sweep=True)) elif data['execType'] == "deterministic": job.qsubHandle = pickle.dumps(parametersweep_qsub.deterministic(templateData, cluster_info, not_full_parameter_sweep=True)) elif data['execType'] == "spatial": job.qsubHandle = pickle.dumps(parametersweep_qsub.spatial(templateData, cluster_info, not_full_parameter_sweep=True)) else: raise Exception("Trying to runQsub on unsupported modelType {0}".format(data['modelType'])) job.put() except Exception as e: exc_info = sys.exc_info() logging.exception(e) job.status = 'Failed' try: job.delete(self) except Exception as e: pass raise exc_info[1], None, exc_info[2] return job