Beispiel #1
0
    def getMolnsState(self):
        """Collect provider/controller/worker configs for every known provider type."""
        config = molns.MOLNSConfig(db_file=self.getMolnsConfigPath())

        state = {}
        for providerType in providerToNames:
            names = providerToNames[providerType]
            provider_cfg = molns.MOLNSProvider.provider_get_config(
                name=names['providerName'],
                provider_type=providerType,
                config=config)
            controller_cfg = molns.MOLNSController.controller_get_config(
                name=names['controllerName'],
                provider_type=providerType,
                config=config)
            worker_cfg = molns.MOLNSWorkerGroup.worker_group_get_config(
                name=names['workerName'],
                provider_type=providerType,
                config=config)
            state[providerType] = {
                'provider': provider_cfg,
                'controller': controller_cfg,
                'worker': worker_cfg,
            }

        return state
Beispiel #2
0
    def __process_getJobStatus(self, service, job, number):
        """Get one job's status dict, tagging it with its row *number*."""
        try:
            # Lazily look up and cache the user's molns config on first use.
            if self.molnsConfig is None:
                wrapper = db.GqlQuery(
                    "SELECT * FROM MolnsConfigWrapper WHERE user_id = :1",
                    self.user.user_id()).get()
                if wrapper:
                    self.molnsConfig = molns.MOLNSConfig(
                        config_dir=wrapper.folder)

            status = self.getJobStatus(service, job, self.molnsConfig)
        except Exception as e:
            traceback.print_exc()
            # Build a synthetic error row so the caller still gets an entry.
            uuid = job.cloudDatabaseID if hasattr(job,
                                                  'cloudDatabaseID') else None
            status = {
                "status": 'Error: {0}'.format(e),
                "name": job.name,
                "uuid": uuid,
                "output_stored": None,
                "resource": 'Error',
                "id": job.key().id(),
            }
        status["number"] = number
        return status
Beispiel #3
0
def startMolns(providerName, password, configFilename):
    """Initialize the named EC2 provider and start the 'goat' controller."""
    config = molns.MOLNSConfig(db_file=configFilename)

    molns.MOLNSProvider.provider_initialize(providerName, config)
    molns.MOLNSProvider.provider_get_config(
        name=providerName, provider_type='EC2', config=config)
    molns.MOLNSController.start_controller(['goat'], config, password=password)
Beispiel #4
0
    def get(self, jobID=None):
        """Render the parameter-sweep visualization page for a job.

        Loads the job's JSON metadata, its captured stdout/stderr (from the
        local output directory or, failing that, from the molns cloud job
        log), and its pickled results when they are available locally.
        """
        jobID = int(jobID)

        jobDb = ParameterSweepJobWrapper.get_by_id(jobID)
        logging.info(
            "ParameterSweepVisualizationPage.get() jobID={0} path={1}".format(
                jobID, jobDb.outData))

        initialData = jobDb.getJSON()

        if jobDb.resource == 'qsub':
            initialData['stdout'] = ""
            initialData['stderr'] = ""
        else:
            try:
                with open(os.path.join(jobDb.outData, 'stdout'), 'r') as f:
                    initialData['stdout'] = f.read()
                with open(os.path.join(jobDb.outData, 'stderr'), 'r') as f:
                    initialData['stderr'] = f.read()
            except IOError as e:
                # No local logs -- fall back to pulling them from the cloud.
                molnsConfigDb = db.GqlQuery(
                    "SELECT * FROM MolnsConfigWrapper WHERE user_id = :1",
                    self.user.user_id()).get()
                initialData['data'] = {}
                if not molnsConfigDb:
                    # BUGFIX: previously fell through and dereferenced
                    # molnsConfigDb.folder on None, raising AttributeError.
                    initialData[
                        'stdout'] = 'ERROR: could not lookup molnsConfigDb'
                else:
                    molnsConfig = molns.MOLNSConfig(
                        config_dir=molnsConfigDb.folder)
                    #TODO: Check if the molns service is active
                    try:
                        log = molns.MOLNSExec.job_logs([jobDb.molnsPID],
                                                       molnsConfig)
                        initialData['stdout'] = log['msg']
                    except (IOError, molns.MOLNSException) as e:
                        initialData['stdout'] = str(e)

        if jobDb.resource == 'local' or jobDb.resource == 'qsub' or jobDb.output_stored:
            try:
                with open(os.path.join(jobDb.outData, 'results'), 'r') as f:
                    initialData['data'] = pickle.load(f)

                initialData['status'] = 'Finished'
            except IOError as e:
                # Results file missing -- render the page without data.
                initialData['data'] = {}

        self.render_response('parameter_sweep_visualization.html',
                             **{'initialData': json.dumps(initialData)})
Beispiel #5
0
    def getMolnsState(self):
        """Return the EC2 provider and controller configuration items."""
        config = molns.MOLNSConfig(db_file=os.path.join(appDir, "test.db"))

        provider_items = molns.MOLNSProvider.provider_get_config(
            name='mountain', provider_type='EC2', config=config)
        controller_items = molns.MOLNSController.controller_get_config(
            name='goat', provider_type='EC2', config=config)

        state = {
            'EC2': {
                'provider': provider_items,
                'controller': controller_items,
            }
        }
        return state
Beispiel #6
0
def startMolns(providerName, controllerName, workerName, providerType,
               password, configFilename):
    """Initialize a provider, then start its controller and worker group."""
    config = molns.MOLNSConfig(db_file=configFilename)

    molns.MOLNSProvider.provider_initialize(providerName, config)
    molns.MOLNSProvider.provider_get_config(
        name=providerName, provider_type=providerType, config=config)
    molns.MOLNSController.start_controller(
        [controllerName], config, password=password)
    molns.MOLNSWorkerGroup.start_worker_groups([workerName], config)
Beispiel #7
0
def startMolns(providerName, controllerName, workerName, providerType,
               password, configFilename):
    """Initialize a provider and bring up its controller (headless, 1 reserved
    CPU) and worker group."""
    config = molns.MOLNSConfig(db_file=configFilename)

    molns.MOLNSProvider.provider_initialize(providerName, config)
    molns.MOLNSProvider.provider_get_config(
        name=providerName, provider_type=providerType, config=config)
    molns.MOLNSController.start_controller(
        [controllerName],
        config,
        password=password,
        openWebBrowser=False,
        reserved_cpus=1)
    molns.MOLNSWorkerGroup.start_worker_groups([workerName], config)
    def stop(self, handler):
        """Delete this job's cloud-side data (molns jobs only)."""
        # TODO: Call the backend to kill and delete the job and all associated files.
        service = backendservices(handler.user_data)

        # Only molns jobs have cloud data to clean up.
        if self.resource != "molns":
            return

        molnsConfigDb = db.GqlQuery(
            "SELECT * FROM MolnsConfigWrapper WHERE user_id = :1",
            handler.user.user_id()).get()

        if not molnsConfigDb:
            return

        config = molns.MOLNSConfig(config_dir=molnsConfigDb.folder)

        # Stopping is deleting cloud data for this job type; failures are
        # logged but never propagated.
        try:
            molns.MOLNSExec.cleanup_job([self.molnsPID], config)
        except Exception as e:
            logging.info("Error while deleting cloud data: {0}".format(e))
Beispiel #9
0
    def updateMolnsState(self, state):
        """Import changed EC2 provider/controller config values from *state*."""
        if 'process' in cherrypy.session and cherrypy.session['process'][
                0].is_alive():
            raise Exception(
                'Currently running process, cannot update state while this is ongoing'
            )

        config = molns.MOLNSConfig(db_file=os.path.join(appDir, "test.db"))

        # Provider: diff submitted values against stored ones and import
        # only the entries that changed.
        stored = molns.MOLNSProvider.provider_get_config(
            name='mountain', provider_type='EC2', config=config)
        submitted = state['EC2']['provider']
        changes = {'name': 'mountain', 'type': 'EC2', 'config': {}}
        for idx in range(len(submitted)):
            if submitted[idx]['value'] != stored[idx]['value']:
                changes['config'][stored[idx]['key']] = submitted[idx]['value']
        molns.MOLNSProvider.provider_import('', config, changes)

        # Controller: same diff-and-import pattern.
        stored = molns.MOLNSController.controller_get_config(
            name='goat', provider_type='EC2', config=config)
        submitted = state['EC2']['controller']
        changes = {'name': 'goat', 'provider_name': 'mountain', 'config': {}}
        for idx in range(len(submitted)):
            if submitted[idx]['value'] != stored[idx]['value']:
                changes['config'][stored[idx]['key']] = submitted[idx]['value']
        molns.MOLNSController.controller_import('', config, changes)
Beispiel #10
0
def addWorkers(workerName, number, configFilename):
    """Add *number* workers to the named MOLNS worker group."""
    molns_config = molns.MOLNSConfig(db_file=configFilename)
    molns.MOLNSWorkerGroup.add_worker_groups([workerName, number],
                                             molns_config)
Beispiel #11
0
def rebuildMolns(providerName, configFilename):
    """Rebuild the named MOLNS provider."""
    molns_config = molns.MOLNSConfig(db_file=configFilename)
    molns.MOLNSProvider.provider_rebuild([providerName], molns_config)
Beispiel #12
0
def terminateMolns(controllerName, configFilename):
    """Terminate the named MOLNS controller."""
    molns_config = molns.MOLNSConfig(db_file=configFilename)
    molns.MOLNSController.terminate_controller([controllerName], molns_config)
Beispiel #13
0
def stopMolns(controllerName, configFilename):
    """Stop the named MOLNS controller."""
    molns_config = molns.MOLNSConfig(db_file=configFilename)
    molns.MOLNSController.stop_controller([controllerName], molns_config)
Beispiel #14
0
    def updateMolnsState(self, state):
        """Persist changed provider/controller/worker config values.

        For every provider type in *state*, compares the submitted config
        entries against the stored ones and imports only the values that
        differ.

        Raises:
            Exception: if a molns configuration process is still running.
        """
        if 'process' in self.session:
            process = MolnsConfigProcessWrapper.get_by_id(
                self.session['process'][0])

            if process is not None and process.is_alive():
                # BUGFIX: previously raised a dict literal built from the
                # undefined names `status`/`msg`, which crashed with a
                # NameError (and dicts are not raisable anyway). Raise a
                # real exception, matching the other updateMolnsState
                # variants in this file.
                raise Exception(
                    'Currently running process, cannot update state while this is ongoing'
                )

        config = molns.MOLNSConfig(db_file=self.getMolnsConfigPath())

        for providerType in state:
            providerName = providerToNames[providerType]['providerName']
            controllerName = providerToNames[providerType]['controllerName']
            workerName = providerToNames[providerType]['workerName']

            # Provider config: import only the values that changed.
            provider_conf_items = molns.MOLNSProvider.provider_get_config(
                name=providerName, provider_type=providerType, config=config)

            json_obj = {
                'name': providerName,
                'type': providerType,
                'config': {}
            }

            provider = state[providerType]['provider']

            for i in range(len(provider)):
                if provider[i]['value'] != provider_conf_items[i]['value']:
                    json_obj['config'][provider_conf_items[i]
                                       ['key']] = provider[i]['value']

            molns.MOLNSProvider.provider_import('', config, json_obj)

            # Controller config: same diff-and-import pattern.
            controller_conf_items = molns.MOLNSController.controller_get_config(
                name=controllerName, provider_type=providerType, config=config)

            controller = state[providerType]['controller']

            json_obj = {
                'name': controllerName,
                'provider_name': providerName,
                'config': {}
            }

            for i in range(len(controller)):
                if controller[i]['value'] != controller_conf_items[i]['value']:
                    json_obj['config'][controller_conf_items[i]
                                       ['key']] = controller[i]['value']

            molns.MOLNSController.controller_import('', config, json_obj)

            # Worker-group config.
            worker_conf_items = molns.MOLNSWorkerGroup.worker_group_get_config(
                name=workerName, provider_type=providerType, config=config)

            worker = state[providerType]['worker']

            json_obj = {
                'name': workerName,
                'controller_name': controllerName,
                'provider_name': providerName,
                'config': {}
            }

            for i in range(len(worker)):
                if worker[i]['value'] != worker_conf_items[i]['value']:
                    json_obj['config'][worker_conf_items[i]
                                       ['key']] = worker[i]['value']

            molns.MOLNSWorkerGroup.worker_group_import('', config, json_obj)
Beispiel #15
0
def stopMolns(providerName, configFilename):
    """Stop the 'goat' controller.

    NOTE(review): *providerName* is accepted but unused here; the controller
    name is hard-coded -- confirm against callers.
    """
    molns_config = molns.MOLNSConfig(db_file=configFilename)
    molns.MOLNSController.stop_controller(['goat'], molns_config)
Beispiel #16
0
    def pollSystemState(self):
        """Poll the molns config state, controller status, and config process.

        Returns a dict with the molns configuration state, the controller
        instance-status table, any buffered process output messages, and the
        name/liveness of the currently tracked config process (if any).
        """
        output = []

        config = molns.MOLNSConfig(db_file=self.getMolnsConfigPath())

        controllerName = providerToNames['EC2']['controllerName']

        if 'process' in self.session:
            processId, functionName = self.session['process']

            process = MolnsConfigProcessWrapper.get_by_id(processId)

            if not process:
                functionName = None
                is_alive = False
            else:
                is_alive = process.is_alive()

                stdout, stderr = process.communicate()

                # Forward new process output as status messages
                # (1 = stdout, 0 = stderr).
                if len(stdout) > 0:
                    output.append({'status': 1, 'msg': stdout})

                if len(stderr) > 0:
                    output.append({'status': 0, 'msg': stderr})
        else:
            functionName = None
            is_alive = False

        # Empty status table used when the controller status is missing or
        # the lookup fails (was duplicated verbatim in both branches).
        empty_status = {
            'type': 'table',
            'column_names': [
                'Name', 'Status', 'Type', 'Provider', 'Instance id',
                'IP address'
            ],
            'data': []
        }

        try:
            status = molns.MOLNSController.status_controller([controllerName],
                                                             config)

            if 'column_names' in status:
                status['column_names'] = [
                    s.capitalize() for s in status['column_names']
                ]
            else:
                status = empty_status
        except Exception:
            # BUGFIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            status = empty_status

        return {
            'molns': self.getMolnsState(),
            'instanceStatus': status,
            'messages': output,
            'process': {
                'name': functionName,
                'status': is_alive
            }
        }
Beispiel #17
0
    def post(self):
        """Dispatch a parameter-sweep AJAX request by its 'reqType' field.

        Supported request types: 'newJob' (molns cloud), 'newJobLocal',
        'newJobQsub' (remote cluster), 'delJob', 'getDataCloud' (fetch
        results from the cloud), and 'getDataLocal' (zip results for
        download). Every branch writes a JSON response.
        """
        reqType = self.request.get('reqType')
        self.response.content_type = 'application/json'

        if reqType == 'newJob':
            # Run via Molns cloud
            data = json.loads(self.request.get('data'))

            self.user_data.set_selected(2)

            # Job names must be unique per user.
            job = db.GqlQuery(
                "SELECT * FROM ParameterSweepJobWrapper WHERE user_id = :1 AND name = :2",
                self.user.user_id(), data["jobName"].strip()).get()

            if job != None:
                self.response.write(
                    json.dumps({
                        "status": False,
                        "msg": "Job name must be unique"
                    }))
                return

            try:
                result = self.runMolns(data=data)

                return self.response.write(
                    json.dumps({
                        "status": True,
                        "msg": "Job launched",
                        "id": result.key().id()
                    }))
            except Exception as e:
                logging.exception(e)
                result = {'status': False, 'msg': 'Error: {0}'.format(e)}
                self.response.write(json.dumps(result))
                return
        elif reqType == 'newJobLocal':
            logging.error("*" * 80)
            logging.error("parametersweep.newJobLocal")
            logging.error("*" * 80)
            data = json.loads(self.request.get('data'))

            self.user_data.set_selected(0)

            # Job names must be unique per user.
            job = db.GqlQuery(
                "SELECT * FROM ParameterSweepJobWrapper WHERE user_id = :1 AND name = :2",
                self.user.user_id(), data["jobName"].strip()).get()

            if job != None:
                logging.error(
                    "parametersweep.newJobLocal: error: Job name must be unique"
                )
                self.response.write(
                    json.dumps({
                        "status": False,
                        "msg": "Job name must be unique"
                    }))
                return

            try:
                result = self.runLocal(data=data)

                return self.response.write(
                    json.dumps({
                        "status": True,
                        "msg": "Job launched",
                        "id": result.key().id()
                    }))
            except Exception as e:
                logging.exception(e)
                result = {'status': False, 'msg': 'Error: {0}'.format(e)}
                self.response.write(json.dumps(result))
                return
        elif reqType == 'newJobQsub':
            logging.error("*" * 80)
            logging.error("parametersweep.newJobQsub")
            logging.error("*" * 80)
            data = json.loads(self.request.get('data'))

            # cluster_node_info = self.user_data.get_cluster_node_info()[0]
            # files = fileserver.FileManager.getFiles(self, 'clusterKeyFiles')
            # cluster_ssh_key_info = {f['id']: {'id': f['id'], 'keyname': f['path']} for f in files}

            # Assemble the SSH connection info for the target cluster from
            # the request payload plus the stored key file.
            cluster_info = dict()
            received_cluster_info = json.loads(
                self.request.get('cluster_info'))
            cluster_info['ip_address'] = received_cluster_info['ip']
            cluster_info['username'] = received_cluster_info['username']
            cluster_info['ssh_key'] = fileserver.FileWrapper.get_by_id(
                received_cluster_info['key_file_id']).storePath

            self.user_data.set_selected(received_cluster_info['uuid'])

            #logging.info("PARAMETER_SWEEP_CLUSTER_INFO = {0}".format(cluster_info))
            #cluster_info = json.loads(self.request.get('cluster_info'))

            # Job names must be unique per user.
            job = db.GqlQuery(
                "SELECT * FROM ParameterSweepJobWrapper WHERE user_id = :1 AND name = :2",
                self.user.user_id(), data["jobName"].strip()).get()

            if job != None:
                logging.error(
                    "parametersweep.newJobQsub: error: Job name must be unique"
                )
                self.response.write(
                    json.dumps({
                        "status": False,
                        "msg": "Job name must be unique"
                    }))
                return

            try:
                result = self.runQsub(data=data, cluster_info=cluster_info)

                return self.response.write(
                    json.dumps({
                        "status": True,
                        "msg": "Job launched",
                        "id": result.key().id()
                    }))
            except Exception as e:
                logging.exception(e)
                result = {'status': False, 'msg': 'Error: {0}'.format(e)}
                self.response.write(json.dumps(result))
                return

        elif reqType == 'delJob':
            jobID = json.loads(self.request.get('id'))

            jobID = int(jobID)

            job = ParameterSweepJobWrapper.get_by_id(jobID)

            # Only the owner may delete a job; success falls through to the
            # generic success response at the bottom.
            if job.user_id == self.user.user_id():
                job.delete(self)
            else:
                self.response.write(
                    json.dumps({
                        "status":
                        False,
                        "msg":
                        "No permissions to delete this job (this should never happen)"
                    }))
                return

        elif reqType == 'getDataCloud':
            try:
                jobID = json.loads(self.request.get('id'))
                job = ParameterSweepJobWrapper.get_by_id(int(jobID))

                molnsConfigDb = db.GqlQuery(
                    "SELECT * FROM MolnsConfigWrapper WHERE user_id = :1",
                    self.user.user_id()).get()

                # NOTE(review): silently returns an empty response when no
                # molns config exists for this user.
                if not molnsConfigDb:
                    return

                molnsConfig = molns.MOLNSConfig(
                    config_dir=molnsConfigDb.folder)
                # Pull the job log and results from the cloud into the job's
                # local output directory; failures here are non-fatal.
                try:
                    log = molns.MOLNSExec.job_logs([job.molnsPID], molnsConfig)
                    with open(os.path.join(job.outData, 'stdout'), 'w') as f:
                        f.write(log['msg'])
                    molns.MOLNSExec.fetch_job_results([
                        job.molnsPID, "results",
                        os.path.join(job.outData, 'results')
                    ], molnsConfig)
                    job.output_stored = True
                except (IOError, molns.MOLNSException) as e:
                    logging.info('Could not fetch results: {0}'.format(e))

                # Save the updated status
                job.put()
                self.response.headers['Content-Type'] = 'application/json'
                self.response.write(
                    json.dumps({
                        'status': True,
                        'msg': 'Job downloaded'
                    }))
                return
            except Exception as e:
                traceback.print_exc()
                self.response.write(
                    json.dumps({
                        "status": False,
                        "msg": "Error: {0}".format(e)
                    }))
                return

        elif reqType == 'getDataLocal':
            jobID = json.loads(self.request.get('id'))

            jobID = int(jobID)

            job = ParameterSweepJobWrapper.get_by_id(jobID)

            # Build (and cache) a zip archive of the job's output the first
            # time a download is requested.
            if not job.zipFileName:
                szip = exportimport.SuperZip(os.path.abspath(
                    os.path.dirname(__file__) + '/../static/tmp/'),
                                             preferredName=job.name + "_")

                job.zipFileName = szip.getFileName()

                szip.addParameterSweepJob(job, True)

                szip.close()

                # Save the updated status
                job.put()

            # URL path to the zip, relative to the application root.
            relpath = '/' + os.path.relpath(
                job.zipFileName,
                os.path.abspath(os.path.dirname(__file__) + '/../'))

            self.response.headers['Content-Type'] = 'application/json'
            self.response.write(
                json.dumps({
                    'status': True,
                    'msg': 'Job prepared',
                    'url': relpath
                }))
            return

        self.response.write(json.dumps({'status': True, 'msg': 'Success'}))
Beispiel #18
0
    def runMolns(self, data):
        """Launch a parameter-sweep job on the molns cloud.

        Renders the model-type-specific template into a standalone program,
        starts it via molns, and returns the saved ParameterSweepJobWrapper.
        On failure the partially created job is deleted and the exception
        re-raised.
        """
        self.user_data.set_selected(2)
        modelDb = StochKitModelWrapper.get_by_id(data["modelID"])

        path = os.path.abspath(os.path.dirname(__file__))

        basedir = path + '/../'
        dataDir = tempfile.mkdtemp(dir=basedir + 'output')

        # Create the job record up front so it can be cleaned up on failure.
        job = ParameterSweepJobWrapper()
        job.user_id = self.user.user_id()
        job.startTime = time.strftime("%Y-%m-%d-%H-%M-%S")
        job.name = data["jobName"]
        job.inData = json.dumps(data)
        job.modelName = modelDb.name
        job.outData = dataDir
        job.status = "Pending"
        job.output_stored = False

        # # execute cloud task
        try:
            # The template is chosen by model type (e.g. stochastic/spatial).
            template_filename = 'parametersweep_template_{0}.py'.format(
                data['modelType'])
            logging.error(
                "parametersweep.runMolns() template_filename={0}".format(
                    template_filename))
            logging.error("*" * 80)
            with open(os.path.join(path, template_filename), 'r') as f:
                template = f.read()

            # Everything the generated program needs: model definition plus
            # the sweep ranges for parameters A and B.
            templateData = {
                "name": modelDb.name,
                "modelType": modelDb.type,
                "species": modelDb.species,
                "parameters": modelDb.parameters,
                "reactions": modelDb.reactions,
                "speciesSelect": data['speciesSelect'],
                "maxTime": data['maxTime'],
                "increment": data['increment'],
                "trajectories": data['trajectories'],
                "seed": data['seed'],
                "parameterA": data['parameterA'],
                "minValueA": data['minValueA'],
                "maxValueA": data['maxValueA'],
                "stepsA": data['stepsA'],
                "logA": data['logA'],
                "parameterB": data['parameterB'],
                "minValueB": data['minValueB'],
                "maxValueB": data['maxValueB'],
                "stepsB": data['stepsB'],
                "logB": data['logB'],
                "variableCount": data['variableCount'],
                "isSpatial": modelDb.isSpatial,
                "isLocal": False
            }

            # Spatial models additionally need the mesh and the subdomain
            # assignments.
            if modelDb.isSpatial:
                try:
                    meshWrapperDb = mesheditor.MeshWrapper.get_by_id(
                        modelDb.spatial["mesh_wrapper_id"])
                except Exception as e:
                    raise Exception(
                        "No Mesh file set. Choose one in the Mesh tab of the Model Editor"
                    )

                try:
                    meshFileObj = fileserver.FileManager.getFile(
                        self, meshWrapperDb.meshFileId, noFile=False)
                    templateData["mesh"] = meshFileObj["data"]
                except IOError as e:
                    raise Exception("Mesh file inaccessible. Try another mesh")

                templateData[
                    'reaction_subdomain_assignments'] = modelDb.spatial[
                        "reactions_subdomain_assignments"]
                templateData[
                    'species_subdomain_assignments'] = modelDb.spatial[
                        "species_subdomain_assignments"]
                templateData[
                    'species_diffusion_coefficients'] = modelDb.spatial[
                        "species_diffusion_coefficients"]
                templateData['initial_conditions'] = modelDb.spatial[
                    "initial_conditions"]
                templateData['subdomains'] = meshWrapperDb.subdomains

            program = os.path.join(dataDir, 'program.py')

            with open(program, 'w') as f:
                jsonString = json.dumps(templateData, indent=4, sort_keys=True)

                # We've got to double escape the strings here cause of how we're substituting the JSON data in a source file
                jsonString = jsonString.replace('\\', '\\\\')

                f.write(template.replace('___JSON_STRING___', jsonString))

            molnsConfigDb = db.GqlQuery(
                "SELECT * FROM MolnsConfigWrapper WHERE user_id = :1",
                self.user.user_id()).get()
            if not molnsConfigDb:
                raise Exception("Molns not initialized")

            # Start the generated program on the cloud controller.
            config = molns.MOLNSConfig(config_dir=molnsConfigDb.folder)
            result = molns.MOLNSExec.start_job(
                ['EC2_controller', "python {0}".format(program)], config)

            job.resource = "molns"
            job.molnsPID = result['id']
            job.put()
        except Exception as e:
            # Roll back the job record before propagating the error.
            job.status = 'Failed'
            job.delete(self)
            raise

        return job
Beispiel #19
0
    def runMolns(self, params):
        """Submit a remote molns StochKit job.

        Builds the StochKit command line from *params*, serializes the
        model, starts the job on the molns controller, and returns the
        saved StochKitJobWrapper.

        Raises:
            Exception: on an unknown exec type, or when a concentration
                model is run stochastically, or when molns is not
                initialized for this user.
        """
        modelDb = StochKitModelWrapper.get_by_id(params["id"])
        sys.stderr.write("*" * 80 + "\n")
        sys.stderr.write("*" * 80 + "\n")
        sys.stderr.write("runMolns\n")
        logging.info('runMolns')
        sys.stderr.write("*" * 80 + "\n")
        sys.stderr.write("*" * 80 + "\n")

        if not modelDb:
            return {
                'status': False,
                'msg': 'Failed to retrive the model to simulate.'
            }

        model = modelDb.createStochKitModel()

        # Execute as concentration or population?
        execType = params['execType'].lower()

        if execType not in ["deterministic", "stochastic", "sensitivity"]:
            raise Exception(
                'exec_type must be deterministic, sensitivity, or stochastic. Found "{0}"'
                .format(execType))

        if model.units.lower() == 'concentration' and execType.lower(
        ) == 'stochastic':
            raise Exception(
                'Concentration models cannot be executed stochastically')

        # Assemble the argument list
        args = ''
        args += ' -t {0} '.format(params['time'])
        num_output_points = int(
            float(params['time']) / float(params['increment']))
        args += ' -i {0} '.format(num_output_points)
        path = os.path.abspath(os.path.dirname(__file__))
        # Algorithm, SSA or Tau-leaping?
        # BUGFIX: compare the normalized execType (was params['execType'],
        # so mixed-case input like 'Deterministic' took the wrong branch).
        if execType != 'deterministic':
            executable = "/usr/local/StochKit/{0}".format(params['algorithm'])

            args += ' --realizations {0} '.format(params['realizations'])
            args += ' --keep-trajectories '

            # Negative seed means "pick one at random".
            if int(params['seed']) < 0:
                random.seed()
                params['seed'] = random.randint(0, 2147483647)

            args += '--seed {0} '.format(params['seed'])
        else:
            executable = "/usr/local/ode-1.0.2/stochkit_ode.py"

        # Columns need to be labeled for visulatization page to work.
        args += ' --label'

        cmd = executable + ' ' + args

        basedir = path + '/../'
        dataDir = tempfile.mkdtemp(dir=basedir + 'output')

        # Wow, what a hack
        # Deterministic runs on population models need 2A->B mass-action
        # rates halved to match the ODE interpretation.
        if execType == 'deterministic' and model.units.lower(
        ) == 'population':
            document = model.serialize()

            model = StochMLDocument.fromString(document).toModel(model.name)

            for reactionN in model.getAllReactions():
                reaction = model.getAllReactions()[reactionN]
                if reaction.massaction:
                    if len(reaction.reactants
                           ) == 1 and reaction.reactants.values()[0] == 2:
                        reaction.marate.setExpression(
                            reaction.marate.expression + ' / 2')

        modelFileName = '{0}/{1}.xml'.format(dataDir, model.name)
        with open(modelFileName, 'w') as fmodelHandle:
            fmodelHandle.write(model.serialize())

        cmd += ' -m {0} --out-dir ./result'.format(
            os.path.basename(modelFileName))

        sys.stderr.write('*' * 80 + "\n")
        logging.error("cmd =\n{}".format(cmd))
        sys.stderr.write('simulation.runMolns(): cmd={0}\n'.format(cmd))

        # Wrap the command in a shell script that also tars up the results,
        # then hand it to the molns controller.
        with tempfile.NamedTemporaryFile() as exec_file:
            exec_file.write(cmd + "\n")
            exec_file.write("tar -czf result.tar.gz result")
            exec_file.flush()

            controllerName = 'EC2_controller'  #TODO: look this up

            exec_str = "bash {0} {1}".format(exec_file.name, modelFileName)
            sys.stderr.write(
                "result = molns.MOLNSExec.start_job(['{0}', '{1}])".format(
                    controllerName, exec_str))
            sys.stderr.write('*' * 80 + "\n")

            molnsConfigDb = db.GqlQuery(
                "SELECT * FROM MolnsConfigWrapper WHERE user_id = :1",
                self.user.user_id()).get()
            if not molnsConfigDb:
                raise Exception("Molns not initialized")

            config = molns.MOLNSConfig(config_dir=molnsConfigDb.folder)
            result = molns.MOLNSExec.start_job([controllerName, exec_str],
                                               config)
            sys.stderr.write('result = {0}'.format(result))

        sys.stderr.write('*' * 80 + "\n")

        # Create a wrapper to store the Job description in the datastore
        job = StochKitJobWrapper()
        job.resource = 'Molns'
        job.user_id = self.user.user_id()
        job.startTime = time.strftime("%Y-%m-%d-%H-%M-%S")
        job.name = params['jobName']
        job.modelName = model.name
        job.pid = result['id']
        job.indata = json.dumps({
            "type": 'StochKit2 Ensemble',
            "final_time": params['time'],
            "realizations": params['realizations'],
            "increment": params['increment'],
            "seed": params['seed'],
            "exec_type": params['execType'],
            "units": model.units.lower(),
            "epsilon": params['epsilon'],
            "threshold": params['threshold']
        })
        job.outData = dataDir
        job.status = 'Running'
        job.put()

        return job
# Beispiel #20
    @staticmethod
    def _changed_config_values(new_items, old_items):
        """Return {key: new_value} for every entry whose value changed.

        Both arguments are parallel lists of {'key': ..., 'value': ...}
        dicts as produced by the molns *_get_config calls; only keys whose
        value differs from the stored config are included, so unchanged
        settings are not re-imported.
        """
        return {
            old['key']: new['value']
            for new, old in zip(new_items, old_items)
            if new['value'] != old['value']
        }

    def updateMolnsState(self, state):
        """Persist edited MOLNS provider/controller/worker configuration.

        ``state`` maps a provider type (e.g. 'EC2') to a dict with
        'provider', 'controller' and 'worker' entries, each a list of
        {'key', 'value'} items in the same order as the corresponding
        molns *_get_config output.

        Raises:
            Exception: if a background MOLNS process is currently running
                (updating the config mid-operation is unsafe), as tracked
                in cherrypy.session['process'].
        """
        if 'process' in cherrypy.session and cherrypy.session['process'][
                0].is_alive():
            raise Exception(
                'Currently running process, cannot update state while this is ongoing'
            )

        # Use the same config DB that getMolnsState() reads from; the
        # previous hard-coded appDir/test.db meant reads and writes could
        # target different stores.
        config = molns.MOLNSConfig(db_file=self.getMolnsConfigPath())

        for providerType in state:
            names = providerToNames[providerType]
            providerName = names['providerName']
            controllerName = names['controllerName']
            workerName = names['workerName']
            edited = state[providerType]

            # Provider: import only the settings the user actually changed.
            provider_conf_items = molns.MOLNSProvider.provider_get_config(
                name=providerName, provider_type=providerType, config=config)
            molns.MOLNSProvider.provider_import(
                '', config, {
                    'name': providerName,
                    'type': providerType,
                    'config': self._changed_config_values(
                        edited['provider'], provider_conf_items)
                })

            # Controller: tied to the provider by name.
            controller_conf_items = molns.MOLNSController.controller_get_config(
                name=controllerName, provider_type=providerType, config=config)
            molns.MOLNSController.controller_import(
                '', config, {
                    'name': controllerName,
                    'provider_name': providerName,
                    'config': self._changed_config_values(
                        edited['controller'], controller_conf_items)
                })

            # Worker group: tied to both the controller and the provider.
            worker_conf_items = molns.MOLNSWorkerGroup.worker_group_get_config(
                name=workerName, provider_type=providerType, config=config)
            molns.MOLNSWorkerGroup.worker_group_import(
                '', config, {
                    'name': workerName,
                    'controller_name': controllerName,
                    'provider_name': providerName,
                    'config': self._changed_config_values(
                        edited['worker'], worker_conf_items)
                })