Example no. 1
        async def on_offloading_event(source, rpcname, code, resourcename,
                                     params):
            print("Received from: {} app".format(source))
            print("Received RCP name: {}".format(rpcname))
            print("Received the source code: {}".format(code))
            print("Received params: {}".format(params))

            if source == "Android":
                self.params = params

                # Java file already cached in MAMoC Repository
                if path.exists("java_classes/{}.java".format(self.class_name)):
                    result = self.executor.startExecuting(
                        self.class_name, "{}.java".format(self.class_name),
                        params)

                else:
                    # if it is a class, it must start with the package keyword
                    if code.strip().split(' ', 1)[0] == "package":
                        code, self.class_name = Transformer(
                            code, resourcename, params).start()
                    else:
                        code, self.class_name = Transformer(
                            code, resourcename, params).start(type="method")

                    with open("java_classes/{}.java".format(self.class_name),
                              "w") as java_file:
                        print("{}".format(code), file=java_file)

                    result = self.executor.startExecuting(
                        self.class_name, "{}.java".format(self.class_name),
                        params)

                print(result)

                if result:  # if building and execution were successful, send back output and duration in seconds
                    output = result[0]
                    duration = result[1]

                    output = self.decode_bytes(output)

                    self.publish('uk.ac.standrews.cs.mamoc.offloadingresult',
                                 output, duration)

                    # register the procedure for the next RPC request
                    try:
                        re = await self.register(
                            self.execute_java,
                            rpcname,
                            options=RegisterOptions(invoke=u'roundrobin'))
                    except ApplicationError as e:
                        print("could not register procedure: {0}".format(e))
                    else:
                        print("{} endpoints registered".format(re))

            elif source == "iOS":
                print("received from iOS app")
            else:
                print("unrecognized source!")
Example no. 2
    async def onJoin(self, details):
        await self.register(self, options=RegisterOptions(invoke='roundrobin'))

        await self.subscribe(self.on_sim_telemetry, 'sim.telemetry')

        self.is_running = True

        while self.is_running:
            await self.pass_outgoing_cmd()
            await asyncio.sleep(0.1)
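
Note that registering self (the whole session object) rather than a single callable registers every method decorated with autobahn's @wamp.register, and the RegisterOptions apply to each of them. A hedged sketch of one such decorated method elsewhere in the class (URI and body are assumed):

    # Hedged sketch: a method picked up by `await self.register(self, ...)` above.
    # Requires `from autobahn import wamp`; the URI is hypothetical.
    @wamp.register('sim.get_state')
    def get_state(self):
        return {'running': self.is_running}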
Example no. 3
class DockingWampApi(ComponentSession):

    def authorize_request(self, uri, claims):
        return True

    @endpoint('docking', 'docking_request', 'docking_response', options=RegisterOptions(invoke='roundrobin'))
    def run_docking(self, request, claims):
        """
        Perform a PLANTS (Protein-Ligand ANT System) molecular docking.
        For a detailed description of the input see the file:
        schemas/endpoints/docking-request.v1.json
        """
        task_id = self.component_config.session.session_id
        self.log.info("Plants Docking ID: {}".format(task_id))

        # Docking options are equal to the request
        plants_config = request.copy()

        # Transfer the files content
        plants_config['protein_file'] = request['protein_file']['content']
        plants_config['ligand_file'] = request['ligand_file']['content']

        # Prepare docking directory
        workdir = os.path.abspath(request['workdir'])
        plants_config["workdir"] = prepare_work_dir(
            workdir, create=True)

        # location of the executable
        file_path = os.path.realpath(__file__)
        root = os.path.split(file_path)[0]

        plants_config['exec_path'] = os.path.join(root, 'plants_linux')

        # Run docking
        docking = PlantsDocking(**plants_config)
        success = docking.run(plants_config['protein_file'], plants_config['ligand_file'])

        if success:
            status = 'completed'
            results = docking.results()
            output = {key: encode_file(value) for key, value in results.items()}

            # Add path to cluster dendrogram
            clusterplot = os.path.join(workdir, 'cluster_dendrogram.pdf')
            if os.path.isfile(clusterplot):
                output['clusterplot'] = {'content': None, 'extension': 'pdf', 'path': clusterplot}

        else:
            self.log.error('PLANTS docking failed')
            docking.delete()
            status = 'failed'
            output = None

        return {'status': status, 'output': output}
Example no. 4
    async def onJoin(self, details):
        print("Join:", details)

        await self.register(self.ping,
                            "com.myapp.ping",
                            options=RegisterOptions(details_arg="details"))

        counter = 0
        while True:
            print("publish: com.myapp.hello", counter)
            self.publish("com.myapp.hello", counter)
            counter += 1
            await asyncio.sleep(1)
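
Because the registration above passes RegisterOptions(details_arg="details"), autobahn injects a CallDetails object into the endpoint under that keyword. A minimal sketch of the matching ping handler (the body is assumed; the injection behavior is standard autobahn):

    async def ping(self, details=None):
        # autobahn fills 'details' with a CallDetails instance because the
        # registration used RegisterOptions(details_arg="details")
        print("ping called by session", details.caller)
        return "pong"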
Example no. 5
 async def _register(self):
     try:
         options = RegisterOptions(**self._register_options_kwargs)
         session = self._manager.session.application_session
         print(
             f'Registration of {self._procedure} with name {self.name} starting'
         )
         self._registration = await session.register(
             endpoint=self._endpoint_wrapper,
             procedure=self._procedure,
             options=options,
             prefix=self._prefix)
         print(
             f'Registration of {self._procedure} with name {self.name} succeeded'
         )
     except Exception as e:
         print(
             f'Registration of {self._procedure} with name {self.name} failed'
         )
         self._exception = e
Example no. 6
def bootstrap():
    wamp = FlaskAutobahnSync(app)
    app.wamp = wamp
    serve_history = []

    @wamp.subscribe(u'com.flask_app.page_served')
    def page_served(wid, timestamp):
        data = (wid, timestamp)
        print('[Worker %s] Received %s' % (worker_id, data))
        serve_history.append(data)

    # Allow multiple registrations of the same URI, since this app may run in concurrent workers
    register_opt = RegisterOptions(invoke=u'random')

    @wamp.register(u'com.flask_app.get_request_history', options=register_opt)
    def get_request_history(wid):
        print('[Worker %s] Send request history to worker %s' %
              (worker_id, wid))
        return serve_history

    return app
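
With invoke=u'random', several concurrent workers can register com.flask_app.get_request_history and the router forwards each call to one randomly chosen registration. A hedged caller-side sketch, written against plain autobahn (the Flask wrapper above exposes an equivalent call):

# Hypothetical caller in the same realm; my_worker_id is an assumed variable.
history = await session.call(u'com.flask_app.get_request_history', my_worker_id)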
Example no. 7
class RoundrobinComponent(ComponentSession):
    def authorize_request(self, uri, claims):
        # Authorize calls to API endpoints
        return True

    @endpoint('parallel',
              'roundrobin_request',
              'roundrobin_response',
              options=RegisterOptions(invoke=u'roundrobin'))
    def parallel_call(self, request, claims):
        """
        Parallel call

        Simulate some CPU intensive task by sleeping for 5 seconds
        then return number variable to the power of 2.
        """

        self.sleep(request['number'])
        request['number'] = request['number']**POWER
        return request

    @endpoint('sequential', 'roundrobin_request', 'roundrobin_response')
    def sequential_call(self, request, claims):
        """
        Sequential call

        Similar to parallel_call but without the roundrobin registration
        """

        self.sleep(request['number'])
        request['number'] = request['number']**POWER
        return request

    def sleep(self, number):

        self.log.info('Process number {0} after {1} seconds delay'.format(
            number, DELAY))
        sleep(DELAY)
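
The practical difference between the two endpoints: parallel_call is registered with invoke=u'roundrobin', so several component instances can register the same URI and the router spreads calls across them, while sequential_call keeps the default single registration and its calls queue on one instance. A hedged caller sketch (the URI is a placeholder for whatever the endpoint decorator builds):

import asyncio

# Hypothetical caller: with N workers running, the roundrobin endpoint handles
# up to N of these calls concurrently; the sequential endpoint serializes them.
results = await asyncio.gather(
    *[session.call('mdgroup.parallel', {'number': n}) for n in range(4)])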
Example no. 8
class WorkflowWampApi(ComponentSession):
    """
    Workflow WAMP methods.

    Defines `require_config` to retrieve system and database configuration
    upon WAMP session setup
    """
    def authorize_request(self, uri, claims):
        return True

    @endpoint('run_workflow',
              'run_workflow_request',
              'run_workflow_response',
              options=RegisterOptions(invoke=u'roundrobin'))
    def retrieve_structures(self, workflow, session=None):
        """
        Run a LIE Workflow
        """

        print(workflow)
        print(session)

        return session
class MDWampApi(ComponentSession):
    """
    Molecular dynamics WAMP methods.
    """
    def authorize_request(self, uri, claims):
        return True

    @endpoint('query_gromacs_results',
              'query_gromacs_results_request',
              'async_gromacs_response',
              options=RegisterOptions(invoke='roundrobin'))
    def query_gromacs_results(self, request, claims):
        """
        Check the status of the simulation and return the results if available.

        The request should at least contain a task_id stored in the cerise job DB.
        The response is a typical async_gromacs response, a 'Future' object.
        """

        output = yield query_simulation_results(request, self.db)
        for key, value in request.items():
            if key not in output:
                output[key] = value

        return_value(output)

    @endpoint('async_gromacs_ligand',
              'async_gromacs_ligand_request',
              'async_gromacs_response',
              options=RegisterOptions(invoke='roundrobin'))
    def run_async_ligand_solvent_md(self, request, claims):
        """
        Run Gromacs MD of ligand in solvent. Invokes a ligand solvent
        simulation and returns immediately, giving the caller the information
        needed to query the results.

        TODO: Still requires the protein topology and positional restraint
              (include) files. Makes no sense for ligand in solvent but
              required by gromit somehow.
        """
        # Protein structure not needed. Explicitly set to None
        request['protein_file'] = None

        output = yield self.run_async_gromacs_gromacs(request, claims)
        return_value(output)

    @endpoint('async_gromacs_protein',
              'async_gromacs_protein_request',
              'async_gromacs_response',
              options=RegisterOptions(invoke='roundrobin'))
    def run_async_protein_protein_md(self, request, claims):
        """
        Run asynchronous Gromacs MD of a protein-ligand system in solvent
        """

        output = yield self.run_async_gromacs_gromacs(request, claims)
        return_value(output)

    @endpoint('gromacs_ligand',
              'gromacs_ligand_request',
              'gromacs_response',
              options=RegisterOptions(invoke='roundrobin'))
    def run_ligand_solvent_md(self, request, claims):
        """
        Run Gromacs MD of ligand in solvent

        TODO: Still requires the protein topology and positional restraint
              (include) files. Makes no sense for ligand in solvent but
              required by gromit somehow.
        """
        # Protein structure not needed. Explicitly set to None
        request['protein_file'] = None

        output = yield self.run_gromacs_gromacs(request, claims)
        return_value(output)

    @endpoint('gromacs_protein',
              'gromacs_protein_request',
              'gromacs_response',
              options=RegisterOptions(invoke='roundrobin'))
    def run_ligand_protein_md(self, request, claims):
        """
        Run Gromacs MD of a protein-ligand system in solvent
        """

        output = yield self.run_gromacs_gromacs(request, claims)
        return_value(output)

    @chainable
    def run_gromacs_gromacs(self, request, claims):
        """
        First it calls gromit to compute the Ligand-solute energies, then
        calls gromit to calculate the protein-ligand energies.

        The Cerise-client infrastructure is used to perform the computations
        in a remote server, see:
        http://cerise-client.readthedocs.io/en/master/index.html

        This function expects the following keyword arguments (files) to call gromit:
            * cerise_file
            * protein_file (optional)
            * protein_top
            * ligand_file
            * topology_file
            * residues

        The cerise_file is the path to the file containing the configuration
        information required to start a Cerise service.

        Further include files (e.g. *itp files) can be included as a list:
        include=[atom_types.itp, another_itp.itp]

        To perform the energy decomposition a list of the numerical residues
        identifiers is expected, for example:
        residues=[1, 5, 7, 8]

        Note: the protein_file argument is optional. If you do not provide it,
        the method will perform a SOLVENT-LIGAND MD; if you provide the
        `protein_file` it will perform a PROTEIN-LIGAND MD.
        """
        cerise_config, gromacs_config = self.setup_environment(request)
        cerise_config['clean_remote'] = request.get('clean_remote_workdir',
                                                    True)

        # Run the MD and retrieve the energies
        output = yield call_cerise_gromit(gromacs_config, cerise_config,
                                          self.db)

        return_value(output)

    @chainable
    def run_async_gromacs_gromacs(self, request, claims):
        """
        Async version of the `run_gromacs_gromacs` function.
        """
        cerise_config, gromacs_config = self.setup_environment(request)
        cerise_config['clean_remote'] = request.get('clean_remote_workdir',
                                                    True)

        output = yield call_async_cerise_gromit(gromacs_config, cerise_config,
                                                self.db)

        return_value(output)

    def setup_environment(self, request):
        """
        Set all the configuration to perform a simulation.
        """
        # Base workdir needs to exist. Might be shared between docker and host
        check_workdir(request['workdir'])

        task_id = uuid.uuid1().hex
        request.update({"task_id": task_id})
        self.log.info("starting gromacs task_id: {}".format(task_id))

        task_workdir = create_task_workdir(request['workdir'])

        request['workdir'] = task_workdir
        self.log.info("store output in: {0}".format(task_workdir))

        # Copy input files to task workdir
        request = copy_file_path_objects_to_workdir(request.copy())

        # Build 'include' file list for cerise/CWL
        request['include'] = []
        for file_type in ('attype_itp', 'protein_posre_itp'):
            request['include'].append(request[file_type])

        # Load GROMACS configuration
        gromacs_config = set_gromacs_input(request)

        # Load Cerise configuration
        cerise_config = create_cerise_config(request)
        cerise_config['task_id'] = task_id

        with open(os.path.join(request['workdir'], "cerise.json"), "w") as f:
            json.dump(cerise_config, f)

        return cerise_config, gromacs_config
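
The yield ... / return_value(...) idiom used throughout this class is MDStudio's Twisted-style coroutine pattern, playing the same role as Twisted's @inlineCallbacks / returnValue. A rough Twisted analogue, for orientation only (names here are illustrative, not MDStudio API):

from twisted.internet import reactor, task
from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def compute(request):
    # stand-in for the asynchronous gromit call: wait, then hand back a result
    yield task.deferLater(reactor, 1.0, lambda: None)
    returnValue({'status': 'completed', 'task_id': request['task_id']})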
Example no. 10
class StructuresWampApi(CheminfoDescriptorsWampApi, CheminfoMolhandleWampApi,
                        CheminfoFingerprintsWampApi, ComponentSession):
    """
    Structure database WAMP methods.
    """
    def authorize_request(self, uri, claims):
        return True

    @endpoint('chemical_similarity',
              'chemical_similarity_request',
              'chemical_similarity_response',
              options=RegisterOptions(invoke=u'roundrobin'))
    def calculate_chemical_similarity(self, request, claims):
        request['workdir'] = os.path.abspath(request['workdir'])
        return super(StructuresWampApi,
                     self).calculate_chemical_similarity(request, claims)

    @endpoint('descriptors',
              'descriptors_request',
              'descriptors_response',
              options=RegisterOptions(invoke=u'roundrobin'))
    def get_descriptors(self, request, claims):
        request['workdir'] = os.path.abspath(request['workdir'])
        return super(StructuresWampApi, self).get_descriptors(request, claims)

    @endpoint('convert',
              'convert_request',
              'convert_response',
              options=RegisterOptions(invoke=u'roundrobin'))
    def convert_structures(self, request, claims):
        request['workdir'] = os.path.abspath(request['workdir'])
        return super(StructuresWampApi,
                     self).convert_structures(request, claims)

    @endpoint('addh',
              'addh_request',
              'addh_response',
              options=RegisterOptions(invoke=u'roundrobin'))
    def addh_structures(self, request, claims):
        request['workdir'] = os.path.abspath(request['workdir'])
        return super(StructuresWampApi, self).addh_structures(request, claims)

    @endpoint('removeh',
              'removeh_request',
              'removeh_response',
              options=RegisterOptions(invoke=u'roundrobin'))
    def removeh_structures(self, request, claims):
        request['workdir'] = os.path.abspath(request['workdir'])
        return super(StructuresWampApi,
                     self).removeh_structures(request, claims)

    @endpoint('make3d',
              'make3d_request',
              'make3d_response',
              options=RegisterOptions(invoke=u'roundrobin'))
    def make3d_structures(self, request, claims):
        request['workdir'] = os.path.abspath(request['workdir'])
        return super(StructuresWampApi,
                     self).make3d_structures(request, claims)

    @endpoint('info',
              'info_request',
              'info_response',
              options=RegisterOptions(invoke=u'roundrobin'))
    def structure_attributes(self, request, claims):
        return super(StructuresWampApi,
                     self).structure_attributes(request, claims)

    @endpoint('rotate',
              'rotate_request',
              'rotate_response',
              options=RegisterOptions(invoke=u'roundrobin'))
    def rotate_structures(self, request, claims):
        request['workdir'] = os.path.abspath(request['workdir'])
        return super(StructuresWampApi,
                     self).rotate_structures(request, claims)

    @endpoint('supported_toolkits',
              'supported_toolkits_request',
              'supported_toolkits_response',
              options=RegisterOptions(invoke=u'roundrobin'))
    def supported_toolkits(self, request, claims):
        """
        Query available toolkits.

        For a detailed input description see the file:
           mdstudio_structures/schemas/endpoints/supported_toolkits_request_v1.json
        And for a detailed description of the output see:
           mdstudio_structures/schemas/endpoints/supported_toolkits_response_v1.json
        """
        return {'status': 'completed', 'toolkits': list(toolkits.keys())}

    @endpoint('remove_residues',
              'remove_residues_request',
              'remove_residues_response',
              options=RegisterOptions(invoke=u'roundrobin'))
    def remove_residues(self, request, claims):
        """
        Remove residues from a PDB structure

        For a detailed input description see the file:
           mdstudio_structures/schemas/endpoints/removed_residues_request_v1.json
        And for a detailed description of the output see:
           mdstudio_structures/schemas/endpoints/removed_residues_response_v1.json
        """
        request['workdir'] = os.path.abspath(request['workdir'])
        # Parse the structure
        parser = PDBParser(PERMISSIVE=True)
        struc_obj = StringIO(request.get('mol'))

        structure = parser.get_structure('mol_object', struc_obj)
        struc_obj.close()

        to_remove = [r.upper() for r in request.get('residues', [])]
        removed = []
        for model in structure:
            for chain in model:
                for residue in chain:
                    if residue.get_resname() in to_remove:
                        chain.detach_child(residue.id)
                        removed.append(residue.get_resname())
                if len(chain) == 0:
                    model.detach_child(chain.id)
        self.log.info('Removed residues: {0}'.format(','.join(removed)))

        # Save to file or string
        pdbio = PDBIO()
        pdbio.set_structure(structure)

        status = 'completed'
        if request.get('workdir'):
            result = os.path.join(request.get('workdir'), 'structure.pdb')
            pdbio.save(result)
        else:
            outfile = StringIO()
            pdbio.save(outfile)
            outfile.seek(0)
            result = outfile.read()

        return {'status': status, 'mol': result}

    @endpoint('retrieve_rcsb_structure',
              'retrieve_rcsb_structure_request',
              'retrieve_rcsb_structure_response',
              options=RegisterOptions(invoke=u'roundrobin'))
    def fetch_rcsb_structure(self, request, claims):
        """
        Download a structure file from the RCSB database using a PDB ID

        For a detailed input description see the file:
           mdstudio_structures/schemas/endpoints/retrieve_rcsb_structures_request_v1.json
        And for a detailed description of the output see:
           mdstudio_structures/schemas/endpoints/retrieve_rcsb_structures_response_v1.json
        """
        # Create workdir and save file
        request['workdir'] = os.path.abspath(request['workdir'])
        workdir = request.get('workdir', tempfile.gettempdir())
        if not os.path.isdir(workdir):
            os.makedirs(workdir)

        # Retrieve the PDB file
        pdb_id = request['pdb_id'].upper()
        pdb = PDBList()
        dfile = pdb.retrieve_pdb_file(pdb_id,
                                      file_format=request.get(
                                          'rcsb_file_format', 'pdb'),
                                      pdir=workdir,
                                      overwrite=True)

        # Change file extension
        base, ext = os.path.splitext(dfile)
        ext = ext.lstrip('.')
        if ext == 'ent':
            os.rename(dfile, '{0}.pdb'.format(base))
            dfile = '{0}.pdb'.format(base)

        # Return file path if workdir in function arguments else return
        # file content inline.
        if os.path.isfile(dfile):
            status = 'completed'
            if 'workdir' in request:
                molecule = dfile
            else:
                with open(dfile, 'r') as f:
                    molecule = f.read()

        else:
            self.log.error('Unable to download structure: {0}'.format(pdb_id))
            status = 'failed'
            molecule = None

        result = {'path': None, 'content': molecule, 'extension': ext}

        return {'status': status, 'mol': result}
Example no. 11
                              id="ltri-csgo-gsi-money", options=options)
        yield session.publish(f"{SRC_PREFIX}ltri-csgo-gsi-money-inv",
                              wamp_component.state.player.state.money_as_color_inv() * LEDS_IN_ARRAY_DEFAULT,
                              id="ltri-csgo-gsi-money-inv", options=options)
    yield None


@wamp_component.on_join
@inlineCallbacks
def joined(session, details):
    print("session ready")
    wamp_component.loop_gsi_publish = task.LoopingCall(publish_gsi, session=session)
    wamp_component.loop_gsi_publish.start(.05)


@wamp_component.register("com.lambentri.edge.la4.machine.list", options=RegisterOptions(invoke="roundrobin"))
def list_active_machine_instances():
    """List all available machines"""
    schema = MachineDict(
        machines=
        {
            # these don't quite work yet
            "cs-gsi-de": Machine(
                name="CSGO-GSI",
                iname="DE_",
                id="ltri-csgo-gsi-de",
                desc="Machine State for CSGO Defusal games"
            ),
            "cs-gsi-cs": Machine(
                name="CSGO-GSI",
                iname="CS_",
Example no. 12
class ATBWampApi(ComponentSession):
    """
    Automated Topology Builder API WAMP methods.
    """
    def authorize_request(self, uri, claims):
        return True

    @staticmethod
    def get_mol(geometry):
        """
        Retrieve molecular geometry
        """

        structure = geometry['content']
        if structure is None and geometry['path']:
            if os.path.isfile(geometry['path']):
                with open(geometry['path'], 'r') as molfile:
                    structure = molfile.read()
            else:
                raise IOError('Structure file not found: {0}'.format(geometry['path']))

        return structure

    def _parse_server_error(self, error):
        """
        Parse ATB server JSON error construct
        """

        error_dict = {}
        if hasattr(error, 'read'):
            try:
                error_dict = json.load(error)
                self.log.error('ATB server error: {0}'.format(
                    error_dict.get('error_msg')))
            except Exception:
                self.log.error('Unknown ATB server error')

        return error_dict

    def _execute_api_call(self, call, **kwargs):
        """
        Execute ATB server API call
        """

        try:
            return call(**kwargs)
        except URLError as error:
            self.log.error('ATB server URL {0} not known/reachable'.format(
                SETTINGS['atb_url']))
            return self._parse_server_error(error)
        except HTTPError as error:
            return self._parse_server_error(error)

    def _init_atb_api(self, api_token=None):
        """
        Start ATB server API interface.

        The ATB API is initiated at every WAMP method request using a global
        or user specific ATB API token.

        :param api_token: valid ATB server API token
        :type api_token:  :py:str

        :rtype:           ATBServerApi object
        """

        if not api_token:
            self.log.error('Using the ATB server requires a valid API token')
            return None

        api = ATBServerApi(api_token=api_token,
                           timeout=SETTINGS['atb_api_timeout'],
                           debug=SETTINGS['atb_api_debug'],
                           host=SETTINGS['atb_url'],
                           api_format=u'json')

        if not api:
            raise IOError('Unable to use the ATB API')

        return api

    @endpoint('submit',
              'atb_submit_request',
              'atb_submit_response',
              options=RegisterOptions(invoke=u'roundrobin'))
    def atb_submit_calculation(self, request, claims):
        """
        Submit a new calculation to the ATB server
        """

        # Init ATBServerApi
        api = self._init_atb_api(api_token=request['atb_api_token'])

        # Open file if needed
        pdb = self.get_mol(request['pdb'])

        response = self._execute_api_call(api.Molecules.submit,
                                          pdb=pdb,
                                          netcharge=request['netcharge'],
                                          moltype=request['moltype'],
                                          public=request['public'])
        if response and response.get(u'status', None) == u'error':

            # Check if molecule has been calculated already
            if response.get(
                    u'error_msg',
                    '').startswith('Your submission matched a previously'):
                # Extract molid from response
                m = re.search('(?<=molid=)[0-9]*',
                              response.get(u'error_msg', ''))
                molid = None
                if m:
                    molid = m.group()
                    if molid.isdigit():
                        molid = int(molid)

                # Get the molecule data for molid
                response = [
                    self._execute_api_call(api.Molecules.molid, molid=molid)
                ]
                return {
                    'result': [
                        mol.moldict for mol in response
                        if isinstance(mol, ATB_Mol)
                    ]
                }

    @endpoint('get_structure',
              'atb_get_structure_request',
              'atb_get_structure_response',
              options=RegisterOptions(invoke=u'roundrobin'))
    def atb_structure_download(self, request, claims):
        """
        Retrieve a structure file from the ATB server by molid

        Supports download of structures in the following file formats:
        - pqr_allatom_optimised:    APBS pqr format
        - pqr_allatom_unoptimised:  APBS pqr format
        - pqr_uniatom_optimised:    APBS pqr format
        - pqr_uniatom_unoptimised:  APBS pqr format
        - pdb_allatom_optimised:    PDB format
        - pdb_allatom_unoptimised:  PDB format
        - pdb_uniatom_optimised:    PDB format
        - pdb_uniatom_unoptimised:  PDB format
        - cif_allatom:              CIF format
        - cif_allatom_extended:     CIF format
        - cif_uniatom:              CIF format
        - cif_uniatom_extended:     CIF format
        """

        if request['fformat'] not in SUPPORTED_STRUCTURE_FILE_FORMATS:
            self.log.error('Structure format {0} not supported'.format(
                request['fformat']))
            return

        # Init ATBServerApi
        api = self._init_atb_api(api_token=request['atb_api_token'])

        # Get the molecule by molid
        molecule = self._execute_api_call(api.Molecules.molid,
                                          molid=request['molid'])
        filename = None
        if 'workdir' in request:
            workdir = os.path.abspath(request['workdir'])
            filename = os.path.join(
                workdir, '{0}.{1}'.format(
                    request['fformat'],
                    SUPPORTED_FILE_EXTENTIONS.get(request['fformat'], 'txt')))

        if molecule and isinstance(molecule, ATB_Mol):
            structure = self._execute_api_call(
                molecule.download_file,
                file=request['fformat'],
                outputType=SUPPORTED_STRUCTURE_FILE_FORMATS.get(
                    request['fformat'], 'cry'),
                ffVersion=request['ffversion'],
                hash=request['atb_hash'],
                fnme=filename)
            return structure
        else:
            self.log.error(
                'Unable to retrieve structure file for molid: {0}'.format(
                    request['molid']))

    @endpoint('get_topology',
              'atb_get_topology_request',
              'atb_get_topology_response',
              options=RegisterOptions(invoke=u'roundrobin'))
    def atb_topology_download(self, request, claims):
        """
        Retrieve a topology and parameter files from the ATB server by molid

        Supports download of structures in the following file formats:
        - lammps_allatom_optimised:     LAMMPS MD
        - lammps_allatom_unoptimised:   LAMMPS MD
        - lammps_uniatom_optimised:     LAMMPS MD
        - lammps_uniatom_unoptimised:   LAMMPS MD
        - mtb96_allatom:                GROMOS96
        - mtb96_uniatom:                GROMOS96
        - mtb_allatom:                  GROMOS11
        - mtb_uniatom:                  GROMOS11
        - rtp_allatom:                  GROMACS
        - rtp_uniatom:                  GROMACS
        - cns_allatom_top:              CNS
        - cns_allatom_param:            CNS
        - cns_uniatom_top:              CNS
        - cns_uniatom_param:            CNS
        """

        if request['fformat'] not in SUPPORTED_TOPOLOGY_FILE_FORMATS:
            self.log.error('Structure format {0} not supported'.format(
                request['fformat']))
            return

        # Init ATBServerApi
        api = self._init_atb_api(api_token=request['atb_api_token'])

        # Get the molecule by molid
        molecule = self._execute_api_call(api.Molecules.molid,
                                          molid=request['molid'])
        filename = None
        if 'workdir' in request:
            workdir = os.path.abspath(request['workdir'])
            filename = os.path.join(
                workdir, '{0}.{1}'.format(
                    request['fformat'],
                    SUPPORTED_FILE_EXTENTIONS.get(request['fformat'], 'top')))

        if molecule and isinstance(molecule, ATB_Mol):
            structure = self._execute_api_call(
                molecule.download_file,
                file=SUPPORTED_TOPOLOGY_FILE_FORMATS[request['fformat']],
                outputType='top',
                ffVersion=request['ffversion'],
                hash=request['atb_hash'],
                fnme=filename)
            return structure
        else:
            self.log.error(
                'Unable to retrieve topology/parameter file for molid: {0}'.
                format(request['molid']))

    @endpoint('structure_query',
              'atb_structure_query_request',
              'atb_structure_query_response',
              options=RegisterOptions(invoke=u'roundrobin'))
    def atb_structure_query(self, request, claims):
        """
        Query the ATB server database for molecules based on a structure

        16-11-2017: Method only works with HTTP GET
        """

        # Get structure
        structure = self.get_mol(request['mol'])

        # Adjust structure format if needed
        if request['mol']['extension'] and request['mol'][
                'extension'] != request['structure_format']:
            if not request['mol']['extension'] in ('pdb', 'mol', 'mol2', 'sdf',
                                                   'inchi'):
                raise IOError('File format not supported: {0}'.format(
                    request['mol']['extension']))
            request['structure_format'] = request['mol']['extension']

        # Init ATBServerApi
        api = self._init_atb_api(api_token=request['atb_api_token'])

        result = self._execute_api_call(
            api.Molecules.structure_search,
            structure_format=request['structure_format'],
            structure=structure,
            netcharge=request.get('netcharge', '*'),
            method=u'GET')

        output = {'matches': result.get('matches', [])}
        if 'search_molecule' in result:
            for key in ('inchi', 'inchi_key'):
                output['search_{0}'.format(
                    key)] = result['search_molecule'][key]

        return output

    @endpoint('molecule_query',
              'atb_molecule_query_request',
              'atb_molecule_query_response',
              options=RegisterOptions(invoke=u'roundrobin'))
    def atb_molecule_query(self, request, claims):
        """
        Query the ATB server database for molecules based on molecule meta-data

        The ATB server molecules database can be queried using the
        ./api/v0.1/molecules/search.py endpoint using any of the following query
        attributes, or 'any' for a wildcard search. Multiple query attributes
        will be chained using the AND logical operator.

        iupac:              the official IUPAC name of the molecule (str)
        inchi_key:          the unique InChI code of the molecule (str)
        smiles:             canonical SMILES of the molecule (str)
        common_name:        the common name of the molecule (str)
        formula:            the molecular formula (e.g. C2H6O) (str)
        maximum_qm_level:   string of comma separated integers.
        curation_trust:     level of expected accuracy of the molecule parameters
                            as string of comma separated integers. 0 by default:
                            -1 = unfinished ATB molecule
                             0 = finished ATB molecule
                             1 = manual parameters from reliable users
                             2 = manual parameters from official source
        is_finished:        query for finished molecules only, i.e. molecules
                            for which calculations are no longer running (bool)
        user_label:         any user specific label that may have been given to
                            the molecule.
        max_atoms:          maximum number of atoms (int)
        min_atoms:          minimum number of atoms (int)
        has_pdb_hetId:      molecule has PDB heteroatom ID (bool)
        match_partial:      enable partial matching of string attributes (bool)
                            False by default.

        Query values are case insensitive but the attribute names (keys) are case sensitive.
        """

        # Init ATBServerApi
        api = self._init_atb_api(api_token=request['atb_api_token'])

        # Get molecule directly using ATB molid
        if 'molid' in request:
            response = [
                self._execute_api_call(api.Molecules.molid,
                                       molid=request['molid'])
            ]
        else:
            # Execute ATB molecule search query
            del request['atb_api_token']
            response = [
                mol.moldict for mol in self._execute_api_call(
                    api.Molecules.search, **request)
                if isinstance(mol, ATB_Mol)
            ]

        if isinstance(response, list):
            if not len(response):
                self.log.info('ATB molecule query did not yield any results')
            return {'result': response}
        else:
            self.log.error('Unable to execute ATB molecule query')
Example no. 13
class LambentMachine(DocMixin, ApplicationSession):
    regs = {}
    subs = {}
    grp = "machine"

    tickers = {}
    machines = {
        # "SlowFakeMachine-a.b": SlowFakeMachine(),
        # "FastFakeMachine-c.d": FastFakeMachine()
    }
    machine_library = [
        SlowFakeMachine,
        FastFakeMachine,
        "lambents.solids.SolidStep",
        "lambents.solids.SolidStepHSV",
        "lambents.chasers.SimpleColorChaser",
        "lambents.chasers.MultiColorChaser",
        "lambents.chasers.MultiNoSpaceChaser",
        "lambents.chasers.MultiNoSpaceChaserRGB",
        "lambents.rainbows.RainbowChaser",
        "lambents.scapes.BounceScape",
        "lambents.rocker.SolidRocker",
        "lambents.rocker.ChasingRocker",
        "lambents.growth.FireflyHSV",
        "lambents.growth.GMFireflyRandomHSV",
    ]

    brightness_tgt = BrightnessEnum(255)
    brightness_act = 255

    def _handle_loading_config_from_file(self, path):
        try:
            with open(path) as f:
                loaded = load(f)
            print(loaded)
        except FileNotFoundError:
            self.log.info("config file: attempted to load but file not found")
            return
        except YAMLError:
            self.log.info("config file: failed to parse YAML")
            return

        self.log.info("config file: loaded from path")
        if "machines" not in loaded:
            self.log.info("config file: no machines segment in config!")
            return

        machines = loaded['machines']
        for mach in machines:
            try:
                self.init_machine_instance(
                    machine_cls=mach.get("cls"),
                    machine_name=mach.get("name"),
                    machine_kwargs=mach.get("kwargs"),
                    direct=True,
                )
            except MachineLoadException as e:
                print("failed to load from config", e)

    def __init__(self, config=None):
        ApplicationSession.__init__(self, config)
        txaio.start_logging()

        if os.environ.get("LA4_CONFIG_PATH"):
            path = os.environ.get("LA4_CONFIG_PATH")
            self.log.info("config file: specified in environ @ %s" % path)
            self._handle_loading_config_from_file(path)

        else:
            parser = argparse.ArgumentParser()
            parser.add_argument("--config", help="A config file to read from")
            args = parser.parse_args()
            self.log.info("config file: passed in via args @ %s" % args.config)
            self._handle_loading_config_from_file(args.config)

        # pull built-in machines from a config file if specified

        # get redis from config
        # check for passed in config file
        # if no redis/config, try and find the default config file

        # start the tick runner(s). There is one for each of the speeds
        # The tick runner will iterate through all machines and find machines set to the specific enum
        # and step them along ever X seconds
        self._init_timers()

        # In all config cases, then check for machine instances to spin up and spin them out

        # make sure to handle machine funk and log effectively:
        # One crashing machine should not kill the component.
        # Code changes that break configs should not kill the component

        # in the redis config mode, changes to machines are stored in redis for persistence automatically
        # in the file config mode, the persistence save calls are made by function call and (maybe have history?

    def _init_timers(self):
        for enum in TickEnum:
            self.tickers[enum] = task.LoopingCall(self.do_tick, enum=enum)
            self.tickers[enum].start(enum.value)

        print("alll timers initd")

    @docupub(topics=["com.lambentri.edge.la4.machine.link.src."],
             shapes={
                 "com.lambentri.edge.la4.machine.link.src.": {
                     "args.res": ["int*450+"],
                     "id": "str"
                 }
             })
    @inlineCallbacks
    def do_tick(self, enum: TickEnum):
        """A function that gets called hundreds of times per second. Depending on the TickEnum may emit stuff"""
        # TODO rewrite this to use n/100 fractions instead of this *gestures* insanity
        if not self.is_connected():
            print("TICK, not connected, passings")
            return
        if enum == TickEnum.FHUNDREDTHS and self.brightness_act != self.brightness_tgt.value:
            # if self.brightness_tgt == BrightnessEnum.OFF:
            #     if self.brightness_act > 40:
            #         self.brightness_act = 0
            #     else:
            #         self.brightness_act += .15
            # elif abs(self.brightness_tgt.value - self.brightness_act) < .3:
            #     self.brightness_act = self.brightness_tgt.value
            # else:
            #     fract = abs(self.brightness_tgt.value - self.brightness_act) / 10.
            #     # avoid steps too big at the start however, fades may need more tuning of the divisor values
            #     if fract > .5:
            #         fract = fract/3
            #     if self.brightness_act < self.brightness_tgt.value:
            #         self.brightness_act = self.brightness_act + fract
            #     elif self.brightness_act > self.brightness_tgt.value:
            #         self.brightness_act = self.brightness_act - fract
            if self.brightness_act < self.brightness_tgt.value:
                self.brightness_act += 1
            else:
                self.brightness_act -= 1
        # print(self.machines.values())
        operating_machines = filter(
            lambda x: x.speed.value == enum.value and x.running.value ==
            RunningEnum.RUNNING.value, self.machines.values())
        # print(operating_machines)
        for mach in operating_machines:
            res = mach.step()
            if self.brightness_act == 0:
                res = [0] * len(res)
            elif self.brightness_act != 255:
                res = [int(i * self.brightness_act / 255.) for i in res]

            # print(mach.speed.value == TickEnum.TENS.value)
            # if mach.speed.value == TickEnum.TENS.value:
            #     # print(res)
            #     print(vars)
            #     print(vars(mach))
            #
            #     # yield self.publish(f"com.lambentri.edge.la4.machine.link.srx.{mach.id}", res)
            #     # yield self.publish(f"com.lambentri.edge.la4.device.82667777.esp_0602a5", res)
            # print(res[0:12])

            # sparkle publishy test
            # chunked = chunks(res, 3)
            # res = list(itertools.chain.from_iterable([[int(v*random.choice([.20, .40, .60, .80, 1.00]))  for v in i] for i in chunked]))

            options = PublishOptions(retain=True)
            yield self.publish(
                f"com.lambentri.edge.la4.machine.link.src.{mach.id}",
                res,
                id=mach.id,
                options=options)

    def change_machine_ticks(self, machine_name: str, machine_tick: TickEnum):
        pass  # changes the value of the tick enum on the machine

    def _find_class_in_library(self, name):
        for item in self.machine_library:
            if isinstance(item, str):
                try:
                    mod = pydoc.locate("components." + item)
                    if mod.__name__ == name:
                        return mod
                except Exception:
                    pass
            elif issubclass(item, FakeMachine):
                if item.__name__ == name:
                    return item

        else:
            raise MachineLoadException(
                f"Unable to locate a lighting class named '{name}'")

    @wamp.register("com.lambentri.edge.la4.machine.tick_up")
    def machine_tick_up(self, machine_name: str):
        """Ticks up a machine's tick index (slower)"""
        mach = self.machines.get(machine_name)
        mach.speed = mach.speed.next_up(mach.speed)
        return {"speed": mach.speed.name}

    @wamp.register("com.lambentri.edge.la4.machine.tick_dn")
    def machine_tick_dn(self, machine_name: str):
        """Ticks down a machine's tick indes (faster)"""
        mach = self.machines.get(machine_name)
        mach.speed = mach.speed.next_dn(mach.speed)
        return {"speed": mach.speed.name}

    @wamp.register("com.lambentri.edge.la4.machine.library",
                   options=RegisterOptions(invoke="roundrobin"))
    def machine_library_retrieve(self):
        """Lists available machine templates in the library"""
        machine_ret = {}
        for item in self.machine_library:
            if isinstance(item, str):
                # print(item)
                # TODO cache this
                try:
                    component_name = f"components.{item}"
                    # print(component_name)
                    mod = pydoc.locate(component_name)
                    machine_ret[mod.name] = {
                        "desc": mod.desc,
                        "cls": mod.__name__,
                        "grp": mod.grps,
                        "conf": mod.get_config()
                    }
                except AttributeError as e:
                    pass
                    #print(f"Unable to find machine of class '{item}'")
                    # print(e)
                pass  # import inplace and inspect to get k:v
            elif issubclass(item, FakeMachine):
                machine_ret[item.name] = {
                    "desc": item.desc,
                    "cls": item.__name__,
                    "grp": item.grps,
                    "conf": {}
                }
            else:
                print("unknown")
                print(type(item))

        return machine_ret

    # @inlineCallbacks
    @wamp.register("com.lambentri.edge.la4.machine.init")
    def init_machine_instance(self,
                              machine_cls: str,
                              machine_name: str,
                              machine_kwargs: dict,
                              direct: bool = False):
        # TODO FIGURE OUT HOW TO SPECIFY WHICH MACHINE PROVIDER TO USE WHEN CALLING INIT
        # PERHAPS EACH IMPL SHOULD SELF IDENTIFY AND CHECK? FOR NOW THE OTHER MACHINE PROVIDERS ARE UNCONFIGURABLE
        """Starts a machine. NOTE passing in the same machine_name will overwrite whatever was there. This can be leveraged to edit machine details!!"""
        # this is called on startup as well as when adding new configs
        # startup uses direct=True, since we're not trying to de-chunk our chunked frontend UI
        # print(machine_cls)
        cls = self._find_class_in_library(machine_cls)
        # print(machine_kwargs)
        # print(machine_name)
        # print(cls.__name__)

        # build kwargs for cls
        if not direct:
            built_kwargs = {}
            for k, v in cls.meta.config.items():
                if v['cls'].serialize()['name'] == "TupleConfig":
                    matching_config = [
                        vi for ki, vi in machine_kwargs.items()
                        if ki.startswith(k + "-")
                    ]
                    built_kwargs[k] = tuple(matching_config)

        else:
            built_kwargs = machine_kwargs

        if hasattr(cls.meta, "state_status"):
            set_status = cls.meta.state_status
        else:
            set_status = False

        id = f"{cls.__name__}-x-{machine_name}"
        mach = cls(config_params=built_kwargs, set_status=set_status)
        mach.set_id(id)
        mach.set_instance_name(machine_name)

        if hasattr(cls.meta, "state_status"):
            if cls.meta.state_status:
                pass
        self.machines[id] = mach
        res = self.machines[id].step()
        # yield self.publish(f"com.lambentri.edge.la4.device.82667777.esp_0602a5", res)

    # @wamp.register("com.lambentri.edge.la4.machine.edit")
    # def modify_machine_instance(self):
    #     # this allows you to change execution parameters on a machine and restart it
    #     pass

    @wamp.register("com.lambentri.edge.la4.machine.pause")
    def pause_machine(self, machine_name: str):
        """ Toggles a machine's RunningEnum"""
        mach = self.machines.get(machine_name)
        if mach.running == RunningEnum.RUNNING:
            mach.running = RunningEnum.NOTRUNNING
        else:
            mach.running = RunningEnum.RUNNING
        return {"running": mach.running.name}

    @wamp.register("com.lambentri.edge.la4.machine.rm")
    def destroy_machine(self, machine_name: str):
        """Deletes a machine instance"""
        try:
            del self.machines[machine_name]
        except KeyError:
            print(
                f"Someone attempted to delete a machine that doesn't exist : {machine_name}"
            )
        # deletes machine (from live/redis instance)
        schema = MachineDictSerializer()
        serialized = schema.dump({
            "machines": self.machines,
            "speed_enum":
            {k: v.value
             for k, v in TickEnum.__members__.items()}
        })
        return serialized.data

    # pass
    @wamp.register("com.lambentri.edge.la4.machine.list",
                   options=RegisterOptions(invoke="roundrobin"))
    def list_active_machine_instances(self):
        """List all available machines"""
        schema = MachineDictSerializer()
        # print(self.machines)
        serialized = schema.dump({
            "machines": self.machines,
            "speed_enum":
            {k: v.value
             for k, v in TickEnum.__members__.items()}
        })
        return serialized.data

    @inlineCallbacks
    def global_brightness_publish(self):
        yield self.publish("com.lambentri.edge.la4.machine.gb",
                           brightness=self.brightness_tgt.value)

    @wamp.register("com.lambentri.edge.la4.machine.gb.up")
    def global_brightness_value_up(self):
        """Move the global brightness up a single tick"""
        self.brightness_tgt = self.brightness_tgt.next_up(self.brightness_tgt)
        self.global_brightness_publish()
        return {"brightness": self.brightness_tgt.value}

    @wamp.register("com.lambentri.edge.la4.machine.gb.dn")
    def global_brightness_value_dn(self):
        """Move the global brightness down a single tick"""
        self.brightness_tgt = self.brightness_tgt.next_dn(self.brightness_tgt)
        self.global_brightness_publish()
        return {"brightness": self.brightness_tgt.value}

    @wamp.register("com.lambentri.edge.la4.machine.gb.set")
    def global_brightness_value_set(self, value: int):
        """Set the global brightness"""
        self.brightness_tgt = BrightnessEnum(value)
        self.global_brightness_publish()
        return {"brightness": self.brightness_tgt.value}

    @wamp.register("com.lambentri.edge.la4.machine.gb.get")
    def global_brightness_value_get(self):
        """get the global brightness"""
        return {"brightness": self.brightness_tgt.value}

    @inlineCallbacks
    def onJoin(self, details):
        print("joined")
        self.regs = yield self.register(self)
        self.subs = yield self.subscribe(self)
        self.document()
Example no. 14
 @inlineCallbacks
 def onJoin(self, details):
     print('Joined session={}'.format(details.realm))
     reactor.suggestThreadPoolSize(MAX_CONCURRENT_TASKS)
     options = RegisterOptions(concurrency=MAX_CONCURRENT_TASKS, invoke='roundrobin')
     yield self.register(self.get_faces_coordinates, "io.crossbar.demo.cvengine.detect_faces", options)
 def __init__(self, config=None):
     super().__init__(config)
     self.machine_id = get_machine_id()
     self.display = display.Display()
     self.register_options = RegisterOptions(match='exact', invoke='roundrobin')
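
Example no. 14 combines invoke='roundrobin' with concurrency=MAX_CONCURRENT_TASKS, which caps how many invocations the router dispatches to this callee at once; the thread pool is sized to match. A hedged sketch of how the blocking handler can use that pool (the detection helper is assumed):

from twisted.internet.threads import deferToThread

def get_faces_coordinates(self, image_bytes):
    # run the CPU-bound face detection on the reactor thread pool sized above;
    # detect_faces_blocking is a hypothetical helper
    return deferToThread(detect_faces_blocking, image_bytes)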
Example no. 16
            id="ltri-dota-gsi-money-inv",
            options=options)
    yield None


@wamp_component.on_join
@inlineCallbacks
def joined(session, details):
    print("session ready")
    wamp_component.loop_gsi_publish = task.LoopingCall(publish_gsi,
                                                       session=session)
    wamp_component.loop_gsi_publish.start(.05)


@wamp_component.register("com.lambentri.edge.la4.machine.list",
                         options=RegisterOptions(invoke="roundrobin"))
def list_active_machine_instances():
    """List all available machines"""
    schema = MachineDict(
        machines={
            # these don't quite work yet

            # these work
            "dota-gsi-health":
            Machine(name="dota-GSI",
                    iname="HEALTH",
                    id="ltri-dota-gsi-health",
                    desc="Machine State for DOTA Health"),
            "dota-gsi-health-inv":
            Machine(name="dota-GSI",
                    iname="HEALTH_INV",
Example no. 17
class PylieWampApi(ComponentSession):
    """
    Pylie WAMP methods.

    Defines `require_config` to retrieve system and database configuration
    upon WAMP session setup
    """
    def authorize_request(self, uri, claims):
        return True

    @staticmethod
    def get_file_content(path_file):
        """
        Get path_file content, inline or from file
        """

        file_content = path_file['content']
        if file_content is None and path_file['path']:
            if os.path.isfile(path_file['path']):
                with open(path_file['path'], 'r') as fc:
                    file_content = fc.read()
            else:
                raise IOError('Structure file not found: {0}'.format(path_file['path']))

        return file_content

    def _get_config(self, config, name):

        ref_config = pylie_config.get(name).dict()
        ref_config.update(config)

        return ref_config

    def _import_to_dataframe(self, infile, ext='csv'):

        df = PANDAS_IMPORTERS[ext](infile)
        if 'Unnamed: 0' in df:
            del df['Unnamed: 0']

        return df

    def _export_dataframe(self, df, outfile, file_format='csv'):

        if file_format not in PANDAS_EXPORTERS:
            self.log.error('Unsupported file format: {0}'.format(file_format))
            return False

        if hasattr(df, PANDAS_EXPORTERS[file_format]):
            method = getattr(df, PANDAS_EXPORTERS[file_format])

            # Export to file
            with open(outfile, 'w') as outf:
                method(outf)

            return True
        return False

    def _check_file_status(self, filepath):
        if os.path.isfile(filepath):
            return 'completed'
        else:
            self.log.error("File: {} does not exist!".format(filepath))
            return 'failed'

    @endpoint('liedeltag',
              'liedeltag_request',
              'liedeltag_response',
              options=RegisterOptions(invoke='roundrobin'))
    def calculate_lie_deltag(self, request, claims):
        """
        For a detailed input description see:
          pylie/schemas/endpoints/liedeltag_request.json

        For a detailed output description see:
          pylie/schemas/endpoints/liedeltag_response.json
        """

        alpha_beta_gamma = request[u'alpha_beta_gamma']

        # Import the dataframe
        file_string = StringIO(self.get_file_content(request[u'dataframe']))
        dfobject = LIEDataFrame(self._import_to_dataframe(file_string))
        dg_calc = dfobject.liedeltag(params=alpha_beta_gamma,
                                     kBt=request[u'kBt'])

        # Use absolute workdir path to save the file
        workdir = os.path.abspath(request[u'workdir'])

        # Save dataframe
        file_format = request[u'fileformat']
        filepath = os.path.join(workdir, 'liedeltag.{0}'.format(file_format))
        if self._export_dataframe(dg_calc, filepath, file_format=file_format):
            results = dg_calc.to_dict()
        else:
            return None

        return {'liedeltag_file': encoder(filepath), 'liedeltag': results}
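        # A minimal request sketch (hypothetical values; the authoritative
        # field list is in liedeltag_request.json):
        #   {'alpha_beta_gamma': [0.18, 0.5, 0.0],
        #    'kBt': 2.49,
        #    'dataframe': {'path': 'lie.csv', 'content': None},
        #    'workdir': '/tmp',
        #    'fileformat': 'csv'}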

    @endpoint('concat_dataframes',
              'concat_dataframes_request',
              'concat_dataframes_response',
              options=RegisterOptions(invoke='roundrobin'))
    def concat_dataframes(self, request, claims):
        """
        Combine multiple tabular DataFrames into one new DataFrame using
        the Python Pandas library.

        For a detailed input description see:
          pylie/schemas/endpoints/concat_dataframes_request.json

        For a detailed output description see:
          pylie/schemas/endpoints/concat_dataframes_response.json
        """

        # Import all files
        dfs = []
        for dataframe in request[u'dataframes']:
            file_string = StringIO(self.get_file_content(dataframe))
            dfobject = self._import_to_dataframe(file_string)
            if isinstance(dfobject, DataFrame):
                dfs.append(dfobject)

        # Concatenate dataframes
        if len(dfs) > 1:
            concat_df = concat(dfs,
                               ignore_index=request[u'ignore_index'],
                               axis=request[u'axis'],
                               join=request[u'join'])

            # Use absolute workdir path to save the file
            workdir = os.path.abspath(request[u'workdir'])

            file_format = request[u'file_format']
            filepath = os.path.join(workdir, 'joined.{0}'.format(file_format))
            if self._export_dataframe(concat_df,
                                      filepath,
                                      file_format=file_format):
                concat_mdframe = filepath
            else:
                return None
        else:
            return None

        return {'concat_mdframe': encoder(concat_mdframe)}
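        # Under the hood this is pandas.concat; a standalone equivalent of
        # the call above (a sketch, not from the original source):
        #   import pandas as pd
        #   combined = pd.concat([df1, df2], ignore_index=True, axis=0,
        #                        join='outer')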

    @endpoint('calculate_lie_average',
              'calculate_lie_average_request',
              'calculate_lie_average_response',
              options=RegisterOptions(invoke='roundrobin'))
    def calculate_lie_average(self, request, claims):
        """
        Calculate LIE electrostatic and Van der Waals energy averages from
        an MDFrame.

        For a detailed input description see:
          pylie/schemas/endpoints/calculate_lie_average_request.v1.json

        For a detailed output description see:
          pylie/schemas/endpoints/calculate_lie_average_response.v1.json
        """

        mdframe = request[u'mdframe']

        # Use absolute workdir path to save the file
        workdir = os.path.abspath(request[u'workdir'])

        # Import CSV file and average the energies over inlier frames
        file_string = StringIO(self.get_file_content(mdframe))
        liemdframe = LIEMDFrame(read_csv(file_string))
        if 'Unnamed: 0' in liemdframe.columns:
            del liemdframe['Unnamed: 0']

        ave = liemdframe.inliers(
            method=request[u'inlierFilterMethod']).get_average()
        filepath = os.path.join(workdir, 'averaged.csv')
        ave.to_csv(filepath)

        return {'averaged': encoder(filepath)}

    @endpoint('gaussian_filter',
              'gaussian_filter_request',
              'gaussian_filter_response',
              options=RegisterOptions(invoke='roundrobin'))
    def filter_gaussian(self, request, claims):
        """
        Use multivariate Gaussian Distribution analysis to
        filter VdW/Elec values

        For a detailed input description see:
          pylie/schemas/endpoints/gaussian_filter_request.v1.json

        For a detailed output description see:
          pylie/schemas/endpoints/gaussian_filter_response.v1.json
        """

        # Filter DataFrame
        file_string = StringIO(self.get_file_content(request[u'dataframe']))
        dfobject = LIEDataFrame(self._import_to_dataframe(file_string))
        gaussian = FilterGaussian(dfobject, confidence=request[u'confidence'])
        filtered = gaussian.filter()
        self.log.info("Filter detected {0} outliers.".format(
            len(filtered.outliers.cases)))

        # Use absolute workdir path to save the file
        workdir = os.path.abspath(request[u'workdir'])

        # Plot results
        if request[u'plot']:
            outp = os.path.join(workdir, 'gauss_filter.pdf')
            p = gaussian.plot()
            p.savefig(outp)

        # Save filtered dataframe
        file_format = request[u'file_format']
        filepath = os.path.join(workdir,
                                'gauss_filter.{0}'.format(file_format))
        if not self._export_dataframe(
                filtered, filepath, file_format=file_format):
            return None

        return {'gauss_filter': encoder(filepath)}

    @endpoint('filter_stable_trajectory',
              'filter_stable_trajectory_request',
              'filter_stable_trajectory_response',
              options=RegisterOptions(invoke='roundrobin'))
    def filter_stable_trajectory(self, request, claims):
        """
        Use FFT and spline-based filtering to detect and extract stable regions
        in the MD energy trajectory

        For a detailed input description see:
          pylie/schemas/endpoints/filter_stable_request.v1.json

        For a detailed output description see:
          pylie/schemas/endpoints/filter_stable_response.v1.json
        """

        mdframe = request[u'mdframe']

        # Use absolute workdir path to save the file
        workdir = os.path.abspath(request[u'workdir'])

        # Import CSV file and run spline fitting filter
        file_string = StringIO(self.get_file_content(mdframe))
        liemdframe = LIEMDFrame(read_csv(file_string))

        if 'Unnamed: 0' in liemdframe.columns:
            del liemdframe['Unnamed: 0']

        splines = FilterSplines(liemdframe, **request[u'FilterSplines'])
        liemdframe = splines.filter()

        output = {}
        # Report the selected stable regions
        filtered = liemdframe.inliers()

        for pose in filtered.poses:
            stable = filtered.get_stable(pose)
            if stable:
                output['stable_pose_{0}'.format(pose)] = stable

        # Create plots
        if request[u'do_plot']:
            if os.path.exists(workdir):
                currpath = os.getcwd()
                os.chdir(workdir)
                paths = splines.plot(tofile=True,
                                     filetype=request[u'plotFileType'])
                for i, image_paths in enumerate(paths, start=1):
                    fid = '{0}-{1}'.format(os.path.basename(image_paths), i)
                    output[fid] = encoder(image_paths, inline_content=False)
                os.chdir(currpath)
            else:
                self.log.error(
                    'Working directory does not exist: {0}'.format(workdir))

        # Filter the mdframe
        if request[u'do_filter']:
            filepath = os.path.join(workdir, 'mdframe_splinefiltered.csv')
            filtered.to_csv(filepath)
            output['filtered_mdframe'] = encoder(filepath)

        return output

    @endpoint('collect_energy_trajectories',
              'collect_energy_trajectories_request',
              'collect_energy_trajectories_response',
              options=RegisterOptions(invoke='roundrobin'))
    def import_mdene_files(self, request, claims):
        """
        Import GROMACS MD trajectory energy files into a LIEMDFrame.

        The constructed LIEMDFrame should represent simulations for the same
        system with one simulation for the unbound state of the ligand and one
        or more simulations for the bound system with the ligand in potentially
        multiple binding poses.

        For a detailed input description see:
          pylie/schemas/endpoints/collect_energy_trajectories_request.v1.json

        For a detailed output description see:
          pylie/schemas/endpoints/collect_energy_trajectories_response.v1.json
        """

        # Use absolute path to save file
        workdir = os.path.abspath(request[u'workdir'])

        # Collect trajectories
        mdframe = LIEMDFrame()
        vdw_header = request[u'lie_vdw_header']
        ele_header = request[u'lie_ele_header']
        for pose, trj in enumerate(request[u'bound_trajectory']):
            mdframe.from_file(
                self.get_file_content(trj),
                {vdw_header: 'vdw_bound_{0}'.format(pose + 1),
                 ele_header: 'coul_bound_{0}'.format(pose + 1)},
                filetype=request[u'filetype'])
            self.log.debug('Import file: {0}, pose: {1}'.format(trj, pose))

        mdframe.from_file(
            self.get_file_content(request[u'unbound_trajectory']),
            {vdw_header: 'vdw_unbound',
             ele_header: 'coul_unbound'},
            filetype=request[u'filetype'])
        self.log.debug('Import unbound file: {0}'.format(
            request[u'unbound_trajectory'][u'path']))

        # Set the case ID
        mdframe.case = request[u'case']

        # Store to file
        filepath = os.path.join(workdir, 'mdframe.csv')
        mdframe.to_csv(filepath)

        return {'mdframe': encoder(filepath)}

    @endpoint('adan_residue_decomp',
              'adan_residue_decomp_request',
              'adan_residue_decomp_response',
              options=RegisterOptions(invoke='roundrobin'))
    def adan_residue_decomp(self, request, claims):
        """
        For a detailed input description see:
          pylie/schemas/endpoints/adan_residue_decomp_request.v1.json

        For a detailed output description see:
          pylie/schemas/endpoints/adan_residue_decomp_response.v1.json
        """

        # Load the model
        binary = self.get_file_content(request[u'model_pkl'])
        model = dill.loads(binary)

        # Parse gromacs residue decomposition energy files to DataFrame
        decomp_dfs = []
        for dcfileobj in request[u'decompose_files']:
            dcfile = StringIO(self.get_file_content(dcfileobj))
            decomp_dfs.append(parse_gromacs_decomp(dcfile))

        # Run AD test
        ene = ad_residue_decomp(decomp_dfs,
                                model['AD']['decVdw'],
                                model['AD']['decEle'],
                                cases=request[u'cases'])

        # Use absolute path to save file
        workdir = os.path.abspath(request[u'workdir'])

        filepath = os.path.join(workdir, 'adan_residue_decomp.csv')
        ene.to_csv(filepath)

        return {'decomp': ene.to_dict()}

    @endpoint('adan_dene',
              'adan_dene_request',
              'adan_dene_response',
              options=RegisterOptions(invoke='roundrobin'))
    def adan_dene(self, request, claims):
        """
        For a detailed input description see:
          pylie/schemas/endpoints/adan_dene_request.v1.json

        For a detailed output description see:
          pylie/schemas/endpoints/adan_dene_response.v1.json
        """

        # Load the model
        binary = self.get_file_content(request[u'model_pkl'])
        model = dill.loads(binary)

        # Import the dataframe
        file_string = StringIO(self.get_file_content(request[u'dataframe']))
        dfobject = self._import_to_dataframe(file_string)

        # Run AD test
        ene = ad_dene(dfobject,
                      model['AD']['Dene']['CovMatrix'],
                      center=request[u'center'],
                      ci_cutoff=request[u'ci_cutoff'])

        # Use absolute path to save file
        workdir = os.path.abspath(request[u'workdir'])

        filepath = os.path.join(workdir, 'adan_dene.csv')
        ene.to_csv(filepath)

        return {'decomp': ene.to_dict()}

    @endpoint('adan_dene_yrange',
              'adan_dene_yrange_request',
              'adan_dene_yrange_response',
              options=RegisterOptions(invoke='roundrobin'))
    def adan_dene_yrange(self, request, claims):
        """
        For a detailed input description see:
          pylie/schemas/endpoints/adan_dene_yrange_request.v1.json

        For a detailed output description see:
          pylie/schemas/endpoints/adan_dene_yrange_response.v1.json
        """

        # Import the dataframe
        file_string = StringIO(self.get_file_content(request[u'dataframe']))
        dfobject = self._import_to_dataframe(file_string)

        # Run AD test
        ene = ad_dene_yrange(dfobject, request[u'ymin'], request[u'ymax'])

        # Use absolute path to save file
        workdir = os.path.abspath(request[u'workdir'])

        filepath = os.path.join(workdir, 'adan_dene_yrange.csv')
        ene.to_csv(filepath)

        return {'decomp': ene.to_dict()}
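
Every endpoint in this class registers with invoke='roundrobin', so several PylieWampApi workers can serve the same URI. A client-side sketch of one call (the effective URI and call signature depend on how the @endpoint decorator namespaces procedures, so both are assumptions here):

from autobahn.asyncio.component import Component, run

client = Component(transports="ws://localhost:8080/ws", realm="realm1")

@client.on_join
async def joined(session, details):
    # URI, payload values, and call signature are assumptions for this
    # sketch; the field names come from calculate_lie_average above.
    request = {'mdframe': {'path': 'mdframe.csv', 'content': None},
               'workdir': '/tmp',
               'inlierFilterMethod': 'pair'}
    result = await session.call('calculate_lie_average', request, {})
    print(result['averaged'])

if __name__ == '__main__':
    run([client])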
Example n. 18
class SmartCypWampApi(ComponentSession):
    """
    WAMP SMARTCyp endpoint methods
    """

    def authorize_request(self, uri, claims):
        return True

    @endpoint('smartcyp_info', 'smartcyp_info_request', 'smartcyp_info_response',
              options=RegisterOptions(invoke=u'roundrobin'))
    def smartcyp_info(self, request, claims):
        """
        Returns an informative summary of the supported SMARTCyp version
        and configuration.
        """

        info = smartcyp_version_info()
        if info:
            return {'status': 'completed', 'info': info}

        return {'status': 'failed'}

    @endpoint('smartcyp', 'smartcyp_request', 'smartcyp_response', options=RegisterOptions(invoke=u'roundrobin'))
    def smartcyp_prediction(self, request, claims):
        """
        Run a SMARTCyp prediction for a molecule.

        :param request: request payload (see the smartcyp_request schema)
        :param claims:  authorization claims attached to the request
        :return:        dict with the prediction result and a 'status' field
        """

        # Validate input path_file object for mol
        ligand_file = mol_validate_file_object(request['ligand_file'])

        # Run smartcyp
        base_dir = os.environ.get('BASE_WORK_DIR', request.get('base_work_dir'))
        smartcyp = SmartCypRunner(log=self.log, base_work_dir=base_dir)
        result_dict = smartcyp.run(ligand_file['content'],
                                   is_smiles=ligand_file['extension'] == 'smi',
                                   output_format=request['output_format'],
                                   noempcorr=request['noempcorr'])

        # Remove temporary working directory
        smartcyp.delete()

        result_dict['status'] = 'completed' if result_dict['result'] is not None else 'failed'
        return result_dict
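        # A minimal request sketch (hypothetical values; the field names
        # are taken from the code above):
        #   {'ligand_file': {'content': 'CCO', 'extension': 'smi',
        #                    'path': None},
        #    'output_format': 'csv',
        #    'noempcorr': False}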

    @endpoint('docking_info', 'docking_info_request', 'docking_info_response',
              options=RegisterOptions(invoke=u'roundrobin'))
    def plants_info(self, request, claims):
        """
        Returns an informative summary of the supported PLANTS version
        and configuration.
        """

        info = plants_version_info()
        if info:
            return {'status': 'completed', 'info': info}

        return {'status': 'failed'}

    @endpoint('docking', 'docking_request', 'docking_response', options=RegisterOptions(invoke='roundrobin'))
    def plants_docking(self, request, claims):
        """
        Perform a PLANTS (Protein-Ligand ANT System) molecular docking.
        For a detailed description of the input see the file:
        schemas/endpoints/docking-request.v1.json
        """

        # Validate input path_file objects for the protein and ligand files
        protein_file = mol_validate_file_object(request['protein_file'])
        ligand_file = mol_validate_file_object(request['ligand_file'])

        # Run docking
        base_dir = os.environ.get('BASE_WORK_DIR', request.get('base_work_dir'))

        for drop_key in ('protein_file', 'ligand_file', 'base_work_dir'):
            if drop_key in request:
                del request[drop_key]

        docking = PlantsDocking(log=self.log, base_work_dir=base_dir, **request)

        if docking.run(protein_file['content'], ligand_file['content']):
            return {'status': 'completed', 'result': docking.get_results()}

        self.log.error('PLANTS docking failed')
        return {'status': 'failed'}

    @endpoint('docking_statistics', 'docking_statistics_request', 'docking_statistics_response',
              options=RegisterOptions(invoke='roundrobin'))
    def plants_docking_statistics(self, request, claims):
        """
        Return PLANTS docking statistics for docking solutions generated in
        a previous run. Clustering is redone and can optionally be adjusted.
        """

        base_dir = os.environ.get('BASE_WORK_DIR', request.get('base_work_dir'))
        if 'base_work_dir' in request:
            del request['base_work_dir']

        docking = PlantsDocking(log=self.log, base_work_dir=base_dir, **request)

        try:
            results = docking.get_results(structures=request.get('paths'))
        except MDStudioException as error:
            self.log.error(repr(error))
            return {'status': 'failed'}

        if results:
            return {'status': 'completed', 'result': results}

        self.log.error('PLANTS docking failed')
        return {'status': 'failed'}

    @endpoint('docking_structures', 'docking_structures_request', 'docking_structures_response',
              options=RegisterOptions(invoke='roundrobin'))
    def plants_docking_structures(self, request, claims):
        """
        Return PLANTS docked structures
        """

        base_dir = os.environ.get('BASE_WORK_DIR', request.get('base_work_dir'))
        docking = PlantsDocking(log=self.log, base_work_dir=base_dir)

        try:
            results = docking.get_structures(structures=request.get('paths'),
                                             output_format=request.get('output_format', 'mol2'),
                                             include_protein=request.get('include_protein', False),
                                             create_ensemble=request.get('create_ensemble', True))
        except MDStudioException as error:
            self.log.error(repr(error))
            return {'status': 'failed'}

        # Encode files
        if not isinstance(results, list):
            results = [results]

        output = [{"path": None, "extension": request.get('output_format', 'mol2'), "content": mol, "encoding": "utf8"}
                  for mol in results]

        return {'status': 'completed', 'result': output}

    @endpoint('spores_info', 'spores_info_request', 'spores_info_response',
              options=RegisterOptions(invoke=u'roundrobin'))
    def spores_info(self, request, claims):
        """
        Returns an informative summary of the supported SPORES version
        and configuration.
        """

        info = spores_version_info()
        if info:
            return {'status': 'completed', 'info': info}

        return {'status': 'failed'}

    @endpoint('spores', 'spores_request', 'spores_response', options=RegisterOptions(invoke='roundrobin'))
    def spores_run(self, request, claims):
        """
        Perform a SPORES (Structure PrOtonation and REcognition System) structure preparation.
        For a detailed description of the input see the file:
        schemas/endpoints/spores-request.v1.json
        """

        # Validate input path_file object for mol
        mol = mol_validate_file_object(request['mol'])

        base_dir = os.environ.get('BASE_WORK_DIR', request.get('base_work_dir'))
        spores = SporesRunner(log=self.log, base_work_dir=base_dir)
        try:
            result_dict = spores.run(mol['content'],
                                     mode=request['spores_mode'],
                                     input_format=request['input_format'])
        except Exception as e:
            self.log.error(str(e))
            return {'status': 'failed'}
        finally:
            spores.delete()

        return {'status': 'completed', 'result': result_dict}

    @endpoint('som_prediction', 'som_prediction_request', 'som_prediction_response',
              options=RegisterOptions(invoke='roundrobin'))
    def som_prediction(self, request, claims):
        """
        Run a REST-based site-of-metabolism (SOM) prediction
        """

        # Validate input path_file object for the ligand file
        ligand_file = mol_validate_file_object(request['ligand_file'])

        # Run combined structure/reactivity prediction
        base_dir = os.environ.get('BASE_WORK_DIR', request.get('base_work_dir'))
        sompred = CombinedPrediction(base_work_dir=base_dir, **request)
        prediction = sompred.run(ligand_file, filter_clusters=request['filter_clusters'])

        if prediction:
            return {'status': 'completed', 'output': prediction}

        self.log.error('SOM prediction failed')
        return {'status': 'failed'}
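
The common idiom across all these examples is RegisterOptions(invoke='roundrobin'): several sessions register the same procedure URI and the router load-balances invocations among them. A self-contained sketch (URL, realm, and URI are placeholders):

from autobahn.asyncio.component import Component, run
from autobahn.wamp.types import RegisterOptions

def make_worker(worker_id):
    comp = Component(transports="ws://localhost:8080/ws", realm="realm1")

    @comp.on_join
    async def joined(session, details):
        def work(x):
            # Each call lands on exactly one of the registered workers.
            print("worker {} handling call".format(worker_id))
            return x * 2

        await session.register(work, "com.example.work",
                               options=RegisterOptions(invoke="roundrobin"))

    return comp

if __name__ == "__main__":
    # Start three workers; the router alternates invocations among them.
    run([make_worker(i) for i in range(3)])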