Example 1
def assemble_dynamic_pysb(self, mode='local', bucket=EMMAA_BUCKET_NAME):
    """Assemble a version of a PySB model for dynamic simulation."""
    # Regular assembly has to run first
    if not self.assembled_stmts:
        self.run_assembly()
    if 'dynamic' in self.assembly_config:
        logger.info('Assembling dynamic PySB model')
        ap = AssemblyPipeline(self.assembly_config['dynamic'])
        # Run the dynamic pipeline on a copy so that the regular
        # assembled statements are not overwritten
        stmts = deepcopy(self.assembled_stmts)
        self.dynamic_assembled_stmts = ap.run(stmts)
        pa = PysbAssembler()
        pa.add_statements(self.dynamic_assembled_stmts)
        pysb_model = pa.make_model()
        if mode == 's3' and 'gromet' in self.export_formats:
            # Export the model to GroMEt JSON and upload it to S3
            fname = f'gromet_{self.date_str}.json'
            pysb_to_gromet(pysb_model, self.name,
                           self.dynamic_assembled_stmts, fname)
            logger.info(f'Uploading {fname}')
            client = get_s3_client(unsigned=False)
            client.upload_file(fname, bucket,
                               f'exports/{self.name}/{fname}')
        return pysb_model
    logger.info('Did not find dynamic assembly steps')
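A minimal usage sketch, assuming this method lives on EmmaaModel (consistent
with EmmaaModel.load_from_s3 in Example 4) and that the model's config
defines 'dynamic' assembly steps; the model name is illustrative:

from emmaa.model import EmmaaModel

model = EmmaaModel.load_from_s3('test')  # hypothetical model name
pysb_model = model.assemble_dynamic_pysb(mode='local')
if pysb_model is not None:
    # PySB models expose their components, e.g. monomers
    print(pysb_model.monomers)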
Example 2
def test_handler():
    """Test the lambda handler locally."""
    # Mimic the S3 event that a model upload would trigger
    dts = make_date_str()
    key = f'models/test/test_model_{dts}.pkl'
    event = {'Records': [{'s3': {'object': {'key': key}}}]}
    context = None
    res = lambda_handler(event, context)
    print(res)
    assert res['statusCode'] == 200, res
    assert res['result'] == 'SUCCESS', res
    assert res['job_id'], res
    job_id = res['job_id']

    # Wait for the submitted Batch job to finish
    results = {}
    monitor = BatchMonitor(QUEUE, [{'jobId': job_id}])
    monitor.watch_and_wait(result_record=results)
    print(results)
    assert job_id in [job_def['jobId'] for job_def in results['succeeded']], \
        results['failed']

    # Check that result files were written to S3 for today's date
    s3 = get_s3_client()
    s3_res = s3.list_objects(Bucket='emmaa', Prefix='results/test/' + dts[:10])
    print(s3_res.keys())
    assert 'Contents' in s3_res, s3_res
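For context, a minimal sketch of a handler consistent with the assertions
above, assuming AWS Batch via boto3 and reusing the QUEUE constant from the
test module; the job name, job definition, and parameter name are
illustrative assumptions, not EMMAA's actual configuration:

import boto3

def lambda_handler(event, context):
    # Extract the S3 key of the uploaded model pickle from the event
    key = event['Records'][0]['s3']['object']['key']
    batch = boto3.client('batch')
    # Submit a Batch job for the uploaded model (names are assumptions)
    resp = batch.submit_job(jobName='emmaa-test-job', jobQueue=QUEUE,
                            jobDefinition='emmaa-job-def',
                            parameters={'model_key': key})
    return {'statusCode': 200, 'result': 'SUCCESS',
            'job_id': resp['jobId']}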
Example 3
def setup_bucket(add_model=False,
                 add_mm=False,
                 add_tests=False,
                 add_results=False,
                 add_model_stats=False,
                 add_test_stats=False):
    """
    This function creates a new (local) bucket mocking S3 bucket at each call.
    Then all calls to S3 are calling this bucket instead of real S3 bucket.
    Depending on the test we might or might not need the bucket to contain
    different files. For faster computation, only required files for the test
    are generated and stored in the bucket. Files can be added by setting
    corresponding arguments to True when calling this function.
    """
    # Local imports are recommended when using moto
    from emmaa.util import get_s3_client, make_date_str
    from emmaa.model import save_config_to_s3
    from emmaa.model_tests import ModelManager, save_model_manager_to_s3, \
        StatementCheckingTest
    # Create a mock s3 bucket
    client = get_s3_client()
    bucket = client.create_bucket(Bucket=TEST_BUCKET_NAME, ACL='public-read')
    date_str = make_date_str()
    emmaa_model = None
    if add_model:
        # Put config and model files into empty bucket
        config_dict = {
            'ndex': {
                'network': 'a08479d1-24ce-11e9-bb6a-0ac135e8bacf'
            },
            'search_terms': [{
                'db_refs': {
                    'HGNC': '20974'
                },
                'name': 'MAPK1',
                'search_term': 'MAPK1',
                'type': 'gene'
            }],
            'test': {
                'test_corpus': 'simple_tests',
                'default_test_corpus': 'simple_tests'
            },
            'human_readable_name': 'Test Model',
            'assembly': [{
                'function': 'filter_no_hypothesis'
            }, {
                'function': 'map_grounding'
            }, {
                'function': 'filter_grounded_only'
            }, {
                'function': 'filter_human_only'
            }, {
                'function': 'map_sequence'
            }, {
                'function': 'run_preassembly',
                'kwargs': {
                    'return_toplevel': False
                }
            }, {
                'function': 'filter_top_level'
            }]
        }
        save_config_to_s3('test', config_dict, bucket=TEST_BUCKET_NAME)
        emmaa_model = create_model()
        emmaa_model.save_to_s3(bucket=TEST_BUCKET_NAME)
    if add_mm:
        # Add a ModelManager to bucket
        if not emmaa_model:
            emmaa_model = create_model()
        mm = ModelManager(emmaa_model)
        mm.date_str = date_str
        mm.save_assembled_statements(upload_to_db=False,
                                     bucket=TEST_BUCKET_NAME)
        save_model_manager_to_s3('test', mm, bucket=TEST_BUCKET_NAME)
    if add_tests:
        tests = [
            StatementCheckingTest(Activation(Agent('BRAF'), Agent('MAPK1')))
        ]
        test_dict = {
            'test_data': {
                'description': 'Tests for functionality testing'
            },
            'tests': tests
        }
        client.put_object(Body=pickle.dumps(test_dict),
                          Bucket=TEST_BUCKET_NAME,
                          Key='tests/simple_tests.pkl')
    if add_results:
        client.put_object(
            Body=json.dumps(previous_results, indent=1),
            Bucket=TEST_BUCKET_NAME,
            Key=f'results/test/results_simple_tests_{date_str}.json')
    if add_model_stats:
        client.put_object(Body=json.dumps(previous_model_stats, indent=1),
                          Bucket=TEST_BUCKET_NAME,
                          Key=f'model_stats/test/model_stats_{date_str}.json')
    if add_test_stats:
        client.put_object(
            Body=json.dumps(previous_test_stats, indent=1),
            Bucket=TEST_BUCKET_NAME,
            Key=f'stats/test/test_stats_simple_tests_{date_str}.json')
    return client
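A usage sketch of how a test might consume setup_bucket, assuming a moto
release that provides the mock_s3 decorator (implied by the local-import
comment above); the key prefixes asserted here follow the conventions
visible in the other examples:

from moto import mock_s3

@mock_s3
def test_bucket_contents():
    # Only the files this test needs are created in the mock bucket
    client = setup_bucket(add_model=True, add_tests=True)
    resp = client.list_objects(Bucket=TEST_BUCKET_NAME)
    keys = [obj['Key'] for obj in resp['Contents']]
    assert any(key.startswith('models/test') for key in keys), keys
    assert 'tests/simple_tests.pkl' in keys, keys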
Example 4
def run_model_tests_from_s3(model_name,
                            upload_mm=True,
                            upload_results=True,
                            upload_stats=True,
                            registered_queries=True,
                            db=None):
    """Run a given set of tests on a given model, both loaded from S3.

    After loading both the model and the set of tests, model/test overlap
    is determined using a ScopeTestConnector and tests are run.

    Parameters
    ----------
    model_name : str
        Name of EmmaaModel to load from S3.
    upload_mm : Optional[bool]
        Whether to upload a model manager instance to S3 as a pickle file.
        Default: True
    upload_results : Optional[bool]
        Whether to upload test results to S3 in JSON format. Can be set
        to False when running tests. Default: True
    upload_stats : Optional[bool]
        Whether to upload the latest statistics about the model and tests.
        Default: True
    registered_queries : Optional[bool]
        If True, registered queries are fetched from the database and
        executed, the results are then saved to the database. Default: True
    db : Optional[emmaa.db.manager.EmmaaDatabaseManager]
        If given, overrides the default primary database.

    Returns
    -------
    emmaa.model_tests.ModelManager
        Instance of ModelManager containing the model data, list of applied
        tests and the test results.
    emmaa.analyze_test_results.StatsGenerator
        Instance of StatsGenerator containing statistics about the model
        and tests.
    """
    model = EmmaaModel.load_from_s3(model_name)
    test_corpus = model.test_config.get('test_corpus',
                                        'large_corpus_tests.pkl')
    tests = load_tests_from_s3(test_corpus)
    mm = ModelManager(model)
    if upload_mm:
        save_model_manager_to_s3(model_name, mm)
    tm = TestManager([mm], tests)
    tm.make_tests(ScopeTestConnector())
    tm.run_tests()
    results_json_dict = mm.results_to_json()
    results_json_str = json.dumps(results_json_dict, indent=1)
    # Optionally upload test results to S3
    if upload_results:
        client = get_s3_client(unsigned=False)
        date_str = make_date_str()
        result_key = f'results/{model_name}/results_{date_str}.json'
        logger.info(f'Uploading test results to {result_key}')
        client.put_object(Bucket='emmaa',
                          Key=result_key,
                          Body=results_json_str.encode('utf8'))
    # Generate statistics based on the latest round of test results
    tr = TestRound(results_json_dict)
    sg = StatsGenerator(model_name, latest_round=tr)
    sg.make_stats()

    # Optionally upload statistics to S3
    if upload_stats:
        sg.save_to_s3()
    if registered_queries:
        qm = QueryManager(db=db, model_managers=[mm])
        qm.answer_registered_queries(model_name)
    return (mm, sg)
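An illustrative invocation that skips all S3 and database writes, which can
be useful when debugging a model locally; the model name is illustrative:

mm, sg = run_model_tests_from_s3(
    'test',
    upload_mm=False,
    upload_results=False,
    upload_stats=False,
    registered_queries=False)
# mm holds the applied tests and their results; sg holds the aggregate
# statistics generated above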
Example 5
def save_model_manager_to_s3(model_name, model_manager):
    """Save a pickled ModelManager for the given model to S3."""
    client = get_s3_client(unsigned=False)
    logger.info(f'Saving a model manager for {model_name} model to S3.')
    client.put_object(Body=pickle.dumps(model_manager),
                      Bucket='emmaa',
                      Key=f'results/{model_name}/latest_model_manager.pkl')
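A hypothetical counterpart for loading the pickle back, mirroring the fixed
key used above (not necessarily the loader EMMAA itself provides):

import pickle

from emmaa.util import get_s3_client

def load_model_manager_from_s3(model_name):
    # Fetch the latest pickled ModelManager for the given model
    client = get_s3_client(unsigned=False)
    obj = client.get_object(
        Bucket='emmaa',
        Key=f'results/{model_name}/latest_model_manager.pkl')
    return pickle.loads(obj['Body'].read())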