def test_md5_updater_newmd5(update_ffmeta_event_data_newmd5):
    """Run md5_updater on the 'report' output of the newmd5 event fixture."""
    event = update_ffmeta_event_data_newmd5
    # environment/settings travel in the event's '_tibanna' key
    tibanna = pony_utils.Tibanna(**event.get('_tibanna', {}))
    report_file = Awsem(event).output_files()['report']
    md5_updater('uploaded', report_file, None, tibanna)
def test_register_to_higlass2(used_env):
    """register_to_higlass posts a bigwig exactly once and returns a truthy result."""
    wfoutput_bucket = 'elasticbeanstalk-fourfront-webdev-wfoutput'
    bw_key = 'a940cf00-6001-473e-80d1-1e4a43866863/4DNFI75GAT6T.bw'
    tibanna = pony_utils.Tibanna(used_env)
    # stub out the HTTP layer so no real higlass server is contacted
    with mock.patch('requests.post') as mocked_post:
        res = register_to_higlass(tibanna, wfoutput_bucket, bw_key, 'bigwig', 'vector')
        mocked_post.assert_called_once()
        LOG.info(res)
        assert res
def real_handler(event, context):
    """Check the status of a finished awsem run and patch metadata accordingly.

    Verifies each output file reported by the awsem run, updates the workflow-run
    (ff_meta) and processed-file (pf_meta) metadata in the portal, and returns the
    event with the refreshed metadata dicts.  Raises Exception (failing the step
    function) on any upstream error, output-count mismatch, or failed upload.
    '''
    this is to check if the task run is done:
    http://docs.sevenbridges.com/reference#get-task-execution-details
    '''
    """
    # get data; '_tibanna' is used to automatically determine the environment
    tibanna_settings = event.get('_tibanna', {})
    tibanna = pony_utils.Tibanna(tibanna_settings['env'], settings=tibanna_settings)
    ff_meta = pony_utils.create_ffmeta_awsem(
        app_name=event.get('ff_meta').get('awsem_app_name'),
        **event.get('ff_meta'))
    pf_meta = [pony_utils.ProcessedFileMetadata(**pf) for pf in event.get('pf_meta')]
    # ensure this is always initialized; updaters may replace it with a dict of
    # fields to merge into ff_meta at the end
    patch_meta = False
    awsem = pony_utils.Awsem(event)
    # go through this and replace awsemfile_report with awsf format
    # actually interface should be look through ff_meta files and call
    # give me the status of this thing from the runner, and runner.output_files.length
    # so we just build a runner with interface to sbg and awsem
    # runner.output_files.length()
    # runner.output_files.file.status
    # runner.output_files.file.loc
    # runner.output_files.file.get

    # an upstream error short-circuits: record it on the workflow run and fail
    if event.get('error', False):
        ff_meta.run_status = 'error'
        ff_meta.description = event.get('error')
        ff_meta.patch(key=tibanna.ff_keys)
        raise Exception(event.get('error'))

    awsem_output = awsem.output_files()
    awsem_output_extra = awsem.secondary_output_files()
    ff_output = len(ff_meta.output_files)
    if len(awsem_output) != ff_output:
        ff_meta.run_status = 'error'
        ff_meta.description = "%d files output expected %s" % (
            ff_output, len(awsem_output))
        ff_meta.patch(key=tibanna.ff_keys)
        # BUGFIX: %d was fed the awsem_output dict itself (TypeError); use its length
        raise Exception(
            "Failing the workflow because outputed files = %d and ffmeta = %d" %
            (len(awsem_output), ff_output))

    # primary output files: run the per-type updater and enrich matching pf_meta
    # NOTE: iteritems() was Python-2-only; items() behaves the same here
    for _, awsemfile in awsem_output.items():
        upload_key = awsemfile.key
        status = awsemfile.status
        # BUGFIX: was print("...%s", status) — two args instead of %-formatting
        print("awsemfile res is %s" % status)
        if status == 'COMPLETED':
            patch_meta = OUTFILE_UPDATERS[awsemfile.argument_type](
                'uploaded', awsemfile, ff_meta, tibanna)
            if pf_meta:
                for pf in pf_meta:
                    if pf.accession == awsemfile.accession:
                        try:
                            add_higlass_to_pf(pf, tibanna, awsemfile)
                        except Exception as e:
                            raise Exception("failed to register to higlass %s" % e)
                        try:
                            add_md5_filesize_to_pf(pf, awsemfile)
                        except Exception as e:
                            raise Exception(
                                "failed to update processed file metadata %s" % e)
        elif status in ['FAILED']:
            patch_meta = OUTFILE_UPDATERS[awsemfile.argument_type](
                'upload failed', awsemfile, ff_meta, tibanna)
            ff_meta.run_status = 'error'
            ff_meta.patch(key=tibanna.ff_keys)
            raise Exception("Failed to export file %s" % (upload_key))

    # secondary (extra) output files: only md5/filesize on the matching pf_meta
    for _, awsemfile in awsem_output_extra.items():
        upload_key = awsemfile.key
        status = awsemfile.status
        print("awsemfile res is %s" % status)
        if status == 'COMPLETED':
            if pf_meta:
                for pf in pf_meta:
                    if pf.accession == awsemfile.accession:
                        try:
                            add_md5_filesize_to_pf_extra(pf, awsemfile)
                        except Exception as e:
                            raise Exception(
                                "failed to update processed file metadata %s" % e)
        elif status in ['FAILED']:
            ff_meta.run_status = 'error'
            ff_meta.patch(key=tibanna.ff_keys)
            raise Exception("Failed to export file %s" % (upload_key))

    # if we got all the awsemfiles let's go ahead and update our ff_metadata object
    ff_meta.run_status = "complete"
    # allow for a simple way for updater to add appropriate meta_data
    if patch_meta:
        ff_meta.__dict__.update(patch_meta)
    # add postrunjson log file to ff_meta as a url
    ff_meta.awsem_postrun_json = get_postrunjson_url(event)
    # make all the file awsemfile meta-data stuff here
    # TODO: fix bugs with ff_meta mapping for output and input file
    try:
        ff_meta.patch(key=tibanna.ff_keys)
    except Exception as e:
        raise Exception("Failed to update run_status %s" % str(e))
    # patch processed files - update only status, extra_files, md5sum and file_size
    if pf_meta:
        patch_fields = ['uuid', 'status', 'extra_files', 'md5sum', 'file_size']
        try:
            for pf in pf_meta:
                print(pf.as_dict())
                pf.patch(key=tibanna.ff_keys, fields=patch_fields)
        except Exception as e:
            raise Exception("Failed to update processed metadata %s" % str(e))
    event['ff_meta'] = ff_meta.as_dict()
    event['pf_meta'] = [_.as_dict() for _ in pf_meta]
    return event