def test_compendia(self):
    """Compendia files are listed without download_url unless an API token is supplied.

    Creates three compendia ComputedFiles (two homo sapiens versions, one
    danio rerio quant-sf-only), then checks that:
      * an unauthenticated listing omits the ``download_url`` field entirely;
      * a listing with a valid API token includes ``download_url`` set to None
        (no real URL is generated outside the cloud environment).
    """

    def make_compendium_file(path, filename, organism, version, s3_key, quant_sf_only):
        # Helper: build and save one compendia ComputedFile. Field values other
        # than the parameters are identical across all three fixtures.
        computed_file = ComputedFile()
        computed_file.absolute_file_path = path
        computed_file.filename = filename
        computed_file.sha1 = "abc"
        computed_file.size_in_bytes = 1
        computed_file.is_smashable = False
        computed_file.is_qn_target = False
        computed_file.result = self.result
        computed_file.is_compendia = True
        computed_file.quant_sf_only = quant_sf_only
        computed_file.compendia_organism = organism
        computed_file.compendium_version = version
        computed_file.s3_bucket = "dr-compendia"
        computed_file.s3_key = s3_key
        computed_file.save()
        return computed_file

    make_compendium_file("/null/1.tsv", "1.tsv", self.homo_sapiens, 1, "hsc1.tsv", False)
    make_compendium_file("/null/2.tsv", "2.tsv", self.homo_sapiens, 2, "hsc2.tsv", False)
    make_compendium_file("/null/1.tsv", "1.tsv", self.danio_rerio, 1, "drc2.tsv", True)

    response = self.client.get(
        reverse("computed_files", kwargs={"version": API_VERSION}), {"is_compendia": True}
    )
    response_json = response.json()["results"]
    self.assertEqual(3, len(response_json))

    # Prove that the download_url field is missing and not None.
    self.assertEqual("NotPresent", response_json[0].get("download_url", "NotPresent"))

    # We don't actually want AWS to generate a temporary URL for
    # us, and it won't unless we're running in the cloud, but if
    # we provide an API Token and use the WithUrl serializer then
    # it will set the download_url field to None rather than
    # generate one.

    # create token
    response = self.client.post(
        reverse("token", kwargs={"version": API_VERSION}),
        json.dumps({"is_activated": True}),
        content_type="application/json",
    )
    token_id = response.json()["id"]

    response = self.client.get(
        reverse("computed_files", kwargs={"version": API_VERSION}),
        {"is_compendia": True},
        HTTP_API_KEY=token_id,
    )
    response_json = response.json()["results"]
    self.assertEqual(3, len(response_json))
    self.assertIsNone(response_json[0]["download_url"])
def _create_result_objects(job_context: Dict) -> Dict:
    """Store and host the result as a ComputationalResult object.

    Persists a ComputationalResult, its archive ComputedFile, and a
    CompendiumResult for the quantpendia build, then uploads the archive to
    S3. On upload failure the ComputedFile record is deleted and a
    ProcessorJobError is raised. Returns ``job_context`` with ``result``
    and ``success`` set.
    """
    archive_path = job_context["archive_path"]
    organism = job_context["compendia_organism"]
    version = job_context["compendium_version"]

    result = ComputationalResult(
        is_ccdl=True,
        is_public=True,
        time_start=job_context["time_start"],
        time_end=job_context["time_end"],
    )
    result.commands.append(" ".join(job_context["formatted_command"]))
    try:
        processor_key = "CREATE_QUANTPENDIA"
        result.processor = utils.find_processor(processor_key)
    except Exception as e:
        # Delegate failure handling (job bookkeeping) to the shared helper.
        return utils.handle_processor_exception(job_context, processor_key, e)
    result.save()

    archive_file = ComputedFile(
        absolute_file_path=archive_path,
        filename=FileUtils.get_filename(archive_path),
        is_smashable=False,
        is_qn_target=False,
        result=result,
        is_compendia=True,
        quant_sf_only=True,
        compendia_organism=organism,
        compendium_version=version,
    )
    # Hashes/sizes are computed from the file at absolute_file_path.
    archive_file.calculate_sha1()
    archive_file.calculate_size()
    archive_file.save()

    CompendiumResult(
        quant_sf_only=True,
        result=result,
        primary_organism=organism,
        compendium_version=version,
    ).save()

    logger.info(
        "Quantpendia created! Uploading to S3.",
        job_id=job_context["job_id"],
        archive_path=archive_path,
        organism_name=organism.name,
        **get_process_stats()
    )

    # Upload the result to S3; key embeds organism, version, and a timestamp.
    timestamp = str(int(time.time()))
    s3_key = organism.name + "_" + str(version) + "_" + timestamp + ".zip"
    if not archive_file.sync_to_s3(S3_COMPENDIA_BUCKET_NAME, s3_key):
        archive_file.delete()
        raise utils.ProcessorJobError(
            "Failed to upload compendia to S3",
            success=False,
            computed_file_id=archive_file.id,
        )

    if settings.RUNNING_IN_CLOUD:
        # The canonical copy now lives in S3; drop the local file.
        archive_file.delete_local_file()

    job_context["result"] = result
    job_context["success"] = True
    return job_context