def setUpClass(cls):
    """Create the fixtures shared by this test class: a Django user, a
    COPO profile, a cgcore datafile and a Dataverse submission record.

    Fixture JSON is loaded from the sibling ``fixtures`` directory.
    """
    settings.UNIT_TESTING = True
    fixtures_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "fixtures")
    # create user
    cls.user = User.objects.create_user(username='******',
                                        first_name="jonny",
                                        last_name="appleseed",
                                        email='*****@*****.**',
                                        password='******')
    cls.user.save()
    # create profile — tie it to the user just created rather than
    # assuming it was assigned primary key 1 (the companion setUpClass in
    # this file already uses cls.user.id)
    p_dict = {"copo_id": "000000000",
              "description": "Test Description",
              "user_id": cls.user.id,
              "title": "Test Title"}
    cls.pid = Profile().save_record(dict(), **p_dict)
    # create datafile from the cgcore fixture, pointing it at a real file
    # on disk so upload code has something to read
    with open(os.path.join(fixtures_dir, "dummy_datafile_cgcore.json")) as f:
        p_dict = json.loads(f.read())
    p_dict["file_location"] = os.path.join(fixtures_dir, "fish.png")
    p_dict["name"] = "fish.png"
    p_dict["profile_id"] = str(cls.pid["_id"])
    cls.d = DataFile().get_collection_handle().insert(p_dict)
    # create submission record referencing the datafile above
    with open(os.path.join(fixtures_dir, "dummy_cgcore_dataverse_submission_existing.json")) as f:
        p_dict = json.loads(f.read())
    p_dict["bundle_meta"][0]["file_path"] = os.path.join(fixtures_dir, "fish.png")
    p_dict["bundle_meta"][0]["file_id"] = str(cls.d)
    p_dict["profile_id"] = str(cls.pid["_id"])
    p_dict["bundle"].append(str(cls.d))
    cls.s_dv = Submission().get_collection_handle().insert(p_dict)
def get_info_for_new_dataverse(request):
    """Prepopulate the dataverse creation form with currently available
    metadata values for the profile in the session.

    Returns an HttpResponse whose body is a JSON object with the form's
    initial values (dvAlias, dvPerson, dsAffiliation, dvName, dsTitle,
    dsDescriptionValue, dsSubject).
    """
    out = dict()
    p_id = request.session['profile_id']
    profile = Profile().get_record(p_id)
    out['dvAlias'] = str(profile['title']).lower()
    out['dvPerson'] = list(Person(p_id).get_people_for_profile())
    orcid = Orcid().get_orcid_profile(request.user)
    try:
        affiliation = orcid.get('op', {}).get('activities_summary', {}) \
            .get('employments', {}).get('employment_summary', {})[0] \
            .get('organization', "").get('name', "")
    except (AttributeError, IndexError, KeyError, TypeError):
        # narrow exceptions instead of a bare except: the chained lookups
        # above can only fail in these ways on a missing/odd ORCID shape;
        # anything else (e.g. a programming error) should propagate
        affiliation = ""
    out['dsAffiliation'] = affiliation
    df = list(DataFile().get_for_profile(p_id))
    # guard against a profile with no datafiles yet — fall back to empty
    # metadata rather than raising IndexError
    first_file = df[0] if df else {}
    out['dvName'] = profile.get('title', "")
    out['dsTitle'] = first_file.get('description', {}).get('attributes', {}) \
        .get('title_author_contributor', {}).get('dcterms:title', "")
    out['dsDescriptionValue'] = first_file.get('description', {}).get('attributes', {}) \
        .get('subject_description', {}).get('dcterms:description', "")
    out['dsSubject'] = first_file.get('description', {}).get('attributes', {}) \
        .get('subject_description', {}).get('dcterms:subject', "")
    return HttpResponse(json_util.dumps(out))
def _get_dataset(self, profile_id, dataFile_ids, dataverse):
    """Return the dataset details stored on the profile, creating and
    persisting a new dataset first if none exist yet."""
    details = Profile().check_for_dataset_details(profile_id)
    if details:
        return details
    # no dataset recorded against this profile yet — create one
    details = self._create_dataset(dataFile_ids=dataFile_ids, dataverse=dataverse)
    Profile().add_dataverse_dataset_details(profile_id, details)
    return details
def _get_dataverse(self, profile_id):
    """Return the dataverse details stored on the profile, creating and
    persisting a new dataverse first if none exist yet."""
    dv_details = Profile().check_for_dataverse_details(profile_id)
    if not dv_details:
        # no dataverse recorded against this profile yet — create one
        dv_details = self._create_dataverse(profile_id)
        Profile().add_dataverse_details(profile_id, dv_details)
    return dv_details
def profile_tests(self):
    """Submit the new-profile form and verify exactly one profile results."""
    form_data = {'study_abstract': 'test abstract', 'study_title': 'test title'}
    response = self.client.post(reverse('copo:new_profile'), form_data, follow=True)
    user_profiles = Profile().get_for_user(self.user.id)
    self.assertEqual(user_profiles.count(), 1,
                     'Wrong number of Profiles detected, should be one.')
    record = user_profiles[0]
    self.profile_id = record['_id']
    # the sentinel id means the external ID-issuing service was unreachable
    self.assertNotEqual(record['copo_id'], '0000000000000',
                        'COPO ID not produced, are you able to ping the ID issuing server?')
    self.assertEqual(response.status_code, 200, 'Page not rendered correctly.')
    self.assertTemplateUsed(response, 'copo/landing_page.html',
                            'Correct Template not used, should have returned to index page.')
def do_row_data(self):
    """Attach table row data for the active component to the context."""
    record_object = self.param_dict.get("record_object", dict())
    # most components share the same generator and the same kwargs
    shared_kwargs = dict(profile_id=self.profile_id, component=self.component)
    dispatch = dict(
        publication=(htags.generate_table_records, shared_kwargs),
        person=(htags.generate_table_records, shared_kwargs),
        sample=(htags.generate_table_records, shared_kwargs),
        profile=(htags.generate_copo_profiles_data,
                 dict(profiles=Profile().get_for_user())),
        datafile=(htags.generate_table_records, shared_kwargs),
        repository=(htags.generate_table_records, shared_kwargs),
    )
    # NB: use an empty dictionary as a parameter to functions that define
    # zero arguments
    if self.component in dispatch:
        func, kwargs = dispatch[self.component]
        self.context["table_data"] = func(**kwargs)
        self.context["component"] = self.component
    return self.context
def get_continuation_studies():
    """Return the current user's profiles as {value, label} option pairs
    for a selection widget."""
    user = data_utils.get_current_user()
    profiles = Profile().get_for_user(user.id)
    output = list()
    for p in profiles:
        # profile records are plain mongo dicts, so subscript access is
        # required — attribute access (p.title / p._id) raises
        # AttributeError; this matches the convention used elsewhere in
        # this module (e.g. generate_copo_profiles_data uses pr["_id"])
        output.append({"value": p["title"], "label": p["_id"]})
    return output
def copo_data(request, profile_id):
    """Render the datafiles page for the given profile, remembering the
    current URL and profile id in the session."""
    request.session['datafile_url'] = request.path
    request.session["profile_id"] = profile_id
    context = {
        'profile_id': profile_id,
        'profile': Profile().get_record(profile_id),
        'table_columns': jsonpickle.encode(htags.generate_table_columns("datafile")),
    }
    return render(request, 'copo/copo_data.html', context)
def view_copo_profile(request, profile_id):
    """Show a single profile page, or the error page if the id is unknown."""
    request.session["profile_id"] = profile_id
    profile = Profile().get_record(profile_id)
    if not profile:
        # unknown profile id — show the generic error page
        return render(request, 'copo/error_page.html')
    return render(request, 'copo/copo_profile.html',
                  {"p_id": profile_id,
                   'counts': ProfileInfo(profile_id).get_counts(),
                   "profile": profile})
def tearDownClass(cls):
    """Remove the fixtures created for this test class."""
    # NOTE(review): user is fetched by hard-coded primary key 1 — fragile
    # if the test database assigns a different pk; confirm against the
    # matching setUpClass
    u = User.objects.get(pk=1)
    u.delete()
    Profile().get_collection_handle().remove({"copo_id": "000000000"})
    DataFile().get_collection_handle().remove({"_id": cls.d})
    # Submission().get_collection_handle().remove({"_id": cls.s_dv})
    # NOTE(review): s_ckan_new / s_ckan_existing are not created by the
    # setUpClass visible here (which creates s_dv, whose removal is the
    # commented-out line above) — verify this teardown pairs with the
    # right setup
    Submission().get_collection_handle().remove({"_id": cls.s_ckan_new})
    Submission().get_collection_handle().remove({"_id": cls.s_ckan_existing})
def tearDownClass(cls):
    """Remove the user, profile, datafile, repository and submission
    fixtures created in setUpClass."""
    u = User.objects.get(username=settings.TEST_USER_NAME)
    u.delete()
    Profile().get_collection_handle().remove({"copo_id": "000000000"})
    # NOTE(review): datafiles are matched by a marker flag rather than by
    # id — assumes the datafile fixture sets test_file=True; confirm
    # against dummy_datafile.json
    DataFile().get_collection_handle().remove({"test_file": True})
    Repository().get_collection_handle().remove({"_id": cls.r["_id"]})
    Submission().get_collection_handle().remove({"_id": cls.s_dv})
    Submission().get_collection_handle().remove({"_id": cls.s_ds_new})
    Submission().get_collection_handle().remove({"_id": cls.s_ds_existing})
def do_study_xml(sub_id):
    """Build and pretty-print the ENA STUDY_SET XML for a submission.

    The first datafile in the submission bundle supplies the study-type
    attributes; the owning profile supplies title and abstract.
    """
    submission = Submission().get_record(sub_id)
    datafiles = [DataFile().get_record(d) for d in submission["bundle"]]
    first_df = datafiles[0]
    profile = Profile().get_record(first_df["profile_id"])

    # STUDY_SET root with the SRA schema location
    study_set = Element("STUDY_SET")
    study_set.set("xmlns:xsi", "http://www.w3.org/2001/XMLSchema-instance")
    study_set.set("xsi:noNamespaceSchemaLocation",
                  "ftp://ftp.sra.ebi.ac.uk/meta/xsd/sra_1_5/SRA.study.xsd")

    # STUDY — aliased by the submission's mongo id
    study = SubElement(study_set, "STUDY")
    study.set("alias", str(submission["_id"]))
    study.set("center_name",
              first_df["description"]["attributes"]["study_type"]["study_analysis_center_name"])

    # DESCRIPTOR: title, controlled-vocabulary study type, abstract
    descriptor = SubElement(study, "DESCRIPTOR")
    SubElement(descriptor, "STUDY_TITLE").text = profile["title"]
    study_type = SubElement(descriptor, "STUDY_TYPE")
    study_type.set("existing_study_type",
                   get_study_type_enumeration(
                       first_df["description"]["attributes"]["study_type"]["study_type"]))
    SubElement(descriptor, "STUDY_ABSTRACT").text = profile["description"]

    # STUDY_ATTRIBUTES — currently just the submission date; further
    # attributes can be appended here
    study_attributes = SubElement(study, "STUDY_ATTRIBUTES")
    date_attribute = SubElement(study_attributes, "STUDY_ATTRIBUTE")
    SubElement(date_attribute, "TAG").text = "Submission Date"
    SubElement(date_attribute, "VALUE").text = datetime.datetime.now().strftime('%Y-%m-%d')

    return prettify(study_set)
def generate_copo_shared_profiles_data(profiles=None):
    """Build table rows for a list of shared profile records.

    :param profiles: iterable of profile records (mongo dicts); defaults
        to an empty list
    :return: dict with a single ``dataSet`` key holding one row per
        profile; each row is a list of {header, data, key} cells
    """
    # avoid a mutable default argument; None stands in for "no profiles"
    if profiles is None:
        profiles = list()
    data_set = list()
    if profiles:
        # fetch the profile schema once, not once per record
        schema_fields = Profile().get_schema().get("schema_dict")
        for pr in profiles:
            temp_set = [{"header": "ID", "data": str(pr["_id"]), "key": "_id"}]
            for f in schema_fields:
                if f.get("show_in_table", True):
                    temp_set.append({"header": f.get("label", str()),
                                     "data": resolve_control_output(pr, f),
                                     "key": f["id"].split(".")[-1]})
            data_set.append(temp_set)
    return dict(dataSet=data_set)
def do_table_data(self):
    """Attach table data for the active component to the context."""
    # most components share the same generator and the same kwargs
    shared_kwargs = dict(profile_id=self.profile_id, component=self.component)
    dispatch = dict(
        annotation=(htags.generate_copo_table_data, shared_kwargs),
        publication=(htags.generate_table_records, shared_kwargs),
        person=(htags.generate_table_records, shared_kwargs),
        datafile=(htags.generate_table_records, shared_kwargs),
        sample=(htags.generate_table_records, shared_kwargs),
        source=(htags.generate_table_records, shared_kwargs),
        submission=(htags.generate_table_records, shared_kwargs),
        repository=(htags.generate_table_records, shared_kwargs),
        metadata_template=(htags.generate_table_records, shared_kwargs),
        profile=(htags.generate_copo_profiles_data,
                 dict(profiles=Profile().get_all_profiles())),
    )
    # NB: use an empty dictionary as a parameter for listed functions that
    # define zero arguments
    if self.component in dispatch:
        func, kwargs = dispatch[self.component]
        self.context["table_data"] = func(**kwargs)
        self.context["component"] = self.component
    return self.context
def submit(self, sub_id, dataFile_ids):
    """Physically transfer the submission's pending datafiles via aspera
    and return the transfer outcome."""
    submission_record = Submission().get_record(sub_id)
    # bundle_meta, if present, should provide a better picture of what
    # datafiles need to be uploaded
    if "bundle_meta" in submission_record:
        dataFile_ids = [entry["file_id"]
                        for entry in submission_record['bundle_meta']
                        if not entry["upload_status"]]
    path2library = os.path.join(BASE_DIR, REPOSITORIES['ASPERA']['resource_path'])
    # change these to be collected properly
    user_name = REPOSITORIES['ASPERA']['user_token']
    password = REPOSITORIES['ASPERA']['password']
    # create transfer record to track the upload
    transfer_token = RemoteDataFile().create_transfer(sub_id)['_id']
    self.submission = Submission().get_record(sub_id)
    self.profile = Profile().get_record(self.submission['profile_id'])
    remote_path = d_utils.get_ena_remote_path(sub_id)
    # collect the on-disk location of every file in the bundle
    file_path = []
    for f_id in dataFile_ids:
        mongo_file = DataFile().get_record(ObjectId(f_id))
        self.d_files.append(mongo_file)
        file_path.append(mongo_file.get("file_location", str()))
    return self._do_aspera_transfer(transfer_token=transfer_token,
                                    user_name=user_name,
                                    password=password,
                                    remote_path=remote_path,
                                    file_path=file_path,
                                    path2library=path2library,
                                    sub_id=sub_id)
def generate_copo_profiles_data(profiles=None):
    """Build table rows for a list of profile records, with a trailing
    cell flagging whether each profile is shared.

    :param profiles: iterable of profile records (mongo dicts); defaults
        to an empty list
    :return: dict with a single ``dataSet`` key holding one row per
        profile; each row is a list of {header, data, key} cells
    """
    # avoid a mutable default argument; None stands in for "no profiles"
    if profiles is None:
        profiles = list()
    data_set = list()
    if profiles:
        # fetch the profile schema once, not once per record
        schema_fields = Profile().get_schema().get("schema_dict")
        for pr in profiles:
            temp_set = [{"header": "ID", "data": str(pr["_id"]), "key": "_id"}]
            for f in schema_fields:
                if f.get("show_in_table", True):
                    temp_set.append({"header": f.get("label", str()),
                                     "data": resolve_control_output(pr, f),
                                     "key": f["id"].split(".")[-1]})
            # add whether this is a shared profile
            temp_set.append({"header": None,
                             "data": pr.get('shared', False),
                             "key": 'shared_profile'})
            data_set.append(temp_set)
    return dict(dataSet=data_set)
def copo_submissions(request, profile_id):
    """Render the submissions page for the given profile."""
    request.session["profile_id"] = profile_id
    context = {
        'profile_id': profile_id,
        'profile': Profile().get_record(profile_id),
    }
    return render(request, 'copo/copo_submission.html', context)
def get_dataset_details(request):
    """Return (as JSON) any dataset details recorded for the requested profile."""
    details = Profile().check_for_dataset_details(request.GET['profile_id'])
    return HttpResponse(json.dumps(details))
def setUpClass(cls):
    """Create the shared fixtures: a user, a profile, a datafile, a
    dataverse repository and three submission records (dataverse, new
    dspace, existing dspace).

    Fixture JSON is loaded from the sibling ``fixtures`` directory; the
    dspace submissions query the public demo DSpace REST API for a
    collection/item uuid to target.
    """
    cls.factory = RequestFactory()
    settings.UNIT_TESTING = True
    fixtures_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "fixtures")
    # create user
    cls.user = User.objects.create_user(username='******',
                                        first_name=settings.TEST_USER_NAME,
                                        last_name="appleseed",
                                        email='*****@*****.**',
                                        password='******')
    cls.user.save()
    # create profile
    p_dict = {
        "copo_id": "000000000",
        "description": "Test Description",
        "user_id": cls.user.id,
        "title": "Test Title"
    }
    cls.pid = Profile().save_record(dict(), **p_dict)
    # create datafile pointing at a real file on disk
    with open(os.path.join(fixtures_dir, "dummy_datafile.json")) as f:
        p_dict = json.loads(f.read())
    p_dict["file_location"] = os.path.join(fixtures_dir, "fish.png")
    p_dict["name"] = "fish.png"
    p_dict["profile_id"] = str(cls.pid["_id"])
    cls.d = DataFile().get_collection_handle().insert(p_dict)
    # create dataverse repository
    with open(os.path.join(fixtures_dir, "dummy_dataverse_repo.json")) as f:
        p_dict = json.loads(f.read())
    cls.r = Repository().save_record(dict(), **p_dict)
    # create submission record for dataverse
    with open(os.path.join(fixtures_dir, "dummy_dataverse_submission.json")) as f:
        p_dict = json.loads(f.read())
    p_dict["bundle_meta"][0]["file_path"] = os.path.join(fixtures_dir, "fish.png")
    p_dict["bundle_meta"][0]["file_id"] = str(cls.d)
    p_dict["profile_id"] = str(cls.pid["_id"])
    p_dict["bundle"].append(str(cls.d))
    cls.s_dv = Submission().get_collection_handle().insert(p_dict)
    # create submission record for new dspace
    with open(os.path.join(fixtures_dir, "dummy_dspace_submission.json")) as f:
        p_dict = json.loads(f.read())
    p_dict["bundle_meta"][0]["file_path"] = os.path.join(fixtures_dir, "fish.png")
    p_dict["bundle_meta"][0]["file_id"] = str(cls.d)
    p_dict["profile_id"] = str(cls.pid["_id"])
    p_dict["bundle"].append(str(cls.d))
    p_dict["meta"]["new_or_existing"] = "new"
    # query for a collection uuid — GET, not POST: posting to the
    # collections endpoint attempts to create a collection rather than
    # list the existing ones
    resp = requests.get("http://demo.dspace.org/rest/collections")
    collections = json.loads(resp.content.decode("utf-8"))
    collection = collections[0]
    p_dict["meta"]["identifier"] = collection["uuid"]
    cls.s_ds_new = Submission().get_collection_handle().insert(p_dict)
    # create submission record for existing dspace
    with open(os.path.join(fixtures_dir, "dummy_dspace_submission.json")) as f:
        p_dict = json.loads(f.read())
    p_dict["bundle_meta"][0]["file_path"] = os.path.join(fixtures_dir, "fish.png")
    p_dict["bundle_meta"][0]["file_id"] = str(cls.d)
    p_dict["profile_id"] = str(cls.pid["_id"])
    p_dict["bundle"].append(str(cls.d))
    p_dict["meta"]["new_or_existing"] = "existing"
    # query for an item uuid — GET for the same reason as above
    resp = requests.get("http://demo.dspace.org/rest/items")
    items = json.loads(resp.content.decode("utf-8"))
    item = items[0]
    p_dict["meta"]["identifier"] = item["uuid"]
    p_dict["item_id"] = item["uuid"]
    cls.s_ds_existing = Submission().get_collection_handle().insert(p_dict)
    cls.ckan_api = "http://demo.ckan.org/api/3/action/"
def test_get_profile(self):
    """The profile created in setUpClass should be retrievable by its id."""
    p = Profile().get_record(self.pid["_id"])
    # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12)
    self.assertEqual(p["description"], "Test Description",
                     "Error creating profile")
def view_groups(request):
    """Render the groups page with the user's profiles and owned groups."""
    # g = Group().create_group(description="test descrition")
    context = {
        'request': request,
        'profile_list': cursor_to_list(Profile().get_for_user()),
        'group_list': cursor_to_list(CopoGroup().get_by_owner(request.user.id)),
    }
    return render(request, 'copo/copo_group.html', context)
def copo_repository(request, profile_id):
    """Render the repositories page for the given profile."""
    context = {
        'profile_id': profile_id,
        'profile': Profile().get_record(profile_id),
    }
    return render(request, 'copo/copo_repo.html', context)
def do_profiles_counts(self):
    """Attach per-profile component counts for every profile to the context."""
    all_profiles = Profile().get_all_profiles()
    self.context["profiles_counts"] = htags.generate_copo_profiles_counts(all_profiles)
    return self.context
def do_analysis_xml(sub_id):
    """Build and pretty-print the ENA ANALYSIS XML for a submission.

    The first datafile in the submission bundle supplies the study-type
    metadata, file name/hash and analysis attributes.
    """
    sub = Submission().get_record(sub_id)
    dfs = list()
    for d in sub["bundle"]:
        dfs.append(DataFile().get_record(d))
    df = dfs[0]
    p = Profile().get_record(df["profile_id"])
    # NOTE(review): analysis_set is created but never has the analysis
    # appended (the append below is commented out) and the function
    # returns prettify(analysis), not the set — confirm this is intended
    analysis_set = Element("ANALYSIS_SET")
    analysis = Element("ANALYSIS")
    alias = make_alias(sub)
    # NOTE(review): "_anaysis" looks like a typo for "_analysis", but the
    # alias may already be registered with ENA under this spelling —
    # confirm before changing
    analysis.set("alias", alias + "_anaysis")
    center_name = df["description"]["attributes"]["study_type"]["study_analysis_center_name"]
    analysis.set("analysis_center", center_name)
    broker_name = df["description"]["attributes"]["study_type"]["study_broker"]
    analysis.set("broker_name", broker_name)
    # analysis_date is read but the date attribute below is disabled, so
    # it is currently unused
    analysis_date = df["description"]["attributes"]["study_type"]["study_analysis_date"]
    # ad = analysis_date.split('/')
    # d = datetime.date(int(ad[2]), int(ad[1]), int(ad[0]))
    # analysis.set("anlalysis_date", d)
    # analysis_set.append(analysis)
    title = Element("TITLE")
    title.text = df["description"]["attributes"]["study_type"]["study_title"]
    analysis.append(title)
    description = Element("DESCRIPTION")
    description.text = df["description"]["attributes"]["study_type"]["study_description"]
    analysis.append(description)
    # reference the parent study by the submission's mongo id
    study_ref = Element("STUDY_REF")
    study_ref.set("refname", str(sub["_id"]))
    analysis.append(study_ref)
    # TODO - Sample is not required for annotation submissions....ENA
    # documentation saying it is is not correct. Will remove these stages
    # from the wizard at some point
    s_ref = get_sample_ref(df)
    sample_ref = Element("SAMPLE_REF")
    sample_ref.set("refname", s_ref)
    # analysis.append(sample_ref)
    # this is a sequence-annotation analysis
    analysis_type = Element("ANALYSIS_TYPE")
    SubElement(analysis_type, "SEQUENCE_ANNOTATION")
    analysis.append(analysis_type)
    files = Element("FILES")
    file = Element("FILE")
    filename = df["name"]
    file_hash = df["file_hash"]
    # remote filename is <submission id>/<username>/<file name>
    fqfn = str(sub_id) + '/' + data_utils.get_current_user().username + '/' + filename
    file.set("filename", fqfn)
    file.set("filetype", "tab")
    file.set("checksum_method", "MD5")
    file.set("checksum", file_hash)
    file.set("unencrypted_checksum", file_hash)
    files.append(file)
    analysis.append(files)
    # one ANALYSIS_ATTRIBUTE per attached study-sample attribute
    attrs = Element("ANALYSIS_ATTRIBUTES")
    for a in df["description"]["attributes"]["attach_study_samples"]["attributes"]:
        attr = Element("ANALYSIS_ATTRIBUTE")
        tag = Element("TAG")
        tag.text = a["name"]
        value = Element("VALUE")
        value.text = a["value"]
        attr.append(tag)
        attr.append(value)
        attrs.append(attr)
    analysis.append(attrs)
    return prettify(analysis)
def view_templates(request, profile_id):
    """Render the metadata templates page for the given profile."""
    request.session["profile_id"] = profile_id
    context = {
        'profile_id': profile_id,
        'profile': Profile().get_record(profile_id),
    }
    return render(request, 'copo/metadata_templates.html', context)