def post(self, request, *args, **kwargs):
    from_old_template = False
    if 'isa_tab_url' in request.POST:
        # TODO: I think isa_tab_url is already a full url,
        # making this redundant.
        full_isa_tab_url = get_full_url(request.POST['isa_tab_url'])
        from_old_template = True
    else:
        request_body = request.body
        if not request_body:
            err_msg = "Neither form data nor a request body has been sent."
            logger.error(err_msg)
            return HttpResponseBadRequest(err_msg)
        try:
            body = json.loads(request_body)
        except Exception as e:
            err_msg = "Request body is not valid JSON"
            logger.error("%s: %s" % (err_msg, e))
            return HttpResponseBadRequest("%s." % err_msg)
        if "data_set_uuid" in body:
            data_set_uuid = body["data_set_uuid"]
        else:
            err_msg = "Request body doesn't contain data_set_uuid."
            logger.error(err_msg)
            return HttpResponseBadRequest(err_msg)
        try:
            full_isa_tab_url = DataSet.objects.get(
                uuid=data_set_uuid).get_isa_archive().get_datafile_url()
        except (DataSet.DoesNotExist, DataSet.MultipleObjectsReturned,
                Exception) as e:
            err_msg = "Something went wrong"
            logger.error("%s: %s" % (err_msg, e))
            return HttpResponseBadRequest("%s." % err_msg)
    if from_old_template:
        # Redirect to process_isa_tab view
        response = HttpResponseRedirect(
            get_full_url(reverse('process_isa_tab')))
    else:
        # Redirect to process_isa_tab view with arg 'ajax' if request is
        # not coming from old Django Template
        response = HttpResponseRedirect(
            get_full_url(reverse('process_isa_tab', args=['ajax'])))
    # set cookie
    response.set_cookie('isa_tab_url', full_isa_tab_url)
    return response
def post(self, request, *args, **kwargs):
    from_old_template = False
    if 'isa_tab_url' in request.POST:
        full_isa_tab_url = get_full_url(request.POST['isa_tab_url'])
        from_old_template = True
    else:
        request_body = request.body
        if not request_body:
            err_msg = "Neither form data nor a request body has been sent."
            logger.error(err_msg)
            return HttpResponseBadRequest(err_msg)
        try:
            body = json.loads(request_body)
        except Exception as e:
            err_msg = "Request body is not valid JSON"
            logger.error("%s: %s" % (err_msg, e))
            return HttpResponseBadRequest("%s." % err_msg)
        if "data_set_uuid" in body:
            data_set_uuid = body["data_set_uuid"]
        else:
            err_msg = "Request body doesn't contain data_set_uuid."
            logger.error(err_msg)
            return HttpResponseBadRequest(err_msg)
        try:
            full_isa_tab_url = get_full_url(DataSet.objects.get(
                uuid=data_set_uuid).get_isa_archive().get_datafile_url())
        except (DataSet.DoesNotExist, DataSet.MultipleObjectsReturned,
                Exception) as e:
            err_msg = "Something went wrong"
            logger.error("%s: %s" % (err_msg, e))
            return HttpResponseBadRequest("%s." % err_msg)
    if from_old_template:
        # Redirect to process_isa_tab view
        response = HttpResponseRedirect(
            reverse('process_isa_tab')
        )
    else:
        # Redirect to process_isa_tab view with arg 'ajax' if request is
        # not coming from old Django Template
        response = HttpResponseRedirect(
            reverse('process_isa_tab', args=['ajax'])
        )
    # set cookie
    response.set_cookie('isa_tab_url', full_isa_tab_url)
    return response
def get_file_name(nodeuuid, sampFile=None, is_file_uuid=False):
    """Helper function for getting a file_name from a filestore uuid
    :param nodeuuid: Node uuid (treated as a FileStoreItem uuid when
        is_file_uuid is True)
    :type nodeuuid: String
    """
    # if uuid is a file_store uuid (associated w/ analysis results)
    if is_file_uuid:
        temp_fs = FileStoreItem.objects.get(uuid=nodeuuid)
    else:
        # getting the current file_uuid from the given node_uuid
        curr_file_uuid = Node.objects.get(uuid=nodeuuid).file_uuid
        # checking to see if it has a file_server item
        temp_fs = get_aux_file_item(curr_file_uuid)
    # If no associated file_server auxiliary file then use main data file for
    # IGV
    if temp_fs is None:
        # getting file information based on file_uuids
        temp_fs = FileStoreItem.objects.get(uuid=curr_file_uuid)
    temp_name = temp_fs.datafile.name.split('/')
    temp_name = temp_name[len(temp_name) - 1]
    # full path to selected UUID File
    temp_url = get_full_url(temp_fs.get_datafile_url())
    # IGV SEG FILE HACK
    if sampFile:
        if temp_name.startswith("metaData"):
            new_name = temp_name.split("_")
            if len(new_name) > 1:
                temp_name = new_name[0]
    return temp_name, temp_url
def addIGVSamples(fields, results_samp, annot_samples=None):
    """creates phenotype file for IGV
    :param samples: Solr results for samples to be included
    :type samples: Array.
    :param annot_samples: includes annotation files included with solr results
    :type annot_samples: Array
    """
    # creates human readable indexes of fields to iterate over
    fields_dict = {}
    for i in fields:
        find_index = i.find("_Characteristics_")
        if find_index > -1:
            new_key = i.split("_Characteristics_")[0]
            fields_dict[i] = new_key
    # Creating temp file to enter into file_store
    tempsampname = tempfile.NamedTemporaryFile(delete=False)
    # writing header to sample file
    tempsampname.write("#sampleTable" + "\n")
    # writing column names to sample file
    col_names = "Linking_id"
    for k, v in fields_dict.iteritems():
        col_names = col_names + '\t' + v
    tempsampname.write(col_names + "\n")
    # iterating over sample files
    pheno_results = get_sample_lines(fields_dict, results_samp)
    tempsampname.write(pheno_results)
    # if annotations are not null
    if annot_samples:
        pheno_annot = get_sample_lines(fields_dict, annot_samples)
        tempsampname.write(pheno_annot)
    # closing temp file
    tempsampname.close()
    # getting file_store_uuid
    filestore_uuid = create(tempsampname.name, permanent=True, filetype="txt")
    filestore_item = import_file(filestore_uuid, permanent=True, refresh=True)
    # file to rename
    temp_file = filestore_item.datafile.name.split('/')
    temp_file = temp_file[len(temp_file) - 1] + '.txt'
    # rename file by way of file_store
    filestore_item = rename(filestore_uuid, temp_file)
    # getting file information based on file_uuids
    curr_fs = FileStoreItem.objects.get(uuid=filestore_uuid)
    # full path to selected UUID File
    curr_url = get_full_url(curr_fs.get_datafile_url())
    # delete temp file
    os.unlink(tempsampname.name)
    return curr_url
def test_get_full_url_remote_file(self):
    """Check if the source URL is returned for files that have not been
    imported
    """
    # create FileStoreItem instances without any disk operations
    item_from_url = FileStoreItem.objects.create(source=self.url_source,
                                                 sharename=self.sharename)
    self.assertEqual(get_full_url(item_from_url.get_datafile_url()),
                     item_from_url.source)
def logout(request):
    token = request.session.get('webinar_access_token')
    if token is not None:
        requests.post(get_full_url(request, 'revoke-token'), data={
            'token': token,
        })
    request.session.pop('webinar_access_token', None)
    return redirect('index')
def test_get_full_url_local_file(self):
    """Check if the full URL is properly returned for files that exist in
    Refinery
    """
    # create FileStoreItem instances without any disk operations
    local_file = FileStoreItem.objects.create(
        datafile=SimpleUploadedFile(self.filename, 'Coffee is delicious!'),
        source=self.path_source,
        sharename=self.sharename)
    self.assertEqual(
        get_full_url(local_file.get_datafile_url()),
        '{}://{}{}'.format(settings.REFINERY_URL_SCHEME,
                           Site.objects.get_current().domain,
                           local_file.datafile.url))
def post(self, request, *args, **kwargs):
    serializer_class = self.get_serializer_class()
    serializer = serializer_class(data=request.data,
                                  context={'request': request})
    serializer.is_valid(raise_exception=True)
    shortcut = serializer.validated_data['shortcut']
    full_url = get_full_url(shortcut)
    if full_url:
        return JsonResponse({
            'status': 'ok',
            'full_url': full_url
        }, status=status.HTTP_200_OK)
    return HttpResponseBadRequest()
def index(request):
    # create callback url
    callback_url = get_full_url(request, 'callback')
    # get access token from session
    token = request.session.get('webinar_access_token')
    context = {
        'registered': bool(token is not None),
        'auth_url': (
            f'{settings.OAUTH_SERVER_URL}/o/authorize?response_type=code'
            f'&client_id={settings.CLIENT_ID}&redirect_uri={callback_url}'
        ),
        'form': ResourceForm()
    }
    return render(request, 'front/index.html', context)
def token(request):
    """
    Gets token with auth code.
    Input should be in the format: {"code": "code"}
    """
    redir_url = get_full_url(request, 'callback')
    resp = requests.post(
        f'{OAUTH_SERVER_URL}/o/token/',
        data={
            'grant_type': 'authorization_code',
            'redirect_uri': redir_url,
            'code': request.data['code'],
            'client_id': CLIENT_ID,
            'client_secret': CLIENT_SECRET,
        },
    )
    return Response(resp.json())
def callback(request):
    # clear stored auth code
    request.session.pop('webinar_auth_code', None)
    # get auth code sent from Authorization server
    code = request.GET.get('code')
    if code is not None:
        # save it in the session
        request.session['webinar_auth_code'] = code
        resp = requests.post(get_full_url(request, 'get-token'), data={
            'code': code,
        })
        resp_data = resp.json()
        if 'access_token' in resp_data:
            # delete auth code, as it is no longer needed
            request.session.pop('webinar_auth_code', None)
            request.session['webinar_access_token'] = resp_data['access_token']
    return redirect('index')
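# In the webinar views above (logout, index, token, callback), get_full_url is
# called as get_full_url(request, 'url-name') and the result is used as an
# absolute callback or revocation URL. A minimal sketch of a helper with that
# shape follows, assuming Django's reverse() and request.build_absolute_uri();
# the project's actual helper is not shown in these examples and may differ.
from django.urls import reverse


def get_full_url_for_request(request, view_name):
    """Build an absolute URL for a named view, based on the current request."""
    return request.build_absolute_uri(reverse(view_name))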
def _import_analysis_in_galaxy(ret_list, library_id, connection):
    """Take workflow configuration and import files into galaxy
    assign galaxy_ids to ret_list
    """
    logger.debug("Uploading analysis input files to Galaxy")
    for fileset in ret_list:
        for k, v in fileset.iteritems():
            cur_item = fileset[k]
            # getting the current file_uuid from the given node_uuid
            try:
                curr_file_uuid = Node.objects.get(
                    uuid=cur_item['node_uuid']).file_uuid
            except Node.DoesNotExist:
                logger.error("Couldn't fetch Node")
                return None
            try:
                current_filestore_item = FileStoreItem.objects.get_item(
                    uuid=curr_file_uuid)
            except FileStoreItem.DoesNotExist:
                logger.error("Couldn't fetch FileStoreItem")
                return None
            # Create url based on filestore_item's location (local file or
            # external file)
            file_url = get_full_url(current_filestore_item.get_datafile_url())
            try:
                file_id = connection.libraries.upload_file_from_url(
                    library_id, file_url)[0]['id']
            except (galaxy.client.ConnectionError, IOError) as exc:
                logger.error(
                    "Failed adding file '%s' to Galaxy "
                    "library '%s': %s",
                    curr_file_uuid, library_id, exc)
                raise
            cur_item["id"] = file_id
    return ret_list
def import_analysis_in_galaxy(ret_list, library_id, connection):
    """Take workflow configuration and import files into galaxy
    assign galaxy_ids to ret_list
    """
    logger.debug("Uploading analysis input files to Galaxy")
    for fileset in ret_list:
        for k, v in fileset.iteritems():
            cur_item = fileset[k]
            # getting the current file_uuid from the given node_uuid
            try:
                curr_file_uuid = Node.objects.get(
                    uuid=cur_item['node_uuid']).file_uuid
            except Node.DoesNotExist:
                logger.error("Couldn't fetch Node!")
                return None
            try:
                current_filestore_item = FileStoreItem.objects.get_item(
                    uuid=curr_file_uuid)
            except FileStoreItem.DoesNotExist:
                logger.error("Couldn't fetch FileStoreItem!")
                return None
            # Create url based on filestore_item's location (local file or
            # external file)
            file_url = get_full_url(current_filestore_item.get_datafile_url())
            try:
                file_id = connection.libraries.upload_file_from_url(
                    library_id, file_url)[0]['id']
            except (galaxy.client.ConnectionError, IOError) as exc:
                logger.error("Failed adding file '%s' to Galaxy "
                             "library '%s': %s",
                             curr_file_uuid, library_id, exc)
                raise
            cur_item["id"] = file_id
    return ret_list
def test_get_full_url_local_file(self):
    """Check if the full URL is properly returned for files that exist in
    Refinery
    """
    # create FileStoreItem instances without any disk operations
    local_file = FileStoreItem.objects.create(source=self.path_source,
                                              sharename=self.sharename)
    local_file.datafile = self.test_file
    local_file.save()
    self.assertEqual(
        get_full_url(local_file.get_datafile_url()),
        '{}://{}{}'.format(
            settings.REFINERY_URL_SCHEME,
            Site.objects.get_current().domain,
            local_file.datafile.url
        )
    )
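# The tests above pin down the behavior expected of this get_full_url flavor:
# an already-absolute source URL is returned unchanged, while a relative
# datafile path is prefixed with REFINERY_URL_SCHEME and the current Site
# domain. The sketch below only illustrates that contract as inferred from the
# tests; it is not the project's actual implementation.
from django.conf import settings
from django.contrib.sites.models import Site


def get_full_url_sketch(url):
    """Return an absolute URL for a relative path; pass absolute URLs through."""
    if url is None:
        return None
    if url.startswith('http://') or url.startswith('https://'):
        # remote files keep their original source URL
        return url
    # local files are served from the current Refinery site
    return '{}://{}{}'.format(settings.REFINERY_URL_SCHEME,
                              Site.objects.get_current().domain,
                              url)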
def create_temp_filename_and_url(temp_fs, samp_file): """Helper function for creating a temporary filename and file url from a given FileStore Item and sample file :param temp_fs: Filestore uuid :type temp_fs: FileStoreItem :param samp_file: T/F value for # IGV SEG FILE HACK :type samp_file: Bool :returns: temp_name, temp_url """ temp_name = temp_fs.datafile.name.split('/') temp_name = temp_name[len(temp_name) - 1] # full path to selected UUID File temp_url = get_full_url(temp_fs.get_datafile_url()) # IGV SEG FILE HACK if samp_file: if temp_name.startswith("metaData"): new_name = temp_name.split("_") if len(new_name) > 1: temp_name = new_name[0] return temp_name, temp_url
def create_temp_filename_and_url(temp_fs, samp_file): """Helper function for creating a temporary filename and file url from a given FileStore Item and sample file :param temp_fs: Filestore uuid :type temp_fs: FileStoreItem :param samp_file: T/F value for # IGV SEG FILE HACK :type samp_file: Bool :returns: temp_name, temp_url """ temp_name = temp_fs.datafile.name.split("/") temp_name = temp_name[len(temp_name) - 1] # full path to selected UUID File temp_url = get_full_url(temp_fs.get_datafile_url()) # IGV SEG FILE HACK if samp_file: if temp_name.startswith("metaData"): new_name = temp_name.split("_") if len(new_name) > 1: temp_name = new_name[0] return temp_name, temp_url
def get(self, request, shortcut):
    full_url = get_full_url(shortcut)
    if full_url:
        return HttpResponseRedirect(full_url)
    return HttpResponseBadRequest()
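# In this URL-shortener flavor (see also the POST handler above that returns a
# JSON payload), get_full_url maps a 'shortcut' string to a stored full URL and
# returns a falsy value when no match exists. The lookup below is a hypothetical
# sketch against an assumed ShortLink model with 'shortcut' and 'full_url'
# fields; the real model and helper are not shown in these examples.
def get_full_url_from_shortcut(shortcut):
    """Return the stored full URL for a shortcut, or None if unknown."""
    try:
        return ShortLink.objects.get(shortcut=shortcut).full_url
    except ShortLink.DoesNotExist:
        return None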
def create_igv_session(genome, uuids, is_file_uuid=False):
    """ Creates session file for selected file uuids, returns newly created
    filestore uuid
    :param is_file_uuid:
    :param genome: Genome to be used in session file i.e. hg18, dm3
    :type genome: string.
    :param uuids: Array of UUIDs to be used
    :type uuids: array.
    :param uuids: Host URL i.e. 127.0.0.1:8000
    :type uuids: string
    """
    # Create IGV Session file and put into Filestore
    """
    http://www.postneo.com/projects/pyxml/

    <?xml version="1.0" encoding="UTF-8"?>
    <Global genome="hg18" locus="EGFR" version="3">
        <Resources>
            <Resource name="RNA Genes"
                path="http://www.broadinstitute.org/igvdata/tcga/gbm/GBM_batch1-8_level3_exp.txt.recentered.080820.gct.tdf"/>
            <Resource name="RNA Genes"
                path="http://www.broadinstitute.org/igvdata/annotations/hg18/rna_genes.bed"/>
            <Resource name="sno/miRNA"
                path="http://www.broadinstitute.org/igvdata/tcga/gbm/Sample_info.txt"/>
        </Resources>
    </Global>
    """
    logger.debug("visualization_manager.create_igv_session called")
    # Create the minidom document
    doc = Document()
    # Create the <wml> base element
    xml = doc.createElement("Global")
    xml.setAttribute("genome", genome)
    xml.setAttribute("locus", "All")
    xml.setAttribute("version", "4")
    doc.appendChild(xml)
    # Add Resources
    xml_resources = doc.createElement("Resources")
    xml.appendChild(xml_resources)
    # get paths to url
    for samp in uuids:
        # gets filestore item
        curr_name, curr_url = get_file_name(samp, is_file_uuid=is_file_uuid)
        logger.debug('New resource: ' + curr_name + ' - ' + curr_url)
        # What to do if fs does not exist?
        if curr_name:
            # creates Resource element
            res = doc.createElement("Resource")
            res.setAttribute("name", curr_name)
            res.setAttribute("path", curr_url)
            xml_resources.appendChild(res)
    # Creating temp file to enter into file_store
    tempfilename = tempfile.NamedTemporaryFile(delete=False)
    tempfilename.write(doc.toprettyxml(indent=" "))
    tempfilename.close()
    # getting file_store_uuid
    filestore_uuid = create(tempfilename.name, filetype="xml")
    filestore_item = import_file(filestore_uuid, refresh=True)
    # file to rename
    temp_name = filestore_item.datafile.name.split('/')
    temp_name = temp_name[len(temp_name) - 1] + '.xml'
    # rename file by way of file_store
    filestore_item = rename(filestore_uuid, temp_name)
    # delete temp file
    os.unlink(tempfilename.name)
    # Url for session file
    fs_url = get_full_url(filestore_item.get_datafile_url())
    # IGV url for automatic launch of Java Webstart
    igv_url = "http://www.broadinstitute.org/igv/projects/current/igv.php" \
              "?sessionURL=" + fs_url
    return igv_url
def add_igv_samples(fields, results_samp, annot_samples=None): """creates phenotype file for IGV :param samples: Solr results for samples to be included :type samples: Array. :param annot_samples: includes annotation files included with solr results :type annot_samples: Array """ # creates human readable indexes of fields to iterate over fields_dict = {} for i in fields: find_index = i.find("_Characteristics_") if find_index > -1: new_key = i.split("_Characteristics_")[0] fields_dict[i] = new_key # Creating temp file to enter into file_store temp_sample_name = tempfile.NamedTemporaryFile(delete=False) # writing header to sample file temp_sample_name.write("#sampleTable" + "\n") # writing column names to sample file col_names = "Linking_id" for k, v in fields_dict.iteritems(): col_names = col_names + '\t' + v temp_sample_name.write(col_names + "\n") # iterating over sample files pheno_results = get_sample_lines(fields_dict, results_samp) try: temp_sample_name.write(pheno_results) except UnicodeEncodeError as e: logger.error("Could not write results to file: %s. " "Trying again with the content to write encoded " "properly." % e) temp_sample_name.write(pheno_results.encode("utf-8")) # if annotations are not null if annot_samples: pheno_annot = get_sample_lines(fields_dict, annot_samples) temp_sample_name.write(pheno_annot) # closing temp file temp_sample_name.close() # getting file_store_uuid filestore_uuid = create(temp_sample_name.name, filetype="txt") filestore_item = import_file(filestore_uuid, refresh=True) # file to rename temp_file = filestore_item.datafile.name.split('/') temp_file = temp_file[len(temp_file) - 1] + '.txt' # rename file by way of file_store filestore_item = rename(filestore_uuid, temp_file) # getting file information based on file_uuids curr_fs = FileStoreItem.objects.get(uuid=filestore_uuid) # full path to selected UUID File curr_url = get_full_url(curr_fs.get_datafile_url()) # delete temp file os.unlink(temp_sample_name.name) return curr_url
def createIGVsessionAnnot(genome, uuids, annot_uuids=None, samp_file=None):
    """Creates session file for selected file uuids, returns newly created
    filestore uuid
    :param genome: Genome to be used in session file i.e. hg18, dm3
    :type genome: string.
    :param uuids: Array of UUIDs to be used
    :type uuids: array.
    :param uuids: Host URL i.e. 127.0.0.1:8000
    :type uuids: string
    """
    # Create IGV Session file and put into Filestore
    """
    http://www.postneo.com/projects/pyxml/

    <?xml version="1.0" encoding="UTF-8"?>
    <Global genome="hg18" locus="EGFR" version="3">
        <Resources>
            <Resource name="RNA Genes"
                path="http://www.broadinstitute.org/igvdata/tcga/gbm/GBM_batch1-8_level3_exp.txt.recentered.080820.gct.tdf"/>
            <Resource name="RNA Genes"
                path="http://www.broadinstitute.org/igvdata/annotations/hg18/rna_genes.bed"/>
            <Resource name="sno/miRNA"
                path="http://www.broadinstitute.org/igvdata/tcga/gbm/Sample_info.txt"/>
        </Resources>
    </Global>
    """
    # Create the minidom document
    doc = Document()
    # Create the <wml> base element
    xml = doc.createElement("Global")
    xml.setAttribute("genome", genome)
    xml.setAttribute("locus", "All")
    xml.setAttribute("version", "4")
    doc.appendChild(xml)
    # Add Resources
    xml_resources = doc.createElement("Resources")
    xml.appendChild(xml_resources)
    # adding selected samples to xml file
    addIGVResource(uuids["node_uuid"], xml_resources, doc)
    if annot_uuids:
        # adding selected samples to xml file
        addIGVResource(annot_uuids["node_uuid"], xml_resources, doc)
    # adds sample information file to IGV session file
    if samp_file:
        # <Resource name="Sample Information"
        # path="http://igv.broadinstitute.org/data/hg18/tcga/gbm/gbmsubtypes/sampleTable.txt.gz"/>
        # creates Resource element
        res = doc.createElement("Resource")
        res.setAttribute("name", "Sample Information")
        res.setAttribute("path", samp_file)
        xml_resources.appendChild(res)
    # <HiddenAttributes>
    #     <Attribute name="DATA FILE"/>
    #     <Attribute name="Linking_id"/>
    #     <Attribute name="DATA TYPE"/>
    # </HiddenAttributes>
    # Adding parameters to hide basic unnecessary sample info
    hidden_attr = doc.createElement("HiddenAttributes")
    xml.appendChild(hidden_attr)
    attr = doc.createElement("Attribute")
    attr.setAttribute("name", "DATA FILE")
    hidden_attr.appendChild(attr)
    attr = doc.createElement("Attribute")
    attr.setAttribute("name", "Linking_id")
    hidden_attr.appendChild(attr)
    attr = doc.createElement("Attribute")
    attr.setAttribute("name", "DATA TYPE")
    hidden_attr.appendChild(attr)
    # Creating temp file to enter into file_store
    tempfilename = tempfile.NamedTemporaryFile(delete=False)
    tempfilename.write(doc.toprettyxml(indent=" "))
    tempfilename.close()
    # getting file_store_uuid
    filestore_uuid = create(tempfilename.name, permanent=True, filetype="xml")
    filestore_item = import_file(filestore_uuid, permanent=True, refresh=True)
    # file to rename
    temp_name = filestore_item.datafile.name.split('/')
    temp_name = temp_name[len(temp_name) - 1] + '.xml'
    # rename file by way of file_store
    filestore_item = rename(filestore_uuid, temp_name)
    # delete temp file
    os.unlink(tempfilename.name)
    # Url for session file
    sessionfile_url = get_full_url(filestore_item.get_datafile_url())
    # IGV url for automatic launch of Java Webstart
    igv_url = "http://www.broadinstitute.org/igv/projects/current/igv.php" \
              "?sessionURL=" + sessionfile_url
    return igv_url
def createIGVsession(genome, uuids, is_file_uuid=False):
    """ Creates session file for selected file uuids, returns newly created
    filestore uuid
    :param genome: Genome to be used in session file i.e. hg18, dm3
    :type genome: string.
    :param uuids: Array of UUIDs to be used
    :type uuids: array.
    :param uuids: Host URL i.e. 127.0.0.1:8000
    :type uuids: string
    """
    # Create IGV Session file and put into Filestore
    """
    http://www.postneo.com/projects/pyxml/

    <?xml version="1.0" encoding="UTF-8"?>
    <Global genome="hg18" locus="EGFR" version="3">
        <Resources>
            <Resource name="RNA Genes"
                path="http://www.broadinstitute.org/igvdata/tcga/gbm/GBM_batch1-8_level3_exp.txt.recentered.080820.gct.tdf"/>
            <Resource name="RNA Genes"
                path="http://www.broadinstitute.org/igvdata/annotations/hg18/rna_genes.bed"/>
            <Resource name="sno/miRNA"
                path="http://www.broadinstitute.org/igvdata/tcga/gbm/Sample_info.txt"/>
        </Resources>
    </Global>
    """
    logger.debug("visualization_manager.createIGVsession called")
    # Create the minidom document
    doc = Document()
    # Create the <wml> base element
    xml = doc.createElement("Global")
    xml.setAttribute("genome", genome)
    xml.setAttribute("locus", "All")
    xml.setAttribute("version", "4")
    doc.appendChild(xml)
    # Add Resources
    xml_resources = doc.createElement("Resources")
    xml.appendChild(xml_resources)
    # get paths to url
    for samp in uuids:
        # gets filestore item
        curr_name, curr_url = get_file_name(samp, is_file_uuid=is_file_uuid)
        logger.debug('New resource: ' + curr_name + ' - ' + curr_url)
        # What to do if fs does not exist?
        if (curr_name):
            # creates Resource element
            res = doc.createElement("Resource")
            res.setAttribute("name", curr_name)
            res.setAttribute("path", curr_url)
            xml_resources.appendChild(res)
    # Creating temp file to enter into file_store
    tempfilename = tempfile.NamedTemporaryFile(delete=False)
    tempfilename.write(doc.toprettyxml(indent=" "))
    tempfilename.close()
    # getting file_store_uuid
    filestore_uuid = create(tempfilename.name, permanent=True, filetype="xml")
    filestore_item = import_file(filestore_uuid, permanent=True, refresh=True)
    # file to rename
    temp_name = filestore_item.datafile.name.split('/')
    temp_name = temp_name[len(temp_name) - 1] + '.xml'
    # rename file by way of file_store
    filestore_item = rename(filestore_uuid, temp_name)
    # delete temp file
    os.unlink(tempfilename.name)
    # Url for session file
    fs_url = get_full_url(filestore_item.get_datafile_url())
    # IGV url for automatic launch of Java Webstart
    igv_url = "http://www.broadinstitute.org/igv/projects/current/igv.php" \
              "?sessionURL=" + fs_url
    return igv_url
def create_igv_session_annot(genome, uuids, annot_uuids=None, samp_file=None):
    """Creates session file for selected file uuids, returns newly created
    filestore uuid
    :param genome: Genome to be used in session file i.e. hg18, dm3
    :type genome: string.
    :param uuids: Array of UUIDs to be used
    :type uuids: array.
    :param uuids: Host URL i.e. 127.0.0.1:8000
    :type uuids: string
    """
    # Create IGV Session file and put into Filestore
    """
    http://www.postneo.com/projects/pyxml/

    <?xml version="1.0" encoding="UTF-8"?>
    <Global genome="hg18" locus="EGFR" version="3">
        <Resources>
            <Resource name="RNA Genes"
                path="http://www.broadinstitute.org/igvdata/tcga/gbm/GBM_batch1-8_level3_exp.txt.recentered.080820.gct.tdf"/>
            <Resource name="RNA Genes"
                path="http://www.broadinstitute.org/igvdata/annotations/hg18/rna_genes.bed"/>
            <Resource name="sno/miRNA"
                path="http://www.broadinstitute.org/igvdata/tcga/gbm/Sample_info.txt"/>
        </Resources>
    </Global>
    """
    # Create the minidom document
    doc = Document()
    # Create the <wml> base element
    xml = doc.createElement("Global")
    xml.setAttribute("genome", genome)
    xml.setAttribute("locus", "All")
    xml.setAttribute("version", "4")
    doc.appendChild(xml)
    # Add Resources
    xml_resources = doc.createElement("Resources")
    xml.appendChild(xml_resources)
    # adding selected samples to xml file
    add_igv_resource(uuids["node_uuid"], xml_resources, doc)
    if annot_uuids:
        # adding selected samples to xml file
        add_igv_resource(annot_uuids["node_uuid"], xml_resources, doc)
    # adds sample information file to IGV session file
    if samp_file:
        # <Resource name="Sample Information"
        # path="http://igv.broadinstitute.org/data/hg18/tcga/gbm/gbmsubtypes/sampleTable.txt.gz"/>
        # creates Resource element
        res = doc.createElement("Resource")
        res.setAttribute("name", "Sample Information")
        res.setAttribute("path", samp_file)
        xml_resources.appendChild(res)
    # <HiddenAttributes>
    #     <Attribute name="DATA FILE"/>
    #     <Attribute name="Linking_id"/>
    #     <Attribute name="DATA TYPE"/>
    # </HiddenAttributes>
    # Adding parameters to hide basic unnecessary sample info
    hidden_attr = doc.createElement("HiddenAttributes")
    xml.appendChild(hidden_attr)
    attr = doc.createElement("Attribute")
    attr.setAttribute("name", "DATA FILE")
    hidden_attr.appendChild(attr)
    attr = doc.createElement("Attribute")
    attr.setAttribute("name", "Linking_id")
    hidden_attr.appendChild(attr)
    attr = doc.createElement("Attribute")
    attr.setAttribute("name", "DATA TYPE")
    hidden_attr.appendChild(attr)
    # Creating temp file to enter into file_store
    tempfilename = tempfile.NamedTemporaryFile(delete=False)
    tempfilename.write(doc.toprettyxml(indent=" "))
    tempfilename.close()
    # getting file_store_uuid
    filestore_uuid = create(tempfilename.name, filetype="xml")
    filestore_item = import_file(filestore_uuid, refresh=True)
    # file to rename
    temp_name = filestore_item.datafile.name.split('/')
    temp_name = temp_name[len(temp_name) - 1] + '.xml'
    # rename file by way of file_store
    filestore_item = rename(filestore_uuid, temp_name)
    # delete temp file
    os.unlink(tempfilename.name)
    # Url for session file
    sessionfile_url = get_full_url(filestore_item.get_datafile_url())
    # IGV url for automatic launch of Java Webstart
    igv_url = "http://www.broadinstitute.org/igv/projects/current/igv.php" \
              "?sessionURL=" + sessionfile_url
    return igv_url
def add_igv_samples(fields, results_samp, annot_samples=None): """creates phenotype file for IGV :param samples: Solr results for samples to be included :type samples: Array. :param annot_samples: includes annotation files included with solr results :type annot_samples: Array """ # creates human readable indexes of fields to iterate over fields_dict = {} for i in fields: find_index = i.find("_Characteristics_") if find_index > -1: new_key = i.split("_Characteristics_")[0] fields_dict[i] = new_key # Creating temp file to enter into file_store temp_sample_name = tempfile.NamedTemporaryFile(delete=False) # writing header to sample file temp_sample_name.write("#sampleTable" + "\n") # writing column names to sample file col_names = "Linking_id" for k, v in fields_dict.iteritems(): col_names = col_names + "\t" + v temp_sample_name.write(col_names + "\n") # iterating over sample files pheno_results = get_sample_lines(fields_dict, results_samp) try: temp_sample_name.write(pheno_results) except UnicodeEncodeError as e: logger.error( "Could not write results to file: %s. " "Trying again with the content to write encoded " "properly.", e ) temp_sample_name.write(pheno_results.encode("utf-8")) # if annotations are not null if annot_samples: pheno_annot = get_sample_lines(fields_dict, annot_samples) temp_sample_name.write(pheno_annot) # closing temp file temp_sample_name.close() # getting file_store_uuid filestore_uuid = create(temp_sample_name.name, filetype="txt") filestore_item = import_file(filestore_uuid, refresh=True) # file to rename temp_file = filestore_item.datafile.name.split("/") temp_file = temp_file[len(temp_file) - 1] + ".txt" # rename file by way of file_store filestore_item = rename(filestore_uuid, temp_file) # getting file information based on file_uuids curr_fs = FileStoreItem.objects.get(uuid=filestore_uuid) # full path to selected UUID File curr_url = get_full_url(curr_fs.get_datafile_url()) # delete temp file os.unlink(temp_sample_name.name) return curr_url