def tagCompounds(request, action):
    """Add or remove tags on a set of compounds via an AJAX POST.

    Expects a JSON body of the form {"tags": [names...], "ids": [compound ids...]}.
    `action` selects the operation: 'add' attaches the tags, 'remove' detaches them.
    Only the requesting user's own tags/compounds are touched.

    Returns an empty 200 response on success; a 404 with a short message when the
    JSON cannot be parsed or the action is unknown (status codes kept as-is for
    caller compatibility). Returns None (implicit) for non-AJAX / non-POST requests,
    matching the original behavior.
    """
    if request.is_ajax() and request.method == 'POST':
        rawJson = request.body
        compoundIds = []
        tags = []
        try:
            # FIX: json.loads was previously outside the try, so malformed JSON
            # raised an uncaught exception instead of returning the error response.
            data = json.loads(rawJson.decode("utf-8"))
            tags = data["tags"]
            compoundIds = [int(id) for id in data["ids"]]
        except (ValueError, KeyError, TypeError):
            # Narrowed from a bare `except:` so unrelated programming errors
            # are no longer silently swallowed. JSONDecodeError is a ValueError.
            print("failed to parse json for tags and compound ids: " + str(rawJson))
            return HttpResponse("failed to parse JSON", status=404)
        #print("adding tags "+str(tags)+" to compounds "+str(compoundIds))
        Tag.ensureAllExist(tags, request.user)
        tagObjects = Tag.objects.filter(user=request.user, name__in=tags)
        compounds = Compound.objects.filter(user=request.user, id__in=compoundIds)
        for compound in compounds:
            if action == 'add':
                compound.tags.add(*tagObjects)
            elif action == 'remove':
                compound.tags.remove(*tagObjects)
            else:
                return HttpResponse("Unknown action given", status=404)
            compound.save()
        return HttpResponse('')
def tagDuplicateCompounds(request):
    """Find the current user's duplicate compounds (identical InChI) and tag them.

    Every member of duplicate group N receives a "duplicate-set-N" tag; every
    member except the first in each group additionally receives "duplicate-extra"
    (the first is the copy to keep). Responds to AJAX GET with a JSON mapping of
    compound id -> list of tag names applied; any other request gets {}.
    """
    from django.db.models import Count
    import itertools
    from operator import itemgetter
    #print("tagging duplicates")
    if request.is_ajax() and request.method == 'GET':
        # group all compounds by inchi and return groups with size > 1
        # NOTE(review): the user filter is applied AFTER values()/annotate(),
        # so duplicate_count may be computed across all users' compounds before
        # the user restriction — verify the count is scoped as intended.
        groupInfo =Compound.objects.values_list('inchi') \
            .annotate(duplicate_count=Count('inchi')) \
            .filter(user=request.user,duplicate_count__gt=1) \
            .order_by('inchi')
        dupedInchis = [row[0] for row in groupInfo]
        numGroups = len(groupInfo)
        #print("duped inchi's: "+str(dupedInchis))
        # get the id and inchi of duplicated compounds
        dupedCompounds = Compound.objects \
            .values_list("id","inchi") \
            .filter(user=request.user,inchi__in=dupedInchis)
        #print("duped compounds: "+str(dupedCompounds))
        allIds = [row[0] for row in dupedCompounds]
        # groupby requires its input sorted on the same key (inchi, column 1)
        groups = itertools.groupby(sorted(dupedCompounds, key=itemgetter(1)),
                                   key=itemgetter(1))
        #print("groups: "+str(groups))
        # tag all group memebers with 'duplicated'
        Tag.ensureAllExist(["duplicate-extra"], request.user)
        extraTag = Tag.objects.filter(user=request.user,
                                      name__exact="duplicate-extra").get()
        #print("duplicatedTag: "+str(extraTag))
        # create all tags we'll need at once
        tagNames = ["duplicate-set-" + str(i) for i in range(numGroups)]
        Tag.ensureAllExist(tagNames, request.user)
        # NOTE(review): ordering by "id" assumes the tags were created in
        # tagNames order so tags[setNum] lines up with group number setNum
        # — confirm ensureAllExist creates them sequentially.
        tags = Tag.objects.filter(user=request.user, name__in=tagNames).order_by("id")
        newTags = {}  # compound id -> list of tag names applied (the response body)
        # tag all but the first member of each group with duplicted-X
        for setNum, group in enumerate( groups ):
            # groups is a list of tuples, each tuple is (inchi,[ids])
            ids = [row[0] for row in list(group[1])]
            #print("group: "+str(ids))
            for i, compound in enumerate( Compound.objects.filter(user=request.user, id__in=ids)):
                t = newTags.setdefault(compound.id, [])
                if i != 0:
                    #don't tag first compound in each group, that will be the one to keep
                    compound.tags.add(extraTag)
                    t.append(extraTag.name)
                compound.tags.add(tags[setNum])
                t.append(tags[setNum].name)
                compound.save()
        return JsonResponse(newTags)
    return JsonResponse({})
def showCompounds(request, resource):
    """Display the user's compound workbench, or serve one of its sub-resources.

    `resource` may be:
      - 'deleteAll': delete every compound, then render the (empty) list page
      - 'downloadSMILES.smi' / 'downloadSDF.sdf': return the user's compounds
        as a plain-text SMILES or SDF download
      - anything falsy: just render the list page
    """
    # perform query for existing myCompounds
    #(page, matches) = getMyCompounds(request)
    matches = Compound.objects.filter(user=request.user)
    #print("page count: "+str(page.count))
    #print("page num pages: "+str(page.num_pages))
    if resource:
        if resource == 'deleteAll':
            deleteMyCompounds(request)
            matches = None
            messages.error(request, 'All Compounds Deleted!')
        if resource == 'downloadSMILES.smi':
            smiles = makeSMILES(request.user)
            return HttpResponse(smiles, content_type='text/plain')
        if resource == 'downloadSDF.sdf':
            sdf = makeSDF(request.user)
            return HttpResponse(sdf, content_type='text/plain')
    if matches:
        for match in matches:
            # FIX: re.match returns None for an empty or whitespace-leading
            # SMILES string; the original crashed with AttributeError on .group(1).
            m = re.match(r"^(\S+)", match.smiles)
            firstToken = m.group(1) if m else match.smiles
            match.smiles = urlquote(firstToken)
    allTags = Tag.allUserTagNames(request.user)
    return render(request,'showCompounds.html', dict(matches=matches,tags=allTags))
def addCompoundsAjax(user, source_id, ids, tags):
    """Launch an 'Upload Compounds' job for ids fetched from an external source.

    Currently only source_id == 'chembl' is supported: the ids are resolved to
    SDF records via get_chembl_sdfs, concatenated, and submitted as a job for
    `user` with the given tags. Returns a dict with 'success' and 'message'
    keys describing the job state after a short poll.

    Raises Exception for an empty id list or an unknown source_id.
    """
    if len(tags) != 0:
        Tag.ensureAllExist(tags, user)
    if len(ids) == 0:
        raise Exception('Empty list of "ids".')
    if source_id == 'chembl':
        sdfs = get_chembl_sdfs(ids)
        # join individual molfiles with the SDF record separator
        sdf = '\n$$$$\n'.join(sdfs) + '\n$$$$\n'
        # NOTE(review): the argument list below appears redacted/scrubbed
        # ('******'); the original likely built '--user=<name>' here. It is
        # preserved verbatim — restore from version control before running.
        newJob = createJob( user, 'Upload Compounds', '', ['--user='******'--tags=' + (','.join(tags))], sdf)
        # brief wait so updateJob can observe the job's initial transition
        time.sleep(2)
        newJob = updateJob(user, newJob.id)
        if newJob.status == Job.RUNNING:
            ajaxResponse = {
                'success': True,
                'message': 'Compound upload in progress. Check "Past Jobs" for status.'
            }
        elif newJob.status == Job.FINISHED:
            ajaxResponse = {
                'success': True,
                'message': 'Compounds uploaded successfully.'
            }
        else:
            ajaxResponse = {
                'success': False,
                'message': 'An error occurred while uploading your compounds.'
            }
        return ajaxResponse
    else:
        raise Exception('Unknown source_id: {}'.format(source_id))
def showCompoundGrid(request):
    """Render the compound grid filtered by the tag names posted in the request.

    When no 'tags' key is present in the POST data, the pseudo-tag "all" is
    used so the grid shows every compound belonging to the user.
    """
    user = request.user
    allTags = Tag.allUserTagNames(user)
    selectedTags = (request.POST.getlist("tags")
                    if 'tags' in request.POST else ["all"])
    gridCompounds = Compound.byTagNames(selectedTags, user)
    context = dict(matches=gridCompounds,
                   tags=allTags,
                   currentTags=selectedTags)
    return render(request, 'compoundGrid.html', context)
def launch_job(request, category=None):
    """Job-launch view: serves app option forms, accepts submissions, and
    renders the app chooser.

    Three modes:
      - AJAX GET: return the HTML form (and description) for the app named by
        the 'currentApp' query parameter, as JSON.
      - POST: validate the per-app form, build the job input, create the job,
        and redirect to its view page.
      - plain GET: render the app chooser, optionally restricted to `category`,
        with guard redirects when the user lacks enough compounds.
    """
    if request.is_ajax():
        # for ajax requests, return HTML form for each app
        currentApp = request.GET['currentApp']
        try:
            application = Application.objects.get(id=currentApp)
            AppFormSet = getAppForm(application.id, request.user)
            form = AppFormSet()
            form = str(form)
            response = dict(form=form, desc=application.description)
        except Exception as e:
            print(traceback.format_exc())
            response = dict(form='ERROR')
        return HttpResponse(dumps(response), 'text/json')
    if request.method == 'POST':
        appForm = getAppForm(request.POST['application'], request.user)
        form = appForm(request.POST, request.FILES)
        if form.is_valid():
            try:
                appid = int(form.cleaned_data['application'])
                application = Application.objects.get(id=str(appid))
            except Application.DoesNotExist:
                messages.error(request, 'Application does not exist')
                return redirect(launch_job, category=category)
        else:
            messages.error(request, str(form.errors))
            return redirect(launch_job, category=category)
        commandOptions, optionsList = parseToolForm(form)
        # setup input: sentinel string for workbench SDF input, uploaded file
        # contents for 'upload', empty otherwise
        if application.input_type == 'chemical/x-mdl-sdfile':
            input = 'chemical/x-mdl-sdfile'
        elif application.input_type == 'upload':
            input = request.FILES['File Upload'].read().decode("utf-8")
        else:
            input = ''
        tagNames = []
        if "tags" in request.POST:
            tagNames = request.POST.getlist("tags")
        #print("got tagNames: "+str(tagNames))
        newJob = createJob(request.user, application.name, optionsList,
                           commandOptions, input, tagNames=tagNames)
        messages.success(request, 'Success: job launched.')
        return redirect(view_job, job_id=newJob.id, resource='')
    else:
        if category:
            fromWorkbench = False
            try:
                category = ApplicationCategories.objects.get(name=category)
                compoundCount = Compound.objects.filter(
                    user=request.user).count()
                if category.name != 'Upload':
                    fromWorkbench = True
                # guard: clustering needs at least 3 compounds
                if category.name == 'Clustering' and compoundCount < 3:
                    messages.info(
                        request,
                        'Notice: you must have at least 3 compounds to perform clustering. Please use this form to add more compounds and then click "Cluster" again.'
                    )
                    return redirect(myCompounds.views.uploadCompound,
                                    job_id=None,
                                    resource=None)
                # guard: property computation needs at least 1 compound
                if category.name == 'Properties' and compoundCount < 1:
                    messages.info(
                        request,
                        'Notice: you must have at least one compound to compute properties. Please use this form to add compounds and then click "Properties" again.'
                    )
                    return redirect(myCompounds.views.uploadCompound,
                                    job_id=None,
                                    resource=None)
                title = 'Launch ' + category.name + ' Job'
                apps = Application.objects.filter(category=category)
            except:
                # any lookup failure (bad category name, etc.) becomes a 404
                print(traceback.format_exc())
                raise Http404
        else:
            # NOTE(review): in this branch fromWorkbench is never assigned,
            # so the render() below would raise NameError — confirm whether
            # this path is reachable in practice.
            title = 'Launch Job'
            apps = Application.objects.filter()
        fields = {}
        fields['application'] = ModelChoiceField(queryset=apps, empty_label='')
        # build a one-field Form subclass dynamically for the app chooser
        form = type('%sForm' % 'choose application', (Form, ), fields)
        allTags = Tag.allUserTagNames(request.user)
        return render(
            request, 'submitForm.html',
            dict(title=title,
                 form=form,
                 tags=allTags,
                 fromWorkbench=fromWorkbench,
                 totalCompounds=Compound.objects.filter(
                     user=request.user).count()))
def view_job(
    request,
    job_id,
    resource=None,
    filename=None,
):
    """Render or serve a job's results, dispatching on the app's output type.

    `resource` sub-actions: 'delete' removes the job; 'download' streams the
    raw output file. AJAX requests get a small JSON status poll. Otherwise a
    finished job is rendered with the viewer matching its output MIME type,
    a running job shows a wait page, and a failed job shows an error page.
    `filename` is accepted for URL compatibility but unused here.
    """
    job = updateJob(request.user, job_id)
    if job == False:
        raise Http404
    if resource:
        if resource == 'delete':
            deleteJob(request.user, job.id)
            return HttpResponse('deleted', content_type='text/plain')
        if resource == 'download':
            f = open(job.output, 'r')
            result = f.read()
            f.close()
            if job.application.output_type == 'application/json.canvasxpress':
                # extract just the sample data from the canvasxpress JSON blob
                result = re.search(r'smps":"(.*)"}},', result).group(1)
            return HttpResponse(result, content_type=job.application.output_type)
    if request.is_ajax():
        # lightweight status poll used by the wait page
        if job.status == Job.RUNNING:
            response = dict(status='RUNNING', job_id=job_id)
        else:
            response = dict(status='DONE', job_id=job_id)
        return HttpResponse(dumps(response), 'text/json')
    if job.status == Job.FINISHED:
        # build the /working/<basename> URL for the raw result file
        finalResult = job.output
        finalResult = re.sub('.*/', '', finalResult, count=0)
        finalResult = '/working/' + finalResult
        job.start_time = str(job.start_time)
        # select correct viewer here based on output type
        if job.application.output_type == 'chemical/sdfUpload':
            f = open(job.output, 'r')
            sdf = f.read()
            f.close()
            result, error = batch_sdf_to_smiles(sdf)
            quotedSmiles = []
            for smiles in result.splitlines():
                # each line is "<smiles> <compound id>"
                match = re.search(r'(\S+)\s+(\S+)', smiles)
                smiOnly = match.group(1)
                cid = match.group(2)
                quotedSmiles.append({'smiles': urlquote(smiOnly), 'cid': cid})
            return render(
                request, 'sdfUpload.html',
                dict(title=str(job.application) + ' Results',
                     job=job,
                     compounds=quotedSmiles))
        if job.application.output_type == 'text/ei.search.result':
            f = open(job.output, 'r')
            csvinput = csv.reader(f, delimiter=' ')
            csvOutput = []
            queryColumnEmpty = True
            for line in csvinput:
                if line[0] != "":
                    queryColumnEmpty = False
                #print("csv line: "+str(line))
                csvOutput.append(line)
            #print("query column empty? "+str(queryColumnEmpty))
            f.close()
            allTags = Tag.allUserTagNames(request.user)
            #print("job input: "+str(job.input))
            # an empty query column means a single-query search; show its input
            if queryColumnEmpty:
                singleQuery = job.input
            else:
                singleQuery = None
            return render(
                request, 'structure_search_result.html',
                dict(title=str(job.application) + ' Results',
                     job=job,
                     tags=allTags,
                     compounds=csvOutput,
                     singleQuery=singleQuery,
                     resultType="ei"))
        if job.application.output_type == 'text/fp.search.result':
            f = open(job.output, 'r')
            csvinput = csv.reader(f, delimiter=' ')
            csvOutput = []
            queryColumnEmpty = True
            for line in csvinput:
                if line[0] != "":
                    queryColumnEmpty = False
                #print("csv line: "+str(line))
                csvOutput.append(line)
            #print("query column empty? "+str(queryColumnEmpty))
            f.close()
            allTags = Tag.allUserTagNames(request.user)
            if queryColumnEmpty:
                singleQuery = job.input
            else:
                singleQuery = None
            return render(
                request, 'structure_search_result.html',
                dict(title=str(job.application) + ' Results',
                     job=job,
                     compounds=csvOutput,
                     singleQuery=singleQuery,
                     tags=allTags,
                     resultType="fp"))
        elif job.application.output_type == 'text/sdf.upload':
            # upload jobs are one-shot: read the result message, then delete
            f = open(job.output, 'r')
            message = f.read()
            f.close()
            deleteJob(request.user, job.id)
            if re.search(r"^ERROR:", message):
                messages.error(request, message)
                return redirect(myCompounds.views.uploadCompound,
                                job_id=None,
                                resource=None)
            else:
                messages.success(request, message)
                return redirect(myCompounds.views.showCompounds, resource='')
        elif job.application.output_type \
            == 'application/json.cytoscape':
            f = open(job.output, 'r')
            plotJSON = f.read()
            f.close()
            return render(
                request, 'view_network.html',
                dict(title=str(job.application) + ' Results',
                     result=finalResult,
                     job=job,
                     plotJSON=plotJSON))
        elif job.application.output_type \
            == 'application/json.canvasxpress':
            f = open(job.output, 'r')
            plotJSON = f.read()
            f.close()
            # clustering apps get the dendrogram (tree) rendering mode
            if (job.application.name == 'Hierarchical Clustering')\
                or (job.application.name == 'Numeric Data Clustering'):
                tree = True
                return render(
                    request, 'view_job.html',
                    dict(title=str(job.application) + ' Results',
                         result=finalResult,
                         tree=tree,
                         job=job,
                         plotJSON=plotJSON))
            else:
                tree = False
                return render(
                    request, 'view_job.html',
                    dict(title=str(job.application) + ' Results',
                         result=finalResult,
                         tree=tree,
                         job=job,
                         plotJSON=plotJSON))
        elif job.application.output_type == 'text/properties.table':
            f = open(job.output, 'r')
            csvinput = csv.reader(f)
            csvOutput = []
            for line in csvinput:
                csvOutput.append(line)
            f.close()
            return render(
                request, 'view_csv.html',
                dict(title=str(job.application) + ' Results',
                     result=finalResult,
                     job=job,
                     csv=csvOutput))
        elif job.application.output_type == 'text/bins.table':
            f = open(job.output, 'r')
            csvinput = csv.reader(f)
            bins = defaultdict(list)
            iterCsvInput = iter(csvinput)
            next(iterCsvInput)  # skip first line
            for line in iterCsvInput:
                # columns: compound id, bin size, bin number
                cid = line[0]
                binSize = line[1]
                bin = line[2]
                bins[bin].append(cid)
            f.close()
            # order bins numerically by bin number for display
            bins = OrderedDict(sorted(list(bins.items()), key=lambda t: \
                int(t[0])))
            return render(
                request, 'bins.html',
                dict(title=str(job.application) + ' Results',
                     result=finalResult,
                     job=job,
                     bins=bins))
        elif job.application.output_type == 'chemical/x-mdl-sdfile':
            f = open(job.output, 'r')
            sdf = f.read()
            f.close()
            # job.input doubles as the "next step" directive for SDF results
            nextStep = job.input
            deleteJob(request.user, job.id)
            if nextStep == 'workbench':
                # NOTE(review): the line below appears redacted/scrubbed
                # ('******'); it is preserved verbatim — restore the original
                # createJob argument list from version control.
                newJob = createJob(request.user, 'Upload Compounds', '', ['--user='******'')
            elif nextStep == 'smiles':
                result, error = batch_sdf_to_smiles(sdf)
                return HttpResponse(result,
                                    content_type=job.application.output_type)
            else:
                return HttpResponse(sdf,
                                    content_type=job.application.output_type)
        else:
            # if mimetype is unknown, just send the file to the user
            return redirect(view_job,
                            job_id=job.id,
                            resource='download',
                            filename='output')
    elif job.status == Job.RUNNING:
        return render(
            request, 'wait.html',
            dict(title=job.application.name +
                 ' Job Running <img src="/static/images/spinner.gif"/>'))
    elif job.status == Job.FAILED:
        messages.error(
            request,
            'Job Failed due to invalid input data and/or invalid selected options. Please double check that your uploaded data (compounds and/or numeric data), and input options are valid and try running the tool again.'
        )
        return render(
            request, 'view_job.html',
            dict(title='Error: ' + str(job.application) + ' Job Failed',
                 job=job))
def search(request):
    """Structure-search view: serve the search form or run a search job.

    - AJAX GET: return the rendered option form for 'application_id', or a 404.
    - plain GET: render the search page, optionally pre-filling a SMILES string
      from the 'smi' query parameter.
    - POST: build an SDF query from whichever input was supplied (workbench
      tags, SMILES text, uploaded/pasted SDF, or a drawing), validate the app
      option form, create the search job, and redirect to its results page.
    """
    if request.is_ajax():
        if 'application_id' in request.GET:
            form = getAppForm(request.GET['application_id'], request.user)()
            return HttpResponse(str(form))
        else:
            # FIX: HttpResponse takes `status`, not `status_code`; the original
            # keyword raised TypeError instead of returning a 404.
            return HttpResponse("", status=404)
    elif request.method != 'POST':
        smi = ''
        if 'smi' in request.GET:
            smi = str(request.GET['smi'])
            smi = urlunquote(smi)
        allTags = Tag.allUserTagNames(request.user)
        fields = {}
        category = ApplicationCategories.objects.get(name="Search")
        apps = Application.objects.filter(category=category).exclude(
            name__exact="pubchemID2SDF")
        fields['application'] = ModelChoiceField(queryset=apps, empty_label='')
        # build a one-field Form subclass dynamically for the app chooser
        form = type('%sForm' % 'choose application', (Form, ), fields)
        return render(request, 'search.html',
                      dict(mode='form', smi=smi, appsForm=form, tags=allTags))
    else:
        sdf = None
        smiles = None
        compid = u'query'
        form = None
        application = None
        application_id = None
        if 'application' in request.POST:
            application_id = request.POST['application']
            application = Application.objects.get(id=application_id)
        if 'tags' in request.POST:
            # query by the user's own tagged compounds
            givenTags = request.POST.getlist("tags")
            compoundList = Compound.byTagNames(givenTags, request.user)
            if len(compoundList) == 0:
                messages.error(request,
                               "Error: No compounds found with selected tags")
            else:
                sdf = u''
                for compound in compoundList:
                    sdf = sdf + compound.sdffile_set.all()[0].sdffile.rstrip(
                    ) + '\n'
                smiles = sdf_to_smiles(sdf)
        elif 'smiles' in request.POST:
            input_mode = 'smiles-input'
            sdf = u''
            try:
                smiles = request.POST['smiles']
                sdf = smiles_to_sdf(smiles)
            except Exception:
                # narrowed from bare except: still best-effort, but no longer
                # traps SystemExit/KeyboardInterrupt
                messages.error(request, 'Error: Invalid SMILES string!')
                sdf = None
        elif 'sdf' in request.FILES:
            input_mode = 'sdf-upload'
            try:
                sdf = request.FILES['sdf']
                sdf = first_mol(sdf.read())
                smiles = sdf_to_smiles(sdf)
            except Exception:
                print(traceback.format_exc())
                messages.error(request, 'Invalid SDF!')
                sdf = None
        elif 'sdf' in request.POST:
            if 'draw' in request.POST:
                # the structure editor posts a molfile without the record
                # terminator, so append it before converting
                input_mode = 'draw'
                sdf = request.POST['sdf'] + '$$$$'
                try:
                    smiles = sdf_to_smiles(sdf)
                    smiles = re.match(r"^(\S+)", smiles).group(1)
                    smiles = smiles + ' ' + compid
                    sdf = smiles_to_sdf(smiles)
                except Exception:
                    print(traceback.format_exc())
                    messages.error(request, 'Invalid drawing!')
                    sdf = None
            else:
                try:
                    input_mode = 'sdf-input'
                    sdf = first_mol(request.POST['sdf'])
                    smiles = sdf_to_smiles(sdf)
                except Exception:
                    print(traceback.format_exc())
                    messages.error(request, 'Invalid input SDF!')
                    sdf = None
        if application_id != None:
            AppFormSet = getAppForm(request.POST['application'], request.user)
            form = AppFormSet(request.POST)
        if form != None and form.is_valid():
            commandOptions, optionsList = parseToolForm(form)
        else:
            sdf = None
            messages.error(request, "Invalid form options!")
        if not sdf:
            print("no sdf found")
            return redirect(structure_search.views.search)
        smiles = re.search(r'(\S+)', smiles).group(1)
        smiles = urlquote(smiles)
        #print("command options: "+str(commandOptions))
        if application.name == "PubChem Fingerprint Search":
            newJob = createJob(request.user, application.name, optionsList,
                               commandOptions, sdf, smiles)
        elif application.name == "ChEMBL Fingerprint Search":
            newJob = createJob(request.user, application.name, optionsList,
                               commandOptions, sdf, smiles)
        elif application.name == "ChEMBL EI Search":
            newJob = createJob(request.user, application.name, optionsList,
                               commandOptions, sdf, smiles)
        # brief wait so the job has started before the results page polls it
        time.sleep(1)
        return redirect(tools.views.view_job, job_id=newJob.id, resource='')
def uploadCompound(request, resource = None, job_id = None):
    """Add compounds to the user's workbench from one of several input modes.

    GET (unless resource == 'job') renders the add-compounds form. Otherwise an
    SDF payload is assembled from whichever input is present — SMILES text, a
    prior job's output (resource == 'job'), an uploaded or pasted SDF, a drawn
    structure, PubChem CIDs, or ChEMBL IDs — then submitted as an
    'Upload Compounds' job, redirecting to the job view page.
    """
    allTags = Tag.allUserTagNames(request.user)
    if (request.method == 'GET') and (resource != u'job'):
        return render(request,'addCompounds.html',
                      dict(input_mode='smiles-input', tags=allTags))
    else:
        sdf = None
        name = None
        compid = None
        smiles = None
        compoundTags = []
        input_mode='smiles-input'
        if 'tags' in request.POST:
            compoundTags = set(request.POST.getlist('tags'))
            Tag.ensureAllExist(compoundTags,request.user)
            #existingTags = set(allTags)
            #print("compound tags: "+str(compoundTags))
            #for newTag in compoundTags.difference(existingTags):
            #    print("creating new tag: "+newTag+" for user "+request.user.username)
            #    Tag.objects.create(name = newTag, user=request.user)
        if 'smiles' in request.POST:
            input_mode = 'smiles-input'
            sdf = u''
            try:
                # one SMILES per line; skip blank lines
                smiles = request.POST['smiles'].split('\n')
                for line in smiles:
                    if re.match(r"^\S+", line):
                        sdf = sdf + smiles_to_sdf(str(line))
            except:
                print("Unexpected error:", sys.exc_info())
                traceback.print_tb(sys.exc_info()[2])
                messages.error(request, 'Error: Invalid SMILES string!')
                sdf = None
        elif resource == 'job':
            # re-import the output of a previous job as the SDF payload
            input_mode = 'sdf-upload'
            job = updateJob(request.user, job_id)
            f = open(job.output, 'r')
            sdf = f.read()
            f.close()
        elif 'sdf' in request.FILES:
            input_mode = 'sdf-upload'
            try:
                sdf = request.FILES['sdf']
                sdf = sdf.read().decode("utf-8")
            except (InputError, InvalidInputError):
                messages.error(request, 'Invalid SDF!')
                sdf = None
        elif 'sdf' in request.POST:
            if 'draw' in request.POST:
                # drawn structures arrive without the SDF record terminator
                input_mode = 'draw'
                sdf = request.POST['sdf'] + '$$$$'
                compid = str(request.POST['id'])
                compid = re.match(r"^(\S{0,20})", compid).group(1)
                try:
                    smiles = sdf_to_smiles(sdf)
                    smiles = re.match(r"^(\S+)", smiles).group(1)
                    smiles = smiles + ' ' + compid
                    sdf = smiles_to_sdf(smiles)
                except:
                    print("Unexpected error:", sys.exc_info())
                    traceback.print_tb(sys.exc_info()[2])
                    messages.error(request, 'Invalid drawing!')
                    sdf = None
            else:
                input_mode = 'sdf-input'
                sdf = request.POST['sdf']
                if not sdf:
                    messages.error(request, 'No input found!')
        elif 'pubchem' in request.POST:
            cids = request.POST['pubchem']
            cids = cids.split()
            input_mode = 'pubchem'
            filteredCIDs = []
            for cid in cids[:]:
                match = re.search("(\d{1,200})", cid)
                if match:
                    filteredCIDs.append(int(match.group(1)))
            if len(filteredCIDs) > 0:
                try:
                    # NOTE(review): downloads the raw `cids` list, not the
                    # validated filteredCIDs — confirm that is intentional.
                    sdf = DownloadCIDs(cids)
                except:
                    print("Unexpected error:", sys.exc_info())
                    traceback.print_tb(sys.exc_info()[2])
                    messages.error(request,
                                   'Invalid CIDs or no response from PubChem!' )
                    sdf = None
            else:
                messages.error(request, 'Error: No valid CIDs entered!')
                sdf = None
        elif 'chembl' in request.POST:
            cids = tuple(request.POST['chembl'].split())
            if len(cids) > 0:
                try:
                    sdfs = get_chembl_sdfs(cids)
                    # join individual molfiles with the SDF record separator
                    sdf = "\n$$$$\n".join(sdfs)+"\n$$$$\n"
                except:
                    print("Unexpected error:", sys.exc_info())
                    traceback.print_tb(sys.exc_info()[2])
                    messages.error(request,
                                   'Invalid CIDs or no response from ChEMBL!' )
                    sdf = None
            else:
                print("no chembl cids given")
                messages.error(request, 'Error: No valid ChEMBL CIDs entered!')
                sdf = None
        if not sdf:
            # NOTE(review): render() is missing the `request` argument here —
            # this call would raise at runtime; confirm and fix upstream.
            return render('addCompounds.html',
                          dict(input_mode=input_mode,
                               post_data=request.POST,
                               tags=compoundTags))
        # NOTE(review): the two lines below appear redacted/scrubbed
        # ('******'); the original likely read
        #   jobArgs = ['--user=' + ..., '--tags=' + ...]
        #   if 'dedup' in request.POST:
        # Preserved verbatim — restore from version control before running.
        jobArgs = ['--user='******'dedup' in request.POST:
            jobArgs += ["--deduplicate"]
        newJob = createJob(request.user, 'Upload Compounds', '',jobArgs , sdf)
        # brief wait so the job has started before the job view polls it
        time.sleep(2)
        return redirect(tools.views.view_job, job_id=newJob.id, resource='')
def newTS(request):
    """Target-search view: look up annotation and activity data for compounds
    or protein targets and render the results page.

    GET parameters: 'id_type' ('compound', 'target', or 'homolog-target'),
    'ids', 'tags' (adds the user's tagged compounds to the query), 'source_id'
    (triggers ChEMBL/UniProt id translation), 'include_activity', and
    'similarity_job_id' (maps query ids back to the originals from a prior
    similarity job). Errors during the search are caught and surfaced as a
    message on the rendered page.
    """
    # Default local variables
    query_submit = False
    message = None
    annotation_info = None
    annotation_matches = None
    drugind_json = None
    activity_info = None
    activity_matches = None
    allTags = Tag.allUserTagNames(request.user)
    compoundDbs = readSources("unichem")
    proteinDbs = readSources("uniprot")
    defaultCompoundDb = "1"
    defaultProteinDb = "ACC+ID"
    groupingCol = 0
    # jobs from the similarity app category (category_id=5) for the dropdown
    similarityJobs = [(job.id, str(job)) for job in Job.objects.filter(
        user=request.user, application__category_id=5)]
    # Default GET request variables
    id_type = 'compound'
    ids = list()
    include_activity = False
    source_id = 1
    similarity_job_id = -1
    # Retrieve GET request variables
    if 'id_type' in request.GET:
        id_type = request.GET['id_type']
    if 'ids' in request.GET:
        if id_type == 'homolog-target':
            ids = request.GET.getlist('ids')
        else:
            ids = list(request.GET['ids'].split())
    if 'include_activity' in request.GET:
        include_activity = True
    if 'tags' in request.GET:
        for c in [
                compound.cid for compound in Compound.byTagNames(
                    request.GET.getlist("tags"), request.user)
        ]:
            ids.append(c)
    if 'source_id' in request.GET:
        source_id = request.GET['source_id']
    if 'similarity_job_id' in request.GET:
        similarity_job_id = int(request.GET['similarity_job_id'])
    # Generate content
    try:
        idMapping = {}
        # translate foreign ids to ChEMBL / UniProt accessions when needed
        if id_type == 'compound' and source_id != '1':
            idMapping = mapToChembl(ids, source_id)
            ids = list(idMapping.keys())
        elif id_type == 'target' and source_id != 'ACC':
            idMapping = mapToUniprot(ids, source_id)
            ids = list(idMapping.keys())
        elif id_type == 'homolog-target':
            #full_ids = ids # Full context for homolog handling
            #ids = [ i.split(',')[2] for i in full_ids ] # Bare Accession IDs for old target-search
            # Context dictionary for homolog handling
            # ex:
            # homolog_context = {
            #     'P29274': {'paralog': 'P30542'},
            #     'P29275': {'paralog': 'P30542'},
            #     'P0DMS8': {'paralog': 'P30542'},
            # }
            homolog_context = dict()
            for i in ids:
                # each id arrives as "relation,source_accession,homolog_accession"
                [relation, src_id, homolog_id] = i.split(',')
                if homolog_id not in homolog_context.keys():
                    homolog_context[homolog_id] = dict()
                if src_id == homolog_id:
                    continue  # Don't bother with "X is a homolog of X"
                if relation not in homolog_context[homolog_id].keys():
                    homolog_context[homolog_id][relation] = set()
                homolog_context[homolog_id][relation].add(src_id)
            ids = list(homolog_context.keys()
                       )  # Bare Accession IDs for old target-search
            # Prepare homolog relation descriptions
            homolog_desc = dict()
            for homolog_id, relations in homolog_context.items():
                desc_parts = list()
                for relation, src_ids in sorted(relations.items()):
                    desc_parts.append("{} of {}".format(
                        relation, ', '.join(sorted(list(src_ids)))))
                homolog_desc[homolog_id] = '; '.join(desc_parts)
        if len(ids) != 0:
            query_submit = True
            # column templates for the extra columns added by the id mappings
            queryIdCol = {
                "id": "query_id",
                "sql": None,
                "table": "Query ID",
                "name": "Query ID",
                "desc": "Original compound ID prior to ChEMBL conversion",
                "visible": True,
            }
            headerTextCol = {
                "id": "header_text",
                "sql": None,
                "table": "Header Text",
                "name": "Header Text",
                "desc": "Description text to show in row-group headers (i.e. source query IDs, translations, etc.)",
                "visible": False,
            }
            originalQueryCol = {
                "id": "original_query_id",
                "sql": None,
                "table": "Original Query ID",
                "name": "Original Query ID",
                "desc": "The compound that the current query compound was originally derived from, based on similarity",
                "visible": True,
            }
            myAnnotationSearch = AnnotationWithDrugIndSearch(id_type, ids)
            annotation_info = myAnnotationSearch.table_info
            annotation_matches = myAnnotationSearch.get_grouped_results()
            drugind_json = drugIndicationData(myAnnotationSearch.drugind_objs)
            # Exclude ActivitySearch from search-by-target by default
            if id_type in ['target', 'homolog-target'
                           ] and not include_activity:
                activity_info = None
                activity_matches = None
            else:
                myActivitySearch = ActivitySearch(id_type, ids)
                activity_info = myActivitySearch.table_info
                activity_matches = myActivitySearch.get_grouped_results()
            if len(idMapping) != 0:
                groupingCol += 1
                addMappedQueryColumn(idMapping, queryIdCol, annotation_info,
                                     annotation_matches, activity_info,
                                     activity_matches)
            if similarity_job_id != -1:
                similarityMapping = readSimilarityMappingData(
                    request.user, similarity_job_id)
                if len(
                        idMapping
                ) != 0:  # need to compose our mapping with previous mapping
                    similarityMapping = composeMaps(idMapping,
                                                    similarityMapping)
                #print("similarity mapping: \n"+str(similarityMapping))
                if len(similarityMapping) != 0:
                    groupingCol += 1
                    addMappedQueryColumn(similarityMapping, originalQueryCol,
                                         annotation_info, annotation_matches,
                                         activity_info, activity_matches)
            #if similarity_job_id != -1, then read job_<similarity_job_id>
            # map chembl id (2nd column) back to the original compound id (first column)
            # insert new column to show original compound id.
        if ts_paralog_cache():
            homolog_type_value = 'paralog-cache'
        else:
            homolog_type_value = 'paralog'
    except Exception as e:
        print("exception in newTS:", sys.exc_info())
        traceback.print_tb(sys.exc_info()[2])
        message = str(e)
        # NOTE(review): if the exception fired before homolog_type_value was
        # assigned, the context dict below raises NameError — confirm whether
        # this path is reachable.
    context = {
        'query_submit': query_submit,
        'message': message,
        'id_type': id_type,
        'annotation_info': annotation_info,
        'annotation_matches': annotation_matches,
        'drugind_json': json.dumps(drugind_json),
        'activity_info': activity_info,
        'activity_matches': activity_matches,
        'tags': allTags,
        'compoundDbs': compoundDbs,
        'defaultCompoundDb': defaultCompoundDb,
        'proteinDbs': proteinDbs,
        'defaultProteinDb': defaultProteinDb,
        'groupingCol': groupingCol,
        'similarityJobs': similarityJobs,
        'homolog_type_value': homolog_type_value,
    }
    return render(request, 'targetsearch/new_ts.html', context)