import graphene
from models import SBucketCounter, FileSize, get_buckets, get_total_file_size

# Default counts can be preloaded for fast loading; user interaction with the
# facets or queries will then refine these counts.
proName = get_buckets("Project.name","yes","")
samFMA = get_buckets("Sample.fma_body_site","yes","")
subGender = get_buckets("Subject.gender","yes","")
fileFormat = get_buckets("File.format","yes","")
fileSubtype = get_buckets("File.subtype","yes","")
studyName = get_buckets("Study.name","yes","")
fs = FileSize(value=get_total_file_size(""))

class Query(graphene.ObjectType):
    SampleFmabodysite = graphene.Field(SBucketCounter, cy=graphene.String(description='Cypher WHERE parameters'))
    ProjectName = graphene.Field(SBucketCounter, cy=graphene.String(description='Cypher WHERE parameters'))
    SubjectGender = graphene.Field(SBucketCounter, cy=graphene.String(description='Cypher WHERE parameters'))
    FileFormat = graphene.Field(SBucketCounter, cy=graphene.String(description='Cypher WHERE parameters'))
    FileSubtype = graphene.Field(SBucketCounter, cy=graphene.String(description='Cypher WHERE parameters'))
    StudyName = graphene.Field(SBucketCounter, cy=graphene.String(description='Cypher WHERE parameters'))
    fs = graphene.Field(FileSize, cy=graphene.String(description='Cypher WHERE parameters'))

    def resolve_SampleFmabodysite(self, args, context, info):
        # Accept the pipes and convert them back to quotes now that the value
        # has been passed across the URL.
        cy = args['cy'].replace("|",'"')
        if cy == "":
            return samFMA
        else:
            return get_buckets("Sample.fma_body_site","yes",cy)

    def resolve_StudyName(self, args, context, info):
        cy = args['cy'].replace("|",'"')
        if cy == "":
            return studyName
        else:
            return get_buckets("Study.name","yes",cy)

    def resolve_FileSubtype(self, args, context, info):
        cy = args['cy'].replace("|",'"')
        if cy == "":
            return fileSubtype
        else:
            return get_buckets("File.subtype","yes",cy)

    def resolve_FileFormat(self, args, context, info):
        cy = args['cy'].replace("|",'"')
        if cy == "":
            return fileFormat
        else:
            return get_buckets("File.format","yes",cy)

    def resolve_SubjectGender(self, args, context, info):
        cy = args['cy'].replace("|",'"')
        if cy == "":
            return subGender
        else:
            return get_buckets("Subject.gender","yes",cy)

    def resolve_ProjectName(self, args, context, info):
        cy = args['cy'].replace("|",'"')
        if cy == "":
            return proName
        else:
            return get_buckets("Project.name","yes",cy)

    # NOTE: this resolver for 'fs' is an assumed completion mirroring the other
    # resolvers; it was not present in the original snippet, which declared the
    # field but showed no matching resolver.
    def resolve_fs(self, args, context, info):
        cy = args['cy'].replace("|",'"')
        if cy == "":
            return fs
        else:
            return FileSize(value=get_total_file_size(cy))
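# Usage sketch (an assumption, not part of the original module): one way the
# summary schema above might be mounted and queried. The 'fs { value }'
# selection relies only on the FileSize(value=...) shape shown above; the
# Schema/execute calls are standard graphene, but treat this as illustrative
# rather than how the portal actually serves it.
summary_schema = graphene.Schema(query=Query)
result = summary_schema.execute('{ fs(cy: "") { value } }')
print(result.data)  # expected shape: {'fs': {'value': <total from get_total_file_size>}}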
import graphene
from models import Pagination, FileHits, Aggregations, get_buckets, get_file_hits, get_pagination

# The aggregations can be preloaded. Note that get_buckets needs to be changed
# up a bit for file counts, since it needs to pull ALL nodes that are tied to
# some file and count those unique groups. Should be easy enough; just match by
# the relevant edges. Simplified for now.
dt = get_buckets("File.node_type","no","")
df = get_buckets("File.format","no","")

class Query(graphene.ObjectType):
    pagination = graphene.Field(Pagination,
                                cy=graphene.String(description='Cypher WHERE parameters'),
                                s=graphene.Int(description='size of subset to return'),
                                f=graphene.Int(description='what position of the sort to start at'))
    hits = graphene.List(FileHits,
                         cy=graphene.String(description='Cypher WHERE parameters'),
                         s=graphene.Int(description='size of subset to return'),
                         o=graphene.String(description='what to sort by'),
                         f=graphene.Int(description='what position of the sort to start at'))
    aggregations = graphene.Field(Aggregations)

    def resolve_pagination(self, args, context, info):
        cy = args['cy'].replace("|",'"')
        return get_pagination(cy,args['s'],args['f'],'f')

    def resolve_hits(self, args, context, info):
        cy = args['cy'].replace("|",'"') # handle quotes for GQL
        o = args['o'].replace("file_name","Sample.id") # translate the portal sort field into the Neo4j property
        o = o.replace(".raw","") # lose the portal ordering syntax
        if args['cy'] == "":
            return get_file_hits(args['s'],"Sample.id:asc",args['f'],"")
        else:
            return get_file_hits(args['s'],o,args['f'],cy)

    def resolve_aggregations(self, args, context, info):
        return Aggregations(dataType=dt, dataFormat=df)
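# Hedged client-side sketch (an assumption, not in the source): the resolvers
# above undo a pipe-for-quote substitution, so whatever builds the 'cy'
# parameter presumably applies the inverse before placing the Cypher WHERE
# fragment in a URL. A hypothetical encoder:
def encode_cy_for_url(where_fragment):
    # 'Sample.fma_body_site = "blood"' becomes 'Sample.fma_body_site = |blood|',
    # which survives URL transport and is reversed by .replace("|", '"') above.
    return where_fragment.replace('"', "|")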
import graphene
from models import Pagination, CaseHits, Aggregations, get_buckets, get_case_hits, get_pagination

# Counts can be preloaded by declaring them in this next block. These
# aggregations can remain stagnant, so they don't need to update based on
# filters; they are used to give a total count of the data.
#
# Any property followed by "###" means the property being grabbed isn't exactly
# what the user sees on the site. For instance, 'Project name' on the site
# actually searches for 'Project.project_name'. This provides a more succinct
# search parameter.
proN = get_buckets("project.name","no","") ###
stuS = get_buckets("study.subtype","no","")
stuC = get_buckets("study.center","no","")
stuN = get_buckets("study.name","no","")
subG = get_buckets("subject.gender","no","")
subR = get_buckets("subject.race","no","")
visVN = get_buckets("visit.visit_number","no","") ###
visI = get_buckets("visit.interval","no","")
visD = get_buckets("visit.date","no","")
samBP = get_buckets("sample.body_product","no","")
samFMA = get_buckets("sample.fma_body_site","no","")
samGLN = get_buckets("sample.geo_loc_name","no","")
samSCD = get_buckets("sample.samp_collect_device","no","")
samEP = get_buckets("sample.env_package","no","")
samSS = get_buckets("sample.supersite","no","")
samF = get_buckets("sample.feature","no","")
samID = get_buckets("sample.id","no","")
samM = get_buckets("sample.material","no","")
samB = get_buckets("sample.biome","no","")
fileF = get_buckets("File.format","no","")
fileID = get_buckets("File.id","no","")
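# Hedged sketch (assumption): the original file presumably continues with a
# cases Query that exposes these preloaded buckets, mirroring how the file
# schema above returns Aggregations(dataType=dt, dataFormat=df). The keyword
# names passed to Aggregations here are hypothetical; only the pattern comes
# from the source.
class Query(graphene.ObjectType):
    aggregations = graphene.Field(Aggregations)

    def resolve_aggregations(self, args, context, info):
        # hypothetical field names; the real ones are defined on Aggregations in models
        return Aggregations(projectName=proN, studyName=stuN, subjectGender=subG)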