def plot_PCA():
    content = request.get_json(force=True)
    print(content)
    ID_num = int(content['ID_num'])
    try:
        with open('MVP/pickles/files.pickle', 'rb') as f:
            files = pickle.load(f)
        feature_table = files['feature_table']
        tree = files['tree']
        taxo_file = files['taxonomy']
        metadata = files['metadata']
    except Exception:
        print('no files.pickle exists; please go to the main page and get the main view first')
    try:
        with open('MVP/pickles/' + metadata.split('/')[-1] + '_mvp_tree.pickle', 'rb') as f:
            mvp_tree = pickle.load(f)
        print('read mvp_tree from pickle')
        mvp_tree.get_subtree(ID_num)
    except Exception:
        string_ = 'there are no pickles to read; please try the plot_tree button'
        result = {0: string_}
        return jsonify(result)
    pca_div = PCA_plot_ywch.run_this_script(mvp_tree, ID_num)
    cols = [ele.name for ele in mvp_tree.subtree.get_terminals()]
    ann = annotation.Annotation(cols, feature_table, taxo_file)
    ann_div = ann.plot_annotation()
    mvp_tree.get_colors(ann.colors, ann.mapped_phylum_colors)
    mvp_tree.get_subtree(ID_num)
    tree_div = mvp_tree.plot_tree()
    result = {0: pca_div, 1: tree_div, 2: ann_div}
    return jsonify(result)
def getAnnotation(self, ch, annid):
    """Return a RAMON object by identifier"""
    kvdict = self.annodb.getAnnotationKV(ch, annid)
    annotype = int(kvdict['ann_type'])
    # switch on the type of annotation
    if annotype == annotation.ANNO_SYNAPSE:
        anno = annotation.AnnSynapse(self.annodb, ch)
    elif annotype == annotation.ANNO_SEED:
        anno = annotation.AnnSeed(self.annodb, ch)
    elif annotype == annotation.ANNO_SEGMENT:
        anno = annotation.AnnSegment(self.annodb, ch)
    elif annotype == annotation.ANNO_NEURON:
        anno = annotation.AnnNeuron(self.annodb, ch)
    elif annotype == annotation.ANNO_ORGANELLE:
        anno = annotation.AnnOrganelle(self.annodb, ch)
    elif annotype == annotation.ANNO_NODE:
        anno = annotation.AnnNode(self.annodb, ch)
    elif annotype == annotation.ANNO_SKELETON:
        anno = annotation.AnnSkeleton(self.annodb, ch)
    elif annotype == annotation.ANNO_ROI:
        anno = annotation.AnnROI(self.annodb, ch)
    elif annotype == annotation.ANNO_ANNOTATION:
        anno = annotation.Annotation(self.annodb, ch)
    else:
        raise NDWSError("Unrecognized annotation type {}".format(annotype))
    # load the annotation from the key/value dictionary
    anno.fromDict(kvdict)
    return anno
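The long elif chain above could equally be written as a lookup table. A minimal sketch, assuming the same ANNO_* constants and Ann* classes from the annotation module; the table name RAMON_CLASSES is hypothetical:

# Hypothetical table-driven variant of the type dispatch above.
RAMON_CLASSES = {
    annotation.ANNO_SYNAPSE: annotation.AnnSynapse,
    annotation.ANNO_SEED: annotation.AnnSeed,
    annotation.ANNO_SEGMENT: annotation.AnnSegment,
    annotation.ANNO_NEURON: annotation.AnnNeuron,
    annotation.ANNO_ORGANELLE: annotation.AnnOrganelle,
    annotation.ANNO_NODE: annotation.AnnNode,
    annotation.ANNO_SKELETON: annotation.AnnSkeleton,
    annotation.ANNO_ROI: annotation.AnnROI,
    annotation.ANNO_ANNOTATION: annotation.Annotation,
}

def getAnnotation(self, ch, annid):
    """Return a RAMON object by identifier (table-driven sketch)."""
    kvdict = self.annodb.getAnnotationKV(ch, annid)
    annotype = int(kvdict['ann_type'])
    try:
        cls = RAMON_CLASSES[annotype]
    except KeyError:
        raise NDWSError("Unrecognized annotation type {}".format(annotype))
    anno = cls(self.annodb, ch)
    anno.fromDict(kvdict)
    return anno

Adding a new RAMON type then means adding one table entry rather than another elif branch.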
def plot_abun():
    content = request.get_json(force=True)
    feature_table = content['feature_table']
    log_flag = content['log_flag']
    abun_type = content['abun_type']
    # abundance plot
    abun_div_and_dict = stat_abundance.plot_stat_abun(feature_table, abun_type, log_flag)
    abun_div = abun_div_and_dict[0]
    cols = list(abun_div_and_dict[1])
    # heatmap
    metadata = content['metadata']
    features = [content['feature0'], content['feature1'], content['feature2']]
    heatmap_instance = heatmap.Heatmap(metadata, feature_table)
    heatmap_instance.map()
    heatmap_instance.sort_by_features(features[0], features[1], features[2])
    heatmap_instance.obtain_numerical_matrix(cols)
    heatmap_div = heatmap_instance.plotly_div()
    # annotation
    taxo_file = content['taxonomy_file']
    ann = annotation.Annotation(cols, feature_table, taxo_file)
    ann_div = ann.plot_annotation()
    result = {0: abun_div, 1: ann_div, 2: heatmap_div}
    return jsonify(result)
def storeSegment(baseurl, fields, token):
    """Build a segment and upload it to the database"""
    # Create the segment and initialize its fields
    ann = annotation.Annotation()
    ann.annid = int(fields[0])
    # Exceptional cases
    if ann.annid in EXCEPTIONS:
        print "Skipping id ", ann.annid
        return
    descriptorstr = fields[40].split("\"")
    descriptor = descriptorstr[1]
    ann.kvpairs = {'sourceId': fields[0], 'sourceDescription': descriptor}
    ann.author = 'Kasthuri,N.'
    pprint(vars(ann))
    h5anno = h5ann.AnnotationtoH5(ann)
    url = "http://%s/annotate/%s/" % (baseurl, token)
    print url
    try:
        req = urllib2.Request(url, h5anno.fileReader())
        response = urllib2.urlopen(req)
    except urllib2.URLError, e:
        print "Failed URL", url
        print "Error %s" % (e.read())
        sys.exit(0)
def _readAnnotation(self, id):
    # create a new annotation
    anno = annotation.Annotation(self.annodb, self.ch)
    # set the segment ID
    anno.setField('annid', id)
    # fill in the fields: basic metadata first
    [confidence, status] = self._readAnnoMetadata(id)
    anno.setField('status', status)
    anno.setField('confidence', confidence)
    # parse the key/value pairs
    kvpairs = self._readKVPairs(id)
    for key, value in kvpairs.items():
        if key == 'ann_author':
            anno.setField('author', value)
        else:
            anno.setField(key, value)
    # return the newly completed anno object
    return anno
def plot_ecology_scatters():
    content = request.get_json(force=True)
    obj_col = content['obj_col']
    stats_method = content['stats_method']
    corr_method = content['corr_method']
    ID_num = int(content['ID_num'])
    try:
        with open('MVP/pickles/files.pickle', 'rb') as f:
            files = pickle.load(f)
        feature_table = files['feature_table']
        tree = files['tree']
        taxo_file = files['taxonomy']
        metadata = files['metadata']
    except Exception:
        print('no files.pickle exists; please go to the main page and get the main view first')
    try:
        with open('MVP/pickles/' + metadata.split('/')[-1] + '_mvp_tree.pickle', 'rb') as f:
            mvp_tree = pickle.load(f)
        print('read mvp_tree from pickle')
    except Exception:
        mvp_tree = corr_tree_new.MvpTree(feature_table, tree, metadata, taxo_file, ID_num)
        file_paras = {
            'feature_table': feature_table,
            'metadata': metadata,
            'taxonomy': taxo_file,
            'tree': tree
        }
    mvp_tree.get_subtree(ID_num)
    cols = [ele.name for ele in mvp_tree.subtree.get_terminals()]
    ann = annotation.Annotation(cols, feature_table, taxo_file)
    ann_div = ann.plot_annotation()
    mvp_tree.get_colors(ann.colors, ann.mapped_phylum_colors)
    tree_div = mvp_tree.plot_tree()
    scatter_whole_tree = mvp_tree.plot_whole_tree()
    scatter_div1 = ''
    scatter_div2 = ''
    if stats_method != 'None':
        mvp_tree.stats_test(obj_col, stats_method, ID_num)
        scatter_div1 = mvp_tree.plot_scatter('pvalue', 'GI', ID_num)
        scatter_div2 = mvp_tree.plot_scatter('pvalue', 'abundance', ID_num)
    if corr_method != 'None':
        mvp_tree.get_corr_coefficient(obj_col, corr_method, ID_num)
        scatter_div1 = mvp_tree.plot_scatter('corr_coef', 'GI', ID_num)
        scatter_div2 = mvp_tree.plot_scatter('corr_coef', 'abundance', ID_num)
    result = {
        0: tree_div,
        1: scatter_div1,
        2: scatter_div2,
        3: scatter_whole_tree,
        4: ann_div
    }
    return jsonify(result)
def __json_extract_annotation(resource, text):
    surfForm = resource["@surfaceForm"]
    lpos = text.find(surfForm)
    rpos = lpos + len(surfForm)
    uri = resource["@URI"]
    types = resource["@types"]
    if "DBpedia:Place" in types:
        annot_type = "Place"
    elif "DBpedia:Person" in types:
        annot_type = "Person"
    else:
        annot_type = "Organisation"
    return annotation.Annotation(lpos, rpos, uri, annot_type)
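For context, a hedged usage sketch: the resource dict below only mimics the shape of a DBpedia Spotlight resource entry; the sample text and values are illustrative, not from the source, and the call assumes the function is module-level:

# Illustrative input shaped like a DBpedia Spotlight "Resources" entry.
text = "Barack Obama visited Berlin."
resource = {
    "@surfaceForm": "Berlin",
    "@URI": "http://dbpedia.org/resource/Berlin",
    "@types": "Schema:Place,DBpedia:Place,DBpedia:PopulatedPlace",
}
anno = __json_extract_annotation(resource, text)
# yields Annotation(21, 27, "http://dbpedia.org/resource/Berlin", "Place")

Note that text.find returns -1 when the surface form is absent, so callers are expected to pass the same text the resource was extracted from.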
def __get_annotation(self, restr, annot_text):
    try:
        text = annot_text.text[restr.lpos:restr.rpos]
        text = "".join(self.stem.lemmatize(text))
        pg = wikipedia.page(title=text, auto_suggest=True)
        query = self._query_template % (pg.pageid, restr.type)
        self.sparql.setQuery(query)
        uri = self.__retrieve_uri(self.sparql.query().convert())
        if uri == "":
            return None
        return annotation.Annotation(restr.lpos, restr.rpos, uri, restr.type)
    except Exception:
        # Wikipedia lookup or SPARQL query failed; treat as "no annotation found"
        return None
def storeSegment(baseurl, fields, token):
    """Build a segment and upload it to the database"""
    # Create the segment and initialize its fields
    ann = annotation.Annotation()
    ann.annid = int(fields[0])
    # Exceptional cases
    if ann.annid in EXCEPTIONS:
        print "Skipping id ", ann.annid
        return
    descriptorstr = fields[40].split("\"")
    descriptor = descriptorstr[1]
    ann.kvpairs = {'sourceId': fields[0], 'sourceDescription': descriptor}
    ann.author = 'Kasthuri,N.'
    pprint(vars(ann))
    # Create the HDF5 file in a temporary file
    tmpfile = tempfile.NamedTemporaryFile()
    h5f = h5py.File(tmpfile.name)
    h5anno = h5ann.AnnotationtoH5(ann, h5f)
    h5f.close()
    url = "http://%s/emca/%s/" % (baseurl, token)
    print url
    try:
        tmpfile.seek(0)
        req = urllib2.Request(url, tmpfile.read())
        response = urllib2.urlopen(req)
    except urllib2.URLError, e:
        print "Failed URL", url
        print "Error %s" % (e.read())
        sys.exit(0)
def plot_tree():
    content = request.get_json(force=True)
    tree = content['tree_file']
    #tree_type = content['tree_type']
    #file_type = content['file_type']
    ID_num = int(content['node_num'])
    feature_table = content['feature_table']
    taxo_file = content['taxonomy_file']
    metadata = content['metadata']
    #tree = circular_tree.read_tree(tree_file, file_type)
    try:
        with open('MVP/pickles/' + metadata.split('/')[-1] + '_mvp_tree.pickle', 'rb') as f:
            mvp_tree = pickle.load(f)
        print('read mvp_tree from pickle')
    except Exception:
        mvp_tree = corr_tree_new.MvpTree(feature_table, tree, metadata, taxo_file, ID_num)
        file_paras = {
            'feature_table': feature_table,
            'metadata': metadata,
            'taxonomy': taxo_file,
            'tree': tree
        }
        with open('MVP/pickles/files.pickle', 'wb') as f:
            pickle.dump(file_paras, f)
        with open('MVP/pickles/' + metadata.split('/')[-1] + '_mvp_tree.pickle', 'wb') as g:
            pickle.dump(mvp_tree, g)
        print('write mvp_tree to pickle')
    mvp_tree.get_subtree(ID_num)
    cols = [ele.name for ele in mvp_tree.subtree.get_terminals()]
    # plot annotation
    ann = annotation.Annotation(cols, feature_table, taxo_file)
    ann_div = ann.plot_annotation()
    with open('MVP/pickles/' + taxo_file.split('/')[-1] + '_annotation.pickle', 'wb') as f:
        pickle.dump(ann, f)
    mvp_tree.get_colors(ann.colors, ann.mapped_phylum_colors)
    tree_div = mvp_tree.plot_tree()
    # plot heatmap
    features = [content['feature0'], content['feature1'], content['feature2']]
    try:
        with open('MVP/pickles/' + metadata.split('/')[-1] + '_heatmap.pickle', 'rb') as f:
            heatmap_instance = pickle.load(f)
        print('read heatmap from pickle')
    except Exception:
        heatmap_instance = heatmap.Heatmap(metadata, feature_table)
        heatmap_instance.map()
        with open('MVP/pickles/' + metadata.split('/')[-1] + '_heatmap.pickle', 'wb') as g:
            pickle.dump(heatmap_instance, g)
        print('write heatmap to pickle')
    heatmap_instance.sort_by_features(features[0], features[1], features[2])
    heatmap_instance.obtain_numerical_matrix(cols)
    # show metadata labels beside the heatmap or not
    show_label = content['show_label'] == 'show'
    heatmap_div = heatmap_instance.plotly_div(show_label)
    # total
    result = {0: tree_div, 1: ann_div, 2: heatmap_div}
    return jsonify(result)
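The read-from-pickle-or-rebuild pattern recurs in plot_PCA, plot_ecology_scatters, and plot_tree above. A minimal sketch of how it could be factored into one helper; the name load_or_build and its signature are hypothetical, not from the source:

import os
import pickle

def load_or_build(path, build):
    """Hypothetical helper: unpickle `path` if it exists, otherwise call
    `build()` to create the object and cache it at `path`."""
    if os.path.exists(path):
        with open(path, 'rb') as f:
            return pickle.load(f)
    obj = build()
    with open(path, 'wb') as f:
        pickle.dump(obj, f)
    return obj

# e.g. mvp_tree = load_or_build(
#     'MVP/pickles/' + metadata.split('/')[-1] + '_mvp_tree.pickle',
#     lambda: corr_tree_new.MvpTree(feature_table, tree, metadata, taxo_file, ID_num))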
def process(self, resolution, startid):
    """Build the hierarchy of annotations"""
    # Get the source database sizes
    [ximagesz, yimagesz] = self.proj.datasetcfg.imagesz[resolution]
    [xcubedim, ycubedim, zcubedim] = self.proj.datasetcfg.cubedim[resolution]
    # Get the slices
    [startslice, endslice] = self.proj.datasetcfg.slicerange
    slices = endslice - startslice + 1
    # Set the limits for iteration on the number of cubes in each dimension
    xlimit = (ximagesz - 1) / xcubedim + 1
    ylimit = (yimagesz - 1) / ycubedim + 1
    # Round up the zlimit to the next larger multiple of zcubedim
    zlimit = (((slices - 1) / zcubedim + 1) * zcubedim) / zcubedim
    stride = 16
    # iterate over all cubes
    #for z in range(startslice, endslice, zcubedim):
    for z in range(startslice + 16, endslice, zcubedim):
        for y in range(0, ylimit, stride):
            for x in range(0, xlimit, stride):
                # cutout a cube +/- 256 pixels in x and y, +/- 8 pixels in z
                xlow = max(0, x * xcubedim - 256)
                ylow = max(0, y * ycubedim - 256)
                zlow = max(0, z - 8)
                xhigh = min(ximagesz, (x + stride) * xcubedim + 256)
                yhigh = min(yimagesz, (y + stride) * ycubedim + 256)
                zhigh = min(endslice - 1, z + zcubedim + 8)
                # perform the cutout
                cube = self.annoDB.cutout([xlow, ylow, zlow], [xhigh - xlow, yhigh - ylow, zhigh - zlow], resolution)
                nzoffs = np.nonzero(cube.data)
                # no elements?
                if len(nzoffs[0]) == 0:
                    print "No data at {}".format((x, y, z))
                    continue
                # Points are in xyz, voxels in zyx
                points = zip(nzoffs[2] + xlow, nzoffs[1] + ylow, nzoffs[0] + zlow + startslice)
                # for all points in the interior, grow the region
                while len(points) != 0:
                    # grab the first PSD
                    seed = None
                    for pt in points:
                        if self.inrange(pt, (x * xcubedim, y * ycubedim, z), (stride * xcubedim, stride * ycubedim, zcubedim)):
                            # only points that are not relabeled
                            if cube.data[pt[2] - zlow - startslice, pt[1] - ylow, pt[0] - xlow] < 100:
                                seed = pt
                                break
                    if seed == None:
                        break
                    # Assume a non-intersecting bounding box around a PSD of at least 16 pixels in x and y and 2 in z
                    bbxlow = bbxhigh = seed[0]
                    bbylow = bbyhigh = seed[1]
                    bbzlow = bbzhigh = seed[2]
                    inpoints = []
                    # loop control variable: terminate when no new points were added
                    somepoints = True
                    while somepoints:
                        outpoints = []
                        somepoints = False
                        for pt in points:
                            # put points in or out of the PSD
                            if pt[0] < bbxlow - 8 or pt[0] > bbxhigh + 8 or pt[1] < bbylow - 8 or pt[1] > bbyhigh + 8 or pt[2] < bbzlow - 2 or pt[2] > bbzhigh + 2:
                                outpoints.append(pt)
                            else:
                                inpoints.append(pt)
                                somepoints = True
                                # update the bounding box
                                bbxlow = min(bbxlow, pt[0])
                                bbxhigh = max(bbxhigh, pt[0])
                                bbylow = min(bbylow, pt[1])
                                bbyhigh = max(bbyhigh, pt[1])
                                bbzlow = min(bbzlow, pt[2])
                                bbzhigh = max(bbzhigh, pt[2])
                        points = outpoints
                    print "Found PSD id {} of length {}".format(startid, len(inpoints))
                    # annotate the new object
                    nppoints = np.array(inpoints, dtype=np.uint32)
                    self.annoDB.annotate(startid, resolution, nppoints)
                    # create the RAMON object
                    anno = annotation.Annotation()
                    anno.annid = startid
                    anno.kvpairs['vast_type'] = 'PSD'
                    anno.store(self.annoDB)
                    self.annoDB.commit()
                    startid += 1
def H5toAnnotation(key, idgrp, annodb, ch):
    """Return an annotation constructed from the contents of this HDF5 file"""
    # get the annotation type
    if 'ANNOTATION_TYPE' in idgrp:
        annotype = idgrp['ANNOTATION_TYPE'][0]
    else:
        annotype = annotation.ANNO_ANNOTATION
    # And get the metadata group
    mdgrp = idgrp.get('METADATA')
    if annotype == annotation.ANNO_SEED:
        anno = annotation.AnnSeed(annodb, ch)
        # load the seed-specific metadata if it exists
        if mdgrp:
            if 'PARENT' in mdgrp:
                anno.parent = mdgrp['PARENT'][0]
            if 'POSITION' in mdgrp:
                anno.position = mdgrp['POSITION'][:]
            if 'CUBE_LOCATION' in mdgrp:
                anno.cubelocation = mdgrp['CUBE_LOCATION'][0]
            if 'SOURCE' in mdgrp:
                anno.source = mdgrp['SOURCE'][0]
    elif annotype == annotation.ANNO_SYNAPSE:
        anno = annotation.AnnSynapse(annodb, ch)
        # load the synapse-specific metadata if it exists
        if mdgrp:
            if 'SYNAPSE_TYPE' in mdgrp:
                anno.synapse_type = mdgrp['SYNAPSE_TYPE'][0]
            if 'WEIGHT' in mdgrp:
                anno.weight = mdgrp['WEIGHT'][0]
            if 'SEEDS' in mdgrp:
                anno.seeds = mdgrp['SEEDS'][:]
            if 'SEGMENTS' in mdgrp:
                anno.segments = mdgrp['SEGMENTS'][:]
            if 'PRESEGMENTS' in mdgrp:
                anno.presegments = mdgrp['PRESEGMENTS'][:]
            if 'POSTSEGMENTS' in mdgrp:
                anno.postsegments = mdgrp['POSTSEGMENTS'][:]
    elif annotype == annotation.ANNO_SEGMENT:
        anno = annotation.AnnSegment(annodb, ch)
        # load the segment-specific metadata if it exists
        if mdgrp:
            if mdgrp.get('PARENTSEED'):
                anno.parentseed = mdgrp['PARENTSEED'][0]
            if mdgrp.get('SEGMENTCLASS'):
                anno.segmentclass = mdgrp['SEGMENTCLASS'][0]
            if mdgrp.get('NEURON'):
                anno.neuron = mdgrp['NEURON'][0]
            if mdgrp.get('SYNAPSES') and len(mdgrp['SYNAPSES']) != 0:
                anno.synapses = mdgrp['SYNAPSES'][:]
            if mdgrp.get('ORGANELLES') and len(mdgrp['ORGANELLES']) != 0:
                anno.organelles = mdgrp['ORGANELLES'][:]
    elif annotype == annotation.ANNO_NEURON:
        anno = annotation.AnnNeuron(annodb, ch)
        # load the neuron-specific metadata if it exists
        if mdgrp:
            if mdgrp.get('SEGMENTS') and len(mdgrp['SEGMENTS']) != 0:
                anno.segments = mdgrp['SEGMENTS'][:]
    elif annotype == annotation.ANNO_ORGANELLE:
        anno = annotation.AnnOrganelle(annodb, ch)
        # load the organelle-specific metadata if it exists
        if mdgrp:
            if mdgrp.get('PARENTSEED'):
                anno.parentseed = mdgrp['PARENTSEED'][0]
            if mdgrp.get('ORGANELLECLASS'):
                anno.organelleclass = mdgrp['ORGANELLECLASS'][0]
            if mdgrp.get('SEEDS') and len(mdgrp['SEEDS']) != 0:
                anno.seeds = mdgrp['SEEDS'][:]
            if mdgrp.get('CENTROID'):
                anno.centroid = mdgrp['CENTROID'][:]
    elif annotype == annotation.ANNO_NODE:
        anno = annotation.AnnNode(annodb, ch)
        # load the node-specific metadata if it exists
        if mdgrp:
            if 'NODETYPE' in mdgrp:
                anno.nodetype = mdgrp['NODETYPE'][0]
            if 'PARENTID' in mdgrp:
                anno.parentid = mdgrp['PARENTID'][0]
            if 'SKELETONID' in mdgrp:
                anno.skeletonid = mdgrp['SKELETONID'][0]
            if 'RADIUS' in mdgrp:
                anno.radius = mdgrp['RADIUS'][0]
            if mdgrp.get('CHILDREN') and len(mdgrp['CHILDREN']) != 0:
                anno.children = mdgrp['CHILDREN'][:]
            if mdgrp.get('LOCATION'):
                anno.location = mdgrp['LOCATION'][:]
    elif annotype == annotation.ANNO_SKELETON:
        anno = annotation.AnnSkeleton(annodb, ch)
        # load the skeleton-specific metadata if it exists
        if mdgrp:
            if 'SKELETONTYPE' in mdgrp:
                anno.skeletontype = mdgrp['SKELETONTYPE'][0]
            if 'ROOTNODE' in mdgrp:
                anno.rootnode = mdgrp['ROOTNODE'][0]
    elif annotype == annotation.ANNO_ROI:
        anno = annotation.AnnROI(annodb, ch)
        # load the ROI-specific metadata if it exists
        if mdgrp:
            if 'PARENT' in mdgrp:
                anno.parent = mdgrp['PARENT'][0]
    # No special action if it's a no type
    elif annotype == annotation.ANNO_ANNOTATION:
        # Just create a generic annotation object
        anno = annotation.Annotation(annodb, ch)
    else:
        logger.warning("Do not support this annotation type yet. Type = %s" % annotype)
        raise NDWSError("Do not support this annotation type yet. Type = %s" % annotype)
    # now load the annotation common fields
    if re.match("^\d+$", key):
        anno.annid = int(key)
    else:
        anno.annid = 0
    if mdgrp:
        # now load the metadata common fields
        if mdgrp.get('STATUS'):
            anno.status = mdgrp['STATUS'][0]
        if mdgrp.get('CONFIDENCE'):
            anno.confidence = mdgrp['CONFIDENCE'][0]
        if mdgrp.get('AUTHOR'):
            anno.author = mdgrp['AUTHOR'][0]
        # and the key/value pairs
        if mdgrp.get('KVPAIRS'):
            fstring = cStringIO.StringIO(mdgrp['KVPAIRS'][0])
            csvr = csv.reader(fstring, delimiter=',')
            for r in csvr:
                anno.kvpairs[r[0]] = r[1]
    return anno
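The per-type blocks above repeat the same "if the HDF5 dataset exists, copy element 0 (or the whole array)" pattern. A minimal sketch of how it could be factored; the helper names _scalar and _array are hypothetical, not part of the source:

# Hypothetical helpers for the repeated HDF5 metadata reads above.
def _scalar(mdgrp, name, default=None):
    """Return element 0 of dataset `name` if present, else `default`."""
    return mdgrp[name][0] if name in mdgrp else default

def _array(mdgrp, name, default=None):
    """Return dataset `name` as a full array if present and non-empty."""
    if name in mdgrp and len(mdgrp[name]) != 0:
        return mdgrp[name][:]
    return default

# e.g. anno.weight = _scalar(mdgrp, 'WEIGHT', anno.weight)
#      anno.segments = _array(mdgrp, 'SEGMENTS', anno.segments)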
def create_annotation(self, event):
    """Handle annotation additions to the map."""
    # create a new Annotation object at the event coordinates, with empty text
    new_annotation = annotation.Annotation(event.x, event.y, "")
    # pass the object to the AnnotationEditor
    annotation.AnnotationEditor(new_annotation)
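A hedged usage sketch: if the map is a Tkinter widget (an assumption; the source does not say which GUI toolkit is in use), the handler would typically be wired to a mouse click like this:

# Assumption: the map is a Tkinter canvas held in self.canvas; this binding
# is illustrative, not taken from the source.
# self.canvas.bind("<Button-1>", self.create_annotation)
# A left-click then delivers an event whose .x/.y pixel coordinates are
# forwarded to annotation.Annotation(x, y, "") for editing.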